id stringlengths 2 8 | text stringlengths 16 264k | dataset_id stringclasses 1 value |
|---|---|---|
9690358 | """*************************************************************************
* *
* Copyright (C) <NAME> - All Rights Reserved. *
* *
*************************************************************************"""
#**************************************************************************
#! @file ui.py
# @brief UI handler of the Reader
#**************************************************************************
#!/usr/bin/env python3
import cms50v45
import cms50v46
import utils
import config
import datetime
import math
from enum import Enum
from functools import partial
from threading import Thread, Lock
from Qtpy.Qt import QtCore, QtGui, QtWidgets
class ReaderEvent(Enum):
    # User-triggered events fed from the UI thread to the reader thread.
    APNEA = 0        # breath-hold started
    CONTRACTION = 1  # contraction marker during a hold
    BREATHE = 2      # breathing resumed / hold ended
    END = 3          # sentinel: "no event / idle" state
class OximeterVersion(Enum):
    # Oximeter firmware/protocol version; order must match the UI's
    # versionCombo items ('v4.5' at index 0, 'v4.6' at index 1).
    FOURFIVE = 0  # CMS50D v4.5 protocol (cms50v45 driver)
    FOURSIX = 1   # CMS50D v4.6 protocol (cms50v46 driver)
    END = 3       # NOTE(review): looks copy-pasted from ReaderEvent (gap at 2) -- unused sentinel?
""" Reader UI Updater in a separate thread """
class ReaderUIUpdater(Thread):
    """Worker thread that polls the oximeter driver and redraws the Reader UI.

    NOTE(review): widgets and QImages owned by the main window are updated
    directly from this thread; Qt generally requires GUI access from the
    main thread only (signals/slots) -- confirm this is safe with the
    bindings in use.
    """
    def __init__(self, ui, port, version):
        """Connect to the oximeter on `port` and set up drawing state.

        ui      -- the ReaderUI window (its images, labels and fields are used)
        port    -- serial port name to connect to
        version -- OximeterVersion selecting the v4.5 or v4.6 driver
        """
        Thread.__init__(self)
        # Connect to the oximeter
        self.ui = ui
        if version == OximeterVersion.FOURFIVE:
            self.oximeter = cms50v45.CMS50DDriver()
        else:
            self.oximeter = cms50v46.CMS50DDriver()
        self.oximeter.connect(port)
        # The run() loop keeps going only while this is True; the UI thread
        # clears it to request a stop (see ReaderUI.stopThread).
        self.threadActive = self.oximeter.isConnected()
        self.eventLock = Lock()  # guards self.events (fed from the UI thread)
        # reset images
        self.ui.pulseImage.fill(config.dfltBkgColor)
        self.ui.bpmImage.fill(config.dfltBkgColor)
        # Config & internal var
        self.events = []          # pending ReaderEvents, drained in consumeEvent()
        self.apneaTime = None     # wall-clock start of the current breath-hold
        self.apneaStatus = ReaderEvent.END  # END means "no hold in progress"
        self.pulseMaxValue = 100  # full-scale value of the pulse waveform
        self.bpmMaxValue = 127    # y-axis full scale for bpm
        self.o2MaxValue = 127     # y-axis full scale for SpO2
        self.pulseFrequency = 2  # how many samples we skip
        self.bpmFrequency = 60  # based on quick calculation, the oxymeter runs at 60hz
        # Samples consumed per bpm-chart pixel so the chart spans exactly
        # minuteField minutes across the image width.
        self.updateRate = int(self.bpmFrequency / (self.ui.bpmImage.width() / (int(self.ui.minuteField.value()) * 60)))
        self.drawBpmLines()
        self.previousYPulse = 0  # previous pulse y pixel, used to join samples
    """ Update the pulse images """
    def updatePulseImage(self, iSample, liveDataSample):
        """Draw one pulse-waveform sample; x wraps around the image width."""
        pulseValue = min(liveDataSample[3], self.pulseMaxValue)
        pulseXPixel = iSample % self.ui.pulseImage.width()
        pulseYPixel = int(pulseValue / self.pulseMaxValue * self.ui.pulseImage.height())
        #pixelColor = QtGui.QColor()
        #pixelColor.setHsl(pulseYPixel / self.ui.pulseImage.height() * 255, 255, 127)
        pixelColor = config.pulseColor
        # Vertical stretch joining this sample to the previous one so the
        # curve looks continuous.
        lineShift = int((pulseYPixel - self.previousYPulse) / 2)
        # clean pulse image: erase a band ahead of the drawing position
        bandWidth = int(self.ui.pulseImage.width() * 0.2)
        utils.drawBox(self.ui.pulseImage, pulseXPixel + bandWidth / 2, self.ui.pulseImage.height() / 2, bandWidth, self.ui.pulseImage.height(), config.dfltBkgColor)
        # update pulse image (y axis is flipped: value 0 is at the bottom)
        utils.drawBox(self.ui.pulseImage, pulseXPixel, self.ui.pulseImage.height() - pulseYPixel - 1 + lineShift, config.curvePixelSize, config.curvePixelSize + abs(lineShift * 2), pixelColor)
        self.ui.pulseImageHolder.setPixmap(QtGui.QPixmap.fromImage(self.ui.pulseImage))
        self.previousYPulse = pulseYPixel
    """ Update the bpm image """
    def updateBpmImage(self, iSample, liveDataSample):
        """Plot one bpm sample and one SpO2 sample at column iSample."""
        bpmValue = min(liveDataSample[1], self.bpmMaxValue)
        bpmXPixel = iSample
        bpmYPixel = int(bpmValue / self.bpmMaxValue * self.ui.bpmImage.height())
        o2Value = min(liveDataSample[2], self.o2MaxValue)
        o2XPixel = iSample
        o2YPixel = int(o2Value / self.o2MaxValue * self.ui.bpmImage.height())
        # draw pixel (y axis flipped: value 0 at the bottom)
        utils.drawBox(self.ui.bpmImage, bpmXPixel, self.ui.bpmImage.height() - bpmYPixel - 1, config.curvePixelSize, config.curvePixelSize, config.bmpColor)
        utils.drawBox(self.ui.bpmImage, o2XPixel, self.ui.bpmImage.height() - o2YPixel - 1, config.curvePixelSize, config.curvePixelSize, config.o2Color)
        self.ui.bpmImageHolder.setPixmap(QtGui.QPixmap.fromImage(self.ui.bpmImage))
    """ Update the bpm image with an event line """
    def drawLineBpmImage(self, iSample, color):
        """Draw a full-height vertical event marker at column iSample."""
        utils.drawBox(self.ui.bpmImage, iSample, self.ui.bpmImage.height() / 2, config.curvePixelSize + 2, self.ui.bpmImage.height(), color)
    """ Draw the time cols """
    def drawTimeCols(self, iSample):
        """Draw vertical time gridlines from column iSample to the right edge."""
        # Pixels between gridlines for every config.timeColFrequency seconds.
        colSampleSize = math.ceil(self.ui.bpmImage.width() / (self.ui.minuteField.value() * 60) * config.timeColFrequency)
        for iGrid in range(iSample, self.ui.bpmImage.width(), colSampleSize):
            utils.drawBox(self.ui.bpmImage, iGrid, self.ui.bpmImage.height() / 2, 1, self.ui.bpmImage.height(), config.gridColColor)
    """ Draw the bpm lines """
    def drawBpmLines(self):
        """Draw the horizontal bpm gridlines, plus a highlighted 100 line."""
        lineSampleSize = math.ceil(self.ui.bpmImage.height() / self.bpmMaxValue * config.bpmLineFrequency)
        for iGrid in range(0, self.ui.bpmImage.height(), lineSampleSize):
            utils.drawBox(self.ui.bpmImage, self.ui.bpmImage.width() / 2, self.ui.bpmImage.height() - iGrid -1, self.ui.bpmImage.width(), 1, config.gridLineColor)
        # draw a specific line for the mark 100
        lineSamplePixel = math.ceil(self.ui.bpmImage.height() / self.bpmMaxValue * 100)
        utils.drawBox(self.ui.bpmImage, self.ui.bpmImage.width() / 2, self.ui.bpmImage.height() - lineSamplePixel - 1, self.ui.bpmImage.width(), 1, config.gridLine100Color)
    """ Update time """
    def updateTimer(self):
        """Refresh the elapsed-hold label: MM:SS while holding, '--' when idle."""
        if self.apneaStatus == ReaderEvent.APNEA:
            deltaDatetime = datetime.datetime.now() - self.apneaTime
            # Convert the timedelta into a time-of-day so strftime can format it.
            deltaTime = (datetime.datetime.min + deltaDatetime).time()
            self.ui.timeValueLabel.setText(deltaTime.strftime('%M:%S'))
        elif self.apneaStatus == ReaderEvent.END:
            self.ui.timeValueLabel.setText('--')
    """ check the oximeter status and update the ui """
    def checkOximeterStatus(self):
        """Sync footer text and buttons with the connection state.

        Returns True while the device is connected and no stop was requested.
        """
        if self.oximeter.isConnected() is True:
            if self.threadActive is True:
                self.ui.footerLabel.setText('Oximeter Status: Connected')
                self.ui.refreshApneaUI(True)
                return True
            else:
                # threadActive was cleared by the UI: the user asked to disconnect.
                self.ui.footerLabel.setText('Oximeter Status: Not Connected (Manual deconnection)')
                self.oximeter.disconnect()
                self.ui.refreshApneaUI(False)
                return False
        # The device itself dropped the connection.
        self.ui.footerLabel.setText('Oximeter Status: Not Connected (No package sent)')
        self.ui.refreshApneaUI(False)
        return False
    """ thread safe event feeder """
    def feedEvent(self, event):
        """Queue a ReaderEvent from the UI thread (thread safe)."""
        self.eventLock.acquire()
        self.events.append(event)
        self.eventLock.release()
    """ thread sage event consumer """
    def consumeEvent(self, iSample):
        """Drain all queued events, drawing their markers at column iSample."""
        self.eventLock.acquire()
        for event in self.events:
            if event == ReaderEvent.APNEA:
                self.apneaTime = datetime.datetime.now()
                self.drawLineBpmImage(iSample, config.apneaColor)
                # Re-anchor the time gridlines at the start of the hold.
                self.drawTimeCols(iSample)
                self.apneaStatus = ReaderEvent.APNEA
            elif event == ReaderEvent.CONTRACTION:
                self.drawLineBpmImage(iSample, config.contractionColor)
            elif event == ReaderEvent.BREATHE:
                self.drawLineBpmImage(iSample, config.breatheColor)
                self.apneaStatus = ReaderEvent.BREATHE
        self.events.clear()
        self.eventLock.release()
    """ Main thread run, read the packet loop """
    def run(self):
        """Main loop: read live samples and update the UI until stopped."""
        iSample = 0
        for liveData in self.oximeter.getLiveData():
            # print(liveData)
            if self.checkOximeterStatus() is False:
                return
            liveDataSample = liveData.getCsvData()
            self.ui.bpmValueLabel.setText(str(liveDataSample[1]))
            self.ui.o2ValueLabel.setText(str(liveDataSample[2]) + '%')
            # The pulse waveform advances every pulseFrequency-th sample...
            if iSample % self.pulseFrequency == 0:
                self.updatePulseImage(int(iSample / self.pulseFrequency), liveDataSample)
            # ...while the bpm/SpO2 chart advances one column per updateRate samples.
            if iSample % self.updateRate == 0:
                self.consumeEvent(int(iSample / self.updateRate))
                self.updateBpmImage(int(iSample / self.updateRate), liveDataSample)
            self.updateTimer()
            iSample += 1
        self.oximeter.disconnect()
        self.checkOximeterStatus()
""" Main QT Application """
class ReaderUI(QtWidgets.QMainWindow):
    """Main application window for the oximeter reader.

    Hosts the connection bar, event buttons, the pulse and bpm/SpO2 curve
    images, the numeric readouts and a status footer. While connected, a
    ReaderUIUpdater thread (self.readThread) feeds these widgets.
    """
    def __init__(self):
        """Build the entire widget tree and wire the button handlers."""
        # Main window setup
        QtWidgets.QMainWindow.__init__(self)
        self.setMinimumSize(QtCore.QSize(400, 100))
        self.setWindowTitle('OximeterReader v0.0.1')
        self.setWindowIcon(QtGui.QIcon(utils.getIconsDir() + "oxygen.png"))
        textFont = QtGui.QFont("Arial", 15, QtGui.QFont.Bold)
        # Captured on the first paint; resizeEvent uses it to grow the images
        # by however much the window grew.
        self.windowSize = None
        self.bmpImageSize = QtCore.QSize(config.widthBpmCurveImage, config.heightImages)
        # Main Layout
        centralWidget = QtWidgets.QWidget()
        self.setCentralWidget(centralWidget)
        centralLayout = QtWidgets.QGridLayout(centralWidget)
        iLine = 0  # next free row in the central grid layout
        # Connect row: serial port, protocol version, chart duration, buttons
        connectWidget = QtWidgets.QWidget()
        connectLayout = QtWidgets.QHBoxLayout(connectWidget)
        connectLayout.addStretch(1)
        portLabel = QtWidgets.QLabel('Ports:')
        portLabel.setFixedWidth(30)
        self.portCombo = QtWidgets.QComboBox()
        self.portCombo.setFixedWidth(100)
        self.versionCombo = QtWidgets.QComboBox()
        self.versionCombo.setFixedWidth(50)
        # Item order must match OximeterVersion (0 -> v4.5, 1 -> v4.6).
        self.versionCombo.addItem('v4.5')
        self.versionCombo.addItem('v4.6')
        # Chart duration in minutes (x-axis span of the bpm image).
        self.minuteField = QtWidgets.QSpinBox()
        self.minuteField.setRange(1, 15)
        self.minuteField.setValue(config.dfltMinutes)
        self.minuteField.setFixedWidth(50)
        minuteLabel = QtWidgets.QLabel('min')
        minuteLabel.setFixedWidth(20)
        self.refreshButton = QtWidgets.QPushButton()
        self.refreshButton.setIcon(QtGui.QIcon(utils.getIconsDir() + 'refresh.png'))
        self.refreshButton.setFixedWidth(25)
        self.connectButton = QtWidgets.QPushButton()
        self.connectButton.setIcon(QtGui.QIcon(utils.getIconsDir() + 'connect.png'))
        self.connectButton.setFixedWidth(25)
        self.disconnectButton = QtWidgets.QPushButton()
        self.disconnectButton.setIcon(QtGui.QIcon(utils.getIconsDir() + 'disconnect.png'))
        self.disconnectButton.setFixedWidth(25)
        connectLayout.addWidget(portLabel)
        connectLayout.addWidget(self.refreshButton)
        connectLayout.addWidget(self.portCombo)
        connectLayout.addWidget(self.versionCombo)
        connectLayout.addWidget(self.minuteField)
        connectLayout.addWidget(minuteLabel)
        connectLayout.addWidget(self.connectButton)
        connectLayout.addWidget(self.disconnectButton)
        centralLayout.addWidget(connectWidget, iLine, 0, QtCore.Qt.AlignLeft)
        iLine += 1
        # Bottom area: control buttons, curve images, numeric readouts
        bottomWidget = QtWidgets.QWidget()
        bottomLayout = QtWidgets.QGridLayout(bottomWidget)
        # hold / spasm / breathe / reset buttons
        controlWidget = QtWidgets.QWidget()
        controlLayout = QtWidgets.QVBoxLayout(controlWidget)
        controlLayout.addStretch(1)
        self.apneaButton = QtWidgets.QPushButton('Hold')
        utils.setBorderColor(self.apneaButton, config.apneaColor)
        self.contractionButton = QtWidgets.QPushButton('Contraction')
        utils.setBorderColor(self.contractionButton, config.contractionColor)
        self.breatheButton = QtWidgets.QPushButton('Breathe')
        utils.setBorderColor(self.breatheButton, config.breatheColor)
        self.resetButton = QtWidgets.QPushButton('Reset')
        controlLayout.addWidget(self.apneaButton)
        controlLayout.addWidget(self.contractionButton)
        controlLayout.addWidget(self.breatheButton)
        controlLayout.addWidget(self.resetButton)
        bottomLayout.addWidget(controlWidget, 0, 0, QtCore.Qt.AlignTop)
        # pulse curve image (drawn into directly by the reader thread)
        self.pulseImage = QtGui.QImage(config.widthPulseImage, self.bmpImageSize.height(), QtGui.QImage.Format_RGB32)
        self.pulseImage.fill(config.dfltBkgColor)
        self.pulseImageHolder = QtWidgets.QLabel()
        self.pulseImageHolder.setPixmap(QtGui.QPixmap.fromImage(self.pulseImage))
        bottomLayout.addWidget(self.pulseImageHolder, 0, 1)
        # o2 / bpm chart image
        self.bpmImage = QtGui.QImage(self.bmpImageSize, QtGui.QImage.Format_RGB32)
        self.bpmImage.fill(config.dfltBkgColor)
        self.bpmImageHolder = QtWidgets.QLabel()
        self.bpmImageHolder.setPixmap(QtGui.QPixmap.fromImage(self.bpmImage))
        bottomLayout.addWidget(self.bpmImageHolder, 0, 2)
        # o2 / bpm / timer numeric labels with icons
        dataWidget = QtWidgets.QWidget()
        dataLayout = QtWidgets.QGridLayout(dataWidget)
        bmpIcon = QtWidgets.QLabel()
        bmpIcon.setPixmap(QtGui.QPixmap(utils.getIconsDir() + 'heart.png'))
        o2Icon = QtWidgets.QLabel()
        o2Icon.setPixmap(QtGui.QPixmap(utils.getIconsDir() + 'oxygen.png'))
        timeIcon = QtWidgets.QLabel()
        timeIcon.setPixmap(QtGui.QPixmap(utils.getIconsDir() + 'time.png'))
        self.bpmValueLabel = QtWidgets.QLabel()
        self.o2ValueLabel = QtWidgets.QLabel()
        self.timeValueLabel = QtWidgets.QLabel()
        self.bpmValueLabel.setFixedWidth(45)
        self.o2ValueLabel.setFixedWidth(45)
        self.timeValueLabel.setFixedWidth(60)
        self.bpmValueLabel.setFont(textFont)
        self.o2ValueLabel.setFont(textFont)
        self.timeValueLabel.setFont(textFont)
        dataLayout.addWidget(bmpIcon, 0, 0)
        dataLayout.addWidget(o2Icon, 1, 0)
        dataLayout.addWidget(timeIcon, 2, 0)
        dataLayout.addWidget(self.bpmValueLabel, 0, 1)
        dataLayout.addWidget(self.o2ValueLabel, 1, 1)
        dataLayout.addWidget(self.timeValueLabel, 2, 1)
        bottomLayout.addWidget(dataWidget, 0, 3)
        # add bottom area
        centralLayout.addWidget(bottomWidget, iLine, 0)
        iLine += 1
        # Footer status line
        self.footerLabel = QtWidgets.QLabel('Oximeter Status: Not Connected')
        centralLayout.addWidget(self.footerLabel, iLine, 0)
        iLine += 1
        # Reader thread handle (None while disconnected)
        self.readThread = None
        # Connect UI signals to handlers
        self.refreshButton.clicked.connect(self.refreshSerialPorts)
        self.connectButton.clicked.connect(self.startThread)
        self.disconnectButton.clicked.connect(self.stopThread)
        self.resetButton.clicked.connect(self.resetThread)
        self.apneaButton.clicked.connect(partial(self.sendEvent, ReaderEvent.APNEA))
        self.contractionButton.clicked.connect(partial(self.sendEvent, ReaderEvent.CONTRACTION))
        self.breatheButton.clicked.connect(partial(self.sendEvent, ReaderEvent.BREATHE))
        # refresh UI
        self.refreshUI()
    # Close event
    def closeEvent(self, event):
        """Stop the reader thread before letting the window close."""
        self.stopThread()
        event.accept()
    # Paint Event
    def paintEvent(self, event):
        """Capture the initial window size once and lock it as the minimum."""
        # get window size
        if self.windowSize is None:
            self.windowSize = self.size()
            self.setMinimumSize(self.windowSize)
    # Resize Event
    def resizeEvent(self, event):
        """Rebuild the curve images to fit the new window size (idle only)."""
        if self.threadIsActive() is False and self.windowSize is not None:
            # Grow the bpm image by however much the window grew.
            self.bpmImage = QtGui.QImage(self.bmpImageSize + self.size() - self.windowSize, QtGui.QImage.Format_RGB32)
            self.bpmImage.fill(config.dfltBkgColor)
            self.bpmImageHolder.setPixmap(QtGui.QPixmap.fromImage(self.bpmImage))
            # Pulse image keeps its width but follows the bpm image height.
            self.pulseImage = QtGui.QImage(config.widthPulseImage, self.bpmImage.height(), QtGui.QImage.Format_RGB32)
            self.pulseImage.fill(config.dfltBkgColor)
            self.pulseImageHolder.setPixmap(QtGui.QPixmap.fromImage(self.pulseImage))
    def refreshUI(self):
        """Refresh the port list and disable the event buttons."""
        self.refreshSerialPorts()
        self.refreshApneaUI(False)
    def refreshSerialPorts(self):
        """Repopulate the serial-port combo box (only while disconnected)."""
        if self.threadIsActive() is False:
            self.portCombo.clear()
            ports = utils.listSerialPorts()
            for port in ports:
                self.portCombo.addItem(port)
    def refreshApneaUI(self, enable):
        """Enable/disable the event buttons; blank the readouts when disabled."""
        self.apneaButton.setEnabled(enable)
        self.contractionButton.setEnabled(enable)
        self.breatheButton.setEnabled(enable)
        self.resetButton.setEnabled(enable)
        if enable is False:
            self.bpmValueLabel.setText('--')
            self.o2ValueLabel.setText('--%')
            self.timeValueLabel.setText('--')
    def startThread(self):
        """Spawn a ReaderUIUpdater for the selected port and version."""
        if self.threadIsActive() is False:
            port = self.portCombo.currentText()
            version = OximeterVersion(self.versionCombo.currentIndex())
            self.readThread = ReaderUIUpdater(self, port, version)
            self.readThread.start()
    def stopThread(self):
        """Ask the reader thread to stop and wait for it to finish."""
        if self.threadIsActive() is True:
            self.readThread.threadActive = False
            self.readThread.join()
            self.readThread = None
    def resetThread(self):
        """Restart the reader thread (clears the charts)."""
        self.stopThread()
        self.startThread()
    def sendEvent(self, event):
        """Forward a ReaderEvent to the reader thread.

        NOTE(review): assumes a thread is running; the event buttons are
        disabled otherwise via refreshApneaUI, so this should hold.
        """
        self.readThread.feedEvent(event)
    def threadIsActive(self):
        """True while a reader thread exists and has not been asked to stop."""
        return (self.readThread is not None and self.readThread.threadActive is True)
| StarcoderdataPython |
5012185 | <gh_stars>1-10
import os.path
class AbstractUserMap (object):
    """ A UserMap is used to map an email address to a specific user's identity.

    This is necessary because sometimes users have configured git with
    different email addresses on different machines; as a result, we need to
    define a mapping so we can account all of the commits from each user
    together.

    A UserMap extending this interface need only implement one method, map.
    The map method takes one argument -- an email address -- and returns the
    username to associate with that address.
    """
    def map(self, email):
        """Return the username associated with `email`."""
        raise NotImplementedError()


class NullUserMap (AbstractUserMap):
    """ Performs no mapping from email to username -- the email is assumed to be
    the actual username.
    """
    def map(self, email):
        return email


class FileUserMap (AbstractUserMap):
    """ Loads a text file consisting of (email, username) pairs which define the
    mappings from emails to usernames.

    Each line holds an email address, a single space, then the username
    (which may itself contain spaces). Lines with no space are skipped.
    Unmapped emails resolve to the literal string "unknown".
    """
    def __init__(self, filename):
        """Load the mapping file.

        filename -- path to the map file; a leading ~ is expanded.
        """
        self.name_map = {}
        filename = os.path.expanduser(filename)
        with open(filename, 'r') as mapfile:  # avoid shadowing builtin names
            for line in mapfile:
                try:
                    # Split on the first space only: usernames may contain spaces.
                    email, name = line.split(" ", 1)
                except ValueError:
                    # Malformed line (no separator): skip it.
                    continue
                self.name_map[email.strip()] = name.strip()

    def map(self, email):
        """Return the username for `email`, or "unknown" if unmapped."""
        # dict.get is clearer than try/except KeyError for a simple default.
        return self.name_map.get(email, "unknown")
| StarcoderdataPython |
6484232 | <filename>src/examples/vision/start_recording_features.py<gh_stars>1-10
import time
import subprocess
import os
import signal
import RPi.GPIO as GPIO
from aiy.vision.leds import Leds, RgbLeds
# Shell command launching the actual feature-recording script as a child
# process; it is stopped later by signalling its whole process group.
aiy_command = 'python3 /home/pi/Repositories/aiyprojects-raspbian/src/' \
    'examples/vision/record_features.py'

# Push-button wired to BCM pin 23; the code below treats input == 0 as
# "pressed" (active low -- assumes appropriate pull-up wiring, TODO confirm).
GPIO.setmode(GPIO.BCM)
GPIO_switch = 23
GPIO.setup(GPIO_switch, GPIO.IN)

print("Ready.")
print("Press push-button to start a recording.")
print("Press push-button again to stop the recording.")
print("Repeat to record new video.")
print("Press Ctrl-C to quit program.")
print("")

# Pre-built LED states: green = idle, red = recording. __enter__ is invoked
# manually (instead of a `with` block) so the two states can be switched
# back and forth at will.
led_green = RgbLeds(Leds(), Leds.rgb_on((0, 255, 0)))
led_red = RgbLeds(Leds(), Leds.rgb_on((255, 0, 0)))
led_green.__enter__()

try:
    run = 0   # 1 while a recording subprocess is active
    p = None  # Popen handle of the recording subprocess
    while True:
        # Button pressed while idle: start a recording.
        if GPIO.input(GPIO_switch) == 0 and run == 0:
            print("Starting recording...")
            # os.setsid puts the child in its own process group so the whole
            # group can be signalled on stop.
            p = subprocess.Popen(aiy_command, shell=True,
                                 preexec_fn=os.setsid)
            led_red.__enter__()
            run = 1
            # Wait for button release (poll-based debounce).
            while GPIO.input(GPIO_switch) == 0:
                time.sleep(0.1)
        # Button pressed while recording: stop via SIGINT to the group.
        if GPIO.input(GPIO_switch) == 0 and run == 1:
            os.killpg(p.pid, signal.SIGINT)
            run = 0
            led_green.__enter__()
            print("Stopped current recording.")
            while GPIO.input(GPIO_switch) == 0:
                time.sleep(0.1)
except KeyboardInterrupt:
    print("Quit program.")
    # NOTE(review): reaches into a private attribute to switch the LEDs off.
    led_green._leds.update(Leds.rgb_off())
    GPIO.cleanup()
| StarcoderdataPython |
393624 | """
Given a singly linked list, determine if it is a palindrome.
Example 1:
Input: 1->2
Output: false
Example 2:
Input: 1->2->2->1
Output: true
Follow up:
Could you do it in O(n) time and O(1) space?
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Decide whether a singly linked list is a palindrome.

    O(n) time and O(1) extra space: the second half is reversed in place,
    compared against the first half, then reversed back so the caller's
    list is left exactly as it was given.
    """

    def isPalindrome(self, head):
        """
        :type head: ListNode
        :rtype: bool
        """
        # Zero or one node is trivially a palindrome.
        if head is None or head.next is None:
            return True

        def _reverse(node):
            # Iterative in-place reversal; returns the new head.
            prev = None
            while node is not None:
                nxt = node.next
                node.next = prev
                prev = node
                node = nxt
            return prev

        # Two-pointer scan: `fast` moves two nodes per step, so `slow` ends
        # on the last node of the first half when `fast` falls off the end.
        slow = head
        fast = head
        while True:
            fast = fast.next
            if fast is None:
                break
            fast = fast.next
            if fast is None:
                break
            slow = slow.next

        # Detach and reverse the second half.
        front = slow.next
        slow.next = None
        back = _reverse(front)

        # Walk both halves in lockstep until one runs out.
        ok = True
        a, b = head, back
        while a is not None and b is not None:
            if a.val != b.val:
                ok = False
                break
            a = a.next
            b = b.next

        # Undo the reversal so the input list is unchanged for the caller.
        slow.next = _reverse(back)
        return ok
| StarcoderdataPython |
1614219 | <gh_stars>100-1000
#!/usr/bin/env python
# coding=utf8
from __future__ import unicode_literals
from datetime import timedelta
import collections
import functools
import os
import re
import string
from io import StringIO
import pytest
from hypothesis import given, settings, HealthCheck, assume
import hypothesis.strategies as st
import srt
def REGISTER_SETTINGS(name, **kwargs):
    """Register a hypothesis settings profile.

    Every profile suppresses the too_slow health check and disables the
    per-example deadline; extra keyword arguments are forwarded to
    settings.register_profile. (PEP 8 E731: a def is preferred over
    assigning a lambda to a name.)
    """
    return settings.register_profile(
        name, suppress_health_check=[HealthCheck.too_slow], deadline=None, **kwargs
    )


REGISTER_SETTINGS("base")
REGISTER_SETTINGS("release", max_examples=1000)

# Select the active profile from $HYPOTHESIS_PROFILE (default: "base").
settings.load_profile(os.getenv("HYPOTHESIS_PROFILE", "base"))
HOURS_IN_DAY = 24
# Upper bound on timedelta(days=...) per the datetime module documentation.
TIMEDELTA_MAX_DAYS = 999999999
# Factory for a fixed throwaway subtitle where only `content` varies per call.
CONTENTLESS_SUB = functools.partial(
    srt.Subtitle, index=1, start=timedelta(seconds=1), end=timedelta(seconds=2)
)
def is_strictly_legal_content(content):
    """Return True if `content` is acceptable under strict mode.

    Strict mode rejects content that:
    - starts or ends with a newline/carriage return,
    - is entirely whitespace,
    - contains blank lines (consecutive newlines).
    """
    return (
        content.strip("\r\n") == content
        and bool(content.strip())
        and "\n\n" not in content
    )
def subs_eq(got, expected, any_order=False):
    """Assert that two iterables of Subtitle objects are equal.

    The comparison is done over vars() of each subtitle so that a failing
    assertion shows exactly which attributes differ. With any_order=True
    the comparison is multiset equality (ordering is ignored).
    """
    def fingerprint(subs):
        # Hashable snapshot of every subtitle's attribute dict.
        return [frozenset(vars(s).items()) for s in subs]

    got_fp = fingerprint(got)
    exp_fp = fingerprint(expected)
    if any_order:
        assert collections.Counter(got_fp) == collections.Counter(exp_fp)
    else:
        assert got_fp == exp_fp
def timedeltas(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
    """A Hypothesis strategy to generate timedeltas.

    Right now {min,max}_value are shoved into multiple fields in timedelta(),
    which is not very customisable, but it's good enough for our current test
    purposes. If you need more precise control, you may need to add more
    parameters to this function to be able to customise more freely.
    """
    field = st.integers(min_value=min_value, max_value=max_value)
    return st.builds(timedelta, hours=field, minutes=field, seconds=field)
def equivalent_timestamps(min_value=0, max_value=TIMEDELTA_MAX_DAYS):
    """Strategy producing pairs of textually different SRT timestamp strings
    that denote the same instant.

    The second timestamp's hour/minute/second fields may each be bumped by
    one; the first timestamp's millisecond field absorbs the difference so
    both strings parse to the same total time. Each field also receives a
    random amount of extra zero padding.
    """
    def string_timestamp(hours, minutes, seconds, msecs, paddings):
        # Render each field prefixed with its requested extra zero padding.
        hours, minutes, seconds, msecs = map(
            lambda v_and_p: "0" * v_and_p[1] + str(v_and_p[0]),
            zip((hours, minutes, seconds, msecs), paddings),
        )
        return "{}:{}:{},{}".format(hours, minutes, seconds, msecs)

    def ts_field_value():
        # Strategy for a single raw timestamp field value.
        return st.integers(min_value=min_value, max_value=max_value)

    def zero_padding():
        # Strategy for how many extra leading zeros a field gets (0-2).
        return st.integers(min_value=0, max_value=2)

    @st.composite
    def maybe_off_by_one_fields(draw):
        # Returns (field or field + 1, field).
        field = draw(ts_field_value())
        field_maybe_plus_one = draw(st.integers(min_value=field, max_value=field + 1))
        return field_maybe_plus_one, field

    def get_equiv_timestamps(h, m, s, ms2, ts1paddings, ts2paddings):
        h2, h1 = h
        m2, m1 = m
        s2, s1 = s
        # Choose ms1 so that h1:m1:s1,ms1 equals h2:m2:s2,ms2 in total time.
        ms1 = (
            (h2 - h1) * 60 * 60 * 1000 + (m2 - m1) * 60 * 1000 + (s2 - s1) * 1000 + ms2
        )
        return (
            string_timestamp(h2, m2, s2, ms2, ts2paddings),
            string_timestamp(h1, m1, s1, ms1, ts1paddings),
        )

    return st.builds(
        get_equiv_timestamps,
        maybe_off_by_one_fields(),
        maybe_off_by_one_fields(),
        maybe_off_by_one_fields(),
        ts_field_value(),
        st.tuples(*[zero_padding() for _ in range(4)]),
        st.tuples(*[zero_padding() for _ in range(4)]),
    )
def subtitles(strict=True):
    """A Hypothesis strategy to generate Subtitle objects.

    With strict=True the content is restricted to text that survives
    strict-mode composition (see is_strictly_legal_content).
    """
    # max_value settings are just to avoid overflowing TIMEDELTA_MAX_DAYS by
    # using arbitrary low enough numbers.
    #
    # We also skip subs with start time >= end time, so we split them into two
    # groups to avoid overlap.
    start_timestamp_strategy = timedeltas(min_value=0, max_value=500000)
    end_timestamp_strategy = timedeltas(min_value=500001, max_value=999999)
    # \r is not legal inside Subtitle.content, it should have already been
    # normalised to \n.
    content_strategy = st.text(min_size=1).filter(lambda x: "\r" not in x)
    # Proprietary text lives on the timestamp line, so it must not contain
    # any end-of-line characters.
    proprietary_strategy = st.text().filter(
        lambda x: all(eol not in x for eol in "\r\n")
    )
    if strict:
        content_strategy = content_strategy.filter(is_strictly_legal_content)
    subtitle_strategy = st.builds(
        srt.Subtitle,
        index=st.integers(min_value=0),
        start=start_timestamp_strategy,
        end=end_timestamp_strategy,
        proprietary=proprietary_strategy,
        content=content_strategy,
    )
    return subtitle_strategy
@given(st.lists(subtitles()))
def test_compose_and_parse_from_file(input_subs):
    """Round-trip: composing into a file-like object and reparsing is lossless."""
    srt_file = StringIO(srt.compose(input_subs, reindex=False))
    reparsed_subs = srt.parse(srt_file)
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_from_file_bom(input_subs):
    """Round-trip through a file survives a leading UTF-8 BOM."""
    srt_file = StringIO("\ufeff" + srt.compose(input_subs, reindex=False))
    reparsed_subs = srt.parse(srt_file)
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_strict(input_subs):
    """Round-trip through a composed string is lossless in strict mode."""
    composed = srt.compose(input_subs, reindex=False)
    reparsed_subs = srt.parse(composed)
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_compose_without_ending_blank_line(input_subs):
    """
    Many sub editors don't add a blank line to the end, and many editors accept
    it. We should just accept this too in input.
    """
    composed = srt.compose(input_subs, reindex=False)
    # Drop the final newline so the file ends without a blank line.
    composed_without_ending_blank = composed[:-1]
    reparsed_subs = srt.parse(composed_without_ending_blank)
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_compose_without_eol_at_all(input_subs):
    """Input with no trailing EOL characters at all still parses."""
    composed = srt.compose(input_subs, reindex=False)
    composed_without_ending_blank = composed.rstrip("\r\n")
    reparsed_subs = srt.parse(composed_without_ending_blank)
    subs_eq(reparsed_subs, input_subs)
@given(st.text().filter(is_strictly_legal_content))
def test_compose_and_parse_strict_mode(content):
    """Strict mode strips blank lines plus leading/trailing newlines; non-strict doesn't."""
    # sub.content should not have OS-specific line separators, only \n
    assume("\r" not in content)
    # Surround legal content with the illegal bits strict mode should remove.
    content = "\n" + content + "\n\n" + content + "\n"
    sub = CONTENTLESS_SUB(content=content)
    parsed_strict = list(srt.parse(sub.to_srt()))[0]
    parsed_unstrict = list(srt.parse(sub.to_srt(strict=False)))[0]
    # Strict mode should remove blank lines in content, leading, and trailing
    # newlines.
    assert not parsed_strict.content.startswith("\n")
    assert not parsed_strict.content.endswith("\n")
    assert "\n\n" not in parsed_strict.content
    # When strict mode is false, no processing should be applied to the
    # content (other than \r\n becoming \n).
    assert parsed_unstrict.content == sub.content.replace("\r\n", "\n")
@given(st.integers(min_value=1, max_value=TIMEDELTA_MAX_DAYS))
def test_timedelta_to_srt_timestamp_can_go_over_24_hours(days):
    """The hours field is not wrapped at 24: whole days accumulate into it."""
    srt_timestamp = srt.timedelta_to_srt_timestamp(timedelta(days=days))
    srt_timestamp_hours = int(srt_timestamp.split(":")[0])
    assert srt_timestamp_hours == days * HOURS_IN_DAY
@given(subtitles())
def test_subtitle_equality(sub_1):
    """Subtitles with identical attributes compare equal."""
    sub_2 = srt.Subtitle(**vars(sub_1))
    assert sub_1 == sub_2
@given(subtitles())
def test_subtitle_inequality(sub_1):
    """Subtitles differing in any attribute (here: index) compare unequal."""
    sub_2 = srt.Subtitle(**vars(sub_1))
    sub_2.index += 1
    assert sub_1 != sub_2
@given(subtitles())
def test_subtitle_from_scratch_equality(subtitle):
    """Two subtitles parsed from the same SRT block compare and hash equal."""
    srt_block = subtitle.to_srt()
    # Get two totally new sets of objects so as not to affect the hash
    # comparison
    sub_1 = list(srt.parse(srt_block))[0]
    sub_2 = list(srt.parse(srt_block))[0]
    subs_eq([sub_1], [sub_2])
    # In case subs_eq and eq disagree for some reason
    assert sub_1 == sub_2
    assert hash(sub_1) == hash(sub_2)
@given(st.lists(subtitles()))
def test_parsing_spaced_arrow(subs):
    """A '- >' arrow with a stray space still parses."""
    spaced_block = srt.compose(subs, reindex=False, strict=False).replace("-->", "- >")
    reparsed_subtitles = srt.parse(spaced_block)
    subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_spaced_ender_arrow(subs):
    """A '-- >' arrow variant still parses."""
    # Seen in BSG subtitles
    spaced_block = srt.compose(subs, reindex=False, strict=False).replace("-->", "-- >")
    reparsed_subtitles = srt.parse(spaced_block)
    subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_no_ws_arrow(subs):
    """An arrow with no surrounding whitespace still parses."""
    spaced_block = srt.compose(subs, reindex=False, strict=False).replace(
        " --> ", "-->"
    )
    reparsed_subtitles = srt.parse(spaced_block)
    subs_eq(reparsed_subtitles, subs)
@given(st.text(string.whitespace), st.lists(subtitles()))
def test_parsing_leading_whitespace(ws, subs):
    """Whitespace before the first block is tolerated (issue #50)."""
    prews_block = ws + srt.compose(subs, reindex=False, strict=False)
    reparsed_subtitles = srt.parse(prews_block)
    subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_negative_index(subs):
    """Negative subtitle indexes are accepted (issue #56)."""
    for sub in subs:
        sub.index *= -1
    prews_block = srt.compose(subs, reindex=False, strict=False)
    reparsed_subtitles = srt.parse(prews_block)
    subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_content_with_blank_lines(subs):
    """Blank lines inside non-strict content survive a round trip."""
    for subtitle in subs:
        # We stuff a blank line in the middle so as to trigger the "special"
        # content parsing for erroneous SRT files that have blank lines.
        subtitle.content = subtitle.content + "\n\n" + subtitle.content
    reparsed_subtitles = srt.parse(srt.compose(subs, reindex=False, strict=False))
    subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()))
def test_parsing_no_content(subs):
    """Empty content survives a non-strict round trip."""
    for subtitle in subs:
        subtitle.content = ""
    reparsed_subtitles = srt.parse(srt.compose(subs, reindex=False, strict=False))
    subs_eq(reparsed_subtitles, subs)
@given(st.lists(subtitles()), st.lists(subtitles()), st.text(alphabet="\n\r\t "))
def test_subs_missing_content_removed(content_subs, contentless_subs, contentless_text):
    """sort_and_reindex drops whitespace-only subs and reindexes the rest."""
    for sub in contentless_subs:
        sub.content = contentless_text
    subs = contentless_subs + content_subs
    composed_subs = list(srt.sort_and_reindex(subs, in_place=True))
    # We should have composed the same subs as there are in content_subs, as
    # all contentless_subs should have been stripped.
    subs_eq(composed_subs, content_subs, any_order=True)
    # The subtitles should be reindexed starting at start_index, excluding
    # contentless subs
    default_start_index = 1
    assert [sub.index for sub in composed_subs] == list(
        range(default_start_index, default_start_index + len(composed_subs))
    )
@given(
    st.lists(subtitles()),
    st.lists(subtitles()),
    timedeltas(min_value=-999, max_value=-1),
)
def test_subs_starts_before_zero_removed(positive_subs, negative_subs, negative_td):
    """sort_and_reindex drops subtitles whose start time is negative."""
    for sub in negative_subs:
        sub.start = negative_td
        sub.end = negative_td  # Just to avoid tripping any start >= end errors
    subs = positive_subs + negative_subs
    composed_subs = list(srt.sort_and_reindex(subs, in_place=True))
    # There should be no negative subs
    subs_eq(composed_subs, positive_subs, any_order=True)
@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
def test_sort_and_reindex(input_subs, start_index):
    """sort_and_reindex orders by start time and renumbers from start_index."""
    for sub in input_subs:
        # Pin all subs to same end time so that start time is compared only,
        # must be guaranteed to be < sub.start, see how
        # start_timestamp_strategy is done
        sub.end = timedelta(500001)
    reindexed_subs = list(
        srt.sort_and_reindex(input_subs, start_index=start_index, in_place=True)
    )
    # The subtitles should be reindexed starting at start_index
    assert [sub.index for sub in reindexed_subs] == list(
        range(start_index, start_index + len(input_subs))
    )
    # The subtitles should be sorted by start time
    expected_sorting = sorted(input_subs, key=lambda sub: sub.start)
    assert reindexed_subs == expected_sorting
@given(st.lists(subtitles()))
def test_sort_and_reindex_no_skip(input_subs):
    """With skip=False, subs whose end precedes their start are kept."""
    # end time > start time should not trigger a skip if skip=False
    for sub in input_subs:
        old_start = sub.start
        sub.start = sub.end
        sub.end = old_start
    reindexed_subs = list(srt.sort_and_reindex(input_subs, skip=False))
    # Nothing should have been skipped
    assert len(reindexed_subs) == len(input_subs)
@given(st.lists(subtitles(), min_size=1))
def test_sort_and_reindex_same_start_time_uses_end(input_subs):
    """When start times tie, sorting falls back to the end time."""
    for sub in input_subs:
        # Pin all subs to same start time so that end time is compared only
        sub.start = timedelta(1)
    reindexed_subs = list(srt.sort_and_reindex(input_subs, in_place=True))
    # The subtitles should be sorted by end time when start time is the same
    expected_sorting = sorted(input_subs, key=lambda sub: sub.end)
    assert reindexed_subs == expected_sorting
@given(st.lists(subtitles(), min_size=1), st.integers(min_value=0))
def test_sort_and_reindex_not_in_place_matches(input_subs, start_index):
    """in_place=True reuses the input objects; in_place=False copies -- same results."""
    # Make copies for both sort_and_reindex calls so that they can't affect
    # each other
    not_in_place_subs = [srt.Subtitle(**vars(sub)) for sub in input_subs]
    in_place_subs = [srt.Subtitle(**vars(sub)) for sub in input_subs]
    nip_ids = [id(sub) for sub in not_in_place_subs]
    ip_ids = [id(sub) for sub in in_place_subs]
    not_in_place_output = list(
        srt.sort_and_reindex(not_in_place_subs, start_index=start_index)
    )
    in_place_output = list(
        srt.sort_and_reindex(in_place_subs, start_index=start_index, in_place=True)
    )
    # The results in each case should be the same
    subs_eq(not_in_place_output, in_place_output)
    # Not in place sort_and_reindex should have created new subs
    assert not any(id(sub) in nip_ids for sub in not_in_place_output)
    # In place sort_and_reindex should be reusing the same subs
    assert all(id(sub) in ip_ids for sub in in_place_output)
@given(
    st.lists(subtitles(), min_size=1),
    st.integers(min_value=0),
    st.text(min_size=1),
    timedeltas(),
)
def test_parser_noncontiguous(subs, fake_idx, garbage, fake_timedelta):
    """Garbage between otherwise-valid blocks raises SRTParseError."""
    composed = srt.compose(subs)
    # Put some garbage between subs that should trigger our failed parsing
    # detection. Since we do some magic to try and detect blank lines that
    # don't really delimit subtitles, it has to look at least a little like an
    # SRT block.
    srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
    composed = composed.replace(
        "\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
    )
    with pytest.raises(srt.SRTParseError):
        list(srt.parse(composed))
@given(
    st.lists(subtitles(), min_size=1),
    st.integers(min_value=0),
    st.text(min_size=1),
    timedeltas(),
)
def test_parser_noncontiguous_ignore_errors(subs, fake_idx, garbage, fake_timedelta):
    """With ignore_errors=True, injected garbage must not raise."""
    composed = srt.compose(subs)
    srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
    composed = composed.replace(
        "\n\n", "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
    )
    # Should not raise, we have ignore_errors
    list(srt.parse(composed, ignore_errors=True))
def _parseable_as_int(text):
try:
int(text)
except ValueError:
return False
return True
def _parseable_as_float(text):
try:
float(text)
except ValueError:
return False
return True
@given(st.lists(subtitles()), st.text(min_size=1))
def test_parser_noncontiguous_leading(subs, garbage):
    """Garbage before the first SRT block must raise SRTParseError."""
    # Issue #50 permits leading whitespace, see test_parsing_leading_whitespace
    assume(not garbage.isspace())
    # Issue #56 permits negative indexes, see test_parsing_negative_index. It
    # also shouldn't just be a number, because then we'd confuse it with our
    # index...
    assume(garbage.strip()[0] != ".")
    assume(garbage.strip()[0] != "-")
    assume(not _parseable_as_int(garbage.strip()))
    assume(not _parseable_as_float(garbage.strip()))
    # Put some garbage at the beginning that should trigger our noncontiguity
    # checks
    composed = garbage + srt.compose(subs)
    with pytest.raises(srt.SRTParseError):
        list(srt.parse(composed))
@given(
    st.lists(subtitles(), min_size=1),
    st.integers(min_value=0),
    st.text(min_size=1),
    timedeltas(),
)
def test_parser_didnt_match_to_end_raises(subs, fake_idx, garbage, fake_timedelta):
    """Trailing unparseable content must raise SRTParseError whose attributes
    (unmatched_content, expected_start, actual_start) describe the leftover."""
    srt_blocks = [sub.to_srt() for sub in subs]
    srt_timestamp = srt.timedelta_to_srt_timestamp(fake_timedelta)
    garbage = "\n\n%d\n%s %s" % (fake_idx, srt_timestamp, garbage)
    srt_blocks.append(garbage)
    composed = "".join(srt_blocks)
    with pytest.raises(srt.SRTParseError) as thrown_exc:
        list(srt.parse(composed))
    # Since we will consume as many \n as needed until we meet the lookahead
    # assertion, leading newlines in `garbage` will be stripped.
    garbage_stripped = garbage.lstrip("\n")
    assert garbage_stripped == thrown_exc.value.unmatched_content
    assert len(composed) - len(garbage_stripped) == thrown_exc.value.expected_start
    assert len(composed) == thrown_exc.value.actual_start
@given(st.lists(subtitles()))
def test_parser_can_parse_with_dot_msec_delimiter(subs):
    """Timestamps using '.' instead of ',' before milliseconds must parse."""
    original_srt_blocks = [sub.to_srt() for sub in subs]
    dot_srt_blocks = []
    for srt_block in original_srt_blocks:
        srt_lines = srt_block.split("\n")
        # We should only do the first two, as it might also be in the
        # proprietary metadata, causing this test to fail.
        dot_timestamp = srt_lines[1].replace(",", ".", 2)
        srt_lines[1] = dot_timestamp
        dot_srt_blocks.append("\n".join(srt_lines))
    composed_with_dots = "".join(dot_srt_blocks)
    reparsed_subs = srt.parse(composed_with_dots)
    subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_can_parse_with_fullwidth_delimiter(subs):
    """Timestamps using fullwidth (CJK) comma/colon delimiters must parse.

    BUG FIX: the replacements previously substituted "," with "," and ":"
    with ":" — byte-identical no-ops, so the test never exercised fullwidth
    delimiters at all (the fullwidth characters were evidently lost to an
    encoding mishap). Restore the fullwidth forms.
    """
    original_srt_blocks = [sub.to_srt() for sub in subs]
    dot_srt_blocks = []
    for srt_block in original_srt_blocks:
        srt_lines = srt_block.split("\n")
        # Swap in a fullwidth comma (U+FF0C) and colon (U+FF1A) on the
        # timestamp line only.
        dot_timestamp = srt_lines[1].replace(",", "\uff0c", 1).replace(":", "\uff1a", 1)
        srt_lines[1] = dot_timestamp
        dot_srt_blocks.append("\n".join(srt_lines))
    composed_with_fullwidth = "".join(dot_srt_blocks)
    reparsed_subs = srt.parse(composed_with_fullwidth)
    subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_can_parse_with_no_msec(subs):
    """Timestamps with the millisecond component stripped must still parse.

    Also pass ``count`` to re.sub by keyword: the positional form is easy to
    misread as ``flags`` and is deprecated as a positional argument in
    Python 3.13.
    """
    original_srt_blocks = [sub.to_srt() for sub in subs]
    srt_blocks = []
    for srt_block in original_srt_blocks:
        srt_lines = srt_block.split("\n")
        # We should only do the first two, as it might also be in the
        # proprietary metadata, causing this test to fail.
        srt_lines[1] = re.sub(",[0-9]+", "", srt_lines[1], count=2)
        srt_blocks.append("\n".join(srt_lines))
    composed = "".join(srt_blocks)
    reparsed_subs = srt.parse(composed)
    subs_eq(reparsed_subs, subs)
@given(subtitles())
def test_repr_doesnt_crash(sub):
    """repr(Subtitle) must not raise and should mention the class and index."""
    # Not much we can do here, but we should make sure __repr__ doesn't crash
    # or anything and it does at least vaguely look like what we want
    assert "Subtitle" in repr(sub)
    assert str(sub.index) in repr(sub)
@given(subtitles(), subtitles())
def test_parser_accepts_final_no_newline_no_content(sub1, sub2):
    """A final empty-content sub with no trailing newlines must still parse."""
    # Limit size so we know how much to remove
    sub2.content = ""
    subs = [sub1, sub2]
    # Remove the last newlines so that there are none. Cannot use rstrip since
    # there might be other stuff that gets matched in proprietary
    stripped_srt_blocks = srt.compose(subs, reindex=False)[:-2]
    reparsed_subs = srt.parse(stripped_srt_blocks)
    subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_parser_accepts_newline_no_content(subs):
    """Empty-content subs separated by a single newline must still parse."""
    for sub in subs:
        # Limit size so we know how many lines to remove
        sub.content = ""
    # Remove the last \n so that there is only one
    stripped_srt_blocks = "".join(sub.to_srt()[:-1] for sub in subs)
    reparsed_subs = srt.parse(stripped_srt_blocks)
    subs_eq(reparsed_subs, subs)
@given(st.lists(subtitles()))
def test_compose_and_parse_strict_crlf(input_subs):
    """CRLF line endings must round-trip through compose/parse."""
    composed_raw = srt.compose(input_subs, reindex=False)
    composed = composed_raw.replace("\n", "\r\n")
    reparsed_subs = list(srt.parse(composed))
    # Normalise content back to \n so it compares equal to the input
    for sub in reparsed_subs:
        sub.content = sub.content.replace("\r\n", "\n")
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()), st.one_of(st.just("\n"), st.just("\r\n")))
def test_compose_and_parse_strict_custom_eol(input_subs, eol):
    """compose(eol=...) output must round-trip through parse."""
    composed = srt.compose(input_subs, reindex=False, eol=eol)
    reparsed_subs = srt.parse(composed)
    subs_eq(reparsed_subs, input_subs)
@given(equivalent_timestamps())
def test_equal_timestamps_despite_different_fields_parsed_as_equal(timestamps):
    """Differently-formatted but equivalent timestamps parse to equal deltas."""
    ts1, ts2 = timestamps
    assert srt.srt_timestamp_to_timedelta(ts1) == srt.srt_timestamp_to_timedelta(ts2)
@given(timedeltas())
def test_bad_timestamp_format_raises(ts):
    """A corrupted timestamp must raise TimestampParseError."""
    ts = srt.timedelta_to_srt_timestamp(ts)
    # Corrupt the first ':' separator
    ts = ts.replace(":", "t", 1)
    with pytest.raises(srt.TimestampParseError):
        srt.srt_timestamp_to_timedelta(ts)
@given(st.lists(subtitles()), st.lists(st.sampled_from(string.whitespace)))
def test_can_parse_index_trailing_ws(input_subs, whitespace):
    """Trailing whitespace after the index line must be tolerated."""
    out = ""
    for sub in input_subs:
        lines = sub.to_srt().split("\n")
        lines[0] = lines[0] + "".join(whitespace)
        out += "\n".join(lines)
    reparsed_subs = srt.parse(out)
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()))
def test_can_parse_index_with_dot(input_subs):
    """Dotted indexes like '1.1' must be tolerated."""
    # Seen in Battlestar Galactica subs
    out = ""
    for sub in input_subs:
        lines = sub.to_srt().split("\n")
        lines[0] = lines[0] + "." + lines[0]
        out += "\n".join(lines)
    reparsed_subs = srt.parse(out)
    subs_eq(reparsed_subs, input_subs)
@given(st.lists(subtitles()), st.lists(st.just("0")))
def test_can_parse_index_leading_zeroes(input_subs, zeroes):
    """Indexes with leading zeroes must be tolerated."""
    out = ""
    for sub in input_subs:
        lines = sub.to_srt().split("\n")
        lines[0] = "".join(zeroes) + lines[0]
        out += "\n".join(lines)
    reparsed_subs = srt.parse(out)
    subs_eq(reparsed_subs, input_subs)
| StarcoderdataPython |
6470087 | my_expr = 42
s = f'foo{my_expr} bar{my_expr}' | StarcoderdataPython |
6672986 | # coding=utf-8
__author__ = 'weed'
import cv2
import numpy
import math
WHITE = (255,255,255)
RED = ( 0, 0,255)
GREEN = ( 0,128, 0)
BLUE = (255, 0, 0)
SKY_BLUE = (255,128,128)
BLACK = ( 0, 0, 0)
DARKSLATEGRAY = ( 79, 79, 47)
TEAL = (128,128, 0)
def getVelocityVector(positionHistory, population=1, numFramesDelay=0):
    """Estimate a per-frame velocity vector from a 2-D position history.

    :param positionHistory: sequence of (x, y) tuples (entries may be None)
    :param population: number of frames the displacement is averaged over
    :param numFramesDelay: how many of the newest frames to skip
    :return: (vx, vy) tuple, or None when samples are missing or the
             tracked point did not move
    """
    # Sample indices, counted from the end of the history.
    startIdx = -1 - numFramesDelay            # older sample
    endIdx = startIdx + population            # newer sample
    # Not enough history yet, or either sample is missing.
    if len(positionHistory) < -startIdx \
            or positionHistory[startIdx] is None \
            or positionHistory[endIdx] is None:
        return None
    # Displacement between the two samples.
    displacement = numpy.array(positionHistory[endIdx]) \
        - numpy.array(positionHistory[startIdx])
    # No movement at all -> report None rather than a zero vector.
    if not displacement.any():
        return None
    # Velocity = displacement averaged over the population window.
    return tuple(displacement / float(population))
def getAccelerationVector2(positionHistory, populationVelocity, populationAcceleration=12,
                           numFramesDelay=0, coAcceleration=200):
    """Estimate acceleration as the change between two velocity estimates
    taken ``populationAcceleration`` frames apart, scaled by
    ``coAcceleration``.

    :return: (ax, ay) tuple, or None when either velocity is unavailable
             or the velocity did not change.
    """
    # Newer velocity estimate:
    #   indexPtBegin = -1-popV            (older sample)
    #   indexPtEnd   = -1+popV-popV = -1  (newest sample)
    velocityVectorEnd = getVelocityVector(positionHistory, populationVelocity,
                                          populationVelocity)
    # Older velocity estimate:
    #   indexPtBegin = -1-(popV+popAcl)            = -1-popV-popAcl
    #   indexPtEnd   = -1+popV-(popV+popAcl)       = -1-popAcl
    velocityVectorBegin = getVelocityVector(positionHistory, populationVelocity,
                                            populationVelocity+populationAcceleration)
    if velocityVectorBegin is None or velocityVectorEnd is None:
        return None
    else:
        velocityVectorBeginNp = numpy.array(velocityVectorBegin)
        velocityVectorEndNp = numpy.array(velocityVectorEnd)
        # Velocity change: dV = vEnd - vBegin
        deltaVelocityVectorNp = velocityVectorEndNp - velocityVectorBeginNp
        # Velocity unchanged -> no acceleration to report
        notChanged = (deltaVelocityVectorNp == numpy.array([0,0]))
        if notChanged.all():
            return None
        # Acceleration = dV * coefficient / window length
        else:
            accelerationVectorNp = deltaVelocityVectorNp * coAcceleration / float(populationAcceleration)
            accelerationVector = tuple(accelerationVectorNp)
            return accelerationVector
def getAccelerationVector(positionHistory, population=2, numFramesDelay=0):
    """Estimate acceleration as the difference between two consecutive
    half-window velocity estimates.

    :return: scaled (ax, ay) tuple, or None when samples are missing or the
             velocity did not change.
    NOTE(review): when either velocity estimate is None the function falls
    off the end and implicitly returns None.
    """
    pop = int(population / 2) # half-window, truncated
    # Not enough history, or any of the three reference samples missing.
    if len(positionHistory) < 1+2*pop+numFramesDelay \
        or positionHistory[-1-numFramesDelay] is None \
        or positionHistory[-1-pop-numFramesDelay] is None \
        or positionHistory[-1-2*pop-numFramesDelay] is None:
        return None
    else:
        # Velocity over the older half-window: positions [-1-2*pop] .. [-1-pop]
        velocity0 = getVelocityVector(positionHistory, pop, pop+numFramesDelay)
        # Velocity over the newer half-window: positions [-1-pop] .. [-1]
        velocity1 = getVelocityVector(positionHistory, pop, numFramesDelay)
        if velocity0 is not None and velocity1 is not None:
            printVector('v0', velocity0)
            printVector('v1', velocity1)
            v0np = numpy.array(velocity0)
            v1np = numpy.array(velocity1)
            dvnp = v1np - v0np # v1 - v0 = dv
            # Velocity unchanged -> None
            areSameVelocity_array = (dvnp == numpy.array([0,0]))
            if areSameVelocity_array.all():
                return None
            else:
                # Scale by 10 / half-window (empirical gain)
                dvnp = dvnp * 10.0 / pop
                vector = tuple(dvnp)
                printVector('a ', vector)
                return vector
def getAccelerationVectorStartStop(
        positionHistory,
        population=6,
        numFramesDelay=3,
        coForceVectorStrength=25.0):
    """Detect a sudden stop or sudden start of the tracked point.

    :return: ('quickMotion', (ax, ay)) on an abrupt speed change,
             the bare string 'usual' otherwise, or None when the abrupt
             change produced a zero acceleration.
    NOTE(review): the three return shapes are inconsistent — callers must
    handle a tuple, a string and None.
    """
    ### Stillness / sudden-motion check
    # v6 - v3 = dv3 = a3
    #
    v6 = getVelocityVector(positionHistory, 3, 0+numFramesDelay)
    v3 = getVelocityVector(positionHistory, 3, 3+numFramesDelay)
    # Treat missing estimates as zero velocity
    v6np = numpy.array([0,0]) if v6 is None else numpy.array(v6)
    v3np = numpy.array([0,0]) if v3 is None else numpy.array(v3)
    v6size = math.sqrt(v6np[0]**2 + v6np[1]**2)
    v3size = math.sqrt(v3np[0]**2 + v3np[1]**2)
    # Large speed jump while one side is nearly still -> quick stop/start
    if 20 < math.fabs(v6size - v3size) and (v6size < 2.0 or v3size < 2.0):
        # print '静止/急発進した ' + str(int(vSizeAfter - vSizeBefore))
        a3np = (v6np - v3np) * coForceVectorStrength / 3
        # Zero acceleration -> None
        areSameVelocity_array = (a3np == numpy.array([0,0]))
        if areSameVelocity_array.all():
            return None
        else:
            vector = tuple(a3np)
            return 'quickMotion', vector
    else:
        return 'usual'
def getAccelerationVectorFirFilter(
        positionHistory,
        population=6,
        numFramesDelay=3,
        coForceVectorStrength=25.0):
    """Estimate acceleration from two 6-frame velocity estimates spaced
    ``population`` frames apart (FIR-style smoothing).

    :return: scaled (ax, ay) tuple, or None when a velocity estimate is
             missing or the velocity did not change.
    NOTE(review): the `pass` branch falls off the end of the function, so
    missing velocities yield an implicit None.
    """
    # Velocity window length is 6 frames
    # v_6 - v_0 = dv_0 = a_0
    v11 = getVelocityVector(positionHistory, 6, numFramesDelay)
    v10 = getVelocityVector(positionHistory, 6, population+numFramesDelay)
    if v11 is None or v10 is None:
        pass
    else:
        v11np = numpy.array(v11)
        v10np = numpy.array(v10)
        anp = (v11np - v10np) * coForceVectorStrength / population
        # Zero acceleration -> None
        areSameVelocity_array = (anp == numpy.array([0,0]))
        if areSameVelocity_array.all():
            return None
        else:
            vector = tuple(anp)
            return vector
def printVector(name, vector):
    """Debug helper: format *vector* (an (x, y) pair) as integers.

    The actual print is disabled; the function is kept as a no-op hook so
    existing call sites keep working.

    Fix: the second parameter was named ``tuple``, shadowing the builtin.
    """
    tupleInt = (int(vector[0]), int(vector[1]))
    # print name + ': ' + str(tupleInt)
def getAccelerationVectorVelocitySensitive(positionHistory):
    """Pick the acceleration window adaptively based on recent speed.

    When the distance between positionHistory[-6] and positionHistory[-7]
    is 40+ pixels, use the short (population=6) window; otherwise use the
    longer, smoother window (population=12).
    NOTE(review): returns an implicit None when the velocity sample is
    unavailable (the `pass` branch).
    """
    vVector = getVelocityVector(positionHistory, 1, 5)
    if vVector is None:
        pass
    elif 40 < math.sqrt(vVector[0]**2 + vVector[1]**2):
        # print '40 < v'
        return getAccelerationVector(positionHistory, 6, 3)
    else:
        return getAccelerationVector(positionHistory, 12, 0)
def cvArrow(img, pt, vector, lengthTimes, color, thickness=1, lineType=8, shift=0):
    """Draw an outlined arrow: a slightly thicker white arrow first, then the
    coloured arrow on top. Does nothing for a (near-)zero vector.

    NOTE(review): the caller-supplied lineType/shift are ignored — the inner
    calls hard-code lineType=8, shift=0.
    """
    if int(vector[0]) == 0 and int(vector[1]) == 0:
        pass
    else:
        cvArrowBase(img, pt, vector, lengthTimes, WHITE, thickness+2, lineType=8, shift=0)
        cvArrowBase(img, pt, vector, lengthTimes, color, thickness, lineType=8, shift=0)
def cvArrowBase(img, pt, vector, lengthTimes, color, thickness=1, lineType=8, shift=0):
    """
    Draw an arrow (shaft plus two head strokes).
    :param img: frame to draw on
    :param pt: start point (tuple)
    :param vector: direction vector (tuple)
    :param lengthTimes: length multiplier for the arrow
    :param color: colour
    :param thickness: line thickness
    :param lineType: passed through to cv2.line
    :param shift: passed through to cv2.line
    :return: None
    """
    if int(vector[0]) == 0 and int(vector[1]) == 0:
        pass
    else:
        pt1 = pt
        # End point after scaling the vector
        pt2 = (int(pt1[0] + vector[0]*lengthTimes),
               int(pt1[1] + vector[1]*lengthTimes))
        cv2.line(img,pt1,pt2,color,thickness,lineType,shift)
        # Unit vector along the shaft
        # NOTE(review): v can be 0 when lengthTimes shrinks the drawn shaft
        # below one pixel even though vector itself is non-zero — this would
        # divide by zero.
        vx = pt2[0] - pt1[0]
        vy = pt2[1] - pt1[1]
        v = math.sqrt(vx ** 2 + vy ** 2)
        ux = vx / v
        uy = vy / v
        # Arrow-head dimensions (half-width w, length h)
        w = 5
        h = 10
        ptl = (int(pt2[0] - uy*w - ux*h), int(pt2[1] + ux*w - uy*h))
        ptr = (int(pt2[0] + uy*w - ux*h), int(pt2[1] - ux*w - uy*h))
        # Draw the arrow head
        cv2.line(img,pt2,ptl,color,thickness,lineType,shift)
        cv2.line(img,pt2,ptr,color,thickness,lineType,shift)
def cvVerticalArrow(img, x, vector, lengthTimes, color, isSigned=False, thickness=1, lineType=8, shift=0):
    """Draw *vector* as a vertical arrow at column *x*.

    Signed mode plots the x component (up = positive); unsigned mode plots
    the vector magnitude.
    """
    vx, vy = vector
    if isSigned:
        verticalVector = (0, -vx)
        baseY = img.shape[0] * 1 / 3  # baseline at height/3 from the top
    else:
        verticalVector = (0, -math.sqrt(vx ** 2 + vy ** 2))
        baseY = img.shape[0] * 1 / 2  # baseline at mid-height
    cvArrow(img, (x, baseY), verticalVector,
            lengthTimes, color, thickness, lineType, shift)
def cvLine(img, pt1, pt2, color, thickness=1):
    """cv2.line wrapper that coerces both endpoints to int tuples."""
    pt1 = (int(pt1[0]), int(pt1[1]))
    pt2 = (int(pt2[0]), int(pt2[1]))
    cv2.line(img, pt1, pt2, color, thickness)
# TODO: rewrite starting from cvLine and adapt the vector drawing to match
def cvLine2(img, pt1, pt2, color, thickness=1):
    """Like cvLine but draws a thicker white underlay first (outlined line)."""
    pt1 = (int(pt1[0]), int(pt1[1]))
    pt2 = (int(pt2[0]), int(pt2[1]))
    cv2.line(img, pt1, pt2, WHITE, thickness+2)
    cv2.line(img, pt1, pt2, color, thickness)
def cvLineGraph(img, x, pitchX, vector, nextVector, lengthTimes, color, isSigned=False, thickness=1, lineType=8, shift=0):
    """Draw one line-graph segment between two consecutive vector samples.

    Signed mode plots the x components; unsigned mode plots magnitudes.
    NOTE(review): lineType/shift parameters are accepted but unused.
    """
    vx, vy = vector
    nvx, nvy = nextVector
    if isSigned:
        verticalVector = (0, -vx)
        nextVerticalVector = (0, -nvx)
        baseY = img.shape[0] * 1 / 3  # baseline at height/3 from the top
    else:
        verticalVector = (0, -math.sqrt(vx ** 2 + vy ** 2))
        nextVerticalVector = (0, -math.sqrt(nvx ** 2 + nvy ** 2))
        baseY = img.shape[0] * 1 / 2  # baseline at mid-height
    cvLine(img, (x, baseY+verticalVector[1]*lengthTimes), (x+pitchX, baseY+nextVerticalVector[1]*lengthTimes),
           color, thickness)
def cvXAxis(img, isSigned, thickness=1):
    """Draw the graph's x axis at the baseline matching cvLineGraph/cvVerticalArrow."""
    if isSigned:
        baseY = img.shape[0] * 1 / 3  # baseline at height/3 from the top
    else:
        baseY = img.shape[0] * 1 / 2  # baseline at mid-height
    cvArrow(img, (0, baseY), (img.shape[1], 0), 1, BLACK, thickness)
# TODO: allow component access as vector.x / vector.y (in utils as well)
# Called during both tracking and detection, hence factored out here
def drawVelocityVectorsInStrobeMode(frameToDisplay, positionHistory,
                                    numFramesDelay, numStrobeModeSkips,
                                    velocityVectorsHistory,
                                    color=BLUE, thickness=5):
    """Draw every numStrobeModeSkips-th historical velocity vector at its
    (delay-shifted) position, strobe-photo style."""
    for i in range(len(positionHistory) - numFramesDelay - 1):
        if i % numStrobeModeSkips == 0 and \
                velocityVectorsHistory[i] is not None:
            cvArrow(
                frameToDisplay,
                positionHistory[i - numFramesDelay],
                velocityVectorsHistory[i],
                4, color, thickness
            )
            # if shouldDrawVelocityVectorsVerticallyInStrobeMode:
            #     cvVerticalArrow(
            #         frameToDisplay, spaceBetweenVerticalVectors*i,
            #         velocityVectorsHistory[i],
            #         4, color, isSigned, thickness
            #     )
def drawVelocityVectorsVerticallyInStrobeMode(frameToDisplay, positionHistory,
                                              velocityVectorsHistory, numFramesDelay,
                                              numStrobeModeSkips, spaceBetweenVerticalVectors,
                                              color=BLUE, thickness=5, isSigned=False, lengthTimes=5):
    """Draw every numStrobeModeSkips-th historical velocity vector as a
    vertical arrow, evenly spaced along the x axis."""
    for i in range(len(positionHistory) - numFramesDelay - 1):
        if i % numStrobeModeSkips == 0 and \
                velocityVectorsHistory[i] is not None:
            cvVerticalArrow(
                frameToDisplay, spaceBetweenVerticalVectors*i/numStrobeModeSkips,
                velocityVectorsHistory[i],
                lengthTimes, color, isSigned, thickness
            )
# Draw the force vector acting on the tracked point
def drawForceVector(img, aclVector, positionAclBegin, gravityStrength):
    """Draw force = acceleration - gravity as a blue arrow.

    A missing acceleration is treated as (0, 0), so only gravity is shown.
    Fix: removed the dead `if vector is not None` check — `vector` is
    constructed unconditionally just above, so the test was always true.
    """
    if aclVector is None:
        aclVector = (0,0)
    # Force vector = acceleration vector - gravity vector
    # (gravity points down, i.e. +y in image coordinates)
    vector = (aclVector[0], aclVector[1] - gravityStrength)
    cvArrow(img, positionAclBegin, vector, 1, BLUE, 5)
def getComponentVector(vector, axis):
    """Return the axis-aligned component of *vector*.

    :param vector: (x, y) tuple or None
    :param axis: "x" or "y"
    :return: (x, 0) or (0, y); None when *vector* is None
    :raises ValueError: if *axis* is neither "x" nor "y"

    Fix: the axis was compared with ``is``, which tests object identity and
    only worked because of CPython small-string interning; use ``==``.
    """
    if vector is None:
        return None
    elif axis == "x":
        return (vector[0], 0)  # keep only the x component
    elif axis == "y":
        return (0, vector[1])  # keep only the y component
    else:
        raise ValueError('axis is neither x nor y')
class fpsWithTick(object):
    """Frames-per-second counter based on cv2.getTickCount().

    Call get() once per frame; it returns the FPS estimated from the time
    elapsed since the previous call.
    """
    def __init__(self):
        self._count = 0      # total frames seen (incremented by get())
        self._oldCount = 0   # frame count at the previous get() call
        self._freq = 1000 / cv2.getTickFrequency()  # ticks -> milliseconds
        self._startTime = cv2.getTickCount()        # time of previous get()
    def get(self):
        """Return the FPS since the previous call, rounded to one decimal."""
        nowTime = cv2.getTickCount()
        diffTime = (nowTime - self._startTime) * self._freq  # elapsed ms
        self._startTime = nowTime
        # frames elapsed / seconds elapsed
        fps = (self._count - self._oldCount) / (diffTime / 1000.0)
        self._oldCount = self._count
        self._count += 1
        fpsRounded = round(fps, 1)
        return fpsRounded
def getSubtractedFrame(frameFore, frameBackground, diffBgFg, iterations):
    """Background subtraction in HSV space.

    :param frameFore: current BGR camera frame
    :param frameBackground: stored BGR background frame
    :param diffBgFg: per-channel difference threshold (S uses 2x)
    :param iterations: morphological-opening iterations for noise removal
    :return: frameFore with non-changed (background) pixels masked to black
    """
    # Convert the camera frame to HSV
    frameNowHsv = cv2.cvtColor(frameFore, cv2.COLOR_BGR2HSV)
    # Convert the stored background frame to HSV as well
    frameBackgroundHsv = cv2.cvtColor(frameBackground, cv2.COLOR_BGR2HSV)
    # Split both converted frames into their H, S, V channels
    frameFgH, frameFgS, frameFgV = cv2.split(frameNowHsv)
    frameBgH, frameBgS, frameBgV = cv2.split(frameBackgroundHsv)
    # Per-channel absolute differences
    diffH = cv2.absdiff(frameFgH, frameBgH)
    diffS = cv2.absdiff(frameFgS, frameBgS)
    diffV = cv2.absdiff(frameFgV, frameBgV)
    # True where the difference exceeds the threshold
    maskH = diffBgFg < diffH
    maskS = diffBgFg * 2 < diffS  # doubled for S to counter auto-exposure correction
    maskV = diffBgFg < diffV
    # Image height/width
    height = frameFgH.shape[0]
    width = frameFgH.shape[1]
    # Allocate per-channel masks the same size as the frame
    im_mask_h = numpy.zeros((height, width), numpy.uint8)
    im_mask_s = numpy.zeros((height, width), numpy.uint8)
    im_mask_v = numpy.zeros((height, width), numpy.uint8)
    # Paint changed (True) pixels white
    im_mask_h[maskH] = 255
    im_mask_s[maskS] = 255
    im_mask_v[maskV] = 255
    # Union: keep a pixel if ANY of H, S or V differs by more than the threshold
    im_mask = cv2.bitwise_or(im_mask_h, im_mask_s)
    im_mask = cv2.bitwise_or(im_mask , im_mask_v)
    # Noise removal
    # 8-connected neighbourhood kernel
    element8 = numpy.array([[1,1,1],
                            [1,1,1],
                            [1,1,1]], numpy.uint8)
    # cv2.morphologyEx(hTarget, cv2.MORPH_CLOSE, element8, hTarget, None, iterations)
    cv2.morphologyEx(im_mask, cv2.MORPH_OPEN, element8, im_mask, None, iterations)
    return cv2.bitwise_and(frameFore, frameFore, mask=im_mask)
def getBackProjectFrame(frame, roi_hist):
    """Compute a hue back-projection of *frame* against *roi_hist*, then
    denoise it with a morphological opening.

    :param frame: BGR frame
    :param roi_hist: hue histogram of the tracked region
    :return: single-channel back-projection image
    """
    # Convert to HSV colour space
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    # Back-projection over the hue channel (range 0-180)
    dst = cv2.calcBackProject([hsv],[0],roi_hist,[0,180],1)
    # 8-connected neighbourhood kernel
    element8 = numpy.array([[1,1,1],
                            [1,1,1],
                            [1,1,1]], numpy.uint8)
    # Opening to remove speckle noise
    cv2.morphologyEx(dst, cv2.MORPH_OPEN, element8, dst, None, 2)
    return dst
def getMaskByHsv(src, hueMin, hueMax, valueMin, valueMax, gamma=96, sThreshold=5,
                 shouldProcessGaussianBlur=False, gaussianBlurKernelSize=5,
                 shouldProcessClosing=True, iterations=1):
    """Build a binary mask of pixels inside the given HSV range.

    Hue bounds are given in degrees (0-360) and halved for OpenCV's 0-180
    hue scale. Minimum saturation is 2**sThreshold - 1.
    NOTE(review): `gamma` is accepted but unused; despite the flag name
    `shouldProcessClosing`, the code performs MORPH_OPEN.
    """
    _hueMin = hueMin / 2
    _hueMax = hueMax / 2
    src = cv2.cvtColor(src, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(src, numpy.array((
        _hueMin,              # min H
        2 ** sThreshold - 1,  # min S
        valueMin              # min V
    )), numpy.array((
        _hueMax,              # max H
        255,                  # max S
        valueMax)))           # max V
    # Post-processing
    if shouldProcessClosing:
        # 8-connected neighbourhood kernel
        element8 = numpy.array([[1,1,1],
                                [1,1,1],
                                [1,1,1]], numpy.uint8)
        # Morphological opening (see NOTE above re: flag name)
        # cv2.morphologyEx(hTarget, cv2.MORPH_CLOSE, element8, hTarget, None, iterations)
        cv2.morphologyEx(mask, cv2.MORPH_OPEN, element8, mask, None, iterations)
        # anchor: the default (-1,-1) means the anchor is at the kernel centre
    if shouldProcessGaussianBlur:
        # Smooth the mask with a Gaussian filter.
        # GaussianBlur(src, ksize, sigmaX[, dst[, sigmaY[, borderType]]]) -> dst
        # ksize must be a pair of odd numbers: (5,5), (7,7), (9,9)...
        size = 2 * gaussianBlurKernelSize - 1
        cv2.GaussianBlur(mask, (size,size), 0, mask)
    return mask
def scan_color(frame, x, y, w, h):
    """
    Find the min/max hue and value inside a rectangle of the frame.

    :param frame: BGR camera frame
    :param x: int x of the rectangle's top-left corner
    :param y: int y of the rectangle's top-left corner
    :param w: int rectangle width
    :param h: int rectangle height
    :return: (hueMin, hueMax, valueMin, valueMax); -1 pair when no samples

    NOTE(review): the loop variables are misleadingly named — `iy` actually
    iterates columns (x..x+w) and `ix` rows (y..y+h); the indexing
    frameHSV[row][col] is nevertheless consistent. Pixels with H==0 or V==0
    are skipped, and each sample list is capped at ~1000 entries by popping
    the just-appended value (later samples are effectively discarded).
    """
    hueArray = []
    valueArray = []
    frameHSV = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    for iy in range(x, x+w):
        for ix in range(y, y+h):
            dot = frameHSV[ix][iy]
            # Hue
            if dot[0] == 0:
                pass
            else:
                dotHue = dot[0] * 2  # OpenCV hue 0-180 -> degrees 0-360
                hueArray.append(dotHue)
                if 1000 < len(hueArray):
                    hueArray.pop()
            # Value (brightness)
            if dot[2] == 0:
                pass
            else:
                dotValue = dot[2]
                valueArray.append(dotValue)
                if 1000 < len(valueArray):
                    valueArray.pop()
    if 0 < len(hueArray):
        hueMax = max(hueArray)
        hueMin = min(hueArray)
    else:
        hueMin = -1
        hueMax = -1
    if 0 < len(valueArray):
        valueMax = max(valueArray)
        valueMin = min(valueArray)
    else:
        valueMin = -1
        valueMax = -1
    return hueMin, hueMax, valueMin, valueMax
def drawCalibrationTarget(frame, x, y, r):
    """Darken a square calibration target (side 4r) around (x, y), leaving a
    circular hole of radius r, by masking *frame* in place."""
    height, width, numChannels = frame.shape
    maskOfSquare = numpy.zeros((height, width), dtype=numpy.uint8)
    maskOfCircle = numpy.zeros((height, width), dtype=numpy.uint8)
    FILL = -1  # cv2 convention: negative thickness fills the shape
    cv2.rectangle(maskOfSquare, (x-2*r,y-2*r), (x+2*r,y+2*r), 255, FILL) # white square
    cv2.circle   (maskOfCircle, (x, y)       , r            , 255, FILL) # white circle
    maskOutOfCircle = 255 - maskOfCircle  # white outside the circle
    mask = 255 - cv2.bitwise_and(maskOfSquare, maskOutOfCircle)  # black target mark
    frameOfRectangleWithoutCircle = numpy.zeros((height, width, 3), dtype=numpy.uint8)
    cv2.merge((mask, mask, mask), frameOfRectangleWithoutCircle)
    frame[:] = cv2.bitwise_and(frame, frameOfRectangleWithoutCircle)
def pasteRect(src, dst, frameToPaste, dstRect, interpolation = cv2.INTER_LINEAR):
    """
    Resize *frameToPaste* and paste it into the dstRect region of *src*,
    then copy the whole result into *dst*.

    :param src: image the rectangle is pasted into (modified in place)
    :type src: numpy.ndarray
    :param dst: output image; receives a full copy of *src* at the end
    :type dst: numpy.ndarray
    :param frameToPaste: image to resize and paste
    :type frameToPaste: numpy.ndarray
    :param dstRect: (x, y, w, h) target rectangle inside *src*
    :type dstRect: tuple
    :param interpolation: cv2 interpolation mode for the resize
    :return: None
    """
    height, width, _ = frameToPaste.shape
    # x0, y0, w0, h0 = 0, 0, width, height
    x1, y1, w1, h1 = dstRect
    # Resize the whole source frame and paste into the destination rectangle
    src[y1:y1+h1, x1:x1+w1] = \
        cv2.resize(frameToPaste[0:height, 0:width], (w1, h1), interpolation = interpolation)
    # Python: cv.Resize(src, dst, interpolation=CV_INTER_LINEAR) → None
    # Parameters:
    # src – input image.
    # dst – output image; it has the size dsize (when it is non-zero) or
    #   the size computed from src.size(), fx, and fy; the type of dst is the same as of src.
    # dsize –
    #   output image size; if it equals zero, it is computed as:
    #     dsize = Size(round(fx*src.cols), round(fy*src.rows))
    #     Either dsize or both fx and fy must be non-zero.
    # fx –
    #   scale factor along the horizontal axis; when it equals 0, it is computed as
    #     (double)dsize.width/src.cols
    # fy –
    #   scale factor along the vertical axis; when it equals 0, it is computed as
    #     (double)dsize.height/src.rows
    # interpolation –
    #   interpolation method:
    #     INTER_NEAREST - a nearest-neighbor interpolation
    #     INTER_LINEAR - a bilinear interpolation (used by default)
    #     INTER_AREA - resampling using pixel area relation. It may be a preferred method for image decimation, as it gives moire’-free results. But when the image is zoomed, it is similar to the INTER_NEAREST method.
    #     INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood
    #     INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood
    dst[:] = src
| StarcoderdataPython |
4953137 | import ZSI
import ZSI.TCcompound
import ZSI.wstools.Namespaces as NS
from ZSI.schema import LocalElementDeclaration, ElementDeclaration, TypeDefinition, GTD, GED
from ZSI.generate.pyclass import pyclass_type
##############################
# targetNamespace
# http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd
##############################
class wsu_oasis:
    """ZSI typecode container for the OASIS WS-Security Utility schema
    (wsu): the Timestamp complex type, its Created/Expires children, and
    the attributed simple types they build on.

    NOTE(review): this follows the shape of ZSI wsdl2py generated output;
    prefer regenerating from the schema over hand-editing.
    """
    targetNamespace = NS.OASIS.UTILITY
    class tTimestampFault_Def(ZSI.TC.QName, TypeDefinition):
        schema = NS.OASIS.UTILITY
        type = (schema, "tTimestampFault")
        def __init__(self, pname, **kw):
            ZSI.TC.QName.__init__(self, pname, pyclass=None, **kw)
            class Holder(str):
                typecode = self
            self.pyclass = Holder
    class AttributedDateTime_Def(ZSI.TC.String, TypeDefinition):
        # ComplexType/SimpleContent derivation of built-in type
        schema = NS.OASIS.UTILITY
        type = (schema, "AttributedDateTime")
        def __init__(self, pname, **kw):
            if getattr(self, "attribute_typecode_dict", None) is None: self.attribute_typecode_dict = {}
            # attribute handling code
            self.attribute_typecode_dict["Id"] = ZSI.TC.AnyType()
            self.attribute_typecode_dict[(NS.SCHEMA.BASE,"anyAttribute")] = ZSI.TC.AnyElement()
            ZSI.TC.String.__init__(self, pname, **kw)
            class Holder(str):
                __metaclass__ = pyclass_type
                typecode = self
            self.pyclass = Holder
    class AttributedURI_Def(ZSI.TC.URI, TypeDefinition):
        # ComplexType/SimpleContent derivation of built-in type
        schema = NS.OASIS.UTILITY
        type = (schema, "AttributedURI")
        def __init__(self, pname, **kw):
            if getattr(self, "attribute_typecode_dict", None) is None: self.attribute_typecode_dict = {}
            # attribute handling code
            self.attribute_typecode_dict["Id"] = ZSI.TC.AnyType()
            self.attribute_typecode_dict[(NS.SCHEMA.BASE,"anyAttribute")] = ZSI.TC.AnyElement()
            ZSI.TC.URI.__init__(self, pname, **kw)
            class Holder(str):
                __metaclass__ = pyclass_type
                typecode = self
            self.pyclass = Holder
    class TimestampType_Def(ZSI.TCcompound.ComplexType, TypeDefinition):
        schema = NS.OASIS.UTILITY
        type = (schema, "TimestampType")
        def __init__(self, pname, ofwhat=(), attributes=None, extend=False, restrict=False, **kw):
            ns = wsu_oasis.TimestampType_Def.schema
            TClist = [GED(NS.OASIS.UTILITY,"Created",lazy=False, isref=True)(minOccurs=0, maxOccurs=1, nillable=False, encoded=kw.get("encoded")), GED(NS.OASIS.UTILITY,"Expires",lazy=False, isref=True)(minOccurs=0, maxOccurs=1, nillable=False, encoded=kw.get("encoded")), ZSI.TC.AnyElement(aname="_any", minOccurs=0, maxOccurs="unbounded", nillable=False, processContents="lax")]
            self.attribute_typecode_dict = attributes or {}
            if extend: TClist += ofwhat
            if restrict: TClist = ofwhat
            else:
                # attribute handling code
                self.attribute_typecode_dict["Id"] = ZSI.TC.AnyType()
                self.attribute_typecode_dict[(NS.SCHEMA.BASE,"anyAttribute")] = ZSI.TC.AnyElement()
            ZSI.TCcompound.ComplexType.__init__(self, None, TClist, pname=pname, inorder=0, **kw)
            class Holder:
                __metaclass__ = pyclass_type
                typecode = self
                def __init__(self):
                    # pyclass
                    self._Created = None
                    self._Expires = None
                    self._any = []
                    return
            Holder.__name__ = "TimestampType_Holder"
            self.pyclass = Holder
    class Timestamp_Dec(ElementDeclaration):
        literal = "Timestamp"
        schema = NS.OASIS.UTILITY
        substitutionGroup = None
        def __init__(self, **kw):
            kw["pname"] = (NS.OASIS.UTILITY,"Timestamp")
            kw["aname"] = "_Timestamp"
            if wsu_oasis.TimestampType_Def not in wsu_oasis.Timestamp_Dec.__bases__:
                bases = list(wsu_oasis.Timestamp_Dec.__bases__)
                bases.insert(0, wsu_oasis.TimestampType_Def)
                wsu_oasis.Timestamp_Dec.__bases__ = tuple(bases)
            wsu_oasis.TimestampType_Def.__init__(self, **kw)
            if self.pyclass is not None: self.pyclass.__name__ = "Timestamp_Dec_Holder"
    class Expires_Dec(ElementDeclaration):
        literal = "Expires"
        schema = NS.OASIS.UTILITY
        substitutionGroup = None
        def __init__(self, **kw):
            kw["pname"] = (NS.OASIS.UTILITY,"Expires")
            kw["aname"] = "_Expires"
            if wsu_oasis.AttributedDateTime_Def not in wsu_oasis.Expires_Dec.__bases__:
                bases = list(wsu_oasis.Expires_Dec.__bases__)
                bases.insert(0, wsu_oasis.AttributedDateTime_Def)
                wsu_oasis.Expires_Dec.__bases__ = tuple(bases)
            wsu_oasis.AttributedDateTime_Def.__init__(self, **kw)
            if self.pyclass is not None: self.pyclass.__name__ = "Expires_Dec_Holder"
    class Created_Dec(ElementDeclaration):
        literal = "Created"
        schema = NS.OASIS.UTILITY
        substitutionGroup = None
        def __init__(self, **kw):
            kw["pname"] = (NS.OASIS.UTILITY,"Created")
            kw["aname"] = "_Created"
            if wsu_oasis.AttributedDateTime_Def not in wsu_oasis.Created_Dec.__bases__:
                bases = list(wsu_oasis.Created_Dec.__bases__)
                bases.insert(0, wsu_oasis.AttributedDateTime_Def)
                wsu_oasis.Created_Dec.__bases__ = tuple(bases)
            wsu_oasis.AttributedDateTime_Def.__init__(self, **kw)
            if self.pyclass is not None: self.pyclass.__name__ = "Created_Dec_Holder"
# end class wsu_oasis (tns: http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd)
| StarcoderdataPython |
219998 | import math
import sys
import os
import time
import argparse
import pybullet as p
from .simulation import Simulation
def main():
    """Entry point of the ``onshape-to-robot-bullet`` CLI: load a URDF into
    a PyBullet simulation, expose one debug slider per joint, and print the
    robot frame poses to the console in a loop."""
    parser = argparse.ArgumentParser(prog="onshape-to-robot-bullet")
    parser.add_argument('-f', '--fixed', action='store_true')
    parser.add_argument('-x', '--x', type=float, default=0)
    parser.add_argument('-y', '--y', type=float, default=0)
    parser.add_argument('-z', '--z', type=float, default=0)
    parser.add_argument('directory')
    args = parser.parse_args()
    robotPath = args.directory
    if not robotPath.endswith('.urdf'):
        robotPath += '/robot.urdf'
    sim = Simulation(robotPath, gui=True, panels=True, fixed=args.fixed)
    # Offset the spawn position by the CLI-provided x/y/z, keeping orientation
    pos, rpy = sim.getRobotPose()
    _, orn = p.getBasePositionAndOrientation(sim.robot)
    sim.setRobotPose([pos[0] + args.x, pos[1] + args.y, pos[2] + args.z], orn)
    controls = {}
    for name in sim.getJoints():
        if name.endswith('_speed'):
            # Speed-controlled joints get a +/-3*pi slider
            controls[name] = p.addUserDebugParameter(
                name, -math.pi*3, math.pi*3, 0)
        else:
            # Position-controlled joints get a slider bounded by URDF limits
            infos = sim.getJointsInfos(name)
            low = -math.pi
            high = math.pi
            if 'lowerLimit' in infos:
                low = infos['lowerLimit']
            if 'upperLimit' in infos:
                high = infos['upperLimit']
            controls[name] = p.addUserDebugParameter(name, low, high, 0)
    lastPrint = 0
    while True:
        # Push current slider values to the simulation
        targets = {}
        for name in controls.keys():
            targets[name] = p.readUserDebugParameter(controls[name])
        sim.setJoints(targets)
        # Refresh the console readout at most every 50 ms
        if time.time() - lastPrint > 0.05:
            lastPrint = time.time()
            os.system("clear")
            frames = sim.getFrames()
            for frame in frames:
                print(frame)
                print("- x=%f\ty=%f\tz=%f" % frames[frame][0])
                print("- r=%f\tp=%f\ty=%f" % frames[frame][1])
                print("")
            print("Center of mass:")
            print(sim.getCenterOfMassPosition())
        sim.tick()
if __name__ == "__main__":
    main()
12837228 | <gh_stars>1-10
import os
import struct
from JumpScale import j
import JumpScale.baselib.serializers
# Benchmark the Blowfish serializer: time 100k encrypt-serializations of a
# random 1000-byte message using a random maximum-length key.
j.application.start("blowfishtest")
from random import randrange
# Build a random 1000-byte message
msg = ""
for i in range(1000):
    msg += chr(randrange(0, 256))
# Build a random 56-byte key (56 bytes is the Blowfish maximum key length)
key = ""
for i in range(56):
    key += chr(randrange(0, 256))
# b means blowfish
s = j.db.serializers.getSerializerType("b", key=key)
nr = 100000
j.base.timer.start()
for i in range(nr):
    data = s.dumps(msg)
j.base.timer.stop(nr)
j.application.stop()
| StarcoderdataPython |
14683 | """
The container to store indexes in active learning.
Serve as the basic type of 'set' operation.
"""
# Authors: <NAME>
# License: BSD 3 clause
from __future__ import division
import collections
import copy
import numpy as np
from .multi_label_tools import check_index_multilabel, infer_label_size_multilabel, flattern_multilabel_index, \
integrate_multilabel_index
from ..utils.ace_warnings import *
from ..utils.interface import BaseCollection
from ..utils.misc import randperm
class IndexCollection(BaseCollection):
    """Index Collection.

    Index Collection class is a basic data type for set-like operations.
    Multiple different types of element are supported for active learning,
    and the validity of each operation is checked (duplicates, consistent
    element types).

    Note that:
    1. The types of elements should be the same.
    2. If multiple elements are to be updated, a list, numpy.ndarray or
       IndexCollection object should be given; otherwise the argument is
       treated as one single element. (If a single element contains
       multiple values, use a tuple as the element type.)

    Parameters
    ----------
    data : list or np.ndarray or object, optional (default=None)
        shape [n_element]. Element should be int or tuple.
        The meaning of elements can be defined by users.
        Some examples of elements:
        (example_index, label_index) for instance-label pair query.
        (example_index, feature_index) for feature query,
        (example_index, example_index) for active clustering;
        If int, it may be the index of an instance, for example.

    Attributes
    ----------
    index: list, shape (1, n_elements)
        A list contains all elements in this container.

    Examples
    --------
    >>> a = IndexCollection([1, 2, 3])
    >>> a.update([4,5])
    [1, 2, 3, 4, 5]
    >>> a.difference_update([1,2])
    [3, 4, 5]
    """

    def __init__(self, data=None):
        if data is None or len(data) == 0:
            self._innercontainer = []
            # BUGFIX: _element_type must also exist on the empty path,
            # otherwise add()/get_elementType() raise AttributeError.
            self._element_type = None
        else:
            if isinstance(data, IndexCollection):
                self._innercontainer = copy.deepcopy(data.index)
                self._element_type = data.get_elementType()
                return
            if not isinstance(data, (list, np.ndarray)):
                data = [data]
            # de-duplicate while keeping one copy of each element
            self._innercontainer = list(np.unique(list(data), axis=0))
            if len(self._innercontainer) != len(data):
                warnings.warn("There are %d same elements in the given data" % (len(data) - len(self._innercontainer)),
                              category=RepeatElementWarning,
                              stacklevel=3)
            # all elements must share a single type
            datatype = collections.Counter([type(i) for i in self._innercontainer])
            if len(datatype) != 1:
                raise TypeError("Different types found in the given _indexes.")
            tmp_data = self._innercontainer[0]
            if isinstance(tmp_data, np.generic):
                # unwrap numpy scalars to the matching builtin type
                # (np.asscalar was deprecated in numpy v1.16)
                self._element_type = type(tmp_data.item())
            else:
                self._element_type = type(tmp_data)

    @property
    def index(self):
        """Return a deep copy of all elements as a list."""
        return copy.deepcopy(self._innercontainer)

    def __getitem__(self, item):
        return self._innercontainer.__getitem__(item)

    def get_elementType(self):
        """Return the type of the stored elements (None if the collection is empty)."""
        return self._element_type

    def pop(self):
        """Remove and return the last element. Raise IndexError if empty."""
        return self._innercontainer.pop()

    def add(self, value):
        """Add one element.

        A warning is issued if the value already exists in the collection.

        Parameters
        ----------
        value: object
            Same type as the elements already in the collection.
            Raise TypeError if an unknown type is given.

        Returns
        -------
        self: object
            return self.
        """
        # BUGFIX: unwrap numpy scalars BEFORE fixing the element type;
        # otherwise the first added np scalar sets a numpy element type
        # and the unwrapped builtin value fails the isinstance check.
        if isinstance(value, np.generic):
            # value = np.asscalar(value) # deprecated in numpy v1.16
            value = value.item()
        if self._element_type is None:
            # the first element fixes the collection's element type
            self._element_type = type(value)
        if not isinstance(value, self._element_type):
            raise TypeError(
                "A %s parameter is expected, but received: %s" % (str(self._element_type), str(type(value))))
        if value in self._innercontainer:
            warnings.warn("Adding element %s has already in the collection, skip." % (value.__str__()),
                          category=RepeatElementWarning,
                          stacklevel=3)
        else:
            self._innercontainer.append(value)
        return self

    def discard(self, value):
        """Remove an element.

        A warning is issued if the value to discard does not exist.

        Parameters
        ----------
        value: object
            Value to discard.

        Returns
        -------
        self: object
            Return self.
        """
        if value not in self._innercontainer:
            warnings.warn("Element %s to discard is not in the collection, skip." % (value.__str__()),
                          category=InexistentElementWarning,
                          stacklevel=3)
        else:
            self._innercontainer.remove(value)
        return self

    def difference_update(self, other):
        """Remove all elements of another array from this container.

        Parameters
        ----------
        other: object
            Elements to discard. Note that, if multiple indexes are contained,
            a list, numpy.ndarray or IndexCollection should be given. Otherwise,
            it will be treated as a single object.

        Returns
        -------
        self: object
            Return self.
        """
        if not isinstance(other, (list, np.ndarray, IndexCollection)):
            other = [other]
        for item in other:
            self.discard(item)
        return self

    def update(self, other):
        """Update self with the union of itself and others.

        Parameters
        ----------
        other: object
            Elements to add. Note that, if multiple indexes are contained,
            a list, numpy.ndarray or IndexCollection should be given. Otherwise,
            it will be treated as a single object.

        Returns
        -------
        self: object
            Return self.
        """
        if not isinstance(other, (list, np.ndarray, IndexCollection)):
            other = [other]
        for item in other:
            self.add(item)
        return self

    def random_sampling(self, rate=0.3):
        """Return a random sampled subset of this collection.

        Parameters
        ----------
        rate: float, optional (default=0.3)
            The rate of sampling. Must be a number in (0, 1).

        Returns
        -------
        array: IndexCollection
            The sampled index collection.
        """
        assert (0 < rate < 1)
        perm = randperm(len(self) - 1, round(rate * len(self)))
        return IndexCollection([self.index[i] for i in perm])
class MultiLabelIndexCollection(IndexCollection):
    """Class for managing multi-label indexes.

    This class stores indexes in multi-label settings. Each element should be
    a tuple. A single index should only have 1 element (example_index, ) to
    query all labels, or 2 elements (example_index, [label_indexes]) to query
    specific labels. Internally every query is decomposed into atomic,
    hashable (example_index, label_index) pairs kept in a set.

    Some examples of valid multi-label indexes include:
    queried_index = (1, [3,4])
    queried_index = (1, [3])
    queried_index = (1, 3)
    queried_index = (1, (3))
    queried_index = (1, (3,4))
    queried_index = (1, ) # query all labels

    Several validity checks are implemented in this class,
    such as repeated elements and index out of bound.

    Parameters
    ----------
    data : list or np.ndarray of a single tuple, optional (default=None)
        shape [n_element]. All elements should be tuples.
    label_size: int, optional (default=None)
        The number of classes. If not provided, an infer is attempted, raise if fail.

    Attributes
    ----------
    index: list, shape (1, n_elements)
        A list contains all elements in this container.

    Examples
    --------
    >>> multi_lab_ind1 = MultiLabelIndexCollection([(0, 1), (0, 2), (0, (3, 4)), (1, (0, 1))], label_size=5)
    {(0, 1), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> multi_lab_ind1.update((0, 0))
    {(0, 1), (0, 0), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> multi_lab_ind1.update([(1, 2), (1, (3, 4))])
    {(0, 1), (1, 2), (0, 0), (1, 3), (1, 4), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> multi_lab_ind1.update([(2,)])
    {(0, 1), (1, 2), (0, 0), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> multi_lab_ind1.difference_update([(0,)])
    {(1, 2), (1, 3), (2, 2), (1, 4), (2, 1), (2, 0), (1, 1), (2, 3), (2, 4), (1, 0)}
    """

    def __init__(self, data=None, label_size=None):
        if data is None or len(data) == 0:
            self._innercontainer = set()
            if label_size is None:
                warnings.warn("This collection does not have a label_size value, set it manually or "
                              "it will raise when decomposing indexes.",
                              category=ValidityWarning)
            self._label_size = label_size
        else:
            if isinstance(data, MultiLabelIndexCollection):
                # BUGFIX: copy the underlying set; copying data.index (a list)
                # produced a list-backed container that broke add()/discard().
                self._innercontainer = copy.deepcopy(data._innercontainer)
                self._label_size = data._label_size
                return
            # check given indexes
            data = check_index_multilabel(data)
            if label_size is None:
                self._label_size = infer_label_size_multilabel(data, check_arr=False)
            else:
                self._label_size = label_size
            # decompose all label queries into atomic (example, label) pairs.
            decomposed_data = flattern_multilabel_index(data, self._label_size, check_arr=False)
            self._innercontainer = set(decomposed_data)
            if len(self._innercontainer) != len(decomposed_data):
                # BUGFIX: the duplicate count must be computed against the
                # decomposed data, not against the raw input length.
                warnings.warn(
                    "There are %d same elements in the given data" % (
                        len(decomposed_data) - len(self._innercontainer)),
                    category=RepeatElementWarning,
                    stacklevel=3)

    @property
    def index(self):
        """Return the stored (example_index, label_index) tuples as a list."""
        return list(self._innercontainer)

    def add(self, value):
        """Add an element.

        A warning is issued if the value already exists. Raise if an
        invalid type of value is given.

        Parameters
        ----------
        value: tuple
            Index for adding. Raise if index is out of bound.

        Returns
        -------
        self: object
            return self.
        """
        # check validation
        assert (isinstance(value, tuple))
        # collections.Iterable was removed from the top-level collections
        # namespace in Python 3.10; prefer collections.abc when available.
        iterable_cls = getattr(collections, "abc", collections).Iterable
        if len(value) == 1:
            # (example, ) queries every label of the example
            value = [(value[0], i) for i in range(self._label_size)]
            return self.update(value)
        elif len(value) == 2:
            if isinstance(value[1], iterable_cls):
                for item in value[1]:
                    if item >= self._label_size:
                        raise ValueError("Index %s is out of bound %s" % (str(item), str(self._label_size)))
                # BUGFIX: decompose into atomic hashable pairs; storing a tuple
                # that contains a list raised TypeError on the inner set.
                return self.update([(value[0], i) for i in value[1]])
            if value[1] >= self._label_size:
                raise ValueError("Index %s is out of bound %s" % (str(value[1]), str(self._label_size)))
        else:
            raise ValueError("A tuple with 1 or 2 elements is expected, but received: %s" % str(value))
        if value in self._innercontainer:
            warnings.warn("Adding element %s has already in the collection, skip." % (value.__str__()),
                          category=RepeatElementWarning,
                          stacklevel=3)
        else:
            self._innercontainer.add(value)
        return self

    def discard(self, value):
        """Remove an element.

        A warning is issued if the value to discard does not exist. Raise
        if an invalid type of value is given.

        Parameters
        ----------
        value: tuple
            Index for removing. Raise if index is out of bound.

        Returns
        -------
        self: object
            return self.
        """
        assert (isinstance(value, tuple))
        if len(value) == 1:
            # (example, ) discards every label of the example
            value = [(value[0], i) for i in range(self._label_size)]
            return self.difference_update(value)
        iterable_cls = getattr(collections, "abc", collections).Iterable
        if len(value) == 2 and isinstance(value[1], iterable_cls):
            # BUGFIX: decompose (example, [labels]) the same way add() does;
            # the nested form is never stored, so test the atomic pairs.
            return self.difference_update([(value[0], i) for i in value[1]])
        if value not in self._innercontainer:
            warnings.warn("Element %s to discard is not in the collection, skip." % (value.__str__()),
                          category=InexistentElementWarning,
                          stacklevel=3)
        else:
            self._innercontainer.discard(value)
        return self

    def difference_update(self, other):
        """Remove all elements of another array from this container.

        Parameters
        ----------
        other: object
            Elements to discard. Note that, if multiple indexes are contained,
            a list, numpy.ndarray or MultiLabelIndexCollection should be given.
            Otherwise, a tuple should be given.

        Returns
        -------
        self: object
            Return self.
        """
        if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):
            label_ind = flattern_multilabel_index(other, self._label_size)
            for j in label_ind:
                self.discard(j)
        elif isinstance(other, tuple):
            self.discard(other)
        else:
            raise TypeError(
                "A list or np.ndarray is expected if multiple indexes are "
                "contained. Otherwise, a tuple should be provided")
        return self

    def update(self, other):
        """Update self with the union of itself and others.

        Parameters
        ----------
        other: object
            Elements to add. Note that, if multiple indexes are contained,
            a list, numpy.ndarray or MultiLabelIndexCollection should be given.
            Otherwise, a tuple should be given.

        Returns
        -------
        self: object
            Return self.
        """
        if isinstance(other, (list, np.ndarray, MultiLabelIndexCollection)):
            label_ind = flattern_multilabel_index(other, self._label_size)
            for j in label_ind:
                self.add(j)
        elif isinstance(other, tuple):
            self.add(other)
        else:
            raise TypeError(
                "A list or np.ndarray is expected if multiple indexes are "
                "contained. Otherwise, a tuple should be provided")
        return self

    def get_onedim_index(self, order='C', ins_num=None):
        """Get the 1d (flattened) index of each stored entry.

        Parameters
        ----------
        order : {'C', 'F'}, optional (default='C')
            Determines whether the indices should be viewed as indexing in
            row-major (C-style) or column-major (Matlab-style) order.
        ins_num: int, optional
            The total number of instances. Must be provided if the order is 'F'.

        Examples
        --------
        >>> b = [1, 4, 11]
        >>> mi = MultiLabelIndexCollection.construct_by_1d_array(array=b, label_mat_shape=(3, 4))
        >>> print(mi)
        {(1, 0), (2, 3), (1, 1)}
        >>> print('col major:', mi.get_onedim_index(order='F', ins_num=3))
        col major: [1, 11, 4]
        >>> print('row major:', mi.get_onedim_index(order='C'))
        row major: [4, 11, 5]
        """
        if order == 'F':
            if ins_num is None:
                raise ValueError("The ins_num must be provided if the order is 'F'.")
            return [tup[0] + tup[1] * ins_num for tup in self._innercontainer]
        elif order == 'C':
            return [tup[0] * self._label_size + tup[1] for tup in self._innercontainer]
        else:
            raise ValueError("The value of order must be one of {'C', 'F'}")

    def get_instance_index(self):
        """Get the index of instances contained in this object.

        If it is a labeled set, it is equivalent to the indexes of fully and
        partially labeled instances.

        Returns
        -------
        partlab: list
            The indexes of (at least partially) labeled instances.
        """
        return np.unique([tp[0] for tp in self._innercontainer])

    def _get_cond_instance(self, cond):
        """Return the indexes of instances according to cond.

        cond = 0: return the instances which are unbroken (all labels known).
        cond = 1: return the instances which have missing entries.
        """
        tmp = integrate_multilabel_index(self.index, label_size=self._label_size, check_arr=False)
        if cond == 0:
            # an integrated tuple of length 1 means every label is present
            return [tp[0] for tp in tmp if len(tp) == 1]
        else:
            return [tp[0] for tp in tmp if len(tp) > 1]

    def get_unbroken_instances(self):
        """Return the indexes of unbroken instances whose entries are all known."""
        return self._get_cond_instance(cond=0)

    def get_break_instances(self):
        """Return the indexes of break instances which have missing entries."""
        return self._get_cond_instance(cond=1)

    def get_matrix_mask(self, mat_shape, fill_value=1, sparse=True, sparse_format='lil_matrix'):
        """Return an array which has the same shape as the label matrix.

        If an entry is known, the corresponding value in the mask is
        fill_value, otherwise 0.

        Parameters
        ----------
        mat_shape: tuple
            The shape of label matrix. [n_samples, n_classes]
        fill_value: int
            The value filled in the mask when the entry is in the container.
        sparse: bool
            Whether to return a sparse matrix or a dense matrix (numpy.ndarray).
        sparse_format: str
            The format of the returned sparse matrix. Only available if sparse==True.
            Should be one of [bsr_matrix, coo_matrix, csc_matrix, csr_matrix,
            dia_matrix, dok_matrix, lil_matrix].
            Please refer to https://docs.scipy.org/doc/scipy-0.18.1/reference/sparse.html
            for the definition of each sparse format.

        Returns
        -------
        mask: {scipy.sparse matrix, numpy.ndarray}
            The mask of the label matrix.
        """
        assert isinstance(mat_shape, tuple)
        if sparse:
            # Resolve the constructor by name instead of exec/eval.
            import scipy.sparse as _scipy_sparse
            try:
                matrix_cls = getattr(_scipy_sparse, sparse_format)
            except AttributeError:
                raise ValueError(
                    "sparse format " + sparse_format + "is not defined. Valid format should be one of "
                    "[bsr_matrix, coo_matrix, csc_matrix, csr_matrix, "
                    "dia_matrix, dok_matrix, lil_matrix].")
            mask = matrix_cls(mat_shape)
            # BUGFIX: fill the known entries; the previous implementation
            # returned an all-zero sparse matrix.
            for item in self._innercontainer:
                mask[item] = fill_value
        else:
            if fill_value == 1:
                mask = np.zeros(mat_shape, dtype=bool)
                for item in self._innercontainer:
                    mask[item] = True
            else:
                mask = np.zeros(mat_shape)
                for item in self._innercontainer:
                    mask[item] = fill_value
        return mask

    @classmethod
    def construct_by_1d_array(cls, array, label_mat_shape, order='F'):
        """Construct a MultiLabelIndexCollection object from a 1d index array.

        Parameters
        ----------
        array: {list, np.ndarray}
            A 1d array of indexes.
        label_mat_shape: tuple of ints
            The shape of label matrix. The 1st element is the number of instances,
            and the 2nd element is the total classes.
        order : {'C', 'F'}, optional
            Determines whether the indices should be viewed as indexing in
            row-major (C-style) or column-major (Matlab-style) order.

        Returns
        -------
        multi_ind: MultiLabelIndexCollection
            The MultiLabelIndexCollection object.

        Examples
        --------
        >>> b = [1, 4, 11]
        >>> mi = MultiLabelIndexCollection.construct_by_1d_array(array=b, label_mat_shape=(3, 4))
        >>> print(mi)
        {(1, 0), (2, 3), (1, 1)}
        >>> print('col major:', mi.get_onedim_index(order='F', ins_num=3))
        col major: [1, 11, 4]
        >>> print('row major:', mi.get_onedim_index(order='C'))
        row major: [4, 11, 5]
        """
        assert len(label_mat_shape) == 2
        # BUGFIX: pass the shape positionally; the `dims` keyword was renamed
        # to `shape` in numpy 1.16 and later removed.
        row, col = np.unravel_index(array, label_mat_shape, order=order)
        return cls(data=[(row[i], col[i]) for i in range(len(row))], label_size=label_mat_shape[1])

    @classmethod
    def construct_by_element_mask(cls, mask):
        """Construct a MultiLabelIndexCollection object from a 2d mask matrix.

        Parameters
        ----------
        mask: {list, np.ndarray}
            The 2d mask matrix of elements.
            There must be only 1 and 0 in the matrix, in which
            1 means the corresponding element is known, and will be
            added to the MultiLabelIndexCollection container.
            Otherwise, it will be treated as an unknown element.

        Examples
        --------
        >>> import numpy as np
        >>> mask = np.asarray([
            [0, 1],
            [1, 0],
            [1, 0]
        ])  # 3 rows, 2 lines
        >>> mi = MultiLabelIndexCollection.construct_by_element_mask(mask=mask)
        >>> print(mi)
        {(0, 1), (2, 0), (1, 0)}
        """
        mask = np.asarray(mask)
        ue = np.unique(mask)
        if not (len(mask.shape) == 2 and len(ue) == 2 and 0 in ue and 1 in ue):
            raise ValueError("The mask matrix should be a 2d array, and there must be only "
                             "1 and 0 in the matrix, in which, 1 means the corresponding "
                             "element is known, and will be added to the MultiLabelIndexCollection container.")
        nz_row, nz_col = np.nonzero(mask)
        return cls(data=[(nz_row[i], nz_col[i]) for i in range(len(nz_row))], label_size=mask.shape[1])
class FeatureIndexCollection(MultiLabelIndexCollection):
    """Container to store the indexes in the feature querying scenario.

    This class stores indexes of an incomplete feature matrix. Each element
    should be a tuple. A single index should only have 1 element
    (example_index, ) to query all features, or 2 elements
    (example_index, [feature_indexes]) to query specific features.

    Some examples of valid indexes include:
    queried_index = (1, [3,4])
    queried_index = (1, [3])
    queried_index = (1, 3)
    queried_index = (1, (3))
    queried_index = (1, (3,4))
    queried_index = (1, ) # query all features

    Several validity checks are implemented in this class,
    such as repeated elements and index out of bound.

    Parameters
    ----------
    data : list or np.ndarray of a single tuple, optional (default=None)
        shape [n_element]. All elements should be tuples.
    feature_size: int, optional (default=None)
        The number of features. If not provided, an infer is attempted, raise if fail.

    Attributes
    ----------
    index: list, shape (1, n_elements)
        A list contains all elements in this container.

    Examples
    --------
    >>> fea_ind1 = FeatureIndexCollection([(0, 1), (0, 2), (0, (3, 4)), (1, (0, 1))], feature_size=5)
    {(0, 1), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> fea_ind1.update((0, 0))
    {(0, 1), (0, 0), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> fea_ind1.update([(1, 2), (1, (3, 4))])
    {(0, 1), (1, 2), (0, 0), (1, 3), (1, 4), (1, 1), (0, 4), (1, 0), (0, 2), (0, 3)}
    >>> fea_ind1.difference_update([(0,)])
    {(1, 2), (1, 3), (1, 4), (1, 1), (1, 0)}
    """

    def __init__(self, data, feature_size=None):
        try:
            # feature_size plays the role of label_size in the parent class
            super(FeatureIndexCollection, self).__init__(data=data, label_size=feature_size)
        except ValueError:
            # BUGFIX: only translate the size-inference failure (ValueError);
            # the previous `except (Exception, ValueError)` masked every
            # unrelated error with this misleading message.
            raise Exception("The inference of feature_size is failed, please set a specific value.")
def map_whole_index_to_train(train_idx, index_in_whole):
    """Map the indexes from the whole dataset to the training set.

    Parameters
    ----------
    train_idx: {list, numpy.ndarray}
        The training indexes (in whole-data coordinates).
    index_in_whole: {IndexCollection, MultiLabelIndexCollection}
        The indexes that need to be mapped, expressed in whole-data coordinates.

    Returns
    -------
    index_in_train: {IndexCollection, MultiLabelIndexCollection}
        The mapped indexes (positions within train_idx).

    Examples
    --------
    >>> train_idx = [231, 333, 423]
    >>> index_in_whole = IndexCollection([333, 423])
    >>> print(map_whole_index_to_train(train_idx, index_in_whole))
    [1, 2]
    """
    if isinstance(index_in_whole, MultiLabelIndexCollection):
        ind_type = 2
    elif isinstance(index_in_whole, IndexCollection):
        ind_type = 1
    else:
        raise TypeError("index_in_whole must be one of {IndexCollection, MultiLabelIndexCollection} type.")
    # BUGFIX: coerce to ndarray so `train_idx == entry` broadcasts elementwise;
    # with a plain list (as in the docstring example) the comparison is a
    # scalar False and np.argwhere(...)[0][0] raises IndexError.
    train_idx = np.asarray(train_idx)
    tr_ob = []
    for entry in index_in_whole:
        if ind_type == 2:
            # multi-label entry: (example_index, label_index)
            assert entry[0] in train_idx
            ind_in_train = np.argwhere(train_idx == entry[0])[0][0]
            tr_ob.append((ind_in_train, entry[1]))
        else:
            assert entry in train_idx
            tr_ob.append(np.argwhere(train_idx == entry)[0][0])
    if ind_type == 2:
        return MultiLabelIndexCollection(tr_ob)
    else:
        return IndexCollection(tr_ob)
| StarcoderdataPython |
1736228 | """
Set up defaults and read sentinel.conf
"""
import sys
import os
from hatch_config import HatchConfig
# Resolve the sentinel.conf path: the SENTINEL_CONFIG environment variable
# wins; otherwise fall back to ../sentinel.conf relative to this module.
default_sentinel_config = os.path.normpath(
    os.path.join(os.path.dirname(__file__), '../sentinel.conf')
)
sentinel_config_file = os.environ.get('SENTINEL_CONFIG', default_sentinel_config)
# parsed key/value view of sentinel.conf, consulted by all getters below
sentinel_cfg = HatchConfig.tokenize(sentinel_config_file)
sentinel_version = "1.4.0"
def get_hatch_conf():
    """Return the path of hatch.conf for the current platform.

    The per-platform default can be overridden by the 'hatch_conf' key
    in sentinel.conf.
    """
    if sys.platform == 'win32':
        default_path = os.path.join(os.getenv('APPDATA'), "HatchCore/hatch.conf")
    else:
        home = os.environ.get('HOME')
        if sys.platform == 'darwin':
            default_path = os.path.join(home, "Library/Application Support/HatchCore/hatch.conf")
        else:
            default_path = os.path.join(home, ".hatchcore/hatch.conf")
    return sentinel_cfg.get('hatch_conf', default_path)
def get_network():
    """Return the configured network name, defaulting to 'mainnet'."""
    configured_network = sentinel_cfg.get('network', 'mainnet')
    return configured_network
def get_rpchost():
    """Return the configured RPC host, defaulting to the loopback address."""
    configured_host = sentinel_cfg.get('rpchost', '127.0.0.1')
    return configured_host
def sqlite_test_db_name(sqlite_file_path):
    """Derive the test-database filename by inserting '_test' before the extension."""
    base, extension = os.path.splitext(sqlite_file_path)
    return "{}_test{}".format(base, extension)
def get_db_conn():
    """Create and return a peewee database handle based on sentinel.conf.

    In the 'test' environment the database name is suffixed with '_test'
    so tests never touch the production database. For sqlite only the
    filename is used; for mysql/postgres the host/user/password (and
    optional port) from sentinel.conf are passed through.
    """
    import peewee
    env = os.environ.get('SENTINEL_ENV', 'production')
    # default values should be used unless you need a different config for development
    db_host = sentinel_cfg.get('db_host', '127.0.0.1')
    db_port = sentinel_cfg.get('db_port', None)
    db_name = sentinel_cfg.get('db_name', 'sentinel')
    db_user = sentinel_cfg.get('db_user', 'sentinel')
    db_password = sentinel_cfg.get('db_password', '<PASSWORD>')
    # NOTE(review): db_charset is read but never used below — confirm intent
    db_charset = sentinel_cfg.get('db_charset', 'utf8mb4')
    db_driver = sentinel_cfg.get('db_driver', 'sqlite')
    if env == 'test':
        # keep the test environment away from the production database
        if db_driver == 'sqlite':
            db_name = sqlite_test_db_name(db_name)
        else:
            db_name = "%s_test" % db_name
    peewee_drivers = {
        'mysql': peewee.MySQLDatabase,
        'postgres': peewee.PostgresqlDatabase,
        'sqlite': peewee.SqliteDatabase,
    }
    driver = peewee_drivers.get(db_driver)
    if driver is None:
        # BUGFIX: fail with a clear message instead of the later
        # "'NoneType' object is not callable" TypeError.
        raise ValueError("Unsupported db_driver %r; expected one of %s"
                         % (db_driver, sorted(peewee_drivers)))
    # MySQLdb expects 'passwd' where the other drivers expect 'password'
    dbpfn = 'passwd' if db_driver == 'mysql' else 'password'
    db_conn = {
        'host': db_host,
        'user': db_user,
        dbpfn: db_password,
    }
    if db_port:
        db_conn['port'] = int(db_port)
    if driver == peewee.SqliteDatabase:
        # sqlite takes only the filename; connection kwargs do not apply
        db_conn = {}
    db = driver(db_name, **db_conn)
    return db
# Module-level singletons resolved once at import time.
hatch_conf = get_hatch_conf()
network = get_network()
rpc_host = get_rpchost()
# NOTE: creates the database handle as an import side effect
db = get_db_conn()
| StarcoderdataPython |
1830806 | import torch
import torchvision
from torch.utils.data import DataLoader, Subset
import pytorch_lightning as pl
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
import os, sys
from glob import glob
import cv2
from PIL import Image
sys.path.append('../')
from celeba.dataset import CelebaDataset
import albumentations as Augment
from albumentations.pytorch.transforms import ToTensor
def basic_transforms(img_height, img_width, image_pad=0):
    """Build the training augmentation pipeline.

    Resizes to (img_height+image_pad, img_width+image_pad), random-crops back
    to (img_height, img_width), then applies horizontal flip and
    brightness/contrast jitter.
    """
    padded_height = img_height + image_pad
    padded_width = img_width + image_pad
    steps = [
        Augment.Resize(padded_height, padded_width, interpolation=cv2.INTER_NEAREST, always_apply=True),
        Augment.RandomCrop(img_height, img_width, always_apply=True),
        Augment.HorizontalFlip(p=0.5),
        Augment.RandomBrightnessContrast(p=1.0),
    ]
    return Augment.Compose(steps)
def extra_transforms():
    """Build the optional noise/occlusion augmentations (Gaussian noise + coarse dropout)."""
    noise_steps = [
        Augment.GaussNoise(p=0.75),
        Augment.CoarseDropout(p=0.5),
    ]
    return Augment.Compose(noise_steps)
class CelebaDataModule(pl.LightningDataModule):
    """LightningDataModule wrapping the CelebA train/val image folders.

    Parameters
    ----------
    data_dir: str
        Root directory expected to contain 'train' and 'val' sub-directories.
    batch_size: int
        Batch size used for both dataloaders.
    image_size: int
        Side length (pixels) that images are resized/cropped to.
    """

    def __init__(self, data_dir, batch_size, image_size):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.image_size = image_size

    # prepare_data() is intentionally not implemented: download/unzip steps
    # (anything that must not run per-process) would go there.

    def setup(self, stage=None):
        """Instantiate the train/val datasets (called by Lightning per process)."""
        if stage == 'fit' or stage is None:
            # training split gets the full augmentation pipeline
            self.data_train = CelebaDataset(os.path.join(self.data_dir, 'train'),
                                            transform=basic_transforms(img_height=self.image_size,
                                                                       img_width=self.image_size,
                                                                       image_pad=0))
            # validation split uses the same pipeline without padding
            self.data_val = CelebaDataset(os.path.join(self.data_dir, 'val'),
                                          transform=basic_transforms(self.image_size, self.image_size))

    def train_dataloader(self):
        """Shuffled loader over the training split."""
        return DataLoader(self.data_train, batch_size=self.batch_size, shuffle=True)

    def val_dataloader(self):
        """Deterministic (unshuffled) loader over the validation split."""
        return DataLoader(self.data_val, batch_size=self.batch_size, shuffle=False)
if __name__ == '__main__':
    # Smoke test: instantiate the datamodule and dump a couple of validation
    # batches as PNG files under output/.
    dm = CelebaDataModule(data_dir='/home/markpp/datasets/celeba/',
                          batch_size=16,
                          image_size=64)
    dm.setup()
    # cleanup output dir
    import os, shutil
    output_root = "output/"
    if os.path.exists(output_root):
        shutil.rmtree(output_root)
    os.makedirs(output_root)
    sample_idx = 0
    for batch_id, batch in enumerate(dm.val_dataloader()):
        imgs = batch
        for img in imgs:
            print(img.shape)
            # CHW float tensor in [0,1] -> HWC uint8 array for cv2
            img = img.mul(255).permute(1, 2, 0).byte().numpy()
            # one sub-directory per batch, zero-padded for stable sorting
            output_dir = os.path.join(output_root,str(batch_id).zfill(6))
            if not os.path.exists(output_dir):
                os.makedirs(output_dir)
            filename = "id-{}.png".format(str(sample_idx).zfill(6))
            cv2.imwrite(os.path.join(output_dir,filename),img)
            sample_idx = sample_idx + 1
        # only dump the first couple of batches
        if batch_id > 1:
            break
| StarcoderdataPython |
1713153 | <gh_stars>0
from __future__ import division
from builtins import range
from future.utils import with_metaclass
import numpy as np
from numpy import newaxis as na
import abc
import copy
from scipy.special import logsumexp
from pyhsmm.util.stats import sample_discrete
try:
from pyhsmm.util.cstats import sample_markov, count_transitions
except ImportError:
from pyhsmm.util.stats import sample_markov, count_transitions
from pyhsmm.util.general import rle
######################
# Mixins and bases #
######################
class _StatesBase(with_metaclass(abc.ABCMeta, object)):
    """Abstract base for per-sequence hidden-state containers.

    Holds a reference to the owning model, the observed data and the hidden
    state sequence, and caches per-state observation log-likelihoods.
    """
    def __init__(
        self,
        model,
        T=None,
        data=None,
        stateseq=None,
        generate=True,
        initialize_from_prior=True,
        fixed_stateseq=False,
    ):
        self.model = model
        # sequence length: explicit T wins, otherwise inferred from the data
        self.T = T if T is not None else data.shape[0]
        self.data = data
        self.clear_caches()
        self.fixed_stateseq = fixed_stateseq
        if fixed_stateseq:
            assert (
                stateseq is not None
            ), "fixed_stateseq requires a stateseq to be supplied"
        if stateseq is not None:
            self.stateseq = np.array(stateseq, dtype=np.int32)
        elif generate:
            if data is not None and not initialize_from_prior:
                # initialize by conditioning the sequence on the data
                self.resample()
            else:
                # initialize by sampling the sequence from the prior
                self.generate_states()
    def copy_sample(self, newmodel):
        """Return a shallow copy bound to `newmodel` with its own stateseq copy."""
        new = copy.copy(self)
        new.clear_caches()  # saves space, though may recompute later for likelihoods
        new.model = newmodel
        new.stateseq = self.stateseq.copy()
        return new
    _kwargs = {}  # used in subclasses for joblib stuff (extra ctor kwargs)
    ### model properties
    @property
    def obs_distns(self):
        """Observation distributions, delegated to the owning model."""
        return self.model.obs_distns
    @property
    def trans_matrix(self):
        """Transition matrix, delegated to the owning model."""
        return self.model.trans_distn.trans_matrix
    @property
    def pi_0(self):
        """Initial state distribution, delegated to the owning model."""
        return self.model.init_state_distn.pi_0
    @property
    def num_states(self):
        """Number of hidden states, delegated to the owning model."""
        return self.model.num_states
    ### convenience properties
    @property
    def stateseq_norep(self):
        """State sequence with consecutive repeats collapsed (run-length encoding)."""
        return rle(self.stateseq)[0]
    @property
    def durations(self):
        """Run length of each collapsed state in the sequence."""
        return rle(self.stateseq)[1]
    ### generation
    @abc.abstractmethod
    def generate_states(self):
        """Sample a state sequence; implemented by subclasses."""
        pass
    ### messages and likelihoods
    # some cached things depends on model parameters, so caches should be
    # cleared when the model changes (e.g. when parameters are updated)
    def clear_caches(self):
        """Invalidate cached likelihood matrices and the cached normalizer."""
        self._aBl = self._mf_aBl = None
        self._normalizer = None
    @property
    def aBl(self):
        """T x num_states matrix of per-state observation log-likelihoods (cached)."""
        if self._aBl is None:
            data = self.data
            aBl = self._aBl = np.empty((data.shape[0], self.num_states))
            for idx, obs_distn in enumerate(self.obs_distns):
                aBl[:, idx] = obs_distn.log_likelihood(data).ravel()
            # rows containing any NaN are zeroed — presumably treats NaN
            # observations as missing/uninformative; TODO confirm
            aBl[np.isnan(aBl).any(1)] = 0.0
        return self._aBl
    @abc.abstractmethod
    def log_likelihood(self):
        """Return the data log-likelihood; implemented by subclasses."""
        pass
class _SeparateTransMixin(object):
    """Mixin that routes transition/initial-state lookups through a group id.

    Each sequence belongs to a group, and all parameter lookups are delegated
    to the model's per-group distribution lists.
    """
    def __init__(self, group_id, **kwargs):
        assert not isinstance(group_id, np.ndarray)
        self.group_id = group_id
        self._kwargs = dict(self._kwargs, group_id=group_id)
        super(_SeparateTransMixin, self).__init__(**kwargs)
        # touch the lazy properties once so the group distributions exist
        self.trans_matrix
        self.pi_0
    @property
    def trans_matrix(self):
        """Transition matrix of this sequence's group."""
        group_trans = self.model.trans_distns[self.group_id]
        return group_trans.trans_matrix
    @property
    def pi_0(self):
        """Initial state distribution of this sequence's group."""
        group_init = self.model.init_state_distns[self.group_id]
        return group_init.pi_0
    @property
    def mf_trans_matrix(self):
        """Mean-field expected transition matrix of the group, floored at 1e-3."""
        group_trans = self.model.trans_distns[self.group_id]
        return np.maximum(group_trans.exp_expected_log_trans_matrix, 1e-3)
    @property
    def mf_pi_0(self):
        """Mean-field expected initial state distribution of the group."""
        group_init = self.model.init_state_distns[self.group_id]
        return group_init.exp_expected_log_init_state_distn
class _PossibleChangepointsMixin(object):
    """Mixin restricting state changes to a fixed set of candidate segments.

    Time is partitioned into contiguous (start, stop) segments; the hidden
    state is constant within each segment, so message passing runs over
    Tblock segments instead of Tfull time steps.
    """
    def __init__(self, model, data, changepoints=None, **kwargs):
        # default: every time step is its own segment (no restriction)
        changepoints = (
            changepoints
            if changepoints is not None
            else [(t, t + 1) for t in range(data.shape[0])]
        )
        self.changepoints = changepoints
        self.segmentstarts = np.array(
            [start for start, stop in changepoints], dtype=np.int32
        )
        self.segmentlens = np.array(
            [stop - start for start, stop in changepoints], dtype=np.int32
        )
        # segments must be non-empty and exactly tile [0, data.shape[0])
        assert all(l > 0 for l in self.segmentlens)
        assert sum(self.segmentlens) == data.shape[0]
        assert (
            self.changepoints[0][0] == 0 and self.changepoints[-1][-1] == data.shape[0]
        )
        self._kwargs = dict(self._kwargs, changepoints=changepoints)
        # NOTE: T passed to the base class is the number of segments, not steps
        super(_PossibleChangepointsMixin, self).__init__(
            model, T=len(changepoints), data=data, **kwargs
        )
    def clear_caches(self):
        """Invalidate segment-level likelihood caches and the expanded stateseq."""
        self._aBBl = self._mf_aBBl = None
        self._stateseq = None
        super(_PossibleChangepointsMixin, self).clear_caches()
    @property
    def Tblock(self):
        """Number of segments (block-level sequence length)."""
        return len(self.changepoints)
    @property
    def Tfull(self):
        """Number of raw time steps in the data."""
        return self.data.shape[0]
    @property
    def stateseq(self):
        """Per-time-step state sequence, expanded lazily from the block sequence."""
        if self._stateseq is None:
            self._stateseq = self.blockstateseq.repeat(self.segmentlens)
        return self._stateseq
    @stateseq.setter
    def stateseq(self, stateseq):
        # accepts either a block-level or a full-length sequence
        assert len(stateseq) == self.Tblock or len(stateseq) == self.Tfull
        if len(stateseq) == self.Tblock:
            self.blockstateseq = stateseq
        else:
            # subsample the full sequence at each segment start
            self.blockstateseq = stateseq[self.segmentstarts]
        self._stateseq = None
    def _expected_states(self, *args, **kwargs):
        """Block-level expected states expanded back to per-time-step rows."""
        expected_states = super(_PossibleChangepointsMixin, self)._expected_states(
            *args, **kwargs
        )
        return expected_states.repeat(self.segmentlens, axis=0)
    @property
    def aBl(self):
        """Tblock x num_states log-likelihoods, summed within each segment (cached)."""
        if self._aBBl is None:
            aBl = super(_PossibleChangepointsMixin, self).aBl
            aBBl = self._aBBl = np.empty((self.Tblock, self.num_states))
            for idx, (start, stop) in enumerate(self.changepoints):
                aBBl[idx] = aBl[start:stop].sum(0)
        return self._aBBl
    @property
    def mf_aBl(self):
        """Mean-field analogue of aBl, summed within each segment (cached)."""
        if self._mf_aBBl is None:
            aBl = super(_PossibleChangepointsMixin, self).mf_aBl
            aBBl = self._mf_aBBl = np.empty((self.Tblock, self.num_states))
            for idx, (start, stop) in enumerate(self.changepoints):
                aBBl[idx] = aBl[start:stop].sum(0)
        return self._mf_aBBl
    def plot(self, *args, **kwargs):
        """Plot via the base class, then clamp the x-axis to the full time range."""
        from matplotlib import pyplot as plt
        super(_PossibleChangepointsMixin, self).plot(*args, **kwargs)
        plt.xlim((0, self.Tfull))
    # TODO do generate() and generate_states() actually work?
####################
# States classes #
####################
class HMMStatesPython(_StatesBase):
### generation
def generate_states(self):
T = self.T
nextstate_distn = self.pi_0
A = self.trans_matrix
stateseq = np.zeros(T, dtype=np.int32)
for idx in range(T):
stateseq[idx] = sample_discrete(nextstate_distn)
nextstate_distn = A[stateseq[idx]]
self.stateseq = stateseq
return stateseq
    ### message passing
    def log_likelihood(self):
        """Return the data log-likelihood, computing and caching it on demand."""
        if self._normalizer is None:
            self.messages_forwards_normalized()  # NOTE: sets self._normalizer as a side effect
        return self._normalizer
    def _messages_log(self, trans_matrix, init_state_distn, log_likelihoods):
        """Run both log-space message passes; returns (alphal, betal)."""
        alphal = self._messages_forwards_log(
            trans_matrix, init_state_distn, log_likelihoods
        )
        betal = self._messages_backwards_log(trans_matrix, log_likelihoods)
        return alphal, betal
    def messages_log(self):
        """Log-space messages for this sequence under the model's current parameters."""
        return self._messages_log(self.trans_matrix, self.pi_0, self.aBl)
    @staticmethod
    def _messages_backwards_log(trans_matrix, log_likelihoods):
        """Backward message pass in log space.

        Returns betal with betal[t] proportional to log p(future obs | state at t).
        """
        # overflow while exponentiating large log values is expected; ignore it
        errs = np.seterr(over="ignore")
        Al = np.log(trans_matrix)
        aBl = log_likelihoods
        betal = np.zeros_like(aBl)
        # recurse backwards from the second-to-last time step
        for t in range(betal.shape[0] - 2, -1, -1):
            betal[t] = logsumexp(Al + betal[t + 1] + aBl[t + 1], axis=1)
        np.seterr(**errs)
        return betal
    def messages_backwards_log(self):
        """Backward messages for this sequence; also caches the log normalizer."""
        betal = self._messages_backwards_log(self.trans_matrix, self.aBl)
        assert not np.isnan(betal).any()
        # fold the initial distribution into the first backward message
        self._normalizer = logsumexp(np.log(self.pi_0) + betal[0] + self.aBl[0])
        return betal
    @staticmethod
    def _messages_forwards_log(trans_matrix, init_state_distn, log_likelihoods):
        """Forward message pass in log space.

        Returns alphal with alphal[t] proportional to log p(obs up to t, state at t).
        """
        errs = np.seterr(over="ignore")
        Al = np.log(trans_matrix)
        aBl = log_likelihoods
        alphal = np.zeros_like(aBl)
        alphal[0] = np.log(init_state_distn) + aBl[0]
        for t in range(alphal.shape[0] - 1):
            alphal[t + 1] = logsumexp(alphal[t] + Al.T, axis=1) + aBl[t + 1]
        np.seterr(**errs)
        return alphal
    def messages_forwards_log(self):
        """Forward messages for this sequence; also caches the log normalizer."""
        alphal = self._messages_forwards_log(self.trans_matrix, self.pi_0, self.aBl)
        assert not np.any(np.isnan(alphal))
        self._normalizer = logsumexp(alphal[-1])
        return alphal
    @staticmethod
    def _messages_backwards_normalized(trans_matrix, init_state_distn, log_likelihoods):
        """Backward pass with per-step normalization (numerically stable).

        Returns (betan, logtot) where logtot is the data log-likelihood.
        """
        aBl = log_likelihoods
        A = trans_matrix
        T = aBl.shape[0]
        betan = np.empty_like(aBl)
        logtot = 0.0
        betan[-1] = 1.0
        for t in range(T - 2, -1, -1):
            # shift by the row max before exponentiating to avoid overflow
            cmax = aBl[t + 1].max()
            betan[t] = A.dot(betan[t + 1] * np.exp(aBl[t + 1] - cmax))
            norm = betan[t].sum()
            # accumulate the normalizer removed at this step
            logtot += cmax + np.log(norm)
            betan[t] /= norm
        cmax = aBl[0].max()
        # fold in the initial distribution and the first observation
        logtot += cmax + np.log(
            (np.exp(aBl[0] - cmax) * init_state_distn * betan[0]).sum()
        )
        return betan, logtot
    def messages_backwards_normalized(self):
        """Normalized backward messages; also caches the log normalizer."""
        betan, self._normalizer = self._messages_backwards_normalized(
            self.trans_matrix, self.pi_0, self.aBl
        )
        return betan
    @staticmethod
    def _messages_forwards_normalized(trans_matrix, init_state_distn, log_likelihoods):
        """Forward pass with per-step normalization (numerically stable).

        Returns (alphan, logtot); logtot is -inf when a step has zero mass.
        """
        aBl = log_likelihoods
        A = trans_matrix
        T = aBl.shape[0]
        alphan = np.empty_like(aBl)
        logtot = 0.0
        in_potential = init_state_distn
        for t in range(T):
            cmax = aBl[t].max()
            alphan[t] = in_potential * np.exp(aBl[t] - cmax)
            norm = alphan[t].sum()
            if norm != 0:
                alphan[t] /= norm
                logtot += np.log(norm) + cmax
            else:
                # all states have zero probability: the data is impossible
                # under the model; zero out the remainder and bail
                alphan[t:] = 0.0
                return alphan, -np.inf
            in_potential = alphan[t].dot(A)
        return alphan, logtot
    def messages_forwards_normalized(self):
        """Normalized forward messages; also caches the log normalizer."""
        alphan, self._normalizer = self._messages_forwards_normalized(
            self.trans_matrix, self.pi_0, self.aBl
        )
        return alphan
### Gibbs sampling
def resample_log(self):
betal = self.messages_backwards_log()
self.sample_forwards_log(betal)
def resample_normalized(self):
alphan = self.messages_forwards_normalized()
self.sample_backwards_normalized(alphan)
def resample(self):
if not self.fixed_stateseq:
return self.resample_normalized()
@staticmethod
def _sample_forwards_log(betal, trans_matrix, init_state_distn, log_likelihoods):
A = trans_matrix
aBl = log_likelihoods
T = aBl.shape[0]
stateseq = np.empty(T, dtype=np.int32)
nextstate_unsmoothed = init_state_distn
for idx in range(T):
logdomain = betal[idx] + aBl[idx]
logdomain[nextstate_unsmoothed == 0] = -np.inf
if np.any(np.isfinite(logdomain)):
stateseq[idx] = sample_discrete(
nextstate_unsmoothed * np.exp(logdomain - np.amax(logdomain))
)
else:
stateseq[idx] = sample_discrete(nextstate_unsmoothed)
nextstate_unsmoothed = A[stateseq[idx]]
return stateseq
def sample_forwards_log(self, betal):
self.stateseq = self._sample_forwards_log(
betal, self.trans_matrix, self.pi_0, self.aBl
)
@staticmethod
def _sample_forwards_normalized(
betan, trans_matrix, init_state_distn, log_likelihoods
):
A = trans_matrix
aBl = log_likelihoods
T = aBl.shape[0]
stateseq = np.empty(T, dtype=np.int32)
nextstate_unsmoothed = init_state_distn
for idx in range(T):
logdomain = aBl[idx]
logdomain[nextstate_unsmoothed == 0] = -np.inf
stateseq[idx] = sample_discrete(
nextstate_unsmoothed * betan * np.exp(logdomain - np.amax(logdomain))
)
nextstate_unsmoothed = A[stateseq[idx]]
return stateseq
def sample_forwards_normalized(self, betan):
self.stateseq = self._sample_forwards_normalized(
betan, self.trans_matrix, self.pi_0, self.aBl
)
@staticmethod
def _sample_backwards_normalized(alphan, trans_matrix_transpose):
AT = trans_matrix_transpose
T = alphan.shape[0]
stateseq = np.empty(T, dtype=np.int32)
next_potential = np.ones(AT.shape[0])
for t in range(T - 1, -1, -1):
stateseq[t] = sample_discrete(next_potential * alphan[t])
next_potential = AT[stateseq[t]]
return stateseq
def sample_backwards_normalized(self, alphan):
self.stateseq = self._sample_backwards_normalized(
alphan, np.swapaxes(self.trans_matrix, -1, -2).copy()
)
### Mean Field
@property
def mf_aBl(self):
if self._mf_aBl is None:
T = self.data.shape[0]
self._mf_aBl = aBl = np.empty((T, self.num_states))
for idx, o in enumerate(self.obs_distns):
aBl[:, idx] = o.expected_log_likelihood(self.data).ravel()
aBl[np.isnan(aBl).any(1)] = 0.0
return self._mf_aBl
@property
def mf_trans_matrix(self):
return self.model.trans_distn.exp_expected_log_trans_matrix
@property
def mf_pi_0(self):
return self.model.init_state_distn.exp_expected_log_init_state_distn
@property
def all_expected_stats(self):
return self.expected_states, self.expected_transcounts, self._normalizer
@all_expected_stats.setter
def all_expected_stats(self, vals):
self.expected_states, self.expected_transcounts, self._normalizer = vals
self.stateseq = self.expected_states.argmax(1).astype("int32") # for plotting
def meanfieldupdate(self):
self.clear_caches()
self.all_expected_stats = self._expected_statistics(
self.mf_trans_matrix, self.mf_pi_0, self.mf_aBl
)
self._mf_param_snapshot = (
np.log(self.mf_trans_matrix),
np.log(self.mf_pi_0),
self.mf_aBl,
self._normalizer,
)
def _init_mf_from_gibbs(self):
expected_states = np.eye(self.num_states)[self.stateseq]
expected_transcounts = count_transitions(self.stateseq, self.num_states)
self.all_expected_stats = expected_states, expected_transcounts, -np.inf
def get_vlb(self, most_recently_updated=False):
if (
(self._normalizer is None)
or (self._mf_param_snapshot is None)
or not hasattr(self, "expected_states")
or not hasattr(self, "expected_transcounts")
):
self.meanfieldupdate()
# see https://github.com/mattjj/pyhsmm/issues/45#issuecomment-102721960
if most_recently_updated:
return self._normalizer
else:
# TODO TODO something wrong in here
_, _, new_normalizer = self._expected_statistics(
self.mf_trans_matrix, self.mf_pi_0, self.mf_aBl
)
new_params = np.log(self.mf_trans_matrix), np.log(self.mf_pi_0), self.mf_aBl
old_params, old_normalizer = (
self._mf_param_snapshot[:3],
self._mf_param_snapshot[-1],
)
E_stats = (
self.expected_transcounts,
self.expected_states[0],
self.expected_states,
)
linear_term = sum(
np.dot(np.ravel(a - b), np.ravel(c))
for a, b, c in zip(new_params, old_params, E_stats)
)
return linear_term - (new_normalizer - old_normalizer)
def _expected_statistics(
self, trans_potential, init_potential, likelihood_log_potential
):
if self.fixed_stateseq:
expected_states = np.zeros((self.T, self.num_states))
expected_states[np.arange(self.T), self.stateseq] = 1.0
expected_transcounts = np.zeros(
(self.T - 1, self.num_states, self.num_states)
)
expected_transcounts[
np.arange(self.T - 1), self.stateseq[:-1], self.stateseq[1:]
] = 1.0
normalizer = 0
else:
alphal = self._messages_forwards_log(
trans_potential, init_potential, likelihood_log_potential
)
betal = self._messages_backwards_log(
trans_potential, likelihood_log_potential
)
(
expected_states,
expected_transcounts,
normalizer,
) = self._expected_statistics_from_messages(
trans_potential, likelihood_log_potential, alphal, betal
)
assert not np.isinf(expected_states).any()
return expected_states, expected_transcounts, normalizer
@staticmethod
def _expected_statistics_from_messages(
trans_potential, likelihood_log_potential, alphal, betal
):
expected_states = alphal + betal
expected_states -= expected_states.max(1)[:, na]
np.exp(expected_states, out=expected_states)
expected_states /= expected_states.sum(1)[:, na]
Al = np.log(trans_potential)
log_joints = (
alphal[:-1, :, na]
+ (betal[1:, na, :] + likelihood_log_potential[1:, na, :])
+ Al[na, ...]
)
log_joints -= log_joints.max((1, 2))[:, na, na]
joints = np.exp(log_joints)
joints /= joints.sum((1, 2))[
:, na, na
] # NOTE: renormalizing each isnt really necessary
expected_transcounts = joints.sum(0)
normalizer = logsumexp(alphal[0] + betal[0])
return expected_states, expected_transcounts, normalizer
### EM
def E_step(self):
self.clear_caches()
self.all_expected_stats = self._expected_statistics(
self.trans_matrix, self.pi_0, self.aBl
)
### Viterbi
def Viterbi(self):
scores, args = self.maxsum_messages_backwards()
self.maximize_forwards(scores, args)
def maxsum_messages_backwards(self):
return self._maxsum_messages_backwards(self.trans_matrix, self.aBl)
def maximize_forwards(self, scores, args):
self.stateseq = self._maximize_forwards(scores, args, self.pi_0, self.aBl)
def mf_Viterbi(self):
scores, args = self.mf_maxsum_messages_backwards()
self.mf_maximize_forwards(scores, args)
def mf_maxsum_messages_backwards(self):
return self._maxsum_messages_backwards(self.mf_trans_matrix, self.mf_aBl)
def mf_maximize_forwards(self, scores, args):
self.stateseq = self._maximize_forwards(scores, args, self.mf_pi_0, self.mf_aBl)
@staticmethod
def _maxsum_messages_backwards(trans_matrix, log_likelihoods):
errs = np.seterr(divide="ignore")
Al = np.log(trans_matrix)
np.seterr(**errs)
aBl = log_likelihoods
scores = np.zeros_like(aBl)
args = np.zeros(aBl.shape, dtype=np.int32)
for t in range(scores.shape[0] - 2, -1, -1):
vals = Al + scores[t + 1] + aBl[t + 1]
vals.argmax(axis=1, out=args[t + 1])
vals.max(axis=1, out=scores[t])
return scores, args
@staticmethod
def _maximize_forwards(scores, args, init_state_distn, log_likelihoods):
aBl = log_likelihoods
T = aBl.shape[0]
stateseq = np.empty(T, dtype=np.int32)
stateseq[0] = (scores[0] + np.log(init_state_distn) + aBl[0]).argmax()
for idx in range(1, T):
stateseq[idx] = args[idx, stateseq[idx - 1]]
return stateseq
class HMMStatesEigen(HMMStatesPython):
    """Drop-in replacement for HMMStatesPython that delegates the heavy
    message-passing loops to the compiled (Eigen-backed) extension
    module; the pure-Python implementations remain reachable through
    the *_python convenience methods below."""
    def generate_states(self):
        self.stateseq = sample_markov(
            T=self.T, trans_matrix=self.trans_matrix, init_state_distn=self.pi_0
        )
    ### common messages (Gibbs, EM, likelihood calculation)
    @staticmethod
    def _messages_backwards_log(trans_matrix, log_likelihoods):
        from pyhsmm.internals.hmm_messages_interface import messages_backwards_log
        return messages_backwards_log(
            trans_matrix, log_likelihoods, np.empty_like(log_likelihoods)
        )
    @staticmethod
    def _messages_forwards_log(trans_matrix, init_state_distn, log_likelihoods):
        from pyhsmm.internals.hmm_messages_interface import messages_forwards_log
        return messages_forwards_log(
            trans_matrix,
            log_likelihoods,
            init_state_distn,
            np.empty_like(log_likelihoods),
        )
    @staticmethod
    def _messages_forwards_normalized(trans_matrix, init_state_distn, log_likelihoods):
        from pyhsmm.internals.hmm_messages_interface import messages_forwards_normalized
        return messages_forwards_normalized(
            trans_matrix,
            log_likelihoods,
            init_state_distn,
            np.empty_like(log_likelihoods),
        )
    # next three methods are just for convenient testing
    def messages_backwards_log_python(self):
        return super(HMMStatesEigen, self)._messages_backwards_log(
            self.trans_matrix, self.aBl
        )
    def messages_forwards_log_python(self):
        return super(HMMStatesEigen, self)._messages_forwards_log(
            self.trans_matrix, self.pi_0, self.aBl
        )
    def messages_forwards_normalized_python(self):
        return super(HMMStatesEigen, self)._messages_forwards_normalized(
            self.trans_matrix, self.pi_0, self.aBl
        )
    ### sampling
    @staticmethod
    def _sample_forwards_log(betal, trans_matrix, init_state_distn, log_likelihoods):
        from pyhsmm.internals.hmm_messages_interface import sample_forwards_log
        return sample_forwards_log(
            trans_matrix,
            log_likelihoods,
            init_state_distn,
            betal,
            np.empty(log_likelihoods.shape[0], dtype="int32"),
        )
    @staticmethod
    def _sample_backwards_normalized(alphan, trans_matrix_transpose):
        from pyhsmm.internals.hmm_messages_interface import sample_backwards_normalized
        return sample_backwards_normalized(
            trans_matrix_transpose, alphan, np.empty(alphan.shape[0], dtype="int32")
        )
    @staticmethod
    def _resample_multiple(states_list):
        """Jointly resample several state sequences that share one set of
        transition/initial parameters, in a single compiled call."""
        from pyhsmm.internals.hmm_messages_interface import resample_normalized_multiple
        if len(states_list) > 0:
            loglikes = resample_normalized_multiple(
                states_list[0].trans_matrix,
                states_list[0].pi_0,
                [s.aBl for s in states_list],
                [s.stateseq for s in states_list],
            )
            for s, loglike in zip(states_list, loglikes):
                s._normalizer = loglike
    ### EM
    @staticmethod
    def _expected_statistics_from_messages(
        trans_potential,
        likelihood_log_potential,
        alphal,
        betal,
        expected_states=None,
        expected_transcounts=None,
    ):
        """Compiled version; optionally writes into caller-provided
        output buffers to avoid re-allocation."""
        from pyhsmm.internals.hmm_messages_interface import expected_statistics_log
        expected_states = (
            np.zeros_like(alphal) if expected_states is None else expected_states
        )
        expected_transcounts = (
            np.zeros_like(trans_potential)
            if expected_transcounts is None
            else expected_transcounts
        )
        return expected_statistics_log(
            np.log(trans_potential),
            likelihood_log_potential,
            alphal,
            betal,
            expected_states,
            expected_transcounts,
        )
    ### Viterbi
    def Viterbi(self):
        from pyhsmm.internals.hmm_messages_interface import viterbi
        self.stateseq = viterbi(
            self.trans_matrix,
            self.aBl,
            self.pi_0,
            np.empty(self.aBl.shape[0], dtype="int32"),
        )
class HMMStatesEigenSeparateTrans(_SeparateTransMixin, HMMStatesEigen):
    """Eigen-backed HMM states with a per-sequence transition matrix."""
    pass
class HMMStatesPossibleChangepoints(_PossibleChangepointsMixin, HMMStatesEigen):
    """Eigen-backed HMM states restricted to a given set of changepoints."""
    pass
class HMMStatesPossibleChangepointsSeparateTrans(
    _SeparateTransMixin, HMMStatesPossibleChangepoints
):
    """Changepoint-restricted HMM states with a per-sequence transition matrix."""
    pass
| StarcoderdataPython |
1611903 | <reponame>Christian-B/my_spinnaker
from collections import namedtuple
import time
from typing import NamedTuple
class Foo(object):
    """Slotted attribute holder; baseline case for the benchmark below."""
    __slots__ = "alpha", "beta", "gamma"
    def __init__(self, alpha, beta, gamma):
        """Store the three values directly on their slots."""
        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
Bar = namedtuple('Bar', ['alpha', 'beta', 'gamma'])
class Gamma(object):
    """Read-only triple exposed through properties, backed by slots."""
    __slots__ = "_alpha", "_beta", "_gamma"
    def __init__(self, alpha, beta, gamma):
        """Stash the values on private slots; exposed via properties."""
        self._gamma = gamma
        self._beta = beta
        self._alpha = alpha
    @property
    def alpha(self):
        """First stored value (read-only)."""
        return self._alpha
    @property
    def beta(self):
        """Second stored value (read-only)."""
        return self._beta
    @property
    def gamma(self):
        """Third stored value (read-only)."""
        return self._gamma
class Epsilon(NamedTuple):
    """Typed named tuple with three fields annotated as int."""
    alpha: int
    beta: int
    gamma: int
class Bacon(object):
    """Read-only triple exposed through properties, dict-backed (no slots)."""
    def __init__(self, alpha, beta, gamma):
        """Stash the values on private attributes; exposed via properties."""
        self._gamma = gamma
        self._beta = beta
        self._alpha = alpha
    @property
    def alpha(self):
        """First stored value (read-only)."""
        return self._alpha
    @property
    def beta(self):
        """Second stored value (read-only)."""
        return self._beta
    @property
    def gamma(self):
        """Third stored value (read-only)."""
        return self._gamma
class Eggs(object):
    """Plain attribute holder without slots; simplest case in the benchmark."""
    def __init__(self, alpha, beta, gamma):
        """Assign the three values as ordinary instance attributes."""
        self.gamma = gamma
        self.beta = beta
        self.alpha = alpha
# Micro-benchmark: time constructing an instance and reading its three
# attributes, once per class style defined above.
loops = 1000000
start = time.time()
for i in range(loops):
    foo = Foo(1, 2, 3)
    a = foo.alpha
    b = foo.beta
    c = foo.gamma
end = time.time()
print("Foo", end - start)
start = time.time()
for i in range(loops):
    bar = Bar(1, 2, 3)
    a = bar.alpha
    b = bar.beta
    # BUG FIX: previously read foo.gamma, so the Bar timing partly
    # measured the wrong object's attribute access.
    c = bar.gamma
end = time.time()
print("Bar", end - start)
start = time.time()
for i in range(loops):
    gamma = Gamma(1, 2, 3)
    a = gamma.alpha
    # BUG FIX: previously assigned to g instead of b (copy-paste slip);
    # harmless for timing but inconsistent with the other loops.
    b = gamma.beta
    c = gamma.gamma
end = time.time()
print("Gamma", end - start)
start = time.time()
for i in range(loops):
    epsilon = Epsilon(1, 2, 3)
    a = epsilon.alpha
    b = epsilon.beta
    c = epsilon.gamma
end = time.time()
print("Epsilon", end - start)
# NamedTuple does not enforce its annotations at runtime: strings pass.
boo = Epsilon("a", "2", "3")
print(boo)
start = time.time()
for i in range(loops):
    bacon = Bacon(1, 2, 3)
    a = bacon.alpha
    b = bacon.beta
    c = bacon.gamma
end = time.time()
print("Bacon", end - start)
start = time.time()
for i in range(loops):
    eggs = Eggs(1, 2, 3)
    a = eggs.alpha
    b = eggs.beta
    c = eggs.gamma
end = time.time()
print("Eggs", end - start)
| StarcoderdataPython |
1996103 | <filename>test/test_functions.py
import sys
from src.functions import multiply, add
def test_multiply():
    """multiply() returns the product; zero annihilates."""
    for left, right, product in ((1, 2, 2), (0, 1, 0)):
        assert multiply(left, right) == product
def test_add():
    """add() returns the sum of its two arguments."""
    expected = 2
    assert add(1, 1) == expected
def test_python3():
    """Assert the suite runs under Python 3.

    BUG FIX: the original asserted True on both branches of the version
    check, so the test could never fail and verified nothing.
    """
    assert sys.version_info[0] == 3
| StarcoderdataPython |
from dataclasses import dataclass, field
from time import time
@dataclass
class SearchParams:
    '''Search Params for Ceo.ca intended to get spiels.

    BUG FIX: ``until`` previously used ``int(time() * 1000)`` as a plain
    class-level default, which froze the timestamp at import time; a
    ``default_factory`` evaluates it freshly for each instance.
    '''
    channel: str = '@newswire'
    filter_terms: str = 'APHA'
    filter_top: int = 100
    load_more: str = 'top'
    # NOTE(review): annotated as str but defaults to int 0 -- confirm the
    # intended type with the consumer of this field.
    original_scroll_height: str = 0
    # unix timestamp in milliseconds, evaluated at instantiation time
    until: int = field(default_factory=lambda: int(time() * 1000))
| StarcoderdataPython |
3314005 | """ rest subsystem's configuration
- config-file schema
- settings
"""
from typing import Dict
import trafaret as T
from aiohttp import web
from servicelib.application_keys import APP_CONFIG_KEY, APP_OPENAPI_SPECS_KEY
# Name of this subsystem's section in the application config file.
CONFIG_SECTION_NAME = "rest"
# Trafaret schema for the section: an optional enable flag (defaults to
# True) and a required API version tag (only "v0" is accepted).
schema = T.Dict(
    {T.Key("enabled", default=True, optional=True): T.Bool(), "version": T.Enum("v0"),}
)
def get_rest_config(app: web.Application) -> Dict:
    """Return the 'rest' section of the application's configuration."""
    app_config = app[APP_CONFIG_KEY]
    return app_config[CONFIG_SECTION_NAME]
__all__ = ["APP_OPENAPI_SPECS_KEY"]
| StarcoderdataPython |
5098780 | <reponame>imagect/imagect
from zope.interface import implementer
from collections import defaultdict
from imagect.api.opener import IOpener
import imagect.api.dataset as ds
import numpy as np
@implementer(IOpener)
class Opener(object) :
    """Placeholder IOpener implementation; no behaviour defined yet."""
    pass
# add to menu
from imagect.api.actmgr import addActFun, renameAct
@addActFun("file.open.sample", "Sample", index =1, shortcut="F12")
def newimage() :
sample = ds.DataSet.fromSample("chessboard").astype(np.float32)
ds.get().add(sample)
import imagect.api.viewmgr as vm
sm = vm.get()
if sm :
sm.insertVolSession(sample)
| StarcoderdataPython |
12842114 | <gh_stars>0
from dataclasses import dataclass
from typing import Any, Callable, Generic, Iterable, Optional, TypeVar
from .strings import StringPosition, StringPositions, to_string
# Public API of this module.
__all__ = [
    'Match',
    'Matcher',
    'MatchOption',
]
# Generic payload type carried by matcher entries and match results.
T = TypeVar('T')
@dataclass(frozen=True)
class MatchOption(Generic[T]):
    """One interpretation of a matched span."""
    # payload associated with the matched table entry
    data: T
    # leftover combining characters, one list per string position
    combining: list[list[str]]
@dataclass(frozen=True)
class Match(Generic[T]):
    """Result of matching at one location: span length plus all
    candidate interpretations."""
    # number of string positions consumed by the match
    length: int
    # candidate interpretations, sorted by the matcher's option key
    options: list[MatchOption[T]]
    @property
    def primary_option(self) -> MatchOption[T]:
        """Best-ranked option (the matcher sorts options at construction)."""
        return self.options[0]
class Matcher(Generic[T]):
    """Looks up spans of StringPositions against a precomputed table,
    tolerating extra combining characters in the given input."""
    # longest table entry, in positions; bounds the search in match()
    _max_length: int
    # base-string key (combining stripped) -> candidate (positions, payload)
    _mapping: dict[str, list[tuple[StringPositions, T]]]
    # ranks the options of a match, best first
    _option_sorting_key: Callable[[MatchOption[T]], Any]
    def __init__(self, data: Iterable[tuple[StringPositions, T]],
                 option_sorting_key: Callable[[MatchOption[T]], Any]) -> None:
        """Index ``data`` by its combining-stripped string form."""
        self._max_length = 0
        self._mapping = {}
        for positions, value in data:
            self._max_length = max(self._max_length, len(positions))
            key = to_string(positions, combining=False)
            if key not in self._mapping:
                self._mapping[key] = []
            self._mapping[key].append((positions, value))
        self._option_sorting_key = option_sorting_key
    @staticmethod
    def _match_with_combining_single(given: StringPosition, required: StringPosition) -> Optional[list[str]]:
        """Match one position: return the characters of ``given`` left
        over after consuming ``required`` in order, or None when
        ``required`` is not an in-order subsequence of ``given``."""
        combining: list[str] = []
        required_index = 0
        for character in given:
            if required_index < len(required) and required[required_index] == character:
                required_index += 1
            else:
                combining.append(character)
        return combining if required_index == len(required) else None
    @staticmethod
    def _match_with_combining(given: StringPositions, required: StringPositions) -> Optional[list[list[str]]]:
        """Position-wise variant of the above; None if any position fails."""
        assert len(given) == len(required)
        combining: list[list[str]] = []
        for given_position, required_position in zip(given, required):
            combining_single = Matcher._match_with_combining_single(given_position, required_position)
            if combining_single is None:
                return None
            combining.append(combining_single)
        return combining
    def match(self, positions: StringPositions, start: int, length: Optional[int] = None) -> Optional[Match[T]]:
        """Find the longest match starting at ``start`` (or a match of
        exactly ``length`` positions if given); None when nothing matches."""
        # try longer spans first so the longest match wins
        for match_length in range(min(self._max_length, len(positions) - start), 0, -1) if length is None else [length]:
            given = positions[start:start + match_length]
            if options := [MatchOption(data, combining)
                           for matched, data in self._mapping.get(to_string(given, combining=False), [])
                           if (combining := Matcher._match_with_combining(given, matched)) is not None]:
                return Match(match_length, sorted(options, key=self._option_sorting_key))
        return None
| StarcoderdataPython |
5112204 | <filename>stlearn/plotting/trajectory/local_plot.py
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import pandas as pd
import matplotlib
import numpy as np
import networkx as nx
from stlearn._compat import Literal
from typing import Optional, Union
from anndata import AnnData
import warnings
def local_plot(
    adata: AnnData,
    use_label: str = "louvain",
    use_cluster: int = None,
    reverse: bool = False,
    cluster: int = 0,
    data_alpha: float = 1.0,
    arrow_alpha: float = 1.0,
    branch_alpha: float = 0.2,
    spot_size: Union[float, int] = 1,
    show_color_bar: bool = True,
    show_axis: bool = False,
    show_plot: bool = True,
    name: str = None,
    dpi: int = 150,
    output: str = None,
    copy: bool = False,
) -> Optional[AnnData]:
    """\
    Local spatial trajectory inference plot.
    Parameters
    ----------
    adata
        Annotated data matrix.
    use_label
        Use label result of clustering method.
    use_cluster
        Choose a specific cluster that will display in the plot.
    reverse
        Reverse the direction of the pseudotime arrows.
    cluster
        Cluster whose spots are drawn in colour; others are greyed out.
    data_alpha
        Opacity of the spot.
    arrow_alpha
        Opacity of the arrow.
    branch_alpha
        Opacity of the branch edge.
    spot_size
        Size of the spot.
    show_color_bar
        Show color bar or not.
    show_axis
        Show axis or not.
    show_plot
        Show the figure interactively or not.
    name
        File name used when ``output`` is given.
    dpi
        Set dpi as the resolution for the saved plot.
    output
        Directory to save the figure into (not saved when None).
    copy
        Return a copy instead of writing to adata.
    Returns
    -------
    Nothing
    """
    # spots belonging to the selected cluster label
    tmp = adata.obs[adata.obs[use_label] == str(use_cluster)]
    ref_cluster = adata[list(tmp.index)]
    # plt.rcParams['figure.dpi'] = dpi
    # plt.rcParams['figure.figsize'] = 5, 5
    fig = plt.figure(figsize=(5, 5))
    ax = fig.gca(projection="3d")
    # collect sub-cluster centroids that pass the spot-count threshold,
    # remembering their order for indexing the distance matrices
    centroids_ = []
    classes_ = []
    order_dict = {}
    order = 0
    for i in ref_cluster.obs["sub_cluster_labels"].unique():
        if (
            len(adata.obs[adata.obs["sub_cluster_labels"] == str(i)])
            > adata.uns["threshold_spots"]
        ):
            classes_.append(i)
            centroid_dict = adata.uns["centroid_dict"]
            centroid_dict = {int(key): centroid_dict[key] for key in centroid_dict}
            centroids_.append(centroid_dict[int(i)])
            order_dict[int(i)] = int(order)
            order += 1
    stdm = adata.uns["ST_distance_matrix"]
    non_abs_dpt = adata.uns["nonabs_dpt_distance_matrix"]
    # draw a cosine arc between every pair of centroids, with an arrow
    # at mid-arc pointing along the sign of the pseudotime difference
    for i in range(0, len(centroids_)):
        if i == len(centroids_) - 1:
            break
        j = 0
        while j <= (len(centroids_) - 2 - i):
            j = j + 1
            m = stdm[order_dict[int(classes_[i])], order_dict[int(classes_[i + j])]]
            dpt_distance = non_abs_dpt[
                order_dict[int(classes_[i])], order_dict[int(classes_[i + j])]
            ]
            # arc depth encodes the spatial-trajectory distance
            y = calculate_y(np.abs(m))
            x = np.linspace(centroids_[i][0], centroids_[i + j][0], 1000)
            z = np.linspace(centroids_[i][1], centroids_[i + j][1], 1000)
            branch = ax.plot(
                x, y, z, zorder=10, c="#333333", linewidth=1, alpha=branch_alpha
            )
            if reverse:
                dpt_distance = -dpt_distance
            # arrow direction follows the sign of the pseudotime difference
            if dpt_distance <= 0:
                xyz = ([x[500], x[520]], [y[500], y[520]], [z[500], z[520]])
            else:
                xyz = ([x[520], x[500]], [y[520], y[500]], [z[520], z[500]])
            arrow = Arrow3D(
                xyz[0],
                xyz[1],
                xyz[2],
                zorder=10,
                mutation_scale=7,
                lw=1,
                arrowstyle="->",
                color="r",
                alpha=arrow_alpha,
            )
            ax.add_artist(arrow)
            # annotate the arc with the (rounded) distance value
            ax.text(
                x[500],
                y[500] - 0.15,
                z[500],
                np.round(np.abs(m), 3),
                color="black",
                size=5,
                zorder=100,
            )
    # spots of the chosen cluster, coloured by pseudotime
    sc = ax.scatter(
        ref_cluster.obs["imagecol"],
        0,
        ref_cluster.obs["imagerow"],
        c=ref_cluster.obs["dpt_pseudotime"],
        s=spot_size,
        cmap="viridis",
        zorder=0,
        alpha=data_alpha,
    )
    # all remaining spots, greyed out for context
    _ = ax.scatter(
        adata.obs[adata.obs[use_label] != cluster]["imagecol"],
        0,
        adata.obs[adata.obs[use_label] != cluster]["imagerow"],
        c="grey",
        s=spot_size,
        zorder=0,
        alpha=0.1,
    )
    if show_color_bar:
        cb = fig.colorbar(sc, cax=fig.add_axes([0.1, 0.3, 0.03, 0.5]))
        cb.outline.set_visible(False)
    ax.set_ylim([-1, 0])
    ax.set_xlim([min(adata.obs["imagecol"]) - 10, max(adata.obs["imagecol"]) + 10])
    ax.set_zlim([min(adata.obs["imagerow"]) - 10, max(adata.obs["imagerow"]) + 10])
    if not show_axis:
        # make the grid lines transparent
        ax.xaxis._axinfo["grid"]["color"] = (1, 1, 1, 0)
        ax.yaxis._axinfo["grid"]["color"] = (1, 1, 1, 0)
        ax.zaxis._axinfo["grid"]["color"] = (1, 1, 1, 0)
        ax.get_xaxis().set_ticks([])
        ax.get_yaxis().set_ticks([])
        ax.get_zaxis().set_ticks([])
    # flip z so the image row axis matches screen orientation
    ax.invert_zaxis()
    ax.patch.set_visible(False)
    if show_plot:
        plt.show()
    # plt.rcParams['figure.figsize'] = 6, 4
    if output is not None:
        if name is None:
            print("The file name is not defined!")
            name = use_label
        fig.savefig(output + "/" + name, dpi=dpi, bbox_inches="tight", pad_inches=0)
def calculate_y(m):
    """Return a 1000-point downward cosine arc of depth ``m``.

    The arc is 0 at both ends and reaches ``-m`` at its centre; it is
    used as the vertical profile of a branch between two centroids.
    """
    import math
    mean, variance = 0, 1
    spread = math.sqrt(variance)
    xs = np.linspace(mean - 0.5 * spread, mean + 0.5 * spread, 1000)
    arc = np.cos(np.absolute(xs))
    # normalize to [0, 1], then scale to [-m, 0]
    return -m * (arc - np.min(arc)) / np.ptp(arc)
class Arrow3D(FancyArrowPatch):
    """2-D FancyArrowPatch whose endpoints are projected from 3-D
    coordinates at draw time (standard matplotlib 3-D arrow recipe)."""
    def __init__(self, xs, ys, zs, *args, **kwargs):
        # anchor the 2-D patch at dummy positions; real ones set in draw()
        FancyArrowPatch.__init__(self, (0, 0), (0, 0), *args, **kwargs)
        self._verts3d = xs, ys, zs
    def draw(self, renderer):
        """Project the stored 3-D endpoints into 2-D, then draw normally."""
        xs3d, ys3d, zs3d = self._verts3d
        xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
        self.set_positions((xs[0], ys[0]), (xs[1], ys[1]))
        FancyArrowPatch.draw(self, renderer)
6669255 | <gh_stars>1-10
#!/usr/bin/python
# Copyright (c) 2018 <NAME>, MIT License
"""
Save question history for all players
"""
from save_user_hist import save_user_hist
from llama_slobber import get_qhist
if __name__ == "__main__":
save_user_hist(get_qhist, 'question_data')
| StarcoderdataPython |
238420 | '''Edid helpers test module'''
from pyedid import get_edid_from_xrandr_verbose
from .data import PART_OF_XRANDR_VERBOSE_OUTPUT
def test_edid_from_xrandr_verbose_bytes():
edids = get_edid_from_xrandr_verbose(PART_OF_XRANDR_VERBOSE_OUTPUT)
assert isinstance(edids, list)
assert len(edids) == 2
for edid in edids:
assert isinstance(edid, bytes)
# edid header
assert edid[:8] == b'\x00\xff\xff\xff\xff\xff\xff\x00'
def test_edid_from_xrandr_verbose_str():
edids = get_edid_from_xrandr_verbose(PART_OF_XRANDR_VERBOSE_OUTPUT.decode())
assert isinstance(edids, list)
assert len(edids) == 2
for edid in edids:
assert isinstance(edid, bytes)
# edid header
assert edid[:8] == b'\x00\xff\xff\xff\xff\xff\xff\x00'
def test_edid_from_xrandr_verbose_bad_input():
empty = get_edid_from_xrandr_verbose("foo\nbar\nsome not edid data")
assert isinstance(empty, list)
assert len(empty) == 0
| StarcoderdataPython |
6574081 | <filename>src/textscreen.py<gh_stars>0
# Copyright 2022 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module provides a class to display running text
"""
from functools import partial
import pygame
from . import settings
from . import resources
from .gamedisplay import GameDisplay
class TextScreen(GameDisplay):
    """
    Display a text in screenfuls (pages), with keyboard navigation.
    """
    def __init__(self, media, font, text):
        """
        Initialize the TextScreen object, paginating the given text
        and installing the page-navigation key bindings.
        """
        super().__init__(media, font)
        # the colours used
        self.colours = {
            "foreground": resources.get_colour("paginated text"),
            "background": resources.get_colour("paginated background"),
            "footer": resources.get_colour("paginated footer"),
            "error": resources.get_colour("invalid control sequence")
            }
        self.layout = settings.paginate_layout
        #self.font = self.game.paginated_font
        # paginate the text
        self.__paginate(text)
        # start with the first page
        self.current_page = 0
        self.key_bindings.update({
            pygame.K_UP: self.__previous_page,
            pygame.K_PAGEUP: self.__previous_page,
            pygame.K_BACKSPACE: self.__previous_page,
            pygame.K_DOWN: self.__next_page,
            pygame.K_PAGEDOWN: self.__next_page,
            pygame.K_SPACE: self.__next_page,
            pygame.K_HOME: self.__first_page,
            pygame.K_END: self.__last_page,
            pygame.K_q: partial(self.set_status, self.QUIT),
            pygame.K_ESCAPE: partial(self.set_status, self.QUIT)
            })
    def __paginate(self, text):
        """
        The text is given as a string containing control sequences. A
        control sequence has the form @control@. Unknown control
        sequences are replaced by three red question marks.
        Implemented controls (see __render_control): @img <file>@
        inserts an image, @title@ inserts the game title.
        Multiple empty lines are combined to a single empty line.
        Additionally, the control characters line feed (\\n) and form feed
        (\\f) are interpreted.
        """
        # List of text pages. Each page itself is a pygame surface.
        # Initially the list is empty.
        self.pages = []
        # do the pagination
        for block in text.split("\f"):
            current_page = self.__new_page()
            for line in block.split("\n"):
                index = 0
                while index != len(line):
                    if line[index] == "@":
                        control_end = line.find("@", index+1)
                        if control_end == -1:
                            # unterminated control, the "@invalid"
                            # string cannot be generated by any (valid
                            # or invalid) control sequence, therefore
                            # it is guaranteed to be invalid
                            self.__render_control(current_page, "@invalid")
                            index = len(line)
                        else:
                            self.__render_control(current_page,
                                                  line[index+1:control_end])
                            index = control_end + 1
                    else:
                        right_index = line.find("@", index)
                        if right_index == -1:
                            right_index = len(line)
                        index = current_page.word_wrap(
                            line, index, right_index,
                            self.colours["foreground"])
                        # page full mid-line: commit it and continue on a
                        # fresh page
                        if index != right_index:
                            self.pages.append(current_page)
                            current_page = self.__new_page()
                # finally, move to a new line, unless at the beginning
                # of a page
                if not current_page.at_beginning():
                    current_page.line_feed()
        # commit the last page, if not empty
        if not current_page.at_beginning():
            self.pages += [current_page]
    def __new_page(self):
        """
        create an empty page sized to the screen minus the borders
        """
        screen = self.media.get_screen()
        textwidth = (screen.get_width()
                     - self.layout["border"]["right"]
                     - self.layout["border"]["left"])
        textheight = (screen.get_height()
                      - self.layout["border"]["bottom"]
                      - self.layout["border"]["top"])
        return TextArea((textwidth, textheight),
                        self.colours["background"],
                        self.font,
                        self.layout["line spacing"])
    def __render_control(self, current_page, control_name):
        """
        Render a control sequence (without the surrounding @s) onto
        the given page; unknown controls render as red "???"
        """
        if control_name[:4] == "img ":
            item = resources.load_image(control_name[4:])
        elif control_name == "title":
            item = self.font.render(
                settings.game_info["title"],
                True,
                self.colours["foreground"])
        else:
            # invalid control
            item = self.font.render("???", True, self.colours["error"])
        current_page.render(item)
    def draw(self):
        """
        Draw the current page plus the navigation footer
        """
        screen = self.media.get_screen()
        screen.fill(self.colours["background"])
        screen.blit(self.pages[self.current_page],
                    (self.layout["border"]["left"],
                     self.layout["border"]["top"]))
        text = self.font.render(
            f"Page {self.current_page+1} of {len(self.pages)}. "
            + "Up/Down: turn page, Q: back to menu.",
            True,
            self.colours["footer"])
        posx = self.layout["border"]["left"]
        posy = (screen.get_height() - self.layout["border"]["bottom"] + 10)
        screen.blit(text, (posx, posy))
        pygame.display.flip()
    def __previous_page(self):
        """
        Move to the previous page
        """
        if self.current_page > 0:
            self.current_page -= 1
    def __next_page(self):
        """
        Move to the next page
        """
        if self.current_page < len(self.pages) - 1:
            self.current_page += 1
    def __first_page(self):
        """
        Move to the first page
        """
        self.current_page = 0
    def __last_page(self):
        """
        Move to the last page
        """
        self.current_page = len(self.pages) - 1
class TextArea(pygame.Surface):
    """
    This class represents a rectangular area that contains running
    text. Tracks a cursor (hpos, vpos) and word-wraps content into it.
    """
    def __init__(self, size, bg_colour, font, linespacing):
        width, height = size
        super().__init__((width, height))
        self.font = font
        self.linespacing = linespacing
        # height of one rendered text line, taken from the font metrics
        self.line_height = font.size("")[1]
        self.fill(bg_colour)
        self.hpos = 0
        self.vpos = 0
        # at the beginning of a page, don't do line feeds
        self.no_feed = True
    def line_feed(self):
        """
        Goes to the next line. Returns whether that line fits.
        """
        # ignore line feed requests where line feeds are unwanted
        if self.no_feed:
            return True
        # after an empty line, don't add another one
        if self.hpos == 0:
            self.no_feed = True
        self.hpos = 0
        self.vpos += self.linespacing
        return self.vpos + self.line_height < self.get_height()
    def __get_item_width(self, item):
        """
        Gives the width of an item, which is either a string or a
        surface
        """
        if isinstance(item, str):
            return self.font.size(item)[0]
        return item.get_width()
    def __fits_in_line(self, item_width):
        """
        Test whether an item of given width fits into the current line
        """
        return self.hpos + item_width <= self.get_width()
    def render(self, item, colour = None):
        """
        Render an item, if possible. The item can be a string or a
        surface. Returns whether the item was rendered. If it is a
        string, the colour must be given.
        If the item fits into the current line, it is appended to it.
        Otherwise, if a new line fits in the text area, render the
        item in the next line. Otherwise, give up.
        """
        item_width = self.__get_item_width(item)
        if not self.__fits_in_line(item_width):
            line_fits = self.line_feed()
            if not line_fits:
                return False
        if isinstance(item, str):
            surface = self.font.render(item, True, colour)
        else:
            surface = item
        self.blit(surface, (self.hpos, self.vpos))
        self.hpos += item_width
        # after rendering an item, a line feed should be honoured
        self.no_feed = False
        return True
    def word_wrap(self, text, start, end, colour):
        """
        Word wrap text starting from index start until index end, or
        until the text area is full, whichever happens first.
        Returns the index up to which the text was rendered.
        """
        chunk_start = start
        # predicate: does text[chunk_start:chunk_end] fit on this line?
        def fits(chunk_end):
            return self.__fits_in_line(
                self.__get_item_width(text[chunk_start:chunk_end]))
        while chunk_start != end:
            if fits(end):
                result = self.render(text[chunk_start:end], colour)
                if not result:
                    return chunk_start
                return end
            # binary-search the largest fitting prefix, then back up to
            # the last space so words stay whole
            chunk_end = resources.bisect(chunk_start, end, fits)
            last_space = text.rfind(" ", chunk_start, chunk_end)
            if last_space != -1:
                chunk_end = last_space
            result = self.render(text[chunk_start:chunk_end], colour)
            if not result:
                return chunk_start
            chunk_start = chunk_end
            if last_space != -1:
                # since the omitted space may mean the next word now
                # fits, an explicit line feed is needed to prevent
                # adding the next word to the current line without
                # space
                self.line_feed()
                chunk_start += 1
        return end
    def at_beginning(self):
        """
        Return whether we are still at the beginning of the page
        """
        return self.hpos == 0 and self.vpos == 0
| StarcoderdataPython |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import operator
from absl.testing import absltest
from absl.testing import parameterized
from jax import tree_util
import jax.numpy as jnp
import tree_math as tm
from tree_math._src import test_util
import numpy as np
# pylint: disable=g-complex-comprehension
class VectorTest(test_util.TestCase):
    """Tests for tree_math.Vector / VectorMixin arithmetic over pytrees."""

    def test_vector(self):
        # A vector backed by a two-leaf tree: scalar "a" plus a length-2 array "b".
        tree = {"a": 0, "b": jnp.array([1, 2], dtype=jnp.int32)}
        vector = tm.Vector(tree)
        self.assertEqual(vector.size, 3)
        self.assertLen(vector, 3)
        self.assertEqual(vector.shape, (3,))
        self.assertEqual(vector.ndim, 1)
        self.assertEqual(vector.dtype, jnp.int32)
        self.assertEqual(repr(tm.Vector({"a": 1})),
                         "tree_math.Vector({'a': 1})")
        # Flattening a Vector must yield the same leaves as its tree.
        self.assertTreeEqual(tree_util.tree_leaves(tree),
                             tree_util.tree_leaves(vector), check_dtypes=True)
        # tree_map over a Vector should round-trip it unchanged.
        vector2 = tree_util.tree_map(lambda x: x, vector)
        self.assertTreeEqual(vector, vector2, check_dtypes=True)

    @parameterized.named_parameters(*(
        {"testcase_name": op.__name__, "op": op}
        for op in [operator.pos, operator.neg, abs, operator.invert]
    ))
    def test_unary_math(self, op):
        # Each unary op on a Vector must equal the op mapped over its leaves.
        tree = {"a": 1, "b": -jnp.array([2, 3])}
        expected = tm.Vector(tree_util.tree_map(op, tree))
        actual = op(tm.Vector(tree))
        self.assertTreeEqual(actual, expected, check_dtypes=True)

    def test_arithmetic_with_scalar(self):
        # Scalars broadcast against every leaf, from either side.
        vector = tm.Vector({"a": 0, "b": jnp.array([1, 2])})
        expected = tm.Vector({"a": 1, "b": jnp.array([2, 3])})
        self.assertTreeEqual(vector + 1, expected, check_dtypes=True)
        self.assertTreeEqual(1 + vector, expected, check_dtypes=True)
        # Non-scalar, non-Vector operands are rejected.
        with self.assertRaisesRegex(
            TypeError, "non-tree_math.VectorMixin argument is not a scalar",
        ):
            vector + jnp.ones((3,))  # pylint: disable=expression-not-assigned

    @parameterized.named_parameters(*(
        {"testcase_name": op.__name__, "op": op}
        for op in [
            operator.add,
            operator.sub,
            operator.mul,
            operator.truediv,
            operator.floordiv,
            operator.mod,
        ]
    ))
    def test_binary_arithmetic(self, op):
        # Vector-Vector binary ops must equal the op mapped leaf-wise.
        rng = np.random.default_rng(0)
        tree1 = {"a": rng.standard_normal(dtype=np.float32),
                 "b": rng.standard_normal((2, 3), dtype=np.float32)}
        tree2 = {"a": rng.standard_normal(dtype=np.float32),
                 "b": rng.standard_normal((2, 3), dtype=np.float32)}
        expected = tm.Vector(tree_util.tree_map(op, tree1, tree2))
        actual = op(tm.Vector(tree1), tm.Vector(tree2))
        self.assertTreeEqual(actual, expected, check_dtypes=True)

    def test_pow(self):
        expected = tm.Vector({"a": 2 ** 3})
        actual = tm.Vector({"a": 2}) ** tm.Vector({"a": 3})
        self.assertTreeEqual(actual, expected, check_dtypes=True)

    def test_divmod(self):
        # divmod works with the Vector on either side of a scalar/array.
        x, y = divmod(jnp.arange(5), 2)
        expected = tm.Vector({"a": x}), tm.Vector({"a": y})
        actual = divmod(tm.Vector({"a": jnp.arange(5)}), 2)
        self.assertTreeEqual(actual, expected, check_dtypes=True)
        x, y = divmod(5, jnp.arange(5))
        expected = tm.Vector({"a": x}), tm.Vector({"a": y})
        actual = divmod(5, tm.Vector({"a": jnp.arange(5)}))
        self.assertTreeEqual(actual, expected, check_dtypes=True)

    def test_matmul_scalars(self):
        actual = tm.Vector(1.0) @ tm.Vector(2.0)
        expected = 2.0
        self.assertAllClose(actual, expected)

    def test_matmul(self):
        # Vector @ Vector is the inner product over all flattened leaves.
        rng = np.random.default_rng(0)
        tree1 = {"a": rng.standard_normal(dtype=np.float32),
                 "b": rng.standard_normal((2, 3), dtype=np.float32)}
        tree2 = {"a": rng.standard_normal(dtype=np.float32),
                 "b": rng.standard_normal((2, 3), dtype=np.float32)}
        expected = tree1["a"] * tree2["a"] + tree1["b"].ravel() @ tree2["b"].ravel()
        vector1 = tm.Vector(tree1)
        vector2 = tm.Vector(tree2)
        actual = vector1 @ vector2
        self.assertAllClose(actual, expected)
        actual = vector1.dot(vector2)
        self.assertAllClose(actual, expected)
        with self.assertRaisesRegex(
            TypeError, "matmul arguments must both be tree_math.VectorMixin objects",
        ):
            vector1 @ jnp.ones((7,))  # pylint: disable=expression-not-assigned

    # TODO(shoyer): test comparisons and bitwise ops

    def test_conj(self):
        vector = tm.Vector({"a": jnp.array([1, 1j])})
        actual = vector.conj()
        expected = tm.Vector({"a": jnp.array([1, -1j])})
        self.assertTreeEqual(actual, expected, check_dtypes=True)

    def test_real_imag(self):
        vector = tm.Vector({"a": jnp.array([1, 1j])})
        real_part = tm.Vector({"a": jnp.array([1.0, 0.0])})
        imag_part = tm.Vector({"a": jnp.array([0.0, 1.0])})
        self.assertTreeEqual(vector.real, real_part, check_dtypes=True)
        self.assertTreeEqual(vector.imag, imag_part, check_dtypes=True)

    def test_sum_mean_min_max(self):
        # NOTE(review): despite the name, mean() is not exercised here.
        vector = tm.Vector({"a": 1, "b": jnp.array([2, 3, 4])})
        self.assertTreeEqual(vector.sum(), 10, check_dtypes=False)
        self.assertTreeEqual(vector.min(), 1, check_dtypes=False)
        self.assertTreeEqual(vector.max(), 4, check_dtypes=False)

    def test_custom_class(self):
        # Any registered pytree class mixing in VectorMixin gains arithmetic.
        @tree_util.register_pytree_node_class
        class CustomVector(tm.VectorMixin):
            def __init__(self, a: int, b: float):
                self.a = a
                self.b = b

            def tree_flatten(self):
                return (self.a, self.b), None

            @classmethod
            def tree_unflatten(cls, _, args):
                return cls(*args)

        v1 = CustomVector(1, 2.0)
        v2 = v1 + 3
        self.assertTreeEqual(v2, CustomVector(4, 5.0), check_dtypes=True)
        v3 = v2 + v1
        self.assertTreeEqual(v3, CustomVector(5, 7.0), check_dtypes=True)
# Standard absl test entry point: discovers and runs the TestCase classes above.
if __name__ == "__main__":
    absltest.main()
| StarcoderdataPython |
191437 | <reponame>ThomasVieth/WCS-Remastered
"""
"""
## python imports
from configobj import ConfigObj
from glob import glob
from os.path import dirname, basename, isfile
## warcraft.package imports
from warcraft.item import Item
from warcraft.utility import classproperty
## __all__ declaration
# Export the loader hooks plus every sibling module in this package directory
# (file names with the ".py" suffix stripped).
# NOTE(review): the glob also matches "__init__.py" itself, so "__init__"
# ends up in __all__ — confirm this is intended.
modules = glob(dirname(__file__) + '/*.py')
__all__ = ["ini_items", "load_ini_items"] + list(basename(f)[:-3] for f in modules if isfile(f))

# Placeholders to be populated once the item .ini definitions are loaded.
ini_items = None
load_ini_items = None ## TODO
# advent_2020/password_philosophy.py
import dataclasses
from typing import List
def filter_eq_func(eq):
    """Build an indicator function: it maps a value to 1 if the value
    equals *eq*, and to 0 otherwise."""
    def indicator(value):
        return 1 if value == eq else 0
    return indicator
def get_character_count(string: str, char: str) -> int:
    """Return how many characters of *string* compare equal to *char*.

    Rewritten from ``sum(map(filter_eq_func(char), list(string)), 0)``:
    the ``list(string)`` materialization was unnecessary (a string is
    already iterable) and the detour through ``filter_eq_func`` added an
    extra closure call per character.  The generator form below is
    behaviorally identical, including for the empty string (returns 0).
    """
    return sum(1 for c in string if c == char)
@dataclasses.dataclass()
class Interval:
    """A closed integer interval [lower_bound, upper_bound]."""
    lower_bound: int
    upper_bound: int

    def contains(self, item: int) -> bool:
        """Return True when *item* lies within the closed bounds."""
        if item < self.lower_bound:
            return False
        return item <= self.upper_bound
@dataclasses.dataclass
class Requirement:
    """A password policy: a character plus an Interval of bounds.

    For the old policy the bounds limit how often the character may
    occur; for the strict policy they are two 1-based positions.
    """
    character: str
    bounds: Interval

    def check_satisfy(self, password: str) -> bool:
        """Old policy: the character's occurrence count falls in bounds."""
        character_count = get_character_count(password, self.character)
        return self.bounds.contains(character_count)

    def check_strict_satisfy(self, password: str) -> bool:
        """New policy: exactly one of the two 1-based positions holds character.

        Bug fix: the original indexed ``password[position - 1]`` inside a
        ``try/except IndexError``, so a position of 0 silently wrapped to
        the *last* character, and a single out-of-range position returned
        False even when the other position matched.  An out-of-range
        position now simply counts as "does not match".
        """
        def matches(position: int) -> bool:
            index = position - 1
            return 0 <= index < len(password) and password[index] == self.character

        # XOR via != : exactly one of the two positions must match.
        return matches(self.bounds.lower_bound) != matches(self.bounds.upper_bound)
@dataclasses.dataclass
class Entry:
    # One parsed line of puzzle input: the password plus its policy.
    password: str
    requirement: Requirement

    def check_valid(self) -> bool:
        """Part 1: does the password satisfy the occurrence-count policy?"""
        return self.requirement.check_satisfy(self.password)

    def check_strict_valid(self) -> bool:
        """Part 2: does the password satisfy the positional policy?"""
        return self.requirement.check_strict_satisfy(self.password)
def password_philosophy(input_data_filepath: str) -> int:
    """Count the entries in the input file that satisfy the part-1 policy."""
    entries = parse_input(input_data_filepath)
    return sum(1 for entry in entries if entry.check_valid())
def password_philosophy_strict(input_data_filepath: str) -> int:
    """Count the entries in the input file that satisfy the part-2 policy."""
    entries = parse_input(input_data_filepath)
    return sum(1 for entry in entries if entry.check_strict_valid())
def parse_input(input_data_filepath: str) -> List[Entry]:
    """Read the input file and parse one Entry per newline-separated line.

    NOTE(review): a trailing newline in the file produces an empty final
    line, which parse_entry cannot split — confirm input files have no
    trailing newline.
    """
    with open(input_data_filepath) as file:
        raw_lines = file.read().split("\n")
    return [parse_entry(raw_line) for raw_line in raw_lines]
def parse_entry(entry: str) -> Entry:
    """Parse one input line of the form ``"1-3 a: abcde"`` into an Entry.

    Bug fix: the password assignment had been mangled to the placeholder
    ``<PASSWORD>()`` (a syntax error); restored to stripping the raw text
    that follows the colon.
    """
    (requirement_str, password_str) = entry.split(":")
    (interval_str, character_str) = requirement_str.split(" ")
    (lower_bound_str, upper_bound_str) = interval_str.split("-")
    password = password_str.strip()
    return Entry(password=password,
                 requirement=Requirement(bounds=Interval(int(lower_bound_str), int(upper_bound_str)),
                                         character=character_str))
| StarcoderdataPython |
#把引入模块都放入静态区域!!!!!!!!!!!!!
# They live on the heap; since the service never stops, they stay in memory —
# exactly the effect we want. (translated from Chinese)
# Re-importing the libraries below was observed to repeat; neither a reference
# count nor a locals()-based flag fully prevents it. (translated)
"""This file is used for testing."""
# If the original module is modified, del-then-reimport does not seem to work
# either; only creating a fresh Python console in PyCharm helps. (translated)
##
# Guard: only perform the heavyweight imports once per interactive session.
if 'flag' not in locals():
    flag = 1
    import os
    GPUID = '0'  ## GPU index to use
    os.environ["CUDA_VISIBLE_DEVICES"] = GPUID
    import torch
    from apphelper.image import xy_rotate_box, box_rotate, solve
    import model
    # NOTE(review): currently only 4 text orientations are supported; the
    # author planned 8 by computing extra rotations during preprocessing.
    import cv2
    import numpy as np
    print("zhuangzaile moxing")
def plot_boxes1(img, boxes):
    """Draw axis-aligned rectangles for *boxes* on a copy of *img*.

    Each box is (x1, y1, x2, y2).  Side effect: saves the annotated image
    to disk.  Returns the annotated image as a PIL Image.
    """
    # NOTE(review): the variable is named "blue" but (0, 0, 0) is black.
    blue = (0, 0, 0)  # 18
    tmp = np.copy(img)
    for box in boxes:
        cv2.rectangle(tmp, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), blue, 1)  # 19
    out = Image.fromarray(tmp).convert('RGB')
    # Debug side effect: dump the annotated image (filename is Chinese for
    # "saved image for inspecting boxes").
    out.save("保存的图片看box.jpg")
    return Image.fromarray(tmp)
# Main body of the script follows. (translated)
##
# Observed issue: text boxes for the character "沙" are missed when two lines
# are very close together. (translated)
'''
沙
我
'''
# 2019-09-15 notes: vertical "沙" not recognized; characters written under it
# interfere.  A pixel-precise mouse readout would help inspection.  This looks
# like a limitation of the NMS algorithm. (translated)
# Reference on NMS: https://www.cnblogs.com/zf-blog/p/8532228.html
import time
from PIL import Image
import os,sys
p = 'tmp.jpg'  # Inspect this tmp image region to check the parse. (translated)
img = cv2.imread(p)
# 2019-09-15: try binarization below; may misbehave if the background colour
# matches the text colour. (translated)
def depoint(img):  # input: gray image — denoising scheme. (translated)
    """Denoise a BGR image: edge-pixel cleanup, Gaussian adaptive threshold,
    bilateral filter, then non-local-means denoising.

    Side effect: writes the intermediate result to '11111.png'.
    Returns a 3-channel BGR image.
    NOTE(review): this is O(rows*cols) pure-Python looping — slow for large
    images.
    """
    pixdata = img
    pixdata = cv2.cvtColor(pixdata, cv2.COLOR_BGR2GRAY)  # keep the rest of the code unchanged (translated)
    print(pixdata.shape)
    # NOTE(review): numpy shape is (rows, cols), so "w" is actually the row
    # count and "h" the column count here; the loops index consistently with
    # that, but the names are misleading.
    w, h = pixdata.shape
    for y in range(1, h-1):
        for x in range(1, w-1):  # sharpen: drop edge pixels whose neighbours are near-white (translated)
            count = 0
            if pixdata[x, y-1] > 245:
                count = count + 1
            if pixdata[x, y+1] > 245:
                count = count + 1
            if pixdata[x-1, y] > 245:
                count = count + 1
            if pixdata[x+1, y] > 245:
                count = count + 1
            # If 3+ of the 4 neighbours are near-white, blank this pixel too.
            if count > 2:
                pixdata[x, y] = 255
    # Gaussian adaptive binarization. (translated)
    pixdata = cv2.adaptiveThreshold(pixdata, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
        cv2.THRESH_BINARY, 3, 5)
    # Edge-preserving (bilateral) filtering. (translated)
    pixdata = cv2.bilateralFilter(pixdata, 40, 75, 75)
    pixdata = src_RGB = cv2.cvtColor(pixdata, cv2.COLOR_GRAY2BGR)  # keep the rest of the code unchanged (translated)
    pixdata = cv2.fastNlMeansDenoisingColored(pixdata, None, 10, 10, 7, 21)
    cv2.imwrite('11111.png', pixdata)
    print(pixdata.shape)
    return pixdata
# img=depoint(img)  # Preprocessing turned out to hurt recognition — skip it. (translated)
Image.fromarray(img).save("23321321.png")  # inspect the (un)preprocessed input (translated)
h, w = img.shape[:2]
timeTake = time.time()
# These parameters are finicky; an auto-tuning scheme would be better. (translated)
TEXT_PROPOSALS_MIN_SCORE = 0.050  # surprisingly low threshold required (translated)
alph = 0.04
TEXT_LINE_NMS_THRESH = 0.1
MAX_HORIZONTAL_GAP = 50
bili = 1.2
# First pass: detect with text-direction (angle) detection enabled.
_,result1,angle1,scores1,tex_rec,newBox,boxForSingle,scoresForSingle,keepIndexForSingle\
,tp_groups,Allboxes1,Allscores1= model.model(img,
    detectAngle=True,  ## whether to detect text orientation (translated)
    config=dict(MAX_HORIZONTAL_GAP=MAX_HORIZONTAL_GAP,  ## max gap between characters, used to merge text lines (translated)
        MIN_V_OVERLAPS=0.6,
        MIN_SIZE_SIM=0.6,
        TEXT_PROPOSALS_MIN_SCORE=TEXT_PROPOSALS_MIN_SCORE,
        TEXT_PROPOSALS_NMS_THRESH=0.3,
        TEXT_LINE_NMS_THRESH = TEXT_LINE_NMS_THRESH,  ## IoU threshold between text lines (translated)
    ),
    leftAdjust=True,  ## extend detected text lines to the left (translated)
    rightAdjust=True,  ## extend detected text lines to the right (translated)
    alph=alph  ## extension factor for left/right stretching (translated)
    ,bili=bili
)
print(result1)
# Second pass: same configuration but with angle detection disabled.
_, result2, angle2, scores2,tex_rec,newBox2,boxForSingle2,scoresForSingle2,keepIndexForSingle2\
,tp_groups2,Allboxes,Allscores2= model.model(img ,
    detectAngle=False,  ## whether to detect text orientation (translated)
    config=dict(MAX_HORIZONTAL_GAP=MAX_HORIZONTAL_GAP,  ## max gap between characters, used to merge text lines (translated)
        MIN_V_OVERLAPS=0.6,
        MIN_SIZE_SIM=0.6,
        TEXT_PROPOSALS_MIN_SCORE=TEXT_PROPOSALS_MIN_SCORE,
        TEXT_PROPOSALS_NMS_THRESH=0.3,
        TEXT_LINE_NMS_THRESH=TEXT_LINE_NMS_THRESH,  ## IoU threshold between text lines (translated)
        # Tune the parameter above to improve line recall: the larger it is,
        # the fewer boxes are discarded, so the more results. (translated)
    ),
    leftAdjust=True,  ## extend detected text lines to the left (translated)
    rightAdjust=True,  ## extend detected text lines to the right (translated)
    alph=alph,  ## extension factor for left/right stretching (translated)
    bili=bili
)
# print(result2)
# Flatten the grouped proposal indices and draw the grouped boxes.
alldex = []
for i in tp_groups:
    alldex += i
plot_boxes1(img, boxForSingle[alldex])
# [print(i)for i in scoresForSingle if i not in alldex]
# print(len(boxForSingle))
# print(len(alldex))
##
# (Repeated experiment of the flatten-and-plot step above.)
alldex = []
for i in tp_groups:
    alldex += i
plot_boxes1(img, boxForSingle[alldex])
print(len(boxForSingle))
print(len(alldex))
##
# Plot the better-scoring of the two passes. (translated)
print(tp_groups)
if scores1.sum() >= scores2.sum():
    # NOTE(review): bare except swallows any error (e.g. tp_groups shorter
    # than 3) and silently falls back to the second pass's boxes.
    try:
        plot_boxes1(img, boxForSingle[tp_groups[2]])
        print('huatu')
        plot_boxes1(img, boxForSingle)
        # plot_boxes1(img, [[64,58,66,82]])
    except:
        # plot_boxes1(img, boxForSingle2[tp_groups2[0]])
        plot_boxes1(img, boxForSingle2)
Image.fromarray(img).save("画框之前的图片.png")  # image before boxes are drawn (translated)
# Use the results below to study the per-character score problem. (translated)
#
# plot_boxes1(img, Allboxes[8:12])  # batch-draw boxes (translated)
# plot_boxes1(img, Allboxes)  # shows YOLO did find the character boxes — they just were not recognized (translated)
# plot_boxes1(img, boxForSingle2)  # suggests the YOLO output is fine and the graph-clustering step is at fault (translated)
# Further notes: the expected character "沙" is missing from the result;
# suspected bug in the tp_groups computation — no, the boxes themselves
# lack it. (translated)
##
# Assemble the output dict from whichever pass scored higher.
if scores1.sum() > scores2.sum():
    out = {}
    out['picName'] = 'tmp'
    out['parser'] = result1
    out['angle'] = angle1
##
out = {}
out['picName'] = 'tmp'
out['parser'] = result2
out['angle2'] = angle2
# In[ ]:
'''
的
沙
发
斯
蒂
芬
'''
# Opening the result image in Paint shows coordinates as the mouse moves. (translated)
## Check cv2's row/column ordering. (translated)
im = cv2.imread('tmp.png')
im = im[0:250, 100:250]  # indexing is (h, w): second index is x, first is y (translated)
cv2.imwrite('11111.png', im)
##
# Test the image preprocessing method below: (translated)
def depoint():  # input: gray image — denoising scheme. (translated)
    """Variant of depoint that reads 'tmp.png' as grayscale directly.

    NOTE(review): this redefinition shadows the earlier ``depoint(img)``
    defined above in this module.  Side effect: writes '11111.png'.
    """
    pixdata = cv2.imread('tmp.png', flags=0)
    print(pixdata.shape)
    # NOTE(review): as above, "w" is actually rows and "h" is cols.
    w, h = pixdata.shape
    for y in range(1, h-1):
        for x in range(1, w-1):  # sharpen: drop edge pixels whose neighbours are near-white (translated)
            count = 0
            if pixdata[x, y-1] > 245:
                count = count + 1
            if pixdata[x, y+1] > 245:
                count = count + 1
            if pixdata[x-1, y] > 245:
                count = count + 1
            if pixdata[x+1, y] > 245:
                count = count + 1
            if count > 2:
                pixdata[x, y] = 255
    pixdata = src_RGB = cv2.cvtColor(pixdata, cv2.COLOR_GRAY2BGR)  # keep the rest of the code unchanged (translated)
    pixdata = cv2.fastNlMeansDenoisingColored(pixdata, None, 10, 10, 7, 21)
    cv2.imwrite('11111.png', pixdata)
    print(pixdata.shape)
    return pixdata


# Run the preprocessing experiment immediately.
depoint()
#https://www.jianshu.com/p/921c1da740b5
##
| StarcoderdataPython |
import numpy as np
import tensorflow as tf
from numpy import random as rnd
from numpy import testing as np_testing
import pymanopt
from pymanopt.manifolds import Product, Sphere, Stiefel
from pymanopt.solvers import TrustRegions
from ._test import TestCase
class TestProblem(TestCase):
    """Tests for pymanopt.Problem construction, autodiff, and attributes."""

    def setUp(self):
        self.n = 15
        self.man = Sphere(self.n)

        # Cost defined through the TensorFlow autodiff backend.
        @pymanopt.function.TensorFlow(self.man)
        def cost(X):
            return tf.exp(tf.reduce_sum(X ** 2))

        self.cost = cost

    def test_prepare(self):
        problem = pymanopt.Problem(self.man, self.cost)
        x = rnd.randn(self.n)
        # Euclidean gradient of exp(sum(x^2)) is 2x * exp(sum(x^2)).
        np_testing.assert_allclose(
            2 * x * np.exp(np.sum(x ** 2)), problem.egrad(x)
        )

    def test_attribute_override(self):
        problem = pymanopt.Problem(self.man, self.cost)
        # verbosity must be a non-negative integer.
        with self.assertRaises(ValueError):
            problem.verbosity = "0"
        with self.assertRaises(ValueError):
            problem.verbosity = -1
        problem.verbosity = 2
        # The manifold is fixed at construction time.
        with self.assertRaises(AttributeError):
            problem.manifold = None

    def test_vararg_cost_on_product(self):
        # A *args cost on a Product manifold receives one argument per factor.
        shape = (3, 3)
        manifold = Product([Stiefel(*shape)] * 2)

        @pymanopt.function.TensorFlow(manifold)
        def cost(*args):
            X, Y = args
            return tf.reduce_sum(X) + tf.reduce_sum(Y)

        problem = pymanopt.Problem(manifold=manifold, cost=cost)
        solver = TrustRegions(maxiter=1)
        Xopt, Yopt = solver.solve(problem)
        self.assertEqual(Xopt.shape, (3, 3))
        self.assertEqual(Yopt.shape, (3, 3))
| StarcoderdataPython |
#!/usr/bin/env python
from wand.image import Image
from wand.drawing import Drawing
from wand.color import Color
from wandplus.textutil import calcSuitableFontsize
# http://www.imagemagick.org/Usage/text/
# original imagemagick command:
# convert -background white -fill dodgerblue -font Candice \
# -strokewidth 2 -stroke blue -undercolor lightblue \
# -size 165x70 -gravity center label:Anthony label_color.gif
# Target label size in pixels (matches the -size 165x70 of the original
# ImageMagick command above).
w = 165
h = 70
with Drawing() as draw:
    text = 'Anthony'
    draw.font = 'Candice'
    # Pick the largest font size whose rendered text fits inside the w x h box.
    draw.font_size = calcSuitableFontsize(draw, text, width=w, height=h)
    with Image(width=w, height=h, background=Color('white')) as img:
        draw.gravity = 'center'
        draw.text_under_color = Color('lightblue')  # background strip behind the text
        draw.fill_color = Color('dodgerblue')
        draw.stroke_color = Color('blue')
        draw.stroke_width = 2
        draw.text(0, 0, text)
        draw(img)  # apply the accumulated drawing commands to the canvas
        img.save(filename='sample11.png')
| StarcoderdataPython |
5076484 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2016-12-17 23:54
from __future__ import unicode_literals
import c3nav.mapdata.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.10.4 (makemigrations): adds the Escalator
    # model (slug name, geometry, up/down direction) with foreign keys to
    # Level and Package.  Do not hand-edit field definitions — they must
    # match the recorded migration state.

    dependencies = [
        ('mapdata', '0021_auto_20161217_2333'),
    ]

    operations = [
        migrations.CreateModel(
            name='Escalator',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.SlugField(unique=True, verbose_name='Name')),
                ('geometry', c3nav.mapdata.fields.GeometryField()),
                ('direction', models.BooleanField(choices=[(True, 'up'), (False, 'down')], verbose_name='direction')),
                ('level', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='escalators', to='mapdata.Level', verbose_name='level')),
                ('package', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='escalators', to='mapdata.Package', verbose_name='map package')),
            ],
            options={
                'verbose_name': 'Escalator',
                'default_related_name': 'escalators',
                'verbose_name_plural': 'Escalators',
            },
        ),
    ]
| StarcoderdataPython |
3342062 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'WhereforeGUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow):
        """Build the Wherefore main-window widget tree.

        Auto-generated by pyuic5 from WhereforeGUI.ui — per the file
        header, manual changes are lost on regeneration; only comments
        are added here.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(858, 595)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
        MainWindow.setSizePolicy(sizePolicy)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.centralwidget.sizePolicy().hasHeightForWidth())
        self.centralwidget.setSizePolicy(sizePolicy)
        self.centralwidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        # --- Top bar: broker address plus start/stop consumer controls ---
        self.consumerBarLayout = QtWidgets.QHBoxLayout()
        self.consumerBarLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.consumerBarLayout.setObjectName("consumerBarLayout")
        self.brokerAddressLayout = QtWidgets.QHBoxLayout()
        self.brokerAddressLayout.setObjectName("brokerAddressLayout")
        self.brokerAddressLabel = QtWidgets.QLabel(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.brokerAddressLabel.sizePolicy().hasHeightForWidth())
        self.brokerAddressLabel.setSizePolicy(sizePolicy)
        self.brokerAddressLabel.setObjectName("brokerAddressLabel")
        self.brokerAddressLayout.addWidget(self.brokerAddressLabel)
        self.brokerAddressEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.brokerAddressEdit.setObjectName("brokerAddressEdit")
        self.brokerAddressLayout.addWidget(self.brokerAddressEdit)
        self.consumerBarLayout.addLayout(self.brokerAddressLayout)
        self.line1_1 = QtWidgets.QFrame(self.centralwidget)
        self.line1_1.setFrameShape(QtWidgets.QFrame.VLine)
        self.line1_1.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line1_1.setObjectName("line1_1")
        self.consumerBarLayout.addWidget(self.line1_1)
        self.startAtLabel = QtWidgets.QLabel(self.centralwidget)
        self.startAtLabel.setObjectName("startAtLabel")
        self.consumerBarLayout.addWidget(self.startAtLabel)
        self.startAtSelector = QtWidgets.QComboBox(self.centralwidget)
        self.startAtSelector.setObjectName("startAtSelector")
        self.consumerBarLayout.addWidget(self.startAtSelector)
        self.startOffsetEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.startOffsetEdit.setMaximumSize(QtCore.QSize(100, 16777215))
        self.startOffsetEdit.setObjectName("startOffsetEdit")
        self.consumerBarLayout.addWidget(self.startOffsetEdit)
        self.startTimeEdit = QtWidgets.QDateTimeEdit(self.centralwidget)
        self.startTimeEdit.setObjectName("startTimeEdit")
        self.consumerBarLayout.addWidget(self.startTimeEdit)
        self.line1_2 = QtWidgets.QFrame(self.centralwidget)
        self.line1_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line1_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line1_2.setObjectName("line1_2")
        self.consumerBarLayout.addWidget(self.line1_2)
        self.endAtLabel = QtWidgets.QLabel(self.centralwidget)
        self.endAtLabel.setObjectName("endAtLabel")
        self.consumerBarLayout.addWidget(self.endAtLabel)
        self.endAtSelector = QtWidgets.QComboBox(self.centralwidget)
        self.endAtSelector.setObjectName("endAtSelector")
        self.consumerBarLayout.addWidget(self.endAtSelector)
        self.endOffsetEdit = QtWidgets.QLineEdit(self.centralwidget)
        self.endOffsetEdit.setMaximumSize(QtCore.QSize(100, 16777215))
        self.endOffsetEdit.setObjectName("endOffsetEdit")
        self.consumerBarLayout.addWidget(self.endOffsetEdit)
        self.endTimeEdit = QtWidgets.QDateTimeEdit(self.centralwidget)
        self.endTimeEdit.setObjectName("endTimeEdit")
        self.consumerBarLayout.addWidget(self.endTimeEdit)
        self.verticalLayout_3.addLayout(self.consumerBarLayout)
        self.line = QtWidgets.QFrame(self.centralwidget)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.verticalLayout_3.addWidget(self.line)
        # --- Left pane: topic/partition source tree with enable/disable row ---
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout()
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        self.topicPartitionSourceTree = QtWidgets.QTreeView(self.centralwidget)
        self.topicPartitionSourceTree.setObjectName("topicPartitionSourceTree")
        self.verticalLayout_2.addWidget(self.topicPartitionSourceTree)
        self.enableDisableLayout = QtWidgets.QHBoxLayout()
        self.enableDisableLayout.setObjectName("enableDisableLayout")
        self.enableAllButton = QtWidgets.QPushButton(self.centralwidget)
        self.enableAllButton.setObjectName("enableAllButton")
        self.enableDisableLayout.addWidget(self.enableAllButton)
        self.disableAllButton = QtWidgets.QPushButton(self.centralwidget)
        self.disableAllButton.setObjectName("disableAllButton")
        self.enableDisableLayout.addWidget(self.disableAllButton)
        self.enableDefaultComboBox = QtWidgets.QComboBox(self.centralwidget)
        self.enableDefaultComboBox.setPlaceholderText("")
        self.enableDefaultComboBox.setObjectName("enableDefaultComboBox")
        self.enableDisableLayout.addWidget(self.enableDefaultComboBox)
        self.verticalLayout_2.addLayout(self.enableDisableLayout)
        self.horizontalLayout_2.addLayout(self.verticalLayout_2)
        self.line_2_1 = QtWidgets.QFrame(self.centralwidget)
        self.line_2_1.setFrameShape(QtWidgets.QFrame.VLine)
        self.line_2_1.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2_1.setObjectName("line_2_1")
        self.horizontalLayout_2.addWidget(self.line_2_1)
        # --- Right pane: partition info box (low/high offsets, lag) ---
        self.verticalLayout_4 = QtWidgets.QVBoxLayout()
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.partitionInfoBox = QtWidgets.QGroupBox(self.centralwidget)
        self.partitionInfoBox.setMinimumSize(QtCore.QSize(500, 0))
        self.partitionInfoBox.setObjectName("partitionInfoBox")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.partitionInfoBox)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.partitionInfoLayout = QtWidgets.QHBoxLayout()
        self.partitionInfoLayout.setSizeConstraint(QtWidgets.QLayout.SetDefaultConstraint)
        self.partitionInfoLayout.setObjectName("partitionInfoLayout")
        self.lowLayout = QtWidgets.QFormLayout()
        self.lowLayout.setObjectName("lowLayout")
        self.lowOffsetLabel = QtWidgets.QLabel(self.partitionInfoBox)
        self.lowOffsetLabel.setObjectName("lowOffsetLabel")
        self.lowLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.lowOffsetLabel)
        self.lowOffsetValue = QtWidgets.QLabel(self.partitionInfoBox)
        self.lowOffsetValue.setObjectName("lowOffsetValue")
        self.lowLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lowOffsetValue)
        self.partitionInfoLayout.addLayout(self.lowLayout)
        self.line3_1 = QtWidgets.QFrame(self.partitionInfoBox)
        self.line3_1.setFrameShape(QtWidgets.QFrame.VLine)
        self.line3_1.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line3_1.setObjectName("line3_1")
        self.partitionInfoLayout.addWidget(self.line3_1)
        self.highLayout = QtWidgets.QFormLayout()
        self.highLayout.setObjectName("highLayout")
        self.highOffsetLabel = QtWidgets.QLabel(self.partitionInfoBox)
        self.highOffsetLabel.setObjectName("highOffsetLabel")
        self.highLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.highOffsetLabel)
        self.highOffsetValue = QtWidgets.QLabel(self.partitionInfoBox)
        self.highOffsetValue.setObjectName("highOffsetValue")
        self.highLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.highOffsetValue)
        self.partitionInfoLayout.addLayout(self.highLayout)
        self.line3_2 = QtWidgets.QFrame(self.partitionInfoBox)
        self.line3_2.setFrameShape(QtWidgets.QFrame.VLine)
        self.line3_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line3_2.setObjectName("line3_2")
        self.partitionInfoLayout.addWidget(self.line3_2)
        self.lagLayout = QtWidgets.QFormLayout()
        self.lagLayout.setObjectName("lagLayout")
        self.lagLabel = QtWidgets.QLabel(self.partitionInfoBox)
        self.lagLabel.setObjectName("lagLabel")
        self.lagLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.lagLabel)
        self.lagValue = QtWidgets.QLabel(self.partitionInfoBox)
        self.lagValue.setObjectName("lagValue")
        self.lagLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lagValue)
        self.partitionInfoLayout.addLayout(self.lagLayout)
        self.verticalLayout_5.addLayout(self.partitionInfoLayout)
        self.verticalLayout_4.addWidget(self.partitionInfoBox)
        # --- Right pane: per-source info box (name/type, offsets, rates) ---
        self.sourceInfoBox = QtWidgets.QGroupBox(self.centralwidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.sourceInfoBox.sizePolicy().hasHeightForWidth())
        self.sourceInfoBox.setSizePolicy(sizePolicy)
        self.sourceInfoBox.setObjectName("sourceInfoBox")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.sourceInfoBox)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.verticalLayout = QtWidgets.QVBoxLayout()
        self.verticalLayout.setObjectName("verticalLayout")
        self.gridLayout = QtWidgets.QGridLayout()
        self.gridLayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
        self.gridLayout.setObjectName("gridLayout")
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.sourceNameLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.sourceNameLabel.setObjectName("sourceNameLabel")
        self.horizontalLayout.addWidget(self.sourceNameLabel)
        self.sourceNameValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.sourceNameValue.setObjectName("sourceNameValue")
        self.horizontalLayout.addWidget(self.sourceNameValue)
        self.gridLayout.addLayout(self.horizontalLayout, 0, 0, 1, 1)
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.sourceTypeLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.sourceTypeLabel.setObjectName("sourceTypeLabel")
        self.horizontalLayout_3.addWidget(self.sourceTypeLabel)
        self.sourceTypeValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.sourceTypeValue.setObjectName("sourceTypeValue")
        self.horizontalLayout_3.addWidget(self.sourceTypeValue)
        self.gridLayout.addLayout(self.horizontalLayout_3, 0, 1, 1, 1)
        self.verticalLayout.addLayout(self.gridLayout)
        self.verticalLayout_6.addLayout(self.verticalLayout)
        self.line_2 = QtWidgets.QFrame(self.sourceInfoBox)
        self.line_2.setFrameShape(QtWidgets.QFrame.HLine)
        self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line_2.setObjectName("line_2")
        self.verticalLayout_6.addWidget(self.line_2)
        self.otherLayout = QtWidgets.QFormLayout()
        self.otherLayout.setObjectName("otherLayout")
        self.firstOffsetLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.firstOffsetLabel.setObjectName("firstOffsetLabel")
        self.otherLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.firstOffsetLabel)
        self.firstOffsetValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.firstOffsetValue.setObjectName("firstOffsetValue")
        self.otherLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.firstOffsetValue)
        self.currentOffsetLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.currentOffsetLabel.setObjectName("currentOffsetLabel")
        self.otherLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.currentOffsetLabel)
        self.currentOffsetValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.currentOffsetValue.setObjectName("currentOffsetValue")
        self.otherLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.currentOffsetValue)
        self.receivedMessagesLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.receivedMessagesLabel.setObjectName("receivedMessagesLabel")
        self.otherLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.receivedMessagesLabel)
        self.receivedMessagesValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.receivedMessagesValue.setObjectName("receivedMessagesValue")
        self.otherLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.receivedMessagesValue)
        self.consumptionRateValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.consumptionRateValue.setObjectName("consumptionRateValue")
        self.otherLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.consumptionRateValue)
        self.consumptionRateLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.consumptionRateLabel.setObjectName("consumptionRateLabel")
        self.otherLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.consumptionRateLabel)
        self.messageRateLabel = QtWidgets.QLabel(self.sourceInfoBox)
        self.messageRateLabel.setObjectName("messageRateLabel")
        self.otherLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.messageRateLabel)
        self.messageRateValue = QtWidgets.QLabel(self.sourceInfoBox)
        self.messageRateValue.setObjectName("messageRateValue")
        self.otherLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.messageRateValue)
        self.verticalLayout_6.addLayout(self.otherLayout)
        # --- Timestamps sub-box within the source info box ---
        self.timestampsBox = QtWidgets.QGroupBox(self.sourceInfoBox)
        self.timestampsBox.setObjectName("timestampsBox")
        self.formLayout_2 = QtWidgets.QFormLayout(self.timestampsBox)
        self.formLayout_2.setObjectName("formLayout_2")
        self.firstMsgTimLabel = QtWidgets.QLabel(self.timestampsBox)
        self.firstMsgTimLabel.setObjectName("firstMsgTimLabel")
        self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.firstMsgTimLabel)
        self.firstMsgTimeValue = QtWidgets.QLabel(self.timestampsBox)
        self.firstMsgTimeValue.setObjectName("firstMsgTimeValue")
        self.formLayout_2.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.firstMsgTimeValue)
        self.lastMsgReceiveTimeLabel = QtWidgets.QLabel(self.timestampsBox)
        self.lastMsgReceiveTimeLabel.setObjectName("lastMsgReceiveTimeLabel")
        self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.lastMsgReceiveTimeLabel)
        self.lastMsgReceiveTimeValue = QtWidgets.QLabel(self.timestampsBox)
        self.lastMsgReceiveTimeValue.setObjectName("lastMsgReceiveTimeValue")
        self.formLayout_2.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lastMsgReceiveTimeValue)
        self.lastMsgTimeLabel = QtWidgets.QLabel(self.timestampsBox)
        self.lastMsgTimeLabel.setObjectName("lastMsgTimeLabel")
        self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.lastMsgTimeLabel)
        self.lastMsgTimeValue = QtWidgets.QLabel(self.timestampsBox)
        self.lastMsgTimeValue.setObjectName("lastMsgTimeValue")
        self.formLayout_2.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lastMsgTimeValue)
        self.lastMsgKafkaTimeLabel = QtWidgets.QLabel(self.timestampsBox)
        self.lastMsgKafkaTimeLabel.setObjectName("lastMsgKafkaTimeLabel")
        self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.lastMsgKafkaTimeLabel)
        self.lastMsgKafkaTimeValue = QtWidgets.QLabel(self.timestampsBox)
        self.lastMsgKafkaTimeValue.setObjectName("lastMsgKafkaTimeValue")
        self.formLayout_2.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lastMsgKafkaTimeValue)
        self.verticalLayout_6.addWidget(self.timestampsBox)
        spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
        self.verticalLayout_6.addItem(spacerItem)
        self.verticalLayout_4.addWidget(self.sourceInfoBox)
        self.horizontalLayout_2.addLayout(self.verticalLayout_4)
        self.verticalLayout_3.addLayout(self.horizontalLayout_2)
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 858, 22))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)

        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install the (re)translated window title, group-box titles and
    widget texts.  Called by setupUi and again on locale change."""
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "Wherefore"))
    # Group boxes take titles rather than texts.
    for box, title in ((self.partitionInfoBox, "Partition info"),
                       (self.sourceInfoBox, "Source info"),
                       (self.timestampsBox, "Timestamps")):
        box.setTitle(_translate("MainWindow", title))
    # Static labels and buttons.
    captioned = (
        (self.brokerAddressLabel, "Broker address"),
        (self.startAtLabel, "Start at"),
        (self.endAtLabel, "Stop at"),
        (self.enableAllButton, "Enable all"),
        (self.disableAllButton, "Disable all"),
        (self.lowOffsetLabel, "Low offset:"),
        (self.highOffsetLabel, "High offset:"),
        (self.lagLabel, "Current lag:"),
        (self.sourceNameLabel, "Source name:"),
        (self.sourceTypeLabel, "Source type:"),
        (self.firstOffsetLabel, "First offset"),
        (self.currentOffsetLabel, "Current offset"),
        (self.receivedMessagesLabel, "Received messages"),
        (self.consumptionRateLabel, "Consumption rate"),
        (self.messageRateLabel, "Message rate"),
        # NOTE: the generated attribute name is missing the 'e' of 'Time'.
        (self.firstMsgTimLabel, "First message time"),
        (self.lastMsgReceiveTimeLabel, "Last received time"),
        (self.lastMsgTimeLabel, "Last message time"),
        (self.lastMsgKafkaTimeLabel, "Last Kafka time"),
    )
    for widget, caption in captioned:
        widget.setText(_translate("MainWindow", caption))
    # Value labels all start as the "n/a" placeholder.
    for value_widget in (self.lowOffsetValue, self.highOffsetValue,
                         self.lagValue, self.sourceNameValue,
                         self.sourceTypeValue, self.firstOffsetValue,
                         self.currentOffsetValue, self.receivedMessagesValue,
                         self.consumptionRateValue, self.messageRateValue,
                         self.firstMsgTimeValue, self.lastMsgReceiveTimeValue,
                         self.lastMsgTimeValue, self.lastMsgKafkaTimeValue):
        value_widget.setText(_translate("MainWindow", "n/a"))
| StarcoderdataPython |
1654826 | """ API Endpoint """
from http.server import BaseHTTPRequestHandler
import json
from datetime import datetime
import pytz
# pylint: disable=import-error
from api._utils import scrap_data
try:
    # Scrape once at import time; every request then serves this cached
    # payload unchanged (serverless platforms re-import per instance).
    data = scrap_data.get_data()
    res = {
        'notice': 'endpoint is deprecated, please use /api/v2',
        'lastUpdate': data['lastUpdate'].isoformat(),
        'states': data['states'],
        'vaccinated': data['sumStates'],
        '2nd_vaccination': {
            'vaccinated': data['sumStates2nd'],
            'difference_to_the_previous_day': data['sumDiffStates2nd']
        },
        'sum_vaccine_doses': data['sumStates'] + data['sumStates2nd'],
        'difference_to_the_previous_day': data['sumDiffStates'],
        'vaccinations_per_1000_inhabitants': round(data['sumStates'] / data['totalGermany'] * 1000, 2),
        'total': data['totalGermany'],
        'quote': round(data['sumStates'] / data['totalGermany'] * 100, 2)
    }
    HTTPCODE = 200
except TypeError:
    # NOTE(review): presumably get_data() yields None on scrape failure,
    # making the subscripts above raise TypeError — confirm in scrap_data.
    res = {
        'message': 'scrapping data from RKI excel failed'
    }
    HTTPCODE = 500
class Handler(BaseHTTPRequestHandler):
    """HTTP handler that serves the module-level scrape result as JSON."""
    # pylint: disable=invalid-name
    def do_GET(self):
        """GET Method: reply with the pre-computed payload and status."""
        self.send_response(HTTPCODE)
        cache_stamp = pytz.timezone('Europe/Berlin') \
            .localize(datetime.now()).isoformat()
        for header, value in (
                ('Content-Type', 'application/json'),
                ('X-Cache-Timestamp', cache_stamp),
                ('Access-Control-Allow-Origin', '*')):
            self.send_header(header, value)
        self.end_headers()
        self.wfile.write(json.dumps(res).encode())
| StarcoderdataPython |
8028066 | <reponame>dataiku/plugin-finbert
# Code for custom code recipe fb2021 (imported from a Python recipe)
# To finish creating your custom recipe from your original PySpark recipe, you need to:
# - Declare the input and output roles in recipe.json
# - Replace the dataset names by roles access in your code
# - Declare, if any, the params of your custom recipe in recipe.json
# - Replace the hardcoded params values by acccess to the configuration map
# See sample code below for how to do that.
# The code of your original recipe is included afterwards for convenience.
# Please also see the "recipe.json" file for more information.
# import the classes for accessing DSS objects from the recipe
import dataiku
# Import the helpers for custom recipes
from dataiku.customrecipe import *
from dataiku import recipe
# First (and only) input/output datasets bound to the recipe roles
# declared in recipe.json.
input_dataset = recipe.get_inputs_as_datasets()[0]
output_dataset = recipe.get_outputs_as_datasets()[0]
# The configuration consists of the parameters set up by the user in the recipe Settings tab.
# Parameters must be added to the recipe.json file so that DSS can prompt the user for values in
# the Settings tab of the recipe. The field "params" holds a list of all the params for which the
# user will be prompted for values.
# The configuration is simply a map of parameters, and retrieving the value of one of them is simply:
# my_variable = get_recipe_config()['parameter_name']
text_column = get_recipe_config()['text_column_name']
# For optional parameters, you should provide a default value in case the parameter is not present:
# my_variable = get_recipe_config().get('parameter_name', None)
# Note about typing:
# The configuration of the recipe is passed through a JSON object
# As such, INT parameters of the recipe are received in the get_recipe_config() dict as a Python float.
# If you absolutely require a Python int, use int(get_recipe_config()["my_int_param"])
#############################
# Your original recipe
#############################
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
# -*- coding: utf-8 -*-
import dataiku
import pandas as pd, numpy as np
from dataiku import pandasutils as pdu
import nltk
# Sentence tokenizer data required by finbert's predict().
nltk.download('punkt')
from finbert.finbert import *
import finbert.utils as tools
# -------------------------------------------------------------------------------- NOTEBOOK-CELL: CODE
# Read recipe inputs
#examples = dataiku.Dataset("input_A_datasets")
examples_df = input_dataset.get_dataframe()
# Compute recipe outputs from inputs
## load pretrained model from Huggingface with the transformers package
from transformers import AutoTokenizer, AutoModelForSequenceClassification
tokenizer = AutoTokenizer.from_pretrained("ProsusAI/finbert")
model = AutoModelForSequenceClassification.from_pretrained("ProsusAI/finbert")
def score_to_json(text, model):
    """Run FinBERT sentiment prediction on *text*.

    Returns a 3-tuple: per-sentence results as a dict keyed by row index,
    the mean sentiment score, and the most frequent prediction label.
    """
    preds = predict(text, model)
    per_sentence = preds.to_dict(orient="index")
    mean_score = preds['sentiment_score'].mean()
    majority_label = preds['prediction'].mode()[0]
    return per_sentence, mean_score, majority_label
# Score every row of the text column; unpack the 3-tuple returned by
# score_to_json into three new columns.
examples_df['bert_result'],examples_df['bert_avg_sent_score'],examples_df['bert_avg_pred'] = zip(*examples_df[text_column].apply(lambda x: score_to_json(x, model)))
examples_sentiment_df = examples_df # For this sample code, simply copy input to output
# Write recipe outputs
#examples_sentiment = dataiku.Dataset("output_A_datasets")
output_dataset.write_with_schema(examples_sentiment_df)
367417 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import sys
import pygame as pg
import fill
from ui import Button, GameTable
from event import ClickEventListen
# Global rendering scale; every pixel size below is multiplied by it.
ZOOM = 1
WINDOW_SIZE = (int(580 * ZOOM), int(340 * ZOOM))  # main window (w, h)
ORIGIN_POINT = (int(20 * ZOOM), int(20 * ZOOM))   # top-left of the board
TABLE_SIZE = (15, 15)                             # board size in cells
BLOCK_SIZE = int(20 * ZOOM)                       # cell side in pixels
# Color key -> RGB triple used for both board blocks and color buttons.
BLOCK_COLORS = {1: (255, 0, 0),
                2: (0, 0, 255),
                3: (0, 255, 0),
                4: (255, 255, 0),
                5: (0, 255, 255),
                6: (255, 0, 255)}
class Floodit(object):
    """Flood-it game window.

    The player repeatedly clicks a color button; the region connected to
    the top-left corner is flood-filled with that color until the whole
    board is one color ("win").
    """

    def __init__(self, conf_dict=None):
        """Build the window, board, color buttons and "New Game" button.

        conf_dict keys (COLORS, WINDOW_SIZE, TABLE_SIZE, TABLE_POSITION,
        BLOCK_SIDE) become attributes of this instance.
        """
        # None default instead of a shared mutable {} default argument.
        self.__dict__.update(conf_dict or {})
        self.screen = pg.display.set_mode(self.WINDOW_SIZE)
        pg.display.set_caption("Flood it!")
        pg.font.init()
        self.font = pg.font.SysFont("AR PL UMing CN", int(20 * ZOOM))
        self.screen.fill([200, 200, 200])
        self.table = GameTable(self.COLORS.keys(),
                               self.TABLE_SIZE,
                               self.TABLE_POSITION,
                               self.BLOCK_SIDE)
        # Origin of the row of color buttons (right of the board, bottom).
        cl_x = self.TABLE_POSITION[0] + \
            self.BLOCK_SIDE * (self.TABLE_SIZE[0] + 1)
        cl_y = self.TABLE_POSITION[1] + \
            self.BLOCK_SIDE * (self.TABLE_SIZE[1] - 1)
        # "New game" button spans the width of the color-button row.
        x = self.TABLE_POSITION[0] + self.BLOCK_SIDE * (self.TABLE_SIZE[0] + 1)
        w = self.BLOCK_SIDE * (len(self.COLORS) * 2 - 1)
        y = self.TABLE_POSITION[1]
        h = self.BLOCK_SIDE * 2
        self.rb = Button((x, y), (w, h), text=u"New Game!",
                         fontsize=int(20 * ZOOM))
        self.rb.show(self.screen)
        self.events = ClickEventListen()
        self.events.register(self.rb, self.reset)
        # One clickable swatch per color; .items() works on both Python 2
        # and 3 (the original .iteritems() is Python-2 only).
        left = cl_x
        self.color_buttons = []
        for k, v in self.COLORS.items():
            button = Button((left, cl_y), (self.BLOCK_SIDE,
                            self.BLOCK_SIDE), color=v, fontsize=int(20 * ZOOM))
            button.show(self.screen)
            self.color_buttons.append(button)
            self.events.register(button, self.colors_click, number=k)
            left += self.BLOCK_SIDE * 2
        self.table.draw(self.screen, BLOCK_COLORS)
        self.wined = False
        # Rect of the "win" banner; set by win().  Initialized here so
        # reset() before any win no longer raises AttributeError.
        self.winrect = None

    def show(self):
        """Flush all pending drawing to the display."""
        pg.display.flip()

    def colors_click(self, number=None):
        """Flood-fill from the origin with the clicked button's color key."""
        assert number, "CLICK ERROR!"
        if self.wined:
            return  # board already solved; ignore further color clicks
        if number in self.COLORS.keys():
            fill.fill(self.table, number, x=0, y=0)
            self.table.draw(self.screen, BLOCK_COLORS)
            self.wined = self.win()
            self.show()

    def win(self):
        """If the board is a single color, draw the "win" banner and
        return True; otherwise return False."""
        if fill.filldone(self.table):
            font = pg.font.SysFont("AR PL UMing CN", int(32 * ZOOM))
            win_text = font.render("win", 1, (0, 0, 0))
            self.winrect = win_text.get_rect()
            self.winrect.centerx = (self.rb.x + self.rb.x1) // 2
            self.winrect.centery = self.rb.y1 * 2
            pg.draw.rect(self.screen, [200, 200, 200], self.winrect)
            self.screen.blit(win_text, self.winrect)
            return True
        return False

    def reset(self):
        """Start a new game: fresh random board, clear any "win" banner."""
        self.wined = False
        del self.table
        self.table = GameTable(self.COLORS.keys(),
                               self.TABLE_SIZE,
                               self.TABLE_POSITION,
                               self.BLOCK_SIDE)
        self.table.draw(self.screen, BLOCK_COLORS)
        if self.winrect is not None:
            # Erase the banner.  Previously this drew unconditionally and
            # crashed with AttributeError when resetting before any win.
            pg.draw.rect(self.screen, [200, 200, 200], self.winrect)
        self.show()

    def mainloop(self):
        """Pump pygame events forever; dispatch clicks, exit on QUIT."""
        while 1:
            events = pg.event.get()
            for event in events:
                if event.type == pg.QUIT:
                    sys.exit(0)
                self.events.listen(event)
if __name__ == '__main__':
    # Wire the module-level constants into the game and run it.
    conf_dict = {"COLORS": BLOCK_COLORS,
                 "WINDOW_SIZE": WINDOW_SIZE,
                 "TABLE_SIZE": TABLE_SIZE,
                 "TABLE_POSITION": ORIGIN_POINT,
                 "BLOCK_SIDE": BLOCK_SIZE}
    fi = Floodit(conf_dict=conf_dict)
    fi.show()
    fi.mainloop()
| StarcoderdataPython |
3547157 | import numpy as np
from datetime import timedelta
from distutils.version import LooseVersion
import pandas as pd
import pandas.util.testing as tm
from pandas import to_timedelta
from pandas.util.testing import assert_series_equal, assert_frame_equal
from pandas import (Series, Timedelta, DataFrame, Timestamp, TimedeltaIndex,
timedelta_range, date_range, DatetimeIndex, Int64Index,
_np_version_under1p10, Float64Index, Index, tslib)
from pandas.tests.test_base import Ops
class TestTimedeltaIndexOps(Ops):
def setUp(self):
    """Keep only the TimedeltaIndex objects from the shared Ops fixtures."""
    super(TestTimedeltaIndexOps, self).setUp()
    self.is_valid_objs = [obj for obj in self.objs
                          if isinstance(obj, TimedeltaIndex)]
    self.not_valid_objs = []
def test_ops_properties(self):
    """Datetimelike field accessors are exposed on TimedeltaIndex."""
    coarse_fields = ['days', 'hours', 'minutes', 'seconds', 'milliseconds']
    self.check_ops_properties(coarse_fields)
    self.check_ops_properties(['microseconds', 'nanoseconds'])
def test_asobject_tolist(self):
    """asobject yields an object-dtype Index of Timedelta scalars and
    tolist() round-trips the same values (NaT preserved)."""
    idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
    expected_list = [Timedelta('1 days'), Timedelta('2 days'),
                     Timedelta('3 days'), Timedelta('4 days')]
    expected = pd.Index(expected_list, dtype=object, name='idx')
    result = idx.asobject
    self.assertTrue(isinstance(result, Index))
    self.assertEqual(result.dtype, object)
    self.assert_index_equal(result, expected)
    self.assertEqual(result.name, expected.name)
    self.assertEqual(idx.tolist(), expected_list)
    # same again, but with a NaT in the middle
    idx = TimedeltaIndex([timedelta(days=1), timedelta(days=2), pd.NaT,
                          timedelta(days=4)], name='idx')
    expected_list = [Timedelta('1 days'), Timedelta('2 days'), pd.NaT,
                     Timedelta('4 days')]
    expected = pd.Index(expected_list, dtype=object, name='idx')
    result = idx.asobject
    self.assertTrue(isinstance(result, Index))
    self.assertEqual(result.dtype, object)
    self.assert_index_equal(result, expected)
    self.assertEqual(result.name, expected.name)
    self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
    """min/max/argmin/argmax skip NaT; empty or all-NaT indexes give NaT."""
    # monotonic
    idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
    self.assertTrue(idx1.is_monotonic)
    # non-monotonic
    idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
    self.assertFalse(idx2.is_monotonic)
    for idx in [idx1, idx2]:
        self.assertEqual(idx.min(), Timedelta('1 days')),
        self.assertEqual(idx.max(), Timedelta('3 days')),
        self.assertEqual(idx.argmin(), 0)
        self.assertEqual(idx.argmax(), 2)
    for op in ['min', 'max']:
        # Return NaT
        obj = TimedeltaIndex([])
        self.assertTrue(pd.isnull(getattr(obj, op)()))
        obj = TimedeltaIndex([pd.NaT])
        self.assertTrue(pd.isnull(getattr(obj, op)()))
        obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
        self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_numpy_minmax(self):
    """np.min/max/argmin/argmax work on TimedeltaIndex; out= is rejected."""
    dr = pd.date_range(start='2016-01-15', end='2016-01-20')
    # datetimes reinterpreted as nanosecond offsets from the epoch
    td = TimedeltaIndex(np.asarray(dr))
    self.assertEqual(np.min(td), Timedelta('16815 days'))
    self.assertEqual(np.max(td), Timedelta('16820 days'))
    errmsg = "the 'out' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, errmsg, np.min, td, out=0)
    tm.assertRaisesRegexp(ValueError, errmsg, np.max, td, out=0)
    self.assertEqual(np.argmin(td), 0)
    self.assertEqual(np.argmax(td), 5)
    if not _np_version_under1p10:
        errmsg = "the 'out' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, errmsg, np.argmin, td, out=0)
        tm.assertRaisesRegexp(ValueError, errmsg, np.argmax, td, out=0)
def test_round(self):
    """round() to a fixed freq works on index and scalar; invalid or
    non-fixed frequencies raise ValueError."""
    td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
    elt = td[1]
    expected_rng = TimedeltaIndex([
        Timedelta('16801 days 00:00:00'),
        Timedelta('16801 days 00:00:00'),
        Timedelta('16801 days 01:00:00'),
        Timedelta('16801 days 02:00:00'),
        Timedelta('16801 days 02:00:00'),
    ])
    expected_elt = expected_rng[1]
    tm.assert_index_equal(td.round(freq='H'), expected_rng)
    self.assertEqual(elt.round(freq='H'), expected_elt)
    msg = pd.tseries.frequencies._INVALID_FREQ_ERROR
    with self.assertRaisesRegexp(ValueError, msg):
        td.round(freq='foo')
    with tm.assertRaisesRegexp(ValueError, msg):
        elt.round(freq='foo')
    msg = "<MonthEnd> is a non-fixed frequency"
    tm.assertRaisesRegexp(ValueError, msg, td.round, freq='M')
    tm.assertRaisesRegexp(ValueError, msg, elt.round, freq='M')
def test_representation(self):
    """__repr__/__unicode__/__str__ output matches the expected literals."""
    idx1 = TimedeltaIndex([], freq='D')
    idx2 = TimedeltaIndex(['1 days'], freq='D')
    idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
    idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
    idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
    exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
    exp2 = ("TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', "
            "freq='D')")
    exp3 = ("TimedeltaIndex(['1 days', '2 days'], "
            "dtype='timedelta64[ns]', freq='D')")
    exp4 = ("TimedeltaIndex(['1 days', '2 days', '3 days'], "
            "dtype='timedelta64[ns]', freq='D')")
    exp5 = ("TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', "
            "'3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)")
    with pd.option_context('display.width', 300):
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            for func in ['__repr__', '__unicode__', '__str__']:
                result = getattr(idx, func)()
                self.assertEqual(result, expected)
def test_representation_to_series(self):
    """repr of a Series backed by each TimedeltaIndex matches exactly."""
    idx1 = TimedeltaIndex([], freq='D')
    idx2 = TimedeltaIndex(['1 days'], freq='D')
    idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
    idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
    idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
    exp1 = """Series([], dtype: timedelta64[ns])"""
    exp2 = """0 1 days
dtype: timedelta64[ns]"""
    exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
    exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
    exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
    with pd.option_context('display.width', 300):
        for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                                 [exp1, exp2, exp3, exp4, exp5]):
            result = repr(pd.Series(idx))
            self.assertEqual(result, expected)
def test_summary(self):
    """GH 9116: summary() text for empty/regular/irregular indexes."""
    idx1 = TimedeltaIndex([], freq='D')
    idx2 = TimedeltaIndex(['1 days'], freq='D')
    idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
    idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
    idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
    exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
    exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
    exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
    exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
    exp5 = ("TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days "
            "00:00:00")
    for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
                             [exp1, exp2, exp3, exp4, exp5]):
        result = idx.summary()
        self.assertEqual(result, expected)
def test_add_iadd(self):
    """'+' with offsets/ints shifts the index; '+=' mutates in place."""
    # only test adding/sub offsets as + is now numeric
    # offset
    offsets = [pd.offsets.Hour(2), timedelta(hours=2),
               np.timedelta64(2, 'h'), Timedelta(hours=2)]
    for delta in offsets:
        rng = timedelta_range('1 days', '10 days')
        result = rng + delta
        expected = timedelta_range('1 days 02:00:00', '10 days 02:00:00',
                                   freq='D')
        tm.assert_index_equal(result, expected)
        rng += delta
        tm.assert_index_equal(rng, expected)
    # int: one step of the index frequency (hourly here)
    rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
    result = rng + 1
    expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
    tm.assert_index_equal(result, expected)
    rng += 1
    tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
    """'-' with offsets/ints shifts the index; tdi minus Timestamp raises
    but Timestamp plus tdi is valid."""
    # only test adding/sub offsets as - is now numeric
    # offset
    offsets = [pd.offsets.Hour(2), timedelta(hours=2),
               np.timedelta64(2, 'h'), Timedelta(hours=2)]
    for delta in offsets:
        rng = timedelta_range('1 days', '10 days')
        result = rng - delta
        expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
        tm.assert_index_equal(result, expected)
        rng -= delta
        tm.assert_index_equal(rng, expected)
    # int
    rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
    result = rng - 1
    expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
    tm.assert_index_equal(result, expected)
    rng -= 1
    tm.assert_index_equal(rng, expected)
    idx = TimedeltaIndex(['1 day', '2 day'])
    msg = "cannot subtract a datelike from a TimedeltaIndex"
    with tm.assertRaisesRegexp(TypeError, msg):
        idx - Timestamp('2011-01-01')
    result = Timestamp('2011-01-01') + idx
    expected = DatetimeIndex(['2011-01-02', '2011-01-03'])
    tm.assert_index_equal(result, expected)
def test_ops_compat(self):
    """tdi * offset raises; tdi / offset yields numeric ratios (NaT -> NaN)."""
    offsets = [pd.offsets.Hour(2), timedelta(hours=2),
               np.timedelta64(2, 'h'), Timedelta(hours=2)]
    rng = timedelta_range('1 days', '10 days', name='foo')
    # multiply
    for offset in offsets:
        self.assertRaises(TypeError, lambda: rng * offset)
    # divide: n days / 2 hours == 12 * n
    expected = Int64Index((np.arange(10) + 1) * 12, name='foo')
    for offset in offsets:
        result = rng / offset
        tm.assert_index_equal(result, expected, exact=False)
    # divide with nats
    rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
    expected = Float64Index([12, np.nan, 24], name='foo')
    for offset in offsets:
        result = rng / offset
        tm.assert_index_equal(result, expected)
    # don't allow division by NaT (make could in the future)
    self.assertRaises(TypeError, lambda: rng / pd.NaT)
def test_subtraction_ops(self):
    """Valid and invalid '-' combinations of Timestamp/Timedelta/dti/tdi."""
    # with datetimes/timedelta and tdi/dti
    tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
    dti = date_range('20130101', periods=3, name='bar')
    td = Timedelta('1 days')
    dt = Timestamp('20130101')
    self.assertRaises(TypeError, lambda: tdi - dt)
    self.assertRaises(TypeError, lambda: tdi - dti)
    self.assertRaises(TypeError, lambda: td - dt)
    self.assertRaises(TypeError, lambda: td - dti)
    result = dt - dti
    expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
    tm.assert_index_equal(result, expected)
    result = dti - dt
    expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
    tm.assert_index_equal(result, expected)
    result = tdi - td
    expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
    tm.assert_index_equal(result, expected, check_names=False)
    result = td - tdi
    expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
    tm.assert_index_equal(result, expected, check_names=False)
    result = dti - td
    expected = DatetimeIndex(
        ['20121231', '20130101', '20130102'], name='bar')
    tm.assert_index_equal(result, expected, check_names=False)
    result = dt - tdi
    expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
    tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
    """tz-aware minus tz-naive (and mismatched tz) raises TypeError;
    matching-tz subtractions produce Timedelta results."""
    # check that dt/dti subtraction ops with tz are validated
    dti = date_range('20130101', periods=3)
    ts = Timestamp('20130101')
    dt = ts.to_pydatetime()
    dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
    ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
    ts_tz2 = Timestamp('20130101').tz_localize('CET')
    dt_tz = ts_tz.to_pydatetime()
    td = Timedelta('1 days')

    def _check(result, expected):
        # result must be a true Timedelta scalar, not merely compare equal
        self.assertEqual(result, expected)
        self.assertIsInstance(result, Timedelta)
    # scalars
    result = ts - ts
    expected = Timedelta('0 days')
    _check(result, expected)
    result = dt_tz - ts_tz
    expected = Timedelta('0 days')
    _check(result, expected)
    result = ts_tz - dt_tz
    expected = Timedelta('0 days')
    _check(result, expected)
    # tz mismatches
    self.assertRaises(TypeError, lambda: dt_tz - ts)
    self.assertRaises(TypeError, lambda: dt_tz - dt)
    self.assertRaises(TypeError, lambda: dt_tz - ts_tz2)
    self.assertRaises(TypeError, lambda: dt - dt_tz)
    self.assertRaises(TypeError, lambda: ts - dt_tz)
    self.assertRaises(TypeError, lambda: ts_tz2 - ts)
    self.assertRaises(TypeError, lambda: ts_tz2 - dt)
    self.assertRaises(TypeError, lambda: ts_tz - ts_tz2)
    # with dti
    self.assertRaises(TypeError, lambda: dti - ts_tz)
    self.assertRaises(TypeError, lambda: dti_tz - ts)
    self.assertRaises(TypeError, lambda: dti_tz - ts_tz2)
    result = dti_tz - dt_tz
    expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
    tm.assert_index_equal(result, expected)
    result = dt_tz - dti_tz
    expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
    tm.assert_index_equal(result, expected)
    result = dti_tz - ts_tz
    expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
    tm.assert_index_equal(result, expected)
    result = ts_tz - dti_tz
    expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
    tm.assert_index_equal(result, expected)
    result = td - td
    expected = Timedelta('0 days')
    _check(result, expected)
    result = dti_tz - td
    expected = DatetimeIndex(
        ['20121231', '20130101', '20130102'], tz='US/Eastern')
    tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
    """tdi +/- tdi is element-wise; dti - tdi shifts dates (name reset)."""
    # These are normally union/diff set-like ops
    tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
    dti = date_range('20130101', periods=3, name='bar')
    # TODO(wesm): unused?
    # td = Timedelta('1 days')
    # dt = Timestamp('20130101')
    result = tdi - tdi
    expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
    tm.assert_index_equal(result, expected)
    result = tdi + tdi
    expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
    tm.assert_index_equal(result, expected)
    result = dti - tdi  # name will be reset
    expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
    tm.assert_index_equal(result, expected)
def test_sub_period(self):
    """GH 13078: Period minus/plus TimedeltaIndex is not supported."""
    period = pd.Period('2011-01-01', freq='D')
    for freq in (None, 'H'):
        idx = pd.TimedeltaIndex(['1 hours', '2 hours'], freq=freq)
        with tm.assertRaises(TypeError):
            idx - period
        with tm.assertRaises(TypeError):
            period - idx
def test_addition_ops(self):
    """'+' combinations of tdi/dti/Timedelta/Timestamp, plus error cases
    for unequal lengths and integer indexes."""
    # with datetimes/timedelta and tdi/dti
    tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
    dti = date_range('20130101', periods=3, name='bar')
    td = Timedelta('1 days')
    dt = Timestamp('20130101')
    result = tdi + dt
    expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
    tm.assert_index_equal(result, expected)
    result = dt + tdi
    expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
    tm.assert_index_equal(result, expected)
    result = td + tdi
    expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
    tm.assert_index_equal(result, expected)
    result = tdi + td
    expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
    tm.assert_index_equal(result, expected)
    # unequal length
    self.assertRaises(ValueError, lambda: tdi + dti[0:1])
    self.assertRaises(ValueError, lambda: tdi[0:1] + dti)
    # random indexes
    self.assertRaises(TypeError, lambda: tdi + Int64Index([1, 2, 3]))
    # this is a union!
    # self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
    result = tdi + dti  # name will be reset
    expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
    tm.assert_index_equal(result, expected)
    result = dti + tdi  # name will be reset
    expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
    tm.assert_index_equal(result, expected)
    result = dt + td
    expected = Timestamp('20130102')
    self.assertEqual(result, expected)
    result = td + dt
    expected = Timestamp('20130102')
    self.assertEqual(result, expected)
def test_comp_nat(self):
    """Comparisons against NaT: '==' is False, '!=' is True, ordering
    comparisons are False; same for the object-dtype variants."""
    left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
                              pd.Timedelta('3 days')])
    right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
    for l, r in [(left, right), (left.asobject, right.asobject)]:
        result = l == r
        expected = np.array([False, False, True])
        tm.assert_numpy_array_equal(result, expected)
        result = l != r
        expected = np.array([True, True, False])
        tm.assert_numpy_array_equal(result, expected)
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(l == pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT == r, expected)
        expected = np.array([True, True, True])
        tm.assert_numpy_array_equal(l != pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT != l, expected)
        expected = np.array([False, False, False])
        tm.assert_numpy_array_equal(l < pd.NaT, expected)
        tm.assert_numpy_array_equal(pd.NaT > l, expected)
def test_value_counts_unique(self):
    """GH 7735: value_counts() and unique(), including NaT handling via
    dropna=False."""
    idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
    # create repeated values, 'n'th element is repeated by n+1 times
    idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
    exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
    expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
    for obj in [idx, Series(idx)]:
        tm.assert_series_equal(obj.value_counts(), expected)
    expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
    tm.assert_index_equal(idx.unique(), expected)
    idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00',
                          '1 days 09:00:00', '1 days 08:00:00',
                          '1 days 08:00:00', pd.NaT])
    exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
    expected = Series([3, 2], index=exp_idx)
    for obj in [idx, Series(idx)]:
        tm.assert_series_equal(obj.value_counts(), expected)
    exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00',
                              pd.NaT])
    expected = Series([3, 2, 1], index=exp_idx)
    for obj in [idx, Series(idx)]:
        tm.assert_series_equal(obj.value_counts(dropna=False), expected)
    tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
    """GH 9512: membership tests work on indexes with duplicate values."""
    candidates = ([0, 1, 0], [0, 0, -1], [0, -1, -1],
                  ['00:01:00', '00:01:00', '00:02:00'],
                  ['00:01:00', '00:01:00', '00:00:01'])
    for values in candidates:
        idx = TimedeltaIndex(values)
        tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
    """GH 9680: accessing a missing attribute raises AttributeError."""
    index = pd.timedelta_range(start=0, periods=10, freq='1s')
    series = pd.Series(np.random.normal(size=10), index=index)
    self.assertNotIn('foo', series.__dict__.keys())
    self.assertRaises(AttributeError, lambda: series.foo)
def test_order(self):
    """GH 10295: sort_values() with and without a freq, and with
    return_indexer=True."""
    idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D',
                          name='idx')
    idx2 = TimedeltaIndex(
        ['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
    for idx in [idx1, idx2]:
        ordered = idx.sort_values()
        self.assert_index_equal(ordered, idx)
        self.assertEqual(ordered.freq, idx.freq)
        ordered = idx.sort_values(ascending=False)
        expected = idx[::-1]
        self.assert_index_equal(ordered, expected)
        self.assertEqual(ordered.freq, expected.freq)
        self.assertEqual(ordered.freq.n, -1)
        ordered, indexer = idx.sort_values(return_indexer=True)
        self.assert_index_equal(ordered, idx)
        self.assert_numpy_array_equal(indexer,
                                      np.array([0, 1, 2]),
                                      check_dtype=False)
        self.assertEqual(ordered.freq, idx.freq)
        ordered, indexer = idx.sort_values(return_indexer=True,
                                           ascending=False)
        self.assert_index_equal(ordered, idx[::-1])
        self.assertEqual(ordered.freq, expected.freq)
        self.assertEqual(ordered.freq.n, -1)
    idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
                           '2 hour ', '1 hour'], name='idx1')
    exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
                           '3 hour', '5 hour'], name='idx1')
    idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
                           '2 day', '1 day'], name='idx2')
    # TODO(wesm): unused?
    # exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
    # '3 day', '5 day'], name='idx2')
    # idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
    # '2 minute', pd.NaT], name='idx3')
    # exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
    # '5 minute'], name='idx3')
    # NOTE(review): the list below repeats (idx1, exp1) three times;
    # presumably the commented-out idx2/idx3 pairs were meant here.
    for idx, expected in [(idx1, exp1), (idx1, exp1), (idx1, exp1)]:
        ordered = idx.sort_values()
        self.assert_index_equal(ordered, expected)
        self.assertIsNone(ordered.freq)
        ordered = idx.sort_values(ascending=False)
        self.assert_index_equal(ordered, expected[::-1])
        self.assertIsNone(ordered.freq)
        ordered, indexer = idx.sort_values(return_indexer=True)
        self.assert_index_equal(ordered, expected)
        exp = np.array([0, 4, 3, 1, 2])
        self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
        self.assertIsNone(ordered.freq)
        ordered, indexer = idx.sort_values(return_indexer=True,
                                           ascending=False)
        self.assert_index_equal(ordered, expected[::-1])
        exp = np.array([2, 1, 3, 4, 0])
        self.assert_numpy_array_equal(indexer, exp, check_dtype=False)
        self.assertIsNone(ordered.freq)
def test_getitem(self):
    """Scalar and slice indexing preserve values and the inferred freq."""
    idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
    for idx in [idx1]:
        result = idx[0]
        self.assertEqual(result, pd.Timedelta('1 day'))
        result = idx[0:5]
        expected = pd.timedelta_range('1 day', '5 day', freq='D',
                                      name='idx')
        self.assert_index_equal(result, expected)
        self.assertEqual(result.freq, expected.freq)
        result = idx[0:10:2]
        expected = pd.timedelta_range('1 day', '9 day', freq='2D',
                                      name='idx')
        self.assert_index_equal(result, expected)
        self.assertEqual(result.freq, expected.freq)
        result = idx[-20:-5:3]
        expected = pd.timedelta_range('12 day', '24 day', freq='3D',
                                      name='idx')
        self.assert_index_equal(result, expected)
        self.assertEqual(result.freq, expected.freq)
        result = idx[4::-1]
        expected = TimedeltaIndex(['5 day', '4 day', '3 day',
                                   '2 day', '1 day'],
                                  freq='-1D', name='idx')
        self.assert_index_equal(result, expected)
        self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
    """GH 10115: drop_duplicates keeps freq on an already-unique index;
    appending duplicates resets freq to None."""
    idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
    result = idx.drop_duplicates()
    self.assert_index_equal(idx, result)
    self.assertEqual(idx.freq, result.freq)
    idx_dup = idx.append(idx)
    self.assertIsNone(idx_dup.freq)  # freq is reset
    result = idx_dup.drop_duplicates()
    self.assert_index_equal(idx, result)
    self.assertIsNone(result.freq)
def test_drop_duplicates(self):
    """Index/Series drop_duplicates parity for keep='first'/'last'/False."""
    # to check Index/Series compat
    base = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
    idx = base.append(base[:5])
    res = idx.drop_duplicates()
    tm.assert_index_equal(res, base)
    res = Series(idx).drop_duplicates()
    tm.assert_series_equal(res, Series(base))
    res = idx.drop_duplicates(keep='last')
    exp = base[5:].append(base[:5])
    tm.assert_index_equal(res, exp)
    res = Series(idx).drop_duplicates(keep='last')
    tm.assert_series_equal(res, Series(exp, index=np.arange(5, 36)))
    res = idx.drop_duplicates(keep=False)
    tm.assert_index_equal(res, base[5:])
    res = Series(idx).drop_duplicates(keep=False)
    tm.assert_series_equal(res, Series(base[5:], index=np.arange(5, 31)))
def test_take(self):
    """take() infers freq for evenly spaced indices, else drops it (GH 10295)."""
    # GH 10295
    tdi = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')

    # Single-element takes compare equal to the corresponding scalar.
    self.assertEqual(tdi.take([0]), pd.Timedelta('1 day'))
    self.assertEqual(tdi.take([-1]), pd.Timedelta('31 day'))

    # Evenly spaced indices: freq can be inferred (positive or negative step).
    freq_cases = [
        ([0, 1, 2],
         pd.timedelta_range('1 day', '3 day', freq='D', name='idx')),
        ([0, 2, 4],
         pd.timedelta_range('1 day', '5 day', freq='2D', name='idx')),
        ([7, 4, 1],
         pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx')),
    ]
    for indices, expected in freq_cases:
        result = tdi.take(indices)
        self.assert_index_equal(result, expected)
        self.assertEqual(result.freq, expected.freq)

    # Irregular spacing (including negative positions): freq is dropped.
    irregular_cases = [
        ([3, 2, 5], TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')),
        ([-3, 2, 5], TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')),
    ]
    for indices, expected in irregular_cases:
        result = tdi.take(indices)
        self.assert_index_equal(result, expected)
        self.assertIsNone(result.freq)
def test_take_invalid_kwargs(self):
    """take() rejects numpy-compat keyword arguments it does not support."""
    tdi = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
    indices = [1, 6, 5, 9, 10, 13, 15, 3]

    # Unknown keyword -> TypeError, mirroring numpy's error message.
    tm.assertRaisesRegexp(TypeError,
                          r"take\(\) got an unexpected keyword argument 'foo'",
                          tdi.take, indices, foo=2)
    # 'out' and 'mode' exist only for numpy signature compat and are rejected.
    tm.assertRaisesRegexp(ValueError, "the 'out' parameter is not supported",
                          tdi.take, indices, out=indices)
    tm.assertRaisesRegexp(ValueError, "the 'mode' parameter is not supported",
                          tdi.take, indices, mode='clip')
def test_infer_freq(self):
    """TimedeltaIndex(..., freq='infer') recovers the original freq (GH 11018)."""
    # GH 11018
    for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
        original = pd.timedelta_range('1', freq=freq, periods=10)
        # Rebuild from the raw int64 values and ask pandas to infer the freq.
        rebuilt = pd.TimedeltaIndex(original.asi8, freq='infer')
        tm.assert_index_equal(original, rebuilt)
        self.assertEqual(rebuilt.freq, freq)
def test_nat_new(self):
    """_nat_new returns an all-NaT index, or raw iNaT int64 values with box=False."""
    idx = pd.timedelta_range('1', freq='D', periods=5, name='x')

    # Boxed: a TimedeltaIndex of NaT that keeps the original name.
    tm.assert_index_equal(idx._nat_new(),
                          pd.TimedeltaIndex([pd.NaT] * 5, name='x'))
    # Unboxed: the underlying int64 NaT sentinel values.
    tm.assert_numpy_array_equal(idx._nat_new(box=False),
                                np.array([tslib.iNaT] * 5, dtype=np.int64))
def test_shift(self):
    """shift() by a freq moves every element; shift(0) and empty indexes are no-ops."""
    # GH 9903
    empty = pd.TimedeltaIndex([], name='xxx')
    tm.assert_index_equal(empty.shift(0, freq='H'), empty)
    tm.assert_index_equal(empty.shift(3, freq='H'), empty)

    idx = pd.TimedeltaIndex(['5 hours', '6 hours', '9 hours'], name='xxx')
    tm.assert_index_equal(idx.shift(0, freq='H'), idx)

    # Shift by whole hours in both directions.
    plus3h = pd.TimedeltaIndex(['8 hours', '9 hours', '12 hours'], name='xxx')
    tm.assert_index_equal(idx.shift(3, freq='H'), plus3h)
    minus3h = pd.TimedeltaIndex(['2 hours', '3 hours', '6 hours'], name='xxx')
    tm.assert_index_equal(idx.shift(-3, freq='H'), minus3h)

    # Shift by minutes ('T').
    tm.assert_index_equal(idx.shift(0, freq='T'), idx)
    plus3m = pd.TimedeltaIndex(['05:03:00', '06:03:00', '9:03:00'],
                               name='xxx')
    tm.assert_index_equal(idx.shift(3, freq='T'), plus3m)
    minus3m = pd.TimedeltaIndex(['04:57:00', '05:57:00', '8:57:00'],
                                name='xxx')
    tm.assert_index_equal(idx.shift(-3, freq='T'), minus3m)
def test_repeat(self):
    """Index.repeat and np.repeat agree, and the result never keeps a freq."""
    regular = pd.timedelta_range('1 days', periods=2, freq='D')
    expected = pd.TimedeltaIndex(['1 days', '1 days', '2 days', '2 days'])
    for result in [regular.repeat(2), np.repeat(regular, 2)]:
        tm.assert_index_equal(result, expected)
        self.assertIsNone(result.freq)

    # NaT entries are repeated like any other value.
    with_nat = TimedeltaIndex(['1 days', 'NaT', '3 days'])
    expected = TimedeltaIndex(['1 days', '1 days', '1 days',
                               'NaT', 'NaT', 'NaT',
                               '3 days', '3 days', '3 days'])
    for result in [with_nat.repeat(3), np.repeat(with_nat, 3)]:
        tm.assert_index_equal(result, expected)
        self.assertIsNone(result.freq)
def test_nat(self):
    """NaT bookkeeping: _na_value, _isnan, hasnans and _nan_idxs stay consistent."""
    # The NA sentinel is NaT, both on the type and on instances.
    self.assertIs(pd.TimedeltaIndex._na_value, pd.NaT)
    self.assertIs(pd.TimedeltaIndex([])._na_value, pd.NaT)

    # Without NaT: empty nan bookkeeping.
    clean = pd.TimedeltaIndex(['1 days', '2 days'])
    self.assertTrue(clean._can_hold_na)
    tm.assert_numpy_array_equal(clean._isnan, np.array([False, False]))
    self.assertFalse(clean.hasnans)
    tm.assert_numpy_array_equal(clean._nan_idxs,
                                np.array([], dtype=np.intp))

    # With one NaT: the mask and positional indices reflect it.
    dirty = pd.TimedeltaIndex(['1 days', 'NaT'])
    self.assertTrue(dirty._can_hold_na)
    tm.assert_numpy_array_equal(dirty._isnan, np.array([False, True]))
    self.assertTrue(dirty.hasnans)
    tm.assert_numpy_array_equal(dirty._nan_idxs,
                                np.array([1], dtype=np.intp))
def test_equals(self):
    """equals() is order-sensitive, dtype-tolerant, and False for non-Index types (GH 13107)."""
    # GH 13107
    idx = pd.TimedeltaIndex(['1 days', '2 days', 'NaT'])

    # An index equals itself, copies of itself, and its object-dtype view ...
    self.assertTrue(idx.equals(idx))
    self.assertTrue(idx.equals(idx.copy()))
    self.assertTrue(idx.equals(idx.asobject))
    self.assertTrue(idx.asobject.equals(idx))
    self.assertTrue(idx.asobject.equals(idx.asobject))
    # ... but never a plain list or a Series of the same values.
    self.assertFalse(idx.equals(list(idx)))
    self.assertFalse(idx.equals(pd.Series(idx)))

    # The same values in a different order are not equal.
    reordered = pd.TimedeltaIndex(['2 days', '1 days', 'NaT'])
    self.assertFalse(idx.equals(reordered))
    self.assertFalse(idx.equals(reordered.copy()))
    self.assertFalse(idx.equals(reordered.asobject))
    self.assertFalse(idx.asobject.equals(reordered))
    self.assertFalse(idx.asobject.equals(reordered.asobject))
    self.assertFalse(idx.equals(list(reordered)))
    self.assertFalse(idx.equals(pd.Series(reordered)))
class TestTimedeltas(tm.TestCase):
    """Scalar Timedelta behavior: arithmetic, NaT propagation, interop with
    numpy arrays / Series, and reductions over timedelta data.

    NOTE(review): these tests pin the behavior of the pandas version this file
    was written against (pre-1.0 API such as ``tm.TestCase``); several of the
    "invalid" cases (e.g. ``Timedelta // int``) are legal in later versions.
    """

    _multiprocess_can_split_ = True

    def test_ops(self):
        """Unary and binary arithmetic on a scalar Timedelta, incl. NaT results."""
        td = Timedelta(10, unit='d')
        self.assertEqual(-td, Timedelta(-10, unit='d'))
        self.assertEqual(+td, Timedelta(10, unit='d'))
        self.assertEqual(td - td, Timedelta(0, unit='ns'))
        # Any arithmetic with NaT propagates NaT (identity, not just equality).
        self.assertTrue((td - pd.NaT) is pd.NaT)
        self.assertEqual(td + td, Timedelta(20, unit='d'))
        self.assertTrue((td + pd.NaT) is pd.NaT)
        self.assertEqual(td * 2, Timedelta(20, unit='d'))
        self.assertTrue((td * pd.NaT) is pd.NaT)
        self.assertEqual(td / 2, Timedelta(5, unit='d'))
        self.assertEqual(abs(td), td)
        self.assertEqual(abs(-td), td)
        # Dividing two timedeltas yields a plain number.
        self.assertEqual(td / td, 1)
        self.assertTrue((td / pd.NaT) is np.nan)
        # invert
        self.assertEqual(-td, Timedelta('-10d'))
        self.assertEqual(td * -1, Timedelta('-10d'))
        self.assertEqual(-1 * td, Timedelta('-10d'))
        self.assertEqual(abs(-td), Timedelta('10d'))
        # invalid
        self.assertRaises(TypeError, lambda: Timedelta(11, unit='d') // 2)
        # invalid multiply with another timedelta
        self.assertRaises(TypeError, lambda: td * td)
        # can't operate with integers
        self.assertRaises(TypeError, lambda: td + 2)
        self.assertRaises(TypeError, lambda: td - 2)

    def test_ops_offsets(self):
        """Timedelta interoperates with pandas DateOffset objects."""
        td = Timedelta(10, unit='d')
        self.assertEqual(Timedelta(241, unit='h'), td + pd.offsets.Hour(1))
        self.assertEqual(Timedelta(241, unit='h'), pd.offsets.Hour(1) + td)
        # Division by an offset yields a plain ratio.
        self.assertEqual(240, td / pd.offsets.Hour(1))
        self.assertEqual(1 / 240.0, pd.offsets.Hour(1) / td)
        self.assertEqual(Timedelta(239, unit='h'), td - pd.offsets.Hour(1))
        self.assertEqual(Timedelta(-239, unit='h'), pd.offsets.Hour(1) - td)

    def test_ops_ndarray(self):
        """Arithmetic between a scalar Timedelta and numpy timedelta64/datetime64 arrays.

        The reflected (array-first) forms only work on numpy >= 1.8, hence the
        LooseVersion guards.
        """
        td = Timedelta('1 day')
        # timedelta, timedelta
        other = pd.to_timedelta(['1 day']).values
        expected = pd.to_timedelta(['2 days']).values
        self.assert_numpy_array_equal(td + other, expected)
        if LooseVersion(np.__version__) >= '1.8':
            self.assert_numpy_array_equal(other + td, expected)
        # Integer arrays are not valid operands.
        self.assertRaises(TypeError, lambda: td + np.array([1]))
        self.assertRaises(TypeError, lambda: np.array([1]) + td)
        expected = pd.to_timedelta(['0 days']).values
        self.assert_numpy_array_equal(td - other, expected)
        if LooseVersion(np.__version__) >= '1.8':
            self.assert_numpy_array_equal(-other + td, expected)
        self.assertRaises(TypeError, lambda: td - np.array([1]))
        self.assertRaises(TypeError, lambda: np.array([1]) - td)
        # Multiplication by a numeric array scales; by a timedelta array is invalid.
        expected = pd.to_timedelta(['2 days']).values
        self.assert_numpy_array_equal(td * np.array([2]), expected)
        self.assert_numpy_array_equal(np.array([2]) * td, expected)
        self.assertRaises(TypeError, lambda: td * other)
        self.assertRaises(TypeError, lambda: other * td)
        self.assert_numpy_array_equal(td / other,
                                      np.array([1], dtype=np.float64))
        if LooseVersion(np.__version__) >= '1.8':
            self.assert_numpy_array_equal(other / td,
                                          np.array([1], dtype=np.float64))
        # timedelta, datetime
        other = pd.to_datetime(['2000-01-01']).values
        expected = pd.to_datetime(['2000-01-02']).values
        self.assert_numpy_array_equal(td + other, expected)
        if LooseVersion(np.__version__) >= '1.8':
            self.assert_numpy_array_equal(other + td, expected)
        expected = pd.to_datetime(['1999-12-31']).values
        self.assert_numpy_array_equal(-td + other, expected)
        if LooseVersion(np.__version__) >= '1.8':
            self.assert_numpy_array_equal(other - td, expected)

    def test_ops_series(self):
        """Multiplying an integer Series by a Timedelta broadcasts (GH 8813)."""
        # regression test for GH8813
        td = Timedelta('1 day')
        other = pd.Series([1, 2])
        expected = pd.Series(pd.to_timedelta(['1 day', '2 days']))
        tm.assert_series_equal(expected, td * other)
        tm.assert_series_equal(expected, other * td)

    def test_ops_series_object(self):
        """Timedelta arithmetic works element-wise on object-dtype Series (GH 13043)."""
        # GH 13043
        s = pd.Series([pd.Timestamp('2015-01-01', tz='US/Eastern'),
                       pd.Timestamp('2015-01-01', tz='Asia/Tokyo')],
                      name='xxx')
        self.assertEqual(s.dtype, object)
        exp = pd.Series([pd.Timestamp('2015-01-02', tz='US/Eastern'),
                         pd.Timestamp('2015-01-02', tz='Asia/Tokyo')],
                        name='xxx')
        tm.assert_series_equal(s + pd.Timedelta('1 days'), exp)
        tm.assert_series_equal(pd.Timedelta('1 days') + s, exp)
        # object series & object series
        s2 = pd.Series([pd.Timestamp('2015-01-03', tz='US/Eastern'),
                        pd.Timestamp('2015-01-05', tz='Asia/Tokyo')],
                       name='xxx')
        self.assertEqual(s2.dtype, object)
        exp = pd.Series([pd.Timedelta('2 days'), pd.Timedelta('4 days')],
                        name='xxx')
        tm.assert_series_equal(s2 - s, exp)
        tm.assert_series_equal(s - s2, -exp)
        # Object-dtype Series of Timedeltas also support scalar addition.
        s = pd.Series([pd.Timedelta('01:00:00'), pd.Timedelta('02:00:00')],
                      name='xxx', dtype=object)
        self.assertEqual(s.dtype, object)
        exp = pd.Series([pd.Timedelta('01:30:00'), pd.Timedelta('02:30:00')],
                        name='xxx')
        tm.assert_series_equal(s + pd.Timedelta('00:30:00'), exp)
        tm.assert_series_equal(pd.Timedelta('00:30:00') + s, exp)

    def test_ops_notimplemented(self):
        """Operators against an unknown type return NotImplemented, not raise."""
        class Other:
            pass

        other = Other()
        td = Timedelta('1 day')
        self.assertTrue(td.__add__(other) is NotImplemented)
        self.assertTrue(td.__sub__(other) is NotImplemented)
        self.assertTrue(td.__truediv__(other) is NotImplemented)
        self.assertTrue(td.__mul__(other) is NotImplemented)
        self.assertTrue(td.__floordiv__(td) is NotImplemented)

    def test_ops_error_str(self):
        """Arithmetic and ordered/equality comparisons with strings raise TypeError (GH 13624)."""
        # GH 13624
        tdi = TimedeltaIndex(['1 day', '2 days'])
        for l, r in [(tdi, 'a'), ('a', tdi)]:
            with tm.assertRaises(TypeError):
                l + r
            with tm.assertRaises(TypeError):
                l > r
            with tm.assertRaises(TypeError):
                l == r
            with tm.assertRaises(TypeError):
                l != r

    def test_timedelta_ops(self):
        """Reductions over timedelta Series/frames return Timedelta (GH 4984, 6462, 10040)."""
        # GH4984
        # make sure ops return Timedelta
        s = Series([Timestamp('20130101') + timedelta(seconds=i * i)
                    for i in range(10)])
        td = s.diff()
        result = td.mean()
        expected = to_timedelta(timedelta(seconds=9))
        self.assertEqual(result, expected)
        result = td.to_frame().mean()
        self.assertEqual(result[0], expected)
        result = td.quantile(.1)
        expected = Timedelta(np.timedelta64(2600, 'ms'))
        self.assertEqual(result, expected)
        result = td.median()
        expected = to_timedelta('00:00:09')
        self.assertEqual(result, expected)
        result = td.to_frame().median()
        self.assertEqual(result[0], expected)
        # GH 6462
        # consistency in returned values for sum
        result = td.sum()
        expected = to_timedelta('00:01:21')
        self.assertEqual(result, expected)
        result = td.to_frame().sum()
        self.assertEqual(result[0], expected)
        # std
        result = td.std()
        expected = to_timedelta(Series(td.dropna().values).std())
        self.assertEqual(result, expected)
        result = td.to_frame().std()
        self.assertEqual(result[0], expected)
        # invalid ops
        for op in ['skew', 'kurt', 'sem', 'prod']:
            self.assertRaises(TypeError, getattr(td, op))
        # GH 10040
        # make sure NaT is properly handled by median()
        s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07')])
        self.assertEqual(s.diff().median(), timedelta(days=4))
        s = Series([Timestamp('2015-02-03'), Timestamp('2015-02-07'),
                    Timestamp('2015-02-15')])
        self.assertEqual(s.diff().median(), timedelta(days=6))

    def test_timedelta_ops_scalar(self):
        """Adding/subtracting equivalent timedelta representations gives identical results (GH 6808)."""
        # GH 6808
        base = pd.to_datetime('20130101 09:01:12.123456')
        expected_add = pd.to_datetime('20130101 09:01:22.123456')
        expected_sub = pd.to_datetime('20130101 09:01:02.123456')
        # Five equivalent spellings of "10 seconds".
        for offset in [pd.to_timedelta(10, unit='s'), timedelta(seconds=10),
                       np.timedelta64(10, 's'),
                       np.timedelta64(10000000000, 'ns'),
                       pd.offsets.Second(10)]:
            result = base + offset
            self.assertEqual(result, expected_add)
            result = base - offset
            self.assertEqual(result, expected_sub)
        base = pd.to_datetime('20130102 09:01:12.123456')
        expected_add = pd.to_datetime('20130103 09:01:22.123456')
        expected_sub = pd.to_datetime('20130101 09:01:02.123456')
        # Five equivalent spellings of "1 day, 10 seconds".
        for offset in [pd.to_timedelta('1 day, 00:00:10'),
                       pd.to_timedelta('1 days, 00:00:10'),
                       timedelta(days=1, seconds=10),
                       np.timedelta64(1, 'D') + np.timedelta64(10, 's'),
                       pd.offsets.Day() + pd.offsets.Second(10)]:
            result = base + offset
            self.assertEqual(result, expected_add)
            result = base - offset
            self.assertEqual(result, expected_sub)

    def test_timedelta_ops_with_missing_values(self):
        """NaT/NaN propagate through scalar, Series, and DataFrame timedelta arithmetic."""
        # setup
        s1 = pd.to_timedelta(Series(['00:00:01']))
        s2 = pd.to_timedelta(Series(['00:00:02']))
        sn = pd.to_timedelta(Series([pd.NaT]))
        df1 = DataFrame(['00:00:01']).apply(pd.to_timedelta)
        df2 = DataFrame(['00:00:02']).apply(pd.to_timedelta)
        dfn = DataFrame([pd.NaT]).apply(pd.to_timedelta)
        scalar1 = pd.to_timedelta('00:00:01')
        scalar2 = pd.to_timedelta('00:00:02')
        timedelta_NaT = pd.to_timedelta('NaT')
        NA = np.nan
        # scalar <-> scalar
        actual = scalar1 + scalar1
        self.assertEqual(actual, scalar2)
        actual = scalar2 - scalar1
        self.assertEqual(actual, scalar1)
        # Series <-> Series and Series <-> scalar
        actual = s1 + s1
        assert_series_equal(actual, s2)
        actual = s2 - s1
        assert_series_equal(actual, s1)
        actual = s1 + scalar1
        assert_series_equal(actual, s2)
        actual = scalar1 + s1
        assert_series_equal(actual, s2)
        actual = s2 - scalar1
        assert_series_equal(actual, s1)
        actual = -scalar1 + s2
        assert_series_equal(actual, s1)
        # Any NaT / NaN operand yields an all-NaT Series.
        actual = s1 + timedelta_NaT
        assert_series_equal(actual, sn)
        actual = timedelta_NaT + s1
        assert_series_equal(actual, sn)
        actual = s1 - timedelta_NaT
        assert_series_equal(actual, sn)
        actual = -timedelta_NaT + s1
        assert_series_equal(actual, sn)
        actual = s1 + NA
        assert_series_equal(actual, sn)
        actual = NA + s1
        assert_series_equal(actual, sn)
        actual = s1 - NA
        assert_series_equal(actual, sn)
        actual = -NA + s1
        assert_series_equal(actual, sn)
        actual = s1 + pd.NaT
        assert_series_equal(actual, sn)
        actual = s2 - pd.NaT
        assert_series_equal(actual, sn)
        # DataFrame combinations behave like the Series ones.
        actual = s1 + df1
        assert_frame_equal(actual, df2)
        actual = s2 - df1
        assert_frame_equal(actual, df1)
        actual = df1 + s1
        assert_frame_equal(actual, df2)
        actual = df2 - s1
        assert_frame_equal(actual, df1)
        actual = df1 + df1
        assert_frame_equal(actual, df2)
        actual = df2 - df1
        assert_frame_equal(actual, df1)
        actual = df1 + scalar1
        assert_frame_equal(actual, df2)
        actual = df2 - scalar1
        assert_frame_equal(actual, df1)
        actual = df1 + timedelta_NaT
        assert_frame_equal(actual, dfn)
        actual = df1 - timedelta_NaT
        assert_frame_equal(actual, dfn)
        actual = df1 + NA
        assert_frame_equal(actual, dfn)
        actual = df1 - NA
        assert_frame_equal(actual, dfn)
        actual = df1 + pd.NaT  # NaT is datetime, not timedelta
        assert_frame_equal(actual, dfn)
        actual = df1 - pd.NaT
        assert_frame_equal(actual, dfn)

    def test_compare_timedelta_series(self):
        """Comparing a timedelta Series against a scalar is element-wise (GH 5963)."""
        # regresssion test for GH5963
        s = pd.Series([timedelta(days=1), timedelta(days=2)])
        actual = s > timedelta(days=1)
        expected = pd.Series([False, True])
        tm.assert_series_equal(actual, expected)

    def test_compare_timedelta_ndarray(self):
        """Comparing a Timedelta against an object ndarray of Timedeltas (GH 11835)."""
        # GH11835
        periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]
        arr = np.array(periods)
        result = arr[0] > arr
        expected = np.array([False, False])
        self.assert_numpy_array_equal(result, expected)
class TestSlicing(tm.TestCase):
    """Arithmetic on timedelta_range objects and int64 overflow handling."""

    def test_tdi_ops_attributes(self):
        """Scalar arithmetic shifts/scales a timedelta_range and updates its freq."""
        rng = timedelta_range('2 days', periods=5, freq='2D', name='x')

        # Adding/subtracting an integer shifts by that many freq units.
        result = rng + 1
        exp = timedelta_range('4 days', periods=5, freq='2D', name='x')
        tm.assert_index_equal(result, exp)
        self.assertEqual(result.freq, '2D')

        result = rng - 2
        exp = timedelta_range('-2 days', periods=5, freq='2D', name='x')
        tm.assert_index_equal(result, exp)
        self.assertEqual(result.freq, '2D')

        # Multiplying/dividing scales both the values and the freq.
        result = rng * 2
        exp = timedelta_range('4 days', periods=5, freq='4D', name='x')
        tm.assert_index_equal(result, exp)
        self.assertEqual(result.freq, '4D')

        result = rng / 2
        exp = timedelta_range('1 days', periods=5, freq='D', name='x')
        tm.assert_index_equal(result, exp)
        self.assertEqual(result.freq, 'D')

        # Negation flips the sign of both values and freq.
        result = -rng
        exp = timedelta_range('-2 days', periods=5, freq='-2D', name='x')
        tm.assert_index_equal(result, exp)
        self.assertEqual(result.freq, '-2D')

        # abs() of a mixed-sign range cannot keep a freq.
        rng = pd.timedelta_range('-2 days', periods=5, freq='D', name='x')
        result = abs(rng)
        exp = TimedeltaIndex(['2 days', '1 days', '0 days', '1 days',
                              '2 days'], name='x')
        tm.assert_index_equal(result, exp)
        self.assertEqual(result.freq, None)

    def test_add_overflow(self):
        """Out-of-range timedelta + timestamp arithmetic raises OverflowError (GH 14068)."""
        # see gh-14068
        msg = "too (big|large) to convert"
        with tm.assertRaisesRegexp(OverflowError, msg):
            to_timedelta(106580, 'D') + Timestamp('2000')
        with tm.assertRaisesRegexp(OverflowError, msg):
            Timestamp('2000') + to_timedelta(106580, 'D')

        # iNaT + 1: the smallest int64 value that is NOT the NaT sentinel.
        _NaT = int(pd.NaT) + 1
        msg = "Overflow in int64 addition"
        with tm.assertRaisesRegexp(OverflowError, msg):
            to_timedelta([106580], 'D') + Timestamp('2000')
        with tm.assertRaisesRegexp(OverflowError, msg):
            Timestamp('2000') + to_timedelta([106580], 'D')
        with tm.assertRaisesRegexp(OverflowError, msg):
            to_timedelta([_NaT]) - Timedelta('1 days')
        with tm.assertRaisesRegexp(OverflowError, msg):
            to_timedelta(['5 days', _NaT]) - Timedelta('1 days')
        with tm.assertRaisesRegexp(OverflowError, msg):
            (to_timedelta([_NaT, '5 days', '1 hours']) -
             to_timedelta(['7 seconds', _NaT, '4 hours']))

        # These should not overflow!
        exp = TimedeltaIndex([pd.NaT])
        result = to_timedelta([pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex(['4 days', pd.NaT])
        result = to_timedelta(['5 days', pd.NaT]) - Timedelta('1 days')
        tm.assert_index_equal(result, exp)

        exp = TimedeltaIndex([pd.NaT, pd.NaT, '5 hours'])
        result = (to_timedelta([pd.NaT, '5 days', '1 hours']) +
                  to_timedelta(['7 seconds', pd.NaT, '4 hours']))
        tm.assert_index_equal(result, exp)
| StarcoderdataPython |
import csv
import hashlib
import logging
import os
import shutil
from datetime import datetime, timedelta
from ntpath import basename
from typing import Dict, List

from colorama import Fore
from dateutil import parser
from scp import SCPException

from sauronx import append_log_to_submission_log, stamp

from .alive import SauronxAlive, StatusValue
from .configuration import config
from .submission import CompletedRunInfo
def make_dirs_perm(pathname):
    """Create ``pathname`` (via the project's ``make_dirs`` helper) and chmod it to 0o777.

    The directory is made world-writable — presumably so other local accounts
    (e.g. the capture process) can write into it; TODO confirm.
    """
    make_dirs(pathname)
    os.chmod(pathname, 0o777)
class Results:
"""
Keeps track of frames and metadata in a SauronX submission.
Also uploads the data and notifies if requested.
The recommended usage is:
with Results() as results:
# let the camera write
results.upload()
"""
def __init__(
self, sx_alive: SauronxAlive, keep_raw_frames: bool = False, assume_clean: bool = False
) -> None:
self.coll = config.get_coll(sx_alive.submission_hash)
self.sx_alive = sx_alive
self.submission_hash = self.sx_alive.submission_hash
self.upload_params = config["connection.upload"]
self.notify_params = config["connection.notification"]
self.output_dir = self.coll.output_dir()
self.raw_frames_output_dir = self.coll.outer_frames_dir()
self.fps = config["sauron.hardware.camera.frames_per_second"]
plate_type_id = sx_alive.submission_obj.experiment.template_plate.plate_type.id
roi = config.camera_roi(plate_type_id)
self.frame_width = roi.x1 - roi.x0
self.frame_height = roi.y1 - roi.y0
# this gets uploaded to a 'pending' dir on valinor for notification
self.video_file = self.coll.avi_file()
self.qp = config["sauron.data.video.qp"]
self.x265_params = None
self.keyframe_interval = config["sauron.data.video.keyframe_interval"]
self.extra_x265_options = config["sauron.data.video.extra_x265_params"]
self.submission_log_file = append_log_to_submission_log(self.submission_hash)
self.file_hasher = FileHasher(algorithm=hashlib.sha256, extension=".sha256")
self.keep_raw_frames = keep_raw_frames
self.assume_clean = assume_clean
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
pass
def initialize_dir(self) -> None:
def change_perm(action, name, exc):
os.chmod(name, 0o777)
tmp_webcam_snap = ".{}.snap.tmp.jpg".format(self.submission_hash)
if os.path.exists(self.output_dir):
warn_user(
"Refusing to initialize output directory:",
self.output_dir,
"The path already exists.",
)
print(Fore.RED + "Overwrite? [yes/no]", end="")
if prompt_yes_no(""):
# a little weird, but copy webcam file first
# this is useful if --dark and --override are both set
if pexists(self.coll.webcam_snapshot()):
shutil.copy(self.coll.webcam_snapshot(), tmp_webcam_snap)
print("")
shutil.rmtree(self.output_dir, onerror=change_perm)
if os.path.exists(self.raw_frames_output_dir):
logging.info("Deleting raw frames at {}".format(self.raw_frames_output_dir))
if self.assume_clean:
logging.warning(
"Since --assume-clean is set, will not delete previous frames."
)
else:
shutil.rmtree(self.raw_frames_output_dir, onerror=change_perm)
else:
raise RefusingRequestException(
"The path {} already exists; refusing to proceed.".format(self.output_dir)
)
logging.info("Finished deleting")
make_dirs_perm(self.output_dir)
make_dirs_perm(self.raw_frames_output_dir)
make_dirs_perm(pjoin(self.output_dir, "timing"))
make_dirs_perm(pjoin(self.output_dir, "sensors"))
if pexists(tmp_webcam_snap) and os.path.getsize(tmp_webcam_snap) > 0:
logging.warning(
"Keeping webcam snapshot from previous run. It will be overwritten if a new capture is taken."
)
shutil.move(tmp_webcam_snap, self.coll.webcam_snapshot())
with open(pjoin(self.output_dir, "submission_hash.txt"), "w", encoding="utf8") as f:
f.write(self.sx_alive.submission_hash)
shutil.copyfile(os.environ["SAURONX_CONFIG_PATH"], self.coll.toml_file())
# copy os info, git hash, etc.
with open(self.coll.env_file(), "w", encoding="utf8") as f:
for key, value in config.environment_info.items():
f.write(key + "=" + value + "\n")
logging.info("Prepared output directory {}".format(self.output_dir))
if pexists(self.coll.log_file()):
logging.warning("The log file at {} already exists".format(self.coll.log_file()))
else:
os.symlink(self.submission_log_file, self.coll.log_file())
def finalize(self, run_info: CompletedRunInfo) -> None:
import valarpy.model as model
with open(self.coll.env_file(), "a", encoding="utf8") as f:
def write(key: str, value: str) -> None: # TODO strip control chars, etc, too
f.write(key + "=" + escape_for_properties(value) + "\n")
sub_obj = self.sx_alive.submission_obj
exp = sub_obj.experiment
# this is the critical start time, which is used to fill in runs.datetime_run
write("datetime_started", stamp(run_info.datetime_acclimation_started))
# and this end time is critical too
write("datetime_capture_finished", stamp(run_info.datetime_capture_finished))
write("original:experiment_id", exp.id)
write("original:experiment_name", exp.name)
write("original:description", sub_obj.description)
write("original:continuing_id", sub_obj.continuing_id)
write("original:datetime_plated", sub_obj.datetime_plated)
write("original:datetime_dosed", sub_obj.datetime_dosed)
write("original:notes", sub_obj.notes)
write("original:person_submitted", sub_obj.user_id)
write("original:battery", exp.battery_id)
write("original:template_plate", exp.template_plate_id)
for param in model.SubmissionParams.select(model.SubmissionParams).where(
model.SubmissionParams.submission == sub_obj.id
):
write("param:" + param.param_type + ":" + param.name, param.value)
write("datetime_environment_written", stamp(datetime.now()))
write("ffmpeg_version", self.find_ffmpeg_version())
try:
if os.path.exists(run_info.preview_path):
shutil.copyfile(run_info.preview_path, self.coll.preview_snapshot())
else:
logging.error("Missing preview snapshot {}".format(run_info.preview_path))
except:
logging.exception("Failed to copy preview frame at {}".format(run_info.preview_path))
success_to_user("Finalized run. All of the necessary data is now present.")
def find_ffmpeg_version(self):
out, err = wrap_cmd_call(["ffmpeg", "-version"])
return out
def upload(self) -> None:
try:
self.sx_alive.update_status(StatusValue.UPLOADING)
# noinspection PyTypeChecker
for i in range(0, int(self.upload_params["max_attempts"])):
try:
self._attempt_upload()
break
except (SCPException, ExternalCommandFailed) as e:
# noinspection PyTypeChecker
if i < int(self.upload_params["max_attempts"]) - 1:
logging.error("Upload failed (attempt {})".format(i), e)
warn_user("Upload failed (attempt {})".format(i))
else:
raise e
self.sx_alive.update_status(StatusValue.UPLOADED)
except Exception:
self.sx_alive.update_status(StatusValue.FAILED_DURING_UPLOAD)
raise
def copy_raw_to(self, path: str) -> None:
msg = "Moving raw frames to {}".format(path)
logging.warning(msg)
warn_user(msg, "==DO NOT INTERRUPT OR CANCEL==")
try:
shutil.move(self.raw_frames_output_dir, path)
except:
logging.fatal(
"Moving raw frames stopped partway. They are split between {} and {}".format(
self.raw_frames_output_dir, path
)
)
raise
warn_user(
"Moved raw frames to {}.".format(path, self.submission_hash),
"You will need to copy them back and",
"finish the submission with `sauronx continue {}`.",
)
def make_video(self) -> None:
try:
self._parse_frame_timing_file()
self._trim_frames()
logging.info(
"Making primary video with ffmpeg. At high framerates, this should take about as long as the duration of the run"
)
self._run_ffmpeg(self.raw_frames_output_dir, self.video_file)
logging.info("Compressing microphone recording.")
self._convert_microphone()
except Exception:
self.sx_alive.update_status(StatusValue.FAILED_DURING_POSTPROCESSING)
raise
def _run_ffmpeg(self, path, output_video):
make_dirs_perm(os.path.dirname(output_video))
if not pdir(path):
raise ValueError("Directory {} does not exist".format(path))
first_real_frame = self._first_frame(path)
# TODO change log level back to info
stream_cmd_call(
[
"ffmpeg",
"-loglevel",
"warning",
"-hwaccel",
"qsv",
"-f",
"image2",
"-nostdin",
"-c:v",
"rawvideo",
"-framerate",
str(self.fps),
"-video_size",
"{}x{}".format(self.frame_width, self.frame_height),
"-pixel_format",
"gray",
"-start_number",
str(first_real_frame),
"-i",
pjoin(path, "%08d.raw"),
"-y",
"-c:v",
"hevc_qsv",
"-g",
str(self.keyframe_interval),
"-q:v",
str(self.qp),
"-preset",
config["sauron.data.video.preset"],
"-load_plugin",
"2",
"-r",
str(self.fps),
output_video,
]
)
self.file_hasher.add_hash(output_video)
if not self.keep_raw_frames:
slow_delete(path, 3)
def _first_frame(self, path: str):
assert pexists(path), "The frame directory does not exist"
prop_files = list(sorted(scan_for_proper_files(path)))
if len(prop_files) == 0:
warn_user("No frames were found in {}".format(path))
raise ValueError("No frames were found in {}".format(path))
return basename(prop_files[0].rstrip(".raw"))
def _x265_params(self):
if self.x265_params is not None:
return self.x265_params
self.x265_params = "qp={}:keyint={}:min-keyint={}".format(
str(self.qp), self.keyframe_interval, self.keyframe_interval
)
if self.extra_x265_options is not None and len(self.extra_x265_options) > 0:
self.x265_params += ":" + ":".join(
[
str(k) + "=" + config.parse_path_format(v, self.output_dir)
for k, v in self.extra_x265_options.items()
]
)
logging.info(
"Encoding with framerate {} and x265 params '{}'".format(self.fps, self.x265_params)
)
return self.x265_params
def _convert_microphone(self) -> None:
microphone_input = self.coll.microphone_wav_path()
microphone_output = self.coll.microphone_flac_path()
if pexists(microphone_input):
logging.info("Compressing microphone data.")
stream_cmd_call(
[
"ffmpeg",
"-i",
microphone_input,
"-compression_level",
config["sauron.data.audio.flac_compression_level"],
"-c:a",
"flac",
microphone_output,
]
)
self.file_hasher.add_hash(microphone_output)
os.remove(microphone_input)
def _trim_frames(self) -> None:
"""Trims the frames that were captured before the first stimulus or after the last.
This works because StimulusTimeLog is defined to have its head be the start of the run and its tail be the end of the run, regardless of the stimuli.
To fulfill this definition, Board.run_scheduled_and_wait appends a stimulus for ID 0 at the beginning, and another at the end.
"""
if not self.coll.stimulus_timing_exists():
raise MissingResourceException(
"Cannot proceed: the stimulus timing log at {} is missing".format(
self.coll.stimulus_timing_log_file()
)
)
# first, check that we didn't fail partway through trimming
# this could be bad; let's fix that before making the trimming videos or the main video
def fixit(name: str):
output_video = pjoin(self.coll.trimmed_dir(), name + ".mkv")
hash_file = output_video + ".sha256"
tmpdir = pjoin(self.output_dir, name + "-trimmings")
if pexists(tmpdir) and not pexists(hash_file):
warn_user("Moving {} trimmed frames back".format(name))
logging.warning("Moving {} trimmed frames back".format(name))
for f in os.listdir(tmpdir):
if os.path.isdir(
self.coll.outer_frames_dir()
): # If directory exists, copy overwrites existing files in directory, move does not.
shutil.copy(pjoin(tmpdir, f), self.coll.outer_frames_dir())
os.remove(pjoin(tmpdir, f))
else:
shutil.move(pjoin(tmpdir, f), self.coll.outer_frames_dir())
elif pexists(tmpdir):
pass
fixit("start")
fixit("end")
stimulus_time_log = self._parse_stimulus_timing_file(self.coll.stimulus_timing_log_file())
if len(stimulus_time_log) == 0:
return # ok; nothing to remove
snapshots = [
parse_local_iso_datetime(snap) for snap in lines(self.coll.snapshot_timing_log_file())
]
first_stimulus = stimulus_time_log[0] # type: datetime
last_stimulus = stimulus_time_log[-1] # type: datetime
trimmings_start = []
trimmings_end = []
ran_off_end = 0
my_files = enumerate(sorted(scan_for_proper_files(self.coll.outer_frames_dir())))
for i, frame in my_files:
if i >= len(snapshots):
ran_off_end += 1
elif snapshots[i] < first_stimulus:
trimmings_start.append(frame)
elif snapshots[i] > last_stimulus:
trimmings_end.append(frame)
if ran_off_end > 0:
logging.error("{} stimuli occurred after the last snapshot!")
warn_user("{} stimuli occurred after the last snapshot!")
make_dirs_perm(self.coll.trimmed_dir())
self._make_trimmings_video(trimmings_start, "start")
self._make_trimmings_video(trimmings_end, "end")
def _make_trimmings_video(self, trimmings, name):
output_video = (
self.coll.trimmed_start_video() if name == "start" else self.coll.trimmed_end_video()
)
hash_file = output_video + ".sha256"
tmpdir = pjoin(self.output_dir, name + "-trimmings")
if os.path.exists(hash_file):
# TODO will fail if interrupted WHILE moving frames
logging.warning(
"{} trimmed frames video already exists at {}".format(name, output_video)
)
return
if os.path.exists(output_video) or os.path.exists(tmpdir):
warn_user(
"Making {} trimmed frames video failed partway through.".format(name),
"Deleting and remaking.",
)
logging.error(
"Making {} trimmed frames video failed partway through. Deleting and remaking.".format(
name
)
)
if len(trimmings) == 0:
logging.error("No {} trimmed frames".format(name))
warn_user(
"No {} trimmed frames.".format(name),
"Increase {} in {} to ensure that frames aren't lost.".format(
"padding_before_milliseconds"
if name == "start"
else "padding_after_milliseconds",
config.path,
),
)
return
# moving to another disk is a little slower
# but we don't want these deleted if sauronx fails
# tmpdir = pjoin(config.temp_dir(), name + '-trimmings')
logging.info("Trimmed {} {} frames. Compressing.".format(len(trimmings), name))
make_dirs_perm(tmpdir)
logging.debug("Created temp dir at {}".format(tmpdir))
for frame in trimmings:
if not pexists(pjoin(tmpdir, os.path.basename(frame))):
shutil.move(
frame, tmpdir
) # we HAVE to move here so they're not seen when making the primary video
self._run_ffmpeg(tmpdir, output_video)
def _parse_stimulus_timing_file(self, path: str) -> List[datetime]:
# datetime,id,intensity
return [parse_local_iso_datetime(line.split(",")[0]) for line in list(lines(path))[1:]]
def _parse_frame_timing_file(self):
    """Convert the raw snapshot timing log into absolute ISO-8601 timestamps.

    The raw log's first row is an absolute datetime; the following rows are a
    monotonically increasing hardware counter (presumably nanoseconds — the
    /1000 below converts to microseconds; TODO confirm units). Each counter
    delta is added to the running absolute time and one timestamp per frame
    is written to the processed timing log.
    """
    strftime_fmt = "%Y-%m-%dT%H:%M:%S.%f"
    raw_timing_file_path = self.coll.raw_snapshot_timing_log_file()
    proc_timing_file_path = self.coll.snapshot_timing_log_file()
    # The raw file is written by another (likely root-owned) process; make it
    # readable/writable before opening it.
    stream_cmd_call(["sudo", "chmod", "777", raw_timing_file_path])
    with open(raw_timing_file_path, "r+", encoding="utf8") as f:
        with open(proc_timing_file_path, "w", encoding="utf8") as o:
            reader = csv.reader(f)
            # First header row: absolute wall-clock start time.
            prev_datetime = parser.parse(next(reader)[0])
            # o.write("{}\n".format(prev_datetime.strftime(strftime_fmt))) # 20180920 added acquisition of an extra, unsaved frame before main acquisition loop
            # Second header row: initial raw counter value (converted to µs).
            prev_us = int(next(reader)[0]) / 1000
            for row in reader:
                curr_us = int(row[0]) / 1000
                diff_us = curr_us - prev_us
                # Advance the absolute time by the counter delta.
                new_datetime = prev_datetime + timedelta(microseconds=diff_us)
                o.write("{}\n".format(new_datetime.strftime(strftime_fmt)))
                prev_datetime = new_datetime
                prev_us = curr_us
def _attempt_upload(self) -> None:
    """Upload the run's output directory to the remote host via scp over SSH."""
    logging.info("Uploading data via SSH...")
    self._scp_files()
    logging.info("Finished uploading data via SSH")
def _scp_files(self) -> None:
    """Recursively scp the output directory to the configured remote host.

    After the copy, notify the alive-monitor that the run finished. The
    notification is deliberately best-effort: a failure is reported to the
    user and logged, but never fatal.
    """
    stream_cmd_call(
        [
            "scp",
            "-r",
            "-C",  # compress in transit
            os.path.normpath(self.output_dir),
            str(self.upload_params["ssh_username"])
            + "@"
            + str(self.upload_params["hostname"])
            + ":"
            + self._remote_upload_dir(),
        ]
    )
    try:
        self.sx_alive.notify_finished()
    except Exception:
        # Was a bare `except:`; narrowed to Exception so SystemExit and
        # KeyboardInterrupt still propagate.
        warn_user("Failed to notify")
        logging.error("Failed to notify")
def _remote_upload_dir(self) -> str:
remote_dir = self.upload_params["remote_upload_path"] # type: str
return (
str(self.upload_params["ssh_username"])
+ "@"
+ str(self.upload_params["hostname"])
+ ":"
+ remote_dir
if remote_dir.endswith(os.sep)
else remote_dir + os.sep
)
__all__ = ["Results"]
| StarcoderdataPython |
9610266 | # -*- coding: utf-8 -*-
import json
import boto3
import requests
from api.rdb.config import is_test, is_production
from api.rdb.utils.apigateway import get_api_url
from api.rdb.utils.service_framework import STATUS_OK
from ..utilities import invoke, get_lambda_test_data, get_lambda_fullpath
# noinspection PyUnusedLocal,PyTypeChecker
def test():
    """Exercise LambdaApiHealth either locally (direct invoke) or against the deployed API."""
    if is_test():
        # Local path: invoke the lambda handler in-process with canned event data.
        fullpath = get_lambda_fullpath("LambdaApiHealth")
        event = get_lambda_test_data(fullpath)
        # https://github.com/nficano/python-lambda
        # noinspection PyTypeChecker
        lambda_response = invoke(fullpath, event)
        assert lambda_response is not None
        assert lambda_response['statusCode'] == STATUS_OK
        # Body is a JSON document like {"health": "OK", ...}
        body = json.loads(lambda_response['body'])
        assert body['health'] == "OK"
    elif is_production():
        # Production path: hit the deployed API Gateway endpoint over HTTP.
        event = get_lambda_test_data(get_lambda_fullpath("LambdaApiHealth"))
        # http://docs.python-requests.org/en/master/user/quickstart
        url = get_api_url(boto3.client("apigateway"), 'API', '/v1', '/health')
        http_response = requests.get(url, params=event['queryStringParameters'])
        assert http_response is not None
        assert http_response.status_code == STATUS_OK
        body = http_response.json()
        assert body['health'] == "OK"
| StarcoderdataPython |
119091 | import unittest
class TestCanary(unittest.TestCase):
    """Sanity-check suite proving that the test runner itself works."""

    def test_add_one_two(self):
        """1 + 2 must equal 3; a failure here means the harness is broken."""
        expected = 3
        self.assertEqual(expected, 1 + 2)
| StarcoderdataPython |
1860161 | #!/usr/bin/env python3
import errno
import os.path
import re
import shlex
import stat
import string
import sys
from typing import IO
from typing import List
from typing import Optional
from typing import Set
from typing import Tuple
EXTENSIONS = {
'adoc': {'text', 'asciidoc'},
'asciidoc': {'text', 'asciidoc'},
'apinotes': {'text', 'apinotes'},
'asar': {'binary', 'asar'},
'avif': {'binary', 'image', 'avif'},
'bash': {'text', 'shell', 'bash'},
'bat': {'text', 'batch'},
'bib': {'text', 'bib'},
'bmp': {'binary', 'image', 'bitmap'},
'bz2': {'binary', 'bzip2'},
'c': {'text', 'c'},
'cc': {'text', 'c++'},
'cfg': {'text'},
'chs': {'text', 'c2hs'},
'clj': {'text', 'clojure'},
'cljc': {'text', 'clojure'},
'cljs': {'text', 'clojure', 'clojurescript'},
'cmake': {'text', 'cmake'},
'cnf': {'text'},
'coffee': {'text', 'coffee'},
'conf': {'text'},
'cpp': {'text', 'c++'},
'crt': {'text', 'pem'},
'cs': {'text', 'c#'},
'csproj': {'text', 'xml', 'csproj'},
'csh': {'text', 'shell', 'csh'},
'cson': {'text', 'cson'},
'css': {'text', 'css'},
'csv': {'text', 'csv'},
'cu': {'text', 'cuda'},
'cxx': {'text', 'c++'},
'dart': {'text', 'dart'},
'def': {'text', 'def'},
'dtd': {'text', 'dtd'},
'ear': {'binary', 'zip', 'jar'},
'edn': {'text', 'clojure', 'edn'},
'ejs': {'text', 'ejs'},
'eot': {'binary', 'eot'},
'eps': {'binary', 'eps'},
'erb': {'text', 'erb'},
'exe': {'binary'},
'eyaml': {'text', 'yaml'},
'feature': {'text', 'gherkin'},
'fish': {'text', 'fish'},
'gd': {'text', 'gdscript'},
'gemspec': {'text', 'ruby'},
'gif': {'binary', 'image', 'gif'},
'go': {'text', 'go'},
'gotmpl': {'text', 'gotmpl'},
'gpx': {'text', 'gpx', 'xml'},
'graphql': {'text', 'graphql'},
'gradle': {'text', 'groovy'},
'groovy': {'text', 'groovy'},
'gyb': {'text', 'gyb'},
'gyp': {'text', 'gyp', 'python'},
'gypi': {'text', 'gyp', 'python'},
'gz': {'binary', 'gzip'},
'h': {'text', 'header', 'c', 'c++'},
'hpp': {'text', 'header', 'c++'},
'hs': {'text', 'haskell'},
'htm': {'text', 'html'},
'html': {'text', 'html'},
'hxx': {'text', 'header', 'c++'},
'icns': {'binary', 'icns'},
'ico': {'binary', 'icon'},
'ics': {'text', 'icalendar'},
'idl': {'text', 'idl'},
'idr': {'text', 'idris'},
'inc': {'text', 'inc'},
'ini': {'text', 'ini'},
'inx': {'text', 'xml', 'inx'},
'ipynb': {'text', 'jupyter'},
'j2': {'text', 'jinja'},
'jade': {'text', 'jade'},
'jar': {'binary', 'zip', 'jar'},
'java': {'text', 'java'},
'jenkinsfile': {'text', 'groovy'},
'jinja': {'text', 'jinja'},
'jinja2': {'text', 'jinja'},
'jpeg': {'binary', 'image', 'jpeg'},
'jpg': {'binary', 'image', 'jpeg'},
'js': {'text', 'javascript'},
'json': {'text', 'json'},
'jsonnet': {'text', 'jsonnet'},
'jsx': {'text', 'jsx'},
'key': {'text', 'pem'},
'kml': {'text', 'kml', 'xml'},
'kt': {'text', 'kotlin'},
'lean': {'text', 'lean'},
'lektorproject': {'text', 'ini', 'lektorproject'},
'less': {'text', 'less'},
'lhs': {'text', 'literate-haskell'},
'libsonnet': {'text', 'jsonnet'},
'lidr': {'text', 'idris'},
'lr': {'text', 'lektor'},
'lua': {'text', 'lua'},
'm': {'text', 'c', 'objective-c'},
'manifest': {'text', 'manifest'},
'map': {'text', 'map'},
'markdown': {'text', 'markdown'},
'md': {'text', 'markdown'},
'mdx': {'text', 'mdx'},
'mib': {'text', 'mib'},
'mk': {'text', 'makefile'},
'ml': {'text', 'ocaml'},
'mli': {'text', 'ocaml'},
'mm': {'text', 'c++', 'objective-c++'},
'modulemap': {'text', 'modulemap'},
'myst': {'text', 'myst'},
'ngdoc': {'text', 'ngdoc'},
'nim': {'text', 'nim'},
'nims': {'text', 'nim'},
'nimble': {'text', 'nimble'},
'nix': {'text', 'nix'},
'otf': {'binary', 'otf'},
'p12': {'binary', 'p12'},
'patch': {'text', 'diff'},
'pdf': {'binary', 'pdf'},
'pem': {'text', 'pem'},
'php': {'text', 'php'},
'php4': {'text', 'php'},
'php5': {'text', 'php'},
'phtml': {'text', 'php'},
'pl': {'text', 'perl'},
'plantuml': {'text', 'plantuml'},
'pm': {'text', 'perl'},
'png': {'binary', 'image', 'png'},
'po': {'text', 'pofile'},
'pp': {'text', 'puppet'},
'properties': {'text', 'java-properties'},
'proto': {'text', 'proto'},
'puml': {'text', 'plantuml'},
'purs': {'text', 'purescript'},
'pxd': {'text', 'cython'},
'pxi': {'text', 'cython'},
'py': {'text', 'python'},
'pyi': {'text', 'pyi'},
'pyproj': {'text', 'xml', 'pyproj'},
'pyx': {'text', 'cython'},
'pyz': {'binary', 'pyz'},
'pyzw': {'binary', 'pyz'},
'r': {'text', 'r'},
'rake': {'text', 'ruby'},
'rb': {'text', 'ruby'},
'rs': {'text', 'rust'},
'rst': {'text', 'rst'},
's': {'text', 'asm'},
'sass': {'text', 'sass'},
'sbt': {'text', 'sbt', 'scala'},
'sc': {'text', 'scala'},
'scala': {'text', 'scala'},
'scm': {'text', 'scheme'},
'scss': {'text', 'scss'},
'sh': {'text', 'shell'},
'sln': {'text', 'sln'},
'sls': {'text', 'salt'},
'so': {'binary'},
'sol': {'text', 'solidity'},
'spec': {'text', 'spec'},
'sql': {'text', 'sql'},
'ss': {'text', 'scheme'},
'styl': {'text', 'stylus'},
'sv': {'text', 'system-verilog'},
'svg': {'text', 'image', 'svg', 'xml'},
'svh': {'text', 'system-verilog'},
'swf': {'binary', 'swf'},
'swift': {'text', 'swift'},
'swiftdeps': {'text', 'swiftdeps'},
'tac': {'text', 'twisted', 'python'},
'tar': {'binary', 'tar'},
'tex': {'text', 'tex'},
'tf': {'text', 'terraform'},
'tfvars': {'text', 'terraform'},
'tgz': {'binary', 'gzip'},
'thrift': {'text', 'thrift'},
'tiff': {'binary', 'image', 'tiff'},
'toml': {'text', 'toml'},
'ts': {'text', 'ts'},
'tsx': {'text', 'tsx'},
'ttf': {'binary', 'ttf'},
'twig': {'text', 'twig'},
'txsprofile': {'text', 'ini', 'txsprofile'},
'txt': {'text', 'plain-text'},
'v': {'text', 'verilog'},
'vb': {'text', 'vb'},
'vbproj': {'text', 'xml', 'vbproj'},
'vcxproj': {'text', 'xml', 'vcxproj'},
'vdx': {'text', 'vdx'},
'vh': {'text', 'verilog'},
'vhd': {'text', 'vhdl'},
'vim': {'text', 'vim'},
'vue': {'text', 'vue'},
'war': {'binary', 'zip', 'jar'},
'wav': {'binary', 'audio', 'wav'},
'webp': {'binary', 'image', 'webp'},
'whl': {'binary', 'wheel', 'zip'},
'wkt': {'text', 'wkt'},
'woff': {'binary', 'woff'},
'woff2': {'binary', 'woff2'},
'wsgi': {'text', 'wsgi', 'python'},
'xhtml': {'text', 'xml', 'html', 'xhtml'},
'xml': {'text', 'xml'},
'xq': {'text', 'xquery'},
'xql': {'text', 'xquery'},
'xqm': {'text', 'xquery'},
'xqu': {'text', 'xquery'},
'xquery': {'text', 'xquery'},
'xqy': {'text', 'xquery'},
'xsd': {'text', 'xml', 'xsd'},
'xsl': {'text', 'xml', 'xsl'},
'yaml': {'text', 'yaml'},
'yang': {'text', 'yang'},
'yin': {'text', 'xml', 'yin'},
'yml': {'text', 'yaml'},
'zig': {'text', 'zig'},
'zip': {'binary', 'zip'},
'zsh': {'text', 'shell', 'zsh'},
}
EXTENSIONS_NEED_BINARY_CHECK = {
'plist': {'plist'},
}
NAMES = {
'.babelrc': EXTENSIONS['json'] | {'babelrc'},
'.bash_aliases': EXTENSIONS['bash'],
'.bash_profile': EXTENSIONS['bash'],
'.bashrc': EXTENSIONS['bash'],
'.bowerrc': EXTENSIONS['json'] | {'bowerrc'},
'.browserslistrc': {'text', 'browserslistrc'},
'.clang-format': EXTENSIONS['yaml'],
'.clang-tidy': EXTENSIONS['yaml'],
'.codespellrc': EXTENSIONS['ini'] | {'codespellrc'},
'.coveragerc': EXTENSIONS['ini'] | {'coveragerc'},
'.cshrc': EXTENSIONS['csh'],
'.csslintrc': EXTENSIONS['json'] | {'csslintrc'},
'.dockerignore': {'text', 'dockerignore'},
'.editorconfig': {'text', 'editorconfig'},
'.flake8': EXTENSIONS['ini'] | {'flake8'},
'.gitattributes': {'text', 'gitattributes'},
'.gitconfig': EXTENSIONS['ini'] | {'gitconfig'},
'.gitignore': {'text', 'gitignore'},
'.gitlint': EXTENSIONS['ini'] | {'gitlint'},
'.gitmodules': {'text', 'gitmodules'},
'.hgrc': EXTENSIONS['ini'] | {'hgrc'},
'.jshintrc': EXTENSIONS['json'] | {'jshintrc'},
'.mailmap': {'text', 'mailmap'},
'.mention-bot': EXTENSIONS['json'] | {'mention-bot'},
'.npmignore': {'text', 'npmignore'},
'.pdbrc': EXTENSIONS['py'] | {'pdbrc'},
'.pypirc': EXTENSIONS['ini'] | {'pypirc'},
'.rstcheck.cfg': EXTENSIONS['ini'],
'.yamllint': EXTENSIONS['yaml'] | {'yamllint'},
'.zshrc': EXTENSIONS['zsh'],
'AUTHORS': EXTENSIONS['txt'],
'BUILD': {'text', 'bazel'},
'BUILD.bazel': {'text', 'bazel'},
'CMakeLists.txt': EXTENSIONS['cmake'],
'CHANGELOG': EXTENSIONS['txt'],
'CONTRIBUTING': EXTENSIONS['txt'],
'COPYING': EXTENSIONS['txt'],
'Dockerfile': {'text', 'dockerfile'},
'Gemfile': EXTENSIONS['rb'],
'Jenkinsfile': {'text', 'groovy'},
'LICENSE': EXTENSIONS['txt'],
'MAINTAINERS': EXTENSIONS['txt'],
'Makefile': EXTENSIONS['mk'],
'NEWS': EXTENSIONS['txt'],
'NOTICE': EXTENSIONS['txt'],
'PATENTS': EXTENSIONS['txt'],
'Pipfile': EXTENSIONS['toml'],
'Pipfile.lock': EXTENSIONS['json'],
'PKGBUILD': {'text', 'bash', 'pkgbuild', 'alpm'},
'pylintrc': EXTENSIONS['ini'] | {'pylintrc'},
'README': EXTENSIONS['txt'],
'Rakefile': EXTENSIONS['rb'],
'setup.cfg': EXTENSIONS['ini'],
}
INTERPRETERS = {
'ash': {'shell', 'ash'},
'awk': {'awk'},
'bash': {'shell', 'bash'},
'bats': {'shell', 'bash', 'bats'},
'csh': {'shell', 'csh'},
'dash': {'shell', 'dash'},
'expect': {'expect'},
'ksh': {'shell', 'ksh'},
'node': {'javascript'},
'nodejs': {'javascript'},
'perl': {'perl'},
'python': {'python'},
'python2': {'python', 'python2'},
'python3': {'python', 'python3'},
'ruby': {'ruby'},
'sh': {'shell', 'sh'},
'tcsh': {'shell', 'tcsh'},
'zsh': {'shell', 'zsh'},
}
printable = frozenset(string.printable)
DIRECTORY = 'directory'
SYMLINK = 'symlink'
SOCKET = 'socket'
FILE = 'file'
EXECUTABLE = 'executable'
NON_EXECUTABLE = 'non-executable'
TEXT = 'text'
BINARY = 'binary'
TYPE_TAGS = frozenset((DIRECTORY, FILE, SYMLINK, SOCKET))
MODE_TAGS = frozenset((EXECUTABLE, NON_EXECUTABLE))
ENCODING_TAGS = frozenset((BINARY, TEXT))
_ALL_TAGS = {*TYPE_TAGS, *MODE_TAGS, *ENCODING_TAGS}
_ALL_TAGS.update(*EXTENSIONS.values())
_ALL_TAGS.update(*EXTENSIONS_NEED_BINARY_CHECK.values())
_ALL_TAGS.update(*NAMES.values())
_ALL_TAGS.update(*INTERPRETERS.values())
ALL_TAGS = frozenset(_ALL_TAGS)
def tags_from_path(path: str) -> Set[str]:
    """Return the set of identify tags for the filesystem entry at *path*.

    The result always contains a type tag (directory/symlink/socket/file).
    For regular files it also contains a mode tag (executable/non-executable)
    and an encoding tag (text/binary), plus any language tags derived from
    the filename or — for executables with no filename match — the shebang.

    Raises ValueError if *path* does not exist.
    """
    try:
        sr = os.lstat(path)
    except (OSError, ValueError):  # same error-handling as `os.lexists()`
        raise ValueError(f'{path} does not exist.')
    mode = sr.st_mode
    # Non-regular-file types get a single type tag and nothing else.
    if stat.S_ISDIR(mode):
        return {DIRECTORY}
    if stat.S_ISLNK(mode):
        return {SYMLINK}
    if stat.S_ISSOCK(mode):
        return {SOCKET}
    tags = {FILE}
    executable = os.access(path, os.X_OK)
    if executable:
        tags.add(EXECUTABLE)
    else:
        tags.add(NON_EXECUTABLE)
    # As an optimization, if we're able to read tags from the filename, then we
    # don't peek at the file contents.
    t = tags_from_filename(os.path.basename(path))
    if len(t) > 0:
        tags.update(t)
    else:
        if executable:
            # No filename match: fall back to the shebang's interpreter.
            shebang = parse_shebang_from_file(path)
            if len(shebang) > 0:
                tags.update(tags_from_interpreter(shebang[0]))
    # some can be both binary and text
    # see NEED_BINARY_CHECK
    if not ENCODING_TAGS & tags:
        if file_is_text(path):
            tags.add(TEXT)
        else:
            tags.add(BINARY)
    # Invariants: regular files always carry exactly these tag families.
    assert ENCODING_TAGS & tags, tags
    assert MODE_TAGS & tags, tags
    return tags
def tags_from_filename(path: str) -> Set[str]:
    """Derive tags from a filename alone: well-known names first, then the extension."""
    _, filename = os.path.split(path)
    _, ext = os.path.splitext(filename)

    tags: Set[str] = set()
    # Allow e.g. "Dockerfile.xenial" to match "Dockerfile": try the full name,
    # then every dot-separated piece, stopping at the first hit.
    for candidate in [filename, *filename.split('.')]:
        if candidate in NAMES:
            tags.update(NAMES[candidate])
            break

    if ext:
        lowered = ext[1:].lower()
        if lowered in EXTENSIONS:
            tags.update(EXTENSIONS[lowered])
        elif lowered in EXTENSIONS_NEED_BINARY_CHECK:
            tags.update(EXTENSIONS_NEED_BINARY_CHECK[lowered])

    return tags
def tags_from_interpreter(interpreter: str) -> Set[str]:
    """Map a shebang interpreter (possibly a full path) to its language tags."""
    _, _, name = interpreter.rpartition('/')
    # Try "python3.5.2" => "python3.5" => "python3" until one matches.
    while name:
        found = INTERPRETERS.get(name)
        if found is not None:
            return found
        name, _, _ = name.rpartition('.')
    return set()
def is_text(bytesio: IO[bytes]) -> bool:
    """Return True when the first KB of the stream looks like text.

    This is roughly based on libmagic's binary/text detection:
    https://github.com/file/file/blob/df74b09b9027676088c797528edcaae5a9ce9ad0/src/encoding.c#L203-L228
    """
    # Bytes permitted in text: a few control chars, printable ASCII, and
    # everything >= 0x80 (to allow UTF-8 and latin-1 style encodings).
    allowed = bytearray([7, 8, 9, 10, 11, 12, 13, 27])
    allowed += bytearray(range(0x20, 0x7F))
    allowed += bytearray(range(0x80, 0x100))
    chunk = bytesio.read(1024)
    # translate(None, allowed) deletes every allowed byte; any leftover byte
    # marks the stream as binary.
    return len(chunk.translate(None, allowed)) == 0
def file_is_text(path: str) -> bool:
    """Open *path* in binary mode and apply the is_text heuristic to its first KB.

    Raises ValueError if *path* does not exist.
    """
    if not os.path.lexists(path):
        raise ValueError(f'{path} does not exist.')
    with open(path, 'rb') as stream:
        return is_text(stream)
def _shebang_split(line: str) -> List[str]:
try:
# shebangs aren't supposed to be quoted, though some tools such as
# setuptools will write them with quotes so we'll best-guess parse
# with shlex first
return shlex.split(line)
except ValueError:
# failing that, we'll do a more "traditional" shebang parsing which
# just involves splitting by whitespace
return line.split()
def _parse_nix_shebang(
    bytesio: IO[bytes],
    cmd: Tuple[str, ...],
) -> Tuple[str, ...]:
    """Resolve a `nix-shell` shebang to the real interpreter.

    nix-shell scripts carry extra ``#! nix-shell ... -i <interpreter>`` lines
    after the first shebang; the argument of the last ``-i`` flag seen wins.
    Returns *cmd* unchanged when no such flag is found, and stops early on a
    non-UTF-8 or non-printable-ASCII line.
    """
    while bytesio.read(2) == b'#!':
        next_line_b = bytesio.readline()
        try:
            next_line = next_line_b.decode('UTF-8')
        except UnicodeDecodeError:
            return cmd
        # Require only printable ASCII, like the primary shebang line.
        for c in next_line:
            if c not in printable:
                return cmd
        line_tokens = tuple(_shebang_split(next_line.strip()))
        # [:-1] because `-i` needs a following argument token.
        for i, token in enumerate(line_tokens[:-1]):
            if token != '-i':
                continue
            # the argument to -i flag
            cmd = (line_tokens[i + 1],)
    return cmd
def parse_shebang(bytesio: IO[bytes]) -> Tuple[str, ...]:
    """Parse the shebang from a file opened for reading binary.

    Returns the shebang's tokens as a tuple, or an empty tuple when the
    stream has no valid, printable-ASCII shebang line.
    """
    if bytesio.read(2) != b'#!':
        return ()
    first_line_b = bytesio.readline()
    try:
        first_line = first_line_b.decode('UTF-8')
    except UnicodeDecodeError:
        return ()
    # Require only printable ascii
    for c in first_line:
        if c not in printable:
            return ()
    cmd = tuple(_shebang_split(first_line.strip()))
    # `#!/usr/bin/env X ...` really means interpreter X.
    if cmd and cmd[0] == '/usr/bin/env':
        cmd = cmd[1:]
    # nix-shell hides the real interpreter in later `#! ... -i X` lines.
    if cmd == ('nix-shell',):
        return _parse_nix_shebang(bytesio, cmd)
    return cmd
def parse_shebang_from_file(path: str) -> Tuple[str, ...]:
    """Parse the shebang given a file path.

    Returns an empty tuple for non-executable files (their shebang, if any,
    is not meaningful). Raises ValueError if *path* does not exist.
    """
    if not os.path.lexists(path):
        raise ValueError(f'{path} does not exist.')
    if not os.access(path, os.X_OK):
        return ()
    try:
        with open(path, 'rb') as f:
            return parse_shebang(f)
    except OSError as e:
        if e.errno == errno.EINVAL:
            # EINVAL: reading is invalid for this kind of file — treat as
            # "no shebang" rather than failing.
            return ()
        else:
            raise
if __name__=='__main__':
print(tags_from_filename(sys.argv[1]))
| StarcoderdataPython |
8148835 | <gh_stars>0
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
import numpy as np
import matplotlib
def main():
    """Plot validation learning curves for several architecture groups.

    Each group reads per-run ``<name>.csv`` files (column 0: wall time in
    seconds; column 2: validation loss in nats), converts the loss to
    bits-per-character, and saves one EPS figure per group.
    """
    name_lists = []
    legend_lists = []
    fignames = []
    ylims = []
    # 4x512 and variants
    name_lists.append(['4x512',
                       '4x512_wn',
                       '4x512_rg',
                       '4x512_ln',
                       '4x591_gru'])
    legend_lists.append(['4x512 LSTM',
                         '4x512 LSTM WN',
                         '4x512 LSTM RG',
                         '4x512 LSTM LN',
                         '4x591 GRU'])
    fignames.append('exp_arch_lc_4x512')
    ylims.append([1.45, 1.85])
    # 3x1024 and variants
    name_lists.append(['3x1024',
                       '3x1024_wn',
                       '3x1024_rg'])
    legend_lists.append(['3x1024 LSTM',
                         '3x1024 LSTM WN',
                         '3x1024 LSTM RG'])
    fignames.append('exp_arch_lc_3x1024')
    ylims.append([1.45, 1.85])
    # 1x2048 vs 4x1024
    name_lists.append(['1x2700',
                       '4x1024_wn_rg',
                       '1x1024_rhn'])
    legend_lists.append(['1x2700 LSTM',
                         '4x1024 LSTM WN RG',
                         '1x1024 10-layer RHN'])
    fignames.append('exp_arch_lc_final')
    ylims.append([1.3, 1.7])

    # Backend must be selected before pyplot is imported.
    matplotlib.use('TkAgg')
    import matplotlib.pyplot as plt

    figsize = [7, 3.5]
    # enumerate replaces the previous zip(..., range(len(ylims))) idiom.
    for i, (names, legend, figname, ylim) in enumerate(
            zip(name_lists, legend_lists, fignames, ylims)):
        lc = {}  # learning curve data, keyed by run name
        for name in names:
            data = np.loadtxt(name + '.csv', delimiter=',')
            lc[name] = data[:-1]
        fig = plt.figure(figsize=figsize)
        # Stack the windows vertically on screen.
        plt.get_current_fig_manager().window.wm_geometry("+0+%d" % (350 * i))
        l_va = []
        for name in names:
            # x: seconds -> hours; y: nats -> bits (divide by ln 2).
            l_va += [lc[name][:, 0] / 3600., lc[name][:, 2] / np.log(2), '-']
        h_va = plt.plot(*l_va)
        plt.ylim(ylim)
        plt.xlabel('Training time (hour)')
        plt.ylabel('Validation set BPC')
        plt.legend(h_va, legend)
        plt.tight_layout()
        fig.savefig(figname + '.eps', format='eps')
        plt.get_current_fig_manager().window.attributes('-topmost', 1)
    plt.show()
if __name__ == '__main__':
main()
| StarcoderdataPython |
4894426 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Little example on how to use a recurrent neural network to predict a math function
Reference: https://www.datatechnotes.com/2018/12/rnn-example-with-keras-simplernn-in.html
'''
# from NumPyNet.layers.input_layer import Input_layer
from NumPyNet.layers.rnn_layer import RNN_layer
from NumPyNet.layers.connected_layer import Connected_layer
from NumPyNet.layers.cost_layer import Cost_layer
# from NumPyNet.layers.dropout_layer import Dropout_layer
from NumPyNet.network import Network
from NumPyNet.optimizer import RMSprop
from NumPyNet.metrics import mean_absolute_error
from NumPyNet.utils import data_to_timesteps
import numpy as np
import pylab as plt
__author__ = ['<NAME>', '<NAME>']
__email__ = ['<EMAIL>', '<EMAIL>']
np.random.seed(42)
# Demo: train a small recurrent network on a noisy sine wave and plot the
# prediction against the true signal.
if __name__ == '__main__':

    Npoints = 1000
    train_size = 800

    time = np.arange(0, Npoints)
    # Noisy sinusoid: sin(0.02*t) plus uniform noise in [0, 2).
    noisy_signal = np.sin(0.02 * time) + 2 * np.random.rand(Npoints)

    steps = 4
    window_size=steps

    # Sliding windows of length `steps` over the signal.
    X, _ = data_to_timesteps(noisy_signal, steps=steps)
    # Target: the first sample of the *next* window (last target repeated once
    # to keep lengths equal).
    y = np.concatenate([X[1:, 0, :], X[-1:, 0, :]], axis=0)

    # Reshape the data according to a 4D tensor
    num_samples, size, _ = X.shape

    # Sanity checks on the stride trick performed by data_to_timesteps.
    if size != steps:
        raise ValueError('Something went wrong with the stride trick!')

    if X.max() > noisy_signal.max() or X.min() < noisy_signal.min():
        raise ValueError('Something went wrong with the stride trick!')

    X = X.reshape(num_samples, 1, 1, size)

    X_train, X_test = X[:train_size, ...], X[train_size:train_size+180, ...]
    y_train, y_test = y[:train_size, ...], y[train_size:train_size+180, ...]

    batch = 20
    step = batch

    y_train = y_train.reshape(-1, 1, 1, 1)
    y_test = y_test.reshape(-1, 1, 1, 1)

    # Create the model and training
    model = Network(batch=batch, input_shape=X_train.shape[1:])

    model.add(RNN_layer(outputs=32, steps=step, activation='linear'))
    model.add(Connected_layer(outputs=8, activation='relu'))
    model.add(Connected_layer(outputs=1, activation='linear'))
    model.add(Cost_layer(cost_type='mse'))
    # keras standard arguments
    model.compile(optimizer=RMSprop(lr=0.001, epsilon=1e-7))#, metrics=[mean_absolute_error])

    print('*************************************')
    print('\n Total input dimension: {}'.format(X_train.shape), '\n')
    print('**************MODEL SUMMARY***********')

    model.summary()

    print('\n***********START TRAINING***********\n')

    # Fit the model on the training set
    model.fit(X=X_train, y=y_train.reshape(-1, 1, 1, 1), max_iter=10)

    print('\n***********START TESTING**************\n')

    # Test the prediction with timing
    loss, out = model.evaluate(X=X_test, truth=y_test, verbose=True)

    mae = mean_absolute_error(y_test, out)

    print('\n')
    print('Loss Score: {:.3f}'.format(loss))
    print('MAE Score: {:.3f}'.format(mae))

    # concatenate the prediction
    train_predicted = model.predict(X=X_train, verbose=False)
    test_predicted = model.predict(X=X_test, verbose=False)
    predicted = np.concatenate((train_predicted, test_predicted), axis=0)

    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(8, 6))

    ax.plot(time[:- window_size*2], noisy_signal[:- window_size*2], 'b-', alpha=.75, label='true noisy signal')
    ax.plot(time[:predicted.shape[0]], predicted[:, 0, 0, 0], '-', color='orange', alpha=1, label='predicted signal')
    # Dashed vertical line marks the train/test boundary.
    ax.vlines(time[train_predicted.shape[0]], noisy_signal.min(), noisy_signal.max(), colors='k', linestyle='dashed')
    ax.set_xlabel('Time', fontsize=14)
    ax.set_ylabel('Signal', fontsize=14)

    fig.legend(loc='upper right', fontsize=14)
    fig.tight_layout()
    plt.show()
| StarcoderdataPython |
1666717 | from piservices import PiService
import pios.recovery
class OsService(PiService):
    """OS-level maintenance service for a Raspberry Pi: packages, firmware,
    system info, logs, SD-card backup/restore, and SSH key management."""

    name = "os"
    # Base packages installed during deploy.
    apt_get_install = ['unzip', 'zip', 'curl', 'ntp', 'ntpdate', 'git-core', 'git', 'wget',
                       'ca-certificates', 'binutils', 'raspi-config', 'mc', 'vim', 'vim-nox',
                       'htop']
    managed_service = False
    commands = ['stop', 'reboot', 'update_firmware', 'log', 'backup', 'restore', 'add_ssh_key']

    def install(self):
        """Install the service and make `python` resolve to python2.7."""
        #PiService.install(self)
        if not self.is_local():
            self.deploy()
        self.sudo('update-alternatives --install /usr/bin/python python /usr/bin/python2.7 10')

    def stop(self):
        """Shut down the Pi."""
        self.sudo('shutdown -h now')

    def reboot(self):
        """Reboot the system."""
        self.sudo('reboot')

    def deploy(self, restart=False):
        """Run apt update/upgrade and clean on the Pi."""
        # NOTE(review): the incoming `restart` argument is ignored — the base
        # class is always called with restart=False; confirm this is intended.
        PiService.deploy(self, restart=False)
        self.run('export TERM=linux && sudo apt-get -y update')
        self.run('export TERM=linux && sudo apt-get -y dist-upgrade')
        self.sudo('apt-get -y autoremove')
        self.sudo('apt-get -y autoclean')

    def update_firmware(self):
        """Update the Pi firmware and reboot."""
        #download firmware update script
        self.sudo('wget http://goo.gl/1BOfJ -O /usr/bin/rpi-update && chmod +x /usr/bin/rpi-update')
        #(server usage: 240MB RAM / 16MB Video)
        self.sudo('rpi-update 240') #rpi-update 224 for desktop
        self.sudo('shutdown -r now')

    def info(self, extended=False):
        """Show basic system information; use pios:info,extended for more detail."""
        if not extended:
            self.sudo('uname -a && uptime')
            self.sudo('ifconfig -a | grep inet')
            self.sudo('lsusb')
            self.sudo('df -hT')
            return
        self.sudo('uname -a && uptime')
        self.run('echo "\nmem, cpu, dev \n============="')
        self.sudo('cat /proc/{meminfo,cpuinfo,devices} ')
        self.run('echo "\nfstab, disk free \n============="')
        self.sudo('cat /etc/fstab && df -hT')
        self.run('echo "\nusb \n============="')
        self.sudo('lsusb && lsusb -tv')
        self.run('echo "\nifconfig \n============="')
        self.sudo('ifconfig -a')

    def log(self, path='/var/log/*'):
        """Tail all logs, or a specific one with e.g. pios:log,/var/log/messages."""
        self.sudo('tail -f %s' % path)

    def backup(self, sd=None, file_name=None):
        """Store a bootable image from an SD device to ./images/ (run with sudo, local only)."""
        self.check_local_or_exit()
        pios.recovery.backup(sd, file_name)

    def restore(self, sd=None, file_name=None):
        """Write a bootable image from ./images/ to an SD device (local only)."""
        self.check_local_or_exit()
        pios.recovery.restore(sd, file_name)

    def add_ssh_key(self):
        """Prompt for a public key and append it to pi's authorized_keys."""
        key = self.ops.prompt("give public key content:")
        self.sudo('mkdir -p /home/pi/.ssh')
        self.sudo('touch /home/pi/.ssh/authorized_keys')
        self.sudo('echo "%s" | sudo tee -a /home/pi/.ssh/authorized_keys' % key)
        #sudo vim /etc/ssh/sshd_config
        #add: AuthorizedKeysFile %h/.ssh/authorized_keys
        #sudo service ssh restart
        self.sudo('service ssh restart')
instance = OsService()
| StarcoderdataPython |
1910025 | {
"targets": [
{
"target_name": "nlopt",
"sources": [ "nlopt.cc" ],
"include_dirs": [
"./nlopt-2.3/api/"
],
"dependencies": [
"./nlopt-2.3/nlopt.gyp:nloptlib"
]
}
]
}
| StarcoderdataPython |
257123 | <filename>back/to_do/migrations/0003_auto_20201013_0149.py
# Generated by Django 3.1.2 on 2020-10-13 01:49
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alter ToDo.form to a JSONField."""

    dependencies = [
        ('to_do', '0002_auto_20200924_2002'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todo',
            name='form',
            # NOTE(review): verbose_name receives a models.TextField instance
            # rather than a plain string — this mirrors whatever is in the
            # model definition (migrations are auto-generated); confirm
            # against to_do/models.py before touching.
            field=models.JSONField(verbose_name=models.TextField(default='[]')),
        ),
    ]
| StarcoderdataPython |
1632682 | """Automation classes for use with AppDaemon, Climate and Fan Mqtt automations.
.. codeauthor:: <NAME> <<EMAIL>>
"""
from typing import Dict, Optional
import appdaemon.plugins.hass.hassapi as hass
import ir_packets_manager
import little_helpers
class HandleMqttFan(hass.Hass):
    """Automation for converting and sending Fan MQTT messages as ir packets.

    Example:
      .. code-block:: yaml

        nursery_ceiling_fan_off:
          module: ir_packets_control
          class: HandleMqttFan
          topic: 'tomerfi_custom_fan/nursery/command'
          payload: 'off'
          ir_transmitter_ip: "192.168.0.170"
          fan_type: "hyundai_ceiling_fan"
          command: 'off'
          global_dependencies: ir_packets_manager
    """

    def initialize(self) -> None:
        """Initialize the automation, and register the listener."""
        self.ir_transmitter_ip = self.args["ir_transmitter_ip"]
        self.fan_type = self.args["fan_type"]
        self.command = self.args["command"]
        # With a truthy payload filter, only matching MQTT messages trigger;
        # otherwise every message on the topic does.
        if self.args["payload"]:
            self.fan_handler = self.listen_event(
                self.message_arrived,
                "MQTT_MESSAGE",
                topic=self.args["topic"],
                payload=self.args["payload"],
                namespace="mqtt",
            )
        else:
            self.fan_handler = self.listen_event(
                self.message_arrived,
                "MQTT_MESSAGE",
                topic=self.args["topic"],
                namespace="mqtt",
            )

    def terminate(self) -> None:
        """Cancel listener on termination."""
        self.cancel_listen_event(self.fan_handler)

    def message_arrived(
        self, event_name: str, data: Optional[Dict], kwargs: Optional[Dict]
    ) -> None:
        """Use for handling mqtt message events: transmit the configured IR packet."""
        self.call_service(
            "broadlink/send",
            host=self.ir_transmitter_ip,
            packet=ir_packets_manager.get_fan_packet(
                self.fan_type, self.command
            ),
        )
class HandleMqttACUnit(hass.Hass):
    """Automation for converting and sending AC MQTT messages as ir packets.

    Example:
      .. code-block:: yaml

        nursery_ac_automation:
          module: ir_packets_control
          class: HandleMqttACUnit
          climate_entity: climate.nursery_air_conditioner
          ir_transmitter_ip: "192.168.0.170"
          ac_type: "elco_small"
          default_mode_for_on: "cool"
          mode_command_topic: "tomerfi_custom_ac/nursery/mode"
          temperature_command_topic: "tomerfi_custom_ac/nursery/temperature"
          fan_mode_command_topic: "tomerfi_custom_ac/nursery/fan"
    """

    def initialize(self) -> None:
        """Read configuration and register one listener per command topic."""
        self.climate_entity = self.args["climate_entity"]
        self.default_mode_for_on = self.args["default_mode_for_on"]
        self.ir_transmitter_ip = self.args["ir_transmitter_ip"]
        self.ac_type = self.args["ac_type"]
        self.mode_command_topic = self.args["mode_command_topic"]
        self.temperature_command_topic = self.args["temperature_command_topic"]
        self.fan_mode_command_topic = self.args["fan_mode_command_topic"]
        self.mode_command_handler = self.listen_event(
            self.on_mode_command,
            "MQTT_MESSAGE",
            topic=self.mode_command_topic,
            namespace="mqtt",
        )
        self.temperature_command_handler = self.listen_event(
            self.on_temperature_command,
            "MQTT_MESSAGE",
            topic=self.temperature_command_topic,
            namespace="mqtt",
        )
        self.fan_mode_command_handler = self.listen_event(
            self.on_fan_mode_command,
            "MQTT_MESSAGE",
            topic=self.fan_mode_command_topic,
            namespace="mqtt",
        )

    def terminate(self) -> None:
        """Cancel listeners on termination."""
        self.cancel_listen_event(self.mode_command_handler)
        self.cancel_listen_event(self.temperature_command_handler)
        self.cancel_listen_event(self.fan_mode_command_handler)

    def on_mode_command(
        self, event_name: str, data: Dict, kwargs: Optional[Dict]
    ) -> None:
        """Handle mqtt mode changes: off-like payloads send 'off', others send
        the requested mode with the entity's current fan mode and temperature."""
        if data["payload"] in little_helpers.false_strings:
            # BUG FIX: previously this looked up data["off"], which raises
            # KeyError on the event dict; the intent is the literal "off"
            # command for the unit.
            packet = ir_packets_manager.get_ac_packet(self.ac_type, "off")
        else:
            entity_data = self.get_state(self.climate_entity, attribute="all")
            packet = ir_packets_manager.get_ac_packet(
                self.ac_type,
                data["payload"],
                entity_data["attributes"]["fan_mode"],
                entity_data["attributes"]["temperature"],
            )
        self._send_packet(packet)

    def on_temperature_command(
        self, event_name: str, data: Dict, kwargs: Optional[Dict]
    ) -> None:
        """Use for handling mqtt message events for ac temperature changes."""
        entity_data = self.get_state(self.climate_entity, attribute="all")
        self._send_packet(
            ir_packets_manager.get_ac_packet(
                self.ac_type,
                entity_data["state"],
                entity_data["attributes"]["fan_mode"],
                float(data["payload"]),
            )
        )

    def on_fan_mode_command(
        self, event_name: str, data: Dict, kwargs: Optional[Dict]
    ) -> None:
        """Use for handling mqtt message events for ac fan changes."""
        entity_data = self.get_state(self.climate_entity, attribute="all")
        self._send_packet(
            ir_packets_manager.get_ac_packet(
                self.ac_type,
                entity_data["state"],
                data["payload"],
                entity_data["attributes"]["temperature"],
            )
        )

    def _send_packet(self, packet: str) -> None:
        """Use as helper function to send ir packets with broadlink."""
        self.call_service(
            "broadlink/send", host=self.ir_transmitter_ip, packet=packet
        )
class TemperatureSensorToMqtt(hass.Hass):
    """Automation that republishes a sensor's state changes as MQTT messages.

    Example:
      .. code-block:: yaml

        nursery_temperature_sensor_to_mqtt:
          module: ir_packets_control
          class: TemperatureSensorToMqtt
          sensor_entity: sensor.nursery_broadlink_a1_temperature
          topic: "tomerfi_custom_ac/nursery/current_temperature"
    """

    def initialize(self) -> None:
        """Read configuration and start listening for sensor state changes."""
        self.sensor_entity = self.args["sensor_entity"]
        self.topic = self.args["topic"]
        self.state_handler = self.listen_state(
            self.state_changed, entity=self.sensor_entity
        )

    def terminate(self) -> None:
        """Cancel the state listener on termination."""
        self.cancel_listen_state(self.state_handler)

    def state_changed(
        self,
        entity: Optional[str],
        attribute: Optional[str],
        old: str,
        new: str,
        kwargs: Optional[Dict],
    ) -> None:
        """Publish the sensor's new state to the configured MQTT topic."""
        self.call_service("mqtt/publish", topic=self.topic, payload=new)
| StarcoderdataPython |
1833208 | <filename>coils/__init__.py<gh_stars>1-10
from .Averager import Averager
from .MapSock import MapSockServer, MapSockClient, MapSockRequest
from .RateTicker import RateTicker
from .Ring import Ring
from .SocketTalk import SocketTalk
from .SortedList import SortedList
from .String import string2time, time2string, time2levels, time2dir, time2fname
from .Timer import Timer
from .UserInput import user_input
from .version import __version__
| StarcoderdataPython |
6586493 | import pexpect
# Target device and credentials for the SSH session.
host = '172.16.31.10'
username = 'pyclass'
password = '<PASSWORD>'  # placeholder - the real credential was scrubbed from this copy
def send(ssh_connection, command, expect):
    """Send *command* over the pexpect session and wait for *expect*.

    Bug fix: the original body waited on the global ``prompt`` and silently
    ignored the ``expect`` parameter.  The parameter is now honoured; all
    existing call sites already pass the prompt, so behaviour is unchanged
    for them.
    """
    ssh_connection.sendline(command)
    ssh_connection.expect(expect)
# Open the SSH session and authenticate via the interactive password prompt.
ssh_connection = pexpect.spawn('ssh -l {} {}'.format(username, host))
ssh_connection.timeout = 3
ssh_connection.expect('assword:')  # matches both "Password:" and "password:"
ssh_connection.sendline(password)
ssh_connection.expect('#')  # wait for the privileged-exec prompt character
# Reconstruct the full prompt string to use as the expect pattern for later commands.
prompt = ssh_connection.before.strip() + ssh_connection.after
# Disable paging, then collect the interface summary.
send(ssh_connection, 'terminal length 0', prompt)
send(ssh_connection, 'show ip interface brief', prompt)
print ssh_connection.before  # NOTE: Python 2 print statement - this file targets py2
| StarcoderdataPython |
9791012 | <filename>lightning_run.py
import os
import argparse
import torch
import torch.optim as optim
import torch.nn as nn
from torchvision import datasets
from torchvision import transforms
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint
import utils
import models.ResNet as ResNet
class ClassificationPL(pl.LightningModule):
    """LightningModule that wraps an image classifier (CIFAR-100 pipeline).

    Bundles the model, loss, optimizers and dataloaders so that
    ``pl.Trainer`` can drive training, validation and test.
    """

    def __init__(self, net, configs):
        # net: the torch.nn.Module to train; configs: parsed config dict
        # (expects 'params', 'data' and 'optimizer' sections).
        super(ClassificationPL, self).__init__()
        self.model = net
        self.configs = configs
        # Only cross-entropy is supported; fail fast on anything else.
        if self.configs['params']['loss'] == 'CE':
            self.criterion = nn.CrossEntropyLoss(reduction='mean')
        else:
            raise ValueError('Unsupported loss: ' + str(self.configs['params']['loss']))

    def prepare_data(self) -> None:
        """Download the dataset; must not assign any instance state here."""
        # In DDP, this function is called once in total at GLOBAL_RANK=0
        # If, prepare_data_per_node is True, called once per node at LOCAL_RANK=0
        # # good
        # download_data()
        # tokenize()
        # etc()
        #
        # # bad
        # self.split = data_split
        # self.some_state = some_other_state()
        # download dataset
        if self.configs['data']['name'] == 'cifar100':
            datasets.CIFAR100(os.getcwd(), train=True, download=True)
            datasets.CIFAR100(os.getcwd(), train=False, download=True)
        else:
            raise NotImplementedError('Not supported dataset:' + str(self.configs['data']['name']))

    def setup(self, stage: str):
        """Build train/valid/test datasets for the given stage ('fit' or 'test')."""
        # Example
        # # step is either 'fit' or 'test' 90% of the time not relevant
        # data = load_data()
        # num_classes = data.classes
        # self.l1 = nn.Linear(..., num_classes)
        print('call setup: ' + str(stage))
        if stage == 'fit':
            if self.configs['data']['name'] == 'cifar100':
                self.train_data = datasets.CIFAR100(os.getcwd(), train=True, download=False,
                                                    transform=transforms.ToTensor())
                # Hold out 10% of the training set for validation.
                num_train = len(self.train_data)
                num_valid = int(num_train * 0.1)
                num_train = num_train - num_valid
                self.train_data, self.valid_data = torch.utils.data.random_split(self.train_data,
                                                                                 [num_train, num_valid])
            else:
                raise NotImplementedError('Not supported dataset:' + str(self.configs['data']['name']))
        elif stage == 'test':
            if self.configs['data']['name'] == 'cifar100':
                self.test_data = datasets.CIFAR100(os.getcwd(), train=False, download=False,
                                                   transform=transforms.ToTensor())
            else:
                raise NotImplementedError('Not supported dataset:' + str(self.configs['data']['name']))
        else:
            raise ValueError('Unexpected stage: ' + str(stage))

    def configure_optimizers(self):
        """Build the optimizer from the config; only trainable params are passed."""
        if self.configs['optimizer']['type'] == 'Adam':
            return optim.Adam(filter(lambda p: p.requires_grad, self.model.parameters()),
                              lr=float(self.configs['optimizer']['lr']))
        elif self.configs['optimizer']['type'] == 'SGD':
            return optim.SGD(filter(lambda p: p.requires_grad, self.model.parameters()),
                             lr=float(self.configs['optimizer']['lr']),
                             momentum=0.9, weight_decay=5e-4)
        else:
            raise ValueError()

    def train_dataloader(self):
        # Shuffled loader over the 90% training split.
        return torch.utils.data.DataLoader(
            self.train_data,
            batch_size=int(self.configs['params']['batch_size']),
            shuffle=True,
            num_workers=int(self.configs['params']['workers']),
            pin_memory=True
        )

    def val_dataloader(self):
        # Deterministic loader over the 10% validation split.
        return torch.utils.data.DataLoader(
            self.valid_data,
            batch_size=int(self.configs['params']['batch_size']),
            shuffle=False,
            num_workers=int(self.configs['params']['workers']),
            pin_memory=True
        )

    def test_dataloader(self):
        return torch.utils.data.DataLoader(
            self.test_data,
            batch_size=int(self.configs['params']['batch_size']),
            shuffle=False,
            num_workers=int(self.configs['params']['workers']),
            pin_memory=True
        )

    def forward(self, data):
        """Run the wrapped model on a batch of images."""
        return self.model(data)

    def training_step(self, batch, batch_idx):
        data, target = batch
        output = self.forward(data)
        loss = self.criterion(output, target)
        logs = {'training_loss': loss}
        return {'loss': loss, 'log': logs}

    def validation_step(self, batch, batch_idx):
        data, target = batch
        output = self.forward(data)
        loss = self.criterion(output, target)
        logs = {'valid_loss': loss}
        # pred = output.argmax(dim=1, keepdim=True)
        # accuracy = pred.eq(target.view_as(pred)).float().mean()
        return {'val_loss': loss, 'log': logs}

    def validation_epoch_end(self, outputs):
        # accuracy = sum(x['batch_val_acc'] for x in outputs) / len(outputs)
        # Average the per-batch validation losses for checkpoint monitoring.
        loss = sum(x['val_loss'] for x in outputs) / len(outputs)
        # Pass the accuracy to the `DictLogger` via the `'log'` key.
        return {'avg_val_loss': loss}

    def test_step(self, batch, batch_idx):
        data, target = batch
        output = self.forward(data)
        pred = output.argmax(dim=1, keepdim=True)
        accuracy = pred.eq(target.view_as(pred)).float().mean()
        return {'batch_test_acc': accuracy}

    def test_epoch_end(self, outputs):
        # Mean of per-batch accuracies (NOTE(review): unweighted - a smaller
        # final batch is over-weighted; confirm this is acceptable).
        accuracy = sum(x['batch_test_acc'] for x in outputs) / len(outputs)
        # Pass the accuracy to the `DictLogger` via the `'log'` key.
        return {'log': {'test_acc': accuracy}}
if __name__ == '__main__':
    # Parse the single required argument: path to the YAML/JSON config file.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, required=True, help='path of config file')
    opt = parser.parse_args()
    # Fix all RNG seeds for reproducibility.
    pl.seed_everything(1020)
    config = utils.get_config(opt.config)
    gpu_id = None
    # NOTE(review): `is True` requires the config value to be the bool True;
    # a truthy string such as "true" would be silently ignored - confirm the
    # config schema before relying on this.
    if config['gpu']['used'] is True and torch.cuda.is_available():
        gpu_id = int(config['gpu']['ind'])
    # Keep the single best checkpoint (lowest avg_val_loss) plus the last one.
    # NOTE(review): joining with '\\' looks Windows-specific; presumably meant
    # as a trailing separator for the checkpoint directory - verify.
    checkpoint_callback = ModelCheckpoint(filepath=os.path.join(config['exp']['path'], '\\'),
                                          monitor='avg_val_loss',
                                          verbose=False,
                                          save_last=True,
                                          save_top_k=1,
                                          mode='min',
                                          save_weights_only=False,
                                          period=1)
    # TensorBoard logs go next to the experiment directory under 'log'.
    logger = TensorBoardLogger(save_dir=os.path.dirname(config['exp']['path']),
                               name=os.path.basename(config['exp']['path']),
                               version='log')
    net = ResNet.resnet18(pretrained=True, progress=True, num_classes=100)
    model = ClassificationPL(net=net, configs=config)
    trainer = pl.Trainer(fast_dev_run=False,
                         max_epochs=2,
                         precision=32,
                         check_val_every_n_epoch=1,
                         distributed_backend=None,
                         benchmark=True,
                         gpus=gpu_id,
                         limit_test_batches=1.0,
                         limit_val_batches=1.0,
                         log_save_interval=1,
                         row_log_interval=1,
                         logger=logger,
                         checkpoint_callback=checkpoint_callback
                         )
    trainer.fit(model=model)
    trainer.test(model=model)
| StarcoderdataPython |
157666 | # Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_linear_layer, build_transformer
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as
from .normed_predictor import NormedConv2d, NormedLinear
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, Transformer)
# Explicit public API of this package (what `from ... import *` exposes).
__all__ = [
    'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
    'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
    'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
    'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
    'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
    'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer'
]
| StarcoderdataPython |
3410483 | from DirectVOLayer import DirectVO
from networks import VggDepthEstimator, PoseNet, PoseExpNet
from ImagePyramid import ImagePyramidLayer
import torch.nn as nn
import torch
from torch.autograd import Variable
import numpy as np
import itertools
from timeit import default_timer as timer
class FlipLR(nn.Module):
    """Mirror a tensor along its width dimension (left-right flip).

    The reversed index order is precomputed once and stored as a buffer so
    it follows the module across devices.
    """

    def __init__(self, imW, dim_w):
        super(FlipLR, self).__init__()
        # Indices imW-1, imW-2, ..., 0 select the columns in reverse order.
        reversed_idx = torch.arange(imW - 1, -1, -1).long()
        self.register_buffer('inv_indices', reversed_idx)
        self.dim_w = dim_w

    def forward(self, input):
        """Return *input* mirrored along dimension ``dim_w``."""
        flip_order = Variable(self.inv_indices)
        return input.index_select(self.dim_w, flip_order)
class SfMLearner(nn.Module):
    """Structure-from-Motion learner: wraps SfMKernel in nn.DataParallel.

    Exposes loss computation, checkpoint save/load and parameter access for
    the joint depth + pose networks.

    NOTE(review): `img_size=[128, 416]` and `gpu_ids=[0]` are mutable default
    arguments - harmless as long as callers never mutate them, but worth
    replacing with tuples.
    """

    def __init__(self, img_size=[128, 416], ref_frame_idx=1,
                 lambda_S=.5, lambda_E=0.01, use_ssim=True, smooth_term = 'lap',
                 use_expl_mask=False, gpu_ids=[0]):
        super(SfMLearner, self).__init__()
        # The actual computation lives in SfMKernel; DataParallel spreads the
        # per-bundle work across the listed GPUs.
        self.sfmkernel = nn.DataParallel(SfMKernel(img_size, smooth_term = smooth_term, use_expl_mask=use_expl_mask),
                                         device_ids=gpu_ids)
        self.ref_frame_idx = ref_frame_idx  # index of the reference frame in each bundle
        self.lambda_S = lambda_S  # smoothness loss weight
        self.lambda_E = lambda_E  # explainability-mask regularization weight
        self.use_ssim = use_ssim
        self.use_expl_mask = use_expl_mask

    def forward(self, frames, camparams, max_lk_iter_num=10):
        """Return (total, photometric, smoothness) losses plus reference
        frame, its inverse depth and the explainability mask (or None)."""
        cost, photometric_cost, smoothness_cost, ref_frame, ref_inv_depth, ref_expl_mask \
            = self.sfmkernel.forward(frames, camparams, self.ref_frame_idx,
                                     self.lambda_S, self.lambda_E, use_ssim=self.use_ssim)
        # .mean() collapses the per-GPU partial losses from DataParallel.
        return cost.mean(), photometric_cost.mean(), smoothness_cost.mean(), ref_frame, ref_inv_depth, ref_expl_mask

    def save_model(self, file_path):
        """Save depth and pose network weights under *file_path* prefixes."""
        # Move to CPU first so the checkpoint is device-independent, then
        # move the module back to GPU afterwards.
        torch.save(self.cpu().sfmkernel.module.depth_net.state_dict(),
                   file_path+'_depth_net.pth')
        torch.save(self.sfmkernel.module.pose_net.state_dict(),
                   file_path+'_pose_net.pth')
        self.cuda()

    def load_model(self, file_path):
        """Load depth and pose network weights saved by :meth:`save_model`."""
        self.sfmkernel.module.depth_net.load_state_dict(torch.load(file_path+'_depth_net.pth'))
        self.sfmkernel.module.pose_net.load_state_dict(torch.load(file_path+'_pose_net.pth'))

    def init_weights(self):
        # Only the depth net has an explicit weight initializer here.
        self.sfmkernel.module.depth_net.init_weights()

    def get_parameters(self):
        """Chained iterator over depth-net and pose-net parameters (for optimizers)."""
        return itertools.chain(self.sfmkernel.module.depth_net.parameters(),
                               self.sfmkernel.module.pose_net.parameters())
class SfMKernel(nn.Module):
    """
    only support single training isinstance

    Core SfM loss computation: predicts inverse depth and relative poses for
    a bundle of frames, then combines a photometric warping loss, an
    image-aware smoothness loss and (optionally) an explainability-mask
    regularizer.
    """

    def __init__(self, img_size=[128, 416], smooth_term = 'lap', use_expl_mask=False):
        super(SfMKernel, self).__init__()
        self.img_size = img_size
        # Horizontal flip used for data augmentation (dim 3 = width).
        self.fliplr_func = FlipLR(imW=img_size[1], dim_w=3)
        # Direct visual odometry layer that performs the differentiable warping.
        self.vo = DirectVO(imH=img_size[0], imW=img_size[1], pyramid_layer_num=4)
        self.depth_net = VggDepthEstimator(img_size)
        # PoseExpNet additionally predicts per-pixel explainability masks.
        if use_expl_mask:
            self.pose_net = PoseExpNet(3)
        else:
            self.pose_net = PoseNet(3)
        self.pyramid_func = ImagePyramidLayer(chan=1, pyramid_layer_num=4)
        self.smooth_term = smooth_term
        self.use_expl_mask = use_expl_mask

    def forward(self, frames, camparams, ref_frame_idx, lambda_S=.5, lambda_E=.01, do_data_augment=True, use_ssim=True):
        """Compute the combined loss for one bundle.

        frames: (1, bundle, C, H, W) tensor - batch size must be 1.
        camparams: flattened intrinsics; indices 0/2/4/5 are fx/cx/fy/cy.
        Returns (cost, photometric, smoothness, ref frame, ref inverse depth,
        explainability mask or None).
        """
        assert(frames.size(0) == 1 and frames.dim() == 5)
        frames = frames.squeeze(0)
        camparams = camparams.squeeze(0).data
        # Random horizontal flip; cx must be mirrored to stay consistent.
        if do_data_augment:
            if np.random.rand()>.5:
                frames = self.fliplr_func(frames)
                camparams[2] = self.img_size[1] - camparams[2]
        bundle_size = frames.size(0)
        # All frames except the reference one are treated as source frames.
        src_frame_idx = tuple(range(0,ref_frame_idx)) + tuple(range(ref_frame_idx+1,bundle_size))
        frames_pyramid = self.vo.pyramid_func(frames)
        ref_frame_pyramid = [frame[ref_frame_idx, :, :, :] for frame in frames_pyramid]
        src_frames_pyramid = [frame[src_frame_idx, :, :, :] for frame in frames_pyramid]
        self.vo.setCamera(fx=camparams[0], cx=camparams[2],
                          fy=camparams[4], cy=camparams[5])
        self.vo.init_xy_pyramid(ref_frame_pyramid)
        # Pose network consumes all frames stacked along channels, normalized
        # from [0, 255] to roughly [-1, 1].
        if self.use_expl_mask:
            p, expl_mask_pyramid = self.pose_net.forward((frames.view(1, -1, frames.size(2), frames.size(3))-127) / 127)
            # Regularizer pushes masks toward 1 so they are not trivially zero.
            expl_mask_reg_cost = 0
            for mask in expl_mask_pyramid:
                expl_mask_reg_cost += mask.mean()
            ref_expl_mask_pyramid = [mask.squeeze(0)[ref_frame_idx, ...] for mask in expl_mask_pyramid]
            src_expl_mask_pyramid = [mask.squeeze(0)[src_frame_idx, ...] for mask in expl_mask_pyramid]
            expl_mask = ref_expl_mask_pyramid[0]
        else:
            p = self.pose_net.forward((frames.view(1, -1, frames.size(2), frames.size(3))-127) / 127)
            ref_expl_mask_pyramid = None
            src_expl_mask_pyramid = None
            expl_mask_reg_cost = 0
            expl_mask = None
        # First 3 pose components are the rotation twist, last 3 the translation.
        rot_mat_batch = self.vo.twist2mat_batch_func(p[0,:,0:3])
        trans_batch = p[0,:,3:6]
        inv_depth_pyramid = self.depth_net.forward((frames-127)/127)
        inv_depth_mean_ten = inv_depth_pyramid[0].mean()*0.1 #uncommment this to use normalization
        # normalize
        #trans_batch = trans_batch*inv_depth_mean_ten
        # Normalize inverse depth by its (scaled) mean to remove scale ambiguity.
        inv_depth_norm_pyramid = [depth/inv_depth_mean_ten for depth in inv_depth_pyramid]
        ref_inv_depth_pyramid = [depth[ref_frame_idx, :, :] for depth in inv_depth_norm_pyramid]
        src_inv_depth_pyramid = [depth[src_frame_idx, :, :] for depth in inv_depth_norm_pyramid]
        photometric_cost = self.vo.compute_phtometric_loss(
            ref_frame_pyramid,
            src_frames_pyramid,
            ref_inv_depth_pyramid,
            src_inv_depth_pyramid,
            rot_mat_batch, trans_batch,
            levels=[0,1,2,3], use_ssim=use_ssim,
            ref_expl_mask_pyramid=ref_expl_mask_pyramid,
            src_expl_mask_pyramid=src_expl_mask_pyramid)
        # compute smoothness smoothness loss
        # instead of directly compute the loss on the finest level, it's evaluated on the downsamples.
        inv_depth0_pyramid = self.pyramid_func(inv_depth_norm_pyramid[0], do_detach=False)
        smoothness_cost = self.vo.multi_scale_image_aware_smoothness_cost(inv_depth0_pyramid, frames_pyramid, levels=[2,3], type=self.smooth_term) \
            + self.vo.multi_scale_image_aware_smoothness_cost(inv_depth_norm_pyramid, frames_pyramid, levels=[2,3], type=self.smooth_term)
        cost = photometric_cost + lambda_S*smoothness_cost - lambda_E*expl_mask_reg_cost
        # Inverse depth is de-normalized back to the network's original scale.
        return cost, photometric_cost, smoothness_cost, ref_frame_pyramid[0], ref_inv_depth_pyramid[0]*inv_depth_mean_ten, expl_mask
| StarcoderdataPython |
3358918 | <filename>source/suspension/package/WCRT.py
import math
from functions import *
def WCRT(CS, Tn, HPTasks):
    """Worst-case response time of a task via fixed-point iteration.

    CS is the task's own computation demand, Tn the bound at which iteration
    is abandoned, and HPTasks the higher-priority tasks (dicts with 'period'
    and 'execution') whose interference is accumulated with Workload_w_C.
    Returns the first fixed point, or the current estimate once it exceeds Tn.
    """
    response = 0
    while response <= Tn:
        interference = 0
        for task in HPTasks:
            interference += Workload_w_C(task['period'], task['execution'],
                                         task['period'], response)
        if interference + CS > response:
            response = interference + CS
        else:
            return response
    return response
| StarcoderdataPython |
9752721 | <reponame>jacebrowning/slackoff
# pylint: disable=redefined-outer-name,unused-variable,expression-not-assigned,singleton-comparison
from slackoff import slack
def describe_signout():
    """pytest-describe group covering slack.signout."""
    def it_indicates_success(expect):
        # Presumably the `expect` fixture turns the `==` comparison into an
        # assertion: signing out of the "Foobar" workspace should report False.
        expect(slack.signout("Foobar")) == False
| StarcoderdataPython |
9760184 | <filename>main.py
# import "packages" from flask
import json
# import app as app
from flask import render_template, redirect, request, url_for, send_from_directory
from flask_login import login_required
from __init__ import app, login_manager
from cruddy.app_crud import app_crud
from cruddy.app_crud_api import app_crud_api
from cruddy.login import login, logout, authorize
# from uploady.app_upload import app_upload
from notey.app_notes import app_notes
from events.app_events import app_events
# Attach each feature's blueprint so its routes are served by the app.
# app.register_blueprint(app_upload)
app.register_blueprint(app_crud)
app.register_blueprint(app_crud_api)
app.register_blueprint(app_notes)
app.register_blueprint(app_events)
# create a Flask instance
# connects default URL to render index.html
@app.route('/logout/', methods=["GET", "POST"])
@login_required
def main_logout():
    """Log the current user out and return to the landing page."""
    logout()
    return redirect(url_for('index'))
@login_manager.unauthorized_handler
def unauthorized():
    """Redirect unauthorized users to Login page."""
    # Remember the endpoint the user was heading to so login can send them back.
    app.config['NEXT_PAGE'] = request.endpoint
    return redirect(url_for('main_login'))
# Login page: authenticate and route to the appropriate CRUD view.
@app.route('/login/', methods=["GET", "POST"])
def main_login():
    """Render the login form and authenticate submitted credentials."""
    # obtains form inputs and fulfills login requirements
    if request.form:
        email = request.form.get("email")
        password = request.form.get("password")
        if login(email, password):
            # NOTE(review): hard-coded admin credentials (values scrubbed in
            # this copy) route the admin to the full CRUD page - confirm intent.
            if (email == "<EMAIL>") and (password == "<PASSWORD>"): # this can be replaced with whatever login is needed
                return redirect(url_for('crud.crud'))
            else:
                return redirect(url_for('crud.crud_view'))
    # if not logged in, show the login page
    return render_template("login.html")
@app.route('/authorize/', methods=["GET", "POST"])
def main_authorize():
    """Render the sign-up form and create a new user account.

    On success the user is redirected to the login page; on failure the form
    is re-rendered with an explanatory message in ``error_msg``.
    """
    error_msg = ""
    # check form inputs and creates user
    if request.form:
        # validation should be in HTML
        user_name = request.form.get("user_name")
        email = request.form.get("email")
        password1 = request.form.get("password1")
        password2 = request.form.get("password2")  # password should be verified
        if password1 == password2:
            if authorize(user_name, email, password1):
                return redirect(url_for('main_login'))
            else:
                # Bug fix: a failed authorize() previously re-rendered the
                # form with an empty message, silently hiding the failure
                # (e.g. a duplicate account).
                error_msg = "Unable to create account"
        else:
            error_msg = "Passwords do not match"
    # show the auth user page if the above fails for some reason
    return render_template("authorize.html", error_msg=error_msg)
@app.route('/')
def index():
    """Render the site landing page."""
    return render_template("index.html")
@app.route('/about/')
def about():
    """Render the static About page."""
    return render_template("about.html")
@app.route('/calendar')
def calendar():
    """Render the calendar page."""
    return render_template("calendar.html")
@app.route('/activity')
def activity():
    """Render the activity page."""
    return render_template("activity.html")
@app.route('/generator')
def generator():
    """Render the generator page."""
    return render_template("generator.html")
@app.route('/shop')
def shop():
    """Render the shop page."""
    return render_template("shop.html")
if __name__ == "__main__":
    # Run the development server, reachable from any network interface.
    app.run(
        debug=True,
        host="0.0.0.0",
        port=5000
    )  # fix: removed a stray trailing comma that wrapped the call in a 1-tuple
| StarcoderdataPython |
358892 | import pandas as pd
import parserExcel as parser
import xgboost as xgb
import classification as csf
# Load the BPD (bronchopulmonary dysplasia) dataset from the local CSV.
db = parser.getDataFramefromExcel("./BPDdataset.csv")
# Remodel dataset
# Order columns like
# other, outcome, possible early risk factors, possible late risk factors
db = parser.orderColumns(db)
# Drop some ininfluent columns
db = db.drop(["die12", "zbw", "sga10", "locate", "pcare", "ox36", "vent36", "hfv36", "hfnc36", "nimv36", "cpap36", "sterbpd", "anyvent36w"], 1)
# Remove all rows where died==1 because they can't have bpd
db = db.drop(db[db["died"] == 1].index).reset_index(drop=True)
# Drop all rows with null value
for c in db.columns:
    db = db.drop(db[pd.isnull(db[c])].index).reset_index(drop=True)
# Drop gadays and insert into gaweeks
# (fold gestational-age days into fractional weeks, then drop the days column)
db["gaweeks"] = db["gaweeks"].astype(float) # cast entire column to float
for i in range(0, len(db)):
    db.at[i, "gaweeks"] = round(db.at[i, "gaweeks"] + (db.at[i, "gadays"] / 7), 4) # round to 4 decimals
db = db.drop(["gadays"], 1)
# Possible value for BPD: 0= no, 1= yes, 99= N/D; 7= unknown; 9= missing
# for this reason drop 99,9,7
l = len(db)
db = db.drop(db[db["bpd"] == 99].index).reset_index(drop=True)
db = db.drop(db[db["bpd"] == 9].index).reset_index(drop=True)
db = db.drop(db[db["bpd"] == 7].index).reset_index(drop=True)
#print(str(l-len(db))+" element was dropped beacuse bpd == 99 or 9 or 7 ( = N/D, unk, miss)")
# one hot encoding of race column
db["race_1"] = 0
db["race_2"] = 0
db["race_3"] = 0
db["race_4"] = 0
db["race_7"] = 0
for i in range(0, len(db)):
    col = "race_"+str(int(db.at[i, "race"]))
    db.at[i, col] = 1
# move "race_ " colunms instead of "race" and drop it
new_order = db.columns.tolist()
for i in range(1, 5):
    new_order.remove("race_"+str(i))
    new_order.insert(new_order.index("race"), "race_"+str(i))
new_order.remove("race_7")
new_order.insert(new_order.index("race"), "race_7")
db = db[new_order]
db = db.drop(["race"], 1)
# Features is all columns in possible early risk factors and possible late risk factors
# ("drcpap" marks the boundary between early and late risk-factor columns).
features_prec_tard = []
notardive = []
tard = True
for c in range(db.columns.get_loc("drsurf"), len(db.columns)):
    features_prec_tard.append(db.columns[c])
    if "drcpap" in db.columns[c]:
        tard = False
    if tard:
        notardive.append(db.columns[c])
# Early-only feature set: columns from drsurf up to and including drcpap.
features_prec = []
for c in range(db.columns.get_loc('drsurf'), db.columns.get_loc('drcpap')+1):
    features_prec.append(db.columns[c])
target = "bpd"
# Split data in training and test set
# consider all data from 2015-2016 as test set
tr_set, ts_set = parser.splitFrom(db, "byear", 2015)
training_pt = tr_set[features_prec_tard]
training_p = tr_set[features_prec]
tr_target = tr_set[target]
ts_set.reset_index(inplace=True, drop=True)
test_pt = ts_set[features_prec_tard]
test_p = ts_set[features_prec]
ts_target = ts_set[target]
# create XGBoost version of tr_set and ts_set
xgtrain_pt = xgb.DMatrix(training_pt.values, tr_target.values)
xgtrain_p = xgb.DMatrix(training_p.values, tr_target.values)
xgtest_pt = xgb.DMatrix(test_pt.values)
xgtest_p = xgb.DMatrix(test_p.values)
# Grid-search an XGBoost classifier on both feature sets (early+late, early-only).
gbc = xgb.XGBClassifier(n_job=-1, random_state=42, tree_method="auto")
param_grid_gb = {"n_estimators": [200, 300, 400],
                 "learning_rate": [1.00000000e-05, 1.00000000e-04, 1.00000000e-03, 1.00000000e-02, 1.00000000e-01, 1.00000000e+00],
                 "max_depth": [0, 3, 7, 13],# 0 indica nessun limite (0 means no limit)
                 "subsample": [0.5, 0.6, 0.7, 0.8, 0.9]
                 }
name = "precoci_tardivi"
n_job = -1
a = csf.classification(gbc, param_grid_gb, training_pt, tr_target.tolist(), test_pt, ts_target.tolist(), name, valid=True, n_job=n_job)
name = "precoci"
a = csf.classification(gbc, param_grid_gb, training_p, tr_target.tolist(), test_p, ts_target.tolist(), name, valid=True, n_job=n_job)
6495452 | <reponame>gretelai/safe-location-density
from typing import List
import requests
import pandas as pd
# Default GBFS "free_bike_status" feed URLs (Bird, Lyft, Spin - Los Angeles).
GBFS_FEEDS = [
    "https://mds.bird.co/gbfs/v2/public/los-angeles/free_bike_status.json",
    "https://s3.amazonaws.com/lyft-lastmile-production-iad/lbs/lax/free_bike_status.json",  # noqa
    "https://gbfs.spin.pm/api/gbfs/v2_2/los_angeles/free_bike_status"
]
def free_bike_status_to_df(feeds: List[str] = None) -> pd.DataFrame:
    """Fetch GBFS ``free_bike_status`` feeds and concatenate them into one frame.

    Parameters
    ----------
    feeds:
        URLs of ``free_bike_status`` JSON feeds; defaults to ``GBFS_FEEDS``.

    Returns
    -------
    pd.DataFrame
        One row per bike across all reachable feeds (empty frame if none).
        Unreachable or malformed feeds are logged and skipped.
    """
    out_df = pd.DataFrame()
    if feeds is None:
        feeds = GBFS_FEEDS
    for feed in feeds:
        try:
            resp = requests.get(feed)
        except Exception as err:
            print(f"Error on connect: {str(err)} for feed {feed}")
            # Bug fix: without this `continue`, the code below would use a
            # stale `resp` from a previous iteration (or raise NameError on
            # the first one).
            continue
        if resp.status_code != 200:
            print(f"Got non-200 for URL {feed}, got: {resp.text}")
            continue
        bikes_list = resp.json().get("data", {}).get("bikes", None)
        if bikes_list is None:
            print(f"Could not extract bike list for: {feed}")
            continue
        out_df = pd.concat([out_df, pd.DataFrame(bikes_list)])
    # Bug fix: reset_index is not in-place; assign the result and drop the
    # old per-feed index instead of discarding it.
    out_df = out_df.reset_index(drop=True)
    return out_df
| StarcoderdataPython |
3392606 | from enum import Enum
class CardType(Enum):
    """Kinds of cards in the game.

    NONE (-1) is a sentinel for "no card"; DEV/AGILE_COACH/HR/PM identify
    role cards, while ACTION and KNOWLEDGE are card categories.
    """
    NONE = -1
    DEV = 0
    AGILE_COACH = 1
    HR = 2
    PM = 3
    ACTION = 4
    KNOWLEDGE = 5
4876109 | #!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import requests
from ansible.module_utils.message import MessageModule
DOCUMENTATION = '''
---
module: lb_load_monitoring
short_description: Load balancer response times.
version_added: "1.0.0"
author: "<NAME> (@anton-sidelnikov)"
description:
- Get metrics from load balancer hosts and push it to unix socket server
- APIMON_PROFILER_MESSAGE_SOCKET environment variable should be set.
options:
target_address:
description: IP address of target load balancer.
type: str
required: true
timeout:
description: Request timeout value.
type: int
default: 20
protocol:
description: Load balancer protocol.
type: str
default: http
protocol_port:
description: Load balancer listener port.
type: int
default: 80
request_count:
description: Count of requests.
type: int
default: 30
interface:
description: Public or internal address.
type: str
default: public
choices=['public', 'internal']
listener_type:
description: Type of the listener to be checked.
type: str
default: http
choices=['http', 'https', 'tcp']
requirements: []
'''
RETURN = '''
pushed_metrics:
description: List of metrics to be pushed into socket
type: complex
returned: On Success
contains:
name:
description: Name of metric.
type: str
sample: "mname"
environment:
description: From which environment run.
type: str
sample: "production_eu-de"
zone:
description: In which zone loadbalancer is deployed.
type: str
sample: "production_eu-de"
timestamp:
description: Current timestamp.
type: str
sample: "2021-02-15T08:57:23.701273"
metric_type:
description: Type of gathered value ('ms' for milliseconds).
type: str
sample: "ms"
value:
description: Response time in milliseconds
type: int
sample: 7
az:
description: AZ of responded server
type: str
sample: "eu-de-02"
__type:
description: Message type('metric' is default value).
type: str
sample: "metric"
'''
EXAMPLES = '''
# Get list of floating IPs statuses (all parameters are specified)
- lb_load_monitoring:
lb_ip: "172.16.58.3"
register: out
'''
SUCCESS_METRIC = 'csm_lb_timings'
TIMEOUT_METRIC = 'csm_lb_timeout'
class LbLoadMonitoring(MessageModule):
    """Probe a load balancer with repeated HTTP requests and emit timing metrics."""

    # Ansible argument specification; mirrors the DOCUMENTATION block above.
    argument_spec = dict(
        target_address=dict(type='str', required=True),
        timeout=dict(type='int', default=20),
        protocol=dict(type='str', default='http'),
        request_count=dict(type='int', default=30),
        protocol_port=dict(type='int', default=80),
        interface=dict(type='str', default='public', choices=['public', 'internal']),
        listener_type=dict(type='str', default='http', choices=['http', 'https', 'tcp'])
    )

    def run(self):
        """Issue `request_count` GETs, collect per-request metrics, push them
        to the configured socket and exit (or fail if no socket is set)."""
        metrics = []
        verify = True
        timeout = self.params['timeout']
        interface = self.params['interface']
        listener_type = self.params['listener_type']
        address = f"{self.params['protocol']}://{self.params['target_address']}" \
                  f":{self.params['protocol_port']}"
        if self.params['protocol'] == 'https':
            # Certificate verification is deliberately disabled for the probe
            # (targets use internal/self-signed certs).
            verify = False
        for _ in range(self.params['request_count']):
            try:
                res = requests.get(
                    address, headers={'Connection': 'close'}, verify=verify, timeout=timeout
                )
                # NOTE(review): assumes the response always carries a
                # 'Backend-Server' header containing an 'eu-de-N' AZ name;
                # a missing header or non-matching value would raise here.
                metrics.append(self.create_metric(
                    name=f'{SUCCESS_METRIC}.{interface}.{listener_type}',
                    value=int(res.elapsed.total_seconds() * 1000),
                    metric_type='ms',
                    az=re.search(r'eu-de-\d+', res.headers['Backend-Server']).group()
                ))
            except requests.Timeout:
                # Timed-out probes are recorded as counter metrics instead.
                self.log('timeout sending request to LB')
                metrics.append(self.create_metric(
                    name=f'{TIMEOUT_METRIC}.{interface}.{listener_type}.failed',
                    value=1,
                    metric_type='c',
                    az='default')
                )
        if self.params['socket']:
            for metric in metrics:
                self.push_metric(metric, self.params['socket'])
            self.exit(changed=True, pushed_metrics=metrics)
        self.fail_json(msg='socket must be set')
def main():
    """Module entry point: instantiate and execute the Ansible module."""
    module = LbLoadMonitoring()
    module()
# Standard Ansible module entry guard.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
24425 | from __future__ import print_function
import os
import re
def openFile(f, m='r'):
    """Open *f* in mode *m*, falling back to the parent-directory copy.

    Lets the script run both from the project root and from a subdirectory.
    """
    path = f if os.path.exists(f) else '../' + f
    return open(path, m)
# Pull the demo snippet out of the mockito demo test (between #DELIMINATOR markers).
demo_test = ' '.join(openFile('mockito_test/demo_test.py').readlines())
demo_test = demo_test.split('#DELIMINATOR')[1]
readme_before = ''.join(openFile('README').readlines())
token = 'Basic usage:'
# Replace everything from the token to end-of-file with the token plus the
# freshly extracted snippet (re.S makes '.' span newlines).
readme_after = re.compile(token + '.*', re.S).sub(token + '\n' + demo_test, readme_before)
# Only rewrite the README when the content actually changed.
if (readme_before != readme_after):
    readme_file = openFile('README', 'w')
    readme_file.write(readme_after)
    print("README updated")
else:
    print("README update not required")
| StarcoderdataPython |
8001482 | <filename>day-01.py
def fuel_requirement(mass):
    """Fuel needed to lift *mass*: floor(mass / 3) - 2, clamped at zero."""
    return max(mass // 3 - 2, 0)
# Part 1
# Sum the direct fuel requirement of every module mass in the input file.
with open("input-01.txt") as f:
    total_fuel_requirement = sum(fuel_requirement(int(mass)) for mass in f)
print(total_fuel_requirement)
# Part 2
def rec_fuel_requirement(mass):
    """Total fuel for *mass*, including the fuel needed to carry the fuel.

    Repeatedly applies fuel_requirement until the extra demand reaches zero
    (iterative equivalent of the recursive formulation).
    """
    total = 0
    while mass > 0:
        mass = fuel_requirement(mass)
        total += mass
    return total
# Part 2: same input, but account for the fuel's own mass recursively.
with open("input-01.txt") as f:
    total_rec_fuel_requirement = sum(rec_fuel_requirement(int(mass)) for mass in f)
print(total_rec_fuel_requirement)
| StarcoderdataPython |
8064875 | <filename>src/pymodaq_plugins_newport/daq_move_plugins/daq_move_Newport_AgilisSerial.py
from pymodaq.daq_move.utility_classes import DAQ_Move_base, comon_parameters, main
from pymodaq.daq_utils.daq_utils import ThreadCommand, getLineInfo, set_logger, get_module_name
from easydict import EasyDict as edict
from pymodaq_plugins_newport.hardware.agilis_serial import AgilisSerial, COMPORTS
# Module-level logger named after this plugin module.
logger = set_logger(get_module_name(__file__))
class DAQ_Move_Newport_AgilisSerial(DAQ_Move_base):
"""
"""
_controller_units = 'step'
is_multiaxes = True
channel_names = AgilisSerial.channel_indexes
axis_names = AgilisSerial.axis_indexes
port = 'COM9' if 'COM9' in COMPORTS else COMPORTS[0] if len(COMPORTS) > 0 else ''
params = [
{'title': 'COM Port:', 'name': 'com_port', 'type': 'list', 'limits': COMPORTS, 'value': port},
{'title': 'Firmware:', 'name': 'firmware', 'type': 'str', 'value': ''},
{'title': 'Channel:', 'name': 'channel', 'type': 'list', 'limits': channel_names},
{'title': 'Axis:', 'name': 'axis', 'type': 'list', 'limits': axis_names},
{'title': 'Sleep time (s):', 'name': 'sleep_time', 'type': 'float', 'value': 0.25},
{'title': 'MultiAxes:', 'name': 'multiaxes', 'type': 'group', 'visible': is_multiaxes, 'children': [
{'title': 'is Multiaxes:', 'name': 'ismultiaxes','type': 'bool', 'value': is_multiaxes},
{'title': 'Status:', 'name': 'multi_status', 'type': 'list', 'limits': ['Master', 'Slave']},
]}
] + comon_parameters
def __init__(self, parent=None, params_state=None):
"""
Initialize the class.
"""
super().__init__(parent, params_state)
self.controller = None
self.current_position = 0
self.target_position = 0
def ini_stage(self, controller=None):
"""
Actuator communication initialization
Parameters
----------
controller: (object) custom object of a PyMoDAQ plugin (Slave case).
None if only one actuator by controller (Master case)
Returns
-------
self.status (edict): with initialization status: three fields:
* info (str)
* controller (object) initialized controller
* initialized: (bool): False if initialization failed otherwise True
"""
try:
self.status.update(edict(info="", controller=None, initialized=False))
if self.settings.child('multiaxes', 'ismultiaxes').value()\
and self.settings.child('multiaxes',
'multi_status').value() == "Slave":
if controller is None:
raise Exception('no controller has been defined externally while'
'this axe is a slave one')
else:
self.controller = controller
else: # Master stage
self.controller = AgilisSerial()
info = self.controller.init_com_remote(self.settings.child('com_port').value())
if self.controller.get_channel() != self.settings.child('channel').value():
self.controller.select_channel(self.settings.child('channel').value())
self.settings.child('firmware').setValue(info)
self.status.info = info
self.status.controller = self.controller
self.status.initialized = True
return self.status
except Exception as e:
self.emit_status(
ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))
self.status.info = getLineInfo() + str(e)
self.status.initialized = False
return self.status
def check_position(self):
"""
Get the current position from the hardware with scaling conversion.
Returns
-------
float: The position obtained after scaling conversion.
"""
return self.controller.get_step_counter(self.settings.child('axis').value())
def move_Abs(self, position):
"""
Move the actuator to the absolute target defined by position.
Parameters
----------
position: (flaot) value of the absolute target positioning
"""
position = self.check_bound(position)
rel_position = position - self.current_position
self.move_Rel(rel_position)
def move_Rel(self, relative_move):
"""
Move the actuator to the relative target actuator value defined by
relative_move
Parameters
----------
relative_move: (float) value of the relative distance to travel in
number of steps. It has to be converted to int since here the unit is in
number of steps.
"""
relative_move = self.check_bound(self.current_position + relative_move) - self.current_position
relative_move = self.set_position_relative_with_scaling(relative_move)
self.target_position = relative_move + self.current_position
self.controller.move_rel(self.settings.child('axis').value(), int(relative_move))
def move_Home(self):
"""
"""
self.controller.counter_to_zero(self.settings.child('axis').value())
self.current_position = 0.
self.target_position = 0.
def stop_motion(self):
"""
Stop an ongoing move.
Not implemented.
"""
self.controller.stop(self.settings.child('axis').value())
def commit_settings(self, param):
"""
Called after a param_tree_changed signal from DAQ_Move_main.
"""
if param.name() == 'channel':
self.controller.select_channel(param.value())
param.setValue(int(self.controller.get_channel()))
def close(self):
    """
    Terminate the communication protocol.
    """
    # Releases the hardware connection held by the controller.
    self.controller.close()
# Launch the plugin's standalone test UI. NOTE(review): ``main`` is not
# defined in this chunk -- presumably imported from the pymodaq plugin
# template at the top of the file; the second argument looks like an
# init flag. Confirm against the file header.
if __name__ == '__main__':
    main(__file__, False)
| StarcoderdataPython |
3556844 | from singlecellmultiomics.utils.sequtils import reverse_complement
from singlecellmultiomics.fragment import Fragment
class ScarTraceFragment(Fragment):
    """
    Fragment definition for ScarTrace.

    Parameters
    ----------
    reads : iterable
        Reads making up this fragment (forwarded to Fragment).
    scartrace_r1_primers : iterable of str
        Primer sequences R1 must start with. Mandatory.

    Raises
    ------
    ValueError
        When no primer sequences are supplied.
    """

    def __init__(self, reads, scartrace_r1_primers=None, **kwargs):
        # Validate up front, before any base-class work is done. The original
        # used ``assert`` *after* the assignment; asserts are stripped under
        # ``python -O``, silently disabling this mandatory check.
        if scartrace_r1_primers is None:
            raise ValueError('please supply primer sequences')
        Fragment.__init__(self, reads, **kwargs)
        self.scartrace_r1_primers = scartrace_r1_primers

    def set_umi(self, **kwargs):
        """ScarTrace fragments carry no UMI; disable UMI assignment."""
        pass

    def is_valid(self):
        """ Check if R1 starts with the defined primers and if R2 and R1 are mapped

        Side effect: records the failure reason in the 'fr' meta tag.

        Returns:
            bool
        """
        if not self.has_R1() or not self.has_R2():
            if not self.has_R1():
                self.set_meta('fr', 'no_R1', as_set=True)
            if not self.has_R2():
                self.set_meta('fr', 'no_R2', as_set=True)
            return False

        if self[0].is_unmapped or self[1].is_unmapped:
            self.set_meta('fr', 'unmapped_mate', as_set=True)
            return False

        # Compare the primer against the sequenced strand orientation.
        r1_seq = self.get_R1().seq
        if self.get_R1().is_reverse:
            r1_seq = reverse_complement(r1_seq)

        if any(r1_seq.startswith(primer) for primer in self.scartrace_r1_primers):
            # Good
            return True

        self.set_meta('fr', 'primer_not_matching', as_set=True)
        # Bad
        return False

    # Replace the equality function.
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable in Python 3 -- confirm no caller puts fragments in sets/dicts.
    def __eq__(self, other):  # other can also be a Molecule!
        """Fuzzy equality: same sample, same strand, and start/end within
        ``self.assignment_radius`` of each other."""
        # Make sure fragments map to the same strand, cheap comparisons first.
        if self.sample != other.sample:
            return False

        if self.strand != other.strand:
            return False

        if min(abs(self.span[1] - other.span[1]),
               abs(self.span[2] - other.span[2])) > self.assignment_radius:
            return False

        # Sample matches and starting position is within the defined span radius.
        return True
| StarcoderdataPython |
11384354 | <reponame>simondolle/dgim
import unittest
import itertools
from collections import deque
from dgim import Dgim
from dgim.utils import generate_random_stream
class ExactAlgorithm(object):
    """Exact algorithm to count the number of "True"
    in the last N elements of a boolean stream."""

    def __init__(self, N):
        """Constructor

        :param N: size of the sliding window
        :type N: int
        """
        self.N = N
        # A deque with maxlen=N discards the oldest element automatically
        # once the window is full, replacing the manual popleft bookkeeping
        # the original carried in update().
        self.sliding_window = deque(maxlen=N)

    def update(self, elt):
        """Update the stream with one element.

        :param elt: the latest element of the stream
        :type elt: bool
        """
        self.sliding_window.append(elt)

    def get_count(self):
        """Returns the exact number of "True"
        in the last N elements of the stream.

        :returns: int
        """
        # sum() over booleans counts the True values.
        return sum(self.sliding_window)
class TestDgimQuality(unittest.TestCase):
    """Statistical checks of Dgim's estimate against an exact counter."""

    def check_quality_settings(self, dgim, stream):
        """Feed *stream* to both *dgim* and an exact counter, then assert the
        dgim estimate e stays within the guaranteed bound of the exact
        count c:  |e - c| <= error_rate * c.

        :param dgim: the Dgim instance to test
        :type dgim: Dgim
        :param stream: the stream to use. It should contain only 0/1 elements.
        :type stream: iterator
        """
        reference = ExactAlgorithm(dgim.N)
        for bit in stream:
            dgim.update(bit)
            reference.update(bit)
        exact = reference.get_count()
        deviation = abs(dgim.get_count() - exact)
        self.assertTrue(deviation <= dgim.error_rate * exact)

    def test_nominal_case(self):
        self.check_quality_settings(
            dgim=Dgim(N=100, error_rate=0.5),
            stream=generate_random_stream(length=10000))

    def test_large_N(self):
        self.check_quality_settings(
            dgim=Dgim(N=10000, error_rate=0.5),
            stream=generate_random_stream(length=2000))

    def test_short_stream(self):
        # stream is shorter than N
        self.check_quality_settings(
            dgim=Dgim(N=1000, error_rate=0.5),
            stream=generate_random_stream(length=100))

    def test_N_is_one(self):
        self.check_quality_settings(
            dgim=Dgim(N=1, error_rate=0.5),
            stream=generate_random_stream(length=10))

    def test_N_is_two(self):
        self.check_quality_settings(
            dgim=Dgim(N=2, error_rate=0.5),
            stream=generate_random_stream(length=100))

    def test_low_error_rate_case(self):
        self.check_quality_settings(
            dgim=Dgim(N=100, error_rate=0.01),
            stream=generate_random_stream(length=1000))

    def test_only_true_case(self):
        self.check_quality_settings(
            dgim=Dgim(N=100, error_rate=0.5),
            stream=itertools.repeat(True, 10000))
| StarcoderdataPython |
6663776 | <gh_stars>0
# -*- coding: utf-8 -*-
import os
import approvaltests
from approvaltests.reporters import PythonNativeReporter
from pytest_approvaltests import get_reporter, clean, pytest_configure
def test_approvaltests_use_reporter(testdir):
    """End-to-end: --approvaltests-use-reporter='PythonNative' must make a
    failing approval test report through the PythonNative reporter."""
    # create a temporary pytest test module with a failing approval test
    testdir.makepyfile("""
        from approvaltests import verify

        def test_sth():
            verify("foo")
        """)
    # run pytest with approvaltests configuration to use the PythonNative diff tool
    result = testdir.runpytest(
        "--approvaltests-use-reporter='PythonNative'",
        '-v'
    )
    # assert the test fails
    # and these lines 'to approve this result' are produced by the PythonNative reporter
    result.stdout.fnmatch_lines([
        '*::test_sth FAILED*',
        '*to approve this result:*'
    ])
def test_approvaltests_add_reporter(testdir, tmpdir):
    """A custom diff tool registered via --approvaltests-add-reporter (with its
    argument passed via --approvaltests-add-reporter-args) is invoked on a
    failing approval test -- including when the tool's path contains spaces."""
    # create a temporary pytest test module with a failing approval test
    testdir.makepyfile("""
        from approvaltests import verify

        def test_sth():
            verify("foo")
        """)
    # create a diff tool that just prints 'diff program is executing';
    # deliberately placed in a directory with spaces in its name.
    tmpdir = os.path.join(str(tmpdir), "path with spaces")
    diff_program_contents = "print('diff program is executing')"
    diff_tool = os.path.join(str(tmpdir), "diff.py")
    os.makedirs(tmpdir)
    with open(diff_tool, "w") as f:
        f.write(diff_program_contents)
    # run pytest with configuration for custom diff tool
    result = testdir.runpytest(
        '--approvaltests-add-reporter=python',
        '--approvaltests-add-reporter-args=' + str(diff_tool),
        '-v'
    )
    # assert that the diff program did execute on the test failure
    result.stdout.fnmatch_lines([
        '*::test_sth FAILED*',
        '*GenericDiffReporter*'
    ])
def test_help_message(testdir):
    """`pytest --help` must advertise all three approvaltests options under
    an 'approval testing' section."""
    result = testdir.runpytest(
        '--help',
    )
    # assert the help text includes information about the
    # approvaltests options
    result.stdout.fnmatch_lines([
        'approval testing:',
        '*--approvaltests-use-reporter=*',
        '*--approvaltests-add-reporter=*',
        '*--approvaltests-add-reporter-args=*',
    ])
def test_difftool_path_with_spaces(testdir):
    """A reporter path containing spaces must be preserved verbatim, not
    split into multiple arguments."""
    from pytest_approvaltests import create_reporter
    from approvaltests.reporters import GenericDiffReporterFactory

    spaced_path = "/path with spaces/to/difftool"
    reporter = create_reporter(GenericDiffReporterFactory(), spaced_path, [])
    assert reporter.path == spaced_path
def test_python_native_reporter():
    """clean() must strip optional single/double quotes around the reporter
    name so each spelling resolves to PythonNativeReporter."""
    for spelling in ("PythonNative", "'PythonNative'", '"PythonNative"'):
        reporter = get_reporter(None, None, clean(spelling))
        assert type(reporter) == PythonNativeReporter
def test_command_line():
    """pytest_configure must read the reporter name off pytest's config
    object and install the named reporter as the approvaltests default."""
    def create_config(custom_reporter, custom_reporter_arg, reporter_name):
        # Minimal stand-in for pytest's Config: only the attributes that
        # pytest_configure reads are provided.
        class config:
            class option:
                approvaltests_custom_reporter = custom_reporter
                approvaltests_custom_reporter_args = custom_reporter_arg
                approvaltests_reporter = reporter_name
        return config

    config = create_config(None, None, "'PythonNative'")
    pytest_configure(config)
    assert type(approvaltests.get_default_reporter()) == PythonNativeReporter
| StarcoderdataPython |
4879740 | # Generated by Django 2.2.5 on 2019-10-07 17:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see file header). Applied migrations should
    # not be hand-edited; create a new migration for further changes.

    dependencies = [
        ('app', '0021_auto_20191007_1739'),
    ]

    operations = [
        # Make Order.deliveredOn optional: NULL in the database, blank in
        # forms, defaulting to None (i.e. "not delivered yet").
        migrations.AlterField(
            model_name='order',
            name='deliveredOn',
            field=models.DateTimeField(blank=True, default=None, null=True),
        ),
    ]
| StarcoderdataPython |
# [Shaolin Temple] Investigate the Sutra Repository
# Quest turn-in script: the player returns the Book of Demons to the Wise
# Chief Priest. ``sm`` (script manager) and ``parentID`` (quest id) are
# injected by the game's scripting host -- NOTE(review): inferred from usage.
BOOK_OF_DEMONS = 4034637      # item id of the quest item consumed on completion
WISE_CHIEF_PRIEST = 9310053   # NPC id used as the dialogue speaker

sm.removeEscapeButton()       # the player may not cancel out of this dialogue
sm.setSpeakerID(WISE_CHIEF_PRIEST)
sm.setBoxChat()
sm.sendNext("Oh! The #bBook of Demons#k! You didn't... You didn't open the book, did you?")
# Flip the chat box so the player character speaks the reply.
sm.flipBoxChat()
sm.flipBoxChatPlayerAsSpeaker()
sm.sendNext("I didn't have time. I was too busy rushing back here so we can save the world!")
sm.setSpeakerID(WISE_CHIEF_PRIEST)
sm.setBoxChat()
sm.sendNext("Wonderful. You did beautifully. Now, off with you. Rest Up. You've earned it. I have things do.")
# Rewards: mark the quest complete, grant EXP, and take the quest item.
sm.completeQuest(parentID)
sm.giveExp(3030679)
sm.consumeItem(BOOK_OF_DEMONS)
sm.flipBoxChat()
sm.flipBoxChatPlayerAsSpeaker()
sm.sendNext("(Whoa. the #b#p"+ str(WISE_CHIEF_PRIEST) +"##k rushed out of here. That book must be filled with really great info. His eyes lit up when he opened it.")
sm.sendNext("(Well, another job well done. Time to say goodbye to my favourite demon trio back at the #rSutra Depository Secret Library#k.)")
sm.chatScript("Say farewell to the demons at the Sutra Depository Secret Library")
1616709 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 29 13:15:30 2021
http://www.kitconet.com/
@author: haoli
"""
# Directory layout for scraped Shanghai Futures Exchange (SHFE) data.
commodityDir = "./data"
commodityShanghaiDir = commodityDir + "/Shanghai" #"./data/LME"
commodityShanghai_dataDir = commodityShanghaiDir + "/temp" #"./data/LME/temp"
commodityShanghaiDir_OIVolPrice = commodityDir + '/ShanghaitempOpenInterestVolumePrice'
commodityShanghai_dataDir_OIVolPrice = commodityShanghaiDir_OIVolPrice + '/temp'

# Weekly stock report page (English version of the SHFE site).
# SHStock_url_weekly = "http://www.shfe.com.cn/statements/dataview.html?paramid=weeklystock"
SHStock_url_weekly = 'http://www.shfe.com.cn/en/MarketData/dataview.html?paramid=week'
SHStockfileName_weekly = "ShanghaiStock_weekly.csv"
SH_openInterest_Vol_fileName_weekly = "Shanghai_OpenInterest_Volumn_weekly.csv"

#----not used
SHCopper_url_daily = 'http://www.shfe.com.cn/statements/dataview.html?paramid=delaymarket_cu'
SHCOPPERfileName = "SH_copper.csv"
SHcrudeOil_url_daily = 'http://www.shfe.com.cn/statements/dataview.html?paramid=delaymarket_sc'
SHcrudeOilfileName = "SH_crudeOil.csv"
# NOTE(review): the "gold" URL below reuses the copper paramid
# (delaymarket_cu) and SHCOPPERfileName is re-assigned -- looks like a
# copy-paste slip, but this whole section is marked "not used".
SHgold_url_daily = 'http://www.shfe.com.cn/statements/dataview.html?paramid=delaymarket_cu'
SHCOPPERfileName = "SH_copper.csv"
#not used end

# Keys shared by the per-metal file-name tables below.
cuKey = 'CuFileName'
cuBCKey = 'CuBCFileName'
alKey = 'ALFileName'
znKey = 'ZnFileName'
niKey = 'NiFileName'
pbKey = 'LeadFileName'
tinKey = 'TinFileName'
goldKey = 'GoldFileName'
silverKey = 'SilverFileName'

# Output CSV names for per-metal warehouse stock data.
SH_stockFileName_disct = {
    cuKey: "SH_copperStock.csv",
    cuBCKey: "SH_copperBCStock.csv",
    alKey: "SH_ALStock.csv",
    znKey: "SH_zincStock.csv",
    niKey: "SH_nickelStock.csv",
    pbKey: "SH_leadStock.csv",
    tinKey: "SH_tinStock.csv",
    goldKey: "SH_goldStock.csv",
    silverKey: "SH_silverStock.csv"
}
# Column names used when parsing the SHFE stock reports.
SHFE_StockCol1 = 'On Warrant'
SHFE_StockCol2 = 'Deliverable'

# Output CSV names for per-metal open-interest / volume data.
SH_openInterestVolumn_FileName_disct = {
    cuKey: "SH_copperOpenInterestVolume.csv",
    cuBCKey: "SH_copperBCOpenInterestVolume.csv",
    alKey: "SH_ALOpenInterestVolume.csv",
    znKey: "SH_zincOpenInterestVolume.csv",
    niKey: "SH_nickelOpenInterestVolume.csv",
    pbKey: "SH_leadOpenInterestVolume.csv",
    tinKey: "SH_tinOpenInterestVolume.csv",
    goldKey: "SH_goldOpenInterestVolume.csv",
    silverKey: "SH_silverOpenInterestVolume.csv",
}
# Earlier spelling ("Volunm") kept for reference:
# SH_openInterestVolumn_FileName_disct = {
#     cuKey : "SH_copperOpenInterestVolunm.csv",
#     cuBCKey : "SH_copperBCOpenInterestVolunm.csv",
#     alKey : "SH_ALOpenInterestVolunm.csv",
#     znKey : "SH_zincOpenInterestVolunm.csv",
#     niKey : "SH_nickelOpenInterestVolunm.csv",
#     pbKey : "SH_leadOpenInterestVolunm.csv",
#     tinKey : "SH_tinOpenInterestVolunm.csv",
#     goldKey : "SH_goldOpenInterestVolunm.csv",
#     silverKey : "SH_silverOpenInterestVolunm.csv",
# }
SHFE_OIVolCol1 = 'OpenInterest'
# SHFE_OIVolCol2 = 'Volumn'
SHFE_OIVolCol2 = 'Volume'
SHFE_OIVolCol3 = 'TurnOver' #total money exchanged

# Earlier stock-file table kept for reference:
# SH_stockFileName_disct = {
#     'CuFileName' : "SH_copperStock.csv",
#     'ALFileName' : "SH_ALStock.csv",
#     'ZnFileName' : "SH_zincStock.csv",
#     'NiFileName' : "SH_nickelStock.csv",
#     'LeadFileName' : "SH_leadStock.csv",
#     'TinFileName' : "SH_tinStock.csv",
#     'GoldFileName' : "LME_goldStock.csv",
#     'SilverFileName' : "LME_silverStock.csv"
# }

#Deliverable = 4 On Warrant = 5 Stock = 4-5,
# (translated from Chinese) Goods with registered warehouse receipts count
# as futures inventory; goods without registered receipts are spot inventory.
| StarcoderdataPython |
# NOTE(review): Python 2 syntax (print statements) -- this example targets
# the Python 2 "pattern" library.
import os, sys; sys.path.insert(0, os.path.join("..", ".."))

from pattern.vector import Document, Corpus

# Latent Semantic Analysis (LSA) is a statistical machine learning method
# based on a matrix calculation called "singular value decomposition" (SVD).
# It discovers semantically related words across documents.
# It groups these into different "concepts"
# and creates a "concept vector" instead of a word vector for each document.
# This reduces the amount of data to work with (for example when clustering),
# and filters out noise, so that semantically related words come out stronger.

D1 = Document("The dog wags his tail.", threshold=0, name="dog")
D2 = Document("Curiosity killed the cat.", threshold=0, name="cat")
D3 = Document("Cats and dogs make good pets.", threshold=0, name="pet")
D4 = Document("Curiosity drives science.", threshold=0, name="science")

corpus = Corpus([D1,D2,D3,D4])

# Baseline search before the LSA reduction, for comparison.
print corpus.search("curiosity")
print

# Project the corpus onto the reduced concept space.
corpus.reduce()

# A search on the reduced concept space also yields D3 ("pet") as a result,
# since D2 and D2 are slightly similar even though D3 does not explicitly contain "curiosity".
# Note how the results also yield stronger similarity scores (noise was filtered out).
print corpus.search("curiosity")
print

# The concept vector for document D1:
#print corpus.lsa.vectors[D1.id]
#print

# The word scores for each concept:
#print corpus.lsa.concepts
1667207 | <reponame>andela-cnnadi/python-fire
from functools import wraps
def validate_payload(func):
    """Decorator: reject payloads that are not plain native Python types.

    Wraps a method ``func(instance, payload)`` and raises ``ValueError``
    when ``payload`` is not one of bool, int, float, str, list, tuple, dict.

    Raises
    ------
    ValueError
        If the payload is of any other type.
    """
    @wraps(func)
    def func_wrapper(instance, payload):
        native_types = (bool, int, float, str, list, tuple, dict)
        # isinstance instead of ``type(payload) in [...]``: idiomatic, and
        # also accepts subclasses (e.g. OrderedDict), which only widens --
        # never narrows -- what was previously allowed.
        if not isinstance(payload, native_types):
            raise ValueError("Invalid payload specified")
        return func(instance, payload)
    return func_wrapper
def parse_results(func):
    """Decorator: unwrap an HTTP response object returned by *func*.

    On HTTP 200 the decoded JSON body is returned; otherwise the response's
    own ``raise_for_status`` is invoked so the appropriate error propagates.
    """
    @wraps(func)
    def func_wrapper(instance, *args, **kwargs):
        response = func(instance, *args, **kwargs)
        if response.status_code != 200:
            # Delegate error signalling to the response object itself.
            response.raise_for_status()
            return None
        return response.json()
    return func_wrapper
| StarcoderdataPython |
3436284 | <reponame>ahmeddeladly/arch
"""
Simulation of ADF z-test critical values. Closely follows MacKinnon (2010).
Running this files requires an IPython cluster, which is assumed to be
on the local machine. This can be started using a command similar to
ipcluster start -n 4
Remote clusters can be used by modifying the Client initiation.
This version has been optimized for execution on a large cluster and should
scale well with 128 or more engines.
"""
from __future__ import annotations
import datetime
import time
from typing import cast
from ipyparallel import Client, DirectView
from numpy import array, nan, ndarray, percentile, savez
from arch.typing import UnitRootTrend
from .adf_simulation import adf_simulation
# Time in seconds to sleep before checking if ready
SLEEP = 10
# Number of repetitions
EX_NUM = 500
# Number of simulations per exercise
EX_SIZE = 200000
# Approximately controls memory use, in MiB
MAX_MEMORY_SIZE = 100

# Connect to the IPython cluster controller (assumed local; see module docstring).
rc = Client()
dview = rc.direct_view()
# sync_imports() runs these imports on every engine as well as locally, so
# names like ``zeros`` and ``RandomState`` exist inside engine-side tasks.
with dview.sync_imports():
    from numpy import arange, zeros
    from numpy.random import RandomState
def clear_cache(client: Client, view: DirectView) -> None:
    """Cache-clearing function from mailing list.

    Purges results cached on the controller, the client and the view so
    that long runs do not accumulate memory.

    Parameters
    ----------
    client : Client
        The ipyparallel client whose caches are purged.
    view : DirectView
        The view whose result/history caches are purged.
    """
    # Bug fix: the original asserted on the module-level ``rc`` instead of
    # the ``client`` argument, so the guard ignored the caller's client.
    assert not client.outstanding, "don't clear history when tasks are outstanding"
    client.purge_results("all")  # clears controller
    client.results.clear()
    client.metadata.clear()
    view.results.clear()
    client.history = []
    view.history = []
    client.session.digest_history.clear()
def wrapper(n: int, trend: UnitRootTrend, b: int, rng_seed: int = 0) -> ndarray:
    """
    Wraps and blocks the main simulation so that the maximum amount of memory
    can be controlled on multi processor systems when executing in parallel.

    Parameters
    ----------
    n : int
        Length of each simulated time series.
    trend : UnitRootTrend
        Trend specification passed through to adf_simulation.
    b : int
        Total number of simulated series to produce.
    rng_seed : int
        Seed for the per-task RandomState.

    Returns
    -------
    ndarray
        Vector of b simulated test statistics.
    """
    rng = RandomState()
    rng.seed(rng_seed)
    remaining = b
    res = zeros(b)
    finished = 0
    # Number of series fitting in MAX_MEMORY_SIZE MiB of doubles. Guard
    # against a zero step (n larger than the memory budget), which would
    # make range() raise ValueError below.
    block_size = max(int(2**20.0 * MAX_MEMORY_SIZE / (8.0 * n)), 1)
    for _ in range(0, b, block_size):
        count = min(block_size, remaining)
        st = finished
        en = finished + count
        res[st:en] = adf_simulation(n, trend, count, rng)
        finished += count
        remaining -= count

    return res
# Push variables and functions to all engines
dview.execute("import numpy as np")
dview["MAX_MEMORY_SIZE"] = MAX_MEMORY_SIZE
dview["wrapper"] = wrapper
dview["adf_simulation"] = adf_simulation
lview = rc.load_balanced_view()

trends = ("n", "c", "ct", "ctt")
# Sample sizes, reversed below so the slowest (largest-T) jobs start first.
T = array(
    (
        20, 25, 30, 35, 40, 45, 50, 60, 70, 80, 90, 100, 120, 140, 160,
        180, 200, 250, 300, 350, 400, 450, 500, 600, 700, 800, 900, 1000,
        1200, 1400, 2000,
    )
)
T = T[::-1]
m = T.shape[0]
percentiles = list(arange(0.5, 100.0, 0.5))
rng = RandomState(0)
seeds = list(rng.random_integers(0, 2**31 - 2, size=EX_NUM))

for tr in trends:
    results = cast(ndarray, zeros((len(percentiles), m, EX_NUM)) * nan)
    filename = "adf_z_" + tr + ".npz"
    for i, t in enumerate(T):
        print("Time series length {0} for Trend {1}".format(t, tr))
        now = datetime.datetime.now()
        # Serial version
        # args = ([t] * EX_NUM, [tr] * EX_NUM, [EX_SIZE] * EX_NUM, seeds)
        # out = [ wrapper(a, b, c, d) for a, b, c, d in zip(*args)]
        # Parallel version
        res = lview.map_async(
            wrapper, [t] * EX_NUM, [tr] * EX_NUM, [EX_SIZE] * EX_NUM, seeds
        )
        sleep_count = 0
        while not res.ready():
            sleep_count += 1
            elapsed = datetime.datetime.now() - now
            # Bug fix: the original condition was ``sleep_count % 10`` which
            # printed on 9 out of every 10 polls; the intent (a periodic
            # progress message) needs the == 0 comparison.
            if sleep_count % 10 == 0:
                print("Elapsed time {0}, waiting for results".format(elapsed))
            time.sleep(SLEEP)
        out = res.get()
        # Prevent unnecessary results from accumulating
        clear_cache(rc, lview)
        elapsed = datetime.datetime.now() - now
        print("Total time {0} for T={1}".format(elapsed, t))
        quantiles = [percentile(x, percentiles) for x in out]
        results[:, i, :] = cast(ndarray, array(quantiles).T)

    # One output file per trend specification.
    savez(filename, trend=tr, results=results, percentiles=percentiles, T=T)
8054377 | <gh_stars>1-10
__author__ = "konwar.m"
__copyright__ = "Copyright 2022, AI R&D"
__credits__ = ["konwar.m"]
__license__ = "Individual Ownership"
__version__ = "1.0.1"
__maintainer__ = "konwar.m"
__email__ = "<EMAIL>"
__status__ = "Development"
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
"""
Start by implementing the basic blocks i.e., the convolutional block and
the multi-layer perceptron block.
"""
def conv_block(x: tf.Tensor, filters: int, name: str) -> tf.Tensor:
    """Pointwise Conv1D -> BatchNorm -> ReLU, with layer names derived from *name*."""
    out = layers.Conv1D(filters, kernel_size=1, padding="valid", name=f"{name}_conv")(x)
    out = layers.BatchNormalization(momentum=0.0, name=f"{name}_batch_norm")(out)
    out = layers.Activation("relu", name=f"{name}_relu")(out)
    return out
def mlp_block(x: tf.Tensor, filters: int, name: str) -> tf.Tensor:
    """Dense -> BatchNorm -> ReLU, with layer names derived from *name*."""
    out = layers.Dense(filters, name=f"{name}_dense")(x)
    out = layers.BatchNormalization(momentum=0.0, name=f"{name}_batch_norm")(out)
    out = layers.Activation("relu", name=f"{name}_relu")(out)
    return out
"""
Implement a regularizer (taken from this example) to enforce orthogonality
in the feature space. This is needed to ensure that the magnitudes of the transformed
features do not vary too much.
"""
class OrthogonalRegularizer(keras.regularizers.Regularizer):
    """Penalize deviation of x @ x^T from the identity, encouraging the
    predicted feature transform to be (near-)orthogonal.

    Reference: https://keras.io/examples/vision/pointnet/#build-a-model
    """

    def __init__(self, num_features, l2reg=0.001):
        self.num_features = num_features
        self.l2reg = l2reg
        self.identity = tf.eye(num_features)

    def __call__(self, x):
        # x arrives flattened as (batch, num_features**2); restore the matrix.
        x = tf.reshape(x, (-1, self.num_features, self.num_features))
        xxt = tf.tensordot(x, x, axes=(2, 2))
        xxt = tf.reshape(xxt, (-1, self.num_features, self.num_features))
        return tf.reduce_sum(self.l2reg * tf.square(xxt - self.identity))

    def get_config(self):
        # Bug fix: the original called ``super(TransformerEncoder, self)`` --
        # ``TransformerEncoder`` is not defined anywhere in this module
        # (copy-paste from another example) and would raise NameError the
        # moment the model was serialized. The config keys must also mirror
        # __init__'s parameters ("l2reg", not "l2reg_strength") so keras can
        # re-instantiate the regularizer from its config.
        return {"num_features": self.num_features, "l2reg": self.l2reg}
def transformation_net(inputs: tf.Tensor, num_features: int, name: str) -> tf.Tensor:
    """T-Net: regress a flattened num_features x num_features transform matrix.

    Reference: https://keras.io/examples/vision/pointnet/#build-a-model.
    The `filters` values come from the original paper:
    https://arxiv.org/abs/1612.00593.
    """
    features = conv_block(inputs, filters=64, name=f"{name}_1")
    features = conv_block(features, filters=128, name=f"{name}_2")
    features = conv_block(features, filters=1024, name=f"{name}_3")
    pooled = layers.GlobalMaxPooling1D()(features)
    dense = mlp_block(pooled, filters=512, name=f"{name}_1_1")
    dense = mlp_block(dense, filters=256, name=f"{name}_2_1")
    # Bias initialized to the identity so the net starts as a no-op transform;
    # the orthogonal regularizer keeps it well-conditioned during training.
    return layers.Dense(
        num_features * num_features,
        kernel_initializer="zeros",
        bias_initializer=keras.initializers.Constant(np.eye(num_features).flatten()),
        activity_regularizer=OrthogonalRegularizer(num_features),
        name=f"{name}_final",
    )(dense)
def transformation_block(inputs: tf.Tensor, num_features: int, name: str) -> tf.Tensor:
    """Predict a T-Net transform for *inputs* and apply it back onto *inputs*."""
    matrix = transformation_net(inputs, num_features, name=name)
    matrix = layers.Reshape((num_features, num_features))(matrix)
    # Batched matmul: (B, N, F) x (B, F, F) -> (B, N, F).
    return layers.Dot(axes=(2, 1), name=f"{name}_mm")([inputs, matrix])
def get_shape_segmentation_model(num_points: int, num_classes: int) -> keras.Model:
    """Assemble the full PointNet part-segmentation model.

    Parameters
    ----------
    num_points: points per cloud; fixes the global max-pool / tile size.
    num_classes: number of per-point part labels (softmax width).

    Returns
    -------
    keras.Model mapping (batch, None, 3) point clouds to per-point class
    probabilities of shape (batch, num_points, num_classes).
    """
    input_points = keras.Input(shape=(None, 3))

    # PointNet Classification Network.
    transformed_inputs = transformation_block(
        input_points, num_features=3, name="input_transformation_block"
    )
    features_64 = conv_block(transformed_inputs, filters=64, name="features_64")
    features_128_1 = conv_block(features_64, filters=128, name="features_128_1")
    features_128_2 = conv_block(features_128_1, filters=128, name="features_128_2")
    transformed_features = transformation_block(
        features_128_2, num_features=128, name="transformed_features"
    )
    features_512 = conv_block(transformed_features, filters=512, name="features_512")
    features_2048 = conv_block(features_512, filters=2048, name="pre_maxpool_block")
    # Pool over all points into one global descriptor, then tile it back to
    # every point so it can be concatenated with the per-point features.
    global_features = layers.MaxPool1D(pool_size=num_points, name="global_features")(
        features_2048
    )
    global_features = tf.tile(global_features, [1, num_points, 1])

    # Segmentation head: fuse local features at several depths with the
    # global descriptor, per point.
    segmentation_input = layers.Concatenate(name="segmentation_input")(
        [
            features_64,
            features_128_1,
            features_128_2,
            transformed_features,
            features_512,
            global_features,
        ]
    )
    segmentation_features = conv_block(
        segmentation_input, filters=128, name="segmentation_features"
    )
    outputs = layers.Conv1D(
        num_classes, kernel_size=1, activation="softmax", name="segmentation_head"
    )(segmentation_features)
    return keras.Model(input_points, outputs)
def generate_pointnet_segmentation_model(num_points=1024, num_classes=5):
    """Build the PointNet segmentation model and print its summary.

    Returns the keras.Model, or implicitly None if construction failed --
    errors are printed rather than re-raised. NOTE(review): consider
    re-raising so callers do not need to None-check.
    """
    try:
        segmentation_model = get_shape_segmentation_model(num_points, num_classes)
        segmentation_model.summary()
        return segmentation_model
    except Exception as ex:
        print('Caught Exception while Generating Pointnet Segmentation Model Architecture: %s' %(str(ex)))
"""
For the training the authors of pointnet segmnetation recommend using a learning rate schedule
that decays the initial learning rate by half every 20 epochs. For this experiment, we use to
change it every 15 epochs.
"""
def generate_pointnet_segmentation_lr_schedule(**kwargs):
    """Build a piecewise-constant LR schedule: the initial LR is halved
    after 15 epochs and quartered after 30, per the comment above (the
    PointNet authors decay every 20 epochs; this experiment uses 15).

    Keyword Args:
        total_training_examples (int): dataset size, used to derive steps/epoch.
        BATCH_SIZE (int): batch size.
        EPOCHS (int): total epochs (used only for the progress printout).
        INITIAL_LR (float): starting learning rate.

    Returns:
        keras.optimizers.schedules.PiecewiseConstantDecay
    """
    total_training_examples = kwargs.get('total_training_examples')
    BATCH_SIZE = kwargs.get('BATCH_SIZE')
    EPOCHS = kwargs.get('EPOCHS')
    INITIAL_LR = kwargs.get('INITIAL_LR')

    training_step_size = total_training_examples // BATCH_SIZE
    total_training_steps = training_step_size * EPOCHS
    print(f"Total training steps: {total_training_steps}.")

    # Bug fix: the original passed the same boundary twice
    # ([15 * steps, 15 * steps]), so the middle value (INITIAL_LR * 0.5)
    # was never active. Three LR values need two distinct boundaries.
    lr_schedule = keras.optimizers.schedules.PiecewiseConstantDecay(
        boundaries=[training_step_size * 15, training_step_size * 30],
        values=[INITIAL_LR, INITIAL_LR * 0.5, INITIAL_LR * 0.25],
    )
    return lr_schedule
def train_pointnet_segmenter(model=None, train_dataset=None, test_dataset=None, model_history_logger=None, lr_schedule=None, epochs=None):
    """Compile and fit the segmentation model, checkpointing on best val_loss.

    All arguments are required in practice: if any is None the function
    silently does nothing and returns None. On success the model is returned
    with its best-val_loss weights restored; on failure the exception is
    printed (not re-raised) and None is returned implicitly.
    """
    try:
        if model is not None and train_dataset is not None and test_dataset is not None and model_history_logger is not None \
            and lr_schedule is not None and epochs is not None:
            model.compile(
                loss=keras.losses.CategoricalCrossentropy(),
                optimizer=keras.optimizers.Adam(learning_rate=lr_schedule),
                metrics=["accuracy"],
            )
            # NOTE(review): Windows-style relative path -- confirm this
            # directory exists on the training host.
            checkpoint_filepath = r"modelling_pipeline\models\checkpoint"
            checkpoint_callback = keras.callbacks.ModelCheckpoint(
                checkpoint_filepath,
                monitor="val_loss",
                save_best_only=True,
                save_weights_only=True,
            )
            model.fit(train_dataset,
                      epochs=epochs,
                      validation_data=test_dataset,
                      callbacks=[model_history_logger, checkpoint_callback])
            # Restore the best (lowest val_loss) weights before returning.
            model.load_weights(checkpoint_filepath)
            return model
    except Exception as ex:
        print('Caught Exception while training the Segmentation Pointnet Model: %s' %(str(ex)))
295304 | import numpy as NP
from scipy import signal
from scipy import interpolate
import mathops as OPS
import lookup_operations as LKP
#################################################################################
def unwrap_FFT2D(arg, **kwarg):
    # Module-level trampoline so ``numpy.fft.fft2`` can be handed to
    # ``multiprocessing.Pool.map``-style APIs (nested/bound callables do not
    # pickle on Python 2). ``arg`` is expected to be a tuple of positional
    # arguments -- NOTE(review): confirm against the pool callers.
    return NP.fft.fft2(*arg, **kwarg)
def unwrap_IFFT2D(arg, **kwarg):
    # Inverse-FFT counterpart of unwrap_FFT2D: picklable module-level wrapper
    # around ``numpy.fft.ifft2`` for multiprocessing use. ``arg`` is expected
    # to be a tuple of positional arguments -- NOTE(review): confirm callers.
    return NP.fft.ifft2(*arg, **kwarg)
#################################################################################
def FT1D(inp, ax=-1, use_real=False, shift=False, inverse=False, verbose=True):
    """
    -----------------------------------------------------------------------------
    Compute FFT/IFFT using Numpy.

    NOTE(review): Python 2 print statements -- this module targets Python 2.

    Inputs:

    inp:      Input data (vector or array) to be Fourier transformed

    Keyword Inputs:

    ax:       Axis (scalar integer) over which FFT is performed. Default = -1
              (last axis)

    use_real: [Boolean scalar] If True, compute only the positive frequency
              components using the real part of the data

    shift:    [Boolean] If True, shift the result to make the zeroth component
              move to the center. Default=False

    inverse:  [Boolean] If True, compute the inverse FFT. If False, compute the
              FFT. Default=False

    Outputs:

    fftout:   FFT/IFFT of input data over the specified axes
    -----------------------------------------------------------------------------
    """

    # NOTE(review): this guard can never fire for a required positional
    # parameter (a missing argument raises TypeError at the call site); kept
    # as-is for consistency with the rest of the module.
    try:
        inp
    except NameError:
        raise NameError('inp not defined. Aborting FT1D().')

    if not isinstance(inp, NP.ndarray):   # type(inp) is numpy.ndarray
        raise TypeError('Input array should be Numpy array data type')

    if inverse:
        if use_real:
            # Only the real part is inverted via irfft (positive-frequency input).
            inp = NP.real(inp)
            if verbose:
                print "Opted for IFFT of real data. Hence performing numpy.irfft()."
                print "numpy.irfft() returns only positive frequencies."
            fftout = NP.fft.irfft(inp, axis=ax)
        else:
            fftout = NP.fft.ifft(inp, axis=ax)
        if shift:
            # Undo a center-origin layout before/after the inverse transform.
            fftout = NP.fft.ifftshift(fftout, axes=ax)
    else:
        if use_real:
            inp = NP.real(inp)
            if verbose:
                print "Opted for FFT of real data. Hence performing numpy.rfft()."
                print "numpy.rfft() returns only positive frequencies."
            fftout = NP.fft.rfft(inp, axis=ax)
        else:
            fftout = NP.fft.fft(inp, axis=ax)
        if shift:
            # Move the zero-frequency component to the center of the axis.
            fftout = NP.fft.fftshift(fftout, axes=ax)
    return fftout
#################################################################################
def spectral_axis(length, delx=1.0, shift=False, use_real=False):
    """
    -----------------------------------------------------------------------------
    Compute the discrete spectral (frequency) axis of an FFT.

    Inputs:

    length:   Length of the vector to be Fourier transformed

    Keyword Inputs:

    delx:     Sample spacing along the original axis. Default = 1.0

    shift:    [Boolean scalar] True => shift the zero frequency to the center

    use_real: [Boolean scalar] True => only the positive frequencies, as
              returned by numpy.fft.rfftfreq()

    Output:

    The frequency samples corresponding to the FFT output bins.
    -----------------------------------------------------------------------------
    """
    freq_fn = NP.fft.rfftfreq if use_real else NP.fft.fftfreq
    axis_vals = freq_fn(length, d=delx)
    return NP.fft.fftshift(axis_vals) if shift else axis_vals
#################################################################################
def rfft_append(inp, axis=0):
    """
    -----------------------------------------------------------------------------
    Compute the negative-frequency components left out by numpy.rfft() and
    append them, in the right order, to the rfft output.

    Input:

    inp         Input data of any dimensions to which the negative frequency
                components have to be appended.

    Keyword Input:

    axis        [scalar] Axis along which the components are appended. Must be
                a scalar in the range 0 to Ndim-1.

    Output:

    The input with conjugated, reversed interior samples appended along the
    given axis.
    -----------------------------------------------------------------------------
    """

    # NOTE(review): this guard can never fire for a required positional
    # parameter; kept for consistency with the rest of the module.
    try:
        inp
    except NameError:
        raise NameError('inp undefined. Aborting rfft_append()')

    if not isinstance(inp, NP.ndarray):
        raise TypeError('inp should be Numpy array data type.')

    if isinstance(axis, (list, tuple, str)):
        raise TypeError('axis should be a scalar integer in the range 0 to Ndim-1')
    axis = int(axis)

    shape = NP.shape(inp)
    n_dim = len(shape)
    if (axis < 0) or (axis >= n_dim):
        raise ValueError("Input data does not contain the axis specified. Aborted execution in reverse()")

    # A singleton axis has no interior samples to mirror.
    if shape[axis] == 1:
        return inp

    # Negative frequencies are the conjugate of the reversed interior
    # (excluding DC and Nyquist) of the positive-frequency half.
    negatives = NP.conj(OPS.reverse(inp, axis=axis, ind_range=[1, shape[axis] - 2]))
    return NP.append(inp, negatives, axis=axis)
#################################################################################
def rfftfreq_append(rfft_freqs):
    """
    -----------------------------------------------------------------------------
    Compute the negative frequencies for the output of numpy.rfftfreq() and
    arrange them so the result matches numpy.fftfreq().

    Input:

    rfft_freqs      [Vector] Positive frequencies as returned by rfftfreq.

    Output:

    The full (positive and negative) frequency vector, equal to the output
    of numpy.fftfreq() for the same transform length.

    NOTE(review): this reconstruction is exact only for even transform
    lengths (where rfftfreq's last element is the Nyquist bin); for odd
    lengths the largest positive frequency is dropped -- confirm callers
    only use even lengths.
    -----------------------------------------------------------------------------
    """

    try:
        rfft_freqs
    except NameError:
        raise NameError('Input rfft_freqs not specified. Aborting rfftfreq_append()')

    if not isinstance(rfft_freqs, (list, NP.ndarray)):
        raise TypeError('Input rfft_freqs should be a list or a 1D Numpy array')

    rfft_freqs = NP.asarray(rfft_freqs)
    positives = rfft_freqs[:-1]
    negatives = -rfft_freqs[-1:0:-1]
    return NP.append(positives, negatives, axis=0)
#################################################################################
def shaping(N_samples, fraction=1.0, shape='rect', area_normalize=False,
            peak=None, verbose=True, centering=False):
    """
    -----------------------------------------------------------------------------
    Routine to produce sequences which can be used as shaping windows for other
    sequences.

    NOTE(review): Python 2 print statements -- this module targets Python 2.
    NOTE(review): the ``centering`` keyword is accepted but never used in the
    body below; padding is always split symmetrically. Confirm intent.

    Inputs:

    N_samples    [Integer] Number of samples in the sequence. Should be positive

    Keyword inputs:

    fraction     [Float] Fraction of N_samples outside of which the sequence is
                 zero-padded. The fraction of N_samples is where the shaped
                 sequence is actually generated. For fraction less than unity,
                 the zero padding comes about symmetrically at the edges.

    shape        [string] Shape type. Currently allowed values are 'rect', 'bhw'
                 and 'bnw' for rectangular, Blackman-Harris and Blackman-Nuttall
                 windows respectively

    area_normalize
                 [Boolean] True means re-normalize the sequence to have unit
                 area. False means no re-normalization is performed. Cannot be
                 set simultaneously if peak is set.

    peak         [Float] If set, rescale the sequence so the peak is set to the
                 specified value.

    verbose      [Boolean] If set, print progress and/or diagnostic messages.

    Output:

    samples      [Numpy array] Sequence containing the required shape and zero
                 padding if fraction < 1.0
    -----------------------------------------------------------------------------
    """

    # NOTE(review): this guard can never fire for a required positional
    # parameter; kept for consistency with the rest of the module.
    try:
        N_samples
    except NameError:
        raise NameError('Number of samples undefined. Aborting shaping().')

    # peak and area_normalize are mutually exclusive rescaling modes.
    if (area_normalize) and (peak is not None):
        raise ValueError('Both area_normalize and peak cannot be set at the same time in shaping().')

    if not isinstance(area_normalize, bool):
        raise TypeError('area_normalize should be a boolean value. Aborting shaping().')

    if peak is not None:
        if not isinstance(peak, (int, float)):
            raise ValueError('Peak should be a scalar value. Aborting shaping().')

    if not isinstance(N_samples, (int, float)):
        raise TypeError('N_samples should be a positive integer. Aborting shaping().')
    else:
        if N_samples < 1.0:
            raise ValueError('N_samples should be a positive integer. Aborting shaping().')
    N_samples = int(N_samples)

    if fraction <= 0.0:
        raise ValueError('fraction should be in the range 0.0 < fraction <= 1.0. Aborting shaping().')
    if fraction > 1.0:
        fraction = 1.0
        if verbose:
            print 'fraction was found to exceed 1.0. Resetting fraction to 1.0 in shaping().'

    center = int(0.5 * N_samples)
    N_window = N_samples * fraction

    # Force an odd window length for the cosine-sum windows (so they have a
    # single central peak), and for a rectangular window smaller than the
    # full span (so the zero padding can be symmetric).
    if (N_window % 2) == 0.0:
        if (shape == 'bnw') or (shape == 'BNW') or (shape == 'bhw') or (shape == 'BHW'):
            N_window = int(N_window - 1)
        if (N_window < N_samples) and ((shape == 'rect') or (shape == 'RECT')):
            N_window = int(N_window - 1)
    elif (N_window % 2.0) < 1.0:
        N_window = NP.ceil(N_window)
    elif (N_window % 2.0) >= 1.0:
        N_window = NP.floor(N_window)

    # NOTE(review): N_window can remain a float after ceil/floor; numpy
    # accepts float sizes here (deprecated in newer numpy) -- confirm the
    # numpy version in use.
    if (shape == 'rect') or (shape == 'RECT'):
        window = NP.ones(N_window)
    elif (shape == 'bnw') or (shape == 'BNW'):
        # Blackman-Nuttall 4-term cosine-sum coefficients.
        a = [0.3635819, -0.4891775, 0.1365995, -0.0106411]
        window = a[0]*NP.ones(N_window) + a[1]*NP.cos(2*NP.pi*NP.arange(N_window)/(N_window-1)) + a[2]*NP.cos(4*NP.pi*NP.arange(N_window)/(N_window-1)) + a[3]*NP.cos(6*NP.pi*NP.arange(N_window)/(N_window-1))
    elif (shape == 'bhw') or (shape == 'BHW'):
        # Blackman-Harris 4-term cosine-sum coefficients.
        a = [0.35875, -0.48829, 0.14128, -0.01168]
        window = a[0]*NP.ones(N_window) + a[1]*NP.cos(2*NP.pi*NP.arange(N_window)/(N_window-1)) + a[2]*NP.cos(4*NP.pi*NP.arange(N_window)/(N_window-1)) + a[3]*NP.cos(6*NP.pi*NP.arange(N_window)/(N_window-1))

    # Pad symmetrically up to N_samples (extra zero goes to the prefix).
    N_zeros = N_samples - N_window

    if N_zeros > 0:
        N_zeros_pfx = NP.ceil(0.5*N_zeros)
        samples = NP.concatenate((NP.zeros(N_zeros_pfx), window))
        N_zeros_sfx = N_zeros - N_zeros_pfx
        if N_zeros_sfx > 0:
            samples = NP.concatenate((samples, NP.zeros(N_zeros_sfx)))
    else:
        samples = window

    if peak is not None:
        samples *= peak/NP.amax(NP.abs(samples))
        if verbose:
            print 'Rescaled the shaping window to peak value.'
    elif area_normalize:
        area = NP.trapz(samples) # Beware that NP.trapz could differ from NP.cumsum due to edge effects. Sufficient padding will make them converge
        samples /= area
        if verbose:
            print 'Renormalized the shaping window to unit area.'

    return samples
#################################################################################
def windowing(N_window, shape='rect', pad_width=0, pad_value=0.0,
              area_normalize=False, peak=None, power_normalize=False,
              verbose=True, centering=True):
    """
    -----------------------------------------------------------------------------
    Produce a window function of a given shape, optionally padded and
    re-normalized, to be used for shaping other sequences.
    Inputs:
    N_window   [Integer] Number of samples in the actual window. Should be
               positive
    Keyword inputs:
    shape      [string] Shape type. Currently allowed values are 'rect',
               'bnw' and 'bhw' for rectangular, Blackman-Nuttall and
               Blackman-Harris windows respectively (case-insensitive)
    pad_width  [scalar integer] Number of padding samples. It has to be
               non-negative. Padding values are provided in pad_value.
    pad_value  [scalar] Value placed in the padding samples. Default = 0.0
    area_normalize
               [Boolean] True means re-normalize the window to have unit
               area (sum). Cannot be set simultaneously with peak or
               power_normalize.
    peak       [Float] If set, rescale the window so the absolute peak equals
               the specified value. Only one of peak, area_normalize or
               power_normalize can be set
    power_normalize
               [Boolean] True means re-normalize the window to have unit
               power. Cannot be set simultaneously with peak or
               area_normalize.
    verbose    [Boolean] If set, print progress and/or diagnostic messages.
    centering  [Boolean] If set to True, centers the window with close to
               symmetric padding on either side. If False, padding is done on
               the right side. Default = True
    Output:
    window     [Numpy array] window containing the required shape and padding
               if pad_width > 0
    Raises:
    NameError, TypeError or ValueError on missing/invalid inputs, including an
    unrecognized shape (which previously surfaced as a NameError on return).
    -----------------------------------------------------------------------------
    """
    try:
        N_window
    except NameError:
        raise NameError('Window size undefined. Aborting windowing().')
    if not isinstance(area_normalize, bool):
        raise TypeError('area_normalize should be a boolean value. Aborting windowing().')
    if not isinstance(power_normalize, bool):
        raise TypeError('power_normalize should be a boolean value. Aborting windowing().')
    if peak is not None:
        if not isinstance(peak, (int, float)):
            raise ValueError('Peak should be a scalar value. Aborting windowing().')
    # peak, area_normalize and power_normalize are mutually exclusive
    num_norms = area_normalize + power_normalize + (peak is not None)
    if num_norms > 1:
        raise ValueError('Only one of peak, area_normalize or power_normalize can be set at the same time in windowing().')
    if not isinstance(N_window, (int, float)):
        raise TypeError('N_window should be a positive integer. Aborting windowing().')
    N_window = int(N_window)
    if N_window < 1:
        raise ValueError('N_window should be a positive integer. Aborting windowing().')
    if not isinstance(pad_width, (int, float)):
        raise TypeError('pad_width must be an integer.')
    if pad_width < 0.0:
        raise ValueError('pad_width must be non-negative')
    pad_width = int(pad_width)

    # 4-term cosine-sum coefficients for the Blackman-family windows
    coeffs = {'bnw': [0.3635819, -0.4891775, 0.1365995, -0.0106411],
              'bhw': [0.35875, -0.48829, 0.14128, -0.01168]}

    def _cosine_sum(n, a):
        # 4-term cosine-series window evaluated on n samples (n >= 2).
        # The 1.0 forces float division, which also guards against py2
        # integer division when __future__.division is not in effect.
        t = NP.arange(n) / (n - 1.0)
        return (a[0] + a[1]*NP.cos(2*NP.pi*t) + a[2]*NP.cos(4*NP.pi*t)
                + a[3]*NP.cos(6*NP.pi*t))

    shape_key = shape.lower() if isinstance(shape, str) else shape
    if shape_key == 'rect':
        if centering:
            window = NP.pad(NP.ones(N_window),
                            (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))),
                            mode='constant', constant_values=(pad_value, pad_value))
        else:
            window = NP.pad(NP.ones(N_window), (0, pad_width), mode='constant',
                            constant_values=(pad_value, pad_value))
    elif shape_key in ('bnw', 'bhw'):
        a = coeffs[shape_key]
        if N_window % 2 == 1:
            # Odd length: the window is symmetric about its center sample
            win = _cosine_sum(N_window, a)
            if centering:
                window = NP.pad(win,
                                (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))),
                                mode='constant', constant_values=(pad_value, pad_value))
            elif pad_width >= 1:
                # Borrow one pad sample for the left edge (matches historical layout)
                window = NP.pad(win, (1, pad_width-1), mode='constant',
                                constant_values=(pad_value, pad_value))
            else:
                window = win
        else:
            # Even length: generate an odd (N_window-1)-sample window and use
            # one extra pad sample to reach the requested total length
            win = _cosine_sum(N_window-1, a)
            if centering:
                window = NP.pad(win,
                                (int(NP.ceil(0.5*(pad_width+1))), int(NP.floor(0.5*(pad_width+1)))),
                                mode='constant', constant_values=(pad_value, pad_value))
            else:
                window = NP.pad(win, (1, pad_width), mode='constant',
                                constant_values=(pad_value, pad_value))
    else:
        # Previously an unknown shape fell through and raised a NameError on
        # 'return window'; fail early with a meaningful message instead.
        raise ValueError('Invalid window shape specified in windowing().')
    if peak is not None:
        window *= peak/NP.amax(NP.abs(window))
        if verbose:
            print('\tRescaled the shaping window to peak value.')
    elif area_normalize:
        # Using sum is preferable to using trapz although less accurate
        # especially when FFT is going to be involved later on.
        area = NP.sum(window)
        window /= area
        if verbose:
            print('\tRenormalized the shaping window to unit area.')
    elif power_normalize:
        powr = NP.sum(NP.abs(window)**2)
        window /= NP.sqrt(powr)
        if verbose:
            print('\tRenormalized the shaping window to unit power.')
    return window
#################################################################################
def window_fftpow(N_window, shape='rect', pad_width=0, pad_value=0.0,
                  fftpow=1.0, area_normalize=False, peak=None,
                  power_normalize=False, verbose=True, centering=True):
    """
    -----------------------------------------------------------------------------
    Routine to produce window functions including ability to raise the FFT to a
    given power.
    Inputs:
    N_window   [Integer] Number of samples in the actual window. Should be
               positive
    Keyword inputs:
    shape      [string] Shape type. Currently allowed values are 'rect',
               'bnw' and 'bhw' for rectangular, Blackman-Nuttall and
               Blackman-Harris windows respectively (case-insensitive)
    pad_width  [scalar integer] Number of padding samples. It has to be
               non-negative. Padding values are provided in pad_value.
    pad_value  [scalar] Value placed in the padding samples. Default = 0.0
    fftpow     [scalar] The FFT of the window will be raised to this power.
               Must be non-negative. Default = 1.0
    area_normalize
               [Boolean] True means re-normalize the window to have unit
               area (sum). Cannot be set simultaneously with peak or
               power_normalize.
    peak       [Float] If set, rescale the window so the absolute peak equals
               the specified value. Only one of peak, area_normalize or
               power_normalize can be set
    power_normalize
               [Boolean] True means re-normalize the window to have unit
               power. Cannot be set simultaneously with peak or
               area_normalize.
    verbose    [Boolean] If set, print progress and/or diagnostic messages.
    centering  [Boolean] If set to True, centers the window with close to
               symmetric padding on either side. If False, padding is done on
               the right side. Default = True
    Output:
    window     [Numpy array] window containing the required shape and padding
               if pad_width > 0
    -----------------------------------------------------------------------------
    """
    try:
        N_window
    except NameError:
        raise NameError('Window size undefined. Aborting windowing().')
    if not isinstance(area_normalize, bool):
        raise TypeError('area_normalize should be a boolean value. Aborting windowing().')
    if not isinstance(power_normalize, bool):
        raise TypeError('power_normalize should be a boolean value. Aborting windowing().')
    if peak is not None:
        if not isinstance(peak, (int, float)):
            raise ValueError('Peak should be a scalar value. Aborting windowing().')
    # peak, area_normalize and power_normalize are mutually exclusive
    num_norms = area_normalize + power_normalize + (peak is not None)
    if num_norms > 1:
        raise ValueError('Only one of peak, area_normalize or power_normalize can be set at the same time in windowing().')
    if not isinstance(N_window, (int, float)):
        raise TypeError('N_window should be a positive integer. Aborting windowing().')
    N_window = int(N_window)
    if N_window < 1:
        raise ValueError('N_window should be a positive integer. Aborting windowing().')
    if not isinstance(pad_width, (int, float)):
        raise TypeError('pad_width must be an integer.')
    pad_width = int(pad_width)
    if pad_width < 0:
        raise ValueError('pad_width should be non-negative. Aborting windowing().')
    if not isinstance(fftpow, (int, float)):
        raise TypeError('Input fftpow must be a scalar')
    fftpow = float(fftpow)
    if fftpow < 0.0:
        raise ValueError('Input fftpow must be non-negative')

    eps = 1e-10  # tolerance on residual imaginary part after the inverse FFT

    def _apply_fftpow(win, nwin):
        # Raise the FFT of win to fftpow, invert, verify the result is
        # (numerically) real, then zero the leftover tail and re-center.
        fftwin = NP.fft.fft(win) ** fftpow
        win = NP.fft.ifft(fftwin)
        if NP.abs(win.imag).max()/NP.abs(win).max() >= eps:
            raise ValueError('Significant imaginary component found in FFT-based window generation. Need to investigate. Aborting...')
        win = win.real
        # For (near-)integer powers the width of the raised window is known
        # analytically, so trailing wrap-around samples can be zeroed out.
        if NP.abs(fftpow % 1.0) < 1e-6:
            nzeros = max(N_window - (fftpow * nwin - (fftpow - 1)), 0)
            if nzeros > 0:
                win[-int(nzeros):] = 0.0
                win = NP.roll(win, int(NP.ceil(0.5*nzeros)))
        return win

    # Number of samples in the base window before its FFT is raised to fftpow.
    # float() replaces NP.float, which was removed from modern numpy.
    if fftpow != 1.0:
        nwin = int(NP.ceil(N_window/float(fftpow)))
    else:
        nwin = N_window

    # 4-term cosine-sum coefficients for the Blackman-family windows
    coeffs = {'bnw': [0.3635819, -0.4891775, 0.1365995, -0.0106411],
              'bhw': [0.35875, -0.48829, 0.14128, -0.01168]}

    def _cosine_sum(n, a):
        # 4-term cosine-series window evaluated on n samples (n >= 2);
        # the 1.0 forces float division (guards py2 integer division)
        t = NP.arange(n) / (n - 1.0)
        return (a[0] + a[1]*NP.cos(2*NP.pi*t) + a[2]*NP.cos(4*NP.pi*t)
                + a[3]*NP.cos(6*NP.pi*t))

    shape_key = shape.lower() if isinstance(shape, str) else shape
    if shape_key == 'rect':
        if fftpow != 1.0:
            # NP.float64 replaces NP.float_, removed in numpy 2.0
            win = NP.zeros(N_window, dtype=NP.float64)
            win[:nwin] = 1.0
            win = _apply_fftpow(win, nwin)
        else:
            win = NP.ones(N_window, dtype=NP.float64)
        if centering:
            window = NP.pad(win,
                            (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))),
                            mode='constant', constant_values=(pad_value, pad_value))
        else:
            window = NP.pad(win, (0, pad_width), mode='constant',
                            constant_values=(pad_value, pad_value))
    elif shape_key in ('bnw', 'bhw'):
        a = coeffs[shape_key]
        win = NP.zeros(N_window, dtype=NP.float64)
        if nwin % 2 == 1:
            win[:nwin] = _cosine_sum(nwin, a)
            if fftpow != 1.0:
                win = _apply_fftpow(win, nwin)
            if centering:
                window = NP.pad(win,
                                (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))),
                                mode='constant', constant_values=(pad_value, pad_value))
            elif pad_width >= 1:
                # borrow one pad sample for the left edge (historical layout)
                window = NP.pad(win, (1, pad_width-1), mode='constant',
                                constant_values=(pad_value, pad_value))
            else:
                window = win
        else:
            if nwin == N_window:
                # Even base window spanning the full array: generate an odd
                # (nwin-1)-sample window offset by one leading zero
                win[1:] = _cosine_sum(nwin-1, a)
            else:
                win[:nwin] = _cosine_sum(nwin, a)
            if fftpow != 1.0:
                win = _apply_fftpow(win, nwin)
            if centering:
                window = NP.pad(win,
                                (int(NP.ceil(0.5*pad_width)), int(NP.floor(0.5*pad_width))),
                                mode='constant', constant_values=(pad_value, pad_value))
            else:
                window = NP.pad(win, (0, pad_width), mode='constant',
                                constant_values=(pad_value, pad_value))
    else:
        # Previously an unknown shape fell through and raised a NameError on
        # the normalization/return path; fail early with a clear message.
        raise ValueError('Invalid window shape specified in window_fftpow().')
    if peak is not None:
        window *= peak/NP.amax(NP.abs(window))
        if verbose:
            print('\tRescaled the shaping window to peak value.')
    elif area_normalize:
        # Using sum is preferable to using trapz although less accurate
        # especially when FFT is going to be involved later on.
        area = NP.sum(window)
        window /= area
        if verbose:
            print('\tRenormalized the shaping window to unit area.')
    elif power_normalize:
        powr = NP.sum(NP.abs(window)**2)
        window /= NP.sqrt(powr)
        if verbose:
            print('\tRenormalized the shaping window to unit power.')
    return window
#################################################################################
def window_N2width(n_window=None, shape='rect', area_normalize=True,
                   power_normalize=False, fftpow=1.0):
    """
    -----------------------------------------------------------------------------
    Determine the effective width of a window function as a fraction of its
    total width.
    Inputs:
    n_window   [int] Number of samples in the window function. Default = None
               sets it to 1000000
    shape      [string] Window shape. Accepted values are 'rect' (rectangular,
               default), 'bnw' (Blackman-Nuttall) and 'bhw' (Blackman-Harris)
    area_normalize
               [Boolean] True gives the fractional width relative to a
               rectangular window by computing the area under the window.
               Exactly one of area_normalize or power_normalize must be set
    power_normalize
               [Boolean] True gives the fractional width relative to a
               rectangular window by computing the power under the window.
               Exactly one of area_normalize or power_normalize must be set
    fftpow     [scalar] The FFT of the window will be raised to this power.
               Must be non-negative. Default = 1.0
    Output:
    Fraction of the total number of samples, so that the effective width of the
    window is frac_width * n_window (e.g. 1.0 for shape = 'rect').
    -----------------------------------------------------------------------------
    """
    # Validate inputs with guard clauses
    if n_window is None:
        n_window = 1000000
    elif not isinstance(n_window, int):
        raise TypeError('Number of samples must be an integer')
    elif n_window <= 0:
        raise ValueError('Number of samples must be positive')
    if not isinstance(fftpow, (int, float)):
        raise TypeError('Input fftpow must be a scalar')
    fftpow = float(fftpow)
    if fftpow < 0.0:
        raise ValueError('Input fftpow must be non-negative')
    if area_normalize + power_normalize != 1:
        raise ValueError('One and only one of area_normalize or power_normalize can be set at the same time.')
    if not isinstance(shape, str):
        raise TypeError('Window shape must be a string')
    if shape not in ('rect', 'RECT', 'bnw', 'BNW', 'bhw', 'BHW'):
        raise ValueError('Invalid window shape specified')
    # Generate the raw (un-normalized) window and measure its fractional width
    window = window_fftpow(n_window, shape=shape, peak=None, fftpow=fftpow,
                           area_normalize=False, power_normalize=False)
    peak_normed = window / window.max()
    if area_normalize:
        return NP.sum(peak_normed) / n_window
    return NP.sqrt(NP.sum(peak_normed**2) / n_window)
#################################################################################
# def window_width2N(shape='rect'):
# """
# -----------------------------------------------------------------------------
# Determine total fractional width to produce a window of a specified shape.
# Inputs:
# shape [string] Specifies window shape. Accepted values are 'rect'
# (rectangular, default), 'bnw' (Blackman-Nuttall) and 'bhw'
# (Blackman-Harris)
# Output:
# f_window is the fractional width of the full window relative to the required
# effective width for a given window shape. For instance, f_window = 1 for
# shape = 'rect'
# -----------------------------------------------------------------------------
# """
# if not isinstance(shape, str):
# raise TypeError('Window shape must be a string')
# elif shape not in ['rect', 'RECT', 'bnw', 'BNW', 'bhw', 'BHW']:
# raise ValueError('Invalid window shape specified')
# frac_width = window_N2width(shape=shape)
# f_window = 1 / frac_width
# return f_window
#################################################################################
def downsampler(inp, factor, axis=-1, verbose=True, method='interp',
                kind='linear', fill_value=NP.nan):
    """
    -----------------------------------------------------------------------------
    Routine to downsample a given input sequence along a specific dimension
    where the input could be multi-dimensional (up to 8 dimensions).
    Inputs:
    inp        [Numpy array] array which has to be downsampled. Cannot have
               more than 8 dimensions
    factor     [scalar] downsampling factor. Positive integer or floating
               point number greater than or equal to unity. If an integer,
               output is simply a strided subset of the input. If not an
               integer, downsampling is obtained by interpolation or FFT
               resampling depending on method.
    Keyword Inputs:
    axis       [scalar] Integer specifying the axis along which the array is
               to be downsampled. Default = -1, the last axis.
    verbose    [Boolean] If set to True, will print progress and/or
               diagnostic messages. Default = True
    method     [string] Specifies the method for resampling. Accepted values
               are 'FFT'/'fft' and 'interp' (default) for FFT-based and
               interpolation based techniques respectively. If method chosen
               is 'interp' then the value in keyword kind determines the
               kind of interpolation.
    kind       [string] Specifies the kind of interpolation. Applies only if
               method is 'interp' and factor is not an integer. Accepted
               values are 'linear', 'quadratic' and 'cubic'.
               Default = 'linear'
    fill_value [scalar] Value to fill locations outside the index range of
               input array. Default = NaN
    Output:
    Downsampled numpy array (a strided view for integer factors).
    -----------------------------------------------------------------------------
    """
    try:
        inp
    except NameError:
        raise NameError('No input specified. Aborting downsampler().')
    try:
        factor
    except NameError:
        if verbose:
            print('No downsampling factor specified. No downsampling performed on input.')
        # BUGFIX: previously returned the builtin `input`, not the array `inp`
        return inp
    if not isinstance(inp, NP.ndarray):
        raise TypeError('Input should be a numpy array. Aborting downsampler().')
    if not isinstance(factor, (int, float)):
        raise TypeError('Downsampling factor must be a scalar value.')
    if factor < 1.0:
        raise ValueError('Downsampling factor must be greater than 1.')
    # BUGFIX: upper bound is ndim-1, not ndim
    if (axis < -inp.ndim) or (axis >= inp.ndim):
        raise IndexError('The axis specified does not exist in the input. Aborting downsampler().')
    if inp.ndim > 8:
        raise ValueError('The routine cannot handle inputs with more than 8 dimensions. Aborting downsampler().')
    axis = axis % inp.ndim  # normalize negative axis to a positive index
    if (factor % 1) == 0:
        # Integer factor: a strided slice along the chosen axis replaces the
        # previous 150-line per-ndim/per-axis dispatch
        slicer = [slice(None)] * inp.ndim
        slicer[axis] = slice(None, None, int(factor))
        return inp[tuple(slicer)]
    if method == 'interp':
        if verbose:
            print('Determining the interpolating function for downsampling.')
        tol = 1e-10  # guard against losing the last sample to float round-off
        reqd_inds = NP.arange(0, inp.shape[axis]-1+tol, factor)
        # NOTE(review): OPS.interpolate_array comes from a project module not
        # visible here; presumably an axis-aware wrapper over 1-D
        # interpolation -- confirm against its definition
        result = OPS.interpolate_array(inp, NP.arange(inp.shape[axis]), reqd_inds, axis=axis, kind=kind)
    elif method in ['FFT', 'fft']:
        nout = NP.round(inp.shape[axis] / factor).astype(int)
        result = signal.resample(inp, nout, t=None, axis=axis, window=None)
    else:
        raise ValueError('Invalid method specified for downsampling')
    if verbose:
        print('Returning the downsampled data.')
    return result
#################################################################################
def upsampler(inp, factor, axis=-1, verbose=True, kind='linear',
              fill_value=NP.nan):
    """
    -----------------------------------------------------------------------------
    Routine to upsample a given input sequence along a specific dimension
    where the input could be multi-dimensional (up to 8 dimensions).
    Inputs:
    inp        [Numpy array] array which has to be upsampled. Cannot have
               more than 8 dimensions. NOTE: a 1D input is promoted to a
               (1, N) row vector, so its output is 2D (preserved for
               backward compatibility).
    factor     [scalar] upsampling factor. Positive integer or floating
               point number greater than or equal to unity. Upsampling is
               obtained by interpolation.
    Keyword Inputs:
    axis       [scalar] Integer specifying the axis along which the array is
               to be upsampled. Default = -1, the last axis.
    verbose    [Boolean] If set to True, will print progress and/or
               diagnostic messages. Default = True
    kind       [string] Specifies the kind of interpolation. Accepted values
               are 'linear', 'quadratic' and 'cubic'. Default = 'linear'
    fill_value [scalar] Value to fill locations outside the index range of
               input array. Default = NaN
    Output:
    Upsampled numpy array of length inp.shape[axis]*factor along axis.
    -----------------------------------------------------------------------------
    """
    try:
        inp
    except NameError:
        raise NameError('No input specified. Aborting upsampler().')
    try:
        factor
    except NameError:
        if verbose:
            print('No upsampling factor specified. No upsampling performed on input.')
        # BUGFIX: previously returned the builtin `input`, not the array `inp`
        return inp
    if not isinstance(inp, NP.ndarray):
        raise TypeError('Input should be a numpy array. Aborting upsampler().')
    if not isinstance(factor, (int, float)):
        raise TypeError('Upsampling factor must be a scalar value.')
    if factor < 1.0:
        raise ValueError('Upsampling factor must be greater than 1.')
    if inp.ndim < 2:
        inp = inp.reshape(1, -1)
    # BUGFIX: -ndim is a valid axis and ndim is not; bounds were off by one
    if (axis < -inp.ndim) or (axis >= inp.ndim):
        raise IndexError('The axis specified does not exist in the input. Aborting upsampler().')
    if inp.ndim > 8:
        raise ValueError('The routine cannot handle inputs with more than 8 dimensions. Aborting upsampler().')
    if factor == 1:
        if verbose:
            print('Upsampling factor is 1. No upsampling performed. Returning the original array.')
        return inp
    if verbose:
        print('Determining the interpolating function for upsampling.')
    # bounds_error=False so out-of-range sample locations receive fill_value
    # (the documented behavior) instead of interp1d raising ValueError
    intpfunc = interpolate.interp1d(NP.arange(inp.shape[axis]), inp,
                                    kind=kind, fill_value=fill_value,
                                    bounds_error=False, axis=axis)
    # BUGFIX: 1.0/factor -- under py2, 1/factor with an integer factor is
    # integer division, yielding a zero step for arange
    reqd_inds = NP.arange(0, inp.shape[axis], 1.0/factor)
    if verbose:
        print('Returning the upsampled data.')
    return intpfunc(reqd_inds)
#################################################################################
def XC(inp1, inp2=None, pow2=False, shift=True):
    """
    -----------------------------------------------------------------------------
    Cross-correlate two sequences.
    Inputs:
    inp1:  [list or numpy array] First sequence.
    inp2:  [list or numpy array] If not given, auto-correlation of inp1 is
           returned.
    pow2   [boolean] If set to True, will pad the results of the correlation
           with zeros so the length of the correlated sequence is equal to the
           next power of 2. If set to False, the correlated sequence is just
           padded with one sample of value 0. Default = False
    shift: [Boolean] If True, shift the correlated product such that it is
           represented in FFT format. i.e., the first sample corresponds to
           zero lag followed by positive lags. The second half corresponds to
           negative lags. Default = True
    Output: The correlation of input sequences inp1 and inp2. The output is of
            length inp1.size+inp2.size-1 zero padded to the nearest power of 2
            (if pow2 is True) or zero padded by one sample (if pow2 is False)
            and shifted to be identical to a Fourier transform based estimate.
    -----------------------------------------------------------------------------
    """
    try:
        inp1
    except NameError:
        raise NameError('inp1 not defined. Aborting XC().')
    if not isinstance(inp1, (list, tuple, NP.ndarray, int, float, complex)):
        raise TypeError('inp1 is of the wrong data type. Check inputs again. Aborting XC().')
    inp1 = NP.asarray(inp1)
    if inp2 is None:
        inp2 = NP.copy(inp1)
    elif not isinstance(inp2, (list, tuple, int, float, complex, NP.ndarray)):
        raise TypeError('inp2 has incompatible data type. Verify inputs. Aborting XC().')
    inp2 = NP.asarray(inp2)
    if pow2:
        # BUGFIX: cast to int -- 2**NP.ceil(...) is a numpy float, which
        # NP.pad rejects as a pad width. .size (not len()) also handles the
        # scalar inputs the isinstance checks above admit.
        full_len = inp1.size + inp2.size - 1
        zero_pad_length = int(2**int(NP.ceil(NP.log2(full_len))) - full_len)
    else:
        zero_pad_length = 1
    # Pad on the left, then roll half the padding back so the zeros are split
    # (close to) evenly around the correlation product
    xc = NP.pad(NP.correlate(inp1, inp2, mode='full'), (zero_pad_length, 0),
                mode='constant', constant_values=(0.0, 0.0))
    xc = NP.roll(xc, -int(NP.floor(0.5*zero_pad_length)))
    if shift:
        # Put zero lag first, matching the layout of an FFT-based estimate
        xc = NP.fft.ifftshift(xc)
    return xc
#################################################################################
def spectax(length, resolution=1.0, shift=True, use_real=False):
    """
    -----------------------------------------------------------------------------
    Determine the spectral axis after a Fourier Transform.
    Inputs:
    length     [Scalar] Positive integer specifying the length of the sequence
               which is to be Fourier transformed
    resolution [Scalar] Positive value for the resolution in the sequence
               before the Fourier Transform
    Keyword Inputs:
    use_real   [Boolean] If true, the input sequence is assumed to consist
               only of real values and the spectral axis is computed
               accordingly. Default = False
    shift      [Boolean] If true, the spectral axis values are shifted
               corresponding to a fftshift. Default = True
    Output:
    Spectral axis for an input sequence of the given length and resolution.
    -----------------------------------------------------------------------------
    """
    try:
        length
    except NameError:
        raise NameError('Input length not defined. Aborting spectax().')
    # Validate resolution before delegating to spectral_axis()
    if isinstance(resolution, (int, float)):
        if resolution < 0.0:
            raise ValueError('Input resolution must be positive. Aborting spectax().')
    else:
        raise TypeError('Input resolution must be a positive scalar integer or floating point number. Aborting spectax().')
    return spectral_axis(length, resolution, shift, use_real)
#################################################################################
def smooth(inp, wts=None, width=None, stat='mean', verbose=True):
    """
    -----------------------------------------------------------------------------
    Smoothen the input data using a moving average or median window.

    Inputs:
    inp     [Numpy vector] sequence to be smoothed.

    Keyword Inputs:
    wts     [Numpy vector] 1 x P window of weights for the moving average
            (ignored for median). Weights are normalized to unit sum.
            Exactly one of wts and width must be set.
    width   [scalar] Width of the moving window. Must be positive.
    stat    [string] 'mean' (default) or 'median'.
    verbose [boolean] If True (default), print progress messages.

    Output:
    Smoothed sequence (same length as input).

    Raises ValueError for an even window width with median filtering.
    -----------------------------------------------------------------------------
    """
    try:
        inp
    except NameError:
        raise NameError('No input sequence specified.')
    if stat is None:
        stat = 'mean'
    if (stat != 'mean') and (stat != 'median'):
        raise ValueError('stat can only be either mean or median.')
    if (wts is None) and (width is None):
        raise NameError('Neither weights nor window width specified.')
    if wts is not None:
        # Force a float copy for integer inputs so normalization is exact
        wts = NP.asarray(wts, dtype=float)
        wts_shape = wts.shape
        if len(wts_shape) == 1:
            pass
        elif (wts_shape[0] == 1) or (wts_shape[1] == 1):
            wts = NP.ravel(wts)
        else:
            raise TypeError('wts must be a vector.')
        # BUGFIX: normalize out-of-place (the in-place version mutated the
        # caller's array) and normalize the raveled 2-D case too, which was
        # previously skipped
        wts = wts / NP.sum(wts)
        width = wts.size
    else:
        width = int(width)
        if width <= 0:
            raise ValueError('Window width has to be positive.')
        wts = NP.ones(width)/width
    if width == 1:
        if verbose:
            print('\tWindow width is one. Input will be returned without smoothing.')
        return inp
    if stat == 'mean':
        out = NP.convolve(inp, wts, mode='same')
    else:
        if width % 2 == 0:
            # BUGFIX: previously this error was raised only when verbose=True;
            # with verbose=False execution fell through to an undefined `out`
            raise ValueError('\tWindow width must be odd for median filtering.')
        out = signal.medfilt(inp, width)
    return out
#################################################################################
def filter_(inp, wts=None, width=None, passband='low', verbose=True):
    """
    -----------------------------------------------------------------------------
    Filter the input data using a low or high pass filter in the frequency
    domain along columns (axis 1).

    Inputs:
    inp      [list or numpy array] M x N data to be filtered across columns.
             Vectors are reshaped to 1 x N.

    Keyword Inputs:
    wts      [numpy vector or array] 1 x P or M x P frequency window of
             weights (P <= N). The zeroth-frequency weight is scaled to
             unity. Exactly one of wts and width must be set.
    width    [scalar] Width of the frequency window as a fraction of the
             bandwidth (i.e. of N). Must be positive; widths >= 1.0 return
             the input unfiltered.
    passband [string] 'low' (default) or 'high'.
    verbose  [boolean] If True (default), print progress messages.

    Output:
    Filtered output (complex M x N numpy array).
    -----------------------------------------------------------------------------
    """
    try:
        inp
    except NameError:
        raise NameError('No input specified for filtering.')
    if isinstance(inp, list):
        inp = NP.asarray(inp)
    elif not isinstance(inp, NP.ndarray):
        raise TypeError('Input should be of type list or numpy array.')
    # Always work on a 2-D (M x N) view with samples along axis 1
    if len(inp.shape) == 1:
        inp = inp.reshape(1, -1)
    elif (inp.shape[0] == 1) or (inp.shape[1] == 1):
        inp = inp.reshape(1, -1)
    if (passband != 'low') and (passband != 'high'):
        raise ValueError('Invalid passband specified. Valid passbands are low or high.')
    if (wts is None) and (width is None):
        raise NameError('Neither frequency weights nor filter width specified.')
    if wts is None:
        if not isinstance(width, (int, float)):
            raise TypeError('Filter width should be a scalar.')
        if width <= 0.0:
            raise ValueError('Filter width should be positive.')
        elif width >= 1.0:
            if verbose:
                print('\tFilter width exceeds 1.0. Returning input without filtering.')
            return inp
        # Even samples in input or low passband: keep the filter width odd.
        # Odd samples in input and high passband: keep the filter width even,
        # so there are no imaginary parts after filtering.
        filter_width = int(NP.floor(inp.shape[1] * width))
        if (inp.shape[1] % 2 == 0) or (passband == 'low'):
            if filter_width % 2 == 0:
                filter_width = filter_width + 1
                if filter_width > inp.shape[1]:
                    filter_width = inp.shape[1]
            wts = NP.ones(filter_width).reshape(1, -1)  # Simple rectangular filter
            pads = inp.shape[1] - filter_width
            if pads > 0:
                wts = NP.hstack((wts, NP.zeros(pads).reshape(1, -1)))
            wts = NP.repeat(wts, inp.shape[0], axis=0)
            if passband == 'low':
                wts = NP.roll(wts, -int(0.5*filter_width), axis=1)
            else:
                wts = NP.fft.fftshift(NP.roll(wts, -int(0.5*filter_width), axis=1), axes=1)
        else:
            if filter_width % 2 != 0:
                filter_width = filter_width + 1
                if filter_width > inp.shape[1]:
                    filter_width = inp.shape[1]
            wts = NP.ones(filter_width).reshape(1, -1)  # Simple rectangular filter
            pads = inp.shape[1] - filter_width
            if pads > 0:
                wts = NP.hstack((wts, NP.zeros(pads).reshape(1, -1)))
            wts = NP.repeat(wts, inp.shape[0], axis=0)
            wts = NP.fft.fftshift(NP.roll(wts, -(filter_width//2 - 1), axis=1), axes=1)
    else:
        if isinstance(wts, list):
            # BUGFIX: previously converted the builtin `list` instead of wts
            wts = NP.asarray(wts)
        elif not isinstance(wts, NP.ndarray):
            raise TypeError('Frequency weights should be a numpy array.')
        if len(wts.shape) > 2:
            raise IndexError('Dimensions of frequency weights exceed dimensions of input.')
        elif len(wts.shape) == 1:
            wts = wts.reshape(1, -1)
        elif (wts.shape[0] == 1) or (wts.shape[1] == 1):
            wts = wts.reshape(1, -1)
        elif (wts.shape[0] > inp.shape[0]) or (wts.shape[1] > inp.shape[1]):
            raise IndexError('Dimensions of frequency weights exceed dimensions of input.')
        wshape = wts.shape
        if (wts.shape[0] != 1) and (wts.shape[0] != inp.shape[0]):
            raise IndexError('Dimensions of frequency weights exceed dimensions of input.')
        pads = inp.shape[1] - wts.shape[1]
        if pads > 0:
            if (wts.shape[0] == 1):
                # BUGFIX: padding must be a 1 x pads row (was reshape(-1,1))
                wts = NP.hstack((wts, NP.zeros(pads).reshape(1, -1)))
                wts = NP.repeat(wts, inp.shape[0], axis=0)
            else:
                # BUGFIX: hstack takes a tuple of arrays
                wts = NP.hstack((wts, NP.zeros((inp.shape[0], pads))))
        else:
            if (wts.shape[0] == 1):
                wts = NP.repeat(wts, inp.shape[0], axis=0)
        if passband == 'low':
            wts = NP.roll(wts, -int(0.5*wshape[1]), axis=1)
        else:
            wts = NP.fft.fftshift(NP.roll(wts, -int(0.5*wshape[1]), axis=1), axes=1)
    # Scale the weights so the zeroth frequency has a weight of unity
    wts = wts/wts[0, 0]
    return NP.fft.ifft(NP.fft.fft(inp, axis=1) * wts, axis=1)
#################################################################################
def PFB_empirical(nchan, bank_nchan, rise_frac_half_max, fall_frac_half_max,
                  log_fall_rise=True, verbose=True):
    """
    -----------------------------------------------------------------------------
    Create a polyphase filter bank window from empirical models.

    Inputs:
    nchan         [scalar] total number of samples in the output
    bank_nchan    [scalar] number of channels in each filter bank; must not
                  exceed nchan
    rise_frac_half_max
                  [scalar in 0..1] fraction of bank_nchan over which the
                  function rises from half-maximum to maximum (at the start
                  of each bank)
    fall_frac_half_max
                  [scalar in 0..1] fraction of bank_nchan over which the
                  function falls from maximum to half-maximum (at the end
                  of each bank)
    log_fall_rise [boolean] rise/fall in logarithmic (True, default) or
                  linear intervals
    verbose       [boolean] kept for interface compatibility (unused)

    Output:
    window [numpy vector] frequency window function of size nchan
    -----------------------------------------------------------------------------
    """
    try:
        nchan, bank_nchan, rise_frac_half_max, fall_frac_half_max
    except NameError:
        raise NameError('nchan, bank_nchan, rise_frac_half_max, fall_frac_half_max must be specified.')
    if not isinstance(nchan, (int, float)):
        raise TypeError('nchan must be a scalar integer.')
    if not isinstance(bank_nchan, (int, float)):
        raise TypeError('bank_nchan must be a scalar integer.')
    if not isinstance(rise_frac_half_max, (int, float)):
        raise TypeError('rise_frac_half_max must be a scalar value.')
    if not isinstance(fall_frac_half_max, (int, float)):
        raise TypeError('fall_frac_half_max must be a scalar value.')
    if nchan <= 0:
        raise ValueError('nchan must be a positive integer.')
    if bank_nchan <= 0:
        raise ValueError('bank_nchan must be a positive integer.')
    if (rise_frac_half_max < 0.0) or (rise_frac_half_max > 1.0):
        raise ValueError('rise_frac_half_max must lie in the range 0 and 1.')
    if (fall_frac_half_max < 0.0) or (fall_frac_half_max > 1.0):
        raise ValueError('fall_frac_half_max must lie in the range 0 and 1.')
    if bank_nchan > nchan:
        raise TypeError('Number of channels, bank_nchan, in the filter bank must be less than the total number of channels, nchan.')
    # Integer sizes are required by NP.ones / NP.repeat
    nchan = int(nchan)
    bank_nchan = int(bank_nchan)
    # BUGFIX: nrise must come from rise_frac_half_max and nfall from
    # fall_frac_half_max; the two were previously swapped
    nrise = int(rise_frac_half_max * bank_nchan)
    nfall = int(fall_frac_half_max * bank_nchan)
    bank_window = NP.ones(bank_nchan)
    if log_fall_rise:
        fall = 10.0 ** NP.linspace(NP.log10(1.0), NP.log10(0.5), nfall)
        rise = 10.0 ** NP.linspace(NP.log10(0.5), NP.log10(1.0), nrise)
    else:
        fall = NP.linspace(1.0, 0.5, nfall)
        rise = NP.linspace(0.5, 1.0, nrise)
    # Rise at the start of each bank, fall at its end
    bank_window[:nrise] = rise
    bank_window[bank_nchan-nfall:] = fall
    # Tile the per-bank window across all banks and trim to nchan samples
    nbanks = int(NP.ceil(1.0 * nchan / bank_nchan))
    window = NP.repeat(bank_window.reshape(1, -1), nbanks, axis=0)
    window = window.ravel()
    window = window[:nchan]
    return window
#################################################################################
def PFB_FIR_coeff(nside, ntap, cutoff, nbits, window=('kaiser',5)):
    """Design the prototype FIR filter of a polyphase filter bank and
    quantize its taps to signed nbits-wide fixed-point integers.

    nside  -- number of PFB channels (points per tap)
    ntap   -- number of taps; the filter has nside*ntap coefficients
    cutoff -- normalized cutoff frequency passed to scipy.signal.firwin
    nbits  -- word width of the quantized coefficients
    window -- firwin window specification (default Kaiser, beta=5)
    """
    taps = signal.firwin(nside*ntap, cutoff, window=window)
    full_scale = 2**(nbits-1)
    # Normalize the peak tap to just below full scale so the rounded
    # coefficients fit in an nbits-wide signed word.
    taps = (1 - 1.0/full_scale) * taps/taps.max()
    return NP.round(taps * full_scale)
#################################################################################
def apply_PFB_filter(inp, nside, ntap, coeff=None, cutoff=None, nbits=None,
                     window=('kaiser',5)):
    """Apply a polyphase filter bank to the first nside*ntap samples of inp.

    inp    -- input sample vector (at least nside*ntap samples are used)
    nside  -- number of PFB channels
    ntap   -- number of taps
    coeff  -- precomputed FIR coefficients; designed via PFB_FIR_coeff()
              (with cutoff defaulting to 1/nside and nbits to 12) when None
    Returns the first nside//2 complex channel outputs.
    """
    if coeff is None:
        if cutoff is None:
            cutoff = 1.0/nside
        if nbits is None:
            nbits = 12
        coeff = PFB_FIR_coeff(nside, ntap, cutoff, nbits, window=window)
        # NOTE(review): 8192*32 rescales the quantized taps back to unit
        # amplitude for the default nbits -- confirm for other word widths
        coeff = coeff/8192/32  # Normalize the amplitude
    # Weight the input block, fold the taps on top of each other, transform
    arr = (inp[:nside*ntap]*coeff).reshape(ntap, nside)
    wsum = NP.sum(arr, axis=0)
    wf = NP.fft.ifft(wsum) * wsum.size
    # BUGFIX: nside/2 is a float in Python 3 and cannot be used as an index
    return wf[:nside//2]
#################################################################################
def PFB_shape(coeffs):
    """Placeholder for deriving the polyphase-filter-bank response shape
    directly from its FIR coefficients. Not implemented yet; pfbshape()
    below provides an empirical (tone-sweep) equivalent."""
    pass
#################################################################################
def sinewave(freq, tlen=8192, clock_freq=655.36):
    """Sample unit-amplitude sine waves at the given clock rate.

    freq must be a numpy array of frequencies; the result has one row per
    frequency (shape: freq.size x tlen).
    """
    sample_interval = 1./clock_freq
    timestamps = NP.arange(tlen) * sample_interval
    phase = 2*NP.pi * freq.reshape(-1, 1) * timestamps.reshape(1, -1)
    return NP.sin(phase)
#################################################################################
def pfbshape(freq):
    """Empirical power response of the 512-channel, 8-tap PFB: feeds test
    tones at the given frequencies (numpy array) through the filter bank
    and returns the per-channel power, one row per frequency."""
    coeff = PFB_FIR_coeff(512, 8, 1.0/512, 12, window=('kaiser',5))
    twaves = sinewave(freq, tlen=4096)
    # BUGFIX: 512/2 is a float in Python 3 and cannot size an array. A
    # complex buffer is used so the complex PFB output is not silently
    # truncated to its real part (the float buffer raised ComplexWarning).
    filter_twaves = NP.empty((freq.size, 512//2), dtype=NP.complex128)
    for i in range(freq.size):
        filter_twaves[i,:] = apply_PFB_filter(twaves[i,:], 512, 8, coeff=coeff)
    return NP.abs(filter_twaves)**2
#################################################################################
def fft_filter(inp, axis=None, wts=None, width=None, passband='low', verbose=True):
    """
    -----------------------------------------------------------------------------
    Filter the input data using a low or high pass filter in the frequency
    domain along an axis.

    Inputs:
    inp      [numpy array or list] input data to be filtered along an axis

    Keyword Inputs:
    axis     [scalar integer] axis over which the FFT is performed.
             Default = None (last axis). Negative or out-of-range values are
             reset to the last axis.
    wts      [numpy array] frequency window of weights. Must either match
             inp's shape or contain inp.shape[axis] elements. The weights
             correspond to the fftshifted frequency domain: a low-pass
             filter is dominant around the middle of the sequence, a
             high-pass filter at its ends. Default = None (use width).
    width    [scalar] filter width as a fraction of the bandwidth. Must be
             positive; widths >= 1.0 return the input unfiltered. Exactly
             one of wts and width must be set.
    passband [string] 'low' (default) or 'high'.
    verbose  [boolean] if True (default), print progress messages.

    Output:
    Filtered output (complex numpy array) with the same shape as inp.
    -----------------------------------------------------------------------------
    """
    if verbose:
        print('Entering fft_filtering()...')
        print('\tChecking inputs for compatibility...')
    try:
        inp
    except NameError:
        raise NameError('No input specified for filtering.')
    if isinstance(inp, list):
        inp = NP.asarray(inp)
    elif not isinstance(inp, NP.ndarray):
        raise TypeError('Input should be of type list or numpy array.')
    if (passband != 'low') and (passband != 'high'):
        raise ValueError('Invalid passband specified. Valid passbands are low or high.')
    if (wts is None) and (width is None):
        raise NameError('Neither frequency weights nor filter width specified.')
    if axis is None:
        axis = len(inp.shape) - 1
    elif not isinstance(axis, int):
        raise TypeError('axis must be an integer')
    elif (axis < 0) or (axis >= len(inp.shape)):
        axis = len(inp.shape) - 1
        if verbose:
            print('\tSetting axis to be the last dimension of data')
    if wts is None:
        if not isinstance(width, (int, float)):
            raise TypeError('Filter width should be a scalar.')
        if verbose:
            print('\tFrequency weights not provided. Using filter width instead.')
        if width <= 0.0:
            raise ValueError('Filter width should be positive.')
        elif width >= 1.0:
            if verbose:
                print('\tFilter width exceeds 1.0. Returning input without filtering.')
            return inp
        # Broadcastable shape: 1 everywhere except along the filtered axis
        shape = NP.asarray(inp.shape)
        shape[NP.arange(len(shape)) != axis] = 1
        shape = tuple(shape)
        # Even samples in input or low passband: keep the filter width odd.
        # Odd samples in input and high passband: keep the filter width even,
        # so there are no imaginary parts after filtering.
        filter_width = int(NP.floor(inp.shape[axis] * width))
        if (inp.shape[axis] % 2 == 0) or (passband == 'low'):
            if filter_width % 2 == 0:
                filter_width = filter_width + 1
                if filter_width > inp.shape[axis]:
                    filter_width = inp.shape[axis]
            if verbose:
                print('\tAdjusting filter width to integer and to have FFT symmetry properties.')
            wts = NP.ones(filter_width)  # Simple rectangular filter
            if verbose:
                print('\tFrequency weights have been set to be a rectangular filter of unit height')
            pads = shape[axis] - filter_width
            if pads > 0:
                wts = NP.pad(wts, (0, pads), mode='constant', constant_values=(0,))
            wts = wts.reshape(shape)
            if passband == 'low':
                wts = NP.roll(wts, -int(0.5*filter_width), axis=axis)
            else:
                wts = NP.fft.fftshift(NP.roll(wts, -int(0.5*filter_width), axis=axis), axes=axis)
        else:
            if filter_width % 2 != 0:
                filter_width = filter_width + 1
                # BUGFIX: the bound check used inp.shape[1], which fails for
                # 1-D input and is wrong whenever axis != 1
                if filter_width > inp.shape[axis]:
                    filter_width = inp.shape[axis]
            wts = NP.ones(filter_width)  # Simple rectangular filter
            pads = shape[axis] - filter_width
            if pads > 0:
                wts = NP.pad(wts, (0, pads), mode='constant', constant_values=(0,))
            wts = wts.reshape(shape)
            wts = NP.fft.fftshift(NP.roll(wts, -(filter_width//2 - 1), axis=axis), axes=axis)
    else:
        if isinstance(wts, list):
            # BUGFIX: previously converted the builtin `list` instead of wts
            wts = NP.asarray(wts)
        elif not isinstance(wts, NP.ndarray):
            raise TypeError('Frequency weights should be a numpy array.')
        shape = NP.asarray(inp.shape)
        shape[NP.arange(len(shape)) != axis] = 1
        if wts.size == inp.shape[axis]:
            wts = wts.reshape(tuple(shape))
        elif wts.shape != inp.shape:
            raise IndexError('Dimensions of frequency weights do not match dimensions of input.')
        wshape = wts.shape
        if passband == 'low':
            wts = NP.roll(wts, -int(0.5*wshape[axis]), axis=axis)
        else:
            wts = NP.fft.fftshift(NP.roll(wts, -int(0.5*wshape[axis]), axis=axis), axes=axis)
    # wts = wts/wts.ravel()[0] # Scale the weights to have zeroth frequency to have weight of unity
    out = NP.fft.ifft(NP.fft.fft(inp, axis=axis) * wts, axis=axis)
    out = out.reshape(inp.shape)
    if verbose:
        print('\tInput data successfully filtered in the frequency domain.')
    return out
#################################################################################
def discretize(inp, nbits=None, nlevels=None, inprange=None, mode='floor',
               discrete_out=True, verbose=True):
    """
    -----------------------------------------------------------------------------
    Discretize the input sequence either through truncation or rounding to
    the nearest levels.

    Inputs:
    inp      [numpy array or list] input sequence; may be complex, in which
             case real and imaginary parts are discretized separately.
    nbits    [scalar integer] number of bits; the number of levels is
             2**nbits. Takes precedence over nlevels when both are given.
    nlevels  [scalar integer > 1] number of levels; used only when nbits is
             not set.
    inprange [2-element list] (min, max) bounds for the data; values outside
             are clipped. Defaults to the data min/max when None.
    mode     [string] 'floor'/'truncate' (or None) truncates to the next
             lower level; 'round'/'nearest' rounds to the nearest level
             (requires the external LKP lookup module).
    discrete_out
             [boolean] if True (default), return integer level indices;
             if False, indices are multiplied by the level interval.
    verbose  [boolean] print progress and diagnostic messages.

    Outputs:
    Tuple (inpmod, inpmin, interval): the discretized array (same shape as
    input), the lower bound used, and the resolution between levels.
    -----------------------------------------------------------------------------
    """
    try:
        inp
    except NameError:
        raise NameError('inp must be provided')
    if isinstance(inp, list):
        inp = NP.asarray(inp)
    elif not isinstance(inp, NP.ndarray):
        raise TypeError('inp must be a list or numpy array')
    if (nbits is None) and (nlevels is None):
        if verbose:
            print('\tnbits or nlevels must be specified for discretization. Returning input unmodified')
        return inp
    elif (nbits is not None) and (nlevels is not None):
        nlevels = None  # nbits takes precedence
    if nlevels is not None:
        if not isinstance(nlevels, (int, float)):
            raise TypeError('nlevels must be an integer')
        nlevels = int(nlevels)
        if nlevels <= 1:
            raise ValueError('nlevels must be greater than 1')
    if nbits is not None:
        if not isinstance(nbits, (int, float)):
            raise TypeError('nbits must be an integer')
        nbits = int(nbits)
        if nbits <= 0:
            raise ValueError('nbits must be greater than 0')
        nlevels = 2**nbits
    if inprange is None:
        if NP.iscomplexobj(inp):
            inpmin = min(inp.real.min(), inp.imag.min())
            inpmax = max(inp.real.max(), inp.imag.max())
        else:
            inpmin = inp.min()
            inpmax = inp.max()
    else:
        inprange = NP.asarray(inprange).ravel()
        inpmin = inprange[0]
        inpmax = inprange[1]
    # NOTE(review): interval is zero (division by zero below) when
    # inpmax == inpmin -- confirm whether constant inputs need guarding
    interval = (inpmax - inpmin)/nlevels
    # BUGFIX: NP.float was removed in numpy >= 1.24; use the builtin float
    levels = NP.arange(nlevels, dtype=float)
    # Map the data onto the 0..nlevels scale before quantizing
    if NP.iscomplexobj(inp):
        scaled_inp = (inp - inpmin*(1+1j))/interval
    else:
        scaled_inp = (inp - inpmin)/interval
    if (mode == 'floor') or (mode == 'truncate') or (mode is None):
        if NP.iscomplexobj(scaled_inp):
            inpmod = NP.clip(NP.floor(scaled_inp.real), levels.min(), levels.max()) + 1j * NP.clip(NP.floor(scaled_inp.imag), levels.min(), levels.max())
        else:
            inpmod = NP.clip(NP.floor(scaled_inp), levels.min(), levels.max())
    elif (mode == 'round') or (mode == 'nearest'):
        # Nearest-level rounding via the external LKP lookup module
        # (imported at the top of this file)
        inpshape = scaled_inp.shape
        scaled_inp = scaled_inp.reshape(-1,1)
        if NP.iscomplexobj(scaled_inp):
            inpind, distNN, refind = LKP.find_1NN(levels.reshape(-1,1), scaled_inp.real)
            inpmod = levels[refind].astype(NP.complex64)
            inpind, distNN, refind = LKP.find_1NN(levels.reshape(-1,1), scaled_inp.imag)
            inpmod += 1j * levels[refind]
        else:
            inpind, distNN, refind = LKP.find_1NN(levels.reshape(-1,1), scaled_inp)
            inpmod = levels[refind]
        inpmod = inpmod.reshape(inpshape)
    else:
        raise ValueError('Invalid mode specified for discretization.')
    if not discrete_out:
        # NOTE(review): only the interval scaling is applied here; the
        # inpmin offset described in the docstring is left to the caller
        inpmod *= interval
    return inpmod, inpmin, interval
#################################################################################
| StarcoderdataPython |
317721 | from rest_framework.viewsets import ModelViewSet
from content.api.serializer import ListProductSerializer, DetailProductSerializer
from content.models import Products
class ProductViewset(ModelViewSet):
    """CRUD endpoints for products.

    The list action uses a lightweight serializer; every other action
    (retrieve, create, update, ...) uses the detailed one.
    """
    queryset = Products.objects.all()

    def get_serializer_class(self):
        # Only the list view gets the slim serializer; all remaining
        # actions fall back to the detailed serializer.
        if self.action == 'list':
            return ListProductSerializer
        return DetailProductSerializer
| StarcoderdataPython |
9719996 | from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
from tests.integration.mci import mci_test_urls
class TestEmptyQuestionnaire(IntegrationTestCase):
    """Checks that the summary/submit endpoints cannot be reached before
    any answers have been entered."""

    def test_empty_questionnaire(self):
        # Obtain a session token and follow the session redirect by hand
        token = create_token('<PASSWORD>', '1')
        response = self.client.get('/session?token=' + token.decode(), follow_redirects=False)
        self.assertEqual(response.status_code, 302)
        intro_url = response.location

        # The introduction (landing) page should render with a start button
        response = self.client.get(intro_url, follow_redirects=False)
        self.assertEqual(response.status_code, 200)
        page_content = response.get_data(True)
        self.assertRegex(page_content, '>Start survey<')

        # Starting the questionnaire redirects to the first question page
        start_payload = {
            'action[start_questionnaire]': "Submit Answers"
        }
        response = self.client.post(intro_url, data=start_payload, follow_redirects=False)
        self.assertEqual(response.status_code, 302)
        first_question_url = response.location

        # A GET on the summary page without any answers must bounce back
        # to the questionnaire
        response = self.client.get(mci_test_urls.MCI_0205_SUMMARY, follow_redirects=False)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.location, first_question_url)

        # Likewise a POST to the submit endpoint without answers
        submit_payload = {
            'action[submit_answers]': "Submit Answers"
        }
        response = self.client.post(mci_test_urls.MCI_0205_SUBMIT, data=submit_payload, follow_redirects=False)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response.location, first_question_url)
| StarcoderdataPython |
4947782 | from django.contrib import admin
from django.urls import path, include
from rest_framework.authtoken import views
from api.urls import router
from element.views.element_wiews import ElementListView
# Browser tab title of the Django admin site
admin.site.site_title = '北斗后台管理'
# Site name shown on the admin login page and the admin index page
admin.site.site_header = '北斗后台管理'
# Project-wide URL routing table
urlpatterns = [
    path('admin/', admin.site.urls, ),
    path('api-token-auth/', views.obtain_auth_token, name='auth-token'),  # DRF token-auth endpoint
    path('api-auth/', include('rest_framework.urls')),  # browsable-API login/logout
    path('', include('oauth.urls')),
    path('api/', include(router.urls)),  # router-registered API viewsets
    path('project/', include('project.urls')),
    path('tool/', include('tool.urls')),
    path('eln/', ElementListView.as_view(), name='eln'),
]
# Custom error handlers (dotted view paths resolved by Django)
handler400 = 'oauth.views.error_views.bad_request'
handler403 = 'oauth.views.error_views.permission_denied'
handler404 = 'oauth.views.error_views.page_not_found'
handler500 = 'oauth.views.error_views.server_error'
| StarcoderdataPython |
149786 | <gh_stars>1-10
import selenium
from functions.Functions import Functions as Selenium
import unittest
from classes.FormLogin import EventLogin
from classes.FormTerminosCondiciones import EventTerminosCondiciones as EventTC
class TratamientoDatos(Selenium, unittest.TestCase):
    """UI regression tests for the data-treatment consent, facial enrolment
    and electronic-signature acceptance flow."""

    def setUp(self):
        # Fresh browser session plus the spreadsheet-backed test data
        # before every test case.
        Selenium.abrir_navegador(self)
        Selenium.get_json_file(self, "TerminosCondicion")
        self.driver.maximize_window()

    def testTerminosCondiciones_Aceptar(self):
        """Accepting all three consents must reach the document-approval screen."""
        cedula = Selenium.leer_celda(self, 'K3')
        EventLogin.Loguin(self, cedula, cedula)
        EventTC.AceptarTratamientoDatos(self)
        EventTC.AceptarEnrolamiento(self)
        EventTC.AceptarFirmaElectronica(self)
        Selenium.esperar(self, 2)
        Selenium.assert_text(self, 'AsersionTituloAprobacionDoc', 'VISUALIZACIÓN Y APROBACIÓN DE DOCUMENTOS')

    def testTratamientoDatos_Rechazado(self):
        """Rejecting the data-treatment consent ends the flow immediately."""
        cedula = Selenium.leer_celda(self, 'K4')
        EventLogin.Loguin(self, cedula, cedula)
        EventTC.RechazarTratamientoDatos(self)
        Selenium.esperar(self, 2)

    def testEnrolamiento_Rechazado(self):
        """Accept the data treatment, then reject the facial enrolment."""
        cedula = Selenium.leer_celda(self, 'K5')
        EventLogin.Loguin(self, cedula, cedula)
        Selenium.esperar(self, 2)
        EventTC.AceptarTratamientoDatos(self)
        Selenium.esperar(self, 2)
        EventTC.RechazarEnrolamientoFacial(self)
        Selenium.esperar(self, 2)

    def testFirmaElectronica_Rechazado(self):
        """Accept data treatment and enrolment, then reject the e-signature."""
        cedula = Selenium.leer_celda(self, 'K6')
        EventLogin.Loguin(self, cedula, cedula)
        Selenium.esperar(self, 2)
        EventTC.AceptarTratamientoDatos(self)
        Selenium.esperar(self, 2)
        EventTC.AceptarEnrolamiento(self)
        Selenium.esperar(self, 2)
        EventTC.RechazarFirmaElectronica(self)
        Selenium.esperar(self, 2)
# Allow running this test module directly: python <this file>
if __name__ == '__main__':
    unittest.main()
1734714 | import napari
import dask.array as da
import zarr
import tifffile
def lazy_view(
    tiff_path, channel_names=None, colormaps=None, viewer=None,
    channel_from_to=(None, None)
):
    """Add a (multiscale) TIFF image to a napari viewer with lazy loading.

    tiff_path       path of the TIFF file; 2D single-channel or 3D
                    channels-first stacks are supported
    channel_names   optional list of layer names, one per selected channel
    colormaps       optional list of colormaps, one per selected channel
    viewer          existing napari viewer to add to; a new one is created
                    when None
    channel_from_to (from, to) slice applied to the channel axis

    Returns the napari viewer with the image layers added (hidden).
    """
    target_filepath = tiff_path
    # workaround for Faas pyramid: force non-OME parsing
    tiff = tifffile.TiffFile(target_filepath, is_ome=False)
    n_levels = len(tiff.series[0].levels)
    base_shape = tiff.series[0].shape
    if len(base_shape) == 2:
        # single-channel 2D image: napari gets no channel axis
        n_channels = 1
        channel_axis = None
    elif len(base_shape) == 3:
        # channels-first 3D stack
        n_channels = tiff.series[0].shape[0]
        channel_axis = 0
    else:
        raise NotImplementedError('Only 2D/3D images are currently supported')
    # NOTE(review): the file is closed here although tiff.aszarr() is called
    # further down; this appears to rely on tifffile reopening the handle
    # lazily -- confirm that this is intentional.
    tiff.close()
    channel_from, channel_to = channel_from_to
    # number of channels actually kept after applying the slice
    n_channels = len(range(n_channels)[channel_from:channel_to])
    if channel_names is not None:
        assert n_channels == len(channel_names), (
            f'number of channel names ({len(channel_names)}) must '
            f'match number of channels ({n_channels})'
        )
    if colormaps is not None:
        assert n_channels == len(colormaps), (
            f'number of colormaps ({len(colormaps)}) must '
            f'match number of channels ({n_channels})'
        )
    z = zarr.open(tiff.aszarr(), mode='r')
    # FIXME would this still be the case for single level pyramid?
    assert type(z) == zarr.hierarchy.Group
    # one lazily-evaluated dask array per pyramid level, channel-sliced
    pyramid = [
        da.from_zarr(z[i])[channel_from:channel_to]
        for i in range(n_levels)
    ]
    viewer = viewer if viewer is not None else napari.Viewer()
    viewer.add_image(
        pyramid, multiscale=True, channel_axis=channel_axis,
        visible=False, name=channel_names, colormap=colormaps,
        blending='additive'
    )
    return viewer
def pan_viewer(viewer, center):
    """Pan the napari viewer camera to `center` without changing the zoom.

    The zoom is nudged and then restored -- presumably to force a camera /
    canvas refresh after re-centering (TODO confirm this is still needed).
    """
    current_zoom = viewer.camera.zoom
    viewer.camera.center = center
    viewer.camera.zoom *= 1.001
    # BUGFIX: removed dataset residue ("| StarcoderdataPython") that was
    # fused onto this statement and made it a broken bitwise-or expression.
    viewer.camera.zoom = current_zoom
1648833 | <reponame>InesPessoa/datascience-challenger
from flask import Flask
from flask_cors import CORS
import json
# Flask application instance; CORS is opened for the local frontend
# dev server on port 3000 across all routes.
app = Flask(__name__)
CORS(app, resources={r"*": {"origins": "http://localhost:3000"}})
@app.route('/')
def hello_world():
    """Health-check endpoint for the API root."""
    greeting = 'Hello World!'
    return greeting
@app.route('/questions', methods=["GET"])
def get_questions():
    """Serve the question catalogue from resources/questions.json.

    Uses a context manager so the file handle is always closed (the
    previous version leaked it) and avoids shadowing the name `file`.
    """
    with open("resources/questions.json", "r") as questions_file:
        questions = json.load(questions_file)
    return questions
# Run the Flask development server when executed directly
# (a production deployment would serve `app` through WSGI instead).
if __name__ == '__main__':
    app.run()
| StarcoderdataPython |
633 | import tensorflow
from tensorflow import keras
# Short module-level aliases for the Keras building blocks used below.
# Note the file mixes the `keras` handle (from `from tensorflow import
# keras`) with the fully qualified `tensorflow.keras` path for Conv2D.
Model = keras.models.Model
Dense = keras.layers.Dense
Activation = keras.layers.Activation
Flatten = keras.layers.Flatten
BatchNormalization= keras.layers.BatchNormalization
Conv2D = tensorflow.keras.layers.Conv2D
AveragePooling2D = keras.layers.AveragePooling2D
Input=keras.layers.Input
l2=keras.regularizers.l2
from tensorflow.keras import backend
def resnet_layer(inputs,
                 num_filters=16,
                 kernel_size=3,
                 strides=1,
                 activation='relu',
                 batch_normalization=True,
                 conv_first=True):
    """2D Convolution-Batch Normalization-Activation stack builder.

    # Arguments
        inputs (tensor): input tensor from input image or previous layer
        num_filters (int): Conv2D number of filters
        kernel_size (int): Conv2D square kernel dimensions
        strides (int): Conv2D square stride dimensions
        activation (string): activation name (None disables activation)
        batch_normalization (bool): whether to include batch normalization
        conv_first (bool): conv-bn-activation (True) or
            bn-activation-conv (False)

    # Returns
        Output tensor to feed into the next layer.
    """
    conv = Conv2D(
        num_filters,
        kernel_size=kernel_size,
        strides=strides,
        padding='same',
        kernel_initializer='he_normal',
        kernel_regularizer=l2(1e-4))

    def _bn_act(tensor):
        # Optional batch normalization followed by optional activation.
        if batch_normalization:
            tensor = BatchNormalization()(tensor)
        if activation is not None:
            tensor = Activation(activation)(tensor)
        return tensor

    if conv_first:
        # conv -> BN -> activation
        return _bn_act(conv(inputs))
    # pre-activation variant: BN -> activation -> conv
    return conv(_bn_act(inputs))
def resnet_v2(input, complexityParameter, num_classes=10, dataset='cifar10'):
    """Build a pre-activation (v2) bottleneck ResNet classifier.

    input               Keras input tensor (the name shadows the builtin but
                        is kept for backward compatibility with callers)
    complexityParameter depth multiplier; depth = 9 * complexityParameter + 2
    num_classes         number of softmax output classes
    dataset             currently unused; kept for interface compatibility

    Returns (model, inputs, outputs, logits, final_features).
    """
    depth = complexityParameter * 9 + 2
    if (depth - 2) % 9 != 0:
        raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
    # Start model definition.
    num_filters_in = 16
    num_res_blocks = int((depth - 2) / 9)
    inputs = input
    # Stem: one conv-BN-relu layer before the residual stacks
    x = resnet_layer(inputs=inputs,
                     num_filters=num_filters_in,
                     conv_first=True)
    # Instantiate the stack of residual units
    for stage in range(3):
        for res_block in range(num_res_blocks):
            activation = 'relu'
            batch_normalization = True
            strides = 1
            if stage == 0:
                num_filters_out = num_filters_in * 4
                if res_block == 0:  # first layer and first stage
                    activation = None
                    batch_normalization = False
            else:
                num_filters_out = num_filters_in * 2
                if res_block == 0:  # first layer but not first stage
                    strides = 2  # downsample
            # bottleneck residual unit (1x1 -> 3x3 -> 1x1, pre-activation)
            y = resnet_layer(inputs=x,
                             num_filters=num_filters_in,
                             kernel_size=1,
                             strides=strides,
                             activation=activation,
                             batch_normalization=batch_normalization,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_in,
                             conv_first=False)
            y = resnet_layer(inputs=y,
                             num_filters=num_filters_out,
                             kernel_size=1,
                             conv_first=False)
            if res_block == 0:
                # linear projection residual shortcut connection to match
                # changed dims
                x = resnet_layer(inputs=x,
                                 num_filters=num_filters_out,
                                 kernel_size=1,
                                 strides=strides,
                                 activation=None,
                                 batch_normalization=False)
            x = tensorflow.keras.layers.add([x, y])
        num_filters_in = num_filters_out
    # Add classifier on top.
    # v2 has BN-ReLU before Pooling
    x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = AveragePooling2D(pool_size=8)(x)
    final_features = Flatten()(x)
    logits = Dense(num_classes, kernel_initializer='he_normal')(final_features)
    outputs = Activation('softmax')(logits)
    # Instantiate model.
    model = Model(inputs=inputs, outputs=outputs)
    # BUGFIX: removed dataset residue ("| StarcoderdataPython") that was
    # fused onto this return statement.
    return model, inputs, outputs, logits, final_features
3535929 | # pylint: disable=C0103,E0401,R0913,C0330,too-many-locals
"""
JFSP app.
See gui.py for the Dash components and GUI.
See luts.py for the lookup tables which drive both the data ingest and GUI.
See preprocess.py for the data structure that this code assumes!
"""
import os
import math
import plotly.graph_objs as go
from dash.dependencies import Input, Output
import dash
import pandas as pd
import luts
from gui import layout
# Preprocessed data (see preprocess.py for how these pickles are built).
total_area_burned = pd.read_pickle("total_area_burned.pickle")
veg_counts = pd.read_pickle("veg_counts.pickle")
costs = pd.read_pickle("costs.pickle")
# Window for doing rolling average/std
rolling_window = 10
# The previous code created a throwaway dash.Dash(__name__) instance here
# that was immediately overwritten; only this configured instance is kept.
app = dash.Dash(
    __name__, requests_pathname_prefix=os.environ["REQUESTS_PATHNAME_PREFIX"]
)
# AWS Elastic Beanstalk looks for application by default,
# if this variable (application) isn't set you will get a WSGI error.
application = app.server
app.title = "Alaska Wildfire Management - Possible Futures"
app.layout = layout
@app.callback(
    Output("total_area_burned", "figure"),
    inputs=[
        Input("region", "value"),
        Input("scenarios_checklist", "value"),
        Input("treatment_options_checklist", "value"),
    ],
)
def generate_total_area_burned(region, scenario, treatment_options):
    """Regenerate the decadal box-plot figure for total area burned.

    One box-plot trace is drawn per selected treatment; the historical
    record is merged into every trace, but only the first trace keeps the
    full (untrimmed) year span so historical boxes are not repeated.
    """
    data_traces = []
    # Subset historical data.
    h = total_area_burned[
        (total_area_burned.region == region)
        & (total_area_burned.treatment == luts.historical_categories[1])
    ]
    # For each trace, draw a box plot but don't repeat the
    # plots for historical stuff. Use a counter to decide
    # if to trim the data set.
    counter = 0
    for treatment in treatment_options:
        t = total_area_burned[
            (total_area_burned.region == region)
            & (total_area_burned.scenario == scenario)
            & (total_area_burned.model == luts.MODEL_AVG)
            & (total_area_burned.treatment == treatment)
        ]
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported equivalent.
        t = pd.concat([t, h])
        if counter > 0:
            t["year"] = pd.to_numeric(t.index)
            t = t[(t.year >= 2010) & (t.year <= 2100)]
        # Group the data into decadal buckets, to match what Dash wants
        # for box plots; tag each row with its decade.
        decade_frames = []
        for decade, members in t.groupby(t.index // 10 * 10):
            decade_frames.append(members.assign(decade=decade))
        dt = pd.concat(decade_frames) if decade_frames else pd.DataFrame()
        data_traces.append(
            go.Box(
                name="Area burned, " + luts.treatment_options[treatment],
                x=dt.decade,
                y=dt.area.apply(luts.to_acres),
            )
        )
        counter += 1
    graph_layout = go.Layout(
        title="Total area burned, "
        + luts.regions[region]
        + ", "
        + luts.scenarios[scenario]
        + ", "
        + luts.models[luts.MODEL_AVG],
        showlegend=True,
        legend_orientation="h",
        boxmode="group",
        legend={"font": {"family": "Open Sans", "size": 10}, "y": -0.15},
        xaxis={"title": "Year"},
        yaxis={"title": "Acres", "range": [0, 1900000]},
        height=550,
        margin={"l": 50, "r": 50, "b": 50, "t": 50, "pad": 4},
    )
    return {"data": data_traces, "layout": graph_layout}
@app.callback(
    Output("ia", "figure"),
    inputs=[
        Input("region", "value"),
        Input("scenarios_checklist", "value"),
        Input("treatment_options_checklist", "value"),
    ],
)
def generate_ia(region, scenario, treatment_options):
    """Regenerate the inter-annual variability figure: a 10-year rolling
    standard deviation of area burned, one line per selected treatment."""
    data_traces = []
    for treatment in treatment_options:
        subset = total_area_burned[
            (total_area_burned.region == region)
            & (total_area_burned.scenario == scenario)
            & (total_area_burned.model == luts.MODEL_AVG)
            & (total_area_burned.treatment == treatment)
        ]
        # NOTE(review): an earlier comment said historical data should be
        # merged before computing the rolling std for a meaningful 2010s
        # window, but no merge is performed here -- confirm intent.
        rolling_std = (
            subset.area.rolling(rolling_window, center=True).std().loc[2019:2095]
        )
        trace_name = (
            "10-year rolling standard deviation, "
            + luts.treatment_options[treatment]
        )
        data_traces.append(
            {
                "x": rolling_std.index.tolist(),
                "y": rolling_std.apply(luts.to_acres),
                "type": "line",
                "name": trace_name,
            }
        )
    graph_layout = go.Layout(
        title="Inter-annual variability, "
        + luts.regions[region]
        + ", "
        + luts.scenarios[scenario]
        + ", "
        + luts.models[luts.MODEL_AVG],
        showlegend=True,
        legend_orientation="h",
        boxmode="group",
        legend={"font": {"family": "Open Sans", "size": 10}, "y": -0.15},
        xaxis={"title": "Year"},
        yaxis={"title": "Acres"},
        height=550,
        margin={"l": 50, "r": 50, "b": 50, "t": 50, "pad": 4},
    )
    return {"data": data_traces, "layout": graph_layout}
@app.callback(
    Output("veg_counts", "figure"),
    inputs=[
        Input("region", "value"),
        Input("scenarios_checklist", "value"),
        Input("treatment_options_checklist", "value"),
    ],
)
def generate_veg_counts(region, scenario, treatment_options):
    """Display the coniferous/deciduous area-ratio line chart, one trace
    per selected treatment, for the chosen region and scenario."""
    data_traces = []
    # Future!
    for treatment in treatment_options:
        # Rows for this treatment/scenario/region at the averaged model.
        vc = veg_counts.loc[
            (veg_counts["treatment"] == treatment)
            & (veg_counts["scenario"] == scenario)
            & (veg_counts["model"] == luts.MODEL_AVG)
            & (veg_counts["region"] == region)
        ]
        data_traces.extend(
            [
                {
                    # Index is the year axis of the preprocessed frame.
                    "x": vc.index.tolist(),
                    "y": vc["coniferous"] / vc["deciduous"],
                    "type": "line",
                    "name": ", ".join(
                        [
                            luts.treatment_options[treatment],
                            luts.scenarios[scenario],
                            luts.models[luts.MODEL_AVG],
                        ]
                    ),
                }
            ]
        )
    graph_layout = go.Layout(
        title="Ratio of Coniferous to Deciduous, by area, "
        + luts.regions[region]
        + ", "
        + luts.scenarios[scenario]
        + ", "
        + luts.models[luts.MODEL_AVG],
        showlegend=True,
        legend={"font": {"family": "Open Sans", "size": 10}, "y": -0.15},
        xaxis={"title": "Year"},
        height=550,
        legend_orientation="h",
        yaxis={"title": "Coniferous/Deciduous"},
        margin={"l": 50, "r": 50, "b": 50, "t": 50, "pad": 4},
    )
    return {"data": data_traces, "layout": graph_layout}
@app.callback(
    Output("costs", "figure"),
    inputs=[
        Input("scenarios_checklist", "value"),
        Input("treatment_options_checklist", "value"),
        Input("fmo_radio", "value"),
    ],
)
def generate_costs(scenario, treatment_options, option):
    """Generate the decadal box plot of projected costs for the selected
    scenario, treatments and fire-management (FMO) option."""
    data_traces = []
    for treatment in treatment_options:
        hc = costs.loc[
            (costs["treatment"] == treatment)
            & (costs["scenario"] == scenario)
            & (costs["model"] == luts.MODEL_AVG)
            & (costs["option"] == option)
        ]
        # Bucket rows by decade and tag each row with its decade, the
        # layout Dash expects for grouped box plots.
        decade_frames = []
        for decade, members in hc.groupby(hc.index // 10 * 10):
            decade_frames.append(members.assign(decade=decade))
        # DataFrame.append was removed in pandas 2.0; build the frame
        # with a single concat instead of appending per group.
        dt = pd.concat(decade_frames) if decade_frames else pd.DataFrame()
        data_traces.append(
            go.Box(name=luts.treatment_options[treatment], x=dt.decade, y=dt.cost)
        )
    if option == "total":
        title_option = "Total Costs"
    else:
        title_option = luts.fmo_options[option] + " Option"
    graph_layout = go.Layout(
        title="Future Costs, Full Model Domain, " + title_option,
        showlegend=True,
        height=550,
        legend_orientation="h",
        boxmode="group",
        legend={"font": {"family": "Open Sans", "size": 10}, "y": -0.15},
        xaxis={"title": "Year"},
        yaxis={"title": "Cost ($)"},
        margin={"l": 50, "r": 50, "b": 50, "t": 50, "pad": 4},
    )
    return {"data": data_traces, "layout": graph_layout}
if __name__ == "__main__":
    # Local development entry point; in deployment the WSGI server imports
    # `application` directly instead of running this.
    application.run(debug=False, port=8080)
| StarcoderdataPython |
4932011 | <gh_stars>1-10
import numpy as np
from mmdet.core.evaluation.mean_ap import (eval_map, tpfp_default,
tpfp_imagenet, tpfp_openimages)
# Shared fixtures for the tpfp/eval_map tests below: three detections,
# three ground-truth boxes, and two ignore regions (xyxy format).
det_bboxes = np.array([
    [0, 0, 10, 10],
    [10, 10, 20, 20],
    [32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_tpfp_imagenet():
    """tpfp_imagenet should mark the first two detections as true positives
    and the third as a false positive, under both coordinate conventions."""
    for use_legacy in (True, False):
        result = tpfp_imagenet(
            det_bboxes,
            gt_bboxes,
            gt_bboxes_ignore=gt_ignore,
            use_legacy_coordinate=use_legacy)
        tp = result[0]
        fp = result[1]
        assert tp.shape == (1, 3)
        assert fp.shape == (1, 3)
        assert (tp == np.array([[1, 1, 0]])).all()
        assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
    """tpfp_default should agree with tpfp_imagenet on this fixture for
    both the legacy and the non-legacy coordinate convention."""
    for use_legacy in (True, False):
        result = tpfp_default(
            det_bboxes,
            gt_bboxes,
            gt_bboxes_ignore=gt_ignore,
            use_legacy_coordinate=use_legacy)
        tp = result[0]
        fp = result[1]
        assert tp.shape == (1, 3)
        assert fp.shape == (1, 3)
        assert (tp == np.array([[1, 1, 0]])).all()
        assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
    """eval_map should yield the same mAP (~0.292) for a two-image and a
    one-image dataset built from the shared fixtures, in both coordinate
    conventions."""
    # 2 image and 2 classes
    det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {
        'bboxes': gt_bboxes,
        'bboxes_ignore': gt_ignore,
        'labels': labels,
        'labels_ignore': labels_ignore
    }
    annotations = [gt_info, gt_info]
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=True)
    # Band check rather than exact equality: mAP is a float computed from
    # interpolated precision/recall.
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
    # 1 image and 2 classes
    det_results = [[det_bboxes, det_bboxes]]
    labels = np.array([0, 1, 1])
    labels_ignore = np.array([0, 1])
    gt_info = {
        'bboxes': gt_bboxes,
        'bboxes_ignore': gt_ignore,
        'labels': labels,
        'labels_ignore': labels_ignore
    }
    annotations = [gt_info]
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=True)
    assert 0.291 < mean_ap < 0.293
    mean_ap, eval_results = eval_map(
        det_results, annotations, use_legacy_coordinate=False)
    assert 0.291 < mean_ap < 0.293
def test_tpfp_openimages():
    """Exercise tpfp_openimages with and without group-of handling, with
    all-group-of ground truth, and with empty ground truth.

    Note: these locals shadow the module-level fixtures on purpose; the
    Open Images matcher needs score columns and group-of flags.
    """
    det_bboxes = np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98],
                           [10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97],
                           [30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]])
    gt_bboxes = np.array([[10., 10., 30., 30.], [30., 30., 50., 50.]])
    # The np.bool alias was removed in NumPy 1.24; use the builtin bool.
    gt_groups_of = np.array([True, False], dtype=bool)
    gt_ignore = np.zeros((0, 4))
    # Open Images evaluation using group of.
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=True,
        ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    # With group-of merging, detections inside the group-of box collapse,
    # leaving 4 candidate detections.
    assert tp.shape == (1, 4)
    assert fp.shape == (1, 4)
    assert cls_dets.shape == (4, 5)
    assert (tp == np.array([[0, 1, 0, 1]])).all()
    assert (fp == np.array([[1, 0, 1, 0]])).all()
    cls_dets_gt = np.array([[28., 28., 35., 35., 0.97],
                            [30., 30., 51., 51., 0.96],
                            [100., 110., 120., 130., 0.15],
                            [10., 10., 15., 15., 1.]])
    assert (cls_dets == cls_dets_gt).all()
    # Open Images evaluation not using group of.
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=False,
        ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert tp.shape == (1, 6)
    assert fp.shape == (1, 6)
    assert cls_dets.shape == (6, 5)
    # Open Images evaluation using group of, and gt is all group of bboxes.
    gt_groups_of = np.array([True, True], dtype=bool)
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=True,
        ioa_thr=0.5)
    tp = result[0]
    fp = result[1]
    cls_dets = result[2]
    assert tp.shape == (1, 3)
    assert fp.shape == (1, 3)
    assert cls_dets.shape == (3, 5)
    # Open Images evaluation with empty gt: every detection is a FP.
    gt_bboxes = np.zeros((0, 4))
    gt_groups_of = np.empty((0))
    result = tpfp_openimages(
        det_bboxes,
        gt_bboxes,
        gt_bboxes_ignore=gt_ignore,
        gt_bboxes_group_of=gt_groups_of,
        use_group_of=True,
        ioa_thr=0.5)
    fp = result[1]
    assert (fp == np.array([[1, 1, 1, 1, 1, 1]])).all()
| StarcoderdataPython |
1803718 | #!/usr/bin/env python
#
# !!! Needs psutil (+ dependencies) installing:
#
# $ sudo apt-get install python-dev
# $ sudo pip install psutil
#
import os
import sys
import fluidsynth_backend
from time import sleep
# Bail out before importing the POSIX-only dependencies below.
if os.name != 'posix':
    sys.exit('platform not supported')
import psutil
from datetime import datetime
from oled.device import ssd1306, sh1106
from oled.render import canvas
from PIL import ImageFont
# TODO: custom font bitmaps for up/down arrows
# TODO: Load histogram
def test_text(oled):
    """Render eight numbered placeholder lines on *oled* with the mobitec
    bitmap font, to eyeball row spacing on the display."""
    font = ImageFont.load_default()  # kept for parity; rendering uses font2
    font2 = ImageFont.truetype('fonts/mobitec.ttf', 8)
    with canvas(oled) as draw:
        # Row baselines: the first at y=0, then 8-pixel steps from y=7.
        for line_no, y_pos in enumerate((0, 7, 15, 23, 31, 39, 47, 55), start=1):
            draw.text((0, y_pos), "Line %d...." % line_no, font=font2, fill=255)
def test_menu():
    """Interactive smoke test: build a six-entry Menu, wire up a rotary
    encoder, optionally pre-select or clear via argv[1], then idle so the
    encoder callbacks keep driving the menu."""
    from menu import Menu
    from rotary import Rotary
    m = Menu([
        "First line",
        "A second menu option",
        "Now to the third",
        "On to the forth",
        "Follow the fifth",
        "Support the sixth"
    ])
    # GPIO pins for the rotary encoder: clock, data, and push button.
    r = Rotary(**{'menu': m, 'clk': 11, 'dt': 13, 'btn': 15})
    if len(sys.argv) > 1:
        if sys.argv[1] == 'clear':
            m.blank(True)
        else:
            # Any other argument is treated as a menu index to highlight.
            m.set_highlight(int(sys.argv[1]))
        m.render()
    else:
        m.render()
    # Block forever; input handling happens in the Rotary callbacks.
    while True:
        sleep(1)
def main():
    """Entry point: initialise the SSD1306 I2C display, then run the menu test."""
    # NOTE(review): the device handle is created but never passed on;
    # test_menu() seems to reach the display another way -- confirm whether
    # constructing ssd1306 here is needed for its side effects.
    oled = ssd1306(port=3, address=0x3C)
    test_menu()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1887896 | # Copyright 2017, OpenCensus Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from opencensus.trace import execution_context
from opencensus.trace import print_exporter
from opencensus.trace import samplers
from opencensus.trace.tracer import Tracer
def function_to_trace():
    """Placeholder workload: sleep one second so the enclosing span has a
    visible, non-zero duration in the exported trace."""
    time.sleep(1)
def main():
    """Demonstrate opencensus tracing: nested context-manager spans first,
    then explicit start/end of a span via the execution context."""
    sampler = samplers.AlwaysOnSampler()          # sample every trace
    exporter = print_exporter.PrintExporter()     # dump spans to stdout
    tracer = Tracer(sampler=sampler, exporter=exporter)
    with tracer.span(name='root'):
        tracer.add_attribute_to_current_span(
            attribute_key='example key', attribute_value='example value')
        function_to_trace()
        with tracer.span(name='child'):
            function_to_trace()
    # Get the current tracer
    tracer = execution_context.get_opencensus_tracer()
    # Explicitly create spans
    tracer.start_span()
    # Get current span
    execution_context.get_current_span()
    # Explicitly end span
    tracer.end_span()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
9657814 | '''
Created on Jun 12, 2017
@author: xinguan
'''
import mysql_inserter
import mysql.connector
################################ START database configuration ########################
# NOTE(review): `host` is defined but not passed to connect(); confirm the
# connector's default host is intended here.
host = 'localhost'
user = 'root'
password = '<PASSWORD>'
db = 'dice_test'
conn = mysql.connector.connect(user=user, password=password, database=db)
inserter = mysql_inserter.MySQLInserter(conn)
# Dummy record used to smoke-test the insert path.
job_unique_id = "test"
job_title = "test"
job_url = "test"
company = "test"
post_date = "2017-06-12"
job_description = "test"
# The previous `except: raise` added nothing (a bare re-raise); try/finally
# alone guarantees the connection is closed and lets errors propagate.
try:
    inserter.insert(job_unique_id, job_title, job_url, company, post_date, job_description)
finally:
    conn.close()
3455582 | <reponame>sahilvora225/nursery-store
from rest_framework import serializers
from plant.models import Plant
from .models import Order
class OrderSerializer(serializers.ModelSerializer):
    """
    Serializer for Order model.

    ``buyer`` is read-only (set server-side), while ``plant`` is writable
    and validated against the plant catalogue.
    """
    buyer = serializers.PrimaryKeyRelatedField(read_only=True)
    plant = serializers.PrimaryKeyRelatedField(queryset=Plant.objects.all())

    class Meta:
        model = Order
        fields = '__all__'
        # DRF's Meta option is ``read_only_fields``; the previous
        # ``read_only`` attribute was silently ignored, leaving these
        # fields writable through the API.
        read_only_fields = ('id', 'status', 'datetime')
5109884 | from pyOER import StandardExperiment
import numpy as np
from matplotlib import pyplot as plt
forpublication = True
if forpublication: # for the publication figure
import matplotlib as mpl
mpl.rcParams["figure.figsize"] = (3.25, 2.75)
# plt.rc('text', usetex=True) # crashingly slow
plt.rc("font", family="sans-serif")
plt.rc("font", size=8)
plt.rc("lines", linewidth=0.6)
else:
plt.style.use("default")
lines = []
for panel, e_id in {"a": 33, "b": 41}.items(): # 5, 39
e = StandardExperiment.open(e_id)
axes = e.plot_EC_MS_ICPMS()
ylim_left = np.array([-1, 10])
axes[0].set_ylim(ylim_left)
axes[2].set_ylim(ylim_left / e.beta)
if e_id in [33, 5]:
axes[-1].set_ylim([0, 25])
if e_id in [39, 41]:
axes[-1].set_ylim([0, 2.5])
axes[0].set_ylabel(
"$^{16}$O$^{18}$O and $^{18}$O$_2$ / \n" "(pmol s$^{-1}$cm$^{-2}_{geo})$"
)
axes[1].set_ylabel("E vs RHE / (V)")
axes[3].set_ylabel("J / (mA cm$^{-2}_{geo}$)")
axes[2].set_ylabel("$^{16}$O$_2$ / \n" "(pmol s$^{-1}$cm$^{-2}_{geo})$")
axes[4].set_ylabel(
f"{e.sample.element} dissolution / \n" "(pmol s$^{-1}$cm$^{-2}_{geo})$"
)
fig = axes[0].get_figure()
if forpublication:
fig.set_figwidth(3.25)
fig.set_figheight(3.25)
fig.savefig(f"paper_II_v4_fig2{panel}.png")
fig.savefig(f"paper_II_v4_fig2{panel}.svg")
lines += ["\n", f"----- {panel} = {e}:\n"]
tofs = e.get_tofs()
for tof in tofs:
lines += [
f"{tof.tof_type} rate at {tof.description} = {tof.rate} [mol/s] \n",
(
f"\t= {tof.amount * 1e12 / 0.196} [pmol/cm^2] "
+ f"in the {tof.t_interval} [s] interval\n"
),
f"\twith average potential = {tof.potential} [V] vs RHE.\n",
]
with open("paper_II_v4_fig2_annotation.txt", "w") as f:
f.writelines(lines)
| StarcoderdataPython |
3343431 | <reponame>paulkarikari/PyDP
# Smoke-test of the PyDP Status/payload API: create a Status, attach a
# payload to a URL, read it back, erase it, and read again.
import pydp as dp
print("Successfully imported pydp")
# print("Here are the available Status codes from the Base library")
# x = range(18)
# for n in x:
# print(dp.StatusCode(n))  # (was `pd.` -- the module is imported as `dp`)
s = dp.Status(dp.Status.StatusCode(3), "New status object")
print(s)
url = "http://test.com"
payload_content = "example payload content"
print("Setting payload: " + payload_content)
s.set_payload(url, payload_content)
print("Getting payload: ")
return_payload = s.get_payload(url)
print(return_payload)
print("Erasing payload")
s.erase_payload(url)
# After erase_payload the lookup demonstrates the payload is gone.
new_payload=s.get_payload(url)
print("New payload:")
print(new_payload)
| StarcoderdataPython |
3239910 | import numpy as np
from extensive_form.optimistic_ulcb import OptimisticULCB
from extensive_form.strategic_ulcb import StrategicULCB
from extensive_form.optimistic_nash_q import OptimisticNashQ
from extensive_form.optimistic_nash_v import OptimisticNashV
from extensive_form.uniform_exploration import UniformExploration
from extensive_form.optimistic_q_learning import OptimisticQLearning
from extensive_form.nash_q import NashQ
from extensive_form.strategic_nash_q import StrategicNashQ
class UniformRandom:
    """Baseline agent that acts uniformly at random in every state."""

    def __init__(self, env, config):
        # `config` is accepted for interface parity with the other
        # algorithms in this module but is not used by this agent.
        self._env = env

    def sample(self):
        """Play one episode with random joint actions; return its length."""
        current = self._env.reset()
        step_count = 0
        while current is not None:
            joint_action = [
                np.random.randint(0, current.num_actions(pid))
                for pid in current.active_players()
            ]
            current = self._env.step(joint_action)
            step_count += 1
        return step_count

    def strategy(self, state, player_id):
        """Return the uniform distribution over the player's legal actions."""
        n = state.num_actions(player_id)
        return np.full(n, 1.0 / n)
# Registry mapping algorithm names to their classes; build_algorithm()
# looks implementations up here.
ALGORITHMS = {
    "random": UniformRandom,
    "uniform_exploration": UniformExploration,
    "optimistic_nash_q": OptimisticNashQ,
    "optimistic_nash_v": OptimisticNashV,
    "optimistic_q_learning": OptimisticQLearning,
    "nash_q": NashQ,
    "strategic_nash_q": StrategicNashQ,
    "optimistic_ulcb": OptimisticULCB,
    "strategic_ulcb": StrategicULCB,
}
def build_algorithm(alg, env, config={}):
    """Instantiate the learning algorithm registered under *alg*.

    Raises ValueError when *alg* is not a key of ALGORITHMS.
    NOTE(review): the mutable default `config={}` is shared across calls;
    kept for interface compatibility -- callers should pass their own dict.
    """
    if alg not in ALGORITHMS:
        # Fixed typo in the error message ("Leanring" -> "Learning").
        raise ValueError(f"Learning algorithm '{alg}' is not defined")
    return ALGORITHMS[alg](env, config)
| StarcoderdataPython |
4993414 | """ Purpose: Automatic Speech Recognition module using Google's speech recognition
Source Code Creator: <NAME>
Project: WILPS Hamburg University
Term: Summer 2021
M.Sc. Intelligent Adaptive Systems """
from utils.getMeetingInfo import getcurrentMeetingInfo
from utils.chunkAudio import chuckAudio
from utils.convertToWav import convertToWavFile
import speech_recognition as sr
print(sr.__version__)
import os
from os import path
RECORD_OPUS_FILE = ""
CONF_ID = ""
def extractConfInfo():
    """Fetch the current meeting's conference id and .opus recording path.

    Returns a (CONF_ID, RECORD_OPUS_FILE) tuple. Note these assignments are
    local; the module-level globals of the same names are only updated by
    the caller (the __main__ block) unpacking the return value.
    """
    currentMeetingInfo = getcurrentMeetingInfo()
    CONF_ID = currentMeetingInfo['voiceConfID']
    RECORD_OPUS_FILE = currentMeetingInfo['RecordPath']
    # For testing on local
    #CONF_ID = "26471"
    #RECORD_OPUS_FILE = os.path.join(os.getcwd(), "var/freeswitch/meetings/cde145e5c20766d33c81bb492a157a94feb3a0f1-1603617884026-15243890485.opus")
    return CONF_ID, RECORD_OPUS_FILE
def createTranscriptFileName(CONF_ID):
    """Return the transcript path for a conference:
    <cwd>/MeetingTranscriptData/<CONF_ID>_transcript.txt.
    """
    # os.path.join handles separators portably instead of hand-built
    # string concatenation.
    return os.path.join(os.getcwd(), "MeetingTranscriptData", CONF_ID + "_transcript.txt")
def getFullPath(filename, CONF_ID):
    """Return the absolute path of a chunked audio file under
    audio/tmp/<CONF_ID>/ next to this script."""
    script_dir = path.dirname(path.realpath(__file__))
    return path.join(script_dir, "audio/tmp", CONF_ID, filename)
def convertAudiotoText(audioPath, transcriptFileName):
    """Transcribe every .wav chunk in *audioPath* with Google Speech
    Recognition, appending each result to *transcriptFileName*.

    Returns the text of the last successfully transcribed chunk, or ""
    when nothing was transcribed (previously this raised
    UnboundLocalError if every chunk failed).
    """
    transribedText = ""
    # sorted() keeps the chunks in recording order.
    for x in sorted(os.listdir(audioPath)):
        if x.endswith('.wav'):
            audioFile = str(x)
            print("Currently Transcribing: " + audioFile)
            r = sr.Recognizer()
            # read the chunked audio file
            # NOTE(review): the chunk path is rebuilt from the module-level
            # CONF_ID rather than audioPath -- confirm they always agree.
            with sr.AudioFile(getFullPath(audioFile, CONF_ID)) as source:
                audio = r.record(source)
            # recognize speech using Google Speech Recognition
            try:
                # for testing purposes, we're just using the default API key
                # to use another API key, use `r.recognize_google(audio, key="GOOGLE_SPEECH_RECOGNITION_API_KEY")`
                # instead of `r.recognize_google(audio)`
                transribedText = r.recognize_google(audio)
                print("Google Speech Recognition thinks you said: " + transribedText)
                # Context manager guarantees the file handle is closed even
                # if the write fails.
                with open(transcriptFileName, "a") as transribedFile:
                    transribedFile.writelines(transribedText + ". ")
            except sr.UnknownValueError:
                print("Google Speech Recognition could not understand audio")
            except sr.RequestError as e:
                print("Could not request results from Google Speech Recognition service; {0}".format(e))
    return transribedText
if __name__ == '__main__':
    # Pipeline: locate the meeting recording, convert opus -> wav, chunk
    # the wav, then transcribe the chunks to a per-conference text file.
    CONF_ID, RECORD_OPUS_FILE = extractConfInfo()
    print("Conf_ID: " + CONF_ID)
    print("Record File:" + str(RECORD_OPUS_FILE))
    # Convert to wav and store it in the MeetingAudioWav folder.
    wavFileName, wavFilePath = convertToWavFile(RECORD_OPUS_FILE)
    print("Wave File Name: " + wavFileName)
    print("Wave File Path:" + wavFilePath)
    # Chunk the file and store the pieces under audio/tmp/<CONF_ID>/.
    tmpAudioPath = chuckAudio(wavFilePath, CONF_ID)
    # Create the transcription file path in "MeetingTranscriptData".
    transcriptFileName = createTranscriptFileName(CONF_ID)
    print("Transcript File Name:" + transcriptFileName)
    # Transcribe each chunk file in turn.
    transribedText = convertAudiotoText(tmpAudioPath,transcriptFileName)
    print(transribedText)
    #Delete the tmp chunked files
5010966 | from .fgir import *
from .error import *
# Optimization pass interfaces.
# These are marker base classes: the pass runner dispatches on which base a
# concrete pass subclasses to decide what to feed visit() and in what order.
class Optimization(object):
  def visit(self, obj): pass
class FlowgraphOptimization(Optimization):
  '''Called on each flowgraph in a FGIR.
  May modify the flowgraph by adding or removing nodes (return a new Flowgraph).
  If you modify nodes, make sure inputs, outputs, and variables are all updated.
  May NOT add or remove flowgraphs.'''
  pass
class TopologicalFlowgraphOptimization(Optimization):
  '''Called on each flowgraph in a FGIR, in dependent order.
  Components which are used by other components will be called first.'''
  pass
class NodeOptimization(Optimization):
  '''Called on each node in a FGIR.
  May modify the node (return a new Node object, and it will be assigned).
  May NOT remove or add nodes (use a component pass).'''
  pass
class TopologicalNodeOptimization(NodeOptimization): pass
# Optimization pass implementations
class PrintIR(TopologicalNodeOptimization):
  'A simple "optimization" pass which can be used to debug topological sorting'
  def visit(self, node):
    # print() stringifies its argument itself, so this is equivalent
    # to print(str(node)).
    print(node)
class AssignmentEllision(FlowgraphOptimization):
  '''Eliminates all assignment nodes.
  Assignment nodes are useful for the programmer to reuse the output of an
  expression multiple times, and the lowering transformation generates explicit
  flowgraph nodes for these expressions. However, they are not necessary for
  execution, as they simply forward their value. This removes them and connects
  their pre- and post-dependencies.'''
  def visit(self, flowgraph):
    # Check all the nodes in the flowgraph and remove assigment nodes
    to_remove = []
    # Keeps track of the predecessors to nodes to be removed
    to_remove_inputs = {}
    # Find all assignment nodes to remove
    for node in flowgraph.nodes.keys():
      # If the node is an assigment, eliminate it
      if flowgraph.nodes[node].type == FGNodeType.assignment:
        to_remove.append(node)
        to_remove_inputs[node] = flowgraph.nodes[node].inputs
    # Update the nodes that are before and after the assignment nodes:
    # replace each reference to a removed node with that node's own inputs.
    # NOTE(review): a chain of assignments (assignment feeding assignment)
    # would splice in another removed id here -- confirm chains cannot occur
    # after lowering, or that one splice step is sufficient.
    for node in flowgraph.nodes.keys():
      for to_remove_node in to_remove_inputs:
        if to_remove_node in flowgraph.nodes[node].inputs:
          flowgraph.nodes[node].inputs.remove(to_remove_node)
          flowgraph.nodes[node].inputs.extend(to_remove_inputs[to_remove_node])
    # Re-point named variables that referred to an elided assignment at its
    # first (and, for assignments, only) input node.
    for var in flowgraph.variables:
      node = flowgraph.variables[var]
      if node in to_remove:
        flowgraph.variables[var] = to_remove_inputs[node][0]
    # Clear out the nodes that should be removed
    for node_to_remove in to_remove:
      del flowgraph.nodes[node_to_remove]
    return flowgraph
class DeadCodeElimination(FlowgraphOptimization):
  '''Eliminates unreachable expression statements.
  Statements which never affect any output are effectively useless, and we call
  these "dead code" blocks. This optimization removes any expressions which can
  be shown not to affect the output.
  NOTE: input statements *cannot* safely be removed, since doing so would change
  the call signature of the component. For example, it might seem that the input
  x could be removed:
    { component1 (input x y) (output y) }
  but imagine this component1 was in a file alongside this one:
    { component2 (input a b) (:= c (component a b)) (output c) }
  By removing x from component1, it could no longer accept two arguments. So in
  this instance, component1 will end up unmodified after DCE.'''
  def visit(self, flowgraph):
    # Reachability analysis: walk backwards from the outputs; every node
    # never reached is dead and gets deleted.
    # Check all the nodes in the flowgraph and remove dead nodes
    # Start with assuming all nodes should be removed, and then
    # work backwards from the outputs to remove unused ones
    to_remove = list(flowgraph.nodes.keys())
    # Need to copy this list, otherwise you are modifying the originals
    to_check = flowgraph.outputs[:]
    while len(to_check) > 0:
      cur = to_check.pop()
      to_remove.remove(cur)
      # Add all previous to the to_check queue
      for key in flowgraph.nodes[cur].inputs:
        if key in to_remove and key not in to_check:
          to_check.append(key)
    # Now eliminate whatever is left and hasn't been visited
    if len(to_remove) > 0:
      # NOTE(review): debug print left in; consider routing through a logger.
      print("Removing nodes", to_remove)
      for key in to_remove:
        del flowgraph.nodes[key]
      to_remove_vars = []
      # Remove unused variables as well (collected first so the dict is not
      # mutated while iterating it).
      for var in flowgraph.variables:
        node = flowgraph.variables[var]
        if node in to_remove:
          to_remove_vars.append(var)
      for var in to_remove_vars:
        del flowgraph.variables[var]
    return flowgraph
class InlineComponents(TopologicalFlowgraphOptimization):
  '''Replaces every component invocation with a copy of that component's flowgraph.
  Topological order guarantees that we inline components before they are invoked.

  The pass caches every flowgraph it has visited by name; because passes run
  in dependent order, an invoked component's (already-inlined) graph is
  always in component_cache when its caller is visited.'''
  def __init__(self):
    self.component_cache = {}
  def visit(self, flowgraph):
    for (cnode_id, cnode) in [(nid,n) for (nid,n) in flowgraph.nodes.items() if n.type==FGNodeType.component]:
      target = self.component_cache[cnode.ref]
      # Add a copy of every node in target flowgraph
      id_map = {} # maps node id's in the target to node id's in our flowgraph
      for tnode in target.nodes.values():
        # Input/output nodes of the inlined graph become plain forwarders.
        if tnode.type==FGNodeType.input or tnode.type==FGNodeType.output:
          newtype = FGNodeType.forward
        else:
          newtype = tnode.type
        n = flowgraph.new_node(newtype, ref=tnode.ref)
        id_map[tnode.nodeid] = n.nodeid
      # Connect all copies together
      for tid,tnode in target.nodes.items():
        flowgraph.nodes[id_map[tid]].inputs = [id_map[i] for i in tnode.inputs]
      # Link inputs of cnode to inputs of target flowgraph
      for cnode_input,targ_input in zip(cnode.inputs, target.inputs):
        flowgraph.nodes[id_map[targ_input]].inputs = [cnode_input]
      # Link output of target flowgraph to outputs of cnode.
      # NOTE(review): only target.outputs[0] is wired up, so this assumes
      # single-output components -- confirm multi-output graphs cannot occur.
      for oid,onode in flowgraph.nodes.items():
        if cnode_id in onode.inputs:
          onode.inputs[onode.inputs.index(cnode_id)] = id_map[target.outputs[0]]
      # Remove all other references to cnode in flowgraph
      del flowgraph.nodes[cnode_id]
      victims = [s for s,nid in flowgraph.variables.items() if nid==cnode_id]
      for v in victims:
        del flowgraph.variables[v]
    self.component_cache[flowgraph.name] = flowgraph
    return flowgraph
3581132 | <filename>shibayama2009.py
import sympy
import Hamilton
import Birkhoff
# Symbolic setup: indexed coordinates/momenta and the model parameters.
x = sympy.IndexedBase("x")
y = sympy.IndexedBase("y")
p = sympy.IndexedBase("p")
q = sympy.IndexedBase("q")
t = sympy.IndexedBase("tau")
o = sympy.Symbol("omega", positive=True)
a = sympy.Symbol("alpha", positive=True)
b = sympy.Symbol("beta")
l = sympy.Symbol("lambda", positive=True)
# Hamiltonian in the momenta p[1], p[2] and coordinates q[1], q[2].
H = p[1]*p[1]/4 + o*o/4/q[1]/q[1] - 1/a/2/q[1] - 2/sympy.sqrt(q[1]*q[1]+q[2]*q[2]) + (a+2)*p[2]*p[2]/a/4
ham = Hamilton.Hamiltonian(H, [p[1],p[2]], [q[1],q[2]])
# Expand to degree 4 about the equilibrium (p1, p2, q1, q2).
equilibrium_points = [0, 0, o*o*a/(4*a+1), 0]
ham.expand_around_equilibrium(equilibrium_points, max_degree=4)
ham.rescale()
# Re-express alpha in terms of the frequency-ratio parameter lambda.
ham.coeff_subs([(a, (8-l*l)/(4*l*l-4))])
ham.rotate45()
# Birkhoff normal form via Lie transforms.
birkhoff = Birkhoff.LieTransform.fromHamiltonian(ham)
birkhoff.exec()
print(birkhoff.normalform())
| StarcoderdataPython |
6543142 | <filename>fetch_all_exp.py
import neptune
from config import EnvConfig
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# Pull Lyapunov-exponent channels from Neptune experiments and plot how the
# fraction of negative-exponent episodes varies with the hyperparameters.
comet_cfg = EnvConfig()
session = neptune.Session(api_token=comet_cfg.neptune_token)
project = session.get_project(project_qualified_name=comet_cfg.neptune_project_name)
# NOTE(review): the slice skips the first 171 experiments -- confirm this
# offset still selects the sweep of interest.
experiments = project.get_experiments()[171:]
print(experiments)
params = []
lyap_intra = []
neg_intra = []
for exp in experiments:
    properties = exp.get_properties()
    model_update = properties['target_model_update']
    mem_len = properties['memory_limit']
    alpha = properties['learning_rate']
    try:
        # print(exp.id)
        lyap_intra = exp.get_numeric_channels_values('lyap_exp_intra_ins_0','lyap_exp_intra_ins_1','lyap_exp_intra_ins_2').to_numpy()[:,1:]
        # NOTE(review): 600 appears to be the total exponent count per
        # experiment (rows x 3 channels) -- confirm.
        neg_intra.append(sum(sum(lyap_intra<0))/600)
        # print(lyap)
        params.append([model_update, mem_len, alpha])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; experiments missing the channels are skipped.
        print('except '+exp.id)
# np.float was removed in NumPy 1.24; the builtin float is equivalent here.
params = np.array(params).astype(float)
lyap_intra = np.array(lyap_intra).astype(float)
plt.figure()
plt.subplot(1,3,1)
plt.scatter(params[:,0], neg_intra)
plt.xlabel('target_model_update')
plt.ylabel('Percentage of episodes w/ negative lyap exp')
plt.subplot(1,3,2)
plt.scatter(params[:,1], neg_intra)
plt.xlabel('memory_limit')
plt.subplot(1,3,3)
plt.scatter(params[:,2], neg_intra)
plt.xlabel('alpha')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
img = ax.scatter(params[:,0],params[:,1], params[:,2], c=neg_intra, cmap=plt.hot())
ax.set_xlabel('Model_update'); ax.set_ylabel('Memory_limit'); ax.set_zlabel('Learning_rate');
fig.colorbar(img)
plt.title('3d plot of percentage of negative exponent episodes')
plt.figure()
plt.scatter(range(len(neg_intra)), neg_intra)
plt.show()
print('done')
| StarcoderdataPython |
332133 | <gh_stars>10-100
#!/usr/bin/env python
# A script to convert various types of raster images into .tiff format
# that can be easier for MATLAB to read
# Refer to this question:
# https://gis.stackexchange.com/questions/42584/how-to-call-gdal-translate-from-python-code
# and this one: https://gdal.org/tutorials/raster_api_tut.html#using-createcopy
from osgeo import gdal
import sys
# Validate arguments explicitly: an assert disappears under `python -O`
# and gives an unhelpful failure message.
if len(sys.argv) != 3:
    sys.exit("usage: %s <input raster> <output.tiff>" % sys.argv[0])
dataset_in = gdal.Open(sys.argv[1])
# gdal.Open returns None (rather than raising) when the file cannot be read.
if dataset_in is None:
    sys.exit("could not open input raster: %s" % sys.argv[1])
driver = gdal.GetDriverByName('GTiff')
dataset_out = driver.CreateCopy(sys.argv[2], dataset_in, 0)
# Dropping the references flushes and closes the GDAL datasets.
dataset_in = None
dataset_out = None
5195352 | <filename>src/auxil/readshp.py
#!/usr/bin/env python
# Name: readshp.py
# Purpose:
# Read ENVI ROI shapefiles and return training/test data and class labels
# Usage:
# import readshp
# Gs, ls, numclasses = readshp.readshp(<train shapefile>, <inDataset for image>, <band positions)> )
from osgeo import ogr, osr
from shapely.geometry import asPolygon, MultiPoint
import shapely.wkt
import numpy as np
def readshp(trnfile, inDataset, pos):
    """Read ENVI ROI polygons from a shapefile and extract training data.

    Rasterizes each ROI polygon onto the image grid and collects the pixel
    vectors that fall inside it.

    :param trnfile: path to the ESRI shapefile holding the ROI polygons
    :param inDataset: open GDAL dataset of the image to sample from
    :param pos: 1-based band indices to read from the image
    :return: tuple ``(Gs, ls, K, classnames)`` where ``Gs`` is the
        (nSamples, nBands) data matrix, ``ls`` the one-hot label matrix,
        ``K`` the number of classes (max CLASS_ID + 1) and ``classnames``
        the list of class names in first-seen order.
    """
    # projection info from input image
    projection = inDataset.GetProjection()
    geotransform = inDataset.GetGeoTransform()
    gt = list(geotransform)
    imsr = osr.SpatialReference()
    imsr.ImportFromWkt(projection)
    trnDriver = ogr.GetDriverByName('ESRI Shapefile')
    trnDatasource = trnDriver.Open(trnfile, 0)
    trnLayer = trnDatasource.GetLayer()
    trnsr = trnLayer.GetSpatialRef()
    # coordinate transformation from training to image projection
    ct = osr.CoordinateTransformation(trnsr, imsr)
    # image bands
    rasterBands = []
    for b in pos:
        rasterBands.append(inDataset.GetRasterBand(b))
    # number of classes: one pass to find the largest CLASS_ID
    K = 1
    feature = trnLayer.GetNextFeature()
    while feature:
        classid = feature.GetField('CLASS_ID')
        if int(classid) > K:
            K = int(classid)
        feature = trnLayer.GetNextFeature()
    trnLayer.ResetReading()
    K += 1
    # loop through the polygons
    Gs = []  # train observations (data matrix)
    ls = []  # class labels (lists)
    classnames = []
    classids = set()
    # print() form parses under both Python 2 and 3 (original used the
    # py2-only print statement).
    print('reading training data...')
    for i in range(trnLayer.GetFeatureCount()):
        feature = trnLayer.GetFeature(i)
        classid = str(feature.GetField('CLASS_ID'))
        classname = feature.GetField('CLASS_NAME')
        if classid not in classids:
            classnames.append(classname)
            # BUG FIX: `classids | set(classid)` split the id string into
            # characters (e.g. '12' -> {'1', '2'}), so multi-digit class
            # ids were never recorded and their names were appended again.
            classids.add(classid)
        # one-hot label for this ROI
        y = int(classid)
        l = [0 for _ in range(K)]
        l[y] = 1.0
        polygon = feature.GetGeometryRef()
        # transform to same projection as image
        polygon.Transform(ct)
        # convert to a Shapely object
        poly = shapely.wkt.loads(polygon.ExportToWkt())
        # transform the boundary to pixel coords in numpy
        bdry = np.array(poly.boundary)
        bdry[:, 0] = bdry[:, 0] - gt[0]
        bdry[:, 1] = bdry[:, 1] - gt[3]
        GT = np.mat([[gt[1], gt[2]], [gt[4], gt[5]]])
        bdry = bdry * np.linalg.inv(GT)
        # polygon in pixel coords
        polygon1 = asPolygon(bdry)
        # raster over the bounding rectangle
        minx, miny, maxx, maxy = map(int, list(polygon1.bounds))
        pts = []
        for col in range(minx, maxx + 1):
            for row in range(miny, maxy + 1):
                pts.append((col, row))
        multipt = MultiPoint(pts)
        # pixel centres inside the polygon, as an integer list
        intersection = np.array(multipt.intersection(polygon1), dtype=np.int).tolist()
        # cut out the bounded image cube
        cube = np.zeros((maxy - miny + 1, maxx - minx + 1, len(rasterBands)))
        k = 0
        for band in rasterBands:
            cube[:, :, k] = band.ReadAsArray(minx, miny, maxx - minx + 1, maxy - miny + 1)
            k += 1
        # get the training vectors (px, py renamed so the label index y is
        # not shadowed)
        for (px, py) in intersection:
            Gs.append(cube[py - miny, px - minx, :])
            ls.append(l)
        polygon = None
        polygon1 = None
        feature.Destroy()
    trnDatasource.Destroy()
    Gs = np.array(Gs)
    ls = np.array(ls)
    return (Gs, ls, K, classnames)
if __name__ == '__main__':
    # Module is import-only; no standalone command line interface.
    pass
| StarcoderdataPython |
""" Module RxNSEM.py
Receivers for the NSEM problem
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from scipy.constants import mu_0
import SimPEG
import numpy as np
from SimPEG import mkvc
class BaseRxNSEM_Point(SimPEG.Survey.BaseRx):
    """
    Natural source receiver base class.
    Assumes that the data locations are xyz coordinates.
    :param numpy.ndarray locs: receiver locations (ie. :code:`np.r_[x,y,z]`)
    :param string orientation: receiver orientation 'xx', 'xy', 'yx', 'yy', 'zx' or 'zy'
    :param string component: real or imaginary component 'real' or 'imag'
    """
    def __init__(self, locs, orientation=None, component=None):
        # Error message fixed: the valid values are the pair orientations
        # checked below, not 'x', 'y', 'z' as the old message claimed.
        assert(orientation in ['xx', 'xy', 'yx', 'yy', 'zx', 'zy']), "Orientation {0!s} not known. Orientation must be in 'xx', 'xy', 'yx', 'yy', 'zx', 'zy'. Arbitrary orientations have not yet been implemented.".format(orientation)
        assert(component in ['real', 'imag']), "'component' must be 'real' or 'imag', not {0!s}".format(component)
        self.orientation = orientation
        self.component = component
        SimPEG.Survey.BaseRx.__init__(self, locs, rxType=None)  # TODO: remove rxType from baseRx
    # Set a mesh property (assigned by eval/evalDeriv)
    @property
    def mesh(self):
        return self._mesh
    @mesh.setter
    def mesh(self, value):
        # Keep the cached mesh (and the interpolation matrices built from
        # it) unless a different object is assigned.
        if value is getattr(self, '_mesh', None):
            pass
        else:
            self._mesh = value
    @property
    def src(self):
        return self._src
    @src.setter
    def src(self, value):
        self._src = value
    @property
    def f(self):
        return self._f
    @f.setter
    def f(self, value):
        self._f = value
    def _locs_e(self):
        """Electric-field locations: layer 0 of a 3D locs array, else locs."""
        if self.locs.ndim == 3:
            loc = self.locs[:, :, 0]
        else:
            loc = self.locs
        return loc
    def _locs_b(self):
        """Magnetic-field locations: layer 1 of a 3D locs array, else locs."""
        if self.locs.ndim == 3:
            loc = self.locs[:, :, 1]
        else:
            loc = self.locs
        return loc
    # Location projection: interpolation matrices are built lazily and cached
    @property
    def Pex(self):
        if getattr(self, '_Pex', None) is None:
            self._Pex = self._mesh.getInterpolationMat(self._locs_e(), 'Ex')
        return self._Pex
    @property
    def Pey(self):
        if getattr(self, '_Pey', None) is None:
            self._Pey = self._mesh.getInterpolationMat(self._locs_e(), 'Ey')
        return self._Pey
    @property
    def Pbx(self):
        if getattr(self, '_Pbx', None) is None:
            self._Pbx = self._mesh.getInterpolationMat(self._locs_b(), 'Fx')
        return self._Pbx
    @property
    def Pby(self):
        if getattr(self, '_Pby', None) is None:
            self._Pby = self._mesh.getInterpolationMat(self._locs_b(), 'Fy')
        return self._Pby
    @property
    def Pbz(self):
        if getattr(self, '_Pbz', None) is None:
            self._Pbz = self._mesh.getInterpolationMat(self._locs_e(), 'Fz')
        return self._Pbz
    # Utility for convenience
    def _sDiag(self, t):
        """Sparse diagonal matrix built from the vectorized input."""
        return SimPEG.Utils.sdiag(mkvc(t,2))
    # Get the components of the fields
    # px: x-polarization and py: y-polarization.
    @property
    def _ex_px(self):
        return self.Pex*self.f[self.src, 'e_px']
    @property
    def _ey_px(self):
        return self.Pey*self.f[self.src, 'e_px']
    @property
    def _ex_py(self):
        return self.Pex*self.f[self.src, 'e_py']
    @property
    def _ey_py(self):
        return self.Pey*self.f[self.src, 'e_py']
    # Magnetic fields h = b / mu_0 projected to the receivers
    @property
    def _hx_px(self):
        return self.Pbx*self.f[self.src, 'b_px']/mu_0
    @property
    def _hy_px(self):
        return self.Pby*self.f[self.src, 'b_px']/mu_0
    @property
    def _hz_px(self):
        return self.Pbz*self.f[self.src, 'b_px']/mu_0
    @property
    def _hx_py(self):
        return self.Pbx*self.f[self.src, 'b_py']/mu_0
    @property
    def _hy_py(self):
        return self.Pby*self.f[self.src, 'b_py']/mu_0
    @property
    def _hz_py(self):
        return self.Pbz*self.f[self.src, 'b_py']/mu_0
    # Get the derivatives of the projected fields w.r.t. the solution vector
    def _ex_px_u(self, vec):
        return self.Pex*self.f._e_pxDeriv_u(self.src, vec)
    def _ey_px_u(self, vec):
        return self.Pey*self.f._e_pxDeriv_u(self.src, vec)
    def _ex_py_u(self, vec):
        return self.Pex*self.f._e_pyDeriv_u(self.src, vec)
    def _ey_py_u(self, vec):
        return self.Pey*self.f._e_pyDeriv_u(self.src, vec)
    def _hx_px_u(self, vec):
        return self.Pbx*self.f._b_pxDeriv_u(self.src, vec)/mu_0
    def _hy_px_u(self, vec):
        return self.Pby*self.f._b_pxDeriv_u(self.src, vec)/mu_0
    def _hz_px_u(self, vec):
        return self.Pbz*self.f._b_pxDeriv_u(self.src, vec)/mu_0
    def _hx_py_u(self, vec):
        return self.Pbx*self.f._b_pyDeriv_u(self.src, vec)/mu_0
    def _hy_py_u(self, vec):
        return self.Pby*self.f._b_pyDeriv_u(self.src, vec)/mu_0
    def _hz_py_u(self, vec):
        return self.Pbz*self.f._b_pyDeriv_u(self.src, vec)/mu_0
    # Define the components of the derivative
    @property
    def _Hd(self):
        """Diagonal of 1 / det(H) with H = [[hx_px, hx_py], [hy_px, hy_py]]."""
        return self._sDiag(1./(
            self._sDiag(self._hx_px)*self._hy_py -
            self._sDiag(self._hx_py)*self._hy_px
        ))
    def _Hd_uV(self, v):
        """Directional derivative of det(H) along v (product rule)."""
        return (
            self._sDiag(self._hy_py)*self._hx_px_u(v) +
            self._sDiag(self._hx_px)*self._hy_py_u(v) -
            self._sDiag(self._hx_py)*self._hy_px_u(v) -
            self._sDiag(self._hy_px)*self._hx_py_u(v)
        )
    # Adjoint counterparts of the projected fields
    @property
    def _aex_px(self):
        return mkvc(mkvc(self.f[self.src, 'e_px'], 2).T*self.Pex.T)
    @property
    def _aey_px(self):
        return mkvc(mkvc(self.f[self.src, 'e_px'], 2).T*self.Pey.T)
    @property
    def _aex_py(self):
        return mkvc(mkvc(self.f[self.src, 'e_py'], 2).T*self.Pex.T)
    @property
    def _aey_py(self):
        return mkvc(mkvc(self.f[self.src, 'e_py'], 2).T*self.Pey.T)
    @property
    def _ahx_px(self):
        return mkvc(mkvc(self.f[self.src, 'b_px'], 2).T/mu_0*self.Pbx.T)
    @property
    def _ahy_px(self):
        return mkvc(mkvc(self.f[self.src, 'b_px'], 2).T/mu_0*self.Pby.T)
    @property
    def _ahz_px(self):
        return mkvc(mkvc(self.f[self.src, 'b_px'], 2).T/mu_0*self.Pbz.T)
    @property
    def _ahx_py(self):
        return mkvc(mkvc(self.f[self.src, 'b_py'], 2).T/mu_0*self.Pbx.T)
    @property
    def _ahy_py(self):
        return mkvc(mkvc(self.f[self.src, 'b_py'], 2).T/mu_0*self.Pby.T)
    @property
    def _ahz_py(self):
        return mkvc(mkvc(self.f[self.src, 'b_py'], 2).T/mu_0*self.Pbz.T)
    # NOTE: need to add a .T at the end for the output to be (nU,)
    def _aex_px_u(self, vec):
        """Adjoint of _ex_px_u; vec is (nD,) and returns a (nU,)."""
        return self.f._e_pxDeriv_u(self.src, self.Pex.T*mkvc(vec,), adjoint=True)
    def _aey_px_u(self, vec):
        """Adjoint of _ey_px_u; vec is (nD,) and returns a (nU,)."""
        return self.f._e_pxDeriv_u(self.src, self.Pey.T*mkvc(vec,), adjoint=True)
    def _aex_py_u(self, vec):
        """Adjoint of _ex_py_u; vec is (nD,) and returns a (nU,)."""
        return self.f._e_pyDeriv_u(self.src, self.Pex.T*mkvc(vec,), adjoint=True)
    def _aey_py_u(self, vec):
        """Adjoint of _ey_py_u; vec is (nD,) and returns a (nU,)."""
        return self.f._e_pyDeriv_u(self.src, self.Pey.T*mkvc(vec,), adjoint=True)
    def _ahx_px_u(self, vec):
        """Adjoint of _hx_px_u; vec is (nD,) and returns a (nU,)."""
        return self.f._b_pxDeriv_u(self.src, self.Pbx.T*mkvc(vec,), adjoint=True)/mu_0
    def _ahy_px_u(self, vec):
        """Adjoint of _hy_px_u; vec is (nD,) and returns a (nU,)."""
        return self.f._b_pxDeriv_u(self.src, self.Pby.T*mkvc(vec,), adjoint=True)/mu_0
    def _ahz_px_u(self, vec):
        """Adjoint of _hz_px_u; vec is (nD,) and returns a (nU,)."""
        return self.f._b_pxDeriv_u(self.src, self.Pbz.T*mkvc(vec,), adjoint=True)/mu_0
    def _ahx_py_u(self, vec):
        """Adjoint of _hx_py_u; vec is (nD,) and returns a (nU,)."""
        return self.f._b_pyDeriv_u(self.src, self.Pbx.T*mkvc(vec,), adjoint=True)/mu_0
    def _ahy_py_u(self, vec):
        """Adjoint of _hy_py_u; vec is (nD,) and returns a (nU,)."""
        return self.f._b_pyDeriv_u(self.src, self.Pby.T*mkvc(vec,), adjoint=True)/mu_0
    def _ahz_py_u(self, vec):
        """Adjoint of _hz_py_u; vec is (nD,) and returns a (nU,)."""
        return self.f._b_pyDeriv_u(self.src, self.Pbz.T*mkvc(vec,), adjoint=True)/mu_0
    # Define the components of the derivative
    @property
    def _aHd(self):
        """Adjoint counterpart of _Hd."""
        return self._sDiag(1./(
            self._sDiag(self._ahx_px)*self._ahy_py -
            self._sDiag(self._ahx_py)*self._ahy_px
        ))
    def _aHd_uV(self, x):
        """Adjoint of _Hd_uV.

        BUG FIX: the second term previously repeated the first term
        (``_ahx_px_u(sDiag(_ahy_py)*x)``); the adjoint of the forward term
        ``sDiag(_hx_px)*_hy_py_u(v)`` is ``_ahy_py_u(sDiag(_ahx_px)*x)``,
        mirroring _Hd_uV term by term.
        """
        return (
            self._ahx_px_u(self._sDiag(self._ahy_py)*x) +
            self._ahy_py_u(self._sDiag(self._ahx_px)*x) -
            self._ahy_px_u(self._sDiag(self._ahx_py)*x) -
            self._ahx_py_u(self._sDiag(self._ahy_px)*x)
        )
    def eval(self, src, mesh, f, return_complex=False):
        """
        Function to evaluate datum for this receiver.

        Abstract: subclasses implement the actual projection.
        """
        raise NotImplementedError('SimPEG.EM.NSEM receiver has to have an eval method')
    def evalDeriv(self, src, mesh, f, v, adjoint=False):
        """
        Function to evaluate the derivative of the datum for this receiver.

        Abstract: subclasses implement the actual derivative.
        """
        raise NotImplementedError('SimPEG.EM.NSEM receiver has to have an evalDeriv method')
class Point_impedance1D(SimPEG.Survey.BaseRx):
    """
    Natural source 1D impedance receiver class

    Projects the single ('yx') impedance element Z = -e/h of a 1D NSEM
    fields object to the receiver locations.

    :param string component: real or imaginary component 'real' or 'imag'
    """
    # 1D problems carry only one off-diagonal impedance element.
    orientation = 'yx'
    def __init__(self, locs, component=None):
        assert(component in ['real', 'imag']), "'component' must be 'real' or 'imag', not {0!s}".format(component)
        self.component = component
        SimPEG.Survey.BaseRx.__init__(self, locs, rxType=None)
    @property
    def mesh(self):
        # Mesh the interpolation matrices are built on (set by eval/evalDeriv).
        return self._mesh
    @mesh.setter
    def mesh(self, value):
        # Keep the cached mesh unless a different object is assigned.
        if value is getattr(self, '_mesh', None):
            pass
        else:
            self._mesh = value
    # Utility for convenience
    def _sDiag(self, t):
        # Sparse diagonal matrix built from the vectorized input.
        return SimPEG.Utils.sdiag(mkvc(t, 2))
    @property
    def src(self):
        # Source whose fields are currently being projected.
        return self._src
    @src.setter
    def src(self, value):
        self._src = value
    @property
    def f(self):
        # Fields object currently being projected.
        return self._f
    @f.setter
    def f(self, value):
        self._f = value
    @property
    def Pex(self):
        # Interpolation of the electric field to the receiver depths
        # (last column of locs).
        # NOTE(review): uses 'Fx' here while the 3D class uses 'Ex' —
        # presumably the 1D formulation swaps the e/b grids; confirm the
        # mesh convention before changing.
        if getattr(self, '_Pex', None) is None:
            self._Pex = self._mesh.getInterpolationMat(self.locs[:, -1], 'Fx')
        return self._Pex
    @property
    def Pbx(self):
        # Interpolation of the magnetic flux to the receiver depths.
        if getattr(self, '_Pbx', None) is None:
            self._Pbx = self._mesh.getInterpolationMat(self.locs[:, -1], 'Ex')
        return self._Pbx
    @property
    def _ex(self):
        # Electric field projected to the receiver locations.
        return self.Pex * mkvc(self.f[self.src, 'e_1d'], 2)
    @property
    def _hx(self):
        # Magnetic field h = b / mu_0 projected to the receiver locations.
        return self.Pbx * mkvc(self.f[self.src, 'b_1d'], 2) / mu_0
    def _ex_u(self, v):
        # Derivative of the projected e-field w.r.t. the solution vector.
        return self.Pex * self.f._eDeriv_u(self.src, v)
    def _hx_u(self, v):
        # Derivative of the projected h-field w.r.t. the solution vector.
        return self.Pbx * self.f._bDeriv_u(self.src, v) / mu_0
    def _aex_u(self, v):
        # Adjoint of _ex_u.
        return self.f._eDeriv_u(self.src, self.Pex.T * v, adjoint=True)
    def _ahx_u(self, v):
        # Adjoint of _hx_u.
        return self.f._bDeriv_u(self.src, self.Pbx.T * v, adjoint=True) / mu_0
    @property
    def _Hd(self):
        # Diagonal of 1/h used to divide out the magnetic field.
        return self._sDiag(1./self._hx)
    def eval(self, src, mesh, f, return_complex=False):
        '''
        Project the fields to natural source data.
        :param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
        :param discretize.TensorMesh mesh: Mesh defining the topology of the problem
        :param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
        :param bool (optional) return_complex: Flag for return the complex evaluation
        :rtype: numpy.array
        :return: Evaluated data for the receiver
        '''
        # NOTE: Maybe set this as a property
        self.src = src
        self.mesh = mesh
        self.f = f
        # Z_yx = -e/h; NOTE(review): leading minus follows the 'yx' sign
        # convention — confirm against the 3D 'yx' definition.
        rx_eval_complex = -self._Hd * self._ex
        # Return the full impedance
        if return_complex:
            return rx_eval_complex
        return getattr(rx_eval_complex, self.component)
    def evalDeriv(self, src, mesh, f, v, adjoint=False):
        """method evalDeriv
        The derivative of the projection wrt u
        :param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
        :param discretize.TensorMesh mesh: Mesh defining the topology of the problem
        :param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
        :param numpy.ndarray v: vector of size (nU,) (adjoint=False) and size (nD,) (adjoint=True)
        :rtype: numpy.array
        :return: Calculated derivative (nD,) (adjoint=False) and (nP,2) (adjoint=True) for both polarizations
        """
        self.src = src
        self.mesh = mesh
        self.f = f
        if adjoint:
            # Adjoint of the quotient rule for Z = N/D with N = -e, D = h.
            Z1d = self.eval(src, mesh, f, True)
            def aZ_N_uV(x):
                return -self._aex_u(x)
            def aZ_D_uV(x):
                return self._ahx_u(x)
            rx_deriv = aZ_N_uV(self._Hd.T * v) - aZ_D_uV(self._sDiag(Z1d).T * self._Hd.T * v)
            if self.component == 'imag':
                # d(imag(Z))/du pulled back through the adjoint.
                rx_deriv_component = 1j*rx_deriv
            elif self.component == 'real':
                rx_deriv_component = rx_deriv.astype(complex)
        else:
            # Forward quotient rule: dZ = (dN - Z*dD) / D.
            Z1d = self.eval(src, mesh, f, True)
            Z_N_uV = -self._ex_u(v)
            Z_D_uV = self._hx_u(v)
            # Evaluate
            rx_deriv = self._Hd * (Z_N_uV - self._sDiag(Z1d) * Z_D_uV)
            rx_deriv_component = np.array(getattr(rx_deriv, self.component))
        return rx_deriv_component
class Point_impedance3D(BaseRxNSEM_Point):
    """
    Natural source 3D impedance receiver class

    Projects one element of the 2x2 impedance tensor Z = E H^{-1} to the
    receiver locations, using the two source polarizations stored in the
    fields object.

    :param numpy.ndarray locs: receiver locations (ie. :code:`np.r_[x,y,z]`)
    :param string orientation: receiver orientation 'xx', 'xy', 'yx' or 'yy'
    :param string component: real or imaginary component 'real' or 'imag'
    """
    def __init__(self, locs, orientation=None, component=None):
        BaseRxNSEM_Point.__init__(self, locs, orientation=orientation, component=component)
    def eval(self, src, mesh, f, return_complex=False):
        '''
        Project the fields to natural source data.
        :param SrcNSEM src: The source of the fields to project
        :param discretize.TensorMesh mesh: topological mesh corresponding to the fields
        :param FieldsNSEM f: Natural source fields object to project
        :rtype: numpy.array
        :return: component of the impedance evaluation
        '''
        # NOTE: Maybe set this as a property
        self.src = src
        self.mesh = mesh
        self.f = f
        # Zij numerator: cross-combination of the two polarizations,
        # the common denominator det(H) is applied through _Hd below.
        if 'xx' in self.orientation:
            Zij = ( self._ex_px * self._hy_py - self._ex_py * self._hy_px)
        elif 'xy' in self.orientation:
            Zij = (-self._ex_px * self._hx_py + self._ex_py * self._hx_px)
        elif 'yx' in self.orientation:
            Zij = ( self._ey_px * self._hy_py - self._ey_py * self._hy_px)
        elif 'yy' in self.orientation:
            Zij = (-self._ey_px * self._hx_py + self._ey_py * self._hx_px)
        # Calculate the complex value
        rx_eval_complex = self._Hd * Zij
        # Return the full impedance
        if return_complex:
            return rx_eval_complex
        return getattr(rx_eval_complex, self.component)
    def evalDeriv(self, src, mesh, f, v, adjoint=False):
        """
        The derivative of the projection wrt u
        :param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
        :param discretize.TensorMesh mesh: Mesh defining the topology of the problem
        :param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
        :param numpy.ndarray v: vector of size (nU,) (adjoint=False) and size (nD,) (adjoint=True)
        :rtype: numpy.array
        :return: Calculated derivative (nD,) (adjoint=False) and (nP,2) (adjoint=True) for both polarizations
        """
        self.src = src
        self.mesh = mesh
        self.f = f
        if adjoint:
            # Adjoint mode: per orientation, build the diagonalized numerator
            # Zij and the adjoint of its directional derivative ZijN_uV.
            if 'xx' in self.orientation:
                Zij = self._sDiag(self._aHd * (
                    self._sDiag(self._ahy_py)*self._aex_px -
                    self._sDiag(self._ahy_px)*self._aex_py
                ))
                def ZijN_uV(x):
                    return (
                        self._aex_px_u(self._sDiag(self._ahy_py) * x) +
                        self._ahy_py_u(self._sDiag(self._aex_px) * x) -
                        self._ahy_px_u(self._sDiag(self._aex_py) * x) -
                        self._aex_py_u(self._sDiag(self._ahy_px) * x)
                    )
            elif 'xy' in self.orientation:
                Zij = self._sDiag(self._aHd * (
                    -self._sDiag(self._ahx_py) * self._aex_px +
                    self._sDiag(self._ahx_px) * self._aex_py
                ))
                def ZijN_uV(x):
                    return (
                        -self._aex_px_u(self._sDiag(self._ahx_py) * x) -
                        self._ahx_py_u(self._sDiag(self._aex_px) * x) +
                        self._ahx_px_u(self._sDiag(self._aex_py) * x) +
                        self._aex_py_u(self._sDiag(self._ahx_px) * x)
                    )
            elif 'yx' in self.orientation:
                Zij = self._sDiag(self._aHd * (
                    self._sDiag(self._ahy_py) * self._aey_px -
                    self._sDiag(self._ahy_px) * self._aey_py
                ))
                def ZijN_uV(x):
                    return (
                        self._aey_px_u(self._sDiag(self._ahy_py) * x) +
                        self._ahy_py_u(self._sDiag(self._aey_px) * x) -
                        self._ahy_px_u(self._sDiag(self._aey_py) * x) -
                        self._aey_py_u(self._sDiag(self._ahy_px) * x)
                    )
            elif 'yy' in self.orientation:
                Zij = self._sDiag(self._aHd * (
                    -self._sDiag(self._ahx_py) * self._aey_px +
                    self._sDiag(self._ahx_px) * self._aey_py))
                def ZijN_uV(x):
                    return (
                        -self._aey_px_u(self._sDiag(self._ahx_py) * x) -
                        self._ahx_py_u(self._sDiag(self._aey_px) * x) +
                        self._ahx_px_u(self._sDiag(self._aey_py) * x) +
                        self._aey_py_u(self._sDiag(self._ahx_px) * x)
                    )
            # Calculate the complex derivative (adjoint quotient rule)
            rx_deriv_real = ZijN_uV(self._aHd * v) - self._aHd_uV(Zij.T * self._aHd * v)
            # NOTE: Need to reshape the output to go from 2*nU array to a (nU,2) matrix for each polarization
            # rx_deriv_real = np.hstack((mkvc(rx_deriv_real[:len(rx_deriv_real)/2],2),mkvc(rx_deriv_real[len(rx_deriv_real)/2::],2)))
            rx_deriv_real = rx_deriv_real.reshape((2, self.mesh.nE)).T
            # Extract the data
            if self.component == 'imag':
                rx_deriv_component = 1j * rx_deriv_real
            elif self.component == 'real':
                rx_deriv_component = rx_deriv_real.astype(complex)
        else:
            # Forward mode: directional derivative of the numerator by the
            # product rule, per orientation.
            if 'xx' in self.orientation:
                ZijN_uV = (
                    self._sDiag(self._hy_py) * self._ex_px_u(v) +
                    self._sDiag(self._ex_px) * self._hy_py_u(v) -
                    self._sDiag(self._ex_py) * self._hy_px_u(v) -
                    self._sDiag(self._hy_px) * self._ex_py_u(v)
                )
            elif 'xy' in self.orientation:
                ZijN_uV = (
                    -self._sDiag(self._hx_py) * self._ex_px_u(v) -
                    self._sDiag(self._ex_px) * self._hx_py_u(v) +
                    self._sDiag(self._ex_py) * self._hx_px_u(v) +
                    self._sDiag(self._hx_px) * self._ex_py_u(v)
                )
            elif 'yx' in self.orientation:
                ZijN_uV = (
                    self._sDiag(self._hy_py) * self._ey_px_u(v) +
                    self._sDiag(self._ey_px) * self._hy_py_u(v) -
                    self._sDiag(self._ey_py) * self._hy_px_u(v) -
                    self._sDiag(self._hy_px) * self._ey_py_u(v)
                )
            elif 'yy' in self.orientation:
                ZijN_uV = (
                    -self._sDiag(self._hx_py) * self._ey_px_u(v) -
                    self._sDiag(self._ey_px) * self._hx_py_u(v) +
                    self._sDiag(self._ey_py) * self._hx_px_u(v) +
                    self._sDiag(self._hx_px) * self._ey_py_u(v)
                )
            Zij = self.eval(src, self.mesh, self.f, True)
            # Calculate the complex derivative (quotient rule: (dN - Z*dD)/D)
            rx_deriv_real = self._Hd * (ZijN_uV - self._sDiag(Zij) * self._Hd_uV(v))
            rx_deriv_component = np.array(getattr(rx_deriv_real, self.component))
        return rx_deriv_component
class Point_tipper3D(BaseRxNSEM_Point):
    """
    Natural source 3D tipper receiver base class

    Projects one element of the vertical magnetic transfer function
    (tipper) T = [Tzx, Tzy] to the receiver locations.

    :param numpy.ndarray locs: receiver locations (ie. :code:`np.r_[x,y,z]`)
    :param string orientation: receiver orientation 'zx' or 'zy'
    :param string component: real or imaginary component 'real' or 'imag'
    """
    def __init__(self, locs, orientation=None, component=None):
        BaseRxNSEM_Point.__init__(
            self, locs, orientation=orientation, component=component
        )
    def eval(self, src, mesh, f, return_complex=False):
        '''
        Project the fields to natural source data.
        :param SrcNSEM src: The source of the fields to project
        :param discretize.TensorMesh mesh: Mesh defining the topology of the problem
        :param FieldsNSEM f: Natural source fields object to project
        :rtype: numpy.array
        :return: Evaluated component of the impedance data
        '''
        # NOTE: Maybe set this as a property
        self.src = src
        self.mesh = mesh
        self.f = f
        # Numerator for the requested tipper element; the common det(H)
        # denominator is applied through _Hd below.
        # (Consecutive `if`s rather than `elif`: harmless since the
        # orientations are mutually exclusive.)
        if 'zx' in self.orientation:
            Tij = (- self._hy_px * self._hz_py + self._hy_py * self._hz_px)
        if 'zy' in self.orientation:
            Tij = (self._hx_px * self._hz_py - self._hx_py * self._hz_px)
        rx_eval_complex = self._Hd * Tij
        # Return the full impedance
        if return_complex:
            return rx_eval_complex
        return getattr(rx_eval_complex, self.component)
    def evalDeriv(self, src, mesh, f, v, adjoint=False):
        """
        The derivative of the projection wrt u
        :param SimPEG.EM.NSEM.SrcNSEM src: NSEM source
        :param discretize.TensorMesh mesh: Mesh defining the topology of the problem
        :param SimPEG.EM.NSEM.FieldsNSEM f: NSEM fields object of the source
        :param numpy.ndarray v: Random vector of size
        :rtype: numpy.array
        :return: Calculated derivative (nD,) (adjoint=False) and (nP,2) (adjoint=True)
            for both polarizations
        """
        self.src = src
        self.mesh = mesh
        self.f = f
        if adjoint:
            # Adjoint mode: per orientation, build the diagonalized numerator
            # Tij and the adjoint of its directional derivative TijN_uV.
            if 'zx' in self.orientation:
                Tij = self._sDiag(self._aHd * (
                    -self._sDiag(self._ahz_py) * self._ahy_px +
                    self._sDiag(self._ahz_px) * self._ahy_py)
                )
                def TijN_uV(x):
                    return (
                        -self._ahz_py_u(self._sDiag(self._ahy_px) * x) -
                        self._ahy_px_u(self._sDiag(self._ahz_py) * x) +
                        self._ahy_py_u(self._sDiag(self._ahz_px) * x) +
                        self._ahz_px_u(self._sDiag(self._ahy_py) * x)
                    )
            elif 'zy' in self.orientation:
                Tij = self._sDiag(self._aHd * (
                    self._sDiag(self._ahz_py) * self._ahx_px -
                    self._sDiag(self._ahz_px) * self._ahx_py)
                )
                def TijN_uV(x):
                    return (
                        self._ahx_px_u(self._sDiag(self._ahz_py) * x) +
                        self._ahz_py_u(self._sDiag(self._ahx_px) * x) -
                        self._ahx_py_u(self._sDiag(self._ahz_px) * x) -
                        self._ahz_px_u(self._sDiag(self._ahx_py) * x)
                    )
            # Calculate the complex derivative (adjoint quotient rule)
            rx_deriv_real = (
                TijN_uV(self._aHd * v) -
                self._aHd_uV(Tij.T * self._aHd * v)
            )
            # NOTE: Need to reshape the output to go from 2*nU array to a (nU,2) matrix for each polarization
            # rx_deriv_real = np.hstack((mkvc(rx_deriv_real[:len(rx_deriv_real)/2],2),mkvc(rx_deriv_real[len(rx_deriv_real)/2::],2)))
            rx_deriv_real = rx_deriv_real.reshape((2, self.mesh.nE)).T
            # Extract the data
            if self.component == 'imag':
                rx_deriv_component = 1j * rx_deriv_real
            elif self.component == 'real':
                rx_deriv_component = rx_deriv_real.astype(complex)
        else:
            # Forward mode: product-rule derivative of the numerator.
            if 'zx' in self.orientation:
                TijN_uV = (
                    -self._sDiag(self._hy_px) * self._hz_py_u(v) -
                    self._sDiag(self._hz_py) * self._hy_px_u(v) +
                    self._sDiag(self._hy_py) * self._hz_px_u(v) +
                    self._sDiag(self._hz_px) * self._hy_py_u(v)
                )
            elif 'zy' in self.orientation:
                TijN_uV = (
                    self._sDiag(self._hz_py) * self._hx_px_u(v) +
                    self._sDiag(self._hx_px) * self._hz_py_u(v) -
                    self._sDiag(self._hx_py) * self._hz_px_u(v) -
                    self._sDiag(self._hz_px) * self._hx_py_u(v)
                )
            Tij = self.eval(src, mesh, f, True)
            # Calculate the complex derivative (quotient rule: (dN - T*dD)/D)
            rx_deriv_complex = (
                self._Hd * (TijN_uV - self._sDiag(Tij) * self._Hd_uV(v))
            )
            rx_deriv_component = np.array(
                getattr(rx_deriv_complex, self.component)
            )
        return rx_deriv_component
| StarcoderdataPython |
4914055 | <filename>test_graphlib.py
import itertools
from typing import Any, Collection, Dict, Generator, Hashable, Iterable, Sequence, Set, TypeVar
import sys
if sys.version_info < (3, 8):
from typing_extensions import Protocol
else:
from typing import Protocol
import graphlib2 as graphlib
import pytest
class Node(Hashable, Protocol):
    """Nodes need to be comparable so they can be sorted for determinism in tests"""
    # Structural protocol: any hashable, orderable, equality-comparable type
    # (ints, strings, ...) satisfies it.
    def __lt__(self, __other: Any) -> bool: ...
    def __hash__(self) -> int: ...
    def __eq__(self, o: object) -> bool: ...
def cycles_match(c1: Iterable[T], c2: Iterable[T]) -> bool:
    """Return True if *c1* and *c2* describe the same cycle up to rotation.

    Both arguments are closed walks (the starting node is repeated at the
    end).  The two cycles are required to contain the same node set; only
    their starting points may differ.
    """
    first, second = list(c1), list(c2)
    # A closed cycle repeats its starting node at the end; strip that copy.
    assert first[0] == first[-1]
    assert second[0] == second[-1]
    del first[-1]
    del second[-1]
    # The same nodes must participate in both cycles...
    assert set(first) == set(second)
    # ...and the orders must agree modulo rotation: every rotation of
    # `first` appears as a substring of `second` written out twice.
    joined_first = " ".join(map(str, first))
    joined_second_twice = " ".join(map(str, second + second))
    return joined_first in joined_second_twice
def get_static_order_from_groups(ts: graphlib.TopologicalSorter[T]) -> Generator[Set[T], None, None]:
    """Drain a prepared sorter, yielding each ready batch as a set.

    The caller must have called ``ts.prepare()`` already.  The original
    sorted each batch before building the set — the ordering was discarded
    by the set constructor, so the sort was pure wasted work and is removed.
    """
    while ts.is_active():
        nodes = ts.get_ready()
        ts.done(*nodes)
        yield set(nodes)
def assert_expected_resolution(
    graph: Dict[T, Iterable[T]],
    expected: Iterable[Collection[T]]
):
    """Check that *graph* resolves into exactly the *expected* parallel groups.

    The resolution is verified twice: once through the incremental
    get_ready()/done() protocol, and once by slicing the flat
    static_order() stream group by group.
    """
    # Protocol-level check: the ready batches must match the partition.
    sorter = graphlib.TopologicalSorter(graph)
    sorter.prepare()
    assert list(get_static_order_from_groups(sorter)) == [set(e) for e in expected]
    # Flat-order check: consecutive slices of static_order() form the groups.
    sorter = graphlib.TopologicalSorter(graph)
    ordered = iter(sorter.static_order())
    for group in expected:
        chunk = itertools.islice(ordered, len(group))
        assert set(chunk) == set(group)
def assert_cycles(
    graph: Dict[T, Sequence[T]],
    cycles: Iterable[Sequence[T]],
):
    """Assert that preparing *graph* raises CycleError naming one of *cycles*."""
    sorter: graphlib.TopologicalSorter[T] = graphlib.TopologicalSorter()
    for node, predecessors in graph.items():
        sorter.add(node, *predecessors)
    try:
        sorter.prepare()
    except graphlib.CycleError as err:
        # The exception carries (message, cycle-sequence); the reported
        # cycle must match one of the expected ones up to rotation.
        _, seq = err.args
        if any(cycles_match(cycle, seq) for cycle in cycles):
            return
        raise AssertionError(
            f"Cycles did not match: {cycles} does not contain {seq}"
        )
    else:
        raise AssertionError("CycleError was not raised")
@pytest.mark.parametrize(
    "graph,expected", [
        (
            {2: {11}, 9: {11, 8}, 10: {11, 3}, 11: {7, 5}, 8: {7, 3}},
            [(3, 5, 7), (8, 11), (2, 9, 10)],
        ),
        ({1: {}}, [(1,)]),
        ({x: {x + 1} for x in range(10)}, [(x,) for x in range(10, -1, -1)]),
        ({2: {3}, 3: {4}, 4: {5}, 5: {1}, 11: {12}, 12: {13}, 13: {14}, 14: {15}}, [(1, 15), (5, 14), (4, 13), (3, 12), (2, 11)]),
        (
            {
                0: [1, 2],
                1: [3],
                2: [5, 6],
                3: [4],
                4: [9],
                5: [3],
                6: [7],
                7: [8],
                8: [4],
                9: [],
            },
            [(9,), (4,), (3, 8), (1, 5, 7), (6,), (2,), (0,)],
        ),
        ({0: [1, 2], 1: [], 2: [3], 3: []}, [(1, 3), (2,), (0,)]),
        (
            {0: [1, 2], 1: [], 2: [3], 3: [], 4: [5], 5: [6], 6: []},
            [(1, 3, 6), (2, 5), (0, 4)],
        ),
    ]
)
def test_simple_cases(
    graph: Dict[int, Iterable[int]],
    expected: Iterable[Collection[int]],
):
    """Each case maps a dependency graph to its expected parallel groups."""
    assert_expected_resolution(graph, expected)
@pytest.mark.parametrize(
    "graph,expected", [
        ({1: {2}, 3: {4}, 5: {6}}, [(2, 4, 6), (1, 3, 5)]),
        ({1: set(), 3: set(), 5: set()}, [(1, 3, 5)]),
    ]
)
def test_no_dependencies(
    graph: Dict[int, Iterable[int]],
    expected: Iterable[Collection[int]],
):
    """Independent chains and dependency-free nodes resolve in flat groups."""
    assert_expected_resolution(graph, expected)
def test_node_repeated_in_dependencies():
    """Duplicate predecessors must be deduplicated, not double-counted."""
    # Test same node multiple times in dependencies
    assert_expected_resolution(
        {0: [2, 4, 4, 4, 4, 4], 1: {2}, 3: {4}},
        [(2, 4), (0, 1, 3)]
    )
    # Test adding the same dependency multiple times
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    ts.add(1, 2)
    ts.add(1, 2)
    ts.add(1, 2)
    assert list(ts.static_order()) == [2, 1]
def test_graph_with_iterables():
    """A generator (one-shot iterable) is accepted as a predecessor list."""
    dependson = (2 * x + 1 for x in range(5))
    graph = {0: dependson}
    ts = graphlib.TopologicalSorter(graph)
    # All odd predecessors come first (in any order), then the root 0.
    expected = {1, 3, 5, 7, 9}
    it = iter(ts.static_order())
    assert set(itertools.islice(it, len(expected))) == expected
    assert next(it) == 0
def test_add_dependencies_for_same_node_incrementally():
    """add() called once per edge must agree with construction from a dict."""
    graph = {1: {2, 3, 4, 5}}
    # Test same node multiple times
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    for k, vs in graph.items():
        for v in vs:
            ts.add(k, v)
    ts2 = graphlib.TopologicalSorter(graph)
    res1, res2 = list(ts.static_order()), list(ts2.static_order())
    # our root (1) should be last, all others should be the same
    assert res1.pop() == res2.pop()
    assert set(res1) == set(res2)
def test_empty():
    """An empty graph resolves to an empty order without error."""
    assert_expected_resolution({}, [])
@pytest.mark.parametrize(
    "graph,cycles", [
        ({1: {1}}, [[1, 1]]),
        ({1: {2}, 2: {1}}, [[1, 2, 1]]),
        ({1: {2}, 2: {3}, 3: {1}}, [[1, 3, 2, 1]]),
        ({1: {2}, 2: {3}, 3: {1}, 5: {4}, 4: {6}}, [[1, 3, 2, 1]]),
        ({1: {2}, 2: {1}, 3: {4}, 4: {5}, 6: {7}, 7: {6}}, [[1, 2, 1], [7, 6, 7]]),
        ({1: {2}, 2: {3}, 3: {2, 4}, 4: {5}}, [[3, 2, 3]]),
    ],
    ids=[
        "self cycle",
        "simple cycle",
        "indirect cycle",
        "not all elements involved in a cycle",
        "multiple cycles",
        "cycle in the middle of the graph",
    ]
)
def test_cycle(
    graph: Dict[int, Sequence[int]],
    cycles: Iterable[Sequence[int]],
):
    """prepare() must raise CycleError reporting one of the listed cycles."""
    assert_cycles(graph, cycles)
def test_calls_before_prepare():
    """get_ready/done/is_active all require prepare() to be called first."""
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    with pytest.raises(ValueError, match=r"prepare\(\) must be called first"):
        ts.get_ready()
    with pytest.raises(ValueError, match=r"prepare\(\) must be called first"):
        ts.done(3)
    with pytest.raises(ValueError, match=r"prepare\(\) must be called first"):
        ts.is_active()
def test_prepare_multiple_times():
    """A second prepare() call on the same sorter is rejected."""
    ts: graphlib.TopologicalSorter[Node] = graphlib.TopologicalSorter()
    ts.prepare()
    with pytest.raises(ValueError, match=r"cannot prepare\(\) more than once"):
        ts.prepare()
def test_invalid_nodes_in_done():
    """done() rejects nodes that were never handed out or never added."""
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    ts.add(1, 2, 3, 4)
    ts.add(2, 3, 4)
    ts.prepare()
    ts.get_ready()
    # 2 is a known node but has not been handed out by get_ready() yet.
    with pytest.raises(ValueError, match="node 2 was not passed out"):
        ts.done(2)
    # 24 was never part of the graph at all.
    with pytest.raises(ValueError, match=r"node 24 was not added using add\(\)"):
        ts.done(24)
def test_done():
    """Nodes become ready only after all their predecessors are done."""
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    ts.add(1, 2, 3, 4)
    ts.add(2, 3)
    ts.prepare()
    assert set(ts.get_ready()) == {3, 4}
    # If we don't mark anything as done, get_ready() returns nothing
    assert set(ts.get_ready()) == set()
    ts.done(3)
    # Now 2 becomes available as 3 is done
    assert set(ts.get_ready()) == {2}
    assert set(ts.get_ready()) == set()
    ts.done(4)
    ts.done(2)
    # Only 1 is missing
    assert set(ts.get_ready()) == {1}
    assert set(ts.get_ready()) == set()
    ts.done(1)
    assert set(ts.get_ready()) == set()
    # Intentionally re-checks emptiness after everything is done.
    assert not set(ts.get_ready())
def test_is_active():
    """is_active() stays true until every node has been marked done."""
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    ts.add(1, 2)
    ts.prepare()
    assert ts.is_active()
    assert set(ts.get_ready()) == {2}
    assert ts.is_active()
    ts.done(2)
    assert ts.is_active()
    assert set(ts.get_ready()) == {1}
    assert ts.is_active()
    ts.done(1)
    assert not ts.is_active()
def test_not_hashable_nodes():
    """Unhashable objects are rejected in both node and predecessor position."""
    ts: graphlib.TopologicalSorter[Any] = graphlib.TopologicalSorter()
    with pytest.raises(TypeError):
        ts.add(dict(), 1)
    with pytest.raises(TypeError):
        ts.add(1, dict())
    with pytest.raises(TypeError):
        # Previously duplicated the first case; cover unhashable objects in
        # both slots at once (mirrors CPython's graphlib test).
        ts.add(dict(), dict())
def test_order_of_insertion_does_not_matter_between_groups():
    """The group partition is a function of the graph, not insertion order."""
    def get_groups(ts: graphlib.TopologicalSorter[int]) -> Generator[Set[int], None, None]:
        # Local drain helper: prepares the sorter and yields ready batches.
        ts.prepare()
        while ts.is_active():
            nodes = ts.get_ready()
            ts.done(*nodes)
            yield set(nodes)
    ts: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    ts.add(3, 2, 1)
    ts.add(1, 0)
    ts.add(4, 5)
    ts.add(6, 7)
    ts.add(4, 7)
    # Same edges inserted in a different order must yield the same groups.
    ts2: graphlib.TopologicalSorter[int] = graphlib.TopologicalSorter()
    ts2.add(1, 0)
    ts2.add(3, 2, 1)
    ts2.add(4, 7)
    ts2.add(6, 7)
    ts2.add(4, 5)
    assert list(get_groups(ts)) == list(get_groups(ts2))
def test_execute_after_copy():
    """A copy of a sorter can be drained independently of the original."""
    # NOTE: copy() is graphlib2-specific; stdlib graphlib has no such method.
    graph = {0: [1]}
    ts = graphlib.TopologicalSorter(graph)
    ts2 = ts.copy()
    assert list(ts.static_order()) == [1, 0]
    assert not ts.is_active()
    assert list(ts2.static_order()) == [1, 0]
    assert not ts2.is_active()
if __name__ == "__main__":
    # Allow running this test file directly without invoking pytest by hand.
    pytest.main([__file__,])
| StarcoderdataPython |
12818109 | <reponame>harmonica-pacil/invid19
from django.db import models
from users.models import Profile
class Forum(models.Model):
    """A forum thread posted by a user profile.

    NOTE(review): field order matters for Django migrations — do not
    reorder without generating a migration.
    """
    # Thread title and body.
    title = models.CharField(max_length=50)
    message = models.TextField()
    # Author; deleting the Profile cascades to their threads.
    creator = models.ForeignKey(Profile, on_delete=models.CASCADE, blank = True, null = True)
    # NOTE(review): creation time stored as a string rather than a
    # DateTimeField(auto_now_add=True); changing it would require a
    # migration, so only flagging it here.
    created_at = models.CharField(max_length=50)
    # Denormalized copies of the creator's username/avatar at post time.
    creator_username = models.CharField(
        max_length=200, null=True, blank=True
    )
    creator_image = models.ImageField(
        blank=True,
        null=True,
        default="profiles/default-user_pfzkxt",
    )
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: <NAME>
## Computer Vision Center (CVC). Universitat Autonoma de Barcelona
## Email: <EMAIL>
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
import torch
import torch.nn as nn
import torch.nn.functional as F
class NaiveARC(nn.Module):
    """Naive ARC scoring head: a two-layer MLP with a sigmoid output.

    Maps a state vector of size ``numStates`` to a scalar score in [0, 1].
    Commented-out experimental variants (linear-only first layer, softmax
    output) were removed as dead code.
    """
    def __init__(self, numStates):
        """
        Initializes the naive ARC.

        :param numStates: dimensionality of the input state vector.
        """
        super(NaiveARC, self).__init__()
        # Two-layer head: numStates -> 64 (ELU) -> 1 (sigmoid).
        self.dense1 = nn.Linear(numStates, 64)
        self.dense2 = nn.Linear(64, 1)
    def forward(self, x):
        """Return the squeezed sigmoid score for each input row."""
        d1 = torch.squeeze(F.elu(self.dense1(x)))
        d2 = torch.sigmoid(torch.squeeze(self.dense2(d1)))
        return d2
| StarcoderdataPython |
1736773 | import bcrypt
from flask import Blueprint, render_template, redirect, request, flash
from flask.helpers import url_for
from flask_login import current_user, LoginManager, login_user, logout_user
from flask_login.utils import login_required
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.fields.simple import BooleanField
from wtforms.validators import DataRequired, Email, EqualTo, Length, ValidationError
from app.models import models
# Blueprint holding every account route (register/login/logout); templates are
# resolved relative to the parent directory.
mod = Blueprint('accounts', __name__, template_folder="../templates")
# Flask-Login manager; its user_loader and unauthorized handler are registered
# below.  NOTE(review): init_app is presumably called at app setup — confirm.
login_manager = LoginManager()
# Email Domain Validation (making sure the email ends in fcs domain)
class ValidEmailDomain:
    """WTForms validator: the field value must end with *suffix*.

    Args:
        suffix: required domain suffix (e.g. ``"friendscentral.org"``).
        message: optional custom error text; a default mentioning the suffix
            is used when omitted.  (Previously this argument was stored but
            never used when raising.)
    """

    def __init__(self, suffix, message=None):
        if suffix is None:
            # A missing suffix is a programming error; ``assert`` would be
            # silently stripped under ``python -O``, so raise explicitly.
            raise ValueError("suffix is required")
        self.suffix = suffix
        self.message = message

    def __call__(self, form, field):
        if not field.data.endswith(self.suffix):
            raise ValidationError(self.message or f'Email must be part of {self.suffix} domain')
class ValidRegistrationEmail:
    """WTForms validator: the e-mail must already be whitelisted as an Account."""

    def __call__(self, form, field):
        account = models.Account.query.filter_by(email=field.data.lower()).first()
        if not account:
            raise ValidationError(f'Email not whitelisted, please contact your administrator')
# This will probably get deleted later if we go with the account claiming system.
class UniqueRegistrationEmail:
    """WTForms validator: reject e-mails already claimed by an existing User."""

    def __call__(self, form, field):
        existing_user = models.User.query.filter_by(email=field.data.lower()).first()
        if existing_user:
            raise ValidationError(f'User with email already exists')
# Email Validation
class ValidLoginEmail:
    """WTForms validator: the login e-mail must map to a registered User."""

    def __call__(self, form, field):
        email = form.data['email'].lower()
        if models.User.query.filter_by(email=email).first() is None:
            raise ValidationError(f'Email Not Registered')
# Password Validation
class CheckPassword:
    """WTForms validator: the supplied password must match the stored hash."""

    def __call__(self, form, field):
        user = models.User.query.filter_by(email=form.data['email'].lower()).first()
        if user is None:
            # Unknown e-mail is reported by ValidLoginEmail, not here.
            return
        # bcrypt.checkpw does a constant-time comparison; re-hashing and
        # comparing with ``==`` (the previous approach) leaks timing info.
        if not bcrypt.checkpw(field.data.encode(), user.password):
            raise ValidationError(f'Incorrect password')
# Single source of truth for the allowed e-mail domain; if the tech dept. ever
# changes the domain, only this constant needs updating.
fcs_suffix = "friendscentral.org"
# Form for registration
class RegisterForm(FlaskForm):
    """Sign-up form: name, whitelisted school e-mail, confirmed password."""

    fname = StringField('First Name', validators=[DataRequired()])
    lname = StringField('Last Name', validators=[DataRequired()])
    # E-mail must be on the school domain, pre-whitelisted as an Account, and
    # not already claimed by an existing User.
    email = StringField('Email', validators=[DataRequired(), Email(), ValidEmailDomain(suffix=fcs_suffix), ValidRegistrationEmail(), UniqueRegistrationEmail()])
    # Password and confirmation cross-validate each other; min 8 characters.
    password = StringField(label='Password', validators=[
        DataRequired(),
        Length(min=8),
        EqualTo('password_confirm', message='Passwords must match')
    ])
    password_confirm = StringField(label='Password confirm', validators=[
        DataRequired(),
        Length(min=8),
        EqualTo('password', message='Passwords must match')
    ])
# Form for logging in
class LoginForm(FlaskForm):
    """Login form: registered school e-mail, matching password, remember-me."""

    email = StringField('Email', validators=[DataRequired(), Email(), ValidEmailDomain(suffix=fcs_suffix), ValidLoginEmail()])
    # CheckPassword verifies the credential against the stored bcrypt hash.
    password = StringField(label='Password', validators=[DataRequired(), CheckPassword()])
    remember_me = BooleanField(label = 'Remember Me')
# Registration Routing
@mod.route("/register", methods=['GET', 'POST'])
def register():
    """Render the sign-up page and create a User on a valid submission."""
    form = RegisterForm()

    if request.method == 'GET':
        return render_template("register.html", form=form)

    if request.method == 'POST':
        # The form validators already enforce whitelisting and uniqueness.
        if form.validate_on_submit():
            hashed = bcrypt.hashpw(form.password.data.encode(), bcrypt.gensalt())
            # The Account row is guaranteed to exist by ValidRegistrationEmail.
            account = models.Account.query.filter_by(email=form.email.data).first()
            new_user = models.User(
                account.id, form.data['fname'], form.data['lname'], form.data['email'], hashed
            )
            models.db.session.add(new_user)
            models.db.session.commit()
            return redirect("/")
        # Validation failed: re-render with the field errors attached.
        return render_template('register.html', form=form)
# Load User
@login_manager.user_loader
def load_user(userid):
    """Flask-Login callback: resolve a stored session id to a User row."""
    user = models.User.query.get(userid)
    return user
# Login Unauthorized Handler
# Login Unauthorized Handler
@login_manager.unauthorized_handler
def handle_needs_login():
    """Send anonymous visitors to the login page, remembering the target.

    The attempted endpoint is passed along as ``next`` so login can bounce
    the user back afterwards (see ``redirect_dest``).
    """
    flash("You have to be logged in to access this page.")
    return redirect(url_for('accounts.login', next=request.endpoint))
# Redirect Destination
def redirect_dest(fallback):
    """Redirect to the endpoint named in the ``next`` query arg, else *fallback*.

    ``next`` may be absent (``None``) or name an unknown endpoint, in which
    case ``url_for`` raises and we fall back.
    """
    dest = request.args.get('next')
    try:
        dest_url = url_for(dest)
    except Exception:
        # Was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; Exception is the widest we should catch here.
        return redirect(fallback)
    return redirect(dest_url)
# Login Routing
@mod.route("/login", methods=['GET', 'POST'])
def login():
    """Render the login page and start a session on valid credentials."""
    form = LoginForm()
    if request.method == 'GET':
        return render_template("login.html", form=form)
    if request.method == 'POST':
        # Credentials are checked by the form validators (ValidLoginEmail /
        # CheckPassword), so a validated form implies the user exists.
        if form.validate_on_submit():
            user = models.User.query.filter_by(email=form.email.data).first()
            # BUG FIX: pass the checkbox *value*.  The BooleanField object
            # itself is always truthy, which force-enabled "remember me".
            login_user(user, remember=form.remember_me.data)
            flash('Logged in successfully.')
            # Honour a ``next`` target, else go to the user's profile.
            return redirect(request.args.get("next") or url_for('community.profile', userid=current_user.id))
        # Validation failed: re-render with the field errors attached.
        return render_template('login.html', form=form)
@mod.route("/logout")
@login_required
def logout():
    """End the current session and return to the public homepage."""
    logout_user()
    return redirect(url_for('general.homepage'))
3590319 | <reponame>cahudson94/Raven-Valley-Forge-Shop
# Generated by Django 2.0 on 2018-02-05 03:02
from django.db import migrations, models
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    """Auto-generated: creates the UserServiceImages table."""

    dependencies = [
        ('catalog', '0009_remove_product_shipping_info'),
    ]

    operations = [
        migrations.CreateModel(
            name='UserServiceImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # sorl-thumbnail image field; uploads land under 'service_images'.
                ('image', sorl.thumbnail.fields.ImageField(upload_to='service_images')),
            ],
        ),
    ]
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.