code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
try:
    from micropython import const
except ImportError:
    # Running under CPython: micropython.const() is only a MicroPython
    # compiler optimisation hint, so an identity function is equivalent.
    def const(n):
        """Return *n* unchanged (CPython stand-in for micropython.const)."""
        return n
from rgb_display.rgb import DisplayDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jrmoser/RGB_Display.git"
# SSD1351 command opcodes; names follow the controller datasheet's command table.
_SETCOLUMN = const(0x15)
_SETROW = const(0x75)
_WRITERAM = const(0x5C)
_READRAM = const(0x5D)
_SETREMAP = const(0xA0)
_STARTLINE = const(0xA1)
_DISPLAYOFFSET = const(0xA2)
_DISPLAYALLOFF = const(0xA4)
_DISPLAYALLON = const(0xA5)
_NORMALDISPLAY = const(0xA6)
_INVERTDISPLAY = const(0xA7)
_FUNCTIONSELECT = const(0xAB)
_DISPLAYOFF = const(0xAE)
_DISPLAYON = const(0xAF)
_PRECHARGE = const(0xB1)
_DISPLAYENHANCE = const(0xB2)
_CLOCKDIV = const(0xB3)
_SETVSL = const(0xB4)
_SETGPIO = const(0xB5)
_PRECHARGE2 = const(0xB6)
_SETGRAY = const(0xB8)
_USELUT = const(0xB9)
_PRECHARGELEVEL = const(0xBB)
_VCOMH = const(0xBE)
_CONTRASTABC = const(0xC1)
_CONTRASTMASTER = const(0xC7)
_MUXRATIO = const(0xCA)
_COMMANDLOCK = const(0xFD)
# Hardware scrolling commands (not used by the class below, kept for callers).
_HORIZSCROLL = const(0x96)
_STOPSCROLL = const(0x9E)
_STARTSCROLL = const(0x9F)
class SSD1351(DisplayDevice):
    """
    A simple driver for the SSD1351-based displays.

    >>> import busio
    >>> import digitalio
    >>> import board
    >>> from adafruit_rgb_display import color565
    >>> import adafruit_rgb_display.ssd1351 as ssd1351
    >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    >>> display = ssd1351.SSD1351(spi, cs=digitalio.DigitalInOut(board.GPIO0),
    ...     dc=digitalio.DigitalInOut(board.GPIO15), rst=digitalio.DigitalInOut(board.GPIO16))
    >>> display.fill(0x7521)
    >>> display.pixel(32, 32, 0)
    """

    # Opcodes the shared DisplayDevice machinery uses for windowed pixel access.
    _COLUMN_SET = _SETCOLUMN
    _PAGE_SET = _SETROW
    _RAM_WRITE = _WRITERAM
    _RAM_READ = _READRAM
    # (command, data) pairs sent once at power-up, in this exact order.
    _INIT = (
        (_COMMANDLOCK, b"\x12"),
        (_COMMANDLOCK, b"\xb1"),
        (_DISPLAYOFF, b""),
        (_DISPLAYENHANCE, b"\xa4\x00\x00"),
        # 7:4 = Oscillator Frequency,
        # 3:0 = CLK Div Ratio (A[3:0]+1 = 1..16)
        (_CLOCKDIV, b"\xf0"),
        (_MUXRATIO, b"\x7f"),  # 127
        (_SETREMAP, b"\x74"),
        (_STARTLINE, b"\x00"),
        (_DISPLAYOFFSET, b"\x00"),
        (_SETGPIO, b"\x00"),
        (_FUNCTIONSELECT, b"\x01"),
        (_PRECHARGE, b"\x32"),
        (_PRECHARGELEVEL, b"\x1f"),
        (_VCOMH, b"\x05"),
        (_NORMALDISPLAY, b""),
        (_CONTRASTABC, b"\xc8\x80\xc8"),
        (_CONTRASTMASTER, b"\x0a"),
        (_SETVSL, b"\xa0\xb5\x55"),
        (_PRECHARGE2, b"\x01"),
        (_DISPLAYON, b""),
    )
    # Pixels are big-endian 16-bit values; column/row positions fit in one byte
    # each (the panel is at most 128x128).
    _ENCODE_PIXEL = ">H"
    _ENCODE_POS = ">BB"

    # pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=128,
        height=128,
        *,
        x_offset=0,
        y_offset=0,
        rotation=0,
    ):
        """Create an SSD1351 driver.

        :param port: the bus/port object the display is attached to
        :param dc: data/command pin
        :param rst: optional reset pin
        :param width: display width in pixels (default 128)
        :param height: display height in pixels (default 128)
        :param x_offset: column offset of the visible area within RAM
        :param y_offset: row offset of the visible area within RAM
        :param rotation: display rotation in degrees
        """
        # TODO: Limit to 16000000 baud rate?
        super().__init__(
            port,
            dc,
            rst,
            width,
            height,
            x_offset=x_offset,
            y_offset=y_offset,
            rotation=rotation,
        )
try:
    import struct
except ImportError:
    # MicroPython ships the struct API under the name "ustruct".
    import ustruct as struct
try:
    from micropython import const
except ImportError:
    # CPython fallback: const() is only a MicroPython optimisation hint.
    def const(n):
        """Return *n* unchanged (CPython stand-in for micropython.const)."""
        return n
from rgb_display.rgb import DisplayDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jrmoser/RGB_Display.git"
# ST7789 command opcodes; these are the standard Sitronix / MIPI-DCS names.
_NOP = const(0x00)
_SWRESET = const(0x01)
_RDDID = const(0x04)
_RDDST = const(0x09)
_SLPIN = const(0x10)
_SLPOUT = const(0x11)
_PTLON = const(0x12)
_NORON = const(0x13)
_INVOFF = const(0x20)
_INVON = const(0x21)
_DISPOFF = const(0x28)
_DISPON = const(0x29)
_CASET = const(0x2A)
_RASET = const(0x2B)
_RAMWR = const(0x2C)
_RAMRD = const(0x2E)
_PTLAR = const(0x30)
_COLMOD = const(0x3A)
_MADCTL = const(0x36)
# Frame-rate, inversion, power and gamma control registers.
_FRMCTR1 = const(0xB1)
_FRMCTR2 = const(0xB2)
_FRMCTR3 = const(0xB3)
_INVCTR = const(0xB4)
_DISSET5 = const(0xB6)
_PWCTR1 = const(0xC0)
_PWCTR2 = const(0xC1)
_PWCTR3 = const(0xC2)
_PWCTR4 = const(0xC3)
_PWCTR5 = const(0xC4)
_VMCTR1 = const(0xC5)
_RDID1 = const(0xDA)
_RDID2 = const(0xDB)
_RDID3 = const(0xDC)
_RDID4 = const(0xDD)
_PWCTR6 = const(0xFC)
_GMCTRP1 = const(0xE0)
_GMCTRN1 = const(0xE1)
class ST7789(DisplayDevice):
    """
    A simple driver for the ST7789-based displays.

    >>> import busio
    >>> import digitalio
    >>> import board
    >>> from adafruit_rgb_display import color565
    >>> import adafruit_rgb_display.st7789 as st7789
    >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    >>> display = st7789.ST7789(spi, cs=digitalio.DigitalInOut(board.GPIO0),
    ...     dc=digitalio.DigitalInOut(board.GPIO15), rst=digitalio.DigitalInOut(board.GPIO16))
    >>> display.fill(0x7521)
    >>> display.pixel(64, 64, 0)
    """

    # Opcodes the shared DisplayDevice machinery uses for windowed pixel access.
    _COLUMN_SET = _CASET
    _PAGE_SET = _RASET
    _RAM_WRITE = _RAMWR
    _RAM_READ = _RAMRD
    # Minimal power-up sequence; window, inversion and rotation are finished
    # in init() because they depend on the instance's width/height/offsets.
    _INIT = (
        (_SWRESET, None),
        (_SLPOUT, None),
        (_COLMOD, b"\x55"),  # 16bit color
        (_MADCTL, b"\x08"),
    )

    # pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=240,
        height=320,
        *,
        x_offset=0,
        y_offset=0,
        rotation=0,
    ):
        """Create an ST7789 driver.

        :param port: the bus/port object the display is attached to
        :param dc: data/command pin
        :param rst: optional reset pin
        :param width: display width in pixels (default 240)
        :param height: display height in pixels (default 320)
        :param x_offset: column offset of the visible area within RAM
        :param y_offset: row offset of the visible area within RAM
        :param rotation: display rotation in degrees
        """
        # BUGFIX: the original forwarded the undefined name ``device`` here,
        # which raised NameError on every instantiation; the parameter is
        # actually called ``port``.
        super().__init__(
            port,
            dc,
            rst,
            width,
            height,
            x_offset=x_offset,
            y_offset=y_offset,
            rotation=rotation,
        )

    def init(self):
        """Run the common init sequence, then program the address window,
        colour inversion, normal mode, display-on and rotation for this panel.
        """
        super().init()
        # Address window spans the visible area, shifted by the panel's RAM
        # offsets (self._X_START / self._Y_START come from the base class).
        cols = struct.pack(">HH", self._X_START, self.width + self._X_START)
        rows = struct.pack(">HH", self._Y_START, self.height + self._Y_START)
        for command, data in (
            (_CASET, cols),
            (_RASET, rows),
            (_INVON, None),
            (_NORON, None),
            (_DISPON, None),
            (_MADCTL, b"\xc0"),  # Set rotation to 0 and use RGB
        ):
            self.write(command, data)
try:
    from micropython import const
except ImportError:
    # CPython fallback: micropython.const() is only an optimisation hint,
    # so an identity function behaves identically.
    def const(n):
        """Return *n* unchanged (CPython stand-in for micropython.const)."""
        return n
from rgb_display.rgb import DisplayDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jrmoser/RGB_Display.git"
# HX8357 command opcodes; names follow the Himax HX8357 datasheet.
_SWRESET = const(0x01)
_SLPOUT = const(0x11)
_NORON = const(0x13)
_INVOFF = const(0x20)
_INVON = const(0x21)
_DISPOFF = const(0x28)
_DISPON = const(0x29)
_CASET = const(0x2A)
_PASET = const(0x2B)
_RAMWR = const(0x2C)
_RAMRD = const(0x2E)
_TEON = const(0x35)
_MADCTL = const(0x36)
_COLMOD = const(0x3A)
_TEARLINE = const(0x44)
# Extended (vendor) registers: oscillator, power, RGB interface, panel setup.
_SETOSC = const(0xB0)
_SETPWR1 = const(0xB1)
_SETRGB = const(0xB3)
_SETCYC = const(0xB4)
_SETCOM = const(0xB6)
_SETC = const(0xB9)
_SETSTBA = const(0xC0)
_SETPANEL = const(0xCC)
_SETGAMMA = const(0xE0)
class HX8357(DisplayDevice):
    """
    A simple driver for the HX8357-based displays.

    >>> import busio
    >>> import digitalio
    >>> import board
    >>> from adafruit_rgb_display import color565
    >>> import adafruit_rgb_display.hx8357 as hx8357
    >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    >>> display = hx8357.HX8357(spi, cs=digitalio.DigitalInOut(board.GPIO0),
    ...     dc=digitalio.DigitalInOut(board.GPIO15))
    >>> display.fill(0x7521)
    >>> display.pixel(64, 64, 0)
    """

    # Opcodes the shared DisplayDevice machinery uses for windowed pixel access.
    _COLUMN_SET = _CASET
    _PAGE_SET = _PASET
    _RAM_WRITE = _RAMWR
    _RAM_READ = _RAMRD
    # (command, data) pairs sent once at power-up, in this exact order.
    _INIT = (
        (_SWRESET, None),
        (_SETC, b"\xFF\x83\x57"),
        (_SETRGB, b"\x80\x00\x06\x06"),  # 0x80 enables SDO pin (0x00 disables)
        (_SETCOM, b"\x25"),  # -1.52V
        (_SETOSC, b"\x68"),  # Normal mode 70Hz, Idle mode 55 Hz
        (_SETPANEL, b"\x05"),  # BGR, Gate direction swapped
        (_SETPWR1, b"\x00\x15\x1C\x1C\x83\xAA"),  # Not deep standby BT VSPR VSNR AP
        (_SETSTBA, b"\x50\x50\x01\x3C\x1E\x08"),  # OPON normal OPON idle STBA GEN
        (
            _SETCYC,
            b"\x02\x40\x00\x2A\x2A\x0D\x78",
        ),  # NW 0x02 RTN DIV DUM DUM GDON GDOFF
        (
            _SETGAMMA,
            b"\x02\x0A\x11\x1d\x23\x35\x41\x4b\x4b\x42\x3A\x27\x1B\x08\x09\x03\x02"
            b"\x0A\x11\x1d\x23\x35\x41\x4b\x4b\x42\x3A\x27\x1B\x08\x09\x03\x00\x01",
        ),
        (_COLMOD, b"\x55"),  # 16 bit
        (_MADCTL, b"\xc0"),
        (_TEON, b"\x00"),
        (_TEARLINE, b"\x00\x02"),  # TW off
        (_SLPOUT, None),
        (_MADCTL, b"\xa0"),
        (_DISPON, None),
    )
    # Pixels are big-endian 16-bit values; positions are 16-bit because the
    # panel is wider than 255 pixels.
    _ENCODE_PIXEL = ">H"
    _ENCODE_POS = ">HH"

    # pylint: disable-msg=useless-super-delegation, too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=480,
        height=320,
        rotation=0,
    ):
        """Create an HX8357 driver.

        :param port: the bus/port object the display is attached to
        :param dc: data/command pin
        :param rst: optional reset pin
        :param width: display width in pixels (default 480)
        :param height: display height in pixels (default 320)
        :param rotation: display rotation in degrees
        """
        super().__init__(
            port,
            dc,
            rst,
            width,
            height,
            rotation=rotation,
        )
try:
    import struct
except ImportError:
    # MicroPython ships the struct API under the name "ustruct".
    import ustruct as struct
from rgb_display.rgb import DisplayDevice
__version__ = "0.0.0-auto.0"
__repo__ = "https://github.com/jrmoser/RGB_Display.git"
class ILI9341(DisplayDevice):
    """
    A simple driver for the ILI9341/ILI9340-based displays.

    >>> import busio
    >>> import digitalio
    >>> import board
    >>> from adafruit_rgb_display import color565
    >>> import adafruit_rgb_display.ili9341 as ili9341
    >>> spi = busio.SPI(clock=board.SCK, MOSI=board.MOSI, MISO=board.MISO)
    >>> display = ili9341.ILI9341(spi, cs=digitalio.DigitalInOut(board.GPIO0),
    ...     dc=digitalio.DigitalInOut(board.GPIO15))
    >>> display.fill(color565(0xff, 0x11, 0x22))
    >>> display.pixel(120, 160, 0)
    """

    # Opcodes the shared DisplayDevice machinery uses for windowed pixel access
    # (CASET / PASET / RAMWR / RAMRD).
    _COLUMN_SET = 0x2A
    _PAGE_SET = 0x2B
    _RAM_WRITE = 0x2C
    _RAM_READ = 0x2E
    # (command, data) pairs sent once at power-up, in this exact order.
    _INIT = (
        (0xEF, b"\x03\x80\x02"),
        (0xCF, b"\x00\xc1\x30"),
        (0xED, b"\x64\x03\x12\x81"),
        (0xE8, b"\x85\x00\x78"),
        (0xCB, b"\x39\x2c\x00\x34\x02"),
        (0xF7, b"\x20"),
        (0xEA, b"\x00\x00"),
        (0xC0, b"\x23"),  # Power Control 1, VRH[5:0]
        (0xC1, b"\x10"),  # Power Control 2, SAP[2:0], BT[3:0]
        (0xC5, b"\x3e\x28"),  # VCM Control 1
        (0xC7, b"\x86"),  # VCM Control 2
        (0x36, b"\x48"),  # Memory Access Control
        (0x3A, b"\x55"),  # Pixel Format
        (0xB1, b"\x00\x18"),  # FRMCTR1
        (0xB6, b"\x08\x82\x27"),  # Display Function Control
        (0xF2, b"\x00"),  # 3Gamma Function Disable
        (0x26, b"\x01"),  # Gamma Curve Selected
        (
            0xE0,  # Set Gamma
            b"\x0f\x31\x2b\x0c\x0e\x08\x4e\xf1\x37\x07\x10\x03\x0e\x09\x00",
        ),
        (
            0xE1,  # Set Gamma
            b"\x00\x0e\x14\x03\x11\x07\x31\xc1\x48\x08\x0f\x0c\x31\x36\x0f",
        ),
        (0x11, None),  # Sleep out
        (0x29, None),  # Display on
    )
    # Pixels are written as big-endian 16-bit values but read back as 3 bytes.
    _ENCODE_PIXEL = ">H"
    _ENCODE_POS = ">HH"
    _DECODE_PIXEL = ">BBB"

    # pylint: disable-msg=too-many-arguments
    def __init__(
        self,
        port,
        dc,
        rst=None,
        width=240,
        height=320,
        rotation=0,
    ):
        """Create an ILI9341 driver.

        :param port: the bus/port object the display is attached to
        :param dc: data/command pin
        :param rst: optional reset pin
        :param width: display width in pixels (default 240)
        :param height: display height in pixels (default 320)
        :param rotation: display rotation in degrees
        """
        super().__init__(
            port,
            dc,
            rst=rst,
            width=width,
            height=height,
            rotation=rotation,
        )
        # Current vertical scroll offset in pixels, tracked host-side.
        self._scroll = 0

    # pylint: enable-msg=too-many-arguments
    def scroll(self, dy=None):  # pylint: disable-msg=invalid-name
        """Scroll the display by *dy* pixels, wrapping at the display height.

        When called without an argument, return the current scroll offset
        instead of scrolling.
        """
        if dy is None:
            return self._scroll
        self._scroll = (self._scroll + dy) % self.height
        # 0x37 is the ILI9341 Vertical Scrolling Start Address (VSCRSADD).
        self.write(0x37, struct.pack(">H", self._scroll))
        return None
import digitalio
import board
from PIL import Image, ImageDraw, ImageFont

import adafruit_rgb_display.ili9341 as ili9341
import adafruit_rgb_display.st7789 as st7789  # pylint: disable=unused-import
import adafruit_rgb_display.hx8357 as hx8357  # pylint: disable=unused-import
import adafruit_rgb_display.st7735 as st7735  # pylint: disable=unused-import
import adafruit_rgb_display.ssd1351 as ssd1351  # pylint: disable=unused-import
import adafruit_rgb_display.ssd1331 as ssd1331  # pylint: disable=unused-import

# First define some constants to allow easy resizing of shapes.
BORDER = 20
FONTSIZE = 24

# Configuration for CS and DC pins (these are PiTFT defaults):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = digitalio.DigitalInOut(board.D24)

# Config for display baudrate (default max is 24mhz):
BAUDRATE = 24000000

# Setup SPI bus using hardware SPI:
spi = board.SPI()

# pylint: disable=line-too-long
# Create the display (uncomment the line matching your panel):
# disp = st7789.ST7789(spi, rotation=90,  # 2.0" ST7789
# disp = st7789.ST7789(spi, height=240, y_offset=80, rotation=180,  # 1.3", 1.54" ST7789
# disp = st7789.ST7789(spi, rotation=90, width=135, height=240, x_offset=53, y_offset=40,  # 1.14" ST7789
# disp = hx8357.HX8357(spi, rotation=180,  # 3.5" HX8357
# disp = st7735.ST7735R(spi, rotation=90,  # 1.8" ST7735R
# disp = st7735.ST7735R(spi, rotation=270, height=128, x_offset=2, y_offset=3,  # 1.44" ST7735R
# disp = st7735.ST7735R(spi, rotation=90, bgr=True,  # 0.96" MiniTFT ST7735R
# disp = ssd1351.SSD1351(spi, rotation=180,  # 1.5" SSD1351
# disp = ssd1351.SSD1351(spi, height=96, y_offset=32, rotation=180,  # 1.27" SSD1351
# disp = ssd1331.SSD1331(spi, rotation=180,  # 0.96" SSD1331
disp = ili9341.ILI9341(
    spi,
    rotation=90,  # 2.2", 2.4", 2.8", 3.2" ILI9341
    cs=cs_pin,
    dc=dc_pin,
    rst=reset_pin,
    baudrate=BAUDRATE,
)
# pylint: enable=line-too-long

# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
if disp.rotation % 180 == 90:
    height = disp.width  # we swap height/width to rotate it to landscape!
    width = disp.height
else:
    width = disp.width
    height = disp.height
image = Image.new("RGB", (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a green filled box as the background
draw.rectangle((0, 0, width, height), fill=(0, 255, 0))
disp.image(image)

# Draw a smaller inner purple rectangle
draw.rectangle(
    (BORDER, BORDER, width - BORDER - 1, height - BORDER - 1), fill=(170, 0, 136)
)

# Load a TTF Font
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", FONTSIZE)

# Draw Some Text
text = "Hello World!"
# font.getsize() was removed in Pillow 10; getbbox() (returning left, top,
# right, bottom) is the supported way to measure rendered text.
left, top, right, bottom = font.getbbox(text)
font_width = right - left
font_height = bottom - top
draw.text(
    (width // 2 - font_width // 2, height // 2 - font_height // 2),
    text,
    font=font,
    fill=(255, 255, 0),
)

# Display image.
disp.image(image)
import time
import subprocess

import digitalio
import board
from PIL import Image, ImageDraw, ImageFont

import adafruit_rgb_display.ili9341 as ili9341
import adafruit_rgb_display.st7789 as st7789  # pylint: disable=unused-import
import adafruit_rgb_display.hx8357 as hx8357  # pylint: disable=unused-import
import adafruit_rgb_display.st7735 as st7735  # pylint: disable=unused-import
import adafruit_rgb_display.ssd1351 as ssd1351  # pylint: disable=unused-import
import adafruit_rgb_display.ssd1331 as ssd1331  # pylint: disable=unused-import

# Configuration for CS and DC pins (these are PiTFT defaults):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = digitalio.DigitalInOut(board.D24)

# Config for display baudrate (default max is 24mhz):
BAUDRATE = 24000000

# Setup SPI bus using hardware SPI:
spi = board.SPI()

# pylint: disable=line-too-long
# Create the display (uncomment the line matching your panel):
# disp = st7789.ST7789(spi, rotation=90,  # 2.0" ST7789
# disp = st7789.ST7789(spi, height=240, y_offset=80, rotation=180,  # 1.3", 1.54" ST7789
# disp = st7789.ST7789(spi, rotation=90, width=135, height=240, x_offset=53, y_offset=40,  # 1.14" ST7789
# disp = hx8357.HX8357(spi, rotation=180,  # 3.5" HX8357
# disp = st7735.ST7735R(spi, rotation=90,  # 1.8" ST7735R
# disp = st7735.ST7735R(spi, rotation=270, height=128, x_offset=2, y_offset=3,  # 1.44" ST7735R
# disp = st7735.ST7735R(spi, rotation=90, bgr=True,  # 0.96" MiniTFT ST7735R
# disp = ssd1351.SSD1351(spi, rotation=180,  # 1.5" SSD1351
# disp = ssd1351.SSD1351(spi, height=96, y_offset=32, rotation=180,  # 1.27" SSD1351
# disp = ssd1331.SSD1331(spi, rotation=180,  # 0.96" SSD1331
disp = ili9341.ILI9341(
    spi,
    rotation=90,  # 2.2", 2.4", 2.8", 3.2" ILI9341
    cs=cs_pin,
    dc=dc_pin,
    rst=reset_pin,
    baudrate=BAUDRATE,
)
# pylint: enable=line-too-long

# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
if disp.rotation % 180 == 90:
    height = disp.width  # we swap height/width to rotate it to landscape!
    width = disp.height
else:
    width = disp.width
    height = disp.height
image = Image.new("RGB", (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image)

# First define some constants to allow easy positioning of text.
padding = -2
x = 0

# Load a TTF font. Make sure the .ttf font file is in the
# same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24)

while True:
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, width, height), outline=0, fill=0)

    # Shell scripts for system monitoring from here:
    # https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
    cmd = "hostname -I | cut -d' ' -f1"
    IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
    CPU = subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%s MB %.2f%%\", $3,$2,$3*100/$2 }'"
    MemUsage = subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = 'df -h | awk \'$NF=="/"{printf "Disk: %d/%d GB %s", $3,$2,$5}\''
    Disk = subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = "cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'"  # pylint: disable=line-too-long
    Temp = subprocess.check_output(cmd, shell=True).decode("utf-8")

    # Write five lines of text.
    y = padding
    draw.text((x, y), IP, font=font, fill="#FFFFFF")
    # font.getsize() was removed in Pillow 10; getbbox()[3] (the bbox bottom)
    # is the supported way to get a rendered line's height.
    y += font.getbbox(IP)[3]
    draw.text((x, y), CPU, font=font, fill="#FFFF00")
    y += font.getbbox(CPU)[3]
    draw.text((x, y), MemUsage, font=font, fill="#00FF00")
    y += font.getbbox(MemUsage)[3]
    draw.text((x, y), Disk, font=font, fill="#0000FF")
    y += font.getbbox(Disk)[3]
    draw.text((x, y), Temp, font=font, fill="#FF00FF")

    # Display image.
    disp.image(image)
    time.sleep(0.1)
import digitalio
import board
from PIL import Image, ImageDraw

import adafruit_rgb_display.ili9341 as ili9341
import adafruit_rgb_display.st7789 as st7789  # pylint: disable=unused-import
import adafruit_rgb_display.hx8357 as hx8357  # pylint: disable=unused-import
import adafruit_rgb_display.st7735 as st7735  # pylint: disable=unused-import
import adafruit_rgb_display.ssd1351 as ssd1351  # pylint: disable=unused-import
import adafruit_rgb_display.ssd1331 as ssd1331  # pylint: disable=unused-import

# Configuration for CS and DC pins (these are PiTFT defaults):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = digitalio.DigitalInOut(board.D24)

# Config for display baudrate (default max is 24mhz):
BAUDRATE = 24000000

# Setup SPI bus using hardware SPI:
spi = board.SPI()

# pylint: disable=line-too-long
# Create the display (uncomment the line matching your panel):
# disp = st7789.ST7789(spi, rotation=90,  # 2.0" ST7789
# disp = st7789.ST7789(spi, height=240, y_offset=80, rotation=180,  # 1.3", 1.54" ST7789
# disp = st7789.ST7789(spi, rotation=90, width=135, height=240, x_offset=53, y_offset=40,  # 1.14" ST7789
# disp = hx8357.HX8357(spi, rotation=180,  # 3.5" HX8357
# disp = st7735.ST7735R(spi, rotation=90,  # 1.8" ST7735R
# disp = st7735.ST7735R(spi, rotation=270, height=128, x_offset=2, y_offset=3,  # 1.44" ST7735R
# disp = st7735.ST7735R(spi, rotation=90, bgr=True,  # 0.96" MiniTFT ST7735R
# disp = ssd1351.SSD1351(spi, rotation=180,  # 1.5" SSD1351
# disp = ssd1351.SSD1351(spi, height=96, y_offset=32, rotation=180,  # 1.27" SSD1351
# disp = ssd1331.SSD1331(spi, rotation=180,  # 0.96" SSD1331
disp = ili9341.ILI9341(
    spi,
    rotation=90,  # 2.2", 2.4", 2.8", 3.2" ILI9341
    cs=cs_pin,
    dc=dc_pin,
    rst=reset_pin,
    baudrate=BAUDRATE,
)
# pylint: enable=line-too-long

# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
if disp.rotation % 180 == 90:
    height = disp.width  # we swap height/width to rotate it to landscape!
    width = disp.height
else:
    width = disp.width
    height = disp.height
image = Image.new("RGB", (width, height))

# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)

# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image)

image = Image.open("blinka.jpg")

# Scale the image to the smaller screen dimension
image_ratio = image.width / image.height
screen_ratio = width / height
if screen_ratio < image_ratio:
    scaled_width = image.width * height // image.height
    scaled_height = height
else:
    scaled_width = width
    scaled_height = image.height * width // image.width
# NOTE(review): newer Pillow spells this Image.Resampling.BICUBIC; the bare
# constant still works as an alias on current releases — confirm for your version.
image = image.resize((scaled_width, scaled_height), Image.BICUBIC)

# Crop and center the image
x = scaled_width // 2 - width // 2
y = scaled_height // 2 - height // 2
image = image.crop((x, y, x + width, y + height))

# Display image.
disp.image(image)
# -*- coding: utf-8 -*-
# Stats display for a Mini PiTFT (ST7789) panel: shows IP, CPU, memory,
# disk and temperature readings gathered via shell commands.
import time
import subprocess
import digitalio
import board
from PIL import Image, ImageDraw, ImageFont
import adafruit_rgb_display.st7789 as st7789
# Configuration for CS and DC pins (these are FeatherWing defaults on M0/M4):
cs_pin = digitalio.DigitalInOut(board.CE0)
dc_pin = digitalio.DigitalInOut(board.D25)
reset_pin = None
# Config for display baudrate (default max is 24mhz):
BAUDRATE = 64000000
# Setup SPI bus using hardware SPI:
spi = board.SPI()
# Create the ST7789 display:
disp = st7789.ST7789(
    spi,
    cs=cs_pin,
    dc=dc_pin,
    rst=reset_pin,
    baudrate=BAUDRATE,
    width=135,
    height=240,
    x_offset=53,
    y_offset=40,
)
# Create blank image for drawing.
# Make sure to create image with mode 'RGB' for full color.
height = disp.width  # we swap height/width to rotate it to landscape!
width = disp.height
image = Image.new("RGB", (width, height))
rotation = 90
# Get drawing object to draw on image.
draw = ImageDraw.Draw(image)
# Draw a black filled box to clear the image.
draw.rectangle((0, 0, width, height), outline=0, fill=(0, 0, 0))
disp.image(image, rotation)
# Draw some shapes.
# First define some constants to allow easy resizing of shapes.
padding = -2
top = padding
# NOTE(review): `bottom` is computed but never used below — confirm before removing.
bottom = height - padding
# Move left to right keeping track of the current x position for drawing shapes.
x = 0
# Alternatively load a TTF font. Make sure the .ttf font file is in the
# same directory as the python script!
# Some other nice fonts to try: http://www.dafont.com/bitmap.php
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", 24)
# Turn on the backlight
backlight = digitalio.DigitalInOut(board.D22)
backlight.switch_to_output()
backlight.value = True
while True:
    # Draw a black filled box to clear the image.
    draw.rectangle((0, 0, width, height), outline=0, fill=0)

    # Shell scripts for system monitoring from here:
    # https://unix.stackexchange.com/questions/119126/command-to-display-memory-usage-disk-usage-and-cpu-load
    cmd = "hostname -I | cut -d' ' -f1"
    IP = "IP: " + subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = "top -bn1 | grep load | awk '{printf \"CPU Load: %.2f\", $(NF-2)}'"
    CPU = subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = "free -m | awk 'NR==2{printf \"Mem: %s/%s MB %.2f%%\", $3,$2,$3*100/$2 }'"
    MemUsage = subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = 'df -h | awk \'$NF=="/"{printf "Disk: %d/%d GB %s", $3,$2,$5}\''
    Disk = subprocess.check_output(cmd, shell=True).decode("utf-8")
    cmd = "cat /sys/class/thermal/thermal_zone0/temp | awk '{printf \"CPU Temp: %.1f C\", $(NF-0) / 1000}'"  # pylint: disable=line-too-long
    Temp = subprocess.check_output(cmd, shell=True).decode("utf-8")

    # Write five lines of text.
    y = top
    draw.text((x, y), IP, font=font, fill="#FFFFFF")
    # font.getsize() was removed in Pillow 10; getbbox()[3] (the bbox bottom)
    # is the supported way to get a rendered line's height.
    y += font.getbbox(IP)[3]
    draw.text((x, y), CPU, font=font, fill="#FFFF00")
    y += font.getbbox(CPU)[3]
    draw.text((x, y), MemUsage, font=font, fill="#00FF00")
    y += font.getbbox(MemUsage)[3]
    draw.text((x, y), Disk, font=font, fill="#0000FF")
    y += font.getbbox(Disk)[3]
    draw.text((x, y), Temp, font=font, fill="#FF00FF")

    # Display image.
    disp.image(image, rotation)
    time.sleep(0.1)
# rgbloom
This Python script retrieves RGB magnitudes computed from low resolution
spectra published in *Gaia* DR3, following the work described in
[Carrasco et al. (2023)](#3).
These magnitudes are given in the standard system defined by
[Cardiel et al. (2021a)](#1).
This code is an updated version of [rgblues](https://github.com/guaix-ucm/rgblues),
which provides RGB magnitudes from *Gaia* EDR3 photometric data, as
explained in [Cardiel et al. (2021b)](#2).
The RGB magnitudes provided by [Carrasco et al. (2023)](#3)
are more reliable because
they have been directly computed from the source spectrum without
the need to employ any approximate calibration, nor introducing
constraints on the source colour or extinction. In addition, the number
of sources with RGB estimates has increased from ~15 million to
~200 million objects (the 200M sample). Anyway, since the sky
coverage of the 200M sample is still not very good at some
high Galactic latitudes, `rgbloom` also provides RGB estimates
for sources that do not belong to the 200M sample making use of the
polynomial calibrations of [Cardiel et al. (2021b)](#2), which
may still be useful for those users requiring calibrated RGB
sources at those sky regions.
The code `rgbloom` performs a cone search defined by coordinates
right ascension and declination on the sky and a search radius.
The cone search is performed making use of the Astroquery coordinated
package of astropy.
You need to have a live connection to the Internet for
the script to work!
## Installing the code
In order to keep your current Python installation clean, it is highly
recommended to install a python 3 *virtual environment* first.
### Creating and activating the python virtual environment
```bash
$ python3 -m venv venv_rgb
$ . venv_rgb/bin/activate
(venv_rgb) $
```
### Installing the package
```bash
(venv_rgb) $ pip install git+https://github.com/guaix-ucm/rgbloom.git@main#egg=rgbloom
```
## Executing the program
Just execute it from the command line:
```bash
(venv_rgb) $ rgbloom 56.66 24.10 1.0 12
```
The last instruction executes the program providing the
four positional arguments: right ascension, declination, search radius and
limiting *Gaia* G magnitude. *Note that the coordinates and search radius
must be given in decimal degrees*.
Each time the code is executed, some auxiliary files are
downloaded to your computer (if they have not been downloaded
in a previous execution). These files are kept in a cache directory
that is displayed in the terminal output (you do not have to worry
about its location unless you need to delete them in order to
recover disk space).
The execution of this example should lead to the following output in the
terminal (except for the absolute path where the auxiliary downloaded files
are stored):
```
Welcome to rgbloom version 1.2
==============================
Downloading data from 'http://nartex.fis.ucm.es/~ncl/rgbphot/gaiaDR3/reference_healpix8.csv' to file '/Users/cardiel/Library/Caches/pooch/635cd722cf61b23bd8eee20635e4d580-reference_healpix8.csv'.
<STEP1> Starting cone search in Gaia DR3... (please wait)
INFO: Query finished. [astroquery.utils.tap.core]
--> 310 objects found
--> 23 objects classified as VARIABLE
<STEP2> Estimating RGB magnitudes in DR3 query using C21 polynomials OK!
<STEP3> Retrieving objects from the 200M sample in the enclosing HEALPIx level-8 tables
Downloading data from 'http://nartex.fis.ucm.es/~ncl/rgbphot/gaiaDR3/RGBsynthetic_NOVARIABLES/sortida_XpContinuousMeanSpectrum_006602-007952_RGB_NOVARIABLES_final.csv.gz' to file '/Users/cardiel/Library/Caches/pooch/2d94d5acfcb380d6dff1eaa207caa086-sortida_XpContinuousMeanSpectrum_006602-007952_RGB_NOVARIABLES_final.csv.gz'.
* Required file: /Users/cardiel/Library/Caches/pooch/2d94d5acfcb380d6dff1eaa207caa086-sortida_XpContinuousMeanSpectrum_006602-007952_RGB_NOVARIABLES_final.csv.gz
md5:f9cf7ed0f84eecda13ef6a408d291b96
--> Number of objects: 100553
--> Total number of objects: 100553
<STEP4> Cross-matching DR3 with 200M sample
--> Number of objects in the 200M subsample............: 100553
--> Number of objects in DR3 query.....................: 310
--> Number of DR3 objects within the 200M sample.......: 248
--> Number of DR3 objects no present in the 200M sample: 62
<STEP5> Saving output CSV files
--> file rgbloom_200m.csv saved
--> file rgbloom_no200m.csv saved
<STEP6> Generating PDF plot
End of program
```
The `rgbloom` script executes the following steps:
- Step 1: cone search in *Gaia* DR3, gathering the following parameters:
`source_id`, `ra`, `dec`, `phot_g_mean_mag`, `phot_bp_mean_mag`,
`phot_rp_mean_mag` and `phot_variable_flag`
- Step 2: initial RGB magnitude estimation using the polynomial
transformations given in Eqs. (2)-(4) of [Cardiel et al. (2021b)](#2).
These values are only provided for objects in the field of view
that do not belong to the 200M sample.
- Step 3: downloading of the RGB magnitude estimates corresponding to
the 200M sample objects within the HEALPIx level-8 tables enclosing
the region of the sky defined in the initial cone search.
- Step 4: cross-matching between the DR3 and 200M subsamples to identify objects
with RGB estimates derived from the low resolution *Gaia* DR3 spectra.
- Step 5: generation of the output files. Two files (in CSV format) are
generated:
- `rgbloom_200m.csv`: objects belonging to the 200M sample
with RGB magnitudes computed as described in [Carrasco et al. (2023)](#3).
This CSV file provides the following columns:
- `number`: consecutive number of the object in the CSV file (used in the final plot)
- `source_id`: identification in *Gaia* DR3
- `ra`: right ascension (from *Gaia* DR3)
- `dec`: declination (from *Gaia* DR3)
- `RGB_B`: blue RGB magnitude estimate
- `RGB_G`: green RGB magnitude estimate
- `RGB_R`: red RGB magnitude estimate
- `errRGB_B`: uncertainty in the blue RGB magnitude estimate
- `errRGB_G`: uncertainty in the green RGB magnitude estimate
- `errRGB_R`: uncertainty in the red RGB magnitude estimate
- `objtype`: type of source, according to the classification provided by
*Gaia* DR3 (see [description of
`GAIA_SOURCE`](https://gea.esac.esa.int/archive/documentation/GDR3/Gaia_archive/chap_datamodel/sec_dm_main_source_catalogue/ssec_dm_gaia_source.html) table for details):
- `1`: object flagged as `NON_SINGLE_STAR`
- `2`: object flagged as `IN_QSO_CANDIDATES`
- `3`: object flagged as `IN_GALAXY_CANDIDATES`
- `0`: none of the above
- `qlflag`: global quality flag:
- `0`: reliable source
- `1`: suspicious source (blending, contamination, non-stellar
identification)
- `rgbloom_no200m.csv`: objects not included in the 200M sample, which
RGB magnitudes are estimated using the approximate polynomial
calibrations of [Cardiel et al. (2021b)](#2).
This CSV file contains the following columns:
- `number`: consecutive number of the object in the CSV file (used in the final plot)
- `source_id`: identification in *Gaia* DR3
- `ra`: right ascension (from *Gaia* DR3)
- `dec`: declination (from *Gaia* DR3)
- `phot_variable_flag`: photometric variability flag (from *Gaia* DR3)
- `bp_rp`: G_BP-G_RP colour (from *Gaia* DR3)
- `RGB_B`: blue RGB magnitude estimate
- `RGB_G`: green RGB magnitude estimate
- `RGB_R`: red RGB magnitude estimate
The list of objects in these two files is sorted by right ascension.
- Step 6: generation of a finding chart plot (in PDF format): `rgbloom.pdf`.
The execution of the previous example generates a cone search around
the [Pleiades](https://en.wikipedia.org/wiki/Pleiades) star cluster:

The objects in this plot are color coded based on the *Gaia* G_BP - G_RP
colour. Stars brighter than a pre-defined threshold are displayed
with big star symbols. To facilitate the identification of each object, the
consecutive identification numbers in the two files `rgbloom_200m.csv` and
`rgbloom_no200m.csv` are also displayed, in red
and black, respectively. The identification numbers corresponding to the less
reliable sources in `rgbloom_200m.csv` (`qlflag=1`) appear inside a rectangle
with a light-gray border. Note that the identification numbers are not
displayed when using the parameter `--nonumbers` in the command line.
In the case of objects that do not belong to the 200M sample (i.e., those in
`rgbloom_no200m.csv`), a blue square
has been overplotted on the sources flagged as variable in *Gaia* DR3, and a
grey diamond on objects outside the *Gaia* -0.5 < G_BP - G_RP < 2.0 colour
interval.
Note that the three output archives (1 PDF and 2 CSV files) share the same root
name `rgbloom`. This can be easily modified using the optional argument
`--basename <newbasename>` in the command line.
### Additional help
Some auxiliary optional arguments are also available. See description
invoking the script help:
```bash
$ rgbloom --help
...
...
positional arguments:
ra_center right Ascension (decimal degrees)
dec_center declination (decimal degrees)
search_radius search radius (decimal degrees)
g_limit limiting Gaia G magnitude
optional arguments:
-h, --help show this help message and exit
--basename BASENAME file basename for output files
--brightlimit BRIGHTLIMIT
objects brighter than this Gaia G limit are displayed with star symbols (default=8.0)
--symbsize SYMBSIZE multiplying factor for symbol size (default=1.0)
--nonumbers do not display object identification number in PDF chart
--noplot skip PDF chart generation
--nocolor do not use colors in PDF chart
--verbose increase program verbosity
```
## Citation
If you find this Python package useful,
please cite [Cardiel et al. (2021a)](#1)
(to quote the use of the standard RGB system)
and [Carrasco et al. (2023)](#3) (where the computation of the RGB magnitudes
from the low resolution spectra published in *Gaia* DR3 is explained).
## Related information
You can visit the [RGB Photometry](https://guaix.ucm.es/rgbphot) web page at
the Universidad Complutense de Madrid.
## Bibliography
<a id="1">Cardiel et al. (2021a)</a>,
MNRAS, https://ui.adsabs.harvard.edu/abs/2021MNRAS.504.3730C/abstract
<a id="2">Cardiel et al. (2021b)</a>,
MNRAS, https://ui.adsabs.harvard.edu/abs/2021MNRAS.507..318C/abstract
<a id="3">Carrasco et al. (2023)</a>, Remote Sensing, https://www.mdpi.com/2072-4292/15/7/1767
| /rgbloom-1.3.tar.gz/rgbloom-1.3/README.md | 0.511473 | 0.90355 | README.md | pypi |
# rgblues
This Python script predicts RGB magnitudes from *Gaia* EDR3
photometric data. These magnitudes are given in the standard system defined by
[Cardiel et al. (2021a)](#1).
The code performs a cone search defined by coordinates
right ascension and declination on the sky and a search radius. The
predictions make use of the polynomial transformations given by Eqs. (2)-(5)
in [Cardiel et al. (2021b; hereafter C21)](#2)
The cone search is performed making use of Astroquery, an Astropy-coordinated
package.
You need to have a live connection to the Internet for
the script to work!
## Installing the code
In order to keep your current Python installation clean, it is highly
recommended to install a python 3 *virtual environment* first.
### Creating and activating the python virtual environment
```bash
$ python3 -m venv venv_rgb
$ . venv_rgb/bin/activate
(venv_rgb) $
```
### Installing the package
```bash
(venv_rgb) $ pip install git+https://github.com/guaix-ucm/rgblues.git@main#egg=rgblues
```
## Executing the program
Just execute it from the command line:
```bash
(venv_rgb) $ rgblues 56.66 24.10 1.0 12
```
The last instruction executes the program providing the
four positional arguments: right ascension, declination, search radius and
limiting *Gaia* G magnitude. *Note that the coordinates and search radius
must be given in decimal degrees*.
The first time you execute the code, the auxiliary file
`edr3_source_id_15M_allsky.fits` (size 129 Mb), containing the `source_id` of
the *Gaia* EDR3 stars belonging to the ~15 million star sample of C21, is
automatically downloaded to a cache directory (you do not have to worry
about its location).
The script executes the following steps:
- Step 1: cone search in *Gaia* EDR3, gathering the following parameters:
`source_id`, `ra`, `dec`, `phot_g_mean_mag`, `phot_bp_mean_mag` and
`phot_rp_mean_mag`.
- Step 2: cone search in StarHorse to retrieve interstellar extinction,
metallicity and distance, among other parameters. This step is optional and
only executed when `--starhorse_block <number>` is employed (in this case
`<number>` is an integer number indicating the number of stars whose
parameters are retrieved in each single query to Gaia@AIP; a typical useful
value is 100).
- Step 3: cross-matching of the previous EDR3 sample with the list of ~15
million stars from C21. This step determines the
subsample of EDR3 stars for which the RGB photometric calibration is
reliable.
- Step 4: cone search in *Gaia* DR2. This additional step is performed in
order to retrieve the `phot_variable_flag` parameter indicating whether
the star was flagged as variable in DR2. Note that this flag is not
available in EDR3.
- Step 5: cross-matching between DR2 and EDR3 to identify the variable
stars in EDR3. This step is required because it is not guaranteed that
the same astronomical source will always have the same source identifier
in the different Gaia Data Releases.
- Step 6: computation of the RGB magnitudes using the polynomial
transformations given in Eqs. (2)-(5) of C21.
- Step 7: generation of the output files. Three files (in CSV format) are
generated:
- `rgblues_15m.csv`: stars belonging to the ~15 million star sample
of C21 (with reliable RGB magnitude estimates).
- `rgblues_var.csv`: objects flagged as variable in DR2.
- `rgblues_edr3.csv`: remaining objects in EDR3. The RGB magnitude
estimates of these objects can be potentially biased due to
systematic effects introduced by interstellar extinction, or by
exhibiting non-solar metallicity, or a colour outside the *Gaia* -0.5 <
G_BP-G_RP < 2.0 interval. This file will typically contain more stars
than the `rgblues_15m.csv` selection.
The three CSV files provide the same 11 columns:
- `number`: consecutive number of the star in each CSV file
- `source_id`: identification in EDR3
- `ra`: right ascension (from EDR3)
- `dec`: declination (from EDR3)
- `b_rgb`: blue RGB magnitude estimate
- `g_rgb`: green RGB magnitude estimate
- `r_rgb`: red RGB magnitude estimate
- `g_br_rgb`: pseudo-green RGB magnitude estimate, defined in C21 as
the arithmetic mean of the blue and red RGB magnitudes
- `phot_g_mean_mag`: *Gaia* G magnitude (EDR3)
- `phot_bp_mean_mag`: *Gaia* G_BP magnitude (EDR3)
- `phot_rp_mean_mag`: *Gaia* G_RP magnitude (EDR3)
The list of objects in those files is sorted by right ascension.
When using `--starhorse_block <number>`, the files `rgblues_15m.csv` and
`rgblues_edr3.csv` contain 3 additional
columns providing parameters derived by [Anders et al. (2019)](#3):
- `av50`: 50th percentile of the interstellar extinction
- `met50`: 50th percentile of the metallicity [M/H]
- `dist50`: 50th percentile of the distance (kpc)
These three values are set to 99.999 for those stars that do not belong to
the StarHorse sample.
- Step 8: generation of a finding chart plot (in PDF format): `rgblues.pdf`.
The execution of the previous example generates a cone search around
the [Pleiades](https://en.wikipedia.org/wiki/Pleiades) star cluster:

The stars in this plot are color coded based on the *Gaia* G_BP - G_RP
colour. A red circle has been overplotted on the stars belonging to
the ~15 million star sample of C21, a blue square on the variable
objects in DR2, and a grey diamond on EDR3 stars outside the *Gaia*
-0.5 < G_BP - G_RP < 2.0 colour interval.
Stars brighter than a pre-defined threshold are displayed
with big star symbols. To facilitate the identification of each star, the
consecutive star number in the three files (`rgblues_15m.csv`,
`rgblues_edr3.csv` and `rgblues_var.csv`) is also displayed (in red,
black and blue, respectively). These numbers are not displayed when using the
parameter `--nonumbers` in the command line.
Note that the four output archives (1 PDF and 3 CSV files) share the same root
name `rgblues`. This can be easily modified using the optional argument
`--basename <newbasename>` in the command line.
### Additional help
Some auxiliary optional arguments are also available. See their description by
invoking the script help:
```bash
$ rgblues --help
...
...
positional arguments:
ra_center right Ascension (decimal degrees)
dec_center declination (decimal degrees)
search_radius search radius (decimal degrees)
g_limit limiting Gaia G magnitude
optional arguments:
-h, --help show this help message and exit
--basename BASENAME file basename for output files
--brightlimit BRIGHTLIMIT
stars brighter than this Gaia G limit are displayed
with star symbols (default=8.0)
--symbsize SYMBSIZE multiplying factor for symbol size (default=1.0)
--nonumbers do not display star numbers in PDF chart
--noplot skip PDF chart generation
--nocolor do not use colors in PDF chart
--starhorse_block STARHORSE_BLOCK
number of stars/query (default=0, no query)
--verbose increase program verbosity
--debug debug flag
```
## Citation
If you find this Python package useful,
please cite [Cardiel et al. (2021a)](#1)
(to quote the use of the standard RGB system)
and [Cardiel et al. (2021b)](#2) (where the transformation between the *Gaia*
photometry and the RGB magnitudes is derived).
## Related information
You can visit the [RGB Photometry](https://guaix.ucm.es/rgbphot) web page at
the Universidad Complutense de Madrid.
## Bibliography
<a id="3">Anders et al. (2019)</a>,
https://ui.adsabs.harvard.edu/abs/2019A%26A...628A..94A/abstract
<a id="1">Cardiel et al. (2021a)</a>,
MNRAS, https://ui.adsabs.harvard.edu/abs/2021MNRAS.504.3730C/abstract
<a id="2">Cardiel et al. (2021b)</a>,
MNRAS, https://ui.adsabs.harvard.edu/abs/2021MNRAS.507..318C/abstract
| /rgblues-1.1.tar.gz/rgblues-1.1/README.md | 0.474388 | 0.928733 | README.md | pypi |
from .basedataset import BaseRGBTDataet,_basepath
from rgbt.utils import *
import os
from rgbt.metrics import PR,SR
class RGBT210(BaseRGBTDataet):
"""
RGBT210 dataset is the subset of RGBT234.\r
`Weighted Sparse Representation Regularized Graph Learning for RGB-T Object Tracking.`\r
[Paper](https://dl.acm.org/doi/pdf/10.1145/3123266.3123289) \r
[Download Dataset.](https://github.com/mmic-lcl/Datasets-and-benchmark-code)
"""
def __init__(self, gt_path=f'{_basepath}/gt_file/RGBT210/groundtruth/',
seq_name_path=f"{_basepath}/gt_file/RGBT210/SequencesName.txt") -> None:
seqs = load_text(seq_name_path, dtype=str)
super().__init__(gt_path=gt_path, seqs=seqs, bbox_type='ltwh', v_name='init.txt', i_name='init.txt')
self.name = 'RGBT210'
self.PR_fun = PR()
self.SR_fun = SR()
# Challenge attributes
self._attr_list = ("BC","CM","DEF","FM","HO","LI","LR","MB","NO","TC","PO","SC")
self.BC = self.choose_serial_by_att("BC")
self.CM = self.choose_serial_by_att("CM")
self.DEF = self.choose_serial_by_att("DEF")
self.FM = self.choose_serial_by_att("FM")
self.HO = self.choose_serial_by_att("HO")
self.LI = self.choose_serial_by_att("LI")
self.LR = self.choose_serial_by_att("LR")
self.MB = self.choose_serial_by_att("MB")
self.NO = self.choose_serial_by_att("NO")
self.TC = self.choose_serial_by_att("TC")
self.PO = self.choose_serial_by_att("PO")
self.SC = self.choose_serial_by_att("SC")
def __call__(self, tracker_name, result_path: str, seqs=None, prefix='', bbox_type='ltwh'):
RGBT_start()
res = super().__call__(tracker_name, result_path, seqs, prefix, bbox_type)
RGBT_end()
return res
def get_attr_list(self):
return self._attr_list
def choose_serial_by_att(self, attr):
if attr==self.ALL:
return self.seqs_name
else:
p = load_text(os.path.join(self.gt_path, '..', 'attr_txt', attr+'.txt'))
with open(os.path.join(self.gt_path, '..', 'attr_txt', 'SequencesName.txt')) as f:
seq_name_s = f.read().split('\n')
seq_name_s = seq_name_s[:len(p)]
return [seq_name for i,seq_name in zip(p, seq_name_s) if (i and seq_name in self.seqs_name)]
def PR(self, tracker_name=None, seqs=None):
"""
Parameters
----------
[in] tracker_name - str
Default is None, evaluate all registered trackers.
[in] seqs - list
Sequence to be evaluated, default is all.
Returns
-------
[out0] When evaluating a single tracker, return PR and the precision Rate at different thresholds.
[out1] Other cases return a dictionary with all tracker results.
"""
if seqs==None:
seqs = self.seqs_name
if tracker_name!=None:
return self.PR_fun(self, self.trackers[tracker_name], seqs)
else:
res = {}
for k,v in self.trackers.items():
res[k] = self.PR_fun(self, v, seqs)
return res
def SR(self, tracker_name=None, seqs=None):
"""
Parameters
----------
[in] tracker_name - str
Default is None, evaluate all registered trackers.
[in] seqs - list
Sequence to be evaluated, default is all.
Returns
-------
[out0] When evaluating a single tracker, return SR and the Success Rate at different thresholds.
[out1] Other cases return a dictionary with all tracker results.
"""
if seqs==None:
seqs = self.seqs_name
if tracker_name!=None:
return self.SR_fun(self, self.trackers[tracker_name], seqs)
else:
res = {}
for k,v in self.trackers.items():
res[k] = self.SR_fun(self, v, seqs)
return res
def draw_attributeRadar(self, metric_fun, filename=None):
if filename==None:
filename = self.name
if metric_fun==self.PR:
filename+="_PR"
elif metric_fun==self.SR:
filename+="_SR"
filename+="_radar.png"
return super().draw_attributeRadar(metric_fun, filename)
def draw_plot(self, metric_fun, filename=None, title=None, seqs=None):
assert metric_fun==self.SR or metric_fun==self.PR
if filename==None:
filename = self.name
if metric_fun==self.PR:
filename+="_PR"
axis = self.PR_fun.thr
loc = "lower right"
x_label = "Location error threshold"
y_label = "Precision"
elif metric_fun==self.SR:
filename+="_SR"
axis = self.SR_fun.thr
loc = "lower left"
x_label = "overlap threshold"
y_label = "Success Rate"
filename+="_plot.png"
if title==None:
if metric_fun==self.PR:
title="Precision Plot"
elif metric_fun==self.SR:
title="Success Plot"
return super().draw_plot(axis=axis,
metric_fun=metric_fun,
filename=filename,
title=title,
seqs=seqs, y_max=1.0, y_min=0.0, loc=loc,
x_label=x_label, y_label=y_label) | /dataset/rgbt210_dataset.py | 0.72027 | 0.211661 | rgbt210_dataset.py | pypi |
from rgbt.dataset.basedataset import BaseRGBTDataet, TrackerResult,_basepath
from rgbt.utils import *
import os
from rgbt.metrics import MPR,MSR
class RGBT234(BaseRGBTDataet):
"""
RGBT234 dataset: `RGB-T Object Tracking: Benchmark and Baseline.`\r
[Paper.](https://arxiv.org/abs/1805.08982) \r
[Download Dataset.](https://github.com/mmic-lcl/Datasets-and-benchmark-code)
"""
def __init__(self, gt_path=f'{_basepath}/gt_file/RGBT234/rgbt234_gt/',
seq_name_path=f"{_basepath}/gt_file/RGBT234/attr_txt/SequencesName.txt") -> None:
seqs = load_text(seq_name_path, dtype=str)
super().__init__(gt_path=gt_path, seqs=seqs, bbox_type='ltwh', v_name='visible.txt', i_name='infrared.txt')
self.name = 'RGBT234'
self.MPR_fun = MPR()
self.MSR_fun = MSR()
# Challenge attributes
self._attr_list = ("BC","CM","DEF","FM","HO","LI","LR","MB","NO","TC","PO","SC")
self.BC = self.choose_serial_by_att("BC")
self.CM = self.choose_serial_by_att("CM")
self.DEF = self.choose_serial_by_att("DEF")
self.FM = self.choose_serial_by_att("FM")
self.HO = self.choose_serial_by_att("HO")
self.LI = self.choose_serial_by_att("LI")
self.LR = self.choose_serial_by_att("LR")
self.MB = self.choose_serial_by_att("MB")
self.NO = self.choose_serial_by_att("NO")
self.TC = self.choose_serial_by_att("TC")
self.PO = self.choose_serial_by_att("PO")
self.SC = self.choose_serial_by_att("SC")
def __call__(self, tracker_name, result_path: str, seqs=None, prefix='', bbox_type='ltwh') -> TrackerResult:
RGBT_start()
res = super().__call__(tracker_name, result_path, seqs, prefix, bbox_type)
RGBT_end()
return res
def get_attr_list(self):
return self._attr_list
def choose_serial_by_att(self, attr):
if attr==self.ALL:
return self.seqs_name
else:
p = load_text(os.path.join(self.gt_path, '..', 'attr_txt', attr+'.txt'))
return [seq_name for i,seq_name in zip(p, self.seqs_name) if i]
def MPR(self, tracker_name=None, seqs=None):
"""
NOTE
---------
> Maximum Precision Rate (MPR). PR is the percentage of frames whose output location
is within the given threshold distance of ground truth. That is to say, it computes
the average Euclidean distance between the center locations of the tracked target
and the manually labeled ground-truth positions of all the frames. Although our
alignment between two modalities is highly accurate, there still exist small alignment
errors. Therefore, we use maximum precision rate (MPR) instead of PR in this paper.
Specifically, for each frame, we compute the above Euclidean distance on both RGB and
thermal modalities, and adopt the smaller distance to compute the precision.
We set the threshold to be 20 pixels to obtain the representative MPR.
Parameters
----------
[in] tracker_name - str
Default is None, evaluate all registered trackers.
[in] seqs - list
Sequence to be evaluated, default is all.
Returns
-------
[out0] When evaluating a single tracker, return MPR and the precision Rate at different thresholds.
[out1] Other cases return a dictionary with all tracker results.
"""
if seqs==None:
seqs = self.seqs_name
if tracker_name!=None:
return self.MPR_fun(self, self.trackers[tracker_name], seqs)
else:
res = {}
for k,v in self.trackers.items():
res[k] = self.MPR_fun(self, v, seqs)
return res
def MSR(self, tracker_name=None, seqs=None):
"""
NOTE
---------
> Maximum Success Rate (MSR). SR is the ratio of the number of successful frames whose
overlap is larger than a threshold. Similar to MPR, we also define maximum success
rate (MSR) to measure the tracker results. By varying the threshold, the MSR plot can
be obtained, and we employ the area under curve of MSR plot to define the representative MSR.
Parameters
----------
[in] tracker_name - str
Default is None, evaluate all registered trackers.
[in] seqs - list
Sequence to be evaluated, default is all.
Returns
-------
[out0] When evaluating a single tracker, return MSR and the Success Rate at different thresholds.
[out1] Other cases return a dictionary with all tracker results.
"""
if seqs==None:
seqs = self.seqs_name
if tracker_name!=None:
return self.MSR_fun(self, self.trackers[tracker_name], seqs)
else:
res = {}
for k,v in self.trackers.items():
res[k] = self.MSR_fun(self, v, seqs)
return res
def draw_attributeRadar(self, metric_fun, filename=None):
if filename==None:
filename = self.name
if metric_fun==self.MPR:
filename+="_MPR"
elif metric_fun==self.MSR:
filename+="_MSR"
filename+="_radar.png"
return super().draw_attributeRadar(metric_fun, filename)
def draw_plot(self, metric_fun, filename=None, title=None, seqs=None):
assert metric_fun==self.MSR or metric_fun==self.MPR
if filename==None:
filename = self.name
if metric_fun==self.MPR:
filename+="_MPR"
axis = self.MPR_fun.thr
loc = "lower right"
x_label = "Location error threshold"
y_label = "Precision"
elif metric_fun==self.MSR:
filename+="_MSR"
axis = self.MSR_fun.thr
loc = "lower left"
x_label = "overlap threshold"
y_label = "Success Rate"
filename+="_plot.png"
if title==None:
if metric_fun==self.MPR:
title="Precision Plot"
elif metric_fun==self.MSR:
title="Success Plot"
return super().draw_plot(axis=axis,
metric_fun=metric_fun,
filename=filename,
title=title,
seqs=seqs, y_max=1.0, y_min=0.0, loc=loc,
x_label=x_label, y_label=y_label) | /dataset/rgbt234_dataset.py | 0.812682 | 0.249219 | rgbt234_dataset.py | pypi |
from .basedataset import BaseRGBTDataet,_basepath
from rgbt.utils import *
import os
from rgbt.metrics import MPR_GTOT,MSR_GTOT
class GTOT(BaseRGBTDataet):
"""
Publication: `Learning collaborative sparse representation for grayscale-thermal tracking` 2016\\
IEEE Transactions on Image Processing \\
[Download Dataset.](https://github.com/mmic-lcl/Datasets-and-benchmark-code)
NOTE: this is not support attribute test. [Just here, not GTOT]
"""
def __init__(self, gt_path=f"{_basepath}/gt_file/GTOT/groundtruth/",
seq_name_path=f"{_basepath}/gt_file/GTOT/SequencesName.txt") -> None:
seqs = load_text(seq_name_path, dtype=str)
super().__init__(gt_path=gt_path, seqs=seqs, bbox_type='ltrb', v_name='groundTruth_v.txt', i_name='groundTruth_i.txt')
# super().__init__(gt_path=gt_path, seqs=seqs, bbox_type='ltwh', v_name='init.txt', i_name='init.txt')
self.name = 'GTOT'
self.MPR_fun = MPR_GTOT()
self.MSR_fun = MSR_GTOT()
# Challenge attributes
self._attr_list = (None)
def get_attr_list(self):
return self._attr_list
def choose_serial_by_att(self, attr):
return None
def MPR(self, tracker_name=None, seqs=None):
"""
Parameters
----------
[in] tracker_name - str
Default is None, evaluate all registered trackers.
[in] seqs - list
Sequence to be evaluated, default is all.
Returns
-------
[out0] When evaluating a single tracker, return MPR and the precision Rate at different thresholds.
[out1] Other cases return a dictionary with all tracker results.
"""
if seqs==None:
seqs = self.seqs_name
if tracker_name!=None:
return self.MPR_fun(self, self.trackers[tracker_name], seqs)
else:
res = {}
for k,v in self.trackers.items():
res[k] = self.MPR_fun(self, v, seqs)
return res
def MSR(self, tracker_name=None, seqs=None):
"""
NOTE
---------
> Maximum Success Rate (MSR). SR is the ratio of the number of successful frames whose
overlap is larger than a threshold. Similar to MPR, we also define maximum success
rate (MSR) to measure the tracker results. By varying the threshold, the MSR plot can
be obtained, and we employ the area under curve of MSR plot to define the representative MSR.
Parameters
----------
[in] tracker_name - str
Default is None, evaluate all registered trackers.
[in] seqs - list
Sequence to be evaluated, default is all.
Returns
-------
[out0] When evaluating a single tracker, return MSR and the Success Rate at different thresholds.
[out1] Other cases return a dictionary with all tracker results.
"""
if seqs==None:
seqs = self.seqs_name
if tracker_name!=None:
return self.MSR_fun(self, self.trackers[tracker_name], seqs)
else:
res = {}
for k,v in self.trackers.items():
res[k] = self.MSR_fun(self, v, seqs)
return res
def draw_plot(self, metric_fun, filename=None, title=None, seqs=None):
assert metric_fun==self.MSR or metric_fun==self.MPR
if filename==None:
filename = self.name
if metric_fun==self.MPR:
filename+="_MPR"
axis = self.MPR_fun.thr
loc = "lower right"
x_label = "Location error threshold"
y_label = "Precision"
elif metric_fun==self.MSR:
filename+="_MSR"
axis = self.MSR_fun.thr
loc = "lower left"
x_label = "overlap threshold"
y_label = "Success Rate"
filename+="_plot.png"
if title==None:
if metric_fun==self.MPR:
title="Precision Plot"
elif metric_fun==self.MSR:
title="Success Plot"
return super().draw_plot(axis=axis,
metric_fun=metric_fun,
filename=filename,
title=title,
seqs=seqs, y_max=1.0, y_min=0.0, loc=loc,
x_label=x_label, y_label=y_label) | /dataset/gtot_dataset.py | 0.822153 | 0.276573 | gtot_dataset.py | pypi |
# HBP: RGBW Color Space Converter Between HSL / RGB / HSi / HSV / HEX
## Specifically For LED Based Projects

[](https://wakatime.com/badge/github/iamh2o/rgbw_colorspace_converter) [](https://github.com/iamh2o/rgbw_colorspace_converter/actions/workflows/pytest.yml) [](https://github.com/iamh2o/rgbw_colorspace_converter/actions/workflows/black.yaml) [](https://github.com/iamh2o/rgbw_colorspace_converter/actions/workflows/bashLint.yml) [](https://github.com/psf/black)  [](http://placeholder.com) [](https://badge.fury.io/gh/iamh2o%2Frgbw_colorspace_converter)
### Briefly: What is the utility of this module?
tldr: The `color` module in this package will translate various color systems into RGBW, which is primarily of interest to people playing around with LEDs. RGBW does not have much utility beyond physical lighting, really. The color module is also just generally useful for creating generative art in several color spaces: it has the ability to translate on the fly between 6 schemes, plus a nice interface and a few neat bells and whistles. To see it in action, there are 3 example scripts of varying complexity in the bin dir.
More or less the process is: instantiate a color object from any of the supported types; use this object to emit values for all types (including RGBW); modify the RGB or HSV objects via their r/g/b or h/s/v properties, and the values for all objects update to reflect the change. This is mostly of use for translating the multiple spaces to RGBW for use in LED or other lighting fixtures which support RGBW, but it can also be used as a general color manipulator and translator.
> We've become accustomed to the limited ability of RGB LEDs to produce truly diverse colors, but with the introduction of RGBW (white) LEDs, the ability of LEDs to replicate a more realistic spectrum of colors is dramatically increased. The problem, however, is that decades of systems based on RGB, HEX and HSL do not offer easy transformations to RGBW from each system. This package does just this, and only this. It will return RGBW for given tuples of other spaces, and does so fast enough for interactive LED projects. There are a few helper functions and whatnot, but it's really as simple as (r,g,b,w) = Color.RGB(255,10,200). Where 4 channel RGBW LEDs will translate the returned values to represent the richer color specified by the RGB tuple.
> Or! Go ahead and use this for non LED projects where you need to convert between color spaces. Say for controlling old skool DMX lighting rigs.
### 3 Main Projects Shaped This Module: HEX, BAAAHS and Pyramid Scheme.... hence.... HEXBASPYR ?
<pre>
___ ___ _______ ___ ___ ________ ________ ________ ________ ___ ___ ________
\ \ \\\ \ \ \ __/| \ \ \/ / /\ \ \|\ /_ \ \ \|\ \ \ \ \___|_ \ \ \|\ \ \ \ \/ / /\ \ \|\ \
\ \ __ \ \ \ \_|/__ \ \ / / \ \ __ \ \ \ __ \ \ \_____ \ \ \ ____\ \ \ / / \ \ _ _\
\ \ \ \ \ \ \ \_|\ \ / \/ \ \ \|\ \ \ \ \ \ \ \|____|\ \ \ \ \___| \/ / / \ \ \\ \|
\ \__\ \__\ \ \_______\ / /\ \ \ \_______\ \ \__\ \__\ ____\_\ \ \ \__\ __/ / / \ \__\\ _\
\|__|\|__| \|_______|/__/ /\ __\ \|_______| \|__|\|__| |\_________\ \|__| |\___/ / \|__|\|__|
|__|/ \|__| \|_________| \|___|/
</pre>

# Let's Do It: INSTALL
## Requirements
* [Python >= 3.7](https://www.python.org)
### Nice to have requirements....
* For the tests scripts // generative ansi art code. One of the scripts will record an HTML version of what is displayed on the screen, and when exited, render the page as a png. This rendering step is not required to run the script, watch it, get the html copy (and ansi output too!)... but if you don't have chrome installled (or safari as an alternate), the image creation won't work. You can install it before or after you install this code, it will be autodetected when available and you'll start getting png's. But again, only required for one script, not for the core library.
## Install Options
### PIP From PyPi
```
pip install rgbw_colorspace_converter ;
Try a few of the three test scripts which use the color library for some ansi escape color art :-)
./bin/path_between_2_colors.py
./bin/run_color_module_RGB_HSV_HEX_demo.py -z -y -g -u 33
./bin/run_spectrum_saturation_cycler.py
```
* Starting from conda [Example](https://asciinema.org/a/UvOQ4Nvb6ux0id6Ie0E8EelRH)
* The three scripts in the bin dir will work in most any terminal. You may only have 16 colors, but may have more. I took it as a challenge to write some debugging and teaching tools that would not require a whole pile of LED gear to get going. you can get started in a very simple way with the command line color_printer, which accepts this packages color objects (among other things). It even manages to make some reasonably interesting art!
### Pip Github
* Clone repo
* cd into clone dir
* type ```pip install -e .```
* This should install the main branch active in git (no promises it's stable!)
### Add to PYTHONPATH
* Put rgbw_colorspace_converter in your PYTHONPATH. You won't have the bin scripts auto in your path however.
#### Quick Start Crash Course
> from rgbw_colorspace_converter.colors.converters import RGB, HSV, HSL, HSI, Hex
>
> The Color class is the top-level class, but the RGB and HSV classes inherit from it and do all of the same work. It's intended to be expanded upon at some point, but for now you can honestly choose any of them. You can instantiate Color/RGB/HSV objects only. Once instantiated, they calculate and maintain the state of the other color spaces these objects manage (HSL, HSI, HEX and RGBW — 6 spaces in total).
# Begin Like So:
<pre>
from rgbw_colorspace_converter.colors.converters import RGB, HSV, HSL, HSI, Hex
<p valign="middle">rgb = RGB(255,125,22)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff7d16><img src="https://via.placeholder.com/47x20/ff7d16/000000?text=+" valign="bottom" ></a></p>
rgb.(press tab in an interactive shell) and you'll see:
</pre>
>``````
>
> rgb...
> copy() hsl hsv_s rgb rgb_r rgbw_b gbw_w
> hex hsv hsv_t rgb_b rgbw_w rgbw_g
> hsi hsv_h hsv_v rgb_g rgbw rgbw_r
>``````
These are the objects and functions available to the Color/HSV and RGB top level objects alike. There are a handful of important types.
> 1) Objects, which when called will give you that color schemes encoding for whatever is currently set by RGB/HSV.
> 1b) Note, the core color space used in this module is actually HSV. The HSV and RGB mappings are tightly coupled. If you change the RGB.RED value, the HSV values immediately recalculate (as do the values for all of the second order color space objects.
> 2) The second-order color space objects will generally let you instantiate an object with their values, but you will get back a Color object which will not accept modifications of the second-order object properties (again — to make changes you'll need to modify RGB or HSV values). Then there are third-order objects which it is not yet possible to instantiate directly from their native parameters, but we can calculate their values given any first- or second-order object — this mostly applies to RGBW. The problem is small in our experience: nearly all of the use cases for RGBW involve getting a realistic transformation to RGBW space from these others. We're here to help!
> 3) Recap: First order objects: Color, RGB, HSV. Second order (HSL, HSi, HEX. Third order object, but still loved, RGBW.
> 4) All objects used by name (i.e. rgb.hsi) return a tuple of their values reflecting the color represented by the RGB and HSV internal values. The same is true for .hsv, .hsi, .rgbw....
> 5) First-order objects have the special feature of getters and setters. HSV objects have hsv_v, hsv_s, hsv_h. Used without assignment they return the single value. Used with assignment, the value is updated, and all of the other objects have their values recalculated immediately. The same goes for RGB: there is rgb_r, rgb_g, rgb_b. The setters are the encouraged way to update the global color of the color objects. No save is required. The hsv_t property is a special internal-use tuple of the HSV representation of the current color; please do not muck around with it. Lastly, there is a function 'copy'. If you wish to spin off a safe Color object to play with in, say, multithreaded environments, use copy to deepcopy the Color object you are using.
> 6) oh! for colorspaces which typically have values that span 0-360 degrees, those have been normalized to a 0-1 scale for easier programatic use.
#### A micro example of how this can work
<pre>
# Instantiate a color object from RGB (you can instantiate from RGB/HSV/HSL/HSi/Hex, and get translations
# to all the others plus rgbw immediately. Further, the RGB and HSV objects are special in that they can
# be manipulated in real time and all the other conversions happen along with the RGB/HSV changes. Meaning
# you can write programs that operate in RGB/HSV space and control lighting in RGBW space. Technically
# you can do the same with the HSL/HSI/Hex objects, but way more clunkly.
# Something to note... is how counter intuitive many RGBW transforms are once you get away from primary colors.
# To start simple- here a color object representing Red as defined by RGB is initialized-- and the translations to all
# the other spaces immediately available.
from rgbw_colorspace_converter.colors.converters import RGB, HSV
color = RGB(255,0,0)
color.rgb
<p valign="middle">(255, 0, 0)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff0000><img src="https://via.placeholder.com/47x20/ff0000/000000?text=+" valign="bottom" ></a></p>
In [34]: color.hsv
<p valign="middle">(0.0, 1.0, 1.0)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff0000><img src="https://via.placeholder.com/47x20/ff0000/000000?text=+" valign="bottom" ></a></p>
color.hsl
<p valign="middle">(0.0, 1.0, 0.5)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff0000><img src="https://via.placeholder.com/47x20/ff0000/000000?text=+" valign="bottom" ></a></p>
color.hsi
<p valign="middle">(0.0, 1.0, 0.33333)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff0000><img src="https://via.placeholder.com/47x20/ff0000/000000?text=+" valign="bottom" ></a></p>
color.hex
<p valign="middle">'#ff0000'<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff0000><img src="https://via.placeholder.com/47x20/ff0000/000000?text=+" valign="bottom" ></a></p>
color.rgbw
<p valign="middle">(254, 0, 0, 0)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ff0000><img src="https://via.placeholder.com/47x20/ff0000/000000?text=+" valign="bottom" ></a></p>
# We can change the red color object to yellow by adding green by directly changing the <code>rgb_g</code> property
# of the color object (which maps all RGB and HSV changes to all other color spaces in real time.
# We add max green
color.rgb_g = 255
color.rgb
<p valign="middle">rgb(255, 255, 0)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ffff00><img src="https://via.placeholder.com/47x20/ffff00/000000?text=+" valign="bottom" ></a></p>
color.hsv
<p valign="middle">(0.16666666666666666, 1.0, 1.0)<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ffff00><img src="https://via.placeholder.com/47x20/ffff00/000000?text=+" valign="bottom" ></a></p>
In [17]: color.rgbw
Out[17]: (254, 254, 0, 0)
color.hex
<p valign="middle">'ffff00'<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=ffff00><img src="https://via.placeholder.com/47x20/ffff00/000000?text=+" valign="bottom" ></a></p>
# and the rest were all translated to represent yellow as well
</pre>
##### A worked use case
* Lets say you wanted to write s/w to control something that emits light- probably using colors. This could be LEDs or other lighting hardware, or even sofware or APIs/services. Each have their own interfaces with what color codes they accept. LEDs are primarily RGB or RGBW, but working directly in RGB is a pain. So this module can let you work in the space you grok, and spit out the translations to the thing you are controlling in the protocol it expects (I guyess we suopport DMX too if you want to ask me about that.
* I wrote two simple scripts that acheive all of the above. I instantiate objects using RGB color codes, I work with the objects in HSV space to move through the color space in various ways (and to show how straight forward it is. And in a supremely awesome way :-) I found a way to use a terminal tool called colr to act as my display I'm controlling...... and it only accepted Hex codes. So I was using 3 spaces actively just for one simeple project. The colored output I produce with these tools also emits the color codes for all of the color spaces represented with each line of color so you can take a peek at how all the differnt ones represnet different things. RGB and RGBW get really strange when complex mixtures of colors happen.
* So, generally RGB / RGBW and Hex are not the most pleasant to work directly in.... this is a good read if you're curious why [RGB/RGBW/Hex are not the most intuitive ways to think about color](https://www.maketecheasier.com/difference-between-hex-rgb-hsl/). To perform simple organic operations, like fading through saturations of a color, or cycling smoothly through various colors, the manipulation of HSV/HSL/HSI are far more intuitive (and far more amenable to programatic manipulation) than the others. So, I'll write a toy script (which you can run here using a very low tech display), which I think will demonstrate how this package was intended to be used. There are functional scripts you can run (if you install!) [here ---](https://github.com/iamh2o/rgbw_colorspace_converter/blob/main/bin/run_spectrum_saturation_cycler.py) and another named `path_between_2_colors.py`.
```
The second looks like this when executed:
```
** LINK TO SS **

## Contribute
Please do ask questions, discuss new feature requests, file bugs, etc. You are empowered to add new features, but try to talk it through with the repo admins first- though if youre really burning to code, we can talk with the code in front of us. PRs are the way to propose changes. No commits to main are allowed. Actions/Tests must all pass as well as review by 2 folks equiped to eval the proposed changes.
Development (less stable)
### Install Dev Env
```
cd environment
./setup.sh # Read the help text. To proceed with install:
./setup.sh HBP ~/conda # or wherever your conda is installed or you want it installed
source env.sh # which you need to do anytime you wish to run things.
# To Test
./bin/path_between_2_colors.py
./bin/run_spectrum_saturation_cycler.py
./bin/run_color_module_RGB_HSV_HEX_demo.py -b "___|||))--WWWW________///====\__" -z -y -g -u 33 -f
```
* This will install a conda environment you can source with conda activate HBP. If you don't have conda, it is installed where you specify. Mamba is also installed (read about it. tldr: lightning fast conda, will change your life). The codebase adheres to black style and mostly to flake8.
* During the running of setup above, pre-commit checks will be installed to enforce black and flake 8 compliance before any pushes are permitted. Full disclosure. Black will auto-fix problems when it fails a commit, so you just run again and all is good. RARELY, you'll have to run 'black' directly on the file. Flake8, you need to go manually address the issues is spits out. If there are a ton, chip away at a few, then you can use the --skip-verify commit flag- but don't abuse it please.
* Upon commit, flake 8 and black linter checks are run, as well as the pyunit tests for each commit and pull request. The status of each can be seen in the actions tab and reflected in some of the badges.
## A Fun Thing.
* I've worked up a lowtech way to demonstrating cycling through various color spaces programatically using the terminal. If you have pip installed or run setup.sh, this should work. Try running (in dev)```conda activate HBP; python bin/run_color_module_RGB_HSV_HEX_demo.py``` (after pip)```run_color_module_RGB_HSV_HEX_demo.py```. You get a taste for how the spaces cycle differently and what the encoding for each looks like.
## Quick Note on Our Hardware Setup
* We used OLA + DMXkings to run LEDs via DMX for many BIG projects controlling thousands of LEDS. And this library controlling and mapping colors.
* Other projects used processing as intermediate, among other things.
## More Examples
### A Bit More
Not only does the package allow translation of one color space to another, but it also allows modifications of the color object in real time that re-calculates all of the other color space values at the same time. This is *EXCEEDINGLY* helpful if you wish to do things like slice through HSV space, and only change the saturation, or the hue. This is simply decrementing the H or S value incremntally, but in RGB space, is a complex juggling of changing all 3 RGB values in non intuitive ways. The same applies for transversals of HSI or HSL space to RGB. We often found ourselves writing our shows in HSV/HSL and trnanslating to RGBW for the LED hardware to display b/c the show were more natural to design in non-RGB.
<pre>
see examples in the ./bin and ./tests directories.
# Moving through the HSV color wheel is simply cycling 0->1.0->0->and so on
# Moving through the color wheel in RGB, is a lot more of a pain in the add. Here is an example.
# Lets start with a complicated color, crimson: http://www.workwithcolor.com/color-converter-01.htm?cp=D92008
color = RGB(217,32,8) <p valign="middle">rgb 217,32,8<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=D92008><img src="https://via.placeholder.com/47x20/D92008/000000?text=+" valign="bottom" ></a></p>
color.rgb
(217,32,8)
color.rgbw
(207, 27, 0, 6)
color.hsv
(0.01913875598086125, 0.9631336405529954, 0.8509803921568627)
# As we swing through the color wheel, we change just the h value, note the changes in RGB/W values are not easily predictable considering it's a pretty simple operation.
# Gold: <p valign="middle">rgb 217,208,7<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=D9C709><img src="https://via.placeholder.com/47x20/D9C709/000000?text=+" valign="bottom" ></a></p>
# Moving the HSV colorwheel value 'h' only yields these changes
color.hsv_h = 0.16
(0.16, 0.9631336405529954, 0.8509803921568627)
color.rgb
(217, 208, 7)
color.rgbw
(210, 200, 0, 7)
# LawnGreen: <p valign="middle">rgb 112,217,7<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=70D907><img src="https://via.placeholder.com/47x20/70D907/000000?text=+" valign="bottom" ></a></p>
# Moving the HSV colorwheel value 'h' only yields these changes
color.hsv_h = 0.25
(0.25, 0.9631336405529954, 0.8509803921568627)
color.rgb
(112, 217, 7)
color.rgbw
(104, 209, 0, 7)
# DeepTurquoise:<p valign="middle">rgb7,154,217<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=079AD9 ><img src="https://via.placeholder.com/47x20/709AD9/000000?text=+" valign="bottom" ></a></p>
# Moving the HSV colorwheel value 'h' only yields these changes
color.hsv_h = 0.55
color.hsv
(0.55, 0.9631336405529954, 0.8509803921568627)
color.rgb
(7, 154, 217)
color.rgbw
(0, 145, 211, 6)
# DarkViolet: <p valign="middle">rgb 87,7,217<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=5707D9 ><img src="https://via.placeholder.com/47x20/5707D9/000000?text=+" valign="bottom" ></a></p>
# Moving the HSV colorwheel value 'h' only yields these changes
color.hsv_h = 0.73
color.hsv
(0.73, 0.9631336405529954, 0.8509803921568627)
color.rgb
(87, 7, 217)
color.rgbw
(81, 0, 208, 7)
# And if we set color.hsv_h = 0.0191, we'd be back to <p valign="middle">crimson<a href=http://www.workwithcolor.com/color-converter-01.htm?cp=D92008><img src="https://via.placeholder.com/47x20/D92008/000000?text=+" valign="bottom" ></a></p>.
# The same exercise could be repeated with the hsv_s or hsv_v properties (singly, or together)... and if you wished to modify in RGB space, the same setters are available as rgb_r, rgb_g, rgb_b
</pre>

# Tests
## Command Line
* Simple at the moment, but you may run:
* ```pytest --exitfirst --verbose --failed-first --cov=. --cov-report html```
## Github Actions
* Pytests, Flake8 and Python Black are all tested with github commit actions.
## Fun & Kind Of Weird Tests
```python ./bin/run_color_module_RGB_HSV_HEX_demo.py``` and ```./bin/run_spectrum_saturation_cycler.py```
* Needs to run on a unix-like terminal. OSX, seems fine. Windows.... I'm not sure.
# In The Works
* OLA Integration To Allow Testing LED Strips
* Example mini project to see for yourself the difference in vividness and saturation of RGBW vs RGB LEDs. You'll need hardware for this fwiw.
# Detailed Docs
<pre>
Color
Color class that allows you to ** initialize ** a color in any of HSL, HSV, RGB, Hex and HSI color spaces. Once initialized,with one of these specific types, you get a Color object back (or possibly a subclass of the Color objext- RGB or HSV- all the same ). This object will automatically report the color space values for all color spaces based on what you entered. Notably, it will also translate to RGBW!
Further, from the returned object, you may modify it in 2 ways- via the r/g/b properties of the RGB Color object, or via the h/s/v properties of the HSV color object. Any changes in any of the r/g/b or h/s/v properties (even if mixed and matched) will automatically re-calculate the correct values for the other color space represnetations, which can then be accessed. You can not modify the other color space object properties and get the same effect (yet).
The color representation is maintained in HSV internally and translated to RGB and RGBW and all the others.
Use whichever is more convenient at the time - RGB for familiarity, HSV to fade colors easily.
The main goal of this package is to translate various color spaces into RGBW for use in RGBW LED or DMX/RGB accepting hardware. There is a strange dearth of translators from ANY color space to RGBW.
RGB values range from 0 to 255
HSV values range from 0.0 to 1.0 *Note the H value has been normalized to range between 0-1 in instead of 0-360 to allow
for easier cycling of values.
HSL/HSI values range from 0-360 for H, 0-1 for S/[L|I]
Hex is hex...
# INITIALIZING COLOR OBJECTS -- it is not advised to init Color directly. These below all basically return a valid Color obj
# RGBW can not be initialized directly- it is calculated from the initialized, or modified, values of the color objs below
from rgbw_colorspace_converter.colors.converters import RGB, HSV, HSL, HSI, Hex
>>> red = RGB(255, 0 ,0)
>>> green = HSV(0.33, 1.0, 1.0)
>>> fuchsia = RGB(180, 48, 229)
Colors may also be specified as hexadecimal string:
>>> blue = Hex('#0000ff')
Both RGB and HSV components are available as attributes
and may be set.
>>> red = RGB(255,0,0)
>>> red.rgb_r
255
>>> red.rgb_g = 128
>>> red.rgb
(255, 128, 0)
>>> red.hsv
(0.08366013071895424, 1.0, 1.0)
>>> red.hsv_v = 0.5
>>> red.rgb
(127, 64, 0)
>>> red.hsv
(0.08366013071895424, 1.0, 0.5)
# Note how as you change the hsv_(h|s|v) or rgb_(r|g|b) properties, the other values are recalculated for the other color types
--->>> # IMPORTANT -- This recalculation after instantiation *only* is allowed for hsv and rgb types. The HSL/HSV/HSI/RGBW values are all calculated upone instantiation of the Color object **AND** the values for each are updated in real time as the hsv(h|s|v) and rgb(r|g|b) values are modified in the Color object. But, you can not modify the individual elements of HSL/HSI/RGBW/HEX objects directly after instantiating each. Put another way. If you create a HSI object, to get a new HSI color value you need to modify r/g/b or h/s/v (or create a new HSI object).
These objects are mutable, so you may want to make a
copy before changing a Color that may be shared
>>> red = RGB(255,0,0)
>>> purple = red.copy()
>>> purple.rgb_b = 255
>>> red.rgb
(255, 0, 0)
>>> purple.rgb
(255, 0, 255)
Brightness can be adjusted by setting the 'color.hsv_v' property, even
when you're working in RGB because the native model is maintained in HSV.
For example: to gradually dim a color
(ranges from 0.0 to 1.0)
>>> col = RGB(0,255,0)
>>> while col.hsv_v > 0:
... col.hsv_v -= 0.1
... print(col.rgb)
(0, 255, 0)
(0, 229, 0)
(0, 204, 0)
(0, 178, 0)
(0, 153, 0)
(0, 127, 0)
(0, 102, 0)
(0, 76, 0)
(0, 51, 0)
(0, 25, 0)
# And you could mix and match if you're feeling crazy. col.hsv_v -=10 ; col_rgb_g = 102; print(col);
A more complex example is if you wished to move through HUE space in HSV and display that in RGB (or RGBW)
from rgbw_colorspace_converter import RGB
magenta = RGB(255, 120, 255)
# in HSV it looks like this
magenta.hsv
(0.8333333333333334, 0.5294117647058824, 1.0)
To cycle through hue's in RGB space is incredibly cumbersome. But in HSV space, you simply cycle through 0-1 (and loop back around bc the space is a cylinder!). So, something like this:
magenta = RGB(255, 120, 255)
In [12]: while ctr < 8:
...: magenta.h = magenta.h - .1
...: print(magenta.hsv, magenta.rgb)
...: ctr = ctr + 1
...:
...:
(0.73333333, 0.5294117647058824, 1.0) (173, 120, 255)
(0.63333333, 0.5294117647058824, 1.0) (120, 147, 255)
(0.53333333, 0.5294117647058824, 1.0) (120, 228, 255)
(0.43333333, 0.5294117647058824, 1.0) (120, 255, 200)
(0.33333333, 0.5294117647058824, 1.0) (120, 255, 120)
(0.23333333, 0.5294117647058824, 1.0) (201, 255, 120)
(0.13333333, 0.5294117647058824, 1.0) (255, 227, 120)
(0.03333333, 0.5294117647058824, 1.0) (255, 146, 120)
(0.0, 0.5294117647058824, 1.0) (255, 120, 120)
! Note how clear the movement through HSV space is, and how unintuitive the RGB transitions are. This module helps make this usability gap between the more intuitive color spaces and RGB smaller (AND GIVES US RGBW!)
<p valign="middle"> <img src="https://via.placeholder.com/43x20/ff0058/000000?text=+" valign="bottom" > <code>#ff0058</code> ... and some more stuff</p>
RGBW
To get the (r,g,b,w) tuples back from a Color object, simply call Color.rgbw and you will return the (r,g,b,w) tuple.
-----------------------------------| |-----------------------------------
╦ ╦╔╗ ╔═╗
╠═╣╠╩╗╠═╝
╩ ╩╚═╝╩
</pre>
<b>run_color_module_RGB_HSV_HEX_demo.py</b> generates scrolling patterns by cycling through the various color spaces, this is a screenshot:

## Authors
> GrgB wrote the vast majority of the core. JM translated Brian Nettler's theoretical work into code to allow the translation from RGB/HSV/HSI to RGBW. JC added a lot of robustness and, in later instances (which you can find in the pyramid triangles repo), filled in some algorithmic gaps, which I have been unable to test yet, so have not included them yet. This nugget of code has been present in projects that now span > 10 years, with multiple artists and show designers also contributing to the s/w (ie: TL, JH, SD, ZB, MF, LN). This library is a small component of a much more elaborate framework to control custom fabricated LED installations. Most recently for Pyramid Scheme v-3 [PyramidTriangles](https://github.com/pyramidscheme/pyramidtriangles), which was a fork of the v-1 code [pyramid triangles codebase v1.0](https://github.com/iamh2o/pyramidtriangles). Pyramid Scheme followed several years of running the BAAAHS lighting installation (codebase lost to bitbucket purgatory). And the BAAAHS installation was the first gigantic project of the then baby-faced HEX Collective (who developed the core of this code specifically for a commissioned piece, aptly dubbed "The Hex's" — that repo also sadly lost to time and space). This color library was an original component, and largely untouched until the need to support RGBW LEDs (and wow, RGBW LEDs are really stunning).
### Roar
It would be remiss of us not to thank Steve Dudek for his Buffalo soothsaying and accurate measuring of 3 inch increments.
# Credits // References
- [Shield.io](https://shields.io)
- [Placeholder](https://placeholder.com/) for allowing there to be color images in this readme!
- [Colorblind Aware Design](https://davidmathlogic.com/colorblind/)
| /rgbw_colorspace_converter-0.1.3.tar.gz/rgbw_colorspace_converter-0.1.3/README.md | 0.728362 | 0.877791 | README.md | pypi |
# RGCosm - Reverse Geocode for OpenStreetmap
[](https://github.com/BlackCatDevel0per/rgcosm/actions/workflows/python-publish.yml)
A Python library for offline reverse geocoding from osm(.pbf) GIS data converted to sqlite3 - based on code from [rgcosm scripts](https://github.com/punnerud/rgcosm)
### Install by:
```bash
pip install rgcosm
```
or from source by:
```bash
git clone https://github.com/BlackCatDevel0per/rgcosm
cd rgcosm
pip install build
python -m build
```
### Dependencies
1. osmium
### CLI
See cli commands by:
```
python coordinates_to_address.py -h
```
output:
```
usage: rgcosm [-h] [-ci CINPUT] [-co COUTPUT] [-ai ADD_INDEXES]
[-db DATABASE] [-ltln LAT_LON] [-lat LATITUDE] [-lon LONGITUDE] [-st SEARCH_TAGS] [-mtc MIN_TAGS_COUNT]
rgcosm cli
optional arguments:
-h, --help show this help message and exit
-ci CINPUT, --cinput CINPUT
Path to input pbf file
-co COUTPUT, --coutput COUTPUT
Path to output db file
-ai ADD_INDEXES, --add_indexes ADD_INDEXES
Add indexes for faster search default yes
-db DATABASE, --database DATABASE
Path to db file
-ltln LAT_LON, --lat_lon LAT_LON
latitude with longitude separated by space
-lat LATITUDE, --latitude LATITUDE
latitude
-lon LONGITUDE, --longitude LONGITUDE
longitude
-st SEARCH_TAGS, --search_tags SEARCH_TAGS
tags to search, default: `addr:`
-mtc MIN_TAGS_COUNT, --min_tags_count MIN_TAGS_COUNT
Minimal tags count to filter
```
### First convert downloaded osm(.pbf) files from:
https://download.geofabrik.de/
Then use convert.py to create the database (speedupped by using db in ram & dump in to disk):
```bash
python rgcosm -ci some-place.osm.pbf -co some-place.db
```
The output file can be 7–13 times larger than the source file (for the Maldives file, ~12.74 times). For example, the [maldives](https://download.geofabrik.de/asia/maldives-latest.osm.pbf) file size is 2.7 MB, and after conversion the size increased to 34.4 MB (time: ~14 sec.) with added indexes, and 20.1 MB without (time: ~13 sec.).
You can disable adding indexes by `-ai=no` or `--add_indexes=no` arg.
Adding indexes speedups searching time up to 70 times.
### Usage
```python
from rgcosm import get_address
db_path = 'maldives-latest.db'
coordinates = (6.5506617, 72.9530232)
addr = get_address(db_path, coordinates)
print(addr)
```
result:
```bash
[{'id': 9508099415, 'lat': 6.5506617, 'lon': 72.9530232, 'tags': {'addr:block_number': '26', 'generator:method': 'combustion', 'generator:output:electricity': '200 kV', 'generator:source': 'diesel', 'name': 'Vaikaradhoo Fenaka Power Plant 3', 'operator': 'Fenaka Corporation Limited Vaikaradhoo', 'power': 'generator'}}]
```
or with multiple coordinates:
```python
from rgcosm import get_address
db_path = 'maldives-latest.db'
coordinates = [(6.5506617, 72.9530232), (4.172474, 73.5083067), (4.1718557, 73.5154427)]
addr = get_address(db_path, coordinates)
print(addr)
```
result:
```bash
: 'generator'}}, {'id': 2521220337, 'lat': 4.172474, 'lon': 73.5083067, 'tags': {'addr:city': "Male'", 'addr:housename': 'Ma.Seventy Flower', 'addr:street': 'Iskandharu Magu', 'amenity': 'cafe', 'cuisine': 'coffee_shop', 'internet_access': 'yes', 'name': "Chili's Café"}}, {'id': 7987147424, 'lat': 4.1718557, 'lon': 73.5154427, 'tags': {'addr:city': "Male'", 'addr:housenumber': 'H.Hostside', 'addr:postcode': '20053', 'addr:street': 'Irudheymaa Hingun', 'clothes': 'women;wedding;men;suits;fashion;children', 'contact:facebook': 'https://m.facebook.com/Aiccet/', 'currency:EUR': 'yes', 'currency:GBP': 'yes', 'currency:USD': 'yes', 'name': 'Aiccet', 'opening_hours': '24/7', 'operator': 'Aiccet', 'payment:american_express': 'yes', 'payment:cash': 'yes', 'payment:credit_cards': 'yes', 'payment:mastercard': 'yes', 'payment:visa': 'yes', 'payment:visa_debit': 'yes', 'phone': '+960 7997323', 'shop': 'clothes'}}]
```
Advanced (for keep connection to db):
```python
from rgcosm import RGeocoder
db_path = 'maldives-latest.db'
geo = RGeocoder(db_path)
coordinates = [(4.1758869, 73.5094013), (-0.6699146, 73.1228688), (5.159217, 73.1312907)]
addrs = geo.locate(coordinates, 'addr:', 1)
print(addrs)
```
result:
```
[{'id': 10300135473, 'lat': 4.1758869, 'lon': 73.5094013, 'tags': {'addr:city': "Male'", 'email': 'silverlinehotelsupplier@gmail.com', 'name': 'Silverline Hotel Supplies', 'office': 'company', 'phone': '732-9577', 'website': 'http://www.silverlineenterprise.com/'}}, {'id': 9446166886, 'lat': -0.6699146, 'lon': 73.1228688, 'tags': {'addr:city': 'Addu City', 'addr:housenumber': 'Mushkuraanaage', 'addr:postcode': '19030', 'addr:street': 'Dhandivara Maga'}}, {'id': 8439302155, 'lat': 5.159217, 'lon': 73.1312907, 'tags': {'addr:city': 'Dharavandhoo', 'addr:postcode': '06060', 'amenity': 'courthouse', 'name': 'Dharavandhoo Magistrate Court', 'opening_hours': 'Sa-Th 08:00-14:00', 'operator': 'Government of Maldives'}}]
```
### In plans:
- [ ] db serializing with lz4 compression & etc.
- [ ] Add more formats for addresses
- [ ] Add caching results
- [ ] More speedup conversion & less memory usage
- [ ] Add some features from other similar libs
- [ ] More documentation
| /rgcosm-0.0.5.tar.gz/rgcosm-0.0.5/README.md | 0.631594 | 0.796253 | README.md | pypi |
[](https://github.com/ResonantGeoData/ResonantGeoData/)
# rgd_client - Resonant GeoDataClient
The **rgd_client** Python package is a well typed, easy to use, and extendable Python client for Resonant GeoData APIs.
# Installation
To install the core client
```
pip install rgd-client
```
To use other core modules or plugins, install the corresponding client packages. For example, the imagery client plugin is installed with
```
pip install rgd-imagery-client
```
All the functions added via a plugin are namespaced under a name defined by that plugin. For the imagery client plugin, this is `imagery`, so all of these plugin's features are accessed through `client.imagery.*`. Examples of this are shown below.
# Usage
### Search and display results
```python
import json
import matplotlib.pyplot as plt
import numpy as np
from rgd_client import create_rgd_client
def plot_geojson(gjs, *args, **kwargs):
points = np.array(gjs['coordinates'])
if points.ndim == 3:
points = points[0]
if points.ndim == 1:
points = points.reshape((1, points.size, ))
return plt.plot(points[:,0], points[:,1], *args, **kwargs)
client = create_rgd_client(username='username', password='password')
bbox = {
"type":"Polygon",
"coordinates":[
[
[-105.45091240368326,39.626245373878696],
[-105.45091240368326,39.929904289147274],
[-104.88775649170178,39.929904289147274],
[-104.88775649170178,39.626245373878696],
[-105.45091240368326,39.626245373878696]
]
]
}
q = client.rgd.search(query=json.dumps(bbox), predicate='intersects')
for s in q:
print(s['subentry_name'])
plot_geojson(bbox, 'k--', label='Search Region')
for s in q:
plot_geojson(s['footprint'], label=s['subentry_name'])
plt.legend()
plt.title(f'Count: {len(q)}')
```
### Inspect raster
Preview thumbnails of the raster
```python
import imageio
from io import BytesIO
raster = client.imagery.get_raster(q[0])
plot_geojson(bbox, 'k--')
plot_geojson(raster['outline'], 'r')
load_image = lambda imbytes: imageio.imread(BytesIO(imbytes))
count = len(raster['parent_raster']['image_set']['images'])
for i in range(count):
thumb_bytes = client.imagery.download_raster_thumbnail(q[0], band=i)
thumb = load_image(thumb_bytes)
plt.subplot(1, count, i+1)
plt.imshow(thumb)
plt.tight_layout()
plt.show()
```
### Download Raster
Download the entire image set of the raster
```python
import rasterio
from rasterio.plot import show
paths = client.imagery.download_raster(q[0])
rasters = [rasterio.open(im) for im in paths.images]
for i, src in enumerate(rasters):
plt.subplot(1, len(rasters), i+1)
ax = plt.gca()
show(src, ax=ax)
plt.tight_layout()
plt.show()
```
### STAC Item Support
The Python client has a search endpoint specifically for Raster data that
returns each record in the search results as a STAC Item.
```py
q = client.imagery.search_raster_stac(query=json.dumps(bbox), predicate='intersects')
print(q[0]) # view result as STAC Item
# Download using the search result
paths = client.imagery.download_raster(q[0])
print(paths)
```
We can also upload new data in the STAC Item format. Here we simply pass back
the same STAC Item JSON which will not actually do anything because RGD
recognizes that these files are already present with a Raster.
```py
client.imagery.create_raster_stac(q[0])
```
Please note that the assets in the STAC Item must already be uploaded to a
cloud storage provider with either `s3://` or `https://` URLs. Further, the
images must have the `data` tag on each asset. e.g.:
```py
{
... # other STAC Item fields
'assets': {
'image-15030': {
'href': 'http://storage.googleapis.com/gcp-public-data-sentinel-2/tiles/17/S/MS/S2A_MSIL1C_20210302T161201_N0209_R140_T17SMS_20210302T200521.SAFE/GRANULE/L1C_T17SMS_A029738_20210302T161751/IMG_DATA/T17SMS_20210302T161201_B01.jp2',
'title': 'GRANULE/L1C_T17SMS_A029738_20210302T161751/IMG_DATA/T17SMS_20210302T161201_B01.jp2',
'eo:bands': [{'name': 'B1'}],
'roles': ['data'],
},
'image-15041': {
'href': 'http://storage.googleapis.com/gcp-public-data-sentinel-2/tiles/17/S/MS/S2A_MSIL1C_20210302T161201_N0209_R140_T17SMS_20210302T200521.SAFE/GRANULE/L1C_T17SMS_A029738_20210302T161751/IMG_DATA/T17SMS_20210302T161201_B02.jp2',
'title': 'GRANULE/L1C_T17SMS_A029738_20210302T161751/IMG_DATA/T17SMS_20210302T161201_B02.jp2',
'eo:bands': [{'name': 'B1'}],
'roles': ['data'],
},
... # ancillary files can lack a role but we like to see `metadata` used.
'ancillary-30687': {
'href': 'http://storage.googleapis.com/gcp-public-data-sentinel-2/tiles/17/S/MS/S2A_MSIL1C_20210302T161201_N0209_R140_T17SMS_20210302T200521.SAFE/GRANULE/L1C_T17SMS_A029738_20210302T161751/QI_DATA/MSK_TECQUA_B03.gml',
'title': 'GRANULE/L1C_T17SMS_A029738_20210302T161751/QI_DATA/MSK_TECQUA_B03.gml',
'roles': ['metadata'],
},
}
}
```
# Plugin Development
For instructions on how to develop a plugin for `rgd_client`, see `PLUGINS.md`.
| /rgd-client-0.3.11.tar.gz/rgd-client-0.3.11/README.md | 0.557725 | 0.856212 | README.md | pypi |
from pathlib import Path
from typing import Dict, List, Optional, Tuple, Union
import validators
from .session import RgdClientSession
from .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
from .utils import download_checksum_file_to_path, spatial_search_params
class RgdPlugin:
    """The base plugin that all other plugins must inherit from.

    Holds the shared :class:`RgdClientSession`; subclasses typically extend
    ``session.base_url`` with their own API prefix in ``__init__``.
    """

    def __init__(self, session: RgdClientSession):
        # The HTTP session used for every request this plugin makes.
        self.session = session
class CorePlugin(RgdPlugin):
"""The core django-rgd client plugin."""
def __init__(self, *args, **kwargs):
    """Initialize the core plugin and scope the session under the ``rgd/`` prefix."""
    super().__init__(*args, **kwargs)
    # Every core endpoint lives under the `rgd/` prefix of the API root.
    self.session.base_url = self.session.base_url + 'rgd/'
def get_collection(self, pk: int):
    """Fetch a single Collection record by its primary key.

    Raises an HTTP error if the server responds with a failure status.
    """
    response = self.session.get(f'collection/{pk}')
    response.raise_for_status()
    return response.json()
def get_collection_item(self, pk: Union[int, dict], index: int):
    """Fetch the item at ``index`` within a collection.

    ``pk`` may be the collection's integer primary key or a collection
    dict as returned by the API, in which case its ``'id'`` key is used.
    """
    collection_id = pk['id'] if isinstance(pk, dict) else pk
    response = self.session.get(f'collection/{collection_id}/item/{index}')
    response.raise_for_status()
    return response.json()
def get_collection_by_name(self, name: str):
    """Look up a collection by name, returning the first match.

    Raises:
        ValueError: If no collection with ``name`` exists.
    """
    data = self.session.get('collection', params={'name': name}).json()
    if isinstance(data, list):
        # Test env returns a bare list of results
        matches = data
    elif isinstance(data, dict):
        # User env returns a paginated dict
        matches = data.get('results', [])
    else:
        matches = []
    if matches:
        return matches[0]
    raise ValueError(f'Collection ({name}) cannot be found.')
def search(
    self,
    query: Optional[Union[Dict, str]] = None,
    predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
    relates: Optional[str] = None,
    distance: Optional[Tuple[float, float]] = None,
    acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
    instrumentation: Optional[str] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
    time_of_day: Optional[DATETIME_OR_STR_TUPLE] = None,
    collections: Optional[List[Union[str, int]]] = None,
) -> List[Dict]:
    """
    Search for geospatial entries based on various criteria.

    For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
    E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.

    Args:
        query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
        predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
            be used to filter data such that predicate(a, b) where b is the queried geometry.
        relates: Specify exactly how the queried geometry should relate to the data using a
            DE-9IM string code.
        distance: The min/max distance around the queried geometry in meters.
        acquired: The min/max date and time (ISO 8601) when data was acquired.
        instrumentation: The instrumentation used to acquire at least one of these data.
        limit: The maximum number of results to return.
        offset: The number of results to skip.
        time_of_day: The min/max time of day (ISO 8601) when data was acquired.
        collections: Collections to filter by, each given as an integer collection ID
            or a collection name (names are resolved to IDs via the API).

    Returns:
        A list of Spatial Entries.
    """
    if collections is not None:
        # Resolve any collection names to integer IDs. Build a new list so
        # the caller's argument is never mutated in place.
        collections = [
            self.get_collection_by_name(item)['id'] if isinstance(item, str) else item
            for item in collections
        ]
    params = spatial_search_params(
        query=query,
        predicate=predicate,
        relates=relates,
        distance=distance,
        acquired=acquired,
        instrumentation=instrumentation,
        limit=limit,
        offset=offset,
        time_of_day=time_of_day,
        collections=collections,
    )
    r = self.session.get('search', params=params)
    r.raise_for_status()
    return r.json()
def create_collection(self, name: str):
"""Get or create collection by name."""
try:
return self.get_collection_by_name(name)
except ValueError:
r = self.session.post('collection', json={'name': name})
r.raise_for_status()
return r.json()
def create_file_from_url(
self,
url: str,
name: Optional[str] = None,
collection: Optional[int] = None,
description: Optional[str] = None,
) -> Dict:
"""
Get or create a ChecksumFile from a URL.
Args:
url: The URL to retrieve the file from
name: The name of the file
collection: The integer collection ID to associate this ChecksumFile with
description: The description of the file
"""
# Verify that url is valid in shape, will raise error on failure
validators.url(url)
if isinstance(collection, str):
collection = self.create_collection(collection)['id']
# Check if url/collection combination already exists, and return it
payload = {'url': url}
if collection is not None:
payload['collection'] = collection
data = self.session.get('checksum_file', params=payload).json()
# TODO: This is absolutely stumping me...
if isinstance(data, list) and data:
# Test env returns list
return data[0]
elif isinstance(data, dict) and data['results']:
# User env returns dict
return data['results'][0]
# Create new checksum file
# Construct payload, leaving out empty arguments
payload['type'] = 2
if name is not None:
payload['name'] = name
if collection is not None:
payload['collection'] = collection
if description is not None:
payload['description'] = description
r = self.session.post('checksum_file', json=payload)
r.raise_for_status()
return r.json()
def file_tree_search(self, path: str = ''):
"""
Search files in a hierarchical format, from a provided folder path.
This endpoint returns all files and folders that are "within" the specified "folder" (the path argument).
An example is
Args:
path: The path to apply to the search. This can be thought of as the folder path that you'd like to search.
Returns:
A dictionary, containing all direct subfolders (`folders`), and files (`files`) under the specified path.
"""
return self.session.get('checksum_file/tree', params={'path_prefix': path}).json()
def download_checksum_file_to_path(
self, id: int, path: Optional[Path] = None, keep_existing=False, use_id=False
):
"""
Download a RGD ChecksumFile to a given path.
Args:
id: The id of the RGD ChecksumFile to download.
path: The root path to download this file to.
keep_existing: If False, replace files existing on disk.
use_id: If True, save this file to disk using it's ID, rather than it's name.
Returns:
The path on disk the file was downloaded to.
"""
r = self.session.get(f'checksum_file/{id}')
r.raise_for_status()
return download_checksum_file_to_path(
r.json(), path, keep_existing=keep_existing, use_id=use_id
) | /rgd-client-0.3.11.tar.gz/rgd-client-0.3.11/rgd_client/plugin.py | 0.915119 | 0.219976 | plugin.py | pypi |
from datetime import datetime
import json
from json.decoder import JSONDecodeError
from pathlib import Path
from typing import Dict, Generator, Iterator, List, Optional, Tuple, Union
from geomet import wkt
import requests
from requests import Response, Session
from .types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
# Default API root used when no explicit endpoint is configured.
DEFAULT_RGD_API = 'https://www.resonantgeodata.com/api'
# Directory and filename where the API token is cached between sessions.
API_KEY_DIR_PATH = Path('~/.rgd/').expanduser()
API_KEY_FILE_NAME = 'token'
def pager(session: Session, url: str, **kwargs) -> Iterator[Response]:
    """Exhaust a DRF Paginated list, yielding each page's Response."""
    while True:
        response = session.get(url, **kwargs)
        yield response
        # DRF exposes the next page (if any) through the Link header.
        next_link = response.links.get('next')
        if next_link is None:
            break
        url = next_link['url']
def limit_offset_pager(session: Session, url: str, **kwargs) -> Generator[Dict, None, None]:
    """Exhaust a DRF Paginated list, respecting limit/offset.

    Yields individual result dicts (not responses), issuing as many requests
    as needed. ``params['limit']`` caps the total number of results yielded;
    ``params['offset']`` sets the starting position.

    NOTE(review): the ``params`` dict passed through ``kwargs`` is mutated in
    place (``limit``/``offset`` are rewritten between requests) -- pass a
    throwaway dict if the caller needs its params preserved.
    """
    # Default params kwarg
    if not kwargs.get('params'):
        kwargs['params'] = {}
    params: Optional[Dict] = kwargs['params']
    total_limit = params.get('limit')
    # Default offset
    if params.get('offset') is None:
        params['offset'] = 0
    num_results = 0
    while True:
        # Update limit: only request as many results as are still wanted.
        if total_limit:
            params['limit'] = total_limit - num_results
        # Make request and raise exception if failed
        r = session.get(url, **kwargs)
        r.raise_for_status()
        # Yield results
        results = r.json()['results']
        yield from results
        # Update offset and num_results
        params['offset'] += len(results)
        num_results += len(results)
        # Check if there is no more data, or if we've reached our limit
        no_more_data = 'next' not in r.links or not results
        limit_reached = total_limit and num_results >= total_limit
        if no_more_data or limit_reached:
            break
def iterate_response_bytes(
    url: str, chunk_size: int = 1024 * 1024, raise_for_status: bool = True
) -> Iterator[bytes]:
    """Return the response body as an iterator of bytes, where each item is `chunk_size` bytes long."""
    # Stream so the whole body is never held in memory at once.
    response = requests.get(url, stream=True)
    if raise_for_status:
        response.raise_for_status()
    return response.iter_content(chunk_size=chunk_size)
def datetime_to_str(value: object):
    """Convert datetime objects to ISO 8601 strings; pass other values through."""
    # Non-datetime values (including None) are returned unchanged.
    if isinstance(value, datetime):
        return value.isoformat()
    return value
def datetime_to_time(value: object):
    """Convert datetime objects to HH:MM strings; pass other values through."""
    # Non-datetime values (including None) are returned unchanged.
    if isinstance(value, datetime):
        return value.strftime('%H:%M')
    return value
def order_datetimes(value1: object, value2: object):
    """Return the two values as an (earlier, later) pair when both are datetimes.

    Non-datetime inputs are returned unchanged, in the original order.
    """
    if isinstance(value1, datetime) and isinstance(value2, datetime):
        if value1 < value2:
            return value1, value2
        return value2, value1
    # At least one side is not a datetime -- nothing to reorder.
    return value1, value2
def spatial_subentry_id(search_result):
    """Get the id of a returned SpatialEntry (STAC or plain serialization)."""
    # STAC items carry 'stac_version' and use 'id'; plain entries use 'spatial_id'.
    key = 'id' if 'stac_version' in search_result else 'spatial_id'
    return search_result[key]
def spatial_search_params(
    query: Optional[Union[Dict, str]] = None,
    predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
    relates: Optional[str] = None,
    distance: Optional[Tuple[float, float]] = None,
    acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
    instrumentation: Optional[str] = None,
    limit: Optional[int] = None,
    offset: Optional[int] = None,
    time_of_day: Optional[DATETIME_OR_STR_TUPLE] = None,
    collections: Optional[List[int]] = None,
) -> Dict:
    """Build the query-parameter dict for a spatial search request."""
    # Pass-through parameters that need no further processing.
    params = {
        'predicate': predicate,
        'relates': relates,
        'instrumentation': instrumentation,
        'limit': limit,
        'offset': offset,
    }
    if query:
        geometry = query
        if isinstance(geometry, str):
            try:
                geometry = json.loads(geometry)
            except JSONDecodeError:
                # Not JSON -- assume it is already WKT and send as-is.
                pass
        if isinstance(geometry, dict):
            # GeoJSON dict -> WKT; invalid geometries are allowed to raise here.
            geometry = wkt.dumps(geometry)
        params['q'] = geometry
    # Process range params
    if distance and len(distance) == 2:
        params['distance_min'], params['distance_max'] = distance
    # TODO: Determine if the before/after param order needs to be swapped?
    if acquired and len(acquired) == 2:
        earliest, latest = order_datetimes(*acquired)
        params['acquired_before'] = datetime_to_str(latest)
        params['acquired_after'] = datetime_to_str(earliest)
    if time_of_day and len(time_of_day) == 2:
        start, end = order_datetimes(*time_of_day)
        params['time_of_day_after'] = datetime_to_time(start)
        params['time_of_day_before'] = datetime_to_time(end)
    if collections:
        params['collections'] = collections
    return params
def download_checksum_file_to_path(
    file: Dict,
    path: Optional[Path] = None,
    keep_existing=False,
    use_id=False,
) -> Path:
    """
    Download a RGD ChecksumFile to a given path.

    Args:
        file: A RGD ChecksumFile serialized as a Dict.
        path: The root path to download this file to. Defaults to the current working directory.
        keep_existing: If False, replace files existing on disk.
        use_id: If True, save this file to disk using its ID rather than its name.

    Returns:
        The path on disk the file was downloaded to.
    """
    relative_name: str = str(file['id']) if use_id else file['name']
    download_url: str = file['download_url']
    root = path if path else Path.cwd()
    # Split the (possibly nested) name into parent directories and filename.
    parts: List[str] = relative_name.lstrip('/').split('/')
    filename = parts[-1]
    parent_dirname = '/'.join(parts[:-1])
    # Create nested directory if necessary
    parent_path = root / parent_dirname if parent_dirname else root
    parent_path.mkdir(parents=True, exist_ok=True)
    target = parent_path / filename
    # Skip the download when the file already exists and the caller asked
    # to keep existing files.
    if not (target.is_file() and keep_existing):
        with open(target, 'wb') as handle:
            for chunk in iterate_response_bytes(download_url):
                handle.write(chunk)
    return target
from dataclasses import dataclass
from pathlib import Path
import tempfile
import time
from typing import Dict, Iterable, Iterator, List, Optional, Tuple, Union
from rgd_client.plugin import RgdPlugin
from rgd_client.types import DATETIME_OR_STR_TUPLE, SEARCH_PREDICATE_CHOICE
from rgd_client.utils import (
download_checksum_file_to_path,
limit_offset_pager,
spatial_search_params,
spatial_subentry_id,
)
from tqdm import tqdm
from .types import PROCESSED_IMAGE_TYPES
@dataclass
class RasterDownload:
    """Paths produced by ``ImageryPlugin.download_raster``."""

    # Root directory the raster was downloaded into.
    path: Path
    # Paths of the downloaded image files.
    images: List[Path]
    # Paths of the downloaded ancillary (non-image) files.
    ancillary: List[Path]
class ImageryPlugin(RgdPlugin):
    """The django-rgd-imagery client plugin.

    Wraps the ``rgd_imagery`` REST endpoints: tile metadata, image and raster
    downloads, raster creation, and processed-image workflows.
    """
def list_image_tiles(self, image_id: Union[str, int]) -> Dict:
"""List geodata imagery tiles."""
r = self.session.get(f'rgd_imagery/tiles/{image_id}/info/metadata')
return r.json()
def download_image_file(
self, image_id: Union[str, int], chunk_size: int = 1024 * 1024
) -> Iterator[bytes]:
"""
Download the associated ImageFile data for this ImageEntry directly from S3.
Args:
image_id: The ID of the ImageEntry to download.
chunk_size: The size (in bytes) of each item in the returned iterator (defaults to 1MB).
Returns:
An iterator of byte chunks.
"""
r = self.session.get(f'rgd_imagery/{image_id}/data', stream=True)
return r.iter_content(chunk_size=chunk_size)
def download_image_thumbnail(
self,
image_id: Union[str, int],
) -> bytes:
"""
Download the generated thumbnail for this ImageEntry.
Args:
image_id: The ID of the ImageEntry to download.
Returns:
Thumbnail bytes.
"""
r = self.session.get(
f'rgd_imagery/tiles/{image_id}/data/thumbnail.png?max_height=256&max_width=256',
headers={'accept': 'image/png'},
)
return r.content
def download_raster_thumbnail(
self,
raster_meta_id: Union[str, int, dict],
band: int = 0,
) -> bytes:
"""
Download the generated thumbnail for this ImageEntry.
Args:
raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.
band: The index of the image in the raster's image set to produce thumbnail from.
Returns:
Thumbnail bytes.
"""
if isinstance(raster_meta_id, dict):
raster_meta_id = spatial_subentry_id(raster_meta_id)
r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
parent_raster = r.json().get('parent_raster', {})
images = parent_raster.get('image_set', {}).get('images', [])
try:
return self.download_image_thumbnail(images[band]['id'])
except IndexError:
raise IndexError(f'Band index ({band}) out of range.')
def get_raster(self, raster_meta_id: Union[str, int, dict], stac: bool = False) -> Dict:
"""Get raster entry detail.
Args:
stac: Optionally return as STAC Item dictionary/JSON.
Returns:
Serialized object representation.
"""
if isinstance(raster_meta_id, dict):
raster_meta_id = spatial_subentry_id(raster_meta_id)
r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}').json()
if stac:
r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}/stac').json()
return r
    def download_raster(
        self,
        raster_meta_id: Union[str, int, dict],
        pathname: Optional[str] = None,
        nest_with_name: bool = False,
        keep_existing: bool = True,
    ) -> RasterDownload:
        """
        Download the image set associated with a raster entry to disk.

        Args:
            raster_meta_id: The id of the RasterMeta, which is a child to the desired raster entry, or search result.
            pathname: The directory to download the image set to. If not supplied, a temporary directory will be used.
            nest_with_name: If True, nests the download within an additional directory, using the raster entry name.
            keep_existing: If False, replace files existing on disk. Only valid if `pathname` is given.

        Returns:
            A RasterDownload dataclass holding the download root and the paths
            of all image and ancillary files downloaded under it.
        """
        if isinstance(raster_meta_id, dict):
            raster_meta_id = spatial_subentry_id(raster_meta_id)
        r = self.session.get(f'rgd_imagery/raster/{raster_meta_id}')
        parent_raster = r.json().get('parent_raster', {})
        # Create dirs after request to avoid empty dirs if failed
        if pathname is None:
            pathname = tempfile.mkdtemp()
        # Handle optional nesting with raster entry name
        path = Path(pathname)
        parent_raster_name: Optional[str] = parent_raster.get('name')
        if nest_with_name and parent_raster_name:
            path = path / parent_raster_name
        # Ensure base download directory exists
        # NOTE(review): mkdir() without parents=True fails when intermediate
        # directories are missing (e.g. a raster name containing '/') --
        # confirm whether parents=True/exist_ok=True is wanted here.
        if not path.exists():
            path.mkdir()
        # Initialize dataclass
        raster_download = RasterDownload(path, [], [])
        # Download images (regular and processed) into the same directory.
        images = parent_raster.get('image_set', {}).get('images', [])
        processed_images = parent_raster.get('image_set', {}).get('processed_images', [])
        images += processed_images
        for image in tqdm(images, desc='Downloading image files'):
            file = image.get('file', {})
            file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
            if file_path:
                raster_download.images.append(file_path)
        # Download ancillary files
        ancillary = parent_raster.get('ancillary_files', [])
        for file in tqdm(ancillary, desc='Downloading ancillary files'):
            file_path = download_checksum_file_to_path(file, path, keep_existing=keep_existing)
            if file_path:
                raster_download.ancillary.append(file_path)
        return raster_download
def create_raster_stac(self, raster: Dict) -> Dict:
"""Create a raster entry using STAC format."""
r = self.session.post('rgd_imagery/raster/stac', json=raster)
r.raise_for_status()
return r.json()
def search_raster_stac(
self,
query: Optional[Union[Dict, str]] = None,
predicate: Optional[SEARCH_PREDICATE_CHOICE] = None,
relates: Optional[str] = None,
distance: Optional[Tuple[float, float]] = None,
acquired: Optional[DATETIME_OR_STR_TUPLE] = None,
instrumentation: Optional[str] = None,
num_bands: Optional[Tuple[int, int]] = None,
resolution: Optional[Tuple[int, int]] = None,
cloud_cover: Optional[Tuple[float, float]] = None,
limit: Optional[int] = None,
offset: Optional[int] = None,
) -> List[Dict]:
"""
Search for raster entries based on various criteria.
For Ranges (Tuples), an entry of `None` means that side of the range is unbounded.
E.g. a range of (2, None) is 2 or more, (None, 5) is at most 5, (2, 5) is between 2 and 5.
Args:
query: Either a WKT GeoJSON representation, a GeoJSON string, or a GeoJSON dict.
predicate: A named spatial predicate based on the DE-9IM. This spatial predicate will
be used to filter data such that predicate(a, b) where b is the queried geometry.
relates: Specify exactly how the queried geometry should relate to the data using a
DE-9IM string code.
distance: The min/max distance around the queried geometry in meters.
acquired: The min/max date and time (ISO 8601) when data was acquired.
instrumentation: The instrumentation used to acquire at least one of these data.
num_bands: The min/max number of bands in the raster.
resolution: The min/max resolution of the raster.
cloud_cover: The min/max cloud coverage of the raster.
limit: The maximum number of results to return.
offset: The number of results to skip.
Returns:
A list of Spatial Entries in STAC Item format.
"""
params = spatial_search_params(
query=query,
predicate=predicate,
relates=relates,
distance=distance,
acquired=acquired,
instrumentation=instrumentation,
limit=limit,
offset=offset,
)
if num_bands and len(num_bands) == 2:
nbmin, nbmax = num_bands
params['num_bands_min'] = nbmin
params['num_bands_max'] = nbmax
if resolution and len(resolution) == 2:
rmin, rmax = resolution
params['resolution_min'] = rmin
params['resolution_max'] = rmax
if cloud_cover and len(cloud_cover) == 2:
ccmin, ccmax = cloud_cover
params['cloud_cover_min'] = ccmin
params['cloud_cover_max'] = ccmax
return list(limit_offset_pager(self.session, 'rgd_imagery/raster/search', params=params))
def create_image_from_file(self, checksum_file: Dict) -> Dict:
"""
Create an image from a ChecksumFile.
Args:
checksum_file: The checksum file to create an image with.
"""
r = self.session.post('rgd_imagery', json={'file': checksum_file.get('id')})
r.raise_for_status()
return r.json()
def create_image_set(
self,
images: Iterable[Union[dict, int]],
name: Optional[str] = None,
description: Optional[str] = None,
) -> Dict:
"""
Create an image set from an iterable of images.
Args:
images: The images to create the image set from. These can be either dicts or integers (image ids).
name: (optional) The name of the image set.
description: (optional) The description of the image set.
"""
# Ensure all images are represented by their IDs
image_ids = [im['id'] if isinstance(im, dict) else im for im in images]
payload = {'images': image_ids}
if name is not None:
payload['name'] = name
if description is not None:
payload['description'] = description
return self.session.post('rgd_imagery/image_set', json=payload).json()
def get_raster_status(
self,
raster: Union[Dict, int],
):
"""Get raster processing status.
Parameters
----------
raster : dict, int
Accepts the Raster (not RasterMeta) primary key.
"""
if isinstance(raster, dict):
raster = raster['id']
return self.session.get(f'rgd_imagery/raster/{raster}/status').json()
    def create_raster_from_image_set(
        self,
        image_set: Union[Dict, int],
        ancillary_files: Optional[Iterable[Union[dict, int]]] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
    ) -> Dict:
        """
        Create a raster from an image set and block until it is processed.

        Args:
            image_set: The image set (serialized dict or integer ID) to build the raster from.
            ancillary_files: Optional ancillary files (dicts or IDs) to attach.
            name: (optional) The name of the raster.
            description: (optional) The description of the raster.

        Returns:
            The serialized RasterMeta of the fully processed raster.

        NOTE(review): the status poll below has no timeout -- a raster stuck
        in 'queued'/'running' blocks forever; confirm whether one is wanted.
        """
        # Construct payload, leaving out empty arguments
        payload = {'image_set': image_set['id'] if isinstance(image_set, dict) else image_set}
        if ancillary_files is not None:
            # Ensure all files are represented by their IDs
            payload['ancillary_files'] = [
                file['id'] if isinstance(file, dict) else file for file in ancillary_files
            ]
        if name is not None:
            payload['name'] = name
        if description is not None:
            payload['description'] = description
        raster = self.session.post('rgd_imagery/raster', json=payload).json()

        def poll():
            # True while the server still reports the raster as processing.
            response = self.get_raster_status(raster)
            if response['status'] in ['created', 'queued', 'running']:
                time.sleep(1)
                return True
            return False

        # Poll raster for status
        while poll():
            pass
        # Final refresh to ensure integrity of raster_meta_id
        raster = self.get_raster_status(raster)
        # Get and return RasterMeta
        return self.get_raster(raster['raster_meta_id'])
def create_processed_image_group(
self,
process_type: PROCESSED_IMAGE_TYPES,
parameters: Optional[dict] = None,
):
if parameters is None:
parameters = {}
r = self.session.post(
'image_process/group',
json=dict(
process_type=process_type,
parameters=parameters,
),
)
r.raise_for_status()
return r.json()
def get_processed_image_group_status(self, group_id: Union[str, int, dict]):
if isinstance(group_id, dict):
group_id = group_id['id']
r = self.session.get(f'image_process/group/{group_id}/status')
r.raise_for_status()
return r.json()
def create_processed_image(
self, image_ids: List[Union[str, int]], group_id: Union[str, int, dict]
) -> Dict:
if isinstance(group_id, dict):
group_id = group_id['id']
r = self.session.post(
'image_process',
json=dict(
group=group_id,
source_images=image_ids,
),
)
r.raise_for_status()
return r.json()
def get_leaflet_tile_source(
self,
image_id: Union[str, int],
band: int = None,
palette: str = None,
vmin: Union[float, int] = None,
vmax: Union[float, int] = None,
nodata: Union[float, int] = None,
**kwargs,
):
"""Generate an ipyleaflet TileLayer for the given Image.
Parameters
----------
image_id : Union[str, int]
The image ID to serve tiles from
**kwargs
All additional keyword arguments are passed to TileLayer
Return
------
ipyleaflet.TileLayer
"""
# Safely import ipyleaflet
try:
from ipyleaflet import TileLayer
except ImportError:
raise ImportError('Please install `ipyleaflet` and `jupyter`.')
# Check that the image source is valid and no server errors
r = self.session.get(f'rgd_imagery/tiles/{image_id}/info/metadata')
r.raise_for_status()
params = {}
if band is not None:
params['band'] = band
if palette is not None:
# TODO: check this value as an incorrect one can lead to server errors
# perhaps we should catch this, server side and ignore bad ones
params['palette'] = palette
if vmin is not None:
params['min'] = vmin
if vmax is not None:
params['max'] = vmax
if nodata is not None:
params['nodata'] = nodata
r = self.session.post('signature')
r.raise_for_status()
params.update(r.json())
url = self.session.create_url(
f'rgd_imagery/tiles/{image_id}/tiles/{{z}}/{{x}}/{{y}}.png?projection=EPSG:3857'
)
for k, v in params.items():
url += f'&{k}={v}'
# Set a default attribution let's folks know how awesome RGD is
kwargs.setdefault(
'attribution',
'<a href="https://github.com/ResonantGeoData">Resonant GeoData</a> (Kitware, Inc.)',
)
return TileLayer(url=url, **kwargs) | /rgd-imagery-client-0.3.11.tar.gz/rgd-imagery-client-0.3.11/rgd_imagery_client/plugin.py | 0.919715 | 0.233619 | plugin.py | pypi |
from glob import glob
from math import ceil
from shutil import copyfile
import numpy as np
from joblib import Parallel, delayed, cpu_count
from sklearn.base import ClassifierMixin, RegressorMixin, is_classifier
from sklearn.exceptions import NotFittedError
from rgf import utils
# Valid values for the ``algorithm`` and ``loss`` constructor parameters.
ALGORITHMS = ("RGF", "RGF_Opt", "RGF_Sib")
LOSSES = ("LS", "Expo", "Log", "Abs")
# Shared docstring template for the RGF estimators; the ``{%...%}``
# placeholders are substituted per concrete estimator (classifier/regressor).
rgf_estimator_docstring_template = \
    """
A Regularized Greedy Forest [1] {%estimator_type%}.
Tuning parameters detailed instruction:
https://github.com/RGF-team/rgf/blob/master/RGF/rgf-guide.rst#432-parameters-to-control-training
Parameters
----------
max_leaf : int, optional (default={%max_leaf_default_value%})
Training will be terminated when the number of
leaf nodes in the forest reaches this value.
(Original name: max_leaf_forest.)
test_interval : int, optional (default=100)
Test interval in terms of the number of leaf nodes.
algorithm : string ("RGF" or "RGF_Opt" or "RGF_Sib"), optional (default="RGF")
Regularization algorithm.
RGF: RGF with L2 regularization on leaf-only models.
RGF Opt: RGF with min-penalty regularization.
RGF Sib: RGF with min-penalty regularization with the sum-to-zero sibling constraints.
loss : string ("LS" or "Expo" or "Log" or "Abs"), optional (default="{%loss_default_value%}")
Loss function.
LS: Square loss.
Expo: Exponential loss.
Log: Logistic loss.
Abs: Absolute error loss.
reg_depth : float, optional (default=1.0)
Must be no smaller than 1.0.
Meant for being used with algorithm="RGF Opt"|"RGF Sib".
A larger value penalizes deeper nodes more severely.
l2 : float, optional (default=0.1)
Used to control the degree of L2 regularization.
(Original name: reg_L2.)
sl2 : float or None, optional (default=None)
Override L2 regularization parameter l2
for the process of growing the forest.
That is, if specified, the weight correction process uses l2
and the forest growing process uses sl2.
If None, no override takes place and
l2 is used throughout training.
(Original name: reg_sL2.)
normalize : boolean, optional (default={%normalize_default_value%})
If True, training targets are normalized
so that the average becomes zero.
(Original name: NormalizeTarget.)
min_samples_leaf : int or float, optional (default=10)
Minimum number of training data points in each leaf node.
If int, then consider min_samples_leaf as the minimum number.
If float, then min_samples_leaf is a percentage and
ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.
(Original name: min_pop.)
n_iter : int or None, optional (default=None)
Number of iterations of coordinate descent to optimize weights.
If None, 10 is used for loss="LS" and 5 for loss="Expo"|"Log".
(Original name: num_iteration_opt.)
n_tree_search : int, optional (default=1)
Number of trees to be searched for the nodes to split.
The most recently grown trees are searched first.
(Original name: num_tree_search.)
opt_interval : int, optional (default=100)
Weight optimization interval in terms of the number of leaf nodes.
For example, by default, weight optimization is performed
every time approximately 100 leaf nodes are newly added to the forest.
learning_rate : float, optional (default=0.5)
Step size of Newton updates used in coordinate descent to optimize weights.
(Original name: opt_stepsize.)
{%calc_prob_parameter%}{%n_jobs_parameter%}
memory_policy : string ("conservative" or "generous"), optional (default="generous")
Memory using policy.
Generous: it runs faster using more memory by keeping the sorted orders
of the features on memory for reuse.
Conservative: it uses less memory at the expense of longer runtime. Try only when
with default value it uses too much memory.
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
init_model : None or string, optional (default=None)
Filename of a previously saved model from which training should do warm-start.
If model has been saved into multiple files,
do not include numerical suffixes in the filename.
Note
----
Make sure you haven't forgotten to increase the value of the max_leaf parameter
regarding to the specified warm-start model
because warm-start model trees are counted in the overall number of trees.
Attributes:
-----------
estimators_ : {%estimators_property_type_desc%}
The collection of fitted sub-estimators when `fit` is performed.
{%classes_property%}{%n_classes_property%}
n_features_ : int
The number of features when `fit` is performed.
fitted_ : boolean
Indicates whether `fit` is performed.
sl2_ : float
The concrete regularization value for the process of growing the forest
used in model building process.
min_samples_leaf_ : int
Minimum number of training data points in each leaf node
used in model building process.
n_iter_ : int
Number of iterations of coordinate descent to optimize weights
used in model building process depending on the specified loss function.
Reference
---------
[1] Rie Johnson and Tong Zhang,
Learning Nonlinear Functions Using Regularized Greedy Forest
(https://arxiv.org/abs/1109.0887).
"""
class RGFEstimatorBase(utils.CommonRGFEstimatorBase):
    """Base estimator shared by the RGF classifier and regressor wrappers."""
def _validate_params(self,
max_leaf,
test_interval,
algorithm,
loss,
reg_depth,
l2,
sl2,
normalize,
min_samples_leaf,
n_iter,
n_tree_search,
opt_interval,
learning_rate,
verbose,
memory_policy,
init_model,
calc_prob="sigmoid",
n_jobs=-1):
if not isinstance(max_leaf, utils.INTS):
raise ValueError(
"max_leaf must be an integer, got {0}.".format(type(max_leaf)))
elif max_leaf <= 0:
raise ValueError(
"max_leaf must be greater than 0 but was %r." % max_leaf)
if not isinstance(test_interval, utils.INTS):
raise ValueError(
"test_interval must be an integer, got {0}.".format(
type(test_interval)))
elif test_interval <= 0:
raise ValueError(
"test_interval must be greater than 0 but was %r." % test_interval)
if not isinstance(algorithm, str):
raise ValueError(
"algorithm must be a string, got {0}.".format(type(algorithm)))
elif algorithm not in ALGORITHMS:
raise ValueError(
"algorithm must be 'RGF' or 'RGF_Opt' or 'RGF_Sib' but was %r." % algorithm)
if not isinstance(loss, str):
raise ValueError(
"loss must be a string, got {0}.".format(type(loss)))
elif loss not in LOSSES:
raise ValueError(
"loss must be 'LS' or 'Expo' or 'Log' but was %r." % loss)
if not isinstance(reg_depth, (utils.INTS, utils.FLOATS)):
raise ValueError(
"reg_depth must be an integer or float, got {0}.".format(
type(reg_depth)))
elif reg_depth < 1:
raise ValueError(
"reg_depth must be no smaller than 1.0 but was %r." % reg_depth)
if not isinstance(l2, utils.FLOATS):
raise ValueError("l2 must be a float, got {0}.".format(type(l2)))
elif l2 < 0:
raise ValueError("l2 must be no smaller than 0.0 but was %r." % l2)
if sl2 is not None and not isinstance(sl2, utils.FLOATS):
raise ValueError(
"sl2 must be a float or None, got {0}.".format(type(sl2)))
elif sl2 is not None and sl2 < 0:
raise ValueError(
"sl2 must be no smaller than 0.0 but was %r." % sl2)
if not isinstance(normalize, bool):
raise ValueError(
"normalize must be a boolean, got {0}.".format(type(normalize)))
err_desc = "min_samples_leaf must be at least 1 or in (0, 0.5], got %r." % min_samples_leaf
if isinstance(min_samples_leaf, utils.INTS):
if min_samples_leaf < 1:
raise ValueError(err_desc)
elif isinstance(min_samples_leaf, utils.FLOATS):
if not 0.0 < min_samples_leaf <= 0.5:
raise ValueError(err_desc)
else:
raise ValueError(
"min_samples_leaf must be an integer or float, got {0}.".format(
type(min_samples_leaf)))
if n_iter is not None and not isinstance(n_iter, utils.INTS):
raise ValueError(
"n_iter must be an integer or None, got {0}.".format(
type(n_iter)))
elif n_iter is not None and n_iter < 1:
raise ValueError(
"n_iter must be no smaller than 1 but was %r." % n_iter)
if not isinstance(n_tree_search, utils.INTS):
raise ValueError(
"n_tree_search must be an integer, got {0}.".format(
type(n_tree_search)))
elif n_tree_search < 1:
raise ValueError(
"n_tree_search must be no smaller than 1 but was %r." % n_tree_search)
if not isinstance(opt_interval, utils.INTS):
raise ValueError("opt_interval must be an integer, got {0}.".format(
type(opt_interval)))
elif opt_interval < 1:
raise ValueError(
"opt_interval must be no smaller than 1 but was %r." % opt_interval)
if not isinstance(learning_rate, utils.FLOATS):
raise ValueError("learning_rate must be a float, got {0}.".format(
type(learning_rate)))
elif learning_rate <= 0:
raise ValueError(
"learning_rate must be greater than 0 but was %r." % learning_rate)
if not isinstance(verbose, utils.INTS):
raise ValueError(
"verbose must be an integer, got {0}.".format(type(verbose)))
elif verbose < 0:
raise ValueError(
"verbose must be no smaller than 0 but was %r." % verbose)
if not isinstance(memory_policy, str):
raise ValueError("memory_policy must be a string, got {0}.".format(
type(memory_policy)))
elif memory_policy not in ("conservative", "generous"):
raise ValueError(
"memory_policy must be 'conservative' or 'generous' but was %r." % memory_policy)
if init_model is not None and not isinstance(init_model, str):
raise ValueError(
"init_model must be a string or None, got {0}.".format(
type(init_model)))
if not isinstance(calc_prob, str):
raise ValueError(
"calc_prob must be a string, got {0}.".format(type(calc_prob)))
elif calc_prob not in ("sigmoid", "softmax"):
raise ValueError(
"calc_prob must be 'sigmoid' or 'softmax' but was %r." % calc_prob)
if not isinstance(n_jobs, utils.INTS):
raise ValueError(
"n_jobs must be an integer, got {0}.".format(type(n_jobs)))
@property
def sl2_(self):
"""
The concrete regularization value for the process of growing the forest
used in model building process.
"""
if not hasattr(self, '_sl2'):
raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
else:
return self._sl2
@property
def min_samples_leaf_(self):
"""
Minimum number of training data points in each leaf node
used in model building process.
"""
if not hasattr(self, '_min_samples_leaf'):
raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
else:
return self._min_samples_leaf
@property
def n_iter_(self):
"""
Number of iterations of coordinate descent to optimize weights
used in model building process depending on the specified loss function.
"""
if not hasattr(self, '_n_iter'):
raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
else:
return self._n_iter
def _set_params_with_dependencies(self):
if self.sl2 is None:
self._sl2 = self.l2
else:
self._sl2 = self.sl2
if isinstance(self.min_samples_leaf, utils.FLOATS):
self._min_samples_leaf = ceil(self.min_samples_leaf * self._n_samples)
else:
self._min_samples_leaf = self.min_samples_leaf
if self.n_iter is None:
if self.loss == "LS":
self._n_iter = 10
else:
self._n_iter = 5
else:
self._n_iter = self.n_iter
def _get_params(self):
    """Assemble the keyword arguments forwarded to each RGFExecuter."""
    res = super()._get_params()
    res["max_leaf"] = self.max_leaf
    res["test_interval"] = self.test_interval
    res["algorithm"] = self.algorithm
    res["loss"] = self.loss
    res["reg_depth"] = self.reg_depth
    res["l2"] = self.l2
    # Derived values (computed in _set_params_with_dependencies), not the
    # raw constructor arguments.
    res["sl2"] = self._sl2
    res["normalize"] = self.normalize
    res["min_samples_leaf"] = self._min_samples_leaf
    res["n_iter"] = self._n_iter
    res["n_tree_search"] = self.n_tree_search
    res["opt_interval"] = self.opt_interval
    res["learning_rate"] = self.learning_rate
    res["memory_policy"] = self.memory_policy
    res["verbose"] = self.verbose
    res["init_model"] = self.init_model
    res["is_classification"] = is_classifier(self)
    return res
def _fit_binary_task(self, X, y, sample_weight, params):
    """Fit a single executer for a two-class problem (always one CPU)."""
    if self.verbose and self.n_jobs != 1:
        print('n_jobs = {}, but RGFClassifier uses one CPU because classes_ is 2'.format(self.n_jobs))
    self._estimators[0] = RGFExecuter(**params).fit(X, y, sample_weight)
def _fit_regression_task(self, X, y, sample_weight, params):
    """Fit the single underlying executer used for regression."""
    executer = RGFExecuter(**params)
    self._estimators[0] = executer.fit(X, y, sample_weight)
def _fit_multiclass_task(self, X, y, sample_weight, params):
    """Fit one one-vs-rest binary executer per class, in parallel.

    When warm-starting (``params['init_model']`` is not None), each
    per-class model is loaded from a numerically suffixed file, mirroring
    the naming scheme used by ``save_model``.
    """
    if params['init_model'] is not None:
        max_digits = len(str(len(self._classes)))
        init_model_filenames = ['{}.{}'.format(params['init_model'],
                                               str(i + 1).zfill(max_digits)) for i in range(self._n_classes)]
    ovr_list = [None] * self._n_classes
    for i, cls_num in enumerate(self._classes):
        if params['init_model'] is not None:
            params['init_model'] = init_model_filenames[i]
        self._classes_map[i] = cls_num
        # Binary target for the one-vs-rest sub-problem of class cls_num.
        ovr_list[i] = (y == cls_num).astype(int)
        self._estimators[i] = RGFExecuter(**params)
    n_jobs = self.n_jobs if self.n_jobs > 0 else cpu_count() + self.n_jobs + 1
    # Effective parallelism cannot exceed the number of OvR sub-problems.
    # Bug fix: this was max(...), which made the warning below unreachable.
    substantial_n_jobs = min(n_jobs, self.n_classes_)
    if substantial_n_jobs < n_jobs and self.verbose:
        print('n_jobs = {0}, but RGFClassifier uses {1} CPUs because '
              'classes_ is {2}'.format(n_jobs, substantial_n_jobs,
                                       self.n_classes_))
    self._estimators = Parallel(n_jobs=self.n_jobs)(delayed(utils.fit_ovr_binary)(self._estimators[i],
                                                                                  X,
                                                                                  ovr_list[i],
                                                                                  sample_weight)
                                                    for i in range(self._n_classes))
def dump_model(self):
    """
    Dump forest information to console.
    Examples:
    ---------
    [  0], depth=0, gain=0.599606, F11, 392.8
    [  1], depth=1, gain=0.818876, F4, 0.6275
    [  3], depth=2, gain=0.806904, F5, 7.226
    [  4], depth=2, gain=0.832003, F4, 0.686
    [  2], (-0.0146), depth=1, gain=0
    Here, [ x] is order of generated, (x) is weight for leaf nodes, last value is a border.
    """
    if not getattr(self, '_fitted', False):
        raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
    for est in self._estimators:
        est.dump_model()
def save_model(self, filename):
    """
    Save model to {%file_singular_or_plural%} from which training can do warm-start in the future.
    {%note%}
    Parameters
    ----------
    filename : string
        Filename to save model.
    Returns
    -------
    self : object
        Returns self.
    """
    if not getattr(self, '_fitted', False):
        raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
    n_est = len(self._estimators)
    if n_est > 1:
        # Multiclass: one file per OvR sub-model, numerically suffixed.
        width = len(str(n_est))
        for idx, est in enumerate(self._estimators, 1):
            est.save_model('{}.{}'.format(filename, str(idx).zfill(width)))
    else:
        self._estimators[0].save_model(filename)
    return self
@property
def feature_importances_(self):
    """
    The feature importances.
    The importance of a feature is computed from sum of gain of each node.
    """
    if not getattr(self, '_fitted', False):
        raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
    # Average the per-estimator importances (one estimator per OvR task).
    per_estimator = [est.feature_importances_ for est in self._estimators]
    return np.mean(per_estimator, axis=0)
class RGFRegressor(RegressorMixin, utils.RGFRegressorMixin, RGFEstimatorBase):
    # NOTE: the class docstring is generated below by substituting
    # regressor-specific values into rgf_estimator_docstring_template,
    # so no literal docstring is written here.
    def __init__(self,
                 max_leaf=500,
                 test_interval=100,
                 algorithm="RGF",
                 loss="LS",
                 reg_depth=1.0,
                 l2=0.1,
                 sl2=None,
                 normalize=True,
                 min_samples_leaf=10,
                 n_iter=None,
                 n_tree_search=1,
                 opt_interval=100,
                 learning_rate=0.5,
                 memory_policy="generous",
                 verbose=0,
                 init_model=None):
        if not utils.Config().RGF_AVAILABLE:
            raise Exception('RGF estimators are unavailable for usage.')
        super().__init__()
        # Plain attribute assignment only, per the scikit-learn convention
        # that __init__ stores constructor arguments unmodified.
        self.max_leaf = max_leaf
        self.test_interval = test_interval
        self.algorithm = algorithm
        self.loss = loss
        self.reg_depth = reg_depth
        self.l2 = l2
        self.sl2 = sl2
        self.normalize = normalize
        self.min_samples_leaf = min_samples_leaf
        self.n_iter = n_iter
        self.n_tree_search = n_tree_search
        self.opt_interval = opt_interval
        self.learning_rate = learning_rate
        self.memory_policy = memory_policy
        self.verbose = verbose
        self.init_model = init_model

    # Template values substituted into the shared docstring template below.
    _regressor_init_specific_values = {
        '{%estimator_type%}': 'regressor',
        '{%max_leaf_default_value%}': '500',
        '{%loss_default_value%}': 'LS',
        '{%normalize_default_value%}': 'True',
        '{%calc_prob_parameter%}': '',
        '{%n_jobs_parameter%}': '',
        '{%estimators_property_type_desc%}': 'one-element list of underlying regressors',
        '{%classes_property%}': '',
        '{%n_classes_property%}': ''
    }
    __doc__ = rgf_estimator_docstring_template
    for _template, _value in _regressor_init_specific_values.items():
        __doc__ = __doc__.replace(_template, _value)

    def save_model(self, filename):
        super().save_model(filename)

    # save_model's docstring is likewise generated from the base class
    # docstring template (file is singular for a regressor).
    _regressor_save_model_specific_values = {
        '{%file_singular_or_plural%}': 'file',
        '{%note%}': ''
    }
    save_model.__doc__ = RGFEstimatorBase.save_model.__doc__
    for _template, _value in _regressor_save_model_specific_values.items():
        save_model.__doc__ = save_model.__doc__.replace(_template, _value)
class RGFClassifier(ClassifierMixin, utils.RGFClassifierMixin, RGFEstimatorBase):
    # NOTE: the class docstring is generated below by substituting
    # classifier-specific values into rgf_estimator_docstring_template,
    # so no literal docstring is written here.
    def __init__(self,
                 max_leaf=1000,
                 test_interval=100,
                 algorithm="RGF",
                 loss="Log",
                 reg_depth=1.0,
                 l2=0.1,
                 sl2=None,
                 normalize=False,
                 min_samples_leaf=10,
                 n_iter=None,
                 n_tree_search=1,
                 opt_interval=100,
                 learning_rate=0.5,
                 calc_prob="sigmoid",
                 n_jobs=-1,
                 memory_policy="generous",
                 verbose=0,
                 init_model=None):
        if not utils.Config().RGF_AVAILABLE:
            raise Exception('RGF estimators are unavailable for usage.')
        super().__init__()
        # Plain attribute assignment only, per the scikit-learn convention
        # that __init__ stores constructor arguments unmodified.
        self.max_leaf = max_leaf
        self.test_interval = test_interval
        self.algorithm = algorithm
        self.loss = loss
        self.reg_depth = reg_depth
        self.l2 = l2
        self.sl2 = sl2
        self.normalize = normalize
        self.min_samples_leaf = min_samples_leaf
        self.n_iter = n_iter
        self.n_tree_search = n_tree_search
        self.opt_interval = opt_interval
        self.learning_rate = learning_rate
        self.calc_prob = calc_prob
        self.n_jobs = n_jobs
        self.memory_policy = memory_policy
        self.verbose = verbose
        self.init_model = init_model

    # Template values substituted into the shared docstring template below.
    _classifier_init_specific_values = {
        '{%estimator_type%}': 'classifier',
        '{%max_leaf_default_value%}': '1000',
        '{%loss_default_value%}': 'Log',
        '{%normalize_default_value%}': 'False',
        '{%calc_prob_parameter%}': """
    calc_prob : string ("sigmoid" or "softmax"), optional (default="sigmoid")
        Method of probability calculation.
    """,
        '{%n_jobs_parameter%}': """
    n_jobs : integer, optional (default=-1)
        The number of jobs to use for the computation.
        The substantial number of the jobs dependents on classes_.
        If classes_ = 2, the substantial max number of the jobs is one.
        If classes_ > 2, the substantial max number of the jobs is the same as classes_.
        If n_jobs = 1, no parallel computing code is used at all regardless of classes_.
        If n_jobs = -1 and classes_ >= number of CPU, all CPUs are used.
        For n_jobs = -2, all CPUs but one are used.
        For n_jobs below -1, (n_cpus + 1 + n_jobs) are used.
    """,
        '{%estimators_property_type_desc%}': 'list of binary classifiers',
        '{%classes_property%}': """
    classes_ : array of shape = [n_classes]
        The classes labels when `fit` is performed.
    """,
        '{%n_classes_property%}': """
    n_classes_ : int
        The number of classes when `fit` is performed.
    """
    }
    __doc__ = rgf_estimator_docstring_template
    for _template, _value in _classifier_init_specific_values.items():
        __doc__ = __doc__.replace(_template, _value)

    def save_model(self, filename):
        super().save_model(filename)

    # save_model's docstring is likewise generated from the base class
    # docstring template (multiclass models are saved into multiple files).
    _classifier_save_model_specific_values = {
        '{%file_singular_or_plural%}': 'file(s)',
        '{%note%}': """
    Note
    ----
    Due to the fact that multiclass classification problems are handled by the OvR method,
    such models are saved into multiple files with numerical suffixes,
    like filename.1, filename.2, ..., filename.n_classes.
    """
    }
    save_model.__doc__ = RGFEstimatorBase.save_model.__doc__
    for _template, _value in _classifier_save_model_specific_values.items():
        save_model.__doc__ = save_model.__doc__.replace(_template, _value)
class RGFExecuter(utils.CommonRGFExecuterBase):
    # Thin wrapper translating estimator parameters into command-line
    # arguments for the external RGF executable. Attribute values such as
    # self.max_leaf, self._train_x_loc etc. are presumably set by the
    # CommonRGFExecuterBase constructor from the forwarded params
    # dict — confirm against rgf.utils.
    def _save_sparse_X(self, path, X):
        # RGF's sparse input format requires a header line.
        utils.sparse_savetxt(path, X, including_header=True)

    def _save_dense_files(self, X, y, sample_weight):
        # Features, targets and (optionally) weights go to separate files.
        np.savetxt(self._train_x_loc, X, delimiter=' ', fmt="%s")
        np.savetxt(self._train_y_loc, y, delimiter=' ', fmt="%s")
        if self._use_sample_weight:
            np.savetxt(self._train_weight_loc, sample_weight, delimiter=' ', fmt="%s")

    def _get_train_command(self):
        """Build the argv tuple for the RGF 'train' sub-command."""
        params = []
        if self.verbose > 0:
            params.append("Verbose")
        if self.verbose > 5:
            params.append("Verbose_opt")  # Add some info on weight optimization
        if self.normalize:
            params.append("NormalizeTarget")
        params.append("train_x_fn=%s" % self._train_x_loc)
        params.append("train_y_fn=%s" % self._train_y_loc)
        params.append("algorithm=%s" % self.algorithm)
        params.append("loss=%s" % self.loss)
        params.append("max_leaf_forest=%s" % self.max_leaf)
        params.append("test_interval=%s" % self.test_interval)
        params.append("reg_L2=%s" % self.l2)
        params.append("reg_sL2=%s" % self.sl2)
        params.append("reg_depth=%s" % self.reg_depth)
        params.append("min_pop=%s" % self.min_samples_leaf)
        params.append("num_iteration_opt=%s" % self.n_iter)
        params.append("num_tree_search=%s" % self.n_tree_search)
        params.append("opt_interval=%s" % self.opt_interval)
        params.append("opt_stepsize=%s" % self.learning_rate)
        # RGF expects capitalized policy names ("Conservative"/"Generous").
        params.append("memory_policy=%s" % self.memory_policy.title())
        params.append("model_fn_prefix=%s" % self._model_file_loc)
        if self._use_sample_weight:
            params.append("train_w_fn=%s" % self._train_weight_loc)
        if self.init_model is not None:
            params.append("model_fn_for_warmstart=%s" % self.init_model)
        # RGF takes all parameters as a single comma-separated argument.
        cmd = (self.config.RGF_PATH, "train", ",".join(params))
        return cmd

    def _find_model_file(self):
        # RGF writes one model file per test interval; the lexicographically
        # last one corresponds to the final (largest) forest.
        model_files = glob(self._model_file_loc + "*")
        if not model_files:
            raise Exception('Model learning result is not found in {0}. '
                            'Training is abnormally finished.'.format(self.config.TEMP_PATH))
        self._model_file = sorted(model_files, reverse=True)[0]

    def _get_test_command(self, is_sparse_test_X):
        """Build the argv tuple for the RGF 'predict' sub-command."""
        params = []
        params.append("test_x_fn=%s" % self._test_x_loc)
        params.append("prediction_fn=%s" % self._pred_loc)
        params.append("model_fn=%s" % self._model_file)
        cmd = (self.config.RGF_PATH, "predict", ",".join(params))
        return cmd

    def dump_model(self):
        """Print the learned forest structure via RGF's 'dump_model' command."""
        self._check_fitted()
        cmd = (self.config.RGF_PATH, "dump_model", "model_fn=%s" % self._model_file)
        self._execute_command(cmd, force_verbose=True)

    def save_model(self, filename):
        """Copy the learned model file to ``filename``."""
        self._check_fitted()
        copyfile(self._model_file, filename)

    @property
    def feature_importances_(self):
        # Importances are computed by the external binary and read back
        # from a temporary file.
        params = []
        params.append("train_x_fn=%s" % self._train_x_loc)
        params.append("feature_importances_fn=%s" % self._feature_importances_loc)
        params.append("model_fn=%s" % self._model_file)
        cmd = (self.config.RGF_PATH, "feature_importances", ",".join(params))
        self._execute_command(cmd)
        return np.loadtxt(self._feature_importances_loc)
import os
from math import ceil
import numpy as np
from joblib import cpu_count
from sklearn.base import ClassifierMixin, RegressorMixin, is_classifier
from sklearn.exceptions import NotFittedError
from rgf import utils
ALGORITHMS = ("rgf", "epsilon-greedy")
LOSSES = ("LS", "MODLS", "LOGISTIC")
fastrgf_estimator_docstring_template = \
"""
A Fast Regularized Greedy Forest [1] {%estimator_type%}.
Parameters
----------
n_estimators : int, optional (default=500)
The number of trees in the forest.
(Original name: forest.ntrees.)
max_depth : int, optional (default=6)
Maximum tree depth.
(Original name: dtree.max_level.)
max_leaf : int, optional (default=50)
Maximum number of leaf nodes in best-first search.
(Original name: dtree.max_nodes.)
tree_gain_ratio : float, optional (default=1.0)
New tree is created when leaf-nodes gain < this value * estimated gain
of creating new tree.
(Original name: dtree.new_tree_gain_ratio.)
min_samples_leaf : int or float, optional (default=5)
Minimum number of training data points in each leaf node.
If int, then consider min_samples_leaf as the minimum number.
If float, then min_samples_leaf is a percentage and
ceil(min_samples_leaf * n_samples) are the minimum number of samples for each node.
(Original name: dtree.min_sample.)
{%loss_parameter%}
l1 : float, optional (default=1.0)
Used to control the degree of L1 regularization.
(Original name: dtree.lamL1.)
l2 : float, optional (default=1000.0)
Used to control the degree of L2 regularization.
(Original name: dtree.lamL2.)
opt_algorithm : string ("rgf" or "epsilon-greedy"), optional (default="rgf")
Optimization method for training forest.
(Original name: forest.opt.)
learning_rate : float, optional (default=0.001)
Step size of epsilon-greedy boosting.
Meant for being used with opt_algorithm="epsilon-greedy".
(Original name: forest.stepsize.)
max_bin : int or None, optional (default=None)
Maximum number of discretized values (bins).
If None, 65000 is used for dense data and 200 for sparse data.
(Original name: discretize.(sparse/dense).max_buckets.)
min_child_weight : float, optional (default=5.0)
Minimum number of effective samples for each discretized value (bin).
(Original name: discretize.(sparse/dense).min_bucket_weights.)
data_l2 : float, optional (default=2.0)
Used to control the degree of L2 regularization for discretization.
(Original name: discretize.(sparse/dense).lamL2.)
sparse_max_features : int, optional (default=80000)
Maximum number of selected features.
Meant for being used with sparse data.
(Original name: discretize.sparse.max_features.)
sparse_min_occurences : int, optional (default=5)
Minimum number of occurrences for a feature to be selected.
Meant for being used with sparse data.
(Original name: discretize.sparse.min_occurrences.)
{%calc_prob_parameter%}
n_jobs : int, optional (default=-1)
The number of jobs to run in parallel for both fit and predict.
If -1, all CPUs are used.
If -2, all CPUs but one are used.
If < -1, (n_cpus + 1 + n_jobs) are used.
(Original name: set.nthreads.)
verbose : int, optional (default=0)
Controls the verbosity of the tree building process.
(Original name: set.verbose.)
Attributes:
-----------
estimators_ : {%estimators_property_type_desc%}
The collection of fitted sub-estimators when `fit` is performed.
{%classes_property%}{%n_classes_property%}
n_features_ : int
The number of features when `fit` is performed.
fitted_ : boolean
Indicates whether `fit` is performed.
max_bin_ : int
The concrete maximum number of discretized values (bins)
used in model building process for given data.
min_samples_leaf_ : int
Minimum number of training data points in each leaf node
used in model building process.
Reference
---------
[1] Tong Zhang,
FastRGF: C++ Multi-core Implementation of Regularized Greedy Forest (RGF)
(https://github.com/RGF-team/rgf/tree/master/FastRGF).
"""
class FastRGFEstimatorBase(utils.CommonRGFEstimatorBase):
    # Shared parameter validation and fitting helpers for the FastRGF
    # regressor and classifier wrappers.
    def _validate_params(self,
                         n_estimators,
                         max_depth,
                         max_leaf,
                         tree_gain_ratio,
                         min_samples_leaf,
                         l1,
                         l2,
                         opt_algorithm,
                         learning_rate,
                         max_bin,
                         min_child_weight,
                         data_l2,
                         sparse_max_features,
                         sparse_min_occurences,
                         n_jobs,
                         verbose,
                         loss="LS",
                         calc_prob="sigmoid"):
        """Check types and value ranges of all constructor parameters.

        Raises
        ------
        ValueError
            If any parameter has an invalid type or an out-of-range value.
        """
        if not isinstance(n_estimators, utils.INTS):
            raise ValueError("n_estimators must be an integer, got {0}.".format(
                type(n_estimators)))
        elif n_estimators <= 0:
            raise ValueError(
                "n_estimators must be greater than 0 but was %r." % n_estimators)
        if not isinstance(max_depth, utils.INTS):
            raise ValueError("max_depth must be an integer, got {0}.".format(
                type(max_depth)))
        elif max_depth <= 0:
            raise ValueError(
                "max_depth must be greater than 0 but was %r." % max_depth)
        if not isinstance(max_leaf, utils.INTS):
            raise ValueError(
                "max_leaf must be an integer, got {0}.".format(type(max_leaf)))
        elif max_leaf <= 0:
            raise ValueError(
                "max_leaf must be greater than 0 but was %r." % max_leaf)
        if not isinstance(tree_gain_ratio, utils.FLOATS):
            raise ValueError("tree_gain_ratio must be a float, got {0}.".format(
                type(tree_gain_ratio)))
        elif not 0.0 < tree_gain_ratio <= 1.0:
            raise ValueError(
                "tree_gain_ratio must be in (0, 1.0] but was %r." % tree_gain_ratio)
        # min_samples_leaf may be an absolute count (int >= 1) or a
        # fraction of the training set (float in (0, 0.5]).
        err_desc = "min_samples_leaf must be at least 1 or in (0, 0.5], got %r." % min_samples_leaf
        if isinstance(min_samples_leaf, utils.INTS):
            if min_samples_leaf < 1:
                raise ValueError(err_desc)
        elif isinstance(min_samples_leaf, utils.FLOATS):
            if not 0.0 < min_samples_leaf <= 0.5:
                raise ValueError(err_desc)
        else:
            raise ValueError(
                "min_samples_leaf must be an integer or float, got {0}.".format(
                    type(min_samples_leaf)))
        if not isinstance(l1, utils.FLOATS):
            raise ValueError("l1 must be a float, got {0}.".format(type(l1)))
        elif l1 < 0:
            raise ValueError("l1 must be no smaller than 0.0 but was %r." % l1)
        if not isinstance(l2, utils.FLOATS):
            raise ValueError("l2 must be a float, got {0}.".format(type(l2)))
        elif l2 < 0:
            raise ValueError("l2 must be no smaller than 0.0 but was %r." % l2)
        if not isinstance(opt_algorithm, str):
            raise ValueError("opt_algorithm must be a string, got {0}.".format(
                type(opt_algorithm)))
        elif opt_algorithm not in ALGORITHMS:
            raise ValueError(
                "opt_algorithm must be 'rgf' or 'epsilon-greedy' but was %r." % opt_algorithm)
        if not isinstance(learning_rate, utils.FLOATS):
            raise ValueError("learning_rate must be a float, got {0}.".format(
                type(learning_rate)))
        elif learning_rate <= 0:
            raise ValueError(
                "learning_rate must be greater than 0.0 but was %r." % learning_rate)
        if max_bin is not None and not isinstance(max_bin, utils.INTS):
            raise ValueError(
                "max_bin must be an integer or None, got {0}.".format(
                    type(max_bin)))
        elif max_bin is not None and max_bin < 1:
            raise ValueError(
                "max_bin must be no smaller than 1 but was %r." % max_bin)
        if not isinstance(min_child_weight, utils.FLOATS):
            raise ValueError(
                "min_child_weight must be a float, got {0}.".format(
                    type(min_child_weight)))
        elif min_child_weight < 0:
            raise ValueError(
                "min_child_weight must be no smaller than 0.0 but was %r." % min_child_weight)
        if not isinstance(data_l2, utils.FLOATS):
            raise ValueError(
                "data_l2 must be a float, got {0}.".format(type(data_l2)))
        elif data_l2 < 0:
            raise ValueError(
                "data_l2 must be no smaller than 0.0 but was %r." % data_l2)
        if not isinstance(sparse_max_features, utils.INTS):
            raise ValueError(
                "sparse_max_features must be an integer, got {0}.".format(
                    type(sparse_max_features)))
        elif sparse_max_features <= 0:
            raise ValueError(
                "sparse_max_features must be greater than 0 but was %r." % sparse_max_features)
        if not isinstance(sparse_min_occurences, utils.INTS):
            raise ValueError(
                "sparse_min_occurences must be an integer, got {0}.".format(
                    type(sparse_min_occurences)))
        elif sparse_min_occurences < 0:
            # Bug fix: the original message was missing the word "must".
            raise ValueError(
                "sparse_min_occurences must be no smaller than 0 but was %r." % sparse_min_occurences)
        if not isinstance(n_jobs, utils.INTS):
            raise ValueError(
                "n_jobs must be an integer, got {0}.".format(type(n_jobs)))
        if not isinstance(verbose, utils.INTS):
            raise ValueError(
                "verbose must be an integer, got {0}.".format(type(verbose)))
        elif verbose < 0:
            raise ValueError(
                "verbose must be no smaller than 0 but was %r." % verbose)
        if not isinstance(loss, str):
            raise ValueError(
                "loss must be a string, got {0}.".format(type(loss)))
        elif loss not in LOSSES:
            raise ValueError(
                "loss must be 'LS' or 'MODLS' or 'LOGISTIC' but was %r." % loss)
        if not isinstance(calc_prob, str):
            raise ValueError(
                "calc_prob must be a string, got {0}.".format(type(calc_prob)))
        elif calc_prob not in ("sigmoid", "softmax"):
            raise ValueError(
                "calc_prob must be 'sigmoid' or 'softmax' but was %r." % calc_prob)

    @property
    def max_bin_(self):
        """
        The concrete maximum number of discretized values (bins)
        used in model building process for given data.
        """
        if not hasattr(self, '_max_bin'):
            raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
        else:
            return self._max_bin

    @property
    def min_samples_leaf_(self):
        """
        Minimum number of training data points in each leaf node
        used in model building process.
        """
        if not hasattr(self, '_min_samples_leaf'):
            raise NotFittedError(utils.NOT_FITTED_ERROR_DESC)
        else:
            return self._min_samples_leaf

    def _set_params_with_dependencies(self):
        """Resolve concrete values for parameters whose defaults depend on
        the training data or on other parameters."""
        # Default bin counts differ for sparse vs. dense input.
        if self.max_bin is None:
            if self._is_sparse_train_X:
                self._max_bin = 200
            else:
                self._max_bin = 65000
        else:
            self._max_bin = self.max_bin
        # A float min_samples_leaf is a fraction of the training set size.
        if isinstance(self.min_samples_leaf, utils.FLOATS):
            self._min_samples_leaf = ceil(self.min_samples_leaf * self._n_samples)
        else:
            self._min_samples_leaf = self.min_samples_leaf
        # FastRGF uses 0 to mean "all CPUs"; negative values follow the
        # joblib convention (n_cpus + 1 + n_jobs).
        if self.n_jobs == -1:
            self._n_jobs = 0
        elif self.n_jobs < 0:
            self._n_jobs = cpu_count() + self.n_jobs + 1
        else:
            self._n_jobs = self.n_jobs
        self._set_target_and_loss()

    def _get_params(self):
        """Assemble the keyword arguments forwarded to each FastRGFExecuter."""
        res = super()._get_params()
        res.update(dict(max_depth=self.max_depth,
                        max_leaf=self.max_leaf,
                        tree_gain_ratio=self.tree_gain_ratio,
                        min_samples_leaf=self._min_samples_leaf,
                        loss=self._loss,
                        l1=self.l1,
                        l2=self.l2,
                        opt_algorithm=self.opt_algorithm,
                        n_estimators=self.n_estimators,
                        learning_rate=self.learning_rate,
                        max_bin=self._max_bin,
                        data_l2=self.data_l2,
                        min_child_weight=self.min_child_weight,
                        sparse_max_features=self.sparse_max_features,
                        sparse_min_occurences=self.sparse_min_occurences,
                        n_jobs=self._n_jobs,
                        verbose=self.verbose,
                        is_classification=is_classifier(self),
                        target=self._target))
        return res

    def _fit_binary_task(self, X, y, sample_weight, params):
        self._estimators[0] = FastRGFExecuter(**params).fit(X, y, sample_weight)

    def _fit_regression_task(self, X, y, sample_weight, params):
        self._estimators[0] = FastRGFExecuter(**params).fit(X, y, sample_weight)

    def _fit_multiclass_task(self, X, y, sample_weight, params):
        # One-vs-rest: fit one binary executer per class, sequentially.
        for i, cls_num in enumerate(self._classes):
            self._classes_map[i] = cls_num
            self._estimators[i] = \
                FastRGFExecuter(**params).fit(X, (y == cls_num).astype(int),
                                              sample_weight)
class FastRGFRegressor(RegressorMixin, utils.RGFRegressorMixin, FastRGFEstimatorBase):
    # NOTE: the class docstring is generated below by substituting
    # regressor-specific values into fastrgf_estimator_docstring_template.
    def __init__(self,
                 n_estimators=500,
                 max_depth=6,
                 max_leaf=50,
                 tree_gain_ratio=1.0,
                 min_samples_leaf=5,
                 l1=1.0,
                 l2=1000.0,
                 opt_algorithm="rgf",
                 learning_rate=0.001,
                 max_bin=None,
                 min_child_weight=5.0,
                 data_l2=2.0,
                 sparse_max_features=80000,
                 sparse_min_occurences=5,
                 n_jobs=-1,
                 verbose=0):
        if not utils.Config().FASTRGF_AVAILABLE:
            raise Exception('FastRGF estimators are unavailable for usage.')
        super().__init__()
        # Plain attribute assignment only, per the scikit-learn convention
        # that __init__ stores constructor arguments unmodified.
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_leaf = max_leaf
        self.tree_gain_ratio = tree_gain_ratio
        self.min_samples_leaf = min_samples_leaf
        self.l1 = l1
        self.l2 = l2
        self.opt_algorithm = opt_algorithm
        self.learning_rate = learning_rate
        self.max_bin = max_bin
        self.min_child_weight = min_child_weight
        self.data_l2 = data_l2
        self.sparse_max_features = sparse_max_features
        self.sparse_min_occurences = sparse_min_occurences
        self.n_jobs = n_jobs
        self.verbose = verbose

    def _set_target_and_loss(self):
        self._target = "REAL"
        self._loss = "LS"  # Regressor can use only LS loss.

    # Template values substituted into the shared docstring template below.
    _regressor_specific_values = {
        '{%estimator_type%}': 'regressor',
        '{%loss_parameter%}': '',
        '{%calc_prob_parameter%}': '',
        '{%estimators_property_type_desc%}': 'one-element list of underlying regressors',
        '{%classes_property%}': '',
        '{%n_classes_property%}': ''
    }
    __doc__ = fastrgf_estimator_docstring_template
    for _template, _value in _regressor_specific_values.items():
        __doc__ = __doc__.replace(_template, _value)
class FastRGFClassifier(ClassifierMixin, utils.RGFClassifierMixin, FastRGFEstimatorBase):
    # NOTE: the class docstring is generated below by substituting
    # classifier-specific values into fastrgf_estimator_docstring_template.
    def __init__(self,
                 n_estimators=500,
                 max_depth=6,
                 max_leaf=50,
                 tree_gain_ratio=1.0,
                 min_samples_leaf=5,
                 loss="LS",
                 l1=1.0,
                 l2=1000.0,
                 opt_algorithm="rgf",
                 learning_rate=0.001,
                 max_bin=None,
                 min_child_weight=5.0,
                 data_l2=2.0,
                 sparse_max_features=80000,
                 sparse_min_occurences=5,
                 calc_prob="sigmoid",
                 n_jobs=-1,
                 verbose=0):
        if not utils.Config().FASTRGF_AVAILABLE:
            raise Exception('FastRGF estimators are unavailable for usage.')
        super().__init__()
        # Plain attribute assignment only, per the scikit-learn convention
        # that __init__ stores constructor arguments unmodified.
        self.n_estimators = n_estimators
        self.max_depth = max_depth
        self.max_leaf = max_leaf
        self.tree_gain_ratio = tree_gain_ratio
        self.min_samples_leaf = min_samples_leaf
        self.loss = loss
        self.l1 = l1
        self.l2 = l2
        self.opt_algorithm = opt_algorithm
        self.learning_rate = learning_rate
        self.max_bin = max_bin
        self.min_child_weight = min_child_weight
        self.data_l2 = data_l2
        self.sparse_max_features = sparse_max_features
        self.sparse_min_occurences = sparse_min_occurences
        self.calc_prob = calc_prob
        self.n_jobs = n_jobs
        self.verbose = verbose

    def _set_target_and_loss(self):
        # Classification is handled as (possibly one-vs-rest) binary tasks.
        self._target = "BINARY"
        self._loss = self.loss

    # Template values substituted into the shared docstring template below.
    _classifier_specific_values = {
        '{%estimator_type%}': 'classifier',
        '{%loss_parameter%}': """
    loss : string ("LS" or "MODLS" or "LOGISTIC"), optional (default="LS")
        Loss function.
        LS: Least squares loss.
        MODLS: Modified least squares loss.
        LOGISTIC: Logistic loss.
        (Original name: dtree.loss.)
    """,
        '{%calc_prob_parameter%}': """
    calc_prob : string ("sigmoid" or "softmax"), optional (default="sigmoid")
        Method of probability calculation.
    """,
        '{%estimators_property_type_desc%}': 'list of binary classifiers',
        '{%classes_property%}': """
    classes_ : array of shape = [n_classes]
        The classes labels when `fit` is performed.
    """,
        '{%n_classes_property%}': """
    n_classes_ : int
        The number of classes when `fit` is performed.
    """
    }
    __doc__ = fastrgf_estimator_docstring_template
    for _template, _value in _classifier_specific_values.items():
        __doc__ = __doc__.replace(_template, _value)
class FastRGFExecuter(utils.CommonRGFExecuterBase):
    # Thin wrapper translating estimator parameters into command-line
    # arguments for the external FastRGF executables. Attribute values
    # such as self.n_estimators, self._train_x_loc etc. are presumably set
    # by the CommonRGFExecuterBase constructor from the forwarded params
    # dict — confirm against rgf.utils.
    def _save_sparse_X(self, path, X):
        # FastRGF's sparse input format has no header line.
        utils.sparse_savetxt(path, X, including_header=False)

    def _save_dense_files(self, X, y, sample_weight):
        # Dense input is a single combined file: [weights,] targets, features
        # per row (matching the "w.y.x"/"y.x" file formats requested in
        # _get_train_command).
        # NOTE(review): the [:-2] appears to strip a 2-char suffix from the
        # prepared path — confirm against CommonRGFExecuterBase's naming.
        self._train_x_loc = self._train_x_loc[:-2]
        if self._use_sample_weight:
            arrs = (sample_weight, y, X)
        else:
            arrs = (y, X)
        np.savetxt(self._train_x_loc, np.c_[arrs], delimiter=' ', fmt="%s")

    def _get_train_command(self):
        """Build the argv list for the FastRGF training executable."""
        params = []
        params.append("forest.ntrees=%s" % self.n_estimators)
        params.append("forest.stepsize=%s" % self.learning_rate)
        params.append("forest.opt=%s" % self.opt_algorithm)
        params.append("dtree.max_level=%s" % self.max_depth)
        params.append("dtree.max_nodes=%s" % self.max_leaf)
        params.append("dtree.new_tree_gain_ratio=%s" % self.tree_gain_ratio)
        params.append("dtree.min_sample=%s" % self.min_samples_leaf)
        params.append("dtree.loss=%s" % self.loss)
        params.append("dtree.lamL1=%s" % self.l1)
        params.append("dtree.lamL2=%s" % self.l2)
        # Discretization options and file formats differ for sparse vs.
        # dense training data.
        if self._is_sparse_train_X:
            params.append("discretize.sparse.max_features=%s" % self.sparse_max_features)
            params.append("discretize.sparse.max_buckets=%s" % self.max_bin)
            params.append("discretize.sparse.lamL2=%s" % self.data_l2)
            params.append("discretize.sparse.min_bucket_weights=%s" % self.min_child_weight)
            params.append("discretize.sparse.min_occurrences=%s" % self.sparse_min_occurences)
            params.append("trn.x-file_format=x.sparse")
            params.append("trn.y-file=%s" % self._train_y_loc)
            if self._use_sample_weight:
                params.append("trn.w-file=%s" % self._train_weight_loc)
        else:
            params.append("discretize.dense.max_buckets=%s" % self.max_bin)
            params.append("discretize.dense.lamL2=%s" % self.data_l2)
            params.append("discretize.dense.min_bucket_weights=%s" % self.min_child_weight)
            if self._use_sample_weight:
                fmt = "w.y.x"
            else:
                fmt = "y.x"
            params.append("trn.x-file_format=%s" % fmt)
        params.append("trn.x-file=%s" % self._train_x_loc)
        params.append("trn.target=%s" % self.target)
        params.append("set.nthreads=%s" % self.n_jobs)
        params.append("set.verbose=%s" % self.verbose)
        params.append("model.save=%s" % self._model_file_loc)
        cmd = [os.path.join(self.config.FASTRGF_PATH,
                            self.config.FASTRGF_TRAIN_EXECUTABLE_FILE)]
        cmd.extend(params)
        return cmd

    def _find_model_file(self):
        # Unlike RGF, FastRGF writes exactly one model file.
        if not os.path.isfile(self._model_file_loc):
            raise Exception('Model learning result is not found in {0}. '
                            'Training is abnormally finished.'.format(self.config.TEMP_PATH))
        self._model_file = self._model_file_loc

    def _get_test_command(self, is_sparse_test_X):
        """Build the argv list for the FastRGF prediction executable."""
        params = []
        params.append("model.load=%s" % self._model_file)
        params.append("tst.x-file=%s" % self._test_x_loc)
        if is_sparse_test_X:
            params.append("tst.x-file_format=x.sparse")
            params.append("tst.target=BINARY")
        params.append("tst.output-prediction=%s" % self._pred_loc)
        params.append("set.nthreads=%s" % self.n_jobs)
        params.append("set.verbose=%s" % self.verbose)
        cmd = [os.path.join(self.config.FASTRGF_PATH,
                            self.config.FASTRGF_PREDICT_EXECUTABLE_FILE)]
        cmd.extend(params)
        return cmd
from rgkit.render.settings import settings as render_settings
from rgkit.render.utils import rgb_to_hex, blend_colors, compute_color
class HighlightSprite(object):
    """Draws the highlight square under the cursor and, optionally, a
    second square on the highlighted robot's action target."""

    def __init__(self, loc, target, render):
        self.location = loc
        self.target = target
        self.renderer = render
        self.hlt_square = None
        self.target_square = None

    def get_bot_color(self, loc):
        """Return the display color of the robot at ``loc``, or None if
        no robot stands there on the currently displayed turn."""
        turn = self.renderer.current_turn_int()
        state = self.renderer._game.get_state(turn)
        if not state.is_robot(loc):
            return None
        robot = state.robots[loc]
        action = self.renderer._game.get_actions_on_turn(turn)[loc]['name']
        return compute_color(robot.player_id, robot.hp, action)

    def get_mixed_color(self, color, loc):
        """Blend ``color`` with the robot color at ``loc`` (if any) and
        return the result as a hex string."""
        bot_color = self.get_bot_color(loc)
        if bot_color is None:
            return rgb_to_hex(*color)
        return rgb_to_hex(*blend_colors(color, bot_color, 0.7))

    def clear_target_square(self):
        self.renderer.remove_object(self.target_square)
        self.target_square = None

    def clear(self):
        self.renderer.remove_object(self.hlt_square)
        self.hlt_square = None
        self.clear_target_square()

    def animate(self, delta=0):
        if render_settings.highlight_cursor_blink:
            # Past the blink interval: hide the cursor until the next cycle.
            if delta >= render_settings.highlight_cursor_blink_interval:
                self.clear()
                return
        if self.location is not None:
            if self.hlt_square is None:
                fill = self.get_mixed_color(render_settings.highlight_color,
                                            self.location)
                self.hlt_square = self.renderer.draw_grid_object(
                    self.location, fill=fill, layer=3, width=0)
            if self.renderer.show_arrows.get():
                # Arrows already show the target; drop our target square.
                self.clear_target_square()
            elif self.target is not None and self.target_square is None:
                fill = self.get_mixed_color(render_settings.target_color,
                                            self.target)
                self.target_square = self.renderer.draw_grid_object(
                    self.target, fill=fill, layer=3, width=0)
"""Evaluate a review graph mining algorithm with the amazon dataset.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import, division
import logging
from logging import getLogger
import sys
import dsargparse
import amazon
LOGGER = getLogger(__name__)
#--------------------------
# Loading algorithms
#--------------------------
# Each optional algorithm package is imported in a try/except so that the
# evaluation script still runs with any subset of them installed; installed
# ones register a graph-constructor under their CLI name.
ALGORITHMS = {}
"""Dictionary of graph loading functions associated with installed algorithms.
"""

# Load and register RIA.
try:
    import ria
except ImportError:
    LOGGER.info("rgmining-ria is not installed.")
else:
    def ignore_args(func):
        """Returns a wrapped function which ignore given arguments."""
        def _(*_args):
            """The function body."""
            return func()
        return _
    # The one/onesum/mra constructors take no arguments, so wrap them to
    # tolerate the CLI-provided kwargs.
    ALGORITHMS["ria"] = ria.ria_graph
    ALGORITHMS["one"] = ignore_args(ria.one_graph)
    ALGORITHMS["onesum"] = ignore_args(ria.one_sum_graph)
    ALGORITHMS["mra"] = ignore_args(ria.mra_graph)

# Load and register RSD.
try:
    import rsd  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-rsd is not installed.")
else:
    ALGORITHMS["rsd"] = rsd.ReviewGraph

# Load and register Fraud Eagle.
try:
    import fraud_eagle  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-fraud-eagle is not installed.")
else:
    ALGORITHMS["feagle"] = fraud_eagle.ReviewGraph

# Load and register FRAUDAR.
try:
    import fraudar  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-fraudar is not installed.")
else:
    def create_fraudar_graph(nblock=1):
        """Create a review graph defined in Fraud Eagle package.
        """
        return fraudar.ReviewGraph(int(nblock))
    ALGORITHMS["fraudar"] = create_fraudar_graph
#--------------------------
def run(method, loop, threshold, output, param):
    """Run a given algorithm with the Amazon dataset.

    Runs a given algorithm and outputs anomalous scores and summaries after
    each iteration finishes. The function ends when a given number of loops
    is reached or the update of one iteration becomes smaller than a given
    threshold.

    Some algorithms require a set of parameters. For example, feagle requires
    parameter `epsilon`. Argument `param` specifies those parameters, and
    if you want to set 0.1 to the `epsilon`, pass `epsilon=0.1` via the
    argument.

    Args:
      method: name of algorithm.
      loop: the number of iteration (default: 20).
      threshold: threshold to judge an update is negligible (default: 10^-3).
      output: writable object where the output will be written.
      param: list of key and value pair which are connected with "=".
    """
    # Split only on the first "=" so parameter values may contain "=".
    kwargs = {key: float(value)
              for key, value in (v.split("=", 1) for v in param)}
    g = ALGORITHMS[method](**kwargs)
    amazon.load(g)
    amazon.print_state(g, 0, output)

    # Updates
    logging.info("Start iterations.")
    # Algorithms whose name starts with "one" converge in a single pass.
    # NOTE: `xrange` was Python-2-only and raised NameError on Python 3.
    for i in range(loop if not method.startswith("one") else 1):
        diff = g.update()
        if diff is not None and diff < threshold:
            break

        # Current summary
        logging.info("Iteration %d ends. (diff=%s)", i + 1, diff)
        amazon.print_state(g, i + 1, output)

    # Print final state.
    amazon.print_state(g, "final", output)
def main():
    """The main function.

    Builds the command line parser, parses arguments and delegates to
    :func:`run`. Exits with status 1 when no algorithm is installed.
    """
    if not ALGORITHMS:
        logging.error("No algorithms are installed.")
        sys.exit(1)
    parser = dsargparse.ArgumentParser(main=main)
    parser.add_argument("method", choices=sorted(ALGORITHMS.keys()))
    parser.add_argument(
        "--output", default=sys.stdout,
        type=dsargparse.FileType("w"),  # pylint: disable=no-member
        help="file path to store results (Default: stdout).")
    parser.add_argument("--loop", type=int, default=20)
    # NOTE: the default was previously written as `10^-3`, which Python
    # evaluates as bitwise XOR (10 ^ -3 == -9), not ten to the minus three.
    # The intended default threshold is 0.001.
    parser.add_argument("--threshold", type=float, default=1e-3)
    parser.add_argument(
        "--param", action="append", default=[],
        help=(
            "key and value pair which are connected with '='.\n"
            "This option can be set multiply."))
    run(**vars(parser.parse_args()))
if __name__ == "__main__":
    # Log to stderr so that result records written to stdout stay parseable.
    logging.basicConfig(level=logging.INFO, stream=sys.stderr)
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to end the run quietly.
        pass
    except Exception:  # pylint: disable=broad-except
        logging.exception("Untracked exception occurred.")
    finally:
        # Flush and close all logging handlers before the process exits.
        logging.shutdown()
from __future__ import division
import datetime
import json
from os.path import exists, join
import site
import sys
import zipfile
_DATE_FORMAT = "%B %d, %Y"
"""Data format in the dataset.
"""
CATEGORIES = [
"cameras", "laptops", "mobilephone", "tablets", "TVs", "video_surveillance"]
"""Categories this dataset has.
"""
def load(graph, categories=None):
    """Load the Amazon dataset to a given graph object.

    The graph object must implement the
    :ref:`graph interface <dataset-io:graph-interface>`.

    If a list of categories is given, only reviews which belong to one of the
    given categories are added to the graph.

    Args:
      graph: an instance of bipartite graph.
      categories: optional list or tuple of category names (see
        :data:`CATEGORIES`); when omitted, every category is loaded.
        NOTE(review): a plain string is not converted to a list here, so it
        is matched with substring semantics below -- confirm before relying
        on string input.

    Returns:
      The graph instance *graph*.
    """
    if categories and isinstance(categories, (list, tuple)):
        categories = list(categories)

    # Look for the archive in the working directory first, then in the
    # locations used by system-wide and per-user installations.
    base = "AmazonReviews.zip"
    path = join(".", base)
    if not exists(path):
        path = join(sys.prefix, "rgmining", "data", base)
    if not exists(path):
        path = join(sys.prefix, "local", "rgmining", "data", base)
    if not exists(path):
        path = join(site.getuserbase(), "rgmining", "data", base)

    R = {}  # Reviewers dict: maps reviewer name -> reviewer object.
    with zipfile.ZipFile(path) as archive:
        for info in archive.infolist():
            if not info.file_size:
                # Skip directories and empty entries.
                continue
            if categories:
                # The first path component of each entry is its category.
                category = info.filename.split("/")[0]
                if category not in categories:
                    continue
            with archive.open(info) as fp:
                obj = json.load(fp)
            target = obj["ProductInfo"]["ProductID"]
            # To prevent adding product without any reviews,
            # create a product object before adding at least one review.
            product = None
            for r in obj["Reviews"]:
                name = r["ReviewID"]
                try:
                    # Normalize the five-star rating into [0, 1].
                    score = (float(r["Overall"]) - 1) / 4
                except ValueError:
                    continue
                date = r["Date"]
                if date:
                    try:
                        # Convert e.g. "August 21, 2000" into "20000821".
                        date = datetime.datetime.strptime(
                            r["Date"], _DATE_FORMAT).strftime("%Y%m%d")
                    except ValueError:
                        # Keep the raw date string when parsing fails.
                        pass
                if not product:
                    product = graph.new_product(name=target)
                if name not in R:
                    R[name] = graph.new_reviewer(name=name)
                graph.add_review(R[name], product, score, date)
    return graph
def print_state(g, i, output=sys.stdout):
    """Write the current state of a graph as JSON lines.

    One JSON object per line is emitted for every reviewer and every
    product in the graph. The graph must expose ``reviewers`` and
    ``products`` collections (see the
    :ref:`graph interface <dataset-io:graph-interface>`).

    A reviewer line looks like::

        {"iteration": i,
         "reviewer": {"reviewer_id": ..., "score": ...}}

    and a product line looks like::

        {"iteration": i,
         "product": {"product_id": ..., "summary": ...}}

    Args:
      g: Graph instance.
      i: Iteration number.
      output: A writable object (default: sys.stdout).
    """
    for reviewer in g.reviewers:
        record = {
            "iteration": i,
            "reviewer": {
                "reviewer_id": reviewer.name,
                "score": reviewer.anomalous_score
            }
        }
        json.dump(record, output)
        output.write("\n")
    for product in g.products:
        record = {
            "iteration": i,
            "product": {
                "product_id": product.name,
                "summary": float(str(product.summary))
            }
        }
        json.dump(record, output)
        output.write("\n")
"""Mixins for algebraic classes.
"""
import copy
import json
class ImmutableAdditiveGroup(object):
    """Mixin deriving subtraction and inequality for an additive group.

    Subclasses provide ``__add__``, ``__neg__`` and ``__eq__``; this mixin
    derives ``__sub__`` (as ``a + (-b)``) and ``__ne__`` from them.
    """

    def __add__(self, _):
        raise NotImplementedError("Subclasses must implement __add__.")

    def __neg__(self):
        raise NotImplementedError("Subclasses must implement __neg__.")

    def __eq__(self, _):
        raise NotImplementedError("Subclasses must implement __eq__.")

    def __sub__(self, other):
        # a - b is defined as a + (-b).
        return self + (-other)

    def __ne__(self, other):
        # Inequality is the negation of the subclass-provided equality.
        return not self == other
class MultipliableImmutableAdditiveGroup(ImmutableAdditiveGroup):
    """Additive group with scalar multiplication.

    Subclasses provide ``__add__``, ``__rmul__`` and ``__eq__``; division
    by a number and negation are derived from scalar multiplication.
    """

    def __rmul__(self, _):
        # The left operand must be a number.
        raise NotImplementedError("Subclasses must implement __rmul__")

    def __div__(self, value):
        # Python 2 name for the / operator; value must be a number.
        return (1. / value) * self

    # Python 3 resolves the / operator via __truediv__; reuse the same
    # logic so division keeps working on modern interpreters.
    __truediv__ = __div__

    def __neg__(self):
        # -x is (-1) * x, delegating to the subclass's __rmul__.
        return -1 * self
class JSONable(object):
    """Mixin giving objects a JSON string representation.

    Subclasses implement ``__json__`` returning a JSON-serializable
    object; ``str()`` then renders it with :func:`json.dumps`.
    """

    def __json__(self):
        raise NotImplementedError("Subclasses must implement __json__")

    def __str__(self):
        payload = self.__json__()
        return json.dumps(payload)
class ImmutableMathHelper(object):
    """Derive subtraction, negation and inequality for immutable values.

    Subclasses supply ``__add__``, ``__eq__`` and scalar multiplication
    (``__rmul__``); the mixin derives ``__sub__`` (a + (-b)), ``__neg__``
    ((-1) * a) and ``__ne__``.
    """

    def __add__(self, _):
        raise NotImplementedError("Subclasses must implement __add__.")

    def __eq__(self, _):
        raise NotImplementedError("Subclasses must implement __eq__.")

    def __sub__(self, other):
        # a - b == a + (-b); -other triggers __neg__ below.
        return self.__add__(-other)

    def __neg__(self):
        # Delegates to the subclass's scalar multiplication.
        return -1 * self

    def __ne__(self, other):
        return not self.__eq__(other)
class MathHelper(ImmutableMathHelper):
    """Derive out-of-place operators from in-place ones.

    Subclasses supply the in-place operators (``__iadd__``, ``__imul__``,
    in-place division), ``__neg__``, ``__eq__`` and support
    ``copy.deepcopy``; this mixin derives the corresponding out-of-place
    operators by deep-copying the receiver and mutating the copy.
    """

    def __iadd__(self, _):
        raise NotImplementedError("Subclasses must implement __iadd__.")

    def __add__(self, other):
        # Copy first so the receiver stays untouched.
        res = copy.deepcopy(self)
        res += other
        return res

    def __isub__(self, other):
        # In-place subtraction is in-place addition of the negation.
        self.__iadd__(-other)
        return self

    def __mul__(self, other):
        res = copy.deepcopy(self)
        res *= other
        return res

    def __rmul__(self, other):
        # Scalar multiplication commutes.
        return self.__mul__(other)

    def __div__(self, other):
        # Python 2 name for the / operator.
        res = copy.deepcopy(self)
        res /= other
        return res

    # Python 3 resolves the / operator via __truediv__; reuse the same
    # implementation so division keeps working on modern interpreters.
    __truediv__ = __div__
import gc
from functools import wraps
import sys
def print_args(output=sys.stdout):
    """Create a decorator printing positional and keyword arguments.

    The wrapped function behaves exactly like the original, except that
    every call first writes its arguments to *output*.

    Args:
      output: writable to print args. (Default: sys.stdout)
    """
    def decorator(func):
        """Wrap *func* with the argument-printing behaviour."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            """Report the arguments, then delegate to the wrapped function."""
            line = "Args: {0}, KwArgs: {1}\n".format(str(args), str(kwargs))
            output.write(line)
            return func(*args, **kwargs)
        return wrapper
    return decorator
def print_return(output=sys.stdout):
    """Create a decorator printing a function's return value.

    The wrapped function behaves exactly like the original, except that
    every result is also written to *output*.

    Args:
      output: writable to print return values. (Default: sys.stdout)
    """
    def decorator(func):
        """Wrap *func* with the result-printing behaviour."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            """Call the wrapped function, then report its result."""
            result = func(*args, **kwargs)
            output.write("Return: {0}\n".format(str(result)))
            return result
        return wrapper
    return decorator
def constant(func):
    """Decorate a function so that the result is a constant value.

    Functions wrapped by this decorator will be run just one time.
    The computational result will be stored and reused for any other input.
    To store each result for each input, use :func:`memoized` instead.

    Args:
      func: function to wrap; it is invoked at most once.

    Returns:
      The wrapping function.
    """
    # Sentinel distinguishing "not computed yet" from a falsy result such
    # as 0, "" or None; the previous `if not _.res` test recomputed those
    # on every call.
    unset = object()

    @wraps(func)
    def _(*args, **kwargs):
        """The decorated function."""
        if _.res is unset:
            _.res = func(*args, **kwargs)
        return _.res
    _.res = unset
    return _
def memoized(func):
    """Decorate a function to memoize results.

    Functions wrapped by this decorator won't compute twice for each input.
    All results are stored, which may trade memory for computation time.
    When storing a result raises :class:`MemoryError`, the whole cache is
    dropped and garbage collection is forced so the program can continue.
    """
    results = {}

    @wraps(func)
    def wrapper(*args):
        """Look up the cached result, computing it on the first call."""
        if args in results:
            return results[args]
        value = func(*args)
        try:
            results[args] = value
        except MemoryError:
            # Storing failed: sacrifice the cache rather than crash.
            results.clear()
            gc.collect()
        return value
    return wrapper
"""Provide helper functions and classes.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
try:
from itertools import imap, ifilter
except ImportError:
imap = map
ifilter = filter
import json
import sys
from functools import wraps
from collections import namedtuple
from dataset_io.constants import PRODUCT_ID
from dataset_io.constants import REVIEWER_ID
Reviewer = namedtuple("Reviewer", (REVIEWER_ID, "score"))
"""Named tuple to access reviewer's attributes easily.
"""
Product = namedtuple("Product", (PRODUCT_ID, "summary"))
"""Named tuple to access product's attribute easily.
"""
def quiet(f):
    """Wrap *f* so that a ValueError yields None instead of propagating.

    Args:
      f: A function

    Returns:
      A decorated function which ignores ValueError and returns None
      when such exceptions happen.
    """
    @wraps(f)
    def silenced(*args, **kwargs):
        """Run the wrapped function, swallowing ValueError."""
        try:
            result = f(*args, **kwargs)
        except ValueError:
            return None
        return result
    return silenced
def convert_date(date):
    """Convert a date-like value to an int of the form YYYYMMDD.

    For example, date `2016-01-02` is converted to integer `20160102`.

    Args:
      date: date to convert; anything whose string form is ISO-like.

    Returns:
      Int-type date data.
    """
    digits = str(date).replace("-", "")
    return int(digits)
def normalize_rating(v):
    """Map a five-star rating in [1, 5] onto [0, 1].

    Args:
      v: rating which is between 1 to 5

    Returns:
      Normalized rating data between 0 to 1
    """
    shifted = v - 1.0
    return shifted / 4.0
def print_state(g, i, output=sys.stdout):
    """Write the current state of a graph as one JSON object per line.

    The graph must expose ``reviewers`` and ``products`` collections.
    Reviewer lines have the form::

        {"iteration": i, "reviewer": {"reviewer_id": ..., "score": ...}}

    and product lines have the form::

        {"iteration": i, "product": {"product_id": ..., "summary": ...}}

    where the summary is denormalized back onto the five-star scale.

    Args:
      g: Graph instance.
      i: Iteration number.
      output: A writable object (default: sys.stdout).
    """
    for reviewer in g.reviewers:
        json.dump({
            "iteration": i,
            "reviewer": {
                REVIEWER_ID: reviewer.name,
                "score": reviewer.anomalous_score
            }
        }, output)
        output.write("\n")
    for product in g.products:
        # Undo normalize_rating: scale [0, 1] back onto [1, 5].
        summary = float(str(product.summary)) * 4. + 1
        json.dump({
            "iteration": i,
            "product": {
                PRODUCT_ID: product.name,
                "summary": summary
            }
        }, output)
        output.write("\n")
def parse_state(fp, reviewer_handler=None, product_handler=None, iteration="final"):
    """Parse a graph state produced by :func:`print_state`.

    Each line of *fp* is parsed as JSON; lines that fail to parse are
    skipped. For every record whose iteration matches *iteration*, the
    matching callback is invoked:

    * ``reviewer_handler(iteration, reviewer)`` where *reviewer* is a
      :class:`Reviewer` with ``reviewer_id`` and ``score`` attributes,
    * ``product_handler(iteration, product)`` where *product* is a
      :class:`Product` with ``product_id`` and ``summary`` attributes
      (the summary is re-normalized into [0, 1]).

    Callbacks left as ``None`` are not invoked.

    Args:
      fp: An iterable object containing state data.
      reviewer_handler: A callback for reviewer (default: None).
      product_handler: A callback for product (default: None).
      iteration: Choose iteration to be parsed (default: 'final').
    """
    wanted = str(iteration)
    for item in ifilter(bool, imap(quiet(json.loads), fp)):
        if not isinstance(item, dict) or "iteration" not in item:
            continue
        if str(item["iteration"]) != wanted:
            continue
        if reviewer_handler and "reviewer" in item:
            reviewer_handler(iteration, Reviewer(**item["reviewer"]))
        elif product_handler and "product" in item:
            info = item["product"]
            product_handler(iteration, Product(
                info["product_id"], normalize_rating(info["summary"])))
import csv
from os import path
from numpy import random
_DATA_FILE = "rating.csv"
def _fullpath(filename):
"""Compute the full path of a given filename.
Args:
filename: Filename.
Returns:
The full path of the given filename.
"""
return path.join(path.dirname(__file__), filename)
class UniformSampler(object):
    """Sampling review scores from a uniform distribution.

    Each draw is one of the five normalized star ratings
    {0, 0.25, 0.5, 0.75, 1}, chosen uniformly at random.

    This sampler is a callable object. To generate random ratings,

    .. code-block:: python

       sampler = UniformSampler()
       for rating in sampler():
           # use the rating.

    Note that in the above example, sampler never ends and break is required to
    stop the generation.
    """
    __slots__ = ()

    def __call__(self):
        """Create a iterator returns sampled values.

        Yields:
          A normalized uniform random review score.
        """
        while True:
            # numpy's randint excludes the upper bound, so covering all five
            # star levels {0,...,4} requires high=5; the former high=4 could
            # never yield a normalized five-star rating of 1.0.
            yield random.randint(0, 5) / 4.
class RatingBasedSampler(object):
    """Sampling review scores from a distribution based on actual reviews.

    This sampler generates ratings from a rating distribution computed from
    a real dataset provided by Amazon.com.
    According to the dataset, the distribution is the followings;

    .. csv-table::
       :header: rating, the number of reviews

       1, 167137
       2, 122025
       3, 189801
       4, 422698
       5, 1266919

    This sampler is a callable object. To generate random ratings,

    .. code-block:: python

       sampler = RatingBasedSampler()
       for rating in sampler():
           # use the rating.

    Note that in the above example, sampler never ends and break is required
    to stop the generation.
    """
    __slots__ = ("_dist", "_keys")

    def __init__(self):
        # _dist maps a star rating (int 1..5) to its empirical probability.
        self._dist = {}
        with open(_fullpath(_DATA_FILE)) as fp:
            for k, v in csv.reader(fp):
                self._dist[int(k)] = float(v)
        # Normalize raw counts into probabilities summing to 1.
        total = sum(self._dist.values())
        for k in self._dist:
            self._dist[k] /= total
        # Ratings sorted ascending so the CDF walk below is ordered.
        self._keys = sorted(self._dist.keys())

    def __call__(self):
        """Create a iterator returns sampled values.

        Yields:
          A random review score drawn from the empirical distribution,
          normalized into [0, 1].
        """
        while True:
            # Inverse-transform sampling: walk the CDF until the uniform
            # draw U falls inside the current rating's probability mass.
            U = random.random()
            for k in self._keys:
                U -= self._dist[k]
                if U < 0:
                    yield (k - 1) / 4.
"""Load review data formatted in JSON to a graph object.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import
import json
try:
from itertools import imap, ifilter
except ImportError:
imap = map
ifilter = filter
from dataset_io.helper import convert_date
from dataset_io.helper import normalize_rating
from dataset_io.helper import quiet
from dataset_io.constants import PRODUCT_ID
from dataset_io.constants import MEMBER_ID
def load(g, fp, anomalous=None, normalize=normalize_rating):
    """Load a review dataset to a given graph object.

    The graph object must implement the :ref:`graph-interface` i.e.
    it must have the following methods:

    new_reviewer(name, anomalous)
      create and register a new reviewer which has a given `name` and
      be initialized by a given `anomalous` score,
    new_product(name)
      create and register a new product which has a given `name`,
    find_reviewer(name)
      find and return a reviewer which has given `name`,
    find_product(name)
      find and return a product which has given `name`,
    add_review(self, reviewer, product, review, date)
      add a new review from `reviewer` to `product` issued in `date`,
      in which the review is a float value.

    and must have the following properties:

    reviewers (readable)
      a set of reviewers,
    products (readable)
      a set of products.

    `fp` is an iterative object which yields a JSON string representing a review.
    Each review must have the following elements::

        {
            "member_id": "A1AF30H2MPOO9",
            "product_id": "0001056530",
            "rating": 4.0,
            "date": "2000-08-21"
        }

    where `member_id` is a reviewer's id, i.e. name, `product_id` is a product's
    id which the reviewer posts a review. Rating is a five-star score for the
    product. Date is the date the review issued.

    Args:
      g: graph object where loaded review data are stored.
      fp: readable object containing JSON data of a loading table.
      anomalous: default anomalous scores (Default: None).
      normalize: normalize function of rating scores; if set None, scores are
        not normalized.

    Returns:
      The graph instance, which is as same as *g*.
    """
    if not normalize:
        # Identity mapping keeps raw five-star scores.
        normalize = lambda v: v
    # Index the reviewers and products already present in the graph so
    # repeated names reuse the existing nodes instead of creating new ones.
    reviewers = {r.name: r for r in g.reviewers}
    products = {p.name: p for p in g.products}
    # Lines that are not valid JSON are silently skipped (quiet + ifilter).
    for review in ifilter(bool, imap(quiet(json.loads), fp)):
        member_id = review[MEMBER_ID]
        product_id = review[PRODUCT_ID]
        rating = normalize(review["rating"])
        date = convert_date(review["date"])
        if member_id in reviewers:
            r = reviewers[member_id]
        else:
            r = g.new_reviewer(name=member_id, anomalous=anomalous)
            reviewers[member_id] = r
        if product_id in products:
            p = products[product_id]
        else:
            # NOTE(review): the product is registered under the stripped name
            # but cached under the raw product_id -- confirm this asymmetry
            # is intended.
            p = g.new_product(name=product_id.strip())
            products[product_id] = p
        g.add_review(r, p, rating, date)
    return g
from fraud_eagle.labels import ProductLabel, ReviewLabel, UserLabel
# pylint: disable=too-many-branches
def psi(u_label: UserLabel, p_label: ProductLabel, r_label: ReviewLabel, epsilon: float) -> float:
    """Likelihood of a user/product pair given the review between them.

    The likelihood :math:`\\psi_{ij}^{s}` depends on the review sign
    :math:`s \\in \\{+, -\\}` and the parameter :math:`\\epsilon`.

    For a :data:`PLUS<fraud_eagle.labels.ReviewLabel.PLUS>` review,

    .. csv-table::
        :header: review: +, Product: Good, Product: Bad

        User: Honest, 1 - :math:`\\epsilon`, :math:`\\epsilon`
        User: Fraud, 2 :math:`\\epsilon`, 1 - 2 :math:`\\epsilon`

    For a :data:`MINUS<fraud_eagle.labels.ReviewLabel.MINUS>` review,

    .. csv-table::
        :header: review: -, Product: Good, Product: Bad

        User: Honest, :math:`\\epsilon`, 1 - :math:`\\epsilon`
        User: Fraud, 1 - 2 :math:`\\epsilon`, 2 :math:`\\epsilon`

    Args:
        u_label: user label which must be one of the { \
            :data:`UserLabel.HONEST<fraud_eagle.labels.UserLabel.HONEST>`, \
            :data:`UserLabel.FRAUD<fraud_eagle.labels.UserLabel.FRAUD>`}.
        p_label: product label which must be one of the \
            {:data:`ProductLabel.GOOD<fraud_eagle.labels.ProductLabel.GOOD>`, \
            :data:`ProductLabel.BAD<fraud_eagle.labels.ProductLabel.BAD>`}.
        r_label: review label which must be one of the \
            {:data:`ReviewLabel.PLUS<fraud_eagle.labels.ReviewLabel.PLUS>`, \
            :data:`ReviewLabel.MINUS<fraud_eagle.labels.ReviewLabel.MINUS>`}.
        epsilon: a float parameter in :math:`[0,1]`.

    Returns:
        Float value representing a likelihood of the given values.

    Raises:
        ValueError: if the combination of labels is not recognized.
    """
    # Both tables from the docstring folded into one lookup keyed by
    # (review, user, product).
    table = {
        (ReviewLabel.PLUS, UserLabel.HONEST, ProductLabel.GOOD): 1 - epsilon,
        (ReviewLabel.PLUS, UserLabel.HONEST, ProductLabel.BAD): epsilon,
        (ReviewLabel.PLUS, UserLabel.FRAUD, ProductLabel.GOOD): 2 * epsilon,
        (ReviewLabel.PLUS, UserLabel.FRAUD, ProductLabel.BAD): 1 - 2 * epsilon,
        (ReviewLabel.MINUS, UserLabel.HONEST, ProductLabel.GOOD): epsilon,
        (ReviewLabel.MINUS, UserLabel.HONEST, ProductLabel.BAD): 1 - epsilon,
        (ReviewLabel.MINUS, UserLabel.FRAUD, ProductLabel.GOOD): 1 - 2 * epsilon,
        (ReviewLabel.MINUS, UserLabel.FRAUD, ProductLabel.BAD): 2 * epsilon,
    }
    likelihood = table.get((r_label, u_label, p_label))
    if likelihood is None:
        raise ValueError("arguments are invalid")
    return likelihood
"""Define prior beliefs of users and products.
"""
from typing import Final
import numpy as np
from fraud_eagle.labels import ProductLabel, UserLabel
_LOG_2: Final = float(np.log(2.0))
"""Precomputed value, the logarithm of 2.0."""
def phi_u(_u_label: UserLabel) -> float:
    """Logarithm of the prior belief of a user label.

    The prior :math:`\\phi_{i}^{\\cal{U}} : \\cal{L}_{\\cal{U}} \\rightarrow
    \\mathbb{R}_{\\geq 0}` assigns every user label the same mass, namely the
    size of the label set :math:`\\|\\cal{L}_{\\cal{U}}\\| =
    \\|\\{honest, fraud\\}\\| = 2`, so this function returns :math:`\\log 2`
    for every input.

    Args:
        _u_label: User label (ignored; the prior is uniform).

    Returns:
        :math:`\\log 2`, whatever the given user label is.
    """
    return _LOG_2
def phi_p(_p_label: ProductLabel) -> float:
    """Logarithm of the prior belief of a product label.

    The prior :math:`\\phi_{j}^{\\cal{P}} : \\cal{L}_{\\cal{P}} \\rightarrow
    \\mathbb{R}_{\\geq 0}` assigns every product label the same mass, namely
    the size of the label set :math:`\\|\\cal{L}_{\\cal{P}}\\| =
    \\|\\{good, bad\\}\\| = 2`, so this function returns :math:`\\log 2`
    for every input.

    Args:
        _p_label: Product label (ignored; the prior is uniform).

    Returns:
        :math:`\\log 2`, whatever the given product label is.
    """
    return _LOG_2
"""Provide a bipartite graph class implementing Fraud Eagle algorithm.
"""
from functools import lru_cache
from logging import getLogger
from typing import Any, Final, Optional, cast
import networkx as nx
import numpy as np
from fraud_eagle.labels import ProductLabel, ReviewLabel, UserLabel
from fraud_eagle.likelihood import psi
from fraud_eagle.prior import phi_p, phi_u
_LOGGER: Final = getLogger(__name__)
"""Logging object."""
_LOG_POINT_5: Final = float(np.log(0.5))
"""Precomputed value, the logarithm of 0.5."""
def _logaddexp(x1: float, x2: float) -> float:
"""Wrapper of np.logaddexp to solve a type problem."""
return cast(float, np.logaddexp(x1, x2))
class Node:
    """A node of the bipartite review graph.

    A node is identified by its name and keeps a back reference to the
    graph that owns it, so both are required to construct one.

    Args:
        graph: reference of the parent graph.
        name: name of this node.
    """

    # Reference of the parent graph.
    graph: Final["ReviewGraph"]
    # Name of this node.
    name: Final[str]

    __slots__ = ("graph", "name")

    def __init__(self, graph: "ReviewGraph", name: str) -> None:
        self.graph = graph
        self.name = name

    def __hash__(self) -> int:
        """Hash derived from the node's concrete type and its name."""
        return hash(type(self)) * 13 + hash(self.name) * 17

    def __str__(self) -> str:
        """Returns the name of this node."""
        return self.name
class Reviewer(Node):
    """Reviewer node in ReviewGraph.

    Each reviewer has an anomalous_score property. In Fraud Eagle, we use the
    belief that this reviewer is a fraud reviewer as the anomalous score.
    The belief is defined as

    .. math::

       b(y_{i}) = \\alpha_{2} \\phi^{\\cal{U}}_{i}(y_{i})
       \\prod_{Y_{j} \\in \\cal{N}_{i} \\cap \\cal{Y}_{\\cal{P}}}
       m_{j \\rightarrow i}(y_{i}),

    where :math:`y_{i}` is a user label and one of the {honest, fraud} and
    :math:`\\cal{N}_{i} \\cap \\cal{Y}_{\\cal{P}}` means a set of products
    this reviewer reviews. :math:`\\alpha_{2}` is a normalize constant so that
    :math:`b(honest) + b(fraud) = 1`.

    Thus, we use :math:`b(fraud)` as the anomalous score.

    Args:
        graph: reference of the parent graph.
        name: name of this node.
    """

    __slots__ = ()

    @property
    def anomalous_score(self) -> float:
        """Anomalous score of this reviewer."""
        # Unnormalized log-belief for each user label: log-prior plus the
        # sum of log-messages from the reviewed products (the None argument
        # to prod_message_from_products excludes no product).
        b = {}
        for u_label in iter(UserLabel):
            b[u_label] = phi_u(u_label) + self.graph.prod_message_from_products(self, None, u_label)
        # Normalize in log space and return the belief of the FRAUD label:
        # exp(log b(fraud) - log(b(honest) + b(fraud))).
        return cast(float, np.exp(b[UserLabel.FRAUD] - _logaddexp(*b.values())))
class Product(Node):
    """Product node in ReviewGraph.

    The product summarizes its ratings as an average weighted by how
    trustworthy each reviewer looks. Letting :math:`r_{i}` be the rating
    given by the :math:`i`-th reviewer and :math:`a_{i}` that reviewer's
    anomalous score, the summary is

    .. math::

       \\frac{\\sum_{i} (1 - a_{i}) r_{i}}{\\sum_{i} (1 - a_{i})},

    falling back to the plain mean when every weight is zero.

    Args:
        graph: reference of the parent graph.
        name: name of this node.
    """

    __slots__ = ()

    @property
    def summary(self) -> float:
        """Weighted average of the ratings given to this product."""
        raters = self.graph.retrieve_reviewers(self)
        scores = [self.graph.retrieve_review(r, self).rating for r in raters]
        # Weight each rating by 1 - (reviewer's anomalous score).
        trust = [1 - r.anomalous_score for r in raters]
        if sum(trust) == 0:
            # Every reviewer looks fully anomalous: fall back to plain mean.
            return float(np.mean(scores))
        return float(np.average(scores, weights=trust))
class Review:
    """An edge of the bipartite graph: one user's review of one product.

    Besides the rating itself, the edge carries the two message functions
    used by belief propagation: one from the user to the product (defined
    over product labels) and one from the product to the user (defined over
    user labels). Both are stored as logarithms and start out uniform,
    i.e. every entry is :math:`\\log 0.5`.

    The rating is assumed to be normalized into :math:`[0, 1]` and is
    binarized by the :meth:`evaluation` property with a 0.5 threshold:

    .. math::

        r.evaluation =
        \\begin{cases}
        PLUS \\quad (r.rating \\geq 0.5) \\\\
        MINUS \\quad (otherwise)
        \\end{cases}

    Args:
        rating: the normalized rating of this review.
    """

    # The normalized rating of this review.
    rating: Final[float]
    _user_to_product: Final[dict[ProductLabel, float]]
    _product_to_user: Final[dict[UserLabel, float]]

    __slots__ = ("rating", "_user_to_product", "_product_to_user")

    def __init__(self, rating: float) -> None:
        self.rating = rating
        # Both message tables start uniform (log 0.5 for every label).
        self._user_to_product = {
            label: _LOG_POINT_5 for label in (ProductLabel.GOOD, ProductLabel.BAD)
        }
        self._product_to_user = {
            label: _LOG_POINT_5 for label in (UserLabel.HONEST, UserLabel.FRAUD)
        }

    @property
    def evaluation(self) -> ReviewLabel:
        """Binarized label of this review.

        :data:`ReviewLabel.PLUS<fraud_eagle.labels.ReviewLabel.PLUS>` when
        the rating is at least 0.5, otherwise
        :data:`ReviewLabel.MINUS<fraud_eagle.labels.ReviewLabel.MINUS>`.
        """
        return ReviewLabel.PLUS if self.rating >= 0.5 else ReviewLabel.MINUS

    def user_to_product(self, label: ProductLabel) -> float:
        """Log value of the user-to-product message for *label*.

        Args:
            label: label of the product (GOOD or BAD).

        Returns:
            the logarithm of :math:`m_{u\\rightarrow p}(label)`, where
            :math:`u` and :math:`p` are the user and the product of this
            review, respectively.
        """
        return self._user_to_product[label]

    def product_to_user(self, label: UserLabel) -> float:
        """Log value of the product-to-user message for *label*.

        Args:
            label: label of the user (HONEST or FRAUD).

        Returns:
            the logarithm of :math:`m_{p\\rightarrow u}(label)`, where
            :math:`u` and :math:`p` are the user and the product of this
            review, respectively.
        """
        return self._product_to_user[label]

    def update_user_to_product(self, label: ProductLabel, value: float) -> None:
        """Overwrite the user-to-product message value for *label*.

        The given value is stored as-is; no normalization is applied.

        Args:
            label: product label,
            value: new message value.
        """
        self._user_to_product[label] = value

    def update_product_to_user(self, label: UserLabel, value: float) -> None:
        """Overwrite the product-to-user message value for *label*.

        The given value is stored as-is; no normalization is applied.

        Args:
            label: user label,
            value: new message value.
        """
        self._product_to_user[label] = value
class ReviewGraph:
    """A bipartite graph modeling reviewers and products relationships.

    Messages are propagated along review edges (loopy belief propagation);
    :meth:`update` runs one iteration and reports the largest change.

    Args:
        epsilon: a hyper parameter in (0, 0.5).
    """

    graph: Final[nx.DiGraph]
    """Graph object of networkx."""
    reviewers: Final[list[Reviewer]]
    """A collection of reviewers."""
    products: Final[list[Product]]
    """A collection of products."""
    epsilon: Final[float]
    """Hyper parameter."""

    def __init__(self, epsilon: float) -> None:
        # The boundary values are excluded: the model is undefined there.
        if epsilon <= 0.0 or epsilon >= 0.5:
            raise ValueError("Hyper parameter epsilon must be in (0, 0.5):", epsilon)
        self.graph = nx.DiGraph()
        self.reviewers = []
        self.products = []
        self.epsilon = epsilon

    def new_reviewer(self, name: str, *_args: Any, **_kwargs: Any) -> Reviewer:
        """Create a new reviewer and add it to this graph.

        Extra positional/keyword arguments are accepted for interface
        compatibility with other graph implementations and ignored.

        Args:
            name: name of the new reviewer,

        Returns:
            a new reviewer.
        """
        reviewer = Reviewer(self, name)
        self.graph.add_node(reviewer)
        self.reviewers.append(reviewer)
        return reviewer

    def new_product(self, name: str) -> Product:
        """Create a new product and add it to this graph.

        Args:
            name: name of the new product.

        Returns:
            a new product.
        """
        product = Product(self, name)
        self.graph.add_node(product)
        self.products.append(product)
        return product

    def add_review(self, reviewer: Reviewer, product: Product, rating: float, *_args: Any, **_kwargs: Any) -> Review:
        """Add a review from a given reviewer to a product.

        Extra positional/keyword arguments are accepted for interface
        compatibility with other graph implementations and ignored.

        Args:
            reviewer: reviewer of the review,
            product: product of the review,
            rating: rating score of the review.

        Returns:
            a new review.
        """
        review = Review(rating)
        self.graph.add_edge(reviewer, product, review=review)
        return review

    # NOTE(review): lru_cache on instance methods keys on `self` and keeps
    # this graph alive for the cache's lifetime.  The retrieve_* caches are
    # never cleared, so the graph topology is presumably assumed frozen once
    # updates start -- confirm that add_review is not called after update().
    @lru_cache
    def retrieve_reviewers(self, product: Product) -> list[Reviewer]:
        """Retrieve reviewers who review a given product.

        Args:
            product: Product.

        Returns:
            a collection of reviewers who review the product.
        """
        return list(self.graph.predecessors(product))

    @lru_cache
    def retrieve_products(self, reviewer: Reviewer) -> list[Product]:
        """Retrieve products a given reviewer reviews.

        Args:
            reviewer: Reviewer.

        Returns:
            a collection of products the given reviewer reviews.
        """
        return list(self.graph.successors(reviewer))

    @lru_cache
    def retrieve_review(self, reviewer: Reviewer, product: Product) -> Review:
        """Retrieve a review a given reviewer posts to a given product.

        Args:
            reviewer: Reviewer,
            product: Product,

        Returns:
            a review associated with the given reviewer and product.
        """
        return cast(Review, self.graph[reviewer][product]["review"])

    def update(self) -> float:
        """Update reviewers' anomalous scores and products' summaries.

        For each user :math:`u`, update messages to every product :math:`p`
        the user reviews. The message function :math:`m_{u\\rightarrow p}`
        takes one argument i.e. label of the receiver product.
        The label is one of {good, bad}.
        Therefore, we need to compute updated :math:`m_{u\\rightarrow p}(good)`
        and :math:`m_{u\\rightarrow p}(bad)`.

        The updated messages are defined as

        .. math::
           m_{u\\rightarrow p}(y_{j}) \\leftarrow
           \\alpha_{1} \\sum_{y_{i} \\in \\cal{L}_{\\cal{U}}}
           \\psi_{ij}^{s}(y_{i}, y_{j}) \\phi^{\\cal{U}}_{i}(y_{i})
           \\prod_{Y_{k} \\in \\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}/p}
           m_{k \\rightarrow i}(y_{i}),

        where :math:`y_{j} \\in {good, bad}`, and
        :math:`\\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}/p` means a set of product
        the user :math:`u` reviews but except product :math:`p`.

        For each product :math:`p`, update message to every user :math:`u`
        who reviews the product. The message function :math:`m_{p\\rightarrow u}`
        takes one argument i.e. label of the receiver user.
        The label is one of {honest, fraud}.
        Thus, we need to compute updated :math:`m_{p\\rightarrow u}(honest)`
        and :math:`m_{p\\rightarrow u}(fraud)`.

        The updated messages are defined as

        .. math::
           m_{p\\rightarrow u}(y_{i}) \\leftarrow
           \\alpha_{3} \\sum_{y_{j} \\in \\cal{L}_{\\cal{P}}}
           \\psi_{ij}^{s}(y_{i}, y_{j}) \\phi^{\\cal{P}}_{j}(y_{j})
           \\prod_{Y_{k} \\in \\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}/u}
           m_{k\\rightarrow j}(y_{j}),

        where :math:`y_{i} \\in {honest, fraud}`, and
        :math:`\\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}/u` means a set of users
        who review the product :math:`p` but except user :math:`u`,

        This method runs one iteration of update for both reviewers, i.e. users
        and products. It returns the maximum difference between an old message
        value and the associated new message value. You can stop iteration when
        the update gap reaches satisfied small value.

        Returns:
            maximum difference between an old message value and its updated new
            value.
        """
        diffs: list[float] = []
        # Update messages from users to products.
        for reviewer in self.reviewers:
            for product in self.retrieve_products(reviewer):
                message_to_product = {}
                for p_label in iter(ProductLabel):
                    message_to_product[p_label] = self._update_user_to_product(reviewer, product, p_label)
                # Normalize in log space: subtracting the log-sum-exp makes the
                # exponentiated messages sum to one.
                s = _logaddexp(*message_to_product.values())
                review = self.retrieve_review(reviewer, product)
                for p_label in iter(ProductLabel):
                    updated = message_to_product[p_label] - s
                    # Differences are tracked in probability space (exp of log messages).
                    diffs.append(abs(np.exp(review.user_to_product(p_label)) - np.exp(updated)))
                    review.update_user_to_product(p_label, updated)
        # Update messages from products to users.
        for product in self.products:
            for reviewer in self.retrieve_reviewers(product):
                message_to_user = {}
                for u_label in iter(UserLabel):
                    message_to_user[u_label] = self._update_product_to_user(reviewer, product, u_label)
                s = _logaddexp(*message_to_user.values())
                review = self.retrieve_review(reviewer, product)
                for u_label in iter(UserLabel):
                    updated = message_to_user[u_label] - s
                    diffs.append(abs(np.exp(review.product_to_user(u_label)) - np.exp(updated)))
                    review.update_product_to_user(u_label, updated)
        histo, edges = np.histogram(diffs)
        _LOGGER.info(
            "Differentials:\n"
            + "\n".join(" {0}-{1}: {2}".format(edges[i], edges[i + 1], v) for i, v in enumerate(histo))
        )
        # Clear memoized values since messages are updated.  (The retrieve_*
        # caches depend only on topology and therefore remain valid.)
        self.prod_message_from_all_users.cache_clear()
        self.prod_message_from_all_products.cache_clear()
        return max(diffs)

    def _update_user_to_product(self, reviewer: Reviewer, product: Product, p_label: ProductLabel) -> float:
        """Compute an updated message from a user to a product with a product label.

        The updated message is defined as

        .. math::
           m_{u\\rightarrow p}(y_{j}) \\leftarrow
           \\alpha_{1} \\sum_{y_{i} \\in \\cal{L}_{\\cal{U}}}
           \\psi_{ij}^{s}(y_{i}, y_{j}) \\phi^{\\cal{U}}_{i}(y_{i})
           \\prod_{Y_{k} \\in \\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}/p}
           m_{k \\rightarrow i}(y_{i}),

        where :math:`y_{j} \\in {good, bad}`, and
        :math:`\\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}/p` means a set of product
        the user :math:`u` reviews but except product :math:`p`.

        The :math:`\\psi_{ij}^{s}(y_{i}, y_{j})` takes :math:`\\epsilon` as a
        hyper parameter, self.epsilon is used for it.

        This method returns a logarithm of the updated message.

        Args:
            reviewer: Reviewer,
            product: Product,
            p_label: product label,

        Returns:
            a logarithm of the updated message from the given reviewer to the
            given product with the given product label.
        """
        review = self.retrieve_review(reviewer, product)
        res: dict[UserLabel, float] = {}
        for u_label in iter(UserLabel):
            # Everything is accumulated in log space, hence sums not products.
            res[u_label] = (
                np.log(psi(u_label, p_label, review.evaluation, self.epsilon))
                + phi_u(u_label)
                + self.prod_message_from_products(reviewer, product, u_label)
            )
        return _logaddexp(*res.values())

    def _update_product_to_user(self, reviewer: Reviewer, product: Product, u_label: UserLabel) -> float:
        """Compute an updated message from a product to a user with a user label.

        The updated message is defined as

        .. math::
           m_{p\\rightarrow u}(y_{i}) \\leftarrow
           \\alpha_{3} \\sum_{y_{j} \\in \\cal{L}_{\\cal{P}}}
           \\psi_{ij}^{s}(y_{i}, y_{j}) \\phi^{\\cal{P}}_{j}(y_{j})
           \\prod_{Y_{k} \\in \\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}/u}
           m_{k\\rightarrow j}(y_{j}),

        where :math:`y_{i} \\in {honest, fraud}`, and
        :math:`\\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}/u` means a set of users
        who review the product :math:`p` but except user :math:`u`,

        The :math:`\\psi_{ij}^{s}(y_{i}, y_{j})` takes :math:`\\epsilon` as a
        hyper parameter, self.epsilon is used for it.

        This method returns a logarithm of the updated message.

        Args:
            reviewer: Reviewer i.e. a user,
            product: Product,
            u_label: user label,

        Returns:
            a logarithm of the updated message from the given product to the
            given reviewer with the given user label.
        """
        review = self.retrieve_review(reviewer, product)
        res: dict[ProductLabel, float] = {}
        for p_label in iter(ProductLabel):
            # Everything is accumulated in log space, hence sums not products.
            res[p_label] = (
                np.log(psi(u_label, p_label, review.evaluation, self.epsilon))
                + phi_p(p_label)
                + self.prod_message_from_users(reviewer, product, p_label)
            )
        return _logaddexp(*res.values())

    @lru_cache
    def prod_message_from_all_users(self, product: Product, p_label: ProductLabel) -> float:
        """Compute a product of messages from all reviewers to a product.

        This helper function computes a logarithm of the product of messages such as

        .. math::
           \\prod_{Y_{k} \\in \\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}}
           m_{k\\rightarrow j}(y_{j}),

        where :math:`\\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}` means the set
        of reviewers who review the given product,
        :math:`y_{j}` is a product label and one of the {GOOD, BAD}.

        Args:
            product : Product,
            p_label: product label

        Returns:
            a logarithm of the product defined above.
        """
        reviewers = set(self.retrieve_reviewers(product))
        # Messages are stored as logarithms, so the product becomes a sum.
        return cast(float, np.sum([self.retrieve_review(r, product).user_to_product(p_label) for r in reviewers]))

    def prod_message_from_users(self, reviewer: Optional[Reviewer], product: Product, p_label: ProductLabel) -> float:
        """Compute a product of messages to a product except from a reviewer.

        This helper function computes a logarithm of the product of messages such as

        .. math::
           \\prod_{Y_{k} \\in \\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}/user}
           m_{k\\rightarrow j}(y_{j}),

        where :math:`\\cal{N}_{j} \\cap \\cal{Y}^{\\cal{U}}/user` means a set
        of reviewers who review the given product except the given reviewer,
        :math:`y_{j}` is a product label and one of the {GOOD, BAD}.

        If reviewer is None, compute a product of all messages sending to the
        product.

        Args:
            reviewer: Reviewer, can be None,
            product : Product,
            p_label: product label

        Returns:
            a logarithm of the product defined above.
        """
        # In log space "product divided by one factor" is a subtraction.
        sum_all = self.prod_message_from_all_users(product, p_label)
        sum_reviewer = 0.0
        if reviewer is not None:
            sum_reviewer = self.retrieve_review(reviewer, product).user_to_product(p_label)
        return sum_all - sum_reviewer

    @lru_cache
    def prod_message_from_all_products(self, reviewer: Reviewer, u_label: UserLabel) -> float:
        """Compute a product of messages from all products to a reviewer.

        This helper function computes a logarithm of the product of messages such as

        .. math::
           \\prod_{Y_{k} \\in \\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}}
           m_{k \\rightarrow i}(y_{i}),

        where :math:`\\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}` means the set
        of products the given reviewer reviews,
        :math:`y_{i}` is a user label and one of the {HONEST, FRAUD}.

        Args:
            reviewer: reviewer object,
            u_label: user label.

        Returns:
            a logarithm of the product defined above.
        """
        products = set(self.retrieve_products(reviewer))
        # Messages are stored as logarithms, so the product becomes a sum.
        return cast(float, np.sum([self.retrieve_review(reviewer, p).product_to_user(u_label) for p in products]))

    def prod_message_from_products(self, reviewer: Reviewer, product: Optional[Product], u_label: UserLabel) -> float:
        """Compute a product of messages sending to a reviewer except from a product.

        This helper function computes a logarithm of the product of messages such as

        .. math::
           \\prod_{Y_{k} \\in \\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}/product}
           m_{k \\rightarrow i}(y_{i}),

        where :math:`\\cal{N}_{i} \\cap \\cal{Y}^{\\cal{P}}/product` means a set
        of products the given reviewer reviews except the given product,
        :math:`y_{i}` is a user label and one of the {HONEST, FRAUD}.

        If product is None, compute a product of all messages sending to the
        reviewer.

        Args:
            reviewer: reviewer object,
            product: product object, can be None,
            u_label: user label.

        Returns:
            a logarithm of the product defined above.
        """
        # In log space "product divided by one factor" is a subtraction.
        sum_all = self.prod_message_from_all_products(reviewer, u_label)
        sum_product = 0.0
        if product is not None:
            sum_product = self.retrieve_review(reviewer, product).product_to_user(u_label)
        return sum_all - sum_product
"""Provide a review graph which runs Fraudar algorithm.
"""
import tempfile
from bisect import bisect_left
from collections import defaultdict
from typing import Any, Final, Protocol
import numpy as np
from fraudar.export import greedy
from fraudar.export.greedy import logWeightedAveDegree
class Node:
    """Base node type of the :class:`ReviewGraph`.

    Each node carries its owning graph and a name.  Nodes are hashable, so
    they can be stored in dictionaries, and orderable by name, so node lists
    can be sorted deterministically.

    Args:
        graph: graph object this node belongs to.
        name: name of this node.
    """

    graph: Final["ReviewGraph"]
    """The graph object this node belongs to."""
    name: Final[str]
    """Name of this node."""

    __slots__ = ("graph", "name")

    def __init__(self, graph: "ReviewGraph", name: str) -> None:
        """Store the owning graph and the node name.

        Args:
            graph: graph object this node belongs to.
            name: name of this node.
        """
        self.graph = graph
        self.name = name

    def __hash__(self) -> int:
        """Hash value mixing the concrete node type with the node name."""
        type_part = 13 * hash(type(self))
        name_part = 17 * hash(self.name)
        return type_part + name_part

    def __lt__(self, other: "Node") -> bool:
        """Order nodes lexicographically by their names."""
        return self.name < other.name
class Reviewer(Node):
    """A node type representing a reviewer.

    Use :meth:`ReviewGraph.new_reviewer` to create reviewer objects instead
    of calling this constructor directly.

    Args:
        graph: graph object this reviewer belongs to.
        name: name of this reviewer.
        anomalous_score: initial anomalous score (default: 0).
    """

    anomalous_score: float
    """anomalous score of this reviewer."""

    __slots__ = ("anomalous_score",)

    def __init__(self, graph: "ReviewGraph", name: str, anomalous_score: float = 0) -> None:
        """Initialize the base node and the anomalous score."""
        super().__init__(graph, name)
        self.anomalous_score = anomalous_score
class Product(Node):
    """A node type representing a product.

    Use :meth:`ReviewGraph.new_product` to create product objects instead of
    calling this constructor directly.

    Args:
        graph: graph object this product belongs to.
        name: name of this product.
    """

    __slots__ = ()

    @property
    def summary(self) -> float:
        """Summary of ratings given to this product.

        Each rating is weighted by ``1 - anomalous_score`` of its reviewer;
        when all weights vanish, the plain mean of the ratings is returned.
        """
        ratings_by_reviewer = self.graph.reviews[self]
        ratings = list(ratings_by_reviewer.values())
        weights = [1 - reviewer.anomalous_score for reviewer in ratings_by_reviewer]
        if sum(weights) == 0:
            return float(np.mean(ratings))
        return float(np.average(ratings, weights=weights))
class _Writable(Protocol):
    """Structural type for the minimal writable file-like interface used by
    :meth:`ReviewGraph._store_matrix`."""

    def write(self, s: str, /) -> int:
        """Write *s* and return the number of characters written."""
        ...
class ReviewGraph:
    """ReviewGraph is a simple bipartite graph representing review relation.

    Args:
        blocks: how many blocks to be detected. (default: 1)
        algo: algorithm used in fraudar, chosen from
            :meth:`aveDegree <fraudar.export.greedy.aveDegree>`,
            :meth:`sqrtWeightedAveDegree <fraudar.export.greedy.sqrtWeightedAveDegree>`,
            and
            :meth:`logWeightedAveDegree <fraudar.export.greedy.logWeightedAveDegree>`.
            (default: logWeightedAveDegree)
    """

    reviewers: Final[list[Reviewer]]
    """Collection of reviewers."""
    products: Final[list[Product]]
    """Collection of products."""
    reviews: Final[defaultdict[Product, dict[Reviewer, float]]]
    """Collection of reviews.

    Maps each product to a dictionary whose keys are reviewers and whose
    values are the ratings those reviewers gave the product.
    """
    _algo: Final[Any]
    _blocks: Final[int]

    def __init__(self, blocks: int = 1, algo: Any = logWeightedAveDegree) -> None:
        """Initialize an empty graph with the given detection settings."""
        self.reviewers = []
        self.products = []
        self.reviews = defaultdict(dict)
        self._algo = algo
        self._blocks = blocks

    def new_reviewer(self, name: str, **_kwargs: Any) -> Reviewer:
        """Create a new reviewer.

        Extra keyword arguments are accepted for interface compatibility and
        ignored.

        Args:
            name: name of the new reviewer.

        Returns:
            a new reviewer object.
        """
        reviewer = Reviewer(self, name)
        self.reviewers.append(reviewer)
        return reviewer

    def new_product(self, name: str) -> Product:
        """Create a new product.

        Args:
            name: name of the new product.

        Returns:
            a new product object.
        """
        product = Product(self, name)
        self.products.append(product)
        return product

    def add_review(self, reviewer: Reviewer, product: Product, rating: float, **_kwargs: Any) -> float:
        """Add a review from a reviewer to a product.

        Args:
            reviewer: reviewer who posts the review.
            product: product which receives the review.
            rating: the review score.

        Returns:
            added review score.
        """
        self.reviews[product][reviewer] = rating
        return rating

    def update(self) -> float:
        """Update anomalous scores by running a greedy algorithm.

        The graph is serialized to a temporary file, read back as a sparse
        matrix, and reviewers falling inside any detected dense block get an
        anomalous score of 1.

        Returns:
            0
        """
        with tempfile.NamedTemporaryFile(mode="w") as fp:
            # Store this graph to a temporal file.
            self._store_matrix(fp)
            fp.seek(0)
            # Run greedy algorithm.
            matrix = greedy.readData(fp.name)
            result = greedy.detectMultiple(matrix, self._algo, self._blocks)
            # Update anomalous scores: each block is ((rowSet, colSet), score)
            # and row indices correspond to positions in self.reviewers.
            for (row_set, _col_set), _score in result:
                for row in row_set:
                    self.reviewers[row].anomalous_score = 1
        return 0

    def _store_matrix(self, fp: _Writable) -> None:
        """Store this graph as a sparse matrix format.

        Nodes are sorted by name first so that the emitted row/column indices
        are stable across calls.

        Args:
            fp: file-like object where the matrix to be written.
        """
        self.reviewers.sort()
        self.products.sort()
        for product, ratings in self.reviews.items():
            col = bisect_left(self.products, product)
            for reviewer in ratings:
                row = bisect_left(self.reviewers, reviewer)
                fp.write("{0} {1}\n".format(row, col))
import random
import numpy as np
from scipy import sparse
from .MinTree import MinTree
np.set_printoptions(linewidth=160)
# given 2 lists corresponding to the edge source and destination,
# this returns the sparse matrix representation of the data
def listToSparseMatrix(edgesSource, edgesDest):
    """Build a binary sparse adjacency matrix from parallel endpoint lists.

    Duplicate edges collapse to a single 1 entry.  The matrix shape is
    (max source + 1, max destination + 1).
    """
    n_rows = max(edgesSource) + 1
    n_cols = max(edgesDest) + 1
    raw = sparse.coo_matrix(([1] * len(edgesSource), (edgesSource, edgesDest)), shape=(n_rows, n_cols))
    # Comparing against zero binarizes duplicate entries accumulated by coo.
    binary = raw > 0
    return binary.astype("int")
# reads matrix from file and returns sparse matrix. first 2 columns should be row and column indices of ones.
def readData(filename):
    """Load whitespace-separated "row col" index pairs into a sparse matrix.

    Only the first two whitespace-separated fields of each line are used.
    """
    rows = []
    cols = []
    with open(filename) as handle:
        for line in handle:
            fields = line.split()
            rows.append(int(fields[0]))
            cols.append(int(fields[1]))
    return listToSparseMatrix(rows, cols)
def detectMultiple(M, detectFunc, numToDetect):
    """Detect several dense blocks, removing each one before the next pass.

    Args:
        M: sparse matrix to analyze.
        detectFunc: callable taking the current matrix and returning
            ``((rowSet, colSet), score)`` for the densest block found.
        numToDetect: how many blocks to detect.

    Returns:
        list of ``((rowSet, colSet), score)`` tuples, one per detected block.
    """
    Mcur = M.copy().tolil()
    res = []
    # Fix: the original inner loop reused `i` as its index, shadowing the
    # outer detection counter; distinct names make the code unambiguous.
    for _ in range(numToDetect):
        ((rowSet, colSet), score) = detectFunc(Mcur)
        res.append(((rowSet, colSet), score))
        # Zero out the detected block so the next pass finds a different one.
        (rs, cs) = Mcur.nonzero()
        for k in range(len(rs)):
            if rs[k] in rowSet and cs[k] in colSet:
                Mcur[rs[k], cs[k]] = 0
    return res
# inject a clique of size m0 by n0, with density pp. the last parameter testIdx determines the camouflage type.
# testIdx = 1: random camouflage, with camouflage density set so each fraudster outputs approximately equal number
# of fraudulent and camouflage edges
# testIdx = 2: random camouflage, with double the density as in the previous setting
# testIdx = 3: biased camouflage, more likely to add camouflage to high degree columns
def injectCliqueCamo(M, m0, n0, p, testIdx):
    """Return a copy of M with an injected m0-by-n0 clique plus camouflage.

    Rows 0..m0-1 play the fraudsters and columns 0..n0-1 the targeted
    products; camouflage edges are added into columns n0..n-1 depending on
    testIdx (see the comments above).  Uses the global `random` module, so
    results are reproducible only under an explicit seed.
    """
    (m, n) = M.shape
    M2 = M.copy().tolil()
    # Column degrees of the non-target columns, used to bias camouflage
    # towards popular columns when testIdx == 3.
    colSum = np.squeeze(M2.sum(axis=0).A)
    colSumPart = colSum[n0:n]
    colSumPartPro = np.int_(colSumPart)
    colIdx = np.arange(n0, n, 1)
    # Each column index appears once per unit of its degree, so sampling from
    # `population` is degree-proportional.
    population = np.repeat(colIdx, colSumPartPro, axis=0)
    for i in range(m0):
        # inject clique
        for j in range(n0):
            if random.random() < p:
                M2[i, j] = 1
        # inject camo
        if testIdx == 1:
            thres = p * n0 / (n - n0)
            for j in range(n0, n):
                if random.random() < thres:
                    M2[i, j] = 1
        if testIdx == 2:
            thres = 2 * p * n0 / (n - n0)
            for j in range(n0, n):
                if random.random() < thres:
                    M2[i, j] = 1
        # biased camo
        if testIdx == 3:
            colRplmt = random.sample(population, int(n0 * p))
            M2[i, colRplmt] = 1
    return M2.tocsc()
# sum of weighted edges in rowSet and colSet, plus node suspiciousness values, in matrix M
def c2Score(M, rowSet, colSet, nodeSusp):
    """Block score: total edge weight of the (rowSet x colSet) submatrix plus
    the per-row and per-column suspiciousness of its members."""
    rows = list(rowSet)
    cols = list(colSet)
    susp_total = nodeSusp[0][rows].sum() + nodeSusp[1][cols].sum()
    return M[rows, :][:, cols].sum(axis=None) + susp_total
def jaccard(pred, actual):
    """Jaccard similarity between two (rowSet, colSet) pairs, computed over
    the union of the row and column components."""
    inter = len(pred[0] & actual[0]) + len(pred[1] & actual[1])
    union = len(pred[0] | actual[0]) + len(pred[1] | actual[1])
    return inter / union
def getPrecision(pred, actual):
    """Fraction of predicted rows+columns that are actually in the block."""
    matched = len(pred[0] & actual[0]) + len(pred[1] & actual[1])
    return matched / (len(pred[0]) + len(pred[1]))
def getRecall(pred, actual):
    """Fraction of actual block rows+columns that were predicted."""
    matched = len(pred[0] & actual[0]) + len(pred[1] & actual[1])
    return matched / (len(actual[0]) + len(actual[1]))
def getFMeasure(pred, actual):
    """Harmonic mean of precision and recall (0 when both vanish)."""
    prec = getPrecision(pred, actual)
    rec = getRecall(pred, actual)
    if prec + rec == 0:
        return 0
    return 2 * prec * rec / (prec + rec)
def getRowPrecision(pred, actual, idx):
    """Precision restricted to one component (idx 0: rows, idx 1: columns)."""
    matched = len(pred[idx] & actual[idx])
    return matched / len(pred[idx])
def getRowRecall(pred, actual, idx):
    """Recall restricted to one component (idx 0: rows, idx 1: columns)."""
    matched = len(pred[idx] & actual[idx])
    return matched / len(actual[idx])
def getRowFMeasure(pred, actual, idx):
    """Per-component F-measure (0 when precision + recall is zero)."""
    prec = getRowPrecision(pred, actual, idx)
    rec = getRowRecall(pred, actual, idx)
    if prec + rec == 0:
        return 0
    return 2 * prec * rec / (prec + rec)
# run greedy algorithm using square root column weights
def sqrtWeightedAveDegree(M, nodeSusp=None):
    """Run the greedy detector with 1/sqrt(colSum + 5) column weights."""
    (m, n) = M.shape
    colSums = M.sum(axis=0)
    weights = 1.0 / np.sqrt(np.squeeze(colSums) + 5)
    # Apply the weights by multiplying with a diagonal matrix on the right.
    diag = sparse.lil_matrix((n, n))
    diag.setdiag(weights)
    weighted = M * diag
    return fastGreedyDecreasing(weighted, weights, nodeSusp)
# run greedy algorithm using logarithmic weights
def logWeightedAveDegree(M, nodeSusp=None):
    """Run the greedy detector with 1/log(colSum + 5) column weights."""
    (m, n) = M.shape
    colSums = M.sum(axis=0)
    weights = np.squeeze(np.array(1.0 / np.log(np.squeeze(colSums) + 5)))
    # Apply the weights by multiplying with a diagonal matrix on the right.
    diag = sparse.lil_matrix((n, n))
    diag.setdiag(weights)
    weighted = M * diag
    print("finished computing weight matrix")
    return fastGreedyDecreasing(weighted, weights, nodeSusp)
def aveDegree(M, nodeSusp=None):
    """Run the greedy detector with uniform (unit) column weights."""
    (m, n) = M.shape
    return fastGreedyDecreasing(M, [1] * n, nodeSusp)
def subsetAboveDegree(M, col_thres, row_thres):
    """Keep only rows/columns whose degree exceeds the given thresholds.

    Returns the pruned matrix together with the original row and column
    indices that survived, so results can be mapped back to M.
    """
    csc = M.tocsc()
    (n_rows, n_cols) = csc.shape
    col_totals = np.squeeze(np.array(csc.sum(axis=0)))
    row_totals = np.squeeze(np.array(csc.sum(axis=1)))
    keep_cols = col_totals > col_thres
    keep_rows = row_totals > row_thres
    # Columns are filtered in CSC form, then rows in CSR form, matching the
    # cheapest slicing direction of each format.
    pruned = csc[:, keep_cols].tocsr()[keep_rows, :]
    rowFilter = [i for i in range(n_rows) if keep_rows[i]]
    colFilter = [j for j in range(n_cols) if keep_cols[j]]
    return pruned, rowFilter, colFilter
# @profile
def fastGreedyDecreasing(M, colWeights, nodeSusp=None):
    """Greedily peel rows/columns from M to find the block with the best
    average score.

    At each step the row or column whose removal loses the least total weight
    (tracked with MinTrees) is deleted; the intermediate state with the best
    average score is remembered and reconstructed at the end.

    Fix: the unused locals ``Md``, ``bestSets``, and the two dead ``delt``
    assignments of the original are removed; behavior is unchanged.

    Args:
        M: sparse (m x n) matrix, typically already column-weighted.
        colWeights: per-column weight; removing a row decreases each of its
            columns' deltas by that column's weight, and vice versa.
        nodeSusp: optional pair of per-row and per-column suspiciousness
            arrays added to the block score (default: all zeros).

    Returns:
        ``((rowSet, colSet), bestAveScore)`` describing the best block seen.
    """
    (m, n) = M.shape
    if nodeSusp is None:
        nodeSusp = (np.zeros(m), np.zeros(n))
    # LIL views give cheap per-row access; the transpose serves column lookups.
    Ml = M.tolil()
    Mlt = M.transpose().tolil()
    rowSet = set(range(0, m))
    colSet = set(range(0, n))
    curScore = c2Score(M, rowSet, colSet, nodeSusp)
    bestAveScore = curScore / (len(rowSet) + len(colSet))
    print("finished initialization")
    # Contribution of each row to the total weight, i.e. the *decrease* in
    # total weight when that row is removed (and likewise for columns).
    rowDeltas = np.squeeze(M.sum(axis=1).A) + nodeSusp[0]
    colDeltas = np.squeeze(M.sum(axis=0).A) + nodeSusp[1]
    print("finished setting deltas")
    rowTree = MinTree(rowDeltas)
    colTree = MinTree(colDeltas)
    print("finished building min trees")
    numDeleted = 0
    deleted = []
    bestNumDeleted = 0
    while rowSet and colSet:
        if (len(colSet) + len(rowSet)) % 100000 == 0:
            print("current set size = %d" % (len(colSet) + len(rowSet),))
        (nextRow, rowDelt) = rowTree.getMin()
        (nextCol, colDelt) = colTree.getMin()
        if rowDelt <= colDelt:
            # Remove the cheapest row: each of its columns loses that
            # column's weight.
            curScore -= rowDelt
            for j in Ml.rows[nextRow]:
                colTree.changeVal(j, -colWeights[j])
            rowSet -= {nextRow}
            # Infinity keeps the deleted row from being picked again.
            rowTree.changeVal(nextRow, float("inf"))
            deleted.append((0, nextRow))
        else:
            # Remove the cheapest column: each of its rows loses this
            # column's weight.
            curScore -= colDelt
            for i in Mlt.rows[nextCol]:
                rowTree.changeVal(i, -colWeights[nextCol])
            colSet -= {nextCol}
            colTree.changeVal(nextCol, float("inf"))
            deleted.append((1, nextCol))
        numDeleted += 1
        curAveScore = curScore / (len(colSet) + len(rowSet))
        if curAveScore > bestAveScore:
            bestAveScore = curAveScore
            bestNumDeleted = numDeleted
    # reconstruct the best row and column sets by replaying the deletion log
    finalRowSet = set(range(m))
    finalColSet = set(range(n))
    for i in range(bestNumDeleted):
        if deleted[i][0] == 0:
            finalRowSet.remove(deleted[i][1])
        else:
            finalColSet.remove(deleted[i][1])
    return (finalRowSet, finalColSet), bestAveScore
"""Implementations of histogram review and summary classes.
"""
from __future__ import absolute_import
from collections import defaultdict
import numbers
import numpy as np
from review.base import Review
from review.base import Summary
class HistoReview(Review):
    """Histogram-valued review.

    The review value is a histogram: a mapping from quantized rating keys to
    float weights.  A plain scalar becomes a histogram with one bucket of
    weight 1.

    Args:
        v: a dict mapping raw keys to weights, or a single raw value.
        quantizer: callable used to quantize raw keys (default: round).
        date: the date when this review was posted (default: None).
    """

    # NOTE(review): __eq__ is defined without __hash__, so instances are
    # unhashable in Python 3 -- confirm reviews are never used as dict keys.
    # Fix: __slots__ is now a tuple; the bare string form only worked because
    # a string names a single slot, and is bug-prone when extended.
    __slots__ = ("_v",)

    def __init__(self, v, quantizer=round, date=None):
        super(HistoReview, self).__init__(date)
        self._v = {}
        if isinstance(v, dict):
            for key, value in v.items():
                self._v[quantizer(key)] = float(value)
        else:
            # A scalar becomes a single-bucket histogram with weight 1.
            self._v[quantizer(v)] = 1.

    @property
    def score(self):
        """A float value representing score of this review.

        Computed as the weighted sum of keys, i.e. ``sum(key * weight)``.
        """
        res = 0.
        for k in self:
            res += k * self[k]
        return res

    @property
    def vector(self):
        """Raw histogram dictionary."""
        return self._v

    def norm(self):
        """1-norm of this vector, i.e. the sum of all weights."""
        return sum(self._v.values(), 0.)

    def inner_product(self, other):
        """Inner product of two vectors.

        Args:
            other: a HistoReview instance.

        Returns:
            the inner product between this and the other.

        Raises:
            TypeError: if other is not a HistoReview.
        """
        if not isinstance(other, HistoReview):
            raise TypeError(
                "other must be an HistoReview: {0}".format(type(other)))
        res = 0.
        # Only keys present in both histograms contribute.
        for k in set(self.vector.keys()) & set(other.vector.keys()):
            res += self.vector[k] * other.vector[k]
        return res

    def __eq__(self, other):
        """Histograms are equal when keys and weights match exactly."""
        if not isinstance(other, HistoReview):
            return False
        if self.vector.keys() != other.vector.keys():
            return False
        for k in self:
            if self[k] != other[k]:
                return False
        return True

    def __add__(self, other):
        """Bucket-wise sum of two histograms."""
        if not isinstance(other, HistoReview):
            raise TypeError(
                "other is {0}, not HistoReview".format(type(other)))
        res = defaultdict(float, self._v)
        for k in other:
            res[k] += other[k]
        return HistoReview(res)

    def __rmul__(self, other):
        """Scale every bucket weight by a scalar."""
        if not isinstance(other, numbers.Number):
            raise TypeError("other is {0}, not a number".format(type(other)))
        res = self._v.copy()
        for k in res:
            res[k] *= other
        return HistoReview(res)

    def __getitem__(self, key):
        return self._v[key]

    def __iter__(self):
        return self._v.__iter__()

    def __contains__(self, v):
        return v in self._v

    def __str__(self):
        return ", ".join(["{0}:{1}".format(i, self[i]) for i in self])
class HistoSummary(Summary):
    """Histogram summary of reviews.

    Args:
        reviews: an iterable of HistoReview (or raw values accepted by
            HistoReview), a single HistoReview, or a single raw value.
    """

    # Fix: __slots__ is now a tuple; the bare string form only worked because
    # a string names a single slot.  _histo: an instance of HistoReview.
    __slots__ = ("_histo",)

    def __init__(self, reviews):
        if hasattr(reviews, "__iter__"):
            reviews = list(reviews)
            if not isinstance(reviews[0], HistoReview):
                reviews = [HistoReview(r) for r in reviews]
            # np.mean over HistoReview objects uses their __add__/__rmul__,
            # so the result is itself a HistoReview.
            self._histo = np.mean(reviews)
        elif isinstance(reviews, HistoReview):
            self._histo = reviews
        else:
            self._histo = HistoReview(reviews)

    def difference(self, r):
        """Compute a difference between this summary and a given review score.

        Args:
            r: an instance of HistoReview.

        Returns:
            ``abs(1 - <summary, r>)`` where ``<., .>`` is the inner product.

        Raises:
            TypeError: if r is not a HistoReview.
        """
        if not isinstance(r, HistoReview):
            raise TypeError("r must be an HistoReview: {0}".format(type(r)))
        return abs(1 - self._histo.inner_product(r))

    @property
    def score(self):
        """A float value representing this summary."""
        return self._histo.score

    def __str__(self):
        return ", ".join(["{0}:{1}".format(i, self._histo[i]) for i in self._histo])

    @classmethod
    def review_class(cls):
        """A review class associated with this summary."""
        return HistoReview
"""Implementations of scalar review and summary classes.
"""
from __future__ import absolute_import
import numbers
import numpy as np
from review.base import Review
from review.base import Summary
class AverageReview(Review):
    """Scalar review.

    The review score is a single float value.

    Args:
        v: a float value representing the review score.
        date: the date when this review was posted (default: None).

    Raises:
        TypeError: if `v` is not a number.
    """

    # Fix: __slots__ is now a tuple; the bare string form only worked because
    # a string names a single slot, and is bug-prone when extended.
    __slots__ = ("_v",)

    def __init__(self, v, date=None):
        super(AverageReview, self).__init__(date)
        if not isinstance(v, numbers.Number):
            raise TypeError("v ({0}) is not a scalar value.".format(type(v)))
        self._v = float(v)

    @property
    def score(self):
        """A float value representing score of this review."""
        return self._v

    def __eq__(self, other):
        """Reviews are equal when their scores are equal."""
        if not isinstance(other, AverageReview):
            return False
        return self.score == other.score

    def __add__(self, other):
        """Sum of two scalar reviews (a new AverageReview)."""
        if not isinstance(other, AverageReview):
            raise TypeError(
                "other is {0}, not AverageReview".format(type(other)))
        return AverageReview(self.score + other.score)

    def __rmul__(self, other):
        """Scale the review score by a scalar (a new AverageReview)."""
        if not isinstance(other, numbers.Number):
            raise TypeError(
                "other is {0}, not numbers.Number".format(type(other)))
        return AverageReview(other * self.score)

    def __str__(self):
        return str(self.score)

    def __hash__(self):
        # Consistent with __eq__: equal scores hash equally.
        return hash(self.score)
class AverageSummary(Summary):
    """Scalar summary.

    The summary is the average of the given reviews.

    Args:
        scores: an iterable of AverageReview objects or numbers, a single
            AverageReview, or a single number.
    """

    # Fix: __slots__ is now a tuple; the bare string form only worked because
    # a string names a single slot.  _v: an instance of AverageReview.
    __slots__ = ("_v",)

    def __init__(self, scores):
        if hasattr(scores, "__iter__"):
            # np.mean over AverageReview objects uses their __add__/__rmul__,
            # so the result may already be an AverageReview.
            v = np.mean(list(scores))
            if isinstance(v, AverageReview):
                self._v = v
            else:
                self._v = AverageReview(v)
        elif isinstance(scores, AverageReview):
            self._v = scores
        else:
            self._v = AverageReview(scores)

    def difference(self, r):
        """Difference between this summary and a given review.

        Args:
            r: a review.

        Returns:
            a non-negative float value or 0 representing the difference between
            this summary and the given value.

        Raises:
            TypeError: if r is not an AverageReview.
        """
        if not isinstance(r, AverageReview):
            raise TypeError("r is {0}, not AverageReview".format(type(r)))
        return abs(self._v.score - r.score)

    @property
    def score(self):
        """Float value representing this summary."""
        return self._v.score

    @property
    def v(self):
        """Summary score (alias kept for backward compatibility)."""
        return self._v.score

    def __str__(self):
        return str(self._v)

    @classmethod
    def review_class(cls):
        """A review class associated with this summary."""
        return AverageReview
"""Defines abstract Review and Summary classes.
"""
import math
import numbers
class _ImmutableAdditiveGroup(object):
"""Immutable additive group.
Subclass must implement __add__, __neg__, and __eq__, then this class
complements __sub__ and __ne__.
"""
__slots__ = ()
def __add__(self, _):
raise NotImplementedError("Subclasses must implement __add__.")
def __sub__(self, other):
return self + (-other)
def __neg__(self):
raise NotImplementedError("Subclasses must implement __neg__.")
def __eq__(self, _):
raise NotImplementedError("Subclasses must implement __eq__.")
def __ne__(self, other):
return not self == other
class _MultipliableImmutableAdditiveGroup(_ImmutableAdditiveGroup):
    """Immutable additive group that also supports scalar multiplication.

    This is a subclass of _ImmutableAdditiveGroup.  Subclasses provide
    __add__, __rmul__, and __eq__; division and negation are derived from
    scalar multiplication.
    """

    __slots__ = ()

    def __rmul__(self, other):
        # other must be a number.
        raise NotImplementedError("Subclasses must implement __rmul__")

    def __div__(self, value):
        # Python 2 division operator; delegate to the Python 3 protocol.
        return self.__truediv__(value)

    def __truediv__(self, value):
        if not isinstance(value, numbers.Number):
            raise TypeError("value must be an instance of numbers.Number")
        return (1.0 / value) * self

    def __floordiv__(self, value):
        # NOTE(review): math.floor on the scaled group element requires that
        # element to be numeric or define __floor__ -- confirm with callers.
        if not isinstance(value, numbers.Number):
            raise TypeError("value must be an instance of numbers.Number")
        return math.floor((1.0 / value) * self)

    def __neg__(self):
        # Negation is multiplication by -1.
        return -1 * self
class Review(_MultipliableImmutableAdditiveGroup):
    """Abstract class of Review.

    Review is defined on a multipliable immutable additive group.
    Subclasses must implement the following methods:

    - ``__eq__``
    - ``__add__``
    - ``__rmul__``

    Subclasses also need to implement a property `score` returning the review
    score itself as a float number.

    Attribute:
        date: the date when this review was posted.
    """

    # Fix: __slots__ is now a tuple; the bare string form only worked because
    # a string names a single slot, and is bug-prone when extended.
    __slots__ = ("date",)

    def __init__(self, date=None):
        self.date = date

    def __mul__(self, other):
        # Scalar multiplication is commutative; reuse __rmul__.
        return self.__rmul__(other)

    @property
    def score(self):
        """A float value representing score of this review."""
        raise NotImplementedError
class Summary(object):
    """Abstract base class for summaries of reviews.

    Concrete summaries implement :meth:`difference`, which compares a summary
    with a review, the :attr:`score` property, which maps a summary to a
    float, and the :meth:`review_class` class method naming the review type
    they aggregate.
    """

    __slots__ = ()

    def difference(self, r):
        """Compute a difference between this summary and a given review score.

        Args:
            r: an instance of Review.

        Returns:
            The difference between the summary and the given review.
        """
        raise NotImplementedError

    @property
    def score(self):
        """A float value representing this summary."""
        raise NotImplementedError

    @classmethod
    def review_class(cls):
        """A review class associated with this summary."""
        raise NotImplementedError
:description: This package provides 4 algorithms for review graph mining project.
Mutually Reinforcing Analysis (MRA) algorithm is an algorithm we've
introduced in DEXA 2011, Repeated Improvement Analysis (RIA) algorithm
is an algorithm we've introduced in DEIM 2015.
.. _top:
Repeated Improvement Analysis
===============================
.. raw:: html
<div class="addthis_inline_share_toolbox"></div>
This package provides 4 algorithms for review graph mining project.
**Mutually Reinforcing Analysis (MRA)** algorithm is an algorithm we've introduced
in DEXA 2011 [#DEXA2011]_, **Repeated Improvement Analysis (RIA)** algorithm
is an algorithm we've introduced in DEIM 2015 [#DEIM2015]_.
Algorithm **One** has been proposed by Ee-Peng Lim *et al.* in CIKM 2010 [#CIKM2010]_.
Algorithm **OneSum** is a variation of it made by us.
This package is a part of `Review Graph Mining Project </>`_
which provides other algorithms, datasets, and helper libraries.
Installation
--------------
Use `pip` to install this package.
.. code-block:: bash
pip install --upgrade rgmining-ria
Graph model
-------------
This package assumes review data are represented in a bipartite graph.
This bipartite graph has two kinds of nodes; reviewers and products.
One reviewer node and one product node are connected if the reviewer posts
a review to the product.
In other words, an edge in the graph represents a review.
Each review has a rating score.
We assume the score is normalized in 0 to 1.
Here is a sample of the bipartite graph.
.. graphviz::
digraph bipartite {
graph [label="Sample bipartite graph.", rankdir = LR];
"reviewer-0";
"reviewer-1";
"product-0";
"product-1";
"product-2";
"reviewer-0" -> "product-0" [label="0.2"];
"reviewer-0" -> "product-1" [label="0.9"];
"reviewer-0" -> "product-2" [label="0.6"];
"reviewer-1" -> "product-1" [label="0.1"];
"reviewer-1" -> "product-2" [label="0.7"];
}
Usage
------
Construct a graph
^^^^^^^^^^^^^^^^^^
This package provides four functions to construct graph objects.
* :meth:`ria.mra_graph` creates a review graph running MRA algorithm.
* :meth:`ria.ria_graph` creates a review graph running RIA algorithm.
It takes one parameter :math:`\\alpha`.
* :meth:`ria.one_graph` and :meth:`ria.one_sum_graph` create a review graph
running One and OneSum algorithm, respectively.
For example, to create a review graph running RIA algorithm,
.. code-block:: python
import ria
graph = ria.ria_graph(1.0)
Then, you need to add reviewer nodes, product nodes, and review edges.
:meth:`new_reviewer()<ria.bipartite.BipartiteGraph.new_reviewer>` and
:meth:`new_product()<ria.bipartite.BipartiteGraph.new_product>` methods
of the graph create a reviewer node and a product node, respectively,
and add them to the graph. Both methods take one argument `name`, i.e. ID of
the node.
Note that, the names must be unique in a graph.
:meth:`add_review()<ria.bipartite.BipartiteGraph.add_review>` method add a
review to the graph. It takes a `reviewer`, a `product`, and a normalized
rating score which the reviewer posted to the product.
The normalized rating scores mean they must be in 0 to 1.
For example, let us assume there are two reviewers and three products
like the below.
.. graphviz::
digraph bipartite {
graph [label="Sample bipartite graph.", rankdir = LR];
"reviewer-0";
"reviewer-1";
"product-0";
"product-1";
"product-2";
"reviewer-0" -> "product-0" [label="0.2"];
"reviewer-0" -> "product-1" [label="0.9"];
"reviewer-0" -> "product-2" [label="0.6"];
"reviewer-1" -> "product-1" [label="0.1"];
"reviewer-1" -> "product-2" [label="0.7"];
}
The graph can be constructed by the following code.
.. code-block:: python
# Create reviewers and products.
reviewers = [graph.new_reviewer("reviewer-{0}".format(i)) for i in range(2)]
products = [graph.new_product("product-{0}".format(i)) for i in range(3)]
graph.add_review(reviewers[0], products[0], 0.2)
graph.add_review(reviewers[0], products[1], 0.9)
graph.add_review(reviewers[0], products[2], 0.6)
graph.add_review(reviewers[1], products[1], 0.1)
graph.add_review(reviewers[1], products[2], 0.7)
Analysis
^^^^^^^^^^^
:meth:`update()<fraud_eagle.graph.ReviewGraph.update>` runs one iteration of
`loopy belief propagation algorithm <https://arxiv.org/pdf/1206.0976.pdf>`_.
This method returns the amount of update in the iteration.
You need to run iterations until the amount of update becomes enough small.
How many iterations are required until the amount of update becomes small
depends on the review data and the parameter epsilon.
Moreover, sometimes it won't be converged.
Thus, you should set some limitation to the iterations.
.. code-block:: python
print("Start iterations.")
max_iteration = 10000
for i in range(max_iteration):
# Run one iteration.
diff = graph.update()
print("Iteration %d ends. (diff=%s)" % (i + 1, diff))
if diff < 10**-5: # Set 10^-5 as an acceptable small number.
break
Result
^^^^^^^^
Each reviewer has an anomalous score representing how anomalous the
reviewer is. The score is normalized in 0 to 1. To obtain that score,
use :meth:`anomalous_score<fraud_eagle.graph.Reviewer.anomalous_score>`
property.
The :class:`ReviewGraph<fraud_eagle.graph.ReviewGraph>` has
:meth:`reviewers<fraud_eagle.graph.ReviewGraph.reviewers>` property,
which returns a collection of reviewers the graph has.
Thus, the following code outputs all reviewers' anomalous score.
.. code-block:: python
for r in graph.reviewers:
print(r.name, r.anomalous_score)
On the other hand, each product has a summarized ratings computed from all
reviews posted to the product according to each reviewers' anomalous score.
The summarized ratings are also normalized in 0 to 1.
:meth:`summary<fraud_eagle.graph.Product.summary>` property returns such
summarized rating.
The :class:`ReviewGraph<fraud_eagle.graph.ReviewGraph>` also has
:meth:`products<fraud_eagle.graph.ReviewGraph.products>` property,
which returns a collection of products.
Thus, the following code outputs all products' summarized ratings.
.. code-block:: python
for p in graph.products:
print(p.name, p.summary)
API Reference
---------------
.. toctree::
:glob:
:maxdepth: 2
modules/*
License
---------
This software is released under The GNU General Public License Version 3,
see `COPYING <https://github.com/rgmining/ria/blob/master/COPYING>`_ for more detail.
References
------------
.. [#DEXA2011] Kazuki Tawaramoto, `Junpei Kawamoto`_, `Yasuhito Asano`_, and `Masatoshi Yoshikawa`_,
"|springer| `A Bipartite Graph Model and Mutually Reinforcing Analysis for Review Sites
<http://www.anrdoezrs.net/links/8186671/type/dlg/http://link.springer.com/chapter/10.1007%2F978-3-642-23088-2_25>`_,"
Proc. of `the 22nd International Conference on Database and Expert Systems Applications <http://www.dexa.org/>`_ (DEXA 2011),
pp.341-348, Toulouse, France, August 31, 2011.
.. [#DEIM2015] `川本 淳平`_, 俵本 一輝, `浅野 泰仁`_, `吉川 正俊`_,
"|pdf| `初期レビューを用いた長期間評価推定 <http://db-event.jpn.org/deim2015/paper/253.pdf>`_,"
`第7回データ工学と情報マネジメントに関するフォーラム <http://db-event.jpn.org/deim2015>`_,
D3-6, 福島, 2015年3月2日~4日. |deim2015-slide|
.. [#CIKM2010] `Ee-Peng Lim <https://sites.google.com/site/aseplim/>`_,
`Viet-An Nguyen <http://www.cs.umd.edu/~vietan/>`_,
Nitin Jindal,
`Bing Liu <https://www.cs.uic.edu/~liub/>`_,
`Hady Wirawan Lauw <http://www.smu.edu.sg/faculty/profile/9621/Hady-W-LAUW>`_,
"`Detecting Product Review Spammers Using Rating Behaviors
<http://dl.acm.org/citation.cfm?id=1871557>`_,"
Proc. of the 19th ACM International Conference on Information and Knowledge Management,
pp.939-948, 2010.
.. _Junpei Kawamoto: https://www.jkawamoto.info
.. _Yasuhito Asano: http://www.iedu.i.kyoto-u.ac.jp/intro/member/asano
.. _Masatoshi Yoshikawa: http://www.db.soc.i.kyoto-u.ac.jp/~yoshikawa/
.. _川本 淳平: https://www.jkawamoto.info
.. _浅野 泰仁: http://www.iedu.i.kyoto-u.ac.jp/intro/member/asano
.. _吉川 正俊: http://www.db.soc.i.kyoto-u.ac.jp/~yoshikawa/
.. |springer| image:: img/springer.png
.. |pdf| raw:: html
<i class="fa fa-file-pdf-o" aria-hidden="true"></i>
.. |deim2016-slide| raw:: html
<a href="http://www.slideshare.net/jkawamoto/ss-59672505">
<i class="fa fa-slideshare" aria-hidden="true"></i>
</a>
.. |deim2015-slide| raw:: html
<a href="http://www.slideshare.net/jkawamoto/deim2015-45470497">
<i class="fa fa-slideshare" aria-hidden="true"></i>
</a>
| /rgmining-ria-0.9.6.tar.gz/rgmining-ria-0.9.6/docs/source/index.rst | 0.957298 | 0.767864 | index.rst | pypi |
from __future__ import absolute_import, division
import json
from logging import getLogger
import math
import numpy as np
import networkx as nx
from common import memoized
from ria.credibility import WeightedCredibility
from review import AverageSummary
LOGGER = getLogger(__name__)
class _Node(object):
    """Abstract node of the bipartite model.

    Args:
        graph: parent graph instance.
        name: name of the new node.
            If the name is not given, object.__str__() will be used.

    This class implements __eq__, __ne__, and __hash__ for convenience.

    Attributes:
        name: Name of this node.
    """
    __slots__ = ("_graph", "name", "_hash")

    def __init__(self, graph, name=None):
        """Construct a new node.

        Args:
            graph: parent graph; must be an instance of BipartiteGraph.
            name: Specifying the name of this node.
                If not given, use strings returned from __str__ method.

        Raises:
            ValueError: if graph is not a BipartiteGraph instance.
        """
        if not isinstance(graph, BipartiteGraph):
            raise ValueError(
                "Given graph is not instance of Bipartite:", graph)
        self._graph = graph
        if name:
            self.name = name
        else:
            self.name = super(_Node, self).__str__()
        # Hash is computed lazily and cached on first use.
        self._hash = None

    def __eq__(self, other):
        if not isinstance(other, type(self)):
            return False
        return self.name == other.name

    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it
        # explicitly (the class docstring promises it) to keep equality
        # semantics consistent on both interpreters.
        return not self.__eq__(other)

    def __hash__(self):
        # Compare against None rather than truthiness: a legitimately
        # computed hash value of 0 must not trigger recomputation.
        if self._hash is None:
            self._hash = 13 * hash(type(self)) + 17 * hash(self.name)
        return self._hash

    def __str__(self):
        return self.name
class Reviewer(_Node):
    """A node class representing Reviewer.

    Args:
        graph: an instance of BipartiteGraph representing the parent graph.
        credibility: an instance of credibility.Credibility to be used to update
            scores.
        name: name of this node. (default: None)
        anomalous: initial anomalous score. (default: None)
    """
    __slots__ = ("_anomalous", "_credibility")

    def __init__(self, graph, credibility, name=None, anomalous=None):
        super(Reviewer, self).__init__(graph, name)
        self._anomalous = anomalous
        self._credibility = credibility

    @property
    def anomalous_score(self):
        """Anomalous score of this reviewer.

        Initial anomalous score is :math:`1 / |R|`
        where :math:`R` is a set of reviewers.
        """
        # Test against None instead of truthiness so that an explicitly
        # assigned score of 0.0 is returned rather than silently replaced
        # by the default 1 / |R|.
        if self._anomalous is not None:
            return self._anomalous
        return 1. / len(self._graph.reviewers)

    @anomalous_score.setter
    def anomalous_score(self, v):
        """Set an anomalous score.

        Args:
            v: the new anomalous score.
        """
        self._anomalous = float(v)

    def update_anomalous_score(self):
        """Update anomalous score.

        New anomalous score is a weighted average of differences
        between current summary and reviews. The weights come from credibilities.
        Therefore, the new anomalous score of reviewer :math:`p` is as

        .. math::
           {\\rm anomalous}(r) = \\frac{
             \\sum_{p \\in P} {\\rm credibility}(p)|
             {\\rm review}(r, p)-{\\rm summary}(p)|
           }{
             \\sum_{p \\in P} {\\rm credibility}(p)
           }

        where :math:`P` is a set of products reviewed by reviewer :math:`p`,
        review(:math:`r`, :math:`p`) is the rating reviewer :math:`r` posted
        to product :math:`p`, summary(:math:`p`) and credibility(:math:`p`) are
        summary and credibility of product :math:`p`, respectively.

        Returns:
            absolute difference between old anomalous score and updated one.
        """
        products = self._graph.retrieve_products(self)
        diffs = [
            p.summary.difference(self._graph.retrieve_review(self, p))
            for p in products
        ]
        old = self.anomalous_score
        try:
            self.anomalous_score = np.average(
                diffs, weights=list(map(self._credibility, products)))
        except ZeroDivisionError:
            # All credibilities are zero; fall back to the unweighted mean.
            self.anomalous_score = np.average(diffs)
        return abs(self.anomalous_score - old)
class Product(_Node):
    """A node class representing Product.

    Args:
        graph: An instance of BipartiteGraph representing the parent graph.
        name: Name of this node. (default: None)
        summary_cls: Specify summary type. (default: AverageSummary)
    """
    __slots__ = ("_summary", "_summary_cls")

    def __init__(self, graph, name=None, summary_cls=AverageSummary):
        super(Product, self).__init__(graph, name)
        self._summary = None
        self._summary_cls = summary_cls

    @property
    def summary(self):
        """Summary of reviews for this product.

        Initial summary is computed by

        .. math::
           \\frac{1}{|R|} \\sum_{r \\in R} \\mbox{review}(r),

        where :math:`\\mbox{review}(r)` means review from reviewer :math:`r`.
        """
        # Test against None instead of truthiness: a stored summary whose
        # value is falsy (e.g. zero-like) must not be recomputed from raw
        # reviews on every access.
        if self._summary is not None:
            return self._summary
        reviewers = self._graph.retrieve_reviewers(self)
        return self._summary_cls(
            [self._graph.retrieve_review(r, self) for r in reviewers])

    @summary.setter
    def summary(self, v):
        """Set summary.

        Args:
            v: A new summary. It could be a single number or lists.
        """
        if hasattr(v, "__iter__"):
            self._summary = self._summary_cls(v)
        else:
            self._summary = self._summary_cls(float(v))

    def update_summary(self, w):
        """Update summary.

        The new summary is a weighted average of reviews i.e.

        .. math::
           \\frac{\\sum_{r \\in R} \\mbox{weight}(r) \\times \\mbox{review}(r)}
           {\\sum_{r \\in R} \\mbox{weight}(r)},

        where :math:`R` is a set of reviewers reviewing this product,
        :math:`\\mbox{review}(r)` and :math:`\\mbox{weight}(r)` are
        the review and weight of the reviewer :math:`r`, respectively.

        Args:
            w: A weight function.

        Returns:
            absolute difference between old summary and updated one.
        """
        old = self.summary.v  # pylint: disable=no-member
        reviewers = self._graph.retrieve_reviewers(self)
        reviews = [self._graph.retrieve_review(
            r, self).score for r in reviewers]
        weights = [w(r.anomalous_score) for r in reviewers]
        if sum(weights) == 0:
            # Zero total weight would make the weighted average undefined.
            self.summary = np.mean(reviews)
        else:
            self.summary = np.average(reviews, weights=weights)
        return abs(self.summary.v - old)  # pylint: disable=no-member
class BipartiteGraph(object):
    """Bipartite graph model for review data mining.

    Args:
        summary: specify summary type class, default value is AverageSummary.
        alpha: used to compute weight of anomalous scores, default value is 1.
        credibility: credibility class to be used in this graph.
            (Default: :class:`ria.credibility.WeightedCredibility`)
        reviewer: Class of reviewers.
        product: Class of products.

    Attributes:
        alpha: Parameter used in the weight function of :meth:`update`.
        graph: Directed graph object of networkx holding nodes and review edges.
        reviewers: Collection of reviewers.
        products: Collection of products.
        credibility: Credibility object.
    """

    def __init__(
            self, summary=AverageSummary, alpha=1,
            credibility=WeightedCredibility, reviewer=Reviewer, product=Product):
        """Construct bipartite graph.

        Args:
            summary: specify summary type class, default value is AverageSummary.
            alpha: used to compute weight of anomalous scores, default value is 1.
            credibility: credibility class to be used in this graph.
                (Default: WeightedCredibility)
            reviewer: Class of reviewers.
            product: Class of products.
        """
        self.alpha = alpha
        self.graph = nx.DiGraph()
        self.reviewers = []
        self.products = []
        self._summary_cls = summary
        # The review class is dictated by the chosen summary class.
        self._review_cls = summary.review_class()
        self.credibility = credibility(self)
        self._reviewer_cls = reviewer
        self._product_cls = product

    def new_reviewer(self, name, anomalous=None):
        """Create a new reviewer.

        Args:
            name: name of the new reviewer.
            anomalous: initial anomalous score. (default: None)

        Returns:
            A new reviewer instance.
        """
        n = self._reviewer_cls(
            self, name=name, credibility=self.credibility, anomalous=anomalous)
        self.graph.add_node(n)
        self.reviewers.append(n)
        return n

    def new_product(self, name):
        """Create a new product.

        Args:
            name: name of the new product.

        Returns:
            A new product instance.
        """
        n = self._product_cls(self, name, summary_cls=self._summary_cls)
        self.graph.add_node(n)
        self.products.append(n)
        return n

    def add_review(self, reviewer, product, review, date=None):
        """Add a new review from a given reviewer to a given product.

        Args:
            reviewer: an instance of Reviewer.
            product: an instance of Product.
            review: a float value.
            date: date the review issued.

        Returns:
            the added new review object.

        Raises:
            TypeError: when given reviewer and product aren't instance of
                specified reviewer and product class when this graph is constructed.
        """
        if not isinstance(reviewer, self._reviewer_cls):
            raise TypeError(
                "Type of given reviewer isn't acceptable:", reviewer,
                ", expected:", self._reviewer_cls)
        elif not isinstance(product, self._product_cls):
            raise TypeError(
                "Type of given product isn't acceptable:", product,
                ", expected:", self._product_cls)
        # The review is stored as an edge attribute of the bipartite graph.
        r = self._review_cls(review, date=date)
        self.graph.add_edge(reviewer, product, review=r)
        return r

    # NOTE(review): @memoized caches each result, so graph topology should
    # presumably not change after the retrieval methods below are first
    # called — confirm against the `common.memoized` implementation.
    @memoized
    def retrieve_products(self, reviewer):
        """Retrieve products reviewed by a given reviewer.

        Args:
            reviewer: A reviewer.

        Returns:
            A list of products which the reviewer reviews.

        Raises:
            TypeError: when given reviewer isn't instance of specified reviewer
                class when this graph is constructed.
        """
        if not isinstance(reviewer, self._reviewer_cls):
            raise TypeError(
                "Type of given reviewer isn't acceptable:", reviewer,
                ", expected:", self._reviewer_cls)
        return list(self.graph.successors(reviewer))

    @memoized
    def retrieve_reviewers(self, product):
        """Retrieve reviewers who reviewed a given product.

        Args:
            product: A product specifying reviewers.

        Returns:
            A list of reviewers who review the product.

        Raises:
            TypeError: when given product isn't instance of specified product
                class when this graph is constructed.
        """
        if not isinstance(product, self._product_cls):
            raise TypeError(
                "Type of given product isn't acceptable:", product,
                ", expected:", self._product_cls)
        return list(self.graph.predecessors(product))

    @memoized
    def retrieve_review(self, reviewer, product):
        """Retrieve review that the given reviewer put the given product.

        Args:
            reviewer: An instance of Reviewer.
            product: An instance of Product.

        Returns:
            A review object.

        Raises:
            TypeError: when given reviewer and product aren't instance of
                specified reviewer and product class when this graph is constructed.
            KeyError: When the reviewer does not review the product.
        """
        if not isinstance(reviewer, self._reviewer_cls):
            raise TypeError(
                "Type of given reviewer isn't acceptable:", reviewer,
                ", expected:", self._reviewer_cls)
        elif not isinstance(product, self._product_cls):
            raise TypeError(
                "Type of given product isn't acceptable:", product,
                ", expected:", self._product_cls)
        try:
            return self.graph[reviewer][product]["review"]
        # NOTE(review): a missing edge normally raises KeyError from the
        # nested lookup itself; TypeError is caught here presumably for an
        # older networkx behavior — confirm against the supported version.
        except TypeError:
            raise KeyError(
                "{0} does not review {1}.".format(reviewer, product))

    def update(self):
        """Update reviewers' anomalous scores and products' summaries.

        Summaries are updated first (with the current anomalous scores),
        then anomalous scores are updated against the new summaries.

        Returns:
            maximum absolute difference between old summary and new one, and
            old anomalous score and new one.
        """
        w = self._weight_generator(self.reviewers)
        diff_p = max(p.update_summary(w) for p in self.products)
        diff_a = max(r.update_anomalous_score() for r in self.reviewers)
        return max(diff_p, diff_a)

    def _weight_generator(self, reviewers):
        """Compute a weight function for the given reviewers.

        The weight is a reversed logistic curve over the standardized
        anomalous score, scaled by ``self.alpha``.

        Args:
            reviewers: a set of reviewers to compute weight function.

        Returns:
            a function computing a weight for a reviewer.
        """
        scores = [r.anomalous_score for r in reviewers]
        mu = np.average(scores)
        sigma = np.std(scores)
        if sigma:
            def w(v):
                """Compute a weight for the given reviewer.

                Args:
                    v: anomalous score of a reviewer.

                Returns:
                    weight of the given anomalous score.
                """
                try:
                    exp = math.exp(self.alpha * (v - mu) / sigma)
                    return 1. / (1. + exp)
                except OverflowError:
                    # A huge standardized score overflows exp(); the limit of
                    # the weight in that case is 0.
                    return 0.
            return w
        else:
            # Sigma = 0 means all reviews have same anomalous scores.
            # In this case, all reviews should be treated as same.
            return lambda v: 1.

    def dump_credibilities(self, output):
        """Dump credibilities of all products as JSON lines.

        Args:
            output: a writable object.
        """
        for p in self.products:
            json.dump({
                "product_id": p.name,
                "credibility": self.credibility(p)
            }, output)
            output.write("\n")

    def to_pydot(self):
        """Convert this graph to PyDot object.

        Returns:
            PyDot object representing this graph.
        """
        return nx.nx_pydot.to_pydot(self.graph)
from __future__ import absolute_import
from ria import bipartite
class Reviewer(bipartite.Reviewer):
    """Reviewer which uses normalized summations for updated anomalous scores.

    This reviewer updates its anomalous score by summing partial anomalous
    scores instead of taking a weighted average.
    """
    __slots__ = ()

    def update_anomalous_score(self):
        """Update anomalous score.

        New anomalous score is the summation of weighted differences
        between current summary and reviews. The weights come from credibilities.
        Therefore, the new anomalous score is defined as

        .. math::
           {\\rm anomalous}(r)
           = \\sum_{p \\in P} \\mbox{review}(p) \\times \\mbox{credibility}(p) - 0.5

        where :math:`P` is a set of products reviewed by this reviewer,
        review(:math:`p`) and credibility(:math:`p`) are
        review and credibility of product :math:`p`, respectively.

        Returns:
            absolute difference between old anomalous score and updated one.
        """
        previous = self.anomalous_score
        total = 0.0
        for product in self._graph.retrieve_products(self):
            review = self._graph.retrieve_review(self, product)
            deviation = product.summary.difference(review)
            total += deviation * self._credibility(product) - 0.5
        self.anomalous_score = total
        return abs(self.anomalous_score - previous)
class BipartiteGraph(bipartite.BipartiteGraph):
    """Bipartite Graph implementing OneSum algorithm.

    This graph employs a normalized summation of deviation times credibility
    as the updated anomalous scores for each reviewer.

    Constructor receives as same arguments as
    :class:`ria.bipartite.BipartiteGraph` but `reviewer` argument is ignored
    since this graph uses :class:`ria.bipartite_sum.Reviewer` instead.
    """

    def __init__(self, **kwargs):
        kwargs["reviewer"] = Reviewer
        super(BipartiteGraph, self).__init__(**kwargs)

    def update(self):
        """Update reviewers' anomalous scores and products' summaries.

        The update consists of 2 steps;

        Step1 (updating summaries):
          Update summaries of products with anomalous scores of reviewers
          and weight function. The weight is calculated by the manner in
          :class:`ria.bipartite.BipartiteGraph`.

        Step2 (updating anomalous scores):
          Update its anomalous score of each reviewer by computing the summation
          of deviation times credibility.
          See :meth:`Reviewer.update_anomalous_score` for more details.
          After that those updated anomalous scores are normalized so that
          every value is in :math:`[0, 1]`.

        Returns:
            maximum absolute difference between old summary and new one, and
            old anomalous score and new one. This value is not normalized and
            thus it may be greater than actual normalized difference.
        """
        res = super(BipartiteGraph, self).update()
        # Normalize anomalous scores into [0, 1] by min-max scaling.
        # Initialize with -inf, not None: comparing None with a float via
        # max() raises TypeError on Python 3.
        max_v = float("-inf")
        min_v = float("inf")
        for r in self.reviewers:
            max_v = max(max_v, r.anomalous_score)
            min_v = min(min_v, r.anomalous_score)
        width = max_v - min_v
        if width:
            for r in self.reviewers:
                r.anomalous_score = (r.anomalous_score - min_v) / width
        return res
from __future__ import absolute_import
import numpy as np
from common import memoized
class UniformCredibility(object):
    """Credibility which assigns the constant 1 to every product.

    Formally, this credibility is defined by

    .. math::
       {\\rm cred}(p) = 1,

    where *p* is a product.

    Uniform credibility does not use any arguments to construct.
    """
    __slots__ = ()

    def __init__(self, *unused_args):
        """Accept and ignore any positional arguments."""
        pass

    def __call__(self, product):
        """Compute credibility of a given product.

        Args:
            product: An instance of :class:`bipartite.Product`.

        Returns:
            Always 1.
        """
        return 1.
class GraphBasedCredibility(object):
    """Abstract class of credibility using a Bipartite graph.

    Args:
        g: A bipartite graph instance.

    This class provides two helper methods; :meth:`reviewers` and
    :meth:`review_score`.
    """
    # One-element tuple: the original string form ("_g") also declares a
    # single slot, but the tuple spelling is conventional and less error-prone.
    __slots__ = ("_g",)

    def __init__(self, g):
        """Construct a GraphBasedCredibility with a given graph instance g.

        Args:
            g: A bipartite graph instance.
        """
        self._g = g

    def __call__(self, product):
        """Compute credibility of a given product.

        Args:
            product: An instance of :class:`ria.bipartite.Product`.

        Raises:
            NotImplementedError: always; subclasses must override this.
        """
        raise NotImplementedError

    def reviewers(self, product):
        """Find reviewers who have reviewed a given product.

        Args:
            product: An instance of :class:`ria.bipartite.Product`.

        Returns:
            A list of reviewers who have reviewed the product.
        """
        return self._g.retrieve_reviewers(product)

    def review_score(self, reviewer, product):
        """Find a review score from a given reviewer to a product.

        Args:
            reviewer: Reviewer i.e. an instance of :class:`ria.bipartite.Reviewer`.
            product: Product i.e. an instance of :class:`ria.bipartite.Product`.

        Returns:
            A review object representing the review from the reviewer to the product.
        """
        return self._g.retrieve_review(reviewer, product).score
class WeightedCredibility(GraphBasedCredibility):
    """Credibility using unbiased variance of review scores.

    Args:
        g: an instance of bipartite graph.

    The credibility computed by this class is defined by

    .. math::
       {\\rm cred}(p) = \\begin{cases}
         0.5 \\quad \\mbox{if} \\; N_{p} = 1, \\\\
         \\frac{\\log N_{p}}{\\sigma^{2} + 1} \\quad \\mbox{otherwise},
       \\end{cases}

    where :math:`N_{p}` is the number of reviews for the product *p*
    and :math:`\\sigma^{2}` is the unbiased variance of review scores.
    The unbiased variance is defined by

    .. math::
       \\sigma^{2} = \\frac{1}{N_{p} - 1} \\sum_{r \\in R} \\left(
         {\\rm review}(r, p)
         - \\frac{1}{N_{p}}\\sum_{r' \\in r} {\\rm review}(r', p)
       \\right)^{2},

    where :math:`{\\rm review}(r, p)` is a review from reviewer *r* to
    product *p*.
    """
    @memoized
    def __call__(self, product):
        """Compute credibility of a given product.

        Args:
            product: An instance of :class:`bipartite.Product`.

        Returns:
            The credibility of the product: 0.5 when the product has exactly
            one review, otherwise log(N) / (variance + 1), which is
            non-negative but not necessarily >= 0.5.
        """
        reviewers = self.reviewers(product)
        Nq = len(reviewers)
        if Nq == 1:
            # A single review gives no variance information; use a fixed value.
            return 0.5
        else:
            # Computing the unbiased variance of scores.
            var = np.var([self.review_score(r, product)
                          for r in reviewers], ddof=1)
            return np.log(Nq) / (var + 1)
"""Implementation of RSD.
"""
from collections.abc import Collection
from functools import cache
from typing import Final, NamedTuple, Optional
import networkx as nx
import numpy as np
def _scale(v: float) -> float:
    """Map a real value into (-1, 1) with a shifted, rescaled sigmoid.

    The output is defined by

    .. math::
       {\\rm scale}(v) = \\frac{2}{1 + \\exp(-v)} - 1

    Args:
        v: Input value.

    Returns:
        Output value defined above.
    """
    sigmoid = 1.0 / (1.0 + np.exp(-v))
    return float(2.0 * sigmoid - 1.0)
class Node:
    """Abstract class of review graph.

    Args:
        graph: the graph object this node will belong to.
        name: name of this node; defaults to ``object.__str__`` output.
    """

    # Name of this node.
    name: Final[str]
    # Graph object this node belongs to.
    _g: Final["ReviewGraph"]

    __slots__ = ("name", "_g")

    def __init__(self, graph: "ReviewGraph", name: Optional[str] = None) -> None:
        self._g = graph
        if name:
            self.name = name
        else:
            self.name = super().__str__()

    def __eq__(self, other: object) -> bool:
        # Nodes are equal iff they are of the same type and share a name.
        return isinstance(other, type(self)) and self.name == other.name

    def __hash__(self) -> int:
        return 13 * hash(type(self)) + 17 * hash(self.name)
class Reviewer(Node):
    """A node class representing a reviewer.

    Args:
        graph: Graph object this reviewer belongs to.
        name: Name of this reviewer.
        anomalous: Initial anomalous score (default: None).
    """

    # Trustiness of this reviewer; updated via _scale in update_trustiness.
    trustiness: float

    __slots__ = ("trustiness",)

    def __init__(self, graph: "ReviewGraph", name: Optional[str] = None, anomalous: Optional[float] = None) -> None:
        super().__init__(graph, name)
        # Test against None, not truthiness: an explicit initial anomalous
        # score of 0.0 must yield trustiness 1.0, not the default 0.5.
        self.trustiness = 1.0 - anomalous if anomalous is not None else 0.5

    @property
    def anomalous_score(self) -> float:
        """Returns the anomalous score of this reviewer.

        The anomalous score is defined by 1 - trustiness.
        """
        return 1.0 - self.trustiness

    def update_trustiness(self) -> float:
        """Update trustiness of this reviewer.

        The updated trustiness of a reviewer :math:`u` is defined by

        .. math::
           {\\rm trustiness}(u) =
              \\frac{2}{1 + \\exp(-\\sum_{r \\in R(u)} {\\rm honesty(r)} )} - 1

        where :math:`R(u)` is a set of reviews the reviewer :math:`u` posts.

        Returns;
          absolute difference between the old trustiness and updated one.
        """
        total = sum(re.honesty for re in self._g.retrieve_reviews_by_reviewer(self))
        new = _scale(total)
        diff = abs(self.trustiness - new)
        self.trustiness = new
        return diff

    def __str__(self) -> str:
        return f"{self.name}: {self.anomalous_score}"
class Product(Node):
    """A node class representing a product.

    Args:
        graph: Graph object this product belongs to.
        name: Name of this product.
    """

    # Reliability score of this product; updated via _scale, whose range is
    # (-1, 1). Initialized to 0.5.
    reliability: float

    __slots__ = ("reliability",)

    def __init__(self, graph: "ReviewGraph", name: Optional[str] = None):
        super(Product, self).__init__(graph, name)
        self.reliability = 0.5

    @property
    def summary(self) -> float:
        """Summary of reviews.

        This value is same as reliability.
        Original algorithm uses *reliability* but our algorithm uses *summary*.
        For convenience, both properties remain.
        """
        return self.reliability

    def update_reliability(self) -> float:
        """Update product's reliability.

        The new reliability is defined by

        .. math::
           {\\rm reliability}(p) = \\frac{2}{1 + e^{-\\theta}} - 1, \\quad
           \\theta = \\sum_{r \\in R(p)}
                {\\rm trustiness}(r)({\\rm review}(r, p) - \\hat{s}),

        where :math:`R(p)` is a set of reviewers product *p* receives,
        trustiness is defined in :meth:`Reviewer.trustiness`,
        review(*r*, *p*) is the review score reviewer *r* has given to product *p*,
        and :math:`\\hat{s}` is the median of review scores.

        Returns:
            absolute difference between old reliability and new one.
        """
        reviews = self._g.retrieve_reviews_by_product(self)
        median_rating = float(np.median([re.rating for re in reviews]))
        theta = 0.0
        for re in reviews:
            for reviewer in self._g.retrieve_reviewers(re):
                theta += reviewer.trustiness * (re.rating - median_rating)
        updated = _scale(theta)
        diff = abs(self.reliability - updated)
        self.reliability = updated
        return diff

    def __str__(self) -> str:
        return f"{self.name}: {self.summary}"
class Review:
    """A graph entity representing a review.

    Args:
        graph: Graph object this review belongs to.
        time: When this review is posted.
        rating: Rating of this review.
    """

    # Rating score of this review.
    rating: Final[float]
    # Honesty score; starts at 0.5.
    honesty: float
    # Agreement score; starts at 0.5.
    agreement: float
    # Time when this review was posted.
    time: Final[int]
    # Parent graph.
    _g: Final["ReviewGraph"]

    __slots__ = ("rating", "honesty", "agreement", "time", "_g")

    def __init__(self, graph: "ReviewGraph", time: int, rating: float) -> None:
        self._g = graph
        self.time = time
        self.rating = rating
        self.honesty = 0.5
        self.agreement = 0.5

    def update_honesty(self) -> float:
        """Update honesty of this review.

        The updated honesty of this review :math:`r` is defined by

        .. math::
           {\\rm honesty}(r)
            = |{\\rm reliability}(P(r))| \\times {\\rm agreement}(r)

        where :math:`P(r)` is the product this review posted.

        Returns:
            absolute difference between old honesty and new one.
        """
        res = 0.0
        for p in self._g.retrieve_products(self):
            res += abs(p.reliability) * self.agreement
        diff = abs(self.honesty - res)
        self.honesty = res
        return diff

    def update_agreement(self, delta: float, score_diff: float = 1.0 / 5.0) -> float:
        """Update agreement of this review.

        This process considers reviews posted in a close time span of this review.
        More precisely, let :math:`t` be the time when this review posted
        and :math:`\\delta` be the time span,
        only reviews of which posted times are in :math:`[t - \\delta, t+\\delta]`
        are considered.

        The updated agreement of a review :math:`r` will be computed with such
        reviews by

        .. math::
           {\\rm agreement}(r)
           = \\frac{2}{1 + \\exp(
              \\sum_{v \\in R_{+}} {\\rm trustiness}(v)
              - \\sum_{v \\in R_{-}} {\\rm trustiness}(v)
           )} - 1

        where :math:`R_{+}` is a set of reviews close to the review :math:`r`,
        i.e. the difference between ratings are smaller than or equal to delta,
        :math:`R_{-}` is the other reviews. The trustiness of a review means
        the trustiness of the reviewer who posts the review.

        Args:
            delta: a time span :math:`\\delta`.
                Only reviews posted in the span will be considered for this update.
            score_diff: maximum rating difference for two reviews to be
                considered as agreeing. This was a hard-coded constant; the
                default 1/5 preserves the original behavior.

        Returns:
            absolute difference between old agreement and new one.
        """
        agree, disagree = self._g.retrieve_reviews(self, delta, score_diff)
        res = 0.0
        for re in agree:
            for r in self._g.retrieve_reviewers(re):
                res += r.trustiness
        for re in disagree:
            for r in self._g.retrieve_reviewers(re):
                res -= r.trustiness
        new = _scale(res)
        diff = abs(self.agreement - new)
        self.agreement = new
        return diff

    def __str__(self) -> str:
        return f"Review (time={self.time}, rating={self.rating}, agreement={self.agreement}, honesty={self.honesty})"
class ReviewSet(NamedTuple):
    """Result of a review lookup: reviews that agree and reviews that don't."""

    # Reviews whose ratings are close to the reference review.
    agree: Collection[Review]
    # Reviews whose ratings differ from the reference review.
    disagree: Collection[Review]
class ReviewGraph:
    """A bipartite graph of which one set of nodes represent reviewers and the
    other set of nodes represent products.

    Reviewer and product nodes are connected through review nodes: each
    review has an incoming edge from its reviewer and an outgoing edge to
    the reviewed product.

    Args:
        theta: A parameter for updating.
            See `the paper <https://ieeexplore.ieee.org/document/6137345?arnumber=6137345>`__ for more details.
    """

    graph: Final[nx.DiGraph]          # Underlying networkx digraph.
    reviewers: Final[list[Reviewer]]  # All reviewer nodes.
    products: Final[list[Product]]    # All product nodes.
    reviews: Final[list[Review]]      # All review nodes.
    _theta: Final[float]              # Time-ratio parameter of the algorithm.
    _delta: Optional[float]           # Cached time delta; None means "not computed yet".

    def __init__(self, theta: float) -> None:
        self.graph = nx.DiGraph()
        self.reviewers = []
        self.products = []
        self.reviews = []
        self._theta = theta
        self._delta = None

    @property
    def delta(self) -> float:
        """Time delta.

        This value is defined by
        :math:`\\delta = (t_{\\rm max} - t_{\\rm min}) \\times \\theta`,
        where :math:`t_{\\rm max}, t_{\\rm min}` are the maximum time,
        minimum time of all reviews, respectively, and :math:`\\theta` is the
        given parameter defining the time ratio.

        Raises:
            ValueError: if the graph has no reviews yet.
        """
        # Bug fix: compare against None, not truthiness.  A legitimately
        # computed delta of 0.0 (all reviews share one timestamp) used to
        # fail the `if not self._delta` test and be recomputed on every
        # access.
        if self._delta is None:
            min_time = min(r.time for r in self.reviews)
            max_time = max(r.time for r in self.reviews)
            self._delta = (max_time - min_time) * self._theta
        return self._delta

    def new_reviewer(self, name: Optional[str] = None, anomalous: Optional[float] = None) -> Reviewer:
        """Create a new reviewer.

        Args:
            name: the name of the new reviewer.
            anomalous: the anomalous score of the new reviewer.

        Returns:
            A new reviewer instance.
        """
        n = Reviewer(self, name=name, anomalous=anomalous)
        self.graph.add_node(n)
        self.reviewers.append(n)
        return n

    def new_product(self, name: Optional[str] = None) -> Product:
        """Create a new product.

        Args:
            name: The name of the new product.

        Returns:
            A new product instance.
        """
        n = Product(self, name)
        self.graph.add_node(n)
        self.products.append(n)
        return n

    def add_review(self, reviewer: Reviewer, product: Product, review: float, time: Optional[int] = None) -> Review:
        """Add a new review.

        Args:
            reviewer: An instance of Reviewer.
            product: An instance of Product.
            review: A real number representing review score.
            time: An integer representing reviewing time. (optional)

        Returns:
            the new review object.
        """
        # Bug fix: compare against None, not truthiness.  A review posted
        # at time 0 used to be renumbered with len(self.reviews) instead of
        # keeping its explicit timestamp.
        if time is None:
            re = Review(self, len(self.reviews), review)
        else:
            re = Review(self, time, review)
        self.graph.add_node(re)
        self.reviews.append(re)
        self.graph.add_edge(reviewer, re)
        self.graph.add_edge(re, product)
        # Invalidate the cached time delta; it depends on review times.
        # NOTE(review): the @cache'd retrieve_* results below are NOT
        # invalidated here, so the graph is assumed to be fully built
        # before any retrieval/update calls — confirm with callers.
        self._delta = None
        return re

    @cache
    def retrieve_reviewers(self, review: Review) -> Collection[Reviewer]:
        """Find reviewers associated with a review.

        Args:
            review: A review instance.

        Returns:
            A list of reviewers associated with the review.
        """
        return list(self.graph.predecessors(review))

    @cache
    def retrieve_products(self, review: Review) -> Collection[Product]:
        """Find products associated with a review.

        Args:
            review: A review instance.

        Returns:
            A list of products associated with the given review.
        """
        return list(self.graph.successors(review))

    @cache
    def retrieve_reviews_by_reviewer(self, reviewer: Reviewer) -> Collection[Review]:
        """Find reviews given by a reviewer.

        Args:
            reviewer: Reviewer

        Returns:
            A list of reviews given by the reviewer.
        """
        return list(self.graph.successors(reviewer))

    @cache
    def retrieve_reviews_by_product(self, product: Product) -> Collection[Review]:
        """Find reviews to a product.

        Args:
            product: Product

        Returns:
            A list of reviews to the product.
        """
        return list(self.graph.predecessors(product))

    def retrieve_reviews(
        self, review: Review, time_diff: Optional[float] = None, score_diff: float = 0.25
    ) -> ReviewSet:
        """Find agree and disagree reviews.

        This method retrieves two groups of reviews posted to the same
        product(s) as the given review.  Agree reviews have similar scores
        to the given review; disagree reviews have different scores.

        Args:
            review: A review instance.
            time_diff: maximum time distance to consider (unlimited if omitted).
            score_diff: maximum rating distance to count as agreeing.

        Returns:
            A tuple consisting of (a list of agree reviews, a list of disagree reviews).
        """
        # NOTE(review): truthiness is kept deliberately — an explicit
        # time_diff of 0 (e.g. a zero delta when all reviews share one
        # timestamp) is treated as "unlimited", which keeps same-time
        # reviews in scope.
        if not time_diff:
            time_diff = float("inf")
        agree, disagree = [], []
        for p in self.retrieve_products(review):
            for re in self.retrieve_reviews_by_product(p):
                if re == review:
                    continue
                if abs(re.time - review.time) < time_diff:
                    if abs(re.rating - review.rating) < score_diff:
                        agree.append(re)
                    else:
                        disagree.append(re)
        return ReviewSet(agree, disagree)

    def update(self) -> float:
        """Update reviewers' anomalous scores and products' summaries.

        This update process consists of four steps;

        1. Update honesty of reviews (See also :meth:`Review.update_honesty`),
        2. Update trustiness of reviewers
           (See also :meth:`Reviewer.update_trustiness`),
        3. Update reliability of products
           (See also :meth:`Product.update_reliability`),
        4. Update agreements of reviews
           (See also :meth:`Review.update_agreement`).

        Returns:
            summation of maximum absolute updates for the above four steps.

        Raises:
            ValueError: if the graph has no reviews, reviewers, or products
                (``max`` of an empty sequence).
        """
        diff = max(re.update_honesty() for re in self.reviews)
        diff += max(r.update_trustiness() for r in self.reviewers)
        diff += max(p.update_reliability() for p in self.products)
        diff += max(re.update_agreement(self.delta) for re in self.reviews)
        return diff
"""Analyze and handle datasets.
"""
from __future__ import absolute_import
import datetime
import logging
from os import path
import sys
from common.writer import JSONWriter, CSVWriter
import dataset_io
import dsargparse
import ria
import numpy as np
import helper
# Input type
def file_or_list(value):
    """Argument type for dsargparse.

    If the argument names an existing file, the file is opened and its
    lines are yielded one by one.  Otherwise the argument is treated as a
    comma-separated list and each item is yielded.

    Args:
        value: Argument value.

    Yields:
        each line in the file if the given value points to a file,
        otherwise each item in the comma-separated string.
    """
    if not path.exists(value):
        for token in value.split(","):
            yield token
        return
    with open(value) as handle:
        for record in handle:
            yield record
#------------------------------------------------
# Reviewer
#------------------------------------------------
def retrieve_reviewers(graph, output, target):
    """Output the ID of reviewers who review at least one of the given products.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        target: a list of target product ids.
    """
    wanted = {item.strip() for item in target}
    for reviewer in graph.reviewers:
        reviewed = graph.retrieve_products(reviewer)
        # One matching product is enough; stop scanning early.
        if any(product.name in wanted for product in reviewed):
            output.write(reviewer.name)
            output.write("\n")
def active_reviewers(graph, output, threshold=2):
    """Output the ID of reviewers who review at least ``threshold`` items.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        threshold: the threshold (default: 2).
    """
    for reviewer in graph.reviewers:
        review_count = len(graph.retrieve_products(reviewer))
        if review_count < threshold:
            continue
        output.write(reviewer.name)
        output.write("\n")
def reviewer_size(graph, output, target, csv_format=False):
    """Output the number of reviews of each reviewer who reviews target products.

    For each reviewer reviewing at least one product in ``target``, one
    record is emitted per matching product.  The default output is one JSON
    object per line with the schema::

        {
            "reviewer": <Reviewer ID>,
            "size": <The number of reviews the reviewer posts>,
            "product": <Product ID which the reviewer reviews in the targets>
        }

    With ``csv_format`` the records are written as CSV, the first line
    being the header.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        target: a list of target object IDs.
        csv_format: If True, outputs will be formatted in CSV format.
    """
    if csv_format:
        writer = CSVWriter(output, ("reviewer", "size", "product"))
    else:
        writer = JSONWriter(output)
    wanted = set(target)
    for reviewer in graph.reviewers:
        reviewed = graph.retrieve_products(reviewer)
        for product in reviewed:
            if product.name not in wanted:
                continue
            writer.write({
                "size": len(reviewed),
                "reviewer": reviewer.name,
                "product": product.name,
            })
def filter_reviewers(graph, output, target, csv_format=False):
    """Output reviews posted by reviewers whose IDs match the given set of IDs.

    The default output is one JSON object per line with the schema::

        {
            "member_id": <Reviewer ID>,
            "product_id": <Product ID>,
            "rating": <Rating score>,
            "date": <Date the review posted>
        }

    With ``csv_format`` the records are written as CSV, the first line
    being the header.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        target: a list of target reviewer ids.
        csv_format: If True, outputs will be formatted in CSV format.
    """
    if csv_format:
        writer = CSVWriter(output, ("member_id", "product_id", "rating", "date"))
    else:
        writer = JSONWriter(output)
    wanted = set(target)
    for reviewer in graph.reviewers:
        if reviewer.name not in wanted:
            continue
        for product in graph.retrieve_products(reviewer):
            review = graph.retrieve_review(reviewer, product)
            posted = review.date
            if posted:
                # Dates are stored as YYYYMMDD integers; emit ISO style.
                posted = datetime.datetime.strptime(
                    str(review.date), "%Y%m%d").strftime("%Y-%m-%d")
            writer.write({
                "member_id": reviewer.name,
                "product_id": product.name,
                "rating": review.score,
                "date": posted,
            })
#------------------------------------------------
# Product
#------------------------------------------------
def rating_average(graph, output, csv_format=False):
    """Output average rating scores of each product.

    The default output is one JSON object per line with the schema::

        {
            "product_id": <Product ID>,
            "summary": <Average rating score>
        }

    With ``csv_format`` the records are written as CSV, the first line
    being the header.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        csv_format: If True, outputs will be formatted in CSV format.
    """
    if csv_format:
        writer = CSVWriter(output, ("product_id", "summary"))
    else:
        writer = JSONWriter(output)
    for product in graph.products:
        scores = [
            graph.retrieve_review(reviewer, product).score
            for reviewer in graph.retrieve_reviewers(product)
        ]
        writer.write({
            "product_id": product.name,
            "summary": np.mean(scores),
        })
def distinct_product(graph, output):
    """Output distinct product IDs, one per line.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
    """
    names = (product.name for product in graph.products)
    for name in names:
        output.write(name)
        output.write("\n")
def popular_products(graph, output, threshold=2):
    """Output IDs of products of which the number of reviews >= threshold.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        threshold: the threshold (default: 2).
    """
    for product in graph.products:
        reviewer_count = len(graph.retrieve_reviewers(product))
        if reviewer_count < threshold:
            continue
        output.write(product.name)
        output.write("\n")
def filter_product(graph, output, target, csv_format=False):
    """Output reviews posted to products of which IDs match the given set of IDs.

    The default output is one JSON object per line with the schema::

        {
            "member_id": <Reviewer ID>,
            "product_id": <Product ID>,
            "rating": <Rating score>,
            "date": <Date the review posted>
        }

    With ``csv_format`` the records are written as CSV, the first line
    being the header.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object.
        target: a list of target product IDs.
        csv_format: If True, outputs will be formatted in CSV format.
    """
    if csv_format:
        writer = CSVWriter(output, ("member_id", "product_id", "rating", "date"))
    else:
        writer = JSONWriter(output)
    wanted = set(target)
    for product in graph.products:
        if product.name not in wanted:
            continue
        for reviewer in graph.retrieve_reviewers(product):
            review = graph.retrieve_review(reviewer, product)
            posted = review.date
            if posted:
                # Dates are stored as YYYYMMDD integers; emit ISO style.
                posted = datetime.datetime.strptime(
                    str(review.date), "%Y%m%d").strftime("%Y-%m-%d")
            writer.write({
                "member_id": reviewer.name,
                "product_id": product.name,
                "rating": review.score,
                "date": posted,
            })
def review_variance(graph, output, target=None, csv_format=False):
    """Output variances of reviews for each product.

    Each line of the output will be formatted as a JSON document,
    of which schema is::

        {
            "product_id": <Product ID>,
            "size": <number of reviews>,
            "variance": <variance of reviews>
        }

    In the outputs, one line represents one JSON object.
    CSV format is also supported; in that case the first line is a header.

    If target is supplied, only products of which id is in the target will
    be outputted.

    Args:
        graph: Graph instance to which the target dataset is loaded.
        output: a writable object to be outputted results.
        target: an iterable of target product ids (default: None).
        csv_format: If True, outputs will be formatted in CSV format.
    """
    if csv_format:
        # Bug fix: the header used to be ("member_id", "product_id",
        # "rating", "date") — copied from the filter commands — while the
        # rows written below carry product_id/size/variance keys.
        writer = CSVWriter(output, ("product_id", "size", "variance"))
    else:
        writer = JSONWriter(output)
    if target:
        target_ids = {s.strip() for s in target}
    else:
        target_ids = None
    for p in graph.products:
        if target_ids and p.name not in target_ids:
            continue
        reviews = [
            graph.retrieve_review(r, p).score
            for r in graph.retrieve_reviewers(p)
        ]
        # Products without reviews have no variance to report.
        if len(reviews) == 0:
            continue
        writer.write({
            "product_id": p.name,
            "size": len(reviews),
            "variance": np.var(reviews)
        })
def _dispatch(cmd, dataset, dataset_param, additional, **kwargs):
    """Load the dataset(s) into a review graph and invoke the chosen sub-command.

    Args:
        cmd: sub-command function to run; receives the graph plus **kwargs.
        dataset: name of the dataset to be loaded.
        dataset_param: list of key=value parameters for the dataset loader.
        additional: paths of extra dataset files to merge into the graph.
    """
    # The analysis commands only need graph traversal, so the plain "one"
    # graph from RIA is used as the container.
    graph = helper.load(ria.one_graph(), dataset, dataset_param)
    for extra in additional:
        with open(extra) as fp:
            dataset_io.load(graph, fp)
    logging.info("Start analyzing.")
    cmd(graph=graph, **kwargs)
def main():
    """The main function.

    Builds the command line interface, loads the requested dataset into a
    review graph, and dispatches to the selected analysis sub-command.

    Returns:
        ``None`` on success, or an error message string on failure (the
        dsargparse convention for exit status).
    """
    logging.basicConfig(level=logging.INFO, stream=sys.stderr)
    parser = dsargparse.ArgumentParser(main=main)
    parser.add_argument(
        "--output", default=sys.stdout, type=dsargparse.FileType("w"),
        help="Output file (default: stdout).")
    parser.add_argument(
        "dataset", choices=helper.DATASETS.keys(),
        help=(
            "choose one dataset to be analyzed.\n"
            "If choose `file`, give a file path via dataset-param with file key\n"
            "i.e. --dataset-param file=<path>."))
    parser.add_argument(
        "--dataset-param", action="append", default=[], dest="dataset_param",
        help=(
            "key and value pair which are connected with '='.\n"
            "This option can be set multiply."))
    parser.add_argument(
        "--additional-dataset", action="append", default=[], dest="additional",
        help=(
            "add an additional dataset file to be loaded.\n"
            "This option can be set multiply."))
    subparsers = parser.add_subparsers()

    # Reviewer sub-commands.
    reviewer_cmd = subparsers.add_parser(
        name="reviewer", help="analyze reviewer information").add_subparsers()
    retrieve_reviewers_cmd = reviewer_cmd.add_parser(
        retrieve_reviewers, name="retrieve")
    retrieve_reviewers_cmd.add_argument(
        "target", type=dsargparse.FileType("r"),
        help="a file containing target product IDs.")
    active_reviewer_cmd = reviewer_cmd.add_parser(
        active_reviewers, name="active")
    active_reviewer_cmd.add_argument("--threshold", type=int, default=2)
    size_cmd = reviewer_cmd.add_parser(reviewer_size)
    size_cmd.add_argument(
        "target", type=dsargparse.FileType("r"))
    size_cmd.add_argument(
        "--csv", action="store_true", dest="csv_format",
        help="Outputs will be formatted in CSV format.")
    filter_reviewer_cmd = reviewer_cmd.add_parser(
        filter_reviewers, name="filter")
    filter_reviewer_cmd.add_argument(
        "target", type=dsargparse.FileType("r"),
        help="a file containing target reviewer IDs.")
    filter_reviewer_cmd.add_argument(
        "--csv", action="store_true", dest="csv_format",
        help="Outputs will be formatted in CSV format.")

    # Product sub-commands.
    product_cmd = subparsers.add_parser(
        name="product", help="analyze product information").add_subparsers()
    rating_average_cmd = product_cmd.add_parser(rating_average, name="average")
    rating_average_cmd.add_argument(
        "--csv", action="store_true", dest="csv_format",
        help="Outputs will be formatted in CSV format.")
    product_cmd.add_parser(distinct_product, name="distinct")
    popular_products_cmd = product_cmd.add_parser(
        popular_products, name="popular")
    popular_products_cmd.add_argument("--threshold", type=int, default=2)
    filter_product_cmd = product_cmd.add_parser(filter_product, name="filter")
    filter_product_cmd.add_argument(
        "target", type=dsargparse.FileType("r"),
        help="a file containing target product IDs.")
    filter_product_cmd.add_argument(
        "--csv", action="store_true", dest="csv_format",
        help="Outputs will be formatted in CSV format.")
    review_variance_cmd = product_cmd.add_parser(
        review_variance, name="variance")
    review_variance_cmd.add_argument(
        "--target", type=dsargparse.FileType("r"),
        help="a file consisting of a list of product ids.")
    review_variance_cmd.add_argument(
        "--csv", action="store_true", dest="csv_format",
        help="Outputs will be formatted in CSV format.")

    try:
        _dispatch(**vars(parser.parse_args()))
    except KeyboardInterrupt:
        return "Canceled"
    except Exception as e:  # pylint: disable=broad-except
        logging.exception("Untracked exception occurred.")
        # Bug fix: `e.message` does not exist in Python 3; `str(e)` is the
        # portable way to obtain the message.
        return str(e)
    finally:
        logging.shutdown()


if __name__ == "__main__":
    main()
from __future__ import absolute_import
import logging
import sys
import dsargparse
import dataset_io
import helper
def analyze(graph, output=sys.stdout, loop=20, threshold=10**-5):
    """Run the graph's update iteration until it converges or ``loop`` is hit.

    The iteration ends when either the maximum number of iterations is
    reached or the update becomes smaller than ``threshold``.  After each
    iteration the current state of the graph is written via
    :meth:`dataset_io:dataset_io.helper.print_state`.

    Args:
        graph: Review graph object.
        output: Writable object to write outputs.
        loop: Maximum number of iteration (default: 20).
        threshold: Threshold of the update (default: 10^-5).
    """
    # Emit the initial state before any update.
    dataset_io.print_state(graph, 0, output)
    logging.info("Start iterations.")
    for iteration in range(1, loop + 1):
        diff = graph.update()
        # Some graphs return None from update(); only a numeric diff can
        # trigger early convergence.
        if diff is not None and diff < threshold:
            break
        logging.info("Iteration %d ends. (diff=%s)", iteration, diff)
        dataset_io.print_state(graph, iteration, output)
    dataset_io.print_state(graph, "final", output)
def run(method, method_param, dataset, dataset_param, **kwargs):
    """Prepare a review graph, load a dataset into it, and execute analyze.

    Args:
        method: name of the method to be run.
        method_param: list of strings representing key-value pairs.
        dataset: name of the dataset to be loaded.
        dataset_param: list of strings representing key-value pairs.
    """
    empty_graph = helper.graph(method, method_param)
    loaded = helper.load(empty_graph, dataset, dataset_param)
    analyze(loaded, **kwargs)
def main():
    """Main function.

    Builds the command line interface and runs the selected mining
    algorithm on the selected dataset.

    Returns:
        ``None`` on success, or an error message string on failure (the
        dsargparse convention for exit status).
    """
    logging.basicConfig(level=logging.INFO, stream=sys.stderr)
    parser = dsargparse.ArgumentParser(main=main)
    # Dataset
    parser.add_argument(
        "dataset", choices=sorted(helper.DATASETS.keys()),
        help=(
            "choose one dataset to be analyzed.\n"
            "If choose `file`, give a file path via dataset-param with file key\n"
            "i.e. --dataset-param file=<path>."))
    parser.add_argument(
        "--dataset-param", action="append", default=[], dest="dataset_param",
        help=(
            "key and value pair which are connected with '='.\n"
            "This option can be set multiply."))
    # Algorithm
    parser.add_argument(
        "method", choices=sorted(helper.ALGORITHMS.keys()),
        help="choose one method.")
    # NOTE(review): this flag uses an underscore while --dataset-param uses
    # a hyphen; kept as-is for CLI backward compatibility.
    parser.add_argument(
        "--method_param", action="append", default=[],
        help=(
            "key and value pair which are connected with '='.\n"
            "This option can be set multiply."))
    parser.add_argument(
        "--loop", type=int, default=20,
        help="At most the given number of iteration will be run (default: 20).")
    # Bug fix: the default was written `10 ^ -5`, which is integer XOR in
    # Python and evaluates to -15, so `diff < threshold` could never stop
    # the loop early.  The intended default is 1e-5.
    parser.add_argument(
        "--threshold", type=float, default=1e-5,
        help=(
            "Loop ends the update will be smaller than the given number "
            "(default: 10^-5)."))
    # Output configuration
    parser.add_argument(
        "--output", default=sys.stdout,
        type=dsargparse.FileType("w"),  # pylint: disable=no-member
        help="file path to store results (Default: stdout).")
    # Run
    try:
        return run(**vars(parser.parse_args()))
    except KeyboardInterrupt:
        return "Canceled"
    except Exception as e:  # pylint: disable=broad-except
        logging.exception("Untracked exception occurred.")
        # Bug fix: `e.message` does not exist in Python 3.
        return str(e)
    finally:
        logging.shutdown()


if __name__ == "__main__":
    main()
from logging import getLogger

_LOGGER = getLogger(__name__)
"""Logger.
"""

# Registry mapping an algorithm name to a factory that returns a review
# graph object.  Each optional algorithm package below registers itself
# only when its import succeeds, so none of them is a hard dependency.
ALGORITHMS = {}
"""Dictionary of installed algorithms.
Keys are the names of the installed algorithms, and the associated value is
the graph creation function of that dataset.
"""

# Load and register RIA.
try:
    import ria
except ImportError:
    _LOGGER.info("rgmining-ria is not installed.")
else:
    def ignore_args(func):
        """Returns a wrapped function which ignores given arguments."""
        def _(*_args):
            """The function body."""
            return func()
        return _
    # `ria_graph` accepts caller-supplied parameters directly; the other
    # three factories take no arguments, so extra parameters are dropped.
    ALGORITHMS["ria"] = ria.ria_graph
    ALGORITHMS["one"] = ignore_args(ria.one_graph)
    ALGORITHMS["onesum"] = ignore_args(ria.one_sum_graph)
    ALGORITHMS["mra"] = ignore_args(ria.mra_graph)

# Load and register RSD.
try:
    import rsd  # pylint: disable=wrong-import-position
except ImportError:
    _LOGGER.info("rgmining-rsd is not installed.")
else:
    ALGORITHMS["rsd"] = rsd.ReviewGraph

# Load and register Fraud Eagle.
try:
    import fraud_eagle  # pylint: disable=wrong-import-position
except ImportError:
    _LOGGER.info("rgmining-fraud-eagle is not installed.")
else:
    ALGORITHMS["feagle"] = fraud_eagle.ReviewGraph

# Load and register FRAUDAR.
try:
    import fraudar  # pylint: disable=wrong-import-position
except ImportError:
    _LOGGER.info("rgmining-fraudar is not installed.")
else:
    def create_fraudar_graph(nblock=1):
        """Create a review graph defined in the FRAUDAR package.

        Args:
            nblock: number of dense blocks to detect (coerced to int).
        """
        return fraudar.ReviewGraph(int(nblock))
    ALGORITHMS["fraudar"] = create_fraudar_graph
def graph(method, method_param):
    """Create a review graph.

    Args:
        method: name of the method to be run.
        method_param: list of strings representing key-value pairs.

    Returns:
        Graph object.
    """
    # Parameters arrive as "key=value" strings; all values are numeric.
    kwargs = {}
    for key, value in [item.split("=") for item in method_param]:
        kwargs[key] = float(value)
    return ALGORITHMS[method](**kwargs)
import functools
from logging import getLogger
import dataset_io
# Module-level logger; emits to whatever handlers the application configures.
_LOGGER = getLogger(__name__)
"""Logger.
"""

# Registry mapping a dataset name to its loader function.  Optional dataset
# packages register themselves below only when their import succeeds.
DATASETS = {}
"""Dictionary of installed datasets.
Keys are the names of the installed datasets, and the associated values are
load function of that dataset.
"""
def ignore_kwargs(func):
    """Decorator that silently drops any keyword arguments.

    Used to adapt loaders that only take positional arguments to call
    sites that pass extra keyword parameters.
    """
    @functools.wraps(func)
    def wrapper(*args, **_ignored):
        """Forward positional arguments only."""
        return func(*args)
    return wrapper
# Load and register the synthetic dataset.
try:
    import synthetic  # pylint: disable=wrong-import-position
except ImportError:
    _LOGGER.info("rgmining-synthetic-dataset is not installed.")
else:
    # The synthetic loader takes no extra options; drop any keyword
    # parameters the CLI forwards.
    DATASETS["synthetic"] = ignore_kwargs(synthetic.load)

# Load and register Amazon dataset.
# The loader of the Amazon dataset takes one argument, categories,
# to specify which reviews will be loaded.
try:
    import amazon  # pylint: disable=wrong-import-position
except ImportError:
    _LOGGER.info("rgmining-amazon-dataset is not installed.")
else:
    DATASETS["amazon"] = amazon.load

# Load and register Trip Advisor dataset.
try:
    import tripadvisor  # pylint: disable=wrong-import-position
except ImportError:
    _LOGGER.info("rgmining-tripadvisor-dataset is not installed.")
else:
    DATASETS["tripadvisor"] = tripadvisor.load

# Register the loader which loads a dataset from a file.
# The loader function takes a keyword argument, fp, from which it loads
# the dataset.
DATASETS["file"] = dataset_io.load
def load(graph, dataset, dataset_param):
    """Load a dataset and return a review graph.

    Args:
        graph: review graph object which the dataset is loaded to.
        dataset: name of the dataset to be loaded.
        dataset_param: list of key-value parameters ("key=value" strings).

    Returns:
        Review graph object, which is the same object as the ``graph``
        parameter.
    """
    _LOGGER.info("Prepare options for the selected dataset.")
    pairs = [item.split("=") for item in dataset_param]
    params = {key: value for key, value in pairs}
    # The special "file" key names a path; replace it with an open handle
    # under the "fp" key expected by the loaders.
    if "file" in params:
        params["fp"] = open(params["file"])
        del params["file"]
    try:
        _LOGGER.info("Load the dataset.")
        DATASETS[dataset](graph, **params)
    finally:
        # Close the handle we opened, whether or not loading succeeded.
        if "fp" in params:
            params["fp"].close()
    return graph
:description: Explaining the basic usage of the analyze command.
Analyze command usage
=======================
.. raw:: html
<div class="addthis_inline_share_toolbox"></div>
The basic usage of this command is
.. code:: sh
$ analyze <dataset-specifier> <dataset-parameters> <method-specifier> <method-parameters> <options>
The dataset-specifier and dataset-parameters are the same parameters
described in the dataset command explanation.
The method-specifier is a name of installed method. You can see
available method names by ``analyze -h``.
method-parameters are optional arguments specified with
``--method-param`` flag. The ``--method-param`` flag takes a string
that connects a key and a value with a single ``=``, and can be given
multiple times.
You can find what kinds of parameter keys are defined in the method you
want to run from documents of the constructor of the review graph object
defined in the method.
For example, `Fraud Eagle <https://rgmining.github.io/fraud-eagle>`__
takes one parameter ``epsilon`` and you can give a value by
``--method-param epsilon=0.25``.
analyze also takes three options:
* ``--threshold``: threshold to distinguish an update is negligible
(Default: :math:`10^{-5}`),
* ``--loop``: the maximum number of iterations (Default: 20),
* ``--output``: file path to store results (Default: stdout).
Most of the methods the Review Graph Mining project provides are loop-based
algorithms, which iterate a procedure until the update becomes negligible.
The ``--threshold`` flag sets a threshold and if an update is smaller than
or equal to the threshold, it will be decided as negligible and the iteration
will be ended.
On the other hand, some methods with some datasets won't converge or even if
they will converge but it takes lots of time.
The ``--loop`` flag sets the maximum number of iterations to stop algorithms.
Datasets the Review Graph Mining Project provides
---------------------------------------------------
* :ref:`A Synthetic Review Dataset Loader <synthetic:top>`
(`rgmining-synthetic-dataset <https://pypi.python.org/pypi/rgmining-synthetic-dataset>`_),
* :ref:`amazon:top`
(`rgmining-amazon-dataset <https://pypi.python.org/pypi/rgmining-amazon-dataset>`_),
* :ref:`tripadvisor:top`
(`rgmining-tripadvisor-dataset <https://pypi.python.org/pypi/rgmining-tripadvisor-dataset>`_).
All packages are available on `PyPI <https://pypi.python.org/pypi>`_ and you can
install them by ``pip install``.
Methods the Review Graph Mining Project provides
---------------------------------------------------
* :ref:`Mutually Reinforcing Analysis (MRA) <ria:top>` [#DEXA2011]_
(`rgmining-ria <https://pypi.python.org/pypi/rgmining-ria>`_),
* :ref:`Repeated Improvement Analysis (RIA) <ria:top>` [#DEIM2015]_
(`rgmining-ria <https://pypi.python.org/pypi/rgmining-ria>`_),
* :ref:`Detecting Product Review Spammers Using Rating Behaviors <ria:top>` [#CIKM2010]_
(`rgmining-ria <https://pypi.python.org/pypi/rgmining-ria>`_),
* :ref:`Review Spammer Detection <rsd:top>` [#ICDM2011]_
(`rgmining-rsd <https://pypi.python.org/pypi/rgmining-rsd>`_),
* :ref:`Fraud Eagle <fraud-eagle:top>` [#ICWSM13]_
(`rgmining-fraud-eagle <https://pypi.python.org/pypi/rgmining-fraud-eagle>`_),
* :ref:`FRAUDAR <fraudar:top>` [#KDD2016]_
(`rgmining-fraudar <https://pypi.python.org/pypi/rgmining-fraudar>`_).
All packages are also available on `PyPI <https://pypi.python.org/pypi>`_ and you can
install them by ``pip install``.
References
------------
.. [#DEXA2011] Kazuki Tawaramoto, `Junpei Kawamoto`_, `Yasuhito Asano`_, and `Masatoshi Yoshikawa`_,
"|springer| `A Bipartite Graph Model and Mutually Reinforcing Analysis for Review Sites
<http://www.anrdoezrs.net/links/8186671/type/dlg/http://link.springer.com/chapter/10.1007%2F978-3-642-23088-2_25>`_,"
Proc. of `the 22nd International Conference on Database and Expert Systems Applications <http://www.dexa.org/>`_ (DEXA 2011),
pp.341-348, Toulouse, France, August 31, 2011.
.. [#DEIM2015] `川本 淳平`_, 俵本 一輝, `浅野 泰仁`_, `吉川 正俊`_,
"|pdf| `初期レビューを用いた長期間評価推定 <http://db-event.jpn.org/deim2015/paper/253.pdf>`_,"
`第7回データ工学と情報マネジメントに関するフォーラム <http://db-event.jpn.org/deim2015>`_,
D3-6, 福島, 2015年3月2日~4日. |deim2015-slide|
.. [#CIKM2010] `Ee-Peng Lim <https://sites.google.com/site/aseplim/>`_,
`Viet-An Nguyen <http://www.cs.umd.edu/~vietan/>`_,
Nitin Jindal,
`Bing Liu`_,
`Hady Wirawan Lauw <http://www.smu.edu.sg/faculty/profile/9621/Hady-W-LAUW>`_,
"`Detecting Product Review Spammers Using Rating Behaviors
<http://dl.acm.org/citation.cfm?id=1871557>`_,"
Proc. of the 19th ACM International Conference on Information and Knowledge Management,
pp.939-948, 2010.
.. [#ICDM2011] `Guan Wang <https://www.cs.uic.edu/~gwang/>`_,
`Sihong Xie <http://www.cse.lehigh.edu/~sxie/>`_, `Bing Liu`_,
`Philip S. Yu <http://bdsc.lab.uic.edu/people.html>`_,
"`Review Graph Based Online Store Review Spammer Detection
<http://ieeexplore.ieee.org/document/6137345/?reload=true&arnumber=6137345>`_,"
Proc. of the 11th IEEE International Conference on Data Mining (ICDM 2011),
pp.1242-1247, 2011.
.. [#ICWSM13] `Leman Akoglu <http://www.andrew.cmu.edu/user/lakoglu/>`_,
Rishi Chandy, and `Christos Faloutsos`_,
"|pdf| `Opinion Fraud Detection in Online Reviews by Network Effects
<https://www.aaai.org/ocs/index.php/ICWSM/ICWSM13/paper/viewFile/5981/6338>`_,"
Proc. of `the 7th International AAAI Conference on Weblogs and Social Media
<http://www.icwsm.org/2013/>`_ (ICWSM 2013), Boston, MA, July, 2013.
.. [#KDD2016] `Bryan Hooi <https://www.andrew.cmu.edu/user/bhooi/index.html>`_,
`Hyun Ah Song <http://www.cs.cmu.edu/~hyunahs/>`_,
`Alex Beutel <http://alexbeutel.com/>`_,
`Neil Shah <http://www.cs.cmu.edu/~neilshah/>`_,
`Kijung Shin <http://www.cs.cmu.edu/~kijungs/>`_,
`Christos Faloutsos`_,
"|pdf| `FRAUDAR: Bounding Graph Fraud in the Face of Camouflage
<http://www.andrew.cmu.edu/user/bhooi/papers/fraudar_kdd16.pdf>`_,"
Proc. of the 22nd ACM SIGKDD International Conference on Knowledge Discovery and Data Mining (KDD 2016),
pp.895-904, 2016.
.. _Junpei Kawamoto: https://www.jkawamoto.info
.. _Yasuhito Asano: http://www.iedu.i.kyoto-u.ac.jp/intro/member/asano
.. _Masatoshi Yoshikawa: http://www.db.soc.i.kyoto-u.ac.jp/~yoshikawa/
.. _川本 淳平: https://www.jkawamoto.info
.. _浅野 泰仁: http://www.iedu.i.kyoto-u.ac.jp/intro/member/asano
.. _吉川 正俊: http://www.db.soc.i.kyoto-u.ac.jp/~yoshikawa/
.. _Bing Liu: https://www.cs.uic.edu/~liub/
.. _Christos Faloutsos: http://www.cs.cmu.edu/afs/cs/usr/christos/www/
.. |springer| image:: img/springer.png
.. |pdf| raw:: html
<i class="fa fa-file-pdf-o" aria-hidden="true"></i>
.. |deim2016-slide| raw:: html
<a href="http://www.slideshare.net/jkawamoto/ss-59672505">
<i class="fa fa-slideshare" aria-hidden="true"></i>
</a>
.. |deim2015-slide| raw:: html
<a href="http://www.slideshare.net/jkawamoto/deim2015-45470497">
<i class="fa fa-slideshare" aria-hidden="true"></i>
</a>
| /rgmining-script-0.6.1.tar.gz/rgmining-script-0.6.1/docs/source/analyze.rst | 0.921225 | 0.847968 | analyze.rst | pypi |
"""Evaluate a review graph mining algorithm with the synthetic dataset.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import, division
import json
import logging
from logging import getLogger
import math
from os import path
import sys
import dsargparse
from matplotlib import pyplot
import numpy as np
sys.path.append(path.join(path.dirname(__file__), "../"))
import synthetic # pylint: disable=import-error,wrong-import-position
LOGGER = getLogger(__name__)
#--------------------------
# Loading algorithms
#--------------------------
ALGORITHMS = {}
"""Dictionary of graph loading functions associated with installed algorithms.
"""
# Load and register RIA and its variants.
# ALGORITHMS maps an algorithm name to a factory returning a fresh review
# graph; registration is skipped when the backing package is not installed.
try:
    import ria
except ImportError:
    LOGGER.info("rgmining-ria is not installed.")
else:
    def ignore_args(func):
        """Returns a wrapped function which ignores any given arguments."""
        def _(*_args):
            """The function body: drop the arguments, call func with none."""
            return func()
        return _
    # Only RIA itself accepts construction parameters; the other variants
    # take none, so their factories discard whatever the caller passes.
    ALGORITHMS["ria"] = ria.ria_graph
    ALGORITHMS["one"] = ignore_args(ria.one_graph)
    ALGORITHMS["onesum"] = ignore_args(ria.one_sum_graph)
    ALGORITHMS["mra"] = ignore_args(ria.mra_graph)
# Load and register RSD (skipped when rgmining-rsd is not installed).
try:
    import rsd  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-rsd is not installed.")
else:
    ALGORITHMS["rsd"] = rsd.ReviewGraph
# Load and register Fraud Eagle (skipped when rgmining-fraud-eagle is not
# installed).  Its constructor takes keyword parameters such as `epsilon`.
try:
    import fraud_eagle  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-fraud-eagle is not installed.")
else:
    ALGORITHMS["feagle"] = fraud_eagle.ReviewGraph
# Load and register FRAUDAR (skipped when rgmining-fraudar is not installed).
try:
    import fraudar  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-fraudar is not installed.")
else:
    def create_fraudar_graph(nblock=1):
        """Create a review graph defined in the FRAUDAR package.

        The `nblock` parameter is coerced to int because callers pass all
        parameters as floats.
        """
        return fraudar.ReviewGraph(int(nblock))
    ALGORITHMS["fraudar"] = create_fraudar_graph
TYPE = sorted(ALGORITHMS.keys())
"""List of supported algorithm types.
"""
#--------------------------
def DCG(reviewers, k):
    """Computes a DCG score for a top-k ranking.

    Reviewers are ranked by descending anomalous score; every reviewer whose
    name contains "anomaly" contributes a gain of 1 discounted by the log of
    its rank (the top rank contributes 1 undiscounted).

    Args:
      reviewers: A collection of reviewers.
      k: An integer specifying the k.

    Returns:
      The DCG score of the top-k ranking.
    """
    ranked = sorted(
        reviewers, key=lambda rev: rev.anomalous_score, reverse=True)
    score = 0.
    for position, reviewer in enumerate(ranked[:k], start=1):
        if "anomaly" not in reviewer.name:
            continue
        score += 1. if position == 1 else 1. / math.log(position, 2)
    return score
def IDCG(k):
    """Computes an IDCG score.

    The ideal ranking places a relevant item at every one of the k ranks,
    so each rank contributes its maximal discounted gain.

    Args:
      k: An integer specifying the length of an ideal ranking.

    Returns:
      The IDCG score of a k-length ideal ranking.
    """
    total = 0.
    for rank in range(1, k + 1):
        total += 1. if rank == 1 else 1. / math.log(rank, 2)
    return total
def calc_anomalous_reviews(reviewers):
    """Counts the number of anomalous reviewers by type.

    A reviewer is anomalous when its name contains "anomaly"; names
    containing "_1" are counted as type-2, names containing "_2" as
    type-3, and all remaining anomalous names as type-1.

    Args:
      reviewers: A collection of reviewers.

    Returns:
      A tuple consisting of the number of type-1, type-2, and type-3
      anomalous reviewers in the collection.
    """
    counts = [0., 0., 0.]
    for reviewer in reviewers:
        label = reviewer.name
        if "anomaly" not in label:
            continue
        if "_1" in label:
            counts[1] += 1
        elif "_2" in label:
            counts[2] += 1
        else:
            counts[0] += 1
    return tuple(counts)
def threshold(method, loop, output, param, plot=None):
    """Threshold based classification.

    Runs a given algorithm and classifies reviewers whose anomalous degree is
    greater than or equal to a threshold as anomalous.
    Moving the threshold from 0.0 to 1.0, evaluates true positive score,
    true negative score, false positive score, and false negative score.

    The output is a list of JSON objects, each of which has a threshold value,
    true positive score, true negative score, false positive score,
    and false negative score.

    Some algorithms require a set of parameters. For example, feagle requires
    parameter `epsilon`. Argument `param` specifies those parameters, and
    if you want to set 0.1 to the `epsilon`, pass `epsilon=0.1` via the
    argument.

    If a file name is given via `plot`, a ROC curve will be plotted and
    stored in the file.

    Args:
      method: name of algorithm.
      loop: the number of iteration (default: 20).
      output: writable object where the output will be written.
      param: list of key and value pair which are connected with "=".
      plot: file name of the result graph. If set, plot an ROC curve.
    """
    kwargs = {key: float(value)
              for key, value in [v.split("=") for v in param]}
    g = ALGORITHMS[method](**kwargs)
    synthetic.load(g)

    # If method is ONE, the graph is updated only one time.
    # BUG FIX: `range` replaces the Python-2-only `xrange`, which raises
    # NameError on Python 3.
    for _ in range(loop if method != "one" else 1):
        g.update()

    X, Y = [], []
    normal_reviewer_size = len(g.reviewers) - synthetic.ANOMALOUS_REVIEWER_SIZE
    for th in np.linspace(0, 1, 100):
        # Reviewers at or above the current threshold are classified as
        # anomalous.
        a = [r for r in g.reviewers if r.anomalous_score >= th]
        tp = sum(calc_anomalous_reviews(a))
        fp = len(a) - tp
        fn = synthetic.ANOMALOUS_REVIEWER_SIZE - tp
        tn = normal_reviewer_size - fp
        json.dump({
            "threshold": th,
            "true-positive": tp,
            "true-negative": tn,
            "false-positive": fp,
            "false-negative": fn
        }, output)
        output.write("\n")
        X.append(fp / normal_reviewer_size)
        Y.append(tp / synthetic.ANOMALOUS_REVIEWER_SIZE)

    if plot:
        pyplot.plot(X, Y)
        pyplot.xlabel("False positive rate")
        pyplot.ylabel("True positive rate")
        pyplot.xlim(0, 1)
        pyplot.ylim(0, 1)
        # X decreases as the threshold rises, so np.trapz comes out
        # negative; the sign flip yields the positive AUC.
        pyplot.title("AUC: {0}".format(-round(np.trapz(Y, X), 5)))
        pyplot.tight_layout()
        pyplot.savefig(plot)
def ranking(
        method, loop, output,
        param, plot=None):  # pylint: disable=too-many-locals
    """Ranking based classification.

    Runs a given algorithm and classifies reviewers who have the top 57
    highest anomalous degrees as anomalous.
    After every iteration, outputs precision of anomalous reviewers in JSON
    format.

    a1, a2, and a3 mean the number of independent, collude, and the other
    anomalous reviewers in the top 57 anomalous reviewers, respectively,
    a1-precision, a2-precision, and a3-precision are the precisions of them.
    error and error-rate are the number of normal reviewers in the top 57
    anomalous reviewers and its rate, respectively.

    Some algorithms require a set of parameters. For example, feagle requires
    parameter `epsilon`. Argument `param` specifies those parameters, and
    if you want to set 0.1 to the `epsilon`, pass `epsilon=0.1` via the
    argument.

    If a file name is given via `--plot` flag, the precision curves will be
    plotted and stored in the file.

    Args:
      method: name of algorithm.
      loop: the number of iteration (default: 20).
      output: writable object where the output will be written.
      param: list of key and value pair which are connected with "=".
      plot: file name of the result graph. If set, plot a graph.
    """
    kwargs = {key: float(value)
              for key, value in [v.split("=") for v in param]}
    g = ALGORITHMS[method](**kwargs)
    synthetic.load(g)

    num_of_reviewers = len(g.reviewers)
    # Per-type totals over the whole graph, used as precision denominators.
    num_of_type1, num_of_type2, num_of_type3 = calc_anomalous_reviews(
        g.reviewers)

    A1, A2, A3, E = [], [], [], []
    # BUG FIX: `range` replaces the Python-2-only `xrange`, which raises
    # NameError on Python 3.
    for i in range(loop if method != "one" else 1):
        g.update()
        a = sorted(g.reviewers, key=lambda r: r.anomalous_score,
                   reverse=True)[:57]
        type1, type2, type3 = calc_anomalous_reviews(a)
        error = len(a) - (type1 + type2 + type3)
        a1 = type1 / num_of_type1
        a2 = type2 / num_of_type2
        a3 = type3 / num_of_type3
        e = error / num_of_reviewers
        json.dump({
            "a1": int(type1),
            "a1-precision": a1,
            "a2": int(type2),
            "a2-precision": a2,
            "a3": int(type3),
            "a3-precision": a3,
            "error": int(error),
            "error-rate": e,
            "loop": i
        }, output)
        output.write("\n")
        A1.append(a1)
        A2.append(a2)
        A3.append(a3)
        E.append(e)

    if plot:
        X = np.arange(len(A1))
        pyplot.plot(X, A1, label="a1")
        pyplot.plot(X, A2, label="a2")
        pyplot.plot(X, A3, label="a3")
        pyplot.plot(X, E, label="error")
        pyplot.xlim(1, len(A1))
        pyplot.ylim(0)
        pyplot.xlabel("iteration")
        pyplot.legend()
        pyplot.tight_layout()
        pyplot.savefig(plot)
def dcg(method, loop, output, param, plot=None):
    """Evaluate an anomalous degree ranking by DCG.

    Runs a given algorithm and outputs the normalized Discounted Cumulative
    Gain (nDCG = DCG / IDCG) score for each k in 1 to the number of anomalous
    reviewers.

    Some algorithms require a set of parameters. For example, feagle requires
    parameter `epsilon`. Argument `param` specifies those parameters, and
    if you want to set 0.1 to the `epsilon`, pass `epsilon=0.1` via the
    argument.

    If a file name is given via `--plot` flag, a nDCG curve will be plotted
    and stored in the file.

    Args:
      method: name of algorithm.
      loop: the number of iteration (default: 20).
      output: writable object where the output will be written.
      param: list of key and value pair which are connected with "=".
      plot: file name of the result graph. If set, plot a nDCG curve.
    """
    kwargs = {key: float(value)
              for key, value in [v.split("=") for v in param]}
    g = ALGORITHMS[method](**kwargs)
    synthetic.load(g)

    # BUG FIX: `range` replaces the Python-2-only `xrange`, which raises
    # NameError on Python 3.
    for _ in range(loop if method != "one" else 1):
        g.update()

    X, Y = [], []
    for k in range(1, synthetic.ANOMALOUS_REVIEWER_SIZE + 1):
        score = DCG(g.reviewers, k) / IDCG(k)
        json.dump({"k": k, "score": score}, output)
        output.write("\n")
        X.append(k)
        Y.append(score)

    if plot:
        pyplot.plot(X, Y)
        pyplot.xlabel("k")
        pyplot.ylabel("nDCG")
        pyplot.xlim(1, synthetic.ANOMALOUS_REVIEWER_SIZE)
        pyplot.ylim(0, 1.1)
        pyplot.tight_layout()
        pyplot.savefig(plot)
def main():
    """The main function.

    Builds the command line interface, dispatches to one of the sub
    commands (threshold, ranking, dcg), and returns its result.
    """
    logging.basicConfig(level=logging.INFO, stream=sys.stderr)
    if not ALGORITHMS:
        logging.error("No algorithms are installed.")
        sys.exit(1)

    parser = dsargparse.ArgumentParser(main=main)
    parser.add_argument(
        "--output", default=sys.stdout,
        type=dsargparse.FileType("w"),  # pylint: disable=no-member
        help="file path to store results (Default: stdout).")
    subparsers = parser.add_subparsers()

    # All three sub commands take the exact same arguments; build them in a
    # loop instead of repeating the same four add_argument calls three times.
    for func in (threshold, ranking, dcg):
        cmd = subparsers.add_parser(func)
        cmd.add_argument("method", choices=TYPE)
        cmd.add_argument("--loop", type=int, default=20)
        cmd.add_argument(
            "--param", action="append", default=[],
            help=(
                "key and value pair which are connected with '='.\n"
                "This option can be set multiply."))
        cmd.add_argument("--plot", metavar="FILE")

    try:
        return parser.parse_and_run()
    except KeyboardInterrupt:
        return "Canceled"
    except Exception as e:  # pylint: disable=broad-except
        logging.exception("Untracked exception occurred.")
        # BUG FIX: `Exception.message` was removed in Python 3; `str(e)`
        # works on both Python 2 and 3.
        return str(e)
    finally:
        logging.shutdown()
if __name__ == "__main__":
sys.exit(main()) | /rgmining-synthetic-dataset-0.9.3.tar.gz/rgmining-synthetic-dataset-0.9.3/synthetic_evaluation.py | 0.858081 | 0.434101 | synthetic_evaluation.py | pypi |
from __future__ import division
from contextlib import closing
import datetime
import json
from os.path import exists, join
import site
import sys
import tarfile
_DATE_FORMAT = "%B %d, %Y"
"""Data format in the dataset.
"""
def _files(tar):
"""Yields a file in the tar file.
"""
info = tar.next()
while info:
if info.isfile():
yield info
info = tar.next()
def load(graph):
    """Load the Trip Advisor dataset to a given graph object.

    The graph object must implement the
    :ref:`graph interface <dataset-io:graph-interface>`.

    The archive ``TripAdvisorJson.tar.bz2`` is searched for in the current
    directory first, then under ``sys.prefix`` and ``sys.prefix/local``, and
    finally under the user base directory.

    Args:
      graph: an instance of bipartite graph.

    Returns:
      The graph instance *graph*.
    """
    base = "TripAdvisorJson.tar.bz2"
    # Lazily fall through the candidate locations so later path components
    # are only computed when the earlier files are missing.
    path = join(".", base)
    if not exists(path):
        path = join(sys.prefix, "rgmining", "data", base)
    if not exists(path):
        path = join(sys.prefix, "local", "rgmining", "data", base)
    if not exists(path):
        path = join(site.getuserbase(), "rgmining", "data", base)

    reviewers = {}  # Reviewer name -> reviewer object, shared across hotels.
    with tarfile.open(path) as tar:
        for member in _files(tar):
            with closing(tar.extractfile(member)) as fp:
                doc = json.load(fp)
            hotel_id = doc["HotelInfo"]["HotelID"]
            product = graph.new_product(name=hotel_id)
            for review in doc["Reviews"]:
                reviewer_id = review["ReviewID"]
                # Normalize the 1-5 star rating into [0, 1].
                rating = float(review["Ratings"]["Overall"]) / 5.
                try:
                    date = datetime.datetime.strptime(
                        review["Date"], _DATE_FORMAT).strftime("%Y%m%d")
                except ValueError:
                    date = None
                if reviewer_id not in reviewers:
                    reviewers[reviewer_id] = graph.new_reviewer(
                        name=reviewer_id)
                graph.add_review(reviewers[reviewer_id], product, rating, date)
    return graph
def print_state(g, i, output=sys.stdout):
    """Print a current state of a given graph.

    This method outputs the current state of a graph as a set of JSON
    objects, one per line.  Graph objects must have two properties,
    `reviewers` and `products`, which return a set of reviewers and
    products respectively.  See the
    :ref:`graph interface <dataset-io:graph-interface>` for more information.

    Reviewer objects are emitted as ::

        {"iteration": i,
         "reviewer": {"reviewer_id": ..., "score": ...}}

    and product objects as ::

        {"iteration": i,
         "product": {"product_id": ..., "summary": ...}}

    Args:
      g: Graph instance.
      i: Iteration number.
      output: A writable object (default: sys.stdout).
    """
    for reviewer in g.reviewers:
        record = {
            "iteration": i,
            "reviewer": {
                "reviewer_id": reviewer.name,
                "score": reviewer.anomalous_score,
            },
        }
        output.write(json.dumps(record))
        output.write("\n")
    for product in g.products:
        # The summary may be a wrapper object; round-trip through str to
        # obtain a plain float, as the original implementation does.
        record = {
            "iteration": i,
            "product": {
                "product_id": product.name,
                "summary": float(str(product.summary)),
            },
        }
        output.write(json.dumps(record))
        output.write("\n")
"""Evaluate a review graph mining algorithm with the Trip Advisor dataset.
"""
# pylint: disable=invalid-name
from __future__ import absolute_import, division
import logging
from logging import getLogger
import sys
import dsargparse
import tripadvisor
LOGGER = getLogger(__name__)
#--------------------------
# Loading algorithms
#--------------------------
ALGORITHMS = {}
"""Dictionary of graph loading functions associated with installed algorithms.
"""
# Load and register RIA and its variants.
# ALGORITHMS maps an algorithm name to a factory returning a fresh review
# graph; registration is skipped when the backing package is not installed.
try:
    import ria
except ImportError:
    LOGGER.info("rgmining-ria is not installed.")
else:
    def ignore_args(func):
        """Returns a wrapped function which ignores any given arguments."""
        def _(*_args):
            """The function body: drop the arguments, call func with none."""
            return func()
        return _
    # Only RIA itself accepts construction parameters; the other variants
    # take none, so their factories discard whatever the caller passes.
    ALGORITHMS["ria"] = ria.ria_graph
    ALGORITHMS["one"] = ignore_args(ria.one_graph)
    ALGORITHMS["onesum"] = ignore_args(ria.one_sum_graph)
    ALGORITHMS["mra"] = ignore_args(ria.mra_graph)
# Load and register RSD (skipped when rgmining-rsd is not installed).
try:
    import rsd  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-rsd is not installed.")
else:
    ALGORITHMS["rsd"] = rsd.ReviewGraph
# Load and register Fraud Eagle (skipped when rgmining-fraud-eagle is not
# installed).  Its constructor takes keyword parameters such as `epsilon`.
try:
    import fraud_eagle  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-fraud-eagle is not installed.")
else:
    ALGORITHMS["feagle"] = fraud_eagle.ReviewGraph
# Load and register FRAUDAR (skipped when rgmining-fraudar is not installed).
try:
    import fraudar  # pylint: disable=wrong-import-position
except ImportError:
    LOGGER.info("rgmining-fraudar is not installed.")
else:
    def create_fraudar_graph(nblock=1):
        """Create a review graph defined in the FRAUDAR package.

        The `nblock` parameter is coerced to int because callers pass all
        parameters as floats.
        """
        return fraudar.ReviewGraph(int(nblock))
    ALGORITHMS["fraudar"] = create_fraudar_graph
#--------------------------
def run(method, loop, threshold, output, param):
    """Run a given algorithm with the Trip Advisor dataset.

    Runs a given algorithm and outputs anomalous scores and summaries after
    each iteration finishes. The function ends when a given number of loops
    has run or the update of one iteration becomes smaller than a given
    threshold.

    Some algorithms require a set of parameters. For example, feagle requires
    parameter `epsilon`. Argument `param` specifies those parameters, and
    if you want to set 0.1 to the `epsilon`, pass `epsilon=0.1` via the
    argument.

    Args:
      method: name of algorithm.
      loop: the number of iteration (default: 20).
      threshold: threshold to judge an update is negligible (default: 10^-3).
      output: writable object where the output will be written.
      param: list of key and value pair which are connected with "=".
    """
    kwargs = {key: float(value)
              for key, value in [v.split("=") for v in param]}
    g = ALGORITHMS[method](**kwargs)
    tripadvisor.load(g)
    tripadvisor.print_state(g, 0, output)

    # Updates
    logging.info("Start iterations.")
    # BUG FIX: `range` replaces the Python-2-only `xrange`, which raises
    # NameError on Python 3.  "one"-style methods need only one update.
    for i in range(loop if not method.startswith("one") else 1):
        diff = g.update()
        if diff is not None and diff < threshold:
            break
        # Current summary
        logging.info("Iteration %d ends. (diff=%s)", i + 1, diff)
        tripadvisor.print_state(g, i + 1, output)

    # Print final state.
    tripadvisor.print_state(g, "final", output)
def main():
    """The main function.

    Parses command line arguments and runs the selected algorithm.
    """
    if not ALGORITHMS:
        logging.error("No algorithms are installed.")
        sys.exit(1)

    parser = dsargparse.ArgumentParser(main=main)
    parser.add_argument("method", choices=sorted(ALGORITHMS.keys()))
    parser.add_argument(
        "--output", default=sys.stdout,
        type=dsargparse.FileType("w"),  # pylint: disable=no-member
        help="file path to store results (Default: stdout).")
    parser.add_argument("--loop", type=int, default=20)
    # BUG FIX: the original default was written `10^-3`, which is the
    # bitwise XOR of 10 and -3 (== -9), not 0.001; `1e-3` is the intended
    # value.
    parser.add_argument("--threshold", type=float, default=1e-3)
    parser.add_argument(
        "--param", action="append", default=[],
        help=(
            "key and value pair which are connected with '='.\n"
            "This option can be set multiply."))
    run(**vars(parser.parse_args()))
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO, stream=sys.stderr)
try:
main()
except KeyboardInterrupt:
pass
except Exception: # pylint: disable=broad-except
logging.exception("Untracked exception occurred.")
finally:
logging.shutdown() | /rgmining-tripadvisor-dataset-0.5.6.tar.gz/rgmining-tripadvisor-dataset-0.5.6/sample.py | 0.77586 | 0.455925 | sample.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """
    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # 1.0 * keeps the division in float space on Python 2 as well.
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample (divide by
                n-1) or a population (divide by n)

        Returns:
            float: standard deviation of the data set
        """
        n = len(self.data) - 1 if sample else len(self.data)
        mean = self.calculate_mean()
        sigma = 0
        for d in self.data:
            sigma += (d - mean) ** 2
        sigma = math.sqrt(sigma / n)
        self.stdev = sigma
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian
        distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces = 50):
        """Function to plot the normalized histogram of the data and a plot of
        the probability density function along the same range.

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []
        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval*i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')
        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: label the second (pdf) subplot; the original set the
        # ylabel of axes[0] a second time instead of labelling axes[1].
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions.

        Means add directly; standard deviations add in quadrature because
        the variances of independent Gaussians are additive.

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance.

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
class VisitorBase:
    """Common base class for all Smali visitor classes.

    Every ``visit_*`` event received by this visitor is forwarded to the
    optional delegate.

    :param delegate: A delegate visitor, defaults to None
    :type delegate: BaseVisitor subclass, optional
    """

    def __init__(self, delegate: 'VisitorBase' = None) -> None:
        self.delegate = delegate
        # The delegate must share this visitor's direct base class.  Note
        # that this check does not apply to multiple inheritance.
        if delegate and not isinstance(delegate, self.__class__.__base__):
            raise TypeError(
                f'Invalid Visitor type - expected subclass of {self.__class__}')

    def visit_comment(self, text: str) -> None:
        """Visits a comment string.

        Important: use :meth:`visit_eol_comment` for inline (end-of-line)
        comments instead.

        :param text: the comment's text without the leading '#'
        :type text: str
        """
        if not self.delegate:
            return
        self.delegate.visit_comment(text)

    def visit_eol_comment(self, text: str) -> None:
        """Visits an inlined comment (EOL comment).

        :param text: the text without the leading '#'
        :type text: str
        """
        if not self.delegate:
            return
        self.delegate.visit_eol_comment(text)

    def visit_end(self) -> None:
        """Called at the end of an annotation."""
        if not self.delegate:
            return
        self.delegate.visit_end()
class AnnotationVisitor(VisitorBase):
    """Base class for annotation visitors."""

    def visit_value(self, name: str, value) -> None:
        """Visits a simple annotation value.

        :param name: the value's name
        :type name: str
        :param value: the value
        """
        if self.delegate:
            self.delegate.visit_value(name, value)

    def visit_array(self, name: str, values: list) -> None:
        """Visits an array of values.

        :param name: the value name
        :type name: str
        :param values: the array's values
        :type values: list
        """
        # BUG FIX: the original called self.visit_array(...) here, which
        # recursed infinitely whenever a delegate was set; forward to the
        # delegate instead, matching every other visit_* method.
        if self.delegate:
            self.delegate.visit_array(name, values)

    def visit_subannotation(self, name: str, access_flags: int, signature: str) -> 'AnnotationVisitor':
        """Prepares to visit an annotation nested inside this one.

        :param name: the annotation value name
        :type name: str
        :param access_flags: the annotation's access flags (zero in most cases)
        :type access_flags: int
        :param signature: the class signature
        :type signature: str
        """
        if self.delegate:
            return self.delegate.visit_subannotation(name, access_flags, signature)

    def visit_enum(self, name: str, owner: str, const: str, value_type: str) -> None:
        """Visits an enum value.

        :param name: the annotation value name
        :type name: str
        :param owner: the declaring class
        :type owner: str
        :param const: the enum constant name
        :type const: str
        :param value_type: the value type
        :type value_type: str
        """
        if self.delegate:
            self.delegate.visit_enum(name, owner, const, value_type)
class MethodVisitor(VisitorBase):
    """Base class for method visitors."""

    def __init__(self, delegate: 'MethodVisitor' = None) -> None:
        super().__init__(delegate)

    def visit_catch(self, exc_name: str, blocks: tuple) -> None:
        """Called on a ``.catch`` statement.

        The blocks contain the two enclosing goto blocks and the returning
        definition:

        .. code-block:: bnf

            .catch <name> { <try_start> .. <try_end> } <catch_handler>

        :param exc_name: the exception descriptor
        :type exc_name: str
        :param blocks: the goto-blocks definition
        :type blocks: tuple
        """
        if self.delegate:
            self.delegate.visit_catch(exc_name, blocks)

    def visit_catchall(self, exc_name: str, blocks: tuple) -> None:
        """Called on a ``.catchall`` statement.

        The blocks contain the two enclosing goto blocks and the returning
        definition:

        .. code-block:: bnf

            .catchall <name> { <try_start> .. <try_end> } <catch_handler>

        :param exc_name: the exception descriptor
        :type exc_name: str
        :param blocks: the goto-blocks definition
        :type blocks: tuple
        """
        # BUG FIX: the original forwarded this event to visit_catch, so a
        # delegate could never distinguish .catchall from .catch; forward to
        # the delegate's visit_catchall instead.
        if self.delegate:
            self.delegate.visit_catchall(exc_name, blocks)

    def visit_param(self, register: str, name: str) -> None:
        """Called on a ``.param`` statement.

        :param register: the register
        :type register: str
        :param name: the parameter's name
        :type name: str
        """
        if self.delegate:
            return self.delegate.visit_param(register, name)

    def visit_annotation(self, access_flags: int, signature: str) -> AnnotationVisitor:
        """Prepares to visit an annotation.

        :param access_flags: the annotation's access flags (zero in most cases)
        :type access_flags: int
        :param signature: the class signature
        :type signature: str
        """
        if self.delegate:
            return self.delegate.visit_annotation(access_flags, signature)

    def visit_locals(self, local_count: int) -> None:
        """Called on a ``.locals`` statement.

        The execution context of this method should be the same as of
        :meth:`visit_registers`.

        :param local_count: the amount of local variables
        :type local_count: int
        """
        if self.delegate:
            self.delegate.visit_locals(local_count)

    def visit_registers(self, registers: int) -> None:
        """Called on a ``.registers`` statement.

        The execution context of this method should be the same as of
        :meth:`visit_locals`.

        :param registers: the amount of registers
        :type registers: int
        """
        if self.delegate:
            self.delegate.visit_registers(registers)

    def visit_line(self, number: int) -> None:
        """Called when a line definition is parsed.

        :param number: the line number
        :type number: int
        """
        if self.delegate:
            self.delegate.visit_line(number)

    def visit_block(self, name: str) -> None:
        """Called when a goto-block definition is parsed.

        :param name: the block's name
        :type name: str
        """
        if self.delegate:
            self.delegate.visit_block(name)

    def visit_invoke(self, inv_type: str, args: list, owner: str, method: str) -> None:
        """Handles an 'invoke-' statement.

        This method is called whenever an 'invoke-' statement has been
        parsed. That includes 'invoke-virtual' as well as 'invoke-direct'.
        The provided method string contains the method signature which can
        be passed into the Type constructor.

        :param inv_type: the invocation type (direct, virtual, ...)
        :type inv_type: str
        :param args: the argument list
        :type args: list
        :param owner: the owner class of the referenced method
        :type owner: str
        :param method: the method to call
        :type method: str
        """
        if self.delegate:
            self.delegate.visit_invoke(inv_type, args, owner, method)

    def visit_return(self, ret_type: str, args: list) -> None:
        """Handles 'return-' statements.

        :param ret_type: the return type, e.g. "object" or "void", ...
        :type ret_type: str
        :param args: the argument list
        :type args: list
        """
        if self.delegate:
            self.delegate.visit_return(ret_type, args)

    def visit_instruction(self, ins_name: str, args: list) -> None:
        """Visits common instructions with one or two parameters.

        :param ins_name: the instruction name
        :type ins_name: str
        :param args: the argument list
        :type args: list
        """
        if self.delegate:
            self.delegate.visit_instruction(ins_name, args)

    def visit_goto(self, block_name: str) -> None:
        """Visits 'goto' statements.

        :param block_name: the destination block name
        :type block_name: str
        """
        if self.delegate:
            self.delegate.visit_goto(block_name)

    def visit_packed_switch(self, value: str, blocks: list) -> None:
        """Handles the packed-switch statement.

        :param value: the value which will be "switched"
        :type value: str
        :param blocks: the block ids
        :type blocks: list[str]
        """
        if self.delegate:
            self.delegate.visit_packed_switch(value, blocks)

    def visit_array_data(self, length: str, value_list: list) -> None:
        """Called on an '.array-data' statement.

        :param length: the array's length
        :type length: str
        :param value_list: the array's values
        :type value_list: list
        """
        if self.delegate:
            self.delegate.visit_array_data(length, value_list)

    def visit_local(self, register: str, name: str, descriptor: str, full_descriptor: str) -> None:
        """Handles debug information packed into .local statements.

        :param register: the variable register
        :type register: str
        :param name: the variable name
        :type name: str
        :param descriptor: the type descriptor
        :type descriptor: str
        :param full_descriptor: the java descriptor
        :type full_descriptor: str
        """
        if self.delegate:
            self.delegate.visit_local(register, name, descriptor, full_descriptor)

    def visit_sparse_switch(self, branches: dict) -> None:
        """Visits a .sparse-switch statement.

        The branches take the original case value as their key
        and the block_id as their value.

        :param branches: the switch branches
        :type branches: dict
        """
        if self.delegate:
            self.delegate.visit_sparse_switch(branches)

    def visit_prologue(self) -> None:
        """Visits a .prologue statement.

        Note that this call comes without any arguments.
        """
        if self.delegate:
            self.delegate.visit_prologue()

    def visit_restart(self, register: str) -> None:
        """Visits a .restart statement.

        :param register: the register
        :type register: str
        """
        if self.delegate:
            self.delegate.visit_restart(register)
class FieldVisitor(VisitorBase):
    """Base class for field visitors."""

    def visit_annotation(self, access_flags: int, signature: str) -> AnnotationVisitor:
        """Prepares to visit a field annotation.

        :param access_flags: the annotation's access flags (zero in most cases)
        :type access_flags: int
        :param signature: the class signature
        :type signature: str
        """
        if not self.delegate:
            return None
        return self.delegate.visit_annotation(access_flags, signature)
class ClassVisitor(VisitorBase):
    """Base class for Smali class visitors.

    Each ``visit_*`` method forwards its event to the delegate when one is
    configured; subclasses override the events they care about.
    """

    def visit_class(self, name: str, access_flags: int) -> None:
        """Called when the class definition has been parsed.

        :param name: the class name (type descriptor, e.g. "Lcom/example/A;")
        :type name: str
        :param access_flags: different access flags (PUBLIC, FINAL, ...)
        :type access_flags: int
        """
        if self.delegate:
            self.delegate.visit_class(name, access_flags)

    def visit_super(self, super_class: str) -> None:
        """Called when a .super statement has been parsed.

        :param super_class: the super class name as type descriptor
        :type super_class: str
        """
        if self.delegate:
            self.delegate.visit_super(super_class)

    def visit_implements(self, interface: str) -> None:
        """Called upon an implements directive.

        :param interface: the class name (internal name)
        :type interface: str
        """
        if self.delegate:
            self.delegate.visit_implements(interface)

    def visit_field(self, name: str, access_flags: int, field_type: str, value=None) -> FieldVisitor:
        """Called when a global field definition has been parsed.

        :param name: the field's name
        :type name: str
        :param access_flags: the access flags like PUBLIC, FINAL, ...
        :type access_flags: int
        :param field_type: the field's type (can be primitive)
        :type field_type: str
        :param value: the field's initial value, if any
        """
        if self.delegate:
            return self.delegate.visit_field(name, access_flags, field_type, value)

    def visit_method(self, name: str, access_flags: int, parameters: list,
                     return_type: str) -> MethodVisitor:
        """Called when a method definition has been parsed.

        :param name: the method's name
        :type name: str
        :param access_flags: the access flags (PUBLIC, PRIVATE, ...)
        :type access_flags: int
        :param parameters: the parameter list (internal names)
        :type parameters: list
        :param return_type: the return type (internal name)
        :type return_type: str
        :return: a MethodVisitor that handles method parsing events
        :rtype: MethodVisitor
        """
        if self.delegate:
            return self.delegate.visit_method(name, access_flags, parameters, return_type)

    def visit_inner_class(self, name: str, access_flags: int) -> 'ClassVisitor':
        """Called when an inner class definition has been parsed.

        :param name: the class name (type descriptor, e.g. "Lcom/example/A;")
        :type name: str
        :param access_flags: different access flags (PUBLIC, FINAL, ...)
        :type access_flags: int
        """
        if self.delegate:
            return self.delegate.visit_inner_class(name, access_flags)

    def visit_annotation(self, access_flags: int, signature: str) -> AnnotationVisitor:
        """Prepares to visit a class annotation.

        :param access_flags: the annotation's access flags (zero in most cases)
        :type access_flags: int
        :param signature: the class signature
        :type signature: str
        """
        if self.delegate:
            return self.delegate.visit_annotation(access_flags, signature)

    def visit_source(self, source: str) -> None:
        """Visits the source type of the smali file.

        :param source: the source type
        :type source: str
        """
        if self.delegate:
            self.delegate.visit_source(source)

    def visit_debug(self, enabled: int) -> None:
        """Visits a ``.debug`` directive.

        :param enabled: whether debugging symbols are enabled.
        :type enabled: int
        """
        if self.delegate:
            self.delegate.visit_debug(enabled)
[](https://github.com/RedisGears/rgsync)
[](https://circleci.com/gh/RedisGears/rgsync/tree/master)

[](https://badge.fury.io/py/rgsync)
[](https://snyk.io/test/github/RedisGears/rgsync?targetFile=requirements.txt)
[](https://lgtm.com/projects/g/RedisGears/rgsync/context:python)
# RGSync
[](https://forum.redislabs.com/c/modules/redisgears)
[](https://discord.gg/6yaVTtp)
A _Write Behind_ and _Write Through_ Recipe for [RedisGears](https://github.com/RedisGears/RedisGears)
## Demo

## Example
The following is a RedisGears recipe that shows how to use the _Write Behind_ pattern to map data from Redis Hashes to MySQL tables. The recipe maps all Redis Hashes with the prefix `person:<id>` to the MySQL table `persons`, with `<id>` being the primary key and mapped to the `person_id` column. Similarly, it maps all Hashes with the prefix `car:<id>` to the `cars` table.
```python
from rgsync import RGWriteBehind, RGWriteThrough
from rgsync.Connectors import MySqlConnector, MySqlConnection
'''
Create MySQL connection object
'''
connection = MySqlConnection('demouser', 'Password123!', 'localhost:3306/test')
'''
Create MySQL persons connector
'''
personsConnector = MySqlConnector(connection, 'persons', 'person_id')
personsMappings = {
'first_name':'first',
'last_name':'last',
'age':'age'
}
RGWriteBehind(GB, keysPrefix='person', mappings=personsMappings, connector=personsConnector, name='PersonsWriteBehind', version='99.99.99')
'''
Create MySQL cars connector
'''
carsConnector = MySqlConnector(connection, 'cars', 'car_id')
carsMappings = {
'id':'id',
'color':'color'
}
RGWriteBehind(GB, keysPrefix='car', mappings=carsMappings, connector=carsConnector, name='CarsWriteBehind', version='99.99.99')
```
## Running the recipe
You can use [this utility](https://github.com/RedisGears/gears-cli) to send a RedisGears recipe for execution. For example, run this repository's [example.py recipe](examples/mysql/example.py) and install its dependencies with the following command:
```bash
gears-cli --host <host> --port <port> --password <password> run example.py REQUIREMENTS rgsync PyMySQL cryptography
```
## Overview of the recipe's operation
The [`RGWriteBehind()` class](rgsync/redis_gears_write_behind.py) implements the _Write Behind_ recipe, that mainly consists of two RedisGears functions and operates as follows:
1. A write operation to a Redis Hash key triggers the execution of a RedisGears function.
1. That RedisGears function reads the data from the Hash and writes into a Redis Stream.
1. Another RedisGears function is executed asynchronously in the background and writes the changes to the target database.
### The motivation for using a Redis Stream
The use of a Redis Stream in the _Write Behind_ recipe implementation is to ensure the persistence of captured changes while mitigating the performance penalty associated with shipping them to the target database.
The recipe's first RedisGears function is registered to run synchronously, which means that the function runs in the same main Redis thread in which the command was executed. This mode of execution is needed so change events are recorded in order and to eliminate the possibility of losing events in case of failure.
Applying the changes to the target database is usually much slower, effectively excluding the possibility of doing that in the main thread. The second RedisGears function is executed asynchronously on batches and in intervals to do that.
The Redis Stream is the channel through which both of the recipe's parts communicate, where the changes are persisted in order synchronously and are later processed in the background asynchronously.
## Controlling what gets replicated
Sometimes you want to modify the data in Redis without replicating it to the target. For that purpose, the recipe can be customized by adding the special field `#` to your Hash's fields and setting it to one of these values:
* `+` - Adds the data but does not replicate it to the target
* `=` - Adds the data and replicates it (the default behavior)
* `-` - Deletes the data but does not replicate
* `~` - Deletes the data from Redis and the target (the default behavior when using `del` command)
When the Hash's value contains the `#` field, the recipe will act according to its value and will delete the `#` field from the Hash afterward. For example, the following shows how to delete a Hash without replicating the delete operation:
```
redis> HSET person:1 # -
```
Alternatively, to add a Hash without having it replicated:
```
redis> HSET person:007 first_name James last_name Bond age 42 # +
```
## At Least Once and Exactly Once semantics
By default the _Write Behind_ recipe provides the _At Least Once_ property for writes, meaning that data will be written once to the target, but possibly more than that in cases of failure.
It is possible to have the recipe provide _Exactly Once_ delivery semantics by using the Stream's message ID as an increasing ID of the operations. The writer RedisGears function can use that ID and record it in another table in the target to ensure that any given ID is only written once.
All of the recipe's SQL connectors support this capability. To use it, you need to provide the connector with the name of the "exactly once" table. This table should contain 2 columns, the `id` which represents some unique ID of the writer (used to distinguish between shards for example) and `val` which is the last Stream ID written to the target. The "exactly once" table's name can be specified to the connector in the constructor via the optional `exactlyOnceTableName` variable.
## Getting write acknowledgment
It is possible to use the recipe and get an acknowledgment of successful writes to the target. Follow these steps to do so:
1. For each data-changing operation generate a `uuid`.
2. Add the operation's `uuid` immediately after the value in the special `#` field, that is after the `+`/`=`/`-`/`~` character. Enabling write acknowledgment requires the use of the special `#`.
3. After performing the operation, perform an `XREAD BLOCK <timeout> STREAMS {<hash key>}<uuid> 0-0`. Once the recipe has written to the target, it will create a message in that (`{<hash key>}<uuid>`) Stream that has a single field named 'status' with the value 'done'.
4. For housekeeping purposes, it is recommended to delete that Stream after getting the acknowledgment. This is not a must, however, as these Streams are created with TTL of one hour.
### Acknowledgment example
```
127.0.0.1:6379> hset person:007 first_name James last_name Bond age 42 # =6ce0c902-30c2-4ac9-8342-2f04fb359a94
(integer) 1
127.0.0.1:6379> XREAD BLOCK 2000 STREAMS {person:1}6ce0c902-30c2-4ac9-8342-2f04fb359a94 0-0
1) 1) "{person:1}6ce0c902-30c2-4ac9-8342-2f04fb359a94"
2) 1) 1) "1581927201056-0"
2) 1) "status"
2) "done"
```
## Write Through
_Write Through_ is done by using a temporary key. The recipe registers to changes of that key and writes them to the target. Writing to the target is executed in the Server's main thread, in synchronous mode, which means that the server will be blocked at that time and the client will not get the reply until it is finished.
Writing the changes to the target may succeed or fail. If successful, the recipe renames the temporary key to its intended final name. A failure will prevent the rename. In either case, the temporary key is deleted.
The semantics of the acknowledgment Stream remains nearly the same as _Write Behind_. The only change is in the message's structure. Failed writes create a message in that (`{<hash key>}<uuid>`) Stream that has:
* A 'status' field with the value 'failed'
* An 'error' field containing the error's description
Note that when using _Write Through_ it is mandatory to supply a `uuid` and read the acknowledgment Stream. That is the only way to tell whether the write had succeeded.
_Write Through_ is registered using the `RGWriteThrough` class:
```python
RGWriteThrough(GB, keysPrefix, mappings, connector, name, version)
```
The `keysPrefix` argument is the prefix of the key on which the writes will be triggered. The temporary key's name will be in the following format:
```
<keysPrefix>{<realKeyName>}
```
Upon success, the key is renamed to `<realKeyName>`.
Any failure in writing to the target will cause the recipe to abort. In such cases, the temporary key is not renamed and is deleted.
Note that in some cases, such as connection failures, it is impossible to tell whether the operation had succeeded or failed on the target. The recipe considers these as failures, although in reality, the write may have succeeded.
### Example
These examples assume that the `keysPrefix` is set to "__". The first shows a successful write:
```
127.0.0.1:6379> HSET __{person:1} first_name foo last_name bar age 20 # =6ce0c902-30c2-4ac9-8342-2f04fb359a94
(integer) 4
127.0.0.1:6379> XREAD BLOCK 2000 STREAMS {person:1}6ce0c902-30c2-4ac9-8342-2f04fb359a94 0-0
1) 1) "{person:1}6ce0c902-30c2-4ac9-8342-2f04fb359a94"
2) 1) 1) "1583321726502-0"
2) 1) "status"
2) "done"
127.0.0.1:6379> HGETALL person:1
1) "age"
2) "20"
3) "last_name"
4) "bar"
5) "first_name"
6) "foo"
```
An example of a failed _Write Through_:
```
127.0.0.1:6379> HSET __{person:1} first_name foo last_name bar age 20 # =6ce0c902-30c2-4ac9-8342-2f04fb359a94
(integer) 4
127.0.0.1:6379> XREAD BLOCK 2000 STREAMS {person:1}6ce0c902-30c2-4ac9-8342-2f04fb359a94 0-0
1) 1) "{person:1}6ce0c902-30c2-4ac9-8342-2f04fb359a94"
2) 1) 1) "1583322141455-0"
2) 1) "status"
2) "failed"
3) "error"
4) "Failed connecting to SQL database, error=\"(pymysql.err.OperationalError) (2003, \"Can't connect to MySQL server on 'localhost' ([Errno 111] Connection refused)\")\n(Background on this error at: http://sqlalche.me/e/e3q8)\""
```
### Speed Improvements
To improve the speed of write through updates, users should think about adding indexing to their write through database. This index would be created based on the column containing the *redis* key id being replicated. Using the example above, a *person_id* column will be created, regardless of the back-end database chosen for write through. As such, an index on the *person_id* column may be prudent, depending on your data volume and architecture.
## Data persistence and availability
To avoid data loss in Redis and the resulting inconsistencies with the target databases, it is recommended to employ and use this recipe only with a highly-available Redis environment. In such environments, the failure of a master node will cause the replica that replaced it to continue the recipe's execution from the point it was stopped.
Furthermore, Redis' AOF should be used alongside replication to protect against data loss during system-wide failures.
## Monitoring the RedisGears function registrations
Use [this](https://github.com/RedisGears/RedisGearsMonitor) to monitor RedisGears' function registrations.
from rgtracker.record import *
from rgtracker.tracker import *
from rgtracker.common import *
from rgtracker.website import *
from rgtracker.section import *
from rgtracker.page import *
from rgtracker.device import *
from rgtracker.pageviews import *
from redisgears import log
# Pageviews Rotation Jobs - CMS
# Pageviews rotation job configurations consumed by the registration loop
# below.  Each entry describes one RedisGears StreamReader registration:
# which input stream to read, how many per-interval keys to merge, whether
# the merged result is also written to RedisTimeSeries, and how long the
# merged key lives.
pageviews_rotate_jobs = [
    # Run the job every 5 minutes to rotate 5 key of 1 minute each.
    # Expire new merged key after 30 minutes (keep 6 merged keys of 5 minutes each)
    {
        'name': 'MegaStar-1to5-W',
        'version': '99.99.99',
        'input_stream_name': create_key_name(Type.STREAM.value, '1MINUTE', Dimension.WEBSITE.value, '', '', Metric.PAGEVIEWS.value),
        'dimension': Dimension.WEBSITE.value,
        'number_of_rotated_keys': 5,  # rotate 5 keys of 1 minute each
        'write_to_ts': True,
        'timeseries_name': '5MINUTES',
        'key_expire_duration_sc': 1820,  # keep 6 keys -> merged key expire 30 minutes later
        'batch_size': 10000,
        'batch_interval_ms': 300000,  # run the job every 5 minutes
        'output_stream_name': create_key_name(Type.STREAM.value, '5MINUTES', Dimension.WEBSITE.value, '', '', Metric.PAGEVIEWS.value)
    },
    {
        'name': 'MegaStar-1to5-S',
        'version': '99.99.99',
        'input_stream_name': create_key_name(Type.STREAM.value, '1MINUTE', Dimension.SECTION.value, '', '', Metric.PAGEVIEWS.value),
        'dimension': Dimension.SECTION.value,
        'number_of_rotated_keys': 5,  # rotate 5 keys of 1 minute each
        'write_to_ts': True,
        'timeseries_name': '5MINUTES',
        'key_expire_duration_sc': 1820,  # keep 6 keys -> merged key expire 30 minutes later
        'batch_size': 10000,
        'batch_interval_ms': 300000,  # run the job every 5 minutes
        'output_stream_name': create_key_name(Type.STREAM.value, '5MINUTES', Dimension.SECTION.value, '', '', Metric.PAGEVIEWS.value)
    },
    {
        'name': 'MegaStar-1to5-P',
        'version': '99.99.99',
        'input_stream_name': create_key_name(Type.STREAM.value, '1MINUTE', Dimension.PAGE.value, '', '', Metric.PAGEVIEWS.value),
        'dimension': Dimension.PAGE.value,
        'number_of_rotated_keys': 5,  # rotate 5 keys of 1 minute each
        'write_to_ts': True,
        'timeseries_name': '5MINUTES',
        'key_expire_duration_sc': 1820,  # keep 6 keys -> merged key expire 30 minutes later
        'batch_size': 10000,
        'batch_interval_ms': 300000,  # run the job every 5 minutes
        'output_stream_name': create_key_name(Type.STREAM.value, '5MINUTES', Dimension.PAGE.value, '', '', Metric.PAGEVIEWS.value)
    },
    # The 5->10 minute and 10->60 minute rotation tiers below are currently
    # disabled; they are kept as reference configuration for future use.
    # # Run the job every 10 minutes to rotate 2 key of 5 minutes each.
    # # Expire new merged key after 60 minutes (keep 6 merged keys of 10 minutes each)
    # {
    #     'name': 'MegaStar-5to10-W',
    #     'version': '99.99.99',
    #     'input_stream_name': create_key_name(Type.STREAM.value, '5MINUTES', '', '', '', Metric.PAGEVIEWS.value),
    #     'dimension': Dimension.WEBSITE.value,
    #     'number_of_rotated_keys': 2,  # rotate 2 keys of 5 minutes each
    #     'write_to_ts': False,
    #     'timeseries_name': '',
    #     'key_expire_duration_sc': 3620,  # keep 6 keys -> merged key expire 60 minutes later
    #     'batch_size': 10000,
    #     'batch_interval_ms': 600000,  # run the job every 10 minutes
    #     'output_stream_name': create_key_name(Type.STREAM.value, '10MINUTES', '', '', '', Metric.PAGEVIEWS.value)
    # },
    # {
    #     'name': 'MegaStar-5to10-S',
    #     'version': '99.99.99',
    #     'input_stream_name': create_key_name(Type.STREAM.value, '5MINUTES', '', '', '', Metric.PAGEVIEWS.value),
    #     'dimension': Dimension.SECTION.value,
    #     'number_of_rotated_keys': 2,  # rotate 2 keys of 5 minutes each
    #     'write_to_ts': False,
    #     'timeseries_name': '',
    #     'key_expire_duration_sc': 3620,  # keep 6 keys -> merged key expire 60 minutes later
    #     'batch_size': 10000,
    #     'batch_interval_ms': 600000,  # run the job every 10 minutes
    #     'output_stream_name': create_key_name(Type.STREAM.value, '10MINUTES', '', '', '', Metric.PAGEVIEWS.value)
    # },
    # {
    #     'name': 'MegaStar-5to10-P',
    #     'version': '99.99.99',
    #     'input_stream_name': create_key_name(Type.STREAM.value, '5MINUTES', '', '', '', Metric.PAGEVIEWS.value),
    #     'dimension': Dimension.PAGE.value,
    #     'number_of_rotated_keys': 2,  # rotate 2 keys of 5 minutes each
    #     'write_to_ts': False,
    #     'timeseries_name': '',
    #     'key_expire_duration_sc': 3620,  # keep 6 keys -> merged key expire 60 minutes later
    #     'batch_size': 10000,
    #     'batch_interval_ms': 600000,  # run the job every 10 minutes
    #     'output_stream_name': create_key_name(Type.STREAM.value, '10MINUTES', '', '', '', Metric.PAGEVIEWS.value)
    # },
    # # Run the job every 60 minutes to rotate 6 key of 10 minutes each.
    # # Expire new merged key after 24 hours (keep 24 merged keys of 1 hour each)
    # {
    #     'name': 'MegaStar-10to60-W',
    #     'version': '99.99.99',
    #     'input_stream_name': create_key_name(Type.STREAM.value, '10MINUTES', '', '', '', Metric.PAGEVIEWS.value),
    #     'dimension': Dimension.WEBSITE.value,
    #     'number_of_rotated_keys': 6,  # rotate 6 keys of 10 minutes each
    #     'write_to_ts': False,
    #     'timeseries_name': '',
    #     'key_expire_duration_sc': 86420,  # keep 6 keys -> merged key expire 60 minutes later
    #     'batch_size': 10000,
    #     'batch_interval_ms': 3600000,  # run the job every 60 minutes
    #     'output_stream_name': create_key_name(Type.STREAM.value, '1HOUR', '', '', '', Metric.PAGEVIEWS.value)
    # },
    # {
    #     'name': 'MegaStar-10to60-S',
    #     'version': '99.99.99',
    #     'input_stream_name': create_key_name(Type.STREAM.value, '10MINUTES', '', '', '', Metric.PAGEVIEWS.value),
    #     'dimension': Dimension.SECTION.value,
    #     'number_of_rotated_keys': 6,  # rotate 6 keys of 10 minutes each
    #     'write_to_ts': False,
    #     'timeseries_name': '',
    #     'key_expire_duration_sc': 86420,  # keep 6 keys -> merged key expire 60 minutes later
    #     'batch_size': 10000,
    #     'batch_interval_ms': 3600000,  # run the job every 60 minutes
    #     'output_stream_name': create_key_name(Type.STREAM.value, '1HOUR', '', '', '', Metric.PAGEVIEWS.value)
    # },
    # {
    #     'name': 'MegaStar-10to60-P',
    #     'version': '99.99.99',
    #     'input_stream_name': create_key_name(Type.STREAM.value, '10MINUTES', '', '', '', Metric.PAGEVIEWS.value),
    #     'dimension': Dimension.PAGE.value,
    #     'number_of_rotated_keys': 6,  # rotate 6 keys of 10 minutes each
    #     'write_to_ts': False,
    #     'timeseries_name': '',
    #     'key_expire_duration_sc': 86420,  # keep 6 keys -> merged key expire 60 minutes later
    #     'batch_size': 10000,
    #     'batch_interval_ms': 3600000,  # run the job every 60 minutes
    #     'output_stream_name': create_key_name(Type.STREAM.value, '1HOUR', '', '', '', Metric.PAGEVIEWS.value)
    # },
]
# Register one RedisGears StreamReader per configured rotation job.
for job in pageviews_rotate_jobs:
    # unregister_old_versions(job.get('name'), job.get('version'))
    desc_json = {
        "name": job.get('name'),
        "version": job.get('version'),
        "desc": f"{job.get('name')} - Rotate Pageviews Keys"
    }
    tracker_log(f'Register {job.get("name")} ...')
    # NOTE: every job-dependent value that a lambda needs is bound eagerly
    # through a default argument.  Closures created in a loop are
    # late-binding in Python, so without this binding a lambda evaluated
    # after the loop advances could observe a later job's configuration.
    GB("StreamReader", desc=json.dumps(desc_json)). \
        foreach(lambda records: tracker_log(f'start - {len(records)}->{records}')). \
        aggregate([],
                  lambda a, r: a + [r['value']],
                  lambda a, r: a + r). \
        map(lambda records, n_keys=job.get('number_of_rotated_keys'):
            transform(records, n_keys)). \
        foreach(lambda records: tracker_log(f'map - {records}')). \
        foreach(lambda records,
                dimension=job.get('dimension'),
                write_to_ts=job.get('write_to_ts'),
                timeseries_name=job.get('timeseries_name'),
                key_expire=job.get('key_expire_duration_sc'),
                input_stream=job.get('input_stream_name'),
                output_stream=job.get('output_stream_name'):
                load_pageviews(records, dimension, write_to_ts, timeseries_name,
                               key_expire, input_stream, output_stream)). \
        register(
            prefix=job.get('input_stream_name'),
            convertToStr=True,
            collect=True,
            onFailedPolicy='abort',
            onFailedRetryInterval=1,
            batch=job.get('batch_size'),
            duration=job.get('batch_interval_ms'),
            trimStream=False)
    tracker_log(f'Register {job.get("name")} OK')
from __future__ import absolute_import, print_function, unicode_literals
# stdlib
import time
import subprocess
from multiprocessing import Lock
import traceback
import requests
from typing import List, Tuple, Dict, Optional
from . import logutil
# 3rd party
import koji
import koji_cli.lib
import wrapt
logger = logutil.getLogger(__name__)
# ============================================================================
# Brew/Koji service interaction functions
# ============================================================================
# Populated by watch_task. Each task_id will be a key in the dict and
# each value will be a TaskInfo: https://github.com/openshift/enterprise-images/pull/178#discussion_r173812940
# Maps task_id -> the most recent koji TaskInfo dict observed by watch_task().
watch_task_info = {}
# Protects threaded access to watch_task_info
watch_task_lock = Lock()
def get_watch_task_info_copy():
    """Return a thread-safe snapshot of ``watch_task_info``.

    Each key in the returned dict is a task_id and each value is a koji
    TaskInfo dict with potentially useful data.
    https://github.com/openshift/enterprise-images/pull/178#discussion_r173812940
    """
    with watch_task_lock:
        return watch_task_info.copy()
def watch_task(brew_hub, log_f, task_id, terminate_event):
    """Poll a brew task until it finishes, fails, times out, or is interrupted.

    :param brew_hub: URL of the brew hub to poll
    :param log_f: callable used to record progress messages
    :param task_id: id of the brew task to watch
    :param terminate_event: threading.Event; when set, watching is aborted
    :return: None if the task succeeded, otherwise an error description string
    """
    end = time.time() + 4 * 60 * 60  # absolute deadline: 4 hours from now
    watcher = koji_cli.lib.TaskWatcher(
        task_id,
        koji.ClientSession(brew_hub, opts={'serverca': '/etc/pki/brew/legacy.crt'}),
        quiet=True)
    error = None
    except_count = 0
    while error is None:
        try:
            watcher.update()
            except_count = 0
            # Keep around metrics for each task we watch
            with watch_task_lock:
                watch_task_info[task_id] = dict(watcher.info)
            if watcher.is_done():
                return None if watcher.is_success() else watcher.get_failure()
            log_f("Task state: " + koji.TASK_STATES[watcher.info['state']])
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate.  watcher.update() can raise during transient
            # connection issues, so retry a bounded number of times.
            # (The message says 60s but the actual poll interval below is
            # 3 minutes - kept as-is to avoid changing log output.)
            except_count += 1
            log_f('watcher.update() exception. Trying again in 60s.\n{}'.format(traceback.format_exc()))
            if except_count >= 10:
                log_f('watcher.update() excepted 10 times. Giving up.')
                error = traceback.format_exc()
                break
        if terminate_event.wait(timeout=3 * 60):
            error = 'Interrupted'
        elif time.time() > end:
            error = 'Timeout building image'
    log_f(error + ", canceling build")
    subprocess.check_call(("brew", "cancel", str(task_id)))
    return error
def get_build_objects(ids_or_nvrs, session):
    """Get information of multiple Koji/Brew builds

    :param ids_or_nvrs: list of build nvr strings or numbers.
    :param session: instance of :class:`koji.ClientSession`
    :return: a list Koji/Brew build objects
    """
    logger.debug(
        "Fetching build info for {} from Koji/Brew...".format(ids_or_nvrs))
    # Use Koji multicall interface to boost performance.
    # See https://pagure.io/koji/pull-request/957
    with session.multicall(strict=True) as m:
        calls = [m.getBuild(build) for build in ids_or_nvrs]
    return [call.result for call in calls]
def get_latest_builds(tag_component_tuples: List[Tuple[str, str]], build_type: Optional[str], event: Optional[int], session: koji.ClientSession) -> List[Optional[List[Dict]]]:
    """ Get latest builds for multiple Brew components as of given event

    :param tag_component_tuples: List of (tag, component_name) tuples; a
        falsy tag yields a ``None`` entry in the result.
    :param build_type: if given, only retrieve specified build type (rpm, image)
    :param event: Brew event ID, or None for now.
    :param session: instance of Brew session
    :return: a list of lists of Koji/Brew build dicts
    """
    calls = []
    with session.multicall(strict=True) as m:
        for tag, component_name in tag_component_tuples:
            calls.append(
                m.getLatestBuilds(tag, event=event, package=component_name, type=build_type)
                if tag else None)
    return [call.result if call else None for call in calls]
def get_tagged_builds(tags: List[str], build_type: Optional[str], event: Optional[int], session: koji.ClientSession) -> List[Optional[List[Dict]]]:
    """ Get tagged builds for multiple Brew tags

    :param tags: list of Brew tag names to query
    :param build_type: if given, only retrieve specified build type (rpm, image)
    :param event: Brew event ID, or None for now.
    :param session: instance of Brew session
    :return: a list of lists of Koji/Brew build dicts
    """
    # Multicall batches one listTagged query per tag into a single round trip.
    tasks = []
    with session.multicall(strict=True) as m:
        for tag in tags:
            tasks.append(m.listTagged(tag, event=event, type=build_type))
    return [task.result if task else None for task in tasks]
def list_archives_by_builds(build_ids: List[int], build_type: str, session: koji.ClientSession) -> List[Optional[List[Dict]]]:
    """ Retrieve information about archives by builds

    :param build_ids: List of build IDs; falsy ids produce ``None`` results
    :param build_type: build type, such as "image"
    :param session: instance of Brew session
    :return: a list of Koji/Brew archive lists
    """
    pending = []
    with session.multicall(strict=True) as m:
        for build_id in build_ids:
            # Keep result positions aligned with the input list.
            pending.append(
                m.listArchives(buildID=build_id, type=build_type) if build_id else None)
    return [call.result if call else None for call in pending]
def get_builds_tags(build_nvrs, session=None):
    """Get tags of multiple Koji/Brew builds

    :param build_nvrs: list of build nvr strings or numbers.
    :param session: instance of :class:`koji.ClientSession`.  Despite the
        ``None`` default, a real session must be supplied - ``None`` would
        fail on the ``multicall`` call below.
    :return: a list of Koji/Brew tag lists
    """
    tasks = []
    with session.multicall(strict=True) as m:
        for nvr in build_nvrs:
            tasks.append(m.listTags(build=nvr))
    return [task.result for task in tasks]
def list_image_rpms(image_ids: List[int], session: koji.ClientSession) -> List[Optional[List[Dict]]]:
    """ Retrieve RPMs in given images

    :param image_ids: image IDs list; ``None`` entries yield ``None`` results
    :param session: instance of Brew session
    :return: a list of Koji/Brew RPM lists
    """
    pending = []
    with session.multicall(strict=True) as m:
        for image_id in image_ids:
            # Explicit ``is None`` check: image id 0 would still be queried.
            pending.append(
                None if image_id is None else m.listRPMs(imageID=image_id))
    return [call.result if call else None for call in pending]
# Map that records the most recent change for a tag.
# Maps tag_id to a list containing the most recent change
# event returned by koji's tagHistory API.
latest_tag_change_cache = {}
# Guards reads and writes of latest_tag_change_cache across threads.
cache_lock = Lock()
def tags_changed_since_build(runtime, koji_client, build, tag_ids):
    """
    Determine which of the given tags changed after the given build was created.

    :param runtime: doozer runtime; used only for debug logging
    :param koji_client: koji client session used to query tag history
    :param build: A build information dict returned from koji getBuild
    :param tag_ids: A list of tag ids (or tag names) which should be assessed
    :return: If any of the tags have changed since the specified event, returns a
             list of information about the tags. If no tags have changed since
             event, returns an empty list
    """
    build_nvr = build['nvr']
    build_event_id = build['creation_event_id']
    result = []
    for tid in tag_ids:
        # Probe the shared cache under the lock, run the (slow) koji query
        # outside it, then publish the answer under the lock again.  Two
        # threads may race and both query koji for the same tag; the last
        # write wins, which is acceptable for this cache.
        with cache_lock:
            tag_changes = latest_tag_change_cache.get(tid, None)
        if tag_changes is None:
            # koji returns in reverse chronological order. So we are retrieving the most recent.
            tag_changes = koji_client.tagHistory(tag=tid, queryOpts={'limit': 1})
            with cache_lock:
                latest_tag_change_cache[tid] = tag_changes
        if tag_changes:
            tag_change = tag_changes[0]
            tag_change_event_id = tag_change['create_event']
            if tag_change_event_id > build_event_id:
                result.append(tag_change)
    runtime.logger.debug(f'Found that build of {build_nvr} (event={build_event_id}) occurred before tag changes: {result}')
    return result
class KojiWrapper(wrapt.ObjectProxy):
    """
    Proxy that retries koji API invocations on connection resets.

    We've seen the koji client occasionally get Connection Reset by Peer
    errors ("requests.exceptions.ConnectionError: ('Connection aborted.',
    ConnectionResetError(104, 'Connection reset by peer'))").  Under the
    theory that these operations just need to be retried, this wrapper
    automatically retries all invocations of koji APIs (up to 4 attempts,
    sleeping 5 seconds after each failure).
    """
    def __call__(self, *args, **kwargs):
        attempts_left = 4
        while True:
            try:
                return self.__wrapped__(*args, **kwargs)
            except requests.exceptions.ConnectionError:
                # Matches the original behaviour: sleep even before the
                # final re-raise, then propagate once attempts run out.
                time.sleep(5)
                attempts_left -= 1
                if attempts_left == 0:
                    raise
from __future__ import absolute_import, print_function, unicode_literals
import json
from tenacity import retry, stop_after_attempt, wait_fixed
from urllib import request
# Root of the public rhcos release browser bucket.
RHCOS_BASE_URL = "https://releases-rhcos-art.cloud.privileged.psi.redhat.com/storage/releases"


def rhcos_release_url(version, arch="x86_64", private=False):
    """
    base url for a release stream in the release browser (AWS bucket).

    @param version The 4.y ocp version as a string (e.g. "4.6")
    @param arch architecture we are interested in (e.g. "s390x")
    @param private boolean, true for private stream, false for public (currently, no effect)
    @return e.g. "https://releases-rhcos-art...com/storage/releases/rhcos-4.6-s390x"
    """
    # x86_64 (and its container alias amd64) is the default stream and gets
    # no architecture suffix; every other arch is appended as "-<arch>".
    if arch in ("x86_64", "amd64"):
        suffix = ""
    else:
        suffix = f"-{arch}"
    # TODO: create private rhcos builds and do something with "private" here
    return f"{RHCOS_BASE_URL}/rhcos-{version}{suffix}"
# this is hard to test with retries, so wrap testable method
@retry(reraise=True, stop=stop_after_attempt(10), wait=wait_fixed(3))
def latest_rhcos_build_id(version, arch="x86_64", private=False):
    # Retrying facade over _latest_rhcos_build_id: up to 10 attempts spaced
    # 3 seconds apart; the final exception is re-raised when all fail.
    return _latest_rhcos_build_id(version, arch, private)
def _latest_rhcos_build_id(version, arch="x86_64", private=False):
    """Return the newest rhcos build id string for a stream, or None.

    (May want to return "schema-version" also if this ever gets more complex.)
    Raises on network/JSON errors; retry handling lives in the public wrapper.
    """
    builds_url = f"{rhcos_release_url(version, arch, private)}/builds.json"
    with request.urlopen(builds_url) as resp:
        payload = json.loads(resp.read().decode())
    builds = payload["builds"]
    if not builds:
        return None
    newest = builds[0]
    # old schema just had the id as a string; newer has it in a dict
    return newest if isinstance(newest, str) else newest["id"]
# this is hard to test with retries, so wrap testable method
@retry(reraise=True, stop=stop_after_attempt(10), wait=wait_fixed(3))
def rhcos_build_meta(build_id, version, arch="x86_64", private=False):
    # Retrying facade over _rhcos_build_meta: up to 10 attempts, 3s apart.
    return _rhcos_build_meta(build_id, version, arch, private)
def _rhcos_build_meta(build_id, version, arch="x86_64", private=False):
    """
    rhcos build record for an id in the given stream in the release browser

    @return a "meta" build record e.g.:
     https://releases-rhcos-art.cloud.privileged.psi.redhat.com/storage/releases/rhcos-4.1/410.81.20200520.0/meta.json
     {
         "buildid": "410.81.20200520.0",
         ...
         "oscontainer": {
             "digest": "sha256:b0997c9fe4363c8a0ed3b52882b509ade711f7cdb620cc7a71767a859172f423"
             "image": "quay.io/openshift-release-dev/ocp-v4.0-art-dev"
         },
         ...
     }
    """
    base_url = f"{rhcos_release_url(version, arch, private)}/{build_id}/"
    # before 4.3 the arch was not included in the path
    major_minor = tuple(int(part) for part in version.split("."))
    if major_minor < (4, 3):
        meta_url = base_url + "meta.json"
    else:
        meta_url = base_url + f"{arch}/meta.json"
    with request.urlopen(meta_url) as resp:
        return json.loads(resp.read().decode())
def latest_machine_os_content(version, arch="x86_64", private=False):
    """Return (build_id, machine-os-content pullspec) for the latest rhcos
    build in a stream, or (None, None) if the stream has no builds."""
    build_id = latest_rhcos_build_id(version, arch, private)
    if build_id is None:
        return None, None
    oscontainer = rhcos_build_meta(build_id, version, arch, private)['oscontainer']
    pullspec = "{}@{}".format(oscontainer['image'], oscontainer['digest'])
    return build_id, pullspec
from __future__ import absolute_import, print_function, unicode_literals
from future import standard_library
standard_library.install_aliases()
import yaml
import logging
import urllib.parse
import os
import shutil
import io
from . import exectools
from .pushd import Dir
from doozerlib import constants
SCHEMES = ['ssh', 'ssh+git', "http", "https"]
class GitDataException(Exception):
    """A broad exception for errors during GitData operations"""
class GitDataBranchException(GitDataException):
    """GitData error concerning the requested branch."""
    pass
class GitDataPathException(GitDataException):
    """GitData error concerning the requested data path."""
    pass
class DataObj(object):
    """A single structured-data file (YAML/JSON) loaded from a GitData repo.

    Attributes:
        key: lookup key this object was registered under
        path: full path of the backing file
        base_dir: directory containing the file
        filename: bare file name with no directory components
        data: parsed file contents
    """

    def __init__(self, key, path, data):
        self.key = key
        self.path = path
        self.base_dir = os.path.dirname(self.path)
        # os.path.basename instead of str.replace: replace() substitutes
        # *every* occurrence of base_dir in the path, which corrupts the
        # name when the directory string repeats in it (e.g. '/conf/conf.yml'
        # previously yielded '.yml' instead of 'conf.yml').
        self.filename = os.path.basename(self.path)
        self.data = data

    def __repr__(self):
        result = {
            'key': self.key,
            'path': self.path,
            'data': self.data
        }
        return str(result)

    def reload(self):
        """Re-read and re-parse the backing file into self.data."""
        with io.open(self.path, 'r', encoding="utf-8") as f:
            self.data = yaml.full_load(f)

    def save(self):
        """Write self.data back to the backing file as YAML."""
        with io.open(self.path, 'w', encoding="utf-8") as f:
            yaml.safe_dump(self.data, f, default_flow_style=False)
class GitData(object):
    """Loads structured config data (YAML/JSON files) from a git repository or
    a plain local directory, exposing each file as a DataObj."""
    def __init__(self, data_path=None, clone_dir='./', branch='master',
                 sub_dir=None, exts=None, logger=None):
        """
        Load structured data from a git source.
        :param str data_path: Git url (git/http/https) or local directory path
        :param str clone_dir: Location to clone data into
        :param str branch: Repo branch (tag or sha also allowed) to checkout
        :param str sub_dir: Sub dir in data to treat as root
        :param list exts: List of valid extensions to search for in data, with out period.
            Defaults to ['yaml', 'yml', 'json'].
        :param logger: Python logging object to use
        :raises GitDataException:
        """
        self.logger = logger
        if logger is None:
            logging.basicConfig(level=logging.INFO)
            self.logger = logging.getLogger()
        self.clone_dir = clone_dir
        self.branch = branch
        self.remote_path = None
        self.sub_dir = sub_dir
        # Build the default per call to avoid the shared-mutable-default pitfall.
        if exts is None:
            exts = ['yaml', 'yml', 'json']
        self.exts = ['.' + e.lower() for e in exts]
        self.commit_hash = None
        self.origin_url = None
        if data_path:
            self.clone_data(data_path)
    def clone_data(self, data_path):
        """
        Clones data for given data_path:
        :param str data_path: Git url (git/http/https) or local directory path
        """
        self.data_path = data_path
        data_url = urllib.parse.urlparse(self.data_path)
        # A scheme-less path containing ':' is treated as an scp-style git url (user@host:repo).
        if data_url.scheme in SCHEMES or (data_url.scheme == '' and ':' in data_url.path):
            data_name = os.path.splitext(os.path.basename(data_url.path))[0]
            data_destination = os.path.join(self.clone_dir, data_name)
            clone_data = True
            if os.path.isdir(data_destination):
                self.logger.info('Data clone directory already exists, checking commit sha')
                with Dir(data_destination):
                    # check the current status of what's local
                    rc, out, err = exectools.cmd_gather("git status -sb")
                    if rc:
                        raise GitDataException('Error getting data repo status: {}'.format(err))
                    lines = out.strip().split('\n')
                    synced = ('ahead' not in lines[0] and 'behind' not in lines[0] and len(lines) == 1)
                    # check if there are unpushed
                    # verify local branch
                    rc, out, err = exectools.cmd_gather("git rev-parse --abbrev-ref HEAD")
                    if rc:
                        raise GitDataException('Error checking local branch name: {}'.format(err))
                    branch = out.strip()
                    if branch != self.branch:
                        if not synced:
                            msg = ('Local branch is `{}`, but requested `{}` and you have uncommitted/pushed changes\n'
                                   'You must either clear your local data or manually checkout the correct branch.'
                                   ).format(branch, self.branch)
                            raise GitDataBranchException(msg)
                    else:
                        # Check if local is synced with remote
                        rc, out, err = exectools.cmd_gather(["git", "ls-remote", self.data_path, self.branch])
                        if rc:
                            raise GitDataException('Unable to check remote sha: {}'.format(err))
                        remote = out.strip().split('\t')[0]
                        try:
                            exectools.cmd_assert('git branch --contains {}'.format(remote))
                            self.logger.info('{} is already cloned and latest'.format(self.data_path))
                            clone_data = False
                        except Exception:
                            # Was a bare `except:`; narrowed so SystemExit/KeyboardInterrupt propagate.
                            if not synced:
                                msg = ('Local data is out of sync with remote and you have unpushed commits: {}\n'
                                       'You must either clear your local data\n'
                                       'or manually rebase from latest remote to continue'
                                       ).format(data_destination)
                                raise GitDataException(msg)
            if clone_data:
                if os.path.isdir(data_destination):  # delete if already there
                    shutil.rmtree(data_destination)
                self.logger.info('Cloning config data from {}'.format(self.data_path))
                if not os.path.isdir(data_destination):
                    # Clone all branches as we must sometimes reference master /OWNERS for maintainer information
                    cmd = "git clone --no-single-branch -b {} --depth 1 {} {}".format(self.branch, self.data_path, data_destination)
                    rc, out, err = exectools.cmd_gather(cmd, set_env=constants.GIT_NO_PROMPTS)
                    if rc:
                        raise GitDataException('Error while cloning data: {}'.format(err))
            self.remote_path = self.data_path
            self.data_path = data_destination
        elif data_url.scheme in ['', 'file']:
            self.remote_path = None
            self.data_path = os.path.abspath(self.data_path)  # just in case relative path was given
        else:
            raise ValueError(
                'Invalid data_path: {} - invalid scheme: {}'
                .format(self.data_path, data_url.scheme)
            )
        if self.sub_dir:
            self.data_dir = os.path.join(self.data_path, self.sub_dir)
        else:
            self.data_dir = self.data_path
        self.origin_url, _ = exectools.cmd_assert(f'git -C {self.data_path} remote get-url origin', strip=True)
        self.commit_hash, _ = exectools.cmd_assert(f'git -C {self.data_path} rev-parse HEAD', strip=True)
        if not os.path.isdir(self.data_dir):
            raise GitDataPathException('{} is not a valid sub-directory in the data'.format(self.sub_dir))
    def load_data(self, path='', key=None, keys=None, exclude=None, filter_funcs=None, replace_vars=None):
        """Load data file(s) from the data directory.
        :param str path: sub-path under the data dir to search
        :param str key: a single key (file basename without extension) to load
        :param list keys: multiple keys to load (mutually exclusive with key)
        :param exclude: key or list of keys to skip
        :param filter_funcs: callable or list of callables (key, data) -> bool
        :param dict replace_vars: values substituted into `{var}` templates in the raw text
        :return: dict of key -> DataObj, or a single DataObj when `key` was given and found
        """
        full_path = os.path.join(self.data_dir, path.replace('\\', '/'))
        if path and not os.path.isdir(full_path):
            raise GitDataPathException('Cannot find "{}" under "{}"'.format(path, self.data_dir))
        if filter_funcs is not None and not isinstance(filter_funcs, list):
            filter_funcs = [filter_funcs]
        if exclude is not None and not isinstance(exclude, list):
            exclude = [exclude]
        if key and keys:
            raise GitDataException('Must use key or keys, but not both!')
        if key:
            keys = [key]
        if keys:
            if not isinstance(keys, list):
                keys = [keys]
            files = []
            for k in keys:
                for ext in self.exts:
                    # renamed from `path`, which shadowed the method parameter
                    candidate = k + ext
                    if os.path.isfile(os.path.join(full_path, candidate)):
                        files.append(candidate)
                        break  # found for this key, move on
        else:
            files = os.listdir(full_path)
        result = {}
        for name in files:
            base_name, ext = os.path.splitext(name)
            if ext.lower() in self.exts:
                data_file = os.path.join(full_path, name)
                if os.path.isfile(data_file):
                    with io.open(data_file, 'r', encoding="utf-8") as f:
                        raw_text = f.read()
                    if replace_vars:
                        try:
                            raw_text = raw_text.format(**replace_vars)
                        except KeyError as e:
                            self.logger.warning('{} contains template key `{}` but no value was provided'.format(data_file, e.args[0]))
                    data = yaml.full_load(raw_text)
                    use = True
                    if exclude and base_name in exclude:
                        use = False
                    if use and filter_funcs:
                        for func in filter_funcs:
                            use &= func(base_name, data)
                            if not use:
                                break
                    if use:
                        result[base_name] = DataObj(base_name, data_file, data)
        if key and key in result:
            result = result[key]
        return result
    def commit(self, msg):
        """
        Commit outstanding data changes
        """
        self.logger.info('Commit config: {}'.format(msg))
        with Dir(self.data_path):
            exectools.cmd_assert('git add .')
            exectools.cmd_assert('git commit --allow-empty -m "{}"'.format(msg))
    def push(self):
        """
        Push changes back to data repo.
        Will of course fail if user does not have write access.
        """
        self.logger.info('Pushing config...')
        with Dir(self.data_path):
            exectools.cmd_assert('git push')
from __future__ import absolute_import, print_function, unicode_literals
from future.utils import as_native_str
import glob
import json
import os
import re
import shutil
import threading
import yaml
import io
from functools import wraps
from dockerfile_parse import DockerfileParser
from doozerlib import brew, exectools, logutil, pushd, util
logger = logutil.getLogger(__name__)
def log(func):
    """Decorator that logs each call to *func* (with its arguments) and the
    value it returns.
    :param function func: callable to wrap
    :return: the wrapped callable
    """
    @wraps(func)
    def logged(*args, **kwargs):
        logger.info('running: {}, with args {} {}'.format(func.__name__, args, kwargs))
        result = func(*args, **kwargs)
        logger.info('{} returned {}'.format(func.__name__, result))
        return result
    return logged
def unpack(func):
    """Decorator that spreads a single tuple argument into positional arguments.
    Needed because Python 2.7 doesn't have "starmap" for Pool / ThreadPool.
    :param function func: callable to wrap
    :return: wrapped callable accepting one tuple
    """
    @wraps(func)
    def spread(packed_args):
        return func(*packed_args)
    return spread
@unpack
def update_and_build(nvr, stream, runtime, image_ref_mode, merge_branch, force_build=False):
    """Module entrypoint, orchestrate update and build steps of metadata repos
    :param string nvr: Operator name-version-release
    :param string stream: Which metadata repo should be updated (dev, stage, prod)
    :param Runtime runtime: a runtime instance
    :param string image_ref_mode: Build mode for image references (by-arch or manifest-list)
    :param string merge_branch: Which branch should be updated in the metadata repo
    :param bool force_build: Build the container even when the metadata repo had no changes
    :return bool True if operations succeeded, False if something went wrong
    """
    op_md = OperatorMetadataBuilder(nvr, stream, runtime=runtime, image_ref_mode=image_ref_mode)
    if not op_md.update_metadata_repo(merge_branch) and not force_build:
        logger.info('No changes in metadata repo, skipping build')
        # Still print the latest existing build so callers always get an NVR on stdout.
        print(OperatorMetadataLatestBuildReporter(op_md.operator_name, runtime).get_latest_build())
        return True
    if not op_md.build_metadata_container():
        util.red_print('Build of {} failed, see debug.log'.format(op_md.metadata_repo))
        return False
    print(OperatorMetadataLatestBuildReporter(op_md.operator_name, runtime).get_latest_build())
    return True
class OperatorMetadataBuilder(object):
    """Updates and builds the dist-git "metadata" repository that mirrors an
    operator's manifests for app-registry delivery.

    Expensive attributes (brew lookups, repo names, parsed CSV, etc.) are
    resolved lazily and memoized via ``_cache_attr``; callers/tests may
    pre-seed them through ``**kwargs``.
    """
    def __init__(self, nvr, stream, runtime, image_ref_mode='by-arch', **kwargs):
        # nvr: operator brew build name-version-release
        # stream: metadata flavor (e.g. dev, stage, prod)
        # image_ref_mode: 'by-arch' or 'manifest-list'
        # kwargs: pre-computed attribute values consumed by _cache_attr
        self.nvr = nvr
        self.stream = stream
        self.runtime = runtime
        self.image_ref_mode = image_ref_mode
        self._cached_attrs = kwargs
    @log
    def update_metadata_repo(self, metadata_branch):
        """Update the corresponding metadata repository of an operator
        :param string metadata_branch: Which branch of the metadata repository should be updated
        :return: bool True if metadata repo was updated, False if there was nothing to update
        """
        exectools.cmd_assert('mkdir -p {}'.format(self.working_dir))
        self.clone_repo(self.operator_name, self.operator_branch)
        self.clone_repo(self.metadata_repo, metadata_branch)
        self.checkout_repo(self.operator_name, self.commit_hash)
        self.update_metadata_manifests_dir()
        self.update_current_csv_shasums()
        self.merge_streams_on_top_level_package_yaml()
        self.create_metadata_dockerfile()
        return self.commit_and_push_metadata_repo()
    @log
    def build_metadata_container(self):
        """Build the metadata container using rhpkg
        :return: bool True if build succeeded, False otherwise
        :raise: Exception if command failed (rc != 0)
        """
        with pushd.Dir('{}/{}'.format(self.working_dir, self.metadata_repo)):
            cmd = 'timeout 600 rhpkg {} {}container-build --nowait --target {}'.format(
                self.runtime.rhpkg_config,
                ('--user {} '.format(self.rhpkg_user) if self.rhpkg_user else ''),
                self.target
            )
            rc, stdout, stderr = exectools.cmd_gather(cmd)
            if rc != 0:
                raise Exception('{} failed! rc={} stdout={} stderr={}'.format(
                    cmd, rc, stdout.strip(), stderr.strip()
                ))
            # watch_brew_task returns an error string on failure, None on success
            return self.watch_brew_task(self.extract_brew_task_id(stdout.strip())) is None
    @log
    def clone_repo(self, repo, branch):
        """Clone a repository using rhpkg
        :param string repo: Name of the repository to be cloned
        :param string branch: Which branch of the repository should be cloned
        """
        cmd = 'timeout 600 rhpkg '
        cmd += self.runtime.rhpkg_config
        cmd += '--user {} '.format(self.rhpkg_user) if self.rhpkg_user else ''
        cmd += 'clone containers/{} --branch {}'.format(repo, branch)
        # on retry, wipe the possibly half-cloned directory first
        delete_repo = 'rm -rf {}/{}'.format(self.working_dir, repo)
        with pushd.Dir(self.working_dir):
            exectools.cmd_assert(cmd, retries=3, on_retry=delete_repo)
    @log
    def checkout_repo(self, repo, commit_hash):
        """Checkout a repository to a particular commit hash
        :param string repo: The repository in which the checkout operation will be performed
        :param string commit_hash: The desired point to checkout the repository
        """
        with pushd.Dir('{}/{}'.format(self.working_dir, repo)):
            exectools.cmd_assert('git checkout {}'.format(commit_hash))
    @log
    def update_metadata_manifests_dir(self):
        """Update channel-specific manifests in the metadata repository with the latest
        manifests found in the operator repository
        If the metadata repository is empty, bring the top-level package YAML file also
        """
        self.remove_metadata_channel_dir()
        self.ensure_metadata_manifests_dir_exists()
        self.copy_channel_manifests_from_operator_to_metadata()
        if not self.metadata_package_yaml_exists():
            self.copy_operator_package_yaml_to_metadata()
        if self.image_ref_mode == "by-arch":
            self.copy_manifests_for_additional_arches()
    @log
    def copy_manifests_for_additional_arches(self):
        """Create aditional copies of current channel's manifests dir,
        one extra copy per additional architecture.
        """
        for arch in self.additional_arches:
            self.delete_metadata_arch_manifests_dir(arch)
            self.create_manifests_copy_for_arch(arch)
    @log
    def delete_metadata_arch_manifests_dir(self, arch):
        """Delete previous arch-specific manifests, should they exist.
        """
        exectools.cmd_assert('rm -rf {}/{}/{}/{}-{}'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir,
            self.channel,
            arch
        ))
    @log
    def create_manifests_copy_for_arch(self, arch):
        """Copy current channel manifests to <current channel>-<arch>
        Example: cp /path/to/manifests/4.2 /path/to/manifests/4.2-s390x
        """
        exectools.cmd_assert('cp -r {}/{}/{}/{} {}/{}/{}/{}-{}'.format(
            self.working_dir,
            self.operator_name,
            self.operator_manifests_dir,
            self.channel,
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir,
            self.channel,
            arch
        ))
        # assumes exactly one CSV file exists in the copied dir — TODO confirm
        filename = glob.glob('{}/{}/{}/{}-{}/*.clusterserviceversion.yaml'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir,
            self.channel,
            arch
        ))[0]
        self.change_arch_csv_metadata_name(filename, arch)
    @log
    def change_arch_csv_metadata_name(self, csv_filename, arch):
        """Each CSV has a metadata > name property, that should be unique because
        it is used to map to a channel in package.yaml; So the arch is appended
        to the name.
        Example: name: foo.4.2.0-12345 becomes foo.4.2.0-12345-s390x
        """
        with io.open(csv_filename, 'r', encoding="utf-8") as reader:
            contents = reader.read()
        with io.open(csv_filename, 'w', encoding="utf-8") as writer:
            writer.write(
                contents.replace(
                    ' name: {}'.format(self.csv),
                    ' name: {}-{}'.format(self.csv, arch)
                )
            )
    @log
    def update_current_csv_shasums(self):
        """Read all files listed in operator's art.yaml, search for image
        references and replace their version tags by a corresponding SHA.
        """
        if self.image_ref_mode == "by-arch":
            self.replace_version_by_sha_on_image_references('x86_64')
            for arch in self.additional_arches:
                self.replace_version_by_sha_on_image_references(arch)
            return
        self.replace_version_by_sha_on_image_references('manifest-list')
    @log
    def replace_version_by_sha_on_image_references(self, arch):
        """For every file listed in the operator's art.yaml (for the given arch),
        replace image references carrying a version tag with their digest form.
        :param string arch: architecture (or "manifest-list") used when resolving SHAs
        """
        for file in self.get_file_list_from_operator_art_yaml(arch):
            with io.open(file, 'r', encoding="utf-8") as reader:
                contents = reader.read()
            new_contents = self.find_and_replace_image_versions_by_sha(contents, arch)
            with io.open(file, 'w', encoding="utf-8") as writer:
                writer.write(new_contents)
    @log
    def find_and_replace_image_versions_by_sha(self, contents, arch):
        """Read "contents" collecting all image references, query the corresponding
        SHA for each found image and replace them inline.
        :param contents: a string with the contents of a YAML file that might have image references
        :param arch: string with an architecture or "manifests-list", used when picking SHAs
        :return: contents string back, with image references replaced + "relatedImages" node under "spec"
        """
        found_images = {}
        # re.sub callback: resolves the digest and records the image for relatedImages
        def collect_replaced_image(match):
            image = '{}/{}@{}'.format(
                self.operator_csv_registry,
                match.group(1),
                self.fetch_image_sha('{}:{}'.format(match.group(1), match.group(2)), arch)
            )
            key = u'{}'.format(re.search(r'([^/]+)/(.+)', match.group(1)).group(2))
            found_images[key] = u'{}'.format(image)
            return image
        new_contents = re.sub(
            r'{}/([^:]+):([^\'"\s]+)'.format(self.operator_csv_registry),
            collect_replaced_image,
            contents,
            flags=re.MULTILINE
        )
        new_contents = self.append_related_images_spec(new_contents, found_images)
        return new_contents
    @log
    def append_related_images_spec(self, contents, images):
        """Create a new node inside "spec" listing all related images, without
        parsing the YAML, to avoid unwanted modifications when re-serializing it.
        :param contents: CSV YAML string
        :param images: a dict containing images (key: name, value: image)
        :return: contents string back with "relatedImages" node under "spec"
        """
        related_images = []
        for name, image in images.items():
            related_images.append('  - name: {}\n    image: {}'.format(name, image))
        related_images.sort()
        return re.sub(
            r'^spec:\n',
            'spec:\n  relatedImages:\n{}\n'.format('\n'.join(related_images)),
            contents,
            flags=re.MULTILINE
        )
    @log
    def merge_streams_on_top_level_package_yaml(self):
        """Update (or create) a channel entry on the top-level package YAML file,
        pointing to the current CSV
        """
        package_yaml = yaml.safe_load(io.open(self.metadata_package_yaml_filename, encoding="utf-8"))
        channel_name = self.channel_name
        channel_csv = self.csv
        package_yaml = self.add_channel_entry(package_yaml, channel_name, channel_csv)
        if self.image_ref_mode == "by-arch":
            # each additional arch gets its own "<channel>-<arch>" entry
            for arch in self.additional_arches:
                channel_name = '{}-{}'.format(self.channel_name, arch)
                channel_csv = '{}-{}'.format(self.csv, arch)
                package_yaml = self.add_channel_entry(package_yaml, channel_name, channel_csv)
        package_yaml['defaultChannel'] = str(self.get_default_channel(package_yaml))
        with io.open(self.metadata_package_yaml_filename, 'w', encoding="utf-8") as f:
            yaml.safe_dump(package_yaml, f)
    def add_channel_entry(self, package_yaml, channel_name, channel_csv):
        """Point an existing channel at channel_csv, or append a new channel entry.
        :param dict package_yaml: parsed package.yaml structure
        :return: the (mutated) package_yaml
        """
        index = self.find_channel_index(package_yaml, channel_name)
        if index is not None:
            package_yaml['channels'][index]['currentCSV'] = str(channel_csv)
        else:
            package_yaml['channels'].append({
                'name': str(channel_name),
                'currentCSV': str(channel_csv)
            })
        return package_yaml
    def find_channel_index(self, package_yaml, channel_name=''):
        """Return the index of channel_name in package_yaml['channels'], or None."""
        channel_name = channel_name if channel_name else self.channel_name
        for index, channel in enumerate(package_yaml['channels']):
            if str(channel['name']) == str(channel_name):
                return index
        return None
    def get_default_channel(self, package_yaml):
        """A package YAML with multiple channels must declare a defaultChannel
        It usually would be the highest version, but on 4.1 the channels have
        custom names, such as "stable", "preview", etc.
        :param dict package_yaml: Parsed package.yaml structure
        :return: string with "highest" channel name
        """
        highest_version = max([ChannelVersion(str(ch['name'])) for ch in package_yaml['channels']])
        return str(highest_version)
    @log
    def create_metadata_dockerfile(self):
        """Create a minimal Dockerfile on the metadata repository, copying all manifests
        inside the image and having nearly the same labels as its corresponding operator Dockerfile
        But some modifications on the labels are needed:
        - 'com.redhat.component' label should contain the metadata component name,
          otherwise it conflicts with the operator.
        - 'com.redhat.delivery.appregistry' should always be "true", regardless of
          the value coming from the operator Dockerfile
        - 'release' label should be removed, because we can't build the same NVR
          multiple times
        - 'version' label should contain both 'release' info and the target stream
        """
        operator_dockerfile = DockerfileParser('{}/{}/Dockerfile'.format(self.working_dir, self.operator_name))
        metadata_dockerfile = DockerfileParser('{}/{}/Dockerfile'.format(self.working_dir, self.metadata_repo))
        metadata_dockerfile.content = 'FROM scratch\nCOPY ./manifests /manifests'
        metadata_dockerfile.labels = operator_dockerfile.labels
        metadata_dockerfile.labels['com.redhat.component'] = (
            operator_dockerfile.labels['com.redhat.component']
            .replace(self.operator_name, self.metadata_name)
        )
        metadata_dockerfile.labels['com.redhat.delivery.appregistry'] = 'true'
        metadata_dockerfile.labels['name'] = 'openshift/ose-{}'.format(self.metadata_name)
        # mangle version according to spec
        metadata_dockerfile.labels['version'] = '{}.{}.{}'.format(
            operator_dockerfile.labels['version'],
            operator_dockerfile.labels['release'],
            self.stream)
        del(metadata_dockerfile.labels['release'])
    @log
    def commit_and_push_metadata_repo(self):
        """Commit and push changes made on the metadata repository, using rhpkg
        :return: bool True when something was committed/pushed, False otherwise
        """
        with pushd.Dir('{}/{}'.format(self.working_dir, self.metadata_repo)):
            try:
                exectools.cmd_assert('git add .')
                user_option = '--user {} '.format(self.rhpkg_user) if self.rhpkg_user else ''
                exectools.cmd_assert('rhpkg {} {}commit -m "Update operator metadata"'.format(self.runtime.rhpkg_config, user_option))
                exectools.cmd_assert('timeout 600 rhpkg {}push'.format(user_option), retries=3)
                return True
            except Exception:
                # The metadata repo might be already up to date, so we don't have anything new to commit
                return False
    @log
    def remove_metadata_channel_dir(self):
        # wipe the channel dir so stale manifests are not kept around
        exectools.cmd_assert('rm -rf {}/{}/{}/{}'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir,
            self.channel
        ))
    @log
    def ensure_metadata_manifests_dir_exists(self):
        exectools.cmd_assert('mkdir -p {}/{}/{}'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir
        ))
    @log
    def copy_channel_manifests_from_operator_to_metadata(self):
        exectools.cmd_assert('cp -r {}/{}/{}/{} {}/{}/{}'.format(
            self.working_dir,
            self.operator_name,
            self.operator_manifests_dir,
            self.channel,
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir
        ))
    @log
    def copy_operator_package_yaml_to_metadata(self):
        exectools.cmd_assert('cp {} {}/{}/{}'.format(
            self.operator_package_yaml_filename,
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir
        ))
    @log
    def metadata_package_yaml_exists(self):
        # True when any *package.yaml already exists in the metadata manifests dir
        return len(glob.glob('{}/{}/{}/*package.yaml'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir
        ))) > 0
    @log
    def get_file_list_from_operator_art_yaml(self, arch):
        """Build the list of metadata files (from art.yaml `updates` entries plus the
        CSV itself) whose image references should be replaced for the given arch."""
        file_list = [
            '{}/{}/{}/{}'.format(
                self.working_dir,
                self.metadata_repo,
                self.metadata_manifests_dir,
                entry['file'].format(**self.runtime.group_config.vars)
            )
            for entry in self.operator_art_yaml.get('updates', [])
        ]
        if arch not in ['manifest-list', 'x86_64']:
            file_list = self.change_dir_names_to_arch_specific(file_list, arch)
        csv_file = self.metadata_csv_yaml_filename(arch)
        if csv_file not in file_list:
            file_list.append(csv_file)
        return file_list
    @log
    def change_dir_names_to_arch_specific(self, file_list, arch):
        """Map channel directories in file_list to their arch-specific variants
        (e.g. `4.2/` becomes `4.2-s390x/`), keeping only files that actually exist.
        """
        return list(filter(os.path.isfile, [
            file.replace('{}/'.format(self.channel), '{}-{}/'.format(self.channel, arch))
            for file in file_list
        ]))
    @log
    def fetch_image_sha(self, image, arch):
        """Use skopeo to obtain the SHA of a given image
        We want the image manifest shasum because internal registry/cri-o can't handle manifest lists yet.
        More info: http://post-office.corp.redhat.com/archives/aos-team-art/2019-October/msg02010.html
        :param string image: Image name + version (format: openshift/my-image:v4.1.16-201901010000)
        :param string arch: Same image has different SHAs per architecture
        :return string Digest (format: sha256:a1b2c3d4...)
        """
        registry = self.runtime.group_config.urls.brew_image_host.rstrip("/")
        ns = self.runtime.group_config.urls.brew_image_namespace
        if ns:
            image = "{}/{}".format(ns, image.replace('/', '-'))
        if arch == 'manifest-list':
            cmd = 'skopeo inspect docker://{}/{}'.format(registry, image)
            out, err = exectools.cmd_assert(cmd, retries=3)
            return json.loads(out)['Digest']
        # --raw returns the manifest list; pick the digest of the matching arch
        cmd = 'skopeo inspect --raw docker://{}/{}'.format(registry, image)
        out, err = exectools.cmd_assert(cmd, retries=3)
        arch = 'amd64' if arch == 'x86_64' else arch  # x86_64 is called amd64 in skopeo
        def select_arch(manifests):
            return manifests['platform']['architecture'] == arch
        return list(filter(select_arch, json.loads(out)['manifests']))[0]['digest']
    @log
    def extract_brew_task_id(self, container_build_output):
        """Extract the Task ID from the output of a `rhpkg container-build` command
        :param string container_build_output: stdout from `rhpkg container-build`
        :return: string of captured task ID
        :raise: AttributeError if task ID can't be found in provided output
        """
        return re.search(r'Created task:\ (\d+)', container_build_output).group(1)
    @log
    def watch_brew_task(self, task_id):
        """Keep watching progress of brew task
        :param string task_id: The Task ID to be watched
        :return: string with an error if an error happens, None otherwise
        """
        return brew.watch_task(
            self.runtime.group_config.urls.brewhub, logger.info, task_id, threading.Event()
        )
    @property
    def working_dir(self):
        return self._cache_attr('working_dir')
    @property
    def rhpkg_user(self):
        return self._cache_attr('rhpkg_user')
    @property
    def operator_branch(self):
        return self._cache_attr('operator_branch')
    @property
    def target(self):
        # brew build target derived from the group branch
        return '{}-candidate'.format(self.operator_branch)
    @property
    def operator_name(self):
        return self._cache_attr('operator_name')
    @property
    def commit_hash(self):
        return self._cache_attr('commit_hash')
    @property
    def operator(self):
        return self._cache_attr('operator')
    @property
    def metadata_name(self):
        return '{}-metadata'.format(self.operator_name)
    @property
    def metadata_repo(self):
        # e.g. foo-operator -> foo-dev-operator-metadata for stream "dev"
        return self.operator_name.replace(
            '-operator', '-{}-operator-metadata'.format(self.stream)
        )
    @property
    def channel(self):
        # major.minor extracted from the version field of the NVR (e.g. "4.2")
        return re.search(r'^v?(\d+\.\d+)\.*', self.nvr.split('-')[-2]).group(1)
    @property
    def brew_buildinfo(self):
        return self._cache_attr('brew_buildinfo')
    @property
    def operator_manifests_dir(self):
        return self.operator.config['update-csv']['manifests-dir'].rstrip('/')
    @property
    def metadata_manifests_dir(self):
        return 'manifests'
    @property
    def operator_package_yaml_filename(self):
        return glob.glob('{}/{}/{}/*package.yaml'.format(
            self.working_dir,
            self.operator_name,
            self.operator_manifests_dir
        ))[0]
    @property
    def metadata_package_yaml_filename(self):
        return glob.glob('{}/{}/{}/*package.yaml'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir
        ))[0]
    @log
    def metadata_csv_yaml_filename(self, arch='x86_64'):
        """Path of the CSV file in the metadata repo for the given arch."""
        arch_dir = self.channel if arch in ['manifest-list', 'x86_64'] else '{}-{}'.format(self.channel, arch)
        return glob.glob('{}/{}/{}/{}/*.clusterserviceversion.yaml'.format(
            self.working_dir,
            self.metadata_repo,
            self.metadata_manifests_dir,
            arch_dir
        ))[0]
    @property
    def operator_art_yaml(self):
        # art.yaml is optional; an absent file means "no extra updates"
        try:
            return yaml.safe_load(io.open('{}/{}/{}/art.yaml'.format(
                self.working_dir,
                self.operator_name,
                self.operator_manifests_dir
            ), encoding="utf-8"))
        except IOError:
            return {}
    @property
    def operator_csv_registry(self):
        return self.operator.config['update-csv']['registry']
    @property
    def csv(self):
        return self._cache_attr('csv')
    @property
    def channel_name(self):
        """Use a custom name for a channel on package YAML if specified,
        fallback to default channel (4.1, 4.2, etc) otherwise
        This is valid only for 4.1, custom names should be ignored on 4.2
        """
        if str(self.channel) == '4.1' and 'channel' in self.operator.config['update-csv']:
            return self.operator.config['update-csv']['channel']
        return self.channel
    @property
    def additional_arches(self):
        # every configured arch except x86_64, which is always handled first
        arches = self.operator.get_arches()
        if 'x86_64' in arches:
            arches.remove('x86_64')
        return arches
    def get_working_dir(self):
        return '{}/{}/{}'.format(self.runtime.working_dir, 'distgits', 'containers')
    def get_rhpkg_user(self):
        return self.runtime.user if hasattr(self.runtime, 'user') else ''
    def get_operator_branch(self):
        return self.runtime.group_config.branch
    def get_operator_name(self):
        # operator name is the last path segment of the dist-git Source url
        _rc, stdout, _stderr = self.brew_buildinfo
        return re.search('Source:([^#]+)', stdout).group(1).split('/')[-1]
    def get_commit_hash(self):
        # commit is the fragment after '#' in the Source url
        _rc, stdout, _stderr = self.brew_buildinfo
        return re.search('Source:[^#]+#(.+)', stdout).group(1)
    def get_operator(self):
        return self.runtime.image_map[self.operator_name]
    @log
    def get_brew_buildinfo(self):
        """Output of this command is used to extract the operator name and its commit hash
        """
        cmd = 'brew buildinfo {}'.format(self.nvr)
        stdout, stderr = exectools.cmd_assert(cmd, retries=3)
        return 0, stdout, stderr  # In this used to be cmd_gather, so return rc=0.
    def get_csv(self):
        # CSV name is the metadata > name field of the clusterserviceversion YAML
        return yaml.safe_load(io.open(self.metadata_csv_yaml_filename(), encoding="utf-8"))['metadata']['name']
    def _cache_attr(self, attr):
        """Some attribute values are time-consuming to retrieve, as they might
        come from running an external command, etc. So, after obtaining the value
        it gets saved in "_cached_attrs" for future uses
        Also makes automated testing easier, as values can be simply injected
        at "_cached_attrs", without the need of mocking the sources from which
        the values come
        """
        if attr not in self._cached_attrs:
            self._cached_attrs[attr] = getattr(self, 'get_{}'.format(attr))()
        return self._cached_attrs[attr]
class OperatorMetadataLatestBuildReporter(object):
    """Looks up the most recent metadata container build of an operator in brew."""
    @log
    def __init__(self, operator_name, runtime):
        self.operator_name = operator_name
        self.runtime = runtime
    @log
    def get_latest_build(self):
        """Return the NVR of the newest metadata container build in the candidate tag."""
        command = 'brew latest-build {} {} --quiet'.format(self.target, self.metadata_component_name)
        stdout, _stderr = exectools.cmd_assert(command, retries=3)
        return stdout.split(' ')[0]
    @property
    def target(self):
        """Brew target, derived from the group branch."""
        return '{}-candidate'.format(self.operator_branch)
    @property
    def operator_branch(self):
        return self.runtime.group_config.branch
    @property
    def metadata_component_name(self):
        """Metadata component name derived from the operator component name."""
        return self.operator_component_name.replace('-container', '-metadata-container')
    @property
    def operator_component_name(self):
        """Component name from distgit config when declared, else '<name>-container'."""
        config = self.operator.config
        if 'distgit' in config and 'component' in config['distgit']:
            return config['distgit']['component']
        return '{}-container'.format(self.operator_name)
    @property
    def operator(self):
        return self.runtime.image_map[self.operator_name]
class OperatorMetadataLatestNvrReporter(object):
    """Query latest operator metadata based on nvr and stream"""
    @log
    def __init__(self, operator_nvr, stream, runtime):
        self.operator_nvr = operator_nvr
        self.stream = stream
        self.runtime = runtime
        component, version, release = self.unpack_nvr(operator_nvr)
        self.operator_component = component
        self.operator_version = version
        self.operator_release = release
        self.metadata_component = component.replace('operator-container', 'operator-metadata-container')
        # metadata version embeds the operator release and the target stream
        self.metadata_version = '{}.{}.{}'.format(version, release, self.stream)
    @log
    def get_latest_build(self):
        """Return the matching metadata NVR with the highest release number, or None."""
        best_nvr = None
        best_release = -1
        for nvr in self.get_all_builds():
            component, version, release = self.unpack_nvr(nvr)
            release_number = int(re.search(r'\d+', release).group())
            if component != self.metadata_component or version != self.metadata_version:
                continue
            if release_number > best_release:
                best_release = release_number
                best_nvr = nvr
        return best_nvr
    @log
    def get_all_builds(self):
        """Ask brew for all releases of a package"""
        command = 'brew list-tagged --quiet {} {}'.format(self.brew_tag, self.metadata_component)
        _rc, stdout, _stderr = exectools.cmd_gather(command)
        for line in stdout.splitlines():
            yield line.split(' ')[0]
    def unpack_nvr(self, nvr):
        """Split an NVR string into its (name, version, release) tuple."""
        return tuple(nvr.rsplit('-', 2))
    @property
    def brew_tag(self):
        return self.runtime.get_default_candidate_brew_tag() or '{}-candidate'.format(self.operator_branch)
    @property
    def operator_branch(self):
        return self.runtime.group_config.branch
class ChannelVersion(object):
    """Quick & dirty custom version comparison implementation, since buildvm
    has drastically different versions of pkg_resources and setuptools.
    """
    def __init__(self, raw):
        self.raw = raw
        self.parse_version()
    def parse_version(self):
        """Extract integer major/minor parts; non-version names parse as 0.0."""
        match = re.match(r'^(?P<major>\d+)\.(?P<minor>\d+).*$', self.raw)
        if match:
            self.major = int(match.group('major'))
            self.minor = int(match.group('minor'))
        else:
            self.major = 0
            self.minor = 0
    @as_native_str()
    def __str__(self):
        return self.raw
    def __lt__(self, other):
        return (self.major, self.minor) < (other.major, other.minor)
    def __gt__(self, other):
        return (self.major, self.minor) > (other.major, other.minor)
    def __eq__(self, other):
        return (self.major, self.minor) == (other.major, other.minor)
    def __ne__(self, other):
        return not self.__eq__(other)
from __future__ import absolute_import, print_function, unicode_literals
import os
import io
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse
import requests
from doozerlib.logutil import getLogger
from doozerlib.util import mkdirs, is_in_directory
from doozerlib.model import Missing
from doozerlib.exceptions import DoozerFatalError
from doozerlib.exectools import cmd_assert
from doozerlib.pushd import Dir
LOGGER = getLogger(__name__)
class SourceModifierFactory(object):
    """A factory class for creating source modifier objects."""

    # Registry mapping action name -> modifier class. Populated by each
    # modifier implementation as it is defined.
    MODIFICATIONS = {}

    @classmethod
    def supports(cls, action_name):
        """Test if specified modification action is supported"""
        return action_name in cls.MODIFICATIONS

    def create(self, *args, **kwargs):
        """Create a source modifier based on action.

        For example, create a source modifier for adding an out-of-tree file:

            factory = SourceModifierFactory()
            modifier = factory.create(action='add', source='http://example.com/gating_yaml', dest='gating.yaml', overwrite=True)
            modifier.modify()
        """
        action_name = kwargs["action"]
        if not self.supports(action_name):
            raise KeyError("Unknown modification action: {}.".format(action_name))
        modifier_class = self.MODIFICATIONS[action_name]
        return modifier_class(*args, **kwargs)
class AddModifier(object):
    """ A source modifier that supports adding an out-of-tree source to dist-git.

    An `add` action has the following valid fields:

    - `action`: must be `add`
    - `source`: URL to the out-of-tree source
    - `path`: Path in dist-git to write the source to
    - `overwriting`: Allow to overwrite if `path` exists

    For example, to add an out-of-tree source https://gitlab.cee.redhat.com/aosqe/ocp-build-data-gating/raw/master/openshift-3.11/atomic-openshift-cluster-autoscaler/gating_yaml to dist-git and save as `gating.yaml`:

        content:
          source:
            git:
              branch:
                fallback: master
                target: release-{MAJOR}.{MINOR}
              url: git@github.com:openshift/kubernetes-autoscaler.git
        modifications:
        - action: replace
          match: origin-cluster-autoscaler
          replacement: atomic-openshift-cluster-autoscaler
        - action: add
          source: https://gitlab.cee.redhat.com/aosqe/ocp-build-data-gating/raw/master/openshift-3.11/atomic-openshift-cluster-autoscaler/gating_yaml
          path: gating.yaml
          overwriting: true
        path: images/cluster-autoscaler
        # omitted
    """

    SUPPORTED_URL_SCHEMES = ["http", "https"]

    def __init__(self, *args, **kwargs):
        """ Initialize an "add" Modifier.

        :param source: URL to the out-of-tree source.
        :param path: Destination path to the dist-git repo.
        :param overwriting: True to allow to overwrite if path exists.
            Setting to false to prevent from accidentally overwriting files from in-tree source.
        """
        self.source = kwargs["source"]
        self.path = kwargs["path"]
        self.overwriting = kwargs.get("overwriting", False)

    def act(self, *args, **kwargs):
        """ Run the modification action

        :param context: A context dict; `context["distgit_path"]` is the local dist-git checkout.
        :param ceiling_dir: If not None, prevent from writing to a directory that is out of ceiling_dir.
        :param session: If not None, a requests.Session object for HTTP requests
        :raises ValueError: if the URL scheme is unsupported, or the destination escapes ceiling_dir
        :raises FileExistsError: if the destination exists and `overwriting` is not set
        """
        LOGGER.debug("Running 'add' modification action...")
        context = kwargs["context"]
        distgit_path = context['distgit_path']
        source = urlparse(self.source)
        if source.scheme not in self.SUPPORTED_URL_SCHEMES:
            raise ValueError(
                "Unsupported URL scheme {} used in 'add' action.".format(source.scheme))
        source_url = source.geturl()  # normalized URL
        path = str(distgit_path.joinpath(self.path))
        ceiling_dir = kwargs.get("ceiling_dir")
        # requests.Session() is the canonical constructor; requests.session()
        # is a legacy alias.
        session = kwargs.get("session") or requests.Session()
        if ceiling_dir and not is_in_directory(path, ceiling_dir):
            raise ValueError("Writing to a file out of {} is not allowed.".format(ceiling_dir))
        # NOTE: `overwriting` is checked before writing.
        # A data race is possible, but this suffices to prevent accidentally
        # overwriting in-tree sources. FileExistsError is a subclass of
        # OSError/IOError, so existing callers catching IOError still work.
        if not self.overwriting and os.path.exists(path):
            raise FileExistsError(
                "Destination path {} exists. Use 'overwriting: true' to overwrite.".format(self.path))
        LOGGER.debug("Getting out-of-tree source {}...".format(source_url))
        response = session.get(source_url)
        response.raise_for_status()
        mkdirs(os.path.dirname(path))
        with io.open(path, "wb") as dest_file:
            dest_file.write(response.content)
        LOGGER.debug("Out-of-tree source saved: {} -> {}".format(source_url, path))
SourceModifierFactory.MODIFICATIONS["add"] = AddModifier
class ReplaceModifier(object):
    """ A source modifier that supports replacing a substring in Dockerfile or RPM spec file.
    """

    def __init__(self, *args, **kwargs):
        """ Initialize ReplaceModifier

        :param match: This is old substring to be replaced.
        :param replacement: This is new substring, which would replace old substring.
        """
        self.match = kwargs["match"]
        self.replacement = kwargs["replacement"]

    def act(self, *args, **kwargs):
        """ Run the modification action

        :param context: A context dict. `context.component_name` is the dist-git repo name,
            and `context.content` is the content of Dockerfile or RPM spec file.
        """
        context = kwargs["context"]
        original = context["content"]
        component_name = context["component_name"]
        old = self.match
        assert (old is not Missing)
        new = self.replacement
        assert (new is not Missing)
        if new is None:  # Nothing follows colon in config yaml; user attempting to remove string
            new = ""
        updated = original.replace(old, new)
        if updated == original:
            # A no-op replacement almost always means the config is stale.
            raise DoozerFatalError("{}: Replace ({}->{}) modification did not make a change to the Dockerfile content"
                                   .format(component_name, old, new))
        LOGGER.debug(
            "Performed string replace '%s' -> '%s':\n%s\n" %
            (old, new, updated))
        context["content"] = updated
SourceModifierFactory.MODIFICATIONS["replace"] = ReplaceModifier
class CommandModifier(object):
    """ A source modifier that supports running a custom command to modify the source.
    """

    def __init__(self, *args, **kwargs):
        """ Initialize CommandModifier

        :param command: a `str` or `list` of the command with arguments
        """
        self.command = kwargs["command"]

    def act(self, *args, **kwargs):
        """ Run the command

        :param context: A context dict. `context.set_env` is a `dict` of env vars to set for command (overriding existing).
        """
        ctx = kwargs["context"]
        env_overrides = ctx["set_env"]
        # Run the command from inside the dist-git checkout.
        with Dir(ctx['distgit_path']):
            cmd_assert(self.command, set_env=env_overrides)
SourceModifierFactory.MODIFICATIONS["command"] = CommandModifier | /rh_doozer-1.2.27-py3-none-any.whl/doozerlib/source_modifications.py | 0.717012 | 0.197251 | source_modifications.py | pypi |
import glob
import io
import json
import os
import re
import threading
import yaml
from dockerfile_parse import DockerfileParser
from doozerlib import brew, exectools, logutil, pushd
class OLMBundle(object):
    """This class is responsible for generating bundle containers out of previously built operators

    Every OLM Operator image should have a corresponding bundle container, which is mostly empty,
    carrying only the operator's manifests and some special Dockerfile labels, that allows the
    bundle container to publish those manifests on operator's behalf

    Even though each bundle container has its dedicated distgit repo, they are not meant to be
    independently built, due to their tight coupling to corresponding operators
    """

    def __init__(self, runtime):
        # Doozer runtime: provides group config, image metadata and logging.
        self.runtime = runtime

    def rebase(self, operator_nvr):
        """Update bundle distgit contents with manifests from given operator NVR
        Perform image SHA replacement on manifests before commit & push
        Annotations and Dockerfile labels are re-generated with info from operator's package YAML

        :param string operator_nvr: Operator NVR (format: my-operator-v4.2.30-202004200449)
        :return bool True if rebase succeeds, False if there was nothing new to commit
        """
        self.operator_nvr = operator_nvr
        self.get_operator_buildinfo()
        self.clone_operator()
        self.checkout_operator_to_build_commit()
        self.clone_bundle()
        self.clean_bundle_contents()
        self.get_operator_package_yaml_info()
        self.copy_operator_manifests_to_bundle()
        self.replace_image_references_by_sha_on_bundle_manifests()
        self.generate_bundle_annotations()
        self.generate_bundle_dockerfile()
        self.create_container_yaml()
        return self.commit_and_push_bundle(commit_msg="Update bundle manifests")

    def build(self, operator_name=None):
        """Trigger a brew build of operator's bundle

        :param string operator_name: Operator name (as in ocp-build-data file name, not brew component)
        :return bool True if build succeeds, False otherwise
        """
        if operator_name:
            self.operator_repo_name = 'containers/{}'.format(operator_name)
        self.clone_bundle()
        if not self.trigger_bundle_container_build():
            return False
        return self.watch_bundle_container_build()

    def get_latest_bundle_build(self):
        """Get NVR of latest bundle build tagged on given target

        :return string: NVR of latest bundle build, or "" if there is none.
        """
        _rc, out, _err = exectools.cmd_gather(
            'brew latest-build --quiet {} {}'.format(self.target, self.bundle_brew_component)
        )
        # First whitespace-separated column of brew's output is the NVR.
        return out.split(' ')[0]

    def get_operator_buildinfo(self):
        """Get operator distgit repository name and commit hash used to build given operator NVR
        """
        operator_buildinfo = brew.get_build_objects([self.operator_nvr], self.brew_session)[0]
        # The build "source" looks like <git-url>#<commit>; split into repo and commit.
        match = re.search(r'([^#]+)#(\w+)', operator_buildinfo['source'])
        # Keep only the trailing "<namespace>/<repo>" portion of the URL.
        self.operator_repo_name = '/'.join(match.group(1).split('/')[-2:])
        self.operator_build_commit = match.group(2)

    def clone_operator(self):
        """Clone operator distgit repository to doozer working dir
        """
        exectools.cmd_assert('rm -rf {}'.format(self.operator_clone_path))
        exectools.cmd_assert('mkdir -p {}'.format(os.path.dirname(self.operator_clone_path)))
        # rhpkg clone is retried because dist-git is occasionally flaky.
        exectools.cmd_assert('rhpkg{}clone --branch {} {} {}'.format(
            self.rhpkg_opts, self.branch, self.operator_repo_name, self.operator_clone_path
        ), retries=3)

    def checkout_operator_to_build_commit(self):
        """Checkout clone of operator repository to specific commit used to build given operator NVR
        """
        with pushd.Dir(self.operator_clone_path):
            exectools.cmd_assert('git checkout {}'.format(self.operator_build_commit))

    def clone_bundle(self):
        """Clone corresponding bundle distgit repository of given operator NVR
        """
        exectools.cmd_assert('rm -rf {}'.format(self.bundle_clone_path))
        exectools.cmd_assert('mkdir -p {}'.format(os.path.dirname(self.bundle_clone_path)))
        exectools.cmd_assert('rhpkg{}clone --branch {} {} {}'.format(
            self.rhpkg_opts, self.branch, self.bundle_repo_name, self.bundle_clone_path
        ), retries=3)

    def clean_bundle_contents(self):
        """Delete all files currently present in the bundle repository

        Generating bundle files is an idempotent operation, so it is much easier to clean up
        everything and re-create them instead of parsing and figuring out what changed

        At the end, only relevant diff, if any, will be committed.
        """
        exectools.cmd_assert('git -C {} rm -rf *'.format(self.bundle_clone_path))

    def get_operator_package_yaml_info(self):
        """Get operator package name and channel from its package YAML

        This info will be used to generate bundle's Dockerfile labels and metadata/annotations.yaml
        """
        # Assumes exactly one *package.yaml exists in the manifests dir.
        file_path = glob.glob('{}/*package.yaml'.format(self.operator_manifests_dir))[0]
        package_yaml = yaml.safe_load(io.open(file_path, encoding='utf-8'))
        self.package = package_yaml['packageName']
        # First channel listed is taken as the bundle's (default) channel.
        self.channel = str(package_yaml['channels'][0]['name'])

    def copy_operator_manifests_to_bundle(self):
        """Copy all manifests from the operator distgit repository over to its corresponding bundle
        repository (except image-references file)

        We can be sure that the manifests contents are exactly what we expect, because our copy of
        operator repository is checked out to the specific commit used to build given operator NVR
        """
        exectools.cmd_assert('mkdir -p {}'.format(self.bundle_manifests_dir))
        exectools.cmd_assert('cp -r {} {}/'.format(
            ' '.join(self.list_of_manifest_files_to_be_copied),
            self.bundle_manifests_dir
        ))
        # image-references is only meaningful to the operator build, not the bundle.
        exectools.cmd_assert('rm -f {}/image-references'.format(self.bundle_manifests_dir))

    def replace_image_references_by_sha_on_bundle_manifests(self):
        """Iterate through all bundle manifests files, replacing any image reference tag by its
        corresponding SHA

        That is used to allow disconnected installs, where a cluster can't reach external registries
        in order to translate image tags into something "pullable"
        """
        for file in glob.glob('{}/*'.format(self.bundle_manifests_dir)):
            # Rewrite each manifest in place.
            with io.open(file, 'r+', encoding='utf-8') as f:
                contents = self.find_and_replace_image_references_by_sha(f.read())
                f.seek(0)
                f.truncate()
                f.write(contents)

    def generate_bundle_annotations(self):
        """Create an annotations YAML file for the bundle, using info extracted from operator's
        package YAML
        """
        annotations_file = '{}/metadata/annotations.yaml'.format(self.bundle_clone_path)
        exectools.cmd_assert('mkdir -p {}'.format(os.path.dirname(annotations_file)))
        with io.open(annotations_file, 'w', encoding='utf-8') as writer:
            writer.write(yaml.dump({'annotations': self.operator_framework_tags}))

    def generate_bundle_dockerfile(self):
        """Create a Dockerfile with instructions to build the bundle container and a set of LABELs
        that allow the bundle to publish its manifests on operator's behalf.

        NOTE(review): relies on DockerfileParser persisting content/label changes
        back to the Dockerfile on attribute assignment — confirm against
        dockerfile_parse documentation.
        """
        operator_df = DockerfileParser('{}/Dockerfile'.format(self.operator_clone_path))
        bundle_df = DockerfileParser('{}/Dockerfile'.format(self.bundle_clone_path))
        bundle_df.content = 'FROM scratch\nCOPY ./manifests /manifests\nCOPY ./metadata /metadata'
        # Start from the operator's labels, then override bundle-specific ones.
        bundle_df.labels = operator_df.labels
        bundle_df.labels['com.redhat.component'] = self.bundle_brew_component
        bundle_df.labels['com.redhat.delivery.appregistry'] = False
        bundle_df.labels['name'] = 'openshift/ose-{}'.format(self.bundle_name)
        # Bundle version embeds the operator's version AND release.
        bundle_df.labels['version'] = '{}.{}'.format(
            operator_df.labels['version'],
            operator_df.labels['release']
        )
        bundle_df.labels = {
            **bundle_df.labels,
            **self.redhat_delivery_tags,
            **self.operator_framework_tags
        }
        # The release label is assigned at build time; drop the inherited one.
        del(bundle_df.labels['release'])

    def create_container_yaml(self):
        """Use container.yaml to disable unnecessary multiarch
        """
        filename = '{}/container.yaml'.format(self.bundle_clone_path)
        with io.open(filename, 'w', encoding='utf-8') as writer:
            writer.write('# metadata containers are not functional and do not need to be multiarch')
            writer.write('\n\n')
            writer.write(yaml.dump({
                'platforms': {'only': ['x86_64']},
                'operator_manifests': {'manifests_dir': 'manifests'},
            }))

    def commit_and_push_bundle(self, commit_msg):
        """Try to commit and push bundle distgit repository if there were any content changes.

        :param string commit_msg: Commit message
        :return bool True if new changes were committed and pushed, False otherwise
        """
        with pushd.Dir(self.bundle_clone_path):
            try:
                exectools.cmd_assert('git add .')
                exectools.cmd_assert('rhpkg{}commit -m "{}"'.format(self.rhpkg_opts, commit_msg))
                rc, out, err = exectools.cmd_gather('rhpkg{}push'.format(self.rhpkg_opts))
                return True
            except Exception:
                return False  # Bundle repository might be already up-to-date, nothing new to commit

    def trigger_bundle_container_build(self):
        """Ask brew for a container-build of operator's bundle

        :return bool True if brew task was successfully created, False otherwise
        """
        with pushd.Dir(self.bundle_clone_path):
            rc, out, err = exectools.cmd_gather(
                'rhpkg{}container-build --nowait --target {}'.format(self.rhpkg_opts, self.target)
            )
        if rc != 0:
            msg = 'Unable to create brew task: rc={} out={} err={}'.format(rc, out, err)
            self.runtime.logger.info(msg)
            return False
        # Scrape the task URL/id out of rhpkg's human-readable output.
        self.task_url = re.search(r'Task info:\s(.+)', out).group(1)
        self.task_id = re.search(r'Created task:\s(\d+)', out).group(1)
        return True

    def watch_bundle_container_build(self):
        """Log brew task URL and eventual task states until task completion (or failure)

        :return bool True if brew task was successfully completed, False otherwise
        """
        self.runtime.logger.info('Build running: {}'.format(self.task_url))
        error = brew.watch_task(
            self.runtime.group_config.urls.brewhub,
            self.runtime.logger.info,
            self.task_id,
            threading.Event()
        )
        if error:
            self.runtime.logger.info(error)
            return False
        return True

    def find_and_replace_image_references_by_sha(self, contents):
        """Search image references (<registry>/<image>:<tag>) on given contents (usually YAML),
        replace them with corresponding (<registry>/<image>@<sha>) and collect such replacements to
        list them as "relatedImages" under "spec" section of contents (should it exist)

        :param string contents: File contents that potentially contains image references
        :return string: Same contents, with aforementioned modifications applied
        """
        found_images = {}

        def collect_replaced_image(match):
            # Called by re.sub for every tagged reference; records the
            # digest-pinned form and returns it as the replacement text.
            image = '{}/{}@{}'.format(
                'registry.redhat.io',  # hardcoded until appregistry is dead
                match.group(1).replace('openshift/', 'openshift4/'),
                self.fetch_image_sha('{}:{}'.format(match.group(1), match.group(2)))
            )
            key = u'{}'.format(re.search(r'([^\/]+)\/(.+)', match.group(1)).group(2))
            found_images[key] = u'{}'.format(image)
            return image

        new_contents = re.sub(
            r'{}\/([^:]+):([^\'"\s]+)'.format(self.operator_csv_config['registry']),
            collect_replaced_image,
            contents,
            flags=re.MULTILINE
        )
        return self.append_related_images_spec(new_contents, found_images)

    def fetch_image_sha(self, image):
        """Get corresponding SHA of given image (using `oc image info`)

        OCP 4.3+ supports "manifest-lists", which is a SHA that doesn't represent an actual image,
        but a list of images per architecture instead. OCP 4.3+ is smart enough to read that list
        and pick the correct architecture.

        Unfortunately, OCP 4.2 is multi-arch and does not support manifest-lists. It is still unclear
        how we want to handle this case on this new bundle workflow. Previously it was easy to simply
        generate manifests for all arches in a single run, since all manifests were living together
        under the same branch.

        Possible solutions:
        * Build multiple sets of operator bundles, one per architecture
          Caveats: More manual work, and maybe more advisories, since I'm not sure if Errata Tool will
          let us attach multiple builds of the same brew component to a single advisory.
        * Have a multi-arch build of the bundle container
          Caveats: Not sure if appregistry/IIB will know what to do with that, publishing each arch in
          a different channel

        For now, simply assuming x86_64 (aka amd64 in golang land)

        :param string image: Image reference (format: <registry>/<image>:<tag>)
        :return string: SHA of corresponding <tag> (format: sha256:a1b2c3d4...)
        """
        registry = self.runtime.group_config.urls.brew_image_host.rstrip('/')
        ns = self.runtime.group_config.urls.brew_image_namespace
        # When a namespace is configured, brew flattens "a/b" into "ns/a-b".
        image = '{}/{}'.format(ns, image.replace('/', '-')) if ns else image
        pull_spec = '{}/{}'.format(registry, image)
        cmd = 'oc image info --filter-by-os=linux/amd64 -o json {}'.format(pull_spec)
        try:
            out, err = exectools.cmd_assert(cmd, retries=3)
        except:  # noqa: bare except kept intentionally; any failure is logged then re-raised
            self.runtime.logger.error(f'Unable to find image from CSV: {pull_spec}. Image may have failed to build after CSV rebase.')
            raise
        if self.runtime.group_config.operator_image_ref_mode == 'manifest-list':
            return json.loads(out)['listDigest']
        # @TODO: decide how to handle 4.2 multi-arch. hardcoding amd64 for now
        return json.loads(out)['contentDigest']

    def append_related_images_spec(self, contents, images):
        """Create a new section under contents' "spec" called "relatedImages", listing all given
        images in the following format:

        spec:
          relatedImages:
            - name: image-a
              image: registry/image-a@sha256:....
            - name: image-b
              image: registry/image-b@sha256:....

        If list of images is empty or "spec" section is not found, return contents as-is

        :param string contents: File contents that potentially contains a "spec" section
        :param list images: List of image info dictionaries (format: [{"name": "...", "image": "..."}])
        :return string: Given contents with aforementioned modifications applied
        """
        if not images:
            return contents
        related_images = [
            '  - name: {}\n    image: {}'.format(name, image)
            for name, image in images.items()
        ]
        related_images.sort()
        # Inject the block right after the first top-level "spec:" line.
        return re.sub(
            r'^spec:\n',
            'spec:\n  relatedImages:\n{}\n'.format('\n'.join(related_images)),
            contents,
            flags=re.MULTILINE
        )

    @property
    def brew_session(self):
        # Lazily created and cached koji client session.
        if not hasattr(self, '_brew_session'):
            self._brew_session = brew.koji.ClientSession(
                self.runtime.group_config.urls.brewhub,
                opts={'serverca': '/etc/pki/brew/legacy.crt'}
            )
        return self._brew_session

    @property
    def operator_name(self):
        return self.operator_repo_name.split('/')[-1]

    @property
    def operator_csv_config(self):
        return self.runtime.image_map[self.operator_name].config['update-csv']

    @property
    def operator_clone_path(self):
        return '{}/distgits/{}'.format(self.runtime.working_dir, self.operator_repo_name)

    @property
    def operator_manifests_dir(self):
        return '{}/{}'.format(
            self.operator_clone_path,
            self.operator_csv_config['manifests-dir'].rstrip('/')
        )

    @property
    def operator_bundle_dir(self):
        return '{}/{}'.format(
            self.operator_manifests_dir,
            self.operator_csv_config['bundle-dir'].rstrip('/')
        )

    @property
    def operator_brew_component(self):
        # Explicit distgit.component config wins; otherwise use the convention.
        config = self.runtime.image_map[self.operator_name].config
        if 'distgit' in config and 'component' in config['distgit']:
            return config['distgit']['component']
        return '{}-container'.format(self.operator_name)

    @property
    def bundle_name(self):
        return '{}-bundle'.format(self.operator_name)

    @property
    def bundle_repo_name(self):
        return '{}-bundle'.format(self.operator_repo_name)

    @property
    def bundle_clone_path(self):
        return '{}-bundle'.format(self.operator_clone_path)

    @property
    def bundle_manifests_dir(self):
        return '{}/manifests'.format(self.bundle_clone_path)

    @property
    def bundle_brew_component(self):
        return self.operator_brew_component.replace('-container', '-metadata-container')

    @property
    def branch(self):
        return self.runtime.group_config.branch.format(**self.runtime.group_config.vars)

    @property
    def rhpkg_opts(self):
        opts = self.runtime.rhpkg_config
        if hasattr(self.runtime, 'user') and self.runtime.user is not None:
            opts += ' --user {} '.format(self.runtime.user)
        return opts

    @property
    def list_of_manifest_files_to_be_copied(self):
        files = glob.glob('{}/*'.format(self.operator_bundle_dir))
        if not files:
            # 4.1 channel in package YAML is "preview" or "stable", but directory name is "4.1"
            files = glob.glob('{}/{}/*'.format(
                self.operator_manifests_dir,
                '{MAJOR}.{MINOR}'.format(**self.runtime.group_config.vars)
            ))
        return files

    @property
    def redhat_delivery_tags(self):
        versions = 'v{MAJOR}.{MINOR}'.format(**self.runtime.group_config.vars)
        return {
            # 'com.redhat.delivery.backport': 'true',
            'com.redhat.delivery.operator.bundle': 'true',
            'com.redhat.openshift.versions': versions,
        }

    @property
    def operator_framework_tags(self):
        return {
            'operators.operatorframework.io.bundle.channel.default.v1': self.channel,
            'operators.operatorframework.io.bundle.channels.v1': self.channel,
            'operators.operatorframework.io.bundle.manifests.v1': 'manifests/',
            'operators.operatorframework.io.bundle.mediatype.v1': 'registry+v1',
            'operators.operatorframework.io.bundle.metadata.v1': 'metadata/',
            'operators.operatorframework.io.bundle.package.v1': self.package,
        }

    @property
    def target(self):
        return self.runtime.get_default_candidate_brew_tag() or '{}-candidate'.format(self.branch)
from __future__ import absolute_import, print_function, unicode_literals
import click
import datetime
from multiprocessing import cpu_count
from multiprocessing.dummy import Pool as ThreadPool
import re
from errata_tool import Erratum
from kerberos import GSSError
# -----------------------------------------------------------------------------
# Constants and defaults
# -----------------------------------------------------------------------------
# Sentinel "unset" release date (the Unix epoch); validate_release_date treats it
# as "no date provided".
default_release_date = datetime.datetime(1970, 1, 1, 0, 0)
# Captured once at import time.
now = datetime.datetime.now()
# Date format used for release dates throughout, e.g. 2020-Apr-01.
YMD = '%Y-%b-%d'
def red_prefix(msg, file=None):
    """Echo ``msg`` with no trailing newline, in bold red — e.g. an "Error: " prefix."""
    click.secho(msg, fg='red', bold=True, nl=False, file=file)
def green_prefix(msg, file=None):
    """Echo ``msg`` with no trailing newline, in bold green — e.g. a "Success: " prefix."""
    click.secho(msg, fg='green', bold=True, nl=False, file=file)
def yellow_prefix(msg, file=None):
    """Echo ``msg`` with no trailing newline, in bold yellow — e.g. a "Warning: " prefix."""
    click.secho(msg, fg='yellow', bold=True, nl=False, file=file)
def red_print(msg, file=None):
    """Echo ``msg`` in plain (non-bold) red text, followed by a newline."""
    click.secho(msg, fg='red', bold=False, nl=True, file=file)
def green_print(msg, file=None):
    """Echo ``msg`` in plain (non-bold) green text, followed by a newline."""
    click.secho(msg, fg='green', bold=False, nl=True, file=file)
def yellow_print(msg, file=None):
    """Echo ``msg`` in plain (non-bold) yellow text, followed by a newline."""
    click.secho(msg, fg='yellow', bold=False, nl=True, file=file)
def cprint(msg):
    """Thin wrapper around click.echo for default-styled output."""
    click.echo(msg)
def exit_unauthenticated():
    """Print the standard 401 (unauthenticated) error message and terminate."""
    detail = "401 - user is not authenticated, are you sure you have a kerberos ticket?"
    red_prefix("Error Unauthenticated: ")
    click.echo(detail)
    exit(1)
def exit_unauthorized():
    """Print the standard 403 (unauthorized) error message and terminate."""
    detail = "403 - user is authenticated, but unauthorized to perform this action"
    red_prefix("Error Unauthorized: ")
    click.echo(detail)
    exit(1)
def ensure_erratatool_auth():
    """Cheap authentication probe against the Errata Tool; exits if the
    kerberos handshake fails."""
    try:
        # Fetching a single (ancient) advisory is the cheapest authenticated call.
        Erratum(errata_id=1)
    except GSSError:
        exit_unauthenticated()
def validate_release_date(ctx, param, value):
    """Click callback: ensure a release date is provided in YYYY-Mon-DD format.

    Returns the original value on success; raises click.BadParameter otherwise.
    """
    try:
        parsed = datetime.datetime.strptime(value, YMD)
    except ValueError:
        raise click.BadParameter('Release date (--date) must be in YYYY-Mon-DD format')
    if parsed != default_release_date:
        # User provided date passed validation, they deserve a hearty thumbs-up!
        green_prefix("User provided release date: ")
        click.echo("{} - Validated".format(parsed.strftime(YMD)))
    # The default (epoch) date needs no announcement.
    return value
def validate_email_address(ctx, param, value):
    """Click callback: ensure the given value looks like an email address."""
    # Really just check to match /^[^@]+@[^@]+\.[^@]+$/
    if re.match(r'^[^@ ]+@[^@ ]+\.[^@ ]+$', value) is None:
        raise click.BadParameter(
            "Invalid email address for {}: {}".format(param, value))
    return value
def release_from_branch(ver):
    """Parse the release version out of a branch-style string.

    For --group=openshift-3.9, runtime.group_config.branch is
    'rhaos-3.9-rhel-7'; this returns '3.9' (the second hyphen-separated
    field). HIGHLY dependent on that exact format — arbitrary input will fail.
    """
    return ver.split('-', 2)[1]
def major_from_branch(ver):
    """Parse the MAJOR version (X of X.Y) out of a branch-style string.

    For 'rhaos-3.9-rhel-7' this returns '3'. HIGHLY dependent on that exact
    format — arbitrary input will fail.
    """
    release = ver.split('-')[1]
    return release.split('.')[0]
def minor_from_branch(ver):
    """Parse the MINOR version (Y of X.Y) out of a branch-style string.

    For 'rhaos-3.9-rhel-7' this returns '9'. HIGHLY dependent on that exact
    format — arbitrary input will fail.
    """
    release = ver.split('-')[1]
    return release.split('.')[1]
def pbar_header(msg_prefix='', msg='', seq=(), char='*'):
    """Generate a progress bar header for a given iterable or
    sequence. The given sequence must have a countable length. A bar of
    `char` characters is printed between square brackets.

    :param string msg_prefix: Header text to print in heavy green text
    :param string msg: Header text to print in the default char face
    :param sequence seq: A sequence (iterable) to size the progress
    bar against
    :param str char: The character to use when drawing the progress
    bar

    For example:

        pbar_header("Foo: ", "bar", seq=[None, None, None], char='-')

    would produce:

        Foo: bar
        [---]

    where 'Foo: ' is printed using green_prefix() and 'bar' is in the
    default console fg color and weight.

    TODO: This would make a nice context wrapper.
    """
    # NOTE: default changed from a mutable [] to an immutable () — seq is only
    # ever measured with len(), so this is behavior-identical and avoids the
    # shared-mutable-default pitfall.
    green_prefix(msg_prefix)
    click.echo(msg)
    click.echo("[" + (char * len(seq)) + "]")
def progress_func(func, char='*', file=None):
    """Wrap a zero-argument callable for parallel execution: print one
    progress character, then invoke the callable and return its result.

    :param lambda-function func: A 'lambda wrapped' function to call
    after printing a progress character
    :param str char: The character (or multi-char string, if you
    really wanted to) to print before calling `func`
    :param file: the file to print the progress. None means stdout.

    Usage examples:
    * See find-builds command
    """
    click.secho(char, nl=False, fg='green', file=file)
    return func()
def parallel_results_with_progress(inputs, func, file=None):
    """Run a function against a list of inputs with a progress bar

    :param sequence inputs : A sequence of items to iterate over in parallel
    :param lambda-function func: A lambda function to call with one arg to process

    Usage examples:
    * See find-builds command

        candidate_build_infos = parallel_results_with_progress(
            candidate_builds,
            lambda build: build.get_latest_build_info()
        )

    Example output:

        [****************]
    """
    click.secho('[', nl=False, file=file)

    def _run_one(item):
        # Each call prints one progress character before doing the work.
        return progress_func(lambda: func(item), file=file)

    pool = ThreadPool(cpu_count())
    results = pool.map(_run_one, inputs)
    # Wait for results
    pool.close()
    pool.join()
    click.echo(']', file=file)
    return results
def get_release_version(pv):
    """ there are two kind of format of product_version: OSE-4.1-RHEL-8 RHEL-7-OSE-4.1 RHEL-7-OSE-4.1-FOR-POWER-LE """
    match = re.search(r'OSE-(\d+\.\d+)', pv)
    return match.group(1)
def convert_remote_git_to_https(source):
    """
    Accepts a source git URL in ssh or https format and return it in a normalized
    https format:
        - https protocol
        - no trailing /
    :param source: Git remote
    :return: Normalized https git URL
    """
    # Rewrite the ssh form user@host:org/repo into https://host/org/repo.
    normalized = re.sub(r'[^@]+@([^:/]+)[:/]([^\.]+)', r'https://\1/\2', source.strip())
    # Drop a trailing .git suffix and any trailing slash.
    normalized = re.sub(r'\.git$', '', normalized)
    return normalized.rstrip('/')
from __future__ import absolute_import, print_function, unicode_literals
from future import standard_library
standard_library.install_aliases()
from errata_tool import ErrataConnector
import datetime
import json
import ssl
import re
from elliottlib import exceptions, constants, brew, logutil, bzutil
from elliottlib.util import green_prefix, exit_unauthenticated
import requests
from requests_kerberos import HTTPKerberosAuth
from kerberos import GSSError
from errata_tool import Erratum, ErrataException, ErrataConnector
import xmlrpc.client
# Module-level logger for this errata helper module.
logger = logutil.getLogger(__name__)
# Point all errata_tool connections at the configured Errata Tool instance.
ErrataConnector._url = constants.errata_url
# XML-RPC proxy for the legacy Errata Tool API.
errata_xmlrpc = xmlrpc.client.ServerProxy(constants.errata_xmlrpc_url)
def new_erratum(et_data, errata_type=None, boilerplate_name=None, kind=None, release_date=None, create=False,
                assigned_to=None, manager=None, package_owner=None, impact=None, cves=None):
    """5.2.1.1. POST /api/v1/erratum

    Create a new advisory.
    Takes an unrealized advisory object and related attributes using the following format:
    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-post-apiv1erratum

    :param et_data: The ET data dump we got from our erratatool.yaml file
    :param errata_type: The type of advisory to create (RHBA, RHSA, or RHEA)
    :param string kind: One of [rpm, image].
        Only used for backward compatibility.
    :param string boilerplate_name: One of [rpm, image, extras, metadata, cve].
        The name of boilerplate for creating this advisory
    :param string release_date: A date in the form YYYY-Mon-DD
    :param bool create: If true, create the erratum in the Errata
        tool, by default just the DATA we would have POSTed is
        returned
    :param string assigned_to: The email address of the group responsible for
        examining and approving the advisory entries
    :param string manager: The email address of the manager responsible for
        managing the contents and status of this advisory
    :param string package_owner: The email address of the person who is handling
        the details and status of this advisory
    :param impact: The security impact. Only applies to RHSA
    :param cves: The CVE(s) to attach to the advisory. Separate multiple CVEs with a space. Only applies to RHSA
    :return: An Erratum object
    :raises: exceptions.ErrataToolUnauthenticatedException if the user is not authenticated to make the request
    """
    # Default the ship date to three weeks out when the caller didn't choose one.
    if not release_date:
        release_date = datetime.datetime.now() + datetime.timedelta(days=21)
    if not kind:
        kind = 'rpm'
    if not boilerplate_name:
        boilerplate_name = kind
    if "boilerplates" in et_data and boilerplate_name in et_data["boilerplates"]:
        boilerplate = et_data['boilerplates'][boilerplate_name]
    else:  # FIXME: For backward compatibility.
        boilerplate = {
            # NOTE(review): the fallback default here is the literal string
            # 'rpm', not et_data['synopsis']['rpm'] -- confirm this is intended.
            "synopsis": (et_data['synopsis'].get(boilerplate_name, 'rpm') if boilerplate_name != "cve"
                         else et_data['synopsis'][kind]),
            "topic": et_data["topic"],
            "description": et_data["description"],
            "solution": et_data["solution"],
        }
    e = Erratum(
        product=et_data['product'],
        release=et_data['release'],
        errata_type=errata_type,
        synopsis=boilerplate['synopsis'],
        topic=boilerplate['topic'],
        description=boilerplate['description'],
        solution=boilerplate['solution'],
        qe_email=assigned_to,
        qe_group=et_data['quality_responsibility_name'],
        owner_email=package_owner,
        manager_email=manager,
        date=release_date
    )
    if errata_type == 'RHSA':
        e.security_impact = impact
        e.cve_names = cves
    # Fix: the original duplicated `return e` in both branches of the
    # if/else; commit when requested, then return the (single) object.
    if create:
        # THIS IS NOT A DRILL
        e.commit()
    return e
def build_signed(build):
    """Return whether the RPMs in the given build are signed.

    :param string build: The build nvr or id
    :return: boolean, True when the build's rpms are signed
    :raises exceptions.ErrataToolUnauthenticatedException: on a 401 response
    :raises exceptions.ErrataToolError: on any other non-200 response
    """
    url = constants.errata_get_build_url.format(id=build)
    res = requests.get(url,
                       verify=ssl.get_default_verify_paths().openssl_cafile,
                       auth=HTTPKerberosAuth())
    if res.status_code == 401:
        raise exceptions.ErrataToolUnauthenticatedException(res.text)
    if res.status_code != 200:
        raise exceptions.ErrataToolError("Other error (status_code={code}): {msg}".format(
            code=res.status_code,
            msg=res.text))
    return res.json()['rpms_signed']
def get_filtered_list(filter_id=constants.errata_default_filter, limit=5):
    """Return a list of Erratum() objects from results using the provided filter_id.

    Note: Errata filters are defined in the ET web interface.

    :param filter_id: The ID number of the pre-defined filter
    :param int limit: How many erratum to list
    :return: A list of Erratum objects
    :raises exceptions.ErrataToolUnauthenticatedException: If the user is not authenticated to make the request
    :raises exceptions.ErrataToolError: If the given filter does not exist, and, any other unexpected error
    """
    res = requests.get(constants.errata_filter_list_url.format(id=filter_id),
                       verify=ssl.get_default_verify_paths().openssl_cafile,
                       auth=HTTPKerberosAuth())
    if res.status_code == 401:
        raise exceptions.ErrataToolUnauthenticatedException(res.text)
    if res.status_code != 200:
        raise exceptions.ErrataToolError("Other error (status_code={code}): {msg}".format(
            code=res.status_code,
            msg=res.text))
    # The Errata Tool returns 200 even when the requested filter does not
    # exist; the only way to detect that case is a response body that fails
    # to parse into the expected JSON structure.
    try:
        advisories = [Erratum(errata_id=advs['id']) for advs in res.json()]
    except Exception:
        raise exceptions.ErrataToolError("Could not locate the given advisory filter: {fid}".format(
            fid=filter_id))
    return advisories[:limit]
def add_comment(advisory_id, comment):
    """5.2.1.8. POST /api/v1/erratum/{id}/add_comment

    Add a comment to an advisory. The comment object is JSON-serialized into
    the comment text. Example request body:
        {"comment": "This is my comment"}
    The response body is the updated or unmodified advisory, in the same format as GET /api/v1/erratum/{id}.
    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-post-apiv1erratumidadd_comment

    :param dict comment: The metadata object to add as a comment
    :return: the requests.Response of the POST
    """
    payload = {"comment": json.dumps(comment)}
    url = constants.errata_add_comment_url.format(id=advisory_id)
    return requests.post(url,
                         verify=ssl.get_default_verify_paths().openssl_cafile,
                         auth=HTTPKerberosAuth(),
                         data=payload)
def get_comments(advisory_id):
    """5.2.10.2. GET /api/v1/comments?filter[key]=value

    Retrieve all advisory comments
    Example request body:
        {"filter": {"errata_id": 11112, "type": "AutomatedComment"}}
    Returns an array of comments ordered in descending order
    (newest first). The array may be empty depending on the filters
    used. The meaning of each attribute is documented under GET
    /api/v1/comments/{id} (see Erratum.get_comment())

    Included for reference:
    5.2.10.2.1. Filtering
    The list of comments can be filtered by applying
    filter[key]=value as a query parameter. All attributes of a
    comment - except advisory_state - can be used as a filter.

    This is a paginated API. Reference documentation:
    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-pagination
    """
    body = {
        "filter": {
            "errata_id": advisory_id,
            "type": "Comment"
        }
    }
    # This is a paginated API, we need to increment page[number] until an empty array is returned.
    params = {
        "page[number]": 1
    }
    while True:
        res = requests.get(
            constants.errata_get_comments_url,
            params=params,
            verify=ssl.get_default_verify_paths().openssl_cafile,
            auth=HTTPKerberosAuth(),
            json=body)
        if res.ok:
            data = res.json().get('data', [])
            if not data:
                # An empty page means we have consumed all comments.
                break
            for comment in data:
                yield comment
            params["page[number]"] += 1
        elif res.status_code == 401:
            # NOTE(review): other helpers in this module raise
            # ErrataToolUnauthenticatedException; confirm that
            # ErrataToolUnauthorizedException actually exists in `exceptions`.
            raise exceptions.ErrataToolUnauthorizedException(res.text)
        else:
            # NOTE(review): inside a generator, `return False` simply stops
            # iteration -- the False value is never seen by callers, so any
            # other HTTP error silently truncates the comment stream.
            # Consider raising ErrataToolError here instead.
            return False
def get_metadata_comments_json(advisory_id):
    """Fetch just the comments that look like our metadata JSON comments from the advisory.

    A comment qualifies when its text parses as JSON and contains the
    'release', 'kind' and 'impetus' keys.

    Returns a list, oldest first.
    """
    all_comments = list(get_comments(advisory_id))
    metadata_json_list = []
    # Comments come back (mostly) newest-first; walk them oldest-first.
    for comment in reversed(all_comments):
        try:
            metadata = json.loads(comment['attributes']['text'])
        except Exception:
            continue  # not JSON -- skip it
        if 'release' in metadata and 'kind' in metadata and 'impetus' in metadata:
            metadata_json_list.append(metadata)
    return metadata_json_list
def get_builds(advisory_id, session=None):
    """5.2.2.6. GET /api/v1/erratum/{id}/builds

    Fetch the Brew builds associated with an advisory.
    Returned builds are organized by product version, variant, arch
    and include all the build files from the advisory.

    Returned attributes for the product version include:
    * name: name of the product version.
    * description: description of the product version.

    Returned attributes for each build include:
    * id: build's ID from Brew, Errata Tool also uses this as an internal ID
    * nvr: nvr of the build.
    * variant_arch: the list of files grouped by variant and arch.

    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-get-apiv1erratumidbuilds
    """
    session = session or requests.session()
    res = session.get(constants.errata_get_builds_url.format(id=advisory_id),
                      verify=ssl.get_default_verify_paths().openssl_cafile,
                      auth=HTTPKerberosAuth())
    if res.status_code != 200:
        raise exceptions.ErrataToolUnauthorizedException(res.text)
    return res.json()
# https://errata.devel.redhat.com/bugs/1743872/advisories.json
def get_brew_builds(errata_id, session=None):
    """5.2.2.1. GET /api/v1/erratum/{id}/builds

    Get Errata list of builds.
    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-get-apiv1erratumidbuilds

    :param str errata_id: the errata id
    :param requests.Session session: A python-requests Session object,
        used for connection pooling. Providing a `session` object can
        yield a significant reduction in total query time when looking up
        many builds.
        http://docs.python-requests.org/en/master/user/advanced/#session-objects
    :return: A List of initialized Build objects with the build details
    :raises exceptions.BrewBuildException: When erratum return errors
    """
    if session is None:
        session = requests.session()
    res = session.get(constants.errata_get_builds_url.format(id=errata_id),
                      verify=ssl.get_default_verify_paths().openssl_cafile,
                      auth=HTTPKerberosAuth())
    if res.status_code != 200:
        raise exceptions.BrewBuildException("fetch builds from {id}: {msg}".format(
            id=errata_id,
            msg=res.text))
    # Response maps product version -> {'builds': [{nvr: {...}}, ...]};
    # flatten into Build objects tagged with their product version.
    return [
        brew.Build(nvr=next(iter(build_obj)), product_version=product_version)
        for product_version, info in res.json().items()
        for build_obj in info['builds']
    ]
def get_brew_build(nvr, product_version='', session=None):
    """5.2.2.1. GET /api/v1/build/{id_or_nvr}

    Get Brew build details.
    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-get-apiv1buildid_or_nvr

    :param str nvr: A name-version-release string of a brew rpm/image build
    :param str product_version: The product version tag as given to ET
        when attaching a build
    :param requests.Session session: A python-requests Session object,
        used for connection pooling. Providing a `session` object can
        yield a significant reduction in total query time when looking up
        many builds.
        http://docs.python-requests.org/en/master/user/advanced/#session-objects
    :return: An initialized Build object with the build details
    :raises exceptions.BrewBuildException: When build not found
    """
    if session is None:
        session = requests.session()
    res = session.get(constants.errata_get_build_url.format(id=nvr),
                      verify=ssl.get_default_verify_paths().openssl_cafile,
                      auth=HTTPKerberosAuth())
    if res.status_code != 200:
        raise exceptions.BrewBuildException("{build}: {msg}".format(
            build=nvr,
            msg=res.text))
    return brew.Build(nvr=nvr, body=res.json(), product_version=product_version)
def get_advisories_for_bug(bug_id, session=None):
    """Fetch the list of advisories which a specified bug is attached to.

    5.2.26.7 /bugs/{id}/advisories.json

    :param bug_id: Bug ID
    :param session: Optional requests.Session
    :return: parsed JSON list of advisories
    :raises requests.HTTPError: on a non-2xx response
    """
    session = session or requests.session()
    url = constants.errata_get_advisories_for_bug_url.format(id=int(bug_id))
    response = session.get(url,
                           verify=ssl.get_default_verify_paths().openssl_cafile,
                           auth=HTTPKerberosAuth())
    response.raise_for_status()
    return response.json()
def parse_exception_error_message(e):
    """Extract bug IDs from an ErrataException message.

    :param e: exception whose message contains entries like
        'Bug #1685399 The bug is filed already in RHBA-2019:1589.
        Bug #1685398 The bug is filed already in RHBA-2019:1589.'
    :return: list of bug IDs as ints, e.g. [1685399, 1685398]
    """
    # Require at least one digit ([0-9]+, not [0-9]*) so a stray "Bug #" with
    # no number following is skipped instead of crashing on int('').
    return [int(match.split('#')[1]) for match in re.findall(r'Bug #[0-9]+', str(e))]
def add_bugs_with_retry(advisory, bugs, retried=False, noop=False):
    """
    Add the specified bugs to an advisory, retrying once on failure: on the
    first failure, parse the exception message to get the list of failed bug
    IDs, remove them from the original list, then attach the remainder again.
    If failures remain after the retry, raise.

    :param advisory: advisory id
    :param bugs: iterable of bzutil.bug to attach to advisory
    :param retried: internal flag -- True when this call is the retry attempt
    :param noop: if True, only print what would have been attached
    :return:
    :raises exceptions.ElliottFatalError: when the advisory cannot be located
        or the retry attempt still fails
    """
    if noop:
        print(f'Would have added the following bugs to advisory {advisory}: {[bug.id for bug in bugs]}')
        return
    try:
        advs = Erratum(errata_id=advisory)
    except GSSError:
        # Missing/expired kerberos ticket; presumably exits the process
        # (otherwise `advs` would be unbound below) -- verify.
        exit_unauthenticated()
    # NOTE(review): Erratum() is expected to raise on failure rather than
    # return False; this check looks vestigial -- confirm.
    if advs is False:
        raise exceptions.ElliottFatalError("Error: Could not locate advisory {advs}".format(advs=advisory))
    green_prefix("Adding {count} bugs to advisory {retry_times} times:".format(
        count=len(bugs),
        retry_times=1 if retried is False else 2
    ))
    print(f" {advs}")
    try:
        advs.addBugs([bug.id for bug in bugs])
        advs.commit()
    except ErrataException as e:
        print("ErrataException Message: {}, retry it again".format(e))
        if retried is not True:
            # Sift out the bug IDs that the Errata Tool reported as already
            # filed and retry with the remainder (single level of recursion).
            block_list = parse_exception_error_message(e)
            retry_list = [x for x in bugs if x.id not in block_list]
            if len(retry_list) > 0:
                add_bugs_with_retry(advisory, retry_list, retried=True, noop=noop)
        else:
            raise exceptions.ElliottFatalError(getattr(e, 'message', repr(e)))
def get_rpmdiff_runs(advisory_id, status=None, session=None):
    """Yield RPMDiff runs for a given advisory.

    This is a paginated API; pages are fetched until an empty array comes back.
    https://errata.devel.redhat.com/developer-guide/api-http-api.html#api-pagination

    :param advisory_id: advisory number
    :param status: If set, only returns RPMDiff runs in the status.
    :param session: requests.Session object.
    :raises ValueError: if status is not a valid external test status
    :raises requests.HTTPError: on a non-2xx response
    """
    params = {
        "filter[active]": "true",
        "filter[test_type]": "rpmdiff",
        "filter[errata_id]": advisory_id,
    }
    if status:
        if status not in constants.ET_EXTERNAL_TEST_STATUSES:
            raise ValueError("{} is not a valid RPMDiff run status.".format(status))
        params["filter[status]"] = status
    session = session or requests.Session()
    url = constants.errata_url + "/api/v1/external_tests"
    page_number = 1
    while True:
        params["page[number]"] = page_number
        resp = session.get(url, params=params, auth=HTTPKerberosAuth())
        resp.raise_for_status()
        page = resp.json()["data"]
        if not page:
            break  # empty page: no more runs
        for item in page:
            yield item
        page_number += 1
def get_advisory_images(image_advisory_id, raw=False):
    """List images of a given advisory, raw, or in the format we usually send to CCS (docs team)

    :param int image_advisory_id: ID of the main image advisory
    :param bool raw: Print undoctored artifact list
    :return: str with a list of images
    """
    cdn_docker_file_list = errata_xmlrpc.get_advisory_cdn_docker_file_list(image_advisory_id)
    if raw:
        return '\n'.join(cdn_docker_file_list.keys())

    image_name_pattern = re.compile(r'^redhat-openshift(\d)-')

    def _get_image_name(repo):
        # e.g. "redhat-openshift4-foo" -> "openshift4/foo"
        repo_name = list(repo['docker']['target']['repos'].keys())[0]
        return image_name_pattern.sub(r'openshift\1/', repo_name)

    def _get_nvr(component):
        # The last two dash-separated fields are version and release.
        parts = component.split('-')
        return '{}-{}'.format(parts[-2], parts[-1])

    image_list = ['{}:{}'.format(_get_image_name(repo), _get_nvr(key))
                  for key, repo in sorted(cdn_docker_file_list.items())]
    return '#########\n{}\n#########'.format('\n'.join(image_list))
from __future__ import absolute_import, print_function, unicode_literals
from . import logutil
import os
from io import BytesIO
import collections
import tarfile
import pygit2
import errata_tool
import koji
import logging
from . import constants
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urldefrag
# Files excluded from the generated tarball sources (dist-git metadata that
# should not ship in the source archive):
TARBALL_IGNORES = {".gitignore", ".oit", "container.yaml",
                   "content_sets.yml", "gating.yaml", "sources", "additional-tags"}
LOGGER = logutil.getLogger(__name__)
# (nvr, product, product_version) triple describing a build attached to an advisory.
BuildWithProductVersion = collections.namedtuple(
    "BuildWithProductVersion", ["nvr", "product", "product_version"])
def find_builds_from_advisory(advisory_number, components):
    """Returns a filtered list of builds attached to an advisory.

    NOTE: This function fetches the advisory info and attached builds using the
    `errata_tool.Erratum()` Python API, which is pretty SLOW because it iterates
    over builds for signature checking but doesn't have an option to disable.
    Note that the API cannot be called concurrently due to a race condition in
    its implementation (instances of errata_tool.Erratum share class variables).

    :param advisory_number: an advisory number
    :param components: list of Koji/Brew components or NVRs to filter builds on the advisory
    :return: list of triples in the form of `(nvr, product, product_version)`
    """
    LOGGER.debug(
        "Fetching advisory {} from Errata Tool...".format(advisory_number))
    advisory = errata_tool.Erratum(errata_id=advisory_number)
    LOGGER.info("Got info for advisory {} - {} - {}: {} - {}".format(advisory_number, advisory.errata_state,
                                                                     advisory.errata_name, advisory.synopsis, advisory.url()))
    all_builds = [
        BuildWithProductVersion(nvr, advisory._product, product_version)
        for product_version, nvrs in advisory.errata_builds.items()
        for nvr in nvrs
    ]

    def _matches(build):
        # Keep a build when its NVR starts with any requested component/NVR.
        return any(build[0].startswith(component) for component in components)

    return [build for build in all_builds if _matches(build)]
def generate_tarball_source(tarball_file, prefix, local_repo_path, source_url, force_fetch=False):
    """ Generate a tarball source from a specified commit of a remote Git repository.

    This function uses pygit2 (libgit2) to walk through the files of a commit.

    :param tarball_file: File object to write the tarball into
    :param prefix: Prepend a prefix (usually a directory) to files placed in the tarball.
    :param local_repo_path: Clone the remote repository into this local directory.
    :param source_url: Remote source repo url and commit hash separated by `#`.
    :param force_fetch: Force download objects and refs from another repository
    """
    # source_url looks like "<repo-url>#<commit-hash>"; split on the fragment.
    source_repo_url, source_commit_hash = urldefrag(source_url)
    assert source_commit_hash
    LOGGER.debug("Source is from repo {}, commit {}".format(
        source_repo_url, source_commit_hash))
    git_commit = None  # type: pygit2.Commit
    if os.path.isdir(local_repo_path) and os.listdir(local_repo_path):
        # Reuse an existing local clone -- but only if it points at the same remote.
        LOGGER.debug(
            "Local Git repo {} exists. Examining...".format(local_repo_path))
        discovered_repo = pygit2.discover_repository(
            local_repo_path, False, os.path.abspath(os.path.dirname(local_repo_path)))
        if not discovered_repo:
            raise ValueError(
                "{} exists but is not a valid Git repo.".format(local_repo_path))
        repo = pygit2.Repository(discovered_repo)
        origin = repo.remotes["origin"]  # type: pygit2.Remote
        if origin.url != source_repo_url:
            raise ValueError(
                "Found a different local Git repo in {}".format(discovered_repo))
        LOGGER.info(
            "Use existing local Git repo {}.".format(local_repo_path))
        fetch = force_fetch
        if not force_fetch:
            # Only fetch when the wanted commit isn't already present locally.
            try:
                git_commit = repo.revparse_single(source_commit_hash).peel(pygit2.Commit)
            except KeyError:
                fetch = True
        if fetch:
            LOGGER.info("Fetching latest objects and refs...")
            repo.remotes["origin"].fetch()
    else:
        LOGGER.info("Cloning from {}...".format(source_repo_url))
        repo = pygit2.clone_repository(source_repo_url, local_repo_path)
    if not git_commit:
        git_commit = repo.revparse_single(source_commit_hash).peel(pygit2.Commit)
    LOGGER.info("Generating source from commit {}, author: {} <{}> message:{}".format(
        git_commit.id, git_commit.author.name, git_commit.author.email, git_commit.message))
    LOGGER.debug("Creating tarball {}...".format(tarball_file.name))
    with tarfile.open(fileobj=tarball_file, mode="w:gz") as archive:
        # Iterative depth-first walk of the commit's tree.
        stack = [(git_commit.tree, "")]
        while stack:
            root, path = stack.pop()
            for _entry in root:
                entry = _entry  # type: pygit2.TreeEntry
                full_name = path + entry.name
                if full_name in TARBALL_IGNORES:
                    LOGGER.info(
                        "Excluded {} from source tarball.".format(full_name))
                    continue
                if entry.type == "tree":
                    # Sub-directory: push it onto the stack for later traversal.
                    stack.append((repo.get(entry.id), full_name + "/"))
                    continue
                info = tarfile.TarInfo(prefix + full_name)
                info.mtime = git_commit.committer.time  # commit time, as `git archive` does
                info.uname = info.gname = 'root'  # Git does this!
                if entry.type == "commit":
                    # A gitlink (submodule): represent it as an empty directory.
                    info.type = tarfile.DIRTYPE
                    archive.addfile(info)
                    LOGGER.warning("Created placeholder dir for submodule {}: {}. Dist-git repos usually don't use submodules!".format(
                        full_name, entry.id))
                elif entry.type == "blob":
                    blob = repo.get(entry.id)  # type: pygit2.Blob
                    if entry.filemode == pygit2.GIT_FILEMODE_LINK:
                        # Symlink: the target path is stored as the blob contents.
                        info.type = tarfile.SYMTYPE
                        info.linkname = blob.data.decode("utf-8")
                        info.mode = 0o777  # symlinks get placeholder
                        info.size = 0
                    else:
                        info.mode = entry.filemode
                        info.size = blob.size
                    # For symlinks info.size is 0, so no payload bytes are read.
                    archive.addfile(info, BytesIO(blob.data))
                    LOGGER.debug("Added {}".format(full_name))
    tarball_file.flush()  # important: callers may hand a temp file straight to a consumer
from __future__ import absolute_import, print_function, unicode_literals
# stdlib
from subprocess import check_output, CalledProcessError
import json
# ours
from elliottlib.exceptions import ElliottFatalError
def get_bug_list(working_dir, old, new):
    """
    Get fixed bugzilla IDs between two payloads. Needs to clone
    the entire okd repo, so it can be quite slow.

    :param str working_dir: file location to clone okd repo
    :param str old: URL to the previous payload
    :param str new: URL to the current payload
    :return: A list of BZ IDs
    :raises ElliottFatalError: When oc returns a non-zero exit
    """
    cmd = [
        'oc', 'adm', 'release', 'info',
        '-o', 'name',  # only output BZ IDs
        '--bugs={}/origin'.format(working_dir),  # clone origin to working dir
        '--changes-from={}'.format(old),
        new,  # payloads to compare
    ]
    try:
        return check_output(cmd).splitlines()
    except CalledProcessError as e:
        raise ElliottFatalError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
def get_build_list(old, new):
    """
    Get changed container builds between two payloads.

    :param str old: URL to the previous payload
    :param str new: URL to the current payload
    :return: A list of brew NVRs
    :raises ElliottFatalError: When oc returns a non-zero exit
    """
    try:
        oc_output = check_output([
            'oc', 'adm', 'release', 'info',
            '--output=json',
            '--changes-from={}'.format(old),
            new,  # payloads to compare
        ])
    except CalledProcessError as e:
        raise ElliottFatalError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    payload_json = json.loads(oc_output)
    changed_images = [
        change["to"]["from"]["name"]
        for name, change in payload_json["changedImages"].items()
        # machine-os-content doesn't go into the advisory; no use comparing it
        if name != "machine-os-content" and change["to"]
    ]
    return [get_image_nvr(image) for image in changed_images]
def get_image_nvr(image):
    """
    Get brew NVR from `oc image info` output.

    :param str image: reference to an image in the payload
    :return: A brew NVR string "name-version-release"
    :raises ElliottFatalError: When oc returns a non-zero exit
    """
    try:
        oc_output = check_output(['oc', 'image', 'info', '--output=json', image])
    except CalledProcessError as e:
        raise ElliottFatalError("command '{}' return with error (code {}): {}".format(e.cmd, e.returncode, e.output))
    try:
        image_json = json.loads(oc_output)
        labels = image_json['config']['config']['Labels']
        image_name = labels['com.redhat.component']
        image_version = labels['version']
        image_release = labels['release']
    except Exception:
        # Fix: check_output returns bytes, so the original "str" + oc_output
        # concatenation raised TypeError and masked the real parsing error.
        print("This image json does not have the expected fields:\n{}".format(
            oc_output.decode('utf-8', errors='replace')))
        raise
    return "{}-{}-{}".format(image_name, image_version, image_release)
from __future__ import absolute_import, print_function, unicode_literals
from future.utils import as_native_str
from future.standard_library import install_aliases
install_aliases()
from urllib.parse import urlparse
import yaml
import ruamel.yaml
import ruamel.yaml.util
import logging
import os
import shutil
from . import exectools
from .pushd import Dir
SCHEMES = ['ssh', 'ssh+git', "http", "https"]
class GitDataException(Exception):
    """A broad exception for errors during GitData operations"""
    pass


class GitDataBranchException(GitDataException):
    """Raised when the local checkout's branch conflicts with the requested branch."""
    pass


class GitDataPathException(GitDataException):
    """Raised when a requested path or sub-directory does not exist in the data."""
    pass
class DataObj(object):
    """A single YAML/JSON data file loaded from the data repo.

    Holds the parsed content plus enough formatting metadata (indent sizes)
    so that save() can round-trip the file with minimal diff noise.
    """

    def __init__(self, key, path, data):
        self.key = key
        self.path = path
        self.base_dir = os.path.dirname(self.path)
        self.filename = self.path.replace(self.base_dir, '').strip('/')
        self.data = data
        # ruamel round-trip formatting hints; refreshed by reload().
        self.indent = 2
        self.block_seq_indent = None

    @as_native_str()
    def __repr__(self):
        return str({
            'key': self.key,
            'path': self.path,
            'data': self.data
        })

    def reload(self):
        """Re-read the file with ruamel.yaml, guessing its indentation style."""
        with open(self.path, 'r') as f:
            self.data, self.indent, self.block_seq_indent = ruamel.yaml.util.load_yaml_guess_indent(f, preserve_quotes=True)

    def save(self):
        """Write self.data back to disk.

        pyyaml doesn't preserve the order of keys or comments when loading and
        saving yamls, so save with ruamel.yaml to keep the original format as
        much as possible.
        """
        with open(self.path, 'w') as f:
            ruamel.yaml.round_trip_dump(self.data, f, indent=self.indent, block_seq_indent=self.block_seq_indent)
class GitData(object):
    """Structured data (YAML/JSON files) loaded from a git repository or a local directory."""

    def __init__(self, data_path=None, clone_dir='./', branch='main',
                 sub_dir=None, exts=('yaml', 'yml', 'json'), logger=None):
        """
        Load structured data from a git source.

        :param str data_path: Git url (git/http/https) or local directory path
        :param str clone_dir: Location to clone data into
        :param str branch: Repo branch (tag or sha also allowed) to checkout
        :param str sub_dir: Sub dir in data to treat as root
        :param exts: Iterable of valid extensions to search for in data, without period
        :param logger: Python logging object to use
        :raises GitDataException:
        """
        self.logger = logger
        if logger is None:
            logging.basicConfig(level=logging.INFO)
            self.logger = logging.getLogger()
        self.clone_dir = clone_dir
        self.branch = branch
        self.remote_path = None
        self.sub_dir = sub_dir
        # Normalize extensions to lowercase, dot-prefixed suffixes. The default
        # is a tuple (not a list) to avoid the mutable-default-argument pitfall.
        self.exts = ['.' + e.lower() for e in exts]
        if data_path:
            self.clone_data(data_path)

    def clone_data(self, data_path):
        """
        Clones data for given data_path:
        :param str data_path: Git url (git/http/https) or local directory path
        """
        self.data_path = data_path
        data_url = urlparse(self.data_path)
        # Remote source: a recognized scheme, or scp-like "host:path" syntax.
        if data_url.scheme in SCHEMES or (data_url.scheme == '' and ':' in data_url.path):
            data_name = os.path.splitext(os.path.basename(data_url.path))[0]
            data_destination = os.path.join(self.clone_dir, data_name)
            clone_data = True
            if os.path.isdir(data_destination):
                self.logger.info('Data clone directory already exists, checking commit sha')
                with Dir(data_destination):
                    # check the current status of what's local
                    rc, out, err = exectools.cmd_gather("git status -sb")
                    if rc:
                        raise GitDataException('Error getting data repo status: {}'.format(err))
                    lines = out.strip().split('\n')
                    # Clean when there is a single branch line without ahead/behind markers.
                    synced = ('ahead' not in lines[0] and 'behind' not in lines[0] and len(lines) == 1)

                    # verify local branch
                    rc, out, err = exectools.cmd_gather("git rev-parse --abbrev-ref HEAD")
                    if rc:
                        raise GitDataException('Error checking local branch name: {}'.format(err))
                    branch = out.strip()
                    if branch != self.branch:
                        if not synced:
                            msg = ('Local branch is `{}`, but requested `{}` and you have uncommitted/pushed changes\n'
                                   'You must either clear your local data or manually checkout the correct branch.'
                                   ).format(branch, self.branch)
                            raise GitDataBranchException(msg)
                    else:
                        # Check if local is synced with remote
                        rc, out, err = exectools.cmd_gather(["git", "ls-remote", self.data_path, self.branch])
                        if rc:
                            raise GitDataException('Unable to check remote sha: {}'.format(err))
                        remote = out.strip().split('\t')[0]
                        try:
                            exectools.cmd_assert('git branch --contains {}'.format(remote))
                            self.logger.info('{} is already cloned and latest'.format(self.data_path))
                            clone_data = False
                        # Fix: narrowed from a bare `except:` so KeyboardInterrupt
                        # and SystemExit still propagate.
                        except Exception:
                            if not synced:
                                msg = ('Local data is out of sync with remote and you have unpushed commits: {}\n'
                                       'You must either clear your local data\n'
                                       'or manually rebase from latest remote to continue'
                                       ).format(data_destination)
                                raise GitDataException(msg)
            if clone_data:
                if os.path.isdir(data_destination):  # delete if already there
                    shutil.rmtree(data_destination)
                self.logger.info('Cloning config data from {}'.format(self.data_path))
                if not os.path.isdir(data_destination):
                    cmd = "git clone -b {} --depth 1 {} {}".format(self.branch, self.data_path, data_destination)
                    rc, out, err = exectools.cmd_gather(cmd)
                    if rc:
                        raise GitDataException('Error while cloning data: {}'.format(err))
            self.remote_path = self.data_path
            self.data_path = data_destination
        elif data_url.scheme in ['', 'file']:
            self.remote_path = None
            self.data_path = os.path.abspath(self.data_path)  # just in case relative path was given
        else:
            raise ValueError(
                'Invalid data_path: {} - invalid scheme: {}'
                .format(self.data_path, data_url.scheme)
            )
        if self.sub_dir:
            self.data_dir = os.path.join(self.data_path, self.sub_dir)
        else:
            self.data_dir = self.data_path
        if not os.path.isdir(self.data_dir):
            raise GitDataPathException('{} is not a valid sub-directory in the data'.format(self.sub_dir))

    def load_data(self, path='', key=None, keys=None, exclude=None, filter_funcs=None, replace_vars=None):
        """
        Load data file(s) found under the data directory.

        :param str path: Sub-path under the data dir to search
        :param key: Single key (file basename without extension) to load
        :param keys: List of keys to load (mutually exclusive with key)
        :param exclude: Key or list of keys to skip
        :param filter_funcs: Callable or list of callables (key, data) -> bool
        :param dict replace_vars: str.format() variables substituted into raw file text
        :return: dict of key -> DataObj, or a single DataObj when `key` is given and found
        """
        # Fix: default to an empty mapping here rather than using a mutable
        # default argument.
        replace_vars = replace_vars or {}
        full_path = os.path.join(self.data_dir, path.replace('\\', '/'))
        if path and not os.path.isdir(full_path):
            raise GitDataPathException('Cannot find "{}" under "{}"'.format(path, self.data_dir))

        if filter_funcs is not None and not isinstance(filter_funcs, list):
            filter_funcs = [filter_funcs]
        if exclude is not None and not isinstance(exclude, list):
            exclude = [exclude]

        if key and keys:
            raise GitDataException('Must use key or keys, but not both!')
        if key:
            keys = [key]
        if keys:
            if not isinstance(keys, list):
                keys = [keys]
            files = []
            for k in keys:
                # Take the first existing extension for each key.
                for ext in self.exts:
                    candidate = k + ext
                    if os.path.isfile(os.path.join(full_path, candidate)):
                        files.append(candidate)
                        break  # found for this key, move on
        else:
            files = os.listdir(full_path)

        result = {}
        for name in files:
            base_name, ext = os.path.splitext(name)
            if ext.lower() in self.exts:
                data_file = os.path.join(full_path, name)
                if os.path.isfile(data_file):
                    with open(data_file, 'r') as f:
                        raw_text = f.read()
                    if replace_vars:
                        try:
                            raw_text = raw_text.format(**replace_vars)
                        except KeyError as e:
                            self.logger.warning('{} contains template key `{}` but no value was provided'.format(data_file, e.args[0]))
                    data = yaml.full_load(raw_text)
                    use = True
                    if exclude and base_name in exclude:
                        use = False
                    if use and filter_funcs:
                        for func in filter_funcs:
                            use &= func(base_name, data)
                            if not use:
                                break
                    if use:
                        result[base_name] = DataObj(base_name, data_file, data)
        if key and key in result:
            result = result[key]
        return result

    def commit(self, msg):
        """
        Commit outstanding data changes
        """
        self.logger.info('Commit config: {}'.format(msg))
        with Dir(self.data_path):
            exectools.cmd_assert('git add .')
            exectools.cmd_assert('git commit --allow-empty -m "{}"'.format(msg))

    def push(self):
        """
        Push changes back to data repo.
        Will of course fail if user does not have write access.
        """
        self.logger.info('Pushing config...')
        with Dir(self.data_path):
            exectools.cmd_assert('git push')
from __future__ import absolute_import, print_function, unicode_literals
from future.utils import as_native_str
# stdlib
from future import standard_library
standard_library.install_aliases()
from time import sleep
import urllib.parse
from elliottlib import logutil
# ours
from . import constants
from elliottlib import exceptions, constants, util
# 3rd party
import click
import bugzilla
logger = logutil.getLogger(__name__)
def get_highest_impact(trackers, tracker_flaws_map):
    """Get the highest impact of security bugs

    :param trackers: The list of tracking bugs you want to compare to get the highest severity
    :param tracker_flaws_map: A dict with tracking bug IDs as keys and lists of flaw bugs as values
    :return: The highest impact of the bugs
    """
    severity_index = 0  # "unspecified" severity
    for tracker in trackers:
        tracker_severity = constants.BUG_SEVERITY_NUMBER_MAP[tracker.severity.lower()]
        if tracker_severity == 0:
            # When severity isn't set on the tracker, check the severity of the flaw bugs
            # https://jira.coreos.com/browse/ART-1192
            flaws = tracker_flaws_map[tracker.id]
            for flaw in flaws:
                flaw_severity = constants.BUG_SEVERITY_NUMBER_MAP[flaw.severity.lower()]
                if flaw_severity > tracker_severity:
                    tracker_severity = flaw_severity
        if tracker_severity > severity_index:
            severity_index = tracker_severity
    if severity_index == 0:
        # When severity isn't set on all tracking and flaw bugs, default to "Low"
        # https://jira.coreos.com/browse/ART-1192
        logger.warning("CVE impact couldn't be determined for tracking bug(s); defaulting to Low.")
    return constants.SECURITY_IMPACT[severity_index]
def get_flaw_bugs(trackers):
    """Get a list of flaw bugs blocked by a list of tracking bugs. For a definition of these terms see
    https://docs.engineering.redhat.com/display/PRODSEC/%5BDRAFT%5D+Security+bug+types

    :param trackers: A list of tracking bugs
    :return: A list of flaw bug ids
    """
    flaw_ids = []
    for tracker in trackers:
        blocked = tracker.blocks
        if blocked:
            # A tracker may block several flaw bugs; collect them all.
            flaw_ids.extend(blocked)
        else:
            # A tracker should always block at least one flaw bug.
            logger.warning("Warning: found tracker bugs which doesn't block any other bugs")
    return flaw_ids
def get_tracker_flaws_map(bzapi, trackers):
    """Get flaw bugs blocked by tracking bugs. For a definition of these terms see
    https://docs.engineering.redhat.com/display/PRODSEC/%5BDRAFT%5D+Security+bug+types

    :param bzapi: An instance of the python-bugzilla Bugzilla class
    :param trackers: A list of tracking bugs
    :return: A dict with tracking bug IDs as keys and lists of flaw bugs as values
    """
    # tracker id -> flaw bug ids it blocks
    tracker_flaw_ids_map = {
        tracker.id: get_flaw_bugs([tracker]) for tracker in trackers
    }
    # Flatten and fetch every referenced flaw bug in a single batch query.
    flaw_ids = [flaw_id for _, flaw_ids in tracker_flaw_ids_map.items() for flaw_id in flaw_ids]
    flaw_id_bug_map = get_bugs(bzapi, flaw_ids)
    tracker_flaws_map = {tracker.id: [] for tracker in trackers}
    for tracker_id, flaw_ids in tracker_flaw_ids_map.items():
        for flaw_id in flaw_ids:
            flaw_bug = flaw_id_bug_map.get(flaw_id)
            # Skip ids that could not be fetched or are not actual flaw bugs.
            if not flaw_bug or not is_flaw_bug(flaw_bug):
                logger.warning("Bug {} is not a flaw bug.".format(flaw_id))
                continue
            tracker_flaws_map[tracker_id].append(flaw_bug)
    return tracker_flaws_map
def get_bugs(bzapi, ids, raise_on_error=True):
    """ Get a map of bug ids and bug objects.

    :param bzapi: An instance of the python-bugzilla Bugzilla class
    :param ids: The IDs of the bugs you want to get the Bug objects for
    :param raise_on_error: If True, raise an error if failing to find bugs
    :return: A map of bug ids and bug objects
    :raises:
        BugzillaFatalError: If bugs contains invalid bug ids, or if some other error occurs trying to
        use the Bugzilla XMLRPC api. Could be because you are not logged in to Bugzilla or the login
        session has expired.
    """
    id_bug_map = {}
    # getbugs returns results positionally; a missing bug yields a falsy entry.
    bugs = bzapi.getbugs(ids)  # type: list
    for i, bug in enumerate(bugs):
        bug_id = ids[i]
        if not bug:
            msg = "Couldn't find bug {}.".format(bug_id)
            if raise_on_error:
                raise exceptions.BugzillaFatalError(msg)
            logger.warning(msg)
            # NOTE(review): the falsy placeholder is still stored in the map
            # below; callers (e.g. get_tracker_flaws_map) appear to filter
            # with .get()/truthiness — confirm before skipping these entries.
        id_bug_map[bug_id] = bug
    return id_bug_map
def is_flaw_bug(bug):
    """Return True if ``bug`` is a flaw bug (Security Response / vulnerability)."""
    return (bug.product, bug.component) == ("Security Response", "vulnerability")
def get_flaw_aliases(flaws):
    """Get a map of flaw bug ids and associated CVE aliases. For a definition of these terms see
    https://docs.engineering.redhat.com/display/PRODSEC/%5BDRAFT%5D+Security+bug+types

    :param flaws: Flaw bugs you want to get the aliases for
    :return: A map of flaw bug ids and associated CVE aliases.
    :raises:
        BugzillaFatalError: If bugs contains invalid bug ids, or if some other error occurs trying to
        use the Bugzilla XMLRPC api. Could be because you are not logged in to Bugzilla or the login
        session has expired.
    """
    flaw_cve_map = {}
    for flaw in flaws:
        if flaw is None:
            raise exceptions.BugzillaFatalError("Couldn't find bug with list of ids provided")
        if flaw.product == "Security Response" and flaw.component == "vulnerability":
            alias = flaw.alias
            if len(alias) >= 1:
                if len(alias) > 1:
                    # Only log this when there really is more than one alias
                    # (the previous code logged it for any non-empty list).
                    logger.debug("Found flaw bug with more than one alias, only alias which starts with CVE-")
                # Keep only the CVE-prefixed alias; a later match overwrites
                # an earlier one for the same flaw id.
                for a in alias:
                    if a.startswith('CVE-'):
                        flaw_cve_map[flaw.id] = a
            else:
                flaw_cve_map[flaw.id] = ""
    for key in flaw_cve_map.keys():
        if flaw_cve_map[key] == "":
            logger.warning("Found flaw bug with no alias, this can happen if a flaw hasn't been assigned to a CVE")
    return flaw_cve_map
def set_state(bug, desired_state, noop=False):
    """Change the state of a bug to desired_state

    :param bug: python-bugzilla Bug object to transition
    :param desired_state: Target state
    :param noop: Do not do anything, only log what would have been done
    """
    current_state = bug.status
    if noop:
        logger.info(f"Would have changed BZ#{bug.bug_id} from {current_state} to {desired_state}")
        return
    logger.info(f"Changing BZ#{bug.bug_id} from {current_state} to {desired_state}")
    comment = f'Elliott changed bug status from {current_state} to {desired_state}.'
    # private=True keeps the audit-trail comment internal to Red Hat.
    bug.setstatus(status=desired_state,
                  comment=comment,
                  private=True)
def create_placeholder(bz_data, kind):
    """Create a placeholder bug

    :param bz_data: The Bugzilla data dump we got from our bugzilla.yaml file
    :param kind: The "kind" of placeholder to create. Generally 'rpm' or 'image'
    :return: Placeholder Bug object
    """
    bzapi = get_bzapi(bz_data)
    version = bz_data['version'][0]
    target_release = bz_data['target_release'][0]
    boilerplate = "Placeholder bug for OCP {} {} release".format(target_release, kind)
    createinfo = bzapi.build_createbug(
        product=bz_data['product'],
        version=version,
        component="Release",
        summary=boilerplate,
        description=boilerplate)
    newbug = bzapi.createbug(createinfo)

    # Change state to VERIFIED, set target release.
    # build_update is kept outside the try block so `update` is always bound
    # when the retry below runs (the previous code hit a NameError if
    # build_update itself raised).
    update = bzapi.build_update(status="VERIFIED", target_release=target_release)
    try:
        bzapi.update_bugs([newbug.id], update)
    except Exception as ex:  # transient bugzilla failure; retry once
        print(ex)
        sleep(5)
        bzapi.update_bugs([newbug.id], update)
    return newbug
def search_for_bugs(bz_data, status, search_filter='default', filter_out_security_bugs=True, verbose=False):
    """Search the provided target_release's for bugs in the specified states

    :param bz_data: The Bugzilla data dump we got from our bugzilla.yaml file
    :param status: The status(es) of bugs to search for
    :param search_filter: Which search filter from bz_data to use if multiple are specified
    :param filter_out_security_bugs: Boolean on whether to filter out bugs tagged with the SecurityTracking keyword.
    :param verbose: echo the constructed query URL
    :return: A list of Bug objects
    """
    bzapi = get_bzapi(bz_data)
    query_url = _construct_query_url(bz_data, status, search_filter)
    if filter_out_security_bugs:
        # 'nowords' excludes any bug carrying the SecurityTracking keyword.
        query_url.addKeyword('SecurityTracking', 'nowords')
    # TODO: Expose this for debugging
    if verbose:
        click.echo(query_url)
    return _perform_query(bzapi, query_url, include_fields=['id', 'status', 'summary', 'creation_time', 'cf_pm_score', 'component', 'external_bugs'])
def search_for_security_bugs(bz_data, status=None, search_filter='security', cve=None, verbose=False):
    """Search for CVE tracker bugs

    :param bz_data: The Bugzilla data dump we got from our bugzilla.yaml file
    :param status: The status(es) of bugs to search for
    :param search_filter: Which search filter from bz_data to use if multiple are specified
    :param cve: The CVE number to filter against
    :param verbose: echo the constructed query URL
    :return: A list of CVE trackers
    """
    if status is None:
        # Every open-ish state up to and including RELEASE_PENDING.
        status = ['NEW', 'ASSIGNED', 'POST', 'MODIFIED', 'ON_QA', 'VERIFIED', 'RELEASE_PENDING']
    # interactive_login=True: prompt for credentials if no cached login.
    bzapi = get_bzapi(bz_data, True)
    query_url = _construct_query_url(bz_data, status, search_filter)
    query_url.addKeyword('SecurityTracking')
    if verbose:
        click.echo(query_url)
    bug_list = _perform_query(bzapi, query_url, include_fields=['id', 'status', 'summary', 'blocks'])
    # Trackers embed the CVE id in their summary; filter on it if requested.
    if cve:
        bug_list = [bug for bug in bug_list if cve in bug.summary]
    return bug_list
def is_viable_bug(bug_obj):
    """ Check if a bug is viable to attach to an advisory.

    A viable bug must be in one of MODIFIED and VERIFIED status. We accept ON_QA
    bugs as viable as well, as they will be shortly moved to MODIFIED while attaching.

    :param bug_obj: bug object
    :returns: True if viable
    """
    viable_states = {"MODIFIED", "ON_QA", "VERIFIED"}
    return bug_obj.status in viable_states
def is_cve_tracker(bug_obj):
    """ Check if a bug is a CVE tracker.

    A CVE tracker bug must have `SecurityTracking` and `Security` keywords.

    :param bug_obj: bug object
    :returns: True if the bug is a CVE tracker.
    """
    return {"SecurityTracking", "Security"}.issubset(bug_obj.keywords)
def get_bzapi(bz_data, interactive_login=False):
    """Instantiate a Bugzilla API client for the configured server.

    :param bz_data: Bugzilla config dict; must contain 'server'
    :param interactive_login: prompt for credentials when no cached login exists
    :return: a bugzilla.Bugzilla instance (possibly not logged in)
    """
    bzapi = bugzilla.Bugzilla(bz_data['server'])
    if not bzapi.logged_in:
        print("elliott requires cached login credentials for {}".format(bz_data['server']))
        if interactive_login:
            bzapi.interactive_login()
    return bzapi
def _construct_query_url(bz_data, status, search_filter='default'):
    """Build a SearchURL from the bugzilla.yaml config, statuses and filters.

    :param bz_data: Bugzilla config dict
    :param status: iterable of bug statuses to include
    :param search_filter: named entry under bz_data['filters'] to use
    :return: a populated SearchURL
    """
    query_url = SearchURL(bz_data)
    # Either a flat 'filter' list or a named entry under 'filters' may be
    # configured.  Default to no filters instead of raising NameError when
    # neither key is present (or the named entry is missing).
    if bz_data.get('filter'):
        filter_list = bz_data.get('filter')
    elif bz_data.get('filters'):
        filter_list = bz_data.get('filters').get(search_filter)
    else:
        filter_list = []
    for f in filter_list or []:
        query_url.addFilter(f.get('field'), f.get('operator'), f.get('value'))
    for s in status:
        query_url.addBugStatus(s)
    for r in bz_data.get('target_release', []):
        query_url.addTargetRelease(r)
    return query_url
def _perform_query(bzapi, query_url, include_fields=None):
if include_fields is None:
include_fields = ['id']
query = bzapi.url_to_query(str(query_url))
query["include_fields"] = include_fields
return bzapi.query(query)
class SearchFilter(object):
    """
    This represents a query filter. Each filter consists of three components:
    * field selector string
    * operator
    * field value
    """

    pattern = "&f{0}={1}&o{0}={2}&v{0}={3}"

    def __init__(self, field, operator, value):
        self.field = field
        self.operator = operator
        self.value = value

    def tostring(self, number):
        # URL-encode only the value; field and operator come from config.
        encoded_value = urllib.parse.quote(self.value)
        return self.pattern.format(number, self.field, self.operator, encoded_value)
class SearchURL(object):
    """Builder for a Bugzilla buglist.cgi advanced-search URL.

    Accumulates statuses, filters, target releases, versions and a keyword
    clause, then renders the complete query URL via str().
    """

    url_format = "https://{}/buglist.cgi?"

    def __init__(self, bz_data):
        # bz_data is the dict loaded from the bugzilla.yaml config.
        self.bz_host = bz_data.get('server')
        self.classification = bz_data.get('classification')
        self.product = bz_data.get('product')
        self.bug_status = []
        self.filters = []
        self.filter_operator = ""
        self.versions = []
        self.target_releases = []
        self.keyword = ""
        self.keywords_type = ""

    @as_native_str()
    def __str__(self):
        # Assemble the URL from every accumulated query component.
        root_string = SearchURL.url_format.format(self.bz_host)
        url = root_string + self._status_string()
        url += "&classification={}".format(urllib.parse.quote(self.classification))
        url += "&product={}".format(urllib.parse.quote(self.product))
        url += self._keywords_string()
        url += self.filter_operator
        url += self._filter_string()
        url += self._target_releases_string()
        url += self._version_string()
        return url

    def _status_string(self):
        return "&".join(["bug_status={}".format(i) for i in self.bug_status])

    def _version_string(self):
        return "".join(["&version={}".format(i) for i in self.versions])

    def _filter_string(self):
        # Filters are numbered f0/o0/v0, f1/o1/v1, ... per Bugzilla's
        # advanced-search query format.
        return "".join([f.tostring(i) for i, f in enumerate(self.filters)])

    def _target_releases_string(self):
        return "".join(["&target_release={}".format(tr) for tr in self.target_releases])

    def _keywords_string(self):
        return "&keywords={}&keywords_type={}".format(self.keyword, self.keywords_type)

    def addFilter(self, field, operator, value):
        self.filters.append(SearchFilter(field, operator, value))

    def addTargetRelease(self, release_string):
        self.target_releases.append(release_string)

    def addVersion(self, version):
        self.versions.append(version)

    def addBugStatus(self, status):
        self.bug_status.append(status)

    def addKeyword(self, keyword, keyword_type="anywords"):
        # Only a single keyword clause is supported; later calls overwrite it.
        self.keyword = keyword
        self.keywords_type = keyword_type
from __future__ import absolute_import, print_function, unicode_literals
import asyncio
import json
import shlex
import subprocess
import time
from . import assertion, logutil, pushd
SUCCESS = 0
logger = logutil.getLogger(__name__)
class RetryException(Exception):
    """
    Provide a custom exception for retry failures

    Raised by retry() when the task never passes its check function within
    the allotted number of attempts.
    """
    pass
def retry(retries, task_f, check_f=bool, wait_f=None):
    """Run ``task_f`` up to ``retries`` times until ``check_f`` accepts its result.

    :param retries int: maximum number of attempts
    :param task_f func: the function to be run and observed
    :param check_f func: predicate applied to task_f's result (default: bool)
    :param wait_f func: optional callable run between attempts; receives the
        0-based index of the attempt that just failed
    :return: the first result accepted by check_f
    :raises RetryException: when every attempt fails the check
    """
    attempts_left = retries
    while attempts_left > 0:
        result = task_f()
        if check_f(result):
            return result
        attempts_left -= 1
        # Wait before the next attempt, but not after the final failure.
        if attempts_left > 0 and wait_f is not None:
            wait_f(retries - attempts_left - 1)
    raise RetryException("Giving up after {} failed attempt(s)".format(retries))
def cmd_assert(cmd, retries=1, pollrate=60, on_retry=None, text_mode=True):
    """
    Run a command, logging (using exec_cmd) and raise an exception if the
    return code of the command indicates failure.
    Try the command multiple times if requested.

    :param cmd <string|list>: A shell command
    :param retries int: The number of times to try before declaring failure
    :param pollrate int: how long to sleep between tries
    :param on_retry <string|list>: A shell command to run before retrying a failure
    :param text_mode: True to return stdout and stderr as strings
    :return: (stdout,stderr) if exit code is zero
    """
    for try_num in range(0, retries):
        if try_num > 0:
            # Sleep (and optionally run the on_retry command) before every
            # attempt after the first.
            logger.debug(
                "cmd_assert: Failed {} times. Retrying in {} seconds: {}".
                format(try_num, pollrate, cmd))
            time.sleep(pollrate)
            if on_retry is not None:
                cmd_gather(on_retry)  # no real use for the result though
        result, stdout, stderr = cmd_gather(cmd, text_mode)
        if result == SUCCESS:
            break
    logger.debug("cmd_assert: Final result = {} in {} tries.".format(result, try_num))
    # assertion.success raises when result is non-zero.
    assertion.success(
        result,
        "Error running [{}] {}. See debug log.".
        format(pushd.Dir.getcwd(), cmd))
    return stdout, stderr
def cmd_gather(cmd, text_mode=True):
    """
    Runs a command and returns rc,stdout,stderr as a tuple.

    If called while the `Dir` context manager is in effect, guarantees that the
    process is executed in that directory, even if it is no longer the current
    directory of the process (i.e. it is thread-safe).

    :param cmd: The command and arguments to execute
    :param text_mode: True to return stdout and stderr as strings
    :return: (rc,stdout,stderr)
    """
    # Accept either a pre-split argv list or a single shell-like string.
    if not isinstance(cmd, list):
        cmd_list = shlex.split(cmd)
    else:
        cmd_list = cmd
    cwd = pushd.Dir.getcwd()
    cmd_info = '[cwd={}]: {}'.format(cwd, json.dumps(cmd_list))
    logger.debug("Executing:cmd_gather {}".format(cmd_info))
    proc = subprocess.Popen(
        cmd_list, cwd=cwd,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    rc = proc.returncode
    if text_mode:
        out_str = out.decode(encoding="utf-8")
        err_str = err.decode(encoding="utf-8")
        logger.debug(
            "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n".
            format(cmd_info, rc, out_str, err_str))
        return rc, out_str, err_str
    else:
        # Binary mode: hand back the raw bytes untouched.
        logger.debug(
            "Process {}: exited with: {}".format(cmd_info, rc))
        return rc, out, err
async def cmd_gather_async(cmd, text_mode=True):
    """Similar to cmd_gather, but run asynchronously

    :param cmd: The command and arguments to execute (string or list)
    :param text_mode: True to return stdout and stderr as strings
    :return: (rc, stdout, stderr)
    """
    if not isinstance(cmd, list):
        cmd_list = shlex.split(cmd)
    else:
        cmd_list = cmd
    cwd = pushd.Dir.getcwd()
    cmd_info = '[cwd={}]: {}'.format(cwd, json.dumps(cmd_list))
    logger.debug("Executing:cmd_gather {}".format(cmd_info))
    # Must unpack cmd_list here: the previous code unpacked *cmd, which
    # splats a string command into individual characters and silently
    # discards the shlex.split result.
    proc = await asyncio.create_subprocess_exec(
        *cmd_list, cwd=cwd,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    out, err = await proc.communicate()
    rc = proc.returncode
    if text_mode:
        out_str = out.decode(encoding="utf-8")
        err_str = err.decode(encoding="utf-8")
        logger.debug(
            "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n".
            format(cmd_info, rc, out_str, err_str))
        return rc, out_str, err_str
    else:
        logger.debug(
            "Process {}: exited with: {}".format(cmd_info, rc))
        return rc, out, err
from validator import support
from schema import Schema, Optional, Use, And, Or, SchemaError
def image_schema(file):
    """Build the Schema used to validate a single image config file.

    :param file: path of the image config being validated; used to look up
        the valid stream and member references for that file.
    :return: a schema.Schema describing all permitted keys and values
    """
    # Enumerations of permitted values for the constrained fields below.
    valid_arches = [
        'x86_64',
    ]
    valid_modification_actions = [
        'command',
        'replace',
    ]
    valid_modification_commands = [
        'update-console-sources',
    ]
    valid_distgit_namespaces = [
        'apbs',
        'containers',
        'rpms',
    ]
    # Stream/member references depend on the surrounding build-data checkout.
    valid_streams = support.get_valid_streams_for(file)
    valid_member_references = support.get_valid_member_references_for(file)
    valid_modes = [
        'auto',
        'disabled',
        'wip',
    ]
    return Schema({
        Optional('arches'): [Or(*valid_arches)],
        Optional('base_only'): True,
        Optional('container_yaml'): {
            'go': {
                'modules': [
                    {
                        'module': And(str, len),
                        Optional('path'): str,
                    },
                ],
            },
        },
        Optional('content'): {
            'source': {
                Optional('alias'): And(str, len),
                Optional('dockerfile'): And(str, len),
                Optional('git'): {
                    'branch': {
                        Optional('fallback'): And(str, len),
                        'target': And(str, len),
                    },
                    'url': And(Use(str), len),
                },
                Optional('modifications'): [{
                    'action': Or(*valid_modification_actions),
                    Optional('command'): [
                        Or(*valid_modification_commands),
                    ],
                    Optional('match'): And(str, len),
                    Optional('replacement'): And(str, len),
                }],
                Optional('path'): str,
            },
        },
        Optional('dependents'): [
            And(str, len)
        ],
        Optional('distgit'): {
            Optional('namespace'): Or(*valid_distgit_namespaces),
            Optional('component'): And(str, len),
            Optional('branch'): And(str, len),
        },
        Optional('enabled_repos'): [
            And(str, len),
        ],
        # 'from' is the only unconditionally required top-level key.
        'from': {
            Optional('builder'): [
                {
                    Optional('stream'): Or(*valid_streams),
                    Optional('member'): Or(*valid_member_references),
                    Optional('image'): And(str, len),
                },
            ],
            Optional('image'): And(str, len),
            Optional('stream'): Or(*valid_streams),
            Optional('member'): Or(*valid_member_references),
        },
        Optional('labels'): {
            Optional('License'): And(str, len),
            Optional('io.k8s.description'): And(str, len),
            Optional('io.k8s.display-name'): And(str, len),
            Optional('io.openshift.tags'): And(str, len),
            Optional('vendor'): And(str, len),
        },
        Optional('mode'): Or(*valid_modes),
        'name': And(str, len),
        Optional('odcs'): {
            'packages': {
                'exclude': [
                    And(str, len),
                ],
                'mode': 'auto',
            },
        },
        Optional('no_oit_comments'): bool,
        Optional('owners'): [
            And(str, len),
        ],
        Optional('push'): {
            'repos': [
                And(str, len),
            ],
        },
        Optional('required'): bool,
        Optional('update-csv'): {
            'manifests-dir': And(str, len),
            'registry': And(str, len),
        },
        Optional('wait_for'): And(str, len),
    })
def validate(file, data):
    """Validate an image config against the schema.

    :param file: path of the config being validated
    :param data: parsed config contents
    :return: None when valid, otherwise the schema error as a string
    """
    try:
        image_schema(file).validate(data)
        return None
    except SchemaError as err:
        return '{}'.format(err)
import numpy as np
from .non_dimension import toone
from .check_func import array_check
def __variability(data: np.ndarray) -> np.ndarray:
    """Per-indicator contrast intensity: the sample standard deviation
    (ddof=1) of each column, used by the CRITIC weighting method."""
    array_check(data=data)
    n_samples = data.shape[0]
    centered = data - np.mean(data, axis=0)
    sample_var = (centered ** 2).sum(axis=0) / (n_samples - 1)
    return np.sqrt(sample_var)
def __conflict(data: np.ndarray) -> np.ndarray:
    """Per-indicator conflict measure for CRITIC weighting: the column sums
    of (1 - r) over the indicator correlation matrix."""
    array_check(data=data)
    try:
        # rowvar=False: columns are the indicators being correlated.
        corr_matrix: np.ndarray = np.corrcoef(data, rowvar=False)
    except np.linalg.LinAlgError as e:
        # Severe multicollinearity between indicators.
        raise np.linalg.LinAlgError("指标存在严重多重共线性") from e
    conflicts: np.ndarray = (1 - corr_matrix).sum(axis=0)
    return conflicts
def critic(data_origin: np.ndarray) -> np.ndarray:
    """
    Compute CRITIC weights from the given data.

    Combines each indicator's contrast intensity (standard deviation) with
    its conflict (1 - correlation with the other indicators).

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)

    Returns:
        np.ndarray: 1-D array of CRITIC weights
    """
    info1: np.ndarray = __variability(data_origin)
    info2: np.ndarray = __conflict(data_origin)
    information: np.ndarray = np.multiply(info1, info2)
    information = np.nan_to_num(information)  # guard against NaN from corrcoef
    _, width = data_origin.shape
    sum_info: float = information.sum()
    if sum_info == 0:
        # Degenerate case: no information at all; fall back to equal weights.
        return np.ones(width) / width
    weights: np.ndarray = information / sum_info
    return weights
def ewm(data_origin: np.ndarray) -> np.ndarray:
    """Compute entropy weight method (EWM) weights for the given data.

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)

    Returns:
        np.ndarray: 1-D array of per-indicator weights summing to 1
    """
    array_check(data=data_origin)
    data = toone(data_origin.copy(), mode='0')
    assert isinstance(data, np.ndarray)
    length, _ = data.shape
    # Convert each column into a probability distribution.
    data /= np.sum(data, axis=0)
    data = np.clip(data, a_min=1e-10, a_max=None)  # avoid log(0)
    # Standard EWM entropy: e_j = -(1/ln n) * sum_i p_ij ln p_ij, in [0, 1].
    # (The previous code multiplied by ln n instead of dividing, and
    # normalized by the sample count instead of sum(1 - e).)
    entropy = -np.sum(data * np.log(data), axis=0) / np.log(length)
    divergence = 1 - entropy
    return divergence / np.sum(divergence)
def stddev(data_origin: np.ndarray) -> np.ndarray:
    """Compute standard deviation (stddev) method weights for the given data.

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)

    Returns:
        np.ndarray: 1-D array of per-indicator weights
    """
    array_check(data=data_origin)
    data = toone(data_origin.copy(), mode='0')  # normalize before measuring spread
    width = data.shape[1]
    info = np.std(data, axis=0)
    # If every column is constant there is no spread; use equal weights.
    return np.ones(width) / width if np.sum(info) == 0 else np.divide(info, np.sum(info))
def gini(data_origin: np.ndarray) -> np.ndarray:
    """
    Compute Gini-coefficient weights for the given data.

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)

    Returns:
        np.ndarray: 1-D array of per-indicator weights summing to 1
    """
    array_check(data=data_origin)
    length, _ = data_origin.shape
    # diff_array[i, j, k] == |x[i, j] - x[k, j]|
    diff_array = np.abs(data_origin[:, :, np.newaxis] - data_origin[:, :, np.newaxis].T)
    # Sum the pairwise differences over the two SAMPLE axes (0 and 2) so the
    # result is one score per indicator.  The previous code summed axes
    # (0, 1), collapsing the indicator axis and producing a sample-indexed
    # vector instead.
    score = 2 / (length * (length - 1)) * np.sum(diff_array, axis=(0, 2))
    return score / np.sum(score)
import numpy as np
from .non_dimension import toone
from .check_func import array_check
def topsis(
    data_origin: np.ndarray, weights: list[int | float] | np.ndarray
) -> np.matrix | None:
    """Compute the TOPSIS score matrix; ``weights`` is the weight vector.

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)
        weights: 1-D weights, one per indicator

    Returns:
        np.matrix: column matrix of scores if the arguments are valid
    """
    array_check(data=data_origin)
    data = data_origin.copy()
    data = toone(data, mode='3')
    w = np.asarray(weights)
    # Squared weighted distances to the per-column ideal best / worst values.
    # (The previous code multiplied by `weights` a second time after already
    # applying weights**2 — an accidental w**3 weighting.)
    dist_max = np.square(np.subtract(data.max(axis=0), data)) * np.square(w)
    dist_min = np.square(np.subtract(data.min(axis=0), data)) * np.square(w)
    dist_z_max: np.ndarray = np.sqrt(dist_max.sum(axis=1))
    dist_z_min: np.ndarray = np.sqrt(dist_min.sum(axis=1))
    # Relative closeness to the ideal solution, in [0, 1].
    result: np.ndarray = dist_z_min / (dist_z_max + dist_z_min)
    return np.mat(result.reshape(result.shape[0], 1))
def rsr(
    data_origin: np.ndarray, weights: list[int | float] | np.ndarray
) -> np.matrix | None:
    """Compute the integer rank-sum-ratio (RSR) score matrix; ``weights`` is the weight vector.

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)
        weights: 1-D weights, one per indicator

    Returns:
        np.matrix: column matrix of scores if the arguments are valid
    """
    array_check(data=data_origin)
    data = data_origin.copy()
    length, _ = data.shape
    assert isinstance(weights, (list, np.ndarray)), "weights必须是一维数组或列表"
    weights = np.mat(weights)
    # Double argsort yields each element's rank within its column.
    compare_indices = np.argsort(data, axis=0)
    rsr_matrix = np.argsort(compare_indices, axis=0)
    # NOTE(review): ranks here are 0-based while classical RSR uses 1..n, and
    # `weights` is wrapped in np.mat twice (harmless) — confirm intent.
    return rsr_matrix * np.mat(weights).T / length
def ni_rsr(
    data_origin: np.ndarray, weights: list[int | float] | np.ndarray
) -> np.matrix | None:
    """Compute the non-integer rank-sum-ratio (RSR) score matrix; ``weights`` is the weight vector.

    Args:
        data_origin (np.ndarray): data matrix, shape (n_samples, n_indicators)
        weights: 1-D weights, one per indicator

    Returns:
        np.matrix: column matrix of scores if the arguments are valid
    """
    array_check(data=data_origin)
    data = data_origin.copy()
    length, _ = data.shape
    assert isinstance(weights, (list, np.ndarray)), "weights必须是一维数组或列表"
    max_value = np.max(data, axis=0)
    min_value = np.min(data, axis=0)
    # Linear interpolation of each value onto [1, length] per column.
    # NOTE(review): a constant column makes max == min and divides by zero —
    # confirm inputs are guaranteed non-constant.
    rsr_matrix = 1 + ((length - 1) * (data - min_value) / (max_value - min_value))
    return rsr_matrix * np.mat(weights).T / length
from math import radians, cos, sin, asin, sqrt
from .classdefine import Addr
from .special import ZXCITIES
def lookup(name: str, level: str | None = None) -> str | None:
    """Look up a region's full official name from a short name.

    :param name: short region name to look up
    :param level: administrative level to search ('province', 'city',
        'county').  Defaults to None, which searches every level and may
        therefore produce duplicate-name errors.
    :return: the full name, or None if nothing matched
    :raises ValueError: when the lookup fails
    """
    if name in ZXCITIES.keys():
        # Municipality (直辖市) short names are mapped to their province form.
        new_name = ZXCITIES[name]
        obj_area = Addr(new_name, level="province").addr
    else:
        try:
            obj_area = Addr(name, level=level).addr
        except Exception as e:
            raise ValueError(f"无法找到{name}的全称") from e
    # obj_area appears to be a dataframe-like table — presumably polars;
    # row 0, column "name".  TODO confirm against Addr's implementation.
    return None if len(obj_area) == 0 else obj_area[0, "name"]
def belongs_to(name: str, level: str | None = None) -> str | None:
    """Find the name of the parent administrative region of ``name``.

    :param name: region name to look up
    :param level: administrative level to search ('province', 'city',
        'county').  Defaults to None, which searches every level and may
        therefore produce duplicate-name errors.
    :return: the parent region's full name, or None
    :raises ValueError: when the lookup fails
    """
    if name in ZXCITIES.keys():
        # Municipality (直辖市) short names are mapped to their province form.
        new_name = ZXCITIES[name]
        obj_area = Addr(new_name, level="province")
    else:
        try:
            obj_area = Addr(name, level=level)
        except Exception as e:
            raise ValueError(f"无法找到{name}的上级行政区") from e
    return obj_area._belongs_to()
def coordinate(name: str, level: str | None = None) -> tuple[float, float] | None:
    """Return the (latitude, longitude) of a region's administrative center.

    :param name: region name to look up
    :param level: administrative level to search ('province', 'city',
        'county').  Defaults to None, which searches every level and may
        therefore produce duplicate-name errors.
    :return: (latitude, longitude) of the administrative center
    :raises ValueError: when the region cannot be resolved
    """
    # The previous code only called _coordinate() in the non-municipality
    # branch, so municipality short names hit a NameError on `return lat,
    # lon`.  Resolve the Addr in either branch, then fetch the coordinate.
    try:
        if name in ZXCITIES.keys():
            # Municipality (直辖市) short names map to their province form.
            obj_area = Addr(ZXCITIES[name], level="province")
        else:
            obj_area = Addr(name, level=level)
        lat, lon = obj_area._coordinate()
    except Exception as e:
        raise ValueError(f"无法找到{name}的坐标") from e
    return lat, lon
def haversine(lat1: float, lon1: float, lat2: float, lon2: float) -> float:
    """Great-circle distance between two points given in decimal degrees.

    :param lat1: latitude of point 1
    :param lon1: longitude of point 1
    :param lat2: latitude of point 2
    :param lon2: longitude of point 2
    :return: spherical distance in km
    """
    # Convert decimal degrees to radians.
    phi1, lam1, phi2, lam2 = (radians(v) for v in (lat1, lon1, lat2, lon2))
    # Haversine formula.
    half_dphi = (phi2 - phi1) / 2
    half_dlam = (lam2 - lam1) / 2
    h = sin(half_dphi) ** 2 + cos(phi1) * cos(phi2) * sin(half_dlam) ** 2
    central_angle = 2 * asin(sqrt(h))
    earth_radius_km = 6371
    return central_angle * earth_radius_km
def dist(
    name1: str, name2: str, level1: str | None = None, level2: str | None = None
) -> float:
    """Great-circle distance between two named regions' administrative centers.

    :param name1: first region name
    :param name2: second region name
    :param level1: administrative level for name1 ('province', 'city', 'county')
    :param level2: administrative level for name2 ('province', 'city', 'county')
    :return: spherical distance in km
    :raises ValueError: when either region cannot be resolved

    Note:
        level1/level2 default to None, which searches every level and may
        therefore produce duplicate-name errors.
    """
    try:
        city1_lat: float
        city1_lon: float
        city1_lat, city1_lon = coordinate(name1, level=level1)
        city2_lat: float
        city2_lon: float
        city2_lat, city2_lon = coordinate(name2, level=level2)
        return haversine(city1_lat, city1_lon, city2_lat, city2_lon)
    except Exception as e:
        raise ValueError(f"无法计算{name1}与{name2}之间的球面距离") from e
from random import randint
def formatar_cpf(cpf):
    """Format an 11-digit CPF string as XXX.XXX.XXX-XX."""
    return f"{cpf[0:3]}.{cpf[3:6]}.{cpf[6:9]}-{cpf[9:11]}"
def generate(formatar=False):
    """Generate a valid, random CPF number.

    A CPF has exactly 9 random base digits (a..i) plus two check digits
    (j, k), written abc.def.ghi-jk.  The check digits are computed as:

    * j: S = 10a + 9b + 8c + 7d + 6e + 5f + 4g + 3h + 2i; r = S % 11;
      j = 0 when r is 0 or 1, otherwise 11 - r.
    * k: S = 11a + 10b + 9c + 8d + 7e + 6f + 5g + 4h + 3i + 2j; r = S % 11;
      k = 0 when r is 0 or 1, otherwise 11 - r.

    :param formatar: when True, return the CPF formatted with dots and hyphen
    :return: the CPF as a string
    """
    # 9 random base digits.
    digits = [randint(0, 9) for _ in range(9)]

    # First check digit: weights 10..2 over the 9 base digits.
    resto = sum(w * d for w, d in zip(range(10, 1, -1), digits)) % 11
    digits.append(0 if resto < 2 else 11 - resto)

    # Second check digit: weights 11..2 over the 10 digits so far.
    resto = sum(w * d for w, d in zip(range(11, 1, -1), digits)) % 11
    digits.append(0 if resto < 2 else 11 - resto)

    cpf = ''.join(str(d) for d in digits)
    if formatar:
        return cpf[:3] + '.' + cpf[3:6] + '.' + cpf[6:9] + '-' + cpf[9:]
    return cpf
import ctypes
from . import basetypes
# Python 2/3 compatibility
try:
_basestring = basestring
except NameError:
_basestring = str
_cf = ctypes.cdll.LoadLibrary('/System/Library/Frameworks/CoreFoundation.framework/CoreFoundation')
def declare_cf_type(type_name, base_type):
    """Declare a CoreFoundation opaque type and its pointer ("Ref") type.

    :param type_name: name assigned to the generated ctypes Structure subclass
    :param base_type: ctypes structure to inherit from (CFType hierarchy)
    :return: (CFType subclass, pointer type patched with autorelease/__del__)
    """
    class CFType(base_type):
        pass
    CFType.__name__ = type_name

    CFTypeRef = ctypes.POINTER(CFType)

    # NB: Subclassing ctypes pointer types causes it to catch fire.
    # Instead, we will just patch in the functionality we want.
    def _CFTypeRef_autorelease(self):
        """Declare that this CFTypeRef should be automatically released

        Observe the rules of CoreFoundation when using this function.
        Only call autorelease() when the "Create Rule" is applicable.
        If the "Get Rule" is applicable, then you MUST NOT call
        autorelease(), unless you have explicitly retained the object
        first.

        NB: If you pass a CFTypeRef byref (i.e. as an output parameter)
        and it gets mutated, the old value will not be released!
        """
        self._autorelease = True
        return self
    CFTypeRef.autorelease = _CFTypeRef_autorelease

    def _CFTypeRef___del__(self):
        # Release only non-null pointers that opted in via autorelease().
        if self and getattr(self, '_autorelease', False):
            CFRelease(self)
    CFTypeRef.__del__ = _CFTypeRef___del__

    return CFType, CFTypeRef
# types: the CoreFoundation class hierarchy.  Each declaration yields the
# opaque struct type and its corresponding Ref pointer type.
CFType, CFTypeRef = declare_cf_type('CFType', ctypes.Structure)
CFAllocator, CFAllocatorRef = declare_cf_type('CFAllocator', CFType)
CFDictionary, CFDictionaryRef = declare_cf_type('CFDictionary', CFType)
CFMutableDictionary, CFMutableDictionaryRef = declare_cf_type('CFMutableDictionary', CFDictionary)
CFString, CFStringRef = declare_cf_type('CFString', CFType)
CFURL, CFURLRef = declare_cf_type('CFURL', CFType)

# Swizzle CFStringRef.from_param so that a string or bytes object can
# be passed to a function that expects a CFStringRef. Since the
# resultant object is ephemeral, autorelease() is automatically called
# so that it does not leak.  Keep the original hook for non-string args.
_CFStringRef_original_from_param = CFStringRef.from_param
def _CFStringRef_from_param(param):
    """ctypes from_param hook: transparently convert str/bytes arguments
    into an (autoreleased) CFString; defer to the original hook otherwise."""
    if isinstance(param, (_basestring, bytes)):
        if isinstance(param, _basestring):
            param = param.encode('utf-8')
        return CFStringCreateWithBytes(
            kCFAllocatorDefault, param, len(param),
            kCFStringEncodingUTF8, False).autorelease()
    else:
        return _CFStringRef_original_from_param(param)
# staticmethod: from_param must not receive an implicit self/cls.
CFStringRef.from_param = staticmethod(_CFStringRef_from_param)
def _CFStringRef___str__(self):
    """Decode this CFString to a Python str; raise ValueError if null."""
    if not self:
        raise ValueError('CFStringRef is null')
    n_chars = CFStringGetLength(self)
    max_bytes = CFStringGetMaximumSizeForEncoding(n_chars, kCFStringEncodingUTF8)
    # +1 leaves room for the NUL terminator written by CFStringGetCString.
    out_buf = ctypes.create_string_buffer(max_bytes + 1)
    CFStringGetCString(self, out_buf, max_bytes + 1, kCFStringEncodingUTF8)
    return out_buf.value.decode('utf-8')
CFStringRef.__str__ = _CFStringRef___str__
# Integer typedefs mirroring <CoreFoundation/CFBase.h>
CFIndex = ctypes.c_long
CFStringEncoding = ctypes.c_uint32
CFURLPathStyle = CFIndex
# constants
kCFAllocatorDefault = CFAllocatorRef.in_dll(_cf, 'kCFAllocatorDefault')
kCFStringEncodingUTF8 = 0x08000100
kCFURLPOSIXPathStyle = 0
# functions: argtypes/restype declared so ctypes marshals correctly
CFDictionaryGetValue = _cf.CFDictionaryGetValue
CFDictionaryGetValue.argtypes = [ CFDictionaryRef, ctypes.c_void_p ]
# NOTE(review): restype is c_void_p rather than CFTypeRef -- presumably so
# the borrowed ("Get Rule") reference never gets the patched __del__;
# confirm before changing.
CFDictionaryGetValue.restype = ctypes.c_void_p
CFRelease = _cf.CFRelease
CFRelease.argtypes = [ CFTypeRef ]
CFRelease.restype = None
CFRetain = _cf.CFRetain
CFRetain.argtypes = [ CFTypeRef ]
CFRetain.restype = CFTypeRef
CFStringCreateCopy = _cf.CFStringCreateCopy
CFStringCreateCopy.argtypes = [ CFAllocatorRef, CFStringRef ]
CFStringCreateCopy.restype = CFStringRef
CFStringCreateWithBytes = _cf.CFStringCreateWithBytes
CFStringCreateWithBytes.argtypes = [
    CFAllocatorRef, ctypes.c_char_p, CFIndex, CFStringEncoding, basetypes.Boolean ]
CFStringCreateWithBytes.restype = CFStringRef
CFStringGetCString = _cf.CFStringGetCString
CFStringGetCString.argtypes = [ CFStringRef, ctypes.c_char_p, CFIndex, CFStringEncoding ]
CFStringGetCString.restype = basetypes.Boolean
CFStringGetLength = _cf.CFStringGetLength
CFStringGetLength.argtypes = [ CFStringRef ]
CFStringGetLength.restype = CFIndex
CFStringGetMaximumSizeForEncoding = _cf.CFStringGetMaximumSizeForEncoding
CFStringGetMaximumSizeForEncoding.argtypes = [ CFIndex, CFStringEncoding ]
CFStringGetMaximumSizeForEncoding.restype = CFIndex
CFURLCopyFileSystemPath = _cf.CFURLCopyFileSystemPath
CFURLCopyFileSystemPath.argtypes = [ CFURLRef, CFURLPathStyle ]
CFURLCopyFileSystemPath.restype = CFStringRef
[](https://travis-ci.org/biocore/rhapsody)
# rhapsody
Neural networks for estimating microbe-metabolite interactions through their co-occurrence probabilities.
# Installation
Rhapsody can be installed via pypi as follows
```
pip install rhapsody
```
If you are planning on using GPUs, be sure to `pip install tensorflow-gpu`.
Rhapsody can also be installed via conda as follows
```
conda install rhapsody -c conda-forge
```
Note that this option may not work in cluster environments; it may be worthwhile to pip install within a virtual environment. It is possible to pip install rhapsody within a conda environment, including qiime2 conda environments. However, pip and conda are known to have compatibility issues, so proceed with caution.
# Getting started
To get started you can run a quick example as follows. This will learn microbe-metabolite vectors (mmvec)
which can be used to estimate microbe-metabolite conditional probabilities that are accurate up to rank.
```
rhapsody mmvec \
--otu-file data/otus.biom \
--metabolite-file data/ms.biom \
--summary-dir summary
```
While this is running, you can open up another session and run `tensorboard --logdir .` for diagnosis, see FAQs below for more details.
If you investigate the summary folder, you will notice that there are a number of files deposited.
See the following url for a more complete tutorial with real datasets.
https://github.com/knightlab-analyses/multiomic-cooccurences
More information can found under `rhapsody --help`
# Qiime2 plugin
If you want to make this qiime2 compatible, install this in your
qiime2 conda environment (see qiime2 installation instructions [here](https://qiime2.org/)) and run the following
```
pip install git+https://github.com/biocore/rhapsody.git
qiime dev refresh-cache
```
This should allow your q2 environment to recognize rhapsody. Before we test
the qiime2 plugin, run the following commands to import an example dataset
```
qiime tools import \
--input-path data/otus_nt.biom \
--output-path otus_nt.qza \
--type FeatureTable[Frequency]
qiime tools import \
--input-path data/lcms_nt.biom \
--output-path lcms_nt.qza \
--type FeatureTable[Frequency]
```
Then you can run mmvec
```
qiime rhapsody mmvec \
--i-microbes otus_nt.qza \
--i-metabolites lcms_nt.qza \
--o-conditionals ranks.qza \
--o-conditional-biplot biplot.qza
```
In the results, there are two files, namely `results/conditional_biplot.qza` and `results/conditionals.qza`. The conditional biplot is a biplot representation the
conditional probability matrix so that you can visualize these microbe-metabolite interactions in an exploratory manner. This can be directly visualized in
Emperor as shown below. We also have the estimated conditional probability matrix given in `results/conditionals.qza`,
which can be unzipped to yield a tab-delimited table via `unzip results/conditionals`. Each row can be ranked,
so the top most occurring metabolites for a given microbe can be obtained by identifying the highest co-occurrence probabilities for each microbe.
It is worth your time to investigate the logs (labeled under `logdir**`) that are deposited using Tensorboard.
The actual logfiles within this directory are labeled `events.out.tfevents.*` : more discussion on this later.
Tensorboard can be run via
```
tensorboard --logdir .
```
You may need to tinker with the parameters to get readable tensorflow results, namely `--p-summary-interval`,
`--epochs` and `--batch-size`.
A description of these two graphs is outlined in the FAQs below.
Then you can run the following to generate a emperor biplot.
```
qiime emperor biplot \
--i-biplot conditional_biplot.qza \
--m-sample-metadata-file data/metabolite-metadata.txt \
--m-feature-metadata-file data/microbe-metadata.txt \
--o-visualization emperor.qzv
```
The resulting biplot should look like something as follows

Here, the metabolite represent points and the arrows represent microbes. The points close together are indicative of metabolites that
frequently co-occur with each other. Furthermore, arrows that have a small angle between them are indicative of microbes that co-occur with each other.
Arrows that point in the same direction as the metabolites are indicative of microbe-metabolite co-occurrences. In the biplot above, the red arrows
correspond to Pseudomonas aeruginosa, and the red points correspond to Rhamnolipids that are likely produced by Pseudomonas aeruginosa.
More information behind the parameters can found under `qiime rhapsody --help`
# FAQs
**Q**: Looks like there are two different commands, a standalone script and a qiime2 interface. Which one should I use?!?
**A**: It'll depend on how deep in the weeds you'll want to get. For most intents and purposes, the qiime2 interface will more practical for most analyses. There are 3 major reasons why the standalone scripts are more preferable to the qiime2 interface, namely
1. Customized acceleration : If you want to bring down your runtime from a few days to a few hours, you may need to compile Tensorflow to handle hardware specific instructions (i.e. GPUs / SIMD instructions). It probably is possible to enable GPU compatibility within a conda environment with some effort, but since conda packages binaries, SIMD instructions will not work out of the box.
2. Checkpoints : If you are not sure how long your analysis should run, the standalone script can allow you record checkpoints, which can allow you to recover your model parameters. This enables you to investigate your model while the model is training.
3. More model parameters : The standalone script will return the bias parameters learned for each dataset (i.e. microbe and metabolite abundances). These are stored under the summary directory (specified by `--summary`) under the names `embeddings.csv`. This file will hold the coordinates for the microbes and metabolites, along with biases. There are 4 columns in this file, namely `feature_id`, `axis`, `embed_type` and `values`. `feature_id` is the name of the feature, whether it be a microbe name or a metabolite feature id. `axis` corresponds to the name of the axis, which either corresponds to a PC axis or bias. `embed_type` denotes if the coordinate corresponds to a microbe or metabolite. `values` is the coordinate value for the given `axis`, `embed_type` and `feature_id`. This can be useful for accessing the raw parameters and building custom biplots / ranks visualizations - this also has the advantage of requiring much less memory to manipulate.
**Q** : You mentioned that you can use GPUs. How can you do that??
**A** : This can be done by running `pip install tensorflow-gpu` in your environment. See details [here](https://www.tensorflow.org/install/gpu).
At the moment, these capabilities are only available for the standalone CLI due to complications of installation. See the `--arm-the-gpu` option in the standalone interface.
**Q** : Neural networks scare me - don't they overfit the crap out of your data?
**A** : Here, we are using shallow neural networks (so only two layers). This falls under the same regime as PCA and SVD. But just as you can overfit PCA/SVD, you can also overfit mmvec. Which is why we have Tensorboard enabled for diagnostics. You can visualize the `cv_rmse` to gauge if there is overfitting -- if your run is strictly decreasing, then that is a sign that you are probably not overfitting. But this is not necessarily indicative that you have reach the optimal -- you want to check to see if `logloss` has reached a plateau as shown above.
**Q** : I'm confused, what is Tensorboard?
**A** : Tensorboard is a diagnostic tool that runs in a web browser. To open tensorboard, make sure you’re in the rhapsody environment and cd into the folder you are running the script above from. Then run:
```
tensorboard --logdir .
```
The returned line will look something like:
```
TensorBoard 1.9.0 at http://Lisas-MacBook-Pro-2.local:6006 (Press CTRL+C to quit)
```
Open the website (highlighted in red) in a browser. (Hint; if that doesn’t work try putting only the port number (here it is 6006), adding localhost, localhost:6006). Leave this tab alone. Now any rhapsody output directories that you add to the folder that tensorflow is running in will be added to the webpage.
If working properly, it will look something like this

FIRST graph in Tensorflow; 'Prediction accuracy'. Labelled `cv_rmse`
This is a graph of the prediction accuracy of the model; the model will try to guess the metabolite intensity values for the testing samples that were set aside in the script above, using only the microbe counts in the testing samples. Then it looks at the real values and sees how close it was.
The second graph is the `likelihood` - if your `likelihood` values have plateaued, that is a sign that you have converged and reached a local minimum.
The x-axis is the number of iterations (meaning times the model is training across the entire dataset). Every time you iterate across the training samples, you also run the test samples and the averaged results are being plotted on the y-axis.
The y-axis is the average number of counts off for each feature. The model is predicting the sequence counts for each feature in the samples that were set aside for testing. So in the graph above it means that, on average, the model is off by ~0.75 intensity units, which is low. However, this is ABSOLUTE error not relative error (unfortunately we don't know how to compute relative errors because of the sparsity in these datasets).
You can also compare multiple runs with different parameters to see which run performed the best. If you are doing this, be sure to look at the `training-column` example make the testing samples consistent across runs.
**Q** : What's up with the `--training-column` argument?
**A** : That is used for cross-validation if you have a specific reproducibility question that you are interested in answering. It can also make it easier to compare cross validation results across runs. If this is specified, only samples labeled "Train" under this column will be used for building the model and samples labeled "Test" will be used for cross validation. In other words the model will attempt to predict the microbe abundances for the "Test" samples. The resulting prediction accuracy is used to evaluate the generalizability of the model in order to determine if the model is overfitting or not. If this argument is not specified, then 10 random samples will be chosen for the test dataset. If you want to specify more random samples to allocate for cross-validation, the `num-random-test-examples` argument can be specified.
**Q** : What sort of parameters should I focus on when picking a good model?
**A** : There are 3 different parameters to focus on, `input-prior`, `output-prior` and `latent-dim`
The `--input-prior` and `--output-prior` options specifies the width of the prior distribution of the coefficients, where the `--input-prior` is typically specific to microbes and the `--output-prior` is specific to metabolites.
For a prior of 1, this means 99% of entries in the embeddings (typically given in the `U.txt` and `V.txt` files are within -3 and +3 (log fold change). The higher differential-prior is, the more parameters can have bigger changes, so you want to keep this relatively small. If you see overfitting (accuracy and fit increasing over iterations in tensorboard) you may consider reducing the `--input-prior` and `--output-prior` in order to reduce the parameter space.
Another parameter worth thinking about is `--latent-dim`, which controls the number of dimensions used to approximate the conditional probability matrix. This also specifies the dimensions of the microbe/metabolite embeddings `U.txt` and `V.txt`. The more dimensions this has, the more accurate the embeddings can be -- but the higher the chance of overfitting there is. The rule of thumb to follow is in order to fit these models, you need at least 10 times as many samples as there are latent dimensions (this is following a similar rule of thumb for fitting straight lines). So if you have 100 samples, you should definitely not have a latent dimension of more than 10. Furthermore, you can still overfit certain microbes and metabolites. For example, you are fitting a model with those 100 samples and just 1 latent dimension, you can still easily overfit microbes and metabolites that appear in less than 10 samples -- so even fitting models with just 1 latent dimension will require some microbes and metabolites that appear in less than 10 samples to be filtered out.
**Q** : What does a good model fit look like??
**A** : Again the numbers vary greatly by dataset. But you want to see the both the `logloss` and `cv_rmse` curves decaying, and plateau as close to zero as possible.
**Q** : How long should I expect this program to run?
**A** : Both `epochs` and `batch-size` contribute to determining how long the algorithm will run, namely
**Number of iterations = `epoch #` multiplied by the ( Total # of microbial reads / `batch-size` parameter)**
This also depends on if your program will converge. The `learning-rate` specifies the resolution (smaller step size = smaller resolution, but may take longer to converge). You will need to consult with Tensorboard to make sure that your model fit is sane. See this paper for more details on gradient descent: https://arxiv.org/abs/1609.04747
If you are running this on a CPU, 16 cores, a run that reaches convergence should take about 1 day.
If you have a GPU - you maybe able to get this down to a few hours. However, some finetuning of the `batch-size` parameter maybe required -- instead of having a small `batch-size` < 100, you'll want to bump up the `batch-size` to between 1000 and 10000 to fully leverage the speedups available on the GPU.
Credits to Lisa Marotz ([@lisa55asil](https://github.com/lisa55asil)), Yoshiki Vazquez-Baeza ([@ElDeveloper](https://github.com/ElDeveloper)) and Julia Gauglitz ([@jgauglitz](https://github.com/jgauglitz)) for their README contributions.
| /rhapsody-0.4.0.tar.gz/rhapsody-0.4.0/README.md | 0.453746 | 0.980986 | README.md | pypi |
import logging
import shutil
import subprocess
import tempfile
import typing
from pathlib import Path
import networkx as nx
import rhasspynlu
_LOGGER = logging.getLogger("rhasspyasr_deepspeech")
def train(
    graph: nx.DiGraph,
    language_model: typing.Union[str, Path],
    scorer_path: typing.Union[str, Path],
    alphabet_path: typing.Union[str, Path],
    language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    base_language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    base_language_model_weight: typing.Optional[float] = None,
    mixed_language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    balance_counts: bool = True,
):
    """Re-generates language model and scorer from intent graph.

    Pipeline: intent graph -> ARPA language model (+ vocabulary) ->
    binary KenLM model -> DeepSpeech scorer package. The final binary
    LM and scorer are copied to ``language_model`` and ``scorer_path``.

    NOTE(review): ``balance_counts`` is accepted but never referenced in
    this function -- confirm whether it should be forwarded to
    rhasspynlu.arpa_lm.graph_to_arpa.
    """
    # Language model mixing: only enabled when a base FST is given with a
    # positive weight.
    base_fst_weight = None
    if (
        (base_language_model_fst is not None)
        and (base_language_model_weight is not None)
        and (base_language_model_weight > 0)
    ):
        base_fst_weight = (base_language_model_fst, base_language_model_weight)
    # Begin training
    with tempfile.NamedTemporaryFile(mode="w+") as arpa_file:
        with tempfile.NamedTemporaryFile(mode="w+") as vocab_file:
            # 1. Create language model
            _LOGGER.debug("Converting to ARPA language model")
            rhasspynlu.arpa_lm.graph_to_arpa(
                graph,
                arpa_file.name,
                model_path=language_model_fst,
                base_fst_weight=base_fst_weight,
                merge_path=mixed_language_model_fst,
                vocab_path=vocab_file.name,
            )
            # Rewind so downstream tools read from the start
            arpa_file.seek(0)
            vocab_file.seek(0)
            with tempfile.NamedTemporaryFile(mode="wb+") as lm_file:
                # 2. Convert to binary language model
                arpa_to_binary(arpa_file.name, lm_file.name)
                lm_file.seek(0)
                with tempfile.NamedTemporaryFile(mode="wb+") as scorer_file:
                    # 3. Generate scorer
                    make_scorer(
                        alphabet_path, lm_file.name, vocab_file.name, scorer_file.name
                    )
                    # Copy over actual files
                    lm_file.seek(0)
                    shutil.copy(lm_file.name, language_model)
                    _LOGGER.debug("Wrote binary language model to %s", language_model)
                    scorer_file.seek(0)
                    shutil.copy(scorer_file.name, scorer_path)
                    _LOGGER.debug("Wrote scorer to %s", scorer_path)
def arpa_to_binary(
    arpa_path: typing.Union[str, Path], binary_lm_path: typing.Union[str, Path]
):
    """Convert ARPA language model to binary format using kenlm."""
    # NOTE: Using -i because other LM tools mistakenly produce positive log
    # probabilities. This option sets those to 0.
    command = ["build_binary", "-s", "-i"]
    command += ["-a", "255"]
    command += ["-q", "8"]
    command += ["-v", "trie"]
    command += [str(arpa_path), str(binary_lm_path)]
    _LOGGER.debug(command)
    subprocess.check_call(command)
def make_scorer(
    alphabet_path: typing.Union[str, Path],
    binary_lm_path: typing.Union[str, Path],
    vocab_path: typing.Union[str, Path],
    scorer_path: typing.Union[str, Path],
    default_alpha: float = 0.931289039105002,
    default_beta: float = 1.1834137581510284,
):
    """Generate scorer using Mozilla native-client tool."""
    # Flag/value pairs for generate_scorer_package (dicts preserve
    # insertion order, so the command line matches the documented form).
    options = {
        "--alphabet": alphabet_path,
        "--lm": binary_lm_path,
        "--vocab": vocab_path,
        "--package": scorer_path,
        "--default_alpha": default_alpha,
        "--default_beta": default_beta,
    }
    scorer_command = ["generate_scorer_package"]
    for flag, value in options.items():
        scorer_command.extend((flag, str(value)))
    _LOGGER.debug(scorer_command)
    subprocess.check_call(scorer_command)
import io
import logging
import math
import time
import typing
import wave
from pathlib import Path
import deepspeech
import numpy as np
from rhasspyasr import Transcriber, Transcription, TranscriptionToken
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class DeepSpeechTranscriber(Transcriber):
    """Speech to text with deepspeech library.

    Either a pre-loaded ``model`` or a ``model_path`` (loaded lazily on
    first use) must be provided. Scorer and decoding parameters are
    applied when the model is loaded.
    """

    def __init__(
        self,
        model_path: typing.Optional[Path] = None,
        scorer_path: typing.Optional[Path] = None,
        model: typing.Optional[deepspeech.Model] = None,
        beam_width: typing.Optional[int] = None,
        lm_alpha: typing.Optional[float] = None,
        lm_beta: typing.Optional[float] = None,
    ):
        self.model = model
        self.model_path = model_path
        self.scorer_path = scorer_path
        self.beam_width = beam_width
        self.lm_alpha = lm_alpha
        self.lm_beta = lm_beta

    def transcribe_wav(self, wav_bytes: bytes) -> typing.Optional[Transcription]:
        """Speech to text from WAV data."""
        self.maybe_load_model()
        assert self.model, "Model was not loaded"

        start_time = time.perf_counter()

        # Convert to raw numpy buffer of 16-bit samples
        with io.BytesIO(wav_bytes) as wav_io:
            with wave.open(wav_io) as wav_file:
                audio_bytes = wav_file.readframes(wav_file.getnframes())
                audio_buffer = np.frombuffer(audio_bytes, np.int16)

        metadata = self.model.sttWithMetadata(audio_buffer)
        end_time = time.perf_counter()

        wav_seconds = get_wav_duration(wav_bytes)
        transcribe_seconds = end_time - start_time

        return DeepSpeechTranscriber.metadata_to_transcription(
            metadata, wav_seconds, transcribe_seconds
        )

    # -------------------------------------------------------------------------

    def transcribe_stream(
        self,
        audio_stream: typing.Iterable[bytes],
        sample_rate: int,
        sample_width: int,
        channels: int,
    ) -> typing.Optional[Transcription]:
        """Speech to text from an audio stream."""
        self.maybe_load_model()
        assert self.model, "Model was not loaded"

        stream = self.model.createStream()
        start_time = time.perf_counter()
        num_frames = 0
        for chunk in audio_stream:
            if chunk:
                stream.feedAudioContent(np.frombuffer(chunk, dtype=np.int16))
                # NOTE(review): frame count ignores `channels`; assumes mono
                # audio -- confirm callers always pass channels=1
                num_frames += len(chunk) // sample_width

        metadata = stream.finishStreamWithMetadata()
        end_time = time.perf_counter()

        wav_seconds = num_frames / sample_rate
        transcribe_seconds = end_time - start_time

        return DeepSpeechTranscriber.metadata_to_transcription(
            metadata, wav_seconds, transcribe_seconds
        )

    # -------------------------------------------------------------------------

    @staticmethod
    def metadata_to_transcription(
        metadata: typing.Optional[deepspeech.Metadata],
        wav_seconds: float,
        transcribe_seconds: float,
    ) -> typing.Optional[Transcription]:
        """Convert DeepSpeech metadata to Rhasspy Transcription.

        Returns None if metadata is missing (transcription failure).
        """
        if metadata:
            # Actual transcription
            text = ""

            # Individual tokens
            tokens: typing.List[TranscriptionToken] = []
            confidence = 1

            if metadata.transcripts:
                # Only the best (first) candidate transcript is used
                transcript = next(iter(metadata.transcripts))
                confidence = math.exp(transcript.confidence)
                words_and_tokens: typing.List[typing.Any] = [["", []]]

                # Organize by whitespace-separated words
                for token in transcript.tokens:
                    text += token.text
                    if token.text.strip():
                        # Part of a word
                        words_and_tokens[-1][0] += token.text
                        words_and_tokens[-1][1].append(token)
                    else:
                        # Whitespace
                        words_and_tokens.append(["", []])

                for word, word_tokens in words_and_tokens:
                    if not (word and word_tokens):
                        continue

                    # NOTE(review): end_time is set from the *start* time of
                    # the word's last token -- confirm this approximation is
                    # intended
                    tokens.append(
                        TranscriptionToken(
                            token=word,
                            likelihood=1,
                            start_time=word_tokens[0].start_time,
                            end_time=word_tokens[-1].start_time,
                        )
                    )

            return Transcription(
                text=text,
                likelihood=confidence,
                transcribe_seconds=transcribe_seconds,
                wav_seconds=wav_seconds,
                tokens=tokens,
            )

        # Failure
        return None

    def stop(self):
        """Stop the transcriber."""

    def __repr__(self) -> str:
        return "DeepSpeechTranscriber(" f"model={self.model}" ")"

    def maybe_load_model(self):
        """Load DeepSpeech model if not already loaded."""
        if self.model:
            return

        assert self.model_path, "No model path"

        _LOGGER.debug("Loading model from %s", self.model_path)
        self.model = deepspeech.Model(str(self.model_path))

        if self.scorer_path and self.scorer_path.is_file():
            # Fixed: stray ")" removed from format string
            _LOGGER.debug("Enabling scorer: %s", self.scorer_path)
            self.model.enableExternalScorer(str(self.scorer_path))

        if self.beam_width is not None:
            _LOGGER.debug("Setting beam width to %s", self.beam_width)
            self.model.setBeamWidth(self.beam_width)

        if (self.lm_alpha is not None) and (self.lm_beta is not None):
            _LOGGER.debug(
                "Setting lm_alpha=%s, lm_beta=%s", self.lm_alpha, self.lm_beta
            )
            self.model.setScorerAlphaBeta(self.lm_alpha, self.lm_beta)
# -----------------------------------------------------------------------------
def get_wav_duration(wav_bytes: bytes) -> float:
    """Return the real-time duration of a WAV file in seconds."""
    with io.BytesIO(wav_bytes) as wav_buffer:
        with wave.open(wav_buffer, "rb") as wav_file:
            # duration = frame count / frames per second
            return wav_file.getnframes() / float(wav_file.getframerate())
import gzip
import logging
import os
import threading
import typing
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
from queue import Queue
import networkx as nx
import rhasspyasr_kaldi
import rhasspynlu
from rhasspyasr import Transcriber, Transcription
from rhasspyasr_kaldi.train import LanguageModelType
from rhasspyhermes.asr import (
AsrAudioCaptured,
AsrError,
AsrRecordingFinished,
AsrStartListening,
AsrStopListening,
AsrTextCaptured,
AsrToggleOff,
AsrToggleOn,
AsrToggleReason,
AsrTrain,
AsrTrainSuccess,
)
from rhasspyhermes.audioserver import AudioFrame, AudioSessionFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.g2p import G2pError, G2pPhonemes, G2pPronounce, G2pPronunciation
from rhasspyhermes.nlu import AsrToken, AsrTokenTime
from rhasspynlu.g2p import PronunciationsType
from rhasspysilence import SilenceMethod, VoiceCommandRecorder, WebRtcVadRecorder
from . import utils
_LOGGER = logging.getLogger("rhasspyasr_kaldi_hermes")
# -----------------------------------------------------------------------------
# (message, topic args) pair used when publishing captured session audio
AudioCapturedType = typing.Tuple[AsrAudioCaptured, TopicArgs]
# Messages that may be emitted while stopping a listening session
StopListeningType = typing.Union[
    AsrRecordingFinished, AsrTextCaptured, AsrError, AudioCapturedType
]
@dataclass
class TranscriberInfo:
    """Objects for a single transcriber"""

    # Underlying speech-to-text engine (created lazily in its worker thread)
    transcriber: typing.Optional[Transcriber] = None
    # Voice command recorder used for silence detection
    recorder: typing.Optional[VoiceCommandRecorder] = None
    # Audio chunks fed to the transcriber; a falsy item ends the stream
    frame_queue: "Queue[typing.Optional[bytes]]" = field(default_factory=Queue)
    # Set to tell the worker thread a session has started
    ready_event: threading.Event = field(default_factory=threading.Event)
    # Final transcription (None until available)
    result: typing.Optional[Transcription] = None
    # Set by the worker thread when result is available
    result_event: threading.Event = field(default_factory=threading.Event)
    # True once the result has been published (presumably; set elsewhere)
    result_sent: bool = False
    # Message that started the current session
    start_listening: typing.Optional[AsrStartListening] = None
    # Worker thread running the transcription loop
    thread: typing.Optional[threading.Thread] = None
    # Buffered session audio, presumably for AsrAudioCaptured -- verify
    audio_buffer: typing.Optional[bytes] = None
    # True if this transcriber may be returned to the free pool
    reuse: bool = True
@dataclass
class PronunciationDictionary:
    """Details of a phonetic dictionary."""

    # Location of the dictionary file on disk
    path: Path
    # Loaded pronunciations (presumably word -> pronunciation list; see
    # rhasspynlu.g2p.PronunciationsType)
    pronunciations: PronunciationsType = field(default_factory=dict)
    # File modification time (ns) when last loaded; None = never loaded
    mtime_ns: typing.Optional[int] = None
# -----------------------------------------------------------------------------
class AsrHermesMqtt(HermesClient):
"""Hermes MQTT server for Rhasspy ASR using Kaldi."""
    def __init__(
        self,
        client,
        transcriber_factory: typing.Callable[[], Transcriber],
        model_dir: typing.Optional[Path] = None,
        graph_dir: typing.Optional[Path] = None,
        base_dictionaries: typing.Optional[typing.List[Path]] = None,
        dictionary_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        g2p_model: typing.Optional[Path] = None,
        g2p_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        dictionary_path: typing.Optional[Path] = None,
        language_model_path: typing.Optional[Path] = None,
        language_model_type: LanguageModelType = LanguageModelType.ARPA,
        unknown_words: typing.Optional[Path] = None,
        no_overwrite_train: bool = False,
        base_language_model_fst: typing.Optional[Path] = None,
        base_language_model_weight: float = 0,
        mixed_language_model_fst: typing.Optional[Path] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        recorder_factory: typing.Optional[
            typing.Callable[[], VoiceCommandRecorder]
        ] = None,
        skip_seconds: float = 0.0,
        min_seconds: float = 1.0,
        max_seconds: typing.Optional[float] = None,
        speech_seconds: float = 0.3,
        silence_seconds: float = 0.5,
        before_seconds: float = 0.5,
        vad_mode: int = 3,
        max_energy: typing.Optional[float] = None,
        max_current_energy_ratio_threshold: typing.Optional[float] = None,
        current_energy_threshold: typing.Optional[float] = None,
        silence_method: SilenceMethod = SilenceMethod.VAD_ONLY,
        reuse_transcribers: bool = False,
        spn_phone: str = "SPN",
        sil_phone: str = "SIL",
        allow_unknown_words: bool = False,
        frequent_words: typing.Optional[typing.Set[str]] = None,
        unknown_words_probability: float = 1e-10,
        unknown_token: str = "<unk>",
        max_unknown_words: int = 8,
        silence_probability: float = 0.5,
        cancel_word: typing.Optional[str] = None,
        cancel_probability: float = 1e-2,
        lang: typing.Optional[str] = None,
    ):
        """Initialize the Kaldi ASR Hermes MQTT service.

        Parameters fall into broad groups: the MQTT client and transcriber
        factory; Kaldi model/graph paths and training outputs; pronunciation
        dictionary and grapheme-to-phoneme options; language-model mixing;
        audio format; silence-detection tuning (forwarded to
        WebRtcVadRecorder); and unknown/cancel word handling.
        """
        super().__init__(
            "rhasspyasr_kaldi_hermes",
            client,
            site_ids=site_ids,
            sample_rate=sample_rate,
            sample_width=sample_width,
            channels=channels,
        )

        # Handle ASR, G2P, audio, and training messages
        self.subscribe(
            AsrToggleOn,
            AsrToggleOff,
            AsrStartListening,
            AsrStopListening,
            G2pPronounce,
            AudioFrame,
            AudioSessionFrame,
            AsrTrain,
        )

        self.transcriber_factory = transcriber_factory

        # Kaldi model/graph dirs
        self.model_dir = model_dir
        self.graph_dir = graph_dir

        # True if transcribers should be reused
        self.reuse_transcribers = reuse_transcribers

        self.kaldi_port: typing.Optional[int] = None
        if not self.reuse_transcribers:
            # Use a fixed port number
            self.kaldi_port = utils.get_free_port()

        # Files to write during training
        self.dictionary_path = dictionary_path
        self.language_model_path = language_model_path
        self.language_model_type = language_model_type

        # Pronunciation dictionaries and word transform function
        base_dictionaries = base_dictionaries or []
        self.base_dictionaries = [
            PronunciationDictionary(path=path) for path in base_dictionaries
        ]
        self.dictionary_word_transform = dictionary_word_transform

        # Grapheme-to-phoneme model (Phonetisaurus FST) and word transform
        # function.
        self.g2p_model = g2p_model
        self.g2p_word_transform = g2p_word_transform

        # Mixed language model
        self.base_language_model_fst = base_language_model_fst
        self.base_language_model_weight = base_language_model_weight
        self.mixed_language_model_fst = mixed_language_model_fst

        # If True, HCLG.fst won't be overwritten during training
        self.no_overwrite_train = no_overwrite_train

        # Path to write missing words and guessed pronunciations
        self.unknown_words = unknown_words

        # Phone used for spoken noise (<unk>)
        self.spn_phone = spn_phone

        # Phone used for silence (<sil>)
        self.sil_phone = sil_phone

        # Used to produce alternative unknown word paths
        self.frequent_words = frequent_words
        self.allow_unknown_words = allow_unknown_words
        self.unknown_words_probability = unknown_words_probability
        self.unknown_token = unknown_token
        self.max_unknown_words = max_unknown_words
        self.silence_probability = silence_probability
        self.cancel_word = cancel_word
        self.cancel_probability = cancel_probability

        # True if ASR system is enabled
        self.enabled = enabled
        self.disabled_reasons: typing.Set[str] = set()

        # Seconds to wait for a result from a finished transcriber thread
        self.session_result_timeout = 20

        # Required audio format
        self.sample_rate = sample_rate
        self.sample_width = sample_width
        self.channels = channels

        # No timeout on silence detection
        def make_webrtcvad():
            # Default recorder factory: WebRTC VAD-based silence detection
            return WebRtcVadRecorder(
                max_seconds=max_seconds,
                vad_mode=vad_mode,
                skip_seconds=skip_seconds,
                min_seconds=min_seconds,
                speech_seconds=speech_seconds,
                silence_seconds=silence_seconds,
                before_seconds=before_seconds,
                silence_method=silence_method,
                current_energy_threshold=current_energy_threshold,
                max_energy=max_energy,
                max_current_ratio_threshold=max_current_energy_ratio_threshold,
            )

        self.recorder_factory = recorder_factory or make_webrtcvad

        # WAV buffers for each session
        self.sessions: typing.Dict[typing.Optional[str], TranscriberInfo] = {}
        self.free_transcribers: typing.List[TranscriberInfo] = []

        self.first_audio: bool = True

        self.lang = lang
# -------------------------------------------------------------------------
async def start_listening(
    self, message: AsrStartListening
) -> typing.AsyncIterable[typing.Union[StopListeningType, AsrError]]:
    """Start recording audio data for a session.

    Re-uses a pooled transcriber when one is free; otherwise spawns a new
    daemon thread running ``transcribe_proc``, which blocks on
    ``info.ready_event`` and streams frames from ``info.frame_queue`` to the
    transcriber.  Yields messages from stopping any existing session with the
    same id, or an ``AsrError`` on failure.
    """
    try:
        if message.session_id in self.sessions:
            # Stop existing session with the same id before starting a new one
            async for stop_message in self.stop_listening(
                AsrStopListening(
                    site_id=message.site_id, session_id=message.session_id
                )
            ):
                yield stop_message

        if self.free_transcribers:
            # Re-use existing transcriber
            info = self.free_transcribers.pop()
            _LOGGER.debug(
                "Re-using existing transcriber (session_id=%s)", message.session_id
            )
        else:
            # Create new transcriber
            info = TranscriberInfo(reuse=self.reuse_transcribers)
            _LOGGER.debug("Creating new transcriber session %s", message.session_id)

            def transcribe_proc(
                info, transcriber_factory, sample_rate, sample_width, channels
            ):
                # Thread body: loops over sessions, transcribing queued audio.
                def audio_stream(frame_queue) -> typing.Iterable[bytes]:
                    # Pull frames from the queue until a falsy sentinel
                    # (None, pushed by finish_session) ends the stream.
                    frames = frame_queue.get()
                    while frames:
                        yield frames
                        frames = frame_queue.get()

                try:
                    info.transcriber = transcriber_factory(port_num=self.kaldi_port)
                    assert (
                        info.transcriber is not None
                    ), "Failed to create transcriber"
                    while True:
                        # Wait for session to start
                        info.ready_event.wait()
                        info.ready_event.clear()

                        # Get result of transcription (blocks until the
                        # audio stream is exhausted)
                        result = info.transcriber.transcribe_stream(
                            audio_stream(info.frame_queue),
                            sample_rate,
                            sample_width,
                            channels,
                        )

                        _LOGGER.debug("Transcription result: %s", result)

                        assert (
                            result is not None and result.text
                        ), "Null transcription"

                        # Signal completion to finish_session (waiting on
                        # result_event)
                        info.result = result
                        info.result_event.set()

                        if not self.reuse_transcribers:
                            # One-shot transcriber: stop it and exit thread
                            try:
                                info.transcriber.stop()
                            except Exception:
                                _LOGGER.exception("Transcriber stop")

                            break
                except Exception:
                    _LOGGER.exception("session proc")

                    # Mark as not reusable
                    info.reuse = False

                    # Stop transcriber
                    if info.transcriber is not None:
                        try:
                            info.transcriber.stop()
                        except Exception:
                            _LOGGER.exception("Transcriber stop")

                    # Signal failure with an empty result so waiters wake up
                    info.transcriber = None
                    info.result = Transcription(
                        text="", likelihood=0, transcribe_seconds=0, wav_seconds=0
                    )
                    info.result_event.set()

            # Run in separate thread (daemon so it never blocks shutdown)
            info.thread = threading.Thread(
                target=transcribe_proc,
                args=(
                    info,
                    self.transcriber_factory,
                    self.sample_rate,
                    self.sample_width,
                    self.channels,
                ),
                daemon=True,
            )
            info.thread.start()

        # ---------------------------------------------------------------------

        # Settings for session
        info.start_listening = message

        # Signal session thread to start
        info.ready_event.set()

        if message.stop_on_silence:
            # Begin silence detection
            if info.recorder is None:
                info.recorder = self.recorder_factory()

            info.recorder.start()
        else:
            # Use internal buffer (no silence detection)
            info.audio_buffer = bytes()

        self.sessions[message.session_id] = info
        _LOGGER.debug("Starting listening (session_id=%s)", message.session_id)
        self.first_audio = True
    except Exception as e:
        _LOGGER.exception("start_listening")
        yield AsrError(
            error=str(e),
            context=repr(message),
            site_id=message.site_id,
            session_id=message.session_id,
        )
async def stop_listening(
    self, message: AsrStopListening
) -> typing.AsyncIterable[StopListeningType]:
    """Stop recording audio data for a session.

    Finishes the session (publishing its transcription if not yet sent) and,
    when the transcriber is reusable, resets its per-session state and returns
    it to the free pool.  Yields the messages produced by ``finish_session``
    or an ``AsrError`` on failure.

    Fix: removed a duplicated ``info.result = None`` assignment from the
    state-reset sequence (it appeared twice in the original).
    """
    info = self.sessions.pop(message.session_id, None)
    if info:
        try:
            # Trigger publishing of transcription on end of session
            async for result in self.finish_session(
                info, message.site_id, message.session_id
            ):
                yield result

            if info.reuse and (info.transcriber is not None):
                # Reset per-session state so the transcriber can be pooled
                info.result = None
                info.result_event.clear()
                info.result_sent = False
                info.start_listening = None
                info.audio_buffer = None

                # Drain any frames left over from this session
                while info.frame_queue.qsize() > 0:
                    info.frame_queue.get_nowait()

                # Add to free pool
                self.free_transcribers.append(info)
        except Exception as e:
            _LOGGER.exception("stop_listening")
            yield AsrError(
                error=str(e),
                context=repr(info.transcriber),
                site_id=message.site_id,
                session_id=message.session_id,
            )

    _LOGGER.debug("Stopping listening (session_id=%s)", message.session_id)
async def handle_audio_frame(
    self,
    frame_wav_bytes: bytes,
    site_id: str = "default",
    session_id: typing.Optional[str] = None,
) -> typing.AsyncIterable[
    typing.Union[
        AsrRecordingFinished,
        AsrTextCaptured,
        AsrError,
        typing.Tuple[AsrAudioCaptured, TopicArgs],
    ]
]:
    """Process single frame of WAV audio.

    Fans the frame out to all open sessions (or just one, when
    ``session_id`` is given) whose site id matches.  When silence detection
    decides a voice command ended, the session is finished and its result
    messages are yielded here.
    """
    # Don't process audio if no sessions
    if not self.sessions:
        return

    # Convert to the required sample rate/width/channels if needed
    audio_data = self.maybe_convert_wav(frame_wav_bytes)

    if session_id is None:
        # Add to every open session
        target_sessions = list(self.sessions.items())
    else:
        # Add to single session
        # NOTE(review): raises KeyError if session_id is unknown; callers
        # appear expected to check membership first — confirm.
        target_sessions = [(session_id, self.sessions[session_id])]

    # Add to every open session with matching site_id
    for target_id, info in target_sessions:
        try:
            assert info.start_listening is not None

            # Match site_id
            if info.start_listening.site_id != site_id:
                continue

            # Push to transcription thread
            info.frame_queue.put(audio_data)

            if info.recorder is not None:
                # Check for voice command end
                command = info.recorder.process_chunk(audio_data)
                if command:
                    # Trigger publishing of transcription on silence
                    async for result in self.finish_session(
                        info, site_id=site_id, session_id=target_id
                    ):
                        yield result
            else:
                # Use session audio buffer
                assert info.audio_buffer is not None
                info.audio_buffer += audio_data
        except Exception as e:
            _LOGGER.exception("handle_audio_frame")
            yield AsrError(
                error=str(e),
                context=repr(info.transcriber),
                site_id=site_id,
                session_id=target_id,
            )
async def finish_session(
    self, info: TranscriberInfo, site_id: str, session_id: typing.Optional[str]
) -> typing.AsyncIterable[
    typing.Union[AsrRecordingFinished, AsrTextCaptured, AudioCapturedType]
]:
    """Publish transcription result for a session if not already published.

    Pushes the end-of-stream sentinel (None) to the transcriber thread,
    waits (bounded) for its result, then yields AsrRecordingFinished, an
    AsrTextCaptured (possibly empty), and optionally the captured audio.
    ``info.result_sent`` guards against double publication.
    """
    if info.recorder is not None:
        # Stop silence detection and get trimmed audio
        audio_data = info.recorder.stop()
    else:
        # Use complete audio buffer
        assert info.audio_buffer is not None
        audio_data = info.audio_buffer

    if not info.result_sent:
        # Send recording finished message
        yield AsrRecordingFinished(site_id=site_id, session_id=session_id)

        # Avoid re-sending transcription
        info.result_sent = True

        # Last chunk: None ends the audio_stream generator in the
        # transcriber thread
        info.frame_queue.put(None)

        # Wait for result (bounded; a timed-out transcriber is not reused)
        result_success = info.result_event.wait(timeout=self.session_result_timeout)
        if not result_success:
            # Mark transcription as non-reusable
            info.reuse = False

        transcription = info.result
        assert info.start_listening is not None

        if transcription:
            # Successful transcription
            asr_tokens: typing.Optional[typing.List[typing.List[AsrToken]]] = None

            if transcription.tokens:
                # Only one level of ASR tokens
                asr_inner_tokens: typing.List[AsrToken] = []
                asr_tokens = [asr_inner_tokens]
                range_start = 0
                for ps_token in transcription.tokens:
                    # +1 accounts for the separator between tokens
                    range_end = range_start + len(ps_token.token) + 1
                    asr_inner_tokens.append(
                        AsrToken(
                            value=ps_token.token,
                            confidence=ps_token.likelihood,
                            range_start=range_start,
                            range_end=range_start + len(ps_token.token) + 1,
                            time=AsrTokenTime(
                                start=ps_token.start_time, end=ps_token.end_time
                            ),
                        )
                    )

                    range_start = range_end

            yield (
                AsrTextCaptured(
                    text=transcription.text,
                    likelihood=transcription.likelihood,
                    seconds=transcription.transcribe_seconds,
                    site_id=site_id,
                    session_id=session_id,
                    asr_tokens=asr_tokens,
                    lang=(info.start_listening.lang or self.lang),
                )
            )
        else:
            # Empty transcription (timeout or failure)
            yield AsrTextCaptured(
                text="",
                likelihood=0,
                seconds=0,
                site_id=site_id,
                session_id=session_id,
                lang=(info.start_listening.lang or self.lang),
            )

        if info.start_listening.send_audio_captured:
            wav_bytes = self.to_wav_bytes(audio_data)

            # Send audio data
            yield (
                # pylint: disable=E1121
                AsrAudioCaptured(wav_bytes),
                {"site_id": site_id, "session_id": session_id},
            )
# -------------------------------------------------------------------------
async def handle_train(
    self, train: AsrTrain, site_id: str = "default"
) -> typing.AsyncIterable[
    typing.Union[typing.Tuple[AsrTrainSuccess, TopicArgs], AsrError]
]:
    """Re-trains ASR system.

    Merges pronunciations from all base dictionaries (re-reading only those
    whose mtime changed), regenerates HCLG.fst from the intent graph unless
    ``no_overwrite_train`` is set, then prepares online decoding.  Yields
    AsrTrainSuccess or AsrError.
    """
    try:
        assert (
            self.model_dir and self.graph_dir
        ), "Model and graph dirs are required to train"

        # Load base dictionaries
        pronunciations: PronunciationsType = defaultdict(list)
        for base_dict in self.base_dictionaries:
            if not os.path.exists(base_dict.path):
                _LOGGER.warning(
                    "Base dictionary does not exist: %s", base_dict.path
                )
                continue

            # Re-load dictionary if modification time has changed
            dict_mtime_ns = os.stat(base_dict.path).st_mtime_ns
            if (base_dict.mtime_ns is None) or (
                base_dict.mtime_ns != dict_mtime_ns
            ):
                base_dict.mtime_ns = dict_mtime_ns
                _LOGGER.debug("Loading base dictionary from %s", base_dict.path)

                with open(base_dict.path, "r") as base_dict_file:
                    rhasspynlu.g2p.read_pronunciations(
                        base_dict_file, word_dict=base_dict.pronunciations
                    )

            # Merge cached pronunciations into the combined dictionary
            for word in base_dict.pronunciations:
                pronunciations[word].extend(base_dict.pronunciations[word])

        if not self.no_overwrite_train:
            # Load pickled intent graph from gzip
            _LOGGER.debug("Loading %s", train.graph_path)
            with gzip.GzipFile(train.graph_path, mode="rb") as graph_gzip:
                graph = nx.readwrite.gpickle.read_gpickle(graph_gzip)

            # Re-generate HCLG.fst
            _LOGGER.debug("Starting training")
            rhasspyasr_kaldi.train(
                graph,
                pronunciations,
                self.model_dir,
                self.graph_dir,
                dictionary=self.dictionary_path,
                language_model=self.language_model_path,
                language_model_type=self.language_model_type,
                dictionary_word_transform=self.dictionary_word_transform,
                g2p_model=self.g2p_model,
                g2p_word_transform=self.g2p_word_transform,
                missing_words_path=self.unknown_words,
                base_language_model_fst=self.base_language_model_fst,
                base_language_model_weight=self.base_language_model_weight,
                mixed_language_model_fst=self.mixed_language_model_fst,
                spn_phone=self.spn_phone,
                sil_phone=self.sil_phone,
                allow_unknown_words=self.allow_unknown_words,
                frequent_words=self.frequent_words,
                unk_prob=self.unknown_words_probability,
                sil_prob=self.silence_probability,
                unknown_token=self.unknown_token,
                max_unk_words=self.max_unknown_words,
                cancel_word=self.cancel_word,
                cancel_prob=self.cancel_probability,
            )
        else:
            _LOGGER.warning("Not overwriting HCLG.fst")

        # Always (re)prepare online decoding, even when HCLG.fst was kept
        kaldi_dir = rhasspyasr_kaldi.get_kaldi_dir()
        rhasspyasr_kaldi.train_prepare_online_decoding(
            self.model_dir, self.graph_dir, kaldi_dir
        )

        yield (AsrTrainSuccess(id=train.id), {"site_id": site_id})
    except Exception as e:
        _LOGGER.exception("train")
        yield AsrError(error=str(e), site_id=site_id, session_id=train.id)
async def handle_pronounce(
    self, pronounce: G2pPronounce
) -> typing.AsyncIterable[typing.Union[G2pPhonemes, G2pError]]:
    """Looks up or guesses word pronunciation(s).

    Words found in the base dictionaries get their dictionary
    pronunciations; the rest are guessed with the g2p model (when one is
    configured).  Yields a single G2pPhonemes result or a G2pError.
    """
    try:
        result = G2pPhonemes(
            word_phonemes={},
            id=pronounce.id,
            site_id=pronounce.site_id,
            session_id=pronounce.session_id,
        )

        # Load base dictionaries (merged into one lookup table)
        pronunciations: PronunciationsType = {}

        for base_dict in self.base_dictionaries:
            if base_dict.path.is_file():
                _LOGGER.debug("Loading base dictionary from %s", base_dict.path)
                with open(base_dict.path, "r") as base_dict_file:
                    rhasspynlu.g2p.read_pronunciations(
                        base_dict_file, word_dict=pronunciations
                    )

        # Try to look up in dictionary first
        missing_words: typing.Set[str] = set()
        if pronunciations:
            for word in pronounce.words:
                # Handle case transformation
                if self.dictionary_word_transform:
                    word = self.dictionary_word_transform(word)

                word_prons = pronunciations.get(word)
                if word_prons:
                    # Use dictionary pronunciations
                    result.word_phonemes[word] = [
                        G2pPronunciation(phonemes=p, guessed=False)
                        for p in word_prons
                    ]
                else:
                    # Will have to guess later
                    missing_words.add(word)
        else:
            # All words must be guessed
            missing_words.update(pronounce.words)

        if missing_words:
            if self.g2p_model:
                _LOGGER.debug("Guessing pronunciations of %s", missing_words)
                guesses = rhasspynlu.g2p.guess_pronunciations(
                    missing_words,
                    self.g2p_model,
                    g2p_word_transform=self.g2p_word_transform,
                    num_guesses=pronounce.num_guesses,
                )

                # Add guesses to result (a word may get multiple guesses)
                for guess_word, guess_phonemes in guesses:
                    result_phonemes = result.word_phonemes.get(guess_word) or []
                    result_phonemes.append(
                        G2pPronunciation(phonemes=guess_phonemes, guessed=True)
                    )
                    result.word_phonemes[guess_word] = result_phonemes
            else:
                _LOGGER.warning("No g2p model. Cannot guess pronunciations.")

        yield result
    except Exception as e:
        _LOGGER.exception("handle_pronounce")
        yield G2pError(
            error=str(e),
            context=f"model={self.model_dir}, graph={self.graph_dir}",
            site_id=pronounce.site_id,
            session_id=pronounce.session_id,
        )
# -------------------------------------------------------------------------
async def on_message_blocking(
    self, message: Message, site_id=None, session_id=None, topic=None
) -> GeneratorType:
    """Received message from MQTT broker.

    Dispatches each Hermes message type to its handler and yields whatever
    the handler yields.  Toggle messages maintain ``disabled_reasons``; the
    service is enabled only when that set is empty.
    """
    if isinstance(message, AsrToggleOn):
        if message.reason == AsrToggleReason.UNKNOWN:
            # Always enable on unknown
            self.disabled_reasons.clear()
        else:
            self.disabled_reasons.discard(message.reason)

        if self.disabled_reasons:
            # Other reasons still hold the service disabled
            _LOGGER.debug("Still disabled: %s", self.disabled_reasons)
        else:
            self.enabled = True
            self.first_audio = True
            _LOGGER.debug("Enabled")
    elif isinstance(message, AsrToggleOff):
        self.enabled = False
        self.disabled_reasons.add(message.reason)
        _LOGGER.debug("Disabled (%s)", message.reason)
    elif isinstance(message, AudioFrame):
        if self.enabled:
            # Add to all active sessions
            if self.first_audio:
                # Log only once per listening period
                _LOGGER.debug("Receiving audio")
                self.first_audio = False

            async for frame_result in self.handle_audio_frame(
                message.wav_bytes, site_id=site_id
            ):
                yield frame_result
    elif isinstance(message, AudioSessionFrame):
        if self.enabled:
            # Check site_id
            if session_id in self.sessions:
                if self.first_audio:
                    _LOGGER.debug("Receiving audio")
                    self.first_audio = False

                # Add to specific session only
                async for session_frame_result in self.handle_audio_frame(
                    message.wav_bytes, site_id=site_id, session_id=session_id
                ):
                    yield session_frame_result
    elif isinstance(message, AsrStartListening):
        # hermes/asr/startListening
        async for start_result in self.start_listening(message):
            yield start_result
    elif isinstance(message, AsrStopListening):
        # hermes/asr/stopListening
        async for stop_result in self.stop_listening(message):
            yield stop_result
    elif isinstance(message, AsrTrain):
        # rhasspy/asr/<site_id>/train
        async for train_result in self.handle_train(message, site_id=site_id):
            yield train_result
    elif isinstance(message, G2pPronounce):
        # rhasspy/g2p/pronounce
        async for pronounce_result in self.handle_pronounce(message):
            yield pronounce_result
    else:
        _LOGGER.warning("Unexpected message: %s", message)
# Rhasspy ASR Kaldi
[](https://github.com/rhasspy/rhasspy-asr-kaldi/actions)
[](https://github.com/rhasspy/rhasspy-asr-kaldi/blob/master/LICENSE)
Automated speech recognition in [Rhasspy](https://github.com/synesthesiam/rhasspy) voice assistant with [Kaldi](http://kaldi-asr.org).
## Requirements
* Python 3.7
* [Kaldi](https://kaldi-asr.org)
* Expects `$KALDI_DIR` in environment
* [Opengrm](http://www.opengrm.org/twiki/bin/view/GRM/NGramLibrary)
* Expects `ngram*` in `$PATH`
* [Phonetisaurus](https://github.com/AdolfVonKleist/Phonetisaurus)
* Expects `phonetisaurus-apply` in `$PATH`
See [pre-built apps](https://github.com/synesthesiam/prebuilt-apps) for pre-compiled binaries.
## Installation
```bash
$ git clone https://github.com/rhasspy/rhasspy-asr-kaldi
$ cd rhasspy-asr-kaldi
$ ./configure
$ make
$ make install
```
## Transcribing
Use `python3 -m rhasspyasr_kaldi transcribe <ARGS>`
```
usage: rhasspy-asr-kaldi transcribe [-h] --model-dir MODEL_DIR
[--graph-dir GRAPH_DIR]
[--model-type MODEL_TYPE]
[--frames-in-chunk FRAMES_IN_CHUNK]
[wav_file [wav_file ...]]
positional arguments:
wav_file WAV file(s) to transcribe
optional arguments:
-h, --help show this help message and exit
--model-dir MODEL_DIR
Path to Kaldi model directory (with conf, data)
--graph-dir GRAPH_DIR
Path to Kaldi graph directory (with HCLG.fst)
--model-type MODEL_TYPE
Either nnet3 or gmm (default: nnet3)
--frames-in-chunk FRAMES_IN_CHUNK
Number of frames to process at a time
```
For nnet3 models, the `online2-tcp-nnet3-decode-faster` program is used to handle streaming audio. For gmm models, audio is buffered and packaged as a WAV file before being transcribed.
## Training
Use `python3 -m rhasspyasr_kaldi train <ARGS>`
```
usage: rhasspy-asr-kaldi train [-h] --model-dir MODEL_DIR
[--graph-dir GRAPH_DIR]
[--intent-graph INTENT_GRAPH]
[--dictionary DICTIONARY]
[--dictionary-casing {upper,lower,ignore}]
[--language-model LANGUAGE_MODEL]
--base-dictionary BASE_DICTIONARY
[--g2p-model G2P_MODEL]
[--g2p-casing {upper,lower,ignore}]
optional arguments:
-h, --help show this help message and exit
--model-dir MODEL_DIR
Path to Kaldi model directory (with conf, data)
--graph-dir GRAPH_DIR
Path to Kaldi graph directory (with HCLG.fst)
--intent-graph INTENT_GRAPH
Path to intent graph JSON file (default: stdin)
--dictionary DICTIONARY
Path to write custom pronunciation dictionary
--dictionary-casing {upper,lower,ignore}
Case transformation for dictionary words (training,
default: ignore)
--language-model LANGUAGE_MODEL
Path to write custom language model
--base-dictionary BASE_DICTIONARY
Paths to pronunciation dictionaries
--g2p-model G2P_MODEL
Path to Phonetisaurus grapheme-to-phoneme FST model
--g2p-casing {upper,lower,ignore}
Case transformation for g2p words (training, default:
ignore)
```
This will generate a custom `HCLG.fst` from an intent graph created using [rhasspy-nlu](https://github.com/rhasspy/rhasspy-nlu). Your Kaldi model directory should be laid out like this:
* my_model/ (`--model-dir`)
* conf/
* mfcc_hires.conf
* data/
* local/
* dict/
* lexicon.txt (copied from `--dictionary`)
* lang/
* lm.arpa.gz (copied from `--language-model`)
* graph/ (`--graph-dir`)
* HCLG.fst (generated)
* model/
* final.mdl
* phones/
* extra_questions.txt
* nonsilence_phones.txt
* optional_silence.txt
* silence_phones.txt
* online/ (nnet3 only)
* extractor/ (nnet3 only)
When using the `train` command, you will need to specify the following arguments:
* `--intent-graph` - path to graph json file generated using [rhasspy-nlu](https://github.com/rhasspy/rhasspy-nlu)
* `--model-type` - either nnet3 or gmm
* `--model-dir` - path to top-level model directory (my_model in example above)
* `--graph-dir` - path to directory where HCLG.fst should be written (my_model/graph in example above)
* `--base-dictionary` - pronunciation dictionary with all words from intent graph (can be used multiple times)
* `--dictionary` - path to write custom pronunciation dictionary (optional)
* `--language-model` - path to write custom ARPA language model (optional)
## Building From Source
`rhasspy-asr-kaldi` depends on the following programs that must be compiled:
* [Kaldi](http://kaldi-asr.org)
* Speech to text engine
* [Opengrm](http://www.opengrm.org/twiki/bin/view/GRM/NGramLibrary)
* Create ARPA language models
* [Phonetisaurus](https://github.com/AdolfVonKleist/Phonetisaurus)
* Guesses pronunciations for unknown words
### Kaldi
Make sure you have the necessary dependencies installed:
```bash
sudo apt-get install \
build-essential \
libatlas-base-dev libatlas3-base gfortran \
automake autoconf unzip sox libtool subversion \
python3 python \
git zlib1g-dev
```
Download Kaldi and extract it:
```bash
wget -O kaldi-master.tar.gz \
'https://github.com/kaldi-asr/kaldi/archive/master.tar.gz'
tar -xvf kaldi-master.tar.gz
```
First, build Kaldi's tools:
```bash
cd kaldi-master/tools
make
```
Use `make -j 4` if you have multiple CPU cores. This will take a **long** time.
Next, build Kaldi itself:
```bash
cd kaldi-master
./configure --shared --mathlib=ATLAS
make depend
make
```
Use `make depend -j 4` and `make -j 4` if you have multiple CPU cores. This will take a **long** time.
There is no installation step. The `kaldi-master` directory contains all the libraries and programs that Rhasspy will need to access.
See [docker-kaldi](https://github.com/synesthesiam/docker-kaldi) for a Docker build script.
### Phonetisaurus
Make sure you have the necessary dependencies installed:
```bash
sudo apt-get install build-essential
```
First, download and build [OpenFST 1.6.2](http://www.openfst.org/)
```bash
wget http://www.openfst.org/twiki/pub/FST/FstDownload/openfst-1.6.2.tar.gz
tar -xvf openfst-1.6.2.tar.gz
cd openfst-1.6.2
./configure \
"--prefix=$(pwd)/build" \
--enable-static --enable-shared \
--enable-far --enable-ngram-fsts
make
make install
```
Use `make -j 4` if you have multiple CPU cores. This will take a **long** time.
Next, download and extract Phonetisaurus:
```bash
wget -O phonetisaurus-master.tar.gz \
'https://github.com/AdolfVonKleist/Phonetisaurus/archive/master.tar.gz'
tar -xvf phonetisaurus-master.tar.gz
```
Finally, build Phonetisaurus (where `/path/to/openfst` is the `openfst-1.6.2` directory from above):
```
cd Phonetisaurus-master
./configure \
--with-openfst-includes=/path/to/openfst/build/include \
--with-openfst-libs=/path/to/openfst/build/lib
make
make install
```
Use `make -j 4` if you have multiple CPU cores. This will take a **long** time.
You should now be able to run the `phonetisaurus-align` and `phonetisaurus-apply` programs (`phonetisaurus-apply` is the one expected in `$PATH` — see Requirements above).
See [docker-phonetisaurus](https://github.com/synesthesiam/docker-phonetisaurus) for a Docker build script.
| /rhasspy-asr-kaldi-0.6.1.tar.gz/rhasspy-asr-kaldi-0.6.1/README.md | 0.53777 | 0.885086 | README.md | pypi |
import logging
import os
import shlex
import shutil
import subprocess
import tempfile
import typing
from enum import Enum
from pathlib import Path
import networkx as nx
import rhasspynlu
from rhasspynlu.g2p import PronunciationsType
_DIR = Path(__file__).parent
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class LanguageModelType(str, Enum):
    """Type of language model used to train Kaldi."""

    # Standard ARPA n-gram language model (compiled with format_lm.sh)
    ARPA = "arpa"

    # Text-format FST written directly and compiled to G.fst with fstcompile
    TEXT_FST = "text_fst"
def get_kaldi_dir() -> Path:
    """Return the directory of the Kaldi installation.

    The ``KALDI_DIR`` environment variable takes precedence; otherwise a
    ``kaldi`` directory next to this module is used.
    """
    env_dir = os.environ.get("KALDI_DIR")
    if env_dir is not None:
        return Path(env_dir)

    return _DIR / "kaldi"
# -----------------------------------------------------------------------------
def train(
    graph: nx.DiGraph,
    pronunciations: PronunciationsType,
    model_dir: typing.Union[str, Path],
    graph_dir: typing.Union[str, Path],
    dictionary: typing.Optional[typing.Union[str, Path]] = None,
    language_model: typing.Optional[typing.Union[str, Path]] = None,
    dictionary_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    g2p_model: typing.Optional[typing.Union[str, Path]] = None,
    g2p_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    missing_words_path: typing.Optional[Path] = None,
    vocab_path: typing.Optional[typing.Union[str, Path]] = None,
    language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    base_language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    base_language_model_weight: typing.Optional[float] = None,
    mixed_language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    balance_counts: bool = True,  # NOTE(review): not used in this body — confirm intent
    kaldi_dir: typing.Optional[Path] = None,
    language_model_type: LanguageModelType = LanguageModelType.ARPA,
    spn_phone: str = "SPN",
):
    """Re-generates HCLG.fst from intent graph.

    Writes the language model (ARPA or text FST) and pronunciation
    dictionary to temporary files (or to ``language_model``/``dictionary``
    when given), then calls :func:`train_kaldi` to build HCLG.fst.
    ``pronunciations`` is mutated: an ``<unk>`` entry is added.
    """
    g2p_word_transform = g2p_word_transform or (lambda s: s)

    # Determine directory with Kaldi binaries
    if kaldi_dir is None:
        kaldi_dir = get_kaldi_dir()

    assert kaldi_dir is not None
    _LOGGER.debug("Using kaldi at %s", str(kaldi_dir))

    vocabulary: typing.Set[str] = set()

    if vocab_path:
        # Persist vocabulary to the caller-provided path
        vocab_file = open(vocab_path, "w+")
    else:
        vocab_file = typing.cast(
            typing.TextIO, tempfile.NamedTemporaryFile(suffix=".txt", mode="w+")
        )
        vocab_path = vocab_file.name

    # Language model mixing (only when both a base FST and a positive
    # weight are supplied)
    is_mixing = False
    base_fst_weight = None
    if (
        (base_language_model_fst is not None)
        and (base_language_model_weight is not None)
        and (base_language_model_weight > 0)
    ):
        is_mixing = True
        base_fst_weight = (base_language_model_fst, base_language_model_weight)

    # Begin training
    with tempfile.NamedTemporaryFile(mode="w+") as lm_file:
        with vocab_file:
            if language_model_type == LanguageModelType.TEXT_FST:
                _LOGGER.debug("Writing G.fst directly")
                graph_to_g_fst(graph, lm_file, vocab_file)
            else:
                # Create language model from ARPA
                _LOGGER.debug("Converting to ARPA language model")
                rhasspynlu.arpa_lm.graph_to_arpa(
                    graph,
                    lm_file.name,
                    vocab_path=vocab_path,
                    model_path=language_model_fst,
                    base_fst_weight=base_fst_weight,
                    merge_path=mixed_language_model_fst,
                )

            # Load vocabulary (one word per line)
            vocab_file.seek(0)
            vocabulary.update(line.strip() for line in vocab_file)

            if is_mixing:
                # Add all known words so the base model's words stay usable
                vocabulary.update(pronunciations.keys())

        assert vocabulary, "No words in vocabulary"

        # <unk> maps to the spoken-noise phone
        vocabulary.add("<unk>")
        pronunciations["<unk>"] = [[spn_phone]]

        # Write dictionary to temporary file
        with tempfile.NamedTemporaryFile(mode="w+") as dictionary_file:
            _LOGGER.debug("Writing pronunciation dictionary")
            rhasspynlu.g2p.write_pronunciations(
                vocabulary,
                pronunciations,
                dictionary_file.name,
                g2p_model=g2p_model,
                g2p_word_transform=g2p_word_transform,
                missing_words_path=missing_words_path,
            )

            # -----------------------------------------------------------------

            dictionary_file.seek(0)
            if dictionary:
                # Copy dictionary over real file
                shutil.copy(dictionary_file.name, dictionary)
                _LOGGER.debug("Wrote dictionary to %s", str(dictionary))
            else:
                # Use the temporary file directly (lives until this block ends)
                dictionary = Path(dictionary_file.name)
                dictionary_file.seek(0)

            lm_file.seek(0)
            if language_model:
                # Copy language model over real file
                shutil.copy(lm_file.name, language_model)
                _LOGGER.debug("Wrote language model to %s", str(language_model))
            else:
                language_model = Path(lm_file.name)
                lm_file.seek(0)

            # Generate HCLG.fst
            train_kaldi(
                model_dir,
                graph_dir,
                dictionary,
                language_model,
                kaldi_dir=kaldi_dir,
                language_model_type=language_model_type,
            )
# -----------------------------------------------------------------------------
def graph_to_g_fst(
    graph: nx.DiGraph,
    fst_file: typing.IO[str],
    vocab_file: typing.IO[str],
    eps: str = "<eps>",
):
    """
    Write G.fst text file using intent graph.

    Compiled later on with fstcompile.
    """
    words: typing.Set[str] = set()
    node_data = graph.nodes(data=True)
    accept_states: typing.Set[int] = set()
    state_ids: typing.Dict[int, int] = {}

    def state_of(node: int) -> int:
        # Number states consecutively from 0 in first-seen order
        return state_ids.setdefault(node, len(state_ids))

    # Locate the start node of the intent graph
    start_node: int = next(n for n, data in node_data if data.get("start"))

    # One epsilon transition from the start state into each intent sub-graph
    for _, intent_node in graph.edges(start_node):
        print(
            f"{state_of(start_node)} {state_of(intent_node)} {eps} {eps} 0.0",
            file=fst_file,
        )

        # Walk the intent sub-graph breadth-first over edges
        for edge in nx.edge_bfs(graph, intent_node):
            edge_data = graph.edges[edge]
            src_node, dst_node = edge

            # Empty input label means an epsilon transition
            label = edge_data.get("ilabel", "") or eps

            # Whitespace would corrupt the text FST format
            assert (
                " " not in label
            ), f"Input symbol cannot contain whitespace: {label}"

            if label != eps:
                words.add(label)

            print(
                f"{state_of(src_node)} {state_of(dst_node)} {label} {label} 0.0",
                file=fst_file,
            )

            # Track accepting states
            if node_data[src_node].get("final", False):
                accept_states.add(state_ids[src_node])

            if node_data[dst_node].get("final", False):
                accept_states.add(state_ids[dst_node])

    # Record final states
    for state in accept_states:
        print(f"{state} 0.0", file=fst_file)

    # Write vocabulary (one word per line)
    for word in words:
        print(word, file=vocab_file)
# -----------------------------------------------------------------------------
def train_kaldi(
    model_dir: typing.Union[str, Path],
    graph_dir: typing.Union[str, Path],
    dictionary: typing.Union[str, Path],
    language_model: typing.Union[str, Path],
    kaldi_dir: typing.Union[str, Path],
    language_model_type: LanguageModelType = LanguageModelType.ARPA,
):
    """Generates HCLG.fst from dictionary and language model.

    Runs the standard Kaldi WSJ recipe scripts in order:
    prepare_lang.sh, then format_lm.sh (ARPA) or fstcompile (text FST),
    then mkgraph.sh, then prepare_online_decoding.sh.  Deletes any
    existing data/ and graph directories first.
    """
    # Convert to paths
    model_dir = Path(model_dir)
    graph_dir = Path(graph_dir)
    kaldi_dir = Path(kaldi_dir)

    # -------------------------------------------------------------------------
    # Kaldi Training
    # ---------------------------------------------------------
    # 1. prepare_lang.sh
    # 2. format_lm.sh (or fstcompile)
    # 3. mkgraph.sh
    # 4. prepare_online_decoding.sh
    # ---------------------------------------------------------

    # Extend PATH so the egs scripts find Kaldi binaries
    egs_utils_dir = kaldi_dir / "egs" / "wsj" / "s5" / "utils"
    extended_env = os.environ.copy()
    extended_env["PATH"] = (
        str(kaldi_dir) + ":" + str(egs_utils_dir) + ":" + extended_env["PATH"]
    )

    # Create empty path.sh (sourced by the egs scripts)
    path_sh = model_dir / "path.sh"
    if not path_sh.is_file():
        path_sh.write_text("")

    # Delete existing data/graph
    data_dir = model_dir / "data"
    if data_dir.exists():
        shutil.rmtree(data_dir)

    if graph_dir.exists():
        shutil.rmtree(graph_dir)

    data_local_dir = model_dir / "data" / "local"

    _LOGGER.debug("Generating lexicon")
    dict_local_dir = data_local_dir / "dict"
    dict_local_dir.mkdir(parents=True, exist_ok=True)

    # Copy phones
    phones_dir = model_dir / "phones"
    for phone_file in phones_dir.glob("*.txt"):
        shutil.copy(phone_file, dict_local_dir / phone_file.name)

    # Copy dictionary
    shutil.copy(dictionary, dict_local_dir / "lexicon.txt")

    # Create utils link (egs scripts expect ./utils)
    model_utils_link = model_dir / "utils"

    try:
        # Can't use missing_ok in 3.6
        model_utils_link.unlink()
    except Exception:
        pass

    model_utils_link.symlink_to(egs_utils_dir, target_is_directory=True)

    # 1. prepare_lang.sh
    lang_dir = data_dir / "lang"
    lang_local_dir = data_local_dir / "lang"
    prepare_lang = [
        "bash",
        str(egs_utils_dir / "prepare_lang.sh"),
        str(dict_local_dir),
        "<unk>",
        str(lang_local_dir),
        str(lang_dir),
    ]

    _LOGGER.debug(prepare_lang)
    subprocess.check_call(prepare_lang, cwd=model_dir, env=extended_env)

    if language_model_type == LanguageModelType.TEXT_FST:
        # 2. fstcompile > G.fst
        # NOTE(review): shlex.quote in a no-shell argv list is a no-op for
        # plain paths but would pass literal quotes for paths containing
        # special characters — confirm and consider removing.
        compile_grammar = [
            "fstcompile",
            shlex.quote(f"--isymbols={lang_dir}/words.txt"),
            shlex.quote(f"--osymbols={lang_dir}/words.txt"),
            "--keep_isymbols=false",
            "--keep_osymbols=false",
            shlex.quote(str(language_model)),
            shlex.quote(str(lang_dir / "G.fst")),
        ]

        subprocess.check_call(compile_grammar, cwd=model_dir, env=extended_env)
    else:
        # 2. format_lm.sh
        lm_arpa = lang_local_dir / "lm.arpa"
        lm_arpa.parent.mkdir(parents=True, exist_ok=True)
        shutil.copy(language_model, lm_arpa)

        # format_lm.sh expects a gzipped ARPA model
        gzip_lm = ["gzip", str(lm_arpa)]
        _LOGGER.debug(gzip_lm)
        subprocess.check_call(gzip_lm, cwd=lm_arpa.parent, env=extended_env)

        format_lm = [
            "bash",
            str(egs_utils_dir / "format_lm.sh"),
            str(lang_dir),
            str(lm_arpa.with_suffix(".arpa.gz")),
            str(dict_local_dir / "lexicon.txt"),
            str(lang_dir),
        ]

        _LOGGER.debug(format_lm)
        subprocess.check_call(format_lm, cwd=model_dir, env=extended_env)

    # 3. mkgraph.sh
    mkgraph = [
        "bash",
        str(egs_utils_dir / "mkgraph.sh"),
        str(lang_dir),
        str(model_dir / "model"),
        str(graph_dir),
    ]
    _LOGGER.debug(mkgraph)
    subprocess.check_call(mkgraph, cwd=model_dir, env=extended_env)

    # 4. prepare_online_decoding.sh
    train_prepare_online_decoding(model_dir, lang_dir, kaldi_dir)
def train_prepare_online_decoding(
    model_dir: typing.Union[str, Path],
    lang_dir: typing.Union[str, Path],
    kaldi_dir: typing.Union[str, Path],
):
    """Prepare model for online decoding.

    Runs Kaldi's prepare_online_decoding.sh for nnet3 models so the model can
    be used with online (streaming) decoders.  This is a no-op for models
    without an "extractor" directory (i.e. non-nnet3/GMM models).

    Side effects (all under model_dir): creates path.sh if missing, replaces
    the "utils" symlink, and generates the "online" configuration directory.
    Raises subprocess.CalledProcessError if the Kaldi script fails.
    """
    model_dir = Path(model_dir)
    kaldi_dir = Path(kaldi_dir)
    # prepare_online_decoding.sh (nnet3 only)
    extractor_dir = model_dir / "extractor"
    if extractor_dir.is_dir():
        # Extend PATH so Kaldi binaries and wsj/s5 utility scripts are found
        # by the shell script below.
        egs_utils_dir = kaldi_dir / "egs" / "wsj" / "s5" / "utils"
        extended_env = os.environ.copy()
        extended_env["PATH"] = (
            str(kaldi_dir) + ":" + str(egs_utils_dir) + ":" + extended_env["PATH"]
        )
        # Create empty path.sh (sourced by Kaldi scripts; must exist)
        path_sh = model_dir / "path.sh"
        if not path_sh.is_file():
            path_sh.write_text("")
        # Create utils link (Kaldi scripts expect ./utils relative to cwd)
        model_utils_link = model_dir / "utils"
        try:
            # Can't use missing_ok in 3.6
            model_utils_link.unlink()
        except Exception:
            pass
        model_utils_link.symlink_to(egs_utils_dir, target_is_directory=True)
        # Generate online.conf
        mfcc_conf = model_dir / "conf" / "mfcc_hires.conf"
        egs_steps_dir = kaldi_dir / "egs" / "wsj" / "s5" / "steps"
        prepare_online_decoding = [
            "bash",
            str(egs_steps_dir / "online" / "nnet3" / "prepare_online_decoding.sh"),
            "--mfcc-config",
            str(mfcc_conf),
            str(lang_dir),
            str(extractor_dir),
            str(model_dir / "model"),
            str(model_dir / "online"),
        ]
        _LOGGER.debug(prepare_online_decoding)
        # stderr is merged into stdout so Kaldi's progress output is captured
        # together; check=True raises on a non-zero exit code.
        subprocess.run(
            prepare_online_decoding,
            cwd=model_dir,
            env=extended_env,
            stderr=subprocess.STDOUT,
            check=True,
        )
# ----------------------------------------------------------------------------- | /rhasspy-asr-kaldi-0.6.1.tar.gz/rhasspy-asr-kaldi-0.6.1/rhasspyasr_kaldi/train.py | 0.63023 | 0.268196 | train.py | pypi |
# Rhasspy ASR Pocketsphinx Hermes MQTT Service
[](https://github.com/rhasspy/rhasspy-asr-pocketsphinx-hermes/actions)
[](https://github.com/rhasspy/rhasspy-asr-pocketsphinx-hermes/blob/master/LICENSE)
Implements `hermes/asr` functionality from [Hermes protocol](https://docs.snips.ai/reference/hermes) using [rhasspy-asr-pocketsphinx](https://github.com/rhasspy/rhasspy-asr-pocketsphinx).
## Installation
```bash
$ git clone https://github.com/rhasspy/rhasspy-asr-pocketsphinx-hermes
$ cd rhasspy-asr-pocketsphinx-hermes
$ ./configure
$ make
$ make install
```
## Running
```bash
$ bin/rhasspy-asr-pocketsphinx-hermes <ARGS>
```
## Command-Line Options
```
usage: rhasspy-asr-pocketsphinx-hermes [-h] --acoustic-model ACOUSTIC_MODEL
--dictionary DICTIONARY
[--dictionary-casing {upper,lower,ignore}]
--language-model LANGUAGE_MODEL
[--mllr-matrix MLLR_MATRIX]
[--base-dictionary BASE_DICTIONARY]
[--g2p-model G2P_MODEL]
[--g2p-casing {upper,lower,ignore}]
[--unknown-words UNKNOWN_WORDS]
[--no-overwrite-train]
[--intent-graph INTENT_GRAPH]
[--base-language-model-fst BASE_LANGUAGE_MODEL_FST]
[--base-language-model-weight BASE_LANGUAGE_MODEL_WEIGHT]
[--mixed-language-model-fst MIXED_LANGUAGE_MODEL_FST]
[--voice-skip-seconds VOICE_SKIP_SECONDS]
[--voice-min-seconds VOICE_MIN_SECONDS]
[--voice-speech-seconds VOICE_SPEECH_SECONDS]
[--voice-silence-seconds VOICE_SILENCE_SECONDS]
[--voice-before-seconds VOICE_BEFORE_SECONDS]
[--voice-sensitivity {1,2,3}]
[--host HOST] [--port PORT]
[--username USERNAME]
[--password PASSWORD] [--tls]
[--tls-ca-certs TLS_CA_CERTS]
[--tls-certfile TLS_CERTFILE]
[--tls-keyfile TLS_KEYFILE]
[--tls-cert-reqs {CERT_REQUIRED,CERT_OPTIONAL,CERT_NONE}]
[--tls-version TLS_VERSION]
[--tls-ciphers TLS_CIPHERS]
[--site-id SITE_ID] [--debug]
[--log-format LOG_FORMAT]
optional arguments:
-h, --help show this help message and exit
--acoustic-model ACOUSTIC_MODEL
Path to Pocketsphinx acoustic model directory (hmm)
--dictionary DICTIONARY
Path to read/write pronunciation dictionary file
--dictionary-casing {upper,lower,ignore}
Case transformation for dictionary words (training,
default: ignore)
--language-model LANGUAGE_MODEL
Path to read/write ARPA language model file
--mllr-matrix MLLR_MATRIX
Path to read tuned MLLR matrix file
--base-dictionary BASE_DICTIONARY
Path(s) to base pronunciation dictionary file(s)
(training)
--g2p-model G2P_MODEL
Phonetisaurus FST model for guessing word
pronunciations (training)
--g2p-casing {upper,lower,ignore}
Case transformation for g2p words (training, default:
ignore)
--unknown-words UNKNOWN_WORDS
Path to write missing words from dictionary (training)
--no-overwrite-train Don't overwrite dictionary/language model during
training
--intent-graph INTENT_GRAPH
Path to intent graph (gzipped pickle)
--base-language-model-fst BASE_LANGUAGE_MODEL_FST
Path to base language model FST (training, mixed)
--base-language-model-weight BASE_LANGUAGE_MODEL_WEIGHT
Weight to give base language model (training, mixed)
--mixed-language-model-fst MIXED_LANGUAGE_MODEL_FST
Path to write mixed language model FST (training,
mixed)
--voice-skip-seconds VOICE_SKIP_SECONDS
Seconds of audio to skip before a voice command
--voice-min-seconds VOICE_MIN_SECONDS
Minimum number of seconds for a voice command
--voice-speech-seconds VOICE_SPEECH_SECONDS
Consecutive seconds of speech before start
--voice-silence-seconds VOICE_SILENCE_SECONDS
Consecutive seconds of silence before stop
--voice-before-seconds VOICE_BEFORE_SECONDS
Seconds to record before start
--voice-sensitivity {1,2,3}
VAD sensitivity (1-3)
--host HOST MQTT host (default: localhost)
--port PORT MQTT port (default: 1883)
--username USERNAME MQTT username
--password PASSWORD MQTT password
--tls Enable MQTT TLS
--tls-ca-certs TLS_CA_CERTS
MQTT TLS Certificate Authority certificate files
--tls-certfile TLS_CERTFILE
MQTT TLS certificate file (PEM)
--tls-keyfile TLS_KEYFILE
MQTT TLS key file (PEM)
--tls-cert-reqs {CERT_REQUIRED,CERT_OPTIONAL,CERT_NONE}
MQTT TLS certificate requirements (default:
CERT_REQUIRED)
--tls-version TLS_VERSION
MQTT TLS version (default: highest)
--tls-ciphers TLS_CIPHERS
MQTT TLS ciphers to use
--site-id SITE_ID Hermes site id(s) to listen for (default: all)
--debug Print DEBUG messages to the console
--log-format LOG_FORMAT
Python logger format
```
| /rhasspy-asr-pocketsphinx-hermes-0.4.0.tar.gz/rhasspy-asr-pocketsphinx-hermes-0.4.0/README.md | 0.449876 | 0.701815 | README.md | pypi |
import gzip
import logging
import os
import tempfile
import typing
from collections import defaultdict
from dataclasses import dataclass, field
from pathlib import Path
import networkx as nx
import rhasspyasr_pocketsphinx
import rhasspynlu
from rhasspyasr import Transcriber
from rhasspynlu.g2p import PronunciationsType
from rhasspyhermes.asr import (
AsrAudioCaptured,
AsrError,
AsrRecordingFinished,
AsrStartListening,
AsrStopListening,
AsrTextCaptured,
AsrToggleOff,
AsrToggleOn,
AsrToggleReason,
AsrTrain,
AsrTrainSuccess,
)
from rhasspyhermes.audioserver import AudioFrame, AudioSessionFrame
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.g2p import G2pError, G2pPhonemes, G2pPronounce, G2pPronunciation
from rhasspyhermes.nlu import AsrToken, AsrTokenTime
from rhasspysilence import (
SilenceMethod,
VoiceCommandRecorder,
VoiceCommandResult,
WebRtcVadRecorder,
)
_LOGGER = logging.getLogger("rhasspyasr_pocketsphinx_hermes")
# -----------------------------------------------------------------------------
@dataclass
class SessionInfo:
    """Information about an open ASR listening session."""
    # The startListening message that opened this session
    start_listening: AsrStartListening
    # Hermes session id (None for the site-wide/default session)
    session_id: typing.Optional[str] = None
    # Voice command recorder, used when stop_on_silence is requested;
    # otherwise audio is accumulated in audio_buffer instead
    recorder: typing.Optional[VoiceCommandRecorder] = None
    # True once a transcription has been published for this session
    transcription_sent: bool = False
    # Running total of WAV bytes received (for logging/diagnostics)
    num_wav_bytes: int = 0
    # Raw audio accumulated when no recorder is used
    audio_buffer: typing.Optional[bytes] = None
    # Custom transcriber for filtered intents
    transcriber: typing.Optional[Transcriber] = None
@dataclass
class PronunciationDictionary:
    """Details of a phonetic dictionary."""
    # Path to the dictionary file on disk
    path: Path
    # word -> list of pronunciations, loaded lazily from path
    pronunciations: PronunciationsType = field(default_factory=dict)
    # st_mtime_ns of path at last load; used to detect changes and re-load
    mtime_ns: typing.Optional[int] = None
# -----------------------------------------------------------------------------
class AsrHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy ASR using Pocketsphinx."""
    def __init__(
        self,
        client,
        transcriber_factory: typing.Callable[[Path], Transcriber],
        dictionary: Path,
        language_model: Path,
        base_dictionaries: typing.Optional[typing.List[Path]] = None,
        dictionary_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        g2p_model: typing.Optional[Path] = None,
        g2p_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        unknown_words: typing.Optional[Path] = None,
        no_overwrite_train: bool = False,
        intent_graph_path: typing.Optional[Path] = None,
        base_language_model_fst: typing.Optional[Path] = None,
        base_language_model_weight: float = 0,
        mixed_language_model_fst: typing.Optional[Path] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        enabled: bool = True,
        sample_rate: int = 16000,
        sample_width: int = 2,
        channels: int = 1,
        make_recorder: typing.Optional[
            typing.Callable[[], VoiceCommandRecorder]
        ] = None,
        skip_seconds: float = 0.0,
        min_seconds: float = 1.0,
        max_seconds: typing.Optional[float] = None,
        speech_seconds: float = 0.3,
        silence_seconds: float = 0.5,
        before_seconds: float = 0.5,
        vad_mode: int = 3,
        max_energy: typing.Optional[float] = None,
        max_current_energy_ratio_threshold: typing.Optional[float] = None,
        current_energy_threshold: typing.Optional[float] = None,
        silence_method: SilenceMethod = SilenceMethod.VAD_ONLY,
        lm_cache_dir: typing.Optional[typing.Union[str, Path]] = None,
        lang: typing.Optional[str] = None,
    ):
        """Create service and subscribe to Hermes ASR/G2P/audio topics.

        transcriber_factory is called with a language-model path whenever a
        (re-)load of a transcriber is needed.  dictionary/language_model are
        the read/write artifacts produced by training.  The voice-* parameters
        configure the default WebRtcVadRecorder; make_recorder overrides that
        factory entirely.
        """
        super().__init__(
            "rhasspyasr_pocketsphinx_hermes",
            client,
            site_ids=site_ids,
            sample_rate=sample_rate,
            sample_width=sample_width,
            channels=channels,
        )
        self.subscribe(
            AsrToggleOn,
            AsrToggleOff,
            AsrStartListening,
            AsrStopListening,
            G2pPronounce,
            AudioFrame,
            AudioSessionFrame,
            AsrTrain,
        )
        self.make_transcriber = transcriber_factory
        self.transcriber: typing.Optional[Transcriber] = None
        # Intent graph from training
        self.intent_graph_path: typing.Optional[Path] = intent_graph_path
        self.intent_graph: typing.Optional[nx.DiGraph] = None
        # Files to write during training
        self.dictionary = dictionary
        self.language_model = language_model
        # Cache for filtered language model.
        # Keyed by the comma-joined intent filter (see
        # maybe_load_filtered_transcriber).
        self.lm_cache_dir = lm_cache_dir
        self.lm_cache_paths: typing.Dict[str, Path] = {}
        self.lm_cache_transcribers: typing.Dict[str, Transcriber] = {}
        # Pronunciation dictionaries and word transform function
        base_dictionaries = base_dictionaries or []
        self.base_dictionaries = [
            PronunciationDictionary(path=path) for path in base_dictionaries
        ]
        self.dictionary_word_transform = dictionary_word_transform
        # Grapheme-to-phonme model (Phonetisaurus FST) and word transform
        # function.
        self.g2p_model = g2p_model
        self.g2p_word_transform = g2p_word_transform
        # Path to write missing words and guessed pronunciations
        self.unknown_words = unknown_words
        # Mixed language model
        self.base_language_model_fst = base_language_model_fst
        self.base_language_model_weight = base_language_model_weight
        self.mixed_language_model_fst = mixed_language_model_fst
        # If True, dictionary and language model won't be overwritten during training
        self.no_overwrite_train = no_overwrite_train
        # True if ASR system is enabled
        self.enabled = enabled
        self.disabled_reasons: typing.Set[str] = set()
        self.lang = lang
        def default_recorder():
            # Default recorder factory, used when make_recorder is not given.
            # Captures the voice-* constructor parameters by closure.
            return WebRtcVadRecorder(
                max_seconds=max_seconds,
                vad_mode=vad_mode,
                skip_seconds=skip_seconds,
                min_seconds=min_seconds,
                speech_seconds=speech_seconds,
                silence_seconds=silence_seconds,
                before_seconds=before_seconds,
                silence_method=silence_method,
                current_energy_threshold=current_energy_threshold,
                max_energy=max_energy,
                max_current_ratio_threshold=max_current_energy_ratio_threshold,
            )
        self.make_recorder = make_recorder or default_recorder
        # WAV buffers for each session (key is session id; None is allowed)
        self.sessions: typing.Dict[typing.Optional[str], SessionInfo] = {}
        # Used only to log "Receiving audio" once per listening period
        self.first_audio: bool = True
    # -------------------------------------------------------------------------
    async def start_listening(self, message: AsrStartListening) -> None:
        """Start recording audio data for a session.

        Creates (or reuses) a SessionInfo.  With stop_on_silence, a voice
        command recorder detects the end of the command; otherwise audio is
        buffered until stopListening arrives.
        """
        session = self.sessions.get(message.session_id)
        if not session:
            session = SessionInfo(
                session_id=message.session_id, start_listening=message
            )
            if message.stop_on_silence:
                # Use voice command recorder
                session.recorder = self.make_recorder()
            else:
                # Use buffer
                session.audio_buffer = bytes()
            if message.intent_filter:
                # Load filtered language model
                self.maybe_load_filtered_transcriber(session, message.intent_filter)
            self.sessions[message.session_id] = session
        # Start session
        assert session
        if session.recorder:
            session.recorder.start()
        _LOGGER.debug("Starting listening (session_id=%s)", message.session_id)
        self.first_audio = True
    async def stop_listening(
        self, message: AsrStopListening
    ) -> typing.AsyncIterable[
        typing.Union[
            AsrRecordingFinished,
            AsrTextCaptured,
            AsrError,
            typing.Tuple[AsrAudioCaptured, typing.Dict[str, typing.Any]],
        ]
    ]:
        """Stop recording audio data for a session.

        Yields AsrRecordingFinished and AsrTextCaptured (unless a
        transcription was already sent by silence detection), optionally
        followed by AsrAudioCaptured, or AsrError on failure.
        """
        try:
            session = self.sessions.pop(message.session_id, None)
            if session:
                # Stop session
                if session.recorder:
                    audio_data = session.recorder.stop()
                else:
                    assert session.audio_buffer is not None
                    audio_data = session.audio_buffer
                wav_bytes = self.to_wav_bytes(audio_data)
                _LOGGER.debug(
                    "Received a total of %s byte(s) for WAV data for session %s",
                    session.num_wav_bytes,
                    message.session_id,
                )
                if not session.transcription_sent:
                    # Send recording finished message
                    yield AsrRecordingFinished(
                        site_id=message.site_id, session_id=message.session_id
                    )
                    # Send transcription
                    session.transcription_sent = True
                    yield (
                        await self.transcribe(
                            wav_bytes,
                            transcriber=session.transcriber,
                            site_id=message.site_id,
                            session_id=message.session_id,
                            lang=(session.start_listening.lang or self.lang),
                        )
                    )
                    if session.start_listening.send_audio_captured:
                        # Send audio data
                        yield (
                            AsrAudioCaptured(wav_bytes=wav_bytes),
                            {
                                "site_id": message.site_id,
                                "session_id": message.session_id,
                            },
                        )
            _LOGGER.debug("Stopping listening (session_id=%s)", message.session_id)
        except Exception as e:
            _LOGGER.exception("stop_listening")
            yield AsrError(
                error=str(e),
                context=repr(self.transcriber),
                site_id=message.site_id,
                session_id=message.session_id,
            )
    async def handle_audio_frame(
        self,
        frame_wav_bytes: bytes,
        site_id: str = "default",
        session_id: typing.Optional[str] = None,
    ) -> typing.AsyncIterable[
        typing.Union[
            AsrRecordingFinished,
            AsrTextCaptured,
            AsrError,
            typing.Tuple[AsrAudioCaptured, typing.Dict[str, typing.Any]],
        ]
    ]:
        """Process single frame of WAV audio.

        With session_id=None the frame is fed to every open session
        (hermes/audioServer frames); otherwise only to that session.
        Sessions using a recorder may complete here (silence detected),
        in which case transcription results are yielded immediately.
        """
        # Don't process audio if no sessions
        if not self.sessions:
            return
        audio_data = self.maybe_convert_wav(frame_wav_bytes)
        if session_id is None:
            # Add to every open session
            target_sessions = list(self.sessions.items())
        else:
            # Add to single session
            target_sessions = [(session_id, self.sessions[session_id])]
        # Add audio to session(s)
        for target_id, session in target_sessions:
            try:
                # Skip if site_id doesn't match
                if session.start_listening.site_id != site_id:
                    continue
                session.num_wav_bytes += len(frame_wav_bytes)
                if session.recorder:
                    # Check for end of voice command
                    command = session.recorder.process_chunk(audio_data)
                    if command and (command.result == VoiceCommandResult.SUCCESS):
                        assert command.audio_data is not None
                        _LOGGER.debug(
                            "Voice command recorded for session %s (%s byte(s))",
                            target_id,
                            len(command.audio_data),
                        )
                        # Send recording finished message
                        yield AsrRecordingFinished(
                            site_id=site_id, session_id=target_id
                        )
                        # Mark sent so stop_listening won't transcribe again
                        session.transcription_sent = True
                        wav_bytes = self.to_wav_bytes(command.audio_data)
                        yield (
                            await self.transcribe(
                                wav_bytes,
                                transcriber=session.transcriber,
                                site_id=site_id,
                                session_id=target_id,
                                lang=(session.start_listening.lang or self.lang),
                            )
                        )
                        if session.start_listening.send_audio_captured:
                            # Send audio data
                            yield (
                                AsrAudioCaptured(wav_bytes=wav_bytes),
                                {"site_id": site_id, "session_id": target_id},
                            )
                        # Reset session (but keep open)
                        session.recorder.stop()
                        session.recorder.start()
                else:
                    # Add to buffer
                    assert session.audio_buffer is not None
                    session.audio_buffer += audio_data
            except Exception as e:
                _LOGGER.exception("handle_audio_frame")
                yield AsrError(
                    error=str(e),
                    context=repr(self.transcriber),
                    site_id=site_id,
                    session_id=target_id,
                )
    async def transcribe(
        self,
        wav_bytes: bytes,
        site_id: str,
        transcriber: typing.Optional[Transcriber] = None,
        session_id: typing.Optional[str] = None,
        lang: typing.Optional[str] = None,
    ) -> AsrTextCaptured:
        """Transcribe audio data and publish captured text.

        Uses the given (session-specific) transcriber if provided; otherwise
        lazily creates and caches the default transcriber.  Always returns an
        AsrTextCaptured (empty text on failed transcription).
        """
        if not transcriber and not self.transcriber:
            # Load default transcriber
            self.transcriber = self.make_transcriber(self.language_model)
        transcriber = transcriber or self.transcriber
        assert transcriber, "No transcriber"
        _LOGGER.debug("Transcribing %s byte(s) of audio data", len(wav_bytes))
        transcription = transcriber.transcribe_wav(wav_bytes)
        if transcription:
            _LOGGER.debug(transcription)
            asr_tokens: typing.Optional[typing.List[typing.List[AsrToken]]] = None
            if transcription.tokens:
                # Only one level of ASR tokens
                asr_inner_tokens: typing.List[AsrToken] = []
                asr_tokens = [asr_inner_tokens]
                # Character ranges assume tokens joined by single spaces;
                # range_end below equals the inline expression in AsrToken.
                range_start = 0
                for ps_token in transcription.tokens:
                    range_end = range_start + len(ps_token.token) + 1
                    asr_inner_tokens.append(
                        AsrToken(
                            value=ps_token.token,
                            confidence=ps_token.likelihood,
                            range_start=range_start,
                            range_end=range_start + len(ps_token.token) + 1,
                            time=AsrTokenTime(
                                start=ps_token.start_time, end=ps_token.end_time
                            ),
                        )
                    )
                    range_start = range_end
            # Actual transcription
            return AsrTextCaptured(
                text=transcription.text,
                likelihood=transcription.likelihood,
                seconds=transcription.transcribe_seconds,
                site_id=site_id,
                session_id=session_id,
                asr_tokens=asr_tokens,
                lang=lang,
            )
        _LOGGER.warning("Received empty transcription")
        return AsrTextCaptured(
            text="",
            likelihood=0,
            seconds=0,
            site_id=site_id,
            session_id=session_id,
            lang=lang,
        )
    async def handle_train(
        self, train: AsrTrain, site_id: str = "default"
    ) -> typing.AsyncIterable[
        typing.Union[typing.Tuple[AsrTrainSuccess, TopicArgs], AsrError]
    ]:
        """Re-trains ASR system.

        Loads base dictionaries (re-reading only those whose mtime changed),
        loads the new intent graph, invalidates the filtered-LM cache, and
        regenerates the dictionary/language model unless no_overwrite_train
        is set.  Yields AsrTrainSuccess or AsrError.
        """
        try:
            if not self.base_dictionaries:
                _LOGGER.warning(
                    "No base dictionaries provided. Training will likely fail."
                )
            # Load base dictionaries
            pronunciations: PronunciationsType = defaultdict(list)
            for base_dict in self.base_dictionaries:
                if not os.path.exists(base_dict.path):
                    _LOGGER.warning(
                        "Base dictionary does not exist: %s", base_dict.path
                    )
                    continue
                # Re-load dictionary if modification time has changed
                dict_mtime_ns = os.stat(base_dict.path).st_mtime_ns
                if (base_dict.mtime_ns is None) or (
                    base_dict.mtime_ns != dict_mtime_ns
                ):
                    base_dict.mtime_ns = dict_mtime_ns
                    _LOGGER.debug("Loading base dictionary from %s", base_dict.path)
                    with open(base_dict.path, "r") as base_dict_file:
                        rhasspynlu.g2p.read_pronunciations(
                            base_dict_file, word_dict=base_dict.pronunciations
                        )
                for word in base_dict.pronunciations:
                    pronunciations[word].extend(base_dict.pronunciations[word])
            # Load intent graph
            _LOGGER.debug("Loading %s", train.graph_path)
            with gzip.GzipFile(train.graph_path, mode="rb") as graph_gzip:
                self.intent_graph = nx.readwrite.gpickle.read_gpickle(graph_gzip)
            # Clean LM cache completely (cached LMs were built from the old
            # intent graph and are now stale)
            for lm_path in self.lm_cache_paths.values():
                try:
                    lm_path.unlink()
                except Exception:
                    pass
            self.lm_cache_paths = {}
            self.lm_cache_transcribers = {}
            # Generate dictionary/language model
            if not self.no_overwrite_train:
                _LOGGER.debug("Starting training")
                rhasspyasr_pocketsphinx.train(
                    self.intent_graph,
                    self.dictionary,
                    self.language_model,
                    pronunciations,
                    dictionary_word_transform=self.dictionary_word_transform,
                    g2p_model=self.g2p_model,
                    g2p_word_transform=self.g2p_word_transform,
                    missing_words_path=self.unknown_words,
                    base_language_model_fst=self.base_language_model_fst,
                    base_language_model_weight=self.base_language_model_weight,
                    mixed_language_model_fst=self.mixed_language_model_fst,
                )
            else:
                _LOGGER.warning("Not overwriting dictionary/language model")
            _LOGGER.debug("Re-loading transcriber")
            self.transcriber = self.make_transcriber(self.language_model)
            yield (AsrTrainSuccess(id=train.id), {"site_id": site_id})
        except Exception as e:
            _LOGGER.exception("handle_train")
            yield AsrError(
                error=str(e),
                context=repr(self.transcriber),
                site_id=site_id,
                session_id=train.id,
            )
    async def handle_pronounce(
        self, pronounce: G2pPronounce
    ) -> typing.AsyncIterable[typing.Union[G2pPhonemes, G2pError]]:
        """Looks up or guesses word pronunciation(s).

        Dictionary lookups are tried first; any words not found are guessed
        with the g2p model (if available).  Yields a single G2pPhonemes
        result, or G2pError on failure.
        """
        try:
            result = G2pPhonemes(
                word_phonemes={},
                id=pronounce.id,
                site_id=pronounce.site_id,
                session_id=pronounce.session_id,
            )
            # Load base dictionaries
            # NOTE(review): unlike handle_train, this re-reads every
            # dictionary on each request (no mtime cache).
            pronunciations: typing.Dict[str, typing.List[typing.List[str]]] = {}
            for base_dict in self.base_dictionaries:
                if base_dict.path.is_file():
                    _LOGGER.debug("Loading base dictionary from %s", base_dict.path)
                    with open(base_dict.path, "r") as base_dict_file:
                        rhasspynlu.g2p.read_pronunciations(
                            base_dict_file, word_dict=pronunciations
                        )
            # Try to look up in dictionary first
            missing_words: typing.Set[str] = set()
            if pronunciations:
                for word in pronounce.words:
                    # Handle case transformation
                    if self.dictionary_word_transform:
                        word = self.dictionary_word_transform(word)
                    word_prons = pronunciations.get(word)
                    if word_prons:
                        # Use dictionary pronunciations
                        result.word_phonemes[word] = [
                            G2pPronunciation(phonemes=p, guessed=False)
                            for p in word_prons
                        ]
                    else:
                        # Will have to guess later
                        missing_words.add(word)
            else:
                # All words must be guessed
                missing_words.update(pronounce.words)
            if missing_words:
                if self.g2p_model:
                    _LOGGER.debug("Guessing pronunciations of %s", missing_words)
                    guesses = rhasspynlu.g2p.guess_pronunciations(
                        missing_words,
                        self.g2p_model,
                        g2p_word_transform=self.g2p_word_transform,
                        num_guesses=pronounce.num_guesses,
                    )
                    # Add guesses to result
                    for guess_word, guess_phonemes in guesses:
                        result_phonemes = result.word_phonemes.get(guess_word) or []
                        result_phonemes.append(
                            G2pPronunciation(phonemes=guess_phonemes, guessed=True)
                        )
                        result.word_phonemes[guess_word] = result_phonemes
                else:
                    _LOGGER.warning("No g2p model. Cannot guess pronunciations.")
            yield result
        except Exception as e:
            _LOGGER.exception("handle_pronounce")
            yield G2pError(
                error=str(e),
                context=repr(self.transcriber),
                site_id=pronounce.site_id,
                session_id=pronounce.session_id,
            )
    def cleanup(self):
        """Delete any temporary files (cached filtered language models)."""
        for lm_path in self.lm_cache_paths.values():
            try:
                lm_path.unlink()
            except Exception:
                pass
    def maybe_load_filtered_transcriber(
        self, session: SessionInfo, intent_filter: typing.List[str]
    ):
        """Create/load a language model with only filtered intents.

        Results are cached per intent-filter set (comma-joined key) for the
        lifetime of the service or until the next training.  On success the
        session's transcriber is set; otherwise it stays None and the
        default (unfiltered) transcriber is used.
        """
        lm_key = ",".join(intent_filter)
        # Try to look up in cache
        lm_transcriber = self.lm_cache_transcribers.get(lm_key)
        if not lm_transcriber:
            lm_path = self.lm_cache_paths.get(lm_key)
            if not lm_path:
                # Create a new temporary file.
                # delete=False because the path outlives this handle; it is
                # removed in cleanup()/handle_train().
                lm_file = tempfile.NamedTemporaryFile(
                    suffix=".arpa", dir=self.lm_cache_dir, delete=False
                )
                lm_path = Path(lm_file.name)
                self.lm_cache_paths[lm_key] = lm_path
            # Function to filter intents by name
            def intent_filter_func(intent_name: str) -> bool:
                return intent_name in intent_filter
            # Load intent graph and create transcriber
            if (
                not self.intent_graph
                and self.intent_graph_path
                and self.intent_graph_path.is_file()
            ):
                # Load intent graph
                _LOGGER.debug("Loading %s", self.intent_graph_path)
                with gzip.GzipFile(self.intent_graph_path, mode="rb") as graph_gzip:
                    self.intent_graph = nx.readwrite.gpickle.read_gpickle(graph_gzip)
            if self.intent_graph:
                # Create language model
                _LOGGER.debug("Converting to ARPA language model")
                rhasspynlu.arpa_lm.graph_to_arpa(
                    self.intent_graph, lm_path, intent_filter=intent_filter_func
                )
                # Load transcriber
                lm_transcriber = self.make_transcriber(lm_path)
                self.lm_cache_transcribers[lm_key] = lm_transcriber
            else:
                # Use full transcriber
                _LOGGER.warning("No intent graph loaded. Cannot filter intents.")
        session.transcriber = lm_transcriber
    # -------------------------------------------------------------------------
    async def on_message_blocking(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker (blocking).

        Dispatches each Hermes message type to its handler and yields any
        response messages to be published.
        """
        # Check enable/disable messages
        if isinstance(message, AsrToggleOn):
            if message.reason == AsrToggleReason.UNKNOWN:
                # Always enable on unknown
                self.disabled_reasons.clear()
            else:
                self.disabled_reasons.discard(message.reason)
            if self.disabled_reasons:
                _LOGGER.debug("Still disabled: %s", self.disabled_reasons)
            else:
                self.enabled = True
                self.first_audio = True
                _LOGGER.debug("Enabled")
        elif isinstance(message, AsrToggleOff):
            self.enabled = False
            self.disabled_reasons.add(message.reason)
            _LOGGER.debug("Disabled")
        elif isinstance(message, AudioFrame):
            if self.enabled:
                assert site_id, "Missing site_id"
                if self.first_audio:
                    _LOGGER.debug("Receiving audio")
                    self.first_audio = False
                # Add to all active sessions
                async for frame_result in self.handle_audio_frame(
                    message.wav_bytes, site_id=site_id
                ):
                    yield frame_result
        elif isinstance(message, AudioSessionFrame):
            if self.enabled:
                assert site_id and session_id, "Missing site_id or session_id"
                if session_id in self.sessions:
                    if self.first_audio:
                        _LOGGER.debug("Receiving audio")
                        self.first_audio = False
                    # Add to specific session only
                    async for session_frame_result in self.handle_audio_frame(
                        message.wav_bytes, site_id=site_id, session_id=session_id
                    ):
                        yield session_frame_result
        elif isinstance(message, AsrStartListening):
            # Handle blocking
            await self.start_listening(message)
        elif isinstance(message, AsrStopListening):
            # hermes/asr/stopListening
            async for stop_result in self.stop_listening(message):
                yield stop_result
        elif isinstance(message, AsrTrain):
            # rhasspy/asr/<site_id>/train
            assert site_id, "Missing site_id"
            async for train_result in self.handle_train(message, site_id=site_id):
                yield train_result
        elif isinstance(message, G2pPronounce):
            # rhasspy/g2p/pronounce
            async for pronounce_result in self.handle_pronounce(message):
                yield pronounce_result
        else:
            _LOGGER.warning("Unexpected message: %s", message)
import logging
import shutil
import tempfile
import typing
from pathlib import Path
import networkx as nx
import rhasspynlu
from rhasspynlu.g2p import PronunciationsType
_DIR = Path(__file__).parent
_LOGGER = logging.getLogger(__name__)
# -------------------------------------------------------------------
def train(
    graph: nx.DiGraph,
    dictionary: typing.Union[str, Path],
    language_model: typing.Union[str, Path],
    pronunciations: PronunciationsType,
    dictionary_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    g2p_model: typing.Optional[typing.Union[str, Path]] = None,
    g2p_word_transform: typing.Optional[typing.Callable[[str], str]] = None,
    missing_words_path: typing.Optional[typing.Union[str, Path]] = None,
    vocab_path: typing.Optional[typing.Union[str, Path]] = None,
    language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    base_language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    base_language_model_weight: typing.Optional[float] = None,
    mixed_language_model_fst: typing.Optional[typing.Union[str, Path]] = None,
    balance_counts: bool = True,
):
    """Re-generates language model and dictionary from intent graph.

    The ARPA model and vocabulary are first written to temporary files and
    only copied over `language_model`/`dictionary` once generation succeeds,
    so a failed run does not clobber the existing artifacts.

    NOTE(review): `balance_counts` is accepted but never referenced in this
    body — confirm whether it should be forwarded to rhasspynlu.
    """
    vocabulary: typing.Set[str] = set()
    if vocab_path:
        # Caller wants the vocabulary kept at a specific path
        vocab_file = open(vocab_path, "w+")
    else:
        # Otherwise use a throwaway temp file (deleted when closed)
        vocab_file = typing.cast(
            typing.TextIO, tempfile.NamedTemporaryFile(suffix=".txt", mode="w+")
        )
        vocab_path = vocab_file.name
    # Language model mixing: only active when both a base FST and a positive
    # weight are supplied.
    is_mixing = False
    base_fst_weight = None
    if (
        (base_language_model_fst is not None)
        and (base_language_model_weight is not None)
        and (base_language_model_weight > 0)
    ):
        is_mixing = True
        base_fst_weight = (base_language_model_fst, base_language_model_weight)
    # Begin training
    with tempfile.NamedTemporaryFile(mode="w+") as lm_file:
        with vocab_file:
            # Create language model
            _LOGGER.debug("Converting to ARPA language model")
            rhasspynlu.arpa_lm.graph_to_arpa(
                graph,
                lm_file.name,
                vocab_path=vocab_path,
                model_path=language_model_fst,
                base_fst_weight=base_fst_weight,
                merge_path=mixed_language_model_fst,
            )
            # Load vocabulary (graph_to_arpa wrote it; rewind before reading)
            vocab_file.seek(0)
            vocabulary.update(line.strip() for line in vocab_file)
            if is_mixing:
                # Add all known words so the dictionary covers the base LM too
                vocabulary.update(pronunciations.keys())
        assert vocabulary, "No words in vocabulary"
        # Write dictionary to temporary file
        with tempfile.NamedTemporaryFile(mode="w+") as dictionary_file:
            _LOGGER.debug("Writing pronunciation dictionary")
            rhasspynlu.g2p.write_pronunciations(
                vocabulary,
                pronunciations,
                dictionary_file.name,
                g2p_model=g2p_model,
                g2p_word_transform=g2p_word_transform,
                missing_words_path=missing_words_path,
            )
            # -----------------------------------------------------------------
            # Copy dictionary over real file
            dictionary_file.seek(0)
            shutil.copy(dictionary_file.name, dictionary)
            _LOGGER.debug("Wrote dictionary to %s", str(dictionary))
            # Copy language model over real file
            lm_file.seek(0)
            shutil.copy(lm_file.name, language_model)
            _LOGGER.debug("Wrote language model to %s", str(language_model))
import io
import logging
import os
import time
import typing
import wave
from pathlib import Path
import pocketsphinx
from rhasspyasr import Transcriber, Transcription, TranscriptionToken
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
class PocketsphinxTranscriber(Transcriber):
"""Speech to text with CMU Pocketsphinx."""
def __init__(
self,
acoustic_model: Path,
dictionary: Path,
language_model: Path,
mllr_matrix: typing.Optional[Path] = None,
debug: bool = False,
):
self.acoustic_model = acoustic_model
self.dictionary = dictionary
self.language_model = language_model
self.mllr_matrix = mllr_matrix
self.debug = debug
self.decoder: typing.Optional[pocketsphinx.Decoder] = None
def transcribe_wav(self, wav_bytes: bytes) -> typing.Optional[Transcription]:
"""Speech to text from WAV data."""
if self.decoder is None:
# Load decoder
self.decoder = self.get_decoder()
# Compute WAV duration
audio_data: bytes = bytes()
with io.BytesIO(wav_bytes) as wav_buffer:
with wave.open(wav_buffer) as wav_file:
frames = wav_file.getnframes()
rate = wav_file.getframerate()
wav_duration = frames / float(rate)
# Extract raw audio data
audio_data = wav_file.readframes(wav_file.getnframes())
# Process data as an entire utterance
start_time = time.perf_counter()
self.decoder.start_utt()
self.decoder.process_raw(audio_data, False, True)
self.decoder.end_utt()
transcribe_seconds = time.perf_counter() - start_time
_LOGGER.debug("Decoded audio in %s second(s)", transcribe_seconds)
hyp = self.decoder.hyp()
if hyp:
return Transcription(
text=hyp.hypstr.strip(),
likelihood=self.decoder.get_logmath().exp(hyp.prob),
transcribe_seconds=transcribe_seconds,
wav_seconds=wav_duration,
tokens=[
TranscriptionToken(
token=seg.word,
start_time=seg.start_frame / 100,
end_time=seg.end_frame / 100,
likelihood=self.decoder.get_logmath().exp(seg.prob),
)
for seg in self.decoder.seg()
],
)
return None
def transcribe_stream(
self,
audio_stream: typing.Iterable[bytes],
sample_rate: int,
sample_width: int,
channels: int,
) -> typing.Optional[Transcription]:
"""Speech to text from an audio stream."""
assert channels == 1, "Only mono audio supported"
if self.decoder is None:
# Load decoder
self.decoder = self.get_decoder()
total_frames = 0
# Process data as an entire utterance
start_time = time.perf_counter()
self.decoder.start_utt()
for frame in audio_stream:
self.decoder.process_raw(frame, False, False)
total_frames += 1
self.decoder.end_utt()
transcribe_seconds = time.perf_counter() - start_time
_LOGGER.debug("Decoded audio in %s second(s)", transcribe_seconds)
hyp = self.decoder.hyp()
if hyp:
return Transcription(
text=hyp.hypstr.strip(),
likelihood=self.decoder.get_logmath().exp(hyp.prob),
transcribe_seconds=transcribe_seconds,
wav_seconds=total_frames / float(sample_rate),
tokens=[
TranscriptionToken(
token=seg.word,
start_time=seg.start_frame / 100,
end_time=seg.end_frame / 100,
likelihood=self.decoder.get_logmath().exp(seg.prob),
)
for seg in self.decoder.seg()
],
)
return None
def stop(self):
    """Stop the transcriber."""
    # Intentionally a no-op: nothing visible in this class requires cleanup.
def get_decoder(self) -> pocketsphinx.Decoder:
    """Load Pocketsphinx decoder from command-line arguments.

    Builds a decoder configuration from self.acoustic_model,
    self.dictionary and self.language_model, optionally applying an MLLR
    speaker-adaptation matrix when one exists on disk.
    """
    start_time = time.perf_counter()
    decoder_config = pocketsphinx.Decoder.default_config()
    decoder_config.set_string("-hmm", str(self.acoustic_model))
    decoder_config.set_string("-dict", str(self.dictionary))
    decoder_config.set_string("-lm", str(self.language_model))
    if not self.debug:
        # Silence Pocketsphinx's own logging unless debugging
        decoder_config.set_string("-logfn", os.devnull)
    if (self.mllr_matrix is not None) and self.mllr_matrix.exists():
        # Optional speaker adaptation matrix
        decoder_config.set_string("-mllr", str(self.mllr_matrix))
    decoder = pocketsphinx.Decoder(decoder_config)
    end_time = time.perf_counter()
    _LOGGER.debug(
        "Successfully loaded decoder in %s second(s)", end_time - start_time
    )
    return decoder
def __repr__(self) -> str:
return (
"PocketsphinxTranscriber("
f"acoustic_model={self.acoustic_model})"
f", dictionary={self.dictionary}"
f", language_model={self.language_model}"
f", mllr_matrix={self.mllr_matrix}"
")"
) | /rhasspy-asr-pocketsphinx-0.3.1.tar.gz/rhasspy-asr-pocketsphinx-0.3.1/rhasspyasr_pocketsphinx/transcribe.py | 0.793146 | 0.219379 | transcribe.py | pypi |
import dataclasses
import typing
from dataclasses import dataclass, field
from enum import Enum
from . import utils
@dataclass
class Entity:
    """Named entity from intent."""

    entity: str  # entity name
    value: str  # entity value (after substitutions)
    raw_value: str = ""  # value as it appeared in the raw text
    # start/end are presumably character indices into the substituted and
    # raw text respectively -- confirm against the producer of these objects.
    start: int = 0
    raw_start: int = 0
    end: int = 0
    raw_end: int = 0
    tokens: typing.List[typing.Any] = field(default_factory=list)
    raw_tokens: typing.List[str] = field(default_factory=list)

    @classmethod
    def from_dict(cls, entity_dict: typing.Dict[str, typing.Any]) -> "Entity":
        """Create Entity from dictionary.

        Keys are passed through utils.only_fields, which presumably drops
        keys that are not dataclass fields -- verify in utils.
        """
        return Entity(**utils.only_fields(cls, entity_dict))
@dataclass
class Intent:
    """Named intention with entities and slots."""

    name: str  # intent name
    confidence: float = 0  # recognition confidence; range set by producer (TODO confirm 0-1)

    @classmethod
    def from_dict(cls, intent_dict: typing.Dict[str, typing.Any]) -> "Intent":
        """Create Intent from dictionary (unknown keys filtered via utils.only_fields)."""
        return Intent(**utils.only_fields(cls, intent_dict))
@dataclass
class TagInfo:
    """Information used to process FST tags."""

    tag: str  # tag name
    start_index: int = 0  # index into the substituted symbol sequence (presumably)
    raw_start_index: int = 0  # index into the raw symbol sequence (presumably)
    symbols: typing.List[str] = field(default_factory=list)
    raw_symbols: typing.List[str] = field(default_factory=list)

    @classmethod
    def from_dict(cls, tag_dict: typing.Dict[str, typing.Any]) -> "TagInfo":
        """Create TagInfo from dictionary (unknown keys filtered via utils.only_fields)."""
        return TagInfo(**utils.only_fields(cls, tag_dict))
class RecognitionResult(str, Enum):
    """Result of a recognition.

    Inherits from str so members compare equal to (and serialize as) their
    plain string values.
    """

    # An intent was recognized
    SUCCESS = "success"

    # No intent was recognized
    FAILURE = "failure"
@dataclass
class Recognition:
    """Output of intent recognition."""

    result: RecognitionResult  # overall success/failure
    intent: typing.Optional[Intent] = None
    entities: typing.List[Entity] = field(default_factory=list)
    text: str = ""  # input text with substitutions
    raw_text: str = ""  # input text as given
    recognize_seconds: float = 0  # time taken by recognition
    tokens: typing.List[typing.Any] = field(default_factory=list)
    raw_tokens: typing.List[str] = field(default_factory=list)

    # Transcription details
    wav_seconds: float = 0.0
    transcribe_seconds: float = 0.0
    speech_confidence: float = 0.0

    def asdict(self) -> typing.Dict[str, typing.Any]:
        """Convert to dictionary."""
        return dataclasses.asdict(self)

    @classmethod
    def from_dict(cls, recognition_dict: typing.Dict[str, typing.Any]) -> "Recognition":
        """Create Recognition from dictionary.

        The "intent", "entities" and "slots" keys are handled specially;
        remaining keys are filtered to dataclass fields via utils.only_fields.
        """
        # Work on a shallow copy so the pop() calls below do not mutate
        # the caller's dictionary.
        recognition_dict = dict(recognition_dict)

        intent_dict = recognition_dict.pop("intent", None)
        entity_dicts = recognition_dict.pop("entities", None)
        slots_dict = recognition_dict.pop("slots", None)

        intent: typing.Optional[Intent] = None
        if intent_dict:
            intent = Intent.from_dict(intent_dict)
            result = (
                RecognitionResult.SUCCESS if intent.name else RecognitionResult.FAILURE
            )
        else:
            result = RecognitionResult.FAILURE

        recognition = Recognition(
            result=result, intent=intent, **utils.only_fields(cls, recognition_dict)
        )

        if entity_dicts:
            recognition.entities = [Entity.from_dict(e) for e in entity_dicts]

        # NOTE: when "slots" is present it replaces any entities set above.
        if slots_dict:
            recognition.entities = [
                Entity(entity=key, value=value) for key, value in slots_dict.items()
            ]

        return recognition
import json
import logging
import sqlite3
import typing
from pathlib import Path
import networkx as nx
import rhasspynlu
from rhasspynlu.jsgf import Sentence
import rhasspyfuzzywuzzy
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient, TopicArgs
from rhasspyhermes.intent import Intent, Slot, SlotRange
from rhasspyhermes.nlu import (
NluError,
NluIntent,
NluIntentNotRecognized,
NluIntentParsed,
NluQuery,
NluTrain,
NluTrainSuccess,
)
_LOGGER = logging.getLogger("rhasspyfuzzywuzzy_hermes")
# -----------------------------------------------------------------------------
class NluHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy fuzzywuzzy."""

    def __init__(
        self,
        client,
        intent_graph: typing.Optional[nx.DiGraph] = None,
        intent_graph_path: typing.Optional[Path] = None,
        examples_path: typing.Optional[Path] = None,
        sentences: typing.Optional[typing.List[Path]] = None,
        default_entities: typing.Optional[
            typing.Dict[str, typing.Iterable[Sentence]]
        ] = None,
        word_transform: typing.Optional[typing.Callable[[str], str]] = None,
        replace_numbers: bool = False,
        language: typing.Optional[str] = None,
        confidence_threshold: float = 0.0,
        extra_converters: typing.Optional[
            typing.Dict[str, typing.Callable[..., typing.Any]]
        ] = None,
        site_ids: typing.Optional[typing.List[str]] = None,
        lang: typing.Optional[str] = None,
    ):
        super().__init__("rhasspyfuzzywuzzy_hermes", client, site_ids=site_ids)

        self.subscribe(NluQuery, NluTrain)

        # Intent graph (loaded lazily from intent_graph_path when not given)
        self.intent_graph = intent_graph
        self.intent_graph_path = intent_graph_path

        # Examples SQLite database produced by training
        self.examples_path = examples_path

        self.sentences = sentences or []
        self.default_entities = default_entities or {}
        self.word_transform = word_transform
        self.replace_numbers = replace_numbers
        self.language = language

        # Minimum confidence before reporting "intent not recognized"
        self.confidence_threshold = confidence_threshold

        self.extra_converters = extra_converters
        self.lang = lang

    # -------------------------------------------------------------------------

    async def handle_query(
        self, query: NluQuery
    ) -> typing.AsyncIterable[
        typing.Union[
            NluIntentParsed,
            typing.Tuple[NluIntent, TopicArgs],
            NluIntentNotRecognized,
            NluError,
        ]
    ]:
        """Do intent recognition."""
        # Capture the original input up-front. Previously this was assigned
        # inside the try block (after graph loading), so any exception raised
        # before that point caused a NameError inside the except handler.
        original_text = query.input

        try:
            # Load intent graph on demand
            if (
                not self.intent_graph
                and self.intent_graph_path
                and self.intent_graph_path.is_file()
            ):
                _LOGGER.debug("Loading %s", self.intent_graph_path)
                with open(self.intent_graph_path, mode="rb") as graph_file:
                    self.intent_graph = rhasspynlu.gzip_pickle_to_graph(graph_file)

            # Only recognize when both graph and examples are available
            if (
                self.intent_graph
                and self.examples_path
                and self.examples_path.is_file()
            ):

                def intent_filter(intent_name: str) -> bool:
                    """Filter out intents not requested by the query."""
                    if query.intent_filter:
                        return intent_name in query.intent_filter
                    return True

                # Replace digits with words
                if self.replace_numbers:
                    # Have to assume whitespace tokenization
                    words = rhasspynlu.replace_numbers(
                        query.input.split(), self.language
                    )
                    query.input = " ".join(words)

                input_text = query.input

                # Fix casing
                if self.word_transform:
                    input_text = self.word_transform(input_text)

                recognitions: typing.List[rhasspynlu.intent.Recognition] = []

                if input_text:
                    recognitions = rhasspyfuzzywuzzy.recognize(
                        input_text,
                        self.intent_graph,
                        str(self.examples_path),
                        intent_filter=intent_filter,
                        extra_converters=self.extra_converters,
                    )
            else:
                _LOGGER.error("No intent graph or examples loaded")
                recognitions = []

            # Use first recognition only if above threshold
            if (
                recognitions
                and recognitions[0]
                and recognitions[0].intent
                and (recognitions[0].intent.confidence >= self.confidence_threshold)
            ):
                recognition = recognitions[0]
                assert recognition.intent

                intent = Intent(
                    intent_name=recognition.intent.name,
                    confidence_score=recognition.intent.confidence,
                )
                slots = [
                    Slot(
                        entity=(e.source or e.entity),
                        slot_name=e.entity,
                        confidence=1.0,
                        value=e.value_dict,
                        raw_value=e.raw_value,
                        range=SlotRange(
                            start=e.start,
                            end=e.end,
                            raw_start=e.raw_start,
                            raw_end=e.raw_end,
                        ),
                    )
                    for e in recognition.entities
                ]

                if query.custom_entities:
                    # Copy user-defined entities
                    for entity_name, entity_value in query.custom_entities.items():
                        slots.append(
                            Slot(
                                entity=entity_name,
                                confidence=1.0,
                                value={"value": entity_value},
                            )
                        )

                # intentParsed
                yield NluIntentParsed(
                    input=recognition.text,
                    id=query.id,
                    site_id=query.site_id,
                    session_id=query.session_id,
                    intent=intent,
                    slots=slots,
                )

                # intent (topic includes the recognized intent name)
                yield (
                    NluIntent(
                        input=recognition.text,
                        id=query.id,
                        site_id=query.site_id,
                        session_id=query.session_id,
                        intent=intent,
                        slots=slots,
                        asr_tokens=[NluIntent.make_asr_tokens(recognition.tokens)],
                        asr_confidence=query.asr_confidence,
                        raw_input=original_text,
                        wakeword_id=query.wakeword_id,
                        lang=(query.lang or self.lang),
                        custom_data=query.custom_data,
                    ),
                    {"intent_name": recognition.intent.name},
                )
            else:
                # Not recognized
                yield NluIntentNotRecognized(
                    input=query.input,
                    id=query.id,
                    site_id=query.site_id,
                    session_id=query.session_id,
                    custom_data=query.custom_data,
                )
        except Exception as e:
            _LOGGER.exception("handle_query")
            yield NluError(
                site_id=query.site_id,
                session_id=query.session_id,
                error=str(e),
                context=original_text,
            )

    # -------------------------------------------------------------------------

    async def handle_train(
        self, train: NluTrain, site_id: str = "default"
    ) -> typing.AsyncIterable[
        typing.Union[typing.Tuple[NluTrainSuccess, TopicArgs], NluError]
    ]:
        """Transform sentences to intent examples."""
        try:
            _LOGGER.debug("Loading %s", train.graph_path)
            with open(train.graph_path, mode="rb") as graph_file:
                self.intent_graph = rhasspynlu.gzip_pickle_to_graph(graph_file)

            examples = rhasspyfuzzywuzzy.train(self.intent_graph)

            if self.examples_path:
                if self.examples_path.is_file():
                    # Delete existing file
                    self.examples_path.unlink()

                # Write examples to SQLite database; close the connection
                # even if an INSERT fails.
                conn = sqlite3.connect(str(self.examples_path))
                try:
                    c = conn.cursor()
                    c.execute("""DROP TABLE IF EXISTS intents""")
                    c.execute("""CREATE TABLE intents (sentence text, path text)""")

                    for _, sentences in examples.items():
                        for sentence, path in sentences.items():
                            c.execute(
                                "INSERT INTO intents VALUES (?, ?)",
                                (sentence, json.dumps(path, ensure_ascii=False)),
                            )

                    conn.commit()
                finally:
                    conn.close()

                _LOGGER.debug("Wrote %s", str(self.examples_path))

            yield (NluTrainSuccess(id=train.id), {"site_id": site_id})
        except Exception as e:
            _LOGGER.exception("handle_train")
            yield NluError(
                site_id=site_id, session_id=train.id, error=str(e), context=train.id
            )

    # -------------------------------------------------------------------------

    async def on_message(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker."""
        if isinstance(message, NluQuery):
            async for query_result in self.handle_query(message):
                yield query_result
        elif isinstance(message, NluTrain):
            assert site_id, "Missing site_id"
            async for train_result in self.handle_train(message, site_id=site_id):
                yield train_result
        else:
            _LOGGER.warning("Unexpected message: %s", message)
import argparse
import json
import logging
import os
import sqlite3
import sys
import typing
from pathlib import Path
import rhasspynlu
from rhasspynlu.intent import Recognition
from . import recognize as fuzzywuzzy_recognize
from . import train as fuzzywuzzy_train
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def main():
    """Entry point: parse arguments, configure logging, run sub-command."""
    args = get_args()

    # --debug switches the root logger to DEBUG
    level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(level=level)
    _LOGGER.debug(args)

    # Each sub-parser sets args.func to its handler
    args.func(args)
# -----------------------------------------------------------------------------
def get_args() -> argparse.Namespace:
    """Parse command-line arguments."""
    arg_parser = argparse.ArgumentParser(prog="rhasspy-fuzzywuzzy")
    arg_parser.add_argument(
        "--debug", action="store_true", help="Print DEBUG messages to the console"
    )

    # One sub-parser per sub-command; the chosen command lands in args.command
    commands = arg_parser.add_subparsers()
    commands.required = True
    commands.dest = "command"

    # "recognize" sub-command ------------------------------------------------
    recognize_cmd = commands.add_parser("recognize", help="Do intent recognition")
    recognize_cmd.set_defaults(func=recognize)
    recognize_cmd.add_argument(
        "--examples", required=True, help="Path to examples SQLite Database"
    )
    recognize_cmd.add_argument(
        "--intent-graph", required=True, help="Path to intent graph JSON file"
    )
    recognize_cmd.add_argument(
        "--word-casing",
        choices=["upper", "lower", "ignore"],
        default="ignore",
        help="Case transformation applied to query text",
    )
    recognize_cmd.add_argument("query", nargs="*", help="Query input sentences")

    # "train" sub-command ----------------------------------------------------
    train_cmd = commands.add_parser(
        "train", help="Generate intent examples from sentences and slots"
    )
    train_cmd.set_defaults(func=train)
    train_cmd.add_argument(
        "--examples", help="Path to write examples SQLite Database"
    )
    train_cmd.add_argument(
        "--intent-graph", help="Path to write intent graph JSON file"
    )
    train_cmd.add_argument(
        "--sentences", action="append", help="Paths to sentences ini files"
    )

    return arg_parser.parse_args()
# -----------------------------------------------------------------------------
def recognize(args: argparse.Namespace):
    """Do intent recognition from query text.

    Reads queries either from the positional arguments or, when none are
    given, line-by-line from stdin. Emits one JSON recognition per line
    on stdout. Ctrl-C exits quietly.
    """
    try:
        # Convert to Paths
        args.examples = Path(args.examples)
        args.intent_graph = Path(args.intent_graph)

        # Load graph/examples (older JSON graph format, not gzip pickle)
        _LOGGER.debug("Loading intent graph from %s", str(args.intent_graph))
        with open(args.intent_graph, "r") as intent_graph_file:
            graph_dict = json.load(intent_graph_file)
            intent_graph = rhasspynlu.json_to_graph(graph_dict)

        _LOGGER.debug("Loading examples from %s", str(args.examples))

        _LOGGER.debug("Processing sentences")
        word_transform = get_word_transform(args.word_casing)

        # Process queries
        if args.query:
            sentences = args.query
        else:
            # Interactive hint only when stdin is a terminal
            if os.isatty(sys.stdin.fileno()):
                print("Reading queries from stdin...", file=sys.stderr)

            sentences = sys.stdin

        for sentence in sentences:
            # Handle casing
            sentence = sentence.strip()
            sentence = word_transform(sentence)

            # Do recognition
            recognitions = fuzzywuzzy_recognize(
                sentence, intent_graph, str(args.examples)
            )

            if recognitions:
                # Intent recognized; only the best match is reported
                recognition = recognitions[0]
            else:
                # Intent not recognized
                recognition = Recognition.empty()

            # Print as a line of JSON
            json.dump(recognition.asdict(), sys.stdout, ensure_ascii=False)
            print("")
            sys.stdout.flush()
    except KeyboardInterrupt:
        pass
# -----------------------------------------------------------------------------
def train(args: argparse.Namespace):
    """Generate intent examples from sentences and slots.

    Reads the intent graph from --intent-graph or stdin, trains the
    fuzzywuzzy examples, and writes them to the --examples SQLite database
    (or JSON on stdout when --examples is not given).
    """
    # Convert to Paths
    if args.examples:
        args.examples = Path(args.examples)

    if args.intent_graph:
        # Load intent graph from file
        args.intent_graph = Path(args.intent_graph)
        with open(args.intent_graph, "r") as graph_file:
            graph_dict = json.load(graph_file)
    else:
        # Load intent graph from stdin
        if os.isatty(sys.stdin.fileno()):
            print("Reading intent graph JSON from stdin...", file=sys.stderr)

        graph_dict = json.load(sys.stdin)

    # The "train" sub-parser defines no --slots/--slot-programs arguments,
    # so direct attribute access raised AttributeError whenever this
    # function ran. Guard with getattr for callers that supply them.
    slots = getattr(args, "slots", None)
    if slots:
        args.slots = [Path(p) for p in slots]

    slot_programs = getattr(args, "slot_programs", None)
    if slot_programs:
        args.slot_programs = [Path(p) for p in slot_programs]

    # -------------------------------------------------------------------------

    # Do training
    examples = fuzzywuzzy_train(graph_dict)

    if args.examples:
        # Write examples to SQLite database; close the connection even if
        # an INSERT fails.
        conn = sqlite3.connect(str(args.examples))
        try:
            c = conn.cursor()
            c.execute("""DROP TABLE IF EXISTS intents""")
            c.execute("""CREATE TABLE intents (sentence text, path text)""")

            for _, sentences in examples.items():
                for sentence, path in sentences.items():
                    c.execute(
                        "INSERT INTO intents VALUES (?, ?)",
                        (sentence, json.dumps(path, ensure_ascii=False)),
                    )

            conn.commit()
        finally:
            conn.close()

        _LOGGER.debug("Wrote %s", str(args.examples))
    else:
        # Write results to stdout
        json.dump(examples, sys.stdout, ensure_ascii=False)
        print("")
        sys.stdout.flush()
# -----------------------------------------------------------------------------
def get_word_transform(name: str) -> typing.Callable[[str], str]:
    """Gets a word transformation function by name.

    "upper" and "lower" map to the corresponding str methods; any other
    name yields the identity transform.
    """
    transforms: typing.Dict[str, typing.Callable[[str], str]] = {
        "upper": str.upper,
        "lower": str.lower,
    }
    return transforms.get(name, lambda text: text)
# -----------------------------------------------------------------------------
# Allow invocation via `python -m rhasspyfuzzywuzzy`
if __name__ == "__main__":
    main()
import json
import logging
import sqlite3
import time
import typing
import networkx as nx
import rapidfuzz.process as fuzzy_process
import rapidfuzz.utils as fuzz_utils
import rhasspynlu
from rhasspynlu.intent import Recognition
from .train import train
_LOGGER = logging.getLogger(__name__)
# -----------------------------------------------------------------------------
def extract_one_sqlite(query: str, examples_path: str):
    """Finds the best text/path for a query.

    Returns (matched_sentence, decoded_path, score) or None when the
    database has no sentences.
    """
    conn = sqlite3.connect(examples_path)
    try:
        c = conn.cursor()
        c.execute("SELECT sentence FROM intents ORDER BY rowid")

        # Rows are (sentence,) tuples; the processor unwraps them.
        # result is (choice, score, index) or None.
        result = fuzzy_process.extractOne([query], c, processor=lambda s: s[0])
        if not result:
            return result

        # Re-fetch the matching row's path by its position (result[2])
        c.execute(
            "SELECT path FROM intents ORDER BY rowid LIMIT 1 OFFSET ?", (result[2],)
        )
        best_path = c.fetchone()[0]

        return (result[0][0], json.loads(best_path), result[1])
    finally:
        # Previously the connection leaked when extractOne or the fetch
        # raised; always close it.
        conn.close()
def recognize(
    input_text: str,
    intent_graph: nx.DiGraph,
    examples_path: str,
    intent_filter: typing.Optional[typing.Callable[[str], bool]] = None,
    extra_converters: typing.Optional[
        typing.Dict[str, typing.Callable[..., typing.Any]]
    ] = None,
) -> typing.List[Recognition]:
    """Find the closest matching intent(s).

    Args:
        input_text: text to match against the trained examples
        intent_graph: NLU intent graph used to decode the matched path
        examples_path: path to the examples SQLite database
        intent_filter: accepted but see NOTE below
        extra_converters: extra value converters passed to path_to_recognition

    Returns:
        Single-element list with the best Recognition.
    """
    start_time = time.perf_counter()
    # NOTE(review): intent_filter is normalized here but never applied to
    # the search below -- candidates from all intents are considered.
    # Confirm whether filtering should happen in extract_one_sqlite.
    intent_filter = intent_filter or (lambda i: True)

    # Find closest match
    # pylint: disable=unpacking-non-sequence
    # NOTE(review): extract_one_sqlite returns None for an empty database,
    # which would make this unpacking raise TypeError -- confirm callers
    # guarantee a non-empty examples table.
    best_text, best_path, best_score = extract_one_sqlite(
        fuzz_utils.default_process(input_text), examples_path
    )
    _LOGGER.debug("input=%s, match=%s, score=%s", input_text, best_text, best_score)

    end_time = time.perf_counter()
    _, recognition = rhasspynlu.fsticuffs.path_to_recognition(
        best_path, intent_graph, extra_converters=extra_converters
    )

    assert recognition and recognition.intent, "Failed to find a match"
    # Score is 0-100 from rapidfuzz; normalize to 0-1
    recognition.intent.confidence = best_score / 100.0
    recognition.recognize_seconds = end_time - start_time
    recognition.raw_text = input_text
    recognition.raw_tokens = input_text.split()

    return [recognition]
import argparse
import asyncio
import logging
import re
from copy import deepcopy
from dataclasses import dataclass
from typing import Awaitable, Callable, Dict, List, Optional, Union
import paho.mqtt.client as mqtt
import rhasspyhermes.cli as hermes_cli
from rhasspyhermes.client import HermesClient
from rhasspyhermes.dialogue import (
DialogueContinueSession,
DialogueEndSession,
DialogueIntentNotRecognized,
DialogueNotification,
DialogueStartSession,
)
from rhasspyhermes.nlu import NluIntent, NluIntentNotRecognized
from rhasspyhermes.wake import HotwordDetected
_LOGGER = logging.getLogger("HermesApp")
@dataclass
class ContinueSession:
    """Helper class to continue the current session.

    Return an instance of this class from an intent handler to keep the
    dialogue session open for a follow-up query.

    Attributes:
        text: The text the TTS should say to start this additional request of the session.
        intent_filter: A list of intents names to restrict the NLU resolution on the
          answer of this query.
        custom_data: An update to the session's custom data. If not provided, the custom data
          will stay the same.
        send_intent_not_recognized: Indicates whether the dialogue manager should handle non recognized
          intents by itself or send them for the client to handle.
    """

    custom_data: typing.Optional[str] = None
    text: typing.Optional[str] = None
    intent_filter: typing.Optional[typing.List[str]] = None
    send_intent_not_recognized: bool = False
@dataclass
class EndSession:
    """Helper class to end the current session.

    Return an instance of this class from an intent handler to close the
    dialogue session, optionally speaking a final message.

    Attributes:
        text: The text the TTS should say to end the session.
        custom_data: An update to the session's custom data. If not provided, the custom data
          will stay the same.
    """

    text: typing.Optional[str] = None
    custom_data: typing.Optional[str] = None
@dataclass
class TopicData:
    """Helper class for topic subscription.

    Passed to @app.on_topic callbacks alongside the raw MQTT payload.

    Attributes:
        topic: The MQTT topic.
        data: A dictionary holding extracted data for the given placeholder.
    """

    topic: str
    data: typing.Dict[str, str]
class HermesApp(HermesClient):
"""A Rhasspy app using the Hermes protocol.
Attributes:
args: Command-line arguments for the Hermes app.
Example:
.. literalinclude:: ../examples/time_app.py
"""
def __init__(
    self,
    name: str,
    parser: Optional[argparse.ArgumentParser] = None,
    mqtt_client: Optional[mqtt.Client] = None,
    **kwargs
):
    """Initialize the Rhasspy Hermes app.

    Arguments:
        name: The name of this object.
        parser: An argument parser.
            If the argument is not specified, the object creates an
            argument parser itself.
        mqtt_client: An MQTT client. If the argument
            is not specified, the object creates an MQTT client itself.
        **kwargs: Other arguments. This supports the same arguments as the command-line
            arguments, such has ``host`` and ``port``. Arguments specified by the user
            on the command line have precedence over arguments passed as ``**kwargs``.
    """
    if parser is None:
        parser = argparse.ArgumentParser(prog=name)

    # Add default arguments
    hermes_cli.add_hermes_args(parser)

    # overwrite argument defaults inside parser with argparse.SUPPRESS
    # so arguments that are not provided get ignored
    # NOTE: relies on argparse's private _actions attribute.
    suppress_parser = deepcopy(parser)
    for action in suppress_parser._actions:
        action.default = argparse.SUPPRESS

    # supplied_args: only flags actually given on the command line
    supplied_args = vars(suppress_parser.parse_args())
    # default_args: the parser's defaults with no command line at all
    default_args = vars(parser.parse_args([]))

    # Command-line arguments take precedence over the arguments of the HermesApp.__init__
    args = {**default_args, **kwargs, **supplied_args}

    self.args = argparse.Namespace(**args)

    # Set up logging
    hermes_cli.setup_logging(self.args)
    _LOGGER.debug(self.args)

    # Create MQTT client
    if mqtt_client is None:
        mqtt_client = mqtt.Client()

    # Initialize HermesClient
    # pylint: disable=no-member
    super().__init__(name, mqtt_client, site_ids=self.args.site_id)

    # Callback registries, filled by the on_* decorators and consumed by
    # _subscribe_callbacks/on_raw_message.
    self._callbacks_hotword: List[Callable[[HotwordDetected], Awaitable[None]]] = []

    self._callbacks_intent: Dict[
        str,
        List[Callable[[NluIntent], Awaitable[None]]],
    ] = {}

    self._callbacks_intent_not_recognized: List[
        Callable[[NluIntentNotRecognized], Awaitable[None]]
    ] = []

    self._callbacks_dialogue_intent_not_recognized: List[
        Callable[[DialogueIntentNotRecognized], Awaitable[None]]
    ] = []

    self._callbacks_topic: Dict[
        str, List[Callable[[TopicData, bytes], Awaitable[None]]]
    ] = {}

    self._callbacks_topic_regex: List[
        Callable[[TopicData, bytes], Awaitable[None]]
    ] = []

    self._additional_topic: List[str] = []
def _subscribe_callbacks(self) -> None:
    """Subscribe to every MQTT topic that has a registered callback."""
    # Deduplicate intent names before building their topics.
    unique_intents = set(self._callbacks_intent.keys())
    topics: List[str] = [
        NluIntent.topic(intent_name=name) for name in unique_intents
    ]

    if self._callbacks_hotword:
        topics.append(HotwordDetected.topic())

    if self._callbacks_intent_not_recognized:
        topics.append(NluIntentNotRecognized.topic())

    if self._callbacks_dialogue_intent_not_recognized:
        topics.append(DialogueIntentNotRecognized.topic())

    # Raw-topic callbacks (deduplicated) plus any extra topics.
    topics.extend(set(self._callbacks_topic.keys()))
    topics.extend(self._additional_topic)

    self.subscribe_topics(*topics)
async def on_raw_message(self, topic: str, payload: bytes):
    """This method handles messages from the MQTT broker.

    Arguments:
        topic: The topic of the received MQTT message.
        payload: The payload of the received MQTT message.

    .. warning:: Don't override this method in your app. This is where all the magic happens in Rhasspy Hermes App.
    """
    try:
        # Dispatch by topic type: hotword, intent, NLU not-recognized,
        # dialogue not-recognized, then raw/regex topic callbacks.
        if HotwordDetected.is_topic(topic):
            # hermes/hotword/<wakeword_id>/detected
            try:
                hotword_detected = HotwordDetected.from_json(payload)
                for function_h in self._callbacks_hotword:
                    await function_h(hotword_detected)
            except KeyError as key:
                _LOGGER.error(
                    "Missing key %s in JSON payload for %s: %s", key, topic, payload
                )
        elif NluIntent.is_topic(topic):
            # hermes/intent/<intent_name>
            try:
                nlu_intent = NluIntent.from_json(payload)
                intent_name = nlu_intent.intent.intent_name
                if intent_name in self._callbacks_intent:
                    for function_i in self._callbacks_intent[intent_name]:
                        await function_i(nlu_intent)
            except KeyError as key:
                _LOGGER.error(
                    "Missing key %s in JSON payload for %s: %s", key, topic, payload
                )
        elif NluIntentNotRecognized.is_topic(topic):
            # hermes/nlu/intentNotRecognized
            try:
                nlu_intent_not_recognized = NluIntentNotRecognized.from_json(
                    payload
                )
                for function_inr in self._callbacks_intent_not_recognized:
                    await function_inr(nlu_intent_not_recognized)
            except KeyError as key:
                _LOGGER.error(
                    "Missing key %s in JSON payload for %s: %s", key, topic, payload
                )
        elif DialogueIntentNotRecognized.is_topic(topic):
            # hermes/dialogueManager/intentNotRecognized
            try:
                dialogue_intent_not_recognized = (
                    DialogueIntentNotRecognized.from_json(payload)
                )
                for function_dinr in self._callbacks_dialogue_intent_not_recognized:
                    await function_dinr(dialogue_intent_not_recognized)
            except KeyError as key:
                _LOGGER.error(
                    "Missing key %s in JSON payload for %s: %s", key, topic, payload
                )
        else:
            # Raw topic callbacks: exact topic match first, then regex
            # patterns registered via on_topic with wildcards/templates.
            unexpected_topic = True
            if topic in self._callbacks_topic:
                for function_1 in self._callbacks_topic[topic]:
                    await function_1(TopicData(topic, {}), payload)
                unexpected_topic = False
            else:
                for function_2 in self._callbacks_topic_regex:
                    if hasattr(function_2, "topic_extras"):
                        topic_extras = getattr(function_2, "topic_extras")
                        for pattern, named_positions in topic_extras:
                            if re.match(pattern, topic) is not None:
                                data = TopicData(topic, {})
                                # Fill template placeholders from their
                                # recorded positions in the topic path.
                                parts = topic.split(sep="/")
                                if named_positions is not None:
                                    for name, position in named_positions.items():
                                        data.data[name] = parts[position]
                                await function_2(data, payload)
                                unexpected_topic = False
            if unexpected_topic:
                _LOGGER.warning("Unexpected topic: %s", topic)

    except Exception:
        _LOGGER.exception("on_raw_message")
def on_hotword(
    self, function: Callable[[HotwordDetected], Awaitable[None]]
) -> Callable[[HotwordDetected], Awaitable[None]]:
    """Apply this decorator to a function that you want to act on a detected hotword.

    The decorated function takes a :class:`rhasspyhermes.wake.HotwordDetected`
    object and returns nothing.

    Example:

    .. code-block:: python

        @app.on_hotword
        async def wake(hotword: HotwordDetected):
            print(f"Hotword {hotword.model_id} detected on site {hotword.site_id}")

    If a hotword has been detected, the ``wake`` function is called with the ``hotword`` argument.
    This object holds information about the detected hotword.
    """
    # Register as-is; dispatching happens in on_raw_message.
    self._callbacks_hotword.append(function)
    return function
def on_intent(
    self, *intent_names: str
) -> Callable[
    [
        Callable[
            [NluIntent], Union[Awaitable[ContinueSession], Awaitable[EndSession]]
        ]
    ],
    Callable[[NluIntent], Awaitable[None]],
]:
    """Apply this decorator to a function that you want to act on a received intent.

    Arguments:
        intent_names: Names of the intents you want the function to act on.

    The decorated function has a :class:`rhasspyhermes.nlu.NluIntent` object as an argument
    and needs to return a :class:`ContinueSession` or :class:`EndSession` object.

    If the function returns a :class:`ContinueSession` object, the intent's session is continued after
    saying the supplied text. If the function returns a a :class:`EndSession` object, the intent's session
    is ended after saying the supplied text, or immediately when no text is supplied.

    Example:

    .. code-block:: python

        @app.on_intent("GetTime")
        async def get_time(intent: NluIntent):
            return EndSession("It's too late.")

    If the intent with name GetTime has been detected, the ``get_time`` function is called
    with the ``intent`` argument. This object holds information about the detected intent.
    """

    def wrapper(
        function: Callable[
            [NluIntent], Union[Awaitable[ContinueSession], Awaitable[EndSession]]
        ]
    ) -> Callable[[NluIntent], Awaitable[None]]:
        async def wrapped(intent: NluIntent) -> None:
            # Run the user handler, then translate its return value into
            # the corresponding dialogue-manager message.
            message = await function(intent)
            if isinstance(message, EndSession):
                if intent.session_id is not None:
                    self.publish(
                        DialogueEndSession(
                            session_id=intent.session_id,
                            text=message.text,
                            custom_data=message.custom_data,
                        )
                    )
                else:
                    _LOGGER.error(
                        "Cannot end session of intent without session ID."
                    )
            elif isinstance(message, ContinueSession):
                if intent.session_id is not None:
                    self.publish(
                        DialogueContinueSession(
                            session_id=intent.session_id,
                            text=message.text,
                            intent_filter=message.intent_filter,
                            custom_data=message.custom_data,
                            send_intent_not_recognized=message.send_intent_not_recognized,
                        )
                    )
                else:
                    _LOGGER.error(
                        "Cannot continue session of intent without session ID."
                    )

        # Register the wrapped handler under every requested intent name.
        for intent_name in intent_names:
            try:
                self._callbacks_intent[intent_name].append(wrapped)
            except KeyError:
                self._callbacks_intent[intent_name] = [wrapped]

        return wrapped

    return wrapper
def on_intent_not_recognized(
    self,
    function: Callable[
        [NluIntentNotRecognized],
        Union[Awaitable[ContinueSession], Awaitable[EndSession], Awaitable[None]],
    ],
) -> Callable[[NluIntentNotRecognized], Awaitable[None]]:
    """Apply this decorator to a function that you want to act when the NLU system
    hasn't recognized an intent.

    The decorated function has a :class:`rhasspyhermes.nlu.NluIntentNotRecognized` object as an argument
    and can return a :class:`ContinueSession` or :class:`EndSession` object or have no return value.

    If the function returns a :class:`ContinueSession` object, the current session is continued after
    saying the supplied text. If the function returns a a :class:`EndSession` object, the current session
    is ended after saying the supplied text, or immediately when no text is supplied. If the function doesn't
    have a return value, nothing is changed to the session.

    Example:

    .. code-block:: python

        @app.on_intent_not_recognized
        async def not_understood(intent_not_recognized: NluIntentNotRecognized):
            print(f"Didn't understand \"{intent_not_recognized.input}\" on site {intent_not_recognized.site_id}")

    If an intent hasn't been recognized, the ``not_understood`` function is called
    with the ``intent_not_recognized`` argument. This object holds information about the not recognized intent.
    """

    async def wrapped(inr: NluIntentNotRecognized) -> None:
        # Run the user handler; a None return leaves the session untouched.
        message = await function(inr)
        if isinstance(message, EndSession):
            if inr.session_id is not None:
                self.publish(
                    DialogueEndSession(
                        session_id=inr.session_id,
                        text=message.text,
                        custom_data=message.custom_data,
                    )
                )
            else:
                _LOGGER.error(
                    "Cannot end session of NLU intent not recognized message without session ID."
                )
        elif isinstance(message, ContinueSession):
            if inr.session_id is not None:
                self.publish(
                    DialogueContinueSession(
                        session_id=inr.session_id,
                        text=message.text,
                        intent_filter=message.intent_filter,
                        custom_data=message.custom_data,
                        send_intent_not_recognized=message.send_intent_not_recognized,
                    )
                )
            else:
                _LOGGER.error(
                    "Cannot continue session of NLU intent not recognized message without session ID."
                )

    self._callbacks_intent_not_recognized.append(wrapped)

    return wrapped
def on_dialogue_intent_not_recognized(
    self,
    function: Callable[
        [DialogueIntentNotRecognized],
        Union[Awaitable[ContinueSession], Awaitable[EndSession], Awaitable[None]],
    ],
) -> Callable[[DialogueIntentNotRecognized], Awaitable[None]]:
    """Apply this decorator to a function that you want to act when the dialogue manager
    failed to recognize an intent and you requested to notify you of this event with the
    `sendIntentNotRecognized` flag.

    The decorated function has a :class:`rhasspyhermes.dialogue.DialogueIntentNotRecognized` object as an argument
    and can return a :class:`ContinueSession` or :class:`EndSession` object or have no return value.

    If the function returns a :class:`ContinueSession` object, the current session is continued after
    saying the supplied text. If the function returns a a :class:`EndSession` object, the current session
    is ended after saying the supplied text, or immediately when no text is supplied. If the function doesn't
    have a return value, nothing is changed to the session.

    Example:

    .. code-block:: python

        @app.on_dialogue_intent_not_recognized
        async def not_understood(intent_not_recognized: DialogueIntentNotRecognized):
            print(f"Didn't understand \"{intent_not_recognized.input}\" on site {intent_not_recognized.site_id}")

    If an intent hasn't been recognized, the ``not_understood`` function is called
    with the ``intent_not_recognized`` argument. This object holds information about the not recognized intent.
    """

    async def wrapped(inr: DialogueIntentNotRecognized) -> None:
        # Run the user handler; a None return leaves the session untouched.
        message = await function(inr)
        if isinstance(message, EndSession):
            if inr.session_id is not None:
                self.publish(
                    DialogueEndSession(
                        session_id=inr.session_id,
                        text=message.text,
                        custom_data=message.custom_data,
                    )
                )
            else:
                _LOGGER.error(
                    "Cannot end session of dialogue intent not recognized message without session ID."
                )
        elif isinstance(message, ContinueSession):
            if inr.session_id is not None:
                self.publish(
                    DialogueContinueSession(
                        session_id=inr.session_id,
                        text=message.text,
                        intent_filter=message.intent_filter,
                        custom_data=message.custom_data,
                        send_intent_not_recognized=message.send_intent_not_recognized,
                    )
                )
            else:
                _LOGGER.error(
                    "Cannot continue session of dialogue intent not recognized message without session ID."
                )

    self._callbacks_dialogue_intent_not_recognized.append(wrapped)

    return wrapped
def on_topic(self, *topic_names: str):
    """Apply this decorator to a function that you want to act on a received raw MQTT message.

    Arguments:
        topic_names: The MQTT topics you want the function to act on.

    The decorated function has a :class:`TopicData` and a :class:`bytes` object as its arguments.
    The former holds data about the topic and the latter about the payload of the MQTT message.

    Example:

    .. code-block:: python

        @app.on_topic("hermes/+/{site_id}/playBytes/#")
        async def test_topic1(data: TopicData, payload: bytes):
            _LOGGER.debug("topic: %s, site_id: %s", data.topic, data.data.get("site_id"))

    .. note:: The topic names can contain MQTT wildcards (`+` and `#`) or templates (`{foobar}`).
       In the latter case, the value of the named template is available in the decorated function
       as part of the :class:`TopicData` argument.
    """

    def wrapper(function):
        # Uniform async adapter: every registered callback is called with
        # (TopicData, bytes) regardless of the wrapped function's shape.
        async def wrapped(data: TopicData, payload: bytes):
            await function(data, payload)

        replaced_topic_names = []
        for topic_name in topic_names:
            # Maps template name -> index of the topic level it occupies,
            # e.g. "hermes/+/{site_id}/playBytes/#" records {"site_id": 2}.
            named_positions = {}
            parts = topic_name.split(sep="/")
            length = len(parts) - 1  # index of the last topic level

            def placeholder_mapper(part):
                # Replace a "{name}" template level with the MQTT "+" wildcard,
                # remembering which level the template occupied.
                i, token = tuple(part)
                if token.startswith("{") and token.endswith("}"):
                    named_positions[token[1:-1]] = i
                    return "+"
                return token

            parts = list(map(placeholder_mapper, enumerate(parts)))
            # Subscription topic with templates replaced by "+".
            replaced_topic_name = "/".join(parts)

            def regex_mapper(part):
                # Translate one topic level into a regex fragment: the first
                # level gets a "^" anchor, the last a "$"; "+" matches one
                # level, "#" matches the remainder.
                # NOTE(review): the "^[^+#/]" fragment for a leading "+" has no
                # quantifier, so it appears to match only a single character
                # instead of a whole level -- verify against upstream before
                # relying on it.
                i, token = tuple(part)
                value = token
                if i == 0:
                    value = (
                        "^[^+#/]"
                        if token == "+"
                        else "[^/]+"
                        if length == 0 and token == "#"
                        else "^" + token
                    )
                elif i < length:
                    value = "[^/]+" if token == "+" else token
                elif i == length:
                    value = (
                        "[^/]+"
                        if token == "#"
                        else "[^/]+$"
                        if token == "+"
                        else token + "$"
                    )
                return value

            pattern = "/".join(map(regex_mapper, enumerate(parts)))

            if topic_name == pattern[1:-1]:
                # Stripping "^" and "$" gives back the original topic: no
                # wildcards or templates, so register an exact-match callback.
                try:
                    self._callbacks_topic[topic_name].append(wrapped)
                except KeyError:
                    self._callbacks_topic[topic_name] = [wrapped]
            else:
                # Wildcard/template topic: subscribe with the "+"-substituted
                # name and match incoming topics against the compiled regex.
                replaced_topic_names.append(replaced_topic_name)
                if not hasattr(wrapped, "topic_extras"):
                    wrapped.topic_extras = []
                wrapped.topic_extras.append(
                    (
                        re.compile(pattern),
                        named_positions if len(named_positions) > 0 else None,
                    )
                )

        if hasattr(wrapped, "topic_extras"):
            # At least one topic required regex matching.
            self._callbacks_topic_regex.append(wrapped)
        self._additional_topic.extend(replaced_topic_names)

        return wrapped

    return wrapper
def run(self):
    """Run the app. This method:

    - subscribes to all MQTT topics for the functions you decorated;
    - connects to the MQTT broker;
    - starts the MQTT event loop and reacts to received MQTT messages.
    """
    # Subscribe to callbacks
    self._subscribe_callbacks()
    # Try to connect
    # pylint: disable=no-member
    _LOGGER.debug("Connecting to %s:%s", self.args.host, self.args.port)
    hermes_cli.connect(self.mqtt_client, self.args)
    # Start the paho-mqtt network loop in a background thread so the asyncio
    # loop below is free to process queued messages.
    self.mqtt_client.loop_start()
    try:
        # Run main loop
        asyncio.run(self.handle_messages_async())
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the app; exit quietly.
        pass
    finally:
        # Always stop the MQTT network thread, even on error.
        self.mqtt_client.loop_stop()
def notify(self, text: str, site_id: str = "default"):
    """Send a dialogue notification.

    Use this to inform the user of something without expecting a response.

    Arguments:
        text: The text to say.
        site_id: The ID of the site where the text should be said.
    """
    # A notification-type session says the text and expects no reply.
    self.publish(
        DialogueStartSession(init=DialogueNotification(text), site_id=site_id)
    )
import re
from dataclasses import dataclass
from .base import Message
@dataclass
class IntentGraphRequest(Message):
    """Request publication of the intent graph produced by training.

    MQTT topic: ``rhasspy/train/getIntentGraph``

    JSON payload:
        * ``id`` (string): unique id for the request; appended to the reply
          topic (:class:`IntentGraph`).
        * ``siteId`` (string): id of the site where training occurred
          (default ``"default"``).

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'rhasspy/train/getIntentGraph' -m '{"id": "abcd", "siteId": "default"}'

    Example
    -------
    >>> from rhasspyhermes.train import IntentGraphRequest
    >>> request = IntentGraphRequest(id='abcd')
    >>> request.payload()
    '{"id": "abcd", "siteId": "default"}'
    >>> request.topic()
    'rhasspy/train/getIntentGraph'

    Note: this is a Rhasspy-only message.
    """

    id: str
    """Unique request id; appended to the reply topic (:class:`IntentGraph`)."""

    site_id: str = "default"
    """Id of the site where training occurred."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/train/getIntentGraph"``.
        """
        return "rhasspy/train/getIntentGraph"
@dataclass
class IntentGraph(Message):
    """Intent graph resulting from training.

    MQTT topic: ``rhasspy/train/intentGraph/<REQUEST_ID>``

    Payload (binary): gzipped pickle bytes containing a NetworkX_ intent graph.

    .. _NetworkX: https://networkx.github.io/

    Listen with ``mosquitto_sub`` and show the binary payload as hexadecimal::

        mosquitto_sub -h <HOSTNAME> -t 'rhasspy/train/intentGraph' -F %x

    Note: this is a Rhasspy-only message.
    """

    TOPIC_PATTERN = re.compile(r"^rhasspy/train/intentGraph/([^/]+)$")

    graph_bytes: bytes
    """Gzipped pickle bytes containing a NetworkX intent graph"""

    def payload(self) -> bytes:
        """Return the binary payload: gzipped pickle bytes containing a
        NetworkX intent graph.
        """
        return self.graph_bytes

    @classmethod
    def is_binary_payload(cls) -> bool:
        """Return ``True``: the payload of this message is binary, not JSON."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type.

        Keyword Arguments:
            request_id: unique id supplied in the request topic
                (:class:`IntentGraphRequest`); defaults to the MQTT
                multi-level wildcard ``"#"``.

        Example
        -------
        >>> from rhasspyhermes.train import IntentGraph
        >>> IntentGraph.topic(request_id="abcd")
        'rhasspy/train/intentGraph/abcd'
        """
        return "rhasspy/train/intentGraph/{}".format(kwargs.get("request_id", "#"))

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """Return ``True`` if ``topic`` addresses this message type.

        Example
        -------
        >>> from rhasspyhermes.train import IntentGraph
        >>> IntentGraph.is_topic("rhasspy/train/intentGraph/abcd")
        True
        """
        return IntentGraph.TOPIC_PATTERN.match(topic) is not None
from dataclasses import dataclass
from .base import Message
@dataclass
class HandleToggleOn(Message):
    """Enable intent handling.

    MQTT topic: ``hermes/handle/toggleOn``

    JSON payload:
        * ``siteId`` (string): id of the site where intent handling should be
          enabled (default ``"default"``).

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'hermes/handle/toggleOn' -m '{"siteId": "default"}'

    Example
    -------
    >>> from rhasspyhermes.handle import HandleToggleOn
    >>> on = HandleToggleOn()
    >>> on
    HandleToggleOn(site_id='default')
    >>> on.payload()
    '{"siteId": "default"}'
    >>> on.topic()
    'hermes/handle/toggleOn'

    Note: this is a Rhasspy-only message.
    """

    site_id: str = "default"
    """Id of the site where intent handling should be enabled."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"hermes/handle/toggleOn"``.
        """
        return "hermes/handle/toggleOn"
@dataclass
class HandleToggleOff(Message):
    """Disable intent handling.

    MQTT topic: ``hermes/handle/toggleOff``

    JSON payload:
        * ``siteId`` (string): id of the site where intent handling should be
          disabled (default ``"default"``).

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'hermes/handle/toggleOff' -m '{"siteId": "default"}'

    Example
    -------
    >>> from rhasspyhermes.handle import HandleToggleOff
    >>> off = HandleToggleOff()
    >>> off
    HandleToggleOff(site_id='default')
    >>> off.payload()
    '{"siteId": "default"}'
    >>> off.topic()
    'hermes/handle/toggleOff'

    Note: this is a Rhasspy-only message.
    """

    site_id: str = "default"
    """Id of the site where intent handling should be disabled."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"hermes/handle/toggleOff"``.
        """
        return "hermes/handle/toggleOff"
import typing
from dataclasses import dataclass
from dataclasses_json import LetterCase, dataclass_json
from rhasspyhermes.base import Message
@dataclass
class G2pPronounce(Message):
    """Ask for the phonetic pronunciation of words.

    The response arrives in a :class:`G2pPhonemes` message.

    MQTT topic: ``rhasspy/g2p/pronounce``

    JSON payload:
        * ``words`` (list of strings): words to guess pronunciations for.
        * ``id`` (string, optional): unique request id; appended to the reply
          topic (:class:`G2pPhonemes`).
        * ``siteId`` (string): id of the site where pronunciations were
          requested (default ``"default"``).
        * ``sessionId`` (string, optional): id of the active session, if any.
        * ``numGuesses`` (integer): maximum number of guesses to return for
          words not in the dictionary (default 5).

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'rhasspy/g2p/pronounce' -m '{"words": ["word", "sentence"], "id": "test", "siteId": "default", "sessionId": null, "numGuesses": 5}'

    Example
    -------
    >>> from rhasspyhermes.g2p import G2pPronounce
    >>> p = G2pPronounce(words=["word", "sentence"], id="test")
    >>> p.payload()
    '{"words": ["word", "sentence"], "id": "test", "siteId": "default", "sessionId": null, "numGuesses": 5}'
    >>> p.topic()
    'rhasspy/g2p/pronounce'

    Note: this is a Rhasspy-only message.
    """

    words: typing.List[str]
    """Words whose pronunciations should be guessed."""

    id: typing.Optional[str] = None
    """Unique request id; appended to the reply topic (:class:`G2pPhonemes`)."""

    site_id: str = "default"
    """Id of the site to request pronunciations from."""

    session_id: typing.Optional[str] = None
    """Id of the active session, if there is one."""

    num_guesses: int = 5
    """Maximum number of guesses to return for words not in the dictionary."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/g2p/pronounce"``.
        """
        return "rhasspy/g2p/pronounce"
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class G2pPronunciation:
    """Phonetic pronunciation of a single word."""

    phonemes: typing.List[str]
    """Phonemes making up the word's pronunciation."""

    guessed: typing.Optional[bool] = None
    """``True`` if this pronunciation was guessed using a g2p model,
    ``False`` if it came from a pronunciation dictionary."""
@dataclass
class G2pPhonemes(Message):
    """Response to :class:`G2pPronounce` with the requested pronunciations.

    MQTT topic: ``rhasspy/g2p/phonemes``

    JSON payload:
        * ``wordPhonemes`` (dictionary): guessed or looked-up pronunciations.
        * ``id`` (string, optional): unique id of the :class:`G2pPronounce`
          request.
        * ``siteId`` (string): id of the site where pronunciations were
          requested (default ``"default"``).
        * ``sessionId`` (string, optional): id of the active session, if any.

    Listen with::

        mosquitto_sub -h <HOSTNAME> -t 'rhasspy/g2p/phonemes' -v

    Note: this is a Rhasspy-only message.
    """

    word_phonemes: typing.Dict[str, typing.List[G2pPronunciation]]
    """Guessed or looked-up pronunciations, keyed by word."""

    id: typing.Optional[str] = None
    """Unique id from the :class:`G2pPronounce` request."""

    site_id: str = "default"
    """Id of the site where pronunciations were requested."""

    session_id: typing.Optional[str] = None
    """Id of the active session, if there is one."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/g2p/phonemes"``.
        """
        return "rhasspy/g2p/phonemes"
@dataclass
class G2pError(Message):
    """Error reported by the G2P component.

    MQTT topic: ``rhasspy/error/g2p``

    JSON payload:
        * ``error`` (string): description of the error that occurred.
        * ``siteId`` (string): id of the site where the error occurred.
        * ``context`` (string, optional): additional information on the
          context in which the error occurred.
        * ``sessionId`` (string, optional): id of the active session, if any.

    Listen with::

        mosquitto_sub -h <HOSTNAME> -t 'rhasspy/error/g2p' -v

    Note: this is a Rhasspy-only message.
    """

    error: str
    """Description of the error that occurred."""

    site_id: str = "default"
    """Id of the site where the error occurred."""

    context: typing.Optional[str] = None
    """Additional information on the context in which the error occurred."""

    session_id: typing.Optional[str] = None
    """Id of the session, if there is an active session."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/error/g2p"``.
        """
        return "rhasspy/error/g2p"
import typing
from abc import ABCMeta
from dataclasses_json import DataClassJsonMixin, LetterCase, dataclass_json
@dataclass_json(letter_case=LetterCase.CAMEL)
class Message(DataClassJsonMixin, metaclass=ABCMeta):
    """Base class for Hermes messages.
    All classes implementing Hermes messages are subclasses of this class."""

    # NOTE(review): DataClassJsonMixin does not appear to define an __init__
    # accepting a letter_case keyword, so this call would resolve to
    # object.__init__ with unexpected kwargs. Dataclass subclasses generate
    # their own __init__, so this body looks like dead code in practice --
    # confirm before changing.
    def __init__(self, **kwargs):
        DataClassJsonMixin.__init__(self, letter_case=LetterCase.CAMEL)

    def payload(self) -> typing.Union[str, bytes]:
        """Get the payload for this message.

        Returns
        -------
        Union[str, bytes]
            The payload as a JSON string or bytes

        Example
        -------
        >>> from rhasspyhermes.handle import HandleToggleOn
        >>> on = HandleToggleOn(site_id='satellite')
        >>> on.payload()
        '{"siteId": "satellite"}'
        """
        # Default payload: the dataclass serialized as JSON (camelCase keys
        # via the dataclass_json decorator). Binary messages override this.
        return self.to_json(ensure_ascii=False)

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract site id from message topic.

        Arguments
        ---------
        topic
            message topic

        Returns
        -------
        Optional[str]
            The optional site id for this message topic

        Example
        -------
        >>> from rhasspyhermes.audioserver import AudioSessionFrame
        >>> topic = "hermes/audioServer/satellite/abcd/audioSessionFrame"
        >>> AudioSessionFrame.get_site_id(topic)
        'satellite'
        """
        # Base implementation: no site id in the topic. Message types that
        # embed a site id in the topic override this.
        return None

    @classmethod
    def get_session_id(cls, topic: str) -> typing.Optional[str]:
        """Extract session id from message topic.

        Arguments
        ---------
        topic
            message topic

        Returns
        -------
        Optional[str]
            The optional session id for this message topic

        Example
        -------
        >>> from rhasspyhermes.audioserver import AudioSessionFrame
        >>> topic = "hermes/audioServer/satellite/abcd/audioSessionFrame"
        >>> AudioSessionFrame.get_session_id(topic)
        'abcd'
        """
        # Base implementation: no session id in the topic.
        return None

    @classmethod
    def is_binary_payload(cls) -> bool:
        """Check for binary payload of message.

        Returns
        -------
        bool
            ``True`` if message payload is not JSON

        Example
        -------
        >>> from rhasspyhermes.audioserver import AudioFrame
        >>> AudioFrame.is_binary_payload()
        True
        """
        # Default: JSON payload. Binary messages (e.g. WAV audio) override.
        return False

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """Check for site id in topic.

        Returns
        -------
        bool
            ``True`` if site id is part of topic

        Example
        -------
        >>> from rhasspyhermes.asr import AsrTrain
        >>> AsrTrain.is_site_in_topic()
        True
        """
        return False

    @classmethod
    def is_session_in_topic(cls) -> bool:
        """Check for session id in topic.

        Returns
        -------
        bool
            ``True`` if session id is part of topic

        Example
        -------
        >>> from rhasspyhermes.asr import AsrAudioCaptured
        >>> AsrAudioCaptured.is_session_in_topic()
        True
        """
        return False

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            The MQTT topic for this message type

        Example
        -------
        >>> from rhasspyhermes.nlu import NluIntent
        >>> NluIntent.topic()
        'hermes/intent/#'
        """
        # Intentionally no return statement here (yields None): each concrete
        # message type overrides this to supply its MQTT topic.

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """Check whether topic is for this message type.

        Arguments
        ---------
        topic
            message topic

        Returns
        -------
        bool
            ``True`` if topic is for this message type

        Example
        --------
        >>> from rhasspyhermes.wake import HotwordDetected
        >>> HotwordDetected.is_topic("hermes/hotword/precise/detected")
        True
        """
        # Default: exact match against the fixed topic. Message types with
        # wildcard topics override this with a pattern check.
        return topic == cls.topic()
import re
import typing
from dataclasses import dataclass
from enum import Enum
from dataclasses_json import LetterCase, dataclass_json
from .base import Message
class HotwordToggleReason(str, Enum):
    """Why the wake word component is being toggled on or off."""

    UNKNOWN = ""
    """Unspecified reason; overrides all other reasons."""

    DIALOGUE_SESSION = "dialogueSession"
    """A dialogue session is active."""

    PLAY_AUDIO = "playAudio"
    """Audio is currently being played."""

    TTS_SAY = "ttsSay"
    """The text to speech system is currently speaking."""
@dataclass
class HotwordToggleOn(Message):
    """Activate the wake word component, so that a spoken wake word triggers a
    :class:`HotwordDetected` message.

    MQTT topic: ``hermes/hotword/toggleOn``

    JSON payload:
        * ``siteId`` (string): id of the site where the wake word component
          should be enabled.
        * ``reason`` (string): reason for enabling the wake word component.

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'hermes/hotword/toggleOn' -m '{"siteId": "default", "reason": "dialogueSession"}'
    """

    site_id: str = "default"
    """Id of the site where the wake word component should be enabled."""

    # ------------
    # Rhasspy only
    # ------------

    reason: HotwordToggleReason = HotwordToggleReason.UNKNOWN
    """Reason for enabling the wake word component (Rhasspy-only attribute)."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"hermes/hotword/toggleOn"``.
        """
        return "hermes/hotword/toggleOn"
@dataclass
class HotwordToggleOff(Message):
    """Deactivate the wake word component, so that a spoken wake word won't
    trigger a :class:`HotwordDetected` message.

    MQTT topic: ``hermes/hotword/toggleOff``

    JSON payload:
        * ``siteId`` (string): id of the site where the wake word component
          should be disabled.
        * ``reason`` (string): reason for disabling the wake word component.

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'hermes/hotword/toggleOff' -m '{"siteId": "default", "reason": "dialogueSession"}'
    """

    site_id: str = "default"
    """Id of the site where the wake word component should be disabled."""

    # ------------
    # Rhasspy only
    # ------------

    reason: HotwordToggleReason = HotwordToggleReason.UNKNOWN
    """Reason for disabling the wake word component (Rhasspy-only attribute)."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"hermes/hotword/toggleOff"``.
        """
        return "hermes/hotword/toggleOff"
@dataclass
class HotwordDetected(Message):
    """Sent by the wake word component when it has detected a specific wake word.

    MQTT topic: ``hermes/hotword/<WAKEWORD_ID>/detected``

    JSON payload:
        * ``modelId`` (string): id of the model that triggered the wake word.
        * ``modelVersion`` (string): version of the model.
        * ``modelType`` (string): ``"universal"`` or ``"personal"``.
        * ``currentSensitivity`` (float): sensitivity configured in the model
          at the time of the detection.
        * ``siteId`` (string): id of the site where the wake word was detected.
        * ``sessionId`` (string, optional): id of the dialogue session created
          after detection.
        * ``send_audio_captured`` (boolean, optional): ``True`` if audio
          captured from the ASR should be emitted on
          ``rhasspy/asr/{site_id}/{session_id}/audioCaptured``.
        * ``lang`` (string, optional): language of the detected wake word;
          copied by the dialogue manager into subsequent ASR and NLU messages.

    Listen with::

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/hotword/default/detected'
    """

    TOPIC_PATTERN = re.compile(r"^hermes/hotword/([^/]+)/detected$")

    model_id: str
    """Id of the model that triggered the wake word."""

    model_version: str = ""
    """Version of the model."""

    model_type: str = "personal"
    """Type of the model: ``"universal"`` or ``"personal"``."""

    current_sensitivity: float = 1.0
    """Sensitivity configured in the model at the time of the detection."""

    site_id: str = "default"
    """Id of the site where the wake word was detected."""

    # ------------
    # Rhasspy only
    # ------------

    session_id: typing.Optional[str] = None
    """Desired id of the dialogue session created after detection.
    Leave empty to have one auto-generated (Rhasspy-only attribute)."""

    send_audio_captured: typing.Optional[bool] = None
    """``True`` if audio captured from the ASR should be emitted on
    ``rhasspy/asr/{site_id}/{session_id}/audioCaptured`` (Rhasspy-only
    attribute)."""

    lang: typing.Optional[str] = None
    """Language of the detected wake word; copied by the dialogue manager
    into subsequent ASR and NLU messages (Rhasspy-only attribute)."""

    custom_entities: typing.Optional[typing.Dict[str, typing.Any]] = None
    """User-defined entities to be set in the recognized intent; copied by
    the dialogue manager into subsequent ASR and NLU messages (Rhasspy-only
    attribute)."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for the given wake word.

        Keyword Arguments:
            wakeword_id: id of the wake word; defaults to the MQTT
                single-level wildcard ``"+"``.

        Example
        -------
        >>> from rhasspyhermes.wake import HotwordDetected
        >>> HotwordDetected.topic(wakeword_id="example-02.wav")
        'hermes/hotword/example-02.wav/detected'
        """
        return "hermes/hotword/{}/detected".format(kwargs.get("wakeword_id", "+"))

    @classmethod
    def get_wakeword_id(cls, topic: str) -> str:
        """Extract the wake word id from an MQTT topic.

        Example
        -------
        >>> from rhasspyhermes.wake import HotwordDetected
        >>> HotwordDetected.get_wakeword_id("hermes/hotword/example-02.wav/detected")
        'example-02.wav'
        """
        match = HotwordDetected.TOPIC_PATTERN.match(topic)
        assert match, "Not a detected topic"
        return match.group(1)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """Return ``True`` if ``topic`` matches this message type's template."""
        return HotwordDetected.TOPIC_PATTERN.match(topic) is not None
# -----------------------------------------------------------------------------
# Rhasspy Only Messages
# -----------------------------------------------------------------------------
@dataclass
class HotwordError(Message):
    """Error reported by the wake word component.

    MQTT topic: ``hermes/error/hotword``

    JSON payload:
        * ``error`` (string): description of the error that occurred.
        * ``siteId`` (string): id of the site where the error occurred.
        * ``context`` (string, optional): additional information on the
          context in which the error occurred.
        * ``sessionId`` (string, optional): id of the active session, if any.

    Listen with::

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/error/hotword'

    Note: this is a Rhasspy-only message.
    """

    error: str
    """Description of the error that occurred."""

    site_id: str = "default"
    """Id of the site where the error occurred."""

    context: typing.Optional[str] = None
    """Additional information on the context in which the error occurred."""

    session_id: typing.Optional[str] = None
    """Id of the session, if there is an active session."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"hermes/error/hotword"``.
        """
        return "hermes/error/hotword"
@dataclass
class GetHotwords(Message):
    """Request the list of available hotwords. The wake word component
    responds with a :class:`Hotwords` message.

    MQTT topic: ``rhasspy/hotword/getHotwords``

    JSON payload:
        * ``siteId`` (string): id of the site where the wake word component
          exists.
        * ``id`` (string, optional): unique id passed to the response in the
          :class:`Hotwords` message.

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'rhasspy/hotword/getHotwords' -m '{"siteId": "default", "id": "foobar"}'

    Note: this is a Rhasspy-only message.
    """

    site_id: str = "default"
    """Id of the site where the wake word component exists."""

    id: typing.Optional[str] = None
    """Unique id passed to the response in the :class:`Hotwords` message."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/hotword/getHotwords"``.
        """
        return "rhasspy/hotword/getHotwords"
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class Hotword:
    """Description of a single available hotword."""

    model_id: str
    """Unique id of the hotword model."""

    model_words: str
    """Actual words used to activate the hotword."""

    model_version: str = ""
    """Version of the model."""

    model_type: str = "personal"
    """Type of the model (personal, universal)."""
@dataclass
class Hotwords(Message):
    """List of available hotwords, sent by the wake word component in
    response to a :class:`GetHotwords` request.

    MQTT topic: ``rhasspy/hotword/hotwords``

    JSON payload:
        * ``models`` (list of JSON objects): the available hotwords.
        * ``siteId`` (string): id of the site where hotwords were requested.
        * ``id`` (string, optional): unique id passed from the request in the
          :class:`GetHotwords` message.

    Listen with::

        mosquitto_sub -h <HOSTNAME> -v -t 'rhasspy/hotword/hotwords'

    Note: this is a Rhasspy-only message.
    """

    models: typing.List[Hotword]
    """The list of available hotwords."""

    site_id: str = "default"
    """Id of the site where hotwords were requested."""

    id: typing.Optional[str] = None
    """Unique id passed from the request in the :class:`GetHotwords` message."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/hotword/hotwords"``.
        """
        return "rhasspy/hotword/hotwords"
@dataclass
class RecordHotwordExample(Message):
    """Request to record an example of a hotword. The wake word component
    responds with a :class:`HotwordExampleRecorded` message.

    MQTT topic: ``rhasspy/hotword/recordExample``

    JSON payload:
        * ``id`` (string): unique id used in the response message
          (:class:`HotwordExampleRecorded`).
        * ``siteId`` (string): id of the site where the wake word component
          exists.

    Try it from the command line::

        mosquitto_pub -h <HOSTNAME> -t 'rhasspy/hotword/recordExample' -m '{"siteId": "default", "id": "foobar"}'

    Note: this is a Rhasspy-only message.
    """

    id: str
    """Unique id used in the response message (:class:`HotwordExampleRecorded`)."""

    site_id: str = "default"
    """Id of the site where the wake word component exists."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type:
        ``"rhasspy/hotword/recordExample"``.
        """
        return "rhasspy/hotword/recordExample"
@dataclass
class HotwordExampleRecorded(Message):
"""Response when a hotword example has been recorded. Sent by the wake word component
in response to a :class:`RecordHotwordExample` message.
.. admonition:: MQTT message
Topic
``rhasspy/hotword/<SITE_ID>/exampleRecorded/<REQUEST_ID>``
Payload (binary)
Audio from the recorded sample in WAV format.
Subscribe to this message type with ``mosquitto_sub`` and show the binary payload as hexadecimal numbers:
.. code-block:: shell
mosquitto_sub -h <HOSTNAME> -t 'rhasspy/hotword/<SITE_ID>/exampleRecorded/<REQUEST_ID>' -F %x
Note
----
This is a Rhasspy-only message.
"""
TOPIC_PATTERN = re.compile(r"^rhasspy/hotword/([^/]+)/exampleRecorded/([^/]+)$")
wav_bytes: bytes
"""Audio from recorded sample in WAV format."""
def payload(self) -> typing.Union[str, bytes]:
"""Get binary/string for this message."""
return self.wav_bytes
@classmethod
def is_binary_payload(cls) -> bool:
"""True if payload is not JSON.
Returns
-------
bool
``True``
"""
return True
@classmethod
def is_site_in_topic(cls) -> bool:
"""True if site id is in topic.
Returns
-------
bool
``True``
"""
return True
@classmethod
def topic(cls, **kwargs) -> str:
"""Get MQTT topic for this message type.
Arguments
---------
site_id: str
The id of the site where the wake word component exists.
request_id: str
Unique id of the request message.
Returns
-------
str
MQTT topic for this message type with the given site id and request id.
Example
-------
>>> from rhasspyhermes.wake import HotwordExampleRecorded
>>> HotwordExampleRecorded.topic(site_id="default", request_id="foobar")
'rhasspy/hotword/default/exampleRecorded/foobar'
"""
site_id = kwargs.get("site_id", "+")
request_id = kwargs.get("request_id", "#")
return f"rhasspy/hotword/{site_id}/exampleRecorded/{request_id}"
@classmethod
def get_site_id(cls, topic: str) -> typing.Optional[str]:
"""Get site id from MQTT topic.
Arguments
---------
topic
MQTT topic.
Returns
-------
str
Site ID extracted from the MQTT topic.
Example
-------
>>> from rhasspyhermes.wake import HotwordExampleRecorded
>>> HotwordExampleRecorded.get_site_id("rhasspy/hotword/default/exampleRecorded/foobar")
'default'
"""
match = re.match(HotwordExampleRecorded.TOPIC_PATTERN, topic)
assert match, "Not an exampleRecorded topic"
return match.group(1)
@classmethod
def get_request_id(cls, topic: str) -> str:
    """Get request id from MQTT topic.

    Arguments
    ---------
    topic
        MQTT topic.

    Returns
    -------
    str
        Request ID extracted from the MQTT topic.

    Raises
    ------
    AssertionError
        If ``topic`` is not an exampleRecorded topic.

    Example
    -------
    >>> from rhasspyhermes.wake import HotwordExampleRecorded
    >>> HotwordExampleRecorded.get_request_id("rhasspy/hotword/default/exampleRecorded/foobar")
    'foobar'
    """
    match = re.match(HotwordExampleRecorded.TOPIC_PATTERN, topic)
    if match is None:
        # Explicit raise so the check is not stripped under ``python -O``.
        raise AssertionError("Not an exampleRecorded topic")
    return match.group(2)
@classmethod
def is_topic(cls, topic: str) -> bool:
    """True if ``topic`` matches the exampleRecorded topic template."""
    # NOTE: the original source line was corrupted by extraction metadata
    # appended after the return statement; restored here.
    return re.match(HotwordExampleRecorded.TOPIC_PATTERN, topic) is not None
import typing
from collections.abc import Mapping
from dataclasses import dataclass
from enum import Enum
from dataclasses_json import DataClassJsonMixin, LetterCase, dataclass_json
from dataclasses_json.core import Json
from .base import Message
class DialogueActionType(str, Enum):
    """Type of session init objects.

    Inherits from ``str`` so members compare equal to their raw JSON values
    (e.g. ``DialogueActionType.ACTION == "action"``).
    """

    ACTION = "action"
    """Use this type when you need the user to respond."""

    NOTIFICATION = "notification"
    """Use this type when you only want to inform the user of something without
    expecting a response."""
# Serialized with camelCase keys (canBeEnqueued, intentFilter, ...).
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DialogueAction(DataClassJsonMixin):
    """Dialogue session action: an interaction that expects a user response."""

    can_be_enqueued: bool
    """If true, the session will start when there is no pending one on this
    site. Otherwise, the session is just dropped if there is running one."""

    type: DialogueActionType = DialogueActionType.ACTION
    """This value is always :class:`DialogueActionType.ACTION`."""

    text: typing.Optional[str] = None
    """Text that the TTS should say at the beginning of the session."""

    intent_filter: typing.Optional[typing.List[str]] = None
    """A list of intents names to restrict the NLU resolution on the first query."""

    send_intent_not_recognized: bool = False
    """Indicates whether the dialogue manager should handle non-recognized
    intents by itself or send them for the client to handle."""
# Serialized with camelCase keys.
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DialogueNotification(DataClassJsonMixin):
    """Dialogue session notification: speak text without expecting a response."""

    text: str
    """Text the TTS should say."""

    type: DialogueActionType = DialogueActionType.NOTIFICATION
    """This value is always :class:`DialogueActionType.NOTIFICATION`."""
class DialogueSessionTerminationReason(str, Enum):
    """The reason why the session was ended.

    Inherits from ``str`` so members compare equal to their raw JSON values.
    """

    NOMINAL = "nominal"
    """The session ended as expected (a :class:`DialogueEndSession` message was received)."""

    ABORTED_BY_USER = "abortedByUser"
    """The session was aborted by the user."""

    INTENT_NOT_RECOGNIZED = "intentNotRecognized"
    """The session ended because no intent was successfully detected."""

    TIMEOUT = "timeout"
    """The session timed out because there was no response from one of the components or
    no :class:`DialogueContinueSession` or :class:`DialogueEndSession` message in a timely manner."""

    ERROR = "error"
    """The session failed with an error."""
# NOTE(review): unlike DialogueAction/DialogueNotification this class does not
# inherit DataClassJsonMixin; the decorator still provides from_dict/to_dict at
# runtime, but static type checkers will not see them — confirm intentional.
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DialogueSessionTermination:
    """Dialogue session termination type."""

    reason: DialogueSessionTerminationReason
    """The reason why the session was ended."""
# -----------------------------------------------------------------------------
@dataclass
class DialogueStartSession(Message):
    """Start a dialogue session.

    You can send this message to programmatically initiate a new session. The
    Dialogue Manager will start the session by asking the TTS to say the text
    (if any) and wait for the answer of the user.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/startSession``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - siteId
              - String
              - The id of the site where to start the session.
            * - init
              - JSON object
              - Session initialization description.
            * - customData
              - String (optional)
              - Additional information that can be provided by the handler. Each message
                related to the new session - sent by the Dialogue Manager - will contain
                this data.
            * - lang
              - String (optional)
              - Language of the session.

    Publish this message type with ``mosquitto_pub``:

    .. code-block:: shell

        mosquitto_pub -h <HOSTNAME> -t 'hermes/dialogueManager/startSession' -m '{"siteId": "livingroom", "init": {"type": "notification", "text": "Ready"}, "lang": "en"}'

    Example
    -------
    >>> from rhasspyhermes.dialogue import DialogueStartSession, DialogueNotification
    >>> start_session = DialogueStartSession(init=DialogueNotification(text="Ready"), site_id="livingroom", lang="en")
    >>> start_session
    DialogueStartSession(init=DialogueNotification(text='Ready', type=<DialogueActionType.NOTIFICATION: 'notification'>), site_id='livingroom', custom_data=None, lang='en')
    >>> start_session.payload()
    '{"init": {"text": "Ready", "type": "notification"}, "siteId": "livingroom", "customData": null, "lang": "en"}'
    """

    init: typing.Union[DialogueAction, DialogueNotification]
    """Session initialization description."""

    site_id: str = "default"
    """The id of the site where to start the session."""

    custom_data: typing.Optional[str] = None
    """Additional information that can be provided by the handler. Each message
    related to the new session - sent by the Dialogue Manager - will contain
    this data."""

    lang: typing.Optional[str] = None
    """Language of the session.

    Note
    ----
    This is a Rhasspy-only attribute.
    """

    # pylint: disable=W0221,W0237
    @classmethod
    def from_dict(
        cls: typing.Type["DialogueStartSession"],
        message_dict: Json,
        *,
        infer_missing=False
    ) -> "DialogueStartSession":
        """Parse a :class:`DialogueStartSession` from a JSON dictionary.

        The ``init`` sub-object is deserialized into the concrete
        :class:`DialogueNotification` or :class:`DialogueAction` class based
        on its ``type`` field.
        """
        assert isinstance(message_dict, Mapping)

        # Copy before popping so the caller's dictionary is not mutated
        # (the previous implementation removed "init" from the input in place).
        message_dict = dict(message_dict)
        init = message_dict.pop("init")

        # DialogueActionType is a str Enum, so this compares against "notification".
        if init["type"] == DialogueActionType.NOTIFICATION:
            message_dict["init"] = DialogueNotification.from_dict(init)
        else:
            message_dict["init"] = DialogueAction.from_dict(init)

        return super(DialogueStartSession, cls).from_dict(
            message_dict, infer_missing=infer_missing
        )

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/startSession"``
        """
        return "hermes/dialogueManager/startSession"
@dataclass
class DialogueSessionQueued(Message):
    """Sent by the dialogue manager when it receives a :class:`DialogueStartSession` message
    and the site where the interaction should take place is busy. When the site is free again,
    the session will be started.

    Only :class:`DialogueStartSession` messages with an ``init`` attribute of the following type
    can be enqueued:

    - :class:`DialogueNotification`
    - :class:`DialogueAction` with the attribute ``canBeEnqueued`` set to ``True``.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/sessionQueued``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - sessionId
              - String
              - The id of the session that was enqueued.
            * - siteId
              - String
              - The id of the site where the user interaction will take place.
            * - customData
              - String (optional)
              - Custom data provided in the :class:`DialogueStartSession` message.

    Subscribe to this message type with ``mosquitto_sub``:

    .. code-block:: shell

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/dialogueManager/sessionQueued'
    """

    session_id: str
    """The id of the session that was enqueued."""

    site_id: str = "default"
    """The id of the site where the user interaction will take place."""

    custom_data: typing.Optional[str] = None
    """Custom data provided in the :class:`DialogueStartSession` message."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/sessionQueued"``
        """
        return "hermes/dialogueManager/sessionQueued"
@dataclass
class DialogueSessionStarted(Message):
    """Sent when a dialogue session has been started.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/sessionStarted``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - sessionId
              - String
              - The id of the session that was started.
            * - siteId
              - String
              - The id of the site where the user interaction is taking place.
            * - customData
              - String (optional)
              - Custom data provided in the :class:`DialogueStartSession` message.
            * - lang
              - String (optional)
              - Language of the session.

    Subscribe to this message type with ``mosquitto_sub``:

    .. code-block:: shell

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/dialogueManager/sessionStarted'
    """

    session_id: str
    """The id of the session that was started."""

    site_id: str = "default"
    """The id of the site where the user interaction is taking place."""

    custom_data: typing.Optional[str] = None
    """Custom data provided in the :class:`DialogueStartSession` message."""

    lang: typing.Optional[str] = None
    """Language of the session.

    Note
    ----
    This is a Rhasspy-only attribute.
    """

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/sessionStarted"``
        """
        return "hermes/dialogueManager/sessionStarted"
@dataclass
class DialogueContinueSession(Message):
    """Sent when a dialogue session should be continued.

    You should send this message after receiving a :class:`rhasspyhermes.nlu.NluIntent` message
    if you want to continue the session. This can be used for example to ask
    additional information to the user.

    Make sure to use the same ``sessionId`` as the original :class:`rhasspyhermes.nlu.NluIntent`
    message.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/continueSession``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - sessionId
              - String
              - The id of the session to continue.
            * - customData
              - String (optional)
              - An update to the session's custom data. If not provided, the custom data
                will stay the same.
            * - text
              - String (optional)
              - The text the TTS should say to start this additional request of the
                session.
            * - intentFilter
              - List of Strings (optional)
              - A list of intent names to restrict the NLU resolution on the answer of
                this query.
            * - sendIntentNotRecognized
              - Boolean
              - Indicates whether the dialogue manager should handle non recognized
                intents by itself or send them for the client to handle.
            * - slot
              - String (optional)
              - Unused.
            * - lang
              - String (optional)
              - Language of the session.
                Leave empty to use setting from start of session.

    Publish this message type with ``mosquitto_pub``:

    .. code-block:: shell

        mosquitto_pub -h <HOSTNAME> -t 'hermes/dialogueManager/continueSession' -m '{"sessionId": "foobar", "customData": null, "text": "Are you sure?", "intentFilter": null, "sendIntentNotRecognized": false, "slot": null, "lang": null}'

    Example
    -------
    >>> from rhasspyhermes.dialogue import DialogueContinueSession
    >>> session = DialogueContinueSession(session_id="foobar", text="Are you sure?")
    >>> session
    DialogueContinueSession(session_id='foobar', custom_data=None, text='Are you sure?', intent_filter=None, send_intent_not_recognized=False, slot=None, lang=None)
    >>> session.payload()
    '{"sessionId": "foobar", "customData": null, "text": "Are you sure?", "intentFilter": null, "sendIntentNotRecognized": false, "slot": null, "lang": null}'
    """

    session_id: str
    """The id of the session to continue."""

    custom_data: typing.Optional[str] = None
    """An update to the session's custom data. If not provided, the custom data
    will stay the same."""

    text: typing.Optional[str] = None
    """The text the TTS should say to start this additional request of the
    session."""

    intent_filter: typing.Optional[typing.List[str]] = None
    """A list of intent names to restrict the NLU resolution on the answer of
    this query."""

    send_intent_not_recognized: bool = False
    """Indicates whether the dialogue manager should handle non recognized
    intents by itself or send them for the client to handle."""

    slot: typing.Optional[str] = None
    """Unused."""

    # ------------
    # Rhasspy only
    # ------------
    lang: typing.Optional[str] = None
    """Language of the session.

    Leave empty to use setting from start of session.

    Note
    ----
    This is a Rhasspy-only attribute.
    """

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/continueSession"``
        """
        return "hermes/dialogueManager/continueSession"
@dataclass
class DialogueEndSession(Message):
    """Sent when a dialogue session should be ended.

    You should send this message after receiving a :class:`rhasspyhermes.nlu.NluIntent` message
    if you want to end the session.

    Make sure to use the same ``sessionId`` as the original :class:`rhasspyhermes.nlu.NluIntent`
    message.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/endSession``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - sessionId
              - String
              - The id of the session to end.
            * - customData
              - String (optional)
              - An update to the session's custom data. If not provided, the custom data
                will stay the same.
            * - text
              - String (optional)
              - The text the TTS should say to end the session.

    Publish this message type with ``mosquitto_pub``:

    .. code-block:: shell

        mosquitto_pub -h <HOSTNAME> -t 'hermes/dialogueManager/endSession' -m '{"sessionId": "foobar", "customData": null, "text": "OK, turning off the light"}'

    Example
    -------
    >>> from rhasspyhermes.dialogue import DialogueEndSession
    >>> session = DialogueEndSession(session_id="foobar", text="OK, turning off the light")
    >>> session
    DialogueEndSession(session_id='foobar', text='OK, turning off the light', custom_data=None)
    >>> session.payload()
    '{"sessionId": "foobar", "text": "OK, turning off the light", "customData": null}'
    """

    session_id: str
    """The id of the session to end."""

    text: typing.Optional[str] = None
    """The text the TTS should say to end the session."""

    custom_data: typing.Optional[str] = None
    """An update to the session's custom data. If not provided, the custom data
    will stay the same."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/endSession"``
        """
        return "hermes/dialogueManager/endSession"
@dataclass
class DialogueSessionEnded(Message):
    """Sent when a dialogue session has ended.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/sessionEnded``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - termination
              - JSON Object
              - Structured description of why the session has been ended. See
                :class:`DialogueSessionTerminationReason` for the possible values.
            * - sessionId
              - String
              - The id of the ended session.
            * - siteId
              - String
              - The id of the site where the user interaction took place.
            * - customData
              - String (optional)
              - Custom data provided in the :class:`DialogueStartSession`,
                :class:`DialogueContinueSession` or :class:`DialogueEndSession` messages.

    Subscribe to this message type with ``mosquitto_sub``:

    .. code-block:: shell

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/dialogueManager/sessionEnded'
    """

    termination: DialogueSessionTermination
    """Structured description of why the session has been ended."""

    session_id: str
    """The id of the ended session."""

    site_id: str = "default"
    """The id of the site where the user interaction took place."""

    custom_data: typing.Optional[str] = None
    """Custom data provided in the :class:`DialogueStartSession`,
    :class:`DialogueContinueSession` or :class:`DialogueEndSession` messages."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/sessionEnded"``
        """
        return "hermes/dialogueManager/sessionEnded"
@dataclass
class DialogueIntentNotRecognized(Message):
    """Intent not recognized.

    Only sent when ``send_intent_not_recognized`` is ``True`` in the
    :class:`DialogueStartSession` or :class:`DialogueContinueSession` message.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/intentNotRecognized``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - sessionId
              - String
              - The id of the session that generated this event.
            * - siteId
              - String
              - The id of the site where the user interaction took place.
            * - input
              - String (optional)
              - The NLU input that generated this event.
            * - customData
              - String (optional)
              - Custom data provided in the :class:`DialogueStartSession` or
                :class:`DialogueContinueSession` messages.

    Subscribe to this message type with ``mosquitto_sub``:

    .. code-block:: shell

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/dialogueManager/intentNotRecognized'
    """

    session_id: str
    """The id of the session that generated this event."""

    site_id: str = "default"
    """The id of the site where the user interaction took place."""

    # NOTE: field name mirrors the Hermes "input" key and shadows the builtin.
    input: typing.Optional[str] = None
    """The NLU input that generated this event."""

    custom_data: typing.Optional[str] = None
    """Custom data provided in the :class:`DialogueStartSession` or
    :class:`DialogueContinueSession` messages."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/intentNotRecognized"``
        """
        return "hermes/dialogueManager/intentNotRecognized"
# Serialized with camelCase keys (intentId).
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class DialogueConfigureIntent:
    """Enable/disable a specific intent in a :class:`DialogueConfigure` message."""

    intent_id: str
    """Name of the intent to enable/disable."""

    enable: bool
    """``True`` if the intent should be enabled."""
@dataclass
class DialogueConfigure(Message):
    """Enable/disable specific intents for future dialogue sessions.

    If an intent is enabled, the :class:`rhasspyhermes.nlu.NluIntent` message is triggered when
    the intent is detected.

    Rhasspy enables all intents by default unless specified otherwise.

    .. admonition:: MQTT message

        Topic
            ``hermes/dialogueManager/configure``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - intents
              - List of JSON Objects
              - The list of intents and whether to enable/disable them.
            * - siteId
              - String
              - The id of the site to configure.

    Publish this message type with ``mosquitto_pub``:

    .. code-block:: shell

        mosquitto_pub -h <HOSTNAME> -t 'hermes/dialogueManager/configure' -m '{"intents": [{"intentId": "GetTime", "enable": true}, {"intentId": "GetTemperature", "enable": false}], "siteId": "livingroom"}'

    Example
    -------
    >>> from rhasspyhermes.dialogue import DialogueConfigureIntent, DialogueConfigure
    >>> configure = DialogueConfigure([DialogueConfigureIntent("GetTime", True), DialogueConfigureIntent("GetTemperature", False)], "livingroom")
    >>> configure
    DialogueConfigure(intents=[DialogueConfigureIntent(intent_id='GetTime', enable=True), DialogueConfigureIntent(intent_id='GetTemperature', enable=False)], site_id='livingroom')
    >>> configure.payload()
    '{"intents": [{"intentId": "GetTime", "enable": true}, {"intentId": "GetTemperature", "enable": false}], "siteId": "livingroom"}'
    """

    intents: typing.List[DialogueConfigureIntent]
    """The list of intents and whether to enable/disable them."""

    site_id: str = "default"
    """The id of the site to configure."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/dialogueManager/configure"``
        """
        return "hermes/dialogueManager/configure"
# ----------------------------------------------------------------------------
# Rhasspy-Only Messages
# ----------------------------------------------------------------------------
@dataclass
class DialogueError(Message):
    """This message is published by the dialogue manager component if an error has occurred.

    .. admonition:: MQTT message

        Topic
            ``hermes/error/dialogueManager``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - error
              - String
              - A description of the error that occurred.
            * - siteId
              - String
              - The id of the site where the error occurred.
            * - context
              - String (optional)
              - Additional information on the context in which the error occurred.
            * - sessionId
              - String (optional)
              - The id of the session, if there is an active session.

    Subscribe to this message type with ``mosquitto_sub``:

    .. code-block:: shell

        mosquitto_sub -h <HOSTNAME> -v -t 'hermes/error/dialogueManager'

    Example
    -------
    >>> from rhasspyhermes.dialogue import DialogueError
    >>> dialogue_error = DialogueError(error="Unexpected error")
    >>> dialogue_error.topic()
    'hermes/error/dialogueManager'
    >>> dialogue_error.payload()
    '{"error": "Unexpected error", "siteId": "default", "context": null, "sessionId": null}'

    Note
    ----
    This is a Rhasspy-only message.
    """

    error: str
    """A description of the error that occurred."""

    site_id: str = "default"
    """The id of the site where the error occurred."""

    context: typing.Optional[str] = None
    """Additional information on the context in which the error occurred."""

    session_id: typing.Optional[str] = None
    """The id of the session, if there is an active session."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/error/dialogueManager"``
        """
        # The original source line was corrupted by extraction metadata
        # appended after the return statement; restored here.
        return "hermes/error/dialogueManager"
import re
import typing
from dataclasses import dataclass
from enum import Enum
from .base import Message
from .nlu import AsrToken
class AsrToggleReason(str, Enum):
    """Reason for ASR toggle on/off.

    Inherits from ``str`` so members compare equal to their raw JSON values.

    Values
    ------
    UNKNOWN
        Overrides all other reasons

    DIALOGUE_SESSION
        Dialogue session is active

    PLAY_AUDIO
        Audio is currently playing

    TTS_SAY
        Text to speech system is currently speaking
    """

    UNKNOWN = ""
    DIALOGUE_SESSION = "dialogueSession"
    PLAY_AUDIO = "playAudio"
    TTS_SAY = "ttsSay"
@dataclass
class AsrToggleOn(Message):
    """Activate the ASR component.

    Attributes
    ----------
    site_id: str = "default"
        The id of the site where ASR should be turned on

    reason: AsrToggleReason = UNKNOWN
        Reason why ASR was toggled on (Rhasspy only)
    """

    site_id: str = "default"

    # ------------
    # Rhasspy only
    # ------------
    reason: AsrToggleReason = AsrToggleReason.UNKNOWN

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/asr/toggleOn"``
        """
        return "hermes/asr/toggleOn"
@dataclass
class AsrToggleOff(Message):
    """Deactivate the ASR component.

    Attributes
    ----------
    site_id: str = "default"
        The id of the site where ASR should be turned off

    reason: AsrToggleReason = UNKNOWN
        Reason why ASR was toggled off (Rhasspy only)
    """

    site_id: str = "default"

    # ------------
    # Rhasspy only
    # ------------
    reason: AsrToggleReason = AsrToggleReason.UNKNOWN

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/asr/toggleOff"``
        """
        return "hermes/asr/toggleOff"
@dataclass
class AsrStartListening(Message):
    """Tell the ASR component to start listening.

    Attributes
    ----------
    site_id: str = "default"
        The site that must be listened too

    session_id: Optional[str] = None
        An optional session id if there is a related session

    lang: Optional[str] = None
        Language of the incoming audio stream.
        Typically set in hotword detected or dialogue startSession messages.

    stop_on_silence: bool = True
        If true, ASR should automatically detect end of voice command (Rhasspy only)

    send_audio_captured: bool = False
        If true, ASR emits asr/audioCaptured message with recorded audio (Rhasspy only)

    wakeword_id: Optional[str] = None
        Optional id of wakeword used to activate ASR (Rhasspy only)

    intent_filter: Optional[List[str]] = None
        A list of intent names to restrict the ASR on (Rhasspy only)
    """

    site_id: str = "default"
    session_id: typing.Optional[str] = None
    lang: typing.Optional[str] = None

    # ------------
    # Rhasspy only
    # ------------
    stop_on_silence: bool = True
    send_audio_captured: bool = False
    wakeword_id: typing.Optional[str] = None
    intent_filter: typing.Optional[typing.List[str]] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/asr/startListening"``
        """
        return "hermes/asr/startListening"
@dataclass
class AsrStopListening(Message):
    """Tell the ASR component to stop listening.

    Attributes
    ----------
    site_id: str = "default"
        The id of the site where the ASR should stop listening

    session_id: Optional[str] = None
        The id of the session, if there is an active session
    """

    site_id: str = "default"
    session_id: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/asr/stopListening"``
        """
        return "hermes/asr/stopListening"
@dataclass
class AsrTextCaptured(Message):
    """Full ASR transcription results.

    Attributes
    ----------
    text: str
        The text captured

    likelihood: float
        The likelihood of the capture

    seconds: float
        The duration it took to do the processing

    site_id: str = "default"
        The id of the site where the text was captured

    session_id: Optional[str] = None
        The id of the session, if there is an active session

    wakeword_id: Optional[str] = None
        Optional id of wakeword used to activate ASR (Rhasspy only)

    asr_tokens: Optional[List[List[AsrToken]]] = None
        Structured description of the tokens the ASR captured on for this intent
        (Rhasspy only)

    lang: Optional[str] = None
        Language of the session (Rhasspy only)
    """

    text: str
    likelihood: float
    seconds: float
    site_id: str = "default"
    session_id: typing.Optional[str] = None

    # ------------
    # Rhasspy only
    # ------------
    wakeword_id: typing.Optional[str] = None
    asr_tokens: typing.Optional[typing.List[typing.List[AsrToken]]] = None
    lang: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/asr/textCaptured"``
        """
        return "hermes/asr/textCaptured"
# ----------------------------------------------------------------------------
# Rhasspy-only Messages
# ----------------------------------------------------------------------------
@dataclass
class AsrError(Message):
    """Error from ASR component.

    Attributes
    ----------
    error: str
        A description of the error that occurred

    site_id: str = "default"
        The id of the site where the error occurred

    context: Optional[str] = None
        Additional information on the context in which the error occurred

    session_id: Optional[str] = None
        The id of the session, if there is an active session
    """

    error: str
    site_id: str = "default"
    context: typing.Optional[str] = None
    session_id: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/error/asr"``
        """
        return "hermes/error/asr"
@dataclass
class AsrTrain(Message):
    """Request to retrain ASR from intent graph.

    Attributes
    ----------
    graph_path: str
        Path to the graph file

    id: Optional[str] = None
        Unique id for training request

    graph_format: typing.Optional[str] = None
        Optional format of the graph file

    sentences: Optional[Dict[str, str]] = None
        Optional training sentences
        (presumably keyed by file/group name — verify against trainer)

    slots: Optional[Dict[str, List[str]]] = None
        Optional slot values
        (presumably keyed by slot name — verify against trainer)
    """

    # Compiled topic template: group(1) = site id.
    TOPIC_PATTERN = re.compile(r"^rhasspy/asr/([^/]+)/train$")

    graph_path: str
    id: typing.Optional[str] = None
    graph_format: typing.Optional[str] = None
    sentences: typing.Optional[typing.Dict[str, str]] = None
    slots: typing.Optional[typing.Dict[str, typing.List[str]]] = None

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True if site id is in topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        The ``site_id`` keyword argument defaults to the MQTT wildcard ``+``.
        """
        site_id = kwargs.get("site_id", "+")
        return f"rhasspy/asr/{site_id}/train"

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if topic matches template"""
        return re.match(AsrTrain.TOPIC_PATTERN, topic) is not None

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Get site id from a topic"""
        match = re.match(AsrTrain.TOPIC_PATTERN, topic)
        assert match, "Not a train topic"
        return match.group(1)
@dataclass
class AsrTrainSuccess(Message):
    """Result from successful training.

    Attributes
    ----------
    id: Optional[str] = None
        Unique id from training request
    """

    # Compiled topic template: group(1) = site id.
    TOPIC_PATTERN = re.compile(r"^rhasspy/asr/([^/]+)/trainSuccess$")

    id: typing.Optional[str] = None

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True if site id is in topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        The ``site_id`` keyword argument defaults to the MQTT wildcard ``+``.
        """
        site_id = kwargs.get("site_id", "+")
        return f"rhasspy/asr/{site_id}/trainSuccess"

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if topic matches template"""
        return re.match(AsrTrainSuccess.TOPIC_PATTERN, topic) is not None

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Get site id from a topic"""
        match = re.match(AsrTrainSuccess.TOPIC_PATTERN, topic)
        assert match, "Not a trainSuccess topic"
        return match.group(1)
@dataclass
class AsrAudioCaptured(Message):
    """Audio captured from ASR session.

    Both the site id and the session id are encoded in the MQTT topic; the
    payload is the raw WAV audio (not JSON).

    Attributes
    ----------
    wav_bytes: bytes
        Captured audio in WAV format
    """

    # Compiled topic template: group(1) = site id, group(2) = session id.
    TOPIC_PATTERN = re.compile(r"^rhasspy/asr/([^/]+)/([^/]+)/audioCaptured$")

    wav_bytes: bytes

    def payload(self) -> typing.Union[str, bytes]:
        """Get binary/string for this message."""
        return self.wav_bytes

    @classmethod
    def is_binary_payload(cls) -> bool:
        """True if payload is not JSON."""
        return True

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True if site id is in topic."""
        return True

    @classmethod
    def is_session_in_topic(cls) -> bool:
        """True if session id is in topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        ``site_id`` and ``session_id`` keyword arguments default to the MQTT
        wildcard ``+``.
        """
        site_id = kwargs.get("site_id", "+")
        session_id = kwargs.get("session_id", "+")
        return f"rhasspy/asr/{site_id}/{session_id}/audioCaptured"

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if topic matches template"""
        return re.match(AsrAudioCaptured.TOPIC_PATTERN, topic) is not None

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Get site id from a topic"""
        match = re.match(AsrAudioCaptured.TOPIC_PATTERN, topic)
        assert match, "Not an audioCaptured topic"
        return match.group(1)

    @classmethod
    def get_session_id(cls, topic: str) -> typing.Optional[str]:
        """Get session id from a topic"""
        match = re.match(AsrAudioCaptured.TOPIC_PATTERN, topic)
        assert match, "Not an audioCaptured topic"
        return match.group(2)
@dataclass
class AsrRecordingFinished(Message):
    """Sent after silence has been detected, and before transcription occurs.

    Attributes
    ----------
    site_id: str = "default"
        The id of the site where the ASR is listening

    session_id: Optional[str] = None
        The id of the session, if there is an active session

    Note
    ----
    This is a Rhasspy-only message.
    """

    site_id: str = "default"
    session_id: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"rhasspy/asr/recordingFinished"``
        """
        # The original source line was corrupted by extraction metadata
        # appended after the return statement; restored here.
        return "rhasspy/asr/recordingFinished"
import typing
from dataclasses import dataclass
from dataclasses_json import LetterCase, dataclass_json
from .base import Message
@dataclass
class TtsSay(Message):
    """Send text to be spoken by the text to speech component.

    Note
    ----
    This is a low-level message. You should use the dialogue manager's
    :class:`rhasspyhermes.dialogue.DialogueStartSession` or
    :class:`rhasspyhermes.dialogue.DialogueContinueSession`
    messages.

    .. admonition:: MQTT message

        Topic
            ``hermes/tts/say``

        Payload (JSON)

        .. list-table::
            :widths: 10 10 80
            :header-rows: 1

            * - Key
              - Type
              - Description
            * - text
              - String
              - The text to be spoken.
            * - siteId
              - String
              - The id of the site where the text should be spoken. Defaults to ``"default"``.
            * - lang
              - String (optional)
              - The language code to use when saying the text.
            * - id
              - String (optional)
              - A request identifier. If provided, it will be passed back in the
                response message :class:`TtsSayFinished`.
            * - sessionId
              - String (optional)
              - The id of the session, if there is an active session.
            * - volume
              - Float (optional)
              - Volume scale to apply to the generated audio (0-1). Rhasspy only.

    Publish this message type with ``mosquitto_pub``:

    .. code-block:: shell

        mosquitto_pub -h <HOSTNAME> -t 'hermes/tts/say' -m '{"text": "Ciao!", "lang": "it_IT"}'

    Example
    -------
    >>> from rhasspyhermes.tts import TtsSay
    >>> say = TtsSay(text="Ciao!", lang="it_IT")
    >>> say.topic()
    'hermes/tts/say'
    >>> say.payload()
    '{"text": "Ciao!", "siteId": "default", "lang": "it_IT", "id": null, "sessionId": null}'
    """

    text: str
    """The text to be spoken."""

    site_id: str = "default"
    """The id of the site where the text should be spoken."""

    lang: typing.Optional[str] = None
    """The language code to use when saying the text."""

    id: typing.Optional[str] = None
    """A request identifier. If provided, it will be passed back in the
    response message :class:`TtsSayFinished`."""

    session_id: typing.Optional[str] = None
    """The id of the session, if there is an active session."""

    volume: typing.Optional[float] = None
    """Volume scale to apply to generated audio (0-1)"""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/tts/say"``
        """
        return "hermes/tts/say"
@dataclass
class TtsSayFinished(Message):
    """Published when the text to speech component has finished speaking.

    MQTT topic: ``hermes/tts/sayFinished``
    """

    site_id: str = "default"
    """The id of the site where the text was spoken."""

    id: typing.Optional[str] = None
    """Identifier from the request (:class:`TtsSay`), if any."""

    session_id: typing.Optional[str] = None
    """The id of the session, if there is an active session."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type (``hermes/tts/sayFinished``)."""
        return "hermes/tts/sayFinished"
# -----------------------------------------------------------------------------
# Rhasspy Only
# -----------------------------------------------------------------------------
@dataclass
class GetVoices(Message):
    """Request the available voices from the text to speech system.

    Note
    ----
    This is a Rhasspy-only message.

    MQTT topic: ``rhasspy/tts/getVoices``

    Example
    -------
    >>> from rhasspyhermes.tts import GetVoices
    >>> GetVoices("abcd").topic()
    'rhasspy/tts/getVoices'
    """

    id: typing.Optional[str] = None
    """Unique identifier passed to the response (:class:`Voices`)."""

    site_id: str = "default"
    """Id of the site to request voices from."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type (``rhasspy/tts/getVoices``)."""
        return "rhasspy/tts/getVoices"
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class Voice:
    """A single voice available to the text to speech system."""

    voice_id: str
    """Unique identifier for the voice."""

    description: typing.Optional[str] = None
    """Optional human-readable description of the voice."""
@dataclass
class Voices(Message):
    """Response listing the available text to speech voices.

    Published in reply to a :class:`GetVoices` request.

    Note
    ----
    This is a Rhasspy-only message.

    MQTT topic: ``rhasspy/tts/voices``
    """

    voices: typing.List[Voice]
    """List of available voices."""

    id: typing.Optional[str] = None
    """Unique identifier from the request (:class:`GetVoices`), if any."""

    site_id: str = "default"
    """Id of the site where voices were requested."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type (``rhasspy/tts/voices``)."""
        return "rhasspy/tts/voices"
@dataclass
class TtsError(Message):
    """Published by the text to speech system if an error has occurred.

    MQTT topic: ``hermes/error/tts``

    Note
    ----
    This is a Rhasspy-only message.

    Example
    -------
    >>> from rhasspyhermes.tts import TtsError
    >>> TtsError(error="Unexpected error").topic()
    'hermes/error/tts'
    """
    error: str
    """A description of the error that occurred."""
    site_id: str = "default"
    """The id of the site where the error occurred."""
    context: typing.Optional[str] = None
    """Additional information on the context in which the error occurred."""
    session_id: typing.Optional[str] = None
    """The id of the session, if there is an active session."""
    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"hermes/error/tts"``
        """
        # kwargs are accepted for interface uniformity with other Message types.
        return "hermes/error/tts" | /rhasspy-hermes-0.6.2.tar.gz/rhasspy-hermes-0.6.2/rhasspyhermes/tts.py | 0.767516 | 0.198336 | tts.py | pypi |
import typing
from dataclasses import dataclass
from dataclasses_json import LetterCase, dataclass_json
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class Intent:
    """Result of intent classification: an intent name plus a confidence score."""

    intent_name: str
    """The name of the detected intent."""

    confidence_score: float
    """The probability of the detection, between 0 and 1 (1 being sure)."""
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class SlotRange:
    """Character range where a slot value occurs in the input text."""

    start: int
    """Start index (inclusive) of the slot value in the substituted input."""

    end: int
    """End index (exclusive) of the slot value in the substituted input."""

    # ------------
    # Rhasspy only
    # ------------
    raw_start: typing.Optional[int] = None
    """Start index (inclusive) of the slot value in the unsubstituted input
    (Rhasspy-only attribute)."""

    raw_end: typing.Optional[int] = None
    """End index (exclusive) of the slot value in the unsubstituted input
    (Rhasspy-only attribute)."""
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class Slot:
    """Named entity in an intent slot."""
    entity: str
    """The entity of the slot."""
    value: typing.Dict[str, typing.Any]
    """The resolved value of the slot. Contains at least a ``"value"`` key."""
    slot_name: str = None  # type: ignore
    """The name of the slot. Defaults to the entity name (see __post_init__)."""
    raw_value: str = None  # type: ignore
    """The raw value of the slot, as it was in the input."""
    confidence: float = 0.0
    """Confidence score of the slot, between 0 and 1 (1 being confident)."""
    range: typing.Optional[SlotRange] = None
    """The range where the slot is found in the input text."""
    def __post_init__(self) -> None:
        """Fill in defaults that depend on other fields."""
        # slot_name defaults to the entity name when not provided.
        if self.slot_name is None:
            self.slot_name = self.entity
        # raw_value falls back to the resolved value's "value" entry.
        # NOTE(review): value.get("value") may be None if the key is absent,
        # leaving raw_value None despite its ``str`` annotation — confirm callers.
        if self.raw_value is None:
            self.raw_value = self.value.get("value")
    @property
    def start(self) -> int:
        """Get the start index (inclusive) of the slot value (0 when no range)."""
        if self.range:
            return self.range.start
        return 0
    @property
    def raw_start(self) -> int:
        """Get the start index (inclusive) of the raw slot value.
        Falls back to :attr:`start` when no raw range is available."""
        value = None
        if self.range:
            value = self.range.raw_start
        if value is None:
            return self.start
        return value
    @property
    def end(self) -> int:
        """Get the end index (exclusive) of the slot value."""
        if self.range:
            return self.range.end
        # NOTE(review): falls back to 1 (not 0) when no range is given,
        # presumably so that end > start for the default case — confirm intent.
        return 1
    @property
    def raw_end(self) -> int:
        """Get the end index (exclusive) of the raw slot value.
        Falls back to :attr:`end` when no raw range is available."""
        value = None
        if self.range:
            value = self.range.raw_end
        if value is None:
            return self.end
        return value | /rhasspy-hermes-0.6.2.tar.gz/rhasspy-hermes-0.6.2/rhasspyhermes/intent.py | 0.888172 | 0.666587 | intent.py | pypi |
import re
import typing
from dataclasses import dataclass
from dataclasses_json import LetterCase, dataclass_json
from .base import Message
from .intent import Intent, Slot
@dataclass
class NluQuery(Message):
    """Request intent recognition from the NLU component.

    MQTT topic: ``hermes/nlu/query``

    The NLU component answers with either :class:`NluIntentParsed` or
    :class:`NluIntentNotRecognized`.

    Example
    -------
    >>> from rhasspyhermes.nlu import NluQuery
    >>> NluQuery(input='what time is it').topic()
    'hermes/nlu/query'
    """

    input: str
    """The text to send to the NLU component."""

    site_id: str = "default"
    """The id of the site where the NLU component is located.

    Note
    ----
    In contrast to the Snips Hermes protocol, the site id is compulsory.
    """

    id: typing.Optional[str] = None
    """A request identifier. If provided, it is passed back in the response
    (:class:`NluIntentParsed` or :class:`NluIntentNotRecognized`)."""

    intent_filter: typing.Optional[typing.List[str]] = None
    """A list of intent names to restrict the NLU resolution on."""

    session_id: typing.Optional[str] = None
    """The id of the session, if there is an active session.

    Note
    ----
    In contrast to the Snips Hermes protocol, the session id is optional.
    """

    # ------------
    # Rhasspy only
    # ------------
    wakeword_id: typing.Optional[str] = None
    """Optional id of the wakeword used to activate the ASR (Rhasspy only)."""

    lang: typing.Optional[str] = None
    """Optional language of the session (Rhasspy only)."""

    custom_data: typing.Optional[str] = None
    """Custom data provided by the message that started, continued or ended
    the session (Rhasspy only)."""

    asr_confidence: typing.Optional[float] = None
    """Speech recognizer confidence score between 0 and 1, 1 being sure
    (Rhasspy only)."""

    custom_entities: typing.Optional[typing.Dict[str, typing.Any]] = None
    """User-defined entities to be set in the recognized intent. Copied by the
    dialogue manager into subsequent ASR and NLU messages (Rhasspy only)."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type (``hermes/nlu/query``)."""
        return "hermes/nlu/query"
@dataclass
class NluIntentParsed(Message):
    """An intent was successfully parsed.

    Returned by the NLU component as the result of an intent resolution
    requested by a :class:`NluQuery` message.

    MQTT topic: ``hermes/nlu/intentParsed``

    Note
    ----
    This is a low-level message that precedes the full intent message,
    :class:`NluIntent`. Subscribe to the latter to detect a specific intent.
    """

    input: str
    """The user input that has generated this intent."""

    intent: Intent
    """Structured description of the intent classification."""

    site_id: str = "default"
    """Site where the user interaction took place.

    Note
    ----
    In contrast to the Snips Hermes protocol, the site id is compulsory.
    """

    id: typing.Optional[str] = None
    """The request identifier from the NLU query (:class:`NluQuery`), if any."""

    slots: typing.Optional[typing.List[Slot]] = None
    """Structured description of the detected slots for this intent, if any."""

    session_id: typing.Optional[str] = None
    """Session id of the intent detection. Client code must use it to continue
    or end the session."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type (``hermes/nlu/intentParsed``)."""
        return "hermes/nlu/intentParsed"
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class AsrTokenTime:
    """Timing of an ASR token relative to the beginning of the utterance."""

    start: float
    """Start time of the token in seconds."""

    end: float
    """End time of the token in seconds."""
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass
class AsrToken:
    """A single token produced by the automated speech recognizer."""

    value: str
    """Text value of the token."""

    confidence: float
    """Confidence score of the token, between 0 and 1 (1 being confident)."""

    range_start: int
    """Start of the token's character range in the original input."""

    range_end: int
    """End of the token's character range in the original input."""

    time: typing.Optional[AsrTokenTime] = None
    """Structured time when this token was detected."""
@dataclass
class NluIntent(Message):
    """Recognized intent, sent by the dialogue manager on detection.

    This is the main Rhasspy Hermes message an intent handler should
    subscribe to. The handler must then either continue
    (:class:`rhasspyhermes.dialogue.DialogueContinueSession`) or end
    (:class:`rhasspyhermes.dialogue.DialogueEndSession`) the session.

    MQTT topic: ``hermes/intent/<intentName>`` (use the MQTT ``#`` wildcard
    to receive all intents).

    Example
    -------
    >>> from rhasspyhermes.nlu import NluIntent
    >>> from rhasspyhermes.intent import Intent
    >>> nlu_intent = NluIntent("what time is it", Intent(intent_name="GetTime", confidence_score=0.95))
    >>> nlu_intent.topic(intent_name="GetTime")
    'hermes/intent/GetTime'
    """

    TOPIC_PATTERN = re.compile(r"^hermes/intent/(.+)$")

    input: str
    """The user input that has generated this intent."""

    intent: Intent
    """Structured description of the intent classification."""

    site_id: str = "default"
    """Site where the user interaction took place."""

    id: typing.Optional[str] = None
    """Request id from the NLU query (:class:`NluQuery`), if any
    (Rhasspy-only attribute)."""

    slots: typing.Optional[typing.List[Slot]] = None
    """Structured description of the detected slots for this intent, if any."""

    session_id: typing.Optional[str] = None
    """Session id of the intent detection. Client code must use it to continue
    or end the session.

    Note
    ----
    In contrast to the Snips Hermes protocol, the session id is optional."""

    custom_data: typing.Optional[str] = None
    """Custom data provided by the message that started, continued or ended
    the session."""

    asr_tokens: typing.Optional[typing.List[typing.List[AsrToken]]] = None
    """Tokens the ASR captured for this intent; one inner list per ASR
    invocation, each containing that invocation's captured tokens."""

    asr_confidence: typing.Optional[float] = None
    """Speech recognizer confidence score between 0 and 1 (1 being sure)."""

    # ------------
    # Rhasspy only
    # ------------
    raw_input: typing.Optional[str] = None
    """Original query input before substitutions, such as number replacement
    (Rhasspy only)."""

    wakeword_id: typing.Optional[str] = None
    """Id of the wake word that triggered this session (Rhasspy only)."""

    lang: typing.Optional[str] = None
    """Language of the session (Rhasspy only)."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for intent ``intent_name`` (``#`` wildcard by default).

        Example
        -------
        >>> NluIntent.topic()
        'hermes/intent/#'
        >>> NluIntent.topic(intent_name="GetTime")
        'hermes/intent/GetTime'
        """
        return "hermes/intent/" + kwargs.get("intent_name", "#")

    @classmethod
    def get_intent_name(cls, topic: str) -> str:
        """Extract the intent name from an intent topic."""
        match = NluIntent.TOPIC_PATTERN.match(topic)
        assert match, "Not an intent topic"
        return match.group(1)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if ``topic`` matches the intent topic template."""
        return NluIntent.TOPIC_PATTERN.match(topic) is not None

    def to_rhasspy_dict(self) -> typing.Dict[str, typing.Any]:
        """Convert to the dictionary format used internally by Rhasspy."""
        detected_slots = self.slots or []
        entities = []
        for slot in detected_slots:
            # Raw indices fall back to the substituted indices when absent.
            raw_start = slot.raw_start if slot.raw_start is not None else slot.start
            raw_end = slot.raw_end if slot.raw_end is not None else slot.end
            entities.append(
                {
                    "entity": slot.slot_name,
                    "value": slot.value.get("value"),
                    "value_details": slot.value,
                    "raw_value": slot.raw_value,
                    "start": slot.start,
                    "end": slot.end,
                    "raw_start": raw_start,
                    "raw_end": raw_end,
                }
            )
        return {
            "intent": {
                "name": self.intent.intent_name,
                "confidence": self.intent.confidence_score,
            },
            "entities": entities,
            "slots": {s.slot_name: s.value.get("value") for s in detected_slots},
            "text": self.input,
            "raw_text": self.raw_input or "",
            "tokens": self.input.split(),
            "raw_tokens": (self.raw_input or self.input).split(),
            "wakeword_id": self.wakeword_id,
        }

    @classmethod
    def make_asr_tokens(cls, tokens: typing.List[typing.Any]) -> typing.List[AsrToken]:
        """Create ASR token objects from words, assigning character ranges."""
        asr_tokens: typing.List[AsrToken] = []
        offset = 0
        for token in tokens:
            word = str(token)
            end = offset + len(word)
            asr_tokens.append(
                AsrToken(value=word, confidence=1.0, range_start=offset, range_end=end)
            )
            offset = end + 1  # +1 accounts for a separating space
        return asr_tokens
@dataclass
class NluIntentNotRecognized(Message):
    """Published when the NLU component fails to recognize an intent.

    MQTT topic: ``hermes/nlu/intentNotRecognized``
    """

    input: str
    """The input, if any, that generated this event."""

    site_id: str = "default"
    """Site where the user interaction took place.

    Note
    ----
    In contrast to the Snips Hermes protocol, the site id is compulsory.
    """

    id: typing.Optional[str] = None
    """Request id from NLU query, if any."""

    custom_data: typing.Optional[str] = None
    """Custom data provided by the message that started, continued or ended
    the session."""

    session_id: typing.Optional[str] = None
    """Session identifier of the session that generated this event."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type
        (``hermes/nlu/intentNotRecognized``)."""
        return "hermes/nlu/intentNotRecognized"

    def to_rhasspy_dict(self) -> typing.Dict[str, typing.Any]:
        """Return an empty Rhasspy intent dictionary for this input."""
        words = self.input.split()
        return {
            "text": self.input,
            "raw_text": self.input,
            "tokens": words,
            "raw_tokens": words,
            "intent": {"name": "", "confidence": 0.0},
            "entities": [],
            "slots": {},
        }
@dataclass
class NluError(Message):
    """Published by the NLU component when an error has occurred.

    MQTT topic: ``hermes/error/nlu``

    Example
    -------
    >>> from rhasspyhermes.nlu import NluError
    >>> NluError(error="Unexpected error").topic()
    'hermes/error/nlu'
    """

    error: str
    """A description of the error that occurred."""

    site_id: str = "default"
    """The id of the site where the error occurred.

    Note
    ----
    In contrast to the Snips Hermes protocol, the site id is compulsory.
    """

    context: typing.Optional[str] = None
    """Additional information on the context in which the error occurred."""

    session_id: typing.Optional[str] = None
    """The id of the session, if there is an active session."""

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic for this message type (``hermes/error/nlu``)."""
        return "hermes/error/nlu"
# ----------------------------------------------------------------------------
# Rhasspy-specific Messages
# ----------------------------------------------------------------------------
@dataclass
class NluTrain(Message):
    """Request to retrain the NLU component from an intent graph.

    MQTT topic: ``rhasspy/nlu/<siteId>/train``

    The reply is a :class:`NluTrainSuccess` message.

    Note
    ----
    This is a Rhasspy-only message.
    """

    TOPIC_PATTERN = re.compile(r"^rhasspy/nlu/([^/]+)/train$")

    graph_path: str
    """Path to the graph file."""

    id: typing.Optional[str] = None
    """Unique id for the training request."""

    graph_format: typing.Optional[str] = None
    """Optional format of the graph file."""

    sentences: typing.Optional[typing.Dict[str, str]] = None
    """TODO"""

    slots: typing.Optional[typing.Dict[str, typing.List[str]]] = None
    """TODO"""

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True: the site id is encoded in the topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic, parameterized by ``site_id``
        (``+`` wildcard by default)."""
        return "rhasspy/nlu/{0}/train".format(kwargs.get("site_id", "+"))

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if ``topic`` matches the train topic template."""
        return NluTrain.TOPIC_PATTERN.match(topic) is not None

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the site id from a train topic."""
        match = NluTrain.TOPIC_PATTERN.match(topic)
        assert match, "Not a train topic"
        return match.group(1)
@dataclass
class NluTrainSuccess(Message):
    """Result from successful training.

    Published in reply to a :class:`NluTrain` request.

    MQTT topic: ``rhasspy/nlu/<siteId>/trainSuccess``

    Note
    ----
    This is a Rhasspy-only message.
    """
    TOPIC_PATTERN = re.compile(r"^rhasspy/nlu/([^/]+)/trainSuccess$")
    id: typing.Optional[str] = None
    """Unique id from training request."""
    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True if site id is in topic."""
        return True
    @classmethod
    def topic(cls, **kwargs) -> str:
        """Get MQTT topic for this message type.

        Returns
        -------
        str
            ``"rhasspy/nlu/{site_id}/trainSuccess"``; ``site_id`` defaults to
            the MQTT ``+`` wildcard.
        """
        site_id = kwargs.get("site_id", "+")
        return f"rhasspy/nlu/{site_id}/trainSuccess"
    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if topic matches the trainSuccess topic template."""
        return re.match(NluTrainSuccess.TOPIC_PATTERN, topic) is not None
    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Get site id from a topic."""
        match = re.match(NluTrainSuccess.TOPIC_PATTERN, topic)
        assert match, "Not a trainSuccess topic"
        return match.group(1) | /rhasspy-hermes-0.6.2.tar.gz/rhasspy-hermes-0.6.2/rhasspyhermes/nlu.py | 0.852675 | 0.224778 | nlu.py | pypi |
import audioop
import io
import re
import time
import typing
import wave
from dataclasses import dataclass
from enum import Enum
from .base import Message
@dataclass
class AudioFrame(Message):
    """Recorded frame of audio.

    Attributes
    ----------
    wav_bytes: bytes
        Recorded audio frame in WAV format
    """

    TOPIC_PATTERN = re.compile(r"^hermes/audioServer/([^/]+)/audioFrame$")

    wav_bytes: bytes

    def payload(self) -> typing.Union[str, bytes]:
        """Return the raw WAV data as the message payload."""
        return self.wav_bytes

    @classmethod
    def is_binary_payload(cls) -> bool:
        """True: the payload is raw WAV data, not JSON."""
        return True

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True: the site id is encoded in the topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic, parameterized by ``site_id`` (``+`` by default)."""
        return "hermes/audioServer/{0}/audioFrame".format(kwargs.get("site_id", "+"))

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the site id from an audioFrame topic."""
        match = AudioFrame.TOPIC_PATTERN.match(topic)
        assert match, "Not an audioFrame topic"
        return match.group(1)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if ``topic`` matches the audioFrame topic template."""
        return AudioFrame.TOPIC_PATTERN.match(topic) is not None

    @classmethod
    def iter_wav_chunked(
        cls, wav_io: typing.BinaryIO, frames_per_chunk: int, live_delay: bool = False
    ) -> typing.Iterable[bytes]:
        """Split a single WAV stream into multiple complete WAV chunks.

        When ``live_delay`` is True, sleep for each chunk's playback duration
        after yielding it.
        """
        with wave.open(wav_io) as in_wav:
            sample_rate = in_wav.getframerate()
            sample_width = in_wav.getsampwidth()
            channels = in_wav.getnchannels()
            frames_left = in_wav.getnframes()
            while frames_left > 0:
                frames = in_wav.readframes(frames_per_chunk)
                if not frames:
                    break
                # Wrap the raw frames in a complete WAV container
                with io.BytesIO() as chunk_io:
                    with wave.open(chunk_io, "wb") as out_wav:
                        out_wav.setframerate(sample_rate)
                        out_wav.setsampwidth(sample_width)
                        out_wav.setnchannels(channels)
                        out_wav.writeframes(frames)
                    chunk_bytes = chunk_io.getvalue()
                yield chunk_bytes
                if live_delay:
                    time.sleep(AudioFrame.get_wav_duration(chunk_bytes))
                frames_left -= frames_per_chunk

    @classmethod
    def get_wav_duration(cls, wav_bytes: bytes) -> float:
        """Return the real-time duration of a WAV file in seconds."""
        with io.BytesIO(wav_bytes) as wav_buffer:
            with wave.open(wav_buffer, "rb") as reader:
                return reader.getnframes() / float(reader.getframerate())
@dataclass
class AudioPlayBytes(Message):
    """Play WAV sound on a specific site.

    Attributes
    ----------
    wav_bytes: bytes
        Audio to play in WAV format
    """

    TOPIC_PATTERN = re.compile(r"^hermes/audioServer/([^/]+)/playBytes/([^/]+)$")

    wav_bytes: bytes

    def payload(self) -> typing.Union[str, bytes]:
        """Return the raw WAV data as the message payload."""
        return self.wav_bytes

    @classmethod
    def is_binary_payload(cls) -> bool:
        """True: the payload is raw WAV data, not JSON."""
        return True

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """True: the site id is encoded in the topic."""
        return True

    @classmethod
    def is_session_in_topic(cls) -> bool:
        """True: the session (request) id is encoded in the topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the MQTT topic, parameterized by ``site_id`` (default ``+``)
        and ``request_id`` (default ``#``)."""
        site_id = kwargs.get("site_id", "+")
        request_id = kwargs.get("request_id", "#")
        return "hermes/audioServer/{0}/playBytes/{1}".format(site_id, request_id)

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the site id from a playBytes topic."""
        match = AudioPlayBytes.TOPIC_PATTERN.match(topic)
        assert match, "Not a playBytes topic"
        return match.group(1)

    @classmethod
    def get_request_id(cls, topic: str) -> str:
        """Extract the request id from a playBytes topic."""
        match = AudioPlayBytes.TOPIC_PATTERN.match(topic)
        assert match, "Not a playBytes topic"
        return match.group(2)

    @classmethod
    def get_session_id(cls, topic: str) -> str:
        """Extract the session id (same as the request id) from a playBytes topic."""
        return AudioPlayBytes.get_request_id(topic)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """True if ``topic`` matches the playBytes topic template."""
        return AudioPlayBytes.TOPIC_PATTERN.match(topic) is not None
@dataclass
class AudioPlayFinished(Message):
    """Notification that the audio service finished playing a sound.

    Attributes
    ----------
    id: Optional[str] = None
        Request identifier carried over from the playBytes topic
    session_id: Optional[str] = None
        The id of the session, if there is an active session
    """

    TOPIC_PATTERN = re.compile(r"^hermes/audioServer/([^/]+)/playFinished$")

    id: typing.Optional[str] = None
    session_id: typing.Optional[str] = None

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """Site id is embedded in the topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Build the MQTT topic ("+" wildcard by default)."""
        site = kwargs.get("site_id", "+")
        return f"hermes/audioServer/{site}/playFinished"

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the site id from a playFinished topic."""
        topic_match = AudioPlayFinished.TOPIC_PATTERN.match(topic)
        assert topic_match, "Not a playFinished topic"
        return topic_match.group(1)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """Check whether *topic* matches the playFinished template."""
        return AudioPlayFinished.TOPIC_PATTERN.match(topic) is not None
# -----------------------------------------------------------------------------
# Rhasspy Only
# -----------------------------------------------------------------------------
class AudioDeviceMode(str, Enum):
    """Direction of an audio device.

    Values
    ------
    INPUT
        Recording device
    OUTPUT
        Playback device
    """

    INPUT = "input"
    OUTPUT = "output"
@dataclass
class AudioDevice:
    """Details of a single audio input or output device.

    Attributes
    ----------
    mode: AudioDeviceMode
        Recording or playback device
    id: str
        Unique id of audio device
    name: Optional[str] = None
        Optional human-readable name of audio device
    description: Optional[str] = None
        Optional human-readable description of audio device
    working: Optional[bool] = None
        Status of audio device if tested
    """

    mode: AudioDeviceMode
    id: str
    name: typing.Optional[str] = None
    description: typing.Optional[str] = None
    working: typing.Optional[bool] = None
@dataclass
class AudioGetDevices(Message):
    """Request details of available audio devices.

    Attributes
    ----------
    modes: List[AudioDeviceMode]
        Device types to get information about
    site_id: str = "default"
        Id of the site where devices are located
    id: Optional[str] = None
        Unique id to be returned in response
    test: bool = False
        True if devices should be tested
    """

    modes: typing.List[AudioDeviceMode]
    site_id: str = "default"
    id: typing.Optional[str] = None
    test: bool = False

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this request."""
        return "rhasspy/audioServer/getDevices"
@dataclass
class AudioDevices(Message):
    """Response to an AudioGetDevices request.

    Attributes
    ----------
    devices: List[AudioDevice]
        Description of requested device types
    site_id: str = "default"
        Id of site where devices are located
    id: Optional[str] = None
        Unique id from request
    """

    devices: typing.List[AudioDevice]
    site_id: str = "default"
    id: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this response."""
        return "rhasspy/audioServer/devices"
@dataclass
class AudioSessionFrame(Message):
    """Audio frame recorded for a specific session.

    Attributes
    ----------
    wav_bytes: bytes
        Audio frame in WAV format
    """

    TOPIC_PATTERN = re.compile(
        r"^hermes/audioServer/([^/]+)/([^/]+)/audioSessionFrame$"
    )

    wav_bytes: bytes

    def payload(self) -> typing.Union[str, bytes]:
        """Return the raw WAV bytes as the message payload."""
        return self.wav_bytes

    @classmethod
    def is_binary_payload(cls) -> bool:
        """Payload is raw WAV data rather than JSON."""
        return True

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """Site id is embedded in the topic."""
        return True

    @classmethod
    def is_session_in_topic(cls) -> bool:
        """Session id is embedded in the topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Build the MQTT topic ("+" wildcards by default)."""
        site = kwargs.get("site_id", "+")
        session = kwargs.get("session_id", "+")
        return f"hermes/audioServer/{site}/{session}/audioSessionFrame"

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the site id from an audioSessionFrame topic."""
        topic_match = AudioSessionFrame.TOPIC_PATTERN.match(topic)
        assert topic_match, "Not an audioSessionFrame topic"
        return topic_match.group(1)

    @classmethod
    def get_session_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the session id from an audioSessionFrame topic."""
        topic_match = AudioSessionFrame.TOPIC_PATTERN.match(topic)
        assert topic_match, "Not an audioSessionFrame topic"
        return topic_match.group(2)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """Check whether *topic* matches the audioSessionFrame template."""
        return AudioSessionFrame.TOPIC_PATTERN.match(topic) is not None
@dataclass
class AudioSummary(Message):
    """Diagnostic summary of recent audio frame(s).

    Attributes
    ----------
    debiased_energy: float
        Audio energy computed using get_debiased_energy
    is_speech: Optional[bool] = None
        True/false if VAD detected speech
    """

    TOPIC_PATTERN = re.compile(r"^hermes/audioServer/([^/]+)/audioSummary$")

    debiased_energy: float
    is_speech: typing.Optional[bool] = None

    @classmethod
    def get_debiased_energy(cls, audio_data: bytes) -> float:
        """Compute the energy of 16-bit audio with bias removed.

        Thanks to the speech_recognition library!
        https://github.com/Uberi/speech_recognition/blob/master/speech_recognition/__init__.py
        """
        # NOTE(review): audioop is deprecated and removed in Python 3.13 —
        # confirm the supported interpreter range for this package.
        # Negated RMS packed as one little-endian 16-bit sample, then added
        # to every sample to offset the signal before re-measuring RMS.
        energy = -audioop.rms(audio_data, 2)
        energy_bytes = bytes([energy & 0xFF, (energy >> 8) & 0xFF])
        debiased_energy = audioop.rms(
            audioop.add(audio_data, energy_bytes * (len(audio_data) // 2), 2), 2
        )

        # Probably actually audio if > 30
        return debiased_energy

    @classmethod
    def is_site_in_topic(cls) -> bool:
        """Site id is embedded in the topic."""
        return True

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Build the MQTT topic ("+" wildcard by default)."""
        site = kwargs.get("site_id", "+")
        return f"hermes/audioServer/{site}/audioSummary"

    @classmethod
    def get_site_id(cls, topic: str) -> typing.Optional[str]:
        """Extract the site id from an audioSummary topic."""
        topic_match = AudioSummary.TOPIC_PATTERN.match(topic)
        assert topic_match, "Not an audioSummary topic"
        return topic_match.group(1)

    @classmethod
    def is_topic(cls, topic: str) -> bool:
        """Check whether *topic* matches the audioSummary template."""
        return AudioSummary.TOPIC_PATTERN.match(topic) is not None
@dataclass
class SummaryToggleOn(Message):
    """Turn on sending of audio summaries.

    Attributes
    ----------
    site_id: str = "default"
        Id of site where audio is being recorded
    """

    site_id: str = "default"

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this message."""
        return "hermes/audioServer/toggleSummaryOn"
@dataclass
class SummaryToggleOff(Message):
    """Turn off sending of audio summaries.

    Attributes
    ----------
    site_id: str = "default"
        Id of site where audio is being recorded
    """

    site_id: str = "default"

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this message."""
        return "hermes/audioServer/toggleSummaryOff"
@dataclass
class AudioToggleOn(Message):
    """Activate the audio output system.

    Attributes
    ----------
    site_id: str = "default"
        Id of site where audio should be turned on
    """

    site_id: str = "default"

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this message."""
        return "hermes/audioServer/toggleOn"
@dataclass
class AudioToggleOff(Message):
    """Deactivate the audio output system.

    Attributes
    ----------
    site_id: str = "default"
        Id of site where audio should be turned off
    """

    site_id: str = "default"

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this message."""
        return "hermes/audioServer/toggleOff"
@dataclass
class AudioRecordError(Message):
    """Error reported by the audio input component.

    Attributes
    ----------
    error: str
        A description of the error that occurred
    site_id: str = "default"
        The id of the site where the error occurred
    context: Optional[str] = None
        Additional information on the context in which the error occurred
    session_id: Optional[str] = None
        The id of the session, if there is an active session
    """

    error: str
    site_id: str = "default"
    context: typing.Optional[str] = None
    session_id: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this error."""
        return "hermes/error/audioServer/record"
@dataclass
class AudioPlayError(Message):
    """Error reported by the audio output component.

    Attributes
    ----------
    error: str
        A description of the error that occurred
    site_id: str = "default"
        The id of the site where the error occurred
    context: Optional[str] = None
        Additional information on the context in which the error occurred
    session_id: Optional[str] = None
        The id of the session, if there is an active session
    """

    error: str
    site_id: str = "default"
    context: typing.Optional[str] = None
    session_id: typing.Optional[str] = None

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this error."""
        return "hermes/error/audioServer/play"
@dataclass
class AudioSetVolume(Message):
    """Set the audio output volume at a site.

    Attributes
    ----------
    volume: float
        The volume scale to set (0 = off, 1 = full volume)
    site_id: str = "default"
        The id of the site where the volume should be set
    """

    volume: float
    site_id: str = "default"

    @classmethod
    def topic(cls, **kwargs) -> str:
        """Return the fixed MQTT topic for this message."""
        return "rhasspy/audioServer/setVolume"
import logging
import os
import ssl
import typing
from enum import Enum
from urllib.parse import urljoin
from uuid import uuid4
import aiohttp
from rhasspyhermes.base import Message
from rhasspyhermes.client import GeneratorType, HermesClient
from rhasspyhermes.handle import HandleToggleOff, HandleToggleOn
from rhasspyhermes.nlu import NluIntent
from rhasspyhermes.tts import TtsSay
_LOGGER = logging.getLogger("rhasspyhomeassistant_hermes")
# -----------------------------------------------------------------------------
class HandleType(str, Enum):
    """Strategy for forwarding recognized intents to Home Assistant."""

    EVENT = "event"
    INTENT = "intent"
# -----------------------------------------------------------------------------
class HomeAssistantHermesMqtt(HermesClient):
    """Hermes MQTT server for Rhasspy intent handling with Home Assistant.

    Forwards recognized intents either as Home Assistant events
    (handle_type=EVENT) or to Home Assistant's /api/intent/handle endpoint
    (handle_type=INTENT), optionally speaking the intent's text response.
    """

    def __init__(
        self,
        client,
        url: str,
        access_token: typing.Optional[str] = None,
        api_password: typing.Optional[str] = None,
        event_type_format: str = "rhasspy_{0}",
        certfile: typing.Optional[str] = None,
        keyfile: typing.Optional[str] = None,
        handle_type: HandleType = HandleType.EVENT,
        site_ids: typing.Optional[typing.List[str]] = None,
    ):
        super().__init__("rhasspyhomeassistant_hermes", client, site_ids=site_ids)
        self.subscribe(NluIntent, HandleToggleOn, HandleToggleOff)

        self.url = url
        self.access_token = access_token
        self.api_password = api_password
        self.event_type_format = event_type_format
        self.handle_type = handle_type

        # Toggled by HandleToggleOn/HandleToggleOff messages
        self.handle_enabled = True

        # SSL
        # NOTE(review): ssl.SSLContext() with no protocol argument is
        # deprecated and does NOT verify server certificates; consider
        # ssl.create_default_context().  Left unchanged here to preserve
        # existing behavior with self-signed HA setups — TODO confirm.
        self.ssl_context = ssl.SSLContext()
        if certfile:
            _LOGGER.debug("Using SSL with certfile=%s, keyfile=%s", certfile, keyfile)
            self.ssl_context.load_cert_chain(certfile, keyfile)

        # Async HTTP session, created lazily on first use
        self._http_session: typing.Optional[aiohttp.ClientSession] = None

    @property
    def http_session(self):
        """Get or create async HTTP session."""
        if self._http_session is None:
            self._http_session = aiohttp.ClientSession()

        return self._http_session

    # -------------------------------------------------------------------------

    @staticmethod
    def _nlu_slots(nlu_intent: NluIntent) -> typing.Dict[str, typing.Any]:
        """Flatten intent slots into a dict and add "_"-prefixed meta slots."""
        slots: typing.Dict[str, typing.Any] = {}
        if nlu_intent.slots:
            for slot in nlu_intent.slots:
                slots[slot.slot_name] = slot.value["value"]

        # Add meta slots
        slots["_text"] = nlu_intent.input
        slots["_raw_text"] = nlu_intent.raw_input
        slots["_intent"] = nlu_intent.to_dict()
        slots["_site_id"] = nlu_intent.site_id

        return slots

    async def handle_intent(
        self, nlu_intent: NluIntent
    ) -> typing.AsyncIterable[TtsSay]:
        """Handle intent with Home Assistant, yielding any speech response.

        Errors are logged rather than raised so one failed intent does not
        stop the MQTT message loop.
        """
        try:
            if self.handle_type == HandleType.EVENT:
                await self.handle_home_assistant_event(nlu_intent)
            elif self.handle_type == HandleType.INTENT:
                response_dict = await self.handle_home_assistant_intent(nlu_intent)
                assert response_dict, f"No response from {self.url}"

                # Check for speech response
                tts_text = (
                    response_dict.get("speech", {}).get("plain", {}).get("speech", "")
                )

                if tts_text:
                    # Forward to TTS system
                    yield TtsSay(
                        text=tts_text,
                        id=str(uuid4()),
                        site_id=nlu_intent.site_id,
                        session_id=nlu_intent.session_id,
                    )
            else:
                raise ValueError(f"Unsupported handle_type (got {self.handle_type})")
        except Exception:
            _LOGGER.exception("handle_intent")

    # -------------------------------------------------------------------------

    async def handle_home_assistant_event(self, nlu_intent: NluIntent):
        """POSTs an event to Home Assistant's /api/events endpoint."""
        try:
            # Create new Home Assistant event
            event_type = self.event_type_format.format(nlu_intent.intent.intent_name)
            slots = self._nlu_slots(nlu_intent)

            # Send event
            post_url = urljoin(self.url, "api/events/" + event_type)
            headers = self.get_hass_headers()

            _LOGGER.debug(post_url)

            # No response expected
            async with self.http_session.post(
                post_url, json=slots, headers=headers, ssl=self.ssl_context
            ) as response:
                response.raise_for_status()
        except Exception:
            _LOGGER.exception("handle_home_assistant_event")

    async def handle_home_assistant_intent(
        self, nlu_intent: NluIntent
    ) -> typing.Dict[str, typing.Any]:
        """POSTs a JSON intent to Home Assistant's /api/intent/handle endpoint.

        Returns the decoded JSON response, or an empty dict on error.
        """
        try:
            slots = self._nlu_slots(nlu_intent)
            hass_intent = {"name": nlu_intent.intent.intent_name, "data": slots}

            # POST intent JSON
            post_url = urljoin(self.url, "api/intent/handle")
            headers = self.get_hass_headers()

            _LOGGER.debug(post_url)

            # JSON response expected with optional speech
            async with self.http_session.post(
                post_url, json=hass_intent, headers=headers, ssl=self.ssl_context
            ) as response:
                response.raise_for_status()
                return await response.json()
        except Exception:
            _LOGGER.exception("handle_home_assistant_intent")

        # Empty response
        return {}

    def get_hass_headers(self) -> typing.Dict[str, str]:
        """Gets HTTP authorization headers for Home Assistant POST.

        Precedence: explicit access token, then legacy API password, then
        the HASSIO_TOKEN environment variable (Hass.io add-on).
        """
        if self.access_token:
            return {"Authorization": f"Bearer {self.access_token}"}

        if self.api_password:
            return {"X-HA-Access": self.api_password}

        hassio_token = os.environ.get("HASSIO_TOKEN")
        if hassio_token:
            return {"Authorization": f"Bearer {hassio_token}"}

        # No headers
        return {}

    # -------------------------------------------------------------------------

    async def on_message(
        self,
        message: Message,
        site_id: typing.Optional[str] = None,
        session_id: typing.Optional[str] = None,
        topic: typing.Optional[str] = None,
    ) -> GeneratorType:
        """Received message from MQTT broker."""
        if isinstance(message, NluIntent):
            if self.handle_enabled:
                async for intent_result in self.handle_intent(message):
                    yield intent_result
        elif isinstance(message, HandleToggleOn):
            self.handle_enabled = True
            _LOGGER.debug("Intent handling enabled")
        elif isinstance(message, HandleToggleOff):
            self.handle_enabled = False
            _LOGGER.debug("Intent handling disabled")
        else:
            _LOGGER.warning("Unexpected message: %s", message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.