index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
74,383 | DonaldFoss/DaskDistributed | refs/heads/master | /distributed/protocol/tests/test_numpy.py | from __future__ import print_function, division, absolute_import
import numpy as np
import pytest
from distributed.protocol import (serialize, deserialize, decompress, dumps,
loads, to_serialize)
from distributed.protocol.utils import BIG_BYTES_SHARD_SIZE
from distributed.utils_test import slow
import distributed.protocol.numpy
def test_serialize():
    """Round-trip a small ndarray through serialize()/deserialize()."""
    original = np.ones((5, 5))
    header, frames = serialize(original)
    assert header['type']
    assert len(frames) == 1
    if 'compression' in header:
        frames = decompress(header, frames)
    restored = deserialize(header, frames)
    assert (restored == original).all()
@pytest.mark.parametrize('x',
    [np.ones(5),
     np.asfortranarray(np.random.random((5, 5))),
     np.random.random(5).astype('f4'),
     np.ones(shape=(5, 6)).astype(dtype=[('total', '<f8'), ('n', '<f8')])])
def test_dumps_serialize_numpy(x):
    """serialize()/deserialize() must preserve value and dtype for C/Fortran,
    float32 and structured arrays alike."""
    header, frames = serialize(x)
    frames = decompress(header, frames) if 'compression' in header else frames
    np.testing.assert_equal(x, deserialize(header, frames))
@slow
def test_dumps_serialize_numpy_large():
    """Round-trip an array larger than one shard through dumps()/loads()."""
    # psutil is optional; skip the whole test when it is not installed.
    psutil = pytest.importorskip('psutil')
    if psutil.virtual_memory().total < 4e9:
        # Not enough RAM to hold the large test array safely; bail out quietly.
        return
    # Two shards' worth of bytes forces the multi-frame serialization path.
    x = np.random.randint(0, 255, size=int(BIG_BYTES_SHARD_SIZE * 2)).astype('u1')
    frames = dumps([to_serialize(x)])
    [y] = loads(frames)
    np.testing.assert_equal(x, y)
| {"/distributed/protocol/tests/test_numpy.py": ["/distributed/protocol/__init__.py", "/distributed/protocol/numpy.py"]} |
74,384 | DonaldFoss/DaskDistributed | refs/heads/master | /distributed/protocol/numpy.py | from __future__ import print_function, division, absolute_import
import sys
import numpy as np
try:
import blosc
n = blosc.set_nthreads(2)
except ImportError:
blosc = False
from .utils import frame_split_size
from .serialize import register_serialization
from ..utils import log_errors, ensure_bytes
def serialize_numpy_ndarray(x):
    """Split an ndarray into a metadata header and a list of byte frames.

    Returns ``(header, frames)`` where header records dtype/strides/shape and,
    when blosc is importable, per-frame compression markers.
    """
    if x.dtype.kind == 'V':
        # Structured ("void") dtypes need the full field description to
        # survive the round trip; plain dtypes serialize as a string code.
        dt = x.dtype.descr
    else:
        dt = x.dtype.str
    x = np.ascontiguousarray(x)    # np.frombuffer requires this
    header = {'dtype': dt,
              'strides': x.strides,
              'shape': x.shape}
    if blosc:
        # Shard the buffer so no single frame exceeds the transport limit,
        # then compress each shard independently.
        frames = frame_split_size([x.data])
        if sys.version_info.major == 2:
            # Python 2 buffer objects are not accepted by blosc; copy to bytes.
            frames = [ensure_bytes(frame) for frame in frames]
        frames = [blosc.compress(frame, typesize=x.dtype.itemsize,
                                 cname='lz4', clevel=5) for frame in frames]
        header['compression'] = ['blosc'] * len(frames)
    else:
        frames = [x.data]
    # NOTE(review): 'lengths' records the total uncompressed size as a single
    # entry even when the payload was split into several frames — confirm
    # downstream consumers expect this shape.
    header['lengths'] = [x.nbytes]
    return header, frames
def deserialize_numpy_ndarray(header, frames):
    """Reconstruct an ndarray from a serialization header and payload frames.

    Parameters
    ----------
    header: dict with 'dtype' (string code or structured descr), 'shape'
        and 'strides' as produced by serialize_numpy_ndarray.
    frames: list of byte-like payload buffers (already decompressed).

    Returns the reconstructed array (a zero-copy view onto the buffer when
    only one frame was supplied).
    """
    with log_errors():
        dt = header['dtype']
        if isinstance(dt, tuple):
            # msgpack round-trips the structured-dtype descr list as a tuple;
            # np.dtype needs the list form back.
            dt = list(dt)
        dt = np.dtype(dt)
        if len(frames) == 1:
            buffer = frames[0]
        else:
            # Large arrays can arrive split into several frames; stitch them
            # back together. (This replaces a leftover
            # `import pdb; pdb.set_trace()` debugging hook that would hang
            # any non-interactive process hitting the multi-frame path.)
            buffer = b''.join(ensure_bytes(frame) for frame in frames)
        x = np.frombuffer(buffer, dt)
        x = np.lib.stride_tricks.as_strided(x, header['shape'], header['strides'])
        return x
register_serialization(np.ndarray, serialize_numpy_ndarray, deserialize_numpy_ndarray)
| {"/distributed/protocol/tests/test_numpy.py": ["/distributed/protocol/__init__.py", "/distributed/protocol/numpy.py"]} |
74,385 | DonaldFoss/DaskDistributed | refs/heads/master | /distributed/bokeh/tasks/server_lifecycle.py | #!/usr/bin/env python
from __future__ import print_function, division, absolute_import
from collections import deque
import sys
import json
import os
import logging
import sys
from time import time
from tornado import gen
from tornado.httpclient import AsyncHTTPClient
from tornado.iostream import StreamClosedError
from tornado.ioloop import IOLoop
from distributed.core import read
from distributed.diagnostics.eventstream import eventstream
from distributed.diagnostics.progress_stream import task_stream_append
import distributed.bokeh
from distributed.bokeh.utils import parse_args
logger = logging.getLogger(__name__)
client = AsyncHTTPClient()
messages = distributed.bokeh.messages # monkey-patching
options = parse_args(sys.argv[1:])
@gen.coroutine
def task_events(interval, deque, times, index, rectangles, workers, last_seen):
    """Stream task-event messages from the scheduler into shared buffers.

    The mutable arguments (`deque`, `times`, `index`, `rectangles`, `workers`,
    `last_seen`) are shared with the bokeh plot callbacks, which read them on
    their own schedule; this coroutine only appends/updates them in place.
    NOTE(review): the parameter named `deque` shadows collections.deque from
    the module imports — here it is a deque *instance*, not the class.
    """
    i = 0
    try:
        # 0.100 is presumably the eventstream sampling interval in seconds
        # — TODO confirm against distributed.diagnostics.eventstream.
        stream = yield eventstream('%(host)s:%(tcp-port)d' % options, 0.100)
        while True:
            msgs = yield read(stream)
            if not msgs:
                continue
            last_seen[0] = time()
            for msg in msgs:
                if 'compute_start' in msg:
                    deque.append(msg)
                    times.append(msg['compute_start'])
                    index.append(i)
                    i += 1
                    # Transfer and disk-load phases are drawn as their own
                    # rectangles, so reserve an extra index slot for each.
                    if msg.get('transfer_start') is not None:
                        index.append(i)
                        i += 1
                    if msg.get('disk_load_start') is not None:
                        index.append(i)
                        i += 1
                    task_stream_append(rectangles, msg, workers)
    except StreamClosedError:
        pass    # don't log StreamClosedErrors
    except Exception as e:
        logger.exception(e)
    finally:
        # NOTE(review): sys.exit raises SystemExit, which the bare except
        # below immediately swallows — this looks like an attempt to stop the
        # bokeh process when the stream dies; confirm the intent before
        # relying on it.
        try:
            sys.exit(0)
        except:
            pass
n = 100000
def on_server_loaded(server_context):
    """Bokeh server hook: start streaming task events once the app loads."""
    stream_kwargs = messages['task-events']
    loop = IOLoop.current()
    loop.add_callback(task_events, **stream_kwargs)
| {"/distributed/protocol/tests/test_numpy.py": ["/distributed/protocol/__init__.py", "/distributed/protocol/numpy.py"]} |
74,386 | DonaldFoss/DaskDistributed | refs/heads/master | /distributed/protocol/__init__.py | from __future__ import print_function, division, absolute_import
from .compression import compressions, default_compression
from .core import dumps, loads, maybe_compress, decompress, msgpack
from .serialize import (serialize, deserialize, Serialize, Serialized,
to_serialize, register_serialization)
from . import numpy
| {"/distributed/protocol/tests/test_numpy.py": ["/distributed/protocol/__init__.py", "/distributed/protocol/numpy.py"]} |
74,397 | zephyrxvxx7/TKB_Course_Bot | refs/heads/master | /TKB_Scraper.py | from selenium import webdriver
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
import os
from time import sleep
from info import username, password, className, year, month, day, branch, time, grabbed
mainUrl = "http://bookseat.tkblearning.com.tw/book-seat/student/login/toLogin"
chrome_options = webdriver.ChromeOptions()
browser = webdriver.Chrome(chrome_options=chrome_options)
browser.maximize_window()
def login():
    """Log in to the TKB booking site with the credentials from info.py.

    Reads the login code displayed on the page, strips the spaces it is
    rendered with, and submits it together with username/password.
    """
    browser.get(mainUrl)
    loginCode = browser.find_element_by_xpath('//*[@id="LoginCode"]').text
    # The code is displayed with spaces between the characters; remove them.
    loginCodelist = loginCode.split(' ')
    loginCode = ''.join(loginCodelist)
    browser.find_element_by_xpath('//*[@id="id"]').send_keys(username)
    browser.find_element_by_xpath('//*[@id="pwd"]').send_keys(password)
    browser.find_element_by_xpath(
        '//*[@id="logininputcode"]').send_keys(loginCode)
    # Absolute XPath to the login button — brittle against site redesigns.
    browser.find_element_by_xpath(
        '/html/body/form/div/div/div/table/tbody/tr[6]/td[2]/div[1]/a').click()
    print('登入成功')  # "login succeeded"
def grab():
    """Select class / date / branch / session and submit the booking.

    Mutates the module-level `className` to the full option text found in the
    class selector. Only clicks the final submit button when `grabbed` is
    True, then accepts the two confirmation alerts.
    """
    global className
    select = Select(browser.find_element_by_xpath('//*[@id="class_selector"]'))
    # Match by prefix: the site appends extra text after the class name.
    for op in select.options:
        if(op.text[:len(className)] == className):
            className = op.text
    select.select_by_visible_text(className)
    sleep(0.5)  # give the page's JS time to refresh the dependent selectors
    select = Select(browser.find_element_by_xpath('//*[@id="date_selector"]'))
    select.select_by_value(
        '{year}-{month:0>2s}-{day:0>2s}'.format(year=year, month=month, day=day))
    sleep(0.5)
    select = Select(browser.find_element_by_xpath(
        '//*[@id="branch_selector"]'))
    select.select_by_visible_text(branch + '數位學堂')
    sleep(0.5)
    # `time` is the 1-based session index from info.py.
    browser.find_element_by_xpath(
        '//*[@id="session_time_div"]/input[{0}]'.format(time)).click()
    # Confirmation code, rendered with spaces that must be stripped.
    userinputCode = browser.find_element_by_xpath('//*[@id="code"]').text
    userinputCodelist = userinputCode.split(' ')
    userinputCode = ''.join(userinputCodelist)
    browser.find_element_by_xpath(
        '//*[@id="userinputcode"]').send_keys(userinputCode)
    if grabbed:
        browser.find_element_by_xpath(
            '/html/body/div[3]/div[3]/div/div[2]/section/article/table/tbody/tr[8]/td[2]/div[1]/a').click()
        # Two confirmation alerts pop up in sequence; accept both.
        # NOTE(review): switch_to_alert() is deprecated in modern Selenium
        # (use browser.switch_to.alert) — confirm the pinned selenium version.
        alert = browser.switch_to_alert()
        print(alert.text)
        alert.accept()
        alert = browser.switch_to_alert()
        alert.accept()
        print('選課成功')  # "course grabbed successfully"
        sleep(5)
if __name__ == "__main__":
    # Script entry point: authenticate, attempt the booking, then close
    # the browser window (a failed step raises and leaves it open).
    login()
    grab()
    browser.close()
| {"/TKB_Scraper.py": ["/info.py"]} |
74,398 | zephyrxvxx7/TKB_Course_Bot | refs/heads/master | /info.py | # 帳號及密碼
username = ''
password = ''
# 選課名稱
# ex: 資料結構
className = '資料結構'
# 日期
year = '2018'
month = '07'
day = '23'
# 地點
# ex: 東海、逢甲
branch = '東海'
# 第幾場次
time = 1
# 確定要搶課嗎?
grabbed = True
| {"/TKB_Scraper.py": ["/info.py"]} |
74,399 | filantus/aiohttp_test_app | refs/heads/master | /main.py | from aiohttp import web
import json
import yaml
from yaml.scanner import ScannerError
from yaml.parser import ParserError
from docker_manager import DockerManager
class BaseView(web.View):
    """Common plumbing for the JSON API views.

    Subclasses declare `allowed_methods` and override `get_response` /
    `post_response`; disallowed methods get a JSON 405 reply. Every response
    is stamped with `content_type` (application/json).
    """

    allowed_methods = ['GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'OPTIONS', 'PATCH']
    content_type = 'application/json'

    async def get_response(self, request):
        # Subclasses that allow GET must override this.
        # (NotImplementedError is an Exception subclass, so existing
        # `except Exception` handlers still catch it.)
        raise NotImplementedError('GET method not implemented')

    async def post_response(self, request):
        # Subclasses that allow POST must override this.
        raise NotImplementedError('POST method not implemented')

    def response_405(self):
        """JSON 405 reply listing the methods this view does support."""
        data = {
            # was an f-string with no placeholders — plain literal suffices
            'error': 'method not allowed',
            'allowed_methods': self.allowed_methods,
        }
        return web.Response(text=json.dumps(data), status=405)

    async def get(self):
        if 'GET' in self.allowed_methods:
            response = await self.get_response(self.request)
            response.content_type = self.content_type
            return response
        return self.response_405()

    async def post(self):
        if 'POST' in self.allowed_methods:
            response = await self.post_response(self.request)
            response.content_type = self.content_type
            return response
        return self.response_405()
class IndexView(BaseView):
    """Health-check endpoint: GET / -> {"status": "success"}."""

    allowed_methods = ['GET']

    async def get_response(self, request):
        payload = json.dumps({'status': 'success'})
        return web.Response(text=payload)
class ContainersView(BaseView):
    """GET: list all containers; POST: run a container from a YAML body.

    POST replies 201 with the serialized container on success, 500 with an
    error payload on invalid YAML or a docker failure.
    """

    allowed_methods = ['GET', 'POST']

    async def get_response(self, request):
        return web.Response(text=json.dumps(DockerManager.get_containers()))

    async def post_response(self, request):
        data = await request.read()
        try:
            # SECURITY FIX: the request body is untrusted input, and
            # yaml.load without an explicit Loader can construct arbitrary
            # Python objects; safe_load restricts it to plain data.
            config = yaml.safe_load(data)
        except (ParserError, ScannerError):
            data = {
                'status': 'failed',
                'error': 'invalid run parameters',
            }
            return web.Response(text=json.dumps(data), status=500)
        try:
            data = {
                'status': 'success',
                'container': DockerManager.run_container(config)
            }
            return web.Response(text=json.dumps(data), status=201)
        except Exception as e:
            # Surface the docker error message to the caller.
            data = {
                'status': 'failed',
                'error': str(e),
            }
            return web.Response(text=json.dumps(data), status=500)
class GetContainerView(BaseView):
    """GET /containers/{name}/ -> serialized container, or a JSON 404."""

    allowed_methods = ['GET']

    async def get_response(self, request):
        name = request.match_info['name']
        container = DockerManager.get_container(name)
        if not container:
            body = json.dumps({'error': 'not_found'})
            return web.Response(text=body, status=404)
        return web.Response(text=json.dumps(container))
class StartContainerView(BaseView):
    """POST /containers/{name}/start/ -> {'status': 'success'|'failed'}."""

    allowed_methods = ['POST']

    async def post_response(self, request):
        name = request.match_info['name']
        started = DockerManager.start_container(name)
        outcome = 'success' if started else 'failed'
        return web.Response(text=json.dumps({'status': outcome}))
class StopContainerView(BaseView):
    """POST /containers/{name}/stop/ -> {'status': 'success'|'failed'}."""

    allowed_methods = ['POST']

    async def post_response(self, request):
        name = request.match_info['name']
        stopped = DockerManager.stop_container(name)
        outcome = 'success' if stopped else 'failed'
        return web.Response(text=json.dumps({'status': outcome}))
class RemoveContainerView(BaseView):
    """POST /containers/{name}/remove/ -> {'status': 'success'|'failed'}."""

    allowed_methods = ['POST']

    async def post_response(self, request):
        name = request.match_info['name']
        removed = DockerManager.remove_container(name)
        outcome = 'success' if removed else 'failed'
        return web.Response(text=json.dumps({'status': outcome}))
def get_app():
    """Build the aiohttp application and register all API routes.

    add_get/add_post are kept (rather than add_route) so GET routes also
    answer HEAD, as aiohttp's add_get does by default.
    """
    normalize = web.normalize_path_middleware(append_slash=True, merge_slashes=True)
    app = web.Application(middlewares=[normalize])
    router = app.router
    router.add_get('/', IndexView)
    router.add_get('/containers/', ContainersView)
    router.add_post('/containers/', ContainersView)
    router.add_get('/containers/{name}/', GetContainerView)
    router.add_post('/containers/{name}/start/', StartContainerView)
    router.add_post('/containers/{name}/stop/', StopContainerView)
    router.add_post('/containers/{name}/remove/', RemoveContainerView)
    return app
if __name__ == '__main__':
try:
web.run_app(get_app())
except KeyboardInterrupt:
pass
finally:
DockerManager.client.close()
print('Exit. Bye-bye!') | {"/main.py": ["/docker_manager.py"], "/tests.py": ["/main.py", "/docker_manager.py"]} |
74,400 | filantus/aiohttp_test_app | refs/heads/master | /docker_manager.py | import docker
from docker.models.containers import Container
class DockerManager:
    """Thin class-level wrapper around the docker SDK used by the HTTP views.

    All methods are classmethods sharing one client created from the
    environment (DOCKER_HOST etc.); containers are addressed by name.
    """

    client = docker.from_env()

    @classmethod
    def serialize_container(cls, container: Container) -> dict:
        """Reduce a Container object to the JSON-friendly dict the API returns."""
        container_data = {}
        for key in ('name', 'status'):
            container_data[key] = getattr(container, key)
        container_data['image'] = container.image.tags[-1]
        container_data['created_at'] = container.attrs.get('Created')
        container_data['started_at'] = container.attrs.get('State', {}).get('StartedAt')
        ports = container.attrs.get('HostConfig', {}).get('PortBindings')
        if ports:
            # {'80/tcp': [{'HostPort': '8080'}]} -> {'80': '8080'}
            # (only the first binding per port is reported).
            ports = {k.replace('/tcp', ''): ports[k][0].get('HostPort') for k in ports}
        container_data['ports'] = ports
        return container_data

    @classmethod
    def get_containers(cls, all: bool=True, filters: dict=None) -> list:
        """Return serialized dicts for all (or filtered) containers."""
        data = []
        for container in cls.client.containers.list(all=all, filters=filters):
            data.append(cls.serialize_container(container))
        return data

    @classmethod
    def get_container(cls, name) -> dict:
        """Return the serialized container whose name matches *exactly*, or None.

        Docker's name filter is a substring match, hence the == re-check.
        """
        for container in cls.get_containers(filters={'name': name}):
            if container.get('name') == name:
                return container

    @classmethod
    def get_native_container(cls, name) -> Container:
        """Return the raw SDK Container for *name*, or None when absent.

        BUG FIX: the original passed ``all=all`` — i.e. the *builtin* ``all``
        function, not a boolean (it only worked because a function object is
        truthy). Pass True explicitly so stopped containers are included.
        """
        matches = cls.client.containers.list(all=True, filters={'name': name})
        return matches[0] if matches else None

    @classmethod
    def run_container(cls, config):
        """Create and start a container described by a parsed YAML config.

        *config* maps container name -> {'properties': {...}}; the first
        entry with properties is run (detached) and its serialized form
        returned.
        """
        for name, params in config.items():
            params = params.get('properties')
            if not params:
                continue
            image = params.get('image')
            if image and not image.endswith(':latest'):
                image += ':latest'
            # Accept either 'ports' or 'port_bindings': a list of
            # single-entry {container_port: host_port} mappings.
            ports = params.get('ports') or params.get('port_bindings')
            ports = {tuple(d.keys())[0]: tuple(d.values())[0] for d in ports} if ports else None
            container = cls.client.containers.run(
                name=name,
                image=image,
                ports=ports,
                command=params.get('command'),
                detach=True
            )
            return cls.serialize_container(container)

    @classmethod
    def start_container(cls, name):
        """Start *name* if it exists and is stopped; True when action was taken."""
        container = cls.get_native_container(name)
        if container and container.attrs.get('State').get('Running') is False:
            container.start()
            return True

    @classmethod
    def stop_container(cls, name):
        """Stop *name* if it exists and is running; True when action was taken."""
        container = cls.get_native_container(name)
        if container and container.attrs.get('State').get('Running') is True:
            container.stop()
            return True

    @classmethod
    def remove_container(cls, name):
        """Stop (if needed) and remove *name*; True when a container was removed."""
        container = cls.get_native_container(name)
        if container:
            if container.attrs.get('State').get('Running') is True:
                container.stop()
            container.remove()
            return True
| {"/main.py": ["/docker_manager.py"], "/tests.py": ["/main.py", "/docker_manager.py"]} |
74,401 | filantus/aiohttp_test_app | refs/heads/master | /tests.py | from aiohttp.test_utils import AioHTTPTestCase, unittest_run_loop
import yaml
from main import get_app
from docker_manager import DockerManager
class AppTestCase(AioHTTPTestCase):
    """End-to-end tests: exercise the HTTP API against a real docker daemon.

    Each mutating test creates (and afterwards removes) its own container with
    a unique name and host port, so tests do not interfere with each other.
    A long-lived container is created once for the read-only (GET) tests.
    """

    test_container_name = 'apache-class-test-container'

    async def get_application(self):
        # aiohttp test hook: the app under test.
        return get_app()

    @staticmethod
    def get_config(override_name=None, ports='56777:56777', parse=False):
        """Load the sample YAML run-config, optionally renaming/re-porting it.

        parse=True returns the parsed dict; otherwise the raw YAML text
        (suitable as a POST body).
        """
        with open('./tests_container_run_parameters.yaml') as f:
            config = f.read()
        if override_name:
            config = config.replace('apache', override_name)
        if ports:
            ports = ports.split(':')
            config = config.replace('- 8080: 80', f'- {ports[0]}: {ports[1]}')
        if parse:
            # NOTE(review): yaml.load without an explicit Loader is unsafe on
            # untrusted input and warns on modern PyYAML; the fixture here is
            # trusted, but yaml.safe_load would be the better choice.
            config = yaml.load(config)
        return config

    @classmethod
    def setUpClass(cls):
        """Run once to set up non-modified data for all class methods."""
        print('#'*100, f'\nsetUpTestData {__name__}...\n', sep='')
        # Remove any stale leftover from a previously aborted run first.
        DockerManager.remove_container(cls.test_container_name)
        DockerManager.run_container(cls.get_config(cls.test_container_name, parse=True))

    @classmethod
    def tearDownClass(cls):
        """Run once after all test methods"""
        print('\ntearDownClass...')
        DockerManager.remove_container(cls.test_container_name)
        super().tearDownClass()

    def setUp(self):
        """Run before every test method"""
        print(f'\nTest: {self._testMethodName}...')
        super().setUp()

    def tearDown(self):
        """Run after every test method"""
        super().tearDown()

    @unittest_run_loop
    async def test_index(self):
        # Health check endpoint returns some JSON with status 200.
        resp = await self.client.request('GET', '/')
        assert resp.status == 200
        data = await resp.json()
        assert data is not None

    @unittest_run_loop
    async def test_run_container(self):
        container_name = 'apache-test-run-container'
        DockerManager.remove_container(container_name)
        data = self.get_config(container_name, ports='56778:56778')
        resp = await self.client.request('POST', '/containers/', data=data)
        assert resp.status == 201
        data = await resp.json()
        print(data)
        assert isinstance(data, dict)
        assert data['status'] == 'success'
        assert data['container']['name'] == container_name
        assert data['container']['image'] == 'httpd:latest'
        DockerManager.remove_container(container_name)

    @unittest_run_loop
    async def test_get_containers_list(self):
        resp = await self.client.request('GET', '/containers/')
        assert resp.status == 200
        data = await resp.json()
        print(data)
        assert isinstance(data, list)
        # NOTE(review): assumes the class-level container is listed first —
        # this is fragile if other containers exist on the host daemon.
        assert data[0]['name'] == 'apache-class-test-container'
        assert data[0]['status'] == 'running'

    @unittest_run_loop
    async def test_get_container(self):
        resp = await self.client.request('GET', f'/containers/{self.test_container_name}')
        assert resp.status == 200
        data = await resp.json()
        print(data)
        assert isinstance(data, dict)
        assert data['name'] == 'apache-class-test-container'
        assert data['status'] == 'running'

    @unittest_run_loop
    async def test_stop_container(self):
        container_name = 'apache-test-stop-container'
        DockerManager.remove_container(container_name)
        DockerManager.run_container(self.get_config(container_name, ports='56779:56779', parse=True))
        resp = await self.client.request('POST', f'/containers/{container_name}/stop/')
        assert resp.status == 200
        data = await resp.json()
        print(data)
        assert isinstance(data, dict)
        assert data['status'] == 'success'
        DockerManager.remove_container(container_name)

    @unittest_run_loop
    async def test_start_container(self):
        container_name = 'apache-test-start-container'
        DockerManager.remove_container(container_name)
        DockerManager.run_container(self.get_config(container_name, ports='56783:56783', parse=True))
        # Must be stopped before 'start' can succeed.
        DockerManager.stop_container(container_name)
        resp = await self.client.request('POST', f'/containers/{container_name}/start/')
        assert resp.status == 200
        data = await resp.json()
        print(data)
        assert isinstance(data, dict)
        assert data['status'] == 'success'
        DockerManager.remove_container(container_name)

    @unittest_run_loop
    async def test_remove_container(self):
        container_name = 'apache-test-remove-container'
        DockerManager.remove_container(container_name)
        DockerManager.run_container(self.get_config(container_name, ports='56784:56784', parse=True))
        resp = await self.client.request('POST', f'/containers/{container_name}/remove/')
        assert resp.status == 200
        data = await resp.json()
        print(data)
        assert isinstance(data, dict)
        assert data['status'] == 'success'
        DockerManager.remove_container(container_name)
| {"/main.py": ["/docker_manager.py"], "/tests.py": ["/main.py", "/docker_manager.py"]} |
74,404 | benny-z/FlashBackup | refs/heads/master | /main.py | from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from UI import Ui_MainWindow
import sys
if __name__ == "__main__":
    # Build the Qt application, mount the generated UI onto a plain QDialog,
    # and hand control to the Qt event loop until the window is closed.
    app = QApplication(sys.argv)
    window = QDialog()
    ui = Ui_MainWindow()
    ui.setupUi(window)
    window.show()
    sys.exit(app.exec_())
| {"/main.py": ["/UI.py"], "/UI.py": ["/controller.py"], "/detector.py": ["/copyer.py", "/utils.py"], "/controller.py": ["/utils.py", "/copyer.py"], "/copyer.py": ["/utils.py"]} |
74,405 | benny-z/FlashBackup | refs/heads/master | /UI.py | from PyQt5 import QtCore, QtGui
from PyQt5.QtWidgets import *
from os.path import exists
from controller import add_new_record
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_MainWindow(object):
    """Hand-written (Designer-style) UI: drive picker plus backup-path chooser.

    setupUi builds the widget tree on a host window; okBtnHandler validates
    the user's selection and delegates to controller.add_new_record.
    """

    def setupUi(self, MainWindow):
        """Create and lay out all widgets on *MainWindow* (fixed 639x600)."""
        MainWindow.setObjectName(_fromUtf8("MainWindow"))
        MainWindow.resize(639, 600)
        # min == max == 639x600 makes the window non-resizable.
        MainWindow.setMinimumSize(639, 600)
        MainWindow.setMaximumSize(639, 600)
        self.widget = QWidget(MainWindow)
        self.widget.setObjectName(_fromUtf8("widget"))
        self.label = QLabel(self.widget)
        self.label.setGeometry(QtCore.QRect(10, 10, 211, 16))
        self.label.setObjectName(_fromUtf8("label"))
        self.select_drive_label = QLabel(self.widget)
        self.select_drive_label.setGeometry(QtCore.QRect(10, 30, 211, 16))
        self.select_drive_label.setObjectName(_fromUtf8("select_drive_label"))
        self.back_upLbl = QLabel(self.widget)
        self.back_upLbl.setGeometry(QtCore.QRect(10, 290, 61, 16))
        self.back_upLbl.setObjectName(_fromUtf8("back_upLbl"))
        self.okBtn = QPushButton(self.widget)
        self.okBtn.setGeometry(QtCore.QRect(80, 320, 61, 31))
        font = QtGui.QFont()
        font.setFamily(_fromUtf8("Ampersand"))
        font.setPointSize(18)
        font.setBold(True)
        font.setItalic(False)
        font.setWeight(75)
        self.okBtn.setFont(font)
        self.okBtn.setFocusPolicy(QtCore.Qt.TabFocus)
        self.okBtn.setAutoFillBackground(True)
        self.okBtn.setAutoDefault(False)
        self.okBtn.setDefault(False)
        self.okBtn.setFlat(True)
        self.okBtn.setObjectName(_fromUtf8("okBtn"))
        self.backup_pathTxtBx = QLineEdit(self.widget)
        self.backup_pathTxtBx.setGeometry(QtCore.QRect(76, 288, 141, 20))
        self.backup_pathTxtBx.setObjectName(_fromUtf8("backup_pathTxtBx"))
        self.scrollArea = QScrollArea(self.widget)
        self.scrollArea.setGeometry(QtCore.QRect(10, 50, 211, 231))
        self.scrollArea.setWidgetResizable(True)
        self.scrollArea.setObjectName(_fromUtf8("scrollArea"))
        self.scrollAreaWidgetContentsLayout = QVBoxLayout()
        self.scrollAreaWidgetContents = QWidget()
        self.scrollAreaWidgetContents.setLayout(
            self.scrollAreaWidgetContentsLayout)
        self.scrollAreaWidgetContents.setObjectName(
            _fromUtf8("scrollAreaWidgetContents"))
        self.scrollArea.setWidget(self.scrollAreaWidgetContents)
        # One checkable, icon-sized button per drive Qt can currently see;
        # the QButtonGroup makes the selection mutually exclusive.
        self.buttonGroup = QButtonGroup()
        for drive in QtCore.QDir.drives():
            drive_name = drive.absoluteFilePath().replace('/', '\\')
            ip = QFileIconProvider()
            my_button = QPushButton(drive_name)
            my_button.setIcon(ip.icon(drive))
            my_button.setCheckable(True)
            my_button.setFlat(True)
            buttonSize = QtCore.QSize(120, 70)
            my_button.setFixedSize(buttonSize)
            my_button.setIconSize(buttonSize)
            self.buttonGroup.addButton(my_button)
            self.scrollAreaWidgetContentsLayout.addWidget(my_button)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        self.setConnections()

    def retranslateUi(self, MainWindow):
        """Set all user-visible strings (kept separate for translatability)."""
        MainWindow.setWindowTitle(QApplication.translate(
            "MainWindow", "FlashBackup", None))
        self.label.setText(QApplication.translate(
            "MainWindow", "Make sure your device is connected!", None))
        self.select_drive_label.setText(QApplication.translate(
            "MainWindow", "Please select your removable drive:", None))
        self.back_upLbl.setText(QApplication.translate(
            "MainWindow", "backup path:", None))
        self.okBtn.setText(QApplication.translate(
            "MainWindow", "Ok", None))

    def setConnections(self):
        """Wire signals: OK submits; clicking the path box opens a dir picker."""
        self.okBtn.clicked.connect(
            lambda: self.okBtnHandler(self.buttonGroup.checkedButton()))
        # Clicking anywhere in the text box opens the directory dialog
        # instead of placing a text cursor.
        self.backup_pathTxtBx.mousePressEvent = self.selectBackupDirecotry

    def okBtnHandler(self, selected_button):
        """Validate drive + path selection, then register the backup record."""
        if selected_button is None:
            QMessageBox.warning(
                None, ":(", "You must specify a drive you would like to backup")
            return
        if '' == self.backup_pathTxtBx.text() or not exists(self.backup_pathTxtBx.text()):
            QMessageBox.warning(None, "FUUU", "Please select a valid directory\n\
for the backup to be saved to")
            return
        try:
            add_new_record(
                str(selected_button.text()), str(self.backup_pathTxtBx.text()))
        except Exception as e:
            # Controller raises with user-facing messages; show them as-is.
            QMessageBox.warning(None, "Somekinda error", str(e))

    def selectBackupDirecotry(self, event):
        # NOTE: name keeps its original typo ("Direcotry") because it is
        # bound as an event handler in setConnections above.
        folder = QFileDialog.getExistingDirectory(
            None, "Please select a folder where your backup will be saved")
        if '' != folder:
            self.backup_pathTxtBx.setText(folder)
| {"/main.py": ["/UI.py"], "/UI.py": ["/controller.py"], "/detector.py": ["/copyer.py", "/utils.py"], "/controller.py": ["/utils.py", "/copyer.py"], "/copyer.py": ["/utils.py"]} |
74,406 | benny-z/FlashBackup | refs/heads/master | /detector.py | #############################################################################
# Based on Tim Golden's code from: #
# http://timgolden.me.uk/python/win32_how_do_i/detect-device-insertion.html #
#############################################################################
import win32serviceutil
import win32service
import win32event
import servicemanager
import win32con
from os import chdir, path, getcwd
from time import sleep
import copyer
import traceback
import _winreg
import win32gui
import win32gui_struct
struct = win32gui_struct.struct
pywintypes = win32gui_struct.pywintypes
import win32con
from win32api import OutputDebugString
from utils import log
GUID_DEVINTERFACE_USB_DEVICE = "{A5DCBF10-6530-11D2-901F-00C04FB951ED}"
DBT_DEVICEARRIVAL = 0x8000
DBT_DEVICEREMOVECOMPLETE = 0x8004
import ctypes
#
# Cut-down clone of UnpackDEV_BROADCAST from win32gui_struct, to be
# used for monkey-patching said module with correct handling
# of the "name" param of DBT_DEVTYPE_DEVICEINTERFACE
#
def _UnpackDEV_BROADCAST(lparam):
    """Unpack a DEV_BROADCAST_* structure from a WM_DEVICECHANGE lparam.

    Cut-down clone of win32gui_struct.UnpackDEV_BROADCAST that correctly
    reads the variable-length `name` field of DBT_DEVTYP_DEVICEINTERFACE;
    it is monkey-patched over that module below. Returns None for a NULL
    lparam; raises NotImplementedError for other device types.
    """
    if lparam == 0:
        return None
    hdr_format = "iii"    # dbch_size, dbch_devicetype, dbch_reserved
    hdr_size = struct.calcsize(hdr_format)
    hdr_buf = win32gui.PyGetMemory(lparam, hdr_size)
    size, devtype, reserved = struct.unpack("iii", hdr_buf)
    # Due to x64 alignment issues, we need to use the full format string over
    # the entire buffer. ie, on x64:
    # calcsize('iiiP') != calcsize('iii')+calcsize('P')
    buf = win32gui.PyGetMemory(lparam, size)
    extra = {}
    if devtype == win32con.DBT_DEVTYP_DEVICEINTERFACE:
        fmt = hdr_format + "16s"    # header + 16-byte class GUID
        _, _, _, guid_bytes = struct.unpack(fmt, buf[:struct.calcsize(fmt)])
        extra['classguid'] = pywintypes.IID(guid_bytes, True)
        # The device name is a NUL-terminated wide string trailing the
        # fixed-size part of the structure.
        extra['name'] = ctypes.wstring_at(lparam + struct.calcsize(fmt))
    else:
        raise NotImplementedError("unknown device type %d" % (devtype,))
    return win32gui_struct.DEV_BROADCAST_INFO(devtype, **extra)
win32gui_struct.UnpackDEV_BROADCAST = _UnpackDEV_BROADCAST
class DeviceEventService(win32serviceutil.ServiceFramework):
    """Windows service that triggers a backup when a USB device is plugged in."""

    _svc_name_ = "Backuper"
    _svc_display_name_ = "Backuper"
    _svc_description_ = "a service that backs up stuff"

    def __init__(self, args):
        win32serviceutil.ServiceFramework.__init__(self, args)
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        #
        # Specify that we're interested in device interface
        # events for USB devices
        #
        filter = win32gui_struct.PackDEV_BROADCAST_DEVICEINTERFACE(
            GUID_DEVINTERFACE_USB_DEVICE
        )
        self.hDevNotify = win32gui.RegisterDeviceNotification(
            self.ssh,    # copy of the service status handle
            filter,
            win32con.DEVICE_NOTIFY_SERVICE_HANDLE
        )

    #
    # Add to the list of controls already handled by the underlying
    # ServiceFramework class. We're only interested in device events
    #
    def GetAcceptedControls(self):
        rc = win32serviceutil.ServiceFramework.GetAcceptedControls(self)
        # NOTE(review): the DEVICEEVENT flag is commented out; confirm device
        # notifications still arrive on the target Windows version given the
        # RegisterDeviceNotification call in __init__.
        #rc |= win32service.SERVICE_CONTROL_DEVICEEVENT
        return rc

    #
    # Handle non-standard service events (including our device broadcasts)
    # by logging to the Application event log
    #
    def SvcOtherEx(self, control, event_type, data):
        if control == win32service.SERVICE_CONTROL_DEVICEEVENT:
            info = win32gui_struct.UnpackDEV_BROADCAST(data)
            #
            # This is the key bit here where you'll presumably
            # do something other than log the event. Perhaps pulse
            # a named event or write to a secure pipe etc. etc.
            #
            if event_type == DBT_DEVICEARRIVAL:
                device_name = info.name
                log("Device %s connected" % info.name)
                # PnP device paths look like \\?\USB#VID_x&PID_y#SERIAL#{guid};
                # field 2 ('#'-separated) is the serial used as the config key.
                device_serial = device_name.split('#')[2]
                copyer.run(device_serial)
            elif event_type == DBT_DEVICEREMOVECOMPLETE:
                log("Device %s removed" % info.name)

    #
    # Standard stuff for stopping and running service; nothing
    # specific to device notifications
    #
    def SvcStop(self):
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)

    def SvcDoRun(self):
        # Block until SvcStop signals; all real work happens in SvcOtherEx.
        win32event.WaitForSingleObject(self.hWaitStop, win32event.INFINITE)
        servicemanager.LogMsg(
            servicemanager.EVENTLOG_INFORMATION_TYPE,
            servicemanager.PYS_SERVICE_STOPPED,
            (self._svc_name_, '')
        )
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(DeviceEventService)
| {"/main.py": ["/UI.py"], "/UI.py": ["/controller.py"], "/detector.py": ["/copyer.py", "/utils.py"], "/controller.py": ["/utils.py", "/copyer.py"], "/copyer.py": ["/utils.py"]} |
74,407 | benny-z/FlashBackup | refs/heads/master | /controller.py | from configparser import SafeConfigParser
import pythoncom
import wmi
from utils import conf_filename, wait_for_result, log_fw
from PyQt5.QtWidgets import QMessageBox, QDialog, QLabel, QVBoxLayout
from PyQt5.QtCore import Qt
from threading import Thread
from copyer import run
parser = SafeConfigParser()
def is_removable_drive(drive_letter):
    """Return True when *drive_letter* (e.g. 'E:\\\\') is a removable disk.

    Uses WMI; CoInitialize is required because callers may run on a Qt or
    worker thread that has not initialised COM.
    """
    pythoncom.CoInitialize()
    c = wmi.WMI()
    drive_letter = drive_letter.replace('\\', '')    # WMI DeviceID has no backslash
    return len([localDisk for localDisk in c.Win32_LogicalDisk(Description='Removable Disk', DeviceID=drive_letter)]) > 0
def get_serial_by_drive_letter(drive_letter):
    """Map a drive letter (e.g. 'E:\\\\') to the USB device's serial number.

    Walks WMI's logical-disk -> partition -> physical-disk associations and
    extracts the serial portion of the physical drive's PNPDeviceID.
    Raises IndexError when no matching association exists.
    """
    pythoncom.CoInitialize()
    c = wmi.WMI()
    # Logical disk (letter) -> partition device id ('Disk #N, Partition #M').
    logical_drive = [logicalDisk.Antecedent.deviceID for logicalDisk in c.Win32_LogicalDiskToPartition(
    ) if logicalDisk.Dependent.deviceID.lower() in drive_letter.lower()][0]
    # Partition -> physical drive PNPDeviceID; the serial is the last path
    # component up to the first '&'.
    return [diskDrive.Antecedent.PNPDeviceID for diskDrive in c.Win32_DiskDriveToDiskPartition() if logical_drive == diskDrive.Dependent.deviceID][0].split('\\')[-1].split('&')[0]
def is_record_exists(conf_filename, serial):
    """Raise when the config file already has a section for *serial*.

    NOTE(review): despite the is_* name this does not return a bool — it
    raises on a duplicate and returns None otherwise; modify_conf_file relies
    on the exception, so the contract is documented rather than changed.
    """
    parser.read(conf_filename)
    for section_name in parser.sections():
        if parser.has_option(section_name, 'serial') and serial == parser.get(section_name, 'serial'):
            raise Exception('Conf file already contains this device.\n\
In order to remove it, manually delete the [%s] section' % section_name)
def modify_conf_file(conf_filename, serial, drive_letter, backup_folder):
    """Append a new [<letter>_<serial>] section to the backup config file.

    Raises (via is_record_exists) when the device is already registered.
    last_backup_time starts at '0' so the first backup copies everything.
    """
    is_record_exists(conf_filename, serial)
    section_name = '%s_%s' % (drive_letter, serial)
    parser.read(conf_filename)
    parser.add_section(section_name)
    parser.set(section_name, 'serial', serial)
    parser.set(section_name, 'dst_folder', backup_folder)
    parser.set(section_name, 'last_backup_time', '0')
    parser.set(section_name, 'drive_letter', drive_letter)
    with open(conf_filename, 'w') as conf_file:
        parser.write(conf_file)
def add_new_record(drive_letter, backup_folder):
    """Validate the selected drive, persist its backup config, and optionally
    run an immediate first backup.

    drive_letter: e.g. 'E:\\\\' as shown on the UI drive button.
    backup_folder: destination directory chosen by the user.
    Raises Exception with a user-facing message on any validation failure.
    """
    if not is_removable_drive(drive_letter):
        raise Exception('You have selected a non-removable drive.\n'
                        'Please select a removable device.')
    serial = get_serial_by_drive_letter(drive_letter)
    if serial is None:
        raise Exception('There has occured some kind of error with\n'
                        'getting the device serial number by its drive letter')
    modify_conf_file(conf_filename, serial, drive_letter, backup_folder)
    if QMessageBox.Yes == QMessageBox.question(None, 'hooray!', '''Congrats!
Your backup setting have been updated.
Would you like to start the backup process right now?''', QMessageBox.Yes, QMessageBox.No):
        # BUG FIX: the original did `log_fw = 'stdout'`, which only bound a
        # *local* name and never changed utils.log_fw, so utils.log() kept
        # targeting the Windows event log. Rebind the module attribute so
        # backup progress is printed to stdout when run interactively.
        import utils
        utils.log_fw = 'stdout'
        wait_lbl = QLabel("Please wait for the backup process to finish")
        layout = QVBoxLayout()
        dlg = QDialog()
        dlg.setWindowFlags(Qt.Dialog | Qt.Desktop)
        dlg.setLayout(layout)
        layout.addWidget(wait_lbl)
        # NOTE(review): running dlg.exec_() on a worker thread violates Qt's
        # threading rules (GUI objects belong to the main thread); consider a
        # non-modal dlg.show() plus processEvents instead.
        t = Thread(target=dlg.exec_)
        t.start()
        run(serial)    # blocking: performs the actual copy for this device
        dlg.close()
| {"/main.py": ["/UI.py"], "/UI.py": ["/controller.py"], "/detector.py": ["/copyer.py", "/utils.py"], "/controller.py": ["/utils.py", "/copyer.py"], "/copyer.py": ["/utils.py"]} |
74,408 | benny-z/FlashBackup | refs/heads/master | /utils.py | from os.path import exists, join, dirname, realpath
from time import sleep
import pythoncom
import wmi
import servicemanager
conf_filename = join(dirname(realpath(__file__)), 'flashBackup.ini')
log_fw = 'event_logs'
def log(content):
    """Write *content* to the sink selected by the module-global `log_fw`.

    'event_logs' -> Windows application event log (pywin32 servicemanager,
    only meaningful inside the service); 'stdout' -> plain print for
    interactive runs.
    """
    if 'event_logs' == log_fw:
        # BUG FIX: the original called `unicode(content)` — a Python-2-only
        # builtin that raises NameError on Python 3 (other modules in this
        # project import `configparser` and use f-strings, i.e. Python 3).
        content = str(content)
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE, 0xF000, (content, ''))
    if 'stdout' == log_fw:
        print(content)
def wait_for_file_to_appear(filename):
    """Block until *filename* exists, polling every second for up to 600s.

    Returns the final exists() result (False when the wait timed out).
    """
    waiter = wait_for_result(exists, True, 600, 1)
    return waiter(filename)
def attempt_with_timeout(func, timeout, time_delta):
    """Wrap *func* so calls are retried until they stop raising.

    The wrapper retries `func` every *time_delta* seconds for up to *timeout*
    seconds; after the retry window it makes one final attempt whose
    exception (if any) propagates to the caller.

    Returns the wrapping callable.
    """
    def dec(*args, **kwargs):
        total_time = 0
        while total_time < timeout:
            try:
                return func(*args, **kwargs)
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are not swallowed during the retry loop.
                sleep(time_delta)
                total_time += time_delta
        # BUG FIX: the original called func() here but dropped its return
        # value, so a call that only succeeded on this final attempt (or any
        # call with timeout <= 0) always returned None.
        return func(*args, **kwargs)
    return dec
def wait_for_result(func, wanted_result, timeout = -1, time_delta = -1):
    """Poll *func* until it returns *wanted_result* or *timeout* elapses.

    With both timeout and time_delta left at -1 the poll never gives up.
    Returns the last value *func* produced (which is wanted_result on
    success, and may be None when the window expired before the first call).
    """
    def poller(*args, **kwargs):
        elapsed = 0
        outcome = None
        while (-1 == timeout and -1 == time_delta) or elapsed < timeout:
            outcome = func(*args, **kwargs)
            if wanted_result == outcome:
                return outcome
            sleep(time_delta)
            elapsed += time_delta
        return outcome
    return poller
def get_drive_letter_by_device_serial(serial):
    """Resolve a removable device's serial number to its drive letter.

    Uses WMI: first maps the physical disk whose PNP device id contains
    *serial* to a partition device id, then maps that partition to its
    logical disk (e.g. returns 'E:\\').

    :param serial: substring of the device's PNPDeviceID
    :return: drive letter with trailing backslash
    :raises IndexError: if no matching disk/partition is found
    """
    # new method
    pythoncom.CoInitialize()
    c = wmi.WMI()
    deviceID = [diskDrive.Dependent.deviceID for diskDrive in c.Win32_DiskDriveToDiskPartition() if serial in diskDrive.Antecedent.PNPDeviceID][0]
    return '%s\\' % [logicalDisk.Dependent.deviceID for logicalDisk in c.Win32_LogicalDiskToPartition() if deviceID == logicalDisk.Antecedent.deviceID][0]
    # old method (dead code: everything below the return never executes;
    # kept as historical reference for the registry-based approach)
    '''
    base_key = 'SOFTWARE\Microsoft\Windows Portable Devices\Devices'
    devices_reg_key = _winreg.OpenKey(win32con.HKEY_LOCAL_MACHINE, base_key)
    number_of_sub_keys, number_of_values, lastMod = _winreg.QueryInfoKey(devices_reg_key)
    try:
        for i in range(number_of_sub_keys):
            sub_key_name = _winreg.EnumKey(devices_reg_key, i)
            if device_name in str(sub_key_name):
                sub_key = _winreg.OpenKey(devices_reg_key, sub_key_name)
                letter = _winreg.QueryValueEx(sub_key, 'FriendlyName')[0]
                if ':\\' in letter:
                    # sometimes there are "friendly names" such as NOKIA, that don't represent a drive letter
                    return letter
    except:
        log(traceback.format_exc())
    '''
| {"/main.py": ["/UI.py"], "/UI.py": ["/controller.py"], "/detector.py": ["/copyer.py", "/utils.py"], "/controller.py": ["/utils.py", "/copyer.py"], "/copyer.py": ["/utils.py"]} |
74,409 | benny-z/FlashBackup | refs/heads/master | /copyer.py | import time
import os, shutil
from os import sep, chdir, rename, remove
from os.path import join, getsize, exists
from configparser import SafeConfigParser
import datetime
from sys import argv
import traceback
from utils import log, attempt_with_timeout, get_drive_letter_by_device_serial, log_fw, conf_filename, wait_for_file_to_appear
from time import sleep
max_days_delta = 7
parser = SafeConfigParser()
def do_backup(src_base, dst_base):
    """Mirror the directory tree under *src_base* into *dst_base*.

    A file is copied when it is missing from the destination or when its
    size differs.  When replacing an existing file, the new copy is staged
    under a ``.being.copied`` name and swapped in via renames so an
    interrupted copy never clobbers the previous backup.

    NOTE(review): ``unicode`` is a Python 2 builtin; under Python 3 this
    raises NameError — confirm the intended interpreter.
    """
    log('starting do_backup src: %s; dst:%s' % (src_base, dst_base))
    chdir(src_base)
    for root, dirs, files in os.walk(src_base):
        for filename in files:
            src_file_fullpath = unicode(join(root, filename))
            # destination mirrors the source's relative layout
            dst_directory = unicode(root.replace(src_base, dst_base))
            dst_file_fullpath = unicode(join(dst_directory, filename))
            if not exists(dst_directory):
                os.makedirs(dst_directory)
            # copy when missing, or when sizes differ (cheap freshness test)
            if not exists(dst_file_fullpath) or getsize(src_file_fullpath) != getsize(dst_file_fullpath):
                try:
                    if exists(dst_file_fullpath):
                        # stage the new copy, swap it in, then drop the old one
                        dst_file_tmp_fullpath = "%s.being.copied" % dst_file_fullpath
                        shutil.copy2(src_file_fullpath, dst_file_tmp_fullpath)
                        rename(dst_file_fullpath, "%s.old" % dst_file_fullpath)
                        rename(dst_file_tmp_fullpath, dst_file_fullpath)
                        remove("%s.old" % dst_file_fullpath)
                    else:
                        shutil.copy2(src_file_fullpath, dst_file_fullpath)
                    if 'stdout' == log_fw:
                        print('copied %s' % unicode(src_file_fullpath))
                except:
                    # best-effort backup: log the failure, keep going with the next file
                    log(traceback.format_exc())
    log('backup done')
def is_backup_required(last_update_datetime, days_between_backups=7):
    """Decide whether a new backup is due.

    :param last_update_datetime: ordinal day number of the last backup
        (int or numeric string); 0 is a sentinel meaning "never backed up"
    :param days_between_backups: minimum days between backups.  The default
        of 7 mirrors the module-level ``max_days_delta``; the parameter
        generalizes the previous hard-coded global.
    :return: True when a backup should run now
    """
    last_update_datetime = int(last_update_datetime)
    if 0 == last_update_datetime:
        # never backed up before
        return True
    last_backup_datetime = datetime.datetime.fromordinal(last_update_datetime)
    current_datetime = datetime.datetime.now()
    delta = current_datetime - last_backup_datetime
    return delta.days >= days_between_backups
def get_device_info(conf_filename, serial):
    """Find the config section describing the device with *serial*.

    Reads *conf_filename* into the module-level parser, then scans each
    section that carries all required options for a case-insensitive
    serial match.

    :return: (section_name, list of (option, value) pairs) or (None, None)
    """
    parser.read(conf_filename)
    required = ('dst_folder', 'serial', 'last_backup_time')
    for section in parser.sections():
        # skip malformed sections missing any required option
        if not all(parser.has_option(section, opt) for opt in required):
            continue
        if serial.lower() == parser.get(section, 'serial').lower():
            return section, parser.items(section)
    return None, None
def update_conf_file(conf_filename, conf_section_name, drive_letter):
    """Record the backup time (today, as an ordinal) and drive letter.

    Writes the module-level parser's state back to *conf_filename*.
    """
    today_ordinal = str(datetime.datetime.now().toordinal())
    parser.set(conf_section_name, 'last_backup_time', today_ordinal)
    parser.set(conf_section_name, 'drive_letter', drive_letter)
    with open(conf_filename, 'w') as conf_fh:
        parser.write(conf_fh)
def run(serial):
    """Run one backup cycle for the device identified by *serial*.

    Looks the device up in the config file, resolves its current drive
    letter (live WMI lookup with the cached config value as fallback),
    performs the backup when one is due, and writes the new backup time
    and drive letter back into the config file.
    """
    conf_section_name, device_info = get_device_info(conf_filename, serial)
    if None in (conf_section_name, device_info):
        log('there has occured some exception in finding the given drive in the conf file for the serial: %s' % (serial))
        return
    dst_base = [value for name, value in device_info if name == 'dst_folder'][0]
    last_update = [value for name, value in device_info if name == 'last_backup_time'][0]
    # kept as a (possibly empty) list: used as a cached fallback below
    drive_letter = [value for name, value in device_info if name == 'drive_letter']
    try:
        # prefer the live lookup; retry for up to a minute
        temp_drive_letter = attempt_with_timeout(get_drive_letter_by_device_serial, timeout = 60, time_delta = 1)(serial)
        drive_letter = temp_drive_letter
    except:
        if [] == drive_letter:
            # no live result and nothing cached in the config: give up
            log('failed to get the drive letter')
            return
        # fall back to the cached config value
        drive_letter = drive_letter[0]
    if is_backup_required(last_update):
        if not drive_letter.endswith(sep): drive_letter = drive_letter + sep
        if not dst_base.endswith(sep): dst_base = dst_base + sep
        log('waiting for the PC to recognize %s' % drive_letter)
        if False == wait_for_file_to_appear(drive_letter):
            log('request timeout, the file %s does not exist' % drive_letter)
            return
        try:
            # the backup itself is retried for up to 10 minutes
            attempt_with_timeout(do_backup, timeout = 600, time_delta = 10)(drive_letter, dst_base)
        except Exception as e:
            log('the following exception has occured during the backup process:\n%s' % str(e))
            return
        update_conf_file(conf_filename, conf_section_name, drive_letter)
if '__main__' == __name__:
    # BUG FIX: rebinding the name imported via ``from utils import log_fw``
    # only changes copyer's copy of the name; utils.log() reads
    # utils.log_fw, so the flag must be set on the utils module as well.
    import utils
    utils.log_fw = 'stdout'
    log_fw = 'stdout'  # copyer's own do_backup() also checks this copy
    serial = argv[1]
    if len(argv) > 2:
        # optional second argument overrides the config file location
        conf_filename = argv[2]
    run(serial)
| {"/main.py": ["/UI.py"], "/UI.py": ["/controller.py"], "/detector.py": ["/copyer.py", "/utils.py"], "/controller.py": ["/utils.py", "/copyer.py"], "/copyer.py": ["/utils.py"]} |
74,410 | Tobias272727/azusabot | refs/heads/master | /azusa_core/plugins/auto_h_pic/data_source.py | import requests
import re
from urllib.parse import urlencode
from threading import Thread
import json
import re
import time
import random
import hashlib
class pixivic_image_spider:
    """Fetches one random illustration for a keyword from the pixivic API
    and saves it into the CoolQ image folder.

    After construction ``self.image_loc`` is either the saved file name
    (relative to the CoolQ image directory) or None when nothing usable
    was found.
    """

    def __init__(self, searching_keyword, header=None):
        """Store the search keyword and HTTP headers, then fetch one image.

        :param searching_keyword: tag/keyword to search on pixivic
        :param header: optional dict of HTTP headers; a desktop Chrome
            User-Agent is used by default
        """
        self.searching_keyword = searching_keyword
        if not header:
            self.header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
            }
        else:
            self.header = header
        self.get_random_image()

    def get_random_image(self):
        """Query the pixivic search API and save one random result.

        Sets ``self.image_loc`` to the saved file name, or None when the
        search returned no data or the image URL was unusable.
        """
        # BUG FIX: always initialise image_loc so callers never hit an
        # AttributeError when the "len(img_link) > 15" branch is skipped.
        self.image_loc = None
        page_num = 1
        # assign the page number and keyword into the query parameters
        params = {
            'page': str(page_num),
            'perSize': '30',
            'keyword': self.searching_keyword
        }
        # the search endpoint of the pixivic API
        base_url = 'https://api.pixivic.com/illustrations?'
        # combine the api url and the url-encoded keywords
        url = base_url + urlencode(params)
        ret = requests.get(url, headers=self.header)
        response = ret.content.decode()
        print('openning url:', url, '\n' + self.searching_keyword)
        # decode the JSON payload
        re_dict = json.loads(response)
        # number of illustrations returned
        print('长度', len(re_dict['data']))
        if len(re_dict['data']) != 0:
            # pick a random illustration among the first few results
            item_idx = random.randint(0, min(10, len(re_dict['data']) - 1))
            img_urls = re_dict['data'][item_idx]['imageUrls']
            image_idx = random.randint(0, len(img_urls) - 1)
            img_link = img_urls[image_idx]['original']
            print(img_link)
            if len(img_link) > 15:
                # drop the ".jpg"/".png" extension; saveimage() rebuilds it
                link = img_link[:-4]
                self.image_loc = self.saveimage(link)

    def saveimage(self, link):
        """Download the image behind *link* via the mirror host and save it.

        :param link: pixiv image URL without its file extension
        :return: the saved file name (md5-of-link + '.jpg')
        """
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
            "referer" :"https://m.pixivic.com/search/illusts?tag=%E5%85%AC%E4%B8%BB%E8%BF%9E%E7%BB%93&VNK=13a4c3fc"
        }
        m = hashlib.md5()
        m.update(link.encode())
        name = m.hexdigest()
        # rewrite the pixiv URL onto the mirror that serves the resized images
        link = "https://img.cheerfun.dev:233/c/540x540_70/img-master/img" + link[36:] + "_master1200.jpg"
        print('[INFO]:正在保存图片:' + link)
        ret = requests.get(link, headers=header)
        image_content = ret.content
        # NOTE(review): hard-coded CoolQ data directory — assumed install path
        filename = 'C:/Users/tobias27/Desktop/CoolQ/data/image/azusabot/' + name + '.jpg'
        file_loc = name + '.jpg'
        with open(filename, 'wb') as f:
            f.write(image_content)
        print('[INFO]:保存成功,图片名为:{}.jpg'.format(name))
        return file_loc
async def get_auto_h_pic(wife: str) -> str:
    """Fetch a random illustration for *wife* and build a CQ reply string."""
    # strip the "wife" honorific so only the character name is searched
    if '老婆' in wife:
        wife = wife.replace('老婆', '')
    spider = pixivic_image_spider(wife)
    if not spider.image_loc:
        # nothing found for this keyword
        return '醒醒,这个' + wife + '不是人类,应该不是你老婆'
    return '[CQ:image,file=azusabot\\' + spider.image_loc + ']' + '你要的色图,この変態!'
| {"/azusa_main.py": ["/config.py"], "/azusa_core/plugins/auto_h_pic/__init__.py": ["/azusa_core/plugins/auto_h_pic/data_source.py"]} |
74,411 | Tobias272727/azusabot | refs/heads/master | /azusa_main.py | from os import path
import nonebot
import pandas as pd
import config
import jieba
import csv
if __name__ == '__main__':
    # Teach jieba every nickname from the CSV so posseg tags them as
    # person names ('nr') when parsing chat messages.
    data = pd.read_csv('data/nickname.csv')
    for i_row in range(data.shape[0]):
        for i_col in range(data.shape[1]):
            cell = data.iloc[i_row, i_col]
            # idiom fix: avoid "== False" comparison against pd.isna()
            if not pd.isna(cell):
                jieba.suggest_freq(cell, tune=True)
                jieba.add_word(cell, tag='nr')
    print('The dictionary of names has been updated.')
    nonebot.init(config)
    # load_plugins: first para is the dir of plugin which is merged by this file's dir and folder names
    # the second para is the pre- when loading the module.
    nonebot.load_plugins(
        path.join(path.dirname(__file__), 'azusa_core', 'plugins'),
        'azusa_core.plugins'
    )
    nonebot.run()
74,412 | Tobias272727/azusabot | refs/heads/master | /config.py | from nonebot.default_config import *
# nonebot configuration overrides for this bot instance.
HOST = '127.0.0.1'  # address the CQHTTP reverse-websocket server listens on
PORT = 9222  # port for the CQHTTP connection
NICKNAME = {'mio','澪','秋山澪','蓝白碗'}  # names the bot answers to
SUPERUSERS = {353152858}  # QQ ids with admin privileges
COMMAND_START = {'', '/', '!', '/', '!'}  # accepted command prefixes (incl. full-width variants)
74,413 | Tobias272727/azusabot | refs/heads/master | /azusa_core/plugins/auto_h_pic/__init__.py | from nonebot import on_command, CommandSession
from nonebot import on_natural_language, NLPSession, IntentCommand
from jieba import posseg
from .data_source import get_auto_h_pic
@on_command('auto_h_pic', aliases=('色图','涩图'))
async def auto_h_pic(session: CommandSession):
    """Command handler: reply with a random illustration of the requested
    character, resolved from the user's message."""
    wife = session.get('wife', prompt='哈?你要谁的色图。')
    # prefer a token tagged as a person name ('nr') over the raw argument
    for token in posseg.lcut(wife):
        if token.flag == 'nr':
            wife = token.word
            break
    reply = await get_auto_h_pic(wife)
    await session.send(reply)
@auto_h_pic.args_parser
async def _(session: CommandSession):
    """Argument parser for auto_h_pic: extract the character name from the
    user's text, prompting again when nothing usable was given."""
    stripped_arg = session.current_arg_text.strip()
    # print('entered args parser', stripped_arg)  # debug
    words = posseg.lcut(stripped_arg)
    name_flag = 0  # NOTE(review): unused — presumably leftover from an earlier version
    # get the name from the words (first token tagged as a person name)
    for word in words:
        if word.flag == 'nr':
            stripped_arg = word.word
            break
    # first invocation: store the tokenized name if one was supplied
    if session.is_first_run:
        if stripped_arg:
            session.state['wife'] = stripped_arg
        return
    # follow-up invocation with no content: nag the user once more
    if not stripped_arg:
        session.pause('快说快说,究竟要什么图,不说我就走了!')
    # store the parsed value under the key currently being asked for
    session.state[session.current_key] = stripped_arg
#@on_command('auto_h_pic2', aliases=('色图'))
#async def auto_h_pic2(session: CommandSession):
# wife = session.get('wife', prompt= '你要谁的色图呀?')
# wife_report = await get_auto_h_pic(wife)
# await session.send(wife_report)
#
#
#
#
# The on_natural_language decorator declares this function as a natural
# language processor.  `keywords` is any iterable of str the handler should
# respond to; without keywords it would respond to every message that was
# not already handled as a command.
@on_natural_language(keywords = {'色图','涩图'} )
async def _(session: NLPSession):
    """NLP fallback: detect "色图/涩图" requests in plain chat and route
    them to the auto_h_pic command with the detected character name."""
    # strip leading/trailing whitespace from the message
    stripped_msg = session.msg_text.strip()
    print('进入NLP',stripped_msg)
    # segment the message and tag each token's part of speech
    words = posseg.lcut(stripped_msg)
    wife = None
    # walk the list returned by posseg.lcut; each element is a pair object
    # with `word` and `flag` attributes (the token and its POS tag)
    for word in words:
        if word.flag == 'nr':
            # 'nr' marks a person name
            wife = word.word
            break
    # return an intent command; the first two arguments (confidence and
    # command name) are mandatory
    return IntentCommand(90.0, 'auto_h_pic', current_arg = wife or '')
74,414 | Tobias272727/azusabot | refs/heads/master | /p_get.py | import requests
import re
from urllib.parse import urlencode
from threading import Thread
import re
import time
import random
import hashlib
class pixivic_image_spider:
    """Command-line spider: fetches a random illustration for a keyword
    from the pixivic API and saves it under ./image/."""

    def __init__(self, searching_keyword, header=None):
        """Remember the keyword and HTTP headers.

        Call :meth:`get_random_image` afterwards to actually download an
        image (callers such as the __main__ block do this explicitly).
        """
        self.searching_keyword = searching_keyword
        if not header:
            self.header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
            }
        else:
            self.header = header
        # BUG FIX: the original ended with the bare expression
        # ``self.get_random_image`` (missing parentheses), a no-op attribute
        # access; since callers invoke get_random_image() themselves, the
        # dead statement is simply removed.

    def get_random_image(self):
        """Pick a random result page and illustration, then save it."""
        page_num = random.randint(1, 10)
        # assign the random page number into the query parameters
        params = {
            'page': str(page_num),
            'perSize': '30',
            'keyword': self.searching_keyword
        }
        # the search endpoint of the pixivic API
        base_url = 'https://api.pixivic.com/illustrations?'
        # combine the api url and the url-encoded keywords
        url = base_url + urlencode(params)
        ret = requests.get(url, headers=self.header)
        response = ret.content.decode()
        print(response)
        # scrape the "original" image links out of the raw JSON text
        img_links = re.findall(r'original.*?\.jpg', response)
        if not img_links:
            raise Exception('no image links found for keyword: %s'
                            % self.searching_keyword)
        # BUG FIX: the original used randint(1, len(img_links)), which both
        # skipped index 0 and could raise IndexError when len was drawn;
        # list indices are 0-based and randint's bounds are inclusive.
        image_idx = random.randint(0, len(img_links) - 1)
        # drop the ".jpg" suffix; the leading 11 chars are the
        # `original':'` match prefix
        link = img_links[image_idx][:-4]
        self.saveimage(link[11:])

    def saveimage(self, link):
        """Download the image behind *link* via the mirror host and save it
        to ./image/<md5-of-link>.jpg."""
        header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36",
            "referer" :"https://m.pixivic.com/search/illusts?tag=%E5%85%AC%E4%B8%BB%E8%BF%9E%E7%BB%93&VNK=13a4c3fc"
        }
        m = hashlib.md5()
        m.update(link.encode())
        name = m.hexdigest()
        # rewrite the pixiv URL onto the mirror that serves the resized images
        link = "https://img.cheerfun.dev:233/c/540x540_70/img-master/img" + link[36:] + "_master1200.jpg"
        print('[INFO]:正在保存图片:' + link)
        ret = requests.get(link, headers=header)
        image_content = ret.content
        filename = './image/' + name + '.jpg'
        with open(filename, 'wb') as f:
            f.write(image_content)
        print('[INFO]:保存成功,图片名为:{}.jpg'.format(name))
'''
url = "https://m.pixivic.com/search/illusts"
parame = {"tag":"公主连结"}
'''
if __name__ == '__main__':
    # Manual smoke test: fetch one random illustration for the keyword.
    spider = pixivic_image_spider('公主连接')
    spider.get_random_image()
| {"/azusa_main.py": ["/config.py"], "/azusa_core/plugins/auto_h_pic/__init__.py": ["/azusa_core/plugins/auto_h_pic/data_source.py"]} |
74,443 | jamisonbennett/networkstatus | refs/heads/master | /network-status-main.py | #!/usr/bin/env python3
import datetime
import logging
from hardware import Hardware
import networkstatus
import time
import threading
logger = logging.getLogger(__name__)
def update(hardware, update_interval):
    """Background loop: refresh the hardware LEDs every *update_interval* s.

    Runs forever; intended to be started on a daemon thread so LED blink
    patterns keep animating between status checks.
    """
    while True:
        hardware.update()
        time.sleep(update_interval)
def main():
    """Entry point: wire up the hardware, start the LED-refresh thread,
    and loop forever running scheduled or button-triggered checks.

    Normal checks run every minute, extended checks every hour; a short
    button press forces a normal check, a long press an extended one.
    Results are printed as CSV rows, with a header before each extended run.
    """
    hardware = Hardware()
    update_interval = 0.1
    # daemon thread keeps LED blink patterns updating between checks
    thread = threading.Thread(target=update, args=(hardware, update_interval), daemon=True)
    thread.start()
    user_input_timeout = 1
    status_timeout = 10
    max_ping = 200
    quick_checks = networkstatus.MultipleChecks([
        networkstatus.PingDefaultGatewayCheck(max_ping=max_ping, timeout=status_timeout),
        networkstatus.PingCheck('google.com', max_ping=max_ping, timeout=status_timeout),
        networkstatus.DnsCheck('google.com', timeout=status_timeout),
        networkstatus.PingPrinterCheck(max_ping=max_ping, timeout=status_timeout)
    ])
    extended_checks = networkstatus.MultipleChecks([
        networkstatus.SpeedCheck(
            min_down=20*1000*1000,  # 20 mpbs (limited by Raspberry Pi 3b wifi capabilities)
            min_up=5*1000*1000,  # 5 mpbs
            max_ping=max_ping,
            timeout=status_timeout)
    ])
    checks = networkstatus.NormalAndExtendedChecks(quick_checks, extended_checks, [hardware])
    # Start by running the extended test (both "last" times are 0)
    last_normal_test = 0
    last_extended_test = 0
    normal_test_interval = 60
    extended_test_interval = 60 * 60
    while True:
        current_time = time.time()
        if (current_time >= last_normal_test + normal_test_interval or
            current_time >= last_extended_test + extended_test_interval):
            last_normal_test = current_time
            time_str = datetime.datetime.now(datetime.timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
            if current_time >= last_extended_test + extended_test_interval:
                # re-print the CSV header before each extended run
                print('# time(utc),{}'.format(checks.column_names()))
                last_extended_test = current_time
                result_str = checks.extended_check()
            else:
                result_str = checks.normal_check()
            print('{},{}'.format(time_str, result_str), flush=True)
        # between checks, watch the button; resetting the "last" times to 0
        # makes the corresponding test due on the next loop iteration
        user_input = hardware.get_user_input(user_input_timeout)
        if user_input == hardware.UserInput.NORMAL_TEST:
            last_normal_test = 0
        elif user_input == hardware.UserInput.EXTENDED_TEST:
            last_normal_test = 0
            last_extended_test = 0
main()
| {"/network-status-main.py": ["/hardware.py", "/networkstatus.py"], "/hardware.py": ["/networkstatus.py"]} |
74,444 | jamisonbennett/networkstatus | refs/heads/master | /hardware.py | #!/usr/bin/env python3
from enum import Enum
from networkstatus import TestObserver
import logging
import RPi.GPIO as GPIO
import time
import threading
logger = logging.getLogger(__name__)
class Hardware(TestObserver):
    """Raspberry Pi GPIO front-end: five status LEDs plus one push button.

    Implements TestObserver so the LEDs track the state of normal and
    extended network tests.  All GPIO access uses BCM pin numbering.
    """

    class Leds(Enum):
        # BCM pin numbers of the indicator LEDs
        NORMAL_PASS = 17
        NORMAL_FAIL = 23
        EXTENDED_PASS = 24
        EXTENDED_FAIL = 25
        TEST_RUNNING = 12

    class Buttons(Enum):
        # BCM pin number of the user button
        RUN_TEST = 16

    class UserInput(Enum):
        # result of polling the button in get_user_input()
        NO_INPUT = 0
        NORMAL_TEST = 1
        EXTENDED_TEST = 2

    class TestState:
        """Mutable record of test progress/results that drives the LEDs."""
        def __init__(self):
            self.normal_test_result = True
            self.extended_test_result = True
            self.scheduled_normal_test = True
            self.scheduled_extended_test = True
            self.last_normal_test_failure_time = 0
            self.last_extended_test_failure_time = 0
            self.normal_test_running = False
            self.extended_test_running = False
            self.last_normal_test_started_time = 0
            self.last_extended_test_started_time = 0

    def __init__(self):
        """Configure the GPIO pins and blink all LEDs once as a self-test."""
        self.test_state_lock = threading.Lock()
        self.test_state = self.TestState()
        self.led_lock = threading.Lock()
        GPIO.setwarnings(False)
        # Blink all LEDs for 1 second
        GPIO.setmode(GPIO.BCM)
        for button in self.Buttons:
            GPIO.setup(button.value, GPIO.IN)
        for led in self.Leds:
            GPIO.setup(led.value, GPIO.OUT)
            GPIO.output(led.value, False)
        time.sleep(1)
        for led in self.Leds:
            GPIO.output(led.value, True)
        time.sleep(1)
        for led in self.Leds:
            GPIO.output(led.value, False)

    def __del__(self):
        # best-effort: turn every LED off when the object is collected
        for led in self.Leds:
            GPIO.output(led.value, False)

    def notify_test_started(self, test_type):
        """
        Update the LEDs to indicate a test is running.
        """
        self.test_state_lock.acquire()
        try:
            if test_type == TestObserver.TestType.NORMAL:
                self.test_state.normal_test_running = True
                self.test_state.last_normal_test_started_time = time.time()
            if test_type == TestObserver.TestType.EXTENDED:
                self.test_state.extended_test_running = True
                self.test_state.last_extended_test_started_time = time.time()
        finally:
            self.test_state_lock.release()
        self.__update_leds()

    def notify_test_completed(self, test_type, result):
        """Record the outcome of a finished test and refresh the LEDs."""
        self.test_state_lock.acquire()
        try:
            if test_type == TestObserver.TestType.NORMAL:
                self.test_state.normal_test_result = result
                self.test_state.normal_test_running = False
                self.test_state.scheduled_normal_test = False
                if not result:
                    self.test_state.last_normal_test_failure_time = time.time()
            elif test_type == TestObserver.TestType.EXTENDED:
                self.test_state.extended_test_result = result
                self.test_state.extended_test_running = False
                self.test_state.scheduled_extended_test = False
                if not result:
                    self.test_state.last_extended_test_failure_time = time.time()
        finally:
            self.test_state_lock.release()
        self.__update_leds()

    @staticmethod
    def __get_blink_state(last_failure_time):
        """Return the on/off state for a PASS LED given the last failure.

        The more recent the failure, the more visible the blinking; after
        a week the LED is solid on.
        """
        current_time = time.time()
        hour = 60 * 60
        day = 24 * hour
        week = 7 * day
        if last_failure_time + hour >= current_time:
            return round(current_time * 10) % 2 > 0  # Quick blinking
        elif last_failure_time + day >= current_time:
            return round(current_time) % 2 > 0  # Slow blinking with long pause
        elif last_failure_time + week >= current_time:
            return round(current_time * 10) % 20 > 0  # Slow blinking with short pause
        else:
            return True

    def update(self):
        """Periodic tick: recompute and apply all LED states."""
        self.__update_leds()

    def __update_leds(self):
        """Drive every LED from the current TestState, including blink
        patterns and the minimum TEST_RUNNING hold time."""
        current_time = time.time()
        min_test_running_led_time = 1
        self.test_state_lock.acquire()
        try:
            # NOTE(review): this binds a reference, not a copy — the fields
            # are read below after the lock is released; confirm that races
            # with the writers are acceptable here.
            test_state = self.test_state
        finally:
            self.test_state_lock.release()
        normal_blink_state = Hardware.__get_blink_state(test_state.last_normal_test_failure_time)
        extended_blink_state = Hardware.__get_blink_state(test_state.last_extended_test_failure_time)
        self.led_lock.acquire()
        try:
            # PASS LEDs are suppressed while a new test of that kind is
            # scheduled; FAIL LEDs likewise
            GPIO.output(self.Leds.NORMAL_PASS.value,
                        normal_blink_state and
                        test_state.normal_test_result and
                        not test_state.scheduled_normal_test)
            GPIO.output(self.Leds.NORMAL_FAIL.value,
                        not test_state.normal_test_result and
                        not test_state.scheduled_normal_test)
            GPIO.output(self.Leds.EXTENDED_PASS.value,
                        extended_blink_state and
                        test_state.extended_test_result and
                        not test_state.scheduled_extended_test)
            GPIO.output(self.Leds.EXTENDED_FAIL.value,
                        not test_state.extended_test_result and
                        not test_state.scheduled_extended_test)
            # keep TEST_RUNNING lit for at least min_test_running_led_time
            # after a test starts so very fast tests still flash the LED
            GPIO.output(self.Leds.TEST_RUNNING.value,
                        test_state.normal_test_running or
                        test_state.extended_test_running or
                        test_state.last_normal_test_started_time + min_test_running_led_time >= current_time or
                        test_state.last_extended_test_started_time + min_test_running_led_time >= current_time)
        finally:
            self.led_lock.release()

    def get_user_input(self, timeout):
        """Poll the button for up to *timeout* seconds.

        A short press schedules a normal test; holding the button for 3
        seconds schedules an extended test.

        :param timeout: seconds to wait for a press before giving up
        :return: a UserInput value
        """
        sleep_time = 0.1
        button_push_time_for_extended_test = 3
        timeout_time = time.time() + timeout
        user_input_time = None
        while True:
            if user_input_time is None and GPIO.input(self.Buttons.RUN_TEST.value):
                # Initial button push
                user_input_time = time.time()
                self.test_state_lock.acquire()
                try:
                    self.test_state.scheduled_normal_test = True
                finally:
                    self.test_state_lock.release()
                self.__update_leds()
            elif user_input_time is not None and not GPIO.input(self.Buttons.RUN_TEST.value):
                # Button was pushed and released before the extended test time was reached
                return self.UserInput.NORMAL_TEST
            elif user_input_time is not None and time.time() >= user_input_time + button_push_time_for_extended_test:
                # Button was held for the extended test time
                self.test_state_lock.acquire()
                try:
                    self.test_state.scheduled_extended_test = True
                finally:
                    self.test_state_lock.release()
                self.__update_leds()
                return self.UserInput.EXTENDED_TEST
            elif user_input_time is None and time.time() >= timeout_time:
                # The button was not pushed and the timeout has happened
                return self.UserInput.NO_INPUT
            else:
                time.sleep(sleep_time)
| {"/network-status-main.py": ["/hardware.py", "/networkstatus.py"], "/hardware.py": ["/networkstatus.py"]} |
74,445 | jamisonbennett/networkstatus | refs/heads/master | /networkstatus.py | #!/usr/bin/env python3
import dns.resolver
from enum import Enum
import functools
import json
import logging
import re
import speedtest
import subprocess
logger = logging.getLogger(__name__)
class TestObserver:
    """Observer interface notified when tests start and finish.

    Subclasses override the notify hooks; the defaults are no-ops.
    """

    class TestType(Enum):
        """Kind of test run being reported."""
        NORMAL = 1
        EXTENDED = 2

    def notify_test_started(self, test_type):
        """Called when a test of *test_type* begins.  Default: no-op."""
        return None

    def notify_test_completed(self, test_type, result):
        """Called when a test finishes with boolean *result*.  Default: no-op."""
        return None
def external_command(args, input_str=None, throw_of_error=True, timeout=10):
    """
    Run an external command with a timeout
    :param args: The arguments for popen
    :param input_str: The input text
    :param throw_of_error: Whether to throw an exception if the program exits with an error code
    :param timeout: The timeout in seconds
    :return: (standard out, standard error, error code)
    :raises subprocess.TimeoutExpired: if the command exceeds *timeout*
    """
    p = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, encoding='utf8')
    try:
        (out, err) = p.communicate(input_str, timeout=timeout)
    except subprocess.TimeoutExpired:
        # BUG FIX: per the subprocess docs, kill and reap the child on
        # timeout — otherwise the process keeps running and the pipes leak.
        p.kill()
        p.communicate()
        raise
    if throw_of_error and p.returncode != 0:
        raise Exception('Command {} failed with code {}. {}'.format(args[0], p.returncode, err))
    return out, err, p.returncode
def ping(host, timeout=10):
    """
    Ping a host once
    :param host: The host to ping
    :param timeout: The timeout in seconds (passed to ping -w and to the subprocess)
    :return: The round-trip time in milliseconds
    :raises Exception: if the ping binary fails (host unreachable)
    :raises AttributeError: if no "time=... ms" token appears in the output
    """
    out, _, _ = external_command(args=['ping', '-c', '1', '-w', str(timeout), host], timeout=timeout)
    # parse the "time=XX ms" token from the single-probe output
    time_string = re.search('time=(.*) ms', out).group(1)
    time = float(time_string)
    return time
def dns_resolve(host, timeout=10):
    """
    Get the IPv4 address of the host
    :param host: The host to lookup
    :param timeout: The timeout in seconds
    :return: The IPv4 address (first answer)
    :raises dns.exception.DNSException: on resolution failure or timeout
    """
    # default rdtype is A, so the answers are IPv4 records
    answers = dns.resolver.query(host, lifetime=timeout)
    ipv4 = answers[0].address
    return ipv4
def default_gateway(timeout=10):
    """
    Get the IPv4 default gateway via `ip -j -4 route`
    :param timeout: The timeout in seconds
    :return: The IPv4 address of the gateway
    :raises Exception: if no default route exists
    """
    out, _, _ = external_command(args=['ip', '-j', '-4', 'route'], timeout=timeout)
    for record in json.loads(out):
        if record['dst'] == 'default':
            return record['gateway']
        # BUG FIX: the original had an unreachable `continue` directly
        # after the `return`; it has been removed.
    raise Exception('No default route')
def dns_service_discovery(service_type, timeout=10):
    """
    Use DNS-SD (via avahi-browse) to get records
    :param service_type: The DNS-SD service type
    :param timeout: The timeout in seconds
    :return: [dict(name, address)] The deduplicated records
    """
    out, _, _ = external_command(
        args=['avahi-browse', '-d', 'local', '-r', '-t', '-p', '-k', service_type],
        timeout=timeout)
    list_with_duplicates = []
    for record in out.splitlines():
        record = record.split(';')
        # only '=' rows carry fully-resolved services in avahi's
        # parseable (-p) output
        if record[0] != '=':
            continue
        # fields 3 and 7 of the parseable output are the service name
        # and resolved address
        list_with_duplicates += [dict(
            name=record[3],
            address=record[7]
        )]
    ret = [dict(t) for t in {tuple(d.items()) for d in list_with_duplicates}]  # Remove duplicates
    return ret
def discover_printers(timeout=10):
    """
    Use DNS-SD to discover printers advertising the raw-print
    (_pdl-datastream._tcp, port 9100) service type
    :param timeout: The timeout in seconds
    :return: [dict(name, address)] The records
    """
    return dns_service_discovery('_pdl-datastream._tcp', timeout=timeout)
class StatusCheck:
    """Base interface for a single equipment/network status check.

    Subclasses implement do_check() (raw CSV result string) and
    do_to_value() (map that string to a tri-state verdict); check()
    combines the two.
    """

    def do_to_value(self, value_string):
        """Map a raw result string to a verdict: True/False, or None
        for informational columns."""
        return None

    def do_check(self):
        """Execute the check and return its raw CSV result string."""
        return ""

    def check(self):
        """Run the check.

        :return: (csv_string, [verdicts]) — one verdict per logical result
        """
        raw = self.do_check()
        verdicts = [self.do_to_value(raw)]
        return raw, verdicts

    def column_names(self):
        """Return the CSV column header(s) this check emits."""
        return ""

    def num_columns(self):
        """Return how many CSV columns this check emits."""
        return 1
class FailEveryNTimes(StatusCheck):
    """Self-test check that reports FAIL on every n-th invocation."""

    PASS = "Pass"
    FAIL = "Fail"

    def __init__(self, n):
        self.n = n
        self.i = n  # countdown to the next forced failure

    def do_to_value(self, value_string):
        # any string other than FAIL counts as a pass
        return value_string != self.FAIL

    def do_check(self):
        self.i -= 1
        if self.i != 0:
            return self.PASS
        # n-th call: restart the countdown and report a failure
        self.i = self.n
        return self.FAIL

    def column_names(self):
        return 'fail every {} times'.format(self.n)
class PingCheckBase(StatusCheck):
    """Shared verdict logic for all ping-based checks."""

    NO_PING = "-1"  # sentinel: the ping failed entirely

    def __init__(self, max_ping, timeout):
        self.max_ping = max_ping  # worst acceptable round trip, ms
        self.timeout = timeout    # seconds allowed per ping attempt

    def do_to_value(self, value_string):
        # an unreachable host fails outright; otherwise compare the
        # measured latency against the configured ceiling
        return (value_string != self.NO_PING
                and float(value_string) <= self.max_ping)
class PingPrinterCheck(PingCheckBase):
    """
    Performs the printer status check: discovers the printer via DNS-SD
    and pings it.  Emits two CSV columns (ping time, printer addresses).
    """
    def __init__(self, max_ping, timeout):
        PingCheckBase.__init__(self, max_ping, timeout)
    def do_to_value(self, value_string):
        # unused: check() is overridden below and calls
        # PingCheckBase.do_to_value on the first column directly
        raise NotImplementedError
    def do_check(self):
        """Discover and ping the printer.

        :return: "<ping ms>,<space-separated addresses>"; the ping field
            is NO_PING (-1) when discovery or the ping fails, or when more
            than one printer is found.
        """
        try:
            printers = discover_printers(timeout=self.timeout)
            printer_addresses = ' '.join([p['address'] for p in printers])
            if len(printers) == 0:
                raise Exception('No printer found.')
            if len(printers) > 1:
                logger.error('Multiple printers found, cannot resolve to a unique printer. '
                             'The following printers were found: {}.'.format(str(printers)))
                return '{},{}'.format(self.NO_PING, printer_addresses)
            printer = printers[0]
            return '{},{}'.format(ping(printer['address']), printer_addresses)
        except Exception as e:
            logger.error("Failed to ping the printer. {}".format(e))
            return self.NO_PING + ","
    def check(self):
        # second column (addresses) is informational, hence verdict None
        value = self.do_check()
        values = value.split(",")
        return value, [PingCheckBase.do_to_value(self, values[0]), None]
    def column_names(self):
        return "ping time for the printer (ms),printer address"
    def num_columns(self):
        return 2
class PingCheck(PingCheckBase):
    """
    Pings a fixed host
    """
    def __init__(self, host, max_ping, timeout):
        PingCheckBase.__init__(self, max_ping, timeout)
        self.host = host
    def do_check(self):
        """Ping the configured host; "-1" marks an unreachable host."""
        try:
            return str(ping(self.host, timeout=self.timeout))
        except Exception as e:
            logger.error("Failed to ping {}. {}".format(self.host, e))
            return "-1"
    def column_names(self):
        return "ping time for {} (ms)".format(self.host)
class PingDefaultGatewayCheck(PingCheckBase):
    """
    Pings the default gateway (resolved live from the routing table)
    """
    def __init__(self, max_ping, timeout):
        PingCheckBase.__init__(self, max_ping, timeout)
    def do_check(self):
        """Resolve the default gateway and ping it; "-1" on any failure."""
        try:
            default_gw = default_gateway(self.timeout)
            return str(ping(default_gw, timeout=self.timeout))
        except Exception as e:
            logger.error("Failed to ping the default gateway. {}".format(e))
            return "-1"
    def column_names(self):
        return "ping time for the default gateway (ms)"
class DnsCheck(StatusCheck):
    """
    Checks DNS resolution for a host
    """
    def __init__(self, host, timeout):
        self.host = host
        self.timeout = timeout
    def do_to_value(self, value_string):
        """Map do_check()'s result string to a pass/fail verdict.

        BUG FIX: the original had no ``return`` (the verdict was always
        None, so DNS failures never failed the overall test), and
        ``bool("False")`` would have been True anyway.  Compare against
        the exact string emitted by do_check().
        """
        return value_string == "True"
    def do_check(self):
        """Attempt the lookup; return "True" on success, "False" on failure."""
        try:
            dns_resolve(self.host, timeout=self.timeout)
            return "True"
        except Exception as e:
            logger.error("Failed to resolve DNS for {}. {}".format(self.host, e))
            return "False"
    def column_names(self):
        return "DNS resolution check for {}".format(self.host)
class SpeedCheck(StatusCheck):
    """
    Checks network speed against configured thresholds via speedtest.net
    """
    def __init__(self, min_down, min_up, max_ping, timeout):
        # thresholds: bits/second for speeds, milliseconds for latency,
        # seconds for the overall timeout
        self.min_down = min_down
        self.min_up = min_up
        self.max_ping = max_ping
        self.timeout = timeout
    def do_to_value(self, value_string):
        # unused: check() is overridden wholesale below
        raise NotImplementedError
    def do_check(self):
        raise NotImplementedError
    def check(self):
        """Run one speedtest measurement.

        :return: ("<down>,<up>,<latency>,<server>", [down ok, up ok,
            latency ok, None]); the server-name column is informational.
            On any failure returns sentinel "-1,-1,-1,N/A" with all-False
            verdicts.
        """
        try:
            speed_tester = speedtest.Speedtest(timeout=self.timeout)
            server = speed_tester.get_best_server()
            down = speed_tester.download()
            up = speed_tester.upload()
            name = server['name'].replace(',', '')  # keep the CSV well-formed
            latency = server['latency']
            results = [
                down >= self.min_down,
                up >= self.min_up,
                latency <= self.max_ping,
                None
            ]
            return "{},{},{},{}".format(round(down), round(up), latency, name), results
        except Exception as e:
            logger.error("Failed to run the speed test. {}".format(e))
            return "-1,-1,-1,N/A", [False, False, False, None]
    def column_names(self):
        return "download speed (bps),upload speed (bps),speed test latency (ms),speed test server name"
    def num_columns(self):
        return 4
def combine_checks(array_of_tuples):
    """
    Combines multiple checks into a single check
    :param array_of_tuples: An array of tuples where tuple element 0 is the string result and tuple element 1 is the
                            array of the evaluation criteria (True, False, or None)
    :return: Tuple where [0] The result string (CSV) and [1] is an array with the result with evaluation criteria
             applied (None, True, False))
    """
    joined = ','.join(csv for csv, _ in array_of_tuples)
    verdicts = []
    for _, verdict_list in array_of_tuples:
        verdicts.extend(verdict_list)
    return joined, verdicts
class MultipleChecks(StatusCheck):
    """Aggregates a list of checks into one composite check whose CSV
    columns and verdicts are the concatenation of its members'."""

    def __init__(self, checks):
        self.checks = checks

    def do_to_value(self, value_string):
        # unused: check() is overridden wholesale below
        raise NotImplementedError

    def do_check(self):
        raise NotImplementedError

    def check(self):
        outcomes = [member.check() for member in self.checks]
        return combine_checks(outcomes)

    def column_names(self):
        return ','.join(member.column_names() for member in self.checks)

    def num_columns(self):
        return sum(member.num_columns() for member in self.checks)
def combine_results(lhs, rhs):
    """AND two tri-state verdicts where None means "no opinion".

    A None on either side defers to the other side; two real booleans
    are combined with logical and.
    """
    if lhs is None:
        return rhs
    return lhs if rhs is None else (lhs and rhs)
class NormalAndExtendedChecks:
    """
    Runs some checks all the time and other are only run when requested.
    Observers (e.g. the LED hardware) are notified when each kind of test
    starts and completes with its folded pass/fail verdict.
    """
    def __init__(self, normal_checks, extended_checks, test_observers):
        self.normal_checks = normal_checks
        self.extended_checks = extended_checks
        self.test_observers = test_observers
    def normal_check(self):
        """
        Run the normal checks
        :return: the result string (extended columns padded with blanks
            so the CSV stays aligned)
        """
        skip_extended = (
            ','.join([""] * self.extended_checks.num_columns())
        )
        for observer in self.test_observers:
            observer.notify_test_started(TestObserver.TestType.NORMAL)
        results = self.normal_checks.check()
        # fold the per-column verdicts into one tri-state pass/fail
        normal_tests_result = functools.reduce(combine_results, results[1], None)
        for observer in self.test_observers:
            observer.notify_test_completed(TestObserver.TestType.NORMAL, normal_tests_result)
        return results[0] + "," + skip_extended
    def extended_check(self):
        """
        Run the normal checks and extended checks
        :return: the result string

        NOTE(review): the normal checks run before observers are told any
        test started — confirm whether that ordering is intended.
        """
        normal_results = self.normal_checks.check()
        for observer in self.test_observers:
            observer.notify_test_started(TestObserver.TestType.NORMAL)
            observer.notify_test_started(TestObserver.TestType.EXTENDED)
        normal_tests_result = functools.reduce(combine_results, normal_results[1], None)
        extended_results = self.extended_checks.check()
        extended_tests_result = functools.reduce(combine_results, extended_results[1], None)
        for observer in self.test_observers:
            observer.notify_test_completed(TestObserver.TestType.NORMAL, normal_tests_result)
            observer.notify_test_completed(TestObserver.TestType.EXTENDED, extended_tests_result)
        return normal_results[0] + "," + extended_results[0]
    def column_names(self):
        # CSV header: normal columns first, then extended columns
        normal_column_names = self.normal_checks.column_names()
        extended_column_names = self.extended_checks.column_names()
        return normal_column_names + "," + extended_column_names
    def num_columns(self):
        return self.normal_checks.num_columns() + self.extended_checks.num_columns()
| {"/network-status-main.py": ["/hardware.py", "/networkstatus.py"], "/hardware.py": ["/networkstatus.py"]} |
74,480 | chrispsk/Django-channels-celery-in-background-no-broadcast-2-channels | refs/heads/main | /setari/consumers.py | from channels.generic.websocket import AsyncWebsocketConsumer
import json
from random import randint
from asyncio import sleep
from channels.exceptions import StopConsumer
from jokes.tasks import get_joke1, get_joke2
class WSConsumer(AsyncWebsocketConsumer):
    """WebSocket consumer: on connect, sends one joke produced by get_joke1."""

    async def connect(self):
        # Accept the handshake before pushing the first payload.
        await self.accept()
        # NOTE(review): .delay().get() blocks this coroutine until the Celery
        # worker finishes -- confirm this is acceptable in an async consumer.
        ok = get_joke1.delay().get()
        await self.send(json.dumps({'message': ok}))
        print("Client is connected!")

    async def disconnect(self, close_code):
        print('disconnected! ', close_code)
        # StopConsumer tells Channels to tear the consumer down cleanly.
        raise StopConsumer()
class WSConsumer2(AsyncWebsocketConsumer):
    """Second WebSocket consumer: on connect, sends the get_joke2 message."""

    async def connect(self):
        await self.accept()
        # NOTE(review): blocking .get() on the Celery result (4 s task) stalls
        # this coroutine -- verify that is the intended demo behaviour.
        ok = get_joke2.delay().get()
        await self.send(json.dumps({'message': ok}))
        print("Alt Client is connected!")

    async def disconnect(self, close_code):
        print('disconnected! ', close_code)
        # StopConsumer tells Channels to tear the consumer down cleanly.
        raise StopConsumer()
| {"/setari/consumers.py": ["/jokes/tasks.py"]} |
74,481 | chrispsk/Django-channels-celery-in-background-no-broadcast-2-channels | refs/heads/main | /jokes/tasks.py | from __future__ import absolute_import, unicode_literals
import random
from celery.decorators import task
import requests
import time
@task(name="get_the_joke1")
def get_joke1():
url = 'http://api.icndb.com/jokes/random/'
response = requests.get(url).json()
joke = response['value']['joke']
return joke
@task(name="get_the_joke2")
def get_joke2():
time.sleep(4)
ab = "Message from another"
return ab
| {"/setari/consumers.py": ["/jokes/tasks.py"]} |
74,482 | chrispsk/Django-channels-celery-in-background-no-broadcast-2-channels | refs/heads/main | /jokes/urls.py | from django.urls import path, include
from .views import index, index2
# URL routes for the jokes app: "/" and "/alt/" render the two demo pages.
urlpatterns = [
    path('', index, name="home"),
    path('alt/', index2, name="home2"),
]
| {"/setari/consumers.py": ["/jokes/tasks.py"]} |
74,490 | billoh28/CA4010-Data-Mining | refs/heads/main | /bayes.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from ufc_model import sanitation
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sn
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
# Gaussian Naive-Bayes classifier over the UFC fight dataset.
#
# Command-line arguments:
#   argv[1] -- sanitation level forwarded to sanitation()
#   argv[2] -- 1 to build the "prior" (pre-fight) dataset, else post-fight
import sys
args = sys.argv[1:]
if int(args[1]) == 1:
    fight_dataset = sanitation(int(args[0]), True)
else:
    fight_dataset = sanitation(int(args[0]))
# Label-encode every column so categorical data becomes numeric.
le = LabelEncoder()
fight_dataset = fight_dataset.apply(le.fit_transform)
# Features are all columns but the last; the last column is the Winner label.
X = fight_dataset.iloc[:, :-1].values
y = fight_dataset.iloc[:, -1].values
# Manual 80/20 split: the FIRST 20% of rows form the held-out test set.
X_train, X_test, y_train, y_test = X[int(len(X) * .2):] , X[:int(len(X) * .2)], y[int(len(y) * .2):] , y[:int(len(y) * .2)]
classifier = GaussianNB()
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
df_cm = confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# Render the confusion matrix as a heatmap.
plt.figure(figsize = (10,7))
graph = sn.heatmap(df_cm, annot=True)
graph.set(xlabel='Predicted Label', ylabel='True Label')
plt.show()
74,491 | billoh28/CA4010-Data-Mining | refs/heads/main | /degree_1.py | import os, math
import numpy as np
import pandas as pd
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def multiple_of_five_conv(x):
    """Round the numeric part of a weight string to the nearest multiple of 5.

    :param x: a string such as "155 lbs." -- a leading integer followed by
        optional unit tokens
    :return: the same string with the integer rounded to the nearest
        multiple of five (ties cannot occur: remainder is 1-4)
    """
    weight = int(x.split()[0])
    suffix = x.split()[1:]
    rm = weight % 5
    if rm == 0:
        # Already a multiple of five.
        return x
    if rm > 2:
        # 3 or 4 above the lower multiple -> the upper multiple is nearer.
        # (Bug fix: the original rounded DOWN here and jumped +(10 - rm)
        # for rm <= 2, overshooting past the nearest multiple, e.g. 61 -> 70.)
        return " ".join([str(weight + (5 - rm))] + suffix)
    # 1 or 2 above the lower multiple -> round down.
    return " ".join([str(weight - rm)] + suffix)
def get_new_weight(row, average_w2h_dict):
    """Return the average height (as a string) for the row's weight bucket.

    :param row: a mapping/Series with "Weight" and "Height" entries
    :param average_w2h_dict: weight -> average height lookup table
    :return: str(average height) for the weight, or the row's existing
        Height when the weight has no table entry
    """
    # NOTE(review): despite the name, this fills the Height column -- it maps
    # a weight to an average height; confirm against the caller.
    try:
        return str(average_w2h_dict[row["Weight"]])
    except (KeyError, TypeError):
        # Bug fix: was a bare `except:` that swallowed every error.
        # Weight missing from the table (or NaN/unusable) -> keep Height.
        return row["Height"]
def sanitation_degree_1():
    """Load raw fighter details and impute missing values ("slight" cleaning).

    Imputation strategy:
      * Stance  -> mode of all stances
      * Height  -> average height of fighters at the same (bucketed) weight
      * Weight  -> inverse of the height->weight averages
      * Reach   -> average reach at the same height, else height + 1 inch
    :return: the cleaned fighter_detail DataFrame
    """
    # degree 0 : No sanitation
    # degree 1 : Slight sanitation
    # degree 2 : Complete sanitation
    PATH = os.getcwd()
    fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
    #for some reason this file is separated by a semicolon
    # Middle severity sanitation
    # Average all the data we can
    # Such as stance, height and reach
    # fighter_detail.dropna(axis=0, how='any', inplace=True)
    # Do this before the merge --> not taking out fighters, just udating their values
    # Stance - Find the mode of all stances in raw_fighter_details.csv
    # print(fighter_detail["Stance"].value_counts(dropna=False)) # --> Orthodox is the most common
    stance_mode = fighter_detail["Stance"].mode()
    # Now must set every NaN to the mode
    fighter_detail["Stance"] = fighter_detail["Stance"].apply(lambda x: x if type(x) == str else stance_mode[0])
    # Strip units: "155 lbs." -> 155 and '71"' -> 71.
    fighter_detail["Weight"] = fighter_detail["Weight"].apply(lambda x: int(x.split()[0]) if type(x) == str else x)
    fighter_detail["Reach"] = fighter_detail["Reach"].apply(lambda x: int(x.split('"')[0]) if type(x) == str else x)
    # print(fighter_detail["Stance"].value_counts(dropna=False))
    # Now move on to height
    # Must deal with weight first --> 75 NaN weights
    # Can deal with ones which have weight or height, but not neither
    # Need to get the average height for every five pounds of weight, and get average weight for every height
    #print(fighter_detail["Weight"].value_counts(sort=True, dropna=False))
    #print(fighter_detail["Height"].value_counts(sort=True, dropna=False))
    # Must convert heights to inches (from forms like 5' 8")
    fighter_detail["Height"] = fighter_detail["Height"].apply(lambda x: (int(x.strip('"').split("' ")[0]) * 12) + int(x.strip('"').split("' ")[1]) if type(x) == str else x)
    temp = fighter_detail.iloc[:,1:3] # Copy weight and height colums
    # Make every weight a multiple of five
    # NOTE(review): Weight was already converted to int above, so the
    # `type(x) == str` guard makes this apply a no-op -- confirm whether the
    # five-pound bucketing was meant to happen here.
    temp["Weight"] = temp["Weight"].apply(lambda x: multiple_of_five_conv(x) if type(x) == str else x)
    # Now add in missing weights and heights
    # If a weight is missing, find the coresponding height and vice versa
    #temp = temp.apply(lambda row: weight_from_height(row, temp) if type(row["Weight"]) == float else x)
    # Gonna build a weight to height dictionary
    weight_to_height = {}
    for index, row in temp.iterrows():
        #print(row["Weight"], row["Height"])
        # Need to check if either are NaNs
        if pd.isna(row["Height"]) or pd.isna(row["Weight"]):
            continue
        else:
            weight = int(row["Weight"])
            height = int(row["Height"])
            # Ignore super-heavyweights; they would skew the averages.
            if weight < 265:
                if weight not in weight_to_height:
                    weight_to_height[weight] = [height]
                else:
                    (weight_to_height[weight]).append(height)
    # Now must go through each item in the dictionary and get the average height
    # Make a new dictionary as this one might be useful
    average_w2h_dict = {}
    for key, value_lst in weight_to_height.items():
        # Get average of value_lst
        avg = sum(value_lst)//len(value_lst)
        # Add to dict
        average_w2h_dict[key] = avg
    # Manual interpolation for a bucket with no samples.
    average_w2h_dict[130] = 66 # 125 --> 65, 135 --> 67
    # print(sorted(average_w2h_dict.keys()))
    # fighter_detail["Height"] = fighter_detail.apply(lambda row: row["Height"] if type(row["Height"]) == str else get_new_weight(row, average_w2h_dict))
    #print(fighter_detail["Height"].value_counts(sort=True, dropna=False))
    # Have all possible heights. Now must fill in missing weights
    # Can use weight_to_height dict to do this
    average_h2w_dict = {}
    for key, value in average_w2h_dict.items():
        average_h2w_dict[value] = key
    #print(average_h2w_dict.keys())
    # Fill each missing Height from the weight bucket and vice versa.
    for index, row in fighter_detail.iterrows():
        if pd.isna(row["Height"]) and pd.isna(row["Weight"]):
            continue
        elif pd.isna(row["Height"]):
            try:
                # Change weight to int and make it a multiple of five
                weight = int(row["Weight"])
                # Change height from nan to an average
                fighter_detail.at[index, "Height"] = int(average_w2h_dict[weight])
            except KeyError: # Over 265 lbs
                fighter_detail.at[index, "Height"] = 80
        elif pd.isna(row["Weight"]):
            try:
                # Change height to inches (from "5' 4""")
                height = int(row["Height"])
                # Change height from nan to an average
                fighter_detail.at[index, "Weight"] = int(average_h2w_dict[height])
            except KeyError: # Over or equal 265 lbs
                fighter_detail.at[index, "Weight"] = 265
    #print(fighter_detail["Height"].value_counts(sort=True, dropna=False))
    #print(fighter_detail["Weight"].value_counts(sort=True, dropna=False))
    # Finally --> Reach
    # Reach - "71"""
    # Build height -> [reaches] from fully-populated rows.
    height_to_reach = {}
    for index, row in fighter_detail.iterrows():
        #print(row["Weight"], row["Height"])
        # Need to check if either are NaNs
        if pd.isna(row["Height"]) or pd.isna(row["Weight"]):
            continue
        elif not pd.isna(row["Reach"]):
            height = int(row["Height"])
            reach = int(row["Reach"])
            if height not in height_to_reach:
                height_to_reach[height] = [reach]
            else:
                height_to_reach[height].append(reach)
    # Collapse each reach list to its integer average.
    for key, item in height_to_reach.items():
        height_to_reach[key] = sum(item)//len(item)
    # From this we seen that on average reach = height + 1, in inches
    for index, row in fighter_detail.iterrows():
        if pd.isna(row["Height"]) and pd.isna(row["Weight"]):
            continue
        elif pd.isna(row["Reach"]):
            if int(row["Height"]) in height_to_reach:
                fighter_detail.at[index, "Reach"] = height_to_reach[int(row["Height"])]
            else:
                fighter_detail.at[index, "Reach"] = int(row["Height"]) + 1
    #print(fighter_detail.iloc[209:213,])
    return fighter_detail
sanitation_degree_1() | {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,492 | billoh28/CA4010-Data-Mining | refs/heads/main | /DCT.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics, tree
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from ufc_model import sanitation
from sklearn.metrics import classification_report, confusion_matrix
# Decision-tree classifier over the UFC fight dataset.
#
# Command-line arguments:
#   argv[1] -- sanitation level forwarded to sanitation()
#   argv[2] -- 1 to build the "prior" (pre-fight) dataset, else post-fight
import sys
args = sys.argv[1:]
if int(args[1]) == 1:
    fight_dataset = sanitation(int(args[0]), True)
else:
    fight_dataset = sanitation(int(args[0]))
col = fight_dataset.columns # Save column names before label-encoding
le = LabelEncoder()
fight_dataset = fight_dataset.apply(le.fit_transform)
X = fight_dataset.iloc[:, :-1].values
y = fight_dataset.iloc[:, -1].values
# Manual 80/20 split: the FIRST 20% of rows form the held-out test set.
X_train, X_test, y_train, y_test = X[int(len(X) * .2):] , X[:int(len(X) * .2)], y[int(len(y) * .2):] , y[:int(len(y) * .2)]
#print(X_test[0])
# Standardise features (fit on train only to avoid leakage).
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Fixed random state for reproducible splits inside the tree.
classifier = DecisionTreeClassifier(random_state=21, max_depth = 5)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
df_cm = confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
plt.figure(figsize = (10,7))
graph = sn.heatmap(df_cm, annot=True)
graph.set(xlabel='Predicted Label', ylabel='True Label')
plt.show()
# Print what every index of X is in order to see what the tree is splitting on
for i in range(len(col)):
    print("X[{:}] is: {:}".format(i, col[i]))
# # Plot tree
fig, ax = plt.subplots(figsize=(7,7))
plot_tree(classifier, max_depth=4, fontsize=6, filled=True)
plt.show()
74,493 | billoh28/CA4010-Data-Mining | refs/heads/main | /fighter_details_subplot.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ufc_model import sanitation
# 2x2 grid of histograms: fighter age, reach, height and weight distributions.
fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
fight_data = sanitation(2) # for age
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2)
# Pool red- and blue-corner ages into a single series.
age = pd.concat([fight_data['RED_Age'], fight_data['BLUE_Age']], ignore_index=True)
stance = pd.concat([fighter_detail['Stance']], ignore_index=True)
stance = stance.dropna(axis=0, how='any')
print(stance.value_counts())
print(stance.mode())
# Strip units: '71"' -> 71 inches.
reach = fighter_detail["Reach"].apply(lambda x: int(x.split('"')[0]) if type(x) == str else x)
reach = reach.dropna(axis=0, how='any')
# Convert feet-and-inches strings to total inches.
height = fighter_detail["Height"].apply(lambda x: (int(x.strip('"').split("' ")[0]) * 12) + int(x.strip('"').split("' ")[1]) if type(x) == str else x)
height = height.dropna(axis=0, how='any')
weight = fighter_detail["Weight"].apply(lambda x: int(x.split()[0]) if type(x) == str else x)
weight = weight.dropna(axis=0, how='any')
#print(age.mean())
#print(age.mode())
#print(age.median())
print(weight.value_counts())
# Plot Age
ax1.hist(age)
ax1.set_xlabel("Index")
ax1.set_ylabel("Age")
ax1.set_xticks(range(18, 50, 2))
# Plot Reach
ax2.hist(reach)
ax2.set_xlabel("Index")
ax2.set_ylabel("Reach (Inches)")
ax2.set_xticks(range(50, 85, 2))
# Plot Height
ax3.hist(height)
ax3.set_xlabel("Index")
ax3.set_ylabel("Height (Inches)")
ax3.set_xticks(range(50, 85, 2))
# Plot Weight
ax4.hist(weight.sort_values(ascending=True))
ax4.set_xlabel("Index")
ax4.set_ylabel("Weight (lbs)")
ax4.set_xticks(range(100, 400, 10))
plt.show()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,494 | billoh28/CA4010-Data-Mining | refs/heads/main | /ufc_model.py | import os
import numpy as np
import pandas as pd
from degree_1 import sanitation_degree_1
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
def extract_winner(row):
    """Return 1 when the blue-corner fighter won, else 0 (red corner).

    The dataset contains no draws, so any winner name that does not match
    the blue fighter is treated as a red win.
    """
    # The red name is looked up too, so malformed rows fail fast.
    red_name = row["R_fighter"].lower()
    blue_name = row["B_fighter"].lower()
    winner_name = row["Winner"].lower()
    return 1 if winner_name == blue_name else 0
def sanitation(degree=0, is_prior=False):
    """Load and clean the raw UFC fight data into a model-ready DataFrame.

    :param degree: cleaning level -- 0: none, 1: impute fighter details via
        sanitation_degree_1(), 2: drop every row with a missing value
    :param is_prior: True keeps only information known BEFORE the fight
        (pre-fight prediction); False keeps the in-fight statistics
    :return: DataFrame whose last column is the binary 'Winner' label
        (0 = red corner won, 1 = blue corner won)
    """
    PATH = os.getcwd()
    if degree == 1:
        fighter_detail = sanitation_degree_1()
    else:
        fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
    #for some reason this file is separated by a semicolon
    fight_data = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_total_fight_data.csv"), sep=';')
    #Separate the following colunms into two separate coloumns
    #The Original coloumns data looked like:
    #13 of 78
    attemp = '_att'
    landed = '_landed'
    columns = ['R_SIG_STR.', 'B_SIG_STR.', 'R_TOTAL_STR.', 'B_TOTAL_STR.',
        'R_TD', 'B_TD', 'R_HEAD', 'B_HEAD', 'R_BODY','B_BODY', 'R_LEG', 'B_LEG',
        'R_DISTANCE', 'B_DISTANCE', 'R_CLINCH','B_CLINCH', 'R_GROUND', 'B_GROUND']
    # "13 of 78" -> <col>_landed = 13, <col>_att = 78.
    for column in columns:
        fight_data[column+attemp] = fight_data[column].apply(lambda X: int(X.split('of')[1]))
        fight_data[column+landed] = fight_data[column].apply(lambda X: int(X.split('of')[0]))
    #Delete the orginal columns with ofs
    fight_data.drop(columns, axis=1, inplace=True)
    #Coloumns have percentage, we want fractions
    pct_columns = ['R_SIG_STR_pct','B_SIG_STR_pct', 'R_TD_pct', 'B_TD_pct']
    for column in pct_columns:
        fight_data[column] = fight_data[column].apply(lambda X: float(X.replace('%', ''))/100)
    # Add fighter details to dataset: one merge per corner, renaming the
    # detail columns with a RED_/BLUE_ prefix.
    fight_data = fight_data.merge(fighter_detail, left_on='R_fighter', right_on='fighter_name', how='left')
    fight_data.drop('fighter_name', axis=1, inplace=True)
    fight_data.rename(columns={'Height':'RED_Height', 'Weight':'RED_Weight', 'Reach':'RED_Reach', 'Stance':'RED_Stance', 'DOB':'RED_DOB'}, inplace=True)
    fight_data = fight_data.merge(fighter_detail, left_on='B_fighter', right_on='fighter_name', how='left')
    fight_data.drop('fighter_name', axis=1, inplace=True)
    fight_data.rename(columns={'Height':'BLUE_Height', 'Weight':'BLUE_Weight', 'Reach':'BLUE_Reach', 'Stance':'BLUE_Stance', 'DOB':'BLUE_DOB'}, inplace=True)
    fight_data.drop("Referee", axis=1, inplace=True) # Drop referee first as it is not needed
    if degree in [1, 2]: fight_data = fight_data.dropna(axis=0, how='any')
    #print(list(fight_data["date"]))
    #print(list(fight_data["RED_DOB"])[0].split()[-1])
    def add_age(row, is_blue):
        # Age = fight year - birth year; 0 when the DOB is missing (NaN has
        # no .split, hence the AttributeError fallback).
        try:
            if is_blue:
                return int(row["date"].split()[-1]) - int(row["BLUE_DOB"].split()[-1])
            return int(row["date"].split()[-1]) - int(row["RED_DOB"].split()[-1])
        except AttributeError:
            return 0
    # Adding age columns to dataset
    fight_data["RED_Age"] = fight_data.apply(lambda row: add_age(row, False), axis=1)
    fight_data["BLUE_Age"] = fight_data.apply(lambda row: add_age(row, True), axis=1)
    # Dont't need following rows for classification
    fight_data.drop(['date', 'RED_DOB', 'BLUE_DOB', 'location'], axis=1, inplace=True)
    if degree != 1:
        # degree 1 already produced numeric details in sanitation_degree_1().
        #Within the height coloumn there are two float types
        fight_data["RED_Height"] = fight_data["RED_Height"].apply(lambda x: (int(x.split()[0].split("'")[0])*12 + int(x.split()[1].split('"')[0])) if type(x) == str else 0)
        fight_data["BLUE_Height"] = fight_data["BLUE_Height"].apply(lambda x: (int(x.split()[0].split("'")[0])*12 + int(x.split()[1].split('"')[0])) if type(x) == str else 0)
        # Remove lbs from weight
        fight_data["RED_Weight"] = fight_data["RED_Weight"].apply(lambda x: int(x.split()[0]) if type(x) == str else 0)
        fight_data["BLUE_Weight"] = fight_data["BLUE_Weight"].apply(lambda x: int(x.split()[0]) if type(x) == str else 0)
        # Remove " from reach
        fight_data["RED_Reach"] = fight_data["RED_Reach"].apply(lambda x: int(x.split('"')[0]) if type(x) == str else 0)
        fight_data["BLUE_Reach"] = fight_data["BLUE_Reach"].apply(lambda x: int(x.split('"')[0]) if type(x) == str else 0)
    # Changing format column to be no. minutes in fight
    #fight_data["Fight_Duration"] = fight_data["Format"].apply(lambda x: int(x.split()[0]) * int(x.split()[-1].split('-')[-1].strip(')').strip('(')) if x.split()[0].isdigit() else 0)
    fight_data["Round_length_mins"] = fight_data["Format"].apply(lambda x: int(x.split()[-1].split('-')[-1].strip(')').strip('(')) if x.split()[0].isdigit() else 0)
    fight_data["Duration"] = fight_data["Format"].apply(lambda x: int(x.split()[0]) * int(x.split()[-1].split('-')[-1].strip(')').strip('(')) if x.split()[0].isdigit() else 0)
    fight_data.drop('Format', axis=1, inplace=True)
    # Changing last_round_time format to be in seconds
    fight_data["last_round_time"] = fight_data["last_round_time"].apply(lambda x: (int(x.split(':')[0])*60) + int(x.split(':')[1]) if type(x) == str else 0)
    # Change duration to be last round time + previous rounds
    fight_data["Total_Time"] = fight_data.apply(lambda row: int(row["last_round_time"]) + ((int(row["last_round"])-1) * int(row["Round_length_mins"]) * 60) if type(row["last_round"]) == int else 0, axis=1)
    # Now drop rest
    fight_data.drop('Round_length_mins', axis=1, inplace=True)
    #print(type(fight_data.iloc[0]['R_SIG_STR_pct']))
    #print(fight_data.iloc[999])
    #print(len(fight_data[fight_data['RED_Age'] == 0]))
    #print(fight_data.iloc[0])
    # print(fight_data["BLUE_Age"].value_counts())
    # Change winner to binary value
    fight_data["Winner"] = fight_data.apply(lambda row: (extract_winner(row)), axis=1) # extract if winner is red or blue
    # Add winner to end of dataset
    location = fight_data.columns.get_loc("Winner")
    cols = fight_data.columns.tolist()
    cols = cols[:location] + cols[location+1:] + [cols[location]]
    fight_data = fight_data[cols]
    #fight_data.drop("Winner", axis=1, inplace=True)
    # After consideration we decided to split the dataset in two
    # We split on whether the model is predicting prior to the fight or after the fight
    if not is_prior:
        # Don't need half the things we have in as we onlt care about what happened in the fight
        # We can also convert the fighters names in red or blue (0, 1)
        fight_data.drop("last_round", axis=1, inplace=True)
        fight_data.drop("last_round_time", axis=1, inplace=True)
        fight_data.drop("Fight_type", axis=1, inplace=True)
        fight_data.drop("win_by", axis=1, inplace=True)
        fight_data.drop("R_fighter", axis=1, inplace=True)
        fight_data.drop("B_fighter", axis=1, inplace=True)
        # After removing these columns the accuracy did not change, apart from a percent reduction with win_by for obvious reasons, meaning the models were not using these attributes
        #print(fight_data.iloc[0])
    else:
        # prior prediction
        # Want a dataset with all the fighters stastistics, red and blue, and who won the fight
        # drop all details of the fight and keep evrything which was there before the fight.
        # Still predicting the winner just without the insight of knowing how the fight went.
        # In theory, this will make a model which could predict the outcome of two fighters given their statistics and not the outcome.
        # Convert fighter to red and blue
        #fight_data["R_fighter"] = fight_data["R_fighter"].apply(lambda x: 0)
        #fight_data["B_fighter"] = fight_data["B_fighter"].apply(lambda x: 1)
        fight_data.drop("R_fighter", axis=1, inplace=True)
        fight_data.drop("B_fighter", axis=1, inplace=True)
        # Drop irrelevant columns i.e data from the fight
        # Drop Red & Blue fight data
        fight_data.drop("R_KD", axis=1, inplace=True)
        fight_data.drop("B_KD", axis=1, inplace=True)
        fight_data.drop("R_SIG_STR_pct", axis=1, inplace=True)
        fight_data.drop("B_SIG_STR_pct", axis=1, inplace=True)
        fight_data.drop("R_TD_pct", axis=1, inplace=True)
        fight_data.drop("B_TD_pct", axis=1, inplace=True)
        fight_data.drop("R_SUB_ATT", axis=1, inplace=True)
        fight_data.drop("B_SUB_ATT", axis=1, inplace=True)
        fight_data.drop("R_PASS", axis=1, inplace=True)
        fight_data.drop("B_PASS", axis=1, inplace=True)
        fight_data.drop("R_REV", axis=1, inplace=True)
        fight_data.drop("B_REV", axis=1, inplace=True)
        fight_data.drop("R_SIG_STR._att", axis=1, inplace=True)
        fight_data.drop("B_SIG_STR._att", axis=1, inplace=True)
        fight_data.drop("R_SIG_STR._landed", axis=1, inplace=True)
        fight_data.drop("B_SIG_STR._landed", axis=1, inplace=True)
        fight_data.drop("R_TOTAL_STR._att", axis=1, inplace=True)
        fight_data.drop("B_TOTAL_STR._att", axis=1, inplace=True)
        fight_data.drop("R_TOTAL_STR._landed", axis=1, inplace=True)
        fight_data.drop("B_TOTAL_STR._landed", axis=1, inplace=True)
        fight_data.drop("R_TD_att", axis=1, inplace=True)
        fight_data.drop("B_TD_att", axis=1, inplace=True)
        fight_data.drop("R_TD_landed", axis=1, inplace=True)
        fight_data.drop("B_TD_landed", axis=1, inplace=True)
        fight_data.drop("R_HEAD_att", axis=1, inplace=True)
        fight_data.drop("B_HEAD_att", axis=1, inplace=True)
        fight_data.drop("R_HEAD_landed", axis=1, inplace=True)
        fight_data.drop("B_HEAD_landed", axis=1, inplace=True)
        fight_data.drop("R_BODY_att", axis=1, inplace=True)
        fight_data.drop("B_BODY_att", axis=1, inplace=True)
        fight_data.drop("R_BODY_landed", axis=1, inplace=True)
        fight_data.drop("B_BODY_landed", axis=1, inplace=True)
        fight_data.drop("R_DISTANCE_att", axis=1, inplace=True)
        fight_data.drop("B_DISTANCE_att", axis=1, inplace=True)
        fight_data.drop("R_DISTANCE_landed", axis=1, inplace=True)
        fight_data.drop("B_DISTANCE_landed", axis=1, inplace=True)
        fight_data.drop("R_CLINCH_att", axis=1, inplace=True)
        fight_data.drop("B_CLINCH_att", axis=1, inplace=True)
        fight_data.drop("R_CLINCH_landed", axis=1, inplace=True)
        fight_data.drop("B_CLINCH_landed", axis=1, inplace=True)
        fight_data.drop("R_GROUND_att", axis=1, inplace=True)
        fight_data.drop("B_GROUND_att", axis=1, inplace=True)
        fight_data.drop("R_GROUND_landed", axis=1, inplace=True)
        fight_data.drop("B_GROUND_landed", axis=1, inplace=True)
        fight_data.drop("R_LEG_att", axis=1, inplace=True)
        fight_data.drop("B_LEG_att", axis=1, inplace=True)
        fight_data.drop("R_LEG_landed", axis=1, inplace=True)
        fight_data.drop("B_LEG_landed", axis=1, inplace=True)
        # Drop other fight data
        fight_data.drop("last_round", axis=1, inplace=True)
        fight_data.drop("last_round_time", axis=1, inplace=True)
        fight_data.drop("win_by", axis=1, inplace=True)
        # NOTE(review): "Referee" was already dropped earlier in this
        # function, so this second drop should raise KeyError on the
        # is_prior=True path -- confirm and remove if so.
        fight_data.drop("Referee", axis=1, inplace=True)
        # Remove red fighters so equal number of red and blue winners
        # Go through data and count number of blue winners, add to new dataset. Then add red wineers up until max is reached
    print(fight_data.iloc[0])
    return fight_data
sanitation(degree=1, is_prior=False) | {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,495 | billoh28/CA4010-Data-Mining | refs/heads/main | /box_plot.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def extract_winner(row):
    """Return 1 for a blue-corner win, 0 for a red-corner win.

    No draws exist in the dataset, so "not blue" means red.
    """
    # Look up the red name too so malformed rows fail fast.
    red_name = row["R_fighter"].lower()
    blue_name = row["B_fighter"].lower()
    winner_name = row["Winner"].lower()
    return 1 if winner_name == blue_name else 0
# This program will produce a box plot conveying winner loser strike attempted and landed percentages
# Load in fight data
fight_data = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_total_fight_data.csv"), sep=';')
# Remove nans
fight_data = fight_data.dropna(axis=0, how='any')
# Convert winners from their names into Red(0) or Blue(1); a string label is
# also kept for grouping in boxplot(by=...).
fight_data["Categorical_Winner"] = fight_data.apply((lambda row: "Red" if extract_winner(row) == 0 else "Blue"), axis=1)
fight_data["Winner"] = fight_data.apply(lambda row: (extract_winner(row)), axis=1)
def get_percentage(x, y):
    """Return x as a percentage of y; 0 when y is zero or inputs are invalid.

    :param x: numerator (e.g. strikes landed)
    :param y: denominator (e.g. strikes attempted)
    :return: (x / y) * 100, or 0 on division-by-zero / non-numeric input
    """
    try:
        return (x / y) * 100
    except (ZeroDivisionError, TypeError):
        # Bug fix: was a bare `except:` that hid every error (including
        # KeyboardInterrupt); only the expected failures are swallowed now.
        return 0
# Get number of landed strikes total
fight_data["R_SIG_STR_NUM"] = fight_data["R_SIG_STR."].apply(lambda x: int(x.split()[0]) if type(x) == str else x)
fight_data["B_SIG_STR_NUM"] = fight_data["B_SIG_STR."].apply(lambda x: int(x.split()[0]) if type(x) == str else x)
# Change R_SIG_STR and B_SIG_STR from "x of y" to a landed/attempted percentage
fight_data["R_SIG_STR."] = fight_data["R_SIG_STR."].apply(lambda x: get_percentage(int(x.split()[0]), int(x.split()[-1])) if type(x) == str else x)
fight_data["B_SIG_STR."] = fight_data["B_SIG_STR."].apply(lambda x: get_percentage(int(x.split()[0]), int(x.split()[-1])) if type(x) == str else x)
# Boxplot
#boxplot = fight_data.boxplot(column=["R_SIG_STR.", "B_SIG_STR."], by="Categorical_Winner")
#plt.show()
# Re-key the per-corner stats as winner vs loser columns.
fight_data["Winners_Sig_Str_Per"] = fight_data.apply(lambda row: row["R_SIG_STR."] if row["Winner"] == 0 else row["B_SIG_STR."], axis=1)
fight_data["Losers_Sig_Str_Per"] = fight_data.apply(lambda row: row["B_SIG_STR."] if row["Winner"] == 0 else row["R_SIG_STR."], axis=1)
# Knockdowns: each corner's share of the fight's total knockdowns.
fight_data["Winners_KD_Per"] = fight_data.apply(lambda row: get_percentage(int(row["R_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))) if row["Winner"] == 0 else get_percentage(int(row["B_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))), axis=1)
fight_data["Losers_KD_Per"] = fight_data.apply(lambda row: get_percentage(int(row["R_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))) if row["Winner"] == 1 else get_percentage(int(row["B_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))), axis=1)
# Takedowns
#fight_data["Winners_TD_Per"] = fight_data.apply(lambda row: get_percentage(int(row["R_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))) if row["Winner"] == 0 else get_percentage(int(row["B_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))), axis=1)
#fight_data["Losers_TD_Per"] = fight_data.apply(lambda row: get_percentage(int(row["R_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))) if row["Winner"] == 1 else get_percentage(int(row["B_KD"]), (int(row["R_KD"]) + int(row["B_KD"]))), axis=1)
# Get percentage of all landed strikes for both fighters
#fight_data["Winners_Per_Total_Landed_Strikes"] = fight_data.apply(lambda row: get_percentage(row["R_SIG_STR_NUM"], (row["R_SIG_STR_NUM"] + row["B_SIG_STR_NUM"])) if row["Winner"] == 0 else get_percentage(row["B_SIG_STR_NUM"], (row["R_SIG_STR_NUM"] + row["B_SIG_STR_NUM"])), axis=1)
#fight_data["Losers_Per_Total_Landed_Strikes"] = fight_data.apply(lambda row: get_percentage(row["R_SIG_STR_NUM"], (row["R_SIG_STR_NUM"] + row["B_SIG_STR_NUM"])) if row["Winner"] == 1 else get_percentage(row["B_SIG_STR_NUM"], (row["R_SIG_STR_NUM"] + row["B_SIG_STR_NUM"])), axis=1)
# Tree splits on R_GROUND_landed so display results
fight_data["Winners_Landed_Ground_Attacks"] = fight_data.apply(lambda row: int(row["R_GROUND"].split()[0]) if row["Winner"] == 0 else int(row["B_GROUND"].split()[0]), axis=1)
fight_data["Losers_Landed_Ground_Attacks"] = fight_data.apply(lambda row: int(row["R_GROUND"].split()[0]) if row["Winner"] == 1 else int(row["B_GROUND"].split()[0]), axis=1)
# Now plot this
boxplot = fight_data.boxplot(column=["Winners_Landed_Ground_Attacks", "Losers_Landed_Ground_Attacks"])
plt.show()
print(fight_data["R_GROUND"].iloc[3])
74,496 | billoh28/CA4010-Data-Mining | refs/heads/main | /weightAndHeight.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from degree_1 import sanitation_degree_1
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# Two scatter plots of Height vs Weight: raw data (top, "Before") and after
# the degree-1 imputation from sanitation_degree_1() (bottom, "After").
fig, (ax1, ax2) = plt.subplots(2,1)
fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
fighter_detail.dropna(axis=0, how='any', inplace=True)
# Prior
fighter_detail["Height"] = fighter_detail["Height"].apply(lambda x: (int(x.split()[0].split("'")[0])*12 + int(x.split()[1].split('"')[0])) if type(x) == str else 0)
fighter_detail["Weight"] = fighter_detail["Weight"].apply(lambda x: int(x.split()[0]) if type(x) == str else x)
# Post
new_fighter_detail = sanitation_degree_1()
# Remove 800 pound man
fighter_detail.plot.scatter(x='Height', y='Weight', c='b', ax=ax1, label="Before")
new_fighter_detail.plot.scatter(x='Height', y='Weight', c='r', ax=ax2, label="After")
#fighter_detail = sanitation_degree_1()
#print(fighter_detail["Height"].mean())
#print(fighter_detail["Height"].mode())
#print(fighter_detail["Height"].median())
#fighter_detail.plot.scatter(x='Height', y='Reach',c='DarkBlue',ax=ax2)
# Identical y-axes so the before/after panels are directly comparable.
ax1.set_yticks([50, 100, 150, 200, 250, 300, 350, 400, 450])
ax2.set_yticks([50, 100, 150, 200, 250, 300, 350, 400, 450])
plt.show()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,497 | billoh28/CA4010-Data-Mining | refs/heads/main | /graph_winners.py | import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def extract_winner(row):
    """Encode a bout result as its winning corner: 0 for red, 1 for blue.

    *row* must expose "R_fighter", "B_fighter" and "Winner" entries; names
    are compared case-insensitively. The dataset contains no draws, so any
    winner that is not the blue fighter counts as a red win.
    """
    red = row["R_fighter"].lower()
    blue = row["B_fighter"].lower()
    victor = row["Winner"].lower()
    return 1 if victor == blue else 0
# This program will produce a bar chart of the number of wins per corner (red vs blue).
# Load in fight data
fight_data = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_total_fight_data.csv"), sep=';')
# Remove nans
fight_data = fight_data.dropna(axis=0, how='any')
# Convert winners from their names into Red(0) or Blue(1)
fight_data["Winner"] = fight_data.apply(lambda row: (extract_winner(row)), axis=1)
values = fight_data["Winner"].value_counts()
labels = ["Red", "Blue"]
# NOTE(review): value_counts() orders by frequency, so the "Red"/"Blue" labels
# assume red wins are more common than blue wins — confirm before trusting the chart.
hist = sns.barplot(x=labels,y=values)
plt.title('Histogram Showing Dispersion of Winners per Corner')
plt.ylabel('Number of Winners')
plt.show()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,498 | billoh28/CA4010-Data-Mining | refs/heads/main | /Code/Dataset_Handler/dataset_handler.py | # Dataset Handler
# Used to prepare the dataset for the model, split the dataset into seperate testing and training datasets and serialise the datasets
import numpy as np
import os
import cv2
from tqdm import tqdm # shows a progress bar for an iteration while it's executing
import random
from tensorflow.keras.utils import to_categorical
import sys
class DataHandler:
    """
    Prepares the image dataset: walks the class subdirectories, loads and
    resizes every image, and splits them into training and testing sets.
    """
    def __init__(self, data_path):
        # Path to parent file of data
        self.data_path = data_path
        # Subdirectories in parent file; each inner list holds the directories of one class.
        self.DIRECTORIES = [["bee1", "bee2"], ["wasp1", "wasp2"], ["other_insect"], ["other_noinsect"]]
        # Integer labels; only three because other_noinsect is skipped in create_data().
        self.INT_LABELS = [0, 1, 2]
        # Where training data will be saved to
        self.training_set = []
        # Where testing data will be saved to
        self.testing_set = []
        # One hot encoded labels
        self.categorical_labels = to_categorical(self.INT_LABELS, num_classes=None)
    def create_data(self):
        """
        Populate training_set/testing_set with [image, one-hot-label] pairs,
        sending roughly the first 80% of each directory listing to training.
        NOTE(review): os.listdir order is arbitrary, so the 80/20 split is not
        randomised — confirm this is intended.
        """
        for i in range(len(self.DIRECTORIES) - 1): # Not including other_noinsect
            print(self.DIRECTORIES[i])
            for j in range(len(self.DIRECTORIES[i])):
                # Extract label and sub directory
                sub_direct = self.DIRECTORIES[i][j]
                label = self.categorical_labels[i]
                path = os.path.join(self.data_path, sub_direct)
                # Remember to reserve 20 percent for testing / validation
                counter = 1
                num_train = int(len(os.listdir(path)) * 0.8)
                for img in tqdm(os.listdir(path)):
                    # Images are loaded in colour (BGR) and resized to 250x250.
                    img_array = cv2.imread(os.path.join(path,img))
                    img_array = cv2.resize(img_array, (250, 250))
                    if counter < num_train:
                        self.training_set.append([img_array, label])
                    else:
                        self.testing_set.append([img_array, label])
                    counter += 1
    def get_training_set(self):
        # Shallow copy so callers cannot mutate the internal list itself.
        return self.training_set[:]
    def get_testing_set(self):
        # Shallow copy so callers cannot mutate the internal list itself.
        return self.testing_set[:]
def main():
    """
    Build the image dataset, split features/labels for training and testing,
    and serialise the four arrays (X, y, X_test, y_test) with joblib to a
    directory outside the repository.
    """
    # Change working directory to dataset directory
    path = os.path.join(os.getcwd(), "..", "..", "Dataset")
    os.chdir(path)
    # Initialise data handler
    data_handler = DataHandler(path)
    # Create Data
    data_handler.create_data()
    # Extract data
    training_set = data_handler.get_training_set()
    testing_set = data_handler.get_testing_set()
    # Shuffle training data
    random.shuffle(training_set)
    X = []
    y = []
    # Extract labels and features from training data set
    for features, label in training_set:
        X.append(features)
        y.append(label)
    print(X[100].shape)
    # Changes X to a numpy array
    # X will have the shape (?, 250, 250, 3): ? images of 250x250 pixels with 3 colour channels.
    X = np.array(X).reshape(-1, 250, 250, 3)
    y = np.array(y)
    # Repeat for testing data
    X_test = []
    y_test = []
    for features, label in testing_set:
        X_test.append(features)
        y_test.append(label)
    X_test = np.array(X_test).reshape(-1, 250, 250, 3)
    y_test = np.array(y_test)
    import joblib
    # Save these serialised files outside the git folders as it is too large to have within the git
    save_location = os.path.join(path, "..", "..", "Pickles")
    # Make sure it exists
    if not os.path.isdir(save_location):
        os.mkdir(save_location)
    os.chdir(save_location)
    # Create serialised training datasets
    pickle_out = open("X.pickle","wb")
    joblib.dump(X, pickle_out)
    pickle_out.close()
    pickle_out = open("y.pickle","wb")
    joblib.dump(y, pickle_out)
    pickle_out.close()
    # Create serialised testing datasets
    pickle_out = open("X_test.pickle","wb")
    joblib.dump(X_test, pickle_out)
    pickle_out.close()
    pickle_out = open("y_test.pickle","wb")
    joblib.dump(y_test, pickle_out)
    pickle_out.close()
if __name__ == '__main__':
    main()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,499 | billoh28/CA4010-Data-Mining | refs/heads/main | /ageBarGraph.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from ufc_model import sanitation
# Widen pandas display limits for debugging output.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# NOTE(review): fighter_detail is loaded but never used below — dead read.
fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
fight_data = sanitation(2)
sns.set()
sns.set(style="darkgrid")
# Pool both corners' ages into a single series.
age = pd.concat([fight_data['RED_Age'], fight_data['BLUE_Age']], ignore_index=True)
# Summary statistics printed for the report.
print(age.mean())
print(age.mode())
print(age.median())
age_values = age.value_counts()
age_labels = age_values.index
sns.barplot(x=age_labels,y=age_values)
plt.title('Histogram Showing Dispersion of Age')
plt.ylabel('Number of Fighters')
plt.xlabel('Age')
plt.show()
74,500 | billoh28/CA4010-Data-Mining | refs/heads/main | /random_forest_classifier.py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics, tree
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, plot_tree
from ufc_model import sanitation
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sn
from sklearn.ensemble import RandomForestClassifier
# Taking args off command line
# First argument for level of sanitation
# Second selects whether the extra sanitation flag is enabled (1 = enabled)
import sys
args = sys.argv[1:]
if int(args[1]) == 1:
    fight_dataset = sanitation(int(args[0]), True)
else:
    fight_dataset = sanitation(int(args[0]))
# NOTE(review): LabelEncoder is applied to every column, including any numeric
# ones — confirm this encoding is intended for continuous features.
le = LabelEncoder()
fight_dataset = fight_dataset.apply(le.fit_transform)
print(fight_dataset.iloc[1, :-1])
# All columns except the last are features; the last column is the target.
X = fight_dataset.iloc[:, :-1].values
y = fight_dataset.iloc[:, -1].values
# Manual 80/20 split: the first 20% of rows become the test set.
X_train, X_test, y_train, y_test = X[int(len(X) * .2):] , X[:int(len(X) * .2)], y[int(len(y) * .2):] , y[:int(len(y) * .2)]
#print(X_test[0])
# Standardise features using statistics from the training set only.
scaler = StandardScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
# Random state
classifier = RandomForestClassifier(max_depth=5, random_state=0)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
df_cm = confusion_matrix(y_test, y_pred)
print(classification_report(y_test, y_pred))
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# Confusion matrix heat map.
plt.figure(figsize = (10,7))
graph = sn.heatmap(df_cm, annot=True)
graph.set(xlabel='Predicted Label', ylabel='True Label')
plt.show()
#print(classifier.apply(X[:4]))
#print(classifier.apply(X[:4])) | {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,501 | billoh28/CA4010-Data-Mining | refs/heads/main | /run_model.py | # Program which calls and runs the trained CNN model
import sys
import os
import numpy as np
from tensorflow.keras.models import load_model
import matplotlib.pyplot as plt
import cv2
def main():
    """
    Load the trained CNN and classify every image in Test_Images, printing
    the predicted labels next to the labels implied by the file names.
    """
    # Current working directory
    DATA_DIR = os.getcwd()
    # Get images located in Images Test_Images folder
    IMAGES_LOCATION = os.path.join(DATA_DIR, "Test_Images")
    # Labels; the last entry doubles as the low-confidence fallback.
    LABELS = ["bee", "wasp", "insect", "non-insect"]
    # Model file lives outside the Git repo.
    CNN_model = load_model(os.path.join(DATA_DIR, "..", "Models", "model_1.h5"))
    #kNN_model = load_model(os.path.join(DATA_DIR, "..", "Models", "kNN_model_1.h5"))
    #DT_model = load_model(os.path.join(DATA_DIR, "..", "Models", "DT_model_1.h5")) #Decision Tree
    # Possible output of the model
    output = []
    # Expected output
    names = []
    # Feed images to the models
    #for model in model:
    for image in os.listdir(IMAGES_LOCATION):
        img_array = cv2.imread(os.path.join(IMAGES_LOCATION, image)) # convert to array
        img_array = cv2.resize(img_array, (250, 250))
        imgplot = plt.imshow(img_array)
        #plt.show()
        # Model expects a batch: shape (1, 250, 250, 3).
        img_array = np.array(img_array).reshape(-1, 250, 250, 3)
        prediction = CNN_model.predict(img_array, verbose=0)
        print(prediction)
        if np.amax(prediction) < 0.4:
            print("Low confidence")
            # Low confidence so assuming non insect
            output.append(LABELS[-1])
        else:
            output.append(LABELS[prediction.argmax()])
        names.append(image.split(".")[0])
    print("Output from the models : {}".format(" ".join(output)))
    print("Expected Output: {}".format(" ".join(names)))
if __name__ == '__main__':
    main()
74,502 | billoh28/CA4010-Data-Mining | refs/heads/main | /learning_curve_graph.py | import numpy as np
import matplotlib.pyplot as plt
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.datasets import load_digits
from sklearn.model_selection import learning_curve
from sklearn.model_selection import ShuffleSplit
from sklearn.ensemble import RandomForestClassifier
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from ufc_model import sanitation
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
import sys
# First CLI argument: sanitation level; second: 1 enables the extra sanitation flag.
args = sys.argv[1:]
if int(args[1]) == 1:
    fight_dataset = sanitation(int(args[0]), True)
else:
    fight_dataset = sanitation(int(args[0]))
le = LabelEncoder()
fight_dataset = fight_dataset.apply(le.fit_transform)
# Features are all columns except the last; the last column is the target.
X = fight_dataset.iloc[:, :-1].values
y = fight_dataset.iloc[:, -1].values
# 10-fold CV accuracy over 50 training-set sizes from 1% to 100%.
sizes, training_scores, testing_scores = learning_curve(RandomForestClassifier(random_state=21, max_depth=5), X, y, cv=10, scoring='accuracy', train_sizes=np.linspace(0.01, 1.0, 50))
# Mean and Standard Deviation of training scores
mean_training = np.mean(training_scores, axis=1)
Standard_Deviation_training = np.std(training_scores, axis=1)
# Mean and Standard Deviation of testing scores
mean_testing = np.mean(testing_scores, axis=1)
Standard_Deviation_testing = np.std(testing_scores, axis=1)
# dotted blue line is for training scores and green line is for cross-validation score
plt.plot(sizes, mean_training, '--', color="b", label="Training score")
plt.plot(sizes, mean_testing, color="g", label="Cross-validation score")
# Drawing plot
plt.title("LEARNING CURVE FOR Random Forest")
plt.xlabel("Training Set Size"), plt.ylabel("Accuracy Score"), plt.legend(loc="best")
plt.tight_layout()
plt.show()
74,503 | billoh28/CA4010-Data-Mining | refs/heads/main | /heightAndReach.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from degree_1 import sanitation_degree_1
# Widen pandas display limits for debugging output.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# Side-by-side axes: raw data left, sanitised data right.
fig, (ax1, ax2) = plt.subplots(1,2)
fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
# NOTE(review): fight_data is loaded but never used below — dead read.
fight_data = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_total_fight_data.csv"), sep=';')
fighter_detail.dropna(axis=0, how='any', inplace=True)
# Height strings like 5' 11" become total inches; Reach like 72" becomes an int.
fighter_detail["Height"] = fighter_detail["Height"].apply(lambda x: (int(x.split()[0].split("'")[0])*12 + int(x.split()[1].split('"')[0])) if type(x) == str else 0)
fighter_detail["Reach"] = fighter_detail["Reach"].apply(lambda x: int(x.split('"')[0]) if type(x) == str else 0)
fighter_detail.plot.scatter(x='Height',
y='Reach',
c='DarkBlue',
ax=ax1)
# Swap to the sanitised dataset and print summary height statistics.
fighter_detail = sanitation_degree_1()
print(fighter_detail["Height"].mean())
print(fighter_detail["Height"].mode())
print(fighter_detail["Height"].median())
fighter_detail.plot.scatter(x='Height',
y='Reach',
c='DarkBlue',
ax=ax2)
plt.show()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,504 | billoh28/CA4010-Data-Mining | refs/heads/main | /plot_nans.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load in fight data
fight_data = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"), sep=',')
# Make a bar chart NaNs occurences
# Count missing values per column of interest.
height = fight_data["Height"].isna().sum()
weight = fight_data["Weight"].isna().sum()
reach = fight_data["Reach"].isna().sum()
stance = fight_data["Stance"].isna().sum()
dob = fight_data["DOB"].isna().sum()
# Order must match the x-axis labels below.
data = [reach, stance, dob, height, weight]
sns.barplot(x=["Reach", "Stance", "Date of Birth", "Height", "Weight"], y=data, palette='rocket_r')
plt.title('Barchart Showing No. of NaN Occurences in Fighter Details per Column')
plt.ylabel("No. of Occurances")
plt.tight_layout()
plt.show()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,505 | billoh28/CA4010-Data-Mining | refs/heads/main | /bar_chart.py | import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load in fight data
fight_data = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_total_fight_data.csv"), sep=';')
# Remove nans
fight_data = fight_data.dropna(axis=0, how='any')
print(fight_data["win_by"].value_counts())
# Make a bar chart of all the different decisions and their counts
# fight_data["win_by"]
data = fight_data["win_by"].value_counts()
# NOTE(review): value_counts already yields integer counts — this cast looks redundant.
data = data.apply(lambda x: int(x))
sns.barplot(x=data, y=data.index, palette='rocket_r')
plt.title('Barchart Showing Disparity Fight Results')
plt.xlabel("No. of Occurances")
plt.tight_layout()
plt.show()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,506 | billoh28/CA4010-Data-Mining | refs/heads/main | /get_weight_class.py | # Want get the weights of NaN valued fighters
# To do this, gonna go through the g=fights dataset and get vthe weight class they fight in
# This is gonna be difficult of womans and mens fighters
# Data.csv has either "Womman's Whatever" or for mens just the weight class name
# Need to write two dictionarys to map weight class to weight for both men and women
import os
import numpy as np
import pandas as pd
def get_weight(fighter):
    """Infer a fighter's weight (lbs) from the weight class they fought at.

    Scans data.csv for any bout involving *fighter* and maps that bout's
    weight class (men's, or the trailing word of a "Women's <class>" label)
    to the class upper limit in pounds. Returns None when the fighter never
    appears or only fought at catch/open weight.
    """
    bouts = pd.read_csv(os.path.join("UFCDataset", "Original", "data.csv"), sep=',')
    # Upper weight limit (lbs) per UFC weight class.
    class_limit = {"Strawweight": 115, "Flyweight": 125, "Bantamweight": 135,
                   "Featherweight": 145, "Lightweight": 155, "Welterweight": 170,
                   "Middleweight": 185, "Light Heavyweight": 205, "Heavyweight": 265}
    for _, bout in bouts.iterrows():
        in_red = str(bout["R_fighter"]) == fighter
        in_blue = str(bout["B_fighter"]) == fighter
        if not (in_red or in_blue):
            continue
        weight_class = bout["weight_class"]
        if pd.isna(weight_class):
            continue
        if str(weight_class) in class_limit:
            return class_limit[str(weight_class)]
        # Women's divisions are labelled "Women's <class>": key on the last word.
        if str(weight_class.split()[-1]) in class_limit:
            return class_limit[str(weight_class.split()[-1])]
    # Fighter not found, or only catch/open-weight bouts.
    return None
def main():
    """
    Fill missing Weight values in the fighter details by looking up each
    fighter's weight class in the bout data, printing distribution and NaN
    counts before and after.
    """
    fighter_detail = pd.read_csv(os.path.join("UFCDataset", "Original", "raw_fighter_details.csv"))
    # Distribution and NaN count before filling.
    print(fighter_detail["Weight"].value_counts())
    print(fighter_detail["Weight"].isna().sum())
    # Keep parsed numeric weights; fall back to the weight-class lookup for NaNs.
    # NOTE(review): get_weight re-reads data.csv on every call — slow when many rows are NaN.
    fighter_detail["Weight"] = fighter_detail.apply(lambda row: int(row["Weight"].split()[0]) if type(row["Weight"]) == str else get_weight(row["fighter_name"]), axis=1)
    # Distribution and NaN count after filling.
    print(fighter_detail["Weight"].value_counts())
    print(fighter_detail["Weight"].isna().sum())
if __name__ == '__main__':
    main()
| {"/bayes.py": ["/ufc_model.py"], "/DCT.py": ["/ufc_model.py"], "/fighter_details_subplot.py": ["/ufc_model.py"], "/ufc_model.py": ["/degree_1.py"], "/weightAndHeight.py": ["/degree_1.py"], "/ageBarGraph.py": ["/ufc_model.py"], "/random_forest_classifier.py": ["/ufc_model.py"], "/learning_curve_graph.py": ["/ufc_model.py"], "/heightAndReach.py": ["/degree_1.py"]} |
74,510 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/utils/views.py | from ..services.database import Database
from ..services.event_store import EventStore
from ..services.locker import Locker
from .types import ServicesConfig
class ViewSet:
    """
    Basic viewset class for all apps.
    During initialization we bind the viewpoint and services to the instance.
    """
    def __init__(self, viewpoint: str, services: ServicesConfig) -> None:
        # Name of the view method a subclass's dispatch() will invoke.
        self.viewpoint = viewpoint
        # Service clients, each constructed from its section of the services config.
        self.database = Database(services["database"])
        self.event_store = EventStore(services["event_store"])
        self.locker = Locker(services["locker"])
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,511 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/topics/views.py | from typing import Callable, Iterable
import simplejson as json
from werkzeug.exceptions import BadRequest
from werkzeug.routing import Map
from werkzeug.wrappers import Request, Response
from ..utils.routing import Rule
from ..utils.types import ServicesConfig
from ..utils.views import ViewSet
from .schema import is_valid_new_topic, is_valid_update_topic
class TopicViewSet(ViewSet):
    """
    Viewset for topics.

    Provides the write endpoints for topics of one event (new, update,
    delete). Every viewpoint requires a JSON request body.
    """
    def dispatch(self, request: Request, **kwargs: dict) -> Response:
        """
        Dispatches request to the viewpoint.

        Rejects non-JSON requests with HTTP 400, then calls the method whose
        name was bound as self.viewpoint ("new", "update" or "delete").
        """
        if not request.is_json:
            raise BadRequest(
                "Wrong media type. Use 'Content-Type: application/json' instead."
            )
        return getattr(self, self.viewpoint)(request, **kwargs)
    def new(self, request: Request, **kwargs: dict) -> Response:
        """
        Viewpoint to create new topics.

        Expects a JSON array of topic objects (validated by
        is_valid_new_topic) and returns HTTP 201 with the list of newly
        assigned topic ids.
        """
        # Parse event id.
        event_id = kwargs["event"]
        # Check permissions.
        # TODO
        # Check existence of event in database.
        # It someone removes it right this moment, this is no problem.
        # event = self.database.get(f"event:{event_id}:exists")
        # if event is None:
        #     raise BadRequest(f"Event with id {event_id} does not exist.")
        # Validate payload.
        payload = request.json
        is_valid_new_topic(payload)
        result = []
        # Set lock to prepare data for event store.
        # The lock serialises id allocation so concurrent requests cannot
        # hand out the same topic id twice.
        with self.locker.acquire(f"{event_id}.topics.new"):
            # Get highest existing id.
            topic_id = self.event_store.get_highest_id("topic")
            data = {}
            # import time
            # time.sleep(25)
            # Parse topics.
            for topic in payload:
                topic_id += 1
                data.update(
                    {
                        f"topic:{topic_id}:exists": True,
                        f"topic:{topic_id}:title": topic["title"],
                        f"topic:{topic_id}:event": event_id,
                        f"topic:{topic_id}:text": topic.get("text", ""),
                        f"topic:{topic_id}:attachments": topic.get("attachments", []),
                    }
                )
                result.append(topic_id)
            # Save topics.
            self.event_store.save(data)
        # Send topics to stream and create response.
        self.event_store.send(data)
        return Response(json.dumps(result), status=201, content_type="application/json")
    def update(self, request: Request, **kwargs: dict) -> Response:
        """
        Viewpoint to update existing topics.

        NOTE(review): the database write is still commented out, so this
        currently only validates the payload and always reports zero updates.
        """
        # TODO: Check permissions.
        data = request.json
        is_valid_update_topic(data)
        result = {"updated": 0, "error": 0}
        # for topic in data:
        # id = topic.pop("id")
        # rev = topic.pop("rev")
        # url = "/".join((self.database_url, id))
        # headers = self.database_headers
        # headers["If-Match"] = rev
        # response = requests.put(url, data=json.dumps(topic), headers=headers)
        # if response.ok:
        # result["updated"] += 1
        # else:
        # result["error"] += 1
        return Response(json.dumps(result), status=200, content_type="application/json")
    def delete(self, request: Request, **kwargs: dict) -> Response:
        """
        Viewpoint to delete existing topics.

        NOTE(review): not implemented yet; returns a placeholder response.
        """
        return Response("Hello")
def get_get_rules_func(services: ServicesConfig) -> Callable[[Map], Iterable[Rule]]:
    """
    Constructor for Werkzeug's get_rules method.

    Binds *services* into a closure so every TopicViewSet created by the
    returned get_rules function shares the same service configuration.
    """
    def get_rules(map: Map) -> Iterable[Rule]:
        """
        Rules for this app.
        """
        return [
            Rule(
                "/<int:event>/topics/new",
                endpoint="TopicViewSet new",
                methods=("POST",),
                view=TopicViewSet("new", services=services),
            ),
            Rule(
                "/<int:event>/topics/update",
                endpoint="TopicViewSet update",
                methods=("POST",),
                view=TopicViewSet("update", services=services),
            ),
            Rule(
                "/<int:event>/topics/delete",
                endpoint="TopicViewSet delete",
                methods=("POST",),
                view=TopicViewSet("delete", services=services),
            ),
        ]
    return get_rules
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,512 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/utils/routing.py | from typing import Any, Iterable
from werkzeug.routing import Map
from werkzeug.routing import Rule as WerkzeugRule
from werkzeug.routing import RuleFactory as WerkzeugRuleFactory
class Rule(WerkzeugRule):
    """
    Customized Rule to bind view function to the rule.

    Requires a ``view`` keyword argument (the viewset instance that handles
    requests matched by this rule); everything else is passed through to
    Werkzeug's Rule.
    """
    def __init__(self, *args: Any, **kwargs: Any) -> None:
        # Pop "view" before delegating; Werkzeug's Rule does not accept it.
        self.view = kwargs.pop("view")
        super().__init__(*args, **kwargs)
class RuleFactory(WerkzeugRuleFactory):
    """
    Customized RuleFactory to bind get_rules function to the factory.
    During initialization we bind the get_rules method from apps's views.
    """
    def get_rules(self, map: Map) -> Iterable[Rule]:
        """
        Use get_rules function from our app.

        Raises NotImplementedError when no ``get_rules_func`` was bound to
        the instance.
        """
        if not hasattr(self, "get_rules_func"):
            raise NotImplementedError
        return self.get_rules_func(map) # type: ignore[attr-defined] # noqa E821
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,513 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/utils/wrappers.py | from werkzeug.wrappers import Request as WerkzeugRequest
from werkzeug.wrappers.json import JSONMixin # type: ignore
class Request(JSONMixin, WerkzeugRequest):
    """
    Customized Request to use the JSONMixin.

    Adds the ``json`` / ``is_json`` accessors on top of Werkzeug's Request.
    """
    pass
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,514 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/topics/schema.py | import fastjsonschema # type: ignore
from ..utils.schema import schema_version
# Validator for /topics/new request bodies: a non-empty array of unique topic
# objects where "title" is required and "text"/"attachments" are optional.
is_valid_new_topic = fastjsonschema.compile(
    {
        "$schema": schema_version,
        "title": "New topics schema",
        "description": "An array of new topics.",
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "title": {
                    "description": "A string. The title or headline of the topic.",
                    "type": "string",
                    "minLength": 1,
                },
                "text": {
                    "description": "A string containing HTML formatted text.",
                    "type": "string",
                },
                "attachments": {
                    "description": "An array of attachment ids that should be referenced with this topic.",
                    "type": "array",
                    "items": {"type": "integer"},
                    "uniqueItems": True,
                },
            },
            "required": ["title"],
        },
        "minItems": 1,
        "uniqueItems": True,
    }
)
# Validator for /topics/update request bodies: a non-empty array of unique
# partial topic objects; only "id" is required, other fields are optional.
is_valid_update_topic = fastjsonschema.compile(
    {
        "$schema": schema_version,
        "title": "Update topics schema",
        "description": "An array of topics to be updated.",
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "id": {
                    "description": "A string. The id of the topic.",
                    "type": "string",
                    # TODO: Add id validation.
                },
                "title": {
                    "description": "A string. The title or headline of the topic.",
                    "type": "string",
                    "minLength": 1,
                },
                "text": {
                    "description": "A string containing HTML formatted text.",
                    "type": "string",
                },
                "attachments": {
                    # Typo fixed ("arry" -> "array") for consistency with the new-topic schema.
                    "description": "An array of attachment ids that should be referenced with this topic.",
                    "type": "array",
                    "items": {"type": "integer"},
                    "uniqueItems": True,
                },
            },
            "required": ["id"],
        },
        "minItems": 1,
        "uniqueItems": True,
    }
)
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,515 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/core.py | import logging
import os
from typing import Iterable, Union
from urllib.parse import urlparse
import redis
from fastjsonschema import JsonSchemaException # type: ignore
from werkzeug.exceptions import BadRequest, HTTPException
from werkzeug.routing import Map
from werkzeug.wrappers import Response
from .topics import Topics
from .utils.types import (
ApplicationConfig,
ServicesConfig,
StartResponse,
WSGIEnvironment,
)
from .utils.wrappers import Request
# All apps whose URL rules are registered on the Application instance.
Apps = (Topics,)
# Logging is configured unconditionally at import time, at DEBUG level.
logging.basicConfig(level=logging.DEBUG)
class Application:
    """
    Central application container for this service.

    On construction the services configuration is stored on the instance
    and the URL rules of every app listed in ``Apps`` are added to the URL
    map.
    """

    def __init__(self, config: ApplicationConfig) -> None:
        self.config = config
        self.services = config["services"]
        self.url_map = Map()
        for app_class in Apps:
            self.url_map.add(app_class(self.services))

    def dispatch_request(self, request: Request) -> Union[Response, HTTPException]:
        """
        Dispatch the request to the matching app view according to the URL
        rules. Returns a Response object or a HTTPException (both are WSGI
        applications themselves). Schema validation errors are translated
        into BadRequest responses.
        """
        adapter = self.url_map.bind_to_environ(request.environ)
        try:
            rule, arguments = adapter.match(return_rule=True)
            return rule.view.dispatch(request, **arguments)
        except JsonSchemaException as exception:
            return BadRequest(exception.message)
        except HTTPException as exception:
            return exception

    def wsgi_application(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> Iterable[bytes]:
        """
        Build Werkzeug's Request object, dispatch it, and evaluate the
        resulting Response object (or HTTPException) as a WSGI application.
        """
        result = self.dispatch_request(Request(environ))
        return result(environ, start_response)

    def __call__(
        self, environ: WSGIEnvironment, start_response: StartResponse
    ) -> Iterable[bytes]:
        """
        Delegate to `wsgi_application` so that custom middlewares may be
        applied to the application.
        """
        return self.wsgi_application(environ, start_response)
def create_application() -> Application:
    """
    Application factory function to create a new instance of the application.

    Service endpoints are read from environment variables; the locker URL is
    parsed and turned into a Redis connection.
    """
    env = os.environ
    database_url = env.get(
        "OPENSLIDES_WRITE_SERVICE_DATABASE_URL", "http://localhost:8008/get-elements"
    )
    event_store_url = env.get(
        "OPENSLIDES_WRITE_SERVICE_EVENT_STORE_URL",
        "http://localhost:8008/save",  # TODO: Use correct variables here.
    )
    locker_url = env.get(
        "OPENSLIDES_WRITE_SERVICE_LOCKER_URL", "http://localhost:6379/0"
    )

    # Parse the locker URL and initiate the Redis connection with it. The
    # path component carries the Redis database number.
    parsed = urlparse(locker_url)
    if not (parsed.hostname and parsed.port and parsed.path):
        raise RuntimeError(
            "Bad environment variable OPENSLIDES_WRITE_SERVICE_LOCKER_URL."
        )
    redis_locker_connection = redis.Redis(
        host=parsed.hostname,
        port=parsed.port,
        db=int(parsed.path.strip("/")),
    )

    services = ServicesConfig(
        database=database_url,
        event_store=event_store_url,
        locker=redis_locker_connection,
    )
    return Application(ApplicationConfig(services=services))
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,516 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/services/locker.py | import os
import redis
import redis_lock
class Locker:
    """
    Adapter to connect to Redis to access to global lock.
    """

    def __init__(self, connection: redis.Redis) -> None:
        self.connection = connection

    def acquire(self, key: str) -> redis_lock.Lock:
        """
        Acquire lock for the given key. Use this with a context manager.

        The lock expires after the worker timeout plus one extra second so
        that - after a SIGTERM from the master process - the context manager
        exits cleanly before the lock expires.
        """
        timeout = os.environ.get("OPENSLIDES_WRITE_SERVICE_WORKER_TIMEOUT", 30)
        expire = int(timeout) + 1
        return redis_lock.Lock(self.connection, key, expire=expire)
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,517 | normanjaeckel/openslides-write-service | refs/heads/master | /start.py | from werkzeug.serving import run_simple
from openslides_write_service.core import create_application
# The application is created at import time so WSGI servers can use this
# module directly as well.
application = create_application()


def main() -> None:
    """
    Main entry point for this start script.
    """
    # Log "Start Werkzeug's development server."
    # use_reloader restarts the server on source changes (development only).
    run_simple("localhost", 8000, application, use_reloader=True)


if __name__ == "__main__":
    main()
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,518 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/topics/__init__.py | from ..utils.routing import RuleFactory
from ..utils.types import ServicesConfig
from .views import get_get_rules_func
class Topics(RuleFactory):
    """
    App for simple topics that can be shown in agenda.

    On initialization the get_rules method from the app's views is bound to
    the instance.
    """

    def __init__(self, services: ServicesConfig) -> None:
        rules_factory = get_get_rules_func(services)
        self.get_rules_func = rules_factory
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,519 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/services/event_store.py | from typing import Any, Dict
class EventStore:
    """
    Adapter to connect to event store.

    All methods are currently stubs; only the endpoint URL and the request
    headers are stored.
    """

    def __init__(self, event_store_url: str) -> None:
        self.url = event_store_url
        self.headers = {"Content-Type": "application/json"}

    def save(self, data: Dict[str, Any]) -> None:
        """Persist the given data. Stub: does nothing yet."""
        pass

    def send(self, data: Dict[str, Any]) -> None:
        """Send the given data. Stub: does nothing yet."""
        pass

    def get_highest_id(self, key: str) -> int:
        """
        Look through all events for the highest id for this key. Returns 0
        if nothing is found. Stub: always returns 0 for now.
        """
        return 0
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,520 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/utils/schema.py | schema_version = "http://json-schema.org/draft-07/schema#"
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,521 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/wsgi.py | from .core import create_application
# WSGI entry point: servers load ``application`` from this module.
application = create_application()
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,522 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/services/database.py | from typing import Tuple
import requests
import simplejson as json
from werkzeug.exceptions import InternalServerError
class Database:
    """
    Adapter to connect to (read-only) database.
    """

    def __init__(self, database_url: str) -> None:
        # Endpoint answering bulk "get elements" requests.
        self.url = database_url
        self.headers = {"Content-Type": "application/json"}

    def get(self, *keys: str) -> Tuple[str, int]:
        """
        Fetches all data for given keys from database.

        Returns a tuple of the payload and the database version. Raises
        InternalServerError when the service does not answer with a
        successful status code.
        """
        data = {"keys": keys}
        # NOTE(review): a GET request carrying a body is unusual; confirm
        # the database service really expects it this way.
        response = requests.get(self.url, data=json.dumps(data), headers=self.headers)
        if not response.ok:
            raise InternalServerError("Connection to database failed.")
        # Parse the response body once instead of twice.
        body = response.json()
        return body["data"], body["version"]
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,523 | normanjaeckel/openslides-write-service | refs/heads/master | /openslides_write_service/utils/types.py | from typing import Any, Callable, Dict, Text
import redis
from mypy_extensions import TypedDict
# Configuration of the external services the write service talks to:
# endpoint URLs for database and event store, plus a live Redis connection
# for the locker.
ServicesConfig = TypedDict(
    "ServicesConfig", {"database": str, "event_store": str, "locker": redis.Redis},
)
# Top-level application configuration wrapper.
ApplicationConfig = TypedDict("ApplicationConfig", {"services": ServicesConfig})
# Type of the WSGI start_response callable.
StartResponse = Callable
# Type of a WSGI environ mapping.
WSGIEnvironment = Dict[Text, Any]
| {"/openslides_write_service/utils/views.py": ["/openslides_write_service/services/database.py", "/openslides_write_service/services/event_store.py", "/openslides_write_service/services/locker.py", "/openslides_write_service/utils/types.py"], "/openslides_write_service/topics/views.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/views.py", "/openslides_write_service/topics/schema.py"], "/openslides_write_service/topics/schema.py": ["/openslides_write_service/utils/schema.py"], "/openslides_write_service/core.py": ["/openslides_write_service/topics/__init__.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/utils/wrappers.py"], "/start.py": ["/openslides_write_service/core.py"], "/openslides_write_service/topics/__init__.py": ["/openslides_write_service/utils/routing.py", "/openslides_write_service/utils/types.py", "/openslides_write_service/topics/views.py"], "/openslides_write_service/wsgi.py": ["/openslides_write_service/core.py"]} |
74,527 | cdent/placement | refs/heads/master | /placement/exception.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Stub out a basic exceptions, mostly used by objects."""
class KwException(Exception):
    """Base exception whose message embeds the class name and any kwargs.

    Positional arguments are accepted but not included in the message.
    """

    def __init__(self, *args, **kwargs):
        details = kwargs or {}
        message = '%s: %s' % (self.__class__.__name__, str(details))
        super(KwException, self).__init__(message)
# The following exception types carry no extra behavior; subclasses of
# KwException inherit its "ClassName: {kwargs}" message format, the plain
# Exception subclasses use the standard message handling.
class NotFound(KwException):
    pass


class ResourceProviderInUse(Exception):
    pass


class ResourceClassExists(KwException):
    pass


class ResourceClassNotFound(NotFound):
    pass


class ResourceClassCannotDeleteStandard(KwException):
    pass


class ConcurrentUpdateDetected(Exception):
    pass


class MaxDBRetriesExceeded(KwException):
    pass


class InvalidInventory(KwException):
    pass


class InventoryWithResourceClassNotFound(KwException):
    pass


class InventoryInUse(Exception):
    pass
class InvalidInventoryCapacity(Exception):
    """Exception whose message renders the keyword arguments it was given.

    Positional arguments are accepted but not included in the message.
    """

    def __init__(self, *args, **kwargs):
        message = 'invalid inv cap: %s' % str(kwargs)
        super(InvalidInventoryCapacity, self).__init__(message)
class ObjectActionError(Exception):
    """Exception whose message renders the keyword arguments it was given.

    Positional arguments are accepted but not included in the message.
    """

    def __init__(self, *args, **kwargs):
        message = 'object action error: %s' % str(kwargs)
        super(ObjectActionError, self).__init__(message)
| {"/placement/tests/fixtures.py": ["/placement/db/__init__.py"]} |
74,528 | cdent/placement | refs/heads/master | /placement/conf.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_db import options as oslo_db_options
# Global configuration object shared by the whole service.
CONF = cfg.CONF

# NOTE(review): database_group is defined but never registered or used in
# this module -- confirm whether registration was intended.
database_group = cfg.OptGroup('database',
                              title='Database options',
                              help="""
Database configuration.
""")

api_group = cfg.OptGroup('api',
                         title='API options',
                         help="""
Options under this group are used to define Nova API.
""")

auth_opts = [
    cfg.StrOpt("auth_strategy",
               default="keystone",
               choices=("keystone", "noauth2"),
               deprecated_group="DEFAULT",
               help="""
This determines the strategy to use for authentication: keystone or noauth2.
'noauth2' is designed for testing only, as it does no actual credential
checking. 'noauth2' provides administrative credentials only if 'admin' is
specified as the username.
"""),
]

# Apply oslo.db's default option values, then register the API options.
oslo_db_options.set_defaults(CONF)
CONF.register_group(api_group)
CONF.register_opts(auth_opts, group=api_group)
| {"/placement/tests/fixtures.py": ["/placement/db/__init__.py"]} |
74,529 | cdent/placement | refs/heads/master | /placement/tests/fixtures.py | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fixtures for Nova tests."""
from __future__ import absolute_import
import logging as std_logging
import os
import fixtures
from placement import db as session
from placement.db import migration
# Cache of SQL schema dumps keyed by database name; filled lazily by the
# Database fixture so migrations only have to run once per test process.
DB_SCHEMA = {'main': ""}
# Environment variable values treated as "enabled".
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class Database(fixtures.Fixture):
    """Fixture that provides a fresh 'main' database for each test.

    The schema is built once (via migrations), dumped to SQL and cached in
    the module-level DB_SCHEMA; every reset() replays the cached dump,
    which is much faster than re-running migrations per test.
    """

    def __init__(self, database='main', connection=None):
        """Create a database fixture.

        :param database: The type of database, 'main' or 'api'
        :param connection: The connection string to use
        """
        super(Database, self).__init__()
        self.database = database
        if database == 'main':
            if connection is not None:
                # An explicit connection string gets its own enginefacade
                # context manager instead of the globally configured one.
                ctxt_mgr = session.create_context_manager(
                    connection=connection)
                facade = ctxt_mgr.get_legacy_facade()
                self.get_engine = facade.get_engine
            else:
                self.get_engine = session.get_engine
        else:
            # Only the main database is supported here.
            raise RuntimeError('only main database allowed')

    def _cache_schema(self):
        # Populate the module-level schema cache on first use.
        global DB_SCHEMA
        if not DB_SCHEMA[self.database]:
            engine = self.get_engine()
            conn = engine.connect()
            # TODO(cdent): put migrations back!
            migration.db_sync(database=self.database)
            # iterdump() is a sqlite3 connection API -- this assumes a
            # SQLite backend.
            DB_SCHEMA[self.database] = "".join(line for line
                                               in conn.connection.iterdump())
            engine.dispose()

    def cleanup(self):
        engine = self.get_engine()
        engine.dispose()

    def reset(self):
        # Rebuild the database from the cached SQL dump.
        self._cache_schema()
        engine = self.get_engine()
        engine.dispose()
        conn = engine.connect()
        conn.connection.executescript(DB_SCHEMA[self.database])

    def setUp(self):
        super(Database, self).setUp()
        self.reset()
        self.addCleanup(self.cleanup)
class NullHandler(std_logging.Handler):
    """Logging handler that formats each record but discards the output.

    Formatting exercises debug-level log calls (surfacing broken format
    strings) without retaining the rendered text. Used in conjunction with
    log_fixture.get_logging_handle_error_fixture to detect formatting
    errors in debug level logs without saving the logs.
    """

    def handle(self, record):
        # Force formatting so errors in the format string surface.
        self.format(record)

    def emit(self, record):
        # Deliberately drop the record: nothing is ever written.
        pass

    def createLock(self):
        # No output is produced, so no lock is required.
        self.lock = None
class StandardLogging(fixtures.Fixture):
    """Setup Logging redirection for tests.

    There are a number of things we want to handle with logging in tests:
    * Redirect the logging to somewhere that we can test or dump it later.
    * Ensure that as many DEBUG messages as possible are actually
      executed, to ensure they are actually syntactically valid (they
      often have not been).
    * Ensure that we create useful output for tests that doesn't
      overwhelm the testing system (which means we can't capture the
      100 MB of debug logging on every run).

    To do this we create a logger fixture at the root level, which
    defaults to INFO and create a Null Logger at DEBUG which lets
    us execute log messages at DEBUG but not keep the output.

    To support local debugging OS_DEBUG=True can be set in the
    environment, which will print out the full debug logging.

    There are also a set of overrides for particularly verbose
    modules to be even less than INFO.
    """

    def setUp(self):
        super(StandardLogging, self).setUp()

        # set root logger to debug
        root = std_logging.getLogger()
        root.setLevel(std_logging.DEBUG)

        # supports collecting debug level for local runs
        if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
            level = std_logging.DEBUG
        else:
            level = std_logging.INFO

        # Collect logs
        fs = '%(asctime)s %(levelname)s [%(name)s] %(message)s'
        self.logger = self.useFixture(
            fixtures.FakeLogger(format=fs, level=None))
        # TODO(sdague): why can't we send level through the fake
        # logger? Tests prove that it breaks, but it's worth getting
        # to the bottom of.
        root.handlers[0].setLevel(level)

        if level > std_logging.DEBUG:
            # Just attempt to format debug level logs, but don't save them
            handler = NullHandler()
            self.useFixture(fixtures.LogHandler(handler, nuke_handlers=False))
            handler.setLevel(std_logging.DEBUG)

        # Don't log every single DB migration step
        std_logging.getLogger(
            'migrate.versioning.api').setLevel(std_logging.WARNING)

        # At times we end up calling back into main() functions in
        # testing. This has the possibility of calling logging.setup
        # again, which completely unwinds the logging capture we've
        # created here. Once we've setup the logging the way we want,
        # disable the ability for the test to change this.
        def fake_logging_setup(*args):
            # no-op replacement for oslo_log.log.setup
            pass

        self.useFixture(
            fixtures.MonkeyPatch('oslo_log.log.setup', fake_logging_setup))
class OutputStreamCapture(fixtures.Fixture):
    """Capture output streams during tests.

    This fixture captures errant printing to stderr / stdout during
    the tests and lets us see those streams at the end of the test
    runs instead. Useful to see what was happening during failed
    tests.
    """

    def setUp(self):
        super(OutputStreamCapture, self).setUp()
        # Capture is opt-in via environment flags so local debugging can
        # still print straight to the terminal.
        if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
            self.out = self.useFixture(fixtures.StringStream('stdout'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stdout', self.out.stream))
        if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
            self.err = self.useFixture(fixtures.StringStream('stderr'))
            self.useFixture(
                fixtures.MonkeyPatch('sys.stderr', self.err.stream))

    @property
    def stderr(self):
        # Everything written to sys.stderr while the fixture was active.
        return self.err._details["stderr"].as_text()

    @property
    def stdout(self):
        # Everything written to sys.stdout while the fixture was active.
        return self.out._details["stdout"].as_text()
74,530 | cdent/placement | refs/heads/master | /placement/db/__init__.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db.sqlalchemy import enginefacade
# The maximum value a signed INT type may have
MAX_INT = 0x7FFFFFFF

# NOTE(dosaboy): This is supposed to represent the maximum value that we can
# place into a SQL single precision float so that we can check whether values
# are oversize. Postgres and MySQL both define this as their max whereas Sqlite
# uses dynamic typing so this would not apply. Different dbs react in different
# ways to oversize values e.g. postgres will raise an exception while mysql
# will round off the value. Nevertheless we may still want to know prior to
# insert whether the value is oversize or not.
SQL_SP_FLOAT_MAX = 3.40282e+38

# Module-level default transaction context manager, used when the request
# context does not carry its own (see get_context_manager()).
main_context_manager = enginefacade.transaction_context()
def _context_manager_from_context(context):
    """Return ``context.db_connection`` when present, otherwise None."""
    if not context:
        return None
    return getattr(context, "db_connection", None)
def get_context_manager(context):
    """Get a database context manager object.

    Falls back to the module-level default when the context does not carry
    its own manager.

    :param context: The request context that can contain a context manager
    """
    candidate = _context_manager_from_context(context)
    return candidate if candidate else main_context_manager
def get_engine(use_slave=False, context=None):
    """Get a database engine object.

    :param use_slave: Whether to use the slave connection
    :param context: The request context that can contain a context manager
    """
    facade = get_context_manager(context).get_legacy_facade()
    return facade.get_engine(use_slave=use_slave)
| {"/placement/tests/fixtures.py": ["/placement/db/__init__.py"]} |
74,539 | Myst3ri0n/reddit-save-saved | refs/heads/master | /web.py | import os
import random
from gcore import db
import sqlite3
from flask import Flask, request, render_template, redirect, url_for, flash, make_response, session, Markup, jsonify
# Flask application object; the route handlers below attach to it.
app = Flask(__name__)
@app.route('/',methods=['GET','POST'])
def index():
    """Front page: show up to 25 randomly chosen successful downloads."""
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        cur.execute('SELECT ID AS COUNT FROM DOWNLOAD_LOG WHERE DOWNLOAD_FAILED IS NULL;')
        image_ids = [row[0] for row in cur.fetchall()]
        # Bug fix: sample the actual row IDs. The old code sampled
        # range(len(ids)), which silently picks wrong/missing rows whenever
        # IDs are not contiguous starting at 0.
        sample_size = min(len(image_ids), 25)
        chosen = random.sample(image_ids, sample_size)
        fp_imgs = []
        if chosen:
            # Parameterized IN clause; skipping the query entirely when
            # empty also avoids the "IN ()" SQLite syntax error.
            placeholders = ', '.join('?' for _ in chosen)
            cur.execute(f"""
                SELECT 'static/saved/'||SUB_REDDIT||'/'||FILE_NAME AS FILE_NAME,
                       TITLE AS TITLE,
                       ID AS ID
                FROM DOWNLOAD_LOG
                WHERE ID IN ({placeholders})
                AND DOWNLOAD_FAILED IS NULL;
                """, chosen)
            fp_imgs = cur.fetchall()
    finally:
        # Fix: the connection was never closed before.
        conn.close()
    return render_template('index.html', images=fp_imgs)
@app.route('/search',methods=['GET', 'POST'])
def search():
    """Search post titles; a plain GET just renders the empty search page.

    Security fix: the user-supplied term is bound as a query parameter
    instead of being interpolated into the SQL via an f-string (SQL
    injection).
    """
    if request.method == 'POST':
        search_term = request.form['search']
        conn = sqlite3.connect('saved.db')
        conn.text_factory = str
        try:
            cur = conn.cursor()
            cur.execute("""
                SELECT 'static/saved/'||SUB_REDDIT||'/'||FILE_NAME AS FILE_NAME,
                       TITLE AS TITLE,
                       ID AS ID
                FROM DOWNLOAD_LOG
                WHERE TITLE LIKE ?
                AND DOWNLOAD_FAILED IS NULL;
                """, ('%' + search_term + '%',))
            search_images = cur.fetchall()
        finally:
            conn.close()
        return render_template('index.html', images=search_images)
    return render_template('index.html')
@app.route('/filter/gifs',methods=['GET','POST'])
def gifsOnly():
    """Show up to 25 random gif posts that downloaded successfully."""
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        cur.execute("""
            SELECT ID AS COUNT
            FROM DOWNLOAD_LOG
            WHERE lower(FILE_NAME) LIKE '%gif%'
            AND DOWNLOAD_FAILED IS NULL;
            """)
        image_ids = [row[0] for row in cur.fetchall()]
        # Bug fix: sample the actual row IDs, not range(len(ids)) -- see
        # the same fix on the front page route.
        sample_size = min(len(image_ids), 25)
        chosen = random.sample(image_ids, sample_size)
        fp_imgs = []
        if chosen:
            placeholders = ', '.join('?' for _ in chosen)
            cur.execute(f"""
                SELECT 'static/saved/'||SUB_REDDIT||'/'||FILE_NAME AS FILE_NAME,
                       TITLE AS TITLE,
                       ID AS ID
                FROM DOWNLOAD_LOG
                WHERE ID IN ({placeholders})
                AND DOWNLOAD_FAILED IS NULL;
                """, chosen)
            fp_imgs = cur.fetchall()
    finally:
        conn.close()
    return render_template('subreddit.html', images=fp_imgs)
@app.route('/filter/images',methods=['GET','POST'])
def imagesOnly():
    """Show up to 25 random jpg/png posts that downloaded successfully."""
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        # Bug fix: parenthesize the OR. Without it, SQL's AND binds tighter
        # and jpg rows were returned even when DOWNLOAD_FAILED was set.
        cur.execute("""
            SELECT ID AS COUNT
            FROM DOWNLOAD_LOG
            WHERE (lower(FILE_NAME) LIKE '%jpg%'
                   OR lower(FILE_NAME) LIKE '%png%')
            AND DOWNLOAD_FAILED IS NULL;""")
        image_ids = [row[0] for row in cur.fetchall()]
        # Bug fix: sample the actual row IDs, not range(len(ids)).
        sample_size = min(len(image_ids), 25)
        chosen = random.sample(image_ids, sample_size)
        fp_imgs = []
        if chosen:
            placeholders = ', '.join('?' for _ in chosen)
            cur.execute(f"""
                SELECT 'static/saved/'||SUB_REDDIT||'/'||FILE_NAME AS FILE_NAME,
                       TITLE AS TITLE,
                       ID AS ID
                FROM DOWNLOAD_LOG
                WHERE ID IN ({placeholders})
                AND DOWNLOAD_FAILED IS NULL;
                """, chosen)
            fp_imgs = cur.fetchall()
    finally:
        conn.close()
    return render_template('subreddit.html', images=fp_imgs)
@app.route('/filter/recent',methods=['GET','POST'])
def recent():
    """Show the 150 most recently posted successful downloads."""
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        cur.execute("""
            SELECT 'static/saved/'||SUB_REDDIT||'/'||FILE_NAME AS FILE_NAME,
                   TITLE AS TITLE,
                   ID AS ID
            FROM DOWNLOAD_LOG
            WHERE DOWNLOAD_FAILED IS NULL
            ORDER BY
                POSTED_DATE DESC
            LIMIT 150;
            """)
        imgs = cur.fetchall()
    finally:
        # Fix: the connection was never closed before.
        conn.close()
    return render_template('subreddit.html', images=imgs)
@app.route('/r/<subreddit>',methods=['GET','POST'])
def subredditPage(subreddit):
    """List all successful downloads for one subreddit.

    Security fix: the subreddit name comes from the URL, so it is bound as
    a query parameter instead of being interpolated into the SQL (SQL
    injection).
    """
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        cur.execute("""
            SELECT 'static/saved/'||SUB_REDDIT||'/'||FILE_NAME AS FILE_NAME,
                   TITLE AS TITLE,
                   ID AS ID
            FROM DOWNLOAD_LOG
            WHERE SUB_REDDIT=?
            AND DOWNLOAD_FAILED IS NULL;
            """, (subreddit,))
        imgs = cur.fetchall()
    finally:
        conn.close()
    return render_template('subreddit.html', images=imgs)
@app.route('/about')
def about():
    """Render the static about page."""
    template_name = 'about.html'
    return render_template(template_name)
#json endpoints
@app.route('/json/subreddits')
def jsonSubs():
    """Return the sorted list of subreddits with successful downloads as JSON."""
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        cur.execute("""
            SELECT DISTINCT SUB_REDDIT
            FROM DOWNLOAD_LOG
            WHERE DOWNLOAD_FAILED IS NULL
            ORDER BY
                SUB_REDDIT;""")
        result = cur.fetchall()
    finally:
        # Fix: the connection was never closed before.
        conn.close()
    return jsonify(response=result)
@app.route('/json/image/<id>')
def jsonImage(id):
    """Return metadata for a single download as JSON.

    Security fix: ``id`` arrives as a string from the URL, so it is bound
    as a query parameter instead of being interpolated into the SQL (SQL
    injection).
    """
    conn = sqlite3.connect('saved.db')
    conn.text_factory = str
    try:
        cur = conn.cursor()
        cur.execute("""
            SELECT TITLE,
                   SUB_REDDIT,
                   URL,
                   FILE_NAME,
                   PERMALINK,
                   POSTED_BY
            FROM DOWNLOAD_LOG
            WHERE ID=?
            ORDER BY
                SUB_REDDIT;
            """, (id,))
        result = cur.fetchall()
    finally:
        conn.close()
    return jsonify(response=result)
@app.errorhandler(404)
def page_not_found(e):
    """Serve the custom 404 template with the matching status code."""
    body = render_template('404.html')
    return body, 404
if __name__ == '__main__':
    # Bind address/port are overridable via the IP and PORT env vars.
    listen_host = os.getenv('IP','127.0.0.1')
    listen_port = int(os.getenv('PORT',5005))
    # NOTE(review): a hard-coded secret key and debug=True are unsafe for any
    # non-local deployment — confirm this app only runs on a trusted machine.
    app.secret_key = 'klnadsfq3849t98q34jt89j'
    app.run(host=listen_host, port=listen_port, debug=True)
74,540 | Myst3ri0n/reddit-save-saved | refs/heads/master | /gcore/timesys.py | import time
def nowDateTime():
    """Return the current local time formatted as 'YYYY-MM-DD HH:MM:SS'."""
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    return stamp
def convepoch(date):
    """Convert an epoch timestamp in *milliseconds* to a UTC 'YYYY-MM-DD HH:MM:SS' string."""
    seconds = int(date) / 1000.
    return time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(seconds))
def humanTime(time_string):
    """Render a duration in seconds as a human-readable string.

    <= 60s -> 'N Seconds'; under an hour -> minutes (2 d.p.); otherwise hours.
    """
    if time_string <= 60:
        return str(time_string) + ' Seconds'
    if time_string < 3600:
        return str(round(float(time_string) / 60, 2)) + ' Minutes'
    return str(round(float(time_string) / 3600, 2)) + ' Hours'
74,541 | Myst3ri0n/reddit-save-saved | refs/heads/master | /config_sample.py | client_id=''
# Reddit API credentials used by main.py (praw.Reddit).  Copy this file to
# config.py and fill in real values — never commit real credentials.
client_secret=''
password=''
user_agent='testscript by /u/johndoe'
username=''
74,542 | Myst3ri0n/reddit-save-saved | refs/heads/master | /gcore/filesys.py | import os
def exRm(fileName):
    """Remove fileName if it exists as a regular file; do nothing otherwise."""
    if os.path.isfile(fileName):
        os.remove(fileName)
def getFileSize(file):
    """Return the size of *file* as a human-readable string (Bytes/KB/MB/GB/TB).

    Uses true division, so non-byte sizes carry a fractional part (e.g. '2.0 KB').
    """
    size = os.path.getsize(file)
    KB, MB, GB, TB = 1024, 1048576, 1073741824, 1099511627776
    if size < KB:
        return str(size) + ' Bytes'
    if size < MB:
        return str(size / KB) + ' KB'
    if size < GB:
        return str(size / MB) + ' MB'
    if size < TB:
        return str(size / GB) + ' GB'
    return str(size / TB) + ' TB'
def lsDirVis(path):
    """Yield the entries of *path*, skipping dot-prefixed (hidden) names."""
    for entry in os.listdir(path):
        if entry.startswith('.'):
            continue
        yield entry
74,543 | Myst3ri0n/reddit-save-saved | refs/heads/master | /main.py | import praw
import config as cfg
import re
import time
import os
import datetime
import argparse
import shutil
from urllib.request import Request, urlopen
from termcolor import colored, cprint
from gcore import db, timesys as t
from requests_html import HTMLSession
start_time = time.time()
parser = argparse.ArgumentParser()
parser.add_argument('--force',action='store_true')
parser.add_argument('--folders',action='store_false')
parser = parser.parse_args()
force = parser.force
folders = parser.folders
db_name='saved'
new_db = False
if os.path.isfile(db_name+'.db') == False:
new_db = True
d = db.DatabaseManager(db_name, False)
db_file_size = os.stat('saved.db').st_size
if new_db or db_file_size==0:
d.query(d.readSql('install.sql'))
d.query(f"INSERT INTO JOB_TRACKER(START_DATE) VALUES('{t.nowDateTime()}');")
job_id = d.query('SELECT ID FROM JOB_TRACKER ORDER BY ID DESC LIMIT 1;',fo=True)
session = HTMLSession()
print('Downloading saved Reddit Images...\n')
reddit = praw.Reddit(client_id=cfg.client_id,
client_secret=cfg.client_secret,
password=cfg.password,
user_agent=cfg.user_agent,
username=cfg.username)
saved_posts = reddit.user.me().saved(limit=None)
post_info = {}
album_count = 0
url_count = 1
album_img_count = 0
# --- Pass 1: scan saved posts and collect image metadata -------------------
for link in saved_posts:
    user = link.author
    url = link.url
    title = link.title
    subr = link.subreddit_name_prefixed[2:]  # strip the leading 'r/'
    perm_url = 'https://reddit.com'+link.permalink
    post_time = datetime.datetime.fromtimestamp(link.created)
    up_votes = link.ups
    #filter out only images and gifs
    if url[-3:].upper() in ['JPG','PNG','GIF']:
        # Final path component that looks like name.ext (3-4 char extension).
        file_name = re.findall(r'(?=\w+\.\w{3,4}$).+',link.url)[0]
        post_info[url_count] = {'User':user,'Title':title,'Url':url,'File_Name':file_name,
                                'Album_Url':'','Subreddit':subr,'Permalink':perm_url,
                                'Is_Album':'','Alb_Index':'','Post_Time':post_time,
                                'Up_Votes':up_votes}
        url_count+=1
    is_album = re.search(r'imgur\.com\/a\/',url)
    if is_album:
        # Imgur albums: scrape the blog layout page and pull every image's
        # hash + extension out of the embedded JSON.
        print(f'Scanning Album: {title[:80]} ({url})...\n')
        r = session.get(url+'/layout/blog')
        album_html = r.html.html
        album_images = re.findall(r'.*?{"hash":"([a-zA-Z0-9]+)".*?"ext":"(\.[a-zA-Z0-9]+)".*?',album_html)
        album_index = 1
        # set() deduplicates; NOTE(review): it also loses page order, so
        # _ALB_<n> suffixes may not match the album's display order.
        for i in list(set(album_images)):
            img_id = i[0]
            ext = i[1]
            file_name = img_id+ext
            post_info[url_count] = {'User':user,'Title':title+'_ALB_'+str(album_index),
                                    'Url':f'https://imgur.com/{img_id}{ext}','File_Name':img_id+ext,
                                    'Album_Url':url,'Subreddit':subr,'Permalink':perm_url,'Is_Album':'Y',
                                    'Alb_Index':album_index,'Post_Time':post_time,'Up_Votes':up_votes}
            url_count+=1
            album_index+=1
            album_img_count+=1
        album_count+=1
post_keys = list(post_info.keys())
print(f'{album_count} albums detected with {album_img_count} images...\n')
print(f'{len(post_keys)} total images will be downloaded...\n')
time.sleep(5)
download_count = 0
current_count = 1
# --- Pass 2: download each collected image and record the outcome ----------
# NOTE(review): SQL below is built with f-strings; only titles are escaped
# (via d.sqlQuotes).  Values come from Reddit/imgur, so a quote in e.g. a
# username could still break a statement — consider parameterized queries.
#download files
for k in post_keys:
    user = post_info[k]['User']
    title = post_info[k]['Title']
    url = post_info[k]['Url']
    alb_url = post_info[k]['Album_Url']
    perm = post_info[k]['Permalink']
    subr = post_info[k]['Subreddit']
    alb = post_info[k]['Is_Album']
    alb_i = post_info[k]['Alb_Index']
    file_name = post_info[k]['File_Name']
    post_time = post_info[k]['Post_Time']
    up_votes = post_info[k]['Up_Votes']
    # NOTE(review): re-fetches every known URL once per item — O(n^2) queries.
    db_links = d.query("SELECT URL FROM DOWNLOAD_LOG;")
    if url in db_links and force==False:
        print(f'{title} already has been downloaded...')
        current_count += 1
        continue
    print(f'Downloading: {title[:80]} ({url}) :: {str(current_count)} out of {str(len(post_keys))}')
    current_count += 1
    if not os.path.exists('static/saved/'+str(subr)) and folders:
        os.makedirs('static/saved/'+str(subr))
    file_name = re.search(r'(?=\w+\.\w{3,4}$).+',url).group(0)
    save_location = f'static/saved/{subr}/{file_name}' if folders else f'static/saved/{file_name}'
    try:
        # Spoof a browser UA: imgur rejects the default urllib agent.
        req = Request(url, headers={'User-Agent': 'Mozilla/5.0'})
        with urlopen(req) as response, open(save_location, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)
        if url in db_links and force==True:
            # Forced re-download of a known URL: just bump its timestamp.
            d.query(f"""
            UPDATE DOWNLOAD_LOG
            SET DATE_DOWNLOADED = '{t.nowDateTime()}'
            WHERE URL='{url}';
            """)
        else:
            #determining if image is missing form imgur
            # imgur serves a placeholder (redirect to removed.png) instead of 404.
            if urlopen(url).geturl()=='https://i.imgur.com/removed.png':
                error_msg = 'Imgur Missing Image.'
                cprint(f'Unable to download: ^^^{url}^^^ The error was: Imgur Missing Image.','red', 'on_cyan',attrs=['bold'])
                os.remove(save_location)
                d.query(f"""
                INSERT INTO DOWNLOAD_LOG(SAVED_USER,POSTED_BY,DATE_DOWNLOADED,URL,TITLE,SUB_REDDIT,
                                         PERMALINK,IS_ALBUM,ALBUM_INDEX,ALBUM_URL,FILE_NAME,POSTED_DATE
                                         ,UP_VOTES,DOWNLOAD_FAILED,ERROR_MSG)
                VALUES('{cfg.username}','{user}','{t.nowDateTime()}','{url}','{d.sqlQuotes(title)}',
                       '{subr}','{perm}',{d.nullValue(alb)},{d.nullValue(alb_i,is_int=True)},
                       {d.nullValue(alb_url)},'{file_name}','{post_time}',{up_votes},1,'{error_msg}');
                """)
            else:
                download_count += 1
                d.query(f"""
                INSERT INTO DOWNLOAD_LOG(SAVED_USER,POSTED_BY,DATE_DOWNLOADED,URL,TITLE,SUB_REDDIT,
                                         PERMALINK,IS_ALBUM,ALBUM_INDEX,ALBUM_URL,FILE_NAME,POSTED_DATE,UP_VOTES)
                VALUES('{cfg.username}','{user}','{t.nowDateTime()}','{url}','{d.sqlQuotes(title)}',
                       '{subr}','{perm}',{d.nullValue(alb)},{d.nullValue(alb_i,is_int=True)},
                       {d.nullValue(alb_url)},'{file_name}','{post_time}',{up_votes});
                """)
        time.sleep(2)  # crude rate limiting between downloads
    except Exception as e:
        # Any failure (network, filesystem, SQL) is logged as a failed row.
        cprint(f'Unable to download: ^^^{url}^^^ The error was: {str(e)}','red', 'on_cyan',attrs=['bold'])
        d.query(f"""
        INSERT INTO DOWNLOAD_LOG(SAVED_USER,POSTED_BY,DATE_DOWNLOADED,URL,TITLE,SUB_REDDIT,
                                 PERMALINK,IS_ALBUM,ALBUM_INDEX,ALBUM_URL,FILE_NAME,POSTED_DATE
                                 ,UP_VOTES,DOWNLOAD_FAILED,ERROR_MSG)
        VALUES('{cfg.username}','{user}','{t.nowDateTime()}','{url}','{d.sqlQuotes(title)}',
               '{subr}','{perm}',{d.nullValue(alb)},{d.nullValue(alb_i,is_int=True)},
               {d.nullValue(alb_url)},'{file_name}','{post_time}',{up_votes},1,'{str(e)}');
        """)
# Close out the job record with the end time and successful-download count.
d.query(f"""UPDATE JOB_TRACKER
            SET END_DATE = '{t.nowDateTime()}',
                DOWNLOAD_COUNT = {download_count}
            WHERE ID = {job_id};""")
time_took = round(time.time() - start_time,3)
print(f'\nProcess completed in {t.humanTime(time_took)}...')
print('\nProcess Complete!\n')
74,544 | Myst3ri0n/reddit-save-saved | refs/heads/master | /gcore/log.py | from random import randint
from .timesys import nowDateTime
import os
import csv
class logWriter(object):
    """Appends job-stamped entries to log.csv.

    The first writer creates the file (with a CSV header) and picks a random
    job number; subsequent writers continue from the highest job number
    already recorded in the file.
    """
    def __init__(self):
        self.logFile = "log.csv"
        self.firstLog = os.path.isfile(self.logFile)
        self.logA = open(self.logFile, 'a')
        if not self.firstLog:
            self.logA.write("Job Number,Date/Time,Log Message\n")
            self.jobNum = str(randint(1000,1000000))
        else:
            # Scan existing rows for the highest job number.  Fixes vs. the
            # original: open in text mode (csv.reader rejects binary files on
            # Python 3), compare job numbers as ints (string max() says
            # '9' > '10'), and tolerate a header-only file, where max() on an
            # empty list raised ValueError.
            jobNums = []
            with open(self.logFile, 'r', newline='') as f:
                for row in csv.reader(f):
                    if row and row[0].isdigit():
                        jobNums.append(int(row[0]))
            if jobNums:
                self.jobNum = str(max(jobNums) + 1)
            else:
                self.jobNum = str(randint(1000,1000000))
    def logUp(self, msg):
        """Append *msg* as a quoted CSV row stamped with this writer's job number."""
        self.logStr = "\""+self.jobNum+"\",\""+nowDateTime()+"\",\""+msg+"\""
        self.logA.write(self.logStr+"\n")
| {"/web.py": ["/gcore/__init__.py"], "/main.py": ["/gcore/__init__.py"], "/gcore/log.py": ["/gcore/timesys.py"], "/gcore/__init__.py": ["/gcore/filesys.py", "/gcore/timesys.py", "/gcore/db.py", "/gcore/log.py"], "/gcore/db.py": ["/gcore/filesys.py"]} |
74,545 | Myst3ri0n/reddit-save-saved | refs/heads/master | /gcore/__init__.py | from .filesys import exRm, getFileSize, lsDirVis
from .timesys import nowDateTime, convepoch
from .db import DatabaseManager
from .log import logWriter | {"/web.py": ["/gcore/__init__.py"], "/main.py": ["/gcore/__init__.py"], "/gcore/log.py": ["/gcore/timesys.py"], "/gcore/__init__.py": ["/gcore/filesys.py", "/gcore/timesys.py", "/gcore/db.py", "/gcore/log.py"], "/gcore/db.py": ["/gcore/filesys.py"]} |
74,546 | Myst3ri0n/reddit-save-saved | refs/heads/master | /gcore/db.py | import sqlite3
from .filesys import exRm
class DatabaseManager(object):
    """Thin convenience wrapper around a sqlite3 database file.

    The connection installs a scalar row factory, so query results come back
    as plain first-column values rather than tuples.
    """
    def __init__(self, db, remove):
        db_path = db+'.db'
        self.sqlFile = None
        if remove:
            # Start from a clean slate when requested.
            exRm(db_path)
        self.conn = sqlite3.connect(db_path)
        self.conn.text_factory = str
        self.conn.row_factory = lambda cursor, row: row[0]
        self.conn.execute('pragma foreign_keys = on')
        self.conn.commit()
        self.cur = self.conn.cursor()
    def readSql(self,file):
        """Return the full text of a .sql file (also kept on self.instSQL)."""
        self.instF = open(file,'r')
        self.instSQL = ''.join(self.instF)
        return self.instSQL
    def query(self,q,fo=False):
        """Execute q and commit.

        Multi-statement scripts (more than one ';') run via executescript.
        fo=True returns the first row only; otherwise the full result list.
        """
        if q.count(';') > 1:
            self.cur.executescript(q)
        elif not fo:
            self.cur.execute(q)
        else:
            self.foq = self.cur.execute(q)
        self.conn.commit()
        if fo:
            return self.foq.fetchone()
        return list(self.cur)
    def index(self,tableName,field):
        """Create an ascending index on each column in *field*; return the SQL used."""
        statements = []
        for col in field:
            statements.append("CREATE INDEX idx_"+col+"_"+tableName+" ON "+tableName+" ("+col+" ASC);")
        iQuery = '\n'.join(statements)
        self.query(iQuery)
        return iQuery
    def nullValue(self,string,is_int=False):
        """Render a value for inline SQL: '' -> NULL, ints raw, else single-quoted."""
        if string=='':
            return 'NULL'
        if is_int:
            return string
        return '\''+str(string)+'\''
    def sqlQuotes(self,string):
        """Escape single quotes for embedding in an SQL string literal."""
        return string.replace("'","''")
    def __del__(self):
        self.conn.close()
| {"/web.py": ["/gcore/__init__.py"], "/main.py": ["/gcore/__init__.py"], "/gcore/log.py": ["/gcore/timesys.py"], "/gcore/__init__.py": ["/gcore/filesys.py", "/gcore/timesys.py", "/gcore/db.py", "/gcore/log.py"], "/gcore/db.py": ["/gcore/filesys.py"]} |
74,576 | januslinhc/python-exercise | refs/heads/main | /ex3_5_test.py | import builtins
import unittest
import ex3_5
class MyTestCase(unittest.TestCase):
    """End-to-end tests for ex3_5.handle(), driving it through stdin."""
    def test_case_1(self):
        input_value = "1\n2\n1\n3\n2\n3"
        expected = "1/4"
        self.common_test(input_value, expected)
    def test_case_2(self):
        input_value = "3\n6\n1\n6\n4\n9"
        expected = "3/4"
        self.common_test(input_value, expected)
    def common_test(self, input_value, expected):
        """Stub builtins.input with a canned value and check handle()'s result.

        Fix: restore builtins.input in a finally block — the original left it
        monkeypatched for the rest of the test run whenever handle() or the
        assertion raised.
        """
        original_input = builtins.input
        builtins.input = lambda: input_value
        try:
            actual = ex3_5.handle()
            self.assertEqual(expected, actual)
        finally:
            builtins.input = original_input
if __name__ == '__main__':
    # Run the suite via unittest's auto-discovery when executed directly.
    unittest.main()
| {"/ex3_5_test.py": ["/ex3_5.py"]} |
def compute_hcf(x, y):
    """Return the greatest common divisor of x and y (Euclid's algorithm)."""
    while y != 0:
        x, y = y, x % y
    return x
class Frac(object):
    """A simple rational number with integer numerator and denominator."""
    def __init__(self, numerator, denominator):
        # int() lets callers pass numeric strings (as handle() does).
        self.numerator = int(numerator)
        self.denominator = int(denominator)
    def simplify(self):
        """Reduce this fraction in place and return self.

        Fix: use floor division — the hcf divides both terms exactly, and the
        original true division (/) silently turned both fields into floats.
        """
        x = compute_hcf(self.numerator, self.denominator)
        self.numerator = self.numerator // x
        self.denominator = self.denominator // x
        return self
    def minus(self, another_frac):
        """Alias for self - another_frac."""
        return self.__sub__(another_frac)
    def divide(self, another_frac):
        """Alias for self / another_frac."""
        return self.__truediv__(another_frac)
    def __str__(self):
        return "%i/%i" % (self.numerator, self.denominator)
    def __sub__(self, other):
        # Fix: return NotImplemented (not an implicit None) for foreign types
        # so Python raises a clear TypeError instead of propagating None.
        if not isinstance(other, Frac):
            return NotImplemented
        if self.denominator == other.denominator:
            return Frac(self.numerator - other.numerator,
                        self.denominator).simplify()
        return Frac(self.numerator * other.denominator -
                    other.numerator * self.denominator,
                    self.denominator * other.denominator).simplify()
    def __truediv__(self, other):
        if not isinstance(other, Frac):
            return NotImplemented
        return Frac(self.numerator * other.denominator,
                    self.denominator * other.numerator).simplify()
def get_value_from_input():
    """Read and return one line of raw text from stdin."""
    raw = input()
    return raw
def handle():
    """Parse alternating numerator/denominator tokens from stdin and fold the
    fractions together, alternating divide (even i) and minus (odd i).

    Returns the result formatted as 'num/den'.
    """
    tokens = get_value_from_input().split("\n")
    result = None
    for i in range(len(tokens) // 2):
        current = Frac(tokens[i * 2], tokens[i * 2 + 1])
        if result is None:
            result = current
        elif i % 2 == 0:
            result = result.divide(current)
        else:
            result = result.minus(current)
    return str(result)
| {"/ex3_5_test.py": ["/ex3_5.py"]} |
74,585 | meahd/Wk8-InclassActivity | refs/heads/main | /test_fibonacci.py | import unittest
import fibonacci
class TestCase(unittest.TestCase):
    """Unit tests for fibonacci.Fibonacci."""
    def testNIs0(self):
        """F(0) is defined as 0."""
        self.assertEqual(fibonacci.Fibonacci(0), 0)
    def testNIs1(self):
        """F(1) is defined as 1."""
        self.assertEqual(fibonacci.Fibonacci(1), 1)
    def testNIs2(self):
        """F(2) = F(1) + F(0) = 1."""
        self.assertEqual(fibonacci.Fibonacci(2), 1)
    def testFindNthFib(self):
        """Spot-check a later term: F(9) = 34."""
        self.assertEqual(fibonacci.Fibonacci(9), 34)
if __name__ == '__main__':
    # Run the suite via unittest's auto-discovery when executed directly.
    unittest.main()
74,586 | meahd/Wk8-InclassActivity | refs/heads/main | /fibonacci.py | #David Meah - CS362 - Wk8 Inclass Activity
def Fibonacci(n):
    """Return the n-th Fibonacci number (F(0)=0, F(1)=1).

    For negative n, prints "Invalid Input" and returns None (matching the
    original contract).  Rewritten iteratively: the naive double recursion
    was O(2^n); this is O(n) with identical results.
    """
    if n < 0:
        print("Invalid Input")
        return None
    a, b = 0, 1
    for _ in range(n):
        a, b = b, a + b
    return a
def Factorial(n):
    """Return n! for a non-negative integer n.

    Fixes: the original's only base case was n == 1, so Factorial(0) and any
    negative input recursed until RecursionError.  0! is 1 by definition;
    negative inputs now raise ValueError instead of crashing the interpreter.
    """
    if n < 0:
        raise ValueError("Factorial is undefined for negative numbers")
    if n <= 1:
        return 1
    return n * Factorial(n-1)
#Driver code
# NOTE(review): these demo prints run on *import* as well as direct
# execution — consider guarding with `if __name__ == '__main__':`.
print("8th Fibonacci Number is = ",Fibonacci(8))
print("Factorial of 7 is = ",Factorial(7))
74,598 | evanloshin/manufacturing-line-stock-classifier | refs/heads/master | /detect.py | # Load dependencies
import functions
import cv2
from keras.models import load_model
import pickle
def main():
    """Continuously capture webcam frames and classify them with the trained model."""
    # Load one-hot-encoding matrix (saved by train.py) to decode predictions.
    labeler = functions.load_object('one-hot-matrix.pkl')
    # Load the trained model
    model = load_model('model.h5')
    # Define the webcam object
    cam = cv2.VideoCapture(0)
    try:
        while True:
            # Capture video frame
            ret, frame = cam.read()
            if not ret:
                # Fix: the original ignored `ret`, so a disconnected camera fed
                # None into preprocess(); stop cleanly instead.
                break
            # Preprocess frame
            image = functions.preprocess(frame)
            # Predict object in frame
            logits = model.predict(image, batch_size=1)
            # Decode logits into a label; the original computed this and
            # silently discarded it — surface the classification.
            result = labeler.inverse_transform(logits)
            print(result)
    finally:
        # Fix: release the capture device (the original leaked it).
        cam.release()
if __name__ == '__main__':
    # Only start the webcam loop when run as a script (not on import).
    main()
| {"/detect.py": ["/functions.py"], "/take_sample_pictures.py": ["/functions.py"], "/train.py": ["/functions.py"]} |
74,599 | evanloshin/manufacturing-line-stock-classifier | refs/heads/master | /take_sample_pictures.py | # Load dependencies
import functions
import cv2
import matplotlib.pyplot as plt
import datetime
def main():
    """Preview the webcam and save snapshots: 's' saves a frame, 'q' quits."""
    # Define the webcam object
    cam = cv2.VideoCapture(1)
    while True:
        # Capture video frame
        ret, frame = cam.read()
        if not ret:
            # Fix: stop if the camera yields no frame instead of showing None.
            break
        # Display frame
        cv2.imshow('my webcam', frame)
        # Fix: poll the keyboard once per frame.  The original called
        # cv2.waitKey twice (1 ms then 60 ms), so each call consumed events
        # the other never saw and keypresses were frequently dropped.
        key = cv2.waitKey(1) & 0xFF
        # Quit capture mode
        if key == ord('q'):
            break
        # Save frame
        if key == ord('s'):
            timestamp = datetime.datetime.now()
            # NOTE(review): hard-coded, machine-specific output directory.
            filename = '/Users/evanloshin/Documents/Udacity/manufacturing-line-stock-classifier/images/' + str(timestamp) + '.png'
            plt.imsave(filename, frame)
            print('Save successful: ' + str(timestamp))
    # When everything's done, release the capture
    cam.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Only start the capture loop when run as a script (not on import).
    main()
| {"/detect.py": ["/functions.py"], "/take_sample_pictures.py": ["/functions.py"], "/train.py": ["/functions.py"]} |
74,600 | evanloshin/manufacturing-line-stock-classifier | refs/heads/master | /train.py | # Load dependencies
import functions
import numpy as np
from sklearn.model_selection import train_test_split
from scipy.ndimage import imread
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Conv2D, Flatten, Dense
from sklearn import preprocessing
# Hyperparameters
EPOCHS = 5
BATCH_SIZE = 10
VALIDATION_SPLIT = 0.3      # fraction of samples held out for validation
# Model architecture (NVIDIA-style stack of conv layers + dense head).
# NOTE(review): the input is declared as 200x200x3, but functions.preprocess
# grayscales frames — confirm the expected channel count before training.
model = Sequential()
model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu', name='first_convolution', input_shape=(200, 200, 3)))
model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu', name='second_convolution'))
model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu', name='third_convolution'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
def main():
    """Load labeled image paths from train_data.csv, build the dataset, and
    (when the commented lines are re-enabled) train the CNN."""
    # Load training data from csv
    filename = 'train_data.csv'
    # Fix: dtype=str — the np.str alias was deprecated in NumPy 1.20 and
    # removed in 1.24; the builtin str is the documented replacement.
    raw_data = np.loadtxt(filename, dtype=str, delimiter=",")
    image_paths = raw_data[:, 0]
    labels = raw_data[:, 1]
    # One-hot encode labels
    lb = preprocessing.LabelBinarizer()
    one_hot_labels = lb.fit_transform(labels)
    # Save transformation matrix so detect.py can decode predictions later.
    functions.save_object(lb, 'one-hot-matrix.pkl')
    # Load training images from directory
    # NOTE(review): scipy.ndimage.imread was removed in modern SciPy — confirm
    # the pinned version, or switch to imageio/matplotlib imread.
    images = []
    for path in image_paths:
        images.append(imread(path))
    images = np.array(images)
    # pre-process images
    images = np.array([functions.preprocess(img) for img in images])
    # Split data into training and validation sets
    x_train, x_valid, y_train, y_valid = train_test_split(images, one_hot_labels, test_size=VALIDATION_SPLIT)
    # Augment training set with rotated and flipped images
    x_train, y_train = functions.augment_dataset(x_train, y_train)
    # Compile and run the neural network model (left disabled on purpose).
    # model.compile(loss='mse', optimizer='adam')
    # model.fit(x_train, y_train,
    #           batch_size=BATCH_SIZE,
    #           epochs=EPOCHS,
    #           validation_data=(x_valid, y_valid))
    # model.save('model.h5')
if __name__ == '__main__':
    # Only run the training pipeline when executed as a script.
    main()
| {"/detect.py": ["/functions.py"], "/take_sample_pictures.py": ["/functions.py"], "/train.py": ["/functions.py"]} |
74,601 | evanloshin/manufacturing-line-stock-classifier | refs/heads/master | /functions.py | # Load dependencies
import cv2
from sklearn import preprocessing
from scipy.ndimage import rotate
import numpy as np
import random
import pickle
def preprocess(image):
    """
    Grayscale and normalize image to improve classifier results.
    :param image: numpy array of image pixels
    :return: conditioned numpy array
    """
    gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    return preprocessing.normalize(gray)
def augment_dataset(images, labels):
    """
    Triple the training set: for each original image, add one randomly
    rotated copy and one rotated horizontally-flipped copy.
    :param images: numpy array of images, shape (N, H, W, C)
    :param labels: numpy array of labels; 1-D, or 2-D for one-hot rows
    :return: augmented images array, corresponding labels array
    """
    # zip() is bound to the original arrays, so reassigning images/labels
    # inside the loop does not affect the iteration.
    for img, label in zip(images, labels):
        # Rotate original image
        rotated = rotate_random(img)
        # Flip original image
        flipped = np.fliplr(img)
        # Rotate flipped image
        rotated_flipped = rotate_random(flipped)
        # Add two new samples to the training data set
        images = np.append(images, [rotated, rotated_flipped], axis=0)
        # Fix: append along axis 0.  Without an axis argument np.append
        # flattens its inputs, which silently destroyed the shape of 2-D
        # one-hot label arrays (the form train.py passes in).
        labels = np.append(labels, [label, label], axis=0)
    return images, labels

def rotate_random(img):
    """
    Rotates image by a randomly chosen angle between 0-180 degrees
    and crops the (expanded) result back to the original H x W.
    :param img: original image
    :return: rotated image with the original shape
    """
    dims_orig = img.shape
    angle = random.random() * 180
    rotated = rotate(img, angle)
    dims_rtd = rotated.shape
    # Centered crop: the rotated canvas is at least as large as the original.
    lower_width = (dims_rtd[1] - dims_orig[1]) // 2
    upper_width = dims_orig[1] + lower_width
    lower_height = (dims_rtd[0] - dims_orig[0]) // 2
    upper_height = dims_orig[0] + lower_height
    cropped = rotated[lower_height:upper_height, lower_width:upper_width]
    return cropped
def save_object(obj, filename):
    """Pickle obj to filename, overwriting any existing file."""
    with open(filename, 'wb') as sink:
        pickle.dump(obj, sink, pickle.HIGHEST_PROTOCOL)
def load_object(filename):
    """Unpickle and return the object stored at filename."""
    # Renamed the handle: the original called it `input`, shadowing the builtin.
    with open(filename, 'rb') as source:
        return pickle.load(source)
| {"/detect.py": ["/functions.py"], "/take_sample_pictures.py": ["/functions.py"], "/train.py": ["/functions.py"]} |
74,602 | hvl5451/RxMinder | refs/heads/master | /api/serializers.py | from rest_framework import serializers
from .models import PillDetails
class dataSerializer(serializers.ModelSerializer):
    """DRF serializer exposing the editable PillDetails fields."""
    class Meta:
        # model/fields drive DRF's automatic field generation; the tuple
        # order here is the order used in serialized output.
        model = PillDetails
        fields = ("medication_name", "pills_per_dose",
                  "doses_per_day", "total_qty", "num_mg")
| {"/api/serializers.py": ["/api/models.py"], "/api/medicineskill.py": ["/api/models.py"], "/api/admin.py": ["/api/views.py"], "/api/views.py": ["/api/models.py"]} |
74,603 | hvl5451/RxMinder | refs/heads/master | /api/migrations/0002_auto_20190908_0602.py | # Generated by Django 2.2.5 on 2019-09-08 06:02
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the api app.

    Renames three PillDetails columns to clearer names and drops the
    now-redundant pill_per_dose field.  NOTE: generated migrations should
    not be edited by hand once applied.
    """
    dependencies = [
        ('api', '0001_initial'),
    ]
    operations = [
        migrations.RenameField(
            model_name='pilldetails',
            old_name='client_id',
            new_name='medication_name',
        ),
        migrations.RenameField(
            model_name='pilldetails',
            old_name='mg_dose',
            new_name='num_mg',
        ),
        migrations.RenameField(
            model_name='pilldetails',
            old_name='medicine_name',
            new_name='pills_per_dose',
        ),
        migrations.RemoveField(
            model_name='pilldetails',
            name='pill_per_dose',
        ),
    ]
| {"/api/serializers.py": ["/api/models.py"], "/api/medicineskill.py": ["/api/models.py"], "/api/admin.py": ["/api/views.py"], "/api/views.py": ["/api/models.py"]} |
74,604 | hvl5451/RxMinder | refs/heads/master | /api/models.py | from django.db import models
# Create your models here.
class PillDetails(models.Model):
    """One medication entry parsed from a prescription-label photo.

    All columns are free-form text captured by OCR, and every field is
    nullable because any of them may fail to parse.
    NOTE(review): quantities are stored as CharField — convert before
    doing arithmetic on them.
    """
    medication_name = models.CharField(max_length=250, null=True)
    pills_per_dose = models.CharField(max_length=250, null=True)
    doses_per_day = models.CharField(max_length=250, null=True)
    total_qty = models.CharField(max_length=10, null=True)
    num_mg = models.CharField(max_length=100, null=True)
    def __str__(self):
        # Shown in the admin and spoken by the Alexa skill handlers.
        return self.medication_name
| {"/api/serializers.py": ["/api/models.py"], "/api/medicineskill.py": ["/api/models.py"], "/api/admin.py": ["/api/views.py"], "/api/views.py": ["/api/models.py"]} |
74,605 | hvl5451/RxMinder | refs/heads/master | /RxMinder/test.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 6 23:04:11 2019
@author: max
"""
import io
import os
# One-off experiment script for Google Cloud Vision OCR.
# NOTE(review): hard-coded, machine-specific credential path — prefer setting
# GOOGLE_APPLICATION_CREDENTIALS in the environment.
os.environ[
    "GOOGLE_APPLICATION_CREDENTIALS"] = "/Users/max/Desktop/Hackathon Fall 2019/PennApps 2019 RxMinder-49ae1b133ea3.json"
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
# Instantiates a client
client = vision.ImageAnnotatorClient()
# The name of the image file to annotate
file_name = os.path.join(
    os.path.dirname(__file__),
    'images/IMG_2807.JPG')
# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
    content = image_file.read()
image = types.Image(content=content)
# Performs label detection on the image file
response = client.text_detection(image=image)
retrieved_texts = response.text_annotations
document = response.full_text_annotation
print('Texts:')
texts = []
# for text in retrieved_texts:
#     print('\n"{}"'.format(text.description))
#     texts.append('\n"{}"'.format(text.description))
#
#
#     vertices = (['({},{})'.format(vertex.x, vertex.y)
#                  for vertex in text.bounding_poly.vertices])
#
#     print('bounds: {}'.format(','.join(vertices)))
# for page in document.pages:
#     for block in page.blocks:
#         print(block.text)
#         if (feature == FeatureType.BLOCK):
#             print(block.block.bounding_box)
# %%
# print(texts[0])
print(document.text)
# Collapse newlines into one lower-case line, matching what
# api/imageProcessor.py expects for its regex parsing.
data = document.text.lower().replace('\n', ' ')
print(data)
# %%
74,606 | hvl5451/RxMinder | refs/heads/master | /api/medicineskill.py | # class for handling and directing the right request and building the skill from echo
from ask_sdk_core.skill_builder import SkillBuilder
from ask_sdk_core.dispatch_components import AbstractRequestHandler
from ask_sdk_core.utils import is_request_type, is_intent_name
from ask_sdk_core.handler_input import HandlerInput
from ask_sdk_model import Response
from ask_sdk_model.ui import SimpleCard
from .models import PillDetails
# Module-level SkillBuilder the intent handlers are meant to register against.
sb = SkillBuilder()
class ListAllintent(AbstractRequestHandler):
    """Handles the "ListAll" intent: speaks every saved medication name."""
    def can_handle(self, handler_input):
        # Route requests whose intent name is "ListAll" to this handler.
        return is_intent_name("ListAll")(handler_input)
    def handle(self, handler_input):
        speech_text = "The following is the list of medicines you take, "
        for items in PillDetails.objects.all():
            # Fix: the original assigned `items + ', '` to speech_text, which
            # discarded the intro text and tried to concatenate a model
            # instance with a string (TypeError).  Accumulate the medication
            # name text instead (PillDetails.__str__ is the medication name).
            speech_text += str(items) + ', '
        handler_input.response_builder.speak(speech_text).set_should_end_session(True)
        return handler_input.response_builder.response
class DailyDosage(AbstractRequestHandler):
    """Handler stub for the "DailyDosage" intent (not implemented yet)."""
    def can_handle(self, handler_input):
        # Route requests whose intent name is "DailyDosage" to this handler.
        return is_intent_name("DailyDosage")(handler_input)
    def handle(self, handler_input):
        # TODO: build a response; returning None currently yields no speech.
        pass
| {"/api/serializers.py": ["/api/models.py"], "/api/medicineskill.py": ["/api/models.py"], "/api/admin.py": ["/api/views.py"], "/api/views.py": ["/api/models.py"]} |
74,607 | hvl5451/RxMinder | refs/heads/master | /api/admin.py | from django.contrib import admin
from .views import PillDetails
# Register your models here.
# Fix: the original statement `admin.register(PillDetails)` only returns a
# decorator and never registers the model, so PillDetails was invisible in
# the admin.  admin.site.register performs the actual registration.
admin.site.register(PillDetails)
74,608 | hvl5451/RxMinder | refs/heads/master | /api/imageProcessor.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 7 17:15:28 2019
@author: max
"""
import io
import os
import re
# First, connect to google vision, upload image, and pull data from cloud
# NOTE(review): hard-coded, machine-specific credential path — prefer setting
# GOOGLE_APPLICATION_CREDENTIALS in the deployment environment instead.
os.environ[
    "GOOGLE_APPLICATION_CREDENTIALS"] = "/Users/semideum_zepodesgan01/PycharmProjects/RxMinder/api/PennApps 2019 RxMinder-49ae1b133ea3.json"
# Imports the Google Cloud client library
from google.cloud import vision
from google.cloud.vision import types
def image_processing(image):
    """Run Google Vision OCR on a saved prescription-label photo and parse the
    dosage, quantity, strength (mg), and medication name out of the raw text.

    :param image: file path relative to the project root's parent directory
    :return: dict with keys pills_per_dose, doses_per_day, total_qty, num_mg,
             medication_name; any field that cannot be parsed is ''.
    """
    # Instantiates a client
    client = vision.ImageAnnotatorClient()
    # The name of the image file to annotate
    file_name = os.path.join(
        os.path.dirname(__file__),
        '../{}'.format(image))
    # Loads the image into memory
    with io.open(file_name, 'rb') as image_file:
        content = image_file.read()
    image = types.Image(content=content)
    # Performs label detection on the image file
    response = client.text_detection(image=image)
    retrieved_texts = response.text_annotations
    document = response.full_text_annotation
    # Collapse to one lower-case line so the regexes below stay simple.
    raw_text = document.text.lower().replace('\n', ' ')
    print(raw_text)
    # next, we must parse the data!
    data_to_send = dict()
    # parse dosage: "take <count> ... <frequency phrase>" ('doy' tolerates a
    # common OCR misread of 'day').
    dosage_re = re.search(
        "take (one|once|two|twice|three|four|five|six|seven|eight|nine|1|2|3|4|5|6|7|8|9)(.*?)(every morning and evening|every day and night|every morning and night|every day and evening|day|doy|once daily|daily|morning|afternoon|evening|night|bedtime)",
        raw_text)
    if (dosage_re is not None):
        dosage_str = dosage_re.group(0)
        print('dosage string: {}'.format(dosage_str))
        dosage_split = dosage_str.split(' ')
        # dosage string contains two quantities: # of pills and # of times per day to take # of pills
        if dosage_split[1] == 'one' or dosage_split[1] == '1':
            num_pills = 1
        elif dosage_split[1] == 'two' or dosage_split[1] == '2':
            num_pills = 2
        elif dosage_split[1] == 'three' or dosage_split[1] == '3':
            num_pills = 3
        elif dosage_split[1] == 'four' or dosage_split[1] == '4':
            num_pills = 4
        elif dosage_split[1] == 'five' or dosage_split[1] == '5':
            num_pills = 5
        elif dosage_split[1] == 'six' or dosage_split[1] == '6':
            num_pills = 6
        elif dosage_split[1] == 'seven' or dosage_split[1] == '7':
            num_pills = 7
        elif dosage_split[1] == 'eight' or dosage_split[1] == '8':
            num_pills = 8
        elif dosage_split[1] == 'nine' or dosage_split[1] == '9':
            num_pills = 9
        else:
            print('num_pills not found')
            num_pills = 1
        # Phrase tables for doses-per-day, checked from most to least frequent
        # so the first match wins.
        five = ['five times', '5 times', '5x', 'five', '5 tablets', '5 capsules', '5 pills']
        four = ['four times', '4 times', '4x', 'four', 'every 6 hours', 'every six hours', '4 tablets', '4 capsules',
                '4 pills']
        three = ['three times', '3 times', '3x', 'three', 'every 8 hours', 'every eight hours', '3 tablets', '3 capsules',
                 '3 pills']
        twice = ['twice a day', 'twice daily', 'twice', 'two times', '2 times', '2 tablets', '2 capsules', '2 pills',
                 'every morning and evening', 'every day and night', 'every morning and night', 'every day and evening']
        once = ['once a day', 'once daily', 'once', 'daily', 'one time', '1 time', '1 tablet', '1 capsule', '1 pill',
                'every morning', 'every day', 'every afternoon', 'every evening', 'every night', 'every bedtime', 'midday']
        if any(x in dosage_str for x in five):
            num_times = 5;
        elif any(x in dosage_str for x in four):
            num_times = 4;
        elif any(x in dosage_str for x in three):
            num_times = 3;
        elif any(x in dosage_str for x in twice):
            num_times = 2;
        elif any(x in dosage_str for x in once):
            num_times = 1;
        else:
            print('dose num times per day not found')
            num_times = 1
        data_to_send['pills_per_dose'] = str(num_pills)
        data_to_send['doses_per_day'] = str(num_times)
    else:
        print('dosage data not found')
        data_to_send['pills_per_dose'] = ""
        data_to_send['doses_per_day'] = ""
    # parse quantity in bottle ('oty' tolerates an OCR misread of 'qty')
    qty_re = re.search("(qty|quantity|oty):? \d+", raw_text)
    if (qty_re is not None):
        print('qty string: {}'.format(qty_re.group(0)))
        qty = qty_re.group(0).split(' ')[1]
        data_to_send['total_qty'] = qty
    else:
        print('qty data not found')
        data_to_send['total_qty'] = ""
    # parse milligram information
    mg_re = re.search("(\d+ ?mg)", raw_text)
    print(mg_re)
    if (mg_re is not None):
        print('mg string: {}'.format(mg_re.group(0)))
        mg_dose = mg_re.group(0).split(' ')[0]
        data_to_send['num_mg'] = mg_dose
    else:
        print('mg data not found')
        data_to_send['num_mg'] = ""
    # parse medication name by scanning a known-drug word list; first hit wins.
    # NOTE(review): another hard-coded absolute path to medication_names.txt.
    with open('/Users/semideum_zepodesgan01/PycharmProjects/RxMinder/api/medication_names.txt') as f:
        med_names = f.readlines()
    med_names = [x.strip() for x in med_names]
    data_to_send['medication_name'] = ""
    for m in med_names:
        if m in raw_text:
            print("medication {} found".format(m))
            data_to_send['medication_name'] = m.title()
            break
    # finally, we have distilled all useful data into this dictionary!! Yay!!!
    print(data_to_send, flush=True)
    return data_to_send
| {"/api/serializers.py": ["/api/models.py"], "/api/medicineskill.py": ["/api/models.py"], "/api/admin.py": ["/api/views.py"], "/api/views.py": ["/api/models.py"]} |
74,609 | hvl5451/RxMinder | refs/heads/master | /api/views.py | from django.shortcuts import render
from rest_framework import generics
from django.http import HttpResponse
from django.core.files.storage import default_storage
from .models import PillDetails
from . import imageProcessor
from django.core import serializers
import json
# Create your views here.
def index(request):
    """Landing endpoint: returns a static HTML greeting."""
    greeting = '<h1> Welcome to the most rad API </h1>'
    return HttpResponse(greeting)
def process_image(request):
    """Accept an uploaded prescription image, OCR it, and return parsed data.

    Expects a multipart POST with the file under the key 'fileToUpload'.
    The file is persisted via Django's default storage, then handed to the
    image processor; its result dict is returned as a JSON HttpResponse.
    (Removed leftover debug prints and commented-out code.)
    """
    uploaded = request.FILES['fileToUpload']
    # Save under the original name; storage may rename to avoid collisions,
    # so pass the *stored* name on to the processor.
    stored_name = default_storage.save(uploaded.name, uploaded)
    result = imageProcessor.image_processing(stored_name)
    return HttpResponse(json.dumps(result))
def update_pill_data(request):
    """Create a PillDetails row from a JSON request body.

    The body must be a JSON object whose keys match PillDetails fields.
    Returns a small JSON success payload.
    """
    payload = json.loads(request.body)
    med = PillDetails(**payload)
    med.save()
    # Bug fix: the original returned '{"success": true}}' -- an unbalanced
    # brace that is not valid JSON and breaks any client that parses it.
    return HttpResponse('{"success": true}')
# def getPillData(request, generics.ListAPI):
def load_pill_data(request):
    """Return all PillDetails rows serialized as a JSON array."""
    data = serializers.serialize('json', PillDetails.objects.all())
    # Bug fix: `content_type=json` passed the json *module object* where
    # Django expects a MIME-type string, producing a bogus Content-Type
    # header. Use the proper JSON MIME type.
    return HttpResponse(data, content_type='application/json')
def delete_pill_data(request):
    """Delete the stored medication matching the 'medication_name' query param."""
    med_name = request.GET.get('medication_name')
    record = PillDetails.objects.get(medication_name=med_name)
    deletion_result = record.delete()
    print(deletion_result)
    return HttpResponse()
def process_alexa_result(request):
    # TODO: not implemented -- placeholder for handling Alexa skill callbacks.
    pass
| {"/api/serializers.py": ["/api/models.py"], "/api/medicineskill.py": ["/api/models.py"], "/api/admin.py": ["/api/views.py"], "/api/views.py": ["/api/models.py"]} |
74,622 | hriks/fibo | refs/heads/master | /fibo/settings.py |
from django.core.exceptions import ImproperlyConfigured
import json
import os
# Settings are read from a JSON file whose path is given by the FIBO_CONFIG
# environment variable; failing fast here keeps misconfiguration obvious.
CONFIGURATION_FILE = os.environ.get('FIBO_CONFIG')
if CONFIGURATION_FILE is None:
    raise ImproperlyConfigured(
        "ImproperlyConfigured: Set CONFIG environment variable"
    )
# Load the whole config dict once at import time.
with open(CONFIGURATION_FILE) as f:
    configs = json.loads(f.read())
def get_env_var(setting, configs=configs):
    """Look up `setting` in the loaded JSON config, raising if it is absent.

    Despite the name (and the error text), values come from the config file,
    not the process environment.
    """
    if setting not in configs:
        raise ImproperlyConfigured(
            "ImproperlyConfigured: Set {0} environment variable".format(
                setting)
        )
    return configs[setting]
# Project base directory (two levels up from this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DEBUG = get_env_var('DEBUG')
STATIC_URL = '/static/'
SECRET_KEY = get_env_var('SECRET_KEY')
# Comma-separated host list in the config, e.g. "localhost,example.com".
ALLOWED_HOSTS = get_env_var('ALLOWED_HOSTS').split(',')
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'core'
]
# Dev: serve assets straight from ./static. Prod: collectstatic target.
if DEBUG:
    STATICFILES_DIRS = [
        os.path.join(BASE_DIR, "static"),
    ]
else:
    STATIC_ROOT = "static"
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware'
]
ROOT_URLCONF = 'fibo.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'core/templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'fibo.wsgi.application'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR + '/media'
# The full Django DATABASES['default'] dict comes straight from the config.
DATABASES = {
    'default': get_env_var('DATABASE_CONFIG')
}
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',  # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',  # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',  # noqa
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',  # noqa
    },
]
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'Asia/Kolkata'
USE_I18N = True
USE_L10N = True
USE_TZ = True
74,623 | hriks/fibo | refs/heads/master | /core/urls.py | from django.conf.urls import url
from core.views import ListCreateFibonnaci, FibonacciView
# URL routing: the REST endpoint for Fibonacci list/create, and a catch-all
# that serves the single-page front end.
urlpatterns = [
    url(r'^api/fibonnaci$', ListCreateFibonnaci.as_view()),
    url(r'^', FibonacciView.as_view()),
]
| {"/core/urls.py": ["/core/views.py"], "/core/views.py": ["/core/serializers.py"], "/core/serializers.py": ["/core/models.py"]} |
74,624 | hriks/fibo | refs/heads/master | /core/models.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
class Fibonacci(models.Model):
    """Stores a Fibonacci series of `number` terms plus its compute time."""
    number = models.IntegerField()  # how many terms were requested
    series = models.TextField(null=True, blank=True)  # comma-separated terms
    runtime = models.FloatField(default=0.0)  # seconds spent computing
    created = models.DateTimeField(auto_now_add=True)

    def save(self, *args, **kwargs):
        # Recompute the series (and, as a side effect, `runtime`) on every save.
        self.series = self.calculate_fibonnaci_series()
        super(Fibonacci, self).save(*args, **kwargs)

    @property
    def fibonacci_series(self):
        """The stored series as a list of ints.

        Uses a list comprehension instead of bare `map` so the property
        returns a list on both Python 2 and 3 (under py3 `map` is a
        one-shot iterator, which would break repeated serialization).
        """
        return [int(term) for term in self.series.split(',')]

    def calculate_fibonnaci_series(self):
        """Compute the first `number` Fibonacci terms as a comma-joined string.

        Also records the elapsed wall time in `self.runtime`.
        """
        import timeit
        start = timeit.default_timer()
        fibs = [1, 1]
        for f in range(2, self.number):
            fibs.append(fibs[-1] + fibs[-2])
        # Bug fix: for number < 2 the original still returned two terms;
        # truncate so the series length matches `number` (empty for <= 0).
        fibs = fibs[:max(self.number, 0)]
        self.runtime = timeit.default_timer() - start
        # Removed the Python-2-only `print self.runtime` statement, which is
        # a syntax error under Python 3 and was debug output anyway.
        return ",".join(map(str, fibs))
| {"/core/urls.py": ["/core/views.py"], "/core/views.py": ["/core/serializers.py"], "/core/serializers.py": ["/core/models.py"]} |
74,625 | hriks/fibo | refs/heads/master | /core/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from django.views import View
from rest_framework import generics, permissions
from core.serializers import FibonacciSerializers, Fibonacci
class FibonacciView(View):
    """Serves the single-page front end (index.html) on GET."""
    template_name = 'index.html'
    def get(self, request, *args, **kwargs):
        return render(request, self.template_name)
class ListCreateFibonnaci(generics.ListCreateAPIView):
    """REST endpoint: GET lists all stored series, POST creates a new one.

    Open to anonymous users (AllowAny).
    """
    permission_classes = [permissions.AllowAny]
    queryset = Fibonacci.objects.all()
    serializer_class = FibonacciSerializers
| {"/core/urls.py": ["/core/views.py"], "/core/views.py": ["/core/serializers.py"], "/core/serializers.py": ["/core/models.py"]} |
74,626 | hriks/fibo | refs/heads/master | /core/serializers.py | import pytz
from rest_framework import serializers
from core.models import Fibonacci
class FibonacciSerializers(serializers.ModelSerializer):
    """DRF serializer exposing a Fibonacci row with an IST-formatted timestamp."""
    created = serializers.SerializerMethodField()
    def get_created(self, obj):
        # Render `created` in Asia/Kolkata local time.
        # NOTE(review): '%k' (space-padded 24-hour) is a glibc strftime
        # extension, not portable; combining it with '%p' (AM/PM) looks
        # inconsistent -- confirm the intended display format.
        return obj.created.astimezone(
            pytz.timezone("Asia/Kolkata")).strftime("%d %b %Y %k:%M %p")
    class Meta:
        model = Fibonacci
        fields = ('number', 'fibonacci_series', 'runtime', 'created')
        read_only_fields = ('created', 'runtime')
| {"/core/urls.py": ["/core/views.py"], "/core/views.py": ["/core/serializers.py"], "/core/serializers.py": ["/core/models.py"]} |
74,628 | RetroDude128/EpicBot-Discord-New | refs/heads/main | /cogs/__init__.py | from .basecog import BaseCog
def setup(bot):
    """discord.py extension entry point: attach the base cog to the bot."""
    cog = BaseCog(bot)
    bot.add_cog(cog)
74,629 | RetroDude128/EpicBot-Discord-New | refs/heads/main | /cogs/basecog.py | from discord.ext import commands
class BaseCog(commands.Cog):
    """Minimal cog that just holds a reference to the bot instance."""
    def __init__(self, bot):
        self.bot = bot  # the running discord.py bot this cog is attached to
def setup(bot):
    """discord.py extension hook: register BaseCog on the given bot."""
    instance = BaseCog(bot)
    bot.add_cog(instance)
74,671 | google-research-datasets/tydiqa | refs/heads/master | /gold_passage_baseline/split_predictions.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Splits a prediction file containing all languages into multiple files.
In order to evaluate the TyDiQA-GoldP task, each language must be evaluated
separately. However, much existing code expects a single training set and a
single evaluation set, so we provide this script to help with splitting
post hoc.
This script requires Python 3.
"""
import collections
import json
import os
from absl import app
from absl import flags
from absl import logging
FLAGS = flags.FLAGS
# Command-line interface: the combined predictions file to split, where the
# per-language files go, and the filename pattern for each language.
flags.DEFINE_string("input_json", None, "SQuAD-format predicions.json file.")
flags.mark_flag_as_required("input_json")
flags.DEFINE_string(
    "output_dir", None,
    "Output directory where individual language prediction files will be "
    "written.")
flags.mark_flag_as_required("output_dir")
flags.DEFINE_string(
    "lang_output_json_pattern", "tydiqa-goldp-dev-predictions-%s.json",
    "Per-language output file pattern. The language name will "
    "be inserted into the '%s' and files will be written in `output_dir`. ")
def main(argv):
  """Splits a combined predictions.json into one JSON file per language."""
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")

  valid_languages = {
      "english", "arabic", "bengali", "finnish", "indonesian", "swahili",
      "korean", "russian", "telugu"
  }
  # Bucket answers by the language prefix of each example ID.
  per_language = collections.defaultdict(dict)
  with open(FLAGS.input_json, "r") as f:
    all_predictions = json.load(f)
  for example_id, answer in all_predictions.items():
    parts = example_id.split("-")
    if len(parts) < 2:
      raise ValueError("Example ID '%s' does not start with a valid language." %
                       example_id)
    language = parts[0]
    if language not in valid_languages:
      raise ValueError(
          "Example ID '%s' does not start with a valid language: '%s'" %
          (example_id, language))
    per_language[language][example_id] = answer

  # Write one file per language found in the input.
  for language, answers in per_language.items():
    if "%s" not in FLAGS.lang_output_json_pattern:
      raise ValueError(
          "Expected placeholder '%s' in `lang_output_json_pattern`.")
    filename_language = language
    if language == "english":
      # Make sure people don't accidentally include English in their
      # overall scores.
      filename_language = "english-DO-NOT-AVERAGE"
    filename = FLAGS.lang_output_json_pattern % filename_language
    path = os.path.join(FLAGS.output_dir, filename)
    logging.info("Writing %d %s answers to %s", len(answers), language, path)
    with open(path, "w") as f:
      json.dump(answers, f, indent=4)
# Standard absl entry point: parses flags, then calls main().
if __name__ == "__main__":
  app.run(main)
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,672 | google-research-datasets/tydiqa | refs/heads/master | /eval_utils.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility function for TyDi QA evaluation."""
import collections
import gzip
import json
from absl import logging
# A data structure for storing a prediction or an annotation.
# When an example has multiple annotations, multiple TyDiLabels are used.
TyDiLabel = collections.namedtuple(
    'TyDiLabel',
    [
        'plaintext',  # context.
        'question_text',  # a question text.
        'example_id',  # the unique id for each TyDi example.
        'language',  # language id.
        'passage_answer_index',  # An index for passage answer among candidates.
        'minimal_answer_span',  # A Span object for minimal answer.
        'yes_no_answer',  # Indicate if the minimal answer is an yes/no answer
        #   The possible values are "yes", "no", "none".
        #   (case insensitive)
        #   If the field is "yes", minimal_answer_span should be empty or null.
        'passage_score',  # The score for the passage answer prediction.
        'minimal_score'  # The score for the minimal answer prediction.
    ])
class Span(object):
  """A byte-offset span within a document plaintext.

  A span is "null" when both offsets are negative, meaning there is no
  (passage or minimal) answer. Mixing one negative and one non-negative
  offset is invalid, as is a start offset greater than the end offset.
  """

  def __init__(self, start_byte_offset, end_byte_offset):
    start_is_null = start_byte_offset < 0
    end_is_null = end_byte_offset < 0
    # Either both offsets are null (negative) or neither is.
    if start_is_null != end_is_null:
      raise ValueError('Inconsistent Null Spans (Byte).')
    if (not start_is_null and not end_is_null and
        start_byte_offset > end_byte_offset):
      raise ValueError('Invalid byte spans (start_byte >= end_byte).')
    self.start_byte_offset = start_byte_offset
    self.end_byte_offset = end_byte_offset

  def is_null_span(self):
    """True iff both offsets are negative, i.e. there is no answer."""
    return self.start_byte_offset < 0 and self.end_byte_offset < 0

  def __str__(self):
    return '({},{})'.format(self.start_byte_offset, self.end_byte_offset)

  def __repr__(self):
    return self.__str__()
def safe_divide(x, y):
  """Compute x / y, but return 0 if y is zero."""
  return 0 if y == 0 else x / y
def safe_average(elements):
  """Computes the average of `elements`, but returns 0 if it is empty."""
  # Inlined the safe_divide helper: an empty container means a zero-length
  # denominator, so short-circuit to 0.
  if not elements:
    return 0
  return sum(elements) / len(elements)
def compute_partial_match_scores(gold_span, pred_span):
  """Compute byte-level precision, recall and F1 between two minimal spans.

  This is used for scoring only minimal answers. See `nonnull_span_equal`
  for scoring passage answers.

  Args:
    gold_span: a non-null Span object.
    pred_span: a non-null Span object.

  Returns:
    precision: (# bytes in both spans) / (# bytes in pred_span)
    recall: (# bytes in both spans) / (# bytes in gold_span)
    f1: harmonic mean of precision and recall.
  """
  if not isinstance(gold_span, Span):
    raise TypeError('Gold span must has a Span type.')
  if not isinstance(pred_span, Span):
    raise TypeError('Prediction span must has a Span type.')
  if gold_span.is_null_span():
    raise ValueError(
        'Null gold span should not be passed for F1 computation.')
  if pred_span.is_null_span():
    raise ValueError(
        'Null prediction span should not be passed for F1 computation.')

  # Non-overlapping spans score zero on both axes.
  if (gold_span.end_byte_offset <= pred_span.start_byte_offset or
      pred_span.end_byte_offset <= gold_span.start_byte_offset):
    precision = 0.0
    recall = 0.0
  else:
    overlap = (min(gold_span.end_byte_offset, pred_span.end_byte_offset) -
               max(gold_span.start_byte_offset, pred_span.start_byte_offset))
    assert overlap > 0
    # The only-in-gold / only-in-pred bytes partition each span, so the
    # original case-by-case bookkeeping reduces to length arithmetic:
    # overlap + only_in_pred == len(pred) and overlap + only_in_gold ==
    # len(gold).
    gold_length = gold_span.end_byte_offset - gold_span.start_byte_offset
    pred_length = pred_span.end_byte_offset - pred_span.start_byte_offset
    precision = overlap / pred_length
    recall = overlap / gold_length
  denom = precision + recall
  f1 = 2 * precision * recall / denom if denom else 0
  return precision, recall, f1
def nonnull_span_equal(span_a, span_b):
  """Given two non-null spans, return whether their byte offsets match.

  This is used for scoring only passage answers. See
  `compute_partial_match_scores` for minimal answers.

  Args:
    span_a: a Span object.
    span_b: a Span object.

  Returns:
    True or False
  """
  assert isinstance(span_a, Span)
  assert isinstance(span_b, Span)
  assert not span_a.is_null_span()
  assert not span_b.is_null_span()
  all_offsets = (span_a.start_byte_offset, span_a.end_byte_offset,
                 span_b.start_byte_offset, span_b.end_byte_offset)
  # Only compare when every byte offset is non-negative.
  if all(offset >= 0 for offset in all_offsets):
    return (span_a.start_byte_offset == span_b.start_byte_offset and
            span_a.end_byte_offset == span_b.end_byte_offset)
  return False
def gold_has_minimal_answer(gold_label_list, minimal_non_null_threshold):
  """Gets vote from annotators for judging if there is a minimal answer."""
  # An annotation "has" a minimal answer if it carries a non-null span or a
  # yes/no verdict; require at least `minimal_non_null_threshold` such votes.
  if not gold_label_list:
    return gold_label_list
  votes = 0
  for label in gold_label_list:
    has_span = not label.minimal_answer_span.is_null_span()
    if has_span or label.yes_no_answer != 'none':
      votes += 1
  return votes >= minimal_non_null_threshold
def gold_has_passage_answer(gold_label_list, passage_non_null_threshold):
  """Gets vote from annotators for judging if there is a passage answer."""
  # Count annotators whose passage answer is non-null (index >= 0) and
  # require at least `passage_non_null_threshold` of them.
  if not gold_label_list:
    return gold_label_list
  non_null_votes = sum(
      1 for label in gold_label_list if label.passage_answer_index >= 0)
  return non_null_votes >= passage_non_null_threshold
def read_prediction_jsonl(predictions_path):
  """Read the prediction jsonl file with scores.

  Args:
    predictions_path: the path for the jsonl prediction file.

  Returns:
    A dictionary with key = example_id, value = TyDiLabel.
  """
  logging.info('Reading predictions from file: %s', format(predictions_path))
  predictions = []
  with open(predictions_path, 'r') as f:
    for line in f:
      predictions.append(json.loads(line))
  tydi_pred_dict = {}
  for single_prediction in predictions:
    if 'passage_answer_index' in single_prediction:
      passage_answer_index = single_prediction['passage_answer_index']
    else:
      passage_answer_index = -1
    # Bug fix: previously `minimal_span` was only bound when the
    # 'minimal_answer' key was present, so a prediction without it either
    # raised UnboundLocalError (first iteration) or silently reused the
    # previous example's span. Default to a null span instead.
    if 'minimal_answer' in single_prediction:
      minimal_span = Span(
          single_prediction['minimal_answer']['start_byte_offset'],
          single_prediction['minimal_answer']['end_byte_offset'])
    else:
      minimal_span = Span(-1, -1)
    yes_no_answer = 'none'
    if 'yes_no_answer' in single_prediction:
      yes_no_answer = single_prediction['yes_no_answer'].lower()
      if yes_no_answer not in ['yes', 'no', 'none']:
        raise ValueError('Invalid yes_no_answer value in prediction')
      # A yes/no verdict and a concrete minimal span are mutually exclusive.
      if yes_no_answer != 'none' and not minimal_span.is_null_span():
        raise ValueError(
            'yes/no prediction and minimal answers cannot coexist.')
    pred_item = TyDiLabel(
        example_id=single_prediction['example_id'],
        language=single_prediction['language'],
        passage_answer_index=passage_answer_index,
        minimal_answer_span=minimal_span,
        yes_no_answer=yes_no_answer,
        question_text='',
        plaintext='',
        passage_score=float(single_prediction['passage_answer_score']),
        minimal_score=float(single_prediction['minimal_answer_score']))
    tydi_pred_dict[single_prediction['example_id']] = pred_item
  return tydi_pred_dict
def read_annotation_from_file(input_file):
  """Parse jsonl annotation lines into {example_id: [TyDiLabel, ...]}."""
  annotation_dict = {}
  for raw_line in input_file:
    example = json.loads(raw_line)
    example_id = example['example_id']
    # Each TyDi QA evaluation example carries three independent annotations.
    labels = []
    for annotation in example['annotations']:
      minimal = annotation['minimal_answer']
      gold_label = TyDiLabel(
          example_id=example_id,
          language=example['language'],
          passage_answer_index=annotation['passage_answer']['candidate_index'],
          minimal_answer_span=Span(minimal['plaintext_start_byte'],
                                   minimal['plaintext_end_byte']),
          passage_score=0,
          minimal_score=0,
          question_text=example['question_text'],
          plaintext=example['document_plaintext'],
          yes_no_answer=annotation['yes_no_answer'].lower())
      labels.append(gold_label)
    annotation_dict[example_id] = labels
  return annotation_dict
def read_annotation(path_name):
  """Read annotations from `path_name`, transparently handling gzip."""
  # Pick the opener based on the filename, then share one read path.
  if '.gz' in path_name:
    logging.info('Parsing %s (gzip)...', path_name)
    opened = gzip.GzipFile(path_name, 'r')
  else:
    logging.info('Parsing %s...', path_name)
    opened = open(path_name)
  with opened as input_file:
    return read_annotation_from_file(input_file)
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,673 | google-research-datasets/tydiqa | refs/heads/master | /baseline/run_tydi_test.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
"""Tests for `run_tydi.py`."""
import gzip
import json
import os
import tensorflow.compat.v1 as tf
import data
import prepare_tydi_data
import preproc
import tf_io
import tokenization
# For test_srcdir
flags = tf.flags  # alias so FLAGS (incl. --test_srcdir) is available below
FLAGS = flags.FLAGS
class RunTyDiTest(tf.test.TestCase):
  """Unit tests for TyDi baseline data prep, tokenization, and byte helpers."""

  # A single minimal TyDi example used as a fixture by the tests below.
  EXAMPLE = {
      "annotations": [{
          "annotation_id": 11725509819744756779,
          "minimal_answer": {
              "plaintext_end_byte": 2,
              "plaintext_start_byte": 7
          },
          "passage_answer": {
              "candidate_index": 0
          },
          "yes_no_answer": "NONE"
      }],
      "document_url":
          "https://en.wikipedia.org/wiki/Feature%20length",
      "document_title":
          "Feature length",
      "example_id":
          1472936222771770808,
      "language":
          "english",
      "document_plaintext":
          "In motion [CLS] [Q] picture terminology,",
      "passage_answer_candidates": [{
          "html_end_byte": 2050,
          "html_start_byte": 232,
          "plaintext_end_byte": 30,
          "plaintext_start_byte": 0
      }],
      "question_text":
          "What is the average length of a feature-length motion picture?"
  }

  def setUp(self):
    super(RunTyDiTest, self).setUp()
    self.test_tmpdir = tf.test.get_temp_dir()
    self.test_file = os.path.join(self.test_tmpdir, "tydi-unittest.jsonl.gz")

  def write_examples(self, examples):
    """Writes `examples` as gzipped jsonl into the test temp dir."""
    tf.gfile.MakeDirs(self.test_tmpdir)
    path = os.path.join(self.test_tmpdir, "tydi-unittest.jsonl.gz")
    with gzip.GzipFile(fileobj=tf.gfile.Open(path, "w")) as output_file:  # pytype: disable=wrong-arg-types
      for e in examples:
        output_file.write((json.dumps(e) + "\n").encode())

  def make_tf_examples(self, example, is_training):
    """Runs `example` through TF-example creation; returns decoded fields.

    NOTE(review): `tokens` below is never populated, so `passages` and
    `spans` are built from an empty list -- looks like leftover scaffolding;
    confirm before relying on those return values.
    """
    passages = []
    spans = []
    token_maps = []
    vocab_file = self._get_vocab_file()
    tf_example_creator = tf_io.CreateTFExampleFn(
        is_training=is_training,
        max_question_length=64,
        max_seq_length=512,
        doc_stride=128,
        include_unknowns=1.0,
        vocab_file=vocab_file)
    for record in list(
        tf_example_creator.process(example, errors=[], debug_info={})):
      tfexample = tf.train.Example()
      tfexample.ParseFromString(record)
      tokens = []
      passages.append(" ".join(tokens).replace(" ##", ""))
      if is_training:
        start = tfexample.features.feature["start_positions"].int64_list.value[
            0]
        end = tfexample.features.feature["end_positions"].int64_list.value[0]
        spans.append(" ".join(tokens[start:end + 1]).replace(" ##", ""))
      else:
        token_maps.append(
            tfexample.features.feature["token_map"].int64_list.value)
    return passages, spans, token_maps

  def test_minimal_examples(self):
    """read_entries should yield one entry per written example."""
    num_examples = 10
    self.write_examples([self.EXAMPLE] * num_examples)
    path = os.path.join(self.test_tmpdir, "tydi-unittest.jsonl.gz")
    output_examples = prepare_tydi_data.read_entries(
        path, fail_on_invalid=False)
    self.assertEqual(num_examples, len(list(output_examples)))

  def test_example_metadata(self):
    """read_entries should preserve title, id, and question text."""
    self.write_examples([self.EXAMPLE])
    path = os.path.join(self.test_tmpdir, "tydi-unittest.jsonl.gz")
    _, _, output_example, _ = next(
        prepare_tydi_data.read_entries(path, fail_on_invalid=False))
    self.assertEqual(output_example["name"], "Feature length")
    self.assertEqual(output_example["id"], "1472936222771770808")
    self.assertEqual(
        output_example["question"]["input_text"],
        "What is the average length of a feature-length motion picture?")

  def _get_vocab_file(self):
    # Path to the modified mBERT vocab checked in next to the baseline code.
    return os.path.join(FLAGS.test_srcdir,
                        ".//baseline",
                        "mbert_modified_vocab.txt")

  def test_offset_wp_mapping(self):
    """Test the mapping from wordpiece to plaintext offsets."""
    testdata = os.path.join(
        FLAGS.test_srcdir, ".//"
        "small_gold_annotation.jsonl")
    vocab_file = self._get_vocab_file()
    examples = preproc.read_tydi_examples(
        testdata,
        is_training=False,
        max_passages=45,
        max_position=45,
        fail_on_invalid=False,
        open_fn=tf_io.gopen)
    vocab_file = self._get_vocab_file()
    tokenizer = tokenization.TyDiTokenizer(vocab_file=vocab_file)
    for tydi_example in examples:
      wordpieces, start_offsets, end_offsets, offset_to_wp = (
          tokenizer.tokenize_with_offsets(tydi_example.contexts))
      # Check invariants: every non-negative offset must index into the
      # context-to-plaintext offset table.
      for i in start_offsets:
        if i > 0:
          self.assertLess(
              i, len(tydi_example.context_to_plaintext_offset),
              "Expected start offset {} to be in `context_to_plaintext_offset` "
              "byte_len(contexts)={} Context@{}='{}' Have={}".format(
                  i, data.byte_len(tydi_example.contexts), i,
                  data.byte_slice(
                      tydi_example.contexts, i, i + 100,
                      errors="ignore").encode("utf8"),
                  tydi_example.context_to_plaintext_offset))
      for i in end_offsets:
        if i > 0:
          self.assertLess(
              i, len(tydi_example.context_to_plaintext_offset),
              "Expected end offset {} to be in `context_to_plaintext_offset` "
              "byte_len(contexts)={} Have={}".format(
                  i, data.byte_len(tydi_example.contexts),
                  tydi_example.context_to_plaintext_offset))
      wp_start_offsets, wp_end_offsets = (
          preproc.create_mapping(start_offsets, end_offsets,
                                 tydi_example.context_to_plaintext_offset))
      # Spot-check that mapped wordpieces match the plaintext they cover.
      wp_count = 0
      for wp_s, wp_e in zip(wp_start_offsets, wp_end_offsets):
        if wp_s >= 0 or wp_e >= 0 and wp_count < 20:
          wp_txt = wordpieces[wp_count]
          if isinstance(wp_txt, str):
            if "##" not in wp_txt and wp_txt != "[UNK]":
              self.assertEqual(tydi_example.plaintext[wp_s:wp_e + 1], wp_txt)
        wp_count += 1
      for offset in offset_to_wp:
        self.assertLess(offset, data.byte_len(tydi_example.contexts))
        self.assertGreaterEqual(offset, 0)
        matching_wp = offset_to_wp[offset]
        if matching_wp == -1:
          continue
        if wp_end_offsets[matching_wp] == -1:
          continue
        if wp_start_offsets[matching_wp] == -1:
          continue
        self.assertGreaterEqual(wp_end_offsets[matching_wp],
                                wp_start_offsets[matching_wp])

  def test_tokenizer_simple(self):
    """Special tokens like [ContextId=0] must survive tokenization intact."""
    vocab_file = self._get_vocab_file()
    tokenizer = tokenization.TyDiTokenizer(vocab_file=vocab_file)
    text = "[CLS] [ContextId=0] This is a test."
    tokens, _, _, _ = tokenizer.tokenize_with_offsets(text)
    # Create reverse vocab lookup.
    reverse_vocab_table = {
        word_id: word for word, word_id in tokenizer.vocab.items()
    }
    output_tokens = [reverse_vocab_table[i] for i in tokens]
    self.assertEqual(output_tokens,
                     ["[CLS]", "[ContextId=0]", "This", "is", "a", "test", "."])

  def test_tokenizer_korean(self):
    """Korean text should tokenize into the expected wordpieces."""
    vocab_file = self._get_vocab_file()
    tokenizer = tokenization.TyDiTokenizer(
        vocab_file=vocab_file, fail_on_mismatch=True)
    text = "[Q] 작가는 만화를 그리기 시작했나요?"
    tokens, _, _, _ = tokenizer.tokenize_with_offsets(text)
    # Create reverse vocab lookup.
    reverse_vocab_table = {
        word_id: word for word, word_id in tokenizer.vocab.items()
    }
    output_tokens = [reverse_vocab_table[i] for i in tokens]
    self.assertEqual(output_tokens, [
        "[Q]", u"\uc791", u"##\uac00\ub294", u"\ub9cc", u"##\ud654\ub97c",
        u"\uadf8", u"##\ub9ac", u"##\uae30", u"\uc2dc", u"##\uc791",
        u"##\ud588", u"##\ub098", u"##\uc694", "?"
    ])

  def test_tokenizer(self):
    """Training features must have consistent language ids and positions."""
    testdata = os.path.join(
        FLAGS.test_srcdir, ".//"
        "small_gold_annotation.jsonl")
    test_examples = preproc.read_tydi_examples(
        testdata,
        is_training=True,
        max_passages=45,
        max_position=45,
        fail_on_invalid=False,
        open_fn=tf_io.gopen)
    vocab_file = self._get_vocab_file()
    tokenizer = tokenization.TyDiTokenizer(vocab_file=vocab_file)
    for tydi_example in test_examples:
      features = preproc.convert_single_example(
          tydi_example,
          tokenizer,
          is_training=True,
          max_question_length=64,
          max_seq_length=512,
          doc_stride=128,
          include_unknowns=1.0,
          errors=[],
          debug_info={})
      self.assertEqual(len(set([f.language_id for f in features])), 1)
      for feature in features:
        if feature.end_position <= 0:
          self.assertEqual(feature.start_position, 0)

  def test_tokenizer_val(self):
    """Train/dev features for the same example must have consistent offsets."""
    testdata = os.path.join(
        FLAGS.test_srcdir, ".//"
        "small_gold_annotation.jsonl")
    train_examples = preproc.read_tydi_examples(
        testdata,
        is_training=True,
        max_passages=45,
        max_position=45,
        fail_on_invalid=False,
        open_fn=tf_io.gopen)
    dev_examples = preproc.read_tydi_examples(
        testdata,
        is_training=False,
        max_passages=45,
        max_position=45,
        fail_on_invalid=False,
        open_fn=tf_io.gopen)
    vocab_file = self._get_vocab_file()
    tokenizer = tokenization.TyDiTokenizer(vocab_file=vocab_file)
    for tr_ex, dev_ex in zip(train_examples, dev_examples):
      train_feats = preproc.convert_single_example(
          tr_ex,
          tokenizer,
          is_training=True,
          max_question_length=64,
          max_seq_length=512,
          doc_stride=128,
          include_unknowns=1.0,
          errors=[],
          debug_info={})
      dev_feats = preproc.convert_single_example(
          dev_ex,
          tokenizer,
          is_training=False,
          max_question_length=64,
          max_seq_length=512,
          doc_stride=128,
          include_unknowns=1.0,
          errors=[],
          debug_info={})
      for train_f, dev_f in zip(train_feats, dev_feats):
        if train_f.answer_text:
          st_ = train_f.start_position
          ed_ = train_f.end_position
          st_offset = dev_f.wp_start_offset[st_]
          end_offset = dev_f.wp_end_offset[ed_]
          self.assertGreaterEqual(end_offset, st_offset)

  def test_byte_str(self):
    self.assertEqual(data.byte_str("작"), b"\xec\x9e\x91")
    self.assertEqual(data.byte_str("[Q]"), b"[Q]")

  def test_byte_len(self):
    self.assertEqual(data.byte_len("작"), 3)
    self.assertEqual(data.byte_len("[Q]"), 3)

  def test_byte_slice(self):
    # 작 -- 3 UTF-8 bytes
    s = "[Q] 작가는 만화를 그리기 시작했나요?"
    q = data.byte_slice(s, 0, 3)
    self.assertEqual(q, "[Q]")
    one_char = data.byte_slice(s, 4, 7)
    self.assertEqual(one_char, "작")
# Standard TensorFlow test entry point.
if __name__ == "__main__":
  tf.test.main()
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,674 | google-research-datasets/tydiqa | refs/heads/master | /baseline/run_tydi.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""BERT-joint baseline for TyDi v1.0.
This code is largely based on the Natural Questions baseline from
https://github.com/google-research/language/blob/master/language/question_answering/bert_joint/run_nq.py.
The model uses special tokens to dealing with offsets between the original
document content and the wordpieces. Here are examples:
[ContextId=N] [Q]
The presence of these special tokens requires overwriting some of the [UNUSED]
vocab ids of the public BERT wordpiece vocabulary, similar to NQ baseline.
Overview:
1. data.py: Responsible for deserializing the JSON and creating Pythonic data
structures
[ Usable by any ML framework / no TF dependencies ]
2. tokenization.py: Fork of BERT's tokenizer that tracks byte offsets.
[ Usable by any ML framework / no TF dependencies ]
3. preproc.py: Calls tokenization and munges JSON into a format usable by
the model.
[ Usable by any ML framework / no TF dependencies ]
4. tf_io.py: Tensorflow-specific IO code (reads `tf.Example`s from
TF records). If you'd like to use your own favorite DL framework, you'd
need to modify this; it's only about 200 lines.
4. tydi_modeling.py: The core TensorFlow model code. **If you want to replace
BERT with your own latest and greatest, start here!** Similarly, if
you'd like to use your own favorite DL framework, this would be
the only file that should require heavy modification; it's only about
200 lines.
5. postproc.py: Does postprocessing to find the answer, etc. Relevant only
for inference.
[ Usable by any ML framework / minimal tf dependencies ]
6. run_tydi.py: The actual main driver script that uses all of the above and
calls Tensorflow to do the main training and inference loops.
"""
import collections
import json
import os
from absl import logging
from bert import modeling as bert_modeling
import tensorflow.compat.v1 as tf
import postproc
import preproc
import tf_io
import tydi_modeling
import tensorflow.contrib as tf_contrib
flags = tf.flags
FLAGS = flags.FLAGS

# --- Model configuration and input/output paths. ---
flags.DEFINE_string(
    "bert_config_file", None,
    "The config json file corresponding to the pre-trained BERT model. "
    "This specifies the model architecture.")
flags.DEFINE_string("vocab_file", None,
                    "The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string(
    "output_dir", None,
    "The output directory where the model checkpoints will be written.")
flags.DEFINE_string("train_records_file", None,
                    "Precomputed tf records for training.")
flags.DEFINE_string(
    "record_count_file", None,
    "File containing number of precomputed training records "
    "(in terms of 'features', meaning slices of articles). "
    "This is used for computing how many steps to take in "
    "each fine tuning epoch.")
flags.DEFINE_integer(
    "candidate_beam", 30,
    "How many wordpiece offset to be considered as boundary at inference time.")
flags.DEFINE_string(
    "predict_file", None,
    "TyDi json for predictions. E.g., dev-v1.1.jsonl.gz or test-v1.1.jsonl.gz. "
    "Used only for `--do_predict`.")
flags.DEFINE_string(
    "precomputed_predict_file", None,
    "TyDi tf.Example records for predictions, created separately by "
    # Fixed help text: the original ran "`prepare_tydi_data.py` Used"
    # together with no sentence break.
    "`prepare_tydi_data.py`. Used only for `--do_predict`.")
flags.DEFINE_string(
    "output_prediction_file", None,
    # Fixed help text: the original implicit concatenation produced
    # "passed totydi_eval.py." (missing space).
    "Where to print predictions in TyDi prediction format, to be passed to "
    "tydi_eval.py.")
flags.DEFINE_string(
    "init_checkpoint", None,
    "Initial checkpoint (usually from a pre-trained mBERT model).")

# --- Sequence length and document windowing hyperparameters. ---
flags.DEFINE_integer(
    "max_seq_length", 512,
    "The maximum total input sequence length after WordPiece tokenization. "
    "Sequences longer than this will be truncated, and sequences shorter "
    "than this will be padded.")
flags.DEFINE_integer(
    "doc_stride", 128,
    "When splitting up a long document into chunks, how much stride to "
    "take between chunks.")
flags.DEFINE_integer(
    "max_question_length", 64,
    "The maximum number of tokens for the question. Questions longer than "
    "this will be truncated to this length.")

# --- Training / prediction control. ---
flags.DEFINE_bool("do_train", False, "Whether to run training.")
flags.DEFINE_bool("do_predict", False, "Whether to run prediction.")
flags.DEFINE_integer("train_batch_size", 16, "Total batch size for training.")
flags.DEFINE_integer("predict_batch_size", 8,
                     "Total batch size for predictions.")
flags.DEFINE_integer(
    "predict_file_shard_size", 1000,
    "The maximum number of examples to put into each temporary TF example file "
    "used as model input a prediction time.")
flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.")
flags.DEFINE_float("num_train_epochs", 3.0,
                   "Total number of training epochs to perform.")
flags.DEFINE_float(
    "warmup_proportion", 0.1,
    "Proportion of training to perform linear learning rate warmup for. "
    "E.g., 0.1 = 10% of training.")
flags.DEFINE_integer("save_checkpoints_steps", 1000,
                     "How often to save the model checkpoint.")
flags.DEFINE_integer("iterations_per_loop", 1000,
                     "How many steps to make in each estimator call.")
flags.DEFINE_integer(
    "max_answer_length", 30,
    "The maximum length of an answer that can be generated. This is needed "
    "because the start and end predictions are not conditioned on one another.")
flags.DEFINE_float(
    "include_unknowns", -1.0,
    "If positive, probability of including answers of type `UNKNOWN`.")
flags.DEFINE_bool(
    "verbose_logging", False,
    "If true, all of the warnings related to data processing will be printed. "
    "A number of warnings are expected for a normal TyDi evaluation.")
flags.DEFINE_integer(
    "max_passages", 45, "Maximum number of passages to consider for a "
    # Fixed help text: the original implicit concatenation produced
    # "more thanthis," (missing space).
    "single article. If an article contains more than "
    "this, they will be discarded during training. "
    "BERT's WordPiece vocabulary must be modified to include "
    "these within the [unused*] vocab IDs.")
flags.DEFINE_integer(
    "max_position", 45,
    "Maximum passage position for which to generate special tokens.")
flags.DEFINE_bool(
    "fail_on_invalid", True,
    "Stop immediately on encountering an invalid example? "
    "If false, just print a warning and skip it.")

### TPU-specific flags:
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
tf.flags.DEFINE_string(
    "tpu_name", None,
    "The Cloud TPU to use for training. This should be either the name "
    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "
    "url.")
tf.flags.DEFINE_string(
    "tpu_zone", None,
    "[Optional] GCE zone where the Cloud TPU is located in. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string(
    "gcp_project", None,
    "[Optional] Project name for the Cloud TPU-enabled project. If not "
    "specified, we will attempt to automatically detect the GCE project from "
    "metadata.")
tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer(
    "num_tpu_cores", 8,
    "Only used if `use_tpu` is True. Total number of TPU cores to use.")
def validate_flags_or_throw(bert_config):
  """Validate the input FLAGS or throw an exception."""
  if not (FLAGS.do_train or FLAGS.do_predict):
    raise ValueError("At least one of `{do_train,do_predict}` must be True.")
  if FLAGS.do_train:
    # Both inputs are produced ahead of time by the data preparation step.
    for required_flag in ("train_records_file", "record_count_file"):
      if not getattr(FLAGS, required_flag):
        raise ValueError("If `do_train` is True, then `%s` "
                         "must be specified." % required_flag)
  if FLAGS.do_predict:
    for required_flag in ("predict_file", "output_prediction_file"):
      if not getattr(FLAGS, required_flag):
        raise ValueError("If `do_predict` is True, "
                         "then `%s` must be specified." % required_flag)
  # The sequence length is bounded above by the positional embedding table
  # of the pre-trained model and below by the question budget plus the
  # special tokens ([CLS], [SEP], [SEP]).
  max_positions = bert_config.max_position_embeddings
  if FLAGS.max_seq_length > max_positions:
    raise ValueError(
        "Cannot use sequence length %d because the BERT model "
        "was only trained up to sequence length %d" %
        (FLAGS.max_seq_length, max_positions))
  if FLAGS.max_seq_length <= FLAGS.max_question_length + 3:
    raise ValueError(
        "The max_seq_length (%d) must be greater than max_question_length "
        "(%d) + 3" % (FLAGS.max_seq_length, FLAGS.max_question_length))
def main(_):
  """Runs BERT-joint TyDi training and/or prediction, per the `do_*` flags."""
  logging.set_verbosity(logging.INFO)
  bert_config = bert_modeling.BertConfig.from_json_file(FLAGS.bert_config_file)
  validate_flags_or_throw(bert_config)
  tf.gfile.MakeDirs(FLAGS.output_dir)
  tpu_cluster_resolver = None
  if FLAGS.use_tpu and FLAGS.tpu_name:
    tpu_cluster_resolver = tf_contrib.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
  is_per_host = tf_contrib.tpu.InputPipelineConfig.PER_HOST_V2
  run_config = tf_contrib.tpu.RunConfig(
      cluster=tpu_cluster_resolver,
      master=FLAGS.master,
      model_dir=FLAGS.output_dir,
      save_checkpoints_steps=FLAGS.save_checkpoints_steps,
      tpu_config=tf_contrib.tpu.TPUConfig(
          iterations_per_loop=FLAGS.iterations_per_loop,
          per_host_input_for_training=is_per_host))
  num_train_steps = None
  num_warmup_steps = None
  if FLAGS.do_train:
    # The feature count is read from a precomputed side file so the number of
    # fine-tuning steps can be derived without scanning the training records.
    with tf.gfile.Open(FLAGS.record_count_file, "r") as f:
      num_train_features = int(f.read().strip())
    num_train_steps = int(num_train_features / FLAGS.train_batch_size *
                          FLAGS.num_train_epochs)
    logging.info("record_count_file: %s", FLAGS.record_count_file)
    logging.info("num_records (features): %d", num_train_features)
    logging.info("num_train_epochs: %d", FLAGS.num_train_epochs)
    logging.info("train_batch_size: %d", FLAGS.train_batch_size)
    logging.info("num_train_steps: %d", num_train_steps)
    num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)
  model_fn = tydi_modeling.model_fn_builder(
      bert_config=bert_config,
      init_checkpoint=FLAGS.init_checkpoint,
      learning_rate=FLAGS.learning_rate,
      num_train_steps=num_train_steps,
      num_warmup_steps=num_warmup_steps,
      use_tpu=FLAGS.use_tpu,
      use_one_hot_embeddings=FLAGS.use_tpu)
  # If TPU is not available, this falls back to normal Estimator on CPU or GPU.
  estimator = tf_contrib.tpu.TPUEstimator(
      use_tpu=FLAGS.use_tpu,
      model_fn=model_fn,
      config=run_config,
      train_batch_size=FLAGS.train_batch_size,
      predict_batch_size=FLAGS.predict_batch_size)
  if FLAGS.do_train:
    logging.info("Running training on precomputed features")
    logging.info("  Num split examples = %d", num_train_features)
    logging.info("  Batch size = %d", FLAGS.train_batch_size)
    logging.info("  Num steps = %d", num_train_steps)
    train_filenames = tf.gfile.Glob(FLAGS.train_records_file)
    train_input_fn = tf_io.input_fn_builder(
        input_file=train_filenames,
        seq_length=FLAGS.max_seq_length,
        is_training=True,
        drop_remainder=True)
    estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)
  if FLAGS.do_predict:
    if not FLAGS.precomputed_predict_file:
      # No precomputed features: convert the raw JSONL input into sharded
      # tf.Example files on the fly (lazily, one shard at a time).
      predict_examples_iter = preproc.read_tydi_examples(
          input_file=FLAGS.predict_file,
          is_training=False,
          max_passages=FLAGS.max_passages,
          max_position=FLAGS.max_position,
          fail_on_invalid=FLAGS.fail_on_invalid,
          open_fn=tf_io.gopen)
      shards_iter = write_tf_feature_files(predict_examples_iter)
    else:
      # Uses zeros for example and feature counts since they're unknown, and
      # we only use them for logging anyway.
      shards_iter = enumerate(
          ((f, 0, 0) for f in tf.gfile.Glob(FLAGS.precomputed_predict_file)), 1)
    # Accumulates all of the prediction results to be written to the output.
    full_tydi_pred_dict = {}
    total_num_examples = 0
    for shard_num, (shard_filename, shard_num_examples,
                    shard_num_features) in shards_iter:
      total_num_examples += shard_num_examples
      logging.info(
          "Shard %d: Running prediction for %s; %d examples, %d features.",
          shard_num, shard_filename, shard_num_examples, shard_num_features)
      # Runs the model on the shard and store the individual results.
      # If running predict on TPU, you will need to specify the number of steps.
      predict_input_fn = tf_io.input_fn_builder(
          input_file=[shard_filename],
          seq_length=FLAGS.max_seq_length,
          is_training=False,
          drop_remainder=False)
      all_results = []
      for result in estimator.predict(
          predict_input_fn, yield_single_examples=True):
        if len(all_results) % 10000 == 0:
          logging.info("Shard %d: Predicting for feature %d/%s", shard_num,
                       len(all_results), shard_num_features)
        # Convert numpy arrays in the Estimator output to plain Python types
        # so they can be serialized downstream.
        unique_id = int(result["unique_ids"])
        start_logits = [float(x) for x in result["start_logits"].flat]
        end_logits = [float(x) for x in result["end_logits"].flat]
        answer_type_logits = [
            float(x) for x in result["answer_type_logits"].flat
        ]
        all_results.append(
            tydi_modeling.RawResult(
                unique_id=unique_id,
                start_logits=start_logits,
                end_logits=end_logits,
                answer_type_logits=answer_type_logits))
      # Reads the prediction candidates from the (entire) prediction input file.
      candidates_dict = read_candidates(FLAGS.predict_file)
      predict_features = [
          tf.train.Example.FromString(r)
          for r in tf.python_io.tf_record_iterator(shard_filename)
      ]
      logging.info("Shard %d: Post-processing predictions.", shard_num)
      logging.info("  Num candidate examples loaded (includes all shards): %d",
                   len(candidates_dict))
      logging.info("  Num candidate features loaded: %d", len(predict_features))
      logging.info("  Num prediction result features: %d", len(all_results))
      logging.info("  Num shard features: %d", shard_num_features)
      tydi_pred_dict = postproc.compute_pred_dict(
          candidates_dict,
          predict_features, [r._asdict() for r in all_results],
          candidate_beam=FLAGS.candidate_beam)
      logging.info("Shard %d: Post-processed predictions.", shard_num)
      logging.info("  Num shard examples: %d", shard_num_examples)
      logging.info("  Num post-processed results: %d", len(tydi_pred_dict))
      if shard_num_examples != len(tydi_pred_dict):
        logging.warning("  Num missing predictions: %d",
                        shard_num_examples - len(tydi_pred_dict))
      for key, value in tydi_pred_dict.items():
        # Duplicate keys across shards indicate an upstream sharding bug;
        # keep the latest value but warn loudly.
        if key in full_tydi_pred_dict:
          logging.warning("ERROR: '%s' already in full_tydi_pred_dict!", key)
        full_tydi_pred_dict[key] = value
    logging.info("Prediction finished for all shards.")
    logging.info("  Total input examples: %d", total_num_examples)
    logging.info("  Total output predictions: %d", len(full_tydi_pred_dict))
    # NOTE(review): bytes are written to a file opened in "w" mode; this
    # works with tf.gfile but would fail with builtin open -- confirm.
    with tf.gfile.Open(FLAGS.output_prediction_file, "w") as output_file:
      for prediction in full_tydi_pred_dict.values():
        output_file.write((json.dumps(prediction) + "\n").encode())
def write_tf_feature_files(tydi_examples_iter):
  """Converts TyDi examples to features and writes them to files.

  Args:
    tydi_examples_iter: Iterable of TyDi examples, as produced by
      `preproc.read_tydi_examples`.

  Yields:
    `(shard_num, (filename, shard_num_examples, shard_num_features))` tuples,
    one per non-empty shard file written under `FLAGS.output_dir`.
  """
  logging.info("Converting examples started.")
  total_feature_count_frequencies = collections.defaultdict(int)
  total_num_examples = 0
  total_num_features = 0
  for shard_num, examples in enumerate(
      sharded_iterator(tydi_examples_iter, FLAGS.predict_file_shard_size), 1):
    features_writer = tf_io.FeatureWriter(
        filename=os.path.join(FLAGS.output_dir,
                              "features.tf_record-%03d" % shard_num),
        is_training=False)
    num_features_to_ids, shard_num_examples = (
        preproc.convert_examples_to_features(
            tydi_examples=examples,
            vocab_file=FLAGS.vocab_file,
            is_training=False,
            max_question_length=FLAGS.max_question_length,
            max_seq_length=FLAGS.max_seq_length,
            doc_stride=FLAGS.doc_stride,
            include_unknowns=FLAGS.include_unknowns,
            output_fn=features_writer.process_feature))
    features_writer.close()
    # Skip empty shards (e.g. when the input runs out exactly at a shard
    # boundary) rather than yielding a file with no features.
    if shard_num_examples == 0:
      continue
    # `num_features_to_ids` maps a per-example feature count to the ids of
    # examples with that count; aggregate per-shard and global totals.
    shard_num_features = 0
    for num_features, ids in num_features_to_ids.items():
      shard_num_features += (num_features * len(ids))
      total_feature_count_frequencies[num_features] += len(ids)
    total_num_examples += shard_num_examples
    total_num_features += shard_num_features
    logging.info("Shard %d: Converted %d input examples into %d features.",
                 shard_num, shard_num_examples, shard_num_features)
    logging.info("  Total so far: %d input examples, %d features.",
                 total_num_examples, total_num_features)
    yield (shard_num, (features_writer.filename, shard_num_examples,
                       shard_num_features))
  logging.info("Converting examples finished.")
  logging.info("  Total examples = %d", total_num_examples)
  logging.info("  Total features = %d", total_num_features)
  logging.info("  total_feature_count_frequencies = %s",
               sorted(total_feature_count_frequencies.items()))
def sharded_iterator(iterator, shard_size):
  """Returns an iterator of iterators of at most size `shard_size`.

  Fixes two defects of the previous implementation: (1) when the input length
  was an exact multiple of `shard_size`, a trailing *empty* shard was yielded,
  which caused the caller to create an empty output file; (2) passing a
  non-iterator iterable (e.g. a list) caused an infinite loop because
  `enumerate` restarted it on every shard.

  Args:
    iterator: Any iterable of items.
    shard_size: Maximum number of items per shard; must be >= 1.

  Yields:
    Generators, each producing up to `shard_size` consecutive items. Each
    shard must be fully consumed before the next one is requested.
  """
  source = iter(iterator)
  exhausted = object()  # sentinel: distinguishes "no more items" from None
  while True:
    head = next(source, exhausted)
    if head is exhausted:
      return  # input exhausted; never yield an empty shard
    def shard(head=head):  # bind current head to avoid late-binding capture
      yield head
      for _ in range(shard_size - 1):
        item = next(source, exhausted)
        if item is exhausted:
          return
        yield item
    yield shard()
def read_candidates(input_pattern):
  """Read candidates from an input pattern."""
  # Merge the candidates of every file matching the glob into one dict.
  candidates = {}
  for input_path in tf.gfile.Glob(input_pattern):
    split_candidates = postproc.read_candidates_from_one_split(
        tf_io.gopen(input_path))
    candidates.update(split_candidates)
  return candidates
if __name__ == "__main__":
  # These flags have no usable defaults, so fail fast if they are missing.
  flags.mark_flag_as_required("vocab_file")
  flags.mark_flag_as_required("bert_config_file")
  flags.mark_flag_as_required("output_dir")
  tf.app.run()
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,675 | google-research-datasets/tydiqa | refs/heads/master | /eval_utils_test.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing code for eval_utils."""
import tensorflow.compat.v1 as tf
import tydiqa.eval_utils as util
class EvalUtilsTest(tf.test.TestCase):
  """Unit tests for the span helpers in eval_utils."""

  def testSpan(self):
    """A span may be null (-1) on both ends, but never on just one."""
    for start, end in ((-1, 1), (1, -1)):
      self.assertRaises(ValueError, util.Span, start, end)

  def testNullSpan(self):
    """(-1, -1) is the canonical null span; real spans are non-null."""
    null_span = util.Span(-1, -1)
    real_span = util.Span(0, 1)
    self.assertTrue(null_span.is_null_span())
    self.assertFalse(real_span.is_null_span())

  def testSpanEqual(self):
    """Non-null spans compare equal iff both endpoints match."""
    self.assertTrue(
        util.nonnull_span_equal(util.Span(100, 102), util.Span(100, 102)))
    self.assertFalse(
        util.nonnull_span_equal(util.Span(100, 102), util.Span(22, 23)))

  def testSpanPartialMatch(self):
    """Checks precision/recall (and F1 where stated) of span overlap."""
    # Each case: gold span, pred span, expected scores. A 3-tuple expectation
    # also pins F1; a 2-tuple checks precision and recall only.
    cases = [
        # Exact match.
        ((100, 102), (100, 102), (1., 1., 1.)),
        # Pred earlier than gold, no overlap.
        ((100, 102), (78, 100), (0.0, 0.0, 0.0)),
        # Gold earlier than pred, no overlap.
        ((1, 42), (78, 100), (0.0, 0.0, 0.0)),
        # Partial overlap, gold inside pred.
        ((100, 102), (100, 104), (0.5, 1.)),
        # Partial overlap, gold comes before pred.
        ((90, 104), (100, 112), (4./12, 4./14)),
        # Partial overlap, gold fully inside pred.
        ((101, 102), (100, 104), (0.25, 1)),
        # Partial overlap, pred fully inside gold.
        ((100, 104), (101, 102), (1, 0.25)),
    ]
    for gold, pred, expected in cases:
      scores = util.compute_partial_match_scores(
          util.Span(*gold), util.Span(*pred))
      self.assertEqual(expected, tuple(scores)[:len(expected)])
if __name__ == '__main__':
tf.test.main()
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,676 | google-research-datasets/tydiqa | refs/heads/master | /tydi_eval_test.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Testing code for tydi_eval."""
import tensorflow.compat.v1 as tf
import tydiqa.eval_utils as util
import tydiqa.tydi_eval as ev
class TyDiEvalTest(tf.test.TestCase):
  """Testing codes for tydi_eval."""
  # --- Helpers for constructing gold / predicted labels. ---
  def _get_null_span(self):
    # (-1, -1) is the sentinel meaning "no minimal answer".
    return util.Span(-1, -1)
  def _get_tydi_label(self, passage_index, minimal_span, language='eng', eid=0):
    # Builds a label with only the scoring-relevant fields populated.
    return util.TyDiLabel(
        example_id=eid,
        passage_answer_index=passage_index,
        minimal_answer_span=minimal_span,
        question_text='',
        plaintext='',
        passage_score=0,
        minimal_score=0,
        language=language,
        yes_no_answer='none')
  def _get_tydi_label_with_yes_no(self, passage_index, yes_no_answer,
                                  language='eng', eid=0):
    # Same as `_get_tydi_label`, but carrying a yes/no answer and a null
    # minimal span (a yes/no answer replaces a span answer).
    assert yes_no_answer != 'none'
    return util.TyDiLabel(
        example_id=eid,
        passage_answer_index=passage_index,
        minimal_answer_span=self._get_null_span(),
        question_text='',
        plaintext='',
        passage_score=0,
        minimal_score=0,
        language=language,
        yes_no_answer=yes_no_answer)
  def _get_span(self, start, end):
    return util.Span(start, end)
  def testPassageStat(self):
    """Test instance level passage answer f1."""
    # Test cases when there is no long answer.
    # NOTE(review): this inline comment looks inverted -- the assertions below
    # expect `gold_has_answer` to be True. Confirm against score_passage_answer.
    gold_passage_indexes = [0, 0, -1, -1]
    gold_label_list = [
        self._get_tydi_label(gold_passage_index, self._get_null_span())
        for gold_passage_index in gold_passage_indexes]
    pred_label = self._get_tydi_label(0, self._get_null_span())
    gold_has_answer, pred_has_answer, is_correct, _ = ev.score_passage_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual(is_correct, True)
    # Test cases when there is a long answer.
    gold_passage_indexes = [1, 2, -1]
    gold_label_list = [
        self._get_tydi_label(gold_passage_index, self._get_null_span())
        for gold_passage_index in gold_passage_indexes]
    # Predicted passage 4 matches no gold annotation, so it is incorrect.
    pred_label = self._get_tydi_label(4,
                                      self._get_null_span())
    gold_has_answer, pred_has_answer, is_correct, _ = ev.score_passage_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual(is_correct, False)
  def testMinimalStat(self):
    """Test instance level minimal answer p, r, f1."""
    # NOTE(review): `long_span` below is a Span but is passed to
    # `_get_tydi_label` as `passage_index` (an int in testPassageStat).
    # Minimal-answer scoring apparently ignores the passage index -- confirm
    # against tydi_eval.score_minimal_answer.
    long_span = self._get_span(0, 10)
    # Test case assumes having 5 way annotations.
    # Test case when there is no gold short answer.
    gold_spans = [(1, 3), (-1, -1), (-1, -1), (-1, -1), (-1, -1)]
    gold_label_list = [self._get_tydi_label(
        long_span, self._get_span(a, b)) for a, b in gold_spans]
    pred_label = self._get_tydi_label_with_yes_no(long_span, 'yes')
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r, f1), (0, 0, 0))
    # This test case assumes having 5 way annotations.
    # Test case when there is gold short answer.
    gold_spans = [(39, 50), (38, 50), (34, 50), (-1, -1), (-1, -1)]
    gold_label_list = [self._get_tydi_label(
        long_span, self._get_span(a, b)) for a, b in gold_spans]
    pred_label = self._get_tydi_label(long_span, self._get_span(30, 40))
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r), (6/10., 6/16.))
    # When there is no overlap.
    pred_label = self._get_tydi_label(long_span, self._get_span(30, 34))
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r), (0., 0.))
    # When there is complete overlap.
    pred_label = self._get_tydi_label(long_span, self._get_span(39, 50))
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r), (1., 1.))
    # This test case assumes having 3 way annotations.
    # Test case when there is gold short answer.
    gold_spans = [(39, 50), (-1, -1), (-1, -1)]
    gold_label_list = [self._get_tydi_label(
        long_span, self._get_span(a, b)) for a, b in gold_spans]
    pred_label = self._get_tydi_label(long_span, self._get_span(30, 40))
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r), (1/10., 1/11.))
    # When there is no overlap.
    pred_label = self._get_tydi_label(long_span, self._get_span(30, 34))
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r), (0., 0.))
    # When there is complete overlap.
    pred_label = self._get_tydi_label(long_span, self._get_span(39, 50))
    gold_has_answer, pred_has_answer, (p, r, f1), _ = ev.score_minimal_answer(
        gold_label_list, pred_label, 1)
    self.assertEqual(gold_has_answer, True)
    self.assertEqual(pred_has_answer, True)
    self.assertEqual((p, r), (1., 1.))
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,677 | google-research-datasets/tydiqa | refs/heads/master | /baseline/tokenization.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A fork of BERT's tokenizer that tracks byte offsets.
This module does not depend on TensorFlow and should be re-usable within your
favorite ML/DL framework.
"""
import collections
from absl import logging
from bert import tokenization as bert_tokenization
import data
# A single unit of tokenized text. Keeping both the normalized and original
# forms lets downstream code recover exact byte offsets in the source document.
SubToken = collections.namedtuple(
    "SubToken",
    [
        # The normalized version of the token, including '##' markers, etc.
        "normalized",
        # The original version of the token, which can be used for computing
        # byte offsets within a document.
        "orig",
        # Is this a 'good' token that should be fed to the model? (Stripped
        # whitespace, etc. that still needs to be kept around for byte-tracking
        # purposes.)
        "is_good"
    ])
def whitespace_tokenize(subtokens):
  """An implementation of BERT's whitespace tokenizer that preserves space."""
  # Isolate every whitespace character; whitespace tokens remain "good" so
  # they stay available for byte-offset tracking.
  return split_subtokens_on(subtokens, str.isspace, are_good=True)
def split_subtokens_on(subtokens, should_isolate_func, are_good):
  """Nondestructively splits subtokens using a character-wise predicate.

  Args:
    subtokens: List of `SubToken`s.
    should_isolate_func: Function that takes a char and returns a boolean. True
      means that the character should be isolated in the output.
    are_good: This boolean indicates whether or not each isolated character is
      "good", controlling whether or not it will get fed to the model or simply
      dropped. This is stored in `is_good` in `SubToken`.

  Returns:
    List of `SubToken`s, in the same order as the input characters.
  """
  output = []
  pending_chars = []  # consecutive non-isolated chars of the current run

  def _flush_pending():
    # Emit the accumulated run as a single good `SubToken`.
    if pending_chars:
      pending_str = "".join(pending_chars)
      output.append(SubToken(pending_str, pending_str, is_good=True))
      del pending_chars[:]

  for subtoken, orig_subtoken, is_good in subtokens:
    assert subtoken == orig_subtoken
    # Don't bother running predicate on bad tokens (including potentially
    # invalid Unicode).
    if not is_good:
      # BUG FIX: previously, pending characters were not flushed before a bad
      # token was appended, so a run could be emitted *after* (and merged
      # across) a bad token, corrupting token order and byte-offset tracking.
      _flush_pending()
      output.append(SubToken(subtoken, subtoken, is_good=False))
      continue
    for char in subtoken:
      if should_isolate_func(char):
        _flush_pending()
        output.append(SubToken(char, char, is_good=are_good))
      else:
        pending_chars.append(char)
    # BUG FIX: flush at each input subtoken boundary so that characters from
    # distinct input subtokens are never merged into one output token.
    _flush_pending()
  return output
class NonDestructiveFullTokenizer(object):
  """Runs end-to-end tokenization: basic splitting, then wordpieces."""

  def __init__(self, vocab_file):
    self.vocab = bert_tokenization.load_vocab(vocab_file)
    self.inv_vocab = {wp_id: wp for wp, wp_id in self.vocab.items()}
    self.basic_tokenizer = NonDestructiveBasicTokenizer(vocab=self.vocab)
    self.wordpiece_tokenizer = NonDestructiveWordpieceTokenizer(
        vocab=self.vocab)

  def tokenize(self, text):
    """Tokenizes a piece of `text` and returns a list of `SubToken`s."""
    result = []
    for token, orig_token, is_good in self.basic_tokenizer.tokenize(text):
      if not is_good:
        # Stripped whitespace etc.: kept only for byte-offset tracking.
        result.append(SubToken(token, orig_token, is_good=False))
      elif bert_tokenization.preserve_token(token, self.vocab):
        # Special tokens such as '[Q]' and '[SEP]' bypass wordpiece splitting.
        result.append(SubToken(token, orig_token, is_good=True))
      else:
        # Ordinary text that survived whitespace and punctuation splitting is
        # delegated to the wordpiece tokenizer.
        result.extend(
            self.wordpiece_tokenizer.tokenize(
                [SubToken(token, orig_token, is_good)]))
    return result
class NonDestructiveBasicTokenizer(bert_tokenization.BasicTokenizer):
  """An implementation of BERT's BasicTokenizer that preserves space."""

  def __init__(self, vocab=tuple()):
    """Constructs a `NonDestructiveBasicTokenizer`.

    Lower casing (and accent removal) are not supported.

    Args:
      vocab: A container of tokens to not mutate during tokenization.
    """
    # Deliberately does not call the parent constructor: this subclass never
    # lower-cases, strips accents, or removes characters.
    self.vocab = vocab

  def tokenize(self, text):
    """Tokenizes a piece of `text` and returns a list of `SubToken`s."""
    text = bert_tokenization.convert_to_unicode(text)
    # Begin with the entire input as a single string.
    subtokens = [SubToken(text, text, is_good=True)]
    del text  # unused after this point
    subtokens = self._clean_text(subtokens)
    # This was added on November 1st, 2018 for the multilingual and Chinese
    # models. This is also applied to the English models now, but it doesn't
    # matter since the English models were not trained on any Chinese data
    # and generally don't have any Chinese data in them (there are Chinese
    # characters in the vocabulary because Wikipedia does have some Chinese
    # words in the English Wikipedia.).
    subtokens = self._tokenize_chinese_chars(subtokens)
    # Split on whitespace (the whitespace tokens themselves are preserved).
    subtokens = whitespace_tokenize(subtokens)
    # Split punctuation, preserving special tokens.
    split_subtokens = []
    for subtoken, orig_subtoken, is_good in subtokens:
      assert subtoken == orig_subtoken
      if not is_good:
        split_subtokens.append(SubToken(subtoken, subtoken, is_good=False))
        continue
      if bert_tokenization.preserve_token(subtoken, self.vocab):
        split_subtokens.append(SubToken(subtoken, subtoken, is_good=True))
        continue
      split_subtokens.extend(
          self._run_split_on_punc([SubToken(subtoken, subtoken, is_good=True)]))
    return split_subtokens

  def _run_split_on_punc(self, subtokens):
    """Splits punctuation within a list of `SubToken`s."""
    return split_subtokens_on(subtokens, self._is_punctuation, are_good=True)

  def _is_punctuation(self, char):
    return bert_tokenization._is_punctuation(char)  # pylint: disable=protected-access

  def _is_control(self, char):
    return bert_tokenization._is_control(char)  # pylint: disable=protected-access

  def _is_chinese_char(self, cp):
    # BUG FIX: the original ended this statement with a trailing comma, so it
    # returned a one-element *tuple* instead of a boolean. A non-empty tuple
    # is always truthy, which made `_tokenize_chinese_chars` treat every
    # character as CJK and isolate each one individually.
    return bert_tokenization.BasicTokenizer._is_chinese_char(  # pylint: disable=protected-access
        self, cp)

  def _tokenize_chinese_chars(self, subtokens):
    """Adds whitespace around any CJK character."""
    return split_subtokens_on(
        subtokens, lambda char: self._is_chinese_char(ord(char)), are_good=True)

  def _clean_text(self, subtokens):
    """Performs invalid character handling and whitespace cleanup on text.

    We never remove characters, but instead just isolate them and mark them as
    not being actual inputs to the model so that downstream code can accurately
    track byte offsets.

    Args:
      subtokens: List of input `SubToken`s.

    Returns:
      List of `SubToken`s.
    """
    def should_isolate(char):
      cp = ord(char)
      # U+0000 and U+FFFD (the replacement character) mark invalid input.
      return cp == 0 or cp == 0xfffd or self._is_control(char)
    return split_subtokens_on(subtokens, should_isolate, are_good=False)
class NonDestructiveWordpieceTokenizer(object):
  """Runs WordPiece tokenization (offset-preserving)."""
  def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=200):
    # vocab: container of wordpiece strings; only membership tests are used.
    # unk_token: emitted when a word cannot be segmented into known pieces.
    # max_input_chars_per_word: words longer than this become `unk_token`.
    self.vocab = vocab
    self.unk_token = unk_token
    self.max_input_chars_per_word = max_input_chars_per_word
  def tokenize(self, subtokens):
    """Tokenizes a piece of text into its word pieces.

    This uses a greedy longest-match-first algorithm to perform tokenization
    using the given vocabulary.

    For example:
      input = "unaffable"
      output = ["un", "##aff", "##able"]

    Args:
      subtokens: List of `SubToken`s.

    Returns:
      List of `SubToken`s. Each output `SubToken.orig` preserves the raw
      (un-prefixed) slice of the input so byte offsets remain recoverable.
    """
    output_tokens = []
    for token, orig_token, is_good in subtokens:
      if not is_good:
        # Non-model tokens pass through untouched for byte-offset tracking.
        output_tokens.append(SubToken(token, orig_token, is_good=False))
        continue
      # Due to the use of Python3, this tokenization algorithm is subtly
      # different than the original BERT implementation: Instead of slicing
      # at byte boundaries and checking for membership in the vocabulary, we
      # only slice at character boundaries (and check for character-wise length,
      # not byte-wise length). In practice, this shouldn't make much difference
      # other than allowing some longer words to be checked and to prevent
      # invalid Unicode strings being checked against the vocabulary.
      token_char_len = len(token)
      if token_char_len > self.max_input_chars_per_word:
        output_tokens.append(SubToken(self.unk_token, token, is_good=True))
        continue
      is_unk = False
      start = 0
      sub_tokens = []
      # Greedy longest-match-first: for each start position, try the longest
      # remaining slice first and shrink until a vocab entry is found.
      while start < token_char_len:
        end = token_char_len
        cur_substr = None
        while start < end:
          orig_substr = token[start:end]
          substr = orig_substr
          if start > 0:
            # Non-initial pieces carry BERT's '##' continuation prefix.
            substr = "##" + substr
          if substr in self.vocab:
            cur_substr = substr
            break
          end -= 1
        if cur_substr is None:
          # No vocab entry matches at this position: the whole word is UNK.
          is_unk = True
          break
        sub_tokens.append(SubToken(cur_substr, orig_substr, is_good=True))
        start = end
      if is_unk:
        output_tokens.append(SubToken(self.unk_token, token, is_good=True))
      else:
        output_tokens.extend(sub_tokens)
    return output_tokens
class TyDiTokenizer(object):
  """A BERT-compatible tokenizer that keeps byte indices."""

  def __init__(self, vocab_file, fail_on_mismatch=False):
    """Loads the vocabulary and wraps a `NonDestructiveFullTokenizer`.

    Args:
      vocab_file: Path to a BERT-style vocabulary file.
      fail_on_mismatch: If True, raise when a token's wordpieces do not cover
        exactly the same bytes as the token itself (otherwise just log).
    """
    self.vocab = bert_tokenization.load_vocab(vocab_file)
    self.tokenizer = NonDestructiveFullTokenizer(vocab_file=vocab_file)
    self.fail_on_mismatch = fail_on_mismatch

  def tokenize(self, text):
    """Returns only the wordpiece ids for `text`, discarding offsets."""
    wordpieces, _, _, _ = self.tokenize_with_offsets(text)
    return wordpieces

  def tokenize_with_offsets(self, text):
    """Tokenize question or context with BERT tokenizer.

    Args:
      text: text string to be tokenized.

    Returns:
      tuple:
        wordpieces_out: List[int] of wordpiece vocab ids.
        start_offsets_out: List[int] byte offset in `text` where each
          wordpiece starts (inclusive).
        end_offsets_out: List[int] byte offset in `text` where each
          wordpiece ends (inclusive).
        offset_to_wp_out: Dict[int, int] mapping byte offsets in `text` to
          the index of the covering wordpiece (-1 for separating spaces).
    """
    # First, tokenize on whitespace so that we can preserve special tokens
    # such as '[CLS]' and '[ContextId=0]' (`BertTokenizer` would split these
    # otherwise).
    whitespace_tokens = text.split(" ")

    berttok_wordpieces = []  # type List[List[int]]
    berttok_starts = []  # type List[List[int]]
    berttok_limits = []  # type List[List[int]]
    mismatched_tokens = []
    mismatch_bytes = 0
    num_tokens = len(whitespace_tokens)
    unk_token = "[UNK]"
    unk_id = self.vocab[unk_token]

    for token in whitespace_tokens:
      internal_byte_offset = 0
      subtokens = self.tokenizer.tokenize(token)
      subtoken_ids = [
          self.vocab.get(subtoken, unk_id) for subtoken, _, _ in subtokens
      ]
      subtoken_lengths = [
          data.byte_len(orig_subtoken) for _, orig_subtoken, _ in subtokens
      ]
      actual_token_length = data.byte_len(token)
      actual_subtokens_length = sum(subtoken_lengths)
      if actual_token_length != actual_subtokens_length:
        mismatched_tokens.append(token)
        mismatch_bytes += abs(actual_token_length - actual_subtokens_length)
        if self.fail_on_mismatch:
          raise ValueError(
              "Mismatched token. token='{}' (len={}) subtokens='{}' (len={})"
              .format(
                  token, actual_token_length,
                  " ".join(orig_subtoken for _, orig_subtoken, _ in subtokens),
                  actual_subtokens_length))
      inside_berttok_wordpieces = []
      inside_berttok_starts = []
      inside_berttok_limits = []
      for subtoken_id, subtoken_len in zip(subtoken_ids, subtoken_lengths):
        inside_berttok_wordpieces.append(subtoken_id)
        inside_berttok_starts.append(internal_byte_offset)
        inside_berttok_limits.append(internal_byte_offset + subtoken_len)
        # Track byte-wise offset inside token. We do *not* need to account
        # for splitting on spaces here since that's accounted *outside* of
        # each `token`. This should be exactly correct as long as BERT's
        # tokenizer doesn't change the number of bytes in a token during
        # tokenization; we check for this condition in
        # `num_mismatched_tokens`.
        internal_byte_offset += subtoken_len
      berttok_wordpieces.append(inside_berttok_wordpieces)
      berttok_starts.append(inside_berttok_starts)
      berttok_limits.append(inside_berttok_limits)

    if mismatched_tokens:
      logging.info("Have %d mismatched tokens of %d (%d bytes off): %s",
                   len(mismatched_tokens), num_tokens, mismatch_bytes,
                   " ".join(mismatched_tokens))

    # Finally, we post-process the result to ensure
    # that we don't split special tokens, taking care to preserve the mapping
    # from `text` bytes to wordpiece indices.
    wordpieces_out = []
    start_offsets_out = []
    end_offsets_out = []
    offset_to_wp_out = {}
    curr_offset = 0
    token_count = 0

    # `token`:str are the whitespace-delimited tokens from `tokenize`.
    # `wps`:List[int] are the wordpiece ids from `BertTokenizer` within each
    # `token`.
    # `wp_starts`:List[int] are the byte starts for each wordpiece.
    # `wp_limits`:List[int] are the byte limits for each wordpiece.
    for token, wps, wp_starts, wp_limits in zip(whitespace_tokens,
                                                berttok_wordpieces,
                                                berttok_starts, berttok_limits):
      # If it is a special token (e.g. [UNK]), don't tokenize into wordpieces.
      if self.is_special_token(token):
        vocab_id = self.get_vocab_id(token)
        # Iterate over the text byte offsets covered by this token and
        # associate each with this wordpice index.
        wp_index = len(wordpieces_out)
        for j in range(0, data.byte_len(token)):
          offset_to_wp_out[j + curr_offset] = wp_index
        if vocab_id > -1:
          wordpieces_out.append(vocab_id)
        else:
          vocab_id = self.get_vocab_id("[UNK]")
          wordpieces_out.append(vocab_id)
        start_offsets_out.append(curr_offset)
        end_offsets_out.append(curr_offset + data.byte_len(token) - 1)
      else:
        # Not a special token, so keep the wordpieces.
        # `i`: index of the current wordpiece *within* the whitespace `token`.
        # `wp_start`: byte-wise start of this token within whitespace `token`.
        # `wp_limit`: byte-wise end index of this token within whitespace
        # `token`.
        for i, (wp_start, wp_limit) in enumerate(zip(wp_starts, wp_limits)):
          # `j`: byte offset *within* the current whitespace token.
          for j in range(0, data.byte_len(token)):
            if j >= wp_start and j < wp_limit:
              offset_to_wp_out[j + curr_offset] = len(wordpieces_out) + i
        wordpieces_out.extend(wps)
        start_offsets_out.extend([k + curr_offset for k in wp_starts])
        end_offsets_out.extend([k + curr_offset - 1 for k in wp_limits])
      curr_offset += data.byte_len(token)

      # Splitting space is one byte as defined in `tokenize` function.
      token_count += 1
      if token_count < len(whitespace_tokens):
        offset_to_wp_out[curr_offset] = -1
        curr_offset += 1
    assert len(start_offsets_out) == len(wordpieces_out)
    assert len(start_offsets_out) == len(end_offsets_out)
    return wordpieces_out, start_offsets_out, end_offsets_out, offset_to_wp_out

  def is_special_token(self, token):
    """Is this a special token reserved for BERT or TyDi QA modeling?"""
    # NOTE: These must also be in the system's vocabulary file, which by
    # default is `mbert_modified_vocab.txt`, which is the original mBERT
    # vocabulary with some special tokens specific to our system added in the
    # reserved (unused) vocabulary space.
    special_tokens = set([
        "[CLS]", "[SEP]", "[PAD]", "[Q]", "[YES]", "[NO]", "[NoLongAnswer]",
        "[NoShortAnswer]", "[SA]", "[/SA]", "[UNK]", "[CLS]", "[SEP]", "[MASK]"
    ])
    if token in special_tokens:
      return True
    if token.startswith("[Paragraph=") or token.startswith("[ContextId="):
      return True
    return False

  def get_vocab_id(self, special_token):
    """Gets the vocab id of a `special_token`.

    Raises:
      ValueError: If `special_token` is not in the vocabulary.
    """
    if special_token in self.vocab:
      return self.vocab[special_token]
    # Bug fix: `raise <str>` is a TypeError in Python 3 (exceptions must
    # derive from BaseException); raise a real exception type instead.
    raise ValueError(
        "Unrecognized special token: '{}'".format(special_token))

  def _flatten_inner(self, seq):
    """Creates a 2D nested list from 3D, squeezing the inner dims."""
    result = []
    for subseq in seq:
      # `subseq` is seq[i], a 2D list.
      inner = []  # `inner` will remain a 1D list.
      for subsubseq in subseq:
        # `subsubseq` is seq[i][j], a 1D list.
        inner.extend(subsubseq)
      result.append(inner)
    return result
| {"/tydi_eval.py": ["/eval_utils.py"]} |
74,678 | google-research-datasets/tydiqa | refs/heads/master | /tydi_eval.py | # coding=utf-8
# Copyright 2020 The Google Research Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
r"""Official evaluation script for the TyDi QA primary tasks.
The primary tasks are the Passage Selection Task (SelectP) and the Minimal
Answer Span Task Task (AnsSpan). This script is *not* used for the secondary
task, the SQuAD-compatible gold Passage (GoldP) task.
------------------------------------------------------------------------------
Example usage:
tydi_eval --gold_path=<path-to-gold-files> --predictions_path=<path_to_jsonl>
This will compute both the official byte-level F1 scores, recall@precision
tables for both passage and minimal answers (if the optional answer scores are
provided), and also breakdown per language.
Note that R@P are only meaningful if your model populates the score fields
of the prediction JSON format (which is not required).
gold_path should point to a single N way annotated dev data in the
original download format (gzipped jsonlines) or jsonlines.
predictions_path should point to a jsonl file (one json object per line),
where each line contains the predictions in the format given below.
------------------------------------------------------------------------------
Prediction format (written on multiple lines here for clarity, but each
prediction should be a single line in your output file):
{
'example_id': -2226525965842375672,
'passage_answer_index': 2,
'passage_answer_score': 13.5,
'minimal_answer': {'start_byte_offset': 64206, 'end_byte_offset': 64280},
'minimal_answer_score': 26.4,
'yes_no_answer': 'NONE'
}
The prediction format mirrors the annotation format in defining each passage
or minimal answer span both in terms of byte offsets.
If start_byte_offset >= 0 and end_byte_offset >=0, use byte offsets,
else no span is defined (null answer).
The minimal answer metric takes both minimal answer spans, and the yes/no
answer into account. If the 'minimal_answers' list contains any non/null
spans, then 'yes_no_answer' should be set to 'NONE'.
-----------------------------------------------------------------------------
Metrics:
Each prediction should be provided with a passage answer score, and a minimal
answers score. At evaluation time, the evaluation script will find a score
threshold at which F1 is maximized. All predictions with scores below this
threshold are ignored (assumed to be null). If the score is not provided,
the evaluation script considers all predictions to be valid. The script
will also output the maximum recall at precision points of >= 0.5, >= 0.75,
and >= 0.9.
Key methods:
Scoring passage answer candidates: score_passage_answer()
Scoring minimal answer candidates: score_minimal_answer(),
eval_utils.compute_partial_match_scores()
Computing language-wise F1: compute_macro_f1()
Averaging over non-English languages: main()
"""
import collections
import json
import os
import pickle
from absl import app
from absl import flags
from absl import logging
import eval_utils
# Command-line flags. `gold_path` and `predictions_path` are marked as
# required at startup (see the __main__ guard at the bottom of the file).
flags.DEFINE_string(
    'gold_path', None, 'Path to the gzip JSONL data. For '
    'multiple files, should be a glob '
    'pattern (e.g. "/path/to/files-*"')

flags.DEFINE_string('predictions_path', None,
                    'Path to JSONL file of predictions.')

# Fixed typo in the help string ("with its socres" -> "with their scores").
flags.DEFINE_bool('verbose', True,
                  'Print some sample predictions with their scores.')

flags.DEFINE_bool(
    'cache_gold_data', False,
    'Whether to cache gold data in Pickle format to speed up '
    'multiple evaluations.')

flags.DEFINE_bool('pretty_print', False, 'Whether to pretty print output.')

flags.DEFINE_integer(
    'passage_non_null_threshold', 2,
    'Require this many non-null passage answer annotations '
    'to count gold as containing a passage answer.')

flags.DEFINE_integer(
    'minimal_non_null_threshold', 2,
    'Require this many non-null minimal answer annotations '
    'to count gold as containing a minimal answer.')

FLAGS = flags.FLAGS
def score_passage_answer(gold_label_list, pred_label,
                         passage_non_null_threshold):
  """Scores a passage answer as correct or not.

  1) First decide whether gold contains a passage answer, requiring at least
     `passage_non_null_threshold` non-null annotations.
  2) The prediction counts as a match iff:
     a. Gold contains a passage answer, AND
     b. the predicted index exactly matches *one* of the non-null gold
        passage answer indices.

  Args:
    gold_label_list: A list of TyDiLabel, could be None.
    pred_label: A single TyDiLabel, could be None.
    passage_non_null_threshold: See FLAGS.passage_non_null_threshold.

  Returns:
    gold_has_answer, pred_has_answer, is_correct, score
  """
  gold_has_answer = eval_utils.gold_has_passage_answer(
      gold_label_list, passage_non_null_threshold)

  # A missing prediction is treated as a (scoreless) null prediction.
  if pred_label is None:
    return gold_has_answer, not gold_has_answer, False, 0

  pred_has_answer = pred_label.passage_answer_index >= 0
  score = pred_label.passage_score

  is_correct = False
  if gold_has_answer and pred_has_answer:
    # Even when the vote says an answer exists, individual annotators may
    # still have marked "no passage answer" (index < 0); skip those.
    is_correct = any(
        gold_label.passage_answer_index == pred_label.passage_answer_index
        for gold_label in gold_label_list
        if gold_label.passage_answer_index >= 0)

  return gold_has_answer, pred_has_answer, is_correct, score
def score_minimal_answer(gold_label_list, pred_label,
                         minimal_non_null_threshold):
  """Scores a minimal answer against the gold annotation that gives max F1.

  First decide whether gold contains a minimal answer, requiring at least
  `minimal_non_null_threshold` non-null annotations. If any gold label is
  "yes"/"no" and the prediction matches it, precision/recall/F1 are all 1.0.

  Args:
    gold_label_list: A list of TyDiLabel.
    pred_label: A single TyDiLabel.
    minimal_non_null_threshold: See FLAGS.minimal_non_null_threshold.

  Returns:
    gold_has_answer, pred_has_answer, (precision, recall, f1), score
  """
  gold_has_answer = eval_utils.gold_has_minimal_answer(
      gold_label_list, minimal_non_null_threshold)

  # A missing prediction is treated as a (scoreless) null prediction.
  if pred_label is None:
    return gold_has_answer, not gold_has_answer, (0, 0, 0), 0

  # A prediction exists if either a non-null span or a yes/no label was given.
  pred_has_answer = (not pred_label.minimal_answer_span.is_null_span()
                     or pred_label.yes_no_answer != 'none')
  score = pred_label.minimal_score  # score is optional.

  best = (0.0, 0.0, 0.0)  # (precision, recall, f1) of the closest gold match.

  if gold_has_answer and pred_has_answer:
    if pred_label.yes_no_answer != 'none':
      # System predicted a yes/no answer: full credit on an exact match with
      # any annotator.
      if any(gold_label.yes_no_answer == pred_label.yes_no_answer
             for gold_label in gold_label_list):
        best = (1.0, 1.0, 1.0)
    else:
      for gold_label in gold_label_list:
        if gold_label.minimal_answer_span.is_null_span():
          continue
        # Compute the *micro-F1* (a partial match score for this example).
        # We also compute a language-wise *macro-F1* later.
        precision, recall, f1 = eval_utils.compute_partial_match_scores(
            gold_label.minimal_answer_span, pred_label.minimal_answer_span)
        if f1 > best[2]:
          best = (precision, recall, f1)

  return gold_has_answer, pred_has_answer, best, score
def byte_slice(text, start, end):
  """Returns the UTF-8 byte range [start, end) of `text`, decoded.

  Bug fix: the previous implementation called `str()` on a `bytes` object,
  which in Python 3 yields the repr (e.g. "b'abc'"), not the decoded text.
  Offsets that split a multi-byte character are decoded with U+FFFD
  replacement rather than raising (this is only used for logging).

  Args:
    text: Unicode string.
    start: Start byte offset (inclusive).
    end: End byte offset (exclusive).

  Returns:
    The text covered by the byte range, as a `str`.
  """
  return text.encode('utf-8')[start:end].decode('utf-8', errors='replace')
def score_answers(gold_annotation_dict, pred_dict):
  """Scores all answers for all documents.

  Args:
    gold_annotation_dict: a dict from example id to list of `TyDiLabel`s
      (one per annotator).
    pred_dict: a dict from example id to a single predicted `TyDiLabel`.

  Returns:
    passage_answer_stats: List of scores for passage answers, sorted by
      descending prediction score (the order `compute_pr_curves` relies on).
    minimal_answer_stats: List of scores for minimal answers, sorted the
      same way.
  """
  gold_id_set = set(gold_annotation_dict.keys())
  pred_id_set = set(pred_dict.keys())

  # Warn (but do not fail) about ids present on only one side; missing
  # predictions are scored as null predictions below.
  unpredicted = gold_id_set - pred_id_set
  unexpected = pred_id_set - gold_id_set
  if unpredicted:
    logging.warning('Predictions missing for %d examples.', len(unpredicted))
    logging.info(' Missing ids: %s', sorted(unpredicted))
  if unexpected:
    logging.warning(
        'Found predictions for %d examples that do not appear in the gold data.',
        len(unexpected))
    logging.info(' Unexpected ids: %s', sorted(unexpected))

  passage_answer_stats = []
  minimal_answer_stats = []
  example_count = 0
  for example_id in gold_id_set:
    example_count += 1
    gold = gold_annotation_dict[example_id]
    # `pred` is None for unpredicted examples; the scorers handle that.
    pred = pred_dict.get(example_id)
    passage_answer_stats.append(
        score_passage_answer(gold, pred, FLAGS.passage_non_null_threshold))
    minimal_answer_stats.append(
        score_minimal_answer(gold, pred, FLAGS.minimal_non_null_threshold))

    if not FLAGS.verbose:
      continue
    # Verbose mode: log gold vs. predicted minimal spans for inspection.
    if pred is None:
      continue
    pred_min_start = pred.minimal_answer_span.start_byte_offset
    pred_min_end = pred.minimal_answer_span.end_byte_offset
    gold_min_start = gold[0].minimal_answer_span.start_byte_offset
    gold_min_end = gold[0].minimal_answer_span.end_byte_offset
    if gold_min_start >= 0:
      logging.info('---')
      logging.info(gold[0].example_id)
      logging.info(gold[0].question_text)
      logging.info('gold offsets %d, %d', gold_min_start, gold_min_end)
      logging.info('pred offsets %d, %d', pred_min_start, pred_min_end)
      logging.info('gold answer: (%s)',
                   byte_slice(gold[0].plaintext, gold_min_start, gold_min_end))
      logging.info('pred answer: (%s)',
                   byte_slice(pred.plaintext, pred_min_start, pred_min_end))
      # Each stats entry is (has_gold, has_pred, (p, r, f1), score):
      # [-1] is the score and [-2] is the (precision, recall, f1) triple.
      logging.info('score %.2f', minimal_answer_stats[-1][-1])
      logging.info('f1: %.2f, p: %.2f, r: %.2f',
                   minimal_answer_stats[-1][-2][2],
                   minimal_answer_stats[-1][-2][0],
                   minimal_answer_stats[-1][-2][1])

  # use the 'score' column, which is last
  passage_answer_stats.sort(key=lambda x: x[-1], reverse=True)
  minimal_answer_stats.sort(key=lambda x: x[-1], reverse=True)

  return passage_answer_stats, minimal_answer_stats
def compute_macro_f1(answer_stats, prefix=''):
  """Computes macro precision/recall/F1 from a list of per-example scores.

  This computes the *language-wise macro F1*. For minimal answers, the
  per-example entries already carry a partial-match F1, which feeds into
  this computation via `answer_stats`.

  Args:
    answer_stats: List of per-example (has_gold, has_pred, f1, score) tuples.
    prefix (''): Prefix to prepend to each key of the score dictionary.

  Returns:
    OrderedDict mapping measurement names to scores.
  """
  has_gold_flags, has_pred_flags, f1_values, _ = zip(*answer_stats)
  total_f1 = sum(f1_values)
  macro_precision = eval_utils.safe_divide(total_f1, sum(has_pred_flags))
  macro_recall = eval_utils.safe_divide(total_f1, sum(has_gold_flags))
  macro_f1 = eval_utils.safe_divide(2 * macro_precision * macro_recall,
                                    macro_precision + macro_recall)
  return collections.OrderedDict({
      prefix + 'n': len(answer_stats),
      prefix + 'f1': macro_f1,
      prefix + 'precision': macro_precision,
      prefix + 'recall': macro_recall,
  })
def compute_final_f1(passage_answer_stats, minimal_answer_stats):
  """Computes overall F1 given passage and minimal answers, ignoring scores.

  Note: this assumes the answers have already been thresholded by score.

  Args:
    passage_answer_stats: List of passage answer scores.
    minimal_answer_stats: List of minimal answer scores.

  Returns:
    Dictionary of name (string) -> score.
  """
  scores = compute_macro_f1(passage_answer_stats, prefix='passage-answer-')
  minimal_scores = compute_macro_f1(minimal_answer_stats,
                                    prefix='minimal-answer-')
  scores.update(minimal_scores)
  return scores
def compute_pr_curves(answer_stats, targets=None):
  """Computes PR curve and returns R@P for specific targets.

  The values are computed as follows: find the (precision, recall) point
  with maximum recall and where precision > target.

  This is only relevant if you return the system scores in your predictions.
  You may find this useful when attempting to tune the threshold for your
  system on the dev set before requesting an evaluation on the test set
  via the leaderboard.

  NOTE(review): this appears to assume `answer_stats` is sorted by
  descending prediction score (as produced by `score_answers`), so each
  list prefix corresponds to keeping only predictions above some score
  threshold — confirm before calling with unsorted stats.

  Arguments:
    answer_stats: List of statistic tuples from the answer scores.
    targets (None): List of precision thresholds to target.

  Returns:
    List of table with rows: [target, r, p, score].
  """
  total_f1 = 0
  total_has_pred = 0
  total_has_gold = 0

  # Count the number of gold annotations.
  for has_gold, _, _, _ in answer_stats:
    total_has_gold += has_gold

  # Keep track of the point of maximum recall for each target.
  max_recall = [0 for _ in targets]
  max_precision = [0 for _ in targets]
  max_scores = [None for _ in targets]

  # Only keep track of unique thresholds in this dictionary.
  scores_to_stats = collections.OrderedDict()

  # Loop through every possible threshold and compute precision + recall.
  for has_gold, has_pred, is_correct_or_f1, score in answer_stats:
    # Passage stats carry a boolean correctness flag here; minimal-answer
    # stats carry a (precision, recall, f1) tuple instead.
    if isinstance(is_correct_or_f1, tuple):
      _, _, f1 = is_correct_or_f1
    else:
      f1 = is_correct_or_f1

    total_f1 += f1
    total_has_pred += has_pred

    precision = eval_utils.safe_divide(total_f1, total_has_pred)
    recall = eval_utils.safe_divide(total_f1, total_has_gold)

    # If there are any ties, this will be updated multiple times until the
    # ties are all counted.
    scores_to_stats[score] = [precision, recall]

  best_f1 = 0.0
  best_precision = 0.0
  best_recall = 0.0
  best_threshold = 0.0

  for threshold, (precision, recall) in scores_to_stats.items():
    # Match the thresholds to the find the closest precision above some target.
    for t, target in enumerate(targets):
      if precision >= target and recall > max_recall[t]:
        max_recall[t] = recall
        max_precision[t] = precision
        max_scores[t] = threshold

    # Compute optimal threshold.
    f1 = eval_utils.safe_divide(2 * precision * recall, precision + recall)
    if f1 > best_f1:
      best_f1 = f1
      best_precision = precision
      best_recall = recall
      best_threshold = threshold

  return ((best_f1, best_precision, best_recall, best_threshold),
          list(zip(targets, max_recall, max_precision, max_scores)))
def print_r_at_p_table(answer_stats):
  """Pretty prints the optimal-threshold scores and the default R@P table."""
  opt_result, pr_table = compute_pr_curves(
      answer_stats, targets=[0.5, 0.75, 0.9])
  best_f1, best_precision, best_recall, best_threshold = opt_result

  print('Optimal threshold: {:.5}'.format(best_threshold))
  print(' F1 / P / R')
  print('{: >7.2%} / {: >7.2%} / {: >7.2%}'.format(best_f1, best_precision,
                                                   best_recall))
  for target, recall, precision, threshold in pr_table:
    print('R@P={}: {:.2%} (actual p={:.2%}, score threshold={:.4})'.format(
        target, recall, precision, threshold))
def get_metrics_as_dict(gold_path, prediction_path):
  """Library version of the end-to-end evaluation.

  Args:
    gold_path: Path to a single JSONL data file. May be gzipped.
    prediction_path: Path to the JSONL file of prediction data.

  Returns:
    metrics: A dictionary mapping string names to metric scores.
  """
  gold_dict = eval_utils.read_annotation(gold_path)
  pred_dict = eval_utils.read_prediction_jsonl(prediction_path)
  passage_stats, minimal_stats = score_answers(gold_dict, pred_dict)
  return get_metrics_with_answer_stats(passage_stats, minimal_stats)
def get_metrics_with_answer_stats(passage_answer_stats, minimal_answer_stats):
  """Generates the metrics dict from passage and minimal answer stats."""

  def _metric_dict(answer_stats, prefix=''):
    # Compute all metrics for one set of answer statistics.
    opt_result, pr_table = compute_pr_curves(
        answer_stats, targets=[0.5, 0.75, 0.9])
    f1, precision, recall, threshold = opt_result
    metrics = collections.OrderedDict({
        'best-threshold-f1': f1,
        'best-threshold-precision': precision,
        'best-threshold-recall': recall,
        'best-threshold': threshold,
    })
    for target, recall, precision, _ in pr_table:
      metrics['recall-at-precision>={:.2}'.format(target)] = recall
      metrics['precision-at-precision>={:.2}'.format(target)] = precision
    # Add the prefix before returning.
    return {prefix + key: value for key, value in metrics.items()}

  metrics = _metric_dict(passage_answer_stats, 'passage-')
  metrics.update(_metric_dict(minimal_answer_stats, 'minimal-'))
  return metrics
def get_latex_str(f1, precision, recall):
  """Formats scores (fractions in [0, 1]) as a LaTeX \\fpr macro call."""
  # Scores are rendered as percentages with one decimal place.
  return '\\fpr{%.1f}{%.1f}{%.1f}' % (f1 * 100, precision * 100, recall * 100)
def main(_):
  """Runs the end-to-end TyDi QA primary-task evaluation and prints results."""
  cache_path = os.path.join(os.path.dirname(FLAGS.gold_path), 'cache')
  if FLAGS.cache_gold_data and os.path.exists(cache_path):
    logging.info('Reading from cache: %s', cache_path)
    # Bug fix: pickle streams are binary, so the cache must be opened in
    # 'rb'/'wb' mode (text-mode 'r'/'w' fails under Python 3). Using `with`
    # also closes the handle deterministically.
    with open(cache_path, 'rb') as cache_file:
      tydi_gold_dict = pickle.load(cache_file)
  else:
    tydi_gold_dict = eval_utils.read_annotation(FLAGS.gold_path)
    if FLAGS.cache_gold_data:
      logging.info('Caching gold data for future to: %s', cache_path)
      with open(cache_path, 'wb') as cache_file:
        pickle.dump(tydi_gold_dict, cache_file)

  # Report how many gold examples contain a minimal answer under the
  # configured annotation threshold.
  total_ans_count = 0
  for ans in tydi_gold_dict.values():
    gold_has_answer = eval_utils.gold_has_minimal_answer(
        ans, FLAGS.minimal_non_null_threshold)
    total_ans_count += gold_has_answer
  logging.info('%d examples have minimal answers', total_ans_count)
  logging.info('*' * 40)

  tydi_pred_dict = eval_utils.read_prediction_jsonl(FLAGS.predictions_path)

  # Bucket gold annotations and predictions by language so each language can
  # be scored (and thresholded) independently.
  per_lang_gold = {}
  per_lang_pred = {}
  for ex_id, ex in tydi_gold_dict.items():
    per_lang_gold.setdefault(ex[0].language, {})[ex_id] = ex
  for ex_id, ex in tydi_pred_dict.items():
    per_lang_pred.setdefault(ex.language, {})[ex_id] = ex

  # (f1s, precisions, recalls) accumulated over non-English languages.
  macro_avg_passage_scores = ([], [], [])
  macro_avg_minimal_scores = ([], [], [])
  language_list = [
      'english', 'arabic', 'bengali', 'finnish', 'indonesian', 'japanese',
      'swahili', 'korean', 'russian', 'telugu', 'thai'
  ]
  for lang in language_list:
    if lang in per_lang_pred:
      passage_answer_stats, minimal_answer_stats = score_answers(
          per_lang_gold.get(lang, {}), per_lang_pred[lang])

      # Passage selection task.
      opt_result, _ = compute_pr_curves(passage_answer_stats, targets=[0.5])
      f1, precision, recall, _ = opt_result
      if lang != 'english':
        # English is excluded from the official macro average.
        macro_avg_passage_scores[0].append(f1)
        macro_avg_passage_scores[1].append(precision)
        macro_avg_passage_scores[2].append(recall)
      print('Passage & ' + lang + ' & ' + get_latex_str(f1, precision, recall))

      # Minimal answer span task.
      opt_result, _ = compute_pr_curves(minimal_answer_stats, targets=[0.5])
      f1, precision, recall, _ = opt_result
      if lang != 'english':
        macro_avg_minimal_scores[0].append(f1)
        macro_avg_minimal_scores[1].append(precision)
        macro_avg_minimal_scores[2].append(recall)
      print('Minimal Answer & ' + lang + ' & ' +
            get_latex_str(f1, precision, recall))

      if FLAGS.pretty_print:
        print('*' * 20)
        print(lang)
        print('Language: %s (%d)' % (lang, len(per_lang_gold.get(lang, {}))))
        print('*' * 20)
        print('PASSAGE ANSWER R@P TABLE:')
        print_r_at_p_table(passage_answer_stats)
        print('*' * 20)
        print('MINIMAL ANSWER R@P TABLE:')
        print_r_at_p_table(minimal_answer_stats)
      else:
        metrics = get_metrics_with_answer_stats(passage_answer_stats,
                                                minimal_answer_stats)
        print(json.dumps(metrics))

  print('Total # examples in gold: %d, # ex. in pred: %d (including english)' %
        (len(tydi_gold_dict), len(tydi_pred_dict)))

  f1_list, precision_list, recall_list = macro_avg_passage_scores
  print('*** Macro Over %d Languages, excluding English **' % len(f1_list))
  avg_passage_f1 = eval_utils.safe_average(f1_list)
  avg_passage_recall = eval_utils.safe_average(recall_list)
  avg_passage_precision = eval_utils.safe_average(precision_list)
  print('Passage F1:%.3f P:%.3f R:%3f' %
        (avg_passage_f1, avg_passage_precision, avg_passage_recall))
  print(get_latex_str(avg_passage_f1, avg_passage_precision,
                      avg_passage_recall))

  f1_list, precision_list, recall_list = macro_avg_minimal_scores
  avg_minimal_f1 = eval_utils.safe_average(f1_list)
  avg_minimal_recall = eval_utils.safe_average(recall_list)
  avg_minimal_precision = eval_utils.safe_average(precision_list)
  print('Minimal F1:%.3f P:%.3f R:%3f' %
        (avg_minimal_f1, avg_minimal_precision, avg_minimal_recall))
  print(get_latex_str(avg_minimal_f1, avg_minimal_precision,
                      avg_minimal_recall))
  print('*** / Aggregate Scores ****')

  aggregate_metrics = {'avg_passage_f1': avg_passage_f1,
                       'avg_passage_recall': avg_passage_recall,
                       'avg_passage_precision': avg_passage_precision,
                       'avg_minimal_f1': avg_minimal_f1,
                       'avg_minimal_recall': avg_minimal_recall,
                       'avg_minimal_precision': avg_minimal_precision}
  print(json.dumps(aggregate_metrics))
if __name__ == '__main__':
  # Both paths are mandatory; `app.run` parses flags and then invokes `main`.
  flags.mark_flag_as_required('gold_path')
  flags.mark_flag_as_required('predictions_path')
  app.run(main)
| {"/tydi_eval.py": ["/eval_utils.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.