index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
1,313
|
AlexsandroMO/Bitcoin
|
refs/heads/master
|
/Read_SQL.py
|
import pandas as pd
import pandasql as pdsql
import sqlite3
from datetime import date
from datetime import datetime
def read_sql_btc():
    """Read the full VARBTC table into a DataFrame.

    Returns:
        pandas.DataFrame: every row of ``VARBTC`` from ``DB/DB_COINS.db``.
    """
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Plain string: the query has no placeholders, so the f-string was pointless.
        return pd.read_sql_query("SELECT * FROM VARBTC;", conn)
    finally:
        # Close the connection even if the query raises (missing table, bad DB file).
        conn.close()
def read_sql_wallet():
    """Read the full COINCOIN (wallet) table into a DataFrame.

    Returns:
        pandas.DataFrame: every row of ``COINCOIN`` from ``DB/DB_COINS.db``.
    """
    conn = sqlite3.connect('DB/DB_COINS.db')
    try:
        # Plain string: no placeholders, so no f-string is needed.
        return pd.read_sql_query("SELECT * FROM COINCOIN;", conn)
    finally:
        # Close the connection even if the query raises.
        conn.close()
|
{"/Write_SQL.py": ["/CreateTable_SQL.py"]}
|
1,314
|
AlexsandroMO/Bitcoin
|
refs/heads/master
|
/coin/tests.py
|
from django.test import TestCase
# pip install django-crispy-forms
# Developer notes kept as a module-level string literal (it has no runtime
# effect): a reminder of the git workflow for pushing this project to GitHub.
'''Upload documents on Github
git clone <nome>
<entra na pasta criada>
git add .
git commit -m "texto"
git push
'''
|
{"/Write_SQL.py": ["/CreateTable_SQL.py"]}
|
1,332
|
glorizen/hi10enc
|
refs/heads/master
|
/app.py
|
import os
from flask import Flask
from flask import request
from flask import jsonify
from flask import render_template
from flask import send_from_directory
from parsers import MediaParser
from parsers import AvsParser
# Flask application object for the hi10enc web UI.
app = Flask(__name__)
# Working directory for uploads.
# NOTE(review): hard-coded Windows path — presumably a dev-only setting; confirm.
app.config['UPLOAD_FOLDER'] = 'd:/temp'
# --- Static/theme placeholder routes and the landing page. ---
@app.route('/static/metronic_v5.0.2/metronic_v5.0.2/theme/dist/html/default')
def default():
    """Placeholder for the Metronic 'default' demo page (not implemented)."""
    # return send_from_directory(app.config['UPLOAD_FOLDER'])
    return None
@app.route('/static/metronic_v5.0.2/metronic_v5.0.2/theme/dist/html/demo2')
def demo2():
    """Placeholder for the Metronic 'demo2' page (not implemented)."""
    # return send_from_directory(app.config['UPLOAD_FOLDER'])
    return None
@app.route('/static/styles')
def styles():
    """Placeholder for serving stylesheets (not implemented)."""
    return None
@app.route('/static/scripts')
def scripts():
    """Placeholder for serving scripts (not implemented)."""
    return None
@app.route('/')
def index():
    """Render the site landing page."""
    return render_template('site_specific/index.html')
# --- Stub endpoints: each returns a fixed placeholder string until the
# corresponding encode/extract/info/merge feature is implemented. ---
@app.route('/encode/video')
def video_command():
    """Stub: will build the video encode command."""
    return 'Video Command Here.'
@app.route('/encode/audio')
def audio_command():
    """Stub: will build the audio encode command."""
    return 'Audio Command Here.'
@app.route('/extract/subtitle')
def sub_extract_command():
    """Stub: will build the subtitle extraction command."""
    return 'Sub-Extraction Command Here.'
@app.route('/extract/video')
def video_extract_command():
    """Stub: will build the video extraction command."""
    return 'Video-Extraction Command Here.'
@app.route('/extract/audio')
def audio_extract_commmand():
    """Stub: will build the audio extraction command."""
    return 'Audio-Extraction Command Here.'
@app.route('/info/ffmpeg')
def ffmpeg_info():
    """Stub: will report ffmpeg build info."""
    return 'ffmpeg info Here.'
@app.route('/info/x264')
def x264_info():
    """Stub: will report x264 build info."""
    return 'X264 info here.'
@app.route('/info/x265')
def x265_info():
    """Stub: will report x265 build info."""
    return 'X265 info here.'
@app.route('/info/libopus')
def libopus_info():
    """Stub: will report libopus build info."""
    return 'libopus info here.'
@app.route('/info/libfdk_aac')
def libfdk_info():
    """Stub: will report libfdk_aac build info."""
    return 'libfdk_aac info here.'
@app.route('/merge/mkvmerge')
def mkvmerge_command():
    """Stub: will build the mkvmerge mux command."""
    return 'mkvmerge command here.'
@app.route('/ajax/metadata', methods=["GET", "POST"])
def ajax_parse_metadata():
    """Parse posted MediaInfo XML (and AviSynth script) into per-track JSON.

    Expects a JSON body with keys ``mediainfo`` and ``avscript``.
    Returns HTTP 400 when no MediaInfo XML is supplied.
    """
    xml_string = request.json['mediainfo']
    avs_string = request.json['avscript']
    if not xml_string:
        # BUG FIX: the original guard was `if not xml_string: pass`, which fell
        # through and crashed inside MediaParser; reject the request explicitly.
        return jsonify({'error': 'mediainfo XML is required'}), 400
    media_parser = MediaParser(xml_string)
    # Constructed for its side effects only; its parsed data is not used yet.
    avs_parser = AvsParser(avs_string)
    data = dict()
    data['general_details'] = media_parser.get_general_details(media_parser.mediainfo)
    data['video_details'] = media_parser.get_video_details(media_parser.mediainfo)
    data['audio_details'] = media_parser.get_audio_details(media_parser.mediainfo)
    data['subtitle_details'] = media_parser.get_subtitle_details(media_parser.mediainfo)
    data['menu_details'] = media_parser.get_menu_details(media_parser.mediainfo)
    return jsonify(data)
|
{"/app.py": ["/parsers.py"]}
|
1,333
|
glorizen/hi10enc
|
refs/heads/master
|
/parsers.py
|
from pymediainfo import MediaInfo
class MediaParser(object):
    """Wraps a pymediainfo ``MediaInfo`` object and extracts per-track details.

    ``xml_string`` is assumed to be MediaInfo XML output posted by the browser
    (see /app.py) — TODO confirm with the caller.
    """
    def __init__(self, xml_string):
        self.mediainfo = MediaInfo(xml_string)
        # Raw dict form of the parsed metadata; not read by the visible callers.
        self.metadata = self.mediainfo.to_data()
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    def get_general_details(self, mediainfo):
        """Return a list of dicts, one per 'General' track."""
        general_details = list()
        for track in mediainfo.tracks:
            if 'general' in track.track_type.lower():
                track_details = dict()
                track_details['file_name'] = track.file_name
                track_details['file_extension'] = track.file_extension
                track_details['file_size'] = track.file_size
                track_details['codec'] = track.codec
                # NOTE(review): float() raises TypeError if duration is absent
                # (pymediainfo returns None for missing attributes) — confirm
                # inputs always carry a duration.
                track_details['duration'] = float(track.duration)
                track_details['stream_size'] = track.stream_size
                track_details['attachments'] = track.attachments
                general_details.append(track_details)
        return general_details
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    def get_video_details(self, mediainfo):
        """Return a list of dicts, one per video track."""
        vid_details = list()
        for track in mediainfo.tracks:
            if 'video' in track.track_type.lower():
                track_details = dict()
                track_details['_id'] = track.track_id
                track_details['codec'] = track.codec
                track_details['frame_rate_mode'] = track.frame_rate_mode
                track_details['frame_rate'] = float(track.frame_rate)
                track_details['resolution'] = (track.width, track.height)
                track_details['duration'] = float(track.duration)
                track_details['bit_rate'] = float(track.bit_rate)
                track_details['bit_depth'] = track.bit_depth
                track_details['stream_size'] = track.stream_size
                track_details['display_aspect_ratio'] = float(track.display_aspect_ratio)
                track_details['title'] = track.title
                track_details['language'] = track.language
                track_details['default'] = track.default
                track_details['forced'] = track.forced
                vid_details.append(track_details)
        return vid_details
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    def get_audio_details(self, mediainfo):
        """Return a list of dicts, one per audio track."""
        aud_details = list()
        for track in mediainfo.tracks:
            if 'audio' in track.track_type.lower():
                track_details = dict()
                track_details['_id'] = track.track_id
                track_details['codec'] = track.codec
                track_details['duration'] = float(track.duration)
                track_details['bit_rate'] = track.bit_rate
                # pymediainfo exposes the channel count as `channel_s`.
                track_details['channels'] = track.channel_s
                track_details['sampling_rate'] = track.sampling_rate
                track_details['stream_size'] = track.stream_size
                track_details['title'] = track.title
                track_details['language'] = track.language
                track_details['default'] = track.default
                track_details['forced'] = track.forced
                aud_details.append(track_details)
        return aud_details
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    def get_subtitle_details(self, mediainfo):
        """Return a list of dicts, one per subtitle ('Text') track."""
        # NOTE(review): the local is named aud_details (copy/paste from the
        # audio method); harmless, but renaming would improve readability.
        aud_details = list()
        for track in mediainfo.tracks:
            if 'text' in track.track_type.lower():
                track_details = dict()
                track_details['_id'] = track.track_id
                track_details['codec'] = track.codec
                track_details['duration'] = float(track.duration)
                track_details['bit_rate'] = track.bit_rate
                track_details['stream_size'] = track.stream_size
                track_details['title'] = track.title
                track_details['language'] = track.language
                track_details['default'] = track.default
                track_details['forced'] = track.forced
                aud_details.append(track_details)
        return aud_details
    #>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
    #<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
    def get_menu_details(self, mediainfo):
        """Return chapter lists: one list of (timestamp, name) pairs per menu track."""
        menu_details = list()
        for track in mediainfo.tracks:
            if 'menu' in track.track_type.lower():
                menu_data = track.to_data()
                menu = list()
                for key in menu_data:
                    # Chapter keys look like HH_MM_SS...; keep only keys that
                    # are digits once underscores are removed, and restore the
                    # timestamp separators.
                    if key.replace('_', str()).isdigit():
                        menu.append((key.replace('_', ':'), menu_data[key]))
                menu_details.append(menu)
        return menu_details
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
#>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>
#<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<
class AvsParser(object):
    """Parses an AviSynth script for custom encode options and chapter markers.

    Recognized directive prefixes inside the script:
      ``##>``   option lines of the form ``##>key=value``
      ``##!!``  chapter lines of the form ``##!!> Name [start:end] <``
    """

    def __init__(self, avs_string):
        """Split the script into lines, keeping code lines plus the
        ``##>`` / ``##!!`` directive comments (plain ``#`` comments dropped)."""
        self.avs_content = [line for line in avs_string.split('\n')
                            if line and not line.startswith('#') or line.startswith('##>')
                            or line.startswith('##!!')]
        print(self.avs_content)

    def parse_avs_chapters(self, avs_content):
        """Return ``{'names': [...], 'frames': [(start, end), ...]}``, or
        None when the script contains no chapter directives."""
        avs_chap_string = ''.join([x.strip('##!!') for x in avs_content
                                   if x.startswith('##!!') and '>' in x and '<' in x])
        if not avs_chap_string:
            return None
        # One chapter per comma-separated chunk; shed the decorative >, <,
        # spaces and newlines around each entry.
        filtered_chaps = [x.strip('>').strip('<').strip(' ').strip('\n')
                          for x in avs_chap_string.split(',')]
        avs_chapters = dict()
        avs_chapters['names'] = list()
        avs_chapters['frames'] = list()
        for chapter in filtered_chaps:
            name = chapter.split('[')[0]
            start = int(chapter.split('[')[1].split(':')[0].strip(' '))
            end = int(chapter.split('[')[1].split(':')[1].split(']')[0].strip(' '))
            avs_chapters['names'].append(name)
            avs_chapters['frames'].append((start, end))
        return avs_chapters

    def get_custom_commands(self, avs_content):
        """Return a dict of options parsed from ``##>key=value`` lines.

        BUG FIX vs. the original: removed dead code that opened an undefined
        ``input_file`` (NameError) and a call to ``parse_avs_chapters`` made
        without ``self`` (also a NameError) whose result was discarded.
        """
        commands_dict = dict()
        # NOTE: str.strip('##>') removes any run of '#'/'>' characters from
        # both ends, not just the prefix; fine for 'key=value' payloads.
        commands = ','.join([x.strip('##>') for x in avs_content if x.startswith('##>')]).split(',')
        for command in commands:
            if not command or len(command) < 3:
                continue
            option, value = command.split('=')
            commands_dict[option] = value.strip('\r').strip('\n')
        return commands_dict
|
{"/app.py": ["/parsers.py"]}
|
1,336
|
briis/unifiprotect
|
refs/heads/master
|
/custom_components/unifiprotect/__init__.py
|
"""UniFi Protect Platform."""
from __future__ import annotations
import asyncio
from datetime import timedelta
import logging
from aiohttp import CookieJar
from aiohttp.client_exceptions import ServerDisconnectedError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
Platform,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryAuthFailed, ConfigEntryNotReady
from homeassistant.helpers import entity_registry as er
from homeassistant.helpers.aiohttp_client import async_create_clientsession
from pyunifiprotect import NotAuthorized, NvrError, ProtectApiClient
from pyunifiprotect.data import ModelType
from .const import (
CONF_ALL_UPDATES,
CONF_DOORBELL_TEXT,
CONF_OVERRIDE_CHOST,
CONFIG_OPTIONS,
DEFAULT_SCAN_INTERVAL,
DEVICES_FOR_SUBSCRIBE,
DEVICES_THAT_ADOPT,
DOMAIN,
MIN_REQUIRED_PROTECT_V,
OUTDATED_LOG_MESSAGE,
PLATFORMS,
)
from .data import ProtectData
from .services import async_cleanup_services, async_setup_services
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=DEFAULT_SCAN_INTERVAL)
# NOTE(review): @callback marks a synchronous HA callback, yet this function is
# async — confirm whether the decorator is intentional.
@callback
async def _async_migrate_data(
    hass: HomeAssistant, entry: ConfigEntry, protect: ProtectApiClient
) -> None:
    """One-time migration of config-entry options and entity unique_ids.

    Rewrites MAC-based unique_ids to device-id-based ones using the current
    Protect bootstrap. Uses CONF_ALL_UPDATES in entry.options as the
    "already migrated" marker.
    """
    # already up to date, skip
    if CONF_ALL_UPDATES in entry.options:
        return
    _LOGGER.info("Starting entity migration...")
    # migrate entry
    options = dict(entry.options)
    data = dict(entry.data)
    options[CONF_ALL_UPDATES] = False
    if CONF_DOORBELL_TEXT in options:
        del options[CONF_DOORBELL_TEXT]
    hass.config_entries.async_update_entry(entry, data=data, options=options)
    # migrate entities
    registry = er.async_get(hass)
    # Lookup tables built from the live bootstrap: device MAC -> device id,
    # and camera MAC -> preferred (first RTSP-enabled) channel id.
    mac_to_id: dict[str, str] = {}
    mac_to_channel_id: dict[str, str] = {}
    bootstrap = await protect.get_bootstrap()
    for model in DEVICES_THAT_ADOPT:
        # bootstrap attribute is the pluralized model name (e.g. "cameras").
        attr = model.value + "s"
        for device in getattr(bootstrap, attr).values():
            mac_to_id[device.mac] = device.id
            if model != ModelType.CAMERA:
                continue
            for channel in device.channels:
                channel_id = str(channel.id)
                if channel.is_rtsp_enabled:
                    break
            # NOTE(review): if no channel is RTSP-enabled this keeps the last
            # channel id; if a camera has no channels, channel_id from a
            # previous camera leaks in — confirm cameras always have channels.
            mac_to_channel_id[device.mac] = channel_id
    count = 0
    entities = er.async_entries_for_config_entry(registry, entry.entry_id)
    for entity in entities:
        new_unique_id: str | None = None
        if entity.domain != Platform.CAMERA.value:
            # Non-camera ids look like "<key>_<MAC>"; rewrite to
            # "<device_id>" or "<device_id>_<key>".
            parts = entity.unique_id.split("_")
            if len(parts) >= 2:
                device_or_key = "_".join(parts[:-1])
                mac = parts[-1]
                device_id = mac_to_id[mac]
                if device_or_key == device_id:
                    new_unique_id = device_id
                else:
                    new_unique_id = f"{device_id}_{device_or_key}"
        else:
            # Camera ids are either "<key>_<MAC>" (2 parts) or already
            # "<device>_<...>_<channel>[_insecure]" shapes.
            parts = entity.unique_id.split("_")
            if len(parts) == 2:
                mac = parts[1]
                device_id = mac_to_id[mac]
                channel_id = mac_to_channel_id[mac]
                new_unique_id = f"{device_id}_{channel_id}"
            else:
                device_id = parts[0]
                channel_id = parts[2]
                extra = "" if len(parts) == 3 else "_insecure"
                new_unique_id = f"{device_id}_{channel_id}{extra}"
        if new_unique_id is None:
            continue
        _LOGGER.debug(
            "Migrating entity %s (old unique_id: %s, new unique_id: %s)",
            entity.entity_id,
            entity.unique_id,
            new_unique_id,
        )
        try:
            registry.async_update_entity(entity.entity_id, new_unique_id=new_unique_id)
        except ValueError:
            # Target unique_id already exists; leave the entity as-is.
            _LOGGER.warning(
                "Could not migrate entity %s (old unique_id: %s, new unique_id: %s)",
                entity.entity_id,
                entity.unique_id,
                new_unique_id,
            )
        else:
            count += 1
    _LOGGER.info("Migrated %s entities", count)
    if count != len(entities):
        _LOGGER.warning("%s entities not migrated", len(entities) - count)
@callback
def _async_import_options_from_data_if_missing(
    hass: HomeAssistant, entry: ConfigEntry
) -> None:
    """Move known option keys that were stored in entry.data into entry.options."""
    new_options = dict(entry.options)
    new_data = dict(entry.data)
    changed = False
    for key in CONFIG_OPTIONS:
        # Only import keys that live in data but are not yet options.
        if key in entry.options or key not in entry.data:
            continue
        new_options[key] = new_data.pop(key)
        changed = True
    if changed:
        hass.config_entries.async_update_entry(entry, data=new_data, options=new_options)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Set up the UniFi Protect config entries."""
    _async_import_options_from_data_if_missing(hass, entry)
    # unsafe=True: the NVR sets cookies against a bare IP, which the default
    # cookie jar rejects.
    session = async_create_clientsession(hass, cookie_jar=CookieJar(unsafe=True))
    protect = ProtectApiClient(
        host=entry.data[CONF_HOST],
        port=entry.data[CONF_PORT],
        username=entry.data[CONF_USERNAME],
        password=entry.data[CONF_PASSWORD],
        verify_ssl=entry.data[CONF_VERIFY_SSL],
        session=session,
        subscribed_models=DEVICES_FOR_SUBSCRIBE,
        override_connection_host=entry.options.get(CONF_OVERRIDE_CHOST, False),
        ignore_stats=not entry.options.get(CONF_ALL_UPDATES, False),
    )
    _LOGGER.debug("Connect to UniFi Protect")
    data_service = ProtectData(hass, protect, SCAN_INTERVAL, entry)
    try:
        nvr_info = await protect.get_nvr()
    except NotAuthorized as err:
        # Bad credentials: trigger the reauth flow.
        raise ConfigEntryAuthFailed(err) from err
    except (asyncio.TimeoutError, NvrError, ServerDisconnectedError) as err:
        # Transient connectivity problem: HA will retry setup later.
        raise ConfigEntryNotReady from err
    # Refuse to set up against firmware older than the supported minimum.
    if nvr_info.version < MIN_REQUIRED_PROTECT_V:
        _LOGGER.error(
            OUTDATED_LOG_MESSAGE,
            nvr_info.version,
            MIN_REQUIRED_PROTECT_V,
        )
        return False
    await _async_migrate_data(hass, entry, protect)
    if entry.unique_id is None:
        # Adopt the NVR MAC as the entry's unique id on first setup.
        hass.config_entries.async_update_entry(entry, unique_id=nvr_info.mac)
    await data_service.async_setup()
    if not data_service.last_update_success:
        raise ConfigEntryNotReady
    hass.data.setdefault(DOMAIN, {})[entry.entry_id] = data_service
    hass.config_entries.async_setup_platforms(entry, PLATFORMS)
    async_setup_services(hass)
    entry.async_on_unload(entry.add_update_listener(_async_options_updated))
    entry.async_on_unload(
        hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, data_service.async_stop)
    )
    return True
async def _async_options_updated(hass: HomeAssistant, entry: ConfigEntry) -> None:
    """Update options."""
    # Options changes require a full reload of the entry to take effect.
    entry_id = entry.entry_id
    await hass.config_entries.async_reload(entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload UniFi Protect config entry."""
    unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
    if unload_ok:
        # Stop the update coordinator, drop our stored state, and remove the
        # global services if this was the last loaded entry.
        data: ProtectData = hass.data[DOMAIN][entry.entry_id]
        await data.async_stop()
        hass.data[DOMAIN].pop(entry.entry_id)
        async_cleanup_services(hass)
    return bool(unload_ok)
async def async_migrate_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Migrate old entry."""
    _LOGGER.debug("Migrating from version %s", config_entry.version)
    if config_entry.version == 1:
        updated = dict(config_entry.data)
        # keep verify SSL false for anyone migrating to maintain backwards compatibility
        updated[CONF_VERIFY_SSL] = False
        updated.pop(CONF_DOORBELL_TEXT, None)
        config_entry.version = 2
        hass.config_entries.async_update_entry(config_entry, data=updated)
        _LOGGER.info("Migration to version %s successful", config_entry.version)
    return True
|
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
|
1,337
|
briis/unifiprotect
|
refs/heads/master
|
/custom_components/unifiprotect/binary_sensor.py
|
"""This component provides binary sensors for UniFi Protect."""
from __future__ import annotations
from copy import copy
from dataclasses import dataclass
import logging
from homeassistant.components.binary_sensor import (
BinarySensorDeviceClass,
BinarySensorEntity,
BinarySensorEntityDescription,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_LAST_TRIP_TIME, ATTR_MODEL
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from pyunifiprotect.data import NVR, Camera, Event, Light, MountType, Sensor
from .const import DOMAIN
from .data import ProtectData
from .entity import (
EventThumbnailMixin,
ProtectDeviceEntity,
ProtectNVREntity,
async_all_device_entities,
)
from .models import ProtectRequiredKeysMixin
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
_KEY_DOOR = "door"
@dataclass
class ProtectBinaryEntityDescription(
    ProtectRequiredKeysMixin, BinarySensorEntityDescription
):
    """Describes UniFi Protect Binary Sensor entity."""
    # Attribute path on the Protect device holding the last-trip timestamp
    # (surfaced as ATTR_LAST_TRIP_TIME); None when the sensor has no trip time.
    ufp_last_trip_value: str | None = None
# Maps a Sense mount type to the matching HA device class; DOOR is the
# fallback used for unmapped mount types.
MOUNT_DEVICE_CLASS_MAP = {
    MountType.GARAGE: BinarySensorDeviceClass.GARAGE_DOOR,
    MountType.WINDOW: BinarySensorDeviceClass.WINDOW,
    MountType.DOOR: BinarySensorDeviceClass.DOOR,
}
# Per-camera binary sensors: doorbell ring state and darkness.
CAMERA_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
    ProtectBinaryEntityDescription(
        key="doorbell",
        name="Doorbell",
        device_class=BinarySensorDeviceClass.OCCUPANCY,
        icon="mdi:doorbell-video",
        ufp_required_field="feature_flags.has_chime",
        ufp_value="is_ringing",
        ufp_last_trip_value="last_ring",
    ),
    ProtectBinaryEntityDescription(
        key="dark",
        name="Is Dark",
        icon="mdi:brightness-6",
        ufp_value="is_dark",
    ),
)
# Per-light binary sensors: darkness and PIR motion.
LIGHT_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
    ProtectBinaryEntityDescription(
        key="dark",
        name="Is Dark",
        icon="mdi:brightness-6",
        ufp_value="is_dark",
    ),
    ProtectBinaryEntityDescription(
        key="motion",
        name="Motion Detected",
        device_class=BinarySensorDeviceClass.MOTION,
        ufp_value="is_pir_motion_detected",
        ufp_last_trip_value="last_motion",
    ),
)
# Per-Sense-device binary sensors: contact, battery, motion, tamper.
SENSE_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
    ProtectBinaryEntityDescription(
        key=_KEY_DOOR,
        name="Contact",
        device_class=BinarySensorDeviceClass.DOOR,
        ufp_value="is_opened",
        ufp_last_trip_value="open_status_changed_at",
        ufp_enabled="is_contact_sensor_enabled",
    ),
    ProtectBinaryEntityDescription(
        key="battery_low",
        name="Battery low",
        device_class=BinarySensorDeviceClass.BATTERY,
        entity_category=EntityCategory.DIAGNOSTIC,
        ufp_value="battery_status.is_low",
    ),
    ProtectBinaryEntityDescription(
        key="motion",
        name="Motion Detected",
        device_class=BinarySensorDeviceClass.MOTION,
        ufp_value="is_motion_detected",
        ufp_last_trip_value="motion_detected_at",
        ufp_enabled="is_motion_sensor_enabled",
    ),
    ProtectBinaryEntityDescription(
        key="tampering",
        name="Tampering Detected",
        device_class=BinarySensorDeviceClass.TAMPER,
        ufp_value="is_tampering_detected",
        ufp_last_trip_value="tampering_detected_at",
    ),
)
# Camera motion sensors with event thumbnails (built separately in
# _async_motion_entities).
MOTION_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
    ProtectBinaryEntityDescription(
        key="motion",
        name="Motion",
        device_class=BinarySensorDeviceClass.MOTION,
        ufp_value="is_motion_detected",
        ufp_last_trip_value="last_motion",
    ),
)
# Per-NVR-disk health sensors; "{index}" in the name is filled in per disk.
DISK_SENSORS: tuple[ProtectBinaryEntityDescription, ...] = (
    ProtectBinaryEntityDescription(
        key="disk_health",
        name="Disk {index} Health",
        device_class=BinarySensorDeviceClass.PROBLEM,
        entity_category=EntityCategory.DIAGNOSTIC,
    ),
)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up binary sensors for UniFi Protect integration."""
    data: ProtectData = hass.data[DOMAIN][entry.entry_id]
    # Standard per-device sensors first, then the special-cased motion and
    # NVR-disk sensors.
    entities: list[ProtectDeviceEntity] = async_all_device_entities(
        data,
        ProtectDeviceBinarySensor,
        camera_descs=CAMERA_SENSORS,
        light_descs=LIGHT_SENSORS,
        sense_descs=SENSE_SENSORS,
    )
    entities.extend(_async_motion_entities(data))
    entities.extend(_async_nvr_entities(data))
    async_add_entities(entities)
@callback
def _async_motion_entities(
    data: ProtectData,
) -> list[ProtectDeviceEntity]:
    """Build one event-aware motion sensor per camera."""
    entities: list[ProtectDeviceEntity] = []
    for camera in data.api.bootstrap.cameras.values():
        for desc in MOTION_SENSORS:
            entities.append(ProtectEventBinarySensor(data, camera, desc))
            _LOGGER.debug(
                "Adding binary sensor entity %s for %s",
                desc.name,
                camera.name,
            )
    return entities
@callback
def _async_nvr_entities(
    data: ProtectData,
) -> list[ProtectDeviceEntity]:
    """Build one disk-health sensor per NVR storage device."""
    entities: list[ProtectDeviceEntity] = []
    nvr = data.api.bootstrap.nvr
    for index, _ in enumerate(nvr.system_info.storage.devices):
        for desc in DISK_SENSORS:
            entities.append(
                ProtectDiskBinarySensor(data, nvr, desc, index=index)
            )
            _LOGGER.debug(
                "Adding binary sensor entity %s",
                (desc.name or "{index}").format(index=index),
            )
    return entities
class ProtectDeviceBinarySensor(ProtectDeviceEntity, BinarySensorEntity):
    """A UniFi Protect Device Binary Sensor."""
    device: Camera | Light | Sensor
    entity_description: ProtectBinaryEntityDescription
    @callback
    def _async_update_device_from_protect(self) -> None:
        """Refresh is_on and extra attributes from the latest device state."""
        super()._async_update_device_from_protect()
        if self.entity_description.key == "doorbell":
            # Trace doorbell ring transitions at debug level.
            new_value = self.entity_description.get_ufp_value(self.device)
            if new_value != self.is_on:
                _LOGGER.debug(
                    "Changing doorbell sensor from %s to %s", self.is_on, new_value
                )
        self._attr_is_on = self.entity_description.get_ufp_value(self.device)
        if self.entity_description.ufp_last_trip_value is not None:
            last_trip = get_nested_attr(
                self.device, self.entity_description.ufp_last_trip_value
            )
            attrs = self.extra_state_attributes or {}
            self._attr_extra_state_attributes = {
                **attrs,
                ATTR_LAST_TRIP_TIME: last_trip,
            }
        # UP Sense can be any of the 3 contact sensor device classes
        if self.entity_description.key == _KEY_DOOR and isinstance(self.device, Sensor):
            # NOTE(review): this mutates the description object in place; the
            # descriptions above are module-level constants shared between
            # entities — confirm a per-entity copy is made upstream.
            self.entity_description.device_class = MOUNT_DEVICE_CLASS_MAP.get(
                self.device.mount_type, BinarySensorDeviceClass.DOOR
            )
class ProtectDiskBinarySensor(ProtectNVREntity, BinarySensorEntity):
    """A UniFi Protect NVR Disk Binary Sensor."""
    entity_description: ProtectBinaryEntityDescription
    def __init__(
        self,
        data: ProtectData,
        device: NVR,
        description: ProtectBinaryEntityDescription,
        index: int,
    ) -> None:
        """Initialize the Binary Sensor."""
        # Copy the shared description so the per-disk key/name rewrite below
        # does not leak into other disks' entities.
        description = copy(description)
        description.key = f"{description.key}_{index}"
        description.name = (description.name or "{index}").format(index=index)
        self._index = index
        super().__init__(data, device, description)
    @callback
    def _async_update_device_from_protect(self) -> None:
        """Refresh health state for this disk; unavailable if the disk is gone."""
        super()._async_update_device_from_protect()
        disks = self.device.system_info.storage.devices
        disk_available = len(disks) > self._index
        self._attr_available = self._attr_available and disk_available
        if disk_available:
            disk = disks[self._index]
            # PROBLEM device class: "on" means unhealthy.
            self._attr_is_on = not disk.healthy
            self._attr_extra_state_attributes = {ATTR_MODEL: disk.model}
class ProtectEventBinarySensor(EventThumbnailMixin, ProtectDeviceBinarySensor):
    """A UniFi Protect Device Binary Sensor with access tokens."""

    device: Camera

    @callback
    def _async_get_event(self) -> Event | None:
        """Get event from Protect device."""
        # Only surface an event while motion is actively detected.
        if self.device.is_motion_detected and self.device.last_motion_event is not None:
            return self.device.last_motion_event
        return None
|
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
|
1,338
|
briis/unifiprotect
|
refs/heads/master
|
/custom_components/unifiprotect/services.py
|
"""UniFi Protect Integration services."""
from __future__ import annotations
import asyncio
import functools
from typing import Any
from homeassistant.config_entries import ConfigEntryState
from homeassistant.const import ATTR_DEVICE_ID
from homeassistant.core import HomeAssistant, ServiceCall, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, device_registry as dr
from homeassistant.helpers.service import async_extract_referenced_entity_ids
from pydantic import ValidationError
from pyunifiprotect.api import ProtectApiClient
from pyunifiprotect.exceptions import BadRequest
import voluptuous as vol
from .const import ATTR_MESSAGE, DOMAIN
from .data import ProtectData
# Service names registered under the integration's domain.
SERVICE_ADD_DOORBELL_TEXT = "add_doorbell_text"
SERVICE_REMOVE_DOORBELL_TEXT = "remove_doorbell_text"
SERVICE_SET_DEFAULT_DOORBELL_TEXT = "set_default_doorbell_text"
# NOTE(review): "SERIVCES" is a typo, but the name is module-level API —
# renaming would break any external importers.
ALL_GLOBAL_SERIVCES = [
    SERVICE_ADD_DOORBELL_TEXT,
    SERVICE_REMOVE_DOORBELL_TEXT,
    SERVICE_SET_DEFAULT_DOORBELL_TEXT,
]
# Shared schema for all doorbell-text services: a message string plus at
# least one device target.
DOORBELL_TEXT_SCHEMA = vol.All(
    vol.Schema(
        {
            **cv.ENTITY_SERVICE_FIELDS,
            vol.Required(ATTR_MESSAGE): cv.string,
        },
    ),
    cv.has_at_least_one_key(ATTR_DEVICE_ID),
)
def _async_all_ufp_instances(hass: HomeAssistant) -> list[ProtectApiClient]:
    """All active UFP instances."""
    clients: list[ProtectApiClient] = []
    for stored in hass.data[DOMAIN].values():
        # hass.data[DOMAIN] may hold non-ProtectData values; skip them.
        if isinstance(stored, ProtectData):
            clients.append(stored.api)
    return clients
@callback
def _async_unifi_mac_from_hass(mac: str) -> str:
    """Convert a colon-separated HA MAC into UFP's bare uppercase form."""
    # MAC addresses in UFP are always caps
    stripped = mac.replace(":", "")
    return stripped.upper()
@callback
def _async_get_macs_for_device(device_entry: dr.DeviceEntry) -> list[str]:
    """Collect the UFP-formatted MAC addresses attached to a device entry."""
    macs: list[str] = []
    for conn_type, conn_value in device_entry.connections:
        if conn_type == dr.CONNECTION_NETWORK_MAC:
            macs.append(_async_unifi_mac_from_hass(conn_value))
    return macs
@callback
def _async_get_ufp_instances(
    hass: HomeAssistant, device_id: str
) -> tuple[dr.DeviceEntry, ProtectApiClient]:
    """Resolve a HA device id to its device entry and owning Protect client."""
    device_registry = dr.async_get(hass)
    device_entry = device_registry.async_get(device_id)
    if not device_entry:
        raise HomeAssistantError(f"No device found for device id: {device_id}")
    # Child devices reference their NVR via via_device_id; walk up to the root.
    if device_entry.via_device_id is not None:
        return _async_get_ufp_instances(hass, device_entry.via_device_id)
    macs = _async_get_macs_for_device(device_entry)
    matches = [
        client
        for client in _async_all_ufp_instances(hass)
        if client.bootstrap.nvr.mac in macs
    ]
    if not matches:
        # should not be possible unless user manually enters a bad device ID
        raise HomeAssistantError(  # pragma: no cover
            f"No UniFi Protect NVR found for device ID: {device_id}"
        )
    return device_entry, matches[0]
@callback
def _async_get_protect_from_call(
    hass: HomeAssistant, call: ServiceCall
) -> list[tuple[dr.DeviceEntry, ProtectApiClient]]:
    """Resolve every device referenced by a service call to (entry, client) pairs."""
    referenced = async_extract_referenced_entity_ids(hass, call)
    return [
        _async_get_ufp_instances(hass, device_id)
        for device_id in referenced.referenced_devices
    ]
async def _async_call_nvr(
    instances: list[tuple[dr.DeviceEntry, ProtectApiClient]],
    method: str,
    *args: Any,
    **kwargs: Any,
) -> None:
    """Invoke the named NVR method concurrently on every resolved client."""
    coros = [
        getattr(client.bootstrap.nvr, method)(*args, **kwargs)
        for _, client in instances
    ]
    try:
        await asyncio.gather(*coros)
    except (BadRequest, ValidationError) as err:
        # Surface API/validation failures as a user-facing HA error.
        raise HomeAssistantError(str(err)) from err
async def add_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:
    """Add a custom doorbell text message."""
    instances = _async_get_protect_from_call(hass, call)
    message: str = call.data[ATTR_MESSAGE]
    await _async_call_nvr(instances, "add_custom_doorbell_message", message)
async def remove_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:
    """Remove a custom doorbell text message."""
    instances = _async_get_protect_from_call(hass, call)
    message: str = call.data[ATTR_MESSAGE]
    await _async_call_nvr(instances, "remove_custom_doorbell_message", message)
async def set_default_doorbell_text(hass: HomeAssistant, call: ServiceCall) -> None:
    """Set the default doorbell text message."""
    instances = _async_get_protect_from_call(hass, call)
    message: str = call.data[ATTR_MESSAGE]
    await _async_call_nvr(instances, "set_default_doorbell_message", message)
def async_setup_services(hass: HomeAssistant) -> None:
    """Set up the global UniFi Protect services."""
    # Service name -> handler; all doorbell-text services share one schema.
    handlers = {
        SERVICE_ADD_DOORBELL_TEXT: add_doorbell_text,
        SERVICE_REMOVE_DOORBELL_TEXT: remove_doorbell_text,
        SERVICE_SET_DEFAULT_DOORBELL_TEXT: set_default_doorbell_text,
    }
    for name, handler in handlers.items():
        # Idempotent: skip services another entry already registered.
        if hass.services.has_service(DOMAIN, name):
            continue
        hass.services.async_register(
            DOMAIN,
            name,
            functools.partial(handler, hass),
            schema=DOORBELL_TEXT_SCHEMA,
        )
def async_cleanup_services(hass: HomeAssistant) -> None:
    """Cleanup global UniFi Protect services (if all config entries unloaded)."""
    loaded = [
        entry
        for entry in hass.config_entries.async_entries(DOMAIN)
        if entry.state == ConfigEntryState.LOADED
    ]
    # The entry being torn down still counts as LOADED here, so a count of
    # exactly one means it is the last one standing.
    if len(loaded) != 1:
        return
    for name in ALL_GLOBAL_SERIVCES:
        hass.services.async_remove(DOMAIN, name)
|
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
|
1,339
|
briis/unifiprotect
|
refs/heads/master
|
/custom_components/unifiprotect/button.py
|
"""Support for Ubiquiti's UniFi Protect NVR."""
from __future__ import annotations
import logging
from homeassistant.components.button import ButtonDeviceClass, ButtonEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from pyunifiprotect.data.base import ProtectAdoptableDeviceModel
from .const import DEVICES_THAT_ADOPT, DOMAIN
from .data import ProtectData
from .entity import ProtectDeviceEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Discover devices on a UniFi Protect NVR."""
    data: ProtectData = hass.data[DOMAIN][entry.entry_id]
    # One reboot button per adoptable Protect device.
    buttons = [
        ProtectButton(data, device)
        for device in data.get_by_types(DEVICES_THAT_ADOPT)
    ]
    async_add_entities(buttons)
class ProtectButton(ProtectDeviceEntity, ButtonEntity):
    """A Ubiquiti UniFi Protect Reboot button."""
    # Disabled by default: rebooting a device is disruptive, so the user must
    # opt in via the entity registry.
    _attr_entity_registry_enabled_default = False
    _attr_device_class = ButtonDeviceClass.RESTART
    def __init__(
        self,
        data: ProtectData,
        device: ProtectAdoptableDeviceModel,
    ) -> None:
        """Initialize an UniFi camera."""
        super().__init__(data, device)
        self._attr_name = f"{self.device.name} Reboot Device"
    async def async_press(self) -> None:
        """Press the button."""
        _LOGGER.debug("Rebooting %s with id %s", self.device.model, self.device.id)
        await self.device.reboot()
|
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
|
1,340
|
briis/unifiprotect
|
refs/heads/master
|
/custom_components/unifiprotect/utils.py
|
"""UniFi Protect Integration utils."""
from __future__ import annotations
from enum import Enum
from typing import Any
def get_nested_attr(obj: Any, attr: str) -> Any:
    """Fetch a nested attribute.

    ``attr`` is a dotted path ("a.b.c"); returns None as soon as any step
    is missing, and unwraps Enum members to their underlying value.
    """
    current = obj
    for part in attr.split("."):
        try:
            current = getattr(current, part)
        except AttributeError:
            return None
    return current.value if isinstance(current, Enum) else current
|
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
|
1,341
|
briis/unifiprotect
|
refs/heads/master
|
/custom_components/unifiprotect/models.py
|
"""The unifiprotect integration models."""
from __future__ import annotations
from collections.abc import Callable, Coroutine
from dataclasses import dataclass
import logging
from typing import Any
from homeassistant.helpers.entity import EntityDescription
from pyunifiprotect.data import NVR, ProtectAdoptableDeviceModel
from .utils import get_nested_attr
_LOGGER = logging.getLogger(__name__)
@dataclass
class ProtectRequiredKeysMixin:
    """Mixin for required keys."""
    # Dotted attribute path that must exist on the device for this
    # description to apply — presumably checked during entity discovery;
    # NOTE(review): confirm against the platform setup code.
    ufp_required_field: str | None = None
    # Dotted attribute path read from the device to produce the entity value.
    ufp_value: str | None = None
    # Alternative to ufp_value: a callable computing the value directly.
    ufp_value_fn: Callable[[ProtectAdoptableDeviceModel | NVR], Any] | None = None
    # Dotted attribute path whose truthiness reports whether the entity is enabled.
    ufp_enabled: str | None = None
    def get_ufp_value(self, obj: ProtectAdoptableDeviceModel | NVR) -> Any:
        """Return value from UniFi Protect device.

        Reads ``ufp_value`` as a dotted attribute path (via get_nested_attr)
        or falls back to ``ufp_value_fn``; exactly one of them must be set.
        """
        if self.ufp_value is not None:
            return get_nested_attr(obj, self.ufp_value)
        if self.ufp_value_fn is not None:
            return self.ufp_value_fn(obj)
        # reminder for future that one is required
        raise RuntimeError(  # pragma: no cover
            "`ufp_value` or `ufp_value_fn` is required"
        )
    def get_ufp_enabled(self, obj: ProtectAdoptableDeviceModel | NVR) -> bool:
        """Return value from UniFi Protect device."""
        if self.ufp_enabled is not None:
            return bool(get_nested_attr(obj, self.ufp_enabled))
        # No ufp_enabled path configured -> entity is always enabled.
        return True
@dataclass
class ProtectSetableKeysMixin(ProtectRequiredKeysMixin):
    """Mixin for settable values."""
    # Name of a coroutine method on the device, awaited as method(value).
    ufp_set_method: str | None = None
    # Alternative: standalone coroutine awaited as fn(device, value).
    ufp_set_method_fn: Callable[
        [ProtectAdoptableDeviceModel, Any], Coroutine[Any, Any, None]
    ] | None = None
    async def ufp_set(self, obj: ProtectAdoptableDeviceModel, value: Any) -> None:
        """Set value for UniFi Protect device.

        Dispatches to ``ufp_set_method`` (attribute name on the device) or
        ``ufp_set_method_fn``; silently does nothing when neither is set.
        """
        # Only mixed into EntityDescription subclasses, which provide .name.
        assert isinstance(self, EntityDescription)
        _LOGGER.debug("Setting %s to %s for %s", self.name, value, obj.name)
        if self.ufp_set_method is not None:
            await getattr(obj, self.ufp_set_method)(value)
        elif self.ufp_set_method_fn is not None:
            await self.ufp_set_method_fn(obj, value)
|
{"/custom_components/unifiprotect/__init__.py": ["/custom_components/unifiprotect/services.py"], "/custom_components/unifiprotect/binary_sensor.py": ["/custom_components/unifiprotect/models.py", "/custom_components/unifiprotect/utils.py"], "/custom_components/unifiprotect/models.py": ["/custom_components/unifiprotect/utils.py"]}
|
1,343
|
kawa-kokosowa/urlink
|
refs/heads/master
|
/models.py
|
# builtin
import datetime
# 3rd party
import flask_sqlalchemy
import flask_user
db = flask_sqlalchemy.SQLAlchemy()
class User(db.Model, flask_user.UserMixin):
    """Generic User data model for flask_user as seen
    in their documentation.
    http://pythonhosted.org/Flask-User/basic_app.html
    """
    id = db.Column(db.Integer, primary_key=True)
    # User authentication information
    # password stores the flask_user hash, not plaintext.
    password = db.Column(db.String(255), nullable=False, server_default='')
    reset_password_token = db.Column(db.String(100), nullable=False, server_default='')
    # User email information
    email = db.Column(db.String(255), nullable=False, unique=True)
    confirmed_at = db.Column(db.DateTime())
    # User information
    # Column is named is_active in the DB; flask_user reads .active.
    active = db.Column('is_active', db.Boolean(), nullable=False, server_default='0')
    first_name = db.Column(db.String(100), nullable=False, server_default='')
    last_name = db.Column(db.String(100), nullable=False, server_default='')
last_name = db.Column(db.String(100), nullable=False, server_default='')
class Url(db.Model):
    """A URL belonging to a user, accompanied by a description
    of 140 characters or less.
    Belongs to /urls/x
    """
    __tablename__ = 'urls'
    id = db.Column(db.Integer, primary_key=True)
    user_id = db.Column(db.Integer, db.ForeignKey(User.id))  # should never be null :o
    created = db.Column(db.DateTime, default=datetime.datetime.utcnow)
    url = db.Column(db.String())  # should neve rbe null :o
    description = db.Column(db.String(140))
    # title/content_type are scraped from the remote resource, so nullable.
    title = db.Column(db.String())
    content_type = db.Column(db.String())  # isn't this a certain number of bytes max? should b required
    user = db.relationship('User', foreign_keys='Url.user_id', lazy='subquery')
    def __init__(self, user_id, url, description, content_type=None, title=None):
        """Create a URL row.

        Args:
            user_id (int): owner's User.id.
            url (str): the bookmarked address.
            description (str): user-supplied blurb (<= 140 chars).
            content_type (str, optional): MIME type from scraping.
            title (str, optional): page title from scraping.
        """
        self.user_id = user_id
        self.url = url
        self.description = description
        # these are derived from util.fetch_searchable_data()
        self.title = title
        self.content_type = content_type
    def __repr__(self):
        return '<URL #%s %s (%s)>' % (self.id, self.title, self.url)
    def to_dict(self):
        """Create a dictionary representing this URL.
        Returns:
            dict: contains the id, url, and description of
            this URL.
        """
        data_to_return = {
            'id': self.id,
            # TODO:
            # 'created': self.created,
            'url': self.url,
            'description': self.description,
            'title': self.title,
            'content_type': self.content_type,
        }
        return data_to_return
|
{"/app.py": ["/models.py", "/config.py", "/urlhelper.py"], "/tests.py": ["/app.py"]}
|
1,344
|
kawa-kokosowa/urlink
|
refs/heads/master
|
/app.py
|
"""urlink Flask App
"""
# builtin
import os
# local
import models
import config
import urlhelper
# 3rd party/pip
import flask
import flask_mail
import flask_user
import flask_login
import flask_script
import flask_migrate
import sqlalchemy
import wtforms
# flask app setup
app = flask.Flask(__name__)
app.config.from_object(config)

# Database migrations: Flask-Migrate wraps Alembic.
migrate = flask_migrate.Migrate(app, models.db)
# BUG FIX: Manager lives in flask_script, not flask_migrate — the old
# flask_migrate.Manager(app) raised AttributeError at import time.
manager = flask_script.Manager(app)
manager.add_command('db', flask_migrate.MigrateCommand)

models.db.init_app(app)

# flask user: mail for confirm/reset emails, SQLAlchemy adapter for users.
mail = flask_mail.Mail(app)
db_adapter = flask_user.SQLAlchemyAdapter(models.db, models.User)
user_manager = flask_user.UserManager(db_adapter, app)
class AddUrlForm(wtforms.Form):
    """Validation and fields for the form/page which allows a user
    to save/add a URL/link.

    Fields:
        url: required, must be a valid URL with a TLD.
        description: optional blurb, capped at 140 characters both
            server-side (Length) and client-side (maxlength attr).
    """
    url = wtforms.StringField(
        'url',
        [wtforms.validators.URL(require_tld=True),],
        # render_kw passes raw HTML attributes through to the template.
        render_kw={
            "placeholder": "URL/Link",
            "class": "form-control input-lg",
            "id": "url",
            "autofocus": True
        },
    )
    description = wtforms.TextAreaField(
        'description',
        [wtforms.validators.Length(max=140),],
        render_kw={
            "placeholder": "Description/about URL",
            "class": "form-control input-lg",
            "id": "description",
            # mirrors the server-side Length(max=140) validator
            "maxlength": 140,
        },
    )
class SearchForm(wtforms.Form):
    """For live searching/filtering the bookmarks.
    Uses the /autocomplete endpoint (see: autocomplete()).
    """
    # NOTE(review): TextField is a deprecated alias for StringField in
    # newer WTForms releases — confirm the pinned wtforms version.
    autocomp = wtforms.TextField('autocomp', id='autocomplete')
# TODO: newest first.
@app.route('/')
def home_page():
    """Rendered Jinja/HTML page for live-searching bookmarks.
    Form on this page can use normal form submission, however,
    this page includes jQuery which implements the live-searching
    feature, it updates the page with values from `/autocomplete`,
    i.e., autocomplete().
    If the user isn't logged in, they are redirected to the about page.
    """
    if flask_login.current_user.is_authenticated:
        # this form doesn't need validating
        search_form = SearchForm(flask.request.form)
        # if we have at least search term, the user has GET'd search form
        search_term = flask.request.args.get('term')
        search_type = flask.request.args.get('type')
        if search_term:
            urls = url_search(search_term, search_type=search_type)
        else:
            # No search: show every bookmark owned by the current user.
            urls = models.Url.query.filter_by(
                user=flask_login.current_user
            ).all()
        # Distinct non-empty content types feed the filter dropdown.
        content_types = set([url.content_type for url in urls if url.content_type])
        return flask.render_template(
            "ur_links.html",
            search_form=search_form,
            urls=urls,
            options=content_types,
        )
    else:
        return flask.render_template("landing.html")
def url_search(search_term, search_type=None):
    """Query the current user's URLs matching a search term.

    Matches case-insensitively against either the url or the description.

    Args:
        search_term (str): substring to look for.
        search_type (str, optional): restrict to this content_type.

    Returns:
        flask_sqlalchemy query of matching models.Url rows.
    """
    # Build the common filters once instead of duplicating the whole
    # query in both branches (the two branches previously only differed
    # by the content_type condition).
    filters = [
        models.Url.user_id == flask_login.current_user.id,
        sqlalchemy.or_(
            models.Url.url.ilike("%" + search_term + "%"),
            models.Url.description.ilike("%" + search_term + "%"),
        ),
    ]
    if search_type:
        filters.append(models.Url.content_type == search_type)
    return models.Url.query.filter(*filters)
@app.route('/autocomplete', methods=['GET'])
@flask_user.login_required
def autocomplete():
    """Provides JSON response of URLs where
    the search term is in the description.
    Query for URLs owned by the current user, whose descriptions
    in the database contain `term`.
    Returns:
        json: A list of dictionaries describing each
        matching URL.
    """
    # Same query parameters the jQuery live-search sends from home_page.
    search_term = flask.request.args.get('term')
    search_type = flask.request.args.get('type')
    urls = url_search(search_term, search_type=search_type)
    urls = [url.to_dict() for url in urls]
    return flask.jsonify(urls)
@app.route('/urls/add', methods=['POST', 'GET'])
@flask_user.login_required
def add_url():
    """Process and provide the form for adding a new URL to the
    current user's urls.
    """
    form = AddUrlForm(flask.request.form)
    # Either process the form from POST or show the form.
    if flask.request.method == 'POST' and form.validate():
        # There's no reason to prevent the URL from being created
        # using the POST'd information. Create and show the URL.
        url = flask.request.form['url']
        # Scrape title/content_type from the remote resource.
        # NOTE(review): urlhelper.fetch_searchable_data can return None on
        # some failure paths, which would make **searchable_data raise —
        # verify against urlhelper.
        searchable_data = urlhelper.fetch_searchable_data(url)
        new_url = models.Url(
            user_id=flask_login.current_user.id,
            url=url,
            description=flask.request.form['description'],
            **searchable_data,
        )
        models.db.session.add(new_url)
        models.db.session.commit()
        return flask.redirect(flask.url_for('home_page'))
    else:
        return flask.render_template("add_url.html", form=form)
# Create the database
if __name__=='__main__':
    manager.run()
|
{"/app.py": ["/models.py", "/config.py", "/urlhelper.py"], "/tests.py": ["/app.py"]}
|
1,345
|
kawa-kokosowa/urlink
|
refs/heads/master
|
/migrations/versions/a77719286100_.py
|
"""empty message
Revision ID: a77719286100
Revises: ae0cb4fef303
Create Date: 2016-10-03 13:03:02.448316
"""
# revision identifiers, used by Alembic.
revision = 'a77719286100'
down_revision = 'ae0cb4fef303'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column('urls', sa.Column('title', sa.String(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column('urls', 'title')
### end Alembic commands ###
|
{"/app.py": ["/models.py", "/config.py", "/urlhelper.py"], "/tests.py": ["/app.py"]}
|
1,346
|
kawa-kokosowa/urlink
|
refs/heads/master
|
/urlhelper.py
|
"""Get as much info as possible about a URL.
"""
import mimetypes
import requests
import bs4
# Hard cap on how many HEAD redirects head_until_no_redirect will follow.
MAXIMUM_REDIRECTS = 4
# Declarative list of tags to extract as searchable metadata: each entry
# maps a result key ('name') to the (name, attrs) args for soup.find().
FIELDS = [
    {
        'name': 'title',
        'soup_find': ('title', {}),
    },
]
# Module-level session so HTTP connections are reused between requests.
_session = requests.Session()
class MaxRedirectError(Exception):
    """Raised when a HEAD request chain exhausts the redirect budget."""

    def __init__(self):
        # NOTE: message reports the module-wide cap, not the per-call budget.
        self.message = (
            "Head request redirected %d times (max is %d)"
            % (MAXIMUM_REDIRECTS + 1, MAXIMUM_REDIRECTS)
        )
        # BUG FIX: pass the message to Exception so str(e)/logging show it
        # (previously str(e) was the empty string).
        super().__init__(self.message)
class HttpError(Exception):
    """Raised when a HEAD request returns a non-OK, non-redirect status.

    Attributes:
        status_code (int): the HTTP status that triggered the error.
        message (str): human-readable description.
    """

    def __init__(self, status_code):
        self.status_code = status_code
        self.message = "Encountered HTTP error %d" % status_code
        # BUG FIX: pass the message to Exception so str(e)/logging show it
        # (previously str(e) was the empty string).
        super().__init__(self.message)
def head_until_no_redirect(url, maximum_redirects=MAXIMUM_REDIRECTS):
    """Keep fetching the redirect URL until 200 (not a redirect) or fail.

    Args:
        url (str): address to HEAD.
        maximum_redirects (int): remaining redirect budget.

    Returns:
        tuple: (final_url, requests.Response) on a 200 response.

    Raises:
        HttpError: on any other non-redirect status.
        MaxRedirectError: when the redirect budget is exhausted.
    """
    if maximum_redirects:
        response = _session.head(url)
        # BUG FIX: follow every Location-bearing redirect status, not only
        # permanent (301) ones — 302/303/307/308 previously raised HttpError.
        if response.status_code in (301, 302, 303, 307, 308):
            return head_until_no_redirect(
                response.headers['Location'],
                maximum_redirects - 1
            )
        elif response.status_code == 200:
            return url, response
        else:
            raise HttpError(response.status_code)
    # maximum redirects is 0; we recursively reached the end
    else:
        raise MaxRedirectError()
def searchable_data_from_soup(soup):
    """Extract the FIELDS-configured tags from a parsed document.

    Returns a dict mapping each field's name to the matching tag's text;
    fields whose tag is absent are simply omitted.
    """
    extracted = {}
    for spec in FIELDS:
        find_arg, find_kwargs = spec['soup_find']
        tag = soup.find(find_arg, **find_kwargs)
        if tag:
            extracted[spec['name']] = tag.text
    return extracted
def fetch_searchable_data(url):
    """Fetch searchable metadata (content type, title, ...) for a URL.

    Resolves redirects with a HEAD request first. Non-HTML resources only
    yield their content type. For HTML, the first 400 bytes are fetched
    and parsed; the remainder is fetched only if some desired tags were
    not found in that first chunk.

    Note:
        does not check file extension for mimetype first, because more
        searchable data is hoped for than simply content_type

    Arguments:
        url (str): address of the resource to inspect.

    Returns:
        dict: searchable data — always a dict (possibly empty) so callers
        can safely **-expand it.
    """
    searchable_data = {}
    try:
        # note that the new url is the final url we were directed to
        url, head_response = head_until_no_redirect(url)
    except (HttpError, MaxRedirectError):
        # Network/HTTP failure: last-ditch guess from the file extension.
        # BUG FIX: guess_type returns a (type, encoding) tuple which is
        # always truthy, so the old `if mimetype` check could leak
        # {"content_type": None}; and returning None would crash callers
        # that **-expand the result. Return {} when the type is unknown.
        content_type, _encoding = mimetypes.guess_type(url)
        return {"content_type": content_type} if content_type else {}
    # Determine resource's type from the 'Content-Type' HTTP header.
    headers_from_url = head_response.headers
    content_type = headers_from_url['Content-Type'].split(';', 1)[0]
    # TODO: should be able to handle extrapolating meta
    # from images, PDFs, music, etc.
    #
    # Bail if we can't extrapolate any further information
    # about this Content-Type (because beyond here we are just
    # extrapolating HTML information).
    if content_type != "text/html":
        return {"content_type": content_type}
    # ...now we know the content_type is text/html!
    searchable_data['content_type'] = "text/html"
    # First try to only request the first 400 bytes to get all of the
    # desired tags (which will be used to create searchable data).
    response = _session.get(url, headers={'Range': 'bytes=0-400'})
    soup = bs4.BeautifulSoup(response.text, 'html.parser')
    first_chunk_data = searchable_data_from_soup(soup)
    # BUG FIX: merge the first-chunk results unconditionally — previously
    # they were silently discarded when *all* fields were found in the
    # first 400 bytes, losing the title on the common success path.
    searchable_data.update(first_chunk_data)
    # we couldn't find all of the tags we wanted in
    # the first 400 bytes of the response
    if len(first_chunk_data) != len(FIELDS):
        # Store the old response text so we can skip getting it again
        old_response_text = response.text
        # Get the full page, but skip the part we already have (skip the
        # first 400 bytes), combining this new part with
        # the old_response_text!
        # FIXME: could be stream of data! Set an upper limit on bytes range!
        new_response = _session.get(url, headers={'Range': 'bytes=401-'})
        soup = bs4.BeautifulSoup(old_response_text + new_response.text, 'html.parser')
        searchable_data.update(searchable_data_from_soup(soup))
    return searchable_data
|
{"/app.py": ["/models.py", "/config.py", "/urlhelper.py"], "/tests.py": ["/app.py"]}
|
1,347
|
kawa-kokosowa/urlink
|
refs/heads/master
|
/config.py
|
"""Really sloppy configuration that will be overhauled
to include environment-specific configs (develop, test, production).
Mostly due to a Heroku headache.
"""
import os
DEBUG = False
TESTING = False
# Must be provided via the environment in production (session signing).
SECRET_KEY = os.getenv('SECRET_KEY')
SQLALCHEMY_DATABASE_URI = os.getenv(
    'SQLALCHEMY_DATABASE_URI',  # if not this... then below
    os.getenv('DATABASE_URL', 'sqlite:////tmp/debug.db'),  # heroku
)
# flask-user
USER_ENABLE_USERNAME = False
USER_ENABLE_CHANGE_USERNAME = False
# flask-mail settings for flask-user
# (email confirmation, password reset)
# setup for gmail by default
# NOTE, FIXME, TODO: the only reason this is false
# is because gmail isn't allowing me use their smtp
# anymore!
USER_ENABLE_CONFIRM_EMAIL = False
# this email stuff is all moot because of above note
# will re-enable once we have an smtp service
"""
MAIL_USERNAME = os.getenv('MAIL_USERNAME') # example@gmail.com
MAIL_PASSWORD = os.getenv('MAIL_PASSWORD')
MAIL_DEFAULT_SENDER = '"urlink" <noreply@urlink.link>'
MAIL_SERVER = os.getenv('MAIL_SERVER')
MAIL_PORT = int(os.getenv('MAIL_PORT'))
MAIL_USE_SSL = True
MAIL_USE_TLS = True
"""
# this is used by email:
USER_APP_NAME = 'urlink'
|
{"/app.py": ["/models.py", "/config.py", "/urlhelper.py"], "/tests.py": ["/app.py"]}
|
1,348
|
kawa-kokosowa/urlink
|
refs/heads/master
|
/tests.py
|
import unittest
import os
import tempfile
import app
class UrlinkTestCase(unittest.TestCase):
    """App-level test harness backed by a throwaway sqlite file."""

    def setUp(self):
        """Deploy the test DB (sqlite).
        """
        # BUG FIX: store the fd under the name tearDown closes — it was
        # saved as self.db_handle, so tearDown's os.close(self.db_fd)
        # raised AttributeError after every test.
        self.db_fd, app.app.config['DATABASE'] = tempfile.mkstemp()
        self.app = app.app.test_client()
        with app.app.app_context():
            app.init_db()  # nope — NOTE(review): app has no init_db; confirm intent
    def tearDown(self):
        """Delete the test DB (sqlite).
        """
        os.close(self.db_fd)
        os.unlink(app.app.config['DATABASE'])
if __name__ == '__main__':
    unittest.main()
|
{"/app.py": ["/models.py", "/config.py", "/urlhelper.py"], "/tests.py": ["/app.py"]}
|
1,356
|
tartaruz/Stein-saks-papir
|
refs/heads/master
|
/stein-saks-papir.py
|
import funk
from time import sleep
import os
# Windows-only console clear; on other platforms this is a harmless no-op.
clear = lambda: os.system('cls')

valg = 0
# Loop until the player quits (menu lists "4: Avslutt", so accept both
# "avslutt" and "4" — previously only the literal word exited).
while valg != "avslutt" and valg != "4":
    sleep(1)
    print()
    funk.velkommen()
    funk.meny()
    print()
    valg = funk.valg()
    clear()
    if valg == "1":
        # Hot-seat: both players pick in turn, screen cleared in between.
        print("--------------Spiller 1's tur--------------")
        pvalg = funk.choose()
        p1 = funk.konv(pvalg)
        print("Takk! Nå er det spiller2's tur")
        sleep(2)
        clear()
        print("--------------Spiller 2's tur--------------")
        pvalg = funk.choose()
        p2 = funk.konv(pvalg)
        funk.game(p1, p2, 1)
        sleep(5)  # BUG FIX: was time(5), a NameError (time was never imported)
        clear()
    elif valg == "2":
        # Player vs. computer.
        print("--------------Spiller 1's tur--------------")
        pvalg = funk.choose()
        p = funk.konv(pvalg)
        print("Du valgte", p, "! Nå er det maskinens tur")
        sleep(3)
        clear()
        print("--------------Terminator's tur-------------")
        com = funk.comp()
        funk.revmaskinvalg(com)
        cp = funk.konv(com)
        print()
        print("TERMINATOR VALGTE:", cp.upper())
        funk.game(p, cp, 2)  # Type 2
        sleep(5)
        clear()
    elif valg == "3":  # BUG FIX: was ==3 (int), which never matched string input
        print("3")
    elif valg == "help":
        print("help")
        c = funk.comp()
        print(c)
    else:
        print("Wrong, try again ")
        clear()
print("Farvel!")
sleep(10)  # BUG FIX: was time.sleep(10); only sleep is imported
|
{"/stein-saks-papir.py": ["/funk.py"]}
|
1,357
|
tartaruz/Stein-saks-papir
|
refs/heads/master
|
/funk.py
|
import random
from time import sleep#for stein saks papir
def velkommen():
    """Print the welcome banner."""
    print("§-----------------------------------------------------------§")
    print("§-----|           VELKOMMEN TIL STEIN/SAKS/PAPIR!      |-----§")
    print("§-----------------------------------------------------------§")
    print()
def valg():
    """Read a menu command from the user and return it as a string."""
    # input() already returns a str; the previous str() wrapper was redundant.
    valget = input("Kommando: ")
    return valget
def valgmeny():
    """Print the ASCII-art weapon menu (1=stein, 2=saks, 3=papir)."""
    print("    _______")
    print("---'   ____)")
    print("      (_____)")
    print(" [1]  (_____)")
    print(" STEIN(____)")
    print("---.__(___)")
    print("    _______")
    print("---'   ____)____")
    print("          ______)")
    print(" [2]   __________)")
    print("  SAKS(____)")
    print("---.__(___)")
    print("    _______")
    print("---'   ____)____")
    print("          ______)")
    print(" [3]      _______)")
    print(" PAPIR   _______)")
    print("---.__________)")
def revmaskinvalg(hvilken):
    """Print the machine's pick as mirrored ASCII art.

    Args:
        hvilken (int): 1=stein, 2=saks, anything else=papir.
    """
    if hvilken == 1:
        print("       _______ ")
        print("  (  ____  '---")
        print("(_____) ")
        print("(_____) ")
        print("(____) ")
        print(" (___)__.---- ")
    elif hvilken == 2:
        print("      _______ ")
        print(" ____(____  '----")
        print("(______ ")
        print("(__________ ")
        print("     (____) ")
        print("       (___)__.---")
    else:
        print("      _______ ")
        print(" ____(____  '---")
        print("  (______ ")
        print("(_______ ")
        print("(_______ ")
        print(" (__________.--- ")
def choose():
    """Show the weapon menu and read the player's pick (1-3) as an int."""
    valgmeny()
    # SECURITY FIX: eval() on raw user input executes arbitrary code;
    # int() performs the same numeric conversion safely.
    valg = int(input("Velg[1-2-3]: "))
    return valg
def meny():
    """Print the main menu options."""
    print("1: 1vs1")
    print("2: 1vsCom")
    print("3: Help")
    print("4: Avslutt")
def comp():
    """Return the computer's pick: a random int in 1..3 inclusive."""
    return random.randint(1, 3)
def konv(valg):
    """Map a numeric pick to its weapon name.

    1 -> "stein", 2 -> "saks", anything else -> "papir" (matching the
    original if/elif/else fallthrough).
    """
    names = {1: "stein", 2: "saks"}
    return names.get(valg, "papir")
def game(valg1, valg2, Gtype):
    """Print the round's outcome from player 1's perspective.

    Args:
        valg1: player 1's weapon name ("stein"/"saks"/"papir").
        valg2: the opponent's weapon name.
        Gtype: 1 for hot-seat (opponent is "spiller 2"),
               anything else for vs-computer ("maskinen").
    """
    spiller = "spiller 2" if Gtype == 1 else "maskinen"
    if valg1 == valg2:
        print("DRAW! Ingen vinnere!")
        return
    # What each weapon defeats; an unrecognized valg1 falls through to
    # "papir" semantics (beats stein), matching the original else branch.
    beats = {"stein": "saks", "saks": "papir", "papir": "stein"}
    if beats.get(valg1, "stein") == valg2:
        print("Spiller 1 vant mot", spiller)
    else:
        print("Spiller 1 tapte mot", spiller)
|
{"/stein-saks-papir.py": ["/funk.py"]}
|
1,358
|
KagenLH/forme-app
|
refs/heads/main
|
/app/forms/signup_form.py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import Email, ValidationError, InputRequired, Length, EqualTo
from app.models import User
def user_exists(form, field):
    """WTForms validator: reject an email already present in users."""
    # Checking if user exists
    email = field.data
    user = User.query.filter(User.email == email).first()
    if user:
        raise ValidationError('Email address is already in use.')
def username_exists(form, field):
    """WTForms validator: reject a username already present in users."""
    # Checking if username is already in use
    username = field.data
    user = User.query.filter(User.username == username).first()
    if user:
        raise ValidationError('Username is already in use.')
class SignUpForm(FlaskForm):
    """Sign-up form: unique username/email, matching password pair."""
    username = StringField(
        'username', validators=[InputRequired(message='Input Required'), Length(max=40, message='Must be less than 40 characters'), username_exists])
    email = StringField('email', validators=[InputRequired(), Length(
        max=40, message='Must be less than 40 characters'), Email(message='Invalid'), user_exists])
    # password must equal the confirm field; confirm itself has no validators.
    password = PasswordField('password', validators=[
        InputRequired(), EqualTo('confirm', message='Passwords must match')])
    confirm = PasswordField('confirm')
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,359
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210816_135552_.py
|
"""empty message
Revision ID: fa590b961f4f
Revises: ffdc0a98111c
Create Date: 2021-08-16 13:55:52.581549
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fa590b961f4f'
down_revision = 'ffdc0a98111c'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('forms',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=50), nullable=True),
sa.Column('owner_id', sa.Integer(), nullable=False),
sa.Column('description', sa.Text(), nullable=True),
sa.Column('label_align', sa.String(length=10), nullable=True),
sa.Column('description_align', sa.String(length=10), nullable=True),
sa.Column('title_align', sa.String(length=10), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('forms')
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,360
|
KagenLH/forme-app
|
refs/heads/main
|
/app/seeds/forms.py
|
from app.models import db, Form
def seed_forms():
    """Insert a single demo form for development seeding."""
    test = Form(
        title = "Test Form Render",
        owner_id = 1,
        description = "",
        # BUG FIX: the column created by the forms migration is
        # label_align; the old label_placement kwarg does not exist on
        # the Form model and raised TypeError when seeding.
        label_align = "",
        description_align = "",
        title_align = "",
    )
    db.session.add(test)
    db.session.commit()
def undo_forms():
    """Remove all seeded forms and reset the id sequence."""
    # TRUNCATE + RESTART IDENTITY + CASCADE: SQLAlchemy has no built-in
    # equivalent, so raw SQL is used (PostgreSQL syntax).
    db.session.execute('TRUNCATE forms RESTART IDENTITY CASCADE;')
    db.session.commit()
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,361
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210820_171546_.py
|
"""empty message
Revision ID: 94f5eda37179
Revises: b3e721c02f48
Create Date: 2021-08-20 17:15:46.455809
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '94f5eda37179'
down_revision = 'b3e721c02f48'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'label',
existing_type=sa.VARCHAR(length=55),
nullable=False)
op.alter_column('forms', 'title',
existing_type=sa.VARCHAR(length=50),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('forms', 'title',
existing_type=sa.VARCHAR(length=50),
nullable=True)
op.alter_column('fields', 'label',
existing_type=sa.VARCHAR(length=55),
nullable=True)
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,362
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210821_113310_.py
|
"""empty message
Revision ID: d0c387e43ca4
Revises: 94f5eda37179
Create Date: 2021-08-21 11:33:10.206199
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd0c387e43ca4'
down_revision = '94f5eda37179'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('forms', sa.Column('field_id', sa.Integer(), nullable=True))
op.create_foreign_key(None, 'forms', 'fields', ['field_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'forms', type_='foreignkey')
op.drop_column('forms', 'field_id')
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,363
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210820_103408_.py
|
"""empty message
Revision ID: b05fdd14ae4f
Revises: 4563136888fd
Create Date: 2021-08-20 10:34:08.171553
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b05fdd14ae4f'
down_revision = '4563136888fd'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'label',
existing_type=sa.VARCHAR(length=55),
nullable=True)
op.alter_column('fields', 'required',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'required',
existing_type=sa.BOOLEAN(),
nullable=True)
op.alter_column('fields', 'label',
existing_type=sa.VARCHAR(length=55),
nullable=False)
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,364
|
KagenLH/forme-app
|
refs/heads/main
|
/app/seeds/users.py
|
from app.models import db, User
# Adds a demo user, you can add other users here if you want
def seed_users():
    """Insert three demo users (Demo, marnie, bobbie) for development."""
    # All seeded accounts share the same throwaway password.
    demo = User(
        username='Demo', email='demo@aa.io', password='password')
    marnie = User(
        username='marnie', email='marnie@aa.io', password='password')
    bobbie = User(
        username='bobbie', email='bobbie@aa.io', password='password')
    db.session.add(demo)
    db.session.add(marnie)
    db.session.add(bobbie)
    db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and RESET IDENTITY
# resets the auto incrementing primary key, CASCADE deletes any
# dependent entities
def undo_users():
    """Remove all seeded users and reset the id sequence."""
    db.session.execute('TRUNCATE users RESTART IDENTITY CASCADE;')
    db.session.commit()
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,365
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210820_100009_.py
|
"""empty message
Revision ID: beeeac90e4ba
Revises: d25f4d1b7ea0
Create Date: 2021-08-20 10:00:09.924819
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'beeeac90e4ba'
down_revision = 'd25f4d1b7ea0'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'required',
existing_type=sa.BOOLEAN(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'required',
existing_type=sa.BOOLEAN(),
nullable=False)
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,366
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210821_161057_.py
|
"""empty message
Revision ID: 4df12f583573
Revises: 2453c767d036
Create Date: 2021-08-21 16:10:57.556468
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4df12f583573'
down_revision = '2453c767d036'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'form_id',
existing_type=sa.INTEGER(),
nullable=True)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'form_id',
existing_type=sa.INTEGER(),
nullable=False)
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,367
|
KagenLH/forme-app
|
refs/heads/main
|
/app/models/__init__.py
|
from .db import db
from .user import User
from .form import Form
from .field import Field
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,368
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210820_100524_.py
|
"""empty message
Revision ID: b8ec5632d693
Revises: beeeac90e4ba
Create Date: 2021-08-20 10:05:24.638509
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b8ec5632d693'
down_revision = 'beeeac90e4ba'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'label',
existing_type=sa.VARCHAR(length=55),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'label',
existing_type=sa.VARCHAR(length=55),
nullable=True)
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,369
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210821_145311_.py
|
"""empty message
Revision ID: 2453c767d036
Revises: d0c387e43ca4
Create Date: 2021-08-21 14:53:11.208418
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2453c767d036'
down_revision = 'd0c387e43ca4'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')
op.drop_column('forms', 'field_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True))
op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,370
|
KagenLH/forme-app
|
refs/heads/main
|
/app/models/form.py
|
from .db import db


class Form(db.Model):
    """A user-created form; owns a collection of Field rows.

    Serialized via to_dict() for the JSON API.
    """
    __tablename__ = 'forms'

    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    description = db.Column(db.Text)
    label_placement = db.Column(db.String(10))
    description_align = db.Column(db.String(10))
    title_align = db.Column(db.String(10))

    # One-to-many: the backref exposes a 'form' attribute on Field instances,
    # which callers use to associate a Field with this Form at creation time.
    fields = db.relationship('Field', backref='form')

    def to_dict(self):
        """Serialize this form — including its fields — to a JSON-safe dict."""
        return {
            'id': self.id,
            # each related field serializes itself
            'fields': [field.to_dict() for field in self.fields],
            'title': self.title,
            'owner_id': self.owner_id,
            'description': self.description,
            'label_placement': self.label_placement,
            'description_align': self.description_align,
            'title_align': self.title_align
        }

    def __repr__(self):
        return str(self.to_dict())
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,371
|
KagenLH/forme-app
|
refs/heads/main
|
/app/api/form_routes.py
|
from flask import Blueprint, jsonify, request, session
from flask_login import login_required, current_user
from app.models import Form, db, Field

# All routes in this module are registered under the "forms" blueprint.
form_routes = Blueprint("forms", __name__)
# get all forms --- remove this route?
@form_routes.route('/')
# @login_required
def get_forms():
    """Return every form in the database as {'forms': [...]} (no auth)."""
    return {'forms': [form.to_dict() for form in Form.query.all()]}
@form_routes.route('/<int:id>', methods=['GET', 'DELETE'])
@login_required
def forms(id):
    """Fetch (GET) or remove (DELETE) a single form by primary key.

    Returns the form's dict either way; 404 if no such form exists
    (previously this crashed with AttributeError on a missing id).
    """
    form = Form.query.get(id)
    if form is None:
        return "Form not found.", 404
    if request.method == 'DELETE':
        db.session.delete(form)
        db.session.commit()
    return form.to_dict()
# (GET) allow user to access a form without being logged in, i.e. SHARED form
# @form_routes.route('/<int:id>/shared')
# def shared_form(id):
# form = Form.query.get(id)
# return form.to_dict()
# get forms by owner_id (i.e. all forms owned by a specific user)
@form_routes.route('/users/<int:id>')
def user_forms(id):  # takes a user's id
    """List every form whose owner_id matches the given user id."""
    owned = Form.query.filter_by(owner_id=id).all()
    # destructure in forms store
    return {'forms': [f.to_dict() for f in owned]}
def _build_form_fields(form, field_dicts):
    """Build Field rows for *form* from a list of request field dicts.

    Normalizes each dict (missing keys -> None, maxLength -> int) and packs
    the choices list into the '&&'-delimited string Field.to_dict expects.
    Shared by create_form and edit_form, which previously duplicated this
    logic inline.  Returns the (not yet committed) list of Field instances.
    """
    # all of the columns in the fields table (except id)
    expected_keys = [
        "type",
        "label",
        "maxLength",
        "required",
        "placeholder",
        "instructions",
        "choices",
    ]
    fields = []
    for field_info in field_dicts:
        # coerce maxLength to int; int('') / int(None) used to crash here
        raw_max = field_info.get("maxLength")
        field_info["maxLength"] = int(raw_max) if raw_max not in (None, "") else None
        for key in expected_keys:
            # create any missing key with a default value of None
            field_info.setdefault(key)
        # pack choices as 'a&&b&&' — Field.to_dict reverses this
        choices = field_info["choices"] or []
        choices_string = "".join(str(choice) + "&&" for choice in choices)
        fields.append(Field(
            type=field_info["type"],
            label=field_info["label"],
            max_length=field_info["maxLength"],
            required=field_info["required"],
            placeholder=field_info["placeholder"],
            instructions=field_info["instructions"],
            choices=choices_string,
            form=form,  # the relationship backref populates form_id
        ))
    return fields


@form_routes.route('/build', methods=['POST'])
@login_required
def create_form():
    """Create a new form (owned by the logged-in user) plus its fields."""
    user_id = session['_user_id']
    data = request.get_json()
    form = Form(
        title=data["title"],
        owner_id=user_id,
        description=data["description"],
        label_placement=data["labelPlacement"],
        description_align=data["descriptionAlignment"],
        title_align=data["titleAlignment"],
    )
    # commit the form first so it has a primary key for its fields
    db.session.add(form)
    db.session.commit()
    db.session.add_all(_build_form_fields(form, data["fields"]))
    db.session.commit()
    return form.to_dict()


@form_routes.route('/<int:id>', methods=['PUT'])
@login_required
def edit_form(id):
    """Replace a form's attributes and its entire field set; owner only."""
    form = Form.query.get(id)
    if not form:
        return "The form you're trying to edit does not exist.", 400
    if form.owner_id != current_user.id:
        return "You do not own the form you are trying to edit.", 401
    data = request.get_json()
    form.title = data["title"]
    form.description = data["description"]
    form.label_placement = data["labelPlacement"]
    form.description_align = data["descriptionAlignment"]
    form.title_align = data["titleAlignment"]
    # Remove any fields on the form that previously existed
    for field in form.fields:
        db.session.delete(field)
    db.session.commit()
    # Re-add all the fields to the form from the request payload
    db.session.add_all(_build_form_fields(form, data["fields"]))
    db.session.commit()
    return form.to_dict()
# ! currently causes error "405 method not allowed"
# ! when not bundled with `user_forms(id)` above
# delete a specific form by primary key
# @form_routes.route('/<int:id>', methods=['DELETE'])
# def delete_form(id):
# if request.method == 'DELETE':
# form = Form.query.get(id)
# db.session.delete(form)
# db.session.commit()
# return form.to_dict()
# @form_routes.route('/<int:id>')
# def get_form(id):
# form = Form.query.filter(Form.id == id).first()
# # fields = Field.query.filter(Field.form_id == form.id).all()
# print('FORM IS HERE!!! ', form.to_dict())
# # print('FIELD IS HERE!!!!! ***',
# # {'fields': [field.to_dict() for field in fields]})
# # form["fields"] = {'fields': [field.to_dict() for field in fields]}
# return form.to_dict()
@form_routes.route('/<int:id>/shared', methods=['GET'])
@login_required
def get_share_forms(id):
    """Return a single form for the shared-form view.

    NOTE(review): despite the 'shared' name this still requires login —
    confirm whether shared forms should be viewable without authentication.
    Changes vs. original: removed a leftover debug print, removed the
    redundant request.method check (the route only accepts GET), and
    return 404 instead of crashing when the id does not exist.
    """
    form = Form.query.get(id)
    if form is None:
        return "Form not found.", 404
    return form.to_dict()
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,372
|
KagenLH/forme-app
|
refs/heads/main
|
/migrations/versions/20210820_133516_.py
|
"""empty message
Revision ID: b3e721c02f48
Revises: 9aec744a6b98
Create Date: 2021-08-20 13:35:16.871785
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b3e721c02f48'
down_revision = '9aec744a6b98'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'form_id',
existing_type=sa.INTEGER(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('fields', 'form_id',
existing_type=sa.INTEGER(),
nullable=True)
# ### end Alembic commands ###
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,373
|
KagenLH/forme-app
|
refs/heads/main
|
/app/api/field_routes.py
|
# from flask import Blueprint, jsonify, request
# from flask_login import login_required
# from app.models import Field, db
# field_routes = Blueprint('fields', __name__)
# @field_routes.route('/', methods=['POST'])
# def fields():
# if request.method == 'POST':
# # get fields data from request body
# data = request.get_json()
# form_fields = []
# for field_info in data:
# field = Field(
# type=field_info["type"],
# label=field_info["label"],
# max_length=field_info["max_length"],
# required=field_info["required"],
# placeholder=field_info["placeholder"],
# instructions=field_info["instructions"],
# choices=field_info["choices"],
# form_id=field_info["form_id"]
# )
# # db.session.add(field)
# form_fields.append(field)
# # adds each instance individually, so list format is ok
# db.session.add_all(form_fields)
# db.session.commit()
# # must return dictionary, tuple, or string
# return {"fields": [field.to_dict for field in form_fields]}
# @field_routes.route('/forms/<int:id>')
# def form_fields(id):
# fields = Field.query.filter_by(form_id=id).all()
# return {'fields': [field.to_dict for field in fields]}
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,374
|
KagenLH/forme-app
|
refs/heads/main
|
/app/models/field.py
|
from .db import db


class Field(db.Model):
    """A single input on a Form (text box, select, etc.)."""

    __tablename__ = 'fields'

    id = db.Column(db.Integer, primary_key=True)
    type = db.Column(db.String(255), nullable=False)
    label = db.Column(db.String(55), nullable=False)
    max_length = db.Column(db.Integer)
    required = db.Column(db.Boolean, nullable=False)
    placeholder = db.Column(db.String(255))
    instructions = db.Column(db.String(255))
    # Choice options packed as 'opt1&&opt2&&' (trailing delimiter included).
    choices = db.Column(db.Text)
    form_id = db.Column(db.Integer, db.ForeignKey("forms.id"))

    def to_dict(self):
        """Serialize to a JSON-safe dict; unpacks `choices` into a list."""
        return {
            'id': self.id,
            'form_id': self.form_id,
            'type': self.type,
            'label': self.label,
            'max_length': self.max_length,
            'required': self.required,
            'placeholder': self.placeholder,
            'instructions': self.instructions,
            # choices is a nullable column: guard None (previously crashed
            # with TypeError), then drop the trailing '&&' and split.
            'choices': (self.choices or '')[:-2].split('&&')
        }
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,375
|
KagenLH/forme-app
|
refs/heads/main
|
/app/seeds/fields.py
|
from app.models import db, Field
from app.models import Form


def seed_fields():
    """Seed one demo form with a single text field (development fixture)."""
    form = Form(
        title='To Test Fields',
        owner_id=1
    )
    db.session.add(form)
    testField = Field(
        type="text",
        label="Test Field",
        required=False,
        form=form,  # creates the form_id / association via the backref
        # stored in the packed '&&'-delimited format Field.to_dict unpacks
        choices='Some Stuff&&Another choice&&Hello from hell&&'
    )
    db.session.add(testField)
    db.session.commit()


def undo_fields():
    """Remove all seeded fields and reset the id sequence."""
    db.session.execute('TRUNCATE fields RESTART IDENTITY CASCADE;')
    db.session.commit()
|
{"/app/forms/signup_form.py": ["/app/models/__init__.py"], "/app/seeds/forms.py": ["/app/models/__init__.py"], "/app/seeds/users.py": ["/app/models/__init__.py"], "/app/models/__init__.py": ["/app/models/form.py", "/app/models/field.py"], "/app/api/form_routes.py": ["/app/models/__init__.py"], "/app/seeds/fields.py": ["/app/models/__init__.py"]}
|
1,383
|
rlebras/pytorch-pretrained-BERT
|
refs/heads/master
|
/examples/run_classifier.py
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""BERT finetuning runner."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import json
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from tqdm import tqdm, trange
from examples.run_squad import _compute_softmax
from pytorch_pretrained_bert import BertForSequenceClassification
from pytorch_pretrained_bert.file_utils import read_jsonl_lines, write_items, TsvIO
from pytorch_pretrained_bert.modeling import BertForMultipleChoice
from pytorch_pretrained_bert.optimization import BertAdam
from pytorch_pretrained_bert.tokenization import printable_text, convert_to_unicode, BertTokenizer
# Configure root logging once at import time (timestamp - level - logger - msg).
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                    datefmt='%m/%d/%Y %H:%M:%S',
                    level=logging.INFO)
# Module-level logger used throughout this file.
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for simple sequence classification."""

    def __init__(self, guid, text_a, text_b=None, label=None):
        """Store the raw (untokenized) sequences plus an optional label.

        Args:
            guid: Unique id for the example.
            text_a: string; the untokenized first sequence (the only one
                required for single-sequence tasks).
            text_b: optional string; the untokenized second sequence, needed
                only for sequence-pair tasks.
            label: optional string label — set for train/dev examples,
                absent for test examples.
        """
        self.guid = guid
        self.label = label
        self.text_a, self.text_b = text_a, text_b
class InputExampleWithList(object):
    """A single training/test example for simple multiple choice classification."""

    def __init__(self, guid, text_a, text_b, label=None):
        """Store parallel lists of candidate sequences.

        Args:
            guid: Unique id for the example.
            text_a: list of untokenized strings, one per choice.
            text_b: list of untokenized strings, same length as text_a.
            label: optional string label (train/dev only).
        """
        # Both sides must be same-length lists — one entry per choice.
        assert isinstance(text_a, list)
        assert isinstance(text_b, list)
        assert len(text_a) == len(text_b)
        self.guid = guid
        self.label = label
        self.text_a = text_a
        self.text_b = text_b
        # Kept as None for interface parity with InputExampleWithListFourFields.
        self.text_c = None
        self.text_d = None
class InputExampleWithListFourFields(object):
    """A single training/test example for simple multiple choice classification."""

    def __init__(self, guid, text_a, text_b, text_c, text_d, label=None):
        """Store up to four parallel lists of candidate sequences.

        Args:
            guid: Unique id for the example.
            text_a: list of untokenized strings, one per choice.
            text_b: list, same length as text_a.
            text_c: optional list, same length as text_a when present.
            text_d: optional list, same length as text_a when present.
            label: optional string label (train/dev only).
        """
        # a and b are mandatory parallel lists; c and d are optional but,
        # when given, must align with a as well.
        assert isinstance(text_a, list)
        assert isinstance(text_b, list)
        assert text_c is None or isinstance(text_c, list)
        assert text_d is None or isinstance(text_d, list)
        assert len(text_a) == len(text_b)
        if text_c is not None:
            assert len(text_c) == len(text_a)
        if text_d is not None:
            assert len(text_d) == len(text_a)
        self.guid = guid
        self.label = label
        self.text_a = text_a
        self.text_b = text_b
        self.text_c = text_c
        self.text_d = text_d
class InputFeatures(object):
    """A single set of features of data."""

    def __init__(self, input_ids, input_mask, segment_ids, label_id):
        """Bundle the model-ready feature arrays for one example."""
        self.input_ids = input_ids      # wordpiece token ids
        self.input_mask = input_mask    # 1 for real tokens, 0 for padding
        self.segment_ids = segment_ids  # 0 = first sequence, 1 = second
        self.label_id = label_id        # integer class index
class DataProcessor(object):
    """Base class for data converters for sequence classification data sets."""

    def get_train_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the train set."""
        raise NotImplementedError()

    def get_dev_examples(self, data_dir):
        """Gets a collection of `InputExample`s for the dev set."""
        raise NotImplementedError()

    def get_labels(self):
        """Gets the list of labels for this data set."""
        raise NotImplementedError()

    @classmethod
    def _read_tsv(cls, input_file, quotechar=None):
        """Read a tab-separated file into a list of row lists."""
        with open(input_file, "r") as handle:
            return list(csv.reader(handle, delimiter="\t", quotechar=quotechar))

    @classmethod
    def _read_jsonl(cls, input_file, quotechar=None):
        """Read a JSON-lines file into a list of parsed objects.

        `quotechar` is unused; kept for signature symmetry with _read_tsv.
        """
        with open(input_file, "r") as handle:
            return [json.loads(line) for line in handle]
class AnliProcessor(DataProcessor):
    """Processor for the ANLI data set (two candidate middle sentences)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid.jsonl")), "dev")

    def get_examples_from_file(self, input_file):
        # prediction entry point: examples from an arbitrary jsonl file
        return self._create_examples(
            self._read_jsonl(input_file), "to-pred")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, records, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, record) in enumerate(records):
            # NOTE(review): guid ends in a literal "1" here, whereas the
            # sibling processors use record['ending'] — confirm intentional.
            guid = "%s-%s-%s" % (set_type, record['InputStoryid'], "1")
            beginning = record['InputSentence1']
            ending = record['InputSentence5']
            option1 = record['RandomMiddleSentenceQuiz1']
            option2 = record['RandomMiddleSentenceQuiz2']
            # gold answers are 1-based in the data, 0-based for the model
            answer = int(record['AnswerRightEnding']) - 1
            # each candidate's context = story beginning + that middle sentence
            option1_context = convert_to_unicode(' '.join([beginning, option1]))
            option2_context = convert_to_unicode(' '.join([beginning, option2]))
            label = convert_to_unicode(str(answer))
            examples.append(
                InputExampleWithListFourFields(guid=guid,
                                               text_a=[option1_context, option2_context],
                                               text_b=[ending, ending],
                                               text_c=None,
                                               text_d=None,
                                               label=label
                                               )
            )
        return examples

    def label_field(self):
        # name of the gold-answer column in the raw data
        return "AnswerRightEnding"
class AnliProcessor3Option(DataProcessor):
    """Processor for the ANLI data set with three candidate middle sentences."""

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")

    def get_examples_from_file(self, input_file):
        """Create prediction examples from an arbitrary jsonl file.

        Bug fix: "to-pred" used to be passed to _read_jsonl (silently consumed
        as its unused `quotechar` parameter) and _create_examples was then
        called without its required `set_type` argument, raising TypeError.
        """
        return self._create_examples(
            self._read_jsonl(input_file), "to-pred")

    def get_labels(self):
        """See base class."""
        return ["0", "1", "2"]

    def _create_examples(self, records, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, record) in enumerate(records):
            guid = "%s-%s-%s" % (set_type, record['InputStoryid'], record['ending'])
            beginning = record['InputSentence1']
            ending = record['InputSentence5']
            option1 = record['RandomMiddleSentenceQuiz1']
            option2 = record['RandomMiddleSentenceQuiz2']
            option3 = record['RandomMiddleSentenceQuiz3']
            # gold answers are 1-based in the data, 0-based for the model
            answer = int(record['AnswerRightEnding']) - 1
            # each candidate's context = story beginning + that middle sentence
            option1_context = convert_to_unicode(' '.join([beginning, option1]))
            option2_context = convert_to_unicode(' '.join([beginning, option2]))
            option3_context = convert_to_unicode(' '.join([beginning, option3]))
            label = convert_to_unicode(str(answer))
            text_a = [option1_context, option2_context, option3_context]
            text_b = [ending, ending, ending]
            examples.append(
                InputExampleWithList(guid=guid,
                                     text_a=text_a,
                                     text_b=text_b,
                                     label=label
                                     )
            )
        return examples

    def label_field(self):
        # name of the gold-answer column in the raw data
        return "AnswerRightEnding"
class AnliWithCSKProcessor(DataProcessor):
    """Processor for the ANLI data set augmented with commonsense knowledge (CSK)."""

    def __init__(self):
        # populated by _create_examples from the data's actual option count
        self._labels = []

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.jsonl")))
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid.jsonl")), "dev")

    def get_test_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "test.jsonl")), "test")

    def get_examples_from_file(self, input_file):
        """Create prediction examples from an arbitrary jsonl file.

        Bug fix: same misplaced-argument bug as AnliProcessor3Option —
        "to-pred" used to be swallowed by _read_jsonl's unused quotechar and
        _create_examples was left without its `set_type` argument (TypeError).
        """
        return self._create_examples(
            self._read_jsonl(input_file), "to-pred")

    def get_labels(self):
        """See base class.

        NOTE(review): returns a fixed 16-way label set even though
        _create_examples derives self._labels from the actual number of
        RandomMiddleSentenceQuiz columns — confirm these agree for your data.
        """
        return [str(idx) for idx in range(16)]

    def _create_examples(self, records, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        if not records:
            # previously records[0] raised IndexError on an empty file
            return examples
        # count how many candidate-middle columns this data set carries
        num_fields = len(
            [x for x in list(records[0].keys()) if x.startswith('RandomMiddleSentenceQuiz')])
        self._labels = [str(idx) for idx in range(1, num_fields + 1)]
        for (i, record) in enumerate(records):
            guid = "%s-%s-%s" % (set_type, record['InputStoryid'], record['ending'])
            beginning = record['InputSentence1']
            ending = record['InputSentence5']
            text_a = []
            text_b = []
            for idx in range(1, num_fields + 1):
                # context: story beginning + candidate middle sentence
                text_a.append(
                    beginning + " " + record["RandomMiddleSentenceQuiz" + str(idx)]
                )
                # hypothesis: story ending justified by the candidate's CSK text
                text_b.append(
                    ending + " Because , " + record['CSK' + str(idx)]
                )
            # gold answers are 1-based in the data, 0-based for the model
            answer = int(record['AnswerRightEnding']) - 1
            label = convert_to_unicode(str(answer))
            examples.append(
                InputExampleWithListFourFields(guid=guid,
                                               text_a=text_a,
                                               text_b=text_b,
                                               text_c=None,
                                               text_d=None,
                                               label=label
                                               )
            )
        return examples

    def label_field(self):
        # name of the gold-answer column in the raw data
        return "AnswerRightEnding"
class WSCProcessor(DataProcessor):
    """Processor for the WSC data set.

    NOTE(review): the original docstring said "MRPC (GLUE version)" — an
    apparent copy-paste; the parsing below (substituting candidate names into
    a '_' blank) matches the Winograd Schema Challenge format.
    """

    def get_train_examples(self, data_dir):
        """See base class."""
        logger.info("LOOKING AT {}".format(os.path.join(data_dir, "train.tsv")))
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        # note: the dev split is deliberately read from test.tsv
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "test.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["1", "2"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for (i, line) in enumerate(lines):
            if i == 0:
                # skip the TSV header row
                continue
            guid = "%s-%s" % (set_type, i)
            sentence = convert_to_unicode(line[1]).replace("\"", "")
            conj = convert_to_unicode(line[2])
            # Split the sentence at the end of the conjunction: the prefix is
            # shared context, the suffix contains the '_' placeholder.
            # NOTE(review): .index raises ValueError if conj is absent — the
            # data presumably guarantees it appears; confirm.
            idx = sentence.index(conj)
            context = sentence[:idx + len(conj)]
            option_str = sentence[idx + len(conj):].strip()
            name1 = convert_to_unicode(line[3])
            name2 = convert_to_unicode(line[4])
            # substitute each candidate referent into the blank
            option1 = option_str.replace("_", name1)
            option2 = option_str.replace("_", name2)
            text_a = [context, context]
            text_b = [option1, option2]
            label = convert_to_unicode(line[5])
            examples.append(
                InputExampleWithList(
                    guid=guid,
                    text_a=text_a,
                    text_b=text_b,
                    label=label
                )
            )
        return examples

    def get_examples_from_file(self, input_file):
        # prediction entry point for an arbitrary tsv file
        return self._create_examples(
            self._read_tsv(input_file), "to-pred")

    def label_field(self):
        # name of the gold-answer column in the raw data
        return "answer"
class MrpcProcessor(DataProcessor):
    """Processor for the MRPC data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        train_path = os.path.join(data_dir, "train.tsv")
        logger.info("LOOKING AT {}".format(train_path))
        return self._create_examples(self._read_tsv(train_path), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        dev_path = os.path.join(data_dir, "dev.tsv")
        return self._create_examples(self._read_tsv(dev_path), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn parsed TSV rows into InputExamples (row 0 is the header)."""
        examples = []
        # guids stay 1-based, matching the original header-skip loop
        for row_num, row in enumerate(lines[1:], start=1):
            examples.append(InputExample(
                guid="%s-%s" % (set_type, row_num),
                text_a=convert_to_unicode(row[3]),
                text_b=convert_to_unicode(row[4]),
                label=convert_to_unicode(row[0]),
            ))
        return examples
class MnliProcessor(DataProcessor):
    """Processor for the MultiNLI data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev_matched.tsv")),
            "dev_matched")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Turn parsed TSV rows into InputExamples (row 0 is the header)."""
        examples = []
        for row in lines[1:]:
            examples.append(InputExample(
                # MNLI guids embed the data set's own pairID (column 0)
                guid="%s-%s" % (set_type, convert_to_unicode(row[0])),
                text_a=convert_to_unicode(row[8]),
                text_b=convert_to_unicode(row[9]),
                label=convert_to_unicode(row[-1]),
            ))
        return examples
class ColaProcessor(DataProcessor):
    """Processor for the CoLA data set (GLUE version)."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "train.tsv")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_tsv(os.path.join(data_dir, "dev.tsv")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, lines, set_type):
        """Turn parsed TSV rows into InputExamples (no header row in CoLA)."""
        return [
            InputExample(
                guid="%s-%s" % (set_type, row_num),
                text_a=convert_to_unicode(row[3]),
                text_b=None,
                label=convert_to_unicode(row[1]),
            )
            for row_num, row in enumerate(lines)
        ]
class BinaryAnli(DataProcessor):
    """Processor for the binarized ANLI data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "train-binary.jsonl")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(
            self._read_jsonl(os.path.join(data_dir, "valid-binary.jsonl")), "dev")

    def get_labels(self):
        """See base class."""
        return ["0", "1"]

    def _create_examples(self, records, set_type):
        """Build one sentence-pair example per record.

        text_a is the story beginning; text_b is the candidate middle
        followed by the story ending.
        """
        examples = []
        for idx, rec in enumerate(records):
            story_start = rec['InputSentence1']
            story_end = rec['InputSentence5']
            middle = rec['RandomMiddleSentenceQuiz1']
            examples.append(InputExample(
                guid="%s-%s" % (set_type, idx),
                text_a=convert_to_unicode(story_start),
                text_b=convert_to_unicode(middle + " " + story_end),
                label=str(rec['AnswerRightEnding']),
            ))
        return examples
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s.

    Each example becomes one padded [CLS] a [SEP] (b [SEP]) sequence of
    exactly `max_seq_length` token ids, with a matching attention mask and
    segment-id list, plus the integer id of the example's label.
    """
    # Map each label string to its integer id, in label_list order.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    features = []
    for (ex_index, example) in enumerate(examples):
        tokens_a = tokenizer.tokenize(example.text_a)

        tokens_b = None
        if example.text_b:
            tokens_b = tokenizer.tokenize(example.text_b)

        if tokens_b:
            # Modifies `tokens_a` and `tokens_b` in place so that the total
            # length is less than the specified length.
            # Account for [CLS], [SEP], [SEP] with "- 3"
            _truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            if len(tokens_a) > max_seq_length - 2:
                tokens_a = tokens_a[0:(max_seq_length - 2)]

        # The convention in BERT is:
        # (a) For sequence pairs:
        #   tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
        #   type_ids: 0     0  0    0    0     0       0 0     1  1  1  1   1 1
        # (b) For single sequences:
        #   tokens:   [CLS] the dog is hairy . [SEP]
        #   type_ids: 0     0   0   0  0     0 0
        #
        # Where "type_ids" are used to indicate whether this is the first
        # sequence or the second sequence. The embedding vectors for `type=0` and
        # `type=1` were learned during pre-training and are added to the wordpiece
        # embedding vector (and position vector). This is not *strictly* necessary
        # since the [SEP] token unambigiously separates the sequences, but it makes
        # it easier for the model to learn the concept of sequences.
        #
        # For classification tasks, the first vector (corresponding to [CLS]) is
        # used as as the "sentence vector". Note that this only makes sense because
        # the entire model is fine-tuned.
        tokens = []
        segment_ids = []
        tokens.append("[CLS]")
        segment_ids.append(0)
        for token in tokens_a:
            tokens.append(token)
            segment_ids.append(0)
        tokens.append("[SEP]")
        segment_ids.append(0)

        if tokens_b:
            for token in tokens_b:
                tokens.append(token)
                segment_ids.append(1)
            tokens.append("[SEP]")
            segment_ids.append(1)

        input_ids = tokenizer.convert_tokens_to_ids(tokens)

        # The mask has 1 for real tokens and 0 for padding tokens. Only real
        # tokens are attended to.
        input_mask = [1] * len(input_ids)

        # Zero-pad up to the sequence length.
        while len(input_ids) < max_seq_length:
            input_ids.append(0)
            input_mask.append(0)
            segment_ids.append(0)

        assert len(input_ids) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length

        label_id = label_map[example.label]
        if ex_index < 5:
            # Log the first few converted examples so preprocessing can be eyeballed.
            logger.info("*** Example ***")
            logger.info("guid: %s" % (example.guid))
            logger.info("tokens: %s" % " ".join(
                [printable_text(x) for x in tokens]))
            logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
            logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
            logger.info(
                "segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(input_ids=input_ids,
                          input_mask=input_mask,
                          segment_ids=segment_ids,
                          label_id=label_id))
    return features
def convert_examples_to_features_mc(examples, label_list, max_seq_length, tokenizer):
    """Loads a multiple-choice data file into a list of `InputBatch`s.

    Each example carries up to four parallel lists of per-option texts
    (text_a .. text_d).  One [CLS] field [SEP] field [SEP] ... sequence is
    built per option, and the per-option id/mask/segment lists are stacked
    into a single InputFeatures per example.
    """
    # Map each label string to its integer id, in label_list order.
    label_map = {}
    for (i, label) in enumerate(label_list):
        label_map[label] = i

    features = []
    for (ex_index, example) in tqdm(enumerate(examples), desc="Converting examples"):
        # Tokenize every populated text field; each field is a list of
        # per-option strings.
        inputs = []
        tokens_a = [tokenizer.tokenize(t) for t in example.text_a]
        inputs.append(tokens_a)

        tokens_b = None
        if example.text_b:
            tokens_b = [tokenizer.tokenize(t) for t in example.text_b]
            inputs.append(tokens_b)

        tokens_c = None
        if example.text_c:
            tokens_c = [tokenizer.tokenize(t) for t in example.text_c]
            inputs.append(tokens_c)

        tokens_d = None
        if example.text_d:
            tokens_d = [tokenizer.tokenize(t) for t in example.text_d]
            inputs.append(tokens_d)

        if len(inputs) > 1:
            # Modifies the token lists in place so that the total length fits:
            # budget is max_seq_length minus [CLS] and one [SEP] per field.
            adjusted_len = max_seq_length - len(inputs) - 1
            _truncate_sequences(adjusted_len, inputs)
        else:
            # Account for [CLS] and [SEP] with "- 2"
            for idx, ta in enumerate(tokens_a):
                tokens_a[idx] = tokens_a[idx][0:(max_seq_length - 2)]

        all_tokens = []
        all_token_ids = []
        all_segments = []
        all_masks = []
        # Build one BERT input sequence per option.
        for zipped_tokens in zip(*inputs):
            tokens = []
            segment_ids = []
            tokens.append("[CLS]")
            segment_ids.append(0)
            for idx, field in enumerate(zipped_tokens):
                for token in field:
                    tokens.append(token)
                    segment_ids.append(idx)
                tokens.append("[SEP]")
                segment_ids.append(idx)

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1] * len(input_ids)

            # Zero-pad up to the sequence length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
                segment_ids.append(0)

            # Removed a leftover `print("FOUND")` debug statement here; the
            # asserts below already flag any length mismatch.
            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length

            all_tokens.append(tokens)
            all_token_ids.append(input_ids)
            all_segments.append(segment_ids)
            all_masks.append(input_mask)

        label_id = label_map[example.label]
        if ex_index < 5:
            # Log the first few converted examples, one option at a time.
            logger.info("\n\n")
            logger.info("*** Example {} ***\n".format(ex_index))
            logger.info("guid: %s" % (example.guid))

            _ts = all_tokens
            _ids = all_token_ids
            _masks = all_masks
            _segs = all_segments
            logger.info("\n")
            for idx, (_t, _id, _mask, _seg) in enumerate(zip(_ts, _ids, _masks, _segs)):
                logger.info("\tOption {}".format(idx))
                logger.info("\ttokens: %s" % " ".join(
                    [printable_text(x) for x in _t]))
                logger.info("\tinput_ids: %s" % " ".join([str(x) for x in _id]))
                logger.info("\tinput_mask: %s" % " ".join([str(x) for x in _mask]))
                logger.info(
                    "\tsegment_ids: %s" % " ".join([str(x) for x in _seg]))
            logger.info("label: %s (id = %d)" % (example.label, label_id))

        features.append(
            InputFeatures(input_ids=all_token_ids,
                          input_mask=all_masks,
                          segment_ids=all_segments,
                          label_id=label_id))
    return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def _truncate_sequences(max_length, inputs):
    """Pairwise-truncate corresponding token lists from `inputs` in place.

    NOTE(review): only the first two fields (inputs[0] vs inputs[1]) are
    truncated against each other, even when more fields are present; any
    third/fourth field is left untouched — confirm this is intended for
    3- and 4-field examples.  (An unused `idx = 0` local was removed.)
    """
    for ta, tb in zip(inputs[0], inputs[1]):
        _truncate_seq_pair(ta, tb, max_length)
def accuracy(out, labels):
    """Count how many rows of `out` have their argmax equal to the label."""
    predictions = np.argmax(out, axis=1)
    return (predictions == labels).sum()
def copy_optimizer_params_to_model(named_params_model, named_params_optimizer):
    """ Utility function for optimize_on_cpu and 16-bits training.
    Copy the parameters optimized on CPU/RAM back to the model on GPU
    """
    pairs = zip(named_params_optimizer, named_params_model)
    for (opti_name, opti_param), (model_name, model_param) in pairs:
        # The two named-parameter lists must stay aligned; fail loudly if not.
        if opti_name != model_name:
            logger.error("name_opti != name_model: {} {}".format(opti_name, model_name))
            raise ValueError
        model_param.data.copy_(opti_param.data)
def set_optimizer_params_grad(named_params_optimizer, named_params_model, test_nan=False):
    """ Utility function for optimize_on_cpu and 16-bits training.
    Copy the gradient of the GPU parameters to the CPU/RAM copy of the model.
    Returns True if `test_nan` is set and any copied gradient contains NaNs.
    """
    found_nan = False
    for (opti_name, opti_param), (model_name, model_param) in zip(named_params_optimizer,
                                                                  named_params_model):
        # The two named-parameter lists must stay aligned; fail loudly if not.
        if opti_name != model_name:
            logger.error("name_opti != name_model: {} {}".format(opti_name, model_name))
            raise ValueError
        if model_param.grad is None:
            # No gradient on the model side: clear the optimizer-side copy too.
            opti_param.grad = None
            continue
        if test_nan and torch.isnan(model_param.grad).sum() > 0:
            found_nan = True
        if opti_param.grad is None:
            # Lazily allocate a gradient buffer of the right shape.
            opti_param.grad = torch.nn.Parameter(
                opti_param.data.new().resize_(*opti_param.data.size()))
        opti_param.grad.data.copy_(model_param.grad.data)
    return found_nan
def main():
    """CLI entry point: fine-tune and/or evaluate BERT on a classification task.

    Phases (each gated by a flag): argument parsing, device/seed setup,
    model + optimizer construction, optional training (``--do_train``,
    saves the model to the output dir), and optional evaluation/prediction
    (``--do_eval`` / ``--do_predict``, reloads the saved model).
    """
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--bert_model", default=None, type=str, required=True,
                        help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument("--output_dir",
                        default=None,
                        type=str,
                        required=True,
                        help="The output directory where the model checkpoints will be written.")

    ## Other parameters
    parser.add_argument("--max_seq_length",
                        default=128,
                        type=int,
                        help="The maximum total input sequence length after WordPiece tokenization. \n"
                             "Sequences longer than this will be truncated, and sequences shorter \n"
                             "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument("--do_predict",
                        default=False,
                        action='store_true',
                        help="Whether to run prediction on a given dataset.")
    parser.add_argument("--input_file_for_pred",
                        default=None,
                        type=str,
                        help="File to run prediction on.")
    parser.add_argument("--output_file_for_pred",
                        default=None,
                        type=str,
                        help="File to output predictions into.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--warmup_proportion",
                        default=0.1,
                        type=float,
                        help="Proportion of training to perform linear learning rate warmup for. "
                             "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumualte before performing a backward/update pass.")
    parser.add_argument('--optimize_on_cpu',
                        default=False,
                        action='store_true',
                        help="Whether to perform optimization and keep the optimizer averages on CPU")
    parser.add_argument('--fp16',
                        default=False,
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=128,
                        help='Loss scaling, positive power of 2 values can improve fp16 convergence.')

    args = parser.parse_args()

    # Registry mapping task name -> data processor class.
    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
        "anli": AnliProcessor,
        "anli3": AnliProcessor3Option,
        'anli_csk': AnliWithCSKProcessor,
        'bin_anli': BinaryAnli,
        'wsc': WSCProcessor
    }

    # --- Device / distributed setup ---
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
        if args.fp16:
            logger.info("16-bits training currently not supported in distributed training")
            args.fp16 = False  # (see https://github.com/pytorch/pytorch/pull/13496)
    logger.info("device %s n_gpu %d distributed training %r", device, n_gpu,
                bool(args.local_rank != -1))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
            args.gradient_accumulation_steps))

    # Shrink the per-step batch so batch * accumulation steps stays constant.
    args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)

    # Seed all RNGs for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError("At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    label_list = processor.get_labels()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model)

    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_steps = int(
            len(
                train_examples) / args.train_batch_size / args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    # bin_anli is a plain sequence-classification task; every other task in
    # the registry is handled as multiple choice.
    if task_name == 'bin_anli':
        model = BertForSequenceClassification.from_pretrained(args.bert_model, len(label_list))
    else:
        model = BertForMultipleChoice.from_pretrained(args.bert_model,
                                                      len(label_list),
                                                      len(label_list)
                                                      )
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.local_rank],
                                                          output_device=args.local_rank)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    if args.fp16:
        # Keep a master fp32 copy of the parameters on CPU.
        param_optimizer = [(n, param.clone().detach().to('cpu').float().requires_grad_()) \
                           for n, param in model.named_parameters()]
    elif args.optimize_on_cpu:
        param_optimizer = [(n, param.clone().detach().to('cpu').requires_grad_()) \
                           for n, param in model.named_parameters()]
    else:
        param_optimizer = list(model.named_parameters())
    # No weight decay for bias/LayerNorm-style parameters.
    no_decay = ['bias', 'gamma', 'beta']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if n not in no_decay], 'weight_decay_rate': 0.01},
        {'params': [p for n, p in param_optimizer if n in no_decay], 'weight_decay_rate': 0.0}
    ]
    optimizer = BertAdam(optimizer_grouped_parameters,
                         lr=args.learning_rate,
                         warmup=args.warmup_proportion,
                         t_total=num_train_steps)

    global_step = 0
    model_save_path = os.path.join(args.output_dir, "bert-finetuned.model")

    tr_loss = None
    if args.do_train:
        # Multiple-choice tasks need the per-option feature builder.
        if task_name.lower().startswith("anli") or task_name.lower().startswith("wsc"):
            train_features = convert_examples_to_features_mc(
                train_examples, label_list, args.max_seq_length, tokenizer)
        else:
            train_features = convert_examples_to_features(
                train_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running training *****")
        logger.info(" Num examples = %d", len(train_examples))
        logger.info(" Batch size = %d", args.train_batch_size)
        logger.info(" Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            status_tqdm = tqdm(train_dataloader, desc="Iteration")
            for step, batch in enumerate(status_tqdm):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss, _ = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.fp16 and args.loss_scale != 1.0:
                    # rescale loss for fp16 training
                    # see https://docs.nvidia.com/deeplearning/sdk/mixed-precision-training/index.html
                    loss = loss * args.loss_scale
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                # Only step the optimizer every gradient_accumulation_steps batches.
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    if args.fp16 or args.optimize_on_cpu:
                        if args.fp16 and args.loss_scale != 1.0:
                            # scale down gradients for fp16 training
                            for param in model.parameters():
                                param.grad.data = param.grad.data / args.loss_scale
                        is_nan = set_optimizer_params_grad(param_optimizer,
                                                           model.named_parameters(), test_nan=True)
                        if is_nan:
                            # NaN gradients: halve the loss scale and skip this update.
                            logger.info("FP16 TRAINING: Nan in gradients, reducing loss scaling")
                            args.loss_scale = args.loss_scale / 2
                            model.zero_grad()
                            continue
                        optimizer.step()
                        copy_optimizer_params_to_model(model.named_parameters(), param_optimizer)
                    else:
                        optimizer.step()
                    model.zero_grad()
                    global_step += 1
                status_tqdm.set_description_str("Iteration / Training Loss: {}".format((tr_loss /
                                                                                        nb_tr_examples)))
        # Save the whole (possibly wrapped) model object, not just a state dict.
        torch.save(model, model_save_path)

    if args.do_eval:
        if args.do_predict and args.input_file_for_pred is not None:
            eval_examples = processor.get_examples_from_file(args.input_file_for_pred)
        else:
            eval_examples = processor.get_dev_examples(args.data_dir)
        if task_name.lower().startswith("anli") or task_name.lower().startswith("wsc"):
            eval_features = convert_examples_to_features_mc(
                eval_examples, label_list, args.max_seq_length, tokenizer)
        else:
            eval_features = convert_examples_to_features(
                eval_examples, label_list, args.max_seq_length, tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info(" Num examples = %d", len(eval_examples))
        logger.info(" Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features], dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features], dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            eval_sampler = SequentialSampler(eval_data)
        else:
            eval_sampler = DistributedSampler(eval_data)
        eval_dataloader = DataLoader(eval_data, sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

        # NOTE(review): evaluation reloads the model saved by this run's
        # training phase; running --do_eval without a prior --do_train run
        # that produced model_save_path will fail here.
        logger.info("***** Loading model from: {} *****".format(model_save_path))
        model = torch.load(model_save_path)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0

        eval_predictions = []
        eval_pred_probs = []

        logger.info("***** Predicting ... *****".format(model_save_path))
        for input_ids, input_mask, segment_ids, label_ids in tqdm(eval_dataloader):
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss, logits = model(input_ids, segment_ids, input_mask, label_ids)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)

            eval_predictions.extend(np.argmax(logits, axis=1).tolist())
            eval_pred_probs.extend([_compute_softmax(list(l)) for l in logits])

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples

        # tr_loss is only set when training ran in this process.
        result = {'eval_loss': eval_loss,
                  'eval_accuracy': eval_accuracy,
                  'global_step': global_step,
                  'loss': tr_loss / nb_tr_steps if tr_loss is not None else 0.0
                  }

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info(" %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))

        # Attach per-record predictions to the raw prediction file and dump as JSONL.
        if task_name == "wsc":
            pred_examples = list(TsvIO.read(args.input_file_for_pred))
        else:
            pred_examples = read_jsonl_lines(args.input_file_for_pred)

        logger.info("***** Eval predictions *****")
        for record, pred, probs in zip(pred_examples, eval_predictions, eval_pred_probs):
            record['bert_prediction'] = pred
            # Labels in the data are 1-based; model predictions are 0-based.
            record['bert_correct'] = pred == (int(record[processor.label_field()]) - 1)
            record['bert_pred_probs'] = probs
        write_items([json.dumps(r) for r in pred_examples], args.output_file_for_pred)
# Standard CLI entry point.
if __name__ == "__main__":
    main()
|
{"/examples/run_classifier.py": ["/pytorch_pretrained_bert/file_utils.py"], "/examples/test_data_processor.py": ["/examples/run_classifier.py"]}
|
1,384
|
rlebras/pytorch-pretrained-BERT
|
refs/heads/master
|
/pytorch_pretrained_bert/file_utils.py
|
"""
Utilities for working with the local dataset cache.
This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp
Copyright by the AllenNLP authors.
"""
import gzip
import csv
import os
import logging
import shutil
import tempfile
import json
from urllib.parse import urlparse
from pathlib import Path
from typing import Optional, Tuple, Union, IO, Callable, Set, List
from hashlib import sha256
from functools import wraps
from tqdm import tqdm
import boto3
from botocore.exceptions import ClientError
import requests
# Module-level logger for cache operations.
logger = logging.getLogger(__name__)  # pylint: disable=invalid-name

# Local download cache directory; override via the
# PYTORCH_PRETRAINED_BERT_CACHE environment variable.
PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE',
                                               Path.home() / '.pytorch_pretrained_bert'))
def url_to_filename(url: str, etag: str = None) -> str:
    """
    Convert `url` into a hashed filename in a repeatable way.
    If `etag` is specified, append its hash to the url's, delimited
    by a period.
    """
    name = sha256(url.encode('utf-8')).hexdigest()
    if etag:
        name = name + '.' + sha256(etag.encode('utf-8')).hexdigest()
    return name
def filename_to_url(filename: str, cache_dir: str = None) -> Tuple[str, str]:
    """
    Return the url and etag (which may be ``None``) stored for `filename`.
    Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE

    cache_path = os.path.join(cache_dir, filename)
    # Both the cached file and its .json sidecar must be present.
    for required in (cache_path, cache_path + '.json'):
        if not os.path.exists(required):
            raise FileNotFoundError("file {} not found".format(required))

    with open(cache_path + '.json') as meta_file:
        metadata = json.load(meta_file)
    return metadata['url'], metadata['etag']
def cached_path(url_or_filename: Union[str, Path], cache_dir: str = None) -> str:
    """
    Given something that might be a URL (or might be a local path),
    determine which. If it's a URL, download the file and cache it, and
    return the path to the cached file. If it's already a local path,
    make sure the file exists and then return the path.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)

    scheme = urlparse(url_or_filename).scheme
    if scheme in ('http', 'https', 's3'):
        # Remote resource: serve it out of the local cache, downloading on miss.
        return get_from_cache(url_or_filename, cache_dir)
    if os.path.exists(url_or_filename):
        # Existing local file: hand it back untouched.
        return url_or_filename
    if scheme == '':
        # Looks like a local path, but nothing is there.
        raise FileNotFoundError("file {} not found".format(url_or_filename))
    # Unknown scheme: neither a fetchable URL nor a local path.
    raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
def split_s3_path(url: str) -> Tuple[str, str]:
    """Split a full s3 path into the bucket name and path."""
    parsed = urlparse(url)
    if not parsed.netloc or not parsed.path:
        raise ValueError("bad s3 path {}".format(url))
    # Strip a single leading '/' from the key, matching the URL path form.
    key = parsed.path
    if key.startswith("/"):
        key = key[1:]
    return parsed.netloc, key
def s3_request(func: Callable):
    """
    Wrapper function for s3 requests in order to create more helpful error
    messages.
    """
    @wraps(func)
    def wrapper(url: str, *args, **kwargs):
        try:
            return func(url, *args, **kwargs)
        except ClientError as exc:
            # Translate S3's 404 into the standard FileNotFoundError;
            # re-raise everything else unchanged.
            if int(exc.response["Error"]["Code"]) != 404:
                raise
            raise FileNotFoundError("file {} not found".format(url))

    return wrapper
@s3_request
def s3_etag(url: str) -> Optional[str]:
    """Check ETag on S3 object."""
    bucket_name, s3_path = split_s3_path(url)
    s3_object = boto3.resource("s3").Object(bucket_name, s3_path)
    return s3_object.e_tag
@s3_request
def s3_get(url: str, temp_file: IO) -> None:
    """Pull a file directly from S3."""
    bucket_name, s3_path = split_s3_path(url)
    boto3.resource("s3").Bucket(bucket_name).download_fileobj(s3_path, temp_file)
def http_get(url: str, temp_file: IO) -> None:
    """Stream `url` into `temp_file`, showing a byte-level progress bar."""
    response = requests.get(url, stream=True)
    content_length = response.headers.get('Content-Length')
    total = int(content_length) if content_length is not None else None
    progress = tqdm(unit="B", total=total)
    for chunk in response.iter_content(chunk_size=1024):
        if not chunk:  # skip keep-alive chunks
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
def get_from_cache(url: str, cache_dir: str = None) -> str:
    """
    Given a URL, look for the corresponding dataset in the local cache.
    If it's not there, download it. Then return the path to the cached file.
    """
    if cache_dir is None:
        cache_dir = PYTORCH_PRETRAINED_BERT_CACHE

    os.makedirs(cache_dir, exist_ok=True)

    # Get eTag to add to filename, if it exists.
    if url.startswith("s3://"):
        etag = s3_etag(url)
    else:
        response = requests.head(url, allow_redirects=True)
        if response.status_code != 200:
            raise IOError("HEAD request failed for url {} with status code {}"
                          .format(url, response.status_code))
        etag = response.headers.get("ETag")

    # Cache key is a hash of (url, etag), so a changed remote file re-downloads.
    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    if not os.path.exists(cache_path):
        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with tempfile.NamedTemporaryFile() as temp_file:
            logger.info("%s not found in cache, downloading to %s", url, temp_file.name)

            # GET file object
            if url.startswith("s3://"):
                s3_get(url, temp_file)
            else:
                http_get(url, temp_file)

            # we are copying the file before closing it, so flush to avoid truncation
            temp_file.flush()
            # shutil.copyfileobj() starts at the current position, so go to the start
            temp_file.seek(0)

            logger.info("copying %s to cache at %s", temp_file.name, cache_path)
            with open(cache_path, 'wb') as cache_file:
                shutil.copyfileobj(temp_file, cache_file)

            logger.info("creating metadata file for %s", cache_path)
            # Sidecar JSON records url/etag so filename_to_url can invert the hash.
            meta = {'url': url, 'etag': etag}
            meta_path = cache_path + '.json'
            with open(meta_path, 'w') as meta_file:
                json.dump(meta, meta_file)

            logger.info("removing temp file %s", temp_file.name)

    return cache_path
def read_set_from_file(filename: str) -> Set[str]:
    '''
    Extract a de-duped collection (set) of text from a file.
    Expected file format is one item per line.
    '''
    with open(filename, 'r') as file_:
        return {line.rstrip() for line in file_}
def get_file_extension(path: str, dot=True, lower: bool = True):
    """Return the extension of `path`; optionally drop the dot / lowercase it."""
    _, ext = os.path.splitext(path)
    if not dot:
        ext = ext[1:]
    return ext.lower() if lower else ext
def read_jsonl_lines(input_file: str) -> List[dict]:
    """Parse a JSON-lines file into a list of dicts, one per line."""
    with open(input_file) as f:
        return [json.loads(line.strip()) for line in f.readlines()]
def write_items(items: List[str], output_file):
    """Write each string in `items` to `output_file`, one per line.

    Args:
        items: Lines to write (without trailing newlines).
        output_file: Destination path; any existing file is overwritten.
    """
    # The `with` statement closes the file; the original's extra f.close()
    # was redundant and has been dropped.
    with open(output_file, 'w') as f:
        for concept in items:
            f.write(concept + "\n")
class TsvIO(object):
    """Minimal reader/writer for (optionally gzipped) TSV/CSV files."""

    @staticmethod
    def read(filename, known_schema=None, sep="\t", gzipped=False, source=None):
        """
        Read a TSV file with schema in the first line.

        :param filename: TSV formatted file
        :param known_schema: Column names to use. If ``None`` (the default),
            the first line of the file is treated as the schema.
            (Doc fix: the original docstring described a nonexistent
            ``first_line_schema`` parameter.)
        :param sep: Separator used in the file. Default is '\t'
        :param gzipped: If True, open the file with gzip.
        :param source: Value stored in each record's ``source`` field;
            defaults to ``filename``.
        :return: A generator of data records where each record is a dict. The keys
            of the dict correspond to the column names defined in the schema,
            plus ``source`` and a 1-based ``line_num``.
        """
        opener = gzip.open if gzipped else open
        first = True
        line_num = 0
        # `with` closes the file; the original's explicit f.close() after the
        # loop was redundant and has been removed.
        with opener(filename, 'rt') as f:
            for line in f:
                if first and known_schema is None:
                    # Consume the header line as the schema.
                    first = False
                    known_schema = [s.strip() for s in line.split(sep)]
                    continue
                line_num += 1
                data_fields = line.split(sep)
                data = {k.strip(): v.strip() for k, v in zip(known_schema, data_fields)}
                data['source'] = filename if source is None else source
                data['line_num'] = line_num
                yield data

    @staticmethod
    def make_str(item, sub_sep="\t"):
        """Render `item` as a string; lists are joined (recursively) with `sub_sep`."""
        if isinstance(item, list):
            return sub_sep.join([TsvIO.make_str(i) for i in item])
        return str(item)

    @staticmethod
    def write(records: List[dict], filename, schema=None, sep='\t', append=False, sub_sep=';'):
        """
        Write a TSV (or CSV) formatted file with the provided schema.

        :param records: List of records to be written to the file
        :param filename: Output filename
        :param schema: Order in which fields from each record will be written
        :param sep: Separator used in the file. Default is '\t'; ',' switches
            to csv.DictWriter output.
        :param append: Whether to use append mode or write a new file
        :param sub_sep: If a field contains a list of items, this separator is
            used to separate values in the list
        """
        mode = 'a' if append else 'w'
        if sep == "\t":
            # Redundant explicit close() calls inside the `with` blocks were removed.
            with open(filename, mode) as f:
                if schema is not None and not append:
                    f.write(sep.join(schema) + "\n")
                for record in records:
                    f.write(sep.join([TsvIO.make_str(record.__getitem__(field), sub_sep=sub_sep)
                                      for field in schema]))
                    f.write('\n')
        elif sep == ",":
            with open(filename, mode) as csvfile:
                writer = csv.DictWriter(csvfile, fieldnames=schema)
                writer.writeheader()
                for record in records:
                    writer.writerow(record)
|
{"/examples/run_classifier.py": ["/pytorch_pretrained_bert/file_utils.py"], "/examples/test_data_processor.py": ["/examples/run_classifier.py"]}
|
1,385
|
rlebras/pytorch-pretrained-BERT
|
refs/heads/master
|
/examples/test_data_processor.py
|
# Smoke test: build multiple-choice features for the AnliWithCSK task end to end.
from examples.run_classifier import AnliWithCSKProcessor, convert_examples_to_features_mc
from pytorch_pretrained_bert import BertTokenizer

# NOTE(review): hard-coded relative path to a sibling checkout — adjust locally.
dir = "../../abductive-nli/data/abductive_nli/one2one-correspondence/anli_with_csk/"
processor = AnliWithCSKProcessor()
examples = processor.get_train_examples(dir)

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
label_list = processor.get_labels()
max_seq_length = 128
# Logs the first few converted examples; the conversion asserts on any
# malformed feature, so reaching the print means the pipeline ran cleanly.
features = convert_examples_to_features_mc(examples, label_list, max_seq_length, tokenizer)
print("OK")
|
{"/examples/run_classifier.py": ["/pytorch_pretrained_bert/file_utils.py"], "/examples/test_data_processor.py": ["/examples/run_classifier.py"]}
|
1,391
|
RoboBrainCode/Backend
|
refs/heads/master
|
/feed/views.py
|
from django.http import HttpResponse
from feed.models import BrainFeeds, ViewerFeed, GraphFeedback
import json
import numpy as np
from django.core import serializers
import dateutil.parser
from django.views.decorators.csrf import ensure_csrf_cookie
from django.db.transaction import commit_on_success
# This is a temporary function. It will be later moved to learning_plugins
def save_graph_feedback(request):
    """Persist one piece of feedback about a graph node and echo it back as JSON.

    All fields come from the request's query string; missing fields default
    to '' (and '-1' for the node id).
    """
    graph_feedback = GraphFeedback(
        id_node=request.GET.get('id', '-1'),
        feedback_type=request.GET.get('feedback_type', ''),
        node_handle=request.GET.get('node_handle', ''),
        action_type=request.GET.get('action_type', ''),
    )
    graph_feedback.save()
    return HttpResponse(json.dumps(graph_feedback.to_json()), content_type="application/json")
# Returns k most recent feeds from BrainFeed table.
def return_top_k_feeds(request):
    """Return the k most recent feeds (default 10) as JSON.

    ViewerFeed stores the precomputed display order; the matching BrainFeeds
    rows are fetched by id and re-sorted to that order before serialization.
    """
    top_k = int(request.GET.get('k', '10'))  # number of feeds requested
    upper_limit = min(ViewerFeed.objects.count(), top_k)
    feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[:upper_limit])
    brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)
    # Reorder the (unordered) queryset results to match feed_ids in O(n),
    # s.t. feed_ids == [bf.id for bf in brainfeeds].
    feed_map_order = {fid: i for i, fid in enumerate(feed_ids)}
    brainfeeds = [None] * len(feed_ids)
    for bf in brainfeeds_db:
        brainfeeds[feed_map_order[bf.id]] = bf
    # Drop slots whose feedid had no matching BrainFeeds row (the original
    # used a 0 placeholder plus an append/remove loop to do the same filter).
    brainfeeds = [bf for bf in brainfeeds if bf is not None]
    update_scores_top_k(brainfeeds)
    json_feeds = [feed.to_json() for feed in brainfeeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# This function allows infinite scrolling.
def infinite_scrolling(request):
    """Return the next batch of feeds after the `cur` already-shown ones (infinite scroll)."""
    current_feeds = int(request.GET.get('cur', '10'))  # feeds the client already has
    extra_feeds = int(request.GET.get('k', '10'))      # additional feeds requested
    upper_limit = min(ViewerFeed.objects.count(), current_feeds + extra_feeds)
    feed_ids = list(ViewerFeed.objects.values_list('feedid', flat=True).order_by('id')[current_feeds:upper_limit])
    brainfeeds_db = BrainFeeds.objects.filter(id__in=feed_ids)
    # Reorder the (unordered) queryset results to match feed_ids in O(n),
    # s.t. feed_ids == [bf.id for bf in brainfeeds].
    feed_map_order = {fid: i for i, fid in enumerate(feed_ids)}
    brainfeeds = [None] * len(feed_ids)
    for bf in brainfeeds_db:
        brainfeeds[feed_map_order[bf.id]] = bf
    # Drop slots whose feedid had no matching BrainFeeds row (the original
    # used a 0 placeholder plus an append/remove loop to do the same filter).
    brainfeeds = [bf for bf in brainfeeds if bf is not None]
    update_scores_scroll(brainfeeds, current_feeds, extra_feeds)
    json_feeds = [feed.to_json() for feed in brainfeeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")
@commit_on_success
def update_scores_top_k(brainfeeds):
    """Mark each shown feed for re-scoring and bump its show counter (single transaction)."""
    for feed in brainfeeds:
        feed.update_score = True
        feed.log_normalized_feed_show += 1.0
        feed.save()
@commit_on_success
def update_scores_scroll(brainfeeds, current_feeds, extra_feeds):
    """Bump show counters with a page-depth discount: deeper pages add log10(1 + page)."""
    page_number = current_feeds / max(1.0, extra_feeds) + 1.0
    increment = np.log10(1.0 + page_number)  # same for every feed in the batch
    for feed in brainfeeds:
        feed.update_score = True
        feed.log_normalized_feed_show += increment
        feed.save()
# Filters feeds using the hash word
def filter_feeds_with_hashtags(request):
    """Return up to k visible feeds whose hashtags contain `hashword`, newest first."""
    hashword = request.GET.get('hashword')
    k = int(request.GET.get('k', '10'))  # result cap, default 10
    if not hashword:
        return HttpResponse(json.dumps({'Error': 'hashword not provided.'}),
                            content_type='application/json')
    matches = (BrainFeeds.objects.filter(toshow=True)
               .filter(hashtags__contains=hashword)
               .order_by('-created_at')[:k])
    payload = [feed.to_json() for feed in matches]
    return HttpResponse(json.dumps(payload), content_type="application/json")
# Filters feeds with types
def filter_feeds_with_type(request):
    """Return up to k visible feeds whose source_text equals `type`, newest first."""
    feedtype = request.GET.get('type')
    # (Removed a leftover debug print of the query parameter.)
    k = int(request.GET.get('k', '10'))  # result cap, default 10
    if not feedtype:
        error_response = {
            'Error': 'type not provided.'
        }
        return HttpResponse(json.dumps(error_response), content_type='application/json')
    brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(source_text=feedtype).order_by('-created_at')[:k]
    json_feeds = [feed.to_json() for feed in brain_feeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# Return feeds created after datetime. Input time should be in ISO string format. It is them parsed to UTC format
def return_feeds_since(request):
    """Return up to k visible feeds created at/after the ISO-format `datetime` query param."""
    raw_datetime = request.GET.get('datetime')
    k = int(request.GET.get('k', '10'))  # result cap, default 10
    # Bug fix: validate before parsing. dateutil raises TypeError on None, so
    # the original post-parse `if not time_since` check could never report the
    # missing-parameter error.
    if not raw_datetime:
        error_response = {
            'Error': 'time_since not provided.'
        }
        return HttpResponse(json.dumps(error_response), content_type='application/json')
    time_since = dateutil.parser.parse(raw_datetime)
    brain_feeds = BrainFeeds.objects.filter(toshow=True).filter(created_at__gte=time_since).order_by('-created_at')[:k]
    json_feeds = [feed.to_json() for feed in brain_feeds]
    return HttpResponse(json.dumps(json_feeds), content_type="application/json")
# Records upvotes for a feed
@ensure_csrf_cookie
def upvotes_recorder(request):
    """Record an up/down vote for a feed from a JSON POST body.

    Expects {feedid, vote (1 or -1), change}; `change` means the user flipped
    an earlier vote, so the opposite counter is decremented too. Returns the
    updated counts (clamped to >= 0 in the response only — the stored values
    may still go negative).
    """
    if request.method == 'GET':
        # GET exists only so the CSRF cookie gets set.
        return HttpResponse('Ok')
    elif request.method == 'POST':
        payload = json.loads(request.body)
        feedid = payload['feedid']
        vote_dir = payload['vote']
        change = payload['change']
        if not feedid:
            error_response = {
                'Error': 'No feedid provided'
            }
            return HttpResponse(json.dumps(error_response), content_type='application/json')
        if not vote_dir == -1 and not vote_dir == 1:
            error_response = {
                'Error': 'voteid {0} not provided. Can only be 1 or -1'.format(vote_dir)
            }
            return HttpResponse(json.dumps(error_response), content_type='application/json')
        brain_feed = BrainFeeds.objects.get(id=feedid)
        votes = {}
        if vote_dir == 1:
            brain_feed.upvotes += 1
            if change:
                # Switched from a previous downvote.
                brain_feed.downvotes -= 1
        if vote_dir == -1:
            brain_feed.downvotes += 1
            if change:
                # Switched from a previous upvote.
                brain_feed.upvotes -= 1
        votes = {
            'upvotes': max(brain_feed.upvotes, 0),
            'downvotes': max(brain_feed.downvotes, 0)
        }
        brain_feed.save()
        return HttpResponse(json.dumps(votes), content_type='application/json')
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,392
|
RoboBrainCode/Backend
|
refs/heads/master
|
/rest_api/serializer.py
|
from django.forms import widgets
from rest_framework import serializers
from feed.models import JsonFeeds
from djangotoolbox.fields import ListField
import drf_compound_fields.fields as drf
from datetime import datetime
class TagFieldS(serializers.Serializer):
    """Minimal serializer exposing an optional `media` string (appears unused in this file)."""
    media = serializers.CharField(required=False)
class FeedSerializer(serializers.Serializer):
    """Hand-written DRF serializer for JsonFeeds (pre-ModelSerializer style).

    List-typed model fields are expressed as drf_compound_fields ListFields
    of CharField, since djangotoolbox's ListField has no native DRF mapping.
    """
    pk = serializers.Field()  # Note: `Field` is an untyped read-only field.
    feedtype = serializers.CharField(required=False)
    text = serializers.CharField(required=False)
    source_text = serializers.CharField(required=False)
    source_url = serializers.CharField(required=False)
    hashtags = serializers.CharField(required=False)
    created_at = serializers.DateTimeField(required=False)
    upvotes = serializers.IntegerField(required=False)
    media = drf.ListField(serializers.CharField(), required=False)  # serializers.CharField(required=False,many=True)
    mediamap = drf.ListField(serializers.CharField(), required=False)
    mediatype = drf.ListField(serializers.CharField(), required=False)
    keywords = drf.ListField(serializers.CharField(), required=False)
    graphStructure = drf.ListField(serializers.CharField(), required=False)
    mediashow = drf.ListField(serializers.CharField(), required=False)
    username = serializers.CharField(required=False)

    def restore_object(self, attrs, instance=None):
        """
        Create or update a new snippet instance, given a dictionary
        of deserialized field values.
        Note that if we don't define this method, then deserializing
        data will simply return a dictionary of items.
        """
        if instance:
            # Update existing instance (updates are currently a no-op:
            # the attrs are ignored and the instance returned unchanged).
            #instance.feedtype = attrs.get('feedtype', instance.feedtype)
            #instance.code = attrs.get('code', instance.code)
            #instance.linenos = attrs.get('linenos', instance.linenos)
            #instance.language = attrs.get('language', instance.language)
            #instance.style = attrs.get('style', instance.style)
            return instance
        # Create new instance; the server always stamps created_at itself.
        attrs['created_at'] = datetime.now()
        return JsonFeeds(**attrs)
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,393
|
RoboBrainCode/Backend
|
refs/heads/master
|
/auth/auth.py
|
from django.http import HttpResponse
import json
from django.contrib.auth.models import User
from django.views.decorators.csrf import ensure_csrf_cookie
from django import forms
from django.contrib.auth import login, logout
from django.contrib.auth import authenticate
from base64 import b64decode
@ensure_csrf_cookie
def create_user_rb(request):
    """Register a new user from a JSON body of {username, email, password}.

    Responds 401 when the email belongs to a different username, or when the
    exact username/email pair already exists. GET only sets the CSRF cookie.
    """
    if request.method == 'GET':
        return HttpResponse('Ok')
    elif request.method == 'POST':
        payload = json.loads(request.body)
        username = payload['username']
        email = payload['email']
        password = payload['password']
        email_taken = email and User.objects.filter(email=email).exclude(username=username).count()
        if email_taken:
            return HttpResponse('This email address is already in use! Try logging in.', status=401)
        already_registered = email and User.objects.filter(email=email, username=username).count()
        if already_registered:
            return HttpResponse('This account already exists! Try logging in.', status=401)
        new_user = User.objects.create_user(username, email, password)
        new_user.save()
        return HttpResponse('Ok')
@ensure_csrf_cookie
def login_rb(request):
    """Log a user in from a base64-encoded "email:password" POST body.

    An already-authenticated session short-circuits with the user's data;
    GET only sets the CSRF cookie. The email is resolved to a username and
    then authenticated through Django's normal backend.
    """
    if request.user.is_authenticated():
        user = request.user
        user_data = {
            'id': user.id,
            'username': user.username,
            'email': user.email,
            'loggedin': 'True'
        }
        return HttpResponse(json.dumps(user_data), content_type='application/json')
    if request.method == 'GET':
        return HttpResponse('Ok')
    elif request.method == 'POST':
        decodedCredentials = b64decode(request.body)
        if not ':' in decodedCredentials:
            return HttpResponse('Not logged in', status=401)
        # Bug fix: split only on the FIRST ':' so passwords containing ':'
        # don't raise "too many values to unpack".
        email, password = decodedCredentials.split(':', 1)
        user = authenticateEmail(email, password)
        if not user:
            return HttpResponse('Invalid Credentials', status=401)
        user = authenticate(username=user.username, password=password)
        if not user:
            return HttpResponse('Invalid Credentials', status=401)
        login(request, user)
        user_data = {
            'id': user.id,
            'username': user.username,
            'email': user.email
        }
        return HttpResponse(json.dumps(user_data), content_type='application/json')
def authenticateEmail(email=None, password=None):
    """Return the User matching `email` when `password` verifies; otherwise None.

    (A wrong password falls off the end and returns None implicitly, matching
    the unknown-email path.)
    """
    try:
        candidate = User.objects.get(email=email)
    except User.DoesNotExist:
        return None
    if candidate.check_password(password):
        return candidate
def logout_rb(request):
    """End the current session and confirm."""
    logout(request)
    return HttpResponse('Logged Out')
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,394
|
RoboBrainCode/Backend
|
refs/heads/master
|
/UpdateViewerFeeds/updateViewerFeed.py
|
import ConfigParser
import pymongo as pm
from datetime import datetime
import numpy as np
import importlib
import sys
sys.path.insert(0,'/var/www/Backend/Backend/')
def readConfigFile():
    """
    Load the Django settings module named in /tmp/backend_uwsgi_setting.

    Production and test Robo Brain point uwsgi's `env` at different settings
    modules; the imported module is stored in the global `setfile`.
    """
    global setfile
    parser = ConfigParser.ConfigParser()
    parser.read('/tmp/backend_uwsgi_setting')
    env = parser.get('uwsgi', 'env')
    # env looks like "<package>.<settings_module>"; import just the module part.
    module_name = env.strip().split('.')[1]
    setfile = importlib.import_module(module_name)
def establishConnection():
    """
    Connect to the remote MongoDB and bind the two collections we use,
    exposing them via the globals `brain_feeds` and `viewer_feeds`.
    """
    global brain_feeds, viewer_feeds
    mongo_client = pm.MongoClient(host, port)
    database = mongo_client[dbname]
    brain_feeds = database['brain_feeds']
    viewer_feeds = database['viewer_feeds']
def viewerFeedsUpdate():
    """
    Rebuild ViewerFeeds: rank all BrainFeeds by score (descending) and push
    their ids in batches of 100. The old table is dropped lazily, right
    before the first insert, so a crash mid-scan leaves the old ordering up.
    """
    feeds_ordered = brain_feeds.find().sort('score', pm.DESCENDING)
    overall_counter = 0
    feeds_to_push = []
    first_time = True
    for feeds in feeds_ordered:
        try:
            new_feed = {'_id': overall_counter, 'feedid': feeds['_id'].__str__()}
            feeds_to_push.append(new_feed)
            overall_counter += 1
            print("{0} {1} {2}".format(overall_counter, feeds['score'], feeds['source_url']))
            if overall_counter % 100 == 0:
                if first_time:
                    viewer_feeds.drop()
                    first_time = False
                viewer_feeds.insert(feeds_to_push)
                feeds_to_push = []
        except Exception:  # narrowed from a bare except; keep per-feed best-effort behavior
            print("**************skipping*************")
    # Bug fix: flush the final partial batch. The original silently dropped
    # the trailing (< 100) feeds — and never updated the table at all when
    # there were fewer than 100 feeds in total.
    if feeds_to_push:
        if first_time:
            viewer_feeds.drop()
        viewer_feeds.insert(feeds_to_push)
def viewerFeedsUpdate_deprecated():
    """
    DEPRECATED
    Equally represent each project: round-robin one feed per source_url per
    `level`, most recent first, batching inserts 100 at a time.
    """
    different_projects = brain_feeds.distinct('source_url')
    different_projects = sorted(different_projects, key=len)
    feeds_each_project = {}
    feeds_count = {}
    # One newest-first cursor (and its size) per project URL.
    for url in different_projects:
        feeds_each_project[url] = brain_feeds.find({'source_url': url}, {'created_at': 1}).sort('created_at', pm.DESCENDING)
        feeds_count[url] = feeds_each_project[url].count()
    feeds_to_push = []
    overall_counter = 0
    level = 0  # round index: take the level-th feed of every project still having one
    first_time = True
    while True:
        toBreak = True
        remaining_projects = []
        for url in different_projects:
            if feeds_count[url] > level:
                print url
                new_feed = {}
                new_feed['_id'] = overall_counter
                new_feed['feedid'] = feeds_each_project[url][level]['_id'].__str__()
                feeds_to_push.append(new_feed)
                overall_counter += 1
                remaining_projects.append(url)
                toBreak = False
                # Drop the old table only just before the first insert.
                if overall_counter % 100 == 0:
                    if first_time:
                        viewer_feeds.drop()
                        first_time = False
                    viewer_feeds.insert(feeds_to_push)
                    feeds_to_push = []
        different_projects = remaining_projects
        if toBreak:
            # No project had a feed at this level; done.
            # NOTE(review): like the non-deprecated version, any trailing
            # partial batch in feeds_to_push is never inserted.
            break
        level += 1
if __name__=="__main__":
    # NOTE(review): `global` at module level is a no-op; these are module
    # globals regardless.
    global host, dbname, port, setfile, brain_feeds, viewer_feeds
    # Reading the setting file for the db address
    readConfigFile()
    host = setfile.DATABASES['default']['HOST']
    dbname = setfile.DATABASES['default']['NAME']
    port = int(setfile.DATABASES['default']['PORT'])
    # Establishing connection to the remote db, then rebuild the viewer table.
    establishConnection()
    viewerFeedsUpdate()
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,395
|
RoboBrainCode/Backend
|
refs/heads/master
|
/rest_api/views.py
|
# Create your views here.
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from feed.models import JsonFeeds
from rest_api.serializer import FeedSerializer
from datetime import datetime
from rest_framework import permissions
@api_view(['GET', 'POST'])
def feed_list(request):
    """GET: serialize the first 25 JsonFeeds. POST: validate and store a new feed."""
    if request.method == 'GET':
        recent = JsonFeeds.objects.all()[:25]
        return Response(FeedSerializer(recent, many=True).data)
    elif request.method == 'POST':
        serializer = FeedSerializer(data=request.DATA)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,396
|
RoboBrainCode/Backend
|
refs/heads/master
|
/feed/models.py
|
from django.db import models
from djangotoolbox.fields import ListField
from datetime import datetime
from django.db.models.signals import post_save
from queue_util import add_feed_to_queue
#from feed.models import BrainFeeds
class GraphFeedback(models.Model):
    """User feedback event on a graph node, stored verbatim from query params."""
    id_node = models.TextField()        # id of the node the feedback refers to
    feedback_type = models.TextField()
    node_handle = models.TextField()
    action_type = models.TextField()

    def to_json(self):
        """Plain-dict form for JSON responses."""
        return {"_id": self.id,
                "id_node": self.id_node,
                "feedback_type": self.feedback_type,
                "node_handle": self.node_handle,
                "action_type": self.action_type
                }

    class Meta:
        db_table = "graph_feedback"
class BrainFeeds(models.Model):
    """Denormalized, display-ready feed row; mirrored from JsonFeeds on save."""
    toshow = models.BooleanField(default=True)
    feedtype = models.TextField()  # originally feedtype -> type
    text = models.TextField()
    source_text = models.TextField()
    source_url = models.TextField(db_index=True)
    # NOTE(review): `meta` is reassigned several times below so only the last
    # value survives, and Django ignores the attribute anyway (it looks
    # mongoengine-style); db_index on the fields is what actually indexes.
    meta = {'indexes': ['source_url']}
    media = ListField()
    mediatype = ListField()
    # Bug fix: pass the callable, not datetime.now() — the original evaluated
    # it once at import time, freezing the default at server start.
    created_at = models.DateTimeField(default=datetime.now)
    hashtags = models.TextField(db_index=True)
    meta = {'indexes': ['hashtags']}
    upvotes = models.IntegerField(default=0)
    downvotes = models.IntegerField(default=0)
    jsonfeed_id = models.TextField()
    username = models.TextField()
    score = models.FloatField(default=0.0, db_index=True)
    meta = {'indexes': ['score']}
    update_score = models.BooleanField(default=True, db_index=True)
    meta = {'indexes': ['update_score']}
    log_normalized_feed_show = models.FloatField(default=1.0)

    def to_json(self):
        """Plain-dict form (created_at as ISO string) for JSON responses."""
        return {"_id": self.id,
                "toshow": self.toshow,
                "feedtype": self.feedtype,
                "text": self.text,
                "source_text": self.source_text,
                "source_url": self.source_url,
                "media": self.media,
                "mediatype": self.mediatype,
                "created_at": self.created_at.isoformat(),
                "hashtags": self.hashtags,
                "upvotes": self.upvotes,
                "downvotes": self.downvotes,
                "jsonfeed_id": self.jsonfeed_id,
                "username": self.username,
                "score": self.score,
                "log_normalized_feed_show": self.log_normalized_feed_show,
                "update_score": self.update_score
                }

    class Meta:
        db_table = 'brain_feeds'
        get_latest_by = 'created_at'
class JsonFeeds(models.Model):
    """Raw feed as submitted through the REST API; mirrored into BrainFeeds on save."""
    feedtype = models.TextField()  # originally feedtype -> type
    text = models.TextField()
    source_text = models.TextField()
    source_url = models.TextField()
    mediashow = ListField()  # parallel to media/mediatype: 'true'/'false' visibility flags
    media = ListField()
    mediatype = ListField()
    mediamap = ListField()
    keywords = ListField()
    graphStructure = ListField()
    created_at = models.DateTimeField()
    # NOTE(review): a TextField defaulting to datetime.now looks like a
    # copy-paste bug (likely meant default='' here, with the datetime default
    # belonging on created_at above) — confirm before changing.
    hashtags = models.TextField(default=datetime.now, blank=True)
    # NOTE(review): Django ignores this mongoengine-style `meta` attribute.
    meta = {'indexes': ['hashtags']}
    upvotes = models.IntegerField(default=0)
    downvotes = models.IntegerField(default=0)
    username = models.TextField()

    def to_json(self):
        """Plain-dict form (created_at as ISO string) for JSON responses."""
        return {"_id": self.id,
                "feedtype": self.feedtype,
                "text": self.text,
                "source_text": self.source_text,
                "source_url": self.source_url,
                "mediashow": self.mediashow,
                "media": self.media,
                "mediatype": self.mediatype,
                "mediamap": self.mediamap,
                "keywords": self.keywords,
                "graphStructure": self.graphStructure,
                "created_at": self.created_at.isoformat(),
                "hashtags": self.hashtags,
                "upvotes": self.upvotes,
                "downvotes": self.downvotes,
                "username": self.username
                }

    class Meta:
        db_table = 'json_feeds'
def postSaveJson(**kwargs):
    """post_save hook for JsonFeeds: enqueue the feed on SQS and mirror it
    into BrainFeeds, keeping only media entries whose mediashow flag is 'true'.
    """
    instance = kwargs.get('instance')
    print "Post Saving JsonFeed: ", instance.to_json()
    add_feed_to_queue(instance.to_json())
    # Saving JsonFeed to BrainFeed
    brain_feed = BrainFeeds(
        feedtype=instance.feedtype,
        text=instance.text,
        source_text=instance.source_text,
        source_url=instance.source_url,
        hashtags=instance.hashtags,
        jsonfeed_id=instance.id,
        username=instance.username
    )
    media = []
    mediatype = []
    # The three lists are parallel; copy only items flagged visible.
    for mediashow, _media, _mediatype in zip(instance.mediashow, instance.media, instance.mediatype):
        if mediashow.lower() == 'true':
            media.append(_media)
            mediatype.append(_mediatype)
    brain_feed.media = media
    brain_feed.mediatype = mediatype
    brain_feed.save()
    # Saving viewer feed (disabled — presumably rebuilt offline instead; verify)
    """
    numitem = ViewerFeed.objects.all().count()
    viewer_feed = ViewerFeed(
        id = numitem,
        feedid = brain_feed.id
    )
    viewer_feed.save()
    """
    # Saving JsonFeed to GraphDB
# Register the mirror hook for every JsonFeeds save.
post_save.connect(postSaveJson, JsonFeeds)
class ViewerFeed(models.Model):
    """Precomputed display order: row `id` is the position, `feedid` the BrainFeeds id."""
    feedid = models.TextField()
    id = models.IntegerField(db_index=True, primary_key=True)
    # NOTE(review): Django ignores this mongoengine-style `meta` attribute.
    meta = {'indexes': ['id']}

    def to_json(self):
        return {"_id": self.id, "id": self.id, "feedid": self.feedid}

    class Meta:
        db_table = 'viewer_feeds'
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,397
|
RoboBrainCode/Backend
|
refs/heads/master
|
/feed/urls.py
|
from django.conf.urls import patterns, url
from feed import views
# URL routes for the feed app; all handlers live in feed.views.
urlpatterns = patterns('',
    url(r'most_recent/', views.return_top_k_feeds, name='most_recent'),
    url(r'infinite_scroll/', views.infinite_scrolling, name='infinite_scrolling'),
    url(r'filter/', views.filter_feeds_with_hashtags, name='filter'),
    url(r'filter_type/', views.filter_feeds_with_type, name='filter_type'),
    url(r'since/', views.return_feeds_since, name='since'),
    url(r'upvotes/', views.upvotes_recorder, name='upvotes'),
    url(r'graph_feedback/', views.save_graph_feedback, name='graph_feedback'),
)
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,398
|
RoboBrainCode/Backend
|
refs/heads/master
|
/auth/urls.py
|
from django.conf.urls import patterns, url
import auth
# Account routes; handlers live in the auth module.
urlpatterns = patterns('',
    url(r'create_user/', auth.create_user_rb, name='create_user'),
    url(r'login/', auth.login_rb, name='login'),
    url(r'logout/', auth.logout_rb, name='logout')
)
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,399
|
RoboBrainCode/Backend
|
refs/heads/master
|
/fabfile.py
|
from __future__ import with_statement
from fabric.api import cd, env, local, settings, run, sudo
from fabric.colors import green, red
from fabric.contrib.console import confirm
def prod_deploy(user='ubuntu'):
    """Deploy to production at robobrain.me: rebase `production` onto `test`,
    push, and reload uwsgi. Prompts for confirmation first."""
    print(red('Deploying to production at robobrain.me...'))
    if not confirm('Are you sure you want to deploy to production?'):
        print(red('Aborting deploy.'))
        return  # Bug fix: previously fell through and deployed anyway.
    env.host_string = '54.149.21.165'
    env.key_filename = 'conf/www.pem'
    env.user = user
    env.shell = '/bin/zsh -l -c'
    with cd('/var/www/Backend'):
        # sudo('su - ubuntu')
        print(green('Checking out test...'))
        run('git checkout test')
        print(green('Pulling latest version of test...'))
        run('git pull origin test')
        print(green('Checking out production...'))
        run('git checkout production')
        print(green('Rebasing onto test...'))
        run('git rebase test')
        print(green('Pushing production upstream...'))
        run('git push origin production')
        print(green('Reloading server...'))
        sudo('uwsgi --reload /tmp/robobrain-master.pid')
    print(red('Done!'))
def test_deploy(user='ubuntu'):
    """Deploy to test.robobrain.me: rebase `test` onto `master`, push, reload uwsgi."""
    env.host_string = '54.148.225.192'
    env.key_filename = 'conf/www.pem'
    env.user = user
    env.shell = '/bin/zsh -l -c'
    print(red('Deploying to test at test.robobrain.me...'))
    # (message, git command) pairs, executed in order on the remote host.
    steps = [
        ('Checking out master...', 'git checkout master'),
        ('Pulling latest version of master...', 'git pull origin master'),
        ('Checking out test...', 'git checkout test'),
        ('Rebasing onto master...', 'git rebase master'),
        ('Pulling latest version of test...', 'git pull origin test'),
        ('Push the latest version of test...', 'git push origin test'),
    ]
    with cd('/var/www/Backend'):
        for message, command in steps:
            print(green(message))
            run(command)
        print(green('Reloading server...'))
        sudo('uwsgi --reload /tmp/robobrain-master.pid')
    print(red('Done!'))
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,400
|
RoboBrainCode/Backend
|
refs/heads/master
|
/rest_api/urls.py
|
from django.conf.urls import patterns, url
from rest_framework.urlpatterns import format_suffix_patterns
# REST API routes; format_suffix_patterns adds optional .json/.api suffixes.
urlpatterns = patterns('rest_api.views',
    url(r'^feeds/$', 'feed_list'),
    #url(r'^snippets/(?P<pk>[0-9]+)$', 'snippet_detail'),
)
urlpatterns = format_suffix_patterns(urlpatterns)
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,401
|
RoboBrainCode/Backend
|
refs/heads/master
|
/feed/queue_util.py
|
#!/usr/bin/python
import boto
import json
import traceback
from boto.sqs.message import RawMessage
from bson import json_util
# SQS connection and queue handle, created once at import time.
# SECURITY NOTE(review): AWS credentials are hard-coded here; they should be
# rotated and loaded from the environment or an IAM role instead.
conn = boto.sqs.connect_to_region(
    "us-west-2",
    aws_access_key_id='AKIAIDKZIEN24AUR7CJA',
    aws_secret_access_key='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')
feed_queue = conn.create_queue('feed_queue')
def add_feed_to_queue(json_feed):
    """Serialize `json_feed` and push it onto the SQS feed_queue.

    Best-effort: serialization/transport errors are printed (traceback plus
    the offending payload) and swallowed, so a feed save never fails because
    the queue is unreachable.
    """
    m = RawMessage()
    try:
        # json_util.default handles BSON types (e.g. ObjectId) from Mongo.
        m.set_body(json.dumps(json_feed, default=json_util.default))
        feed_queue.write(m)
    except Exception, e:
        print traceback.format_exc()
        print json_feed
if __name__ == '__main__':
    # Manual smoke test: enqueue one representative feed document.
    add_feed_to_queue({
        "username" : "arzav",
        "_id": "546e6a2f5caae434656bbc36",
        "feedtype" : "",
        "mediashow" : [ ],
        "text" : "#Simhat_Torah is a synonym of #Rejoicing_in_the_Law",
        "hashtags" : " simhat_torah rejoicing_in_the_law",
        "mediatype" : [ ],
        "source_url" : "http://wordnet.princeton.edu/",
        "source_text" : "WordNet",
        "mediamap" : [ ],
        "media" : [ ],
        "keywords": ["Simhat_Torah","Rejoicing_in_the_Law","synonym","wordnet"],
        "upvotes" : 0,
        "graphStructure": ["#same_synset: #0 -> #1", "#same_synset: #1 -> #0"]})
|
{"/feed/views.py": ["/feed/models.py"], "/rest_api/serializer.py": ["/feed/models.py"], "/rest_api/views.py": ["/feed/models.py", "/rest_api/serializer.py"]}
|
1,410
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/web_scrap/raps_org.py
|
from settings import GOOGLE_DRIVER, DATA_PATH
import time
def WebScrap():
    """Scrape the RAPS COVID-19 vaccine tracker table and dump it pipe-delimited to CSV."""
    print("Raps webscrap: Started...")
    driver = GOOGLE_DRIVER
    driver.get('https://www.raps.org/news-and-articles/news-articles/2020/3/covid-19-vaccine-tracker')
    table = driver.find_element_by_id("vax_wrapper")
    table.find_element_by_name("vax_length").send_keys("100")  # show 100 rows per page
    rows = table.find_element_by_class_name("dataTable").find_elements_by_tag_name("tr")
    table_data = []
    header_row = True
    for row in rows:
        # Header cells are <th>, body cells <td>; the first column is skipped.
        cell_tag = "th" if header_row else "td"
        header_row = False
        cells = row.find_elements_by_tag_name(cell_tag)
        row_data = [cell.text.encode('utf-8').decode('utf-8') for cell in cells[1:]]
        table_data.append(row_data)
    import csv
    with open(DATA_PATH + r'/raps.org.tabledata.csv', 'w') as out_file:
        writer = csv.writer(out_file, delimiter='|', lineterminator='\n')
        writer.writerows(table_data)
    time.sleep(60 * 1)
    print("Raps webscrap: Completed...")
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,411
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/web_scrap/airtable_com.py
|
import time
from settings import GOOGLE_DRIVER
def WebScrap():
    """Drive the Airtable COVID-19 vaccine view UI to export its data.

    Click sequence is order-critical: open the view config, switch to the
    third view type, open the view menu popover, then click its first menu
    item (presumably the CSV download — verify against the live page). The
    sleeps wait for the UI to settle and the download to finish.
    """
    print("Airtable webscrap: Started...")
    driver = GOOGLE_DRIVER
    driver.get('https://airtable.com/shrSAi6t5WFwqo3GM/tblEzPQS5fnc0FHYR/viweyymxOAtNvo7yH?blocks=bipZFzhJ7wHPv7x9z')
    table = driver.find_element_by_id("table")
    table.find_element_by_class_name('viewConfigContainer').find_element_by_class_name('link-quiet').click()
    time.sleep(5)
    table.find_element_by_class_name('viewSwitcherContainer').find_elements_by_tag_name('li')[2].click()
    time.sleep(5)
    viewMenuPopover = table.find_elements_by_class_name("viewMenuPopover")[0]
    viewMenuPopover.click()
    time.sleep(3)
    viewMenuPopover.find_element_by_class_name("menu").find_element_by_tag_name("li").click()
    time.sleep(60 * 1)  # allow the download to complete
    print("Airtable webscrap: Completed...")
# References
# https://medium.com/@moungpeter/how-to-automate-downloading-files-using-python-selenium-and-headless-chrome-9014f0cdd196
# https://www.programcreek.com/python/example/100025/selenium.webdriver.ChromeOptions
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,412
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/web_scrap/who_int.py
|
from settings import GOOGLE_DRIVER, DATA_PATH
import time
def WebScrap():
    """Trigger the download of the WHO COVID-19 candidate-vaccine landscape document."""
    print("WHO webscrap: Started...")
    browser = GOOGLE_DRIVER
    browser.get('https://www.who.int/publications/m/item/draft-landscape-of-covid-19-candidate-vaccines')
    download_button = browser.find_element_by_tag_name("body").find_element_by_class_name('button-blue-background')
    download_button.click()
    time.sleep(60 * 1)  # allow the download to complete
    print("WHO webscrap: Completed...")
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,413
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/main_exce.py
|
# Pipeline entry point: scrape (Airtable, RAPS, WHO) -> preprocess -> update
# the Google Sheet. Imports are deliberately interleaved with the stages —
# presumably so each module's import-time setup runs just before use; keep
# the order.
from web_scrap import airtable_com, raps_org, who_int
import time
airtable_com.WebScrap()
raps_org.WebScrap()
who_int.WebScrap()
print("Sleep for 1 min")
time.sleep(60 * 1)
from preprocess_data import pdf_read_table, airtable
pdf_read_table.TransformPDFData()
airtable.PreProcessAirtableData()
print("Sleep for 1 min")
time.sleep(60 * 1)
import googleDb
googleDb.MainGSheetUpdate()
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,414
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/googleDb.py
|
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import time
from settings import GSHEET_CRED_FILE, GSHEET_SCOPE, GSHEET_FILE, GSHEET_WORKSHEET
from settings import WHO_INPUT_DATA, RAPS_INPUT_DATA, AIRTABLE_INPUT_DATA
from settings import VT_CORPS
import get_cosine.get_cosine
# use creds to create a client to interact with the Google Drive API;
# runs once at import time.
creds = ServiceAccountCredentials.from_json_keyfile_name(GSHEET_CRED_FILE, GSHEET_SCOPE)
client = gspread.authorize(creds)
# Find a workbook by name and open the first sheet
# Make sure you use the right name here.
sheet = client.open(GSHEET_FILE).get_worksheet(GSHEET_WORKSHEET)
# Extract and print all of the values
list_of_hashes = sheet.get_all_records()  # NOTE(review): appears unused in this module
def GetDataFromFile(file, separator):
    """Read `file` and return its lines, each split on `separator`.

    Trailing newlines are NOT stripped, so the last field of each row keeps
    its '\\n'.
    """
    with open(file) as handle:
        lines = handle.readlines()
    return [line.split(separator) for line in lines]
def GetRow(data, matchString, col):
    """Return (row, similarity) for the row whose `col` field is most cosine-
    similar to `matchString`, or (None, 0) when no row clears the 0.70 bar."""
    best_row = None
    best_score = 0
    for row in data:
        similarity = get_cosine.get_cosine.GetCosineSim([row[col], matchString])
        # Keep the strictly-best candidate above the threshold.
        if similarity > 0.70 and similarity > best_score:
            best_row = row
            best_score = similarity
    return best_row, best_score
def UpdateGoogleSheet(settings, data, gSheet):
    """For each sheet row, fuzzy-match a row in `data` and write the matched
    value plus its similarity score into the sheet.

    `settings` keys: sheetCol/dataCol (columns compared), currentSheetRow
    (first row written), updateSheetCol (target column; +1 gets the score),
    dataColForUpdate (source column copied from the matched data row).
    """
    sheetCol = settings["sheetCol"]
    dataCol = settings["dataCol"]
    currentSheetRow = settings["currentSheetRow"]
    updateSheetCol = settings["updateSheetCol"]
    dataColForUpdate = settings["dataColForUpdate"]
    for sheetRow in gSheet.get_all_values():
        foundRow = None  # Bug fix: defined up front so the handler can't hit an unbound name
        try:
            foundRow, foundRowMatchPer = GetRow(data, sheetRow[sheetCol], dataCol)
            if foundRow:
                gSheet.update_cell(currentSheetRow, updateSheetCol, foundRow[dataColForUpdate])
                gSheet.update_cell(currentSheetRow, updateSheetCol + 1, foundRowMatchPer)
                time.sleep(3)  # stay under the Sheets API rate limit
        except Exception:  # narrowed from a bare except; keep best-effort per-row behavior
            print(currentSheetRow, updateSheetCol, dataColForUpdate, foundRow)
        currentSheetRow += 1
# Import-time pipeline: load the three scraped data files, then fuzzy-merge
# each into the master sheet. Column indices below are 0-based for the data
# files and 1-based for the sheet (gspread convention).
print("WHO data loading start...")
whoData = GetDataFromFile(WHO_INPUT_DATA, "|")
print("WHO data loading complete...")
print("RAPS data loading start...")
rapsData = GetDataFromFile(RAPS_INPUT_DATA, "|")
print("RAPS data loading complete...")
print("AirTable data loading start...")
airTableData = GetDataFromFile(AIRTABLE_INPUT_DATA, "|")
print("AirTable data loading complete...")
time.sleep(10)
# Per-source merge configuration (see UpdateGoogleSheet for key meanings).
whoSettings = {
    'sheetCol': 2,
    'dataCol': 2,
    'currentSheetRow': 1,
    'updateSheetCol': 8,
    'dataColForUpdate': 4
}
rapsSettings = {
    'sheetCol': 3,
    'dataCol': 1,
    'currentSheetRow': 1,
    'updateSheetCol': 10,
    'dataColForUpdate': 2
}
airTableSettings = {
    'sheetCol': 1,
    'dataCol': 0,
    'currentSheetRow': 1,
    'updateSheetCol': 6,
    'dataColForUpdate': 3
}
print("Updating GSheet for WHO...")
UpdateGoogleSheet(whoSettings, whoData, sheet)
print("Updating GSheet for WHO Completed...")
time.sleep(10)
print("Updating GSheet for RAPS...")
UpdateGoogleSheet(rapsSettings, rapsData, sheet)
print("Updating GSheet for RAPS Completed...")
time.sleep(10)
print("Updating GSheet for AirTable...")
UpdateGoogleSheet(airTableSettings, airTableData, sheet)
print("Updating GSheet for AirTable Completed...")
time.sleep(10)
def GetPhaseCorp():
    """Parse the phase-corpus file (``key:term1,term2,...`` per line) into a dict."""
    mapping = {}
    with open(VT_CORPS, 'r') as corpus_file:
        lines = corpus_file.readlines()
    for line in lines:
        parts = line.split(':')
        mapping[parts[0]] = parts[1].split(',')
    return mapping
def GetStagePhase(stage):
    """Return the phase key whose corpus terms occur in `stage`.

    Matching is case- and whitespace-insensitive; when several phase keys
    match, the lexicographically highest key wins; '0' means no match.
    Uses the module-level `phase` dict (key -> list of terms).
    """
    needle = stage.lower().replace(' ', '')
    hits = {
        key
        for key, terms in phase.items()
        for term in terms
        if term.lower().replace(' ', '') in needle
    }
    ranked = sorted(hits, reverse=True)
    return ranked[0] if ranked else '0'
def GetFinalPhase(all_stage):
    """Majority vote over per-source phase guesses; 'Not Sure' when unusable.

    Ties keep insertion order (stable sort), so earlier sources win ties.
    """
    counts = {}
    for stage in all_stage:
        counts[stage] = counts.get(stage, 0) + 1
    result = "Not Sure"
    # NOTE(review): when every guess is distinct this sets "Not Sure", but the
    # ranking below immediately overwrites it — behavior preserved as-is.
    if len(counts) == len(all_stage):
        result = "Not Sure"
    ranked = sorted(counts.items(), key=lambda kv: kv[1], reverse=True)
    if len(ranked):
        result = ranked[0][0]
    if result == '0':
        result = "Not Sure"
    return result
def UpdateGoogleSheetFinalStage(gSheet):
    """Write the consensus phase per row to worksheet column 15.

    Reads the WHO (col 8), RAPS (col 10) and AirTable (col 6) stage strings of
    each non-header row, maps each to a phase key and votes on the result.
    """
    targetCol = 15
    for rowIdx, sheetRow in enumerate(gSheet.get_all_values()):
        if rowIdx == 0:
            continue  # skip the header row
        stages = [GetStagePhase(sheetRow[7]),   # WHO
                  GetStagePhase(sheetRow[9]),   # RAPS
                  GetStagePhase(sheetRow[5])]   # AirTable
        gSheet.update_cell(rowIdx + 1, targetCol, GetFinalPhase(stages))
        time.sleep(3)  # Sheets API write throttle
# Module-level phase corpus: maps phase key -> list of matching terms (loaded once at import).
phase = dict(GetPhaseCorp())
def MainGSheetUpdate():
    # Entry point: compute and write the consensus "Final Stage" column for every sheet row.
    print("Updating GSheet for Final Stage...")
    UpdateGoogleSheetFinalStage(sheet)
    print("Updating GSheet for Final Stage Completed...")
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,415
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/settings.py
|
import sys
# Make the project packages importable when scripts run from arbitrary CWDs.
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine')
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine\web_scrap_data')
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine\get_cosine')
sys.path.append(r'C:\Users\v-shvi\Desktop\Personal\VT\data_engine\preprocess_data')
ROOT_PATH = "../"
DATA_PATH = "data"
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
# Headless Chrome configuration shared by all scrapers.
chrome_options = Options()
chrome_options.add_argument("--headless")
chrome_options.add_argument("--window-size=1920x1080")
chrome_options.add_argument("--disable-notifications")
chrome_options.add_argument('--no-sandbox')
chrome_options.add_argument('--verbose')
# Download files straight into the data folder without prompting.
chrome_options.add_experimental_option("prefs", {
    "download.default_directory": r"C:\Users\v-shvi\Desktop\Personal\VT\data_engine\data",
    "download.prompt_for_download": False,
    "download.directory_upgrade": True,
    "safebrowsing_for_trusted_sources_enabled": False,
    "safebrowsing.enabled": False
})
chrome_options.add_argument('--disable-gpu')
chrome_options.add_argument('--disable-software-rasterizer')
# chrome_options = chrome_options
# GOOGLE_DRIVER = webdriver.Chrome(executable_path='driver/chromedriver.exe')
# NOTE(review): executable_path= / chrome_options= are deprecated in Selenium 4 — confirm the pinned selenium version.
GOOGLE_DRIVER = webdriver.Chrome(executable_path='driver/chromedriver.exe', chrome_options=chrome_options)
# SETTINGS DATA — Google Sheets credentials and the worksheet holding the tracker.
GSHEET_CRED_FILE = "credentials.json"
GSHEET_SCOPE = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
GSHEET_FILE = "Data Engine Database"
GSHEET_WORKSHEET = 6
# Pipe-delimited intermediate files produced by the preprocessing scripts.
WHO_INPUT_DATA = "data/who.int.transformed_data.csv"
RAPS_INPUT_DATA = "data/raps.org.tabledata.csv"
# AIRTABLE_INPUT_DATA = "data/COVID-19 Tracker-Vaccines.csv"
AIRTABLE_INPUT_DATA = "data/airtable.transformed_data.csv"
VT_CORPS = 'vt_corp/phase.txt'
STOPWORDS = 'english'
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,416
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/preprocess_data/airtable.py
|
from settings import DATA_PATH
import csv
def parseRowToCell(row):
    """Split one CSV line into its cells, honoring double-quoted fields.

    Commas inside a quoted field are kept as part of the cell; the quote
    characters themselves are stripped.

    Bug fix: the original only flushed the current cell on a comma, so the
    final cell of every row was silently dropped; it is now appended after
    the loop.
    """
    insideQuotes = False
    word = ""
    rowArray = []
    for letter in row:
        if letter == "\"":
            insideQuotes = not insideQuotes  # toggle at both opening and closing quotes
        elif letter == "," and not insideQuotes:
            rowArray.append(word)
            word = ""
        else:
            word += letter
    rowArray.append(word)  # flush the trailing cell (previously lost)
    return rowArray
def PreProcessAirtableData():
    """Re-encode the raw AirTable CSV export as a pipe-delimited CSV file."""
    print("Airtable csv pre-processing: Started...")
    with open(DATA_PATH + r"/COVID-19 Tracker-Vaccines.csv") as src:
        lines = src.readlines()
    matrix = []
    for line in lines:
        if "\n" in line:
            line = line.replace('\n', '')
        # Quoted rows need the stateful parser; plain rows split directly on commas.
        matrix.append(parseRowToCell(line) if "\"" in line else line.split(","))
    with open(DATA_PATH + r'/airtable.transformed_data.csv', 'w') as dst:
        csv.writer(dst, delimiter='|', lineterminator='\n').writerows(matrix)
    print("Airtable csv pre-processing: Completed...")
# PreProcessAirtableData()
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,417
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/preprocess_data/pdf_read_table.py
|
import tabula
from settings import DATA_PATH
# Extract every table from the WHO landscape PDF into one CSV (runs at import time).
file = DATA_PATH + "/novel-coronavirus-landscape-covid-19-(1).pdf"
tabula.convert_into(file, DATA_PATH + "/who_covid_data.csv", output_format="csv", pages='all')
import csv
# Load the freshly written CSV into memory for the transform step below.
# NOTE(review): file_CSV is never closed — consider a `with` block.
file_CSV = open(DATA_PATH + '/who_covid_data.csv')
data_CSV = csv.reader(file_CSV)
list_CSV = list(data_CSV)
def transformData(data):
    """Merge a group of raw PDF-table CSV rows into one clean logical row.

    `data` is a list of rows (lists of strings) that belong to the same logical
    table row; the first row seeds the cells and each following row's cells are
    appended column-wise.  Note: the seed row is removed from `data`, mutating
    the caller's list (preserved from the original implementation).

    Returns the merged row with newlines flattened to spaces, or [] for empty
    input.  Fix: removed the dead, never-used `index` counter.
    """
    if len(data) <= 0:
        return []
    merged = data[0]
    data.remove(merged)
    for raw in data:
        for c in range(len(merged)):
            cell = merged[c] + " " + raw[c].lstrip('\r\n').rstrip('\r\n').strip()
            merged[c] = cell.strip()
    return [cell.replace("\n", " ") for cell in merged]
def TransformPDFData():
    """Group the raw WHO CSV rows into logical table rows and write them out.

    Rows from `list_CSV` are accumulated until the next row with a non-empty
    4th column starts a new logical record; each group is merged by
    transformData().  The result is written pipe-delimited to
    DATA_PATH/who.int.transformed_data.csv.

    Bug fix: the final accumulated group was never flushed after the loop and
    was silently dropped from the output.
    """
    print("WHO pdf pre-processing: Started...")
    indexStartFrom = 3  # skip the leading header rows of the extracted CSV
    row = []
    transformedData = []
    for data in range(indexStartFrom, len(list_CSV)):
        if list_CSV[data][3] != '':
            # A filled 4th column marks the start of a new logical row.
            if len(row) > 0:
                transformedData.append(transformData(row))
                row = []
        row.append(list_CSV[data])
    if len(row) > 0:
        transformedData.append(transformData(row))  # flush the trailing group (was dropped)
    with open(DATA_PATH + r'/who.int.transformed_data.csv', 'w') as file:
        writer = csv.writer(file, delimiter='|', lineterminator='\n')
        writer.writerows(transformedData)
    print("WHO pdf pre-processing: Completed...")
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,418
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/test.py
|
# Sample AirTable CSV line (contains a quoted, comma-bearing field) used to
# exercise the parser below.
statement = """"Institute of Medical Biology, Chinese Academy of Medical Sciences",Vaccine,Inactivated virus,Phase II,Phase II began June 2020,Inactivated,NCT04412538,Unknown,,,N/A,https://docs.google.com/document/d/1Y4nCJJ4njzD1wiHbufCY6gqfRmj49Qn_qNgOJD62Wik/edit,6/23/2020"""
def parseRowToCell(row):
    """Split one CSV line into cells (quoted fields keep embedded commas).

    Bug fix: append the trailing cell after the loop — the original dropped
    the last field of every row.  Prints the parsed cells for inspection.
    """
    insideQuotes = False
    word = ""
    rowArray = []
    for letter in row:
        if letter == "\"":
            insideQuotes = not insideQuotes  # toggle at both opening and closing quotes
        elif letter == "," and not insideQuotes:
            rowArray.append(word)
            word = ""
        else:
            word += letter
    rowArray.append(word)  # flush the final cell (previously lost)
    print(rowArray)
    return rowArray
# Ad-hoc check: parse the sample row and print the resulting cells.
parseRowToCell(statement)
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,419
|
thevaccinetracker/data_engine
|
refs/heads/master
|
/get_cosine/get_cosine.py
|
import string
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.feature_extraction.text import CountVectorizer
from nltk.corpus import stopwords
from settings import STOPWORDS
# Materialize the stop-word list for the configured language; note this shadows
# the imported `nltk.corpus.stopwords` module name with a plain list.
stopwords = stopwords.words(STOPWORDS)
def cosine_sim_vectors(vec1, vec2):
    """Cosine similarity between two 1-D count vectors, returned as a scalar."""
    # sklearn's cosine_similarity expects 2-D (n_samples, n_features) inputs.
    row_a = vec1.reshape(1, -1)
    row_b = vec2.reshape(1, -1)
    return cosine_similarity(row_a, row_b)[0][0]
def clean_string(text):
    """Lowercase `text`, dropping punctuation characters and stop-words."""
    depunct = ''.join(ch for ch in text if ch not in string.punctuation)
    tokens = depunct.lower().split()
    return ' '.join(tok for tok in tokens if tok not in stopwords)
def GetCosineSim(sentanceList):
    """Cosine similarity between the first two sentences in `sentanceList`.

    Sentences are cleaned (lowercased, punctuation/stop-words removed) and
    count-vectorized before comparison.  Returns a float in [0, 1]; returns 0
    when vectorization fails (e.g. both sentences clean down to empty text).

    Fix: the bare `except:` is narrowed to `except Exception` so SystemExit /
    KeyboardInterrupt are no longer swallowed by the best-effort fallback.
    """
    try:
        cleaned = list(map(clean_string, sentanceList))
        vectors = CountVectorizer().fit_transform(cleaned).toarray()
        return cosine_sim_vectors(vectors[0], vectors[1])
    except Exception:
        return 0
|
{"/web_scrap/raps_org.py": ["/settings.py"], "/web_scrap/airtable_com.py": ["/settings.py"], "/web_scrap/who_int.py": ["/settings.py"], "/main_exce.py": ["/googleDb.py"], "/googleDb.py": ["/settings.py", "/get_cosine/get_cosine.py"], "/preprocess_data/airtable.py": ["/settings.py"], "/preprocess_data/pdf_read_table.py": ["/settings.py"], "/get_cosine/get_cosine.py": ["/settings.py"]}
|
1,432
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/test.py
|
import torch
import numpy as np
import os
# Scratch experiment: confirm that enumerate() iterates the list of dicts
# without copying or mutating it (the second print matches the first).
l = [{'test': 0, 'test2': 1}, {'test': 3, 'test2': 4}]
print(l)
for i, j in enumerate(l):
    print(i)
print(l)
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,433
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/dataprep/processing.py
|
import os, shutil, gc
from argparse import ArgumentParser
from time import sleep
import h5py
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy import io, signal
from scipy.signal.windows import nuttall, taylor
from .util import *
def proc(args):
    """Turn channel-extracted radar captures into range-Doppler-angle (RDA) cubes.

    For every .npy file in dataset/<args.pathin>/chext: split the capture into
    4 parts, window + FFT along range, Doppler and angle, denoise with a
    range-dependent threshold plus sidelobe suppression, and save per part a
    denoised RDA cube and a raw range-angle image under .../proc.

    args fields used: pathin, pathout (optional), saveprefix (optional).

    Fix: `np.arange(..., dtype=np.int)` used the deprecated np.int alias, which
    was removed in NumPy 1.24; the builtin `int` it aliased is used instead
    (identical behavior on all versions).
    """
    rawpath = f'dataset/{args.pathin}/chext'
    savepath = f'dataset/{args.pathout}/proc' if args.pathout else f'dataset/{args.pathin}/proc'
    print(f'[LOG] Proc | Starting: {args.pathin}')
    # Create the save folders (never wipe an existing run).
    if not os.path.isdir(savepath):
        os.makedirs(savepath + '/raw/')
        os.mkdir(savepath + '/denoised/')
    # # # PARAMETERS INIT # # #
    c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12)  # speed of light
    f_start = 76e9
    f_stop = 78e9
    Tp = 250e-6
    N = 512
    N_loop = 256
    kf = 1.1106e13
    BrdFuSca = 4.8828e-5
    fs = 2.8571e6
    fc = (f_start + f_stop)/2
    # # # CONFIGURE SIGNAL PROCESSING # # #
    # # Range dimension
    NFFT = 2**10  # number of fft points in range dim
    nr_chn = 16  # number of channels
    # fft will be computed using a hanning window to lower border effects
    win_range = np.broadcast_to(np.hanning(N-1), (N_loop, nr_chn, N-1)).T
    sca_win = np.sum(win_range[:, 0, 0])  # integral of the window for normalization
    v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf)  # vector of range values for each range bin
    r_min = 0  # min range considered
    r_max = 10  # max range considered
    arg_rmin = np.argmin(np.abs(v_range - r_min))  # index of the min range considered value
    arg_rmax = np.argmin(np.abs(v_range - r_max))  # index of the max range considered value
    vrange_ext = v_range[arg_rmin:arg_rmax+1]  # vector of range values from rmin to rmax
    # # Doppler dimension
    NFFT_vel = 256  # number of fft points in Doppler dim
    win_vel = np.broadcast_to(np.hanning(N_loop).reshape(1, 1, -1), (vrange_ext.shape[0], nr_chn, N_loop))
    scawin_vel = np.sum(win_vel[0, 0, :])
    vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp)  # vector of considered frequencies in Doppler dim
    v_vel = vfreq_vel*c0/(2*fc)  # transform freqs into velocities
    v_vel = np.delete(v_vel, np.arange(124, 132))  # delete velocities close to 0
    # # Angle dimension
    NFFT_ant = 64  # number of fft points in angle dim
    win_ant = np.broadcast_to(taylor(nr_chn, nbar=20, sll=20).reshape(1, -1, 1), (vrange_ext.shape[0], nr_chn, NFFT_vel))
    scawin_ant = np.sum(win_ant[0, :, 0])
    cal_data = io.loadmat('dataprep/calibration.mat')['CalData']  # complex calibration weights per antenna element
    cal_data = cal_data[:16]  # keep weights for TX1 only
    mcal_data = np.broadcast_to(cal_data, (N-1, cal_data.shape[0], N_loop))
    # # # PROCESS THE RDA SLICES FOR EACH FRAME # # #
    for i, fname in enumerate(os.listdir(rawpath)):
        frawname = fname.split('.')[0]
        logprefix = f'[LOG] Proc | {i+1} / {len(os.listdir(rawpath))} {frawname}'
        print(f'{logprefix} {fname}', end='\r')
        Data_orig = np.load(f'{rawpath}/{fname}')
        parts = [0, 1, 2, 3]
        SIDELOBE_LEVEL = 3
        LINTHR_HIGH = -97
        LINTHR_LOW = -107
        for part in parts:  # split processing in parts for memory, each track is split in 4
            savename = f'{args.saveprefix}_seq_{frawname.split("_")[2]}_sub_{part}' \
                if args.saveprefix else f'{frawname}_sub_{part}'
            logprefix = f'[LOG] Proc | {i*len(parts)+part+1} / {len(os.listdir(rawpath))*len(parts)} {frawname}'
            print(f'{logprefix} {savename}', end='\r')
            Data = Data_orig[:, :, part*32000:(part+1)*32000]  # each part has 32k blocks (128k/4)
            split_locs = np.arange(Data.shape[2], step=N_loop, dtype=int)[1:]  # np.int removed in NumPy 1.24
            Data = np.stack(np.split(Data, split_locs, axis=2)[:-1], axis=-1)  # sequence of radar cubes
            print(f'{logprefix} Time-split \t\t\t', end='\r')
            nsteps = Data.shape[-1]  # last dim is time
            rda_data = np.zeros((len(vrange_ext), NFFT_ant, NFFT_vel, nsteps), dtype=np.float32)
            raw_ra = np.zeros((len(vrange_ext), NFFT_ant, nsteps), dtype=np.float32)
            for j in range(nsteps):  # loop on the timesteps
                print(f'{logprefix} Timestep: {j+1} \t\t\t', end='\r')
                RawRadarCube = Data[1:, :, :, j]
                # Range fft: window, calibration and scaling are applied
                range_profile = np.fft.fft(RawRadarCube*win_range*mcal_data, NFFT, axis=0)*BrdFuSca/sca_win
                rp_ext = range_profile[arg_rmin:arg_rmax+1]  # extract only ranges of interest (0 to 10 m)
                # background subtraction for MTI
                rp_ext -= np.mean(rp_ext, axis=2, keepdims=True)
                # Doppler fft
                range_doppler = np.fft.fftshift(np.fft.fft(rp_ext*win_vel, NFFT_vel, axis=2)/scawin_vel, axes=2)
                # Angle fft
                range_angle_doppler = np.fft.fftshift(np.fft.fft(range_doppler*win_ant, NFFT_ant, axis=1)/scawin_ant, axes=1)
                # absolute value + 20log10 to compute power
                range_angle_doppler = 20*np.log10(np.abs(range_angle_doppler))
                raw_ra[..., j] = range_angle_doppler.max(2)  # store raw range-angle image
                # Denoising: range-dependent threshold, then sidelobe suppression.
                rdep_thr = np.linspace(LINTHR_HIGH, LINTHR_LOW, range_angle_doppler.shape[0]).reshape((-1, 1, 1))
                range_angle_doppler -= rdep_thr
                range_angle_doppler[range_angle_doppler < 0] = 0
                maxs = np.max(range_angle_doppler, axis=1).reshape(range_angle_doppler.shape[0], 1, range_angle_doppler.shape[2])
                threshold = maxs - SIDELOBE_LEVEL
                range_angle_doppler[range_angle_doppler < threshold] = 0
                rda_data[..., j] = range_angle_doppler
            print(f'{logprefix} Saving: {savename} \t\t\t')
            np.save(f'{savepath}/denoised/{savename}.npy', rda_data)
            np.save(f'{savepath}/raw/{savename}.npy', raw_ra)
            del Data, rda_data, split_locs, raw_ra  # release before the next big allocation
            gc.collect()
        del Data_orig
        gc.collect()
    print('\n')
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,434
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/dataprep/truth.py
|
import os
# import shutil, time, pickle
# from argparse import ArgumentParser
# import matplotlib
import matplotlib.patches as patches
from matplotlib import pyplot as plt
# from matplotlib import rc
import numpy as np
from sklearn.cluster import DBSCAN
# from .channel_extraction import ChannelExtraction
from .util import Cluster, deg2rad_shift, get_box
from .kalman_tracker import KalmanTracker
def truth(args):
    """Build ground-truth range-angle images with tracked bounding boxes.

    For each denoised RDA sequence under dataset/<args.pathin>/proc: threshold
    and DBSCAN-cluster every timestep, associate clusters to Kalman tracks via
    the Hungarian algorithm on Mahalanobis distance, and save one labelled
    range-angle PNG per timestep (boxes encoded in the filename) under
    .../final.  args fields used: pathin, pathout, saveprefix, reso.
    """
    action = 'save'
    rawpath = f'dataset/{args.pathin}/proc'
    savepath = f'dataset/{args.pathout}/final' if args.pathout else f'dataset/{args.pathin}/final'
    print(f'[LOG] Truth | Starting: {args.pathin}')
    # Create the save folder if missing (never wipe an existing one).
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    for i, fname in enumerate(os.listdir(rawpath + '/denoised')):
        frawname = args.saveprefix if args.saveprefix else args.pathin
        frawname = f'{frawname}_ra_{fname.split("_")[2]}{fname.split("_")[4].split(".")[0]}'
        logprefix = f'[LOG] Truth | {i+1} / {len(os.listdir(rawpath + "/denoised"))}'
        print(f'{logprefix} {frawname}', end='\r')
        # starting index in the loaded data
        start = 10
        # load RDA data, MUST have 4D shape: (N_range_bins, N_angle_bins, N_doppler_bins, N_timesteps)
        rda_data = np.load(f'{rawpath}/denoised/{fname}')[..., start:]
        raw_ra_seq = np.load(f'{rawpath}/raw/{fname}')[..., start:]
        # initialize clustering/tracker parameters
        MAX_AGE = 10          # max consecutive misses before a track stops being "valid"
        MIN_DET_NUMBER = 15   # min hits before a track is considered confirmed
        MIN_PTS_THR = 30      # min points for a cluster to be kept
        MIN_SAMPLES = 40      # DBSCAN min_samples
        EPS = 0.04            # DBSCAN eps (on normalized coordinates)
        thr = 20              # power threshold on the RDA map
        # init radar parameters (must match dataprep/processing.py)
        c0 = 1/np.sqrt(4*np.pi*1e-7*8.85e-12)
        f_start = 76e9
        f_stop = 78e9
        Tp = 250e-6
        kf = 1.1106e13
        fs = 2.8571e6
        fc = (f_start + f_stop)/2
        # compute range angle doppler intervals
        NFFT = 2**10
        v_range = np.arange(NFFT)/NFFT*fs*c0/(2*kf)
        r_min = 0.5
        r_max = 10
        arg_rmin = np.argmin(np.abs(v_range - r_min))
        arg_rmax = np.argmin(np.abs(v_range - r_max))
        vrange_ext = v_range[arg_rmin:arg_rmax+1]
        NFFT_ant = 64
        vang_deg = np.arcsin(2*np.arange(-NFFT_ant/2, NFFT_ant/2)/NFFT_ant)/np.pi*180
        NFFT_vel = 256
        vfreq_vel = np.arange(-NFFT_vel/2, NFFT_vel/2)/NFFT_vel*(1/Tp)
        v_vel = vfreq_vel*c0/(2*fc)
        track_id_list = list(range(1000))  # list with possible track id numbers
        tracking_list = []
        # loop over the time-steps
        for timestep in range(rda_data.shape[-1]):
            print(f'{logprefix} {frawname} Timestep: {timestep} \t\t\t', end='\r')
            # select RDA map of the current time-step
            data = rda_data[..., timestep]
            data = data[arg_rmin:arg_rmax + 1]
            # compute normalized maps for DBSCAN
            norm_ang = (vang_deg - np.min(vang_deg)) / (np.max(vang_deg) - np.min(vang_deg))
            norm_vel = (v_vel - np.min(v_vel)) / (np.max(v_vel) - np.min(v_vel))
            norm_ran = (vrange_ext - np.min(vrange_ext)) / (np.max(vrange_ext) - np.min(vrange_ext))
            rav_pts = np.asarray(np.meshgrid(vrange_ext, vang_deg, v_vel, indexing='ij'))
            norm_rav_pts = np.asarray(np.meshgrid(norm_ran, norm_ang, norm_vel, indexing='ij'))
            # select values which are over the threshold
            raw_ra = raw_ra_seq[arg_rmin:arg_rmax + 1, :, timestep]
            full_indices = (data > thr)
            data[data < thr] = 0
            rav_pts = rav_pts[:, full_indices]
            power_values_full = data[full_indices]
            norm_rav_pts = norm_rav_pts[:, full_indices]
            rav_pts_lin = rav_pts.reshape(rav_pts.shape[0], -1)
            # save range and angle for tracking
            ra_totrack = np.copy(rav_pts_lin[:2, :])
            ra_totrack[1] = deg2rad_shift(ra_totrack[1])
            normrav_pts_lin = norm_rav_pts.reshape(norm_rav_pts.shape[0], -1)
            if rav_pts.shape[1] > MIN_SAMPLES:
                # apply DBSCAN on normalized RDA map
                labels = DBSCAN(eps=EPS, min_samples=MIN_SAMPLES).fit_predict(normrav_pts_lin.T)
                unique, counts = np.unique(labels, return_counts=True)
                if not len(unique):
                    print('[WAR] Truth | DBSCAN found no clusters! Skipping frame.')
                    continue
            else:
                print('[WAR] Truth | No points to cluster! Skipping frame.')
                continue
            # loop over the detected clusters
            detected_clusters = []  # list containing all the detected clusters
            for cluster_id in unique:
                if cluster_id == -1:  # -1 is the label for noise in DBSCAN, skip it
                    continue
                number = counts[unique == cluster_id]
                if number < MIN_PTS_THR:
                    continue
                # initialize new cluster object and fill its fields
                new_cluster = Cluster(cluster_id)
                new_cluster.cardinality = number
                new_cluster.elements = ra_totrack[:, labels == cluster_id]  # range and angle
                new_cluster.dopplers = rav_pts_lin[2, labels == cluster_id]
                w = np.squeeze(power_values_full[labels == cluster_id])
                weights = w/np.sum(w)  # normalized powers
                new_cluster.center_polar = np.average(new_cluster.elements, weights=weights, axis=1).reshape(2, 1)
                new_cluster.center_cartesian = np.array([new_cluster.center_polar[0]*np.cos(new_cluster.center_polar[1]),
                                                         new_cluster.center_polar[0]*np.sin(new_cluster.center_polar[1])],
                                                        dtype=np.float64).reshape(-1, 1)
                new_cluster.box = get_box(new_cluster)
                detected_clusters.append(new_cluster)
            if not timestep:  # happens only in the first time-step: seed one track per cluster
                for cl in detected_clusters:
                    tracking_list.append(KalmanTracker(id_=track_id_list.pop(0),
                                                       s0=np.array([cl.center_cartesian[0], 0, cl.center_cartesian[1], 0],
                                                                   dtype=np.float64).reshape(-1, 1)))
                    tracking_list[-1].box = cl.box
                sel_tracking_list = np.copy(tracking_list)
            elif timestep:  # happens in all other time-steps
                # prepare the data association building the cost matrix
                detected_centers = [x.center_cartesian for x in detected_clusters]
                prev_cartcenters = []
                prev_centers = []
                if len(tracking_list) > 0:
                    for trk in tracking_list:
                        prev_cartcenters.append(trk.xy)
                        prev_centers.append(trk.rtheta)
                cost_matrix = np.zeros((len(detected_centers), len(prev_cartcenters)))
                for i in range(len(detected_centers)):
                    for j in range(len(prev_cartcenters)):
                        # cost is the Mahalanobis distance
                        cost_matrix[i, j] = KalmanTracker.get_mahalanobis_distance(
                            detected_centers[i] - prev_cartcenters[j],
                            tracking_list[j].get_S())
                cost_matrix = np.asarray(cost_matrix)
                # hungarian algorithm for track association
                matches, undet, _ = KalmanTracker.hungarian_assignment(cost_matrix)
                # handle matched tracks
                if len(matches) > 0:
                    for detec_idx, track_idx in matches:
                        # get observation, polar coords center of the detected cluster
                        obs = detected_clusters[detec_idx].center_polar
                        # get tracker object of the detection
                        current_tracker = tracking_list[track_idx]
                        # KF predict-update step
                        current_tracker.predict()
                        current_tracker.update(obs.reshape(2, 1))
                        current_tracker.box = get_box(detected_clusters[detec_idx])
                        current_tracker.hits += 1
                        current_tracker.misses_number = 0
                else:
                    print('[WAR] Truth | No detections-tracks matches found! Skipping frame.')
                    continue
                # deal with undetected tracks
                if len(undet) > 0:
                    for track_idx in undet:
                        old_tracker = tracking_list[track_idx]
                        old_tracker.misses_number += 1
                        # predict only as no obs is detected
                        old_tracker.predict()
                        # NOTE(review): w reuses box[0] (height) — box[3] (width) may have been intended; confirm.
                        old_tracker.box = get_box(None,
                                                  c=old_tracker.xy,
                                                  h=old_tracker.box[0],
                                                  w=old_tracker.box[0])
                # filter out tracks outside room borders (ghost targets)
                tracking_list = [t for t in tracking_list if (t.xy[0] > -1.70) and (t.xy[0] < 2.30)]  # kill tracks outside the room boundaries
                # select the valid tracks, i.e., the ones with less than the max. misses and enough hits
                sel_tracking_list = [t for t in tracking_list if (t.misses_number <= MAX_AGE) and (t.hits >= MIN_DET_NUMBER)]
            # Save/plot one labelled range-angle frame (timestep zero-padded to 4 digits).
            plot4train(f'{savepath}/{frawname}{int(4-len(str(timestep)))*"0"}{timestep}',
                       data,
                       raw_ra,
                       sel_tracking_list,
                       vrange_ext,
                       vang_deg,
                       args.reso,
                       action)
    print(f'[LOG] Truth | Truth data ready: {savepath}')
def imaging(tracker, cluster, data, labels, full_indices):
    """Debug view: show one cluster's range-Doppler and range-angle maps.

    Zeroes every above-threshold point not belonging to `cluster`, then
    displays the max-projected RD and RA maps with the tracker center marked.
    Blocking (plt.show); intended for interactive inspection only.
    """
    flat_data = np.copy(data.ravel())
    full_data = flat_data[full_indices]
    full_data[labels != cluster.label] = 0  # keep only this cluster's points
    flat_data[full_indices] = full_data
    flat_data = flat_data.reshape(data.shape)
    ra = flat_data.max(2)  # range-angle projection
    rd = flat_data.max(1)  # range-Doppler projection
    plt.subplot(121)
    plt.imshow(rd, aspect='auto')
    plt.subplot(122)
    plt.imshow(ra, aspect='auto', extent=(np.pi, 0.25065, 0.5, 10))
    plt.scatter(tracker.rtheta[1], tracker.rtheta[0], marker='x', c='r')
    plt.colorbar()
    plt.show()
    plt.close()
def plot(path, data_points, ra, noisy_ramap, t_list, action, index, ranges, angles):
    """Side-by-side point-cloud and RA-image plot with track bounding boxes.

    action='save' writes fig_<index>.png next to `path`; action='plot' shows
    the figure interactively.  NOTE(review): `data_points` is unused here.
    """
    boxes = np.array([kt.box for kt in t_list])
    angles = deg2rad_shift(angles)  # boxes are in shifted radians
    _, ax = plt.subplots(1, 2)
    ax[0].set_title('Point-cloud representation')
    ax[1].set_title('RA map image representation')
    ax[0].scatter(ra[1], ra[0], marker='.')
    ax[1].imshow(noisy_ramap, aspect='auto')
    ax[0].set_xlabel(r'$\theta$ [rad]')
    ax[0].set_ylabel(r'$R$ [m]')
    ax[0].set_xlim([0.25065, np.pi])
    ax[0].set_ylim([0.5, 10])
    ax[0].grid()
    for i in range(len(boxes)):
        # add real valued bb on point cloud plot
        add_bb(boxes[i], ax[0], t_list[i].id)
        # add pixel-level bb to ra image
        int_box = adjust_bb(boxes[i], ranges, angles)
        add_bb(int_box, ax[1], t_list[i].id)
    if action == 'save':
        plt.savefig(path + f'fig_{index}', format='png', dpi=300)
        plt.close()
    elif action == 'plot':
        plt.title(f'Frame {index}')
        plt.show()
        plt.close()
def plot4train(path, data_points, noisy_ramap, t_list, ranges, angles, reso=416, action='save'):
    """Render one reso x reso training image of the raw RA map.

    Up to 4 track boxes are converted to pixel coordinates and embedded in the
    output filename as '<path>_[[x,y,w,h],...].png' (whitespace stripped) for
    the YOLO data loader.  Nothing is saved when no boxes survive.
    NOTE(review): `data_points` is unused here.
    """
    boxes = np.array([kt.box for kt in t_list])
    angles = deg2rad_shift(angles)  # boxes are in shifted radians
    # Borderless square figure: 1 inch at dpi=reso gives a reso x reso image.
    fig = plt.figure(figsize=(1, 1), dpi=reso, frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    ax.axis('off')
    ax.imshow(noisy_ramap, aspect='auto')
    w_scale = reso/len(angles)   # angle bins -> pixels
    h_scale = reso/len(ranges)   # range bins -> pixels
    bbs = []
    for i in range(0, min(4, len(boxes))):
        # add pixel-level bb to ra image (order: x_center, y_center, width, height)
        bb = adjust_bb(boxes[i], ranges, angles, w_scale, h_scale)
        bbs.append(list(map(int, [bb[1][0], bb[0][0], bb[3][0], bb[2][0]])))
    if bbs and action == 'save':
        plt.savefig(f'{path}_{bbs}.png'.replace(' ', ''), format='png', dpi=reso)
    elif action == 'plot':
        plt.show()
    plt.close()
def add_bb(bb, ax, note):
    # Draw bb = (center_y, center_x, height, width) as a red rectangle on ax.
    # NOTE(review): `note` (the track id) is unused — an annotation was
    # probably intended here; confirm before removing the parameter.
    ax.add_patch(patches.Rectangle((bb[1] - bb[3]/2, bb[0] - bb[2]/2),  # top left corner coordinates
                                   bb[3],  # width
                                   bb[2],  # height
                                   linewidth=1,
                                   edgecolor='r',
                                   facecolor='none'))
def adjust_bb(bb_real, r, a, w_scale = 1, h_scale = 1):
    '''
    Map a real-valued box (center_r, center_a, height, width) onto image pixel
    coordinates via nearest-bin lookup, so the non-uniform spacing of the angle
    bins does not bias the box position.  Returns a (4, 1) array scaled by
    h_scale (range axis) and w_scale (angle axis).
    '''
    def nearest(axis, value):
        # Index of the axis bin closest to `value`.
        return np.argmin(np.abs(axis - value))

    px = np.zeros(bb_real.shape[0])
    px[0] = nearest(r, bb_real[0]) * h_scale
    px[1] = nearest(a, bb_real[1]) * w_scale
    r_lo = nearest(r, bb_real[0] - bb_real[2] / 2)
    r_hi = nearest(r, bb_real[0] + bb_real[2] / 2)
    a_hi = nearest(a, bb_real[1] + bb_real[3] / 2)
    a_lo = nearest(a, bb_real[1] - bb_real[3] / 2)
    px[2] = np.abs(r_lo - r_hi) * h_scale
    px[3] = np.abs(a_hi - a_lo) * w_scale
    return px.reshape(-1, 1)
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,435
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/dataprep/channel_extraction.py
|
import h5py
import numpy as np
import os, shutil
def chext(args):
    """Run channel extraction for every raw capture file under raw/<args.pathin>.

    args fields used: pathin, pathout (optional), saveprefix (optional).
    Results are saved as .npy files under dataset/<out>/chext by
    channel_extraction().

    Bug fix: the progress line printed the literal string "fname" instead of
    the actual filename; it now interpolates {fname} like the other log lines.
    """
    rawpath = f'raw/{args.pathin}'
    savepath = f'dataset/{args.pathout}/chext' if args.pathout else f'dataset/{args.pathin}/chext'
    print(f'[LOG] ChExt | Starting: {args.pathin}')
    # Create the save folder if missing (never wipe an existing one).
    if not os.path.isdir(savepath):
        os.makedirs(savepath)
    for i, fname in enumerate(os.listdir(rawpath)):
        logprefix = f'[LOG] ChExt | {i+1} / {len(os.listdir(rawpath))}'
        savename = f'{args.saveprefix}_seq_{i}' if args.saveprefix else f'{fname.split("_")[0]}_seq_{fname.split("_")[1].split(".")[0]}'
        print(f'{logprefix} {fname}', end='\r')  # bug fix: was the literal text "fname"
        channel_extraction(
            f'{rawpath}/{fname}',
            savepath,
            savename,
            action='SAVE',
            logprefix=logprefix)
    print('\n')
def channel_extraction(loadpath, savepath, savename, action, logprefix='', nr_chn=16):
    """Read the per-channel datasets ('Chn1'..'Chn<nr_chn>') from an HDF5 capture.

    Builds a float32 array of shape (samples_per_block, nr_chn, n_blocks) and
    either saves it as <savepath>/<savename>.npy (action='SAVE') or returns it
    (action='RETURN'); any other action only prints an error.

    NOTE(review): the file is opened 'r+' (read-write) although it is only
    read — confirm write access is really required.
    """
    with h5py.File(loadpath, 'r+') as h5data:
        print(f'{logprefix} Initializing: {loadpath}', end='\r')
        # Pre-allocate (block length, channels, number of blocks); each channel is transposed in below.
        Data = np.zeros((h5data['Chn1'].shape[1], nr_chn, h5data['Chn1'].shape[0]), dtype=np.float32)
        for i in range(nr_chn):
            print(f'{logprefix} Extracting channel {i+1} \t\t\t', end='\r')
            channel = np.asarray(h5data['Chn{}'.format(i+1)])
            Data[:, i, :] = channel.T
        print(f'{logprefix} Finalizing {savepath}', end='\r')
        if action == 'SAVE':
            print(f'{logprefix} Saving', end='\r')
            np.save(f'{savepath}/{savename}', Data)
            print(f'{logprefix} Saved: {savepath}/{savename} Data shape: {Data.shape}')
        elif action == 'RETURN':
            return Data
        else:
            print(f'[ERR] ChExt | Invalid action, please select SAVE or RETURN')
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,436
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/dataprep/util.py
|
import os
import shutil
from dataclasses import dataclass, field
from typing import List
import h5py
import matplotlib.patches as patches
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
@dataclass
class Cluster:
    """Detected cluster of radar points plus derived geometry.

    Holds the raw detections (``elements``, ``dopplers``) together with the
    centroid in polar and Cartesian coordinates and the bounding box
    [r, angle, r_extent, angle_extent] as 2x1 / 4x1 column vectors.
    """
    label: int
    cardinality: int = 0
    elements: List = field(default_factory=list)
    dopplers: List = field(default_factory=list)
    # BUGFIX: ndarray defaults must be created per instance via
    # default_factory — a bare class-level array would be *shared* (and
    # mutated) across every Cluster.  np.zeros replaces np.empty so the
    # default contents are deterministic instead of uninitialized memory.
    center_polar: np.ndarray = field(default_factory=lambda: np.zeros((2, 1)))
    center_cartesian: np.ndarray = field(default_factory=lambda: np.zeros((2, 1)))
    box: np.ndarray = field(default_factory=lambda: np.zeros((4, 1)))
def polar2cartesian(xp):
    """Convert a (range, angle[rad]) pair into a 2x1 Cartesian column vector."""
    rng, ang = xp[0], xp[1]
    xy = [rng * np.cos(ang), rng * np.sin(ang)]
    return np.array(xy, dtype=np.float64).reshape(-1, 1)
def cartesian2polar(xy):
    """Convert an (x, y) pair into a 2x1 (range, angle[rad]) column vector."""
    radius = np.sqrt(xy[0] ** 2 + xy[1] ** 2)
    angle = np.arctan2(xy[1], xy[0])
    return np.array([radius, angle]).reshape(-1, 1)
def deg2rad_shift(angles):
    """Degrees -> radians, then mirror about pi/2 (0 deg maps to pi/2 rad)."""
    rad = np.pi * np.copy(angles) / 180
    return np.pi / 2 - rad
def shift_rad2deg(angles):
    """Inverse of deg2rad_shift: un-mirror about pi/2, then radians -> degrees."""
    rad = np.pi / 2 - np.copy(angles)
    return 180 * rad / np.pi
def get_box(cluster, c=None, h=0.5, w=0.3):
    """Bounding box [r, angle, r_extent, angle_extent] as a 4x1 column vector.

    Given a cluster, the box is centred on its polar centroid and the extents
    are the spread of its elements; without one, a fixed h x w box is placed
    at centre ``c``.
    """
    if cluster is None:
        return np.array([c[0], c[1], h, w]).reshape(4, 1)
    ranges, angles = cluster.elements[0], cluster.elements[1]
    r_extent = ranges.max() - ranges.min()
    a_extent = angles.max() - angles.min()
    box = [cluster.center_polar[0].squeeze(),
           cluster.center_polar[1].squeeze(),
           r_extent,
           a_extent]
    return np.array(box).reshape(4, 1)
def IOU_score(a, b):
    """Intersection-over-Union of two axis-aligned boxes [x1, y1, x2, y2]."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    iw = ix2 - ix1
    ih = iy2 - iy1
    # A negative extent means the boxes do not overlap at all.
    if iw < 0 or ih < 0:
        return 0.0
    intersection = iw * ih
    union = ((a[2] - a[0]) * (a[3] - a[1])
             + (b[2] - b[0]) * (b[3] - b[1])
             - intersection)
    return intersection / union
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,437
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/dataprep/__init__.py
|
import argparse
import sys, gc
from .channel_extraction import chext
from .processing import proc
from .truth import truth
def parse_arg():
    """Parse CLI options for the data-preprocessing subcommand.

    Options are read from ``sys.argv[2:]`` because argv[1] selects which
    top-level module is being run.
    """
    parser = argparse.ArgumentParser(description='Data preprocessing module', add_help=True)
    options = [
        ('--pathin', dict(type=str, required=True,
                          help="Path for the input folder")),
        ('--pathout', dict(type=str,
                           help="Path for the output folder")),
        ('--saveprefix', dict(type=str,
                              help="Prefix for the save file")),
        ('--chext', dict(action='store_true',
                         help="Perform channel extraction")),
        ('--proc', dict(action='store_true',
                        help="Perform signal processing (FFT and denoising)")),
        ('--truth', dict(action='store_true',
                         help="Perform ground truth (clustering, tracking) bouding box calculations")),
        ('--objcount', dict(type=int, default=1,
                            help="Number of objects per image (default: 1)")),
        ('--reso', dict(type=int, default=416,
                        help="Input image resolution (def: 416)")),
        ('--v', dict(type=int, default=0,
                     help="Verbose (0 minimal (def), 1 normal, 2 all")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args(sys.argv[2:])
def main():
    """Run the selected preprocessing stages, releasing memory between them."""
    args = parse_arg()
    stages = ((args.chext, chext), (args.proc, proc), (args.truth, truth))
    for enabled, stage in stages:
        if enabled:
            stage(args)
            gc.collect()
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,438
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/yolo/predict.py
|
import torch
# import torch.nn as nn
# import torch.nn.functional as F
# import torch.optim as optim
# import torchvision
import torchvision.transforms as transforms
import os, sys
# import pickle, time, random
import numpy as np
# from PIL import Image
import argparse
from .darknet import DarkNet
from .dataset import *
from .util import *
def parse_arg():
    """Parse CLI options for the YOLO prediction subcommand.

    Options are read from ``sys.argv[2:]`` because argv[1] selects which
    top-level module is being run.
    """
    parser = argparse.ArgumentParser(description='MmWaveYoLo Prediction module', add_help=True)
    options = [
        ('--cfg', dict(type=str, default='yolov3micro',
                       help="Name of the network config (default: yolov3micro)")),
        ('--pathin', dict(type=str,
                          help="Path for the input folder (default: testset)")),
        ('--pathout', dict(type=str,
                           help="Path for the output folder")),
        ('--video', dict(type=str, default='False',
                         help="Create video after prediction (default: False)")),
        ('--datasplit', dict(type=float, default=0,
                             help="Dataset split percentage (default: 0 (single set))")),
        ('--seed', dict(type=float, default=0,
                        help="Seed for the random shuffling (default: 0, (no shuffle))")),
        ('--bs', dict(type=int, default=8,
                      help="Batch size (default: 8)")),
        ('--ckpt', dict(type=str, default='10.0',
                        help="Checkpoint name <'epoch'.'iteration'>")),
        ('--nms', dict(type=float, default=0.5,
                       help="NMS threshold (default: 0.5)")),
        ('--obj', dict(type=float, default=0.5,
                       help="Objectiveness threshold (default: 0.5)")),
        ('--iou', dict(type=float, default=0.5,
                       help="Intersection over Union threshold (default: 0.5)")),
        ('--reso', dict(type=int, default=416,
                        help="Input image resolution (default: 416)")),
        ('--v', dict(type=int, default=0,
                     help="Verbose (0 minimal (default), 1 normal, 2 all")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args(sys.argv[2:])
def predict():
    """Run the trained network over a test set and report precision/recall.

    Loads the checkpoint named by ``--ckpt``, predicts every image in
    ``dataset/<pathin>/final``, accumulates TP/FP statistics against the
    labels and writes the precision/recall plot under ``results/<pathout>``.
    Optionally stitches the per-frame predictions into a gif/avi.
    """
    torch.cuda.empty_cache()

    # CONSTANTS
    args = parse_arg()
    pathcfg = f"cfg/{args.cfg}.cfg"
    pathin = f"dataset/{args.pathin}/final"
    pathout = f"results/{args.pathout}"
    num_workers = 2

    # NETWORK
    darknet = DarkNet(pathcfg, args.reso, args.obj, args.nms)
    pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)
    print('# of params: ', pytorch_total_params)
    if args.v > 0:
        print(darknet.module_list)

    # IMAGE PREPROCESSING!!!
    transform = transforms.Compose([
        transforms.Resize(size=(args.reso, args.reso), interpolation=3),
        transforms.ToTensor()
    ])

    # ====================================================
    # Test data allocation
    _, testloader = getDataLoaders(pathin, transform, train_split=args.datasplit, batch_size=args.bs, \
        num_workers=num_workers, collate_fn=collate, random_seed=args.seed)
    # ====================================================

    # LOAD A CHECKPOINT!!!
    # --ckpt is given as "<epoch>.<iteration>".
    start_epoch, start_iteration = args.ckpt.split('.')
    start_epoch, start_iteration, state_dict, _, _, _, _ = load_checkpoint(
        f'save/checkpoints/',
        int(start_epoch),
        int(start_iteration)
    )
    darknet.load_state_dict(state_dict)
    # ====================================================

    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    darknet.to(device)  # Put the network on device
    if args.v > 0:
        print(next(darknet.parameters()).device)

    # Create the subsequent save folders
    if not os.path.isdir(pathout):
        os.makedirs(pathout)

    # PREDICT
    print(f'[LOG] PREDICT | Test set: {len(testloader.dataset)}')
    darknet.eval()  # set network to evaluation mode
    predList = []
    countLabels = 0
    with torch.no_grad():
        for bidx, (paths, inputs, targets) in enumerate(testloader):
            inputs = inputs.to(device)
            predictions = darknet(inputs)

            for idx, path in enumerate(paths):
                print(f'[LOG] PREDICT | Predicting {(bidx*args.bs)+idx+1}/{len(testloader.dataset)}', end='\r')
                savename = path.split('/')[-1].split('_')[2]

                # Keep only the predictions belonging to image `idx` of this batch.
                try:
                    prediction = predictions[predictions[:, 0] == idx]
                except Exception:
                    prediction = torch.Tensor([])
                    print(f'[ERROR] TEST | No prediction? {prediction}')

                tempL, _ = correctness(prediction, targets[idx], reso=darknet.reso, iou_thresh=args.iou)
                predList.extend(tempL)
                countLabels += targets[idx].size(0)

    # BUGFIX: --video is a *string* option defaulting to 'False', so the old
    # plain truthiness check was always True; compare against the sentinel.
    if args.video != 'False':
        animate_predictions(pathout, args.video)

    print(countLabels)
    predList = precision_recall(predList, countLabels)
    plot_precision_recall(predList, pathout=f'{pathout}/map', savename='')
    # ====================================================
# ====================================================
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,439
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/yolo/util.py
|
from __future__ import division
import torch
import os
from operator import itemgetter
import numpy as np
import cv2
from PIL import Image, ImageDraw
import matplotlib.pyplot as plt
def draw_prediction(img_path, prediction, target, reso, names, pathout, savename):
    """Draw ground-truth and predicted boxes onto an image and save it.

    Args
        - img_path: (str) Path to image
        - prediction: (np.array) Prediction result with size [#bbox, 8]
            8 = [batch_idx, x1, y1, x2, y2, objectness, cls_conf, class idx]
        - target: (np.array) Ground-truth labels with size [#bbox, 5]
            5 = [xc, yc, w, h, class idx] (normalized xywh; converted below)
        - reso: (int) Network input resolution
        - names: (list) Class names (currently unused in the drawing code)
        - pathout: (str) Folder to save the rendered image into
        - savename: (str) File name of the rendered image
    """
    img = Image.open(img_path).convert('RGB')
    w, h = img.size
    # Ratios map network-resolution coordinates back to image pixels.
    h_ratio = h / reso
    w_ratio = w / reso
    draw = ImageDraw.Draw(img)
    # Drawing targets (labels) — white boxes with caption below the box.
    try:
        for i in range(target.shape[0]):
            bbox = target[i, 0:4].numpy()
            bbox = xywh2xyxy(bbox, target=True)
            caption = f'truth #{i}'
            color = (255, 255, 255)
            # NOTE(review): coordinates are scaled by w/h here *and* by
            # w_ratio/h_ratio below — that only cancels out when the image
            # size equals `reso`; confirm label units against the caller.
            x1, y1, x2, y2 = bbox[0]*w, bbox[1]*h, bbox[2]*w, bbox[3]*h
            draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)),
                           outline=color, width=2)
            draw.rectangle((x1 * w_ratio, y2 * h_ratio + 15,
                            x2 * w_ratio, y2 * h_ratio),
                           fill=color)
            draw.text((x1 * w_ratio + 2, y2 * h_ratio),
                      caption, fill='black')
    except Exception:
        print(f'[ERR] TEST | Could not draw target')
    # Drawing predictions — blue boxes; outline width grows with confidence.
    try:
        for i in range(prediction.shape[0]):
            bbox = prediction[i, 1:5]
            conf = '%.2f' % prediction[i, -3]
            caption = f'pred {conf}'
            color = (0, 0, 255)
            # Predictions are already in network-resolution pixels.
            x1, y1, x2, y2 = bbox[0], bbox[1], bbox[2], bbox[3]
            draw.rectangle(((x1 * w_ratio, y1 * h_ratio, x2 * w_ratio, y2 * h_ratio)),
                           outline=color, width=int(1+prediction[i, -3]*5))
            draw.rectangle((x1 * w_ratio, y1 * h_ratio - 15,
                            x2 * w_ratio, y1 * h_ratio),
                           fill=color)
            draw.text((x1 * w_ratio + 2, y1 * h_ratio - 15),
                      caption, fill='white')
    except Exception:
        print(f'[ERR] TEST | Could not draw prediction')
    # img.show()
    os.makedirs(pathout, exist_ok=True)
    img.save(f'{pathout}/{savename}')
    img.close()
def animate_predictions(path, savetype='gif'):
    """Stitch the per-frame images in ``<path>/preds`` into an animation.

    ``savetype`` selects the container: 'gif' writes an animated GIF via
    PIL, 'avi' writes a video via OpenCV; anything else is a no-op.
    """
    fps = 5
    if savetype == 'gif':
        frame_dir = f'{path}/preds'
        frames = [Image.open(f'{frame_dir}/{name}').copy()
                  for name in sorted(os.listdir(frame_dir)) if name.endswith('.png')]
        os.makedirs(path, exist_ok=True)
        first, rest = frames[0], frames[1:]
        first.save(f'{path}/sequence.gif', save_all=True, \
                   optimize=False, append_images=rest, loop=0, \
                   duration=int(1000/fps))
        print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.gif')
    elif savetype == 'avi':
        frame_names = [img for img in sorted(os.listdir(f'{path}/preds')) if img.endswith(".png")]
        height, width, _ = cv2.imread(f'{path}/preds/{frame_names[0]}').shape
        video = cv2.VideoWriter(f'{path}/sequence.avi', 0, fps, (width, height))
        for name in frame_names:
            video.write(cv2.imread(f'{path}/preds/{name}'))
        cv2.destroyAllWindows()
        video.release()
        print(f'[LOG] PREDICT | Prediction sequence saved as {path}/sequence.avi')
def IoU(box1, box2):
    """Compute the IoU between two (broadcastable) xyxy box tensors.

    Inputs living on the GPU are moved to the CPU first; the +1 terms treat
    the corner coordinates as inclusive.
    """
    # Work on CPU copies so downstream .numpy() consumers behave uniformly.
    if box1.is_cuda == True:
        box1 = box1.cpu()
    if box2.is_cuda == True:
        box2 = box2.cpu()

    # Corners of the intersection rectangle.
    ix1 = torch.max(box1[..., 0], box2[..., 0])
    iy1 = torch.max(box1[..., 1], box2[..., 1])
    ix2 = torch.min(box1[..., 2], box2[..., 2])
    iy2 = torch.min(box1[..., 3], box2[..., 3])

    # Clamp to zero for disjoint boxes.
    inter_area = torch.clamp(ix2 - ix1 + 1, min=0) * torch.clamp(iy2 - iy1 + 1, min=0)
    area1 = (box1[..., 2] - box1[..., 0] + 1) * (box1[..., 3] - box1[..., 1] + 1)
    area2 = (box2[..., 2] - box2[..., 0] + 1) * (box2[..., 3] - box2[..., 1] + 1)

    return inter_area / (area1 + area2 - inter_area)
# TP / FP / FN / TN calculations
def correctness(prediction, target, reso=416, iou_thresh=0.5):
    """Classify each prediction of one image as TP or FP against the labels.

    Args:
        prediction: tensor [num_pred, 8] with xyxy pixel boxes
            (batch_idx, x1, y1, x2, y2, objectness, cls_conf, cls_idx).
        target: tensor [num_label, 5] with normalized xywh boxes + class idx.
        reso: network input resolution used to scale the labels to pixels.
        iou_thresh: minimum IoU for a prediction to count as a true positive.

    Returns:
        (tempList, tempCor): tempList holds one
        [objectness_str, best_iou, is_false_positive] entry per prediction;
        tempCor accumulates [TP, FP, FN, TN] counts for this image.
    """
    flagP = np.zeros([prediction.size(0), 2])  # per-prediction [best IoU, matched label idx]
    flagP[:, 1] -= 1                           # -1 => matched no label yet
    tempCor = np.zeros(4)                      # [TP, FP, FN, TN]
    flagT = np.zeros(target.size(0)) - 1       # -1 => label not yet claimed by a TP
    tempList = []

    if prediction.size(0) != 0:
        # For every prediction, record the label it overlaps most.
        for i, p in enumerate(prediction):
            for j, t in enumerate(target):
                iou = IoU(p[1:5], xywh2xyxy(t[0:4]*reso)).numpy()[0]
                if iou > flagP[i, 0]:
                    flagP[i, :] = [iou, j]

        for i in range(flagP.shape[0]):
            if flagP[i, 0] >= iou_thresh and flagT[int(flagP[i, 1])] == -1:
                # True Positive: iou >= thresh and the label is still unclaimed
                tempCor[0] += 1
                flagT[int(flagP[i, 1])] = 1
                tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], False])
            else:
                # False Positive: iou < thresh or duplicate of a claimed label.
                # BUGFIX: accumulate with += (was `= 1`, capping FP at one
                # per image while TP/FN use +=).
                tempCor[1] += 1
                tempList.append([f'{prediction[i, -3]:.2f}', flagP[i, 0], True])

    # False Negative
    # NOTE(review): this registers at most one FN per image, and only when
    # *no* prediction overlapped any label — confirm whether per-missed-label
    # counting was intended instead.
    if np.count_nonzero(flagP[:, 1] == -1) == prediction.size(0):
        tempCor[2] += 1

    return tempList, tempCor
# Precision and recall calculations
# Precision and recall calculations
def precision_recall(predList, countLabels):
    """Append cumulative TP/FP counts, precision and recall to each entry.

    ``predList`` rows are [objectness_str, iou, is_fp]; rows are ranked by
    objectness (desc), with TPs before FPs and higher IoU first, before the
    cumulative statistics are accumulated in rank order.
    """
    # Stable sorts applied from least- to most-significant key.
    for key, descending in ((1, True), (2, False), (0, True)):
        predList.sort(key=itemgetter(key), reverse=descending)

    tp_so_far = 0
    fp_so_far = 0
    for entry in predList:
        if entry[2] == False:
            tp_so_far += 1   # TP
        else:
            fp_so_far += 1   # FP
        precision = float(tp_so_far / (tp_so_far + fp_so_far))
        recall = float(tp_so_far / countLabels)
        entry.extend([tp_so_far, fp_so_far, precision, recall])
    return predList
# Drawing precision/recall curve
def plot_precision_recall(predList, pathout, savename=''):
predArr = np.array(predList, dtype=np.float)
# print(np.round(predArr[:,-2:], 2))
fig, _= plt.subplots(2, 1, gridspec_kw={'height_ratios': [3, 1]})
plt.subplot(2, 1, 1)
plt.plot(predArr[:, -1], predArr[:, -2])
plt.plot(np.round(predArr[:,-1], 2), np.round(predArr[:,-2], 2))
plt.grid(True)
plt.title(f'Precision/Recall graph ({savename})')
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.subplot(2, 1, 2)
plt.plot(predArr[:,0])
ax = plt.gca()
ax.axes.xaxis.set_visible(False)
# ax.axes.yaxis.set_visible(False)
plt.rcParams['axes.titley'] = 1.0 # y is in axes-relative coordinates.
plt.rcParams['axes.titlepad'] = -14 # pad is in points...
plt.title(f'Objectiveness score')
if savename != '':
os.makedirs(f'{pathout}/{savename}', exist_ok=True)
plt.savefig(f'{pathout}/{savename}', dpi=100)
print(f'[LOG] TRAIN | Precision/Recall graph save \"{pathout}/{savename}\"')
else:
plt.show()
plt.close()
def xywh2xyxy(bbox, target=False):
    """Convert center boxes (xc, yc, w, h) to corner boxes (x1, y1, x2, y2).

    The ``target`` path takes a plain sequence and returns a list; otherwise
    a tensor [..., 4] is converted (a 1-D input is promoted to [1, 4]).
    """
    if target:
        xc, yc = bbox[0], bbox[1]
        half_w, half_h = bbox[2] / 2, bbox[3] / 2
        return [xc - half_w, yc - half_h, xc + half_w, yc + half_h]
    boxes = bbox.clone()
    if boxes.dim() == 1:
        boxes = boxes.unsqueeze(0)
    # Materialize the centers before the in-place writes below: the original
    # kept *views* into the tensor and compensated for the aliasing with a
    # `2 * half` term; fresh copies make the arithmetic straightforward.
    xc = boxes[..., 0].clone()
    yc = boxes[..., 1].clone()
    half_w = boxes[..., 2] / 2
    half_h = boxes[..., 3] / 2
    boxes[..., 0] = xc - half_w
    boxes[..., 1] = yc - half_h
    boxes[..., 2] = xc + half_w
    boxes[..., 3] = yc + half_h
    return boxes
def xyxy2xywh(bbox, target=False):
    """Convert corner boxes (x1, y1, x2, y2) to center boxes (xc, yc, w, h).

    The ``target`` path takes a plain sequence and returns a list; otherwise
    a tensor [..., 4] is converted (a 1-D input is promoted to [1, 4]).
    """
    if target:
        w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
        xc, yc = bbox[0] + w/2, bbox[1] + h/2
        return [xc, yc, w, h]
    boxes = bbox.clone()
    if boxes.dim() == 1:
        boxes = boxes.unsqueeze(0)
    # Width/height are fresh tensors, so the in-place writes cannot alias.
    width = boxes[..., 2] - boxes[..., 0]
    height = boxes[..., 3] - boxes[..., 1]
    boxes[..., 0] = boxes[..., 0] + width / 2
    boxes[..., 1] = boxes[..., 1] + height / 2
    boxes[..., 2] = width
    boxes[..., 3] = height
    return boxes
def load_checkpoint(checkpoint_dir, epoch, iteration):
    """Load the '<epoch>.<iteration>.ckpt' file from ``checkpoint_dir``.

    Returns:
        (start_epoch, start_iteration, state_dict, tlosses, vlosses,
        optimizer, scheduler) exactly as stored in the checkpoint.

    Raises:
        Exception: when the checkpoint file does not exist.
        AssertionError: when the stored epoch/iteration disagree with the
        requested ones.
    """
    path = os.path.join(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')
    if not os.path.isfile(path):
        raise Exception("Checkpoint in epoch %d doesn't exist" % epoch)

    checkpoint = torch.load(path)
    start_epoch = checkpoint['epoch']
    start_iteration = checkpoint['iteration']
    contents = (checkpoint['state_dict'], checkpoint['tlosses'],
                checkpoint['vlosses'], checkpoint['optimizer'],
                checkpoint['scheduler'])

    assert epoch == start_epoch, "epoch != checkpoint's start_epoch"
    assert iteration == start_iteration, "iteration != checkpoint's start_iteration"

    return (start_epoch, start_iteration) + contents
def save_checkpoint(checkpoint_dir, epoch, iteration, save_dict):
    """Write ``save_dict`` to '<checkpoint_dir>/<epoch>.<iteration>.ckpt'.

    The dict's own epoch/iteration must match the file name; an already
    existing checkpoint is overwritten with a warning.
    """
    os.makedirs(checkpoint_dir, exist_ok=True)
    path = os.path.join(checkpoint_dir, f'{epoch}.{iteration}.ckpt')

    assert epoch == save_dict['epoch'], "[ERROR] epoch != save_dict's start_epoch"
    assert iteration == save_dict['iteration'], "[ERROR] iteration != save_dict's start_iteration"

    if os.path.isfile(path):
        print(f"[WARNING] Overwrite checkpoint in epoch {epoch}, iteration {iteration}")
    try:
        torch.save(save_dict, path)
    except Exception:
        raise Exception("[ERROR] Fail to save checkpoint")
    print(f"[LOG] Checkpoint {epoch}.{iteration}.ckpt saved")
def parse_cfg(cfgfile):
    """Parse a darknet .cfg file into a list of block dicts.

    Each '[type]' section becomes a dict holding a 'type' key plus one
    entry per 'key=value' line; blank lines and '#' comments are ignored.
    """
    with open(cfgfile, 'r') as fh:
        raw = fh.read().split('\n')
    # Same filter order as before: drop blanks, drop comment lines (tested
    # on the *unstripped* first character), then trim the survivors.
    lines = [ln for ln in raw if len(ln) > 0]
    lines = [ln for ln in lines if ln[0] != '#']
    lines = [ln.strip() for ln in lines]

    blocks = []
    current = {}
    for ln in lines:
        if ln.startswith('['):
            # A new section header closes the previous block, if any.
            if current:
                blocks.append(current)
            current = {"type": ln[1:-1].rstrip()}
        else:
            key, value = ln.split("=")
            current[key.rstrip()] = value.lstrip()
    blocks.append(current)
    return blocks
def plot_losses(tlosses, vlosses=None, savepath=''):
    """Plot the training (and optionally validation) loss curve per epoch.

    Saves to ``<savepath>/loss_<epochs>.png`` when a savepath is given,
    otherwise shows the figure interactively.
    """
    epochs = len(tlosses)
    plt.plot(range(0, epochs), tlosses)
    if vlosses:
        plt.plot(range(0, len(vlosses)), vlosses)
        plt.legend(['Train loss', 'Valid loss'], loc='upper left')
        plt.title(f'Training and Validation loss ({epochs} Epochs) ')
    else:
        plt.legend(['Train loss'], loc='upper left')
        plt.title(f'Training loss ({epochs} Epochs) ')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    if savepath != '':
        os.makedirs(savepath, exist_ok=True)
        plt.savefig(f'{savepath}/loss_{epochs}.png', dpi=100)
        print(f'[LOG] TRAIN | Loss graph save \"{savepath}/loss_{epochs}.png\"')
    else:
        plt.show()
    plt.close()
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,440
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/yolo/darknet.py
|
from __future__ import division
import torch, torch.nn as nn, torch.nn.functional as F
# from torch.autograd import Variable
import numpy as np
# import cv2
# from pprint import pprint
from .util import *
# =================================================================
# MAXPOOL (with stride = 1, NOT SURE IF NEEDED)
class MaxPool1s(nn.Module):
    """Max pooling with stride 1 and replicate edge padding, preserving the
    spatial size of the input for any kernel size."""

    def __init__(self, kernel_size):
        super(MaxPool1s, self).__init__()
        self.kernel_size = kernel_size
        # Pad right/bottom so every input position yields an output value.
        self.pad = kernel_size - 1

    def forward(self, x):
        padded_x = F.pad(x, (0, self.pad, 0, self.pad), mode="replicate")
        # BUGFIX: the stride must be 1 explicitly. The old call passed
        # self.pad (= kernel_size - 1) as the positional stride argument,
        # which equals 1 only for kernel_size == 2 and silently downsampled
        # for any larger kernel.
        pooled_x = nn.MaxPool2d(self.kernel_size, stride=1)(padded_x)
        return pooled_x
# EMPTY LAYER
# EMPTY LAYER
# Placeholder module used for 'route' and 'shortcut' cfg blocks: the actual
# tensor wiring happens in DarkNet.forward; this only keeps the module list
# index-aligned with the parsed cfg blocks.
class EmptyLayer(nn.Module):
    def __init__(self):
        super(EmptyLayer, self).__init__()
# YOLO / PREDICTION LAYER
# YOLO / PREDICTION LAYER
class YOLOLayer(nn.Module):
    """Detection head: decodes raw feature maps into box predictions.

    In training mode it instead builds and returns the YOLO loss dict.
    NOTE(review): every tensor is moved with .cuda(), so this layer
    requires a CUDA device — confirm before running CPU-only.
    """
    def __init__(self, anchors, num_classes, reso, ignore_thresh):
        """
        Args:
            anchors: list of (w, h) anchor sizes in input-resolution pixels.
            num_classes: number of object classes.
            reso: network input resolution.
            ignore_thresh: cfg-provided threshold (stored; not used here).
        """
        super(YOLOLayer, self).__init__()
        self.anchors = anchors
        self.num_classes = num_classes
        self.reso = reso
        self.ignore_thresh = ignore_thresh
    def forward(self, x, y_true=None):
        """Decode feature map ``x`` [bs, (5+nC)*nA, gs, gs]; with labels
        ``y_true`` (training) return the loss dict, else the predictions."""
        bs, _, gs, _ = x.size()
        stride = self.reso // gs  # no pooling used, stride is the only downsample
        num_attrs = 5 + self.num_classes  # tx, ty, tw, th, p0
        nA = len(self.anchors)
        # Anchors are expressed in grid cells rather than pixels.
        scaled_anchors = torch.Tensor(
            [(a_w / stride, a_h / stride) for a_w, a_h in self.anchors]).cuda()
        # Re-organize [bs, (5+nC)*nA, gs, gs] => [bs, nA, gs, gs, 5+nC]
        x = x.view(bs, nA, num_attrs, gs, gs).permute(
            0, 1, 3, 4, 2).contiguous()
        pred = torch.Tensor(bs, nA, gs, gs, num_attrs).cuda()
        # Sigmoid keeps the center offsets inside their grid cell.
        pred_tx = torch.sigmoid(x[..., 0]).cuda()
        pred_ty = torch.sigmoid(x[..., 1]).cuda()
        pred_tw = x[..., 2].cuda()
        pred_th = x[..., 3].cuda()
        pred_conf = torch.sigmoid(x[..., 4]).cuda()
        if self.training == True:
            pred_cls = x[..., 5:].cuda()  # softmax in cross entropy
        else:
            pred_cls = F.softmax(x[..., 5:], dim=-1).cuda()  # class
        # Grid cell indices used to convert offsets into absolute positions.
        grid_x = torch.arange(gs).repeat(gs, 1).view(
            [1, 1, gs, gs]).float().cuda()
        grid_y = torch.arange(gs).repeat(gs, 1).t().view(
            [1, 1, gs, gs]).float().cuda()
        anchor_w = scaled_anchors[:, 0:1].view((1, nA, 1, 1))
        anchor_h = scaled_anchors[:, 1:2].view((1, nA, 1, 1))
        # Standard YOLO decoding: cell offset + sigmoid shift, exp-scaled anchors.
        pred[..., 0] = pred_tx + grid_x
        pred[..., 1] = pred_ty + grid_y
        pred[..., 2] = torch.exp(pred_tw) * anchor_w
        pred[..., 3] = torch.exp(pred_th) * anchor_h
        pred[..., 4] = pred_conf
        pred[..., 5:] = pred_cls
        if not self.training:
            # Back to input-resolution pixels for inference consumers.
            pred[..., :4] *= stride
            return pred.view(bs, -1, num_attrs)
        else:
            loss = YOLOLoss([bs, nA, gs], scaled_anchors, self.num_classes, pred, [pred_tx, pred_ty, pred_tw, pred_th])
            loss = loss(x, y_true.float())
            return loss
# YOLOv3 Loss
# YOLOv3 Loss
class YOLOLoss(nn.Module):
    """Builds the YOLOv3 training loss from decoded predictions and labels.

    Constructed per forward pass by YOLOLayer with the already-decoded
    prediction tensor and the raw (tx, ty, tw, th) regressions.
    """
    def __init__(self, shape, scaled_anchors, num_classes, pred, pred_t):
        """
        Args:
            shape: [batch_size, num_anchors, grid_size].
            scaled_anchors: anchors expressed in grid cells.
            num_classes: number of object classes.
            pred: decoded predictions [bs, nA, gs, gs, 5+nC].
            pred_t: list of raw regressions [tx, ty, tw, th].
        """
        super(YOLOLoss, self).__init__()
        self.bs = shape[0]
        self.nA = shape[1]
        self.gs = shape[2]
        self.scaled_anchors = scaled_anchors
        self.num_classes = num_classes
        self.predictions = pred
        self.pred_conf = pred[..., 4]
        self.pred_cls = pred[..., 5:]
        self.pred_tx = pred_t[0]
        self.pred_ty = pred_t[1]
        self.pred_tw = pred_t[2]
        self.pred_th = pred_t[3]
    def forward(self, x, y_true):
        """Return a dict of loss components computed against labels
        ``y_true`` (per image: normalized xywh boxes + class index)."""
        # Ground-truth tensors, one slot per (image, anchor, cell).
        gt_tx = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_ty = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_tw = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_th = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_conf = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        gt_cls = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        obj_mask = torch.zeros(self.bs, self.nA, self.gs, self.gs, requires_grad=False).cuda()
        for idx in range(self.bs):
            for y_true_one in y_true[idx]:
                y_true_one = y_true_one.cuda()
                # Label box in grid-cell units; (gt_i, gt_j) is its cell.
                gt_bbox = y_true_one[:4] * self.gs
                gt_cls_label = int(y_true_one[4])
                gt_xc, gt_yc, gt_w, gt_h = gt_bbox[0:4]
                gt_i = gt_xc.long().cuda()
                gt_j = gt_yc.long().cuda()
                # Assign the label to the anchor with the best IoU in its cell.
                pred_bbox = self.predictions[idx, :, gt_j, gt_i, :4]
                ious = IoU(xywh2xyxy(pred_bbox), xywh2xyxy(gt_bbox))
                best_iou, best_a = torch.max(ious, 0)
                w, h = self.scaled_anchors[best_a]
                # Inverse of the decoding: log-scale extents, in-cell offsets.
                gt_tw[idx, best_a, gt_j, gt_i] = torch.log(gt_w / w)
                gt_th[idx, best_a, gt_j, gt_i] = torch.log(gt_h / h)
                gt_tx[idx, best_a, gt_j, gt_i] = gt_xc - gt_i.float()
                gt_ty[idx, best_a, gt_j, gt_i] = gt_yc - gt_j.float()
                gt_conf[idx, best_a, gt_j, gt_i] = best_iou
                gt_cls[idx, best_a, gt_j, gt_i] = gt_cls_label
                obj_mask[idx, best_a, gt_j, gt_i] = 1
        MSELoss = nn.MSELoss(reduction='sum')
        BCELoss = nn.BCELoss(reduction='sum')
        CELoss = nn.CrossEntropyLoss(reduction='sum')
        loss = dict()
        # Xc, Yc, W, H loss calculation (only where an object is assigned)
        loss['x'] = MSELoss(self.pred_tx * obj_mask, gt_tx * obj_mask)
        loss['y'] = MSELoss(self.pred_ty * obj_mask, gt_ty * obj_mask)
        loss['w'] = MSELoss(self.pred_tw * obj_mask, gt_tw * obj_mask)
        loss['h'] = MSELoss(self.pred_th * obj_mask, gt_th * obj_mask)
        # CLASS loss calculation
        # loss['cls'] = BCELoss(pred_cls * obj_mask, cls_mask * obj_mask)
        loss['cls'] = CELoss((self.pred_cls * obj_mask.unsqueeze(-1)).view(-1, self.num_classes),
                             (gt_cls * obj_mask).view(-1).long())
        # OBJECTIVENESS loss calculation
        # loss['conf'] = MSELoss(self.pred_conf * obj_mask * 5, gt_conf * obj_mask * 5) + \
        #     MSELoss(self.pred_conf * (1 - obj_mask), gt_conf * (1 - obj_mask))
        # No-object cells are down-weighted, as in the YOLO papers.
        lambda_noobj = 0.5
        loss['conf'] = BCELoss(self.pred_conf * obj_mask, (gt_conf * obj_mask).detach()) + \
            lambda_noobj * BCELoss(self.pred_conf * (1 - obj_mask), (gt_conf * (1 - obj_mask)).detach())
        # pprint(loss)
        return loss
# Non-Max Suppression
# Non-Max Suppression
class NMSLayer(nn.Module):
    """
    NMS layer which performs Non-maximum Suppression
    1. Filter background
    2. Get prediction with particular class
    3. Sort by confidence
    4. Suppress non-max prediction
    """
    def __init__(self, conf_thresh=0.65, nms_thresh=0.55):
        """
        Args:
        - conf_thresh: (float) fore-ground confidence threshold
        - nms_thresh: (float) nms threshold
        """
        super(NMSLayer, self).__init__()
        self.conf_thresh = conf_thresh
        self.nms_thresh = nms_thresh
    def forward(self, x):
        """
        Args
            x: (Tensor) prediction feature map, with size [bs, num_bboxes, 5 + nC]
        Returns
            predictions: (Tensor) prediction result with size [num_bboxes, [image_batch_idx, 4 offsets, p_obj, max_conf, cls_idx]]
        """
        bs, _, _ = x.size()
        # NOTE(review): built with .cuda(), so this layer assumes a CUDA
        # device is available — confirm before running CPU-only.
        predictions = torch.Tensor().cuda()
        for idx in range(bs):
            pred = x[idx]
            # EAFP: an image with no box above conf_thresh makes the
            # indexing below raise, which is caught and skipped.
            try:
                non_zero_pred = pred[pred[:, 4] > self.conf_thresh]
                non_zero_pred[:, :4] = xywh2xyxy(non_zero_pred[:, :4])
                # Per-box best class score and class index.
                max_score, max_idx = torch.max(non_zero_pred[:, 5:], 1)
                max_idx = max_idx.float().unsqueeze(1)
                max_score = max_score.float().unsqueeze(1)
                non_zero_pred = torch.cat(
                    (non_zero_pred[:, :5], max_score, max_idx), 1)
                classes = torch.unique(non_zero_pred[:, -1])
            except Exception:  # no object predicted
                print('No object predicted')
                continue
            for cls in classes:
                # Greedy NMS per class: keep the most confident box, drop
                # everything overlapping it above nms_thresh, repeat.
                cls_pred = non_zero_pred[non_zero_pred[:, -1] == cls]
                conf_sort_idx = torch.sort(cls_pred[:, 5], descending=True)[1]
                cls_pred = cls_pred[conf_sort_idx]
                max_preds = []
                while cls_pred.size(0) > 0:
                    max_preds.append(cls_pred[0].unsqueeze(0))
                    ious = IoU(max_preds[-1], cls_pred)
                    cls_pred = cls_pred[ious < self.nms_thresh]
                if len(max_preds) > 0:
                    max_preds = torch.cat(max_preds).data
                    # Prefix each surviving box with its batch index.
                    batch_idx = max_preds.new(max_preds.size(0), 1).fill_(idx)
                    seq = (batch_idx, max_preds)
                    predictions = torch.cat(seq, 1) if predictions.size(
                        0) == 0 else torch.cat((predictions, torch.cat(seq, 1)))
        return predictions
# =================================================================
# NETWORK
class DarkNet(nn.Module):
def __init__(self, cfg, reso=416, thr_obj=0.5, thr_nms=0.5):
super(DarkNet, self).__init__()
self.blocks = parse_cfg(cfg)
self.reso, self.thr_obj, self.thr_nms = reso, thr_obj, thr_nms
self.net_info, self.module_list = self.create_modules(self.blocks)
self.nms = NMSLayer(self.thr_obj, self.thr_nms)
    def forward(self, x, y_true=None, CUDA=False):
        """Run the network over input batch ``x``.

        In training mode returns the accumulated loss dict (labels given in
        ``y_true``); in eval mode returns the NMS-filtered predictions.
        """
        modules = self.blocks[1:]
        predictions = torch.Tensor().cuda() if CUDA else torch.Tensor()
        outputs = dict() #We cache the outputs for the route layer
        loss = dict()
        for i, module in enumerate(modules):
            if module["type"] == "convolutional" or module["type"] == "upsample":
                x = self.module_list[i](x)
                outputs[i] = x
            elif module["type"] == "shortcut":
                # Residual connection: add a cached earlier feature map.
                from_ = int(module["from"])
                x = outputs[i-1] + outputs[i+from_]
                outputs[i] = x
            elif module["type"] == "route":
                # Route: forward (and possibly concatenate) earlier layers.
                layers = module["layers"]
                layers = [int(a) for a in layers]
                # Positive indices are absolute; convert to relative offsets.
                if (layers[0]) > 0:
                    layers[0] = layers[0] - i
                if len(layers) == 1:
                    x = outputs[i + (layers[0])]
                else:
                    if (layers[1]) > 0:
                        layers[1] = layers[1] - i
                    map1 = outputs[i + layers[0]]
                    map2 = outputs[i + layers[1]]
                    # Concatenate along the channel dimension.
                    x = torch.cat((map1, map2), 1)
                outputs[i] = x
            elif module["type"] == 'yolo':
                if self.training == True:
                    # Each YOLO head returns its loss components; sum them
                    # per key and keep a running 'total'.
                    loss_part = self.module_list[i][0](x, y_true)
                    for key, value in loss_part.items():
                        value = value  # NOTE(review): no-op assignment — confirm leftover
                        loss[key] = loss[key] + \
                            value if key in loss.keys() else value
                        loss['total'] = loss['total'] + \
                            value if 'total' in loss.keys() else value
                else:
                    # Collect decoded predictions from every YOLO head.
                    x = self.module_list[i][0](x)
                    predictions = x if len(predictions.size()) == 1 else torch.cat(
                        (predictions, x), 1)
                outputs[i] = outputs[i-1]  # skip
            # Print the layer information
            # print(i, module["type"], x.shape)
        # return prediction result only when evaluated
        if self.training == True:
            return loss
        else:
            predictions = self.nms(predictions)
            return predictions
    def create_modules(self, blocks):
        """Translate parsed cfg blocks into an nn.ModuleList.

        Returns:
            (net_info, module_list): the '[net]' header dict and one
            nn.Sequential per remaining cfg block, index-aligned with
            ``blocks[1:]`` (route/shortcut get EmptyLayer placeholders).
        """
        net_info = blocks[0] #Captures the information about the input and pre-processing
        module_list = nn.ModuleList()
        in_channels = 3  # RGB input
        out_channels_list = []  # per-layer channel counts, for route/shortcut
        for index, block in enumerate(blocks[1:]):
            module = nn.Sequential()
            # Convolutional Layer
            if (block["type"] == "convolutional"):
                activation = block["activation"]
                # Conv layers followed by batch norm carry no bias term.
                try:
                    batch_normalize = int(block["batch_normalize"])
                    bias = False
                except:
                    batch_normalize = 0
                    bias = True
                out_channels = int(block["filters"])
                kernel_size = int(block["size"])
                padding = (kernel_size - 1) // 2 if int(block["pad"]) else 0
                stride = int(block["stride"])
                conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, bias = bias)
                module.add_module("conv_{0}".format(index), conv)
                if batch_normalize:
                    bn = nn.BatchNorm2d(out_channels)
                    module.add_module("batch_norm_{0}".format(index), bn)
                if activation == "leaky":
                    activn = nn.LeakyReLU(0.1, inplace = True)
                    module.add_module("leaky_{0}".format(index), activn)
            # Up Sample Layer
            elif (block["type"] == "upsample"):
                stride = int(block["stride"]) # = 2 in Yolov3
                upsample = nn.Upsample(scale_factor = stride, mode = "nearest")
                module.add_module("upsample_{}".format(index), upsample)
            # Shortcut Layer (actual addition happens in forward)
            elif block["type"] == "shortcut":
                shortcut = EmptyLayer()
                module.add_module("shortcut_{}".format(index), shortcut)
            # Route Layer (actual routing happens in forward; only the
            # resulting channel count is computed here)
            elif (block["type"] == "route"):
                route = EmptyLayer()
                module.add_module("route_{0}".format(index), route)
                block["layers"] = block["layers"].split(',')
                start = int(block["layers"][0])
                if len(block['layers']) == 1:
                    start = int(block['layers'][0])
                    out_channels = out_channels_list[index + start]
                elif len(block['layers']) == 2:
                    start = int(block['layers'][0])
                    end = int(block['layers'][1])
                    # NOTE(review): `end` is used as an absolute layer index
                    # while `start` is relative — confirm against the cfg
                    # files this repo actually uses.
                    out_channels = out_channels_list[index + start] + out_channels_list[end]
            # Yolo Layer
            elif block["type"] == "yolo":
                # 'mask' selects which of the listed anchors this head uses.
                mask = block["mask"].split(",")
                mask = [int(x) for x in mask]
                anchors = block["anchors"].split(",")
                anchors = [int(a) for a in anchors]
                anchors = [(anchors[i], anchors[i+1]) for i in range(0, len(anchors),2)]
                anchors = [anchors[i] for i in mask]
                num_classes = int(block['classes'])
                ignore_thresh = float(block['ignore_thresh'])
                prediction = YOLOLayer(anchors, num_classes, self.reso, ignore_thresh)
                module.add_module("prediction_{}".format(index), prediction)
            module_list.append(module)
            in_channels = out_channels
            out_channels_list.append(out_channels)
        return (net_info, module_list)
def load_weights(self, path, cutoff=None):
    """Load darknet-format weights from disk into this network.

    YOLOv3 is fully convolutional, so only conv layers' weights are loaded.
    Within the .weights file each convolutional block is stored as:
      1. (optional, when batch-normalized) bn_biases => bn_weights => bn_running_mean => bn_running_var
      2. (otherwise) conv_bias
      3. conv_weights

    Args:
        path: (str) path to the .weights file.
        cutoff: (optional, int) stop loading before module index `cutoff`
            (useful for loading only a pretrained backbone).
    """
    fp = open(path, 'rb')
    # File header: 5 int32 values (version info / seen-images counter in
    # darknet's format); the rest of the file is one flat float32 array.
    header = np.fromfile(fp, dtype=np.int32, count=5)
    weights = np.fromfile(fp, dtype=np.float32)
    fp.close()
    header = torch.from_numpy(header)
    # `ptr` walks through the flat weight array; each layer consumes exactly
    # as many floats as it has parameters, in the order darknet wrote them.
    ptr = 0
    for i, module in enumerate(self.module_list):
        block = self.blocks[i]
        if cutoff is not None and i == cutoff:
            print("Stop before", block['type'], "block (No.%d)" % (i+1))
            break
        if block['type'] == "convolutional":
            # Layers without the key are treated as not batch-normalized.
            batch_normalize = int(
                block['batch_normalize']) if 'batch_normalize' in block else 0
            conv = module[0]
            if batch_normalize > 0:
                bn = module[1]
                # All four BN tensors have the same element count.
                num_bn_biases = bn.bias.numel()
                bn_biases = torch.from_numpy(
                    weights[ptr:ptr+num_bn_biases])
                bn_biases = bn_biases.view_as(bn.bias.data)
                bn.bias.data.copy_(bn_biases)
                ptr += num_bn_biases
                bn_weights = torch.from_numpy(
                    weights[ptr:ptr+num_bn_biases])
                bn_weights = bn_weights.view_as(bn.weight.data)
                bn.weight.data.copy_(bn_weights)
                ptr += num_bn_biases
                bn_running_mean = torch.from_numpy(
                    weights[ptr:ptr+num_bn_biases])
                bn_running_mean = bn_running_mean.view_as(bn.running_mean)
                bn.running_mean.copy_(bn_running_mean)
                ptr += num_bn_biases
                bn_running_var = torch.from_numpy(
                    weights[ptr:ptr+num_bn_biases])
                bn_running_var = bn_running_var.view_as(bn.running_var)
                bn.running_var.copy_(bn_running_var)
                ptr += num_bn_biases
            else:
                # Non-BN conv layers store an explicit bias vector instead.
                num_biases = conv.bias.numel()
                conv_biases = torch.from_numpy(weights[ptr:ptr+num_biases])
                conv_biases = conv_biases.view_as(conv.bias.data)
                conv.bias.data.copy_(conv_biases)
                ptr = ptr + num_biases
            # Kernel weights follow the bias/BN data for every conv layer.
            num_weights = conv.weight.numel()
            conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])
            conv_weights = conv_weights.view_as(conv.weight.data)
            conv.weight.data.copy_(conv_weights)
            ptr = ptr + num_weights
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,441
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/__main__.py
|
import argparse
import sys
import yolo
import dataprep
def parse_arg():
    """Parse only the action word (argv[1]); sub-modules read their own flags."""
    usage_text = '''python . <action> [<args>]
Actions:
train Network training module
predict Object detection module
dataprep Data preprocessing module
'''
    arg_parser = argparse.ArgumentParser(
        description='mmWave YOLOv3',
        add_help=True,
        usage=usage_text,
    )
    arg_parser.add_argument('Action', type=str, help='Action to run')
    # Slice keeps exactly argv[1], so action-specific flags are left untouched.
    return arg_parser.parse_args(sys.argv[1:2])
# Module entry point: route the requested action to the matching package.
args = parse_arg()
if args.Action == 'train' or args.Action == 'predict':
    # Both actions live in the yolo package; it re-parses sys.argv for
    # its own action-specific flags.
    yolo.main(args)
elif args.Action == 'dataprep':
    dataprep.main()
else:
    print('Unknown action. Check "python . --help"')
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,442
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/yolo/dataset.py
|
import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
# from torchvision import transforms
import os
# import random
import numpy as np
from PIL import Image
# anchors_wh = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
# [59, 119], [116, 90], [156, 198], [373, 326]],
# np.float32) / 416
class MmwaveDataset(torch.utils.data.Dataset):
    """Dataset of mmWave radar frames stored as image files.

    Ground-truth bounding boxes are encoded in each file name, either as a
    single box ``..._[xc,yc,w,h].ext`` or as up to four boxes
    ``..._[[xc,yc,w,h],[...]].ext``. Coordinates are normalized by the
    image size before being returned.
    """

    def __init__(self, data_dir, data_size = 0, transforms = None):
        """
        Args:
            data_dir: directory containing the frame images.
            data_size: number of samples to expose; 0 means "all files".
            transforms: optional torchvision-style transform applied per image.

        Raises:
            ValueError: if data_size is negative or exceeds the file count.
        """
        files = sorted(os.listdir(data_dir))
        self.files = [f"{data_dir}/{x}" for x in files]
        if data_size < 0 or data_size > len(files):
            # BUG FIX: this used to be `assert("...")`, which asserts a
            # non-empty string and therefore never fires. Raise instead.
            raise ValueError("Data size should be between 0 to number of files in the dataset")
        if data_size == 0:
            data_size = len(files)
        self.data_size = data_size
        self.transforms = transforms

    def __len__(self):
        return self.data_size

    def __getitem__(self, idx):
        image_path = self.files[idx]
        image = Image.open(image_path)
        img_w, img_h = image.size
        image = self.preProcessImage(image)

        labels = [] # to make it array of bbs (for multiple bbs in the future)
        # Bounding boxes live in the last '_'-separated chunk of the filename.
        labels_str = image_path.split("_")[-1]
        if "[[" in labels_str:
            # Multi-box form "[[x,y,w,h],[...]]"; fixed capacity of 4 boxes.
            labels_str = labels_str.split('[[')[1].split(']]')[0].split('],[')
            labels = np.zeros((4, 5))
            for i, l in enumerate(labels_str):
                label = np.zeros(5)
                label[:4] = np.array([int(a) for a in l.split(',')]) # [xc, yc, w, h]
                # Normalizing labels
                label[0] /= img_w #Xcenter
                label[1] /= img_h #Ycenter
                label[2] /= img_w #Width
                label[3] /= img_h #Height
                labels[i, :] = label
        else:
            labels_str = labels_str.split('[')[1].split(']')[0].split(',') # get the bb info from the filename
            labels = np.zeros((1, 5))
            labels[0, :4] = np.array([int(a) for a in labels_str]) # [xc, yc, w, h]
            if np.any(labels[0, :4] == 0):
                # Degenerate box: return a 2-tuple whose second slot is None
                # so that collate() filters this sample out of the batch.
                return image, None
            # Normalizing labels
            labels[0, 0] /= img_w #Xcenter
            labels[0, 1] /= img_h #Ycenter
            labels[0, 2] /= img_w #Width
            labels[0, 3] /= img_h #Height
        # Column 4 stays 0: class label (0 = person).
        return image_path, image, labels

    # Image custom preprocessing if required
    def preProcessImage(self, image):
        """Convert to RGB, then apply transforms or fall back to CHW float array."""
        image = image.convert('RGB')
        if self.transforms:
            return self.transforms(image)
        else:
            image = np.array(image)
            image = image.transpose(2,1,0)
            return image.astype(np.float32)
def collate(batch):
    """Drop samples whose second slot is None, then apply default collation."""
    kept = [sample for sample in batch if sample[1] is not None]
    return default_collate(kept)
def getDataLoaders(data_dir, transforms, train_split=0, batch_size=8, \
        num_workers=2, collate_fn=collate, random_seed=0):
    """Build (train, validation) DataLoaders over an MmwaveDataset.

    Args:
        data_dir: directory of frame images.
        transforms: per-image transform passed to the dataset.
        train_split: fraction of data used for training; 0 or 1 yields a
            single loader returned as (None, loader).
        batch_size: samples per batch.
        num_workers: DataLoader worker processes.
        collate_fn: batch collation function (defaults to collate above).
        random_seed: 0 disables shuffling; any other value enables it and
            seeds the train/validation split.

    Raises:
        Exception: if train_split is outside [0, 1].
    """
    if train_split < 0 or train_split > 1:
        raise Exception(f"data_loader | Split ({train_split}) coefficient should be 0 < x < 1")

    dataset = MmwaveDataset(data_dir=data_dir, transforms=transforms)
    shuffle = random_seed != 0

    # Single set: no splitting requested.
    if train_split == 0 or train_split == 1:
        return None, torch.utils.data.DataLoader(dataset, batch_size=batch_size,
            shuffle=shuffle, num_workers=num_workers, collate_fn = collate_fn)

    # Fixed-seed generator so the split is reproducible across runs.
    generator = torch.Generator()
    if random_seed != 0:
        generator.manual_seed(random_seed)

    train_size = int(train_split * len(dataset))
    test_size = len(dataset) - train_size
    trainset, testset = torch.utils.data.random_split(dataset, [train_size, test_size], generator=generator)

    # BUG FIX: num_workers used to be hard-coded to 2 here, silently
    # ignoring the function parameter.
    return torch.utils.data.DataLoader(trainset, batch_size=batch_size, \
            shuffle=shuffle, num_workers=num_workers, collate_fn = collate_fn), \
        torch.utils.data.DataLoader(testset, batch_size=batch_size, \
            shuffle=shuffle, num_workers=num_workers, collate_fn = collate_fn)
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,443
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/yolo/__init__.py
|
import gc
from .train import train
from .predict import predict
def main(args):
    """Dispatch an already-parsed top-level action to train() or predict()."""
    gc.collect()  # reclaim memory before the heavy work starts
    action = args.Action
    if action == 'train':
        train()
    elif action == 'predict':
        predict()
    gc.collect()  # and once more after the run finishes
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,444
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/yolo/train.py
|
import torch
import torch.nn as nn
# import torch.nn.functional as F
import torch.optim as optim
# import torchvision
import torchvision.transforms as transforms
# import os, pickle, random
import time, sys
import numpy as np
# from PIL import Image
import argparse
from .darknet import DarkNet
from .dataset import *
from .util import *
def parse_arg():
    """Parse training-specific CLI flags (everything after the action word)."""
    p = argparse.ArgumentParser(description='mmWaveYoLov3 Training module', add_help=True)
    # (flag, type, default, help) — added in the original declaration order.
    options = [
        ('--cfg', str, 'yolov3micro', "Name of the network config"),
        ('--pathin', str, 'trainset', "Input dataset name"),
        ('--datasplit', float, 0.8, "Dataset split percentage (def: 0.8 (80 (train):20 (validation))"),
        ('--seed', float, 42, "Seed for the random shuffle (default: 42, 0 for no shuffling)"),
        ('--bs', int, 8, "Batch size (default: 8, 0 for single batch)"),
        ('--ckpt', str, '0.0', "Checkpoint name as <'epoch'.'iteration'>"),
        ('--ep', int, 5, "Total epoch number (default: 5)"),
        ('--lr', float, 1e-5, "Learning rate (default: 1e-5)"),
        ('--reso', int, 416, "Input image resolution (default: 416)"),
        ('--v', int, 0, "Verbose (0 minimal (default), 1 normal, 2 all"),
    ]
    for flag, typ, default, text in options:
        p.add_argument(flag, type=typ, default=default, help=text)
    # argv[0] is the program, argv[1] the action word — skip both.
    return p.parse_args(sys.argv[2:])
def train():
    """Full training loop: build the network, optionally resume from a
    checkpoint, train with periodic validation, and save checkpoints/plots.

    Reads all configuration from CLI flags via parse_arg(); writes
    checkpoints under save/checkpoints/ and loss plots under save/losses.
    """
    torch.cuda.empty_cache()

    # CONSTANTS
    args = parse_arg()
    pathcfg = f"cfg/{args.cfg}.cfg"
    pathin = f"dataset/{args.pathin}/final"
    num_workers = 2

    # NETWORK
    darknet = DarkNet(pathcfg, args.reso)
    pytorch_total_params = sum(p.numel() for p in darknet.parameters() if p.requires_grad)
    print('# of params: ', pytorch_total_params)
    if args.v > 0:
        print(darknet.module_list)

    # LOAD A CHECKPOINT!!!
    start_epoch, start_iteration = [0, 0]
    tlosses, vlosses = [], []
    optimizer, scheduler = None, None
    start_epoch, start_iteration = [int(x) for x in args.ckpt.split('.')]
    # BUG FIX: condition used to be `start_epoch != 0 and start_epoch != 0`
    # (same operand twice), so a checkpoint such as "0.500" — epoch 0 with a
    # nonzero iteration — was silently never loaded.
    if start_epoch != 0 or start_iteration != 0:
        start_epoch, start_iteration, state_dict, \
            tlosses, vlosses, \
            optimizer, scheduler = load_checkpoint(
                f'save/checkpoints/',
                int(start_epoch),
                int(start_iteration)
            )
        darknet.load_state_dict(state_dict)
    # ====================================================

    # OPTIMIZER & HYPERPARAMETERS (fresh ones only when not resumed)
    if optimizer == None:
        # optimizer = optim.SGD(filter(lambda p: p.requires_grad, darknet.parameters()), \
        #     lr=args.lr, momentum=0.9, weight_decay=5e-4, nesterov=True)
        optimizer = optim.Adam(filter(lambda p: p.requires_grad, darknet.parameters()), \
            lr=args.lr, betas=[0.9,0.999], eps=1e-8, weight_decay=0, amsgrad=False)
    if scheduler == None:
        # Decay the LR by 10x every 10 epochs.
        scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # IMAGE PREPROCESSING!!!
    transform = transforms.Compose([
        # transforms.RandomResizedCrop(size=args.reso, interpolation=3),
        transforms.Resize(size=(args.reso, args.reso), interpolation=3),
        transforms.ColorJitter(brightness=1.5, saturation=1.5, hue=0.2),
        transforms.RandomVerticalFlip(),
        transforms.ToTensor()
    ])
    # ====================================================

    # Train and Validation data allocation
    trainloader, validloader = getDataLoaders(pathin, transform, \
        train_split=args.datasplit, batch_size=args.bs, \
        num_workers=num_workers, collate_fn=collate, random_seed=args.seed)
    # ====================================================

    # Use GPU if available
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    if torch.cuda.device_count() > 1:  # Use Multi GPU if available
        darknet = nn.DataParallel(darknet)
    darknet.to(device)  # Put the network on device
    if args.v > 0:
        print(next(darknet.parameters()).device)

    # TRAIN
    print(f'[LOG] TRAIN | Training set: {len(trainloader.dataset)}')
    print(f'[LOG] TRAIN | Validation set: {len(validloader.dataset)}')
    print(f'[LOG] TRAIN | Starting to train from epoch {start_epoch} iteration {start_iteration}')
    if start_epoch > args.ep:
        print(f'[ERR] TRAIN | Total epochs ({args.ep}) is less then current epoch ({start_epoch})')
        return

    for epoch in range(start_epoch, args.ep):
        print(f'[LOG] TRAIN | Starting Epoch #{epoch+1}')
        darknet.train()  # set network to training mode
        tloss, vloss = [], []
        start = time.time()

        for batch_idx, (_, inputs, targets) in enumerate(trainloader):
            optimizer.zero_grad()  # clear the grads from prev passes
            inputs, targets = inputs.to(device), targets.to(device)  # Images, Labels
            outputs = darknet(inputs, targets, device)  # Loss
            outputs['total'].backward()  # Gradient calculations
            tloss.append(outputs['total'].item())
            optimizer.step()
            end = time.time()

            # Latest iteration!
            if args.v == 1:
                print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ')
            elif args.v == 2:
                print(f'x: {outputs["x"].item():.2f} y: {outputs["y"].item():.2f} ' \
                    f'w: {outputs["w"].item():.2f} h: {outputs["h"].item():.2f} ' \
                    f'cls: {outputs["cls"].item():.2f} ' \
                    f'conf: {outputs["conf"].item()}')
            if (batch_idx % 100) == 99:
                print(f'[LOG] TRAIN | Batch #{batch_idx+1}\
                    Loss: {np.mean(tloss)}\
                    Time: {end - start}s')
                start = time.time()

        # Save train loss for the epoch
        tlosses.append(np.mean(tloss))
        scheduler.step()

        # VALIDATION
        with torch.no_grad():
            for batch_idx, (_, inputs, targets) in enumerate(validloader):
                inputs, targets = inputs.to(device), targets.to(device)
                # NOTE(review): unlike the training call, `device` is not
                # passed here — confirm the forward signature tolerates that.
                voutputs = darknet(inputs, targets)
                vloss.append(voutputs['total'].item())
            # Validation loss!
            print(f'[LOG] VALID | Epoch #{epoch+1} \
                Loss: {np.mean(vloss)}')
        # Save valid loss for the epoch
        vlosses.append(np.mean(vloss))
        # ====================================================

        # Periodic checkpoint every 10 epochs.
        if (epoch % 10) == 9:
            save_checkpoint(f'save/checkpoints/', epoch+1, 0, {
                'epoch': epoch+1,
                'iteration': 0,
                'state_dict': darknet.state_dict(),
                'tlosses': tlosses,
                'vlosses': vlosses,
                'optimizer': optimizer,
                'scheduler': scheduler
            })
            plot_losses(tlosses, vlosses, f'save/losses')

    # Final checkpoint after the last epoch.
    save_checkpoint(f'save/checkpoints/', epoch+1, 0, {
        'epoch': epoch+1,
        'iteration': 0,
        'state_dict': darknet.state_dict(),
        'tlosses': tlosses,
        'vlosses': vlosses,
        'optimizer': optimizer,
        'scheduler': scheduler
    })
    plot_losses(tlosses, vlosses, f'save/losses')
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,445
|
enverbashirov/YOLOv3-mMwave-Radar
|
refs/heads/master
|
/dataprep/kalman_tracker.py
|
import matplotlib.animation as animation
import numpy as np
import scipy as sp
from matplotlib import pyplot as plt
class KalmanTracker:
    """Constant-velocity Kalman filter track for one radar target.

    State vector s = [x, vx, y, vy]^T (Cartesian position and velocity).
    Observations arrive in polar form z = [r, theta] and are converted to
    Cartesian before the update. Gating rejects implausible observations
    via a chi-squared test on the innovation.
    """

    def __init__(self, id_, s0=None, disable_rejection_check=False):
        """
        Args:
            id_: numeric identifier assigned by the owning tracker.
            s0: optional initial state column vector (defaults to zeros).
            disable_rejection_check: accept every observation, skipping gating.
        """
        # Filter-related parameters
        self.dt = 66.667e-3  # T_int of the radar TX
        # state transition matrix (block-diagonal constant-velocity model)
        self.F = np.kron(np.eye(2), np.array([[1, self.dt], [0, 1]]))
        # state-acceleration matrix
        self.G = np.array([0.5*(self.dt**2), self.dt]).reshape(2, 1)
        # observation matrix: picks (x, y) out of the state
        self.H = np.array([[1, 0, 0, 0],
                           [0, 0, 1, 0]])
        # measurement covariance matrix
        self.R = np.array([[0.5, 0], [0, 0.5]])  # [wagner2017radar]
        # initial state covariance
        self.P = 0.2*np.eye(4)
        # state noise variance
        self.sigma_a = 8  # [wagner2017radar]
        # state noise covariance
        self.Q = np.kron(np.eye(2), np.matmul(self.G, self.G.T)*self.sigma_a**2)
        self.n = self.F.shape[1]
        self.m = self.H.shape[1]
        # initial state
        self.s = np.zeros((self.n, 1)) if s0 is None else s0
        self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
        self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
        # chi-squared gate threshold (2 dof, ~90th percentile)
        self.REJECT_THR = 4.605
        self.disable_rejection_check = disable_rejection_check
        #########################################################
        # Tracker-related parameters
        self.misses_number = 0
        self.hits = 0
        self.id = id_
        self.box = np.array([])
        self.state_memory = []
        self.identity_label = 'UNK'  # initialize as unknown cluster
        self.id_dict = {-1: 'UNK', 0: 'S1', 1: 'S2', 2: 'S3', 3: 'S4'}
        # self.id_dict = {-1: 'UNK', 0: 'JP', 1: 'FM', 2:'GP', 3:'RF'}

    def transform_obs(self, z):
        """Convert a polar observation [r, theta] to Cartesian [x, y]."""
        z_prime = np.array([z[0]*np.cos(z[1]), z[0]*np.sin(z[1])]).reshape(-1, 1)
        return z_prime

    def reject_obs(self, i, S):
        """Gate: True if innovation i is implausible under covariance S."""
        chi_squared = np.matmul(np.matmul(i.T, np.linalg.inv(S)), i)[0, 0]
        return chi_squared >= self.REJECT_THR

    def predict(self):
        """Propagate state and covariance one time step; returns (s, xy)."""
        self.s = np.matmul(self.F, self.s)
        # check that x has the correct shape
        assert self.s.shape == (self.n, 1)
        self.P = np.matmul(np.matmul(self.F, self.P), self.F.T) + self.Q
        self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
        self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
        return self.s, self.xy

    def update(self, z):
        """Fuse polar observation z; rejected observations leave s unchanged.

        Returns (s, xy) either way, and appends the current xy to
        state_memory.
        """
        z = self.transform_obs(z)
        # innovation
        y = z - np.matmul(self.H, self.s)
        S = np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R
        if (not self.reject_obs(y, S)) or self.disable_rejection_check:
            K = np.matmul(np.matmul(self.P, self.H.T), np.linalg.inv(S))
            self.s = self.s + np.matmul(K, y)
            assert self.s.shape == (self.n, 1)
            self.P = np.matmul(np.eye(self.n) - np.matmul(K, self.H), self.P)
            self.xy = np.array([self.s[0], self.s[2]]).reshape(-1, 1)
            self.rtheta = np.array([np.sqrt(self.xy[0]**2 + self.xy[1]**2), np.arctan2(self.xy[1], self.xy[0])]).reshape(-1, 1)
            self.state_memory.append(self.xy)
            return self.s, self.xy
        else:
            self.state_memory.append(self.xy)
            return self.s, self.xy

    def get_S(self):
        """Current innovation covariance H P H^T + R."""
        return np.matmul(np.matmul(self.H, self.P), self.H.T) + self.R

    @staticmethod
    def get_mahalanobis_distance(x, C):
        # returns Mahalanobis distance given the difference vector x and covariance C
        return np.matmul(np.matmul(x.T, np.linalg.inv(C)), x)[0, 0]

    @staticmethod
    def hungarian_assignment(score_matrix):
        """Optimal detection-to-track assignment (Hungarian algorithm).

        Returns (matches, undetected_tracks, unmatched_detections), where
        matches is an (N, 2) array of [detection_idx, track_idx] pairs.
        """
        # BUG FIX: `import scipy as sp` does not reliably expose
        # `sp.optimize` as an attribute; import the function explicitly.
        from scipy.optimize import linear_sum_assignment
        det_idx, tr_idx = linear_sum_assignment(score_matrix)
        unmatched, undetected = [], []
        for t in range(score_matrix.shape[1]):
            if t not in tr_idx:
                undetected.append(t)
        for d in range(score_matrix.shape[0]):
            if d not in det_idx:
                unmatched.append(d)
        matches = []
        for d, t in zip(det_idx, tr_idx):
            matches.append(np.array([d, t]).reshape(1, 2))
        if len(matches) == 0:
            matches = np.empty((0, 2), dtype=int)
        else:
            matches = np.concatenate(matches, axis=0)
        return matches, np.array(undetected), np.array(unmatched)
|
{"/dataprep/processing.py": ["/dataprep/util.py"], "/dataprep/truth.py": ["/dataprep/util.py", "/dataprep/kalman_tracker.py"], "/dataprep/__init__.py": ["/dataprep/channel_extraction.py", "/dataprep/processing.py", "/dataprep/truth.py"], "/yolo/predict.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"], "/yolo/darknet.py": ["/yolo/util.py"], "/__main__.py": ["/yolo/__init__.py", "/dataprep/__init__.py"], "/yolo/__init__.py": ["/yolo/train.py", "/yolo/predict.py"], "/yolo/train.py": ["/yolo/darknet.py", "/yolo/dataset.py", "/yolo/util.py"]}
|
1,446
|
scissorhands/pynal
|
refs/heads/master
|
/index.py
|
from __future__ import print_function
import json
from etl import Etl
def lambda_connect(event, context):
    """AWS Lambda entry point: run the full analytics ETL pass.

    `event` and `context` are the standard Lambda handler arguments; both
    are ignored here. Returns a fixed marker string as the Lambda result.
    """
    etl = Etl()
    etl.retrieve_all_stats()
    return 'pickle rick'

if __name__ == '__main__':
    # Allow running the handler locally without a Lambda runtime.
    lambda_connect(None, None)
|
{"/index.py": ["/etl.py"], "/etl.py": ["/requester.py", "/dbconnector.py"], "/index_microservice.py": ["/requester.py"]}
|
1,447
|
scissorhands/pynal
|
refs/heads/master
|
/requester.py
|
import analytics as service
class Requester:
    """Thin wrapper around the analytics reporting service: each public
    method fetches the shared metric set, broken down by a different
    dimension, over a date range.
    """

    def __init__(self):
        self.analytics = service.initialize_analyticsreporting()
        # Metric set shared by every report request.
        self.general_stats_metrics = [
            {'expression': 'ga:sessions'},
            {'expression': 'ga:pageViews'},
            {'expression': 'ga:avgTimeOnPage'},
            {'expression': 'ga:exits'},
            {'expression': 'ga:organicSearches'}
        ]

    def _stats_for(self, extra_dimension, from_date, to_date):
        # Every report is dimensioned by hostname and date; the middle
        # dimension (city/region/device) is optional.
        dims = [{'name': 'ga:hostname'}]
        if extra_dimension is not None:
            dims.append({'name': extra_dimension})
        dims.append({'name': 'ga:date'})
        return service.generic_request(self.analytics,
            self.general_stats_metrics, dims, from_date, to_date)

    def get_hostname_stats(self, from_date = '7daysAgo', to_date = 'yesterday' ):
        return self._stats_for(None, from_date, to_date)

    def get_city_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):
        return self._stats_for('ga:city', from_date, to_date)

    def get_region_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):
        return self._stats_for('ga:region', from_date, to_date)

    def get_devices_stats(self, from_date = '7daysAgo', to_date = 'yesterday'):
        return self._stats_for('ga:deviceCategory', from_date, to_date)
|
{"/index.py": ["/etl.py"], "/etl.py": ["/requester.py", "/dbconnector.py"], "/index_microservice.py": ["/requester.py"]}
|
1,448
|
scissorhands/pynal
|
refs/heads/master
|
/dbconnector.py
|
import dbconfig
import mysql.connector as _connector
from mysql.connector import errorcode as dberror
class Connector:
    """Minimal MySQL access layer: opens one connection/cursor on
    construction (credentials from dbconfig.config) and offers an
    INSERT IGNORE helper keyed by dict fields.
    """
    def __init__(self):
        # Both stay None if the connection attempt below fails; callers are
        # expected to check/handle that (no exception is re-raised here).
        self.cnx = self.cur = None
        try:
            self.cnx = _connector.connect(**dbconfig.config)
        except _connector.Error as e:
            if(e.errno == dberror.ER_ACCESS_DENIED_ERROR):
                print('Invalid credentials')
            elif(e.errno == dberror.ER_BAD_DB_ERROR):
                print('Invalid database')
            else:
                print(e)
        else:
            self.cur = self.cnx.cursor()
    def test_select(self):
        """Smoke test: print the first 10 rows of the users table."""
        self.cur.execute("SELECT * FROM users AS U LIMIT 10")
        print()
        print('{0:3} {1:25} {2}'.format('ID:', 'EMAIL:', 'LANG:'))
        for row in self.cur.fetchall():
            print('{0:3} {1:25} {2}'.format(row[0], row[2], row[4]))
        print()
    def insert_ignore(self, table, data_dictionary):
        """INSERT IGNORE data_dictionary into table; returns lastrowid or None.

        Column names are taken from the dict keys; values are bound as
        named placeholders (the same keys), so the query is parameterized.
        """
        insert_id = None
        keys = "("+", ".join( "`"+key+"`" for key in data_dictionary.keys() )+")"
        values = "("+", ".join( "%("+str(value)+")s" for value in data_dictionary.keys() )+")"
        query = ("INSERT IGNORE INTO {0}\n"
            "{1}\n"
            "VALUES {2}".format(table, keys, values) )
        try:
            self.cur.execute(query, data_dictionary)
            self.cnx.commit()
            insert_id = self.cur.lastrowid
        except Exception as e:
            # Best-effort insert: log and return None rather than propagate.
            print(e)
        return insert_id
    def serv_destory(self):
        # NOTE(review): method name is a typo for "destroy", but it is part
        # of the public API (called from etl.py) — do not rename silently.
        if self.cur:
            self.cur.close()
        if self.cnx:
            self.cnx.close()
        print("Connection destroyed")
    def main(self):
        """Manual smoke test: insert one sample row, then close resources."""
        id = self.insert_ignore('analytics_hostname_stats', {
            'hostname': 'hostname',
            'sessions': 1,
            'page_views': 1,
            'avg_time_on_page': 2.1,
            'exits': 3,
            'organic_searches': 5,
            'date': '2017-07-31',
        })
        if self.cur:
            self.cur.close()
        if self.cnx:
            self.cnx.close()
if __name__ == '__main__':
    # Manual smoke test: connect and run the sample insert.
    connector = Connector()
    connector.main()
|
{"/index.py": ["/etl.py"], "/etl.py": ["/requester.py", "/dbconnector.py"], "/index_microservice.py": ["/requester.py"]}
|
1,449
|
scissorhands/pynal
|
refs/heads/master
|
/etl.py
|
from requester import Requester
from dbconnector import Connector
import json
import datetime as dt
class Etl:
    """Extract-Transform-Load pipeline: pull analytics reports via
    Requester, flatten them into row dicts, and insert them into MySQL
    via Connector.
    """

    def __init__(self):
        self.req = Requester()
        self.connector = Connector()

    def get_report_dictionary(self, report):
        """Collect the header/row pieces of one API report into a flat dict."""
        columnHeader = report.get('columnHeader', {})
        return {
            'columnHeader': columnHeader,
            'dimensionHeaders': columnHeader.get('dimensions', []),
            'metricHeaders': columnHeader.get('metricHeader', {}).get('metricHeaderEntries', []),
            'rows': report.get('data', {}).get('rows', [])
        }

    def formatted_output(self, input):
        """Flatten an analytics API response into a list of row dicts.

        Dimension/metric names are stripped of their 'ga:' prefix; 'date'
        dimensions are converted from YYYYMMDD to ISO YYYY-MM-DD.
        """
        stats = []
        for report in input.get('reports', []):
            rdictionary = self.get_report_dictionary(report)
            for row in rdictionary['rows']:
                stat = {}
                dimensions = row.get('dimensions', [])
                dateRangeValues = row.get('metrics', [])
                for header, dimension in zip(rdictionary['dimensionHeaders'], dimensions):
                    hd = header.replace('ga:', '')
                    if hd == 'date':
                        dimension = dt.datetime.strptime(dimension, '%Y%m%d').strftime('%Y-%m-%d')
                    stat[hd] = dimension
                for values in dateRangeValues:
                    for metricHeader, value in zip(rdictionary['metricHeaders'], values.get('values')):
                        stat[metricHeader.get('name').replace('ga:', '')] = value
                stats.append(stat)
        return stats

    def _retrieve(self, label, getter, table, destroy_after):
        """Shared fetch -> format -> insert pipeline for one report type.

        Factored out of the four public retrieve_* methods, which used to
        be copy-pasted duplicates differing only in getter and table name.
        """
        print('getting %s stats' % label)
        report = getter('2017-01-01')
        stats = self.formatted_output(report)
        for row in stats:
            self.connector.insert_ignore(table, row)
        if destroy_after:
            self.connector.serv_destory()

    def retrieve_all_stats(self, destroy_after=True):
        """Run every report type over one shared DB connection."""
        self.retrieve_hostname_stats(False)
        self.retrieve_city_stats(False)
        self.retrieve_region_stats(False)
        self.retrieve_devices_stats(False)
        if (destroy_after):
            self.connector.serv_destory()

    def retrieve_hostname_stats(self, destroy_after=True):
        self._retrieve('hostname', self.req.get_hostname_stats,
                       'analytics_hostname_stats', destroy_after)

    def retrieve_city_stats(self, destroy_after=True):
        self._retrieve('city', self.req.get_city_stats,
                       'analytics_city_stats', destroy_after)

    def retrieve_region_stats(self, destroy_after=True):
        self._retrieve('region', self.req.get_region_stats,
                       'analytics_region_stats', destroy_after)

    def retrieve_devices_stats(self, destroy_after=True):
        # NOTE: table name is singular 'device' in the original schema.
        self._retrieve('devices', self.req.get_devices_stats,
                       'analytics_device_stats', destroy_after)
def main():
    """CLI entry point: run the full ETL pass once."""
    etl = Etl()
    etl.retrieve_all_stats()

if __name__ == '__main__':
    main()
|
{"/index.py": ["/etl.py"], "/etl.py": ["/requester.py", "/dbconnector.py"], "/index_microservice.py": ["/requester.py"]}
|
1,450
|
scissorhands/pynal
|
refs/heads/master
|
/index_microservice.py
|
from requester import Requester
import json
# Module-level singletons, created once per Lambda cold start and reused
# across invocations. localTest is flipped on only by the __main__ block.
req = Requester()
localTest = False
print('Loading function')
def respond(err, res=None):
    """Build an API-Gateway-style HTTP response dict.

    Args:
        err: an exception (or None). Truthy => 400 with the error text.
        res: JSON-serializable payload for the 200 case.

    Returns:
        dict with statusCode, body, and JSON Content-Type header.
    """
    return {
        'statusCode': '400' if err else '200',
        # BUG FIX: this used `err.message`, which does not exist on
        # Python 3 exceptions and raised AttributeError; use str(err).
        'body': str(err) if err else json.dumps(res),
        'headers': {
            'Content-Type': 'application/json',
        },
    }
def lambda_handler(event, context):
    """AWS Lambda HTTP handler: dispatch ?method=<name> to a Requester getter.

    Only GET is accepted, and only the four whitelisted method names below
    can be invoked — getattr is safe because of that whitelist. Responses
    are built by respond() in API-Gateway format.
    """
    operations = {
        'GET'
    }
    operation = event['httpMethod']
    if operation in operations:
        method = event['queryStringParameters']['method']
        # Whitelist of callable Requester methods (prevents arbitrary
        # attribute access through the query string).
        api_methods = {
            'get_hostname_stats',
            'get_city_stats',
            'get_region_stats',
            'get_devices_stats'
        }
        if method in api_methods:
            # Call with default date range (last 7 days).
            stats = getattr(req, method)()
            if(localTest):
                print(stats)
            return respond(None, stats)
        else:
            return respond(ValueError("Unsupported method '{}'".format(method)))
    else:
        return respond(ValueError('Unsupported http method "{}"'.format(operation)))
if __name__ == '__main__':
    # Local smoke test: simulate an API-Gateway GET event without Lambda.
    localTest = True
    event = {
        'httpMethod': 'GET',
        'queryStringParameters': {
            'method': 'get_hostname_stats'
        }
    }
    lambda_handler(event, None)
|
{"/index.py": ["/etl.py"], "/etl.py": ["/requester.py", "/dbconnector.py"], "/index_microservice.py": ["/requester.py"]}
|
1,472
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/network/protocol/ServerToClientHandshakePacket.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class ServerToClientHandshakePacket(DataPacket):
    """Handshake packet sent from server to client, carrying a JWT string."""

    NID = ProtocolInfo.SERVER_TO_CLIENT_HANDSHAKE_PACKET

    # JWT token payload; set before encoding / populated by decoding.
    jwt = None

    @staticmethod
    def canBeSentBeforeLogin():
        # BUG FIX: was a plain def with no `self`, so calling it on an
        # instance raised TypeError. @staticmethod keeps class-level calls
        # working unchanged while also allowing instance calls.
        return True

    def decodePayload(self):
        self.jwt = self.getString()

    def encodePayload(self):
        self.putString(self.jwt)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,473
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/math/Facing.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
class Facing:
    """Block-face direction constants and helpers.

    Each direction is encoded as (axis << 1) | positive_flag, so the low
    bit carries the sign and the remaining bits carry the axis index.
    """

    AXIS_Y = 0
    AXIS_Z = 1
    AXIS_X = 2

    FLAG_AXIS_POSITIVE = 1

    DOWN = AXIS_Y << 1
    UP = (AXIS_Y << 1) | FLAG_AXIS_POSITIVE
    NORTH = AXIS_Z << 1
    SOUTH = (AXIS_Z << 1) | FLAG_AXIS_POSITIVE
    WEST = AXIS_X << 1
    EAST = (AXIS_X << 1) | FLAG_AXIS_POSITIVE

    ALL = [
        DOWN,
        UP,
        NORTH,
        SOUTH,
        WEST,
        EAST
    ]

    HORIZONTAL = [
        NORTH,
        SOUTH,
        WEST,
        EAST
    ]

    # For each rotation axis, maps a direction to its clockwise neighbor.
    CLOCKWISE = {
        AXIS_Y: {
            NORTH: EAST,
            EAST: SOUTH,
            SOUTH: WEST,
            WEST: NORTH
        },
        AXIS_Z: {
            UP: EAST,
            EAST: DOWN,
            DOWN: WEST,
            WEST: UP
        },
        AXIS_X: {
            UP: NORTH,
            NORTH: DOWN,
            DOWN: SOUTH,
            SOUTH: UP
        }
    }

    @staticmethod
    def axis(direction):
        """Strip the sign bit, leaving the axis index."""
        return direction >> 1

    @staticmethod
    def is_positive(direction):
        """True for UP/SOUTH/EAST (positive direction along their axis)."""
        return (direction & Facing.FLAG_AXIS_POSITIVE) == Facing.FLAG_AXIS_POSITIVE

    @staticmethod
    def opposite(direction):
        """Flip the sign bit: UP<->DOWN, NORTH<->SOUTH, WEST<->EAST."""
        return direction ^ Facing.FLAG_AXIS_POSITIVE

    @staticmethod
    def rotate(direction, axis, clockwise):
        """Rotate `direction` 90 degrees around `axis`.

        Raises ValueError for an unknown axis or a direction that cannot
        rotate around that axis (e.g. UP around AXIS_Y).
        """
        # BUG FIX: the old checks indexed CLOCKWISE before validating, so
        # bad input raised KeyError instead of the intended ValueError.
        if axis not in Facing.CLOCKWISE:
            raise ValueError("Invalid axis {}".format(axis))
        if direction not in Facing.CLOCKWISE[axis]:
            raise ValueError("Cannot rotate direction {} around axis {}".format(direction, axis))
        rotated = Facing.CLOCKWISE[axis][direction]
        return rotated if clockwise else Facing.opposite(rotated)

    @staticmethod
    def validate(facing):
        """Raise ValueError if `facing` is not one of the six directions."""
        # BUG FIX: condition was inverted (`if facing in Facing.ALL`),
        # which raised for every VALID facing and accepted garbage.
        if facing not in Facing.ALL:
            raise ValueError("Invalid direction {}".format(facing))
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,474
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/network/protocol/ClientToServerHandshakePacket.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class ClientToServerHandshakePacket(DataPacket):
    """Empty acknowledgement handshake packet sent by the client; it has no
    payload, so encode/decode are deliberate no-ops.
    """

    NID = ProtocolInfo.CLIENT_TO_SERVER_HANDSHAKE_PACKET

    @staticmethod
    def canBeSentBeforeLogin():
        # BUG FIX: methods here were defined without `self`, so any call on
        # an instance raised TypeError. canBeSentBeforeLogin matches the
        # sibling packet's @staticmethod form; the payload hooks take self
        # like every other DataPacket subclass.
        return True

    def encodePayload(self):
        pass

    def decodePayload(self):
        pass
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,475
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/Podrum.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
#!/usr/bin/env python3
import sys
import inspect
from os import getcwd, path
from threading import Thread
sys.path.insert(0, path.dirname(path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))))
from podrum.Server import Server
if __name__ == "__main__":
    # Decide the Server constructor arguments from the CLI, then run the
    # server on a worker thread. Unrecognised flags warn and fall back to
    # the default wizard-enabled startup.
    args = sys.argv[1:]
    if len(args) >= 2:
        if args[0] == "--no_wizard" and args[1] == "-travis":
            serverArgs = (getcwd(), False, True)
        else:
            print("[!] None valid args selected.")
            serverArgs = (getcwd(), True)
    elif len(args) == 1:
        if args[0] == "--no_wizard":
            serverArgs = (getcwd(), False)
        else:
            print("[!] None valid args selected.")
            serverArgs = (getcwd(), True)
    else:
        serverArgs = (getcwd(), True)
    serverThread = Thread(target=Server, args=serverArgs)
    serverThread.start()
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,476
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/network/PacketPool.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.ClientToServerHandshakePacket import ClientToServerHandshakePacket
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.DisconnectPacket import DisconnectPacket
from podrum.network.protocol.LoginPacket import LoginPacket
from podrum.network.protocol.PlayStatusPacket import PlayStatusPacket
from podrum.network.protocol.ResourcePacksInfoPacket import ResourcePacksInfoPacket
from podrum.network.protocol.ServerToClientHandshakePacket import ServerToClientHandshakePacket
class PacketPool:
    """Registry mapping packet network IDs (NID) to their packet classes."""

    packetPool = {}

    def __init__(self):
        self.registerPackets()

    def registerPacket(self, packet):
        # Bug fixes vs. the original:
        #  * `self` was missing, so self.registerPacket(X) raised TypeError;
        #  * it wrote to the nonexistent `self.pool` instead of `packetPool`;
        #  * classes have no .copy() — store the class itself.
        self.packetPool[packet.NID] = packet

    def registerPackets(self):
        # Register every packet type this server understands.
        self.registerPacket(ClientToServerHandshakePacket)
        self.registerPacket(DisconnectPacket)
        self.registerPacket(LoginPacket)
        self.registerPacket(PlayStatusPacket)
        self.registerPacket(ResourcePacksInfoPacket)
        self.registerPacket(ServerToClientHandshakePacket)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,477
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/Utils.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import base64
import binascii
import json
import os
import signal
import sys
import socket
import time
import urllib
import hmac
import hashlib
class Utils:
    """Static helpers: platform info, networking, PHP-style string ops and JWT.

    Methods are written without `self`/@staticmethod (project style) and
    are called as `Utils.method(...)`.
    """

    def getOS():
        """Return 'linux', 'osx' or 'windows'; None for unknown platforms."""
        if sys.platform == 'linux' or sys.platform == 'linux2':
            return 'linux'
        elif sys.platform == 'darwin':
            return 'osx'
        elif sys.platform == 'win32' or sys.platform == 'win64':
            return 'windows'
        return None  # explicit instead of implicit fall-through

    def killServer():
        # Hard-stop the whole process.
        os.kill(os.getpid(), signal.SIGTERM)

    def getPrivateIpAddress():
        """Return the LAN address of the default outbound interface."""
        # Connecting a UDP socket sends no packets; it only selects the
        # outbound interface, whose address we read back.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            s.connect(("8.8.8.8", 80))
            return s.getsockname()[0]
        finally:
            s.close()  # bug fix: the original leaked the socket

    def getPublicIpAddress():
        """Return the public address as reported by ident.me."""
        import urllib.request  # bug fix: `import urllib` alone does not load the submodule
        return urllib.request.urlopen('https://ident.me').read().decode('utf8')

    def microtime(get_as_float = False):
        """PHP-style microtime(): a float, or a 'fraction whole' string."""
        if get_as_float:
            return time.time()
        import math  # bug fix: `math` was never imported at module level
        return '%f %d' % math.modf(time.time())

    def substr(string, start, length = None):
        """PHP substr(): negative `start` counts from the end."""
        if start < 0:
            start = start + len(string)
        if not length:
            return string[start:]
        elif length > 0:
            return string[start:start + length]
        else:
            # Negative length: stop that many characters before the end.
            return string[start:length]

    def hex2bin(hexdec):
        """Decode a hex string to bytes; False on empty/'x' input.

        NOTE(review): odd-length hex (leading-zero) input still fails in
        unhexlify — confirm callers always pass even-length strings.
        """
        if hexdec == 'x':
            return False
        if hexdec == '':
            return False
        dec = int(hexdec, 16)
        b = binascii.unhexlify('%x' % dec)
        return b

    def binToHex(b):
        return binascii.hexlify(b)

    def HMACSHA256(data, secret):
        """Return the uppercase hex HMAC-SHA256 of `data` keyed by `secret`."""
        encodedData = data.encode()
        byteSecret = secret.encode()
        return hmac.new(byteSecret, encodedData, hashlib.sha256).hexdigest().upper()

    def base64UrlEncode(data):
        # URL-safe base64 with '=' padding stripped (JWT convention).
        return base64.urlsafe_b64encode(data.encode()).replace(b"=", b"").decode()

    def base64UrlDecode(data):
        # Bug fix: the encoder strips '=' padding, so restore it before
        # decoding — otherwise urlsafe_b64decode raises on most inputs.
        return base64.urlsafe_b64decode(data + "=" * (-len(data) % 4)).decode()

    def encodeJWT(header, payload, secret):
        """Build an HS256 JWT from dict header/payload and a string secret."""
        body = Utils.base64UrlEncode(json.dumps(header)) + "." + Utils.base64UrlEncode(json.dumps(payload))
        secret = Utils.HMACSHA256(body, secret)
        return body + "." + Utils.base64UrlEncode(secret)

    def decodeJWT(token: str):
        """Return the payload dict of `token`. The signature is NOT verified."""
        [headB64, payloadB64, sigB64] = token.split(".")
        rawPayloadJSON = Utils.base64UrlDecode(payloadB64)
        if rawPayloadJSON == False:
            raise Exception("Payload base64 is invalid and cannot be decoded")
        decodedPayload = json.loads(rawPayloadJSON)
        if isinstance(decodedPayload, str):
            # Double-encoded payloads arrive as a JSON string; decode again.
            decodedPayload = json.loads(decodedPayload)
        if not isinstance(decodedPayload, dict):
            raise Exception("Decoded payload should be dict, " + str(type(decodedPayload).__name__) + " received")
        return decodedPayload
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,478
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/BinaryStream.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.utlis.Binary import Binary
from podrum.utlis.UUID import UUID
class BinaryStream:
    """Sequential reader/writer over an in-memory buffer.

    The buffer is a str of code points (a port of PHP binary strings):
    every putX appends, every getX reads at `offset` and advances it.
    Multi-byte accessors delegate to the project's Binary codec.
    """

    buffer = ""
    offset = None

    def __init__(self, buffer = "", offset = 0):
        # Bug fix: the constructor was misspelled `__int__`, so it never ran
        # and instances silently shared the class-level defaults.
        self.buffer = buffer
        self.offset = offset

    def reset(self):
        self.buffer = ""
        self.offset = 0

    def setBuffer(self, buffer = "", offset = 0):
        self.buffer = buffer
        self.offset = int(offset)

    def getOffset(self):
        return self.offset

    def getBuffer(self):
        return self.buffer

    def get(self, length):
        """Read `length` chars. `length is True` drains the remainder;
        a negative length seeks to the last position and returns "".

        Bug fixes: the parameter used to shadow builtin len() (then call
        it), and the advance used an undefined name.
        """
        if length is True:
            rest = self.buffer[self.offset:]
            self.offset = len(self.buffer)
            return rest
        if length < 0:
            self.offset = len(self.buffer) - 1
            return ""
        data = self.buffer[self.offset:self.offset + length]
        self.offset += length
        return data

    def put(self, str):
        self.buffer += str

    def getBool(self):
        # Bug fix: compared a str slice against b'\x00' (always unequal).
        return self.get(1) != "\x00"

    def putBool(self, v):
        # Bug fix: appended bytes literals to a str buffer (TypeError).
        self.buffer += ("\x01" if v else "\x00")

    def getByte(self):
        # Bug fix: the original advanced the offset BEFORE reading,
        # skipping byte 0 and eventually running off the end.
        b = ord(self.buffer[self.offset])
        self.offset += 1
        return b

    def putByte(self, v):
        self.buffer += chr(v)

    def getLong(self):
        return Binary.readLong(self.get(8))

    def putLong(self, v):
        self.buffer += Binary.writeLong(v)

    def getLLong(self):
        return Binary.readLLong(self.get(8))

    def putLLong(self, v):
        self.buffer += Binary.writeLLong(v)

    def getInt(self):
        return Binary.readInt(self.get(4))

    def putInt(self, v):
        self.buffer += Binary.writeInt(v)

    def getLInt(self):
        return Binary.readLInt(self.get(4))

    def putLInt(self, v):
        self.buffer += Binary.writeLInt(v)

    def getShort(self):
        return Binary.readShort(self.get(2))

    def putShort(self, v):
        self.buffer += Binary.writeShort(v)

    def getLShort(self):
        return Binary.readLShort(self.get(2))

    def putLShort(self, v):
        self.buffer += Binary.writeLShort(v)

    def getSignedShort(self):
        return Binary.readSignedShort(self.get(2))

    def getSignedLShort(self):
        # Bug fix: a little-endian short is 2 bytes; the original read 4,
        # which desynchronised the stream and failed Binary's length check.
        return Binary.readSignedLShort(self.get(2))

    def getFloat(self):
        return Binary.readFloat(self.get(4))

    def putFloat(self, v):
        self.buffer += Binary.writeFloat(v)

    def getLFloat(self):
        return Binary.readLFloat(self.get(4))

    def putLFloat(self, v):
        self.buffer += Binary.writeLFloat(v)

    def getRoundedFloat(self, accuracy):
        return Binary.readRoundedFloat(self.get(4), accuracy)

    def getRoundedLFloat(self, accuracy):
        return Binary.readRoundedLFloat(self.get(4), accuracy)

    def getTriad(self):
        return Binary.readTriad(self.get(3))

    def putTriad(self, v):
        self.buffer += Binary.writeTriad(v)

    def getLTriad(self):
        return Binary.readLTriad(self.get(3))

    def putLTriad(self, v):
        self.buffer += Binary.writeLTriad(v)

    def getUnsignedVarInt(self):
        # NOTE(review): Binary's varint readers take offset by value, so the
        # stream offset does not advance here — confirm against callers.
        return Binary.readUnsignedVarInt(self.buffer, self.offset)

    def putUnsignedVarInt(self, v):
        self.put(Binary.writeUnsignedVarInt(v))

    def getVarInt(self):
        return Binary.readVarInt(self.buffer, self.offset)

    def putVarInt(self, v):
        self.put(Binary.writeVarInt(v))

    def getUnsignedVarLong(self):
        return Binary.readUnsignedVarLong(self.buffer, self.offset)

    def putUnsignedVarLong(self, v):
        self.put(Binary.writeUnsignedVarLong(v))

    def getVarLong(self):
        return Binary.readVarLong(self.buffer, self.offset)

    def putVarLong(self, v):
        self.put(Binary.writeVarLong(v))

    def getString(self):
        # Bug fix: the original computed the value but never returned it.
        return self.get(self.getUnsignedVarInt())

    def putString(self, v):
        # Length-prefixed (unsigned varint) string.
        self.putUnsignedVarInt(len(v))
        self.put(v)

    def getUUID(self):
        # UUID parts are stored little-endian in swapped pairs (1,0,3,2).
        part1 = self.getLInt()
        part0 = self.getLInt()
        part3 = self.getLInt()
        part2 = self.getLInt()
        return UUID(part0, part1, part2, part3)

    def putUUID(self, uuid: "UUID"):
        self.putLInt(uuid.getPart(1))
        self.putLInt(uuid.getPart(0))
        self.putLInt(uuid.getPart(3))
        self.putLInt(uuid.getPart(2))

    def feof(self):
        # NOTE(review): despite the name, this returns True while data
        # REMAINS and False at end-of-buffer; kept for compatibility.
        try:
            self.buffer[self.offset]
            return True
        except IndexError:
            return False
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,479
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/Binary.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from struct import unpack, pack, calcsize
from re import match
import decimal
import sys
from .bcmath import bcmath
class Binary:
    """Big-/little-endian (de)serialisation helpers (port of PMMP's Binary).

    read* take bytes of the exact field width; write* return bytes.
    Varint helpers operate on str buffers via the bcmath shim.
    """

    def checkLength(string, expect):
        # Guard: the exact number of bytes must be supplied.
        length = len(string)
        assert (length == expect), 'Expected ' + str(expect) + 'bytes, got ' + str(length)

    @staticmethod
    def signByte(value: int):
        # Bug fix: the original compared the *function* calcsize to 8 and
        # used shifts that always produced 0. Do a real 8-bit sign extension.
        value = int(value) & 0xff
        return value - 0x100 if value >= 0x80 else value

    @staticmethod
    def unsignByte(value: int):
        return int(value) & 0xff

    @staticmethod
    def signShort(value: int):
        # 16-bit sign extension (see signByte for the original's defect).
        value = int(value) & 0xffff
        return value - 0x10000 if value >= 0x8000 else value

    @staticmethod
    def unsignShort(value: int):
        return int(value) & 0xffff

    @staticmethod
    def signInt(value: int):
        # 32-bit sign extension.
        value = int(value) & 0xffffffff
        return value - 0x100000000 if value >= 0x80000000 else value

    @staticmethod
    def unsignInt(value: int):
        return int(value) & 0xffffffff

    @staticmethod
    def readTriad(str: bytes) -> int:
        """Big-endian unsigned 3-byte integer."""
        Binary.checkLength(str, 3)
        return unpack('>L', b'\x00' + str)[0]

    @staticmethod
    def writeTriad(value: int) -> bytes:
        return pack('>L', value)[1:]

    @staticmethod
    def readLTriad(str: bytes) -> int:
        """Little-endian unsigned 3-byte integer."""
        Binary.checkLength(str, 3)
        return unpack('<L', str + b'\x00')[0]

    @staticmethod
    def writeLTriad(value: int) -> bytes:
        return pack('<L', value)[0:-1]

    @staticmethod
    def readBool(b: bytes) -> int:
        return unpack('?', b)[0]

    @staticmethod
    def writeBool(b: int) -> bytes:
        return b'\x01' if b else b'\x00'

    @staticmethod
    def readByte(c: bytes) -> int:
        Binary.checkLength(c, 1)
        return unpack('>B', c)[0]

    @staticmethod
    def readSignedByte(c: bytes) -> int:
        Binary.checkLength(c, 1)
        return unpack('>b', c)[0]

    @staticmethod
    def writeByte(c: int) -> bytes:
        return pack(">B", c)

    @staticmethod
    def readShort(str: bytes) -> int:
        Binary.checkLength(str, 2)
        return unpack('>H', str)[0]

    @staticmethod
    def readSignedShort(str: bytes) -> int:
        Binary.checkLength(str, 2)
        return Binary.signShort(Binary.readShort(str))

    @staticmethod
    def writeShort(value: int) -> bytes:
        return pack('>H', value)

    @staticmethod
    def readLShort(str: bytes) -> int:
        Binary.checkLength(str, 2)
        return unpack('<H', str)[0]

    @staticmethod
    def readSignedLShort(str: bytes) -> int:
        Binary.checkLength(str, 2)
        return Binary.signShort(Binary.readLShort(str))

    @staticmethod
    def writeLShort(value: int) -> bytes:
        return pack('<H', value)

    @staticmethod
    def readInt(str: bytes) -> int:
        Binary.checkLength(str, 4)
        return unpack('>L', str)[0]

    @staticmethod
    def writeInt(value: int) -> bytes:
        return pack('>L', value)

    @staticmethod
    def readLInt(str: bytes) -> int:
        Binary.checkLength(str, 4)
        return unpack('<L', str)[0]

    @staticmethod
    def writeLInt(value: int) -> bytes:
        return pack('<L', value)

    @staticmethod
    def readFloat(str: bytes) -> int:
        Binary.checkLength(str, 4)
        return unpack('>f', str)[0]

    @staticmethod
    def readRoundedFloat(str, accuracy):
        return round(Binary.readFloat(str), accuracy)

    @staticmethod
    def writeFloat(value: int) -> bytes:
        return pack('>f', value)

    @staticmethod
    def readLFloat(str: bytes) -> int:
        Binary.checkLength(str, 4)
        return unpack('<f', str)[0]

    @staticmethod
    def readRoundedLFloat(str, accuracy):
        return round(Binary.readLFloat(str), accuracy)

    @staticmethod
    def writeLFloat(value: int) -> bytes:
        return pack('<f', value)

    @staticmethod
    def printFloat(value):
        # Bug fix: the original passed a PHP-delimited pattern to re.match,
        # which never matched and then crashed on .group(). Strip trailing
        # zeros of the fractional part instead (PMMP's preg_replace port).
        import re
        return re.sub(r"(\.\d+?)0+$", r"\1", str(value))

    @staticmethod
    def readDouble(str: bytes) -> int:
        Binary.checkLength(str, 8)
        return unpack('>d', str)[0]

    @staticmethod
    def writeDouble(value: int) -> bytes:
        return pack('>d', value)

    @staticmethod
    def readLDouble(str: bytes) -> int:
        Binary.checkLength(str, 8)
        return unpack('<d', str)[0]

    @staticmethod
    def writeLDouble(value: int) -> bytes:
        return pack('<d', value)

    @staticmethod
    def readLong(str: bytes) -> int:
        # Bug fix: '>L' is a 4-byte format; a long is 8 bytes ('>Q'), so the
        # original always failed its own checkLength/unpack.
        Binary.checkLength(str, 8)
        return unpack('>Q', str)[0]

    @staticmethod
    def writeLong(value: int) -> bytes:
        return pack('>Q', value)

    @staticmethod
    def readLLong(str: bytes) -> int:
        Binary.checkLength(str, 8)
        return unpack('<Q', str)[0]

    @staticmethod
    def writeLLong(value: int) -> bytes:
        return pack('<Q', value)

    @staticmethod
    def readUnsignedVarInt(buffer, offset):
        """Decode an unsigned varint starting at `offset`.

        NOTE(review): `offset` is local — callers do not observe it advance.
        """
        value = "0"
        buffer = str(buffer)
        shift = 0
        while shift <= 35:
            # Bug fix: read the byte at the CURRENT offset before advancing,
            # and shift by the pre-increment amount (first byte is << 0).
            b = ord(buffer[offset])
            offset += 1
            value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow("2", str(shift))))
            if (b & 0x80) == 0:
                return value
            elif (len(buffer) - 1) < int(offset):
                raise TypeError('Expected more bytes, none left to read')
            shift += 7
        raise TypeError('Varint did not terminate after 5 bytes!')

    @staticmethod
    def readVarInt(buffer, offset):
        # Zigzag-decode a signed varint.
        raw = Binary.readUnsignedVarInt(buffer, offset)
        temp = bcmath.bcdiv(raw, "2")
        # Bug fix: bcmath returns ints, so the original comparison against
        # the string "1" was always False and negatives decoded wrong.
        if bcmath.bcmod(raw, "2") == 1:
            temp = bcmath.bcsub(bcmath.bcmul(temp, "-1"), "1")
        return temp

    @staticmethod
    def writeUnsignedVarInt(value):
        buffer = ""
        value = int(value) & 0xffffffff
        i = 0
        while i <= 5:
            i = i + 1
            byte = int(bcmath.bcmod(value, "128"))
            value = bcmath.bcdiv(value, "128")
            if value != 0:
                buffer += chr(byte | 0x80)
            else:
                # Terminate once the value is exhausted (continuation bit clear).
                buffer += chr(byte)
                return buffer
        raise TypeError('Value too large to be encoded as a varint')

    @staticmethod
    def writeVarInt(value):
        # Zigzag-encode: v >= 0 -> 2v, v < 0 -> -2v-1.
        # Bug fix: the original took the modulus first, so its sign branch
        # could never trigger and negative values encoded incorrectly.
        value = int(value)
        if value >= 0:
            encoded = bcmath.bcmul(value, "2")
        else:
            encoded = bcmath.bcsub(bcmath.bcmul(bcmath.bcmul(value, "2"), "-1"), "1")
        return Binary.writeUnsignedVarInt(encoded)

    @staticmethod
    def readUnsignedVarLong(buffer, offset):
        """Decode an unsigned varlong starting at `offset` (up to 10 bytes)."""
        value = "0"
        buffer = str(buffer)
        shift = 0
        while shift <= 63:
            b = ord(buffer[offset])
            offset += 1
            value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow("2", str(shift))))
            if (b & 0x80) == 0:
                return value
            elif (len(buffer) - 1) < int(offset):
                raise TypeError("Expected more bytes, none left to read")
            shift += 7
        raise TypeError("VarLong did not terminate after 10 bytes!")

    @staticmethod
    def readVarLong(buffer, offset):
        raw = Binary.readUnsignedVarLong(buffer, offset)
        temp = bcmath.bcdiv(raw, "2")
        # Bug fix: int comparison (see readVarInt).
        if bcmath.bcmod(raw, "2") == 1:
            temp = bcmath.bcsub(bcmath.bcmul(temp, "-1"), "1")
        return temp

    @staticmethod
    def writeUnsignedVarLong(value):
        buffer = ""
        if bcmath.bccomp(value, "0") == -1:
            # Two's-complement wrap for negative 64-bit values.
            value = bcmath.bcadd(value, "18446744073709551616")
        i = 0
        while i <= 10:
            i = i + 1
            byte = int(bcmath.bcmod(value, "128"))
            value = bcmath.bcdiv(value, "128")
            if value != 0:
                buffer += chr(byte | 0x80)
            else:
                buffer += chr(byte)
                return buffer
        raise TypeError("Value too large to be encoded as a VarLong")

    @staticmethod
    def writeVarLong(value):
        # Zigzag-encode (see writeVarInt for the original's dead sign branch).
        value = int(value)
        if value >= 0:
            encoded = bcmath.bcmul(value, "2")
        else:
            encoded = bcmath.bcsub(bcmath.bcmul(bcmath.bcmul(value, "2"), "-1"), "1")
        return Binary.writeUnsignedVarLong(encoded)

    @staticmethod
    def flipShortEndianness(value):
        return Binary.readLShort(Binary.writeShort(value))

    @staticmethod
    def flipIntEndianness(value):
        return Binary.readLInt(Binary.writeInt(value))

    @staticmethod
    def flipLongEndianness(value):
        return Binary.readLLong(Binary.writeLong(value))
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,480
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/wizard/Parser.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import os
from podrum.lang import Base
class Parser:
    """Input-validation helpers for the setup wizard."""

    def checkYesNo(str):
        # Map y/yes -> True, n/no -> False, anything else -> None.
        answer = str.lower()
        if answer in ('y', 'yes'):
            return True
        if answer in ('n', 'no'):
            return False
        return None

    def checkIfLangExists(str):
        # A language is valid when a translation file ships in src/podrum/lang/.
        path = os.getcwd() + '/src/podrum/lang/'
        return str in Base.Base.getLangNames(path)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,481
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/nbt/tag/NamedTag.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from abc import ABCMeta, abstractmethod
from podrum.nbt.NBTStream import NBTStream
from podrum.nbt.ReaderTracker import ReaderTracker
class NamedTag:
    """Abstract base for NBT tags that carry a name.

    NOTE(review): most methods lack `self` and operate on the CLASS
    attribute NamedTag.name — kept as-is for interface compatibility,
    but this looks unintentional; confirm against subclasses.
    """
    __metaclass__ = ABCMeta  # py2-style; inert on Python 3
    name = None
    cloning = False

    def __init__(self, name = ''):
        # Bug fix: was `len(name > 32767)` — it compared str to int and
        # then took len() of the bool, raising TypeError for every name.
        if len(name) > 32767:
            raise ValueError("Tag name cannot be more than 32767 bytes, got length " + str(len(name)))
        self.name = name

    def getName():
        return NamedTag.name

    def setName(name):
        NamedTag.name = name

    def getValue(): pass

    def getType(): pass

    # Forward references are quoted (bug fix): the originals were evaluated
    # while the class body executed, before these names could resolve.
    def write(nbt: "NBTStream"): pass

    def read(nbt: "NBTStream", tracker: "ReaderTracker"): pass

    def toString(indentation = 0):
        # NOTE(review): `type(object)` is not str-concatenable and the
        # name placeholder is a literal — this method crashes if called;
        # left untouched pending a decision on the intended format.
        return (" " * indentation) + type(object) + ": " + (("name='NamedTag.name', ") if (NamedTag.name != "") else "") + "value='" + str(NamedTag.getValue()) + "'"

    def safeClone() -> "NamedTag":
        # Guards against recursive tag graphs while cloning.
        if NamedTag.cloning:
            raise ValueError("Recursive NBT tag dependency detected")
        NamedTag.cloning = True
        retval = NamedTag.copy()  # NOTE(review): no copy() is defined here — confirm subclasses provide it
        NamedTag.cloning = False
        retval.cloning = False
        return retval

    def equals(that: "NamedTag"):
        return NamedTag.name == that.name and NamedTag.equalsValue(that)

    def equalsValue(that: "NamedTag"):
        # Bug fix: `isinstance(that, NamedTag())` instantiated the class
        # instead of passing it as the type to check against.
        return isinstance(that, NamedTag) and NamedTag.getValue() == that.getValue()
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,482
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/UUID.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import hashlib
import os
import random
import time
from podrum.utils.Binary import Binary
from podrum.utils.Utils import Utils
class UUID:
    """128-bit UUID stored as four 32-bit integer parts."""

    parts = [0, 0, 0, 0]
    version = None

    def __init__(self, part1 = 0, part2 = 0, part3 = 0, part4 = 0, version = None):
        # Bug fix: the original assigned into the CLASS-level `parts` list,
        # so every instance shared (and overwrote) the same four slots.
        # Build a fresh list per instance instead.
        self.parts = [int(part1), int(part2), int(part3), int(part4)]
        # Version nibble lives in the top 4 bits of part 1 unless given.
        self.version = (self.parts[1] & 0xf000) >> 12 if version == None else int(version)

    def getVersion(self):
        return self.version

    def equals(self, uuid: "UUID"):
        # (Annotation quoted — the original evaluated `UUID` before the
        # class existed, raising NameError at import time.)
        return uuid.parts[0] == self.parts[0] and uuid.parts[1] == self.parts[1] and uuid.parts[2] == self.parts[2] and uuid.parts[3] == self.parts[3]

    def fromBinary(self, uuid, version = None):
        """Build a UUID from exactly 16 raw bytes."""
        if len(uuid) != 16:
            raise Exception("Must have exactly 16 bytes")
        return UUID(Binary.readInt(Utils.substr(uuid, 0, 4)), Binary.readInt(Utils.substr(uuid, 4, 4)), Binary.readInt(Utils.substr(uuid, 8, 4)), Binary.readInt(Utils.substr(uuid, 12, 4)), version)

    def fromString(self, uuid, version = None):
        return self.fromBinary(Utils.hex2bin(uuid.strip().replace("-", "")), version)

    def fromData(self, *data):
        # Bug fixes: accept variadic byte chunks (fromRandom passes five),
        # and md5(...).update() returns None — hash the joined bytes and
        # take digest() instead.
        digest = hashlib.md5(b"".join(data)).digest()
        return self.fromBinary(digest, 3)

    def fromRandom(self):
        # NOTE(review): os.geteuid does not exist on Windows — confirm
        # target platforms before relying on this.
        return self.fromData(Binary.writeInt(int(time.time())), Binary.writeShort(os.getpid()), Binary.writeShort(os.geteuid()), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff)), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff)))

    def toBinary(self):
        return Binary.writeInt(self.parts[0]) + Binary.writeInt(self.parts[1]) + Binary.writeInt(self.parts[2]) + Binary.writeInt(self.parts[3])

    def toString(self):
        # NOTE(review): Utils defines binToHex, not bin2hex, and the
        # versioned branch concatenates an int — confirm intended format
        # before fixing; left untouched.
        hex = Utils.bin2hex(self.toBinary())
        if self.version != None:
            return Utils.substr(hex, 0, 8) + "-" + Utils.substr(hex, 8, 4) + "-" + int(self.version, 16) + Utils.substr(hex, 13, 3) + "-8" + Utils.substr(hex, 17, 3) + "-" + Utils.substr(hex, 20, 12)
        return Utils.substr(hex, 0, 8) + "-" + Utils.substr(hex, 8, 4) + "-" + Utils.substr(hex, 12, 4) + "-" + Utils.substr(hex, 16, 4) + "-" + Utils.substr(hex, 20, 12)

    def getPart(self, partNumber: int):
        """Return one of the four 32-bit parts (0..3)."""
        if partNumber < 0 or partNumber > 3:
            raise Exception("Invalid UUID part index" + str(partNumber))
        return self.parts[partNumber]
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,483
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/network/protocol/ResourcePacksInfoPacket.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class ResourcePacksInfoPacket(DataPacket):
    """Server->client packet advertising the behavior/resource packs on offer."""
    NID = ProtocolInfo.RESOURCE_PACKS_INFO_PACKET
    # Whether the client must accept the packs to join.
    mustAccept = False
    hasScripts = False
    # NOTE(review): class-level lists are shared by every instance — confirm
    # packets are always populated by reassignment, never mutated in place.
    behaviorPackEntries = []
    resourcePackEntries = []
    def decodePayload(self):
        # Read the two header flags, then walk every advertised pack entry.
        # Field values are read only to keep the stream aligned; they are
        # discarded rather than stored.
        self.mustAccept = self.getBool()
        self.hasScripts = self.getBool()
        behaviorPackCount = self.getLShort()
        while behaviorPackCount > 0:
            self.getString()  # pack id
            self.getString()  # pack version
            self.getLLong()   # pack size
            self.getString()  # encryption key
            self.getString()  # subpack name
            self.getString()  # content identity
            self.getBool()    # has scripts
            behaviorPackCount -= 1
        resourcePackCount = self.getLShort()
        while resourcePackCount > 0:
            self.getString()  # pack id
            self.getString()  # pack version
            self.getLLong()   # pack size
            self.getString()  # encryption key
            self.getString()  # subpack name
            self.getString()  # content identity
            self.getBool()    # (unused for resource packs)
            resourcePackCount -= 1
    def encodePayload(self):
        # Write flags, then one fixed-shape record per pack entry, mirroring
        # the field order read by decodePayload above.
        self.putBool(self.mustAccept)
        self.putBool(self.hasScripts)
        self.putLShort(len(self.behaviorPackEntries))
        for entry in self.behaviorPackEntries:
            self.putString(entry.getPackId())
            self.putString(entry.getPackVersion())
            self.putLLong(entry.getPackSize())
            self.putString("") # TODO: encryption key
            self.putString("") # TODO: subpack name
            self.putString("") # TODO: content identity
            self.putBool(False) # TODO: has scripts (?)
        self.putLShort(len(self.resourcePackEntries))
        for entry in self.resourcePackEntries:
            self.putString(entry.getPackId())
            self.putString(entry.getPackVersion())
            self.putLLong(entry.getPackSize())
            self.putString("") # TODO: encryption key
            self.putString("") # TODO: subpack name
            self.putString("") # TODO: content identity
            self.putBool(False) # TODO: seems useless for resource packs
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,484
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/Server.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import time
import os
from podrum.lang.Base import Base
from podrum.utils.Logger import Logger
from podrum.utils.ServerFS import ServerFS
from podrum.utils.Utils import Utils
from podrum.wizard.Wizard import Wizard
from pyraklib.server.PyRakLibServer import PyRakLibServer
from pyraklib.server.ServerHandler import ServerHandler
class Server:
    """Top-level server bootstrap: banner, optional config wizard, RakNet
    listener, then a blocking console command loop."""
    path = None
    withWizard = None
    port = 19132  # default Minecraft Bedrock UDP port
    podrumLogo = """
____ _
| _ \ ___ __| |_ __ _ _ _ __ ___
| |_) / _ \ / _` | '__| | | | '_ ` _ \
| __/ (_) | (_| | | | |_| | | | | | |
|_| \___/ \__,_|_| \__,_|_| |_| |_|
"""
    def __init__(self, path, withWizard, isTravisBuild = False):
        # Boot sequence. `withWizard=False` skips the first-run setup wizard;
        # `isTravisBuild=True` runs a CI smoke check and exits.
        super().__init__()
        startTime = Utils.microtime(True)
        self.path = path
        self.withWizard = withWizard
        if(withWizard):
            ServerFS.checkAllFiles(path)
        else:
            Wizard.skipWizard(path, True)
        port = self.port
        print(str(self.podrumLogo))
        Wizard.isInWizard = False
        Logger.log('info', str(Base.get("startingServer")).replace("{ip}", str(Utils.getPrivateIpAddress())).replace("{port}", str(port)))
        Logger.log('info', str(Base.get("extIpMsg")).replace("{ipPublic}", str(Utils.getPublicIpAddress())))
        Logger.log('info', str(Base.get("license")))
        # NOTE(review): hard-codes 19132 instead of using `port` — confirm intended.
        server = PyRakLibServer(port=19132)
        handler = ServerHandler(server, None)
        handler.sendOption("name", "MCPE;Podrum powered server;407;1.16.0;0;0;0;PodrumPoweredServer;0")
        doneTime = Utils.microtime(True)
        finishStartupSeconds = "%.3f" % (doneTime - startTime)
        Logger.log('info', f'Done in {str(finishStartupSeconds)}s. Type "help" to view all available commands.')
        if (isTravisBuild):
            Server.checkTravisBuild(path)
        else:
            # Console REPL; leaves the loop only if the wizard takes over.
            while Wizard.isInWizard == False:
                cmd = input('> ')
                Server.command(cmd, True)
                cmd = None
            # Idle tick loop keeps the main thread alive.
            ticking = True
            while ticking:
                time.sleep(0.002)
    def command(string, fromConsole):
        # Dispatch one console command line. (No `self`: invoked as
        # Server.command(cmd, True) above.)
        if string.lower() == 'stop':
            Logger.log('info', 'Stopping server...')
            Utils.killServer()
        elif string.lower() == '':
            return
        elif string.lower() == 'help':
            Logger.log('info', '/stop: Stops the server')
        else:
            Logger.log('error', str(Base.get("invalidCommand")))
    def checkTravisBuild(path):
        # CI smoke test: server.json must exist and be non-empty; always exits.
        if not ServerFS.checkForFile(path, "server.json"):
            Logger.log("error", "Couldn't find server.json file.")
            os._exit(1)
        if os.path.getsize(f'{path}/server.json') == 0:
            Logger.log("error", "The server.json file is empty.")
            os._exit(1)
        print("Build success.")
        os._exit(0)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,485
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/resourcepacks/ResourcePack.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
class ResourcePack:
    """Abstract interface every resource-pack implementation must provide."""
    def getPath(): pass        # filesystem path of the pack
    def getPackName(): pass    # human-readable pack name
    def getPackId(): pass      # UUID string identifying the pack
    def getPackSize(): pass    # total pack size in bytes
    def getPackVersion(): pass # version string
    def getSha256(): pass      # hash of the pack archive
    def getPackChunk(start, length): pass  # raw bytes [start, start+length)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,486
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/Player.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.PacketPool import PacketPool
class Player:
    """Per-connection player state container."""

    connection = None
    server = None
    logger = None
    # Bug fix: `address` and `deviceId` were initialised with the undefined
    # name `Nome` (typo for None), which raised NameError at import time
    # and made this module unusable.
    address = None
    name = None
    locale = None
    randomId = None
    uuid = None
    xuid = None
    skin = None
    viewDistance = None
    gamemode = 0
    pitch = 0
    yaw = 0
    headYaw = 0
    onGround = False
    platformChatId = ''
    deviceOS = None
    deviceModel = None
    deviceId = None

    def __init__(self, connection, address, logger, server):
        self.connection = connection
        self.address = address
        self.logger = logger
        self.server = server
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,487
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/nbt/NBT.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from abc import ABCMeta, abstractmethod
from podrum.nbt.tag.ByteArrayTag import ByteArrayTag
from podrum.nbt.tag.ByteTag import ByteTag
from podrum.nbt.tag.CompoundTag import CompoundTag
from podrum.nbt.tag.DoubleTag import DoubleTag
from podrum.nbt.tag.FloatTag import FloatTag
from podrum.nbt.tag.IntArrayTag import IntArrayTag
from podrum.nbt.tag.IntTag import IntTag
from podrum.nbt.tag.ListTag import ListTag
from podrum.nbt.tag.LongArrayTag import LongArrayTag
from podrum.nbt.tag.LongTag import LongTag
from podrum.nbt.tag.NamedTag import NamedTag
from podrum.nbt.tag.ShortTag import ShortTag
from podrum.nbt.tag.StringTag import StringTag
class NBT:
    """NBT tag type ids and the factory that maps an id to a tag instance."""
    __metaclass__ = ABCMeta  # py2-style; inert on Python 3
    TAG_End = 0
    TAG_Byte = 1
    TAG_Short = 2
    TAG_Int = 3
    TAG_Long = 4
    TAG_Float = 5
    TAG_Double = 6
    TAG_ByteArray = 7
    TAG_String = 8
    TAG_List = 9
    TAG_COMPOUND = 10  # NOTE: named inconsistently with the other TAG_* ids
    TAG_IntArray = 11
    TAG_LongArray = 12

    @staticmethod
    def createTag(type: int) -> "NamedTag":
        """Return a fresh, empty tag instance for `type`.

        Raises ValueError for unknown ids.
        """
        if type == NBT.TAG_Byte:
            return ByteTag()
        elif type == NBT.TAG_Short:
            return ShortTag()
        elif type == NBT.TAG_Int:
            return IntTag()
        elif type == NBT.TAG_Long:
            return LongTag()
        elif type == NBT.TAG_Float:
            return FloatTag()
        elif type == NBT.TAG_Double:
            return DoubleTag()
        elif type == NBT.TAG_ByteArray:
            return ByteArrayTag()
        elif type == NBT.TAG_String:
            return StringTag()
        elif type == NBT.TAG_List:
            return ListTag()
        elif type == NBT.TAG_COMPOUND:
            # Bug fix: was NBT.TAG_Compound (wrong case), which raised
            # AttributeError for every compound tag.
            return CompoundTag()
        elif type == NBT.TAG_IntArray:
            return IntArrayTag()
        elif type == NBT.TAG_LongArray:
            return LongArrayTag()
        else:
            raise ValueError("Unknown NBT tag type " + str(type))
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,488
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/bcmath.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import decimal
class bcmath:
    """Integer-truncating shims for PHP's bcmath functions, built on decimal.

    Inputs may be ints or numeric strings; every function returns int.
    An explicit `scale` mutates the GLOBAL decimal context precision.
    """

    @staticmethod
    def bcmul(num1, num2, scale=None):
        if scale != None:
            decimal.getcontext().prec = scale
        result = decimal.Decimal(num1) * decimal.Decimal(num2)
        return int(result)

    @staticmethod
    def bcdiv(num1, num2, scale=None):
        # int() truncates toward zero, matching bcdiv with scale 0.
        if scale != None:
            decimal.getcontext().prec = scale
        result = decimal.Decimal(num1) / decimal.Decimal(num2)
        return int(result)

    @staticmethod
    def bcadd(num1, num2, scale=None):
        if scale != None:
            decimal.getcontext().prec = scale
        result = decimal.Decimal(num1) + decimal.Decimal(num2)
        return int(result)

    @staticmethod
    def bcsub(num1, num2, scale=None):
        if scale != None:
            decimal.getcontext().prec = scale
        result = decimal.Decimal(num1) - decimal.Decimal(num2)
        return int(result)

    @staticmethod
    def bccomp(num1, num2):
        # -1, 0 or 1 like PHP's bccomp (three-way compare).
        result = (int(num1) > int(num2)) - (int(num1) < int(num2))
        return int(result)

    @staticmethod
    def bcmod(num1, num2):
        result = int(num1) % int(num2)
        return int(result)

    @staticmethod
    def bcpow(num1, num2):
        result = int(num1) ** int(num2)
        return int(result)

    @staticmethod
    def bcpowmod(num1, num2, mod):
        result = pow(num1, num2, mod)
        return int(result)

    @staticmethod
    def bcscale(scale):
        # Sets (and returns) the global decimal precision.
        result = decimal.getcontext().prec = scale
        return int(result)

    @staticmethod
    def bcsqrt(num):
        import math  # bug fix: `math` was never imported in this module
        result = math.sqrt(num)
        return int(result)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,489
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/Config.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import re
import os
import json
import yaml
import pickle
from podrum.utils import Logger
from podrum.ServerFS.ServerFS import read
from podrum.Server import Server
class Config:
    """Multi-format configuration file loader/saver.

    Supports properties, JSON, YAML, serialized (pickle) and enum/list
    formats, with format auto-detection from the file extension.

    Fixes vs. the original: `formats` was a list wrapping a dict so
    `formats[extension]` always failed; `is_array` was a class-level lambda
    that received `self` as its argument via instance access; `isset` was a
    @staticmethod that still took `self`; `check` contained a SyntaxError
    (`return correct = True`); `save` was missing `self`, referenced
    undefined names (`type`, `config`, `false`, `logger`), used the wrong
    `yaml.emit` API and never wrote the serialized content to disk;
    `load` had a mutable default argument and leaked the file handle;
    the PHP regex delimiters/`$1` backrefs in fixYAMLIndexes were ported
    literally and never matched; and the helper methods parseProperties,
    writeProperties, parseList and fillDefaults were called but never
    defined (minimal implementations added).
    """

    # Format type constants (DETECT = infer from file extension).
    DETECT = -1
    PROPERTIES = 0
    CNF = PROPERTIES
    JSON = 1
    YAML = 2
    EXPORT = 3
    SERIALIZED = 4
    ENUM = 5
    ENUMERATION = ENUM

    # Class-level defaults; instances overwrite these in load()/reload().
    config = []
    nestedCache = []
    file = ''
    correct = False
    type = DETECT

    # File-extension -> format constant lookup table.
    formats = {
        "properties": PROPERTIES,
        "cnf": CNF,
        "conf": CNF,
        "config": CNF,
        "json": JSON,
        "js": JSON,
        "yml": YAML,
        "yaml": YAML,
        "export": EXPORT,
        "xport": EXPORT,
        "sl": SERIALIZED,
        "serialize": SERIALIZED,
        "txt": ENUM,
        "list": ENUM,
        "enum": ENUM,
    }

    def __init__(self, file, type=DETECT, default=None, correct=None):
        """Load *file* immediately; *correct* is kept for signature compatibility."""
        self.load(file, type, default)

    @staticmethod
    def is_array(var):
        """True when *var* is a list or tuple (PHP is_array equivalent)."""
        return isinstance(var, (list, tuple))

    @staticmethod
    def isset(variable):
        # Kept for backward compatibility only; was declared @staticmethod
        # but still took `self`, so every instance call raised TypeError.
        # NOTE(review): locals()/globals() here see isset's own scope, so
        # this check is of questionable value — confirm it is still needed.
        return variable in locals() or variable in globals()

    def reload(self):
        """Discard cached state and re-read the backing file."""
        self.config = []
        self.nestedCache = []
        self.correct = False
        self.load(self.file, self.type)

    @staticmethod
    def fixYAMLIndexes(str):
        """Quote bare single-character mapping keys so YAML parses them as strings."""
        # Fixed: PHP-style `#...#m` delimiters and `$1` backreferences were
        # copied verbatim, so the substitution never matched anything.
        return re.sub(r"^( *)([a-zA-Z_]{1} *)\:$", r'\1"\2":', str, flags=re.M)

    def load(self, file, type=DETECT, default=None):
        """Read *file* into self.config, creating it from *default* if missing.

        Returns True on success, False when the format is unknown or
        unsupported.
        """
        self.correct = True
        self.type = int(type)
        self.file = file
        if not self.is_array(default):
            default = []
        if not os.path.exists(file):
            self.config = default
            self.save()
        else:
            if self.type == self.DETECT:
                # Infer the serialization format from the file extension.
                extension = os.path.basename(self.file).split(".").pop().strip().lower()
                if extension in self.formats:
                    self.type = self.formats[extension]
                else:
                    self.correct = False
            if not self.correct:
                return False
            if self.type == self.SERIALIZED:
                # pickle needs raw bytes, not text-decoded content.
                # NOTE(review): pickle on external files is unsafe for
                # untrusted input — confirm config files are trusted.
                with open(self.file, "rb") as fh:
                    self.config = pickle.loads(fh.read())
            else:
                with open(self.file) as fh:
                    content = fh.read()
                if self.type == self.PROPERTIES or self.type == self.CNF:
                    self.parseProperties(content)
                elif self.type == self.JSON:
                    self.config = json.loads(content)
                elif self.type == self.YAML:
                    content = self.fixYAMLIndexes(content)
                    # safe_load: plain config data should never execute tags.
                    self.config = yaml.safe_load(content)
                elif self.type == self.ENUM:
                    self.parseList(content)
                else:
                    self.correct = False
                    return False
            if not self.is_array(self.config):
                self.config = default
            if self.fillDefaults(default, self.config) > 0:
                self.save()
        return True

    def check(self):
        """Return whether the last load()/save() completed without errors."""
        return self.correct

    def save(self):
        """Serialize self.config back to self.file; returns True on success."""
        if not self.correct:
            return False
        try:
            if self.type == self.PROPERTIES or self.type == self.CNF:
                content = self.writeProperties()
            elif self.type == self.JSON:
                content = json.dumps(self.config)
            elif self.type == self.YAML:
                # Fixed: yaml.emit() takes an event stream; dump() is correct.
                content = yaml.dump(self.config)
            elif self.type == self.SERIALIZED:
                content = pickle.dumps(self.config)
            elif self.type == self.ENUM:
                content = "\r\n".join(self.config.keys())
            else:
                self.correct = False
                return False
            # Fixed: the original computed `content` but never wrote it out.
            mode = "wb" if isinstance(content, (bytes, bytearray)) else "w"
            with open(self.file, mode) as fh:
                fh.write(content)
        except ValueError:
            # NOTE(review): `from podrum.utils import Logger` imports a
            # module; confirm Logger.log resolves (may need Logger.Logger.log).
            Logger.log('error', f'Could not save Config {self.file}')
        return True

    def parseProperties(self, content):
        """Parse `key=value` lines into self.config.

        Minimal implementation: the original called this without defining it.
        """
        properties = {}
        for line in content.splitlines():
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            key, _, value = line.partition("=")
            properties[key.strip()] = value.strip()
        self.config = properties

    def writeProperties(self):
        """Serialize self.config as `key=value` lines (minimal implementation)."""
        source = self.config.items() if isinstance(self.config, dict) else enumerate(self.config)
        return "\r\n".join(f"{key}={value}" for key, value in source)

    def parseList(self, content):
        """Parse one entry per non-empty line into self.config (minimal implementation)."""
        self.config = {line.strip(): True for line in content.splitlines() if line.strip()}

    def fillDefaults(self, default, data):
        """Copy keys missing from *data* out of *default*; returns count added.

        Minimal implementation: the original called this without defining it.
        """
        added = 0
        if isinstance(default, dict) and isinstance(data, dict):
            for key, value in default.items():
                if key not in data:
                    data[key] = value
                    added += 1
        return added
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,490
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/network/protocol/DisconnectPacket.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class DisconnectPacket(DataPacket):
    """Packet the server sends to disconnect a client, optionally with a reason."""

    NID = ProtocolInfo.DISCONNECT_PACKET

    # When True, the client closes silently and no reason string is serialized.
    hideDisconnectionScreen = False
    # Human-readable disconnect reason; only present when the screen is shown.
    message = ""

    def canBeSentBeforeLogin():
        """Disconnects may be issued before the login sequence completes."""
        return True

    def decodePayload(self):
        """Read the hide flag, then the reason string when a screen will be shown."""
        self.hideDisconnectionScreen = self.getBool()
        if self.hideDisconnectionScreen:
            return
        self.message = self.getString()

    def encodePayload(self):
        """Write the hide flag, then the reason string when a screen will be shown."""
        self.putBool(self.hideDisconnectionScreen)
        if self.hideDisconnectionScreen:
            return
        self.putString(self.message)
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,491
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/command/Command.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
class Command:
    """Base class for server commands; subclasses override onCommand."""

    # NOTE(review): declared without `self`/@staticmethod — presumably
    # subclasses redefine it or it is invoked on the class; confirm usage.
    def onCommand(string, fromConsole):
        """Handle a command invocation.

        string -- the raw command text.
        fromConsole -- whether the command originated from the server console.
        Default implementation is a no-op.
        """
        pass
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
1,492
|
jessehylton/Podrum
|
refs/heads/master
|
/src/podrum/utils/Logger.py
|
"""
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from datetime import datetime
from podrum.utils.TextFormat import TextFormat
# Replace the imported TextFormat class with a shared instance; this shadows
# the class name, so the Logger below reads color codes off the instance.
TextFormat = TextFormat()
class Logger:
    """Console logger that prints a colored, timestamped prefix per log level.

    Fixed: the 'debug' branch referenced `TTextFormat.GRAY` (typo), raising
    NameError whenever a debug message was logged.
    """

    # Log level -> (TextFormat color attribute, printed label).
    # Only 'warn' has a label that differs from its uppercased name.
    _LEVELS = {
        'info': ('BLUE', 'INFO'),
        'warn': ('YELLOW', 'WARNING'),
        'error': ('RED', 'ERROR'),
        'success': ('GREEN', 'SUCCESS'),
        'emergency': ('GOLD', 'EMERGENCY'),
        'alert': ('PURPLE', 'ALERT'),
        'notice': ('AQUA', 'NOTICE'),
        'critical': ('RED', 'CRITICAL'),
        'debug': ('GRAY', 'DEBUG'),
    }

    def log(type_, content):
        """Print *content* with a `[LEVEL: HH:MM]` prefix colored per *type_*.

        Unknown levels fall back to an uncolored `[TYPE: HH:MM]content` line
        (no space before the content, matching the original behavior).
        """
        stamp = datetime.now().strftime("%H:%M")
        level = Logger._LEVELS.get(type_)
        if level is None:
            print(f'[{type_.upper()}: {stamp}]{content}')
        else:
            color, label = level
            print(f'{getattr(TextFormat, color)}[{label}: {stamp}]{TextFormat.WHITE} {content}')
|
{"/src/podrum/utils/Binary.py": ["/src/podrum/utils/bcmath.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.