index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
42,588 | laszlocseh/wise.msfd | refs/heads/master | /src/wise/msfd/compliance/nationaldescriptors/a34.py | import logging
from collections import defaultdict
from lxml.etree import fromstring
from Products.Five.browser.pagetemplatefile import \
ViewPageTemplateFile as Template
from wise.msfd.data import get_xml_report_data
from wise.msfd.translation import retrieve_translation
from wise.msfd.utils import (Item, ItemLabel, ItemList, Node, RawRow, # Row,
RelaxedNode, natural_sort_key, to_html)
from ..base import BaseArticle2012
# from .data import REPORT_DEFS
logger = logging.getLogger('wise.msfd')
NSMAP = {
"w": "http://water.eionet.europa.eu/schemas/dir200856ec",
"c": "http://water.eionet.europa.eu/schemas/dir200856ec/mscommon",
}
def xp(xpath, node):
    """Evaluate *xpath* against *node* using the module's MSFD namespaces."""
    return node.xpath(xpath, namespaces=NSMAP)
class A34Item(Item):
    """One column of the 2012 Article 3/4 report table.

    Country-level fields come from the shared ``description`` node; per-MRU
    fields are read (via RelaxedNode) from the geographical-boundaries node.
    Each attribute is stored both as a mapping entry and as an instance
    attribute.
    """
    def __init__(self, parent, node, description):
        super(A34Item, self).__init__([])
        self.parent = parent
        self.node = node
        self.description = description
        self.g = RelaxedNode(node, NSMAP)
        attrs = [
            ('Member state description', self.member_state_descr),
            ('Region / subregion description', self.region_subregion),
            ('Subdivisions', self.subdivisions),
            ('Marine reporting units description', self.assessment_areas),
            ('Region or subregion', self.region_or_subregion),
            ('Member state', self.member_state),
            ('Area type', self.area_type),
            ('Marine Reporting Unit', self.mru_id),
            ('MRU Name', self.marine_reporting_unit),
        ]
        for title, getter in attrs:
            # Evaluate once: the original called getter() twice, running
            # every XPath query two times per attribute.
            value = getter()
            self[title] = value
            setattr(self, title, value)
    def member_state_descr(self):
        text = xp('w:MemberState/text()', self.description)
        return text and text[0] or ''
    def region_subregion(self):
        text = xp('w:RegionSubregion/text()', self.description)
        return text and text[0] or ''
    def subdivisions(self):
        text = xp('w:Subdivisions/text()', self.description)
        return text and text[0] or ''
    def assessment_areas(self):
        text = xp('w:AssessmentAreas/text()', self.description)
        return text and text[0] or ''
    def region_or_subregion(self):
        v = self.g['w:RegionSubRegions/text()']
        return v and v[0] or ''
    def member_state(self):
        v = self.g['w:MemberState/text()']
        return v and v[0] or ''
    def area_type(self):
        v = self.g['w:AreaType/text()']
        return v and v[0] or ''
    def mru_id(self):
        v = self.g['w:MarineUnitID/text()']
        return v and v[0] or ''
    def marine_reporting_unit(self):
        v = self.g['w:MarineUnits_ReportingAreas/text()']
        return v and v[0] or ''
class Article34(BaseArticle2012):
    """ Article 3 & 4 implementation

    klass(self, self.request, self.country_code, self.country_region_code,
    self.descriptor, self.article, self.muids)
    """
    root = None  # lxml root of the report file, populated lazily by setup_data()
    year = '2012'
    template = Template('pt/report-data-secondary.pt')
    help_text = ""
    def __init__(self, context, request, country_code, region_code,
    descriptor, article, muids, filename):
        super(Article34, self).__init__(context, request, country_code,
        region_code, descriptor, article,
        muids)
        self.filename = filename
    def sort_cols(self, cols):
        # Stable table ordering: region description, then area type, then MRU id.
        sorted_cols = sorted(
            cols, key=lambda _r: (
                _r['Region / subregion description'],
                _r['Area type'],
                _r['Marine Reporting Unit']
            )
        )
        return sorted_cols
    def setup_data(self):
        # Parse the report XML and build self.rows / self.cols for the template.
        filename = self.filename
        text = get_xml_report_data(filename)
        self.root = fromstring(text)
        nodes = xp('//w:GeographicalBoundariesID', self.root)
        description = xp('//w:Description', self.root)[0]
        cols = []
        for node in nodes:
            item = A34Item(self, node, description)
            cols.append(item)
        sorted_cols = self.sort_cols(cols)
        self.rows = []
        # Transpose columns into rows: every A34Item exposes the same keys, so
        # use the first column's keys and gather that attribute across all
        # columns, pairing each raw value with its translated counterpart.
        for col in sorted_cols:
            for name in col.keys():
                values = []
                for inner in sorted_cols:
                    values.append(inner[name])
                raw_values = []
                vals = []
                for v in values:
                    raw_values.append(v)
                    vals.append(self.context.translate_value(
                        name, v, self.country_code))
                row = RawRow(name, vals, raw_values)
                self.rows.append(row)
            break  # only need the "first" row
        self.cols = sorted_cols
    def __call__(self):
        # Parse lazily on first render.
        if self.root is None:
            self.setup_data()
        return self.template()
class A34Item_2018_mru(Item):
    """A single Marine Reporting Unit extracted from a 2018 Art. 3/4 node.

    Each field is stored both as a mapping entry and as an instance attribute.
    """
    def __init__(self, node):
        super(A34Item_2018_mru, self).__init__([])
        self.node = node
        self.g = RelaxedNode(node, NSMAP)
        for label, value in (
            ('Region or subregion', self.region_or_subregion()),
            ('Member state', self.member_state()),
            ('Area type', self.area_type()),
            ('Marine Reporting Unit', self.mru_id()),
            ('MRU Name', self.marine_reporting_unit()),
        ):
            self[label] = value
            setattr(self, label, value)
    def _first(self, selector):
        # First text hit for the selector, or '' when nothing matched.
        hits = self.g[selector]
        return hits and hits[0] or ''
    def region_or_subregion(self):
        return self._first('w:RegionSubRegions/text()')
    def member_state(self):
        return self._first('w:MemberState/text()')
    def area_type(self):
        return self._first('w:AreaType/text()')
    def mru_id(self):
        return self._first('w:MarineUnitID/text()')
    def marine_reporting_unit(self):
        return self._first('w:MarineUnits_ReportingAreas/text()')
class A34Item_2018_main(Item):
    """Top-level 2018 Article 3/4 item: country-level description fields plus
    a rendered HTML table of the file's Marine Reporting Units."""
    mrus_template = Template('pt/mrus-table-art34.pt')
    TRANSLATABLES_EXTRA = ['MRU Name']  # per-MRU fields that get translated
    def __init__(self, context, request, description,
    mru_nodes, previous_mrus=None):
        super(A34Item_2018_main, self).__init__([])
        self.description = description
        self.context = context
        self.request = request
        # Country-level attributes, stored both as mapping entries and as
        # instance attributes.
        attrs = [
            ('Member state description', self.member_state_descr),
            ('Region / subregion description', self.region_subregion),
            ('Subdivisions', self.subdivisions),
            ('Marine reporting units description', self.assessment_areas),
        ]
        for title, getter in attrs:
            self[title] = getter()
            setattr(self, title, getter())
        mrus = []
        for node in mru_nodes:
            item = A34Item_2018_mru(node)
            mrus.append(item)
        sorted_mrus = sorted(mrus, key=lambda x: x['Marine Reporting Unit'])
        self._mrus = sorted_mrus
        self.available_mrus = [x['Marine Reporting Unit'] for x in sorted_mrus]
        self.available_regions = set(
            [x['Region or subregion'] for x in sorted_mrus]
        )
        self.previous_mrus = previous_mrus or []
        item_labels = sorted_mrus and sorted_mrus[0].keys() or ""
        # NOTE: sorted_mrus is rebound here -- from the list of items to the
        # rendered HTML markup of the MRU table.
        sorted_mrus = self.mrus_template(
            item_labels=item_labels,
            item_values=sorted_mrus,
            previous_mrus=self.previous_mrus,
            country_code=self.context.country_code,
            translate_value=self.translate_value
        )
        self['MRUs'] = sorted_mrus
        setattr(self, 'MRUs', sorted_mrus)
    # Region or subregion Member state Area type MRU ID Marine
    # reporting unit Marine reporting unit
    def get_translatable_extra_data(self):
        """ Get the translatable fields from the MRU nodes
        :return: a set of non-empty values to translate
        """
        res = []
        for row in self._mrus:
            for field in self.TRANSLATABLES_EXTRA:
                value = getattr(row, field, None)
                if not value:
                    continue
                res.append(value)
        return set(res)
    def translate_value(self, fieldname, value, source_lang):
        # Delegate to the shared translate view; only fields listed in
        # TRANSLATABLES_EXTRA are actually translated.
        is_translatable = fieldname in self.TRANSLATABLES_EXTRA
        v = self.context.context.translate_view()
        return v.translate(source_lang=source_lang,
        value=value,
        is_translatable=is_translatable)
    def sort_mrus(self, cols):
        # Deterministic ordering: member state, region, MRU id, MRU name.
        sorted_cols = sorted(
            cols, key=lambda _r: (
                _r['Member state'],
                _r['Region or subregion'],
                _r['Marine Reporting Unit'],
                _r['MRU Name']
            )
        )
        return sorted_cols
    def member_state_descr(self):
        text = xp('w:MemberState/text()', self.description)
        return text and text[0] or ''
    def region_subregion(self):
        text = xp('w:RegionSubregion/text()', self.description)
        return text and text[0] or ''
    def subdivisions(self):
        text = xp('w:Subdivisions/text()', self.description)
        return text and text[0] or ''
    def assessment_areas(self):
        text = xp('w:AssessmentAreas/text()', self.description)
        return text and text[0] or ''
class Article34_2018(BaseArticle2012):
    """ Implementation for Article 3/4 2018 reported data
    """
    # NOTE(review): year is '2012' although this class handles 2018 data --
    # looks copied from Article34; confirm whether downstream code keys on it.
    year = '2012'
    root = None  # lxml root of the report file, populated lazily by setup_data()
    template = Template('pt/report-data-secondary.pt')
    help_text = ""
    def __init__(self, context, request, country_code, region_code,
    descriptor, article, muids, filename=None,
    previous_mrus=None):
        # TODO: use previous_mrus to highlight this file MRUs according to edit
        # status: added or deleted
        super(Article34_2018, self).__init__(context, request, country_code,
        region_code, descriptor, article,
        muids)
        self.filename = filename
        self.previous_mrus = previous_mrus
    def get_report_file_root(self, filename=None):
        # Lazily parse and return the XML root.  The filename argument is
        # unused: self.filename is always the file parsed.
        if self.root is None:
            self.setup_data()
        return self.root
    def setup_data(self):
        # Parse the report XML, build the single main item and the rows/cols
        # consumed by the template.
        filename = self.filename
        text = get_xml_report_data(filename)
        self.root = fromstring(text)
        mru_nodes = xp('//w:GeographicalBoundariesID', self.root)
        description = xp('//w:Description', self.root)[0]
        # TODO: also send the previous file data
        main_node = A34Item_2018_main(
            self, self.request, description, mru_nodes, self.previous_mrus
        )
        self.translatable_extra_data = main_node.get_translatable_extra_data()
        self.available_mrus = main_node.available_mrus
        self.available_regions = main_node.available_regions
        self.rows = []
        # TODO: this code needs to be explained. It's hard to understand what
        # its purpose is
        # Builds one RawRow per attribute of the single main node, pairing the
        # raw value with its translated counterpart for display.
        for name in main_node.keys():
            values = []
            for inner in [main_node]:
                values.append(inner[name])
            raw_values = []
            vals = []
            for v in values:
                raw_values.append(v)
                vals.append(self.context.translate_value(
                name, v, self.country_code))
            row = RawRow(name, vals, raw_values)
            self.rows.append(row)
        self.cols = [main_node]
    def __call__(self):
        # Parse lazily on first render.
        if self.root is None:
            self.setup_data()
        return self.template()
| {"/src/wise/msfd/compliance/nationaldescriptors/a8esa.py": ["/src/wise/msfd/compliance/nationaldescriptors/data.py"], "/src/wise/msfd/compliance/nationalsummary/main.py": ["/src/wise/msfd/compliance/nationaldescriptors/main.py"], "/src/wise/msfd/compliance/regionalsummary/introduction.py": ["/src/wise/msfd/compliance/nationalsummary/introduction.py"], "/src/wise/msfd/compliance/nationalsummary/descriptor_assessments.py": ["/src/wise/msfd/compliance/nationaldescriptors/main.py"], "/src/wise/msfd/compliance/nationalsummary/reportdata.py": ["/src/wise/msfd/compliance/nationaldescriptors/a34.py"], "/src/wise/msfd/compliance/regionalsummary/pdfexport.py": ["/src/wise/msfd/compliance/nationalsummary/pdfexport.py", "/src/wise/msfd/compliance/regionalsummary/introduction.py"], "/src/wise/msfd/compliance/regionaldescriptors/reportdata.py": ["/src/wise/msfd/compliance/nationaldescriptors/utils.py", "/src/wise/msfd/compliance/regionaldescriptors/data.py"], "/src/wise/msfd/compliance/landingpage.py": ["/src/wise/msfd/compliance/assessment.py"], "/src/wise/msfd/__init__.py": ["/src/wise/msfd/patch.py"], "/src/wise/msfd/search/a9.py": ["/src/wise/msfd/__init__.py", "/src/wise/msfd/search/base.py"], "/src/wise/msfd/compliance/nationalsummary/pdfexport.py": ["/src/wise/msfd/compliance/nationaldescriptors/a34.py", "/src/wise/msfd/compliance/nationalsummary/descriptor_assessments.py", "/src/wise/msfd/compliance/nationalsummary/introduction.py"]} |
42,589 | Adil780/CUBIC-WORLD | refs/heads/main | /main.py | import pygame, time, view, controller, model
pygame.init()

# Build the starting wall of cubes, then run the fixed-step main loop:
# sleep ~1/60 s, draw the frame, advance the simulation, handle input.
model.create_level_1()
FRAME_DELAY = 1 / 60
while True:
    time.sleep(FRAME_DELAY)
    view.draw()
    model.always()
    controller.events()
42,590 | Adil780/CUBIC-WORLD | refs/heads/main | /controller.py | import pygame, cubic, model
pygame.init()
def events():
    """
    Process the pygame event queue: quit on window close; on a left click,
    buy a ball when the buy button is hit, otherwise damage any cube under
    the cursor and award a coin for it.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            exit()
        if event.type == pygame.MOUSEBUTTONDOWN and event.button == pygame.BUTTON_LEFT:
            pos = event.pos
            if model.button_buy.collidepoint(pos):
                model.buy_ball()
            # Bug fix: minus_hp() removes destroyed cubes from model.cub_list,
            # and the original iterated the live list while mutating it (which
            # silently skips elements); iterate over a snapshot instead.
            for cube in list(model.cub_list):
                if cube.rect.collidepoint(pos):
                    cube.minus_hp(model.cub_list)
                    # Only undefended cubes yield a coin, matching the
                    # scoring rule used by Ball.fly() in ball.py.
                    if not cube.defended:
                        model.coin += 1
| {"/main.py": ["/view.py", "/controller.py", "/model.py"], "/controller.py": ["/cubic.py", "/model.py"], "/model.py": ["/cubic.py", "/settings.py", "/ball.py"], "/ball.py": ["/settings.py", "/model.py", "/cubic.py"], "/view.py": ["/settings.py", "/model.py", "/cubic.py"]} |
42,591 | Adil780/CUBIC-WORLD | refs/heads/main | /model.py | import pygame, cubic, settings, random, ball
pygame.init()
level = 1  # current level number (only level 1 is implemented)
cub_list = []  # all live Cub sprites; minus_hp() removes destroyed cubes
group = pygame.sprite.LayeredUpdates()  # draw group shared with the view
coin = 0  # player currency: earned per cube hit, spent in buy_ball()
# Screen-space rectangle of the "buy a ball" button (drawn by the view,
# hit-tested by the controller).
button_buy = pygame.Rect(settings.SCREEN_WIDTH - 250, 70, 70, 50)
ball_list = []  # all live Ball sprites, advanced each frame by always()
def buy_ball():
    """Spend 10 coins to spawn a new ball at (100, 800); no-op if too poor."""
    global coin
    if coin < 10:
        return
    coin -= 10
    new_ball = ball.Ball(100, 800, 1, 2)
    ball_list.append(new_ball)
    group.add(new_ball)
def create_level_1():
    """Fill the middle of the screen with a 5-column wall of 1-hp cubes."""
    colums = 5
    # Horizontal span of the wall, centred on the screen.
    half_span = int(colums / 2 * cubic.Cub.CUBIC_SIZE_W)
    start = int(settings.SCREEN_WIDTH / 2) - half_span
    stop = int(settings.SCREEN_WIDTH / 2) + half_span
    step = cubic.Cub.CUBIC_SIZE_W + 2
    for x in range(start, stop, step):
        for y in range(0, settings.SCREEN_HEIGHT - cubic.Cub.CUBIC_SIZE_H, cubic.Cub.CUBIC_SIZE_H + 2):
            cube = cubic.Cub(x, y, 1, False)
            cub_list.append(cube)
            group.add(cube)
def always():
    """Advance every ball one frame and bank the coins its hits earned."""
    global coin
    earned = 0
    for flying_ball in ball_list:
        earned += flying_ball.fly(cub_list)
    coin += earned
| {"/main.py": ["/view.py", "/controller.py", "/model.py"], "/controller.py": ["/cubic.py", "/model.py"], "/model.py": ["/cubic.py", "/settings.py", "/ball.py"], "/ball.py": ["/settings.py", "/model.py", "/cubic.py"], "/view.py": ["/settings.py", "/model.py", "/cubic.py"]} |
42,592 | Adil780/CUBIC-WORLD | refs/heads/main | /cubic.py | import pygame
pygame.init()
from pygame.sprite import DirtySprite
f = pygame.font.SysFont("arial", 40, True)
class Cub(DirtySprite):
    """A breakable block that displays its remaining hp (or infinity when
    defended) and removes itself from its groups when hp reaches zero."""
    CUBIC_SIZE_W = 70  # sprite width in pixels
    CUBIC_SIZE_H = 50  # sprite height in pixels
    def dessapear(self):
        # NOTE(review): debug leftover (misspelling of "disappear"); not
        # called anywhere in the visible code.
        print("12")
    def __init__(self, posx, posy, hp, defended):
        DirtySprite.__init__(self)
        self.rect = pygame.Rect(posx, posy, self.CUBIC_SIZE_W, self.CUBIC_SIZE_H)
        self.hp = hp
        self.defended = defended  # defended cubes are indestructible
        self.create_image()
    def create_image(self):
        # Re-render the sprite surface: filled box, hp (or infinity) label,
        # rounded black border.
        if self.defended == False:
            lifes = f.render(str(self.hp), True, [2, 94, 0])
        else:
            lifes = f.render("\u221e", True, [2, 94, 0])
        width = lifes.get_width()
        height = lifes.get_height()
        self.image = pygame.Surface([self.CUBIC_SIZE_W, self.CUBIC_SIZE_H], pygame.SRCALPHA)
        self.image.fill([0, 255, 237], [3, 3, self.CUBIC_SIZE_W, self.CUBIC_SIZE_H - 7])
        self.image.blit(lifes, [self.CUBIC_SIZE_W / 2 - width / 2, self.CUBIC_SIZE_H / 2 - height / 2])
        pygame.draw.rect(self.image, [0, 0, 0], [0, 0, self.CUBIC_SIZE_W, self.CUBIC_SIZE_H], 5, 6)
    # NOTE(review): uses `cubic` in place of the conventional `self`, and the
    # second parameter shadows the builtin `list`; callers pass the shared
    # cube list positionally, so names are kept for compatibility.
    def minus_hp(cubic, list):
        # Take one point of damage (no-op when defended); at 0 hp the cube
        # removes itself from every sprite group and from the passed-in list.
        if cubic.defended != True:
            cubic.hp -= 1
            cubic.create_image()
            if cubic.hp <= 0:
                a = cubic.groups()
                for i in a:
                    i.remove(cubic)
                list.remove(cubic)
| {"/main.py": ["/view.py", "/controller.py", "/model.py"], "/controller.py": ["/cubic.py", "/model.py"], "/model.py": ["/cubic.py", "/settings.py", "/ball.py"], "/ball.py": ["/settings.py", "/model.py", "/cubic.py"], "/view.py": ["/settings.py", "/model.py", "/cubic.py"]} |
42,593 | Adil780/CUBIC-WORLD | refs/heads/main | /settings.py | import pygame
pygame.init()
# Window dimensions in pixels, shared by the view and the game model.
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 1000
| {"/main.py": ["/view.py", "/controller.py", "/model.py"], "/controller.py": ["/cubic.py", "/model.py"], "/model.py": ["/cubic.py", "/settings.py", "/ball.py"], "/ball.py": ["/settings.py", "/model.py", "/cubic.py"], "/view.py": ["/settings.py", "/model.py", "/cubic.py"]} |
42,594 | Adil780/CUBIC-WORLD | refs/heads/main | /ball.py | import pygame, settings, model, cubic
pygame.init()
from pygame.sprite import DirtySprite
class Ball(DirtySprite):
    """A bouncing ball that damages cubes it collides with.

    Movement is resolved one axis at a time (x first, then y), reflecting the
    velocity on screen edges and cube hits.
    """
    BALL_WIDTH = 50   # sprite width in pixels
    BALL_HEIGHT = 50  # sprite height in pixels
    def __init__(self, posx, posy, speedx, speedy):
        DirtySprite.__init__(self)
        self.layer = 2  # draw above the cubes in the LayeredUpdates group
        self.speedx = speedx
        self.speedy = speedy
        self.image = pygame.Surface([self.BALL_WIDTH, self.BALL_HEIGHT], pygame.SRCALPHA)
        self.rect = pygame.Rect(posx, posy, self.BALL_WIDTH, self.BALL_HEIGHT)
        pygame.draw.circle(self.image, [255, 255, 255], [self.rect.width - self.BALL_WIDTH / 2, self.rect.height - self.BALL_HEIGHT / 2], self.BALL_WIDTH / 2)
        pygame.draw.circle(self.image, [0, 0, 0], [self.rect.width - self.BALL_WIDTH / 2, self.rect.height - self.BALL_HEIGHT / 2], self.BALL_WIDTH / 2, 4)
        print(1)
    def fly(self, cub):
        """Advance one frame against the cube list *cub*; return the number
        of undefended cubes hit this frame (one coin each).

        NOTE(review): minus_hp() removes destroyed cubes from *cub* while it
        is being iterated, which can skip elements -- confirm intended.
        """
        l = 0  # coins earned this frame
        # --- horizontal step ---
        self.rect.x += self.speedx
        if self.rect.right >= settings.SCREEN_WIDTH:
            self.speedx = -self.speedx
            self.rect.right = settings.SCREEN_WIDTH
        if self.rect.left <= 0:
            self.speedx = -self.speedx
            self.rect.left = 0
        for i in cub:
            a = self.rect.colliderect(i)
            if a and not i.defended:
                l += 1
            if a:
                # Push the ball back out of the cube on the side it entered.
                if self.speedx > 0:
                    self.rect.right = i.rect.left
                    i.minus_hp(cub)
                elif self.speedx < 0:
                    self.rect.left = i.rect.right
                    i.minus_hp(cub)
                self.speedx = -self.speedx
        # --- vertical step ---
        self.rect.y += self.speedy
        if self.rect.top <= 0:
            self.speedy = -self.speedy
            self.rect.top = 0
        if self.rect.bottom >= settings.SCREEN_HEIGHT:
            self.speedy = -self.speedy
            self.rect.bottom = settings.SCREEN_HEIGHT
        for b in cub:
            c = self.rect.colliderect(b)
            if c and not b.defended:
                l += 1
            if c:
                if self.speedy < 0:
                    self.rect.top = b.rect.bottom
                    b.minus_hp(cub)
                if self.speedy > 0:
                    self.rect.bottom = b.rect.top
                    b.minus_hp(cub)
                self.speedy = -self.speedy
        return l
| {"/main.py": ["/view.py", "/controller.py", "/model.py"], "/controller.py": ["/cubic.py", "/model.py"], "/model.py": ["/cubic.py", "/settings.py", "/ball.py"], "/ball.py": ["/settings.py", "/model.py", "/cubic.py"], "/view.py": ["/settings.py", "/model.py", "/cubic.py"]} |
42,595 | Adil780/CUBIC-WORLD | refs/heads/main | /view.py | import pygame, settings, model, cubic
pygame.init()
screen = pygame.display.set_mode([settings.SCREEN_WIDTH, settings.SCREEN_HEIGHT])
f = pygame.font.SysFont("arial", 50, True)
def draw():
    """Render one frame: background, sprites, coin counter and buy button."""
    screen.fill([255, 0, 0])
    model.group.draw(screen)
    coin_label = f.render("COINS " + str(model.coin), True, [255, 247, 9])
    screen.blit(coin_label, [settings.SCREEN_WIDTH - 250, 10])
    pygame.draw.rect(screen, [0, 255, 0], model.button_buy)
    price_label = f.render("-10", True, [0, 0, 0])
    screen.blit(price_label, model.button_buy)
    pygame.display.flip()
| {"/main.py": ["/view.py", "/controller.py", "/model.py"], "/controller.py": ["/cubic.py", "/model.py"], "/model.py": ["/cubic.py", "/settings.py", "/ball.py"], "/ball.py": ["/settings.py", "/model.py", "/cubic.py"], "/view.py": ["/settings.py", "/model.py", "/cubic.py"]} |
42,603 | damolaajayi/Hospital | refs/heads/master | /patient_input/forms.py | from django import forms
from patients.models import patient
from .models import DoctorInfo
class PatientInput(forms.ModelForm):
    """Registration form exposing the editable fields of the patient model."""
    class Meta:
        model = patient
        fields = [
            'Reg_No',
            'first_name',
            'Last_name',
            'Email',
            'Patient_Age',
            'Gender',
            'Contact_No',
            'Patient_Address',
        ]
class DoctorInformation(forms.ModelForm):
    """Data-entry form for DoctorInfo records (Name is the primary key)."""
    class Meta:
        model = DoctorInfo
        fields = [
            'Name',
            'Age',
            'Contact_No',
            'Gender',
            'Department',
        ]
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,604 | damolaajayi/Hospital | refs/heads/master | /patient_input/urls.py | from django.urls import path
from django.urls import include
from . import views
# Data-entry routes; the names are referenced from templates via {% url %}.
urlpatterns = [
    path('patientinput/', views.home, name='patientinput'),
    path('doctorinfo/', views.doc, name='doctorinfo')
]
42,605 | damolaajayi/Hospital | refs/heads/master | /patients/forms.py | from django import forms
from .models import Prescription, patient
class DoctorForm(forms.ModelForm):
    """Prescription-entry form; `patient` renders as a model choice field."""
    class Meta:
        model = Prescription
        fields = [
            'patient',
            'Complaint',
            'Prescription',
            'Remarks',
        ]
#class DoctorForm(forms.Form):
#patient = forms.ModelChoiceField(queryset=patient.objects.all())
#Complaint = forms.CharField(widget=forms.Textarea)
#Prescription = forms.CharField(widget=forms.Textarea)
#Remarks = forms.CharField(widget=forms.Textarea)
#Date = forms.DateField()
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,606 | damolaajayi/Hospital | refs/heads/master | /patients/urls.py | from django.urls import path
from django.urls import include
from . import views
# Read-only listing and prescription-entry routes for the patients app.
urlpatterns = [
    path('patientsview/', views.patient_list, name='patient_list'),
    path('doctor/', views.doct, name='doct'),
    path('doctorview/', views.docview, name='docview'),
    path('prescription/', views.prescrip, name='prescrip'),
    path('patient/', views.singlepatient, name='singlepatient'),
]
42,607 | damolaajayi/Hospital | refs/heads/master | /hospital/views.py | from django.views.generic import TemplateView
from django.shortcuts import render
from django.urls import reverse_lazy
from django.views import generic
from .forms import DoctorSignUpForm
class HomePageView(TemplateView):
    """Static landing page."""
    template_name = 'home.html'
class SignUp(generic.CreateView):
    """Doctor self-registration; redirects to the login page on success."""
    form_class = DoctorSignUpForm
    success_url = reverse_lazy('hospital:login')
    template_name = 'signup.html'
42,608 | damolaajayi/Hospital | refs/heads/master | /patient_input/migrations/0001_initial.py | # Generated by Django 2.1rc1 on 2018-10-22 19:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the DoctorInfo table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='DoctorInfo',
            fields=[
                ('Name', models.CharField(max_length=100, primary_key=True, serialize=False)),
                ('Age', models.CharField(max_length=50)),
                ('Contact_No', models.BigIntegerField()),
                ('Gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=6)),
                ('Department', models.CharField(max_length=100)),
            ],
        ),
    ]
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,609 | damolaajayi/Hospital | refs/heads/master | /hospital/admin.py | from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from .forms import DoctorSignUpForm, DoctorUserChangeForm
from .models import DoctorUser
class DoctorUserAdmin(UserAdmin):
    """Admin config binding the custom signup/change forms to DoctorUser."""
    model = DoctorUser
    add_form = DoctorSignUpForm
    form = DoctorUserChangeForm
admin.site.register(DoctorUser, DoctorUserAdmin)
# Register your models here.
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,610 | damolaajayi/Hospital | refs/heads/master | /patients/migrations/0003_auto_20181024_1745.py | # Generated by Django 2.1rc1 on 2018-10-24 16:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: shortens patient.Patient_Address to max_length=100."""
    dependencies = [
        ('patients', '0002_auto_20181024_1742'),
    ]
    operations = [
        migrations.AlterField(
            model_name='patient',
            name='Patient_Address',
            field=models.TextField(max_length=100),
        ),
    ]
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,611 | damolaajayi/Hospital | refs/heads/master | /patients/migrations/0001_initial.py | # Generated by Django 2.1rc1 on 2018-10-22 19:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates patient and Prescription
    tables (Prescription has a cascading FK to patient)."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='patient',
            fields=[
                ('Reg_No', models.SmallIntegerField(primary_key=True, serialize=False)),
                ('first_name', models.CharField(max_length=25)),
                ('Last_name', models.CharField(max_length=25)),
                ('Email', models.EmailField(max_length=50)),
                ('Patient_Age', models.CharField(max_length=10)),
                ('Gender', models.CharField(choices=[('M', 'Male'), ('F', 'Female')], max_length=6)),
                ('Patient_Address', models.TextField(max_length=200)),
                ('Contact_No', models.BigIntegerField()),
                ('Room_No', models.IntegerField()),
                ('time', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Prescription',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Complaint', models.TextField(max_length=300)),
                ('Prescription', models.TextField(max_length=200)),
                ('Remarks', models.TextField(max_length=200)),
                ('Date', models.DateTimeField(verbose_name='Date prescribed')),
                ('patient', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='patients.patient')),
            ],
            options={
                'ordering': ('patient',),
            },
        ),
    ]
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,612 | damolaajayi/Hospital | refs/heads/master | /patients/migrations/0002_auto_20181024_1742.py | # Generated by Django 2.1rc1 on 2018-10-24 16:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drops patient.Room_No and tweaks Prescription fields
    (Date becomes auto_now_add, Remarks shortened to 100)."""
    dependencies = [
        ('patients', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='patient',
            name='Room_No',
        ),
        migrations.AlterField(
            model_name='prescription',
            name='Date',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Date prescribed'),
        ),
        migrations.AlterField(
            model_name='prescription',
            name='Remarks',
            field=models.TextField(max_length=100),
        ),
    ]
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,613 | damolaajayi/Hospital | refs/heads/master | /patients/views.py | from django.http import Http404
from django.shortcuts import render, get_object_or_404, redirect
from .models import patient, Prescription
from .forms import DoctorForm
from patient_input.models import DoctorInfo
from django.db.models import Q
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required(login_url="/users/login")
def patient_list(request):
    """List patients (newest registration first), optionally filtered by ?q=
    against first name, last name or registration number.

    Bug fix: the original context dict defined the "patient" key twice, so
    the ordered queryset was silently discarded; ordering is now applied to
    the filtered queryset itself.
    """
    patients = patient.objects.order_by('-Reg_No')
    term = request.GET.get("q")
    if term:
        patients = patients.filter(
            Q(first_name__icontains=term) |
            Q(Last_name__icontains=term) |
            Q(Reg_No__icontains=term)
        ).distinct()
    context = {
        "patient": patients
    }
    return render(request, 'patients/patients.html', context)
@login_required(login_url="/users/login")
def doct(request):
    """Render the prescription-entry form and save valid POST submissions."""
    form = DoctorForm(request.POST or None)
    if form.is_valid():
        # commit=True is the default; the returned instance was unused.
        form.save()
    context = {
        'form': form
    }
    return render(request, 'patients/ex.html', context)
@login_required(login_url="/users/login")
def docview(request):
    """List doctors, optionally filtered by ?q= on Name, Department or Age.

    Bug fix: the original filtered on ``ID__icontains`` but DoctorInfo has no
    ``ID`` field (``Name`` is the primary key), so any search raised a
    FieldError; the lookup now uses Department instead.
    """
    query = DoctorInfo.objects.all()
    qs = request.GET.get("q")
    if qs:
        query = query.filter(
            Q(Name__icontains=qs) |
            Q(Department__icontains=qs) |
            Q(Age__icontains=qs)
        ).distinct()
    context = {
        "doctor": query
    }
    return render(request, 'patients/dove.html', context)
@login_required(login_url="/users/login")
def prescrip(request):
    """Render all prescriptions alongside a searchable patient list."""
    prescriptions = Prescription.objects.all()
    patients = patient.objects.all()
    term = request.GET.get("q")
    if term:
        patients = patients.filter(
            Q(Reg_No__icontains=term) |
            Q(first_name__icontains=term) |
            Q(Last_name__icontains=term)
        ).distinct()
    context = {
        "qs": prescriptions,
        'q': patients
    }
    return render(request, 'patients/prescription.html', context)
@login_required(login_url="/users/login")
def singlepatient(request):
    """Render every patient together with every prescription record."""
    context = {
        "patient": patient.objects.all(),
        "Prescription": Prescription.objects.all(),
    }
    return render(request, 'patients/single.html', context)
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,614 | damolaajayi/Hospital | refs/heads/master | /hospital/urls.py | from django.urls import path
from django.urls import include
from . import views
from django.contrib.auth.views import LoginView, LogoutView
from . import views
app_name = 'hospital'
# Landing page plus authentication routes; LoginView/LogoutView are the
# stock django.contrib.auth class-based views with custom templates.
urlpatterns = [
    path('', views.HomePageView.as_view(), name='home'),
    path('signup/', views.SignUp.as_view(), name='signup'),
    path('login/', LoginView.as_view(template_name='hospital/login.html'), name="login"),
    path('logout/', LogoutView.as_view(template_name='hospital/logout.html'), name="logout"),
]
42,615 | damolaajayi/Hospital | refs/heads/master | /hospital/models.py | from django.contrib.auth.models import AbstractUser, UserManager
class DoctorUserManager(UserManager):
    """Stock UserManager, subclassed as an extension point for doctor queries."""
    pass
class DoctorUser(AbstractUser):
    """Custom user model for doctors.

    NOTE(review): presumably wired up via settings.AUTH_USER_MODEL -- confirm.
    """
    objects = DoctorUserManager()
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,616 | damolaajayi/Hospital | refs/heads/master | /patient_input/views.py | from django.shortcuts import render
from .forms import PatientInput, DoctorInformation
from patients.models import patient
from .models import DoctorInfo
from django.contrib.auth.decorators import login_required
@login_required(login_url="/users/login")
def home(request):
    """Show the patient-registration form and persist valid submissions."""
    if request.method == 'POST':
        form = PatientInput(request.POST)
        if form.is_valid():
            # ModelForm.save() creates the record through the form itself;
            # the previous manual objects.create(**cleaned_data) bypassed it,
            # and a stray debug print() leaked form data to stdout.
            form.save()
    else:
        form = PatientInput()
    return render(request, 'patient_input/home.html', {'form': form})
@login_required(login_url="/users/login")
def doc(request):
    """Show the doctor-information form and persist valid submissions.

    Bug fix: the original called DoctorInfo.objects.create() with no
    arguments, discarding the validated form data and inserting an empty
    row; the form is now saved properly (debug print removed as well).
    """
    if request.method == 'POST':
        form = DoctorInformation(request.POST)
        if form.is_valid():
            form.save()
    else:
        form = DoctorInformation()
    return render(request, 'patient_input/doc.html', {'form': form})
| {"/hospital/admin.py": ["/hospital/models.py"], "/patients/views.py": ["/patients/forms.py"], "/patient_input/views.py": ["/patient_input/forms.py"]} |
42,619 | wbutler/beyondtv-podcast-app | refs/heads/master | /Tester.py | import Config
import Debug
import RecordingsWatcher
strs = ["Daily", "Colbert", "Soup"]
files = []
results = RecordingsWatcher.GetAvailableFiles(strs)
for fileSet in results.values():
for fileName in fileSet:
files.append( fileName )
print RecordingsWatcher.RemoveGrowingFiles( files )
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,620 | wbutler/beyondtv-podcast-app | refs/heads/master | /RecordingsWatcher.py |
import os
import sys
import time
import Config
import Debug
import Recording
# Initialization - make sure we're allowed to read the recordings directory
# Fail fast at import time: everything below assumes the directory is readable.
if (not os.access( Config.TV_SOURCE_DIR, os.R_OK ) ):
    Debug.LogEntry( "Unable to read recordings directory", Debug.ERROR )
    sys.exit( -1 )
# Given a list of substrings to match, the function will examine the
# recordings directory, find the video files with the appropriate
# extension, and return a dictionary matching the search strings to
# sets of available files that contain those strings in their names.
def GetAvailableRecordings( stringsList ):
    """Scan the recordings directory and return a dict mapping each search
    string to the set of Recording objects whose file names contain it.
    """
    matchingRecordings = {}
    rawFileList = os.listdir( Config.TV_SOURCE_DIR )
    # Keep only files with a recognised video extension.  str.endswith
    # accepts a tuple of suffixes, replacing the manual slice comparison
    # (which misbehaved when an extension was longer than the file name).
    extensions = tuple( Config.TV_SOURCE_EXTENSIONS )
    cleanFileList = set()
    for fileName in rawFileList:
        if fileName.endswith( extensions ):
            cleanFileList.add( fileName )
    # Bucket the surviving files per search string.
    for searchString in stringsList:
        matchingRecordings[searchString] = set()
        for fileName in cleanFileList:
            if( fileName.find( searchString ) != -1 ):
                newRecording = Recording.Recording(os.path.join( Config.TV_SOURCE_DIR, fileName ))
                newRecording.LookupDetails( )
                matchingRecordings[searchString].add( newRecording )
    for podcast in matchingRecordings.keys():
        Debug.LogEntry( podcast, Debug.DEBUG )
        for recording in matchingRecordings[podcast]:
            Debug.LogEntry( " "+str(recording), Debug.DEBUG )
    return matchingRecordings
# Given a list of filenames in its store, we return only those files
# whose sizes are not growing and therefore those files which are
# done recording and ready for encoding as a podcast
def PruneRecordings( recordingsList ):
    """Return the recordings whose files have stopped growing.

    Samples each file's size, sleeps Config.GROWTH_CHECK_WAIT_PERIOD
    seconds, samples again, and keeps only recordings whose size is
    unchanged (i.e. BeyondTV has finished writing them).
    """
    oldSize = {}
    newSize = {}
    for recording in recordingsList:
        fullPath = recording.pathToFile
        if( not os.access( fullPath, os.R_OK ) ):
            Debug.LogEntry( "Unable to access file: %s" % fullPath, Debug.ERROR )
        else:
            oldSize[fullPath] = os.path.getsize(fullPath)
    # Let any in-progress recording grow before the second sample.
    time.sleep( Config.GROWTH_CHECK_WAIT_PERIOD )
    for fullPath in oldSize.keys():
        newSize[fullPath] = os.path.getsize(fullPath)
    Debug.LogEntry( "File growth check:", Debug.DEBUG )
    finishedRecordings = []
    for recording in recordingsList:
        fullPath = recording.pathToFile
        # Bug fix: files that failed the access check above were never added
        # to oldSize/newSize; the original code raised KeyError here for
        # them.  Skip them instead — they are not ready in any case.
        if fullPath not in oldSize:
            continue
        Debug.LogEntry( "  %s: was %dB, now %dB" % (fullPath, oldSize[fullPath], newSize[fullPath]), Debug.DEBUG )
        if oldSize[fullPath] == newSize[fullPath]:
            finishedRecordings.append( recording )
            Debug.LogEntry( "    Adding to list.", Debug.DEBUG )
    return finishedRecordings
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,621 | wbutler/beyondtv-podcast-app | refs/heads/master | /StorageManager.py | import os
import pickle
import re
import shutil
import sys
import Config
import Debug
import Podcast
import Recording
# Returns the list of search strings, one per configured podcast.
# (The inherited comment said "looks at the config files", but the config
# file is parsed once at module-init time below; this just reads the
# resulting in-memory Podcasts table.)
def GetSearchStrings( ):
    """Return the search strings (the keys of the module-level Podcasts dict).

    Under Python 2 dict.keys() returns a plain list.
    """
    searchStrings = Podcasts.keys()
    Debug.LogEntry( "Search strings: %s" % str(searchStrings), Debug.DEBUG )
    return searchStrings
def GetTranscodeRequests( availableRecordingSets ):
    """Return recordings that should be transcoded, newest first.

    A recording qualifies when the matching podcast does not already
    contain it and still considers it recent (Podcast.Contains /
    Podcast.IsRecent).

    availableRecordingSets: dict mapping search string -> set of Recording,
    as produced by RecordingsWatcher.GetAvailableRecordings.
    """
    Debug.LogEntry( "In GetTranscodeRequests", Debug.DEBUG )
    requestList = []
    for searchString in availableRecordingSets.keys():
        for recording in availableRecordingSets[searchString]:
            if not Podcasts[searchString].Contains( recording ):
                if Podcasts[searchString].IsRecent( recording ):
                    requestList.append( recording )
    # Sort the list so the most recently recorded video is first.
    # key= replaces the original Python-2-only cmp lambda; ordering is
    # identical (descending pubDate, stable for ties) and it also runs
    # under Python 3.
    requestList.sort( key=lambda recording: recording.pubDate, reverse=True )
    Debug.LogEntry( "Requested files list:", Debug.DEBUG )
    for recording in requestList:
        Debug.LogEntry( "  "+str(recording), Debug.DEBUG )
    return requestList
# Adds a finished recording to the store and writes
# updated XML files and hash file
def Submit( recording ):
    """Add *recording* to every matching podcast and persist state to disk.

    Matches the recording's path against each podcast's search pattern,
    adds it where it matches, then pickles the updated Podcasts table and
    the FileHashes list.  Exits the process if either write fails.
    """
    Debug.LogEntry( "Storing recording: %s" % str(recording), Debug.DEBUG )
    # (A dead "for podcast in Podcasts.keys(): pass" loop was removed here.)
    # Add the file to the relevant podcasts
    for search in Podcasts.keys():
        if re.search( search, recording.pathToFile ):
            Debug.LogEntry( "Recording %s matches podcast %s" % (str(recording), str(Podcasts[search])), Debug.DEBUG )
            Podcasts[search].AddRecording( recording )
    # Store the updated podcasts array
    Debug.LogEntry( "Ready to write podcast metadata to file.", Debug.DEBUG )
    for podcast in Podcasts.values():
        Debug.LogEntry( str(podcast), Debug.DEBUG )
        # Bug fix: this loop variable used to be named "recording", shadowing
        # the parameter — so FileHashes.append() below recorded the guid of
        # whatever recording happened to be logged last, not the submitted one.
        for storedRecording in podcast.Recordings:
            Debug.LogEntry( "  %s" % str(storedRecording), Debug.DEBUG )
    try:
        outFile = open( PodcastFile, 'w' )
        pickle.dump( Podcasts, outFile )
        outFile.close()
    except Exception:
        Debug.LogEntry( "Error writing to podcast metadata file. Exiting.", Debug.ERROR )
        sys.exit( -1 )
    # Store the hash of this file
    FileHashes.append( recording.guid )
    try:
        outFile = open(os.path.join( Config.CONFIG_DIR, Config.HASHES_FILE ), 'w')
        pickle.dump( FileHashes, outFile )
        outFile.close()
    except Exception:
        Debug.LogEntry( "Error writing to hash file. Exiting.", Debug.ERROR )
        sys.exit( -1 )
def WritePodcasts( ):
    """Regenerate the RSS XML file on disk for every podcast in the store."""
    for podcast in Podcasts.values():
        podcast.WriteXML()
# Initialization code — runs once at import time.  Parses the searches
# config file, loads (or rebuilds) the pickled podcast table and the list
# of already-processed file hashes, and reconciles podcasts with searches.
Debug.LogEntry( "Initializing StorageManager", Debug.DEBUG )
searchFilePath = os.path.join( Config.CONFIG_DIR, Config.SEARCHES_FILE )
Debug.LogEntry( "Looking for searches file at %s" % searchFilePath, Debug.DEBUG )
PodcastFile = os.path.join( Config.CONFIG_DIR, Config.PODCAST_METADATA_FILE )
# Read lines out of the searches file
try:
    searchFile = open( searchFilePath, 'r' )
    lines = searchFile.readlines()
except:
    Debug.LogEntry( "Unable to read searches file. Halting program.", Debug.ERROR )
    sys.exit( -1 )
# Strip out comments and blank lines from the searches file
cleanLines = []
for rawLine in lines:
    line = rawLine.rstrip()
    if( len(line) > 0 and line[0] != '#' ):
        cleanLines.append( line )
# Make sure that we have the right number of lines
# in the searches file (records are fixed-size groups of lines).
if( len(cleanLines) % Config.LINES_PER_SEARCH_RECORD != 0 ):
    Debug.LogEntry( "Malformed searches file. Incomplete records present.", Debug.ERROR )
    sys.exit( -1 )
# Build search string/podcast name pairs.  Each record is
# [search string, title, rss filename].
# NOTE(review): "/" here is Python 2 integer division; under Python 3 it
# yields a float and range() would raise TypeError.
SearchObjects = []
SearchStrings = []
for i in range( len(cleanLines)/Config.LINES_PER_SEARCH_RECORD ):
    newItem = []
    for element in cleanLines[i*Config.LINES_PER_SEARCH_RECORD:i*Config.LINES_PER_SEARCH_RECORD+Config.LINES_PER_SEARCH_RECORD]:
        newItem.append( element )
    SearchObjects.append( newItem )
    Debug.LogEntry( "Adding item to SearchObjects list: %s" % str(newItem), Debug.DEBUG )
    SearchStrings.append( newItem[0] )
# Read in old file hashes
FileHashes = []
Debug.LogEntry( "Reading old file hashes from %s" % os.path.join( Config.CONFIG_DIR, Config.HASHES_FILE ), Debug.DEBUG )
try:
    inFile = open(os.path.join( Config.CONFIG_DIR, Config.HASHES_FILE ), 'r')
    FileHashes = pickle.load( inFile )
    Debug.LogEntry( "Hash read complete.", Debug.DEBUG )
    inFile.close()
except:
    Debug.LogEntry( "No file hashes found. Assuming a new install.", Debug.NORMAL )
    # We don't have records of old files, so the repository directory is in an
    # unknown state. Unlink it and recreate it to clean it out.
    Debug.LogEntry( "Unlinking directory at %s." % Config.PODCAST_RECORDING_WWW_DIR, Debug.NORMAL )
    if not os.access( Config.PODCAST_RECORDING_WWW_DIR, os.W_OK ):
        Debug.LogEntry( "Access to videos directory denied. Exiting.", Debug.ERROR )
        sys.exit( -1 )
    try:
        shutil.rmtree( Config.PODCAST_RECORDING_WWW_DIR )
        os.mkdir( Config.PODCAST_RECORDING_WWW_DIR )
    except:
        # NOTE(review): the message says "Exiting" but there is no sys.exit
        # here — execution falls through on failure; confirm intended.
        Debug.LogEntry( "Failed to reset videos directory. Exiting.", Debug.ERROR )
    Debug.LogEntry( "Videos directory reset.", Debug.DEBUG )
# Read in podcast storage records
Podcasts = {}
PodcastFile = os.path.join( Config.CONFIG_DIR, Config.PODCAST_METADATA_FILE )
Debug.LogEntry( "Reading podcast metadata from %s" % PodcastFile, Debug.DEBUG )
try:
    inFile = open( PodcastFile, 'r' )
    Podcasts = pickle.load( inFile )
    Debug.LogEntry( "Podcast read complete.", Debug.DEBUG )
    inFile.close()
except:
    Debug.LogEntry( "No podcasts file present. Rebuilding from searches.", Debug.NORMAL )
# Verify that we have a podcast object for every search
for search in SearchObjects:
    if search[0] in Podcasts.keys():
        Debug.LogEntry( "Search %s matches %s" % (search[0], str(Podcasts[search[0]])), Debug.DEBUG )
    else:
        Debug.LogEntry( "Search %s not in podcasts list." % search[0], Debug.DEBUG )
        newPodcast = Podcast.Podcast( Search = search[0], Title = search[1], RssFileName = search[2] )
        Podcasts[search[0]] = newPodcast
        Debug.LogEntry( "Adding new podcast to list:", Debug.DEBUG )
        Debug.LogEntry( str(Podcasts[search[0]]), Debug.DEBUG )
        # Make sure that every podcast has a place for its files to land
        Debug.LogEntry( "Creating new storage directory for %s at %s" % ( newPodcast.Title, newPodcast.RecordingDir ), Debug.DEBUG )
        try:
            if os.path.exists( newPodcast.RecordingDir ):
                os.rmdir( newPodcast.RecordingDir )
            os.mkdir( newPodcast.RecordingDir )
        except:
            Debug.LogEntry( "Error creating storage directory %s" % newPodcast.RecordingDir, Debug.DEBUG )
# Verify that there is no podcast without an entry in
# the searches list
# NOTE(review): deletes from Podcasts while iterating .values(); safe under
# Python 2 (values() is a list snapshot) but would raise under Python 3.
for podcast in Podcasts.values():
    if not podcast.Search in SearchStrings:
        Debug.LogEntry( "Podcast %s does not match any search. Deleting." % str(podcast), Debug.DEBUG )
        del Podcasts[podcast.Search]
Debug.LogEntry( "Final list of podcasts to service:", Debug.DEBUG )
for podcast in Podcasts.values():
    Debug.LogEntry( "  %s" % str(podcast), Debug.DEBUG )
# NOTE(review): Config.WWW_PODCAST_DIR starts with "/", so os.path.join
# discards WWW_ROOT_DIR here — the resulting path is "/podcasts/..."; verify.
podcastMasterListPath = os.path.join( Config.WWW_ROOT_DIR, Config.WWW_PODCAST_DIR )
podcastMasterListPath = os.path.join( podcastMasterListPath, Config.PODCAST_MASTER_LIST_FILE_NAME )
Debug.LogEntry( "Outputting master list of podcasts and URL's to %s" % podcastMasterListPath, Debug.DEBUG )
# TODO: master-list generation is unimplemented — the loop body is a stub.
for podcast in Podcasts.values():
    pass
Debug.LogEntry( "StorageManager initialization complete", Debug.DEBUG )
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,622 | wbutler/beyondtv-podcast-app | refs/heads/master | /Config.py | import os
# Per-user directory holding searches file, log, hashes, and pickled state.
CONFIG_DIR = os.path.join( os.path.expanduser("~"), ".tvpodcast" )
SEARCHES_FILE = "searches.txt"
LOG_FILE = "logs.txt"
HASHES_FILE = "hashes.pk"
RUN_FILE = ".lock"
PODCAST_METADATA_FILE = "podcasts.pk"
# Where BeyondTV drops finished recordings, and which extensions count.
TV_SOURCE_DIR = "/mnt/tv"
TV_SOURCE_EXTENSIONS = ["avi"]
# Web server layout.  NOTE(review): WWW_PODCAST_DIR's leading "/" makes
# os.path.join(WWW_ROOT_DIR, WWW_PODCAST_DIR) elsewhere discard the root —
# only plain string concatenation (as below) yields the intended path.
WWW_ROOT_DIR = "/www"
WWW_PODCAST_DIR = "/podcasts"
PODCAST_RECORDING_WWW_DIR = WWW_ROOT_DIR + WWW_PODCAST_DIR + "/videos"
PODCAST_MASTER_LIST_FILE_NAME = "index.html"
WWW_ROOT_URL = "http://192.168.1.105"
# Amount of time in seconds to sleep in order to see
# if a file in the store is still growing, i.e. still
# being recorded and thus not ready for processing.
GROWTH_CHECK_WAIT_PERIOD = 5
# Number of lines in a single podcast specification
# in the searches file
LINES_PER_SEARCH_RECORD = 3
# Number of recordings per podcast to save before
# episodes are deleted to make room
MAX_PODCAST_SIZE = 3
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,623 | wbutler/beyondtv-podcast-app | refs/heads/master | /Debug.py | import Config
import os
import sys
import time
# Available logging levels; output at higher values is a
# superset of output at lower levels.  (The SetLogLevel helper the original
# comment referred to is commented out below; set RUNTIME_LOG_LEVEL instead.)
ERROR = 0
NORMAL = 1
DEBUG = 2
# Default level of logging for the application
RUNTIME_LOG_LEVEL = DEBUG
# If this file is present in the config dir, the app will not
# launch in order to preserve logging information for later
KILL_FILE_NAME = "FATALERROR.txt"
def GetCurrentTimeString( ):
    """Return the current local time as a 'date time' string for log lines."""
    now = time.localtime(time.time())
    return time.strftime("%x %X", now)
#def SetLogLevel( newLevel ):
# Debug.RUNTIME_LOG_LEVEL = newLevel
def LogEntry( text, logLevel ):
    """Append *text* to the log file if *logLevel* passes RUNTIME_LOG_LEVEL.

    logLevel is one of ERROR / NORMAL / DEBUG.  An ERROR entry also drops
    the kill file, which blocks all future launches so the log survives
    for inspection.
    """
    try:
        if (logLevel <= RUNTIME_LOG_LEVEL):
            # Open/append/close per call so entries survive a crash.
            logFile = open( os.path.join(Config.CONFIG_DIR, Config.LOG_FILE), "a" )
            logString = GetCurrentTimeString() + " " + text + "\n"
            logFile.write( logString )
            logFile.close()
    except IOError:
        # NOTE(review): LOG_FILE_FULL_NAME is a module global assigned further
        # down in this file; an IOError raised before that assignment would
        # produce a NameError here instead of this message — confirm.
        print( "Error writing to log file at %s. Halting program." % LOG_FILE_FULL_NAME )
        sys.exit( -1 )
    if( logLevel == ERROR ):
        # Leave the kill-file marker; module init refuses to run while it exists.
        errorFile = open( os.path.join(Config.CONFIG_DIR, KILL_FILE_NAME), 'w' )
        errorFile.write( "Fatal error. See log file." )
        errorFile.close()
# Initialization code: make sure we can get to the log file.
# Runs at import time; any module that imports Debug triggers these checks.
if( not os.access( Config.CONFIG_DIR, os.W_OK ) ):
    print( "Unable to access log file. Halting program." )
    sys.exit( -1 )
# If the kill file is present, don't run at all.
if( os.path.exists( os.path.join(Config.CONFIG_DIR, KILL_FILE_NAME) )):
    print( "Previous fatal error. Examine log file to debug." )
    sys.exit( -1 )
# Start the log.  The previous run's log is deleted each launch, so only
# the most recent run's entries are kept.
LOG_FILE_FULL_NAME = os.path.join(Config.CONFIG_DIR, Config.LOG_FILE)
if( os.path.exists( LOG_FILE_FULL_NAME ) ):
    os.unlink( LOG_FILE_FULL_NAME )
LogEntry( "Initializing the log at %s." % LOG_FILE_FULL_NAME, RUNTIME_LOG_LEVEL )
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,624 | wbutler/beyondtv-podcast-app | refs/heads/master | /Recording.py | import datetime
import os
import re
import Debug
class Recording:
    """Metadata for one recorded TV file: title, date, guid, duration, path.

    Instances are pickled to disk by StorageManager, so the attribute set
    is part of the on-disk format.
    """

    # Class-level defaults; all immutable, so sharing across instances is safe.
    title = ""
    author = ""
    subtitle = ""
    summary = ""
    guid = ""
    pubDate = datetime.datetime.min
    duration = 0            # replaced by an "HH:MM:SS"/"MM:SS" string once looked up
    pathToFile = ""

    def __init__( self, path="" ):
        """Remember *path*; call LookupDetails() to fill in the metadata."""
        self.pathToFile = path

    def __str__( self ):
        """Return a tuple-style rendering of every field, for logging."""
        retStr = "("
        retStr = retStr + str(self.title) + ","
        retStr = retStr + str(self.author) + ","
        retStr = retStr + str(self.subtitle) + ","
        retStr = retStr + str(self.summary) + ","
        retStr = retStr + str(self.guid) + ","
        retStr = retStr + str(self.pubDate) + ","
        retStr = retStr + str(self.duration) + ","
        retStr = retStr + str(self.pathToFile) + ")"
        return retStr

    def LookupDetails( self ):
        """Derive title, pubDate, guid, duration, and summary from the file.

        Parses the filename for the title and a YYYY-MM-DD date stamp, and
        shells out to ffmpeg to scrape the duration from its banner output.
        """
        # Extract just the filename from the path
        dirName, fileName = os.path.split( self.pathToFile )
        # Title is everything before the first '-' in the filename.
        self.title = fileName[:fileName.find("-")]
        # Get the date from a YYYY-MM-DD stamp anywhere in the filename.
        try:
            dateStr = re.search( '[0-9]{4}-[0-9]{2}-[0-9]{2}', fileName ).group(0)
            self.pubDate = datetime.datetime( int(dateStr[0:4]), int(dateStr[5:7]), int(dateStr[8:10]) )
        except:
            Debug.LogEntry( "Error computing date for recording: %s" % str(self), Debug.ERROR )
        # Build a guid from title+date.
        # NOTE(review): str hash() is stable under Python 2 but randomized
        # per-process under Python 3, so guids would not survive a port.
        self.guid = str(hash(self.title + str(self.pubDate)))
        # Get the duration by scraping ffmpeg's informational output.
        # os.popen4 (Python 2 only) returns (child_stdin, child_stdout_and_err).
        try:
            inPipe, outPipe = os.popen4( "ffmpeg -i \"%s\"" % self.pathToFile )
            for line in outPipe.readlines():
                if( line.find( "Duration" ) != -1 ):
                    timeStr = re.search( '[0-9]{2}:[0-9]{2}:[0-9]{2}', line ).group(0)
                    # Drop the leading hours field when it is zero.
                    if( timeStr[0:2] != "00" ):
                        self.duration = timeStr
                    else:
                        self.duration = timeStr[3:]
                else:
                    pass
        except:
            Debug.LogEntry( "Error finding duration for recording: %s" % str(self), Debug.ERROR )
        # Get the summary (placeholder text for all recordings).
        self.summary = "BeyondTV Recording"
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,625 | wbutler/beyondtv-podcast-app | refs/heads/master | /podcastgen.py | from xml.dom.minidom import Document
import random
# Text constants for the hand-built test feed this scratch script emits.
TTL_TIMEOUT = "60"
TITLE = "BeyondTV Test Podcast"
LINK = "http://192.168.1.105/testpodcast.xml"
LANGUAGE = "en-us"
# NOTE(review): the leading space suggests a lost copyright symbol
# (encoding issue) — confirm against the original file.
COPYRIGHT_NOTICE = " 2008 Flaming Penguin Heavy Industries"
SUBTITLE = "Subtitle"
AUTHOR = "Author"
SUMMARY = "Summary"
DESCRIPTION = "Description"
OWNER_NAME = "Owner Name"
OWNER_EMAIL = "owner@email.com"
IMAGE_LINK = "http://192.168.1.105/pirate.png"
CATEGORY = "Television"
# Our test item — one fixed episode entry for the generated feed.
TEST_ITEM_TITLE = "Daily Show test"
TEST_ITEM_AUTHOR = "Test item author"
TEST_ITEM_SUBTITLE = "Test item subtitle"
TEST_ITEM_SUMMARY = "Test item summary"
TEST_ITEM_URL = "http://192.168.1.105/trek.avi.mp4"
TEST_ITEM_TYPE = "audio/x-m4v"
TEST_ITEM_GUID = str(int(random.random() * 10000000000))
TEST_ITEM_DATE = "Wed, 17 July 2008 12:00:00 GMT"
TEST_ITEM_DURATION = "30:00"
TEST_ITEM_KEYWORD = "Test item keywords"
def addTextElement( document, parentElement, name, text ):
    """Append a child element named *name* containing *text* to *parentElement*.

    document: the minidom Document used as the node factory.
    """
    newElement = document.createElement( name )
    parentElement.appendChild( newElement )
    # Bug fix: the original called the module-level global "doc" here instead
    # of the "document" parameter, so the function only worked against that
    # one global document (and raised NameError anywhere else).
    newText = document.createTextNode( text )
    newElement.appendChild( newText )
def addPodcastItem( document, parentElement, title, author, subtitle, summary, url, type, guid, date, duration, keywords):
    """Append one RSS <item> (with iTunes tags and enclosure) to *parentElement*."""
    entryElement = document.createElement( "item" )
    parentElement.appendChild( entryElement )
    # Plain text children before the enclosure, in feed order.
    for tagName, tagText in (("title", title),
                             ("itunes:author", author),
                             ("itunes:subtitle", subtitle),
                             ("itunes:summary", summary)):
        addTextElement( document, entryElement, tagName, tagText )
    # The enclosure carries the media URL and MIME type as attributes.
    mediaElement = document.createElement( "enclosure" )
    mediaElement.setAttribute( "url", url )
    mediaElement.setAttribute( "type", type )
    entryElement.appendChild( mediaElement )
    # Remaining text children after the enclosure.
    for tagName, tagText in (("guid", guid),
                             ("pubDate", date),
                             ("itunes:duration", duration),
                             ("itunes:keywords", keywords)):
        addTextElement( document, entryElement, tagName, tagText )
# Scratch script body: build a complete test feed and print it to stdout.
# Build the RSS skeleton: <rss version="2.0"> with the iTunes namespace.
doc = Document()
rssElement = doc.createElement( "rss" )
rssElement.setAttribute( "version", "2.0" )
rssElement.setAttribute( "xmlns:itunes", "http://www.itunes.com/dtds/podcast-1.0.dtd" )
doc.appendChild( rssElement )
channelElement = doc.createElement( "channel" )
rssElement.appendChild( channelElement )
# Channel-level metadata.
addTextElement( doc, channelElement, "ttl", TTL_TIMEOUT )
addTextElement( doc, channelElement, "title", TITLE )
addTextElement( doc, channelElement, "link", LINK )
addTextElement( doc, channelElement, "language", LANGUAGE )
addTextElement( doc, channelElement, "copyright", COPYRIGHT_NOTICE )
addTextElement( doc, channelElement, "itunes:subtitle", SUBTITLE )
addTextElement( doc, channelElement, "itunes:author", AUTHOR )
addTextElement( doc, channelElement, "itunes:summary", SUMMARY )
addTextElement( doc, channelElement, "description", DESCRIPTION )
ownerElement = doc.createElement( "itunes:owner" )
channelElement.appendChild( ownerElement )
addTextElement( doc, ownerElement, "itunes:name", OWNER_NAME )
addTextElement( doc, ownerElement, "itunes:email", OWNER_EMAIL )
imageElement = doc.createElement( "itunes:image" )
imageElement.setAttribute( "href", IMAGE_LINK )
channelElement.appendChild( imageElement )
addTextElement( doc, channelElement, "itunes:category", CATEGORY )
# Add our items to the channel
addPodcastItem( doc, channelElement, TEST_ITEM_TITLE, TEST_ITEM_AUTHOR, TEST_ITEM_SUBTITLE, TEST_ITEM_SUMMARY, TEST_ITEM_URL, TEST_ITEM_TYPE, TEST_ITEM_GUID, TEST_ITEM_DATE, TEST_ITEM_DURATION, TEST_ITEM_KEYWORD )
# Python 2 print statement — emit the pretty-printed feed to stdout.
print doc.toprettyxml(indent = "\t", encoding = "UTF-8")
# print doc.toxml( encoding = "UTF-8" )
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,626 | wbutler/beyondtv-podcast-app | refs/heads/master | /pickletest.py | import pickle
import random
# Scratch script: round-trip a small list through pickle to verify that
# state survives between runs (prints the previous run's data, if any,
# then writes fresh random data).
random.seed()
try:
    # Read in some data
    # NOTE(review): text-mode 'r' for pickle data is Python 2 behavior;
    # Python 3 pickle requires binary mode ('rb'/'wb').
    inFile = open('tempdata.pk', 'r')
    data = pickle.load( inFile )
    print( "Reading old data:\n%s\n" % str(data) )
    inFile.close()
except:
    # Couldn't read in any data
    print( "No old data available.\n" )
data = [random.randrange(10), random.randrange(10), random.randrange(10)]
print( "Generating new data:\n%s\n" % str(data) )
outFile = open('tempdata.pk', 'w')
pickle.dump( data, outFile )
outFile.close()
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,627 | wbutler/beyondtv-podcast-app | refs/heads/master | /Podcast.py | import copy
import datetime
import os
import shutil
import sys
from xml.dom.minidom import Document
import Config
import Debug
import Recording
# Default channel metadata used when a Podcast is created without explicit
# values (see Podcast.__init__ — empty string means "use the default").
DEFAULT_TTL = "60"
DEFAULT_LANGUAGE = "en-us"
# Copyright year is captured once at import time.
DEFAULT_COPYRIGHT = str(datetime.datetime.now().year) + " Frumjous Bandersnatch"
DEFAULT_AUTHOR = "Author TK"
DEFAULT_CATEGORY = "Television"
DEFAULT_OWNER_NAME = "BeyondTV Podcast Generator"
DEFAULT_OWNER_EMAIL = "Owner Email TK"
DEFAULT_IMAGELINK = Config.WWW_ROOT_URL + "/beyondtv.jpg"
DEFAULT_SUBTITLE = "Subtitle TK"
DEFAULT_SUMMARY = "Summary TK"
DEFAULT_DESCRIPTION = "Description TK"
class Podcast:
    """One podcast feed: search pattern, channel metadata, recordings, RSS output.

    Instances are pickled to disk by StorageManager, so the attribute layout
    is part of the on-disk format.
    """

    # Class-level defaults for the channel metadata; __init__ overrides each
    # per instance only when a non-empty value is supplied.  All defaults are
    # immutable strings, so class-level sharing is safe.
    Ttl = DEFAULT_TTL
    Language = DEFAULT_LANGUAGE
    Copyright = DEFAULT_COPYRIGHT
    Author = DEFAULT_AUTHOR
    OwnerName = DEFAULT_OWNER_NAME
    OwnerEmail = DEFAULT_OWNER_EMAIL
    Category = DEFAULT_CATEGORY
    ImageLink = DEFAULT_IMAGELINK
    Subtitle = DEFAULT_SUBTITLE
    Summary = DEFAULT_SUMMARY
    Description = DEFAULT_DESCRIPTION

    def __init__( self, Search, RssFileName, Ttl = "", Title = "", Language = "", Copyright = "", Subtitle = "", Author = "", Summary = "", Description = "", ImageLink = "" ):
        """Create a podcast for *Search*, writing its feed to *RssFileName*.

        Empty-string keyword arguments mean "keep the module default".
        """
        self.Search = Search
        self.RssFileName = RssFileName
        # Per-podcast media directory, named after the RSS file minus ".xml".
        self.__RecordingDir = os.path.join( Config.PODCAST_RECORDING_WWW_DIR, self.RssFileName[0:-4] )
        self.XmlDocument = None
        self.Recordings = []
        if Ttl != "":
            self.Ttl = Ttl
        # NOTE(review): Title is assigned unconditionally, unlike the other
        # metadata fields — an empty Title is kept as "" rather than falling
        # back to a default; confirm intended.
        self.Title = Title
        if Language != "":
            self.Language = Language
        if Copyright != "":
            self.Copyright = Copyright
        if Subtitle != "":
            self.Subtitle = Subtitle
        if Author != "":
            self.Author = Author
        if Summary != "":
            self.Summary = Summary
        if Description != "":
            self.Description = Description
        if ImageLink != "":
            self.ImageLink = ImageLink

    def __str__( self ):
        """Return a tuple-style rendering of the channel fields, for logging."""
        retstr = "("
        retstr = retstr + str(self.Search) + ","
        retstr = retstr + str(self.RssFileName) + ","
        retstr = retstr + str(self.Ttl) + ","
        retstr = retstr + str(self.Title) + ","
        retstr = retstr + str(self.Link) + ","
        retstr = retstr + str(self.Language) + ","
        retstr = retstr + str(self.Copyright) + ","
        retstr = retstr + str(self.Subtitle) + ","
        retstr = retstr + str(self.Author) + ","
        retstr = retstr + str(self.Summary) + ","
        retstr = retstr + str(self.Description) + ","
        retstr = retstr + str(self.OwnerName) + ","
        retstr = retstr + str(self.OwnerEmail) + ","
        retstr = retstr + str(self.ImageLink) + ","
        retstr = retstr + str(self.Category) + ")"
        return retstr

    def GetRecordingDir( self ):
        # Read accessor for the derived media directory.
        return self.__RecordingDir

    def SetRecordingDir( self, x ):
        # Assignment is deliberately ignored: the directory is derived from
        # RssFileName in __init__ and must not change afterwards.
        pass

    def GetLink( self ):
        # Public URL of this podcast's RSS file.
        return Config.WWW_ROOT_URL + Config.WWW_PODCAST_DIR + "/" + self.RssFileName

    def SetLink( self ):
        # NOTE(review): missing a value parameter — assigning to Link (if the
        # property setter ever fired) would raise TypeError.
        pass

    # Computed attributes; read-only in practice (setters are no-ops).
    # NOTE(review): this is an old-style (Python 2) class, where property
    # setters do not intercept assignment — confirm nothing assigns these.
    RecordingDir = property( GetRecordingDir, SetRecordingDir )
    Link = property( GetLink, SetLink )

    def Contains( self, newRecording ):
        """Return True if a recording with the same guid is already stored."""
        for recording in self.Recordings:
            if recording.guid == newRecording.guid:
                Debug.LogEntry( "Podcast %s already contains recording %s" % (self.Title, recording.pathToFile), Debug.DEBUG )
                return True
        return False

    def IsRecent( self, recording ):
        """Return True if *recording* is new enough to deserve a slot.

        Always True while the podcast is below MAX_PODCAST_SIZE; otherwise
        True only if the recording is newer than the oldest stored episode.
        """
        if( len(self.Recordings) < Config.MAX_PODCAST_SIZE ):
            Debug.LogEntry( "Podcast %s is not full. Recording %s is considered recent." % (str(self.Title), recording.pathToFile), Debug.DEBUG )
            return True
        else:
            # Sort newest-first; the last element is then the oldest episode.
            # NOTE(review): cmp-style sort lambda is Python 2 only.
            self.Recordings.sort( lambda i1, i2: cmp(i1.pubDate, i2.pubDate), reverse = True )
            if( recording.pubDate > self.Recordings[len(self.Recordings)-1].pubDate ):
                Debug.LogEntry( "Recording %s is recent for podcast %s." % (recording.pathToFile, str(self.Title)), Debug.DEBUG )
                return True
            else:
                Debug.LogEntry( "Recording %s is too old for podcast %s." % (recording.pathToFile, str(self.Title)), Debug.DEBUG )
                return False

    def AddRecording( self, recording ):
        """Copy *recording*'s file into this podcast's web directory and store it.

        When the podcast is full, the oldest episode's file is deleted and
        its slot reused.  Exits the process on copy or delete failure.
        """
        Debug.LogEntry( "Adding recording %s to podcast %s." % (recording.pathToFile, self.Title), Debug.DEBUG )
        # Deep-copy so the stored object can point at the copied file while
        # the caller's object keeps the original path.
        newRecording = copy.deepcopy( recording )
        Debug.LogEntry( "Copying %s to %s" % (newRecording.pathToFile, self.RecordingDir), Debug.DEBUG )
        try:
            shutil.copy( newRecording.pathToFile, self.RecordingDir )
        except:
            Debug.LogEntry( "Failed to copy %s to %s" % (newRecording.pathToFile, self.RecordingDir), Debug.DEBUG )
            sys.exit( -1 )
        newRecording.pathToFile = os.path.join(self.RecordingDir, os.path.basename( newRecording.pathToFile ))
        Debug.LogEntry( "File copy complete. New file at %s" % newRecording.pathToFile, Debug.DEBUG )
        if( len(self.Recordings) < Config.MAX_PODCAST_SIZE ):
            self.Recordings.append( newRecording )
        else:
            # Full: oldest-first sort, then evict index 0 (the oldest).
            self.Recordings.sort( lambda i1, i2: cmp(i1.pubDate, i2.pubDate) )
            Debug.LogEntry( "In podcast %s, replacing %s with %s" % (self.Title, self.Recordings[0].pathToFile, recording.pathToFile), Debug.DEBUG )
            Debug.LogEntry( "Deleting %s" % self.Recordings[0].pathToFile, Debug.DEBUG )
            try:
                os.remove( self.Recordings[0].pathToFile )
            except:
                Debug.LogEntry( "Failed to delete %s" % self.Recordings[0].pathToFile, Debug.ERROR )
                sys.exit( -1 )
            self.Recordings[0] = newRecording
        # Keep the in-memory list newest-first for feed generation.
        self.Recordings.sort( lambda i1, i2: cmp(i1.pubDate, i2.pubDate), reverse = True )

    def XmlAddTextElement( self, parentElement, name, text ):
        """Append a child element *name* containing *text* under *parentElement*."""
        newElement = self.XmlDocument.createElement( name )
        parentElement.appendChild( newElement )
        newText = self.XmlDocument.createTextNode( text )
        newElement.appendChild( newText )

    def XmlAddPodcastItem( self, parentElement, title = "", author = "", subtitle = "", summary = "", url = "", type = "audio/x-m4v", guid = "", date = "", duration = "", keywords = ""):
        """Append one RSS <item> (iTunes tags + enclosure) under *parentElement*."""
        itemElement = self.XmlDocument.createElement( "item" )
        parentElement.appendChild( itemElement )
        self.XmlAddTextElement( itemElement, "title", title )
        self.XmlAddTextElement( itemElement, "itunes:author", author )
        self.XmlAddTextElement( itemElement, "itunes:subtitle", subtitle )
        self.XmlAddTextElement( itemElement, "itunes:summary", summary )
        # Enclosure carries the media URL and MIME type as attributes.
        enclosureElement = self.XmlDocument.createElement( "enclosure" )
        enclosureElement.setAttribute( "url", url )
        enclosureElement.setAttribute( "type", type )
        itemElement.appendChild( enclosureElement )
        self.XmlAddTextElement( itemElement, "guid", guid )
        self.XmlAddTextElement( itemElement, "pubDate", date )
        self.XmlAddTextElement( itemElement, "itunes:duration", duration )
        self.XmlAddTextElement( itemElement, "itunes:keywords", keywords )

    def GenerateXMLString( self ):
        """Build and return the complete RSS 2.0 / iTunes feed as a string."""
        self.XmlDocument = Document()
        rssElement = self.XmlDocument.createElement( "rss" )
        rssElement.setAttribute( "version", "2.0" )
        rssElement.setAttribute( "xmlns:itunes", "http://www.itunes.com/dtds/podcast-1.0.dtd" )
        self.XmlDocument.appendChild( rssElement )
        channelElement = self.XmlDocument.createElement( "channel" )
        rssElement.appendChild( channelElement )
        # Channel-level metadata.
        self.XmlAddTextElement( channelElement, "ttl", self.Ttl )
        self.XmlAddTextElement( channelElement, "title", self.Title )
        self.XmlAddTextElement( channelElement, "link", self.Link )
        self.XmlAddTextElement( channelElement, "language", self.Language )
        self.XmlAddTextElement( channelElement, "copyright", self.Copyright )
        self.XmlAddTextElement( channelElement, "itunes:subtitle", self.Subtitle )
        self.XmlAddTextElement( channelElement, "itunes:author", self.Author )
        self.XmlAddTextElement( channelElement, "itunes:summary", self.Summary )
        self.XmlAddTextElement( channelElement, "description", self.Description )
        ownerElement = self.XmlDocument.createElement( "itunes:owner" )
        channelElement.appendChild( ownerElement )
        self.XmlAddTextElement( ownerElement, "itunes:name", self.OwnerName )
        self.XmlAddTextElement( ownerElement, "itunes:email", self.OwnerEmail )
        imageElement = self.XmlDocument.createElement( "itunes:image" )
        imageElement.setAttribute( "href", self.ImageLink )
        channelElement.appendChild( imageElement )
        self.XmlAddTextElement( channelElement, "itunes:category", self.Category )
        for recording in self.Recordings:
            # Map the file's on-disk path under WWW_ROOT_DIR to its public URL.
            newUrl = Config.WWW_ROOT_URL + recording.pathToFile[len(Config.WWW_ROOT_DIR):]
            # NOTE(review): timezone is hard-coded as -0800.
            newDate = recording.pubDate.strftime( "%a, %d %b %Y %H:%M:%S -0800" )
            self.XmlAddPodcastItem( parentElement=channelElement, title=recording.title, author=recording.author, subtitle=recording.subtitle, summary=recording.summary, url = newUrl, guid=recording.guid, date=newDate, duration=recording.duration )
        return self.XmlDocument.toprettyxml(indent = "\t", encoding = "UTF-8")

    def WriteXML( self ):
        """Regenerate this podcast's RSS file on disk."""
        RssFilePath = os.path.join( Config.WWW_ROOT_DIR + Config.WWW_PODCAST_DIR, self.RssFileName )
        Debug.LogEntry( "Writing RSS file for %s to %s" % (self.Title, RssFilePath), Debug.DEBUG )
        XMLString = self.GenerateXMLString( )
        outFile = open( RssFilePath, 'w' )
        outFile.write( XMLString )
        outFile.close()
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,628 | wbutler/beyondtv-podcast-app | refs/heads/master | /Main.py | import os
import sys
import fcntl
import time
import Config
# Before we do anything, let's acquire the run lock to make
# sure that we're the only instance running.  The lock file handle is
# deliberately never closed: the flock is held for the process lifetime.
runFilePath = os.path.join( Config.CONFIG_DIR, Config.RUN_FILE)
lockFile = open( runFilePath, 'a+' )
try:
    fcntl.flock( lockFile, fcntl.LOCK_EX|fcntl.LOCK_NB )
except:
    print( "Another instance is already running." )
    sys.exit( 0 )
# Process command line options. Right now, there's only one
# possible option, which specifies that we should run in test
# mode. In this mode, we don't transcode any files or update
# any catalogs, but we do all the bookkeeping and log what
# the results would have been.
# NOTE(review): testMode is set here but never consulted below — the
# test-mode behavior described above is not implemented yet.
testMode = False
try:
    if sys.argv[1] == '-t':
        print( "Running in test mode." )
        testMode = True
except:
    pass
# These imports are intentionally deferred until after the lock is held:
# importing Debug/StorageManager runs their module-level initialization
# (kill-file check, config parsing, directory resets).
import Debug
import RecordingsWatcher
import StorageManager
import Transcoder
# Pipeline: find candidate files, filter to new/recent ones, wait out
# still-growing files, transcode ONE file per run, then persist.
queryStrings = StorageManager.GetSearchStrings( )
results = RecordingsWatcher.GetAvailableRecordings( queryStrings )
requests = StorageManager.GetTranscodeRequests( results )
if len( requests ) == 0:
    Debug.LogEntry( "No new files. Exiting.", Debug.NORMAL )
    sys.exit( 0 )
prunedRequests = RecordingsWatcher.PruneRecordings( requests )
if len( prunedRequests ) == 0:
    Debug.LogEntry( "No requests for transcoder. Exiting.", Debug.NORMAL )
    sys.exit( 0 )
convertedRecording = Transcoder.ConvertFile( prunedRequests[0] )
StorageManager.Submit( convertedRecording )
StorageManager.WritePodcasts( )
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,629 | wbutler/beyondtv-podcast-app | refs/heads/master | /Transcoder.py | import copy
import os
import tempfile
import Config
import Debug
def ConvertFile( inputRecording ):
    """Transcode *inputRecording* with podencoder and return a new Recording.

    The returned Recording is a deep copy of the input whose pathToFile
    points at the MP4 written to the system temp directory.
    """
    Debug.LogEntry( "Beginning transcoder module with %s" % str( inputRecording ), Debug.DEBUG )
    # Find a working directory
    workDir = tempfile.gettempdir()
    Debug.LogEntry( "Using temporary directory at %s" % workDir, Debug.DEBUG )
    # Construct our shell command.  podencoder writes "<basename>.mp4"
    # into the output directory.
    sourcePath = inputRecording.pathToFile
    targetPath = os.path.join(workDir, os.path.basename( sourcePath )) + ".mp4"
    commandString = "podencoder -o %s \"%s\"" % (workDir, sourcePath)
    print( commandString )
    # Run the command and drain its combined stdout/stderr so it can finish.
    Debug.LogEntry( "Executing shell command: %s" % commandString, Debug.NORMAL )
    ( childStdin, childStdout ) = os.popen4( commandString )
    results = childStdout.readlines()
    Debug.LogEntry( "Command execution complete.", Debug.NORMAL )
    # Log the results
    # TODO: Fill this in.
    # Return a new recording object pointing at the transcoded file.
    transcodedRecording = copy.deepcopy( inputRecording )
    transcodedRecording.pathToFile = targetPath
    Debug.LogEntry( "Transcoder complete: %s" % str(transcodedRecording), Debug.DEBUG )
    return transcodedRecording
| {"/RecordingsWatcher.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/StorageManager.py": ["/Config.py", "/Debug.py", "/Podcast.py", "/Recording.py"], "/Debug.py": ["/Config.py"], "/Recording.py": ["/Debug.py"], "/Podcast.py": ["/Config.py", "/Debug.py", "/Recording.py"], "/Main.py": ["/Config.py", "/Debug.py", "/RecordingsWatcher.py", "/StorageManager.py", "/Transcoder.py"], "/Transcoder.py": ["/Config.py", "/Debug.py"]} |
42,630 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/ThoraricSurgery.py | import pandas as pd
from scipy.io.arff import loadarff
def cat_to_num(df):
    """Map a categorical Pandas Series to integer codes.

    Unique values are sorted and numbered 0..k-1, so the mapping is
    deterministic for a given set of values regardless of row order.
    """
    feature_vals = sorted(df.unique())
    # enumerate gives exactly one code per value; the old
    # range(0, len(feature_vals) + 1) produced a harmless extra element
    # that zip silently dropped.
    mapping = {val: code for code, val in enumerate(feature_vals)}
    return df.map(mapping).astype(int)
def load_ThoraricSurgery(path='data/Thoracic Surgery/ThoraricSurgery.arff'):
    """Load the Thoracic Surgery ARFF dataset into a DataFrame.

    path: location of the .arff file; the default preserves the original
    hard-coded location for backward compatibility.
    """
    # Prepare the feature names
    featureNames = ['DGN', 'PRE4', 'PRE5', 'PRE6', 'PRE7', 'PRE8', 'PRE9', 'PRE10', 'PRE11', 'PRE14', 'PRE17', 'PRE19', 'PRE25', 'PRE30', 'PRE32', 'AGE', 'Risk1Y']
    raw_data = loadarff(path)
    df = pd.DataFrame(raw_data[0])
    df.columns = featureNames
    # Strip the 3-character prefix from the first column's byte values
    # (e.g. b'DGN2' -> b'2'). NOTE(review): the old comment said "targets"
    # but this touches column 0 (DGN) -- confirm the intent.
    df[df.columns[0]] = df[df.columns[0]].apply(lambda x: x[3:])
    # Decode byte strings and convert every categorical column to int codes.
    categorical_features = df.select_dtypes(include=['object']).columns
    for c in categorical_features:
        df[c] = df[c].apply(lambda x: x.decode())
        df[c] = cat_to_num(df[c])
    return df
# df = load_ThoraricSurgery()
# print(df.head())
# df.info()
# df.describe()
# print(df.isnull().values.any())
# print(df.to_string()) | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,631 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/steel_plate_faults.py | import numpy as np
import pandas as pd
def load_steel_faults_data(file_path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Steel plate faults/Faults.NNA"):
    """Load the Steel Plate Faults dataset.

    file_path: location of the whitespace-delimited Faults.NNA file; the
    default preserves the original hard-coded location.
    Returns (X, y): 27 feature columns and a single label vector 1..7.
    """
    data_csv = pd.read_csv(file_path, delim_whitespace=True, header=None)
    X = data_csv.iloc[:, :27]
    y = data_csv.iloc[:, 27:]
    # Collapse the 7 one-hot fault-type columns into a single label 1..7.
    y = np.dot(y.to_numpy(), [1, 2, 3, 4, 5, 6, 7])
    return X, y
42,632 | AliD101v/ml-course-project-f19 | refs/heads/master | /intrprt/cifar.py | import numpy as np
import sklearn
from sklearn.tree import DecisionTreeClassifier
from data.CIFAR10 import *
from sklearn import metrics
import matplotlib
import matplotlib.pyplot as plt
from sklearn import multiclass
# Fit a depth-limited decision tree on the first n CIFAR-10 training
# batches and report its test accuracy, then plot the top of the tree.
train, test = load_CIFAR10()
n = 5
print(f'First {n} records in...')
# Stack the raw pixel rows / labels of batches 0..n-1 into single arrays.
X = np.vstack([train[i][b'data'] for i in range(n)])
y = np.hstack([train[i][b'labels'] for i in range(n)]).reshape((-1, 1))
X_test = test[b'data']
y_test = np.asarray(test[b'labels']).reshape((-1, 1))
print(" Check X shapes : ")
print(X.shape)
print(X_test.shape)
# Train and evaluate the classifier.
cifar_decision_tree = DecisionTreeClassifier(max_depth=10, random_state=0)
cifar_decision_tree.fit(X, y)
cifar_results = cifar_decision_tree.predict(X_test)
print(cifar_decision_tree.score(X_test, y_test))
print(metrics.accuracy_score(y_test, cifar_results))
# Visualize only the first two levels of the fitted tree.
plt.figure()
sklearn.tree.plot_tree(cifar_decision_tree, filled=True, max_depth=2, label='root', fontsize=6)
plt.show()
42,633 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/ParkinsonSpeech.py | import pandas as pd
def load_Parkinson_Speech(path='C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Parkinson Speech/train_data.txt'):
    """Load the Parkinson Speech training dataset as a DataFrame.

    path: location of the comma-delimited train_data.txt; the default
    preserves the original hard-coded location.
    """
    featureNames = ['Subject id', 'Jitter (local)','Jitter (local, absolute)','Jitter (rap)','Jitter (ppq5)','Jitter (ddp)','Shimmer (local)','Shimmer (local, dB)','Shimmer (apq3)','Shimmer (apq5)',' Shimmer (apq11)','Shimmer (dda)','AC','NTH','HTN','Median pitch','Mean pitch','Standard deviation','Minimum pitch','Maximum pitch','Number of pulses','Number of periods','Mean period','Standard deviation of period','Fraction of locally unvoiced frames','Number of voice breaks','Degree of voice breaks']
    df = pd.read_csv(path,
                     delimiter=',', header=None, names=featureNames + ['UPDRS', 'class'])
    # Not currently using the test dataset, because it does not contain the
    # UPDRS ground-truth labels.
    return df
# df = load_Parkinson_Speech()
# print('training data:')
# print(df.head())
# df.info()
# df.describe()
# print()
# df_test = df_test[df_test.columns[-2]]
# print('test data:')
# print(df_test.head())
# df_test.info()
# df_test.describe() | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,634 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/StudentPerformance_data.py | import pandas as pd
from sklearn import preprocessing
def load_studentPerformance(path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/student performance/student-por.csv"):
    """Load the Student Performance (Portuguese) dataset.

    path: location of the semicolon-delimited CSV; the default preserves
    the original hard-coded location.
    Returns (X, y): the first 32 columns and the final-grade column.
    """
    df = pd.read_csv(path, delimiter=';')
    # Convert categorical columns to integer category codes.
    categorical_columns = df.select_dtypes(['category', 'object']).columns
    df[categorical_columns] = df[categorical_columns].apply(lambda x: x.astype('category'))
    df[categorical_columns] = df[categorical_columns].apply(lambda x: x.cat.codes)
    X = df[df.columns[0:32]]
    y = df[df.columns[32]]
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,635 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/seismic_bumps.py | import pandas as pd
import numpy as np
from scipy.io.arff import loadarff
def load_seismic_data(file_path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Seismic Bumps/seismic-bumps.arff"):
    """Load the Seismic Bumps ARFF dataset.

    file_path: location of the .arff file; the default preserves the
    original hard-coded location (the redundant `file_path = filePath = ...`
    double assignment was removed).
    Returns (X, y): 18 feature columns and the 'class' column.
    """
    raw_data = loadarff(file_path)  # loadarff returns a (data, meta) tuple
    seismic_df = pd.DataFrame(raw_data[0])
    # Convert categorical columns to integer category codes.
    categorical_columns = seismic_df.select_dtypes(['category', 'object']).columns
    seismic_df[categorical_columns] = seismic_df[categorical_columns].apply(lambda x: x.astype('category'))
    seismic_df[categorical_columns] = seismic_df[categorical_columns].apply(lambda x: x.cat.codes)
    X = seismic_df.iloc[:, :18]
    y = seismic_df['class']
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,636 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/Concrete_data.py | #!/usr/bin/env python
# coding: utf-8
# In[9]:
import pandas as pd
from sklearn import preprocessing
def load_concrete(path="Concrete_Data.xls"):
    """Load the Concrete Compressive Strength dataset.

    path: location of the Excel workbook; the default preserves the
    original hard-coded location.
    Returns (X, y): 8 feature columns and the strength target (last column).
    """
    df = pd.read_excel(path)
    X = df[df.columns[:8]]
    y = df[df.columns[-1]]
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,637 | AliD101v/ml-course-project-f19 | refs/heads/master | /Resources/Code Templates/ML_Experiments.py | # suppress sklearn warnings
def warn(*args, **kwargs):
    # No-op stand-in for warnings.warn; assigned over it just below so that
    # all warnings (mainly sklearn deprecation noise) are suppressed.
    pass
import warnings
warnings.warn = warn
# imports
import pandas as pd
from pandas.plotting import scatter_matrix
import numpy as np
import sklearn
from sklearn import preprocessing
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA
from sklearn.decomposition import TruncatedSVD
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
from sklearn.datasets import load_svmlight_file
import scipy
from scipy.sparse import csr_matrix
from IPython.core.display import display
import plotly
import plotly.graph_objects as go
import plotly.express as px
import matplotlib.pyplot as plt
import timeit
from datetime import datetime
# global configs and params
random_seed = 0
test_size = 0.2
fig_label_font = 'Libertinus Sans'
fig_legend_font = 'Libertinus Sans'
np.random.seed(random_seed)
# ────────────────────────────────────────────────────────────────────────────────
# # 1. Load the dataset(s)
# X, y = load_data()
# ────────────────────────────────────────────────────────────────────────────────
# # 2. Split the dataset(s) into training and test
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
# ────────────────────────────────────────────────────────────────────────────────
# # 3. Pipeline
# ## 3.1 Transformers
# ### 3.1.1 Continuous (quantitative or numeric) transformer
# Example:
# ```python
# numeric_transformer = Pipeline(steps=[
# ('imputer', SimpleImputer(strategy='median')),
# ('scaler', StandardScaler())])
# ```
#
# ### 3.1.2 Categorical (qualitative) transformer
# Example:
# ```python
# categorical_transformer = Pipeline(steps=[
# ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
# ('onehot', OneHotEncoder(handle_unknown='ignore'))])
# ```
# ### 3.1.3 Column transformer
# Example:
# ```python
# numeric_features = train.select_dtypes(include=['int64', 'float64']).columns
# categorical_features = train.select_dtypes(include=['object']).drop(['Loan_Status'], axis=1).columns
#
# preprocessor = ColumnTransformer(
# transformers=[
# ('num', numeric_transformer, numeric_features),
# ('cat', categorical_transformer, categorical_features)])
# ```
# ## 3.2 Classifier
# Example:
# ```python
# rf = Pipeline(steps=[('preprocessor', preprocessor),
# ('classifier', RandomForestClassifier())])
# rf.fit(X_train, y_train)
# y_pred = rf.predict(X_test)
# ```
# Example for model selection:
# ```python
# classifiers = [
# KNeighborsClassifier(3),
# SVC(kernel="rbf", C=0.025, probability=True),
# NuSVC(probability=True),
# DecisionTreeClassifier(),
# RandomForestClassifier(),
# AdaBoostClassifier(),
# GradientBoostingClassifier()
# ]for classifier in classifiers:
# pipe = Pipeline(steps=[('preprocessor', preprocessor),
# ('classifier', classifier)])
# pipe.fit(X_train, y_train)
# print(classifier)
# print("model score: %.3f" % pipe.score(X_test, y_test))
# ```
# ## 3.3 Hyperparameter tuning
# Example:
# ```python
# param_grid = {
# 'classifier__n_estimators': [200, 500],
# 'classifier__max_features': ['auto', 'sqrt', 'log2'],
# 'classifier__max_depth' : [4,5,6,7,8],
# 'classifier__criterion' :['gini', 'entropy']}from sklearn.model_selection import GridSearchCVCV = GridSearchCV(rf, param_grid, n_jobs= 1)
# CV.fit(X_train, y_train)
# print(CV.best_params_)
# print(CV.best_score_)
# ```
# # 4. Output
# ## 4.1 Results
# ## 4.1 Figures | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,638 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/QSARAquaticToxicity.py | import pandas as pd
def load_QSAR_aquatic_toxicity(path='data/QSAR aquatic toxicity/qsar_aquatic_toxicity.csv'):
    """Load the QSAR Aquatic Toxicity dataset as a DataFrame.

    path: location of the semicolon-delimited CSV; the default preserves
    the original hard-coded location. The last column, LC50, is the target.
    """
    featureNames = ['TPSA(Tot)', 'SAacc', 'H-050', 'MLOGP', 'RDCHI', 'GATS1p', 'nN', 'C-040', 'LC50 [-LOG(mol/L)]']
    df = pd.read_csv(path,
                     delimiter=';', header=None, names=featureNames)
    return df
# df = load_QSAR_aquatic_toxicity()
# print(df.head())
# df.info()
# df.describe() | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,639 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/wine_quality.py | import numpy as np
import pandas as pd
def load_wine_quality(red_path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Wine Quality/winequality-red.csv",
                      white_path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Wine Quality/winequality-white.csv"):
    """Load and combine the red- and white-wine quality datasets.

    Returns (X, y): 11 feature columns and the quality score for the
    concatenation of both datasets.
    """
    data_csv_red = pd.read_csv(red_path, delimiter=";")
    data_csv_white = pd.read_csv(white_path, delimiter=";")
    X_red = data_csv_red.iloc[:, :11]
    y_red = data_csv_red.iloc[:, 11]
    X_white = data_csv_white.iloc[:, :11]
    y_white = data_csv_white.iloc[:, 11]
    # BUG FIX: DataFrame.append returns a new object; the original discarded
    # that return value and silently returned only the red-wine rows.
    X = pd.concat([X_red, X_white])
    y = pd.concat([y_red, y_white])
    return X, y
42,640 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/SGEMM_data.py | import numpy as np
import pandas as pd
#### import pandas as pd
from sklearn import preprocessing
def load_SGEMM(path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/SGEMM/sgemm_product.csv"):
    """Load the SGEMM GPU kernel performance dataset.

    path: location of the CSV; the default preserves the original
    hard-coded location.
    Returns (X, y): 14 parameter columns and the mean runtime of the four
    measured runs.
    """
    df = pd.read_csv(path, delimiter=',')
    # BUG FIX: the original summed 'Run1 (ms)' four times; the intent is
    # clearly the average of the four measured runs.
    runs = ['Run1 (ms)', 'Run2 (ms)', 'Run3 (ms)', 'Run4 (ms)']
    y = df[runs].sum(axis=1) / 4
    X = df[df.columns[0:14]]
    return X, y
# df=load_SGEMM()
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,641 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/BikeSharing.py | import pandas as pd
def load_BikeSharing(path='data/Bike Sharing/hour.csv'):
    """Load the hourly Bike Sharing dataset as a DataFrame.

    path: location of the CSV; the default preserves the original
    hard-coded location. The 'instant' column becomes the index.
    """
    df = pd.read_csv(path,
                     delimiter=',', header=0, index_col='instant')
    return df
# df = load_BikeSharing()
# print(df[df.columns[1:-3]].head())
# df.info()
# df.describe() | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,642 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/NOAAWeatherBellevue.py | import numpy as np
import pandas as pd
def load_NOAAWeatherBellevue(data_dir='data/NOAA Weather Bellevue'):
    """Load the NOAA Bellevue weather dataset.

    data_dir: directory containing NEweather_data.csv / NEweather_class.csv;
    the default preserves the original hard-coded location.
    Returns (X, y): feature DataFrame and a flat array of 'Rain' labels.
    """
    featureNames = ['Temperature','Dew Point','Sea Level Pressure','Visibility','Average Wind Speed','Maximum Sustained Wind Speed','Maximum Temperature','Minimum Temperature']
    df_X = pd.read_csv(data_dir + '/NEweather_data.csv',
                       delimiter=',', header=None, names=featureNames)
    df_y = pd.read_csv(data_dir + '/NEweather_class.csv',
                       delimiter=',', header=None, names=['Rain'])
    # ravel() flattens the single-column frame into the 1-D array sklearn expects.
    return df_X, df_y.values.ravel()
# df_X,df_y = load_NOAAWeatherBellevue()
# print('Features:')
# print(df_X.head())
# df_X.info()
# df_X.describe()
# print('Targets:')
# print(df_y.head())
# df_y.info()
# df_y.describe() | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,643 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/Diabetic_Retinopathy_Analysis.py | # suppress sklearn warnings
def warn(*args, **kwargs):
    # No-op replacement for warnings.warn (silences sklearn warning noise).
    pass
import warnings
warnings.warn = warn  # monkey-patch: every warning issued after this is dropped
# imports
import pandas as pd
from pandas.plotting import scatter_matrix
import numpy as np
import sklearn
from sklearn.pipeline import Pipeline
import pickle
# preprocessing
from sklearn import preprocessing
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
# model selection
from sklearn.model_selection import train_test_split, GridSearchCV
# metrics
from sklearn import metrics
from sklearn.metrics import accuracy_score,\
precision_recall_fscore_support
# estimators
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier
from sklearn.neural_network import MLPClassifier
# scipy
import scipy
from scipy.sparse import csr_matrix
from IPython.core.display import display
# plotting
import plotly
import plotly.graph_objects as go
import plotly.express as px
import matplotlib.pyplot as plt
# misc
import timeit
from datetime import datetime
from data.Diabetic_Retinopathy import *
# global configs and params
random_seed = 0
test_size = 0.2
fig_label_font = 'Libertinus Sans'
fig_legend_font = 'Libertinus Sans'
np.random.seed(random_seed)
grid_search = False
dataset_name = 'Diabetic_retinopathy'
results_path = 'cls/results/'
# results_name = f'cnn_{time.strftime("%Y%m%d-%H%M%S")}.pt'
results_name = f'{dataset_name}_20191206'
gridsearch_name = f'{dataset_name}_20191206'
# ────────────────────────────────────────────────────────────────────────────────
# # 1. Load the dataset(s)
# todo perform some exploratory data analysis
# todo check for missing/NA values
X,y = load_diabetic_data()
# print(df.describe())
# ────────────────────────────────────────────────────────────────────────────────
# # 2. Split the dataset(s) into training and test
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size,
random_state=random_seed)
# ────────────────────────────────────────────────────────────────────────────────
# # 3. Pipeline
# ## 3.1 Transformers
# ### 3.1.1 Continuous (quantitative or numeric) transformer
# Example:
# ```python
# numeric_transformer = Pipeline(steps=[
# ('imputer', SimpleImputer(strategy='median')),
# ('scaler', StandardScaler())])
# ```
numeric_transformer = Pipeline(steps=[
('scaler', StandardScaler())])
#
# ### 3.1.2 Categorical (qualitative) transformer
# Example:
# ```python
# categorical_transformer = Pipeline(steps=[
# ('imputer', SimpleImputer(strategy='constant', fill_value='missing')),
# ('onehot', OneHotEncoder(handle_unknown='ignore'))])
# ```
# ### 3.1.3 Column transformer
# Example:
# ```python
# numeric_features = train.select_dtypes(include=['int64', 'float64']).columns
# categorical_features = train.select_dtypes(include=['object']).drop(['Loan_Status'], axis=1).columns
#
# preprocessor = ColumnTransformer(
# transformers=[
# ('num', numeric_transformer, numeric_features),
# ('cat', categorical_transformer, categorical_features)])
# ```
numeric_features = X.select_dtypes(include=['int64', 'float64']).columns
preprocessor = ColumnTransformer(
transformers=[
('num', numeric_transformer, numeric_features)])
# ## 3.2 Classifier
# Example:
# ```python
# rf = Pipeline(steps=[('preprocessor', preprocessor),
# ('classifier', RandomForestClassifier())])
# rf.fit(X_train, y_train)
# y_pred = rf.predict(X_test)
# ```
# Example for model selection:
# ```python
# classifiers = [
# KNeighborsClassifier(3),
# SVC(kernel="rbf", C=0.025, probability=True),
# NuSVC(probability=True),
# DecisionTreeClassifier(),
# RandomForestClassifier(),
# AdaBoostClassifier(),
# GradientBoostingClassifier()
# ]for classifier in classifiers:
# pipe = Pipeline(steps=[('preprocessor', preprocessor),
# ('classifier', classifier)])
# pipe.fit(X_train, y_train)
# print(classifier)
# print("model score: %.3f" % pipe.score(X_test, y_test))
# ```
classifiers = [
LogisticRegression(random_state=random_seed),
KNeighborsClassifier(),
GaussianNB(),
SVC(probability=True, random_state=random_seed),
DecisionTreeClassifier(random_state=random_seed),
RandomForestClassifier(random_state=random_seed),
AdaBoostClassifier(random_state=random_seed),
MLPClassifier(random_state=random_seed)
]
grid_params = {
'LogisticRegression':{
'LogisticRegression__solver': ['newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga']
},
'KNeighborsClassifier':
{
'KNeighborsClassifier__n_neighbors': [3, 4, 5]
},
'GaussianNB':{
'GaussianNB__var_smoothing': list(np.logspace(-10, 0, num=11, base=10)),
},
'SVC':
{
'SVC__kernel': ['linear', 'poly', 'rbf', 'sigmoid'],
'SVC__C': list(np.logspace(-5, 15, num=11, base=2)),
'SVC__gamma': list(np.logspace(-15, 3, num=10, base=2)),
},
'DecisionTreeClassifier':
{
'DecisionTreeClassifier__criterion': ['gini', 'entropy'],
'DecisionTreeClassifier__max_depth': list(np.linspace(1, 32, 32, endpoint=True)),
# 'DecisionTreeClassifier__splitter': ['best', 'random'],
# 'DecisionTreeClassifier__min_samples_split': list(np.linspace(0.1, 1.0, 10, endpoint=True)),
# 'DecisionTreeClassifier__min_samples_leaf': list(np.linspace(0.1, 0.5, 5, endpoint=True)),
# 'DecisionTreeClassifier__max_features': list(np.linspace(0.1, 0.5, 5, endpoint=True)),
},
'RandomForestClassifier':
{
'RandomForestClassifier__n_estimators': list(np.arange(10, 101)),
'RandomForestClassifier__criterion': ['gini', 'entropy'],
'RandomForestClassifier__max_depth': list(np.linspace(1, 32, 32, endpoint=True)),
# 'RandomForestClassifier__splitter': ['best', 'random'],
# 'RandomForestClassifier__min_samples_split': list(np.linspace(0.1, 1.0, 10, endpoint=True)),
# 'RandomForestClassifier__min_samples_leaf': list(np.linspace(0.1, 0.5, 5, endpoint=True)),
# 'RandomForestClassifier__max_features': list(np.linspace(0.1, 0.5, 5, endpoint=True)),
},
'AdaBoostClassifier':
{
'AdaBoostClassifier__n_estimators': list(np.arange(10, 51)),
'AdaBoostClassifier__learning_rate': list(np.linspace(0.1, 1, 10, endpoint=True)),
},
'MLPClassifier':
{
'MLPClassifier__activation': ['identity', 'logistic', 'tanh', 'relu'],
'MLPClassifier__solver': ['lbfgs', 'sgd', 'adam'],
'MLPClassifier__hidden_layer_sizes': [(1,)] + [(i,) for i in np.arange(10, 101, 10)],
'MLPClassifier__learning_rate': ['constant', 'invscaling', 'adaptive'],
'MLPClassifier__max_iter': list(np.arange(300, 501, 50)),
}
}
# Fit every candidate classifier (optionally grid-searched) on the training
# split and collect its test-set metrics into `results`.
results = []
for classifier in classifiers:
    # One pipeline per classifier; the step name is the class name so the
    # grid_params keys (e.g. 'SVC__C') resolve correctly.
    pipe = Pipeline(steps=[('preprocessor', preprocessor),
                           (classifier.__class__.__name__, classifier)])
    # Perform a grid search on the entire pipeline of the current classifier
    # Note: to disable the grid search, comment the following three lines,
    # and call fit() and predict() directly on the pipe object
    if (grid_search):
        grid_clf = GridSearchCV(pipe, grid_params[classifier.__class__.__name__], n_jobs=8)
        grid_clf.fit(X_train, y_train)
        # best params are stored in the grid_clf.best_params_ object:
        ## print(grid_clf.best_params_)
        # store the best classifier for each classifier
        pipe = grid_clf.best_estimator_
        # pickle the grid object
        # Its important to use binary mode
        # NOTE(review): 'ab' appends every grid object to the same file --
        # confirm multiple pickles per file is intended.
        grid_file = open(results_path + gridsearch_name, 'ab')
        # source, destination
        pickle.dump(grid_clf, grid_file)
        grid_file.close()
    else:
        pipe.fit(X_train, y_train)
    # Evaluate on the held-out test split (micro-averaged P/R/F1).
    y_pred = pipe.predict(X_test)
    precision, recall, f1, _ = \
        precision_recall_fscore_support(y_test, y_pred, average='micro')
    result = {
        'Classifier': classifier.__class__.__name__,
        'Score': pipe.score(X_test, y_test),
        'Accuracy': accuracy_score(y_test, y_pred),
        'f1 score': f1,
        'Precision': precision,
        'Recall': recall
    }
    results.append(result)
# Tabulate the per-classifier metrics; a blank index gives cleaner display.
results_df = pd.DataFrame(data=results, index=None,
                          columns=['Classifier', 'Score', 'Accuracy',
                                   'f1 score', 'Precision', 'Recall'])
results_df.index = [''] * len(results_df)
# ## 3.3 Hyperparameter tuning
# Example:
# ```python
# grid_param = {
# 'classifier__n_estimators': [200, 500],
# 'classifier__max_features': ['auto', 'sqrt', 'log2'],
# 'classifier__max_depth' : [4,5,6,7,8],
# 'classifier__criterion' :['gini', 'entropy']}
# from sklearn.model_selection import GridSearchCV
# CV = GridSearchCV(rf, grid_param, n_jobs= 1)
# CV.fit(X_train, y_train)
# print(CV.best_params_)
# print(CV.best_score_)
# ```
# # 4. Output
# ## 4.1 Results
# Jupyter Notebook
results_df = results_df.sort_values(by=['Score'], ascending=False)
display(results_df)
# Save the dataframe
results_df.to_pickle(results_path + results_name)
results_df.to_csv(results_path + results_name + '.csv')
# ## 4.1 Figures | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,644 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/facebookMetricData.py | import pandas as pd
from sklearn import preprocessing
def load_facebookMetric(path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Facebook/dataset_Facebook.csv"):
    """Load the Facebook Metrics dataset.

    path: location of the semicolon-delimited CSV; the default preserves
    the original hard-coded location.
    Returns (X, y): columns 1-5 as features and column 7 as the target.
    """
    df = pd.read_csv(path, delimiter=';')
    # Convert categorical columns to integer category codes.
    categorical_columns = df.select_dtypes(['category', 'object']).columns
    df[categorical_columns] = df[categorical_columns].apply(lambda x: x.astype('category'))
    df[categorical_columns] = df[categorical_columns].apply(lambda x: x.cat.codes)
    # (Removed an unused StandardScaler instance the original created here.)
    X = df[df.columns[1:6]]
    y = df[df.columns[7]]
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,645 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/Merck_data.py | import numpy as np
import pandas as pd
# This dataset takes a lot of time to run due to its size.
def load_set_2_data(path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Merck/ACT2_competition_training.csv"):
    """Load the Merck ACT2 training set (large file; slow to read).

    path: location of the CSV; the default preserves the original
    hard-coded location.
    Returns (X, y) as DataFrames: descriptor columns (uint8) and activity.
    """
    # Read only the header line to discover how many descriptor columns exist.
    with open(path) as f:
        cols = f.readline().rstrip('\n').split(',')
    X = np.loadtxt(path, delimiter=',', usecols=range(2, len(cols)), skiprows=1, dtype=np.uint8)
    y = np.loadtxt(path, delimiter=',', usecols=[1], skiprows=1)
    X = pd.DataFrame(X)
    y = pd.DataFrame(y)
    return X, y
def load_set_4_data(path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Merck/ACT4_competition_training.csv"):
    """Load the Merck ACT4 training set.

    path: location of the CSV; the default preserves the original
    hard-coded location.
    Returns (X, y) as numpy arrays: descriptor columns (uint8) and activity.
    """
    # Read only the header line to discover how many descriptor columns exist.
    with open(path) as f:
        cols_4 = f.readline().rstrip('\n').split(',')
    X = np.loadtxt(path, delimiter=',', usecols=range(2, len(cols_4)), skiprows=1, dtype=np.uint8)
    y = np.loadtxt(path, delimiter=',', usecols=[1], skiprows=1)
    # np.loadtxt already returns ndarrays; the original np.asarray calls
    # were no-ops and have been removed.
    return X, y
# outfile_4 = TemporaryFile()
# np.savez(outfile_4, X_4, y_4)
# return outfile | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,646 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/austrailian_data.py | import pandas as pd
from sklearn.model_selection import train_test_split
def load_austrailian(path='C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Australian credit/australian.dat'):
    """Load the Australian Credit Approval dataset.

    path: location of the whitespace-delimited .dat file; the default
    preserves the original hard-coded location.
    Returns (X, y): features X1..X14 and the binary target Y.
    """
    df = pd.read_table(path, sep='\s+', header=None)
    df.columns = ['X1','X2','X3','X4','X5','X6','X7','X8','X9','X10','X11','X12','X13','X14','Y']
    # (Removed the unused df_cat / df_cont intermediate frames the original built.)
    X = df.drop('Y', axis=1)
    y = df['Y']
    return X, y
# X,y=load_austrailian()
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,647 | AliD101v/ml-course-project-f19 | refs/heads/master | /rgs/data/crime_data.py | import numpy as np
import pandas as pd
def load_crime_data(path='C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Communities and Crime/communities.data'):
    """Load the Communities and Crime dataset, dropping columns with
    missing values.

    path: location of the comma-delimited .data file; the default preserves
    the original hard-coded location.
    Returns (X, y): up to 127 feature columns and the crime-rate target.
    """
    df = pd.read_table(path, delimiter=',', header=None)
    # BUG FIX: replace('?', None) triggers pandas' pad-fill behavior instead
    # of inserting missing values; '?' must become NaN for dropna to work.
    # Also removed a bare df.drop() call, which raises (drop needs labels).
    df = df.replace('?', np.nan)
    df = df.dropna(axis="columns")
    X = df[df.columns[0:127]]
    y = df[df.columns[127]]
    return X, y
X,y = load_crime_data()
print(X)
print(y) | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,648 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/GermanCreditData.py | import pandas as pd
from sklearn.model_selection import train_test_split
def load_GermanCredit(path='C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/German credit/german.data'):
    """Load the German Credit dataset.

    path: location of the whitespace-delimited .data file; the default
    preserves the original hard-coded location.
    Returns (X, y): all columns but the last as features, last as target.
    """
    df = pd.read_table(path, sep='\s+', header=None)
    X = df[df.columns[:-1]]
    y = df[df.columns[-1]]
    return X, y
# df=load_GermanCredit()
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,649 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/Diabetic_Retinopathy.py | import pandas as pd
from scipy.io.arff import loadarff
def load_diabetic_data(file_path="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Diabetic Retinopathy/messidor_features.arff"):
    """Load the Diabetic Retinopathy (Messidor) ARFF dataset.

    file_path: location of the .arff file; the default preserves the
    original hard-coded location.
    Returns (X, y): 19 feature columns and the binary class column.
    """
    raw_data = loadarff(file_path)  # loadarff returns a (data, meta) tuple
    messidor_columns = ['quality','pre_screening','ma1','ma2','ma3','ma4','ma5','ma6','ex1','ex2','ex3','ex4','ex5','ex6','ex7','ex8','ex9','dist_macula_optic_center','result_am_fm','class']
    messidor_df = pd.DataFrame(raw_data[0])
    messidor_df.columns = messidor_columns
    X = messidor_df.iloc[:, 0:19]
    y = messidor_df.iloc[:, 19]
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,650 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/YeastData.py | import pandas as pd
def load_yeastData(path='C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Yeast/yeast.data'):
    """Load the Yeast protein-localization dataset.

    path: location of the whitespace-delimited .data file; the default
    preserves the original hard-coded location (the redundant `df=df=`
    double assignment was removed).
    Returns (X, y): columns 1-8 as features and column 9 as the target.
    """
    df = pd.read_table(path, sep='\s+', header=None)
    # Convert categorical columns to integer category codes.
    categorical_columns = df.select_dtypes(['category', 'object']).columns
    df[categorical_columns] = df[categorical_columns].apply(lambda x: x.astype('category'))
    df[categorical_columns] = df[categorical_columns].apply(lambda x: x.cat.codes)
    X = df[df.columns[1:9]]
    y = df[df.columns[9]]
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,651 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/credit_card.py | import numpy as np
import pandas as pd
def load_credit_card_data(filePath="C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data/Default of credit card clients/default_of_credit_card_clients.xls"):
    """Load the Default of Credit Card Clients dataset.

    filePath: location of the Excel workbook; the default preserves the
    original hard-coded location.
    Returns (X, y): 23 feature columns and the default indicator.
    """
    data_excel = pd.read_excel(filePath, sheet_name='Data')
    X = data_excel.iloc[:, :23]
    y = data_excel.iloc[:, 23]
    return X, y
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,652 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/Adult.py | import pandas as pd
def load_Adult(data_dir='data/Adult Data Set'):
    """Load the Adult (census income) train and test datasets.

    data_dir: directory containing adult.data / adult.test; the default
    preserves the original hard-coded location.
    Returns (df_train, df_test) with the 'Prediction' column mapped to 0/1.
    """
    # Prepare the feature names
    featureNames = ['age', 'workclass', 'fnlwgt', 'education', 'education-num', 'marital-status', 'occupation', 'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'Prediction']
    df_train = pd.read_csv(data_dir + '/adult.data',
                           delimiter=',', skipinitialspace=True, header=None, names=featureNames, na_values='?')
    # The test file has an extra header row (skiprows=1).
    df_test = pd.read_csv(data_dir + '/adult.test',
                          delimiter=',', skipinitialspace=True, header=None, skiprows=1, names=featureNames, na_values='?')
    # Map the targets from categorical (string) values to 0/1.
    # The test file's labels carry a trailing period, hence two mappings.
    prediction_mapping = {'<=50K': 0, '>50K': 1}
    df_train['Prediction'] = df_train['Prediction'].map(prediction_mapping).astype(int)
    prediction_mapping = {'<=50K.': 0, '>50K.': 1}
    df_test['Prediction'] = df_test['Prediction'].map(prediction_mapping).astype(int)
    return df_train, df_test
# df,_ = load_Adult()
# print(df[df.columns[:-1]].head())
# df.info()
# df.describe() | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,653 | AliD101v/ml-course-project-f19 | refs/heads/master | /intrprt/CIFAR10_CNN.py | #%%
# CUDA test code
# import only for the CUDA test cell
# from __future__ import print_function
import numpy as np
import sklearn
from sklearn.tree import DecisionTreeClassifier
import torch
import torchvision
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import matplotlib.pyplot as plt
from PIL import Image
import time
import sys
from intrprt.data.CIFAR10 import *
# If you are loading a saved trained model, set `loading` to `True`,
# and provide the correct file name and path for model name
loading = False
model_path = 'intrprt/model/'
# model_name = f'cnn_{time.strftime("%Y%m%d-%H%M%S")}.pt'
model_name = 'cnn_20191206.pt'
#%%
class Net(nn.Module):
    """LeNet-style CNN for 32x32 RGB images with 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # Two conv layers; each is followed by ReLU + 2x2 max pooling.
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.pool = nn.MaxPool2d(2, 2)
        # Classifier head: the conv stack leaves 16 feature maps of 5x5.
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # Convolutional feature extractor.
        for conv in (self.conv1, self.conv2):
            x = self.pool(F.relu(conv(x)))
        # Flatten to (batch, 16*5*5) for the fully-connected head.
        x = x.view(-1, 16 * 5 * 5)
        for fc in (self.fc1, self.fc2):
            x = F.relu(fc(x))
        # Raw class scores (no softmax; CrossEntropyLoss applies it).
        return self.fc3(x)
def imshow(img):
    """Undo the [-1, 1] normalization and display a (C, H, W) image tensor."""
    unnormalized = img / 2 + 0.5
    # matplotlib expects channels last: (H, W, C).
    plt.imshow(unnormalized.numpy().transpose((1, 2, 0)))
    plt.show()
def print_CUDA_info():
    """Print current GPU memory usage in GB; does nothing when not on CUDA.

    Relies on the module-level ``device`` defined later in this script.
    """
    if device.type == 'cuda':
        # print(torch.cuda.get_device_name(0))
        # print('Memory Usage:')
        print('Allocated:', round(torch.cuda.memory_allocated(0)/1024**3,1), 'GB')
        # NOTE(review): torch.cuda.memory_cached was renamed to
        # memory_reserved in newer torch releases — confirm the installed
        # version still supports this name.
        print('Cached: ', round(torch.cuda.memory_cached(0)/1024**3,1), 'GB')
#%% [markdown]
# Load the dataset.
X, y, X_test, y_test = load_CIFAR10(transform=True)
#%% [markdown]
# Transform to normalized-range tensors [-1, 1]
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
#%% [markdown]
# Display some sample images
# num_samples = 3
# indices = np.random.randint(0, X.shape[0], num_samples)
# images = list()
# labels = list()
# for i in range(num_samples):
# images.append(transform(Image.fromarray(X[i])))
# labels.append(classes[y[i]])
# imshow(torchvision.utils.make_grid(images))
# print(' '.join('%5s' % labels[j] for j in range(num_samples)))
#%% [markdown]
# Create the network, loss, and optimizer.
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
#%% [markdown]
# Set up CUDA device
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net.to(device)
print('Using device:', device)
print(torch.cuda.get_device_name(0))
print()
print_CUDA_info()
#%% [markdown]
# Train the convolutional neural network.
# batch_size = 1
epochs = 10
# inputs = np.zeros((batch_size,) + X.shape[1:])
for epoch in range(epochs): # loop over the dataset multiple times
running_loss = 0.0
# for i in range(0, X.shape[0], batch_size):
for i in range(X.shape[0]):
# for i in range(10):
inputs = torch.tensor(np.expand_dims(transform(Image.fromarray(X[i])), axis=0)).to(device)
label = torch.from_numpy(np.array([y[i]]).astype(np.int64)).to(device)
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = net(inputs)
loss = criterion(outputs, label)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if i % 2000 == 1999: # print every 2000 mini-batches
print('[%d, %5d] loss: %.3f' %
(epoch + 1, i + 1, running_loss / 2000))
print_CUDA_info()
running_loss = 0.0
print('Finished Training')
# save the trained model once done trianing
torch.save(net.state_dict(), model_path + model_name)
#%% [markdown]
# Load a saved trained model
net = Net()
net.load_state_dict(torch.load(model_path + model_name))
print(net)
#%% [markdown]
# Check the prediction against some sample images from the test data
# print images
num_samples = 5
indices = np.random.randint(0, X_test.shape[0], num_samples)
images = np.zeros((num_samples,) + transform(Image.fromarray(X_test[0])).shape)
labels = list()
for i in range(num_samples):
# images.append(transform(Image.fromarray(X_test[i])))
images[i,:] = transform(Image.fromarray(X_test[i]))
labels.append(classes[y_test[i]])
images = torch.from_numpy(images)
imshow(torchvision.utils.make_grid(images))
print('Ground truth:')
print(' '.join('%5s' % labels[j] for j in range(num_samples)))
outputs = net(images.float())
_, predicted = torch.max(outputs, 1)
print('Predicted: ', ' '.join('%5s' % classes[predicted[j]]
for j in range(num_samples)))
#%% [markdown]
# Calculate the accuracy on test data
correct = 0
total = 0
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
with torch.no_grad():
for i in range(X_test.shape[0]):
# for i in range(10):
images = torch.tensor(np.expand_dims(transform(Image.fromarray(X_test[i])), axis=0)).to(device)
labels = torch.from_numpy(np.array([y_test[i]]).astype(np.int64)).to(device)
outputs = net(images.float())
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
# Calculate per-class accuracy
c = (predicted == labels).squeeze()
# for i in range(4):
# label = labels[i]
class_correct[labels] += c.item()
class_total[labels] += 1
print(f'Accuracy of the network on the {X_test.shape[0]} test images: {(100 * correct / total)}%')
for i in range(10):
print('Accuracy of %5s : %2d %%' % (
classes[i], 100 * class_correct[i] / class_total[i]))
#%% [markdown]
# CUDA test code
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Assuming that we are on a CUDA machine, this should print a CUDA device:
print(device)
x = torch.rand(5, 3)
print(x)
# %%
# # Ref: [How to visualize convolutional features in 40 lines of code](https://towardsdatascience.com/how-to-visualize-convolutional-features-in-40-lines-of-code-70b7d87b0030 "How to visualize convolutional features in 40 lines of code")
# class SaveFeatures():
# def __init__(self, module):
# self.hook = module.register_forward_hook(self.hook_fn)
# def hook_fn(self, module, input, output):
# self.features = torch.tensor(output,requires_grad=True).cuda()
# def close(self):
# self.hook.remove()
# #%%
# class FilterVisualizer():
# def __init__(self, size=56, upscaling_steps=12, upscaling_factor=1.2):
# self.size, self.upscaling_steps, self.upscaling_factor = size, upscaling_steps, upscaling_factor
# self.model = vgg16(pre=True).cuda().eval()
# set_trainable(self.model, False)
# def visualize(self, layer, filter, lr=0.1, opt_steps=20, blur=None):
# sz = self.size
# img = np.uint8(np.random.uniform(150, 180, (sz, sz, 3)))/255 # generate random image
# activations = SaveFeatures(list(self.model.children())[layer]) # register hook
# for _ in range(self.upscaling_steps): # scale the image up upscaling_steps times
# train_tfms, val_tfms = tfms_from_model(vgg16, sz)
# img_var = V(val_tfms(img)[None], requires_grad=True) # convert image to Variable that requires grad
# optimizer = torch.optim.Adam([img_var], lr=lr, weight_decay=1e-6)
# for n in range(opt_steps): # optimize pixel values for opt_steps times
# optimizer.zero_grad()
# self.model(img_var)
# loss = -activations.features[0, filter].mean()
# loss.backward()
# optimizer.step()
# img = val_tfms.denorm(img_var.data.cpu().numpy()[0].transpose(1,2,0))
# self.output = img
# sz = int(self.upscaling_factor * sz) # calculate new image size
# img = cv2.resize(img, (sz, sz), interpolation = cv2.INTER_CUBIC) # scale image up
# if blur is not None: img = cv2.blur(img,(blur,blur)) # blur image to reduce high frequency patterns
# self.save(layer, filter)
# activations.close()
# def save(self, layer, filter):
# plt.imsave("layer_"+str(layer)+"_filter_"+str(filter)+".jpg", np.clip(self.output, 0, 1)) | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,654 | AliD101v/ml-course-project-f19 | refs/heads/master | /cls/data/WDBC.py | import pandas as pd
def load_WDBC():
    """Load the Breast Cancer Wisconsin (Diagnostic) dataset as a DataFrame.

    Column names are built per measurement group (M / SE / W suffixes), the
    'ID' column is dropped, and 'Diagnosis' is mapped 'B' -> 0, 'M' -> 1.
    """
    base_names = ['Radius', 'Texture', 'Perimeter', 'Area', 'Smoothness', 'Compactness', 'Concavity', 'Concave points', 'Symmetry', 'Fractal dimension']
    # Same ten measurements appear three times: mean, standard error, worst.
    columns = ['ID', 'Diagnosis']
    for suffix in ('(M)', '(SE)', '(W)'):
        columns += [f'{name} {suffix}' for name in base_names]
    df = pd.read_csv('data/Breast Cancer Wisconsin/wdbc.data',
        delimiter=',', header=None, names=columns)
    # The identifier column carries no predictive information.
    df = df.drop(columns='ID')
    # Map the categorical target onto integers.
    df['Diagnosis'] = df['Diagnosis'].map({'B': 0, 'M': 1}).astype(int)
    return df
# df = load_WDBC()
# print(df.head())
# df.info()
# df.describe() | {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,655 | AliD101v/ml-course-project-f19 | refs/heads/master | /intrprt/data/CIFAR10.py | import numpy as np
import pickle
def unpickle(file):
    """Deserialize a pickled batch file; string keys come back as bytes."""
    with open(file, 'rb') as handle:
        payload = pickle.load(handle, encoding='bytes')
    return payload
def load_CIFAR10(transform=False):
    """Load CIFAR-10 from ``data/CIFAR-10`` into numpy arrays.

    Args:
        transform: when True, reshape the flat pixel rows into images with
            the colour axis last, i.e. shape (N, 32, 32, 3).

    Returns:
        tuple: (X, y, X_test, y_test) — training data/labels and test
        data/labels.
    """
    # Training data is split across five pickled batch files.
    train = list()
    for i in range(5):
        # C:/Users/sidha/OneDrive/Documents/ml-course-project-f19/ml-course-project-f19/data
        obj = unpickle(f'data/CIFAR-10/data_batch_{i+1}')
        train.append(obj)
    test = unpickle(f'data/CIFAR-10/test_batch')
    # Start from batch 1; the remaining batches are stacked on below.
    X = train[0][b'data']
    y = train[0][b'labels']
    y = np.asarray(y)
    X_test = test[b'data']
    y_test = test [b'labels']
    y_test = np.asarray(y_test)
    for i in range (1,len(train)):
        X = np.vstack((X, train[i][b'data']))
        y = np.hstack((y, train[i][b'labels']))
    if (transform):
        # transpose the colour axis to be the inner-most one, giving the image format (H, W, C)
        X = X.reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))
        X_test = X_test.reshape(-1, 3, 32, 32).transpose((0, 2, 3, 1))
    return X, y, X_test, y_test
## To test:
# X, y, X_test, y_test = load_CIFAR10(transform=True)
# print(y.shape)
## To test [OLD]:
# train, test = load_CIFAR10()
# n = 5
# print(f'First {n} records in...')
# for i in range(len(train)):
# print(f'batch {i+1}:')
# obj_data = train[i][b'data']
# obj_labels = train[i][b'labels']
# print(f'{obj_data[:n,:n]}')
# print(f'{obj_labels[:n]}')
# print(f'Test data:')
# obj_data = test[b'data']
# obj_labels = test[b'labels']
# print(f'{obj_data[:n,:n]}')
# print(f'{obj_labels[:n]}')
| {"/intrprt/CIFAR10_CNN.py": ["/intrprt/data/CIFAR10.py"]} |
42,659 | arhamhameed/Language-Parsers-and-Translators | refs/heads/master | /parser1.py | from cyk import parse
'''
The array sentence contains the test sentenses that our machine recognizes or not.
Recognizable sentences:
'I and Arham drove the bike down the dirty street'
'I jumped over the aesthetic mountains down the magnificent Himalayas lake'
'I drank the Argentina'
'laptop drank the hot tea'
'I drank the tea'
Non Recognizable Sentences:
'I drank the coffee',
'Argetina and America are neighbours',
'''
sentence = ['I and Arham drove the bike down the dirty street',
'I jumped over the aesthetic mountains down the magnificent Himalayas lake',
'I drank the Argentina',
'laptop drank the hot tea',
'I drank the tea',
'I drank the coffee',
'Argetina and America are neighbours',
]
for i in sentence:
print(i,':')
parse('./english_grammar.txt', i)
print('')
| {"/parser1.py": ["/cyk.py"], "/ParseFunc.py": ["/cyk.py"], "/translator.py": ["/chomsky.py", "/cyk.py"], "/cyk.py": ["/chomsky.py"], "/fun_gen.py": ["/chomsky.py", "/cyk.py"]} |
42,660 | arhamhameed/Language-Parsers-and-Translators | refs/heads/master | /ParseFunc.py | from cyk import parse
'''
The array function contains the test functions that our machine recognizes or not.
Recognizable functions:
f ( x ) = x * 5
f ( x , z ) = sin ( x * z )
f ( x , z ) = ( x + z ) / 2
Non Recognizable functions:
x + y + z
f ( a ) = a / 2
g ( x ) = f ( z )
'''
function = ['f ( x ) = x * 5',
'f ( x , z ) = sin ( x * z )',
'f ( x , z ) = ( x + z ) / 2',
'x + y + z',
'f ( a ) = a / 2',
'g ( x ) = f ( z )',
]
for i in function:
print(i,':')
parse('./function_grammar.txt', i)
print('')
| {"/parser1.py": ["/cyk.py"], "/ParseFunc.py": ["/cyk.py"], "/translator.py": ["/chomsky.py", "/cyk.py"], "/cyk.py": ["/chomsky.py"], "/fun_gen.py": ["/chomsky.py", "/cyk.py"]} |
42,661 | arhamhameed/Language-Parsers-and-Translators | refs/heads/master | /translator.py | from chomsky import convert_grammar, read_grammar
from cyk import parse
translation_key = {
'and': 'y','but': 'pero', 'or': 'o',
'a': 'uno', 'an': 'un', 'this': 'esto', 'the': 'el',
'from': 'desde', 'to': 'a', 'over': 'encima', 'down': 'abajo', 'in': 'en',
'me': 'yo', 'I': 'yo', 'we': 'Nosotros', 'you': 'tú', 'they': 'ellos', 'it': 'eso',
'us': 'nosotros', 'he': 'él',
'fast': 'rápido', 'hot': 'caliente', 'magnificent': 'magnífico', 'aesthetic': 'estético',
'dirty': 'sucio',
'drove': 'condujo', 'drank': 'bebió', 'jumped': 'saltó', 'ran': 'corrió', 'fought': 'luchó',
'Argentina': 'Argentina', 'Pakistan': 'Pakistán', 'Arham': 'Arham', 'street': 'calle',
'tea': 'té', 'laptop': 'ordenador portátil', 'mountains': 'montañas', 'Himalayas': 'Himalaya',
'bike': 'Bicicleta', 'lake': 'Lago'
}
def translate(grammar_file_path: str, string: str, translation_key: dict) -> None:
    '''
    Translate a recognizable English sentence into Spanish, word by word.

    The sentence is first checked for membership in the language defined by
    the grammar file; only recognized sentences are translated, using the
    English -> Spanish mapping stored in ``translation_key``.

    param: (1) the grammar file path that defines grammar
           (2) the sentence that we are testing for recognizability and translating
           (3) the word by word translation key
    return: None; the translation (or an error message) is printed.
    '''
    if not parse(grammar_file_path, string):
        print("Sorry the String is not recognized")
    else:
        # Look up each word independently; word order is preserved.
        trans = [translation_key[word] for word in string.split()]
        print('Translation:', ' '.join(trans))
sentence = ['I and Arham drove the bike down the dirty street',
'I jumped over the aesthetic mountains down the magnificent Himalayas lake',
'I drank the Argentina',
'laptop drank the hot tea',
'I drank the tea',
'I drank the coffee',
'Argetina and America are neighbours',
]
for i in sentence:
print(i,':')
translate('./english_grammar.txt', i, translation_key)
print('')
| {"/parser1.py": ["/cyk.py"], "/ParseFunc.py": ["/cyk.py"], "/translator.py": ["/chomsky.py", "/cyk.py"], "/cyk.py": ["/chomsky.py"], "/fun_gen.py": ["/chomsky.py", "/cyk.py"]} |
42,662 | arhamhameed/Language-Parsers-and-Translators | refs/heads/master | /cyk.py | import typing
from typing import List
from chomsky import read_grammar, convert_grammar
def parse(grammar_file_path: str, sentence: str) -> bool:
    """
    Decide whether *sentence* is generated by the grammar in *grammar_file_path*.

    The code for Cocke-Younger-Kasami (CYK) algorithm is taken from this
    source: https://github.com/RobMcH/CYK-Parser/blob/master/cyk_parser.py
    I added documentations to understand it better and removed the functions that were not required.
    Also changed the code to align with the task requirements.

    param: (1) the file path of the grammar (converted to CNF internally)
           (2) the sentence that we are testing for recognizability by our Machine
    return: boolean value of True or False (the verdict is also printed)
    """
    # Imports the function from chomsky.py
    # Converts the grammar file to Chomsky Normal Form rules.
    grammar = convert_grammar(grammar_file_path)
    new_string = sentence.split()
    length = len(new_string)
    # matrix[span - 1][start] holds the non-terminals that derive the span
    # of `span` consecutive words beginning at token index `start`.
    matrix = [[[] for x in range(length)] for y in range(length)]
    # Base case: spans of length 1, filled from terminal rules A -> 'word'.
    for word in range(length):
        for rule in grammar:
            if rule[1] == "\'%s\'" % new_string[word]:
                matrix[0][word].append(rule[0])
    # Inductive case: combine two adjacent sub-spans via binary rules A -> B C.
    for words_to_consider in range(2, length + 1):
        for start_cell in range(0, length - words_to_consider + 1):
            for left_size in range(1, words_to_consider):
                right_size = words_to_consider - left_size
                for rule in grammar:
                    if [x for x in matrix[left_size - 1][start_cell] if x == rule[1]]:
                        if [x for x in matrix[right_size - 1][start_cell + left_size] if x == rule[2]]:
                            matrix[words_to_consider - 1][start_cell].append(rule[0])
    # The sentence is in the language iff the start symbol (head of the
    # first rule) derives the full-length span starting at token 0.
    sentence = grammar[0][0]
    # Returns if the sentence is contained in a language or not
    if [n for n in matrix[-1][0] if n == sentence]:
        print("The sentence belongs to the language produced by the grammar :)")
        return True
    else:
        print("The sentence does not belongs to the language produced by the grammar :(")
        return False
| {"/parser1.py": ["/cyk.py"], "/ParseFunc.py": ["/cyk.py"], "/translator.py": ["/chomsky.py", "/cyk.py"], "/cyk.py": ["/chomsky.py"], "/fun_gen.py": ["/chomsky.py", "/cyk.py"]} |
42,663 | arhamhameed/Language-Parsers-and-Translators | refs/heads/master | /fun_gen.py | from chomsky import convert_grammar
from cyk import parse
def fun_gen(function_generator: str) -> None:
    '''
    Determine whether the given input belongs to the function grammar and,
    if it does, auto-generate (print) an equivalent Python function.

    param: the string that is checked for membership and converted into
           auto-generated Python code, e.g. "f ( x ) = x * 5"
    return: None; the generated code (or an error message) is printed.
    '''
    if parse('./function_grammar.txt', function_generator) != True:
        print('Therefore, code cannot be generated')
    else:
        # Strip the spaces ("f ( x ) = x * 5" -> "f(x)=x*5"), then split
        # into signature and body at the '='.
        replace = function_generator.replace(' ', '')
        left, right = replace.split('=')
        print('def {}: \n y = {} \n return y'.format(left, right))
'''
The array function contains the test functions that our machine recognizes or not.
Recognizable functions:
f ( x ) = x * 5
f ( x , z ) = sin ( x * z )
f ( x , z ) = ( x + z ) / 2
Non Recognizable functions:
x + y + z
f ( a ) = a / 2
g ( x ) = f ( z )
'''
function = ['f ( x ) = x * 5',
'f ( x , z ) = sin ( x * z )',
'f ( x , z ) = ( x + z ) / 2',
'x + y + z',
'f ( a ) = a / 2',
'g ( x ) = f ( z )',
]
for i in function:
print(i,':')
fun_gen(i)
print('')
| {"/parser1.py": ["/cyk.py"], "/ParseFunc.py": ["/cyk.py"], "/translator.py": ["/chomsky.py", "/cyk.py"], "/cyk.py": ["/chomsky.py"], "/fun_gen.py": ["/chomsky.py", "/cyk.py"]} |
42,664 | arhamhameed/Language-Parsers-and-Translators | refs/heads/master | /chomsky.py | '''
The code I used to convert the Context Free Grammar to Chomsky Normal form is taken from this
source: https://github.com/RobMcH/CYK-Parser/blob/master/grammar_converter.py
I added documentations to understand it better and removed the functions that were not required.
Also changed the code to align with the task requirements.
'''
import typing
from typing import List
def read_grammar(file_path: str) -> List[str]:
    """
    Read the given grammar file and return its lines, one rule per entry.

    param: the file path to the grammar file
    return: the list of rules (raw lines, trailing newlines included).
    """
    with open(file_path, 'r') as source:
        return source.readlines()
def convert_grammar(grammar_file_path: str) -> List[List[str]]:
    """
    Converts a context-free grammar into the Chomsky Normal Form.

    Terminals embedded in long rules are factored out behind fresh
    non-terminals, rules with more than two right-hand symbols are
    binarized, and unit productions (A -> B) are eliminated by expansion.

    param: the grammar_file_path
    return: A nested list where sublists contain a single rule
            ([head, body...]); each body has at most two symbols.
    """
    rules = read_grammar(grammar_file_path)
    grammar = []
    vert = "|"
    arrow = "->"
    # the for loop divides the left and right side of the arrow in two parts;
    # the left side is stored in an array and the right side is further
    # divided based on the verticals (alternatives).
    for rule in rules:
        if "|" in rule:
            left, right = rule.split(arrow)
            left = [left.strip()]
            right_items = right.split(vert)
            for item in right_items:
                item_list = item.split()
                grammar.append(left + item_list)
        else:
            grammar.append(rule.replace(arrow, '').split())
    # dictlist maps each head symbol to the list of bodies it derives;
    # used later to expand unit productions.
    dictlist = {}
    unit_productions = []
    result = []
    # Counter used to mint unique fresh non-terminal names (head + index).
    index = 0
    for rule in grammar:
        new = []
        # Rule is in form A -> X, so back it up for later and continue with the next rule.
        if len(rule) == 2 and rule[1][0] != "'":
            unit_productions.append(rule)
            # looks if the rule already exists or not
            if rule[0] in dictlist:
                dictlist[rule[0]] += [rule[1:]]
            else:
                dictlist[rule[0]] = [rule[1:]]
            continue
        # Rule is in form A -> X B C [...] or A -> X a.
        elif len(rule) > 2:
            terminals = []
            for i in range(len(rule)):
                if rule[i][0] == "'":
                    terminals.append((rule[i], i))
            if terminals:
                for item in terminals:
                    # Create a new non terminal symbol and replace the terminal symbol with it.
                    # The non terminal symbol derives the replaced terminal symbol.
                    rule[item[1]] = str(rule[0]) + str(index)
                    new.append([str(rule[0]) + str(index), item[0]])
                    index += 1
            # Binarize: peel off the first two body symbols behind a fresh
            # non-terminal until only two remain.
            while len(rule) > 3:
                new.append([str(rule[0]) + str(index), rule[1], rule[2]])
                rule = [rule[0]] + [str(rule[0]) + str(index)] + rule[3:]
                index += 1
        # Again looks if the rule already exists or not
        if rule[0] in dictlist:
            dictlist[rule[0]] += [rule[1:]]
        else:
            dictlist[rule[0]] = [rule[1:]]
        result.append(rule)
        if new:
            for new_rule in new:
                result.append(new_rule)
    # Handle the unit productions (A -> X): replace A -> B by A -> body for
    # every body of B, re-queueing any expansion that is itself a unit rule.
    while unit_productions:
        rule = unit_productions.pop()
        if rule[1] in dictlist:
            for item in dictlist[rule[1]]:
                new_rule = [rule[0]] + item
                if len(new_rule) > 2 or new_rule[1][0] == "'":
                    result.append(new_rule)
                else:
                    unit_productions.append(new_rule)
        if rule[0] in dictlist:
            dictlist[rule[0]] += [rule[1:]]
        else:
            dictlist[rule[0]] = [rule[1:]]
    return result
| {"/parser1.py": ["/cyk.py"], "/ParseFunc.py": ["/cyk.py"], "/translator.py": ["/chomsky.py", "/cyk.py"], "/cyk.py": ["/chomsky.py"], "/fun_gen.py": ["/chomsky.py", "/cyk.py"]} |
42,680 | skiesx/django-project-version | refs/heads/master | /djversion/context_processors.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/context_processors.py
from __future__ import unicode_literals
from djversion.utils import get_version
__all__ = [
"version",
]
def version(request):
    """
    Context processor exposing the formatted project version to templates.

    Args:
        request: (django.http.request.HttpRequest) django request instance.

    Returns:
        dict: dict with "VERSION" key with value of project version.
    """
    context = {"VERSION": get_version()}
    return context
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,681 | skiesx/django-project-version | refs/heads/master | /djversion/templatetags/djversion_tags.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/templatetags/djversion_tags.py
from __future__ import unicode_literals
from django import template
from djversion.utils import get_version
__all__ = [
"project_version",
]
register = template.Library()
@register.simple_tag()
def project_version():
    """
    Template tag rendering the formatted project version string.

    Returns:
        string: string with project version or empty string.
    """
    return get_version()
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,682 | skiesx/django-project-version | refs/heads/master | /djversion/settings.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/settings.py
from __future__ import unicode_literals
from django.conf import settings
__all__ = [
"VERSION",
"UPDATED",
"FORMAT_STRING",
]
VERSION = getattr(settings, "DJVERSION_VERSION", None)
UPDATED = getattr(settings, "DJVERSION_UPDATED", None)
FORMAT_STRING = getattr(settings, "DJVERSION_FORMAT_STRING", "{version} ({updated})")
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,683 | skiesx/django-project-version | refs/heads/master | /setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# django-project-version
# setup.py
from setuptools import (
setup,
find_packages,
)
# metadata
VERSION = (0, 2, 4)
__version__ = ".".join(map(str, VERSION))
setup(
name="django-project-version",
version=__version__,
packages=find_packages(),
install_requires=[
"Django",
],
author="SkiesX",
author_email="SkiesX@ex.ua",
description="Django reusable app to show your project version",
license="MIT",
url="https://github.com/skiesx/django-project-version/",
download_url="https://github.com/skiesx/django-project-version/archive/{version}.tar.gz".format(**{"version": __version__, }),
zip_safe=False,
include_package_data=True,
classifiers=[
"Environment :: Plugins",
"License :: OSI Approved :: MIT License",
"Operating System :: Unix",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Topic :: Utilities",
"Framework :: Django :: 1.5",
"Framework :: Django :: 1.6",
"Framework :: Django :: 1.7",
"Framework :: Django :: 1.8",
"Framework :: Django :: 1.9",
"Framework :: Django :: 1.10",
"Framework :: Django :: 1.11",
"Framework :: Django :: 2.0",
]
)
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,684 | skiesx/django-project-version | refs/heads/master | /djversion/models.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/models.py
from __future__ import unicode_literals
__all__ = []
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,685 | skiesx/django-project-version | refs/heads/master | /djversion/utils.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/utils.py
from __future__ import unicode_literals
from datetime import date, datetime
from django.templatetags.l10n import localize
from djversion.settings import (
VERSION,
UPDATED,
FORMAT_STRING,
)
__all__ = [
"get_version",
]
def get_version():
    """
    Return formatted version string.

    When both a version and a date/datetime timestamp are configured they
    are combined via FORMAT_STRING; otherwise whichever one is usable is
    returned on its own, falling back to the empty string.

    Returns:
        string: string with project version or empty string.
    """
    # datetime subclasses date, but the explicit tuple documents the intent.
    updated_is_date = isinstance(UPDATED, (date, datetime))
    if VERSION and UPDATED and updated_is_date:
        return FORMAT_STRING.format(version=VERSION, updated=UPDATED)
    elif VERSION:
        return VERSION
    elif UPDATED:
        # A non-date UPDATED value cannot be localized; render nothing.
        return localize(UPDATED) if updated_is_date else ""
    else:
        return ""
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,686 | skiesx/django-project-version | refs/heads/master | /djversion/templatetags/__init__.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/templatetags/__init__.py
from __future__ import unicode_literals
__all__ = []
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,687 | skiesx/django-project-version | refs/heads/master | /djversion/__init__.py | # -*- coding: utf-8 -*-
# django-project-version
# djversion/__init__.py
from __future__ import unicode_literals
__all__ = [
"default_app_config",
]
default_app_config = "djversion.apps.Config"
| {"/djversion/context_processors.py": ["/djversion/utils.py"], "/djversion/templatetags/djversion_tags.py": ["/djversion/utils.py"], "/djversion/utils.py": ["/djversion/settings.py"]} |
42,691 | lucaputelli/ddi_joint | refs/heads/master | /constants.py | from spacy.util import compile_infix_regex, compile_prefix_regex, compile_suffix_regex
import spacy
from spacy.language import Language, Tokenizer
from gensim.models import Word2Vec
from pre_processing_lib import get_character_dictionary
def custom_tokenizer(nlp):
    """Build a spaCy Tokenizer that also splits on ';' and '*' (and, as
    infixes, on '(', ')', '/', '-').

    Extends the default prefix/suffix/infix patterns so that these
    characters become token boundaries.
    """
    prefix_re = compile_prefix_regex(Language.Defaults.prefixes + (';', '\*'))
    suffix_re = compile_suffix_regex(Language.Defaults.suffixes + (';', '\*'))
    infix_re = compile_infix_regex(Language.Defaults.infixes + ('(', ')', "/", "-", ";", "\*"))
    return Tokenizer(nlp.vocab, prefix_search=prefix_re.search,
                     suffix_search=suffix_re.search,
                     infix_finditer=infix_re.finditer,
                     token_match=None)
nlp = spacy.load('en')
nlp.tokenizer = custom_tokenizer(nlp)
word_model = Word2Vec.load('pub_med_retrained_ddi_word_embedding_200.model')
tag_model = Word2Vec.load('ddi_pos_embedding.model')
number_of_charachters = len(get_character_dictionary().keys())+1 | {"/error_check.py": ["/relation_format_extraction.py"], "/data_model.py": ["/constants.py"], "/char_embedding_test.py": ["/model.py", "/relation_format_extraction.py", "/constants.py", "/data_model.py", "/post_processing.py"], "/test.py": ["/post_processing.py", "/model.py", "/relation_format_extraction.py"], "/model.py": ["/constants.py"], "/relation_format_extraction.py": ["/data_model.py", "/constants.py"]} |
42,692 | lucaputelli/ddi_joint | refs/heads/master | /error_check.py | from relation_format_extraction import sentences_from_prediction
from pre_processing_lib import get_sentences
def has_numbers(string):
    """Return True if *string* contains at least one decimal digit."""
    for character in string:
        if character.isdigit():
            return True
    return False
def get_entity_dict():
    """Map entity id -> entity text for every <entity> tag in the test set.

    Reads the XML sentences under 'Dataset/Test/Overall'.

    Returns:
        dict: entity id (str) to entity surface text (str).
    """
    entity_dict = dict()
    # Iterate the sentences directly instead of indexing by position.
    for xml_sentence in get_sentences('Dataset/Test/Overall'):
        for entity in xml_sentence.getElementsByTagName('entity'):
            # Plain assignment instead of dict.__setitem__; the local name
            # also avoids shadowing the builtin `id`.
            entity_id = entity.attributes['id'].value
            entity_dict[entity_id] = entity.attributes['text'].value
    return entity_dict
sentences = sentences_from_prediction('inputSent2.txt', 'predLabels2_modified.txt')
entity_dict = get_entity_dict()
only_capital = []
dash = []
numbers = []
parenthesis = []
for id in entity_dict.keys():
text : str = entity_dict.get(id)
if text.upper() == text:
only_capital.append(text)
if '-' in text:
dash.append(text)
if has_numbers(text):
numbers.append(text)
if '(' in text and ')' in text:
parenthesis.append(text)
total_approximate = []
total_missing = []
for s in sentences:
total_approximate += s.approximate_drugs
total_missing += s.missing_drugs
text_approximate = []
text_missing = []
for id in total_approximate:
text_approximate.append(entity_dict.get(id))
for id in total_missing:
text_missing.append(entity_dict.get(id))
num = 0
for d in dash:
if d in text_missing or d in text_approximate:
num += 1
print(num / len(dash))
num = 0
for n in numbers:
if n in text_approximate or n in text_missing:
num += 1
print(num/len(numbers))
num = 0
for o in only_capital:
if o in text_approximate or o in text_missing:
num += 1
print(num/len(only_capital))
num = 0
for p in parenthesis:
if p in text_approximate or p in text_missing:
num += 1
print(num/len(parenthesis))
print(text_missing + text_approximate) | {"/error_check.py": ["/relation_format_extraction.py"], "/data_model.py": ["/constants.py"], "/char_embedding_test.py": ["/model.py", "/relation_format_extraction.py", "/constants.py", "/data_model.py", "/post_processing.py"], "/test.py": ["/post_processing.py", "/model.py", "/relation_format_extraction.py"], "/model.py": ["/constants.py"], "/relation_format_extraction.py": ["/data_model.py", "/constants.py"]} |
42,693 | lucaputelli/ddi_joint | refs/heads/master | /data_model.py | from typing import List
from spacy.language import Doc
from constants import *
class NERToken:
    """A single token paired with its NER label."""

    def __init__(self, word: str, label: str):
        self.word = word
        self.label = label

    def __str__(self):
        # Rendered as "word label".
        return f'{self.word} {self.label}'

    def __len__(self):
        # Length of a token is the length of its surface form.
        return len(self.word)
class CompleteNERToken:
    """An NER token that additionally carries the identifier of its entity."""

    def __init__(self, word: str, label: str, id: str):
        self.word = word
        self.label = label
        self.id = id

    def __str__(self):
        # The identifier is intentionally omitted from the string form.
        return f'{self.word} {self.label}'

    def __len__(self):
        return len(self.word)
class SequencePair:
    """Two token-aligned label sequences over the same words.

    Both inputs are sequences of NERToken-like objects; they must have the
    same length, and the words are taken from the first sequence.
    """

    def __init__(self, first_sequence, second_sequence):
        self.first_sequence = first_sequence
        self.second_sequence = second_sequence
        assert len(self.first_sequence) == len(self.second_sequence)
        # Shared surface forms plus one label list per sequence.
        self.word_list = [token.word for token in self.first_sequence]
        self.first_labels = [token.label for token in self.first_sequence]
        self.second_labels = [token.label for token in self.second_sequence]
        self.doc = self.create_doc()

    def __len__(self):
        return len(self.first_sequence)

    def create_doc(self):
        """Rebuild a spaCy Doc from the word list, reconstructing spacing.

        Tokens adjacent to one of the infix characters get no trailing
        space; all other tokens do. Falls back to default construction if
        spaCy rejects the spaces list.
        """
        tokens = self.word_list
        infixes = ['(', ')', '/', '-', ';', '*']
        spaces = list()
        for i in range(len(tokens) - 1):
            actual = tokens[i]
            next = tokens[i + 1]
            # No space around the custom tokenizer's split characters.
            if actual in infixes or next in infixes:
                space = False
            else:
                space = True
            spaces.append(space)
        spaces.append(False)
        try:
            doc = Doc(nlp.vocab, tokens, spaces)
        except ValueError:
            doc = Doc(nlp.vocab, tokens)
        return doc
class Interval:
    """A pair of integer bounds, rendered as "low-high"."""

    def __init__(self, a: int, b: int):
        self.low = a
        self.high = b

    def __str__(self):
        return f'{self.low}-{self.high}'
class PairWithInterval:
def __init__(self, head_id: str, head_interval: Interval, tail_id: str, tail_interval: Interval):
self.head_id = head_id
self.head_interval = head_interval
self.tail_id = tail_id
self.tail_interval = tail_interval
def __str__(self):
return self.head_id + '(' + str(self.head_interval) + '), ' + self.tail_id + '(' + str(self.tail_interval) + ')'
class Sentence:
    """A sentence with gold NER labels and (optionally) predicted labels.

    After ``check_correct`` runs, the gold/predicted drug mentions are
    bucketed into ``correct_drugs`` (exact string match),
    ``approximate_drugs`` (substring covering >= half of the mention),
    ``wrong_drugs`` (spurious predictions) and ``missing_drugs``
    (unmatched gold mentions).
    """

    def __init__(self, id: str, token_with_labels: List[NERToken] = None,
                 token_with_predictions: List[NERToken] = None):
        self.id = id
        self.original_tokens = token_with_labels
        self.predicted_tokens = token_with_predictions
        # Gold mentions keyed by entity id -> (start, end) token span.
        self.label_dict = Sentence.dict_building(token_with_labels, with_id=True)
        # Predicted mentions as a plain list of (start, end) spans.
        self.prediction_list = Sentence.dict_building(token_with_predictions, with_id=False)
        self.correct_drugs: dict = None
        self.approximate_drugs: dict = None
        self.merged_drug_starts = None
        self.wrong_drugs: dict = None
        self.missing_drugs: dict = None
        self.doc = None
        self.complete_list = None
        self.count_approximate = True

    def __str__(self):
        if self.doc is None:
            return str(self.original_tokens)
        return str(self.doc)

    def __len__(self):
        return len(self.original_tokens)

    def set_predictions(self, token_with_predictions: List[NERToken]):
        """Attach predictions and immediately re-score them against gold."""
        self.predicted_tokens = token_with_predictions
        self.prediction_list = Sentence.dict_building(token_with_predictions, with_id=False)
        self.check_correct()

    @staticmethod
    def dict_building(ner_tokens: List[NERToken], with_id: bool):
        """Extract entity spans from IOB-style labels.

        Returns ``{entity_id: (start, end)}`` when ``with_id`` is true (the
        id is the ``B-`` label suffix), otherwise a list of ``(start, end)``
        spans; ``end`` is exclusive. Returns ``None`` for ``None`` input.
        """
        if ner_tokens is None:
            return None
        labels = [t.label for t in ner_tokens]
        drug_starts = [i for i in range(len(labels)) if labels[i].startswith('B')]
        drug_dict = dict()
        drug_list = list()
        for index in range(len(drug_starts)):
            i = drug_starts[index]
            id = labels[i].replace('B-', '')
            j = i + 1
            # Consume the I- continuation labels of this mention.
            while j < len(labels):
                if labels[j] == 'O' or labels[j].startswith('B'):
                    break
                else:
                    j += 1
            drug_tokens = (i, j)
            if with_id:
                drug_dict[id] = drug_tokens
            else:
                drug_list.append(drug_tokens)
        if with_id:
            return drug_dict
        else:
            return drug_list

    def check_correct(self):
        """Compare predicted spans against gold and bucket the results."""
        self.correct_drugs = dict()
        self.approximate_drugs = dict()
        self.wrong_drugs = dict()
        self.missing_drugs = dict()
        wrong_index = 0
        for k in self.label_dict.keys():
            correct_start, correct_end = self.label_dict.get(k)
            for start, end in self.prediction_list:
                # Only compare spans whose right edge is near the gold one.
                if abs(end - correct_end) <= 5:
                    original_string = ''
                    predicted_string = ''
                    for i in range(correct_start, correct_end):
                        original_string += self.original_tokens[i].word
                    for i in range(start, end):
                        predicted_string += self.predicted_tokens[i].word
                    if original_string == predicted_string:
                        if (start, end) not in self.correct_drugs.values():
                            self.correct_drugs[k] = (start, end)
                            break
                    elif predicted_string in original_string:
                        if len(predicted_string) >= 0.5 * len(original_string) and self.count_approximate:
                            self.approximate_drugs[k] = (start, end)
        # Predictions matching no gold mention are "wrong"; they get ids
        # derived from the sentence id.
        for (start, end) in self.prediction_list:
            if (start, end) not in self.correct_drugs.values() and (start, end) not in self.approximate_drugs.values():
                wrong_id = self.id + '.w' + str(wrong_index)
                self.wrong_drugs[wrong_id] = (start, end)
                wrong_index += 1
        for k in self.label_dict:
            if k not in self.correct_drugs.keys() and k not in self.approximate_drugs.keys():
                self.missing_drugs[k] = self.label_dict.get(k)
        complete_list = list()
        for k in self.correct_drugs.keys():
            start, end = self.correct_drugs.get(k)
            complete_list.append((start, end, k, 'C'))
        for k in self.approximate_drugs.keys():
            start, end = self.approximate_drugs.get(k)
            complete_list.append((start, end, k, 'A'))
        for k in self.wrong_drugs.keys():
            start, end = self.wrong_drugs.get(k)
            complete_list.append((start, end, k, 'W'))
        # Sorted by span position; the original's debug print was removed.
        self.complete_list = sorted(complete_list)

    def generate_pairs(self):
        """Return every unordered pair of gold entities as PairWithInterval."""
        pairs = list()
        for i in range(len(self.label_dict.keys()) - 1):
            head_key = list(self.label_dict.keys())[i]
            head_a, head_b = self.label_dict.get(head_key)
            head_interval = Interval(head_a, head_b)
            for j in range(i + 1, len(self.label_dict.keys())):
                tail_key = list(self.label_dict.keys())[j]
                tail_a, tail_b = self.label_dict.get(tail_key)
                tail_interval = Interval(tail_a, tail_b)
                pair = PairWithInterval(head_key, head_interval, tail_key, tail_interval)
                pairs.append(pair)
        return pairs

    def no_substitution_doc(self) -> Doc:
        """Build (and cache in ``self.doc``) a spaCy Doc from the original tokens."""
        tokens = self.original_tokens
        infixes = ['(', ')', '/', '-', ';', '*']
        spaces = list()
        for i in range(len(tokens) - 1):
            actual = tokens[i]
            following = tokens[i + 1]
            # Separator characters are glued to their neighbours.
            if actual.word in infixes or following.word in infixes:
                space = False
            else:
                space = True
            spaces.append(space)
        spaces.append(False)
        words = [t.word for t in tokens]
        try:
            doc = Doc(nlp.vocab, words, spaces)
        except ValueError:
            doc = Doc(nlp.vocab, words)
        self.doc = doc
        return doc
class Pair:
    """A candidate drug-drug interaction pair inside one sentence.

    ``type`` is the DDI interaction class; it stays ``None`` until
    ``set_type`` is called by the classification step.
    """

    def __init__(self, e1_id: str, e2_id: str, e1_index: int, e2_index: int, e1_text: str, e2_text: str,
                 sentence: Sentence):
        self.e1_id = e1_id
        self.e2_id = e2_id
        self.e1_index = e1_index
        self.e2_index = e2_index
        self.e1_text = e1_text
        self.e2_text = e2_text
        self.type = None
        self.sentence = sentence

    def set_type(self, type: str):
        """Record the interaction type assigned to this pair."""
        self.type = type
class JointInstance:
    """One joint NER+RE instance.

    Bundles the (possibly transformed) ``doc``, the untouched
    ``original_doc`` and the entity ``pair``; ``class_value`` and
    ``dependency_path`` are filled in later by the pipeline.
    """

    def __init__(self, doc: Doc, original_doc: Doc, pair: Pair):
        self.doc = doc
        self.original_doc = original_doc
        self.e1_id = pair.e1_id
        self.e2_id = pair.e2_id
        self.pair = pair
        self.class_value = ''
        self.type = self.pair.type
        self.dependency_path = None

    def __str__(self):
        return self.doc.text

    def set_dependency_path(self, dependency_path):
        """Attach the dependency path between the two entities."""
        self.dependency_path = dependency_path

    def __len__(self):
        return len(self.doc)
42,694 | lucaputelli/ddi_joint | refs/heads/master | /post_processing.py | import numpy as np
from matplotlib import pyplot
from sklearn.metrics import confusion_matrix, classification_report
from keras.callbacks import History
def plot(folder: str, name: str, history: History):
    """Save a loss/accuracy training-history figure to ``<folder>/<name>.png``.

    Left panel: train vs validation loss; right panel: train vs validation
    accuracy. The try/except pairs cope with the Keras metric-name change
    between 'acc'/'val_acc' and 'accuracy'/'val_accuracy'.
    """
    pyplot.clf()
    pyplot.figure(1, figsize=(13, 6))
    pyplot.subplot(1, 2, 1)
    pyplot.plot(history.history['loss'])
    pyplot.plot(history.history['val_loss'])
    pyplot.title('model train vs validation loss')
    pyplot.ylabel('loss')
    pyplot.xlabel('epoch')
    pyplot.legend(['train', 'validation'], loc='upper right')
    pyplot.subplot(1, 2, 2)
    try:
        pyplot.plot(history.history['accuracy'])
    except KeyError:
        # Older Keras versions report 'acc' instead of 'accuracy'.
        pyplot.plot(history.history['acc'])
    try:
        pyplot.plot(history.history['val_accuracy'])
    except KeyError:
        pyplot.plot(history.history['val_acc'])
    pyplot.title('model train vs validation accuracy')
    pyplot.ylabel('accuracy')
    pyplot.xlabel('epoch')
    pyplot.legend(['train', 'validation'], loc='upper right')
    pyplot.tight_layout()
    pyplot.savefig(folder + '/' + name + '.png')
def joint_plot(folder: str, name: str, history: History):
    """Save a 2x2 grid of train/validation curves for the joint model.

    Panels (row-major): first-CRF loss, first-CRF accuracy, second-CRF
    loss, second-CRF accuracy. Output goes to ``<folder>/<name>.png``.
    """
    pyplot.clf()
    fig, axs = pyplot.subplots(2, 2)
    panels = [
        ((0, 0), 'first_crf_loss'),
        ((0, 1), 'first_crf_acc'),
        ((1, 0), 'second_crf_loss'),
        ((1, 1), 'second_crf_acc'),
    ]
    for (row, col), key in panels:
        axs[row, col].plot(history.history[key])
        axs[row, col].plot(history.history['val_' + key])
    pyplot.savefig(folder + '/' + name + '.png')
def metrics(numeric_labels, t_predictions):
    """Confusion matrix, per-class report and micro P/R/F for DDI classes.

    Class 0 ('unrelated') is excluded from the micro-averaged scores:
    the per-class FP/FN/TP vectors are sliced from index 1 before summing.
    """
    target_names = ['unrelated', 'effect', 'mechanism', 'advise', 'int']
    matrix = confusion_matrix(numeric_labels, t_predictions)
    diagonal = np.diag(matrix)
    false_positives = (matrix.sum(axis=0) - diagonal)[1:]
    false_negatives = (matrix.sum(axis=1) - diagonal)[1:]
    true_positives = diagonal[1:]
    fp = np.sum(false_positives)
    fn = np.sum(false_negatives)
    tp = np.sum(true_positives)
    overall_precision = tp / (tp + fp)
    overall_recall = tp / (tp + fn)
    overall_f_score = 2 * overall_precision * overall_recall / (overall_precision + overall_recall)
    report = classification_report(numeric_labels, t_predictions, labels=[0, 1, 2, 3, 4], target_names=target_names)
    return matrix, report, overall_precision, overall_recall, overall_f_score
def error_analysis(folder, t_selected_sents, t_negative_sents, numeric_labels, numeric_predictions):
    """Plot error rates versus sentence length, one PNG per class.

    NOTE(review): ``indices`` is a plain Python bool list, so
    ``len(indices)`` is always the TOTAL number of instances, never the
    count of length-(i+1) matches; likewise ``len(indices0)`` ..
    ``len(indices4)`` after ``np.logical_and``. The ``> 0`` guards are
    therefore always true and every division normalises by the full
    dataset size — confirm whether a per-length denominator was intended.
    """
    # Maximum length
    lengths = [len(x) for x in t_selected_sents + t_negative_sents]
    max_length = max(lengths)
    # Error plots
    errors = np.zeros(max_length)
    errors0 = np.zeros(max_length)
    errors1 = np.zeros(max_length)
    errors2 = np.zeros(max_length)
    errors3 = np.zeros(max_length)
    errors4 = np.zeros(max_length)
    for i in range(max_length):
        # Boolean mask: instances whose sentence has exactly length i + 1.
        indices = [len(sent) == i + 1 for sent in t_selected_sents + t_negative_sents]
        if len(indices) > 0:
            errors[i] = np.sum(numeric_labels[indices] != numeric_predictions[indices]) / len(indices)
            # Restrict the mask to each gold class in turn.
            indices0 = np.logical_and(indices, [numeric_labels == 0])[0]
            if len(indices0) > 0:
                errors0[i] = np.sum(numeric_labels[indices0] != numeric_predictions[indices0]) / len(indices0)
            indices1 = np.logical_and(indices, [numeric_labels == 1])[0]
            if len(indices1) > 0:
                errors1[i] = np.sum(numeric_labels[indices1] != numeric_predictions[indices1]) / len(indices1)
            indices2 = np.logical_and(indices, [numeric_labels == 2])[0]
            if len(indices2) > 0:
                errors2[i] = np.sum(numeric_labels[indices2] != numeric_predictions[indices2]) / len(indices2)
            indices3 = np.logical_and(indices, [numeric_labels == 3])[0]
            if len(indices3) > 0:
                errors3[i] = np.sum(numeric_labels[indices3] != numeric_predictions[indices3]) / len(indices3)
            indices4 = np.logical_and(indices, [numeric_labels == 4])[0]
            if len(indices4) > 0:
                errors4[i] = np.sum(numeric_labels[indices4] != numeric_predictions[indices4]) / len(indices4)
    # One figure per curve, written into *folder*.
    pyplot.clf()
    pyplot.plot(range(1, max_length + 1), errors)
    pyplot.xlabel('Sentence length')
    pyplot.ylabel('% of errors (total)')
    pyplot.savefig(folder + '/errors_total.png')
    pyplot.clf()
    pyplot.plot(range(1, max_length + 1), errors0)
    pyplot.xlabel('Sentence length')
    pyplot.ylabel('% of errors (Class 0: unrelated)')
    pyplot.savefig(folder + '/errors0.png')
    pyplot.clf()
    pyplot.plot(range(1, max_length + 1), errors1)
    pyplot.xlabel('Sentence length')
    pyplot.ylabel('% of errors (Class 1: effect)')
    pyplot.savefig(folder + '/errors1.png')
    pyplot.clf()
    pyplot.plot(range(1, max_length + 1), errors2)
    pyplot.xlabel('Sentence length')
    pyplot.ylabel('Number of errors (Class 2: mechanism)')
    pyplot.savefig(folder + '/errors2.png')
    pyplot.clf()
    pyplot.plot(range(1, max_length + 1), errors3)
    pyplot.xlabel('Sentence length')
    pyplot.ylabel('% of errors (Class 3: advice)')
    pyplot.savefig(folder + '/errors3.png')
    pyplot.clf()
    pyplot.plot(range(1, max_length + 1), errors4)
    pyplot.xlabel('Sentence length')
    pyplot.ylabel('% of errors (Class 4: int)')
    pyplot.savefig(folder + '/errors4.png')
    return errors, errors0, errors1, errors2, errors3, errors4
42,695 | lucaputelli/ddi_joint | refs/heads/master | /ner_format_extraction.py | from pre_processing_lib import get_sentences, number_substitution
import spacy
from spacy.language import Language, Tokenizer
from typing import Dict
# Extend spaCy's default tokenizer so that ';', '*', parentheses, '/' and '-'
# are always split off as separate tokens (needed so tokens line up with the
# DDI corpus character offsets).
prefix_re = spacy.util.compile_prefix_regex(Language.Defaults.prefixes + (';', '\*', '\(', '\)'))
suffix_re = spacy.util.compile_suffix_regex(Language.Defaults.suffixes + (';' , '\*', '\(', '\)'))
infix_re = spacy.util.compile_infix_regex(Language.Defaults.infixes + ("/", "-", ";", "\*", '\(', '\)'))
# 'en' shortcut model — assumes spaCy 2.x with `python -m spacy download en`.
nlp = spacy.load('en')
nlp.tokenizer.suffix_search = suffix_re.search
nlp.tokenizer.prefix_search = prefix_re.search
nlp.tokenizer.infix_finditer = infix_re.finditer
class SequencePair:
    """Container for an aligned (NER sequence, relation sequence) pair."""

    def __init__(self, first_sequence, second_sequence):
        self.first_sequence = first_sequence
        self.second_sequence = second_sequence
def only_spaces(token: str) -> bool:
    """Return True when *token* consists solely of space characters.

    The empty string vacuously qualifies, matching the original behaviour.
    Only ' ' counts — other whitespace (tabs, newlines) does not.
    """
    return all(ch == ' ' for ch in token)
def ner_format(sentences, no_number = False, clean_separators = False) -> Dict:
    """Convert DOM ``<sentence>`` elements into per-token NER annotations.

    Returns ``{sentence_id: [(spacy_token, (type, entity_id)), ...]}`` where
    ``type`` is the entity type (or 'O') and ``entity_id`` the corpus id.

    :param no_number: replace numbers in the text before tokenising.
    :param clean_separators: drop '(' ')' '-' tokens inside labelled spans.
    """
    ner_instances = dict()
    for i in range(len(sentences)):
        s_id = sentences[i].attributes['id'].value
        entities = sentences[i].getElementsByTagName('entity')
        entity_tuples = list()
        text = str(sentences[i].attributes['text'].value)
        # NUMBER SUBSTITUTION
        if no_number:
            text = number_substitution(text)
        nlp_doc = nlp(text)
        tokens = list(nlp_doc)
        # BUG FIX: the original used `or`, which only dropped tokens that
        # contained BOTH '\n' and '\r'; keep tokens free of either.
        cleaned_tokens = [t for t in tokens if '\n' not in t.text and '\r' not in t.text]
        tokens = cleaned_tokens
        for token in tokens:
            # Diagnostic: a ';' embedded in a token means tokenisation failed.
            if ';' in token.text and token.text != ';':
                print(token.text)
        # displacy.serve(nlp_doc, style='dep')
        for j in range(len(entities)):
            e_id = entities[j].attributes['id'].value
            e_text = entities[j].attributes['text'].value
            offset_string = entities[j].attributes['charOffset'].value
            type = entities[j].attributes['type'].value
            split = str.split(offset_string, ';')
            if len(split) > 1:
                # Discontinuous mention: collapse it to one span from the
                # first fragment's start to the second fragment's end.
                first = int(split[0].split('-')[0])
                last = int(split[1].split('-')[1])
                entity_tuples.append((e_text, first, last, type, e_id))
                continue
            for s in split:
                offsets = str.split(s, "-")
                left = int(offsets[0])
                right = int(offsets[1])
                entity_tuples.append((e_text, left, right, type, e_id))
        # Map: start-token index -> (extra token count, type, entity id).
        drug_dict = dict()
        for entity in entity_tuples:
            left_tuple = entity[1]
            right_tuple = entity[2]
            type = entity[3]
            id = entity[4]
            for k in range(len(tokens)):
                try:
                    t = tokens[k]
                except IndexError:
                    print(tokens)
                    continue
                left_idx = t.idx
                length = len(t.text)
                right_idx = t.idx + length - 1
                if left_tuple == left_idx:
                    if right_idx == right_tuple:
                        # Single-token entity.
                        drug_dict[k] = (0, type, id)
                        break
                    else:
                        # Multi-token entity: extend until chars are covered.
                        n = 1
                        while right_idx < right_tuple:
                            if k + n >= len(tokens):
                                break
                            next_token = tokens[k + n]
                            right_idx = next_token.idx + len(next_token.text)
                            n = n + 1
                        if (right_idx - 1) >= right_tuple:
                            drug_dict[k] = (n, type, id)
                            break
        # Expand spans to per-token labels; unlabelled tokens get ('O', 'O').
        ner_index = dict()
        for i in range(len(tokens)):
            if i in drug_dict.keys():
                (span_index, type, id) = drug_dict.get(i)
                if span_index > 0:
                    for j in range(span_index):
                        ner_index[i + j] = (type, id)
                else:
                    ner_index[i] = (type, id)
            else:
                if i not in ner_index.keys():
                    ner_index[i] = ('O', 'O')
        ner_format = [(tokens[i], ner_index.get(i)) for i in ner_index.keys() if not only_spaces(tokens[i].text)]
        # Optionally drop separator tokens that fall inside entity names.
        if clean_separators:
            separators = ['(', ')', '-']
            cleaner_format = []
            for token, label_tuple in ner_format:
                if label_tuple != ('O', 'O') and token.text in separators:
                    continue
                else:
                    cleaner_format.append((token, label_tuple))
            ner_instances[s_id] = cleaner_format
        else:
            ner_instances[s_id] = ner_format
    return ner_instances
def write_ner_dataset(sents, file_name, no_number=False, clean_separators=False):
    """Dump *sents* in three NER file formats.

    Writes ``<file_name>.txt`` (token<TAB>IOB2 label), ``csv_<file_name>.csv``
    (token;IOB2-type;IOB2-id) and ``other_file.txt`` (one bracketed id-label
    sequence per sentence). Context managers guarantee the files are closed —
    the original leaked the csv and "other" handles.
    """
    ner_format_dictionary = ner_format(sents, no_number, clean_separators)
    b_i = 0
    with open(file_name + '.txt', 'w') as f, \
            open('csv_' + file_name + '.csv', 'w') as csv_file, \
            open('other_file.txt', 'w') as other_file:
        for sentence_key in ner_format_dictionary:
            instance = ner_format_dictionary.get(sentence_key)
            csv_file.write(sentence_key + ';;\n')
            string = sentence_key + ': ['
            for t in instance:
                key, (type, id) = t
                key_text = key.text
                # ';' is the CSV separator, so rewrite it in the CSV column.
                if key_text == ';':
                    key_text = ','
                if type == 'O':
                    b_i = 0
                    new_type = type
                else:
                    # First token of an entity gets B-, the rest I-.
                    if b_i == 0:
                        new_type = 'B-' + type
                        b_i = 1
                    else:
                        new_type = 'I-' + type
                f.write(key.text + '\t' + new_type + '\n')
                id_label = new_type
                if new_type.startswith('B'):
                    id_label = 'B-' + id
                elif new_type.startswith('I'):
                    id_label = 'I-' + id
                if instance.index(t) != len(instance) - 1:
                    string += id_label + ', '
                else:
                    string += id_label
                if new_type.startswith('B'):
                    csv_file.write(key_text + ';' + new_type + ';B-' + id + '\n')
                elif new_type.startswith('I'):
                    csv_file.write(key_text + ';' + new_type + ';I-' + id + '\n')
                else:
                    csv_file.write(key_text + ';' + new_type + ';' + new_type + '\n')
            f.write('\n')
            string += ']\n'
            other_file.write(string)
def double_sequence(sentences):
    """Build joint (NER sequence, relation sequence) training pairs.

    For every sentence, the first sequence carries IOB drug labels for one
    head entity ``e1`` and each second sequence carries the relation type of
    every tail ``e2`` paired with it. Entities with discontinuous charOffsets
    are collected in ``intersected`` and skipped. One SequencePair is emitted
    per (e1, e2) pairing.
    """
    sequence_pairs = list()
    intersected = list()
    for i in range(len(sentences)):
        entities = sentences[i].getElementsByTagName('entity')
        pairs = sentences[i].getElementsByTagName('pair')
        entity_tuples = list()
        text = str(sentences[i].attributes['text'].value)
        nlp_doc = nlp(text)
        tokens = list(nlp_doc)
        for j in range(len(entities)):
            e_id = entities[j].attributes['id'].value
            e_text = entities[j].attributes['text'].value
            offset_string = entities[j].attributes['charOffset'].value
            type = entities[j].attributes['type'].value
            split = str.split(offset_string, ';')
            if len(split) > 1:
                # Discontinuous mention: remember its id and skip it.
                intersected.append(e_id)
            else:
                for s in split:
                    offsets = str.split(s, "-")
                    left = int(offsets.__getitem__(0))
                    right = int(offsets.__getitem__(1))
                    entity = (e_text, left, right, type, e_id)
                    entity_tuples += [entity]
        # Align each entity's char offsets to token indices:
        # start-token -> (extra token count, type, entity id).
        drug_dict = dict()
        for entity in entity_tuples:
            left_tuple = entity[1]
            right_tuple = entity[2]
            type = entity[3]
            id = entity[4]
            found = False
            for k in range(len(tokens)):
                try:
                    t = tokens.__getitem__(k)
                except IndexError:
                    print(tokens)
                    continue
                left_idx = t.idx
                length = len(t.text)
                right_idx = t.idx + length - 1
                if left_tuple == left_idx:
                    if right_idx == right_tuple:
                        drug_dict.__setitem__(k, (0, type, id))
                        found = True
                    # print(t)
                    else:
                        # Multi-token entity: extend until offsets covered.
                        n = 1
                        # print(right_tuple, right_idx)
                        while right_idx <= right_tuple:
                            if k + n >= len(tokens):
                                break
                            next = tokens.__getitem__(k + n)
                            right_idx = next.idx + len(next.text)
                            n = n + 1
                        if right_idx >= right_tuple:
                            span = nlp_doc[k: k + n]
                            print(entity[0])
                            print(span)
                            drug_dict.__setitem__(k, (n, type, id))
                            found = True
            if not found:
                # Diagnostic: entity text that could not be aligned.
                print(entity[0])
        # entity id -> (first token, start index, span length, type)
        id_dictionary = dict()
        for k in drug_dict.keys():
            token = nlp_doc[k]
            (n, type, id) = drug_dict.get(k)
            id_dictionary.__setitem__(id, (token, k, n, type))
        # e1 id -> [(e2 id, relation type), ...]; missing 'type' on a true
        # pair defaults to 'int', non-interacting pairs are 'unrelated'.
        pairs_dict = dict()
        for p in pairs:
            id = p.attributes['id'].value
            e1 = p.attributes['e1'].value
            e2 = p.attributes['e2'].value
            ddi = p.attributes['ddi'].value
            if ddi == 'true':
                try:
                    type = p.attributes['type'].value
                except KeyError:
                    type = 'int'
            else:
                type = 'unrelated'
            if e1 not in pairs_dict.keys():
                pairs_dict.__setitem__(e1, [(e2, type)])
            else:
                pairs_dict.get(e1).append((e2, type))
        first_sequence = dict()
        second_sequences = list()
        for e1 in pairs_dict.keys():
            e2_list = [tuple[0] for tuple in pairs_dict.get(e1)]
            type_list = [tuple[1] for tuple in pairs_dict.get(e1)]
            second_sequence = dict()
            # NOTE(review): the inner `for j in range(span_index)` below
            # reuses loop variable j; harmless here, but easy to misread.
            for j in range(len(e2_list)):
                e2 = e2_list[j]
                rel_type = type_list[j]
                if e2 in intersected:
                    continue
                if e2 not in id_dictionary.keys():
                    print(e2)
                    continue
                (token, k, n, drug_type) = id_dictionary.get(e2)
                for i in range(len(nlp_doc)):
                    if i == k:
                        span_index = n
                        if span_index > 0:
                            for j in range(span_index):
                                second_sequence.__setitem__(i + j, rel_type)
                        else:
                            second_sequence.__setitem__(i, rel_type)
            # Fill unlabelled positions with 'O' and freeze as a list.
            for i in range(len(nlp_doc)):
                if i not in second_sequence.keys():
                    second_sequence.__setitem__(i, 'O')
            second_sequence = [(nlp_doc[i], second_sequence.get(i)) for i in range(len(nlp_doc))]
            second_sequences.append(second_sequence)
            if e1 in intersected or e1 not in id_dictionary:
                continue
            # IOB-label the head entity e1 in the (shared) first sequence.
            (token, k, span_index, drug_type) = id_dictionary.get(e1)
            for i in range(len(nlp_doc)):
                if i == k:
                    if span_index > 0:
                        for j in range(span_index):
                            if j == 0:
                                first_sequence.__setitem__(i + j, 'B-'+drug_type)
                            else:
                                first_sequence.__setitem__(i + j, 'I-' + drug_type)
                    else:
                        first_sequence.__setitem__(i, 'B-'+drug_type)
        for i in range(len(nlp_doc)):
            if i not in first_sequence.keys():
                first_sequence.__setitem__(i, 'O')
        first_sequence = [(nlp_doc[i], first_sequence.get(i)) for i in range(len(nlp_doc))]
        for second_sequence in second_sequences:
            instance = SequencePair(first_sequence, second_sequence)
            sequence_pairs.append(instance)
    return sequence_pairs
def iob2_format():
    """Convert the manually checked CSV test set into a two-column IOB2 file.

    Reads ``MANUALLY_CHECKED_CSV_TEST.csv`` (token;label;id rows, sentences
    delimited by ``DDI-...`` header rows) and writes token<TAB>label lines to
    ``MANUALLY_CHECKED_TEST_SET_IOB2.txt``, one blank line per sentence.
    The input file is now closed (the original leaked the handle).
    """
    with open('MANUALLY_CHECKED_CSV_TEST.csv', 'r') as csv_file:
        lines = csv_file.readlines()
    with open('MANUALLY_CHECKED_TEST_SET_IOB2.txt', 'w') as iob2_file:
        # Index 0 is the CSV header line — skip it.
        for i in range(1, len(lines)):
            if lines[i].startswith('DDI-'):
                iob2_file.write('\n')
            else:
                split = lines[i].split(';')
                token = split[0]
                label = split[1]
                iob2_file.write(token + '\t' + label + '\n')
def cleaned_iob2():
    """Write a cleaned IOB2 test set with separator tokens removed.

    '(' ')' '-' tokens that carry an entity label are dropped; a label that
    thereby becomes the first token of its entity is promoted from I- to B-.
    Both files are now closed via context managers (the original leaked the
    input handle).
    """
    with open('MANUALLY_CHECKED_CSV_TEST.csv', 'r') as csv_file:
        lines = csv_file.readlines()
    separators = ['(', ')', '-']
    iob2_tuples = []
    lines[0] = lines[0].replace('\n', '')
    for i in range(1, len(lines)):
        lines[i] = lines[i].replace('\n', '')
        if lines[i].startswith('DDI-'):
            # Sentence boundary marker.
            iob2_tuples.append(('\n', '\n'))
        else:
            split = lines[i].split(';')
            token = split[0]
            label = split[1]
            if token in separators and label != 'O':
                continue
            else:
                iob2_tuples.append((token, label))
    # Promote orphaned I- labels (now directly following an O) to B-.
    for i in range(1, len(iob2_tuples)):
        token, label = iob2_tuples[i - 1]
        next_token, next_label = iob2_tuples[i]
        if next_label.startswith('I') and label == 'O':
            iob2_tuples[i] = (next_token, next_label.replace('I', 'B'))
    with open('CLEANED_CHECKED_TEST_SET_IOB2.txt', 'w') as iob2_file:
        for token, label in iob2_tuples:
            if token == '\n':
                iob2_file.write('\n')
            else:
                iob2_file.write(token + '\t' + label + '\n')
def cleaned_linear():
    """Write cleaned token/id/label linear files for the checked test set.

    One line per sentence per output, formatted ``<id>: [item, item, ...]``.
    Separator tokens inside entities are dropped and orphaned I- labels
    promoted to B-, mirroring cleaned_iob2(). All four files are now closed
    via context managers (the original leaked the input handle).

    NOTE(review): as in the original, the final sentence is never flushed —
    a line is only written when the next ``DDI-`` header is seen.
    """
    separators = ['(', ')', '-']
    with open('MANUALLY_CHECKED_CSV_TEST.csv', 'r') as csv_file:
        lines = csv_file.readlines()
    token_line = ''
    id_line = ''
    label_line = ''
    with open('CLEANED_CHECKED_TOKEN.txt', 'w') as token_file, \
            open('CLEANED_CHECKED_ID.txt', 'w') as id_file, \
            open('CLEANED_CHECKED_LABEL.txt', 'w') as label_file:
        for i in range(0, len(lines) - 1):
            lines[i] = lines[i].replace('\n', '')
            if lines[i].startswith('DDI-'):
                # Flush the previous sentence before starting a new one.
                if i != 0:
                    token_file.write(token_line + '\n')
                    label_file.write(label_line + '\n')
                    id_file.write(id_line + '\n')
                token_line = lines[i].replace(';', '')
                token_line += ': ['
                id_line = token_line
                label_line = token_line
            else:
                split = lines[i].split(';')
                token = number_substitution(split[0])
                label = split[1]
                id = split[2]
                previous_split = lines[i - 1].split(';')
                previous_label = previous_split[1]
                if token in separators and label != 'O':
                    continue
                if previous_label == 'O' and label.startswith('I'):
                    # Entity head was removed: re-label this token as B-.
                    label = label.replace('I', 'B')
                    id = id.replace('I', 'B')
                if lines[i + 1].startswith('DDI-'):
                    token_line += token + ']'
                    id_line += id + ']'
                    label_line += label + ']'
                else:
                    token_line += token + ', '
                    id_line += id + ', '
                    label_line += label + ', '
def linear_format():
    """Write token/id/label linear files for the training-set CSV.

    Same layout as cleaned_linear() but without separator cleaning or
    relabelling; empty CSV lines are skipped. All files are now closed via
    context managers (the original leaked the input handle).

    NOTE(review): as in the original, the final sentence is never flushed.
    """
    with open('csv_TRAINING_SET.csv', 'r') as csv_file:
        lines = csv_file.readlines()
    token_line = ''
    id_line = ''
    label_line = ''
    with open('TRAINING_TOKEN.txt', 'w') as token_file, \
            open('TRAINING_ID.txt', 'w') as id_file, \
            open('TRAINING_LABEL.txt', 'w') as label_file:
        for i in range(0, len(lines) - 1):
            lines[i] = lines[i].replace('\n', '')
            if lines[i] == '':
                continue
            if lines[i].startswith('DDI-'):
                # Flush the previous sentence before starting a new one.
                if i != 0:
                    token_file.write(token_line + '\n')
                    label_file.write(label_line + '\n')
                    id_file.write(id_line + '\n')
                token_line = lines[i].replace(';', '')
                token_line += ': ['
                id_line = token_line
                label_line = token_line
            else:
                split = lines[i].split(';')
                token = split[0]
                label = split[1]
                id = split[2]
                if lines[i + 1].startswith('DDI-'):
                    token_line += token + ']'
                    id_line += id + ']'
                    label_line += label + ']'
                else:
                    token_line += token + ', '
                    id_line += id + ', '
                    label_line += label + ', '
# sentences = get_sentences('Dataset/Train/Overall')
# write_ner_dataset(sentences, 'TRAINING_SET')
# linear_format()
# sequence_pairs = double_sequence(sentences)
# print(sequence_pairs)
# ner_instances = ner_format(sentences, True, True)
# write_ner_dataset(sentences, 'train', True, True)
# cleaned_iob2()
# cleaned_linear() | {"/error_check.py": ["/relation_format_extraction.py"], "/data_model.py": ["/constants.py"], "/char_embedding_test.py": ["/model.py", "/relation_format_extraction.py", "/constants.py", "/data_model.py", "/post_processing.py"], "/test.py": ["/post_processing.py", "/model.py", "/relation_format_extraction.py"], "/model.py": ["/constants.py"], "/relation_format_extraction.py": ["/data_model.py", "/constants.py"]} |
42,696 | lucaputelli/ddi_joint | refs/heads/master | /char_embedding_test.py | from pre_processing_lib import *
from model import character_network
from relation_format_extraction import double_format, get_tokenized_sentences
from constants import word_model, tag_model
from data_model import SequencePair
import os
from post_processing import joint_plot
from random import randint
def generative_missing_labels(missing: List):
    """Map missed (id1, id2, class) triples to numeric class labels.

    Classes: unrelated=0, effect=1, mechanism=2, advise=3, int=4. Unknown
    class names are skipped, matching the original if-chain behaviour.
    Returns an int32 numpy array.
    """
    class_to_index = {'unrelated': 0, 'effect': 1, 'mechanism': 2, 'advise': 3, 'int': 4}
    labels = [class_to_index[class_val]
              for _, _, class_val in missing
              if class_val in class_to_index]
    return np.asarray(labels, dtype='int32')
def matrix_composition(doc_list: List[Doc], max_length: int):
    """Word- and POS-embedding tensors for a batch of docs, zero padded.

    Tokens missing from the word-embedding vocabulary keep an all-zero
    row (for the POS matrix too, since both lookups share one try block).
    """
    n = len(doc_list)
    word_matrix = np.zeros((n, max_length, word_model.vector_size))
    pos_matrix = np.zeros((n, max_length, tag_model.vector_size))
    for row, sent in enumerate(doc_list):
        for col, token in enumerate(sent):
            try:
                word_matrix[row, col, :] = word_model.wv[token.text.lower()]
                pos_matrix[row, col, :] = tag_model.wv[token.pos_]
            except KeyError:
                # Out-of-vocabulary: leave the zero vector in place.
                pass
    return word_matrix, pos_matrix
def generate_predictions(model, test_set: List[np.array], test_labels, test_negative):
    """Run *model* on *test_set* and append the filtered-out negatives.

    Negative instances never reach the network, so their gold labels are
    appended unchanged and they are predicted as class 0 (unrelated).
    Returns (gold labels, predictions) as 1-D integer arrays.
    """
    raw = model.predict(test_set)
    predicted_classes = np.argmax(raw, axis=1)
    gold_classes = np.argmax(test_labels, axis=1)
    all_gold = np.concatenate((gold_classes, test_negative))
    negative_predictions = np.zeros(len(test_negative), dtype=np.int64)
    all_predicted = np.concatenate((predicted_classes, negative_predictions))
    return all_gold, all_predicted
def char_matrix_composition(sents: List[Doc], max_length: int, max_word_length: int) -> np.ndarray:
    """Integer character-index tensor (sentence x token x character).

    Characters absent from the character dictionary keep index 0.
    """
    char_dict = get_character_dictionary()
    char_matrix = np.zeros(shape=(len(sents), max_length, max_word_length), dtype='int32')
    for s_idx, sent in enumerate(sents):
        for t_idx, token in enumerate(sent):
            for c_idx, ch in enumerate(token.text):
                if ch in char_dict:
                    char_matrix[s_idx][t_idx][c_idx] = char_dict[ch]
    return char_matrix
def generate_second_labels_dict(train_pairs: List[SequencePair]) -> dict:
    """Assign a stable index to every distinct second-sequence label.

    Indices follow first-appearance order over the pairs, exactly as the
    original manual counter did (len(dict) equals the next free index).
    """
    labels_dict = dict()
    for pair in train_pairs:
        for label in pair.second_labels:
            if label not in labels_dict:
                labels_dict[label] = len(labels_dict)
    return labels_dict
def first_label_vectors(labels: List[List[str]], max_length: int) -> np.ndarray:
    """One-hot IOB tensor: O/padding=[1,0,0], B-*=[0,1,0], I-*=[0,0,1].

    The original re-tested ``j >= len(labels[i])`` inside the else branch
    (dead code, already handled above) and used non-exclusive ifs; the
    behaviour is unchanged.
    """
    label_matrix = np.zeros(shape=(len(labels), max_length, 3), dtype='int32')
    for i in range(len(labels)):
        for j in range(max_length):
            if j >= len(labels[i]):
                # Padding positions are encoded like 'O'.
                label_matrix[i][j] = np.array([1, 0, 0])
            else:
                label = labels[i][j]
                if label == 'O':
                    label_matrix[i][j] = np.array([1, 0, 0])
                elif label.startswith('B'):
                    label_matrix[i][j] = np.array([0, 1, 0])
                elif label.startswith('I'):
                    label_matrix[i][j] = np.array([0, 0, 1])
    return label_matrix
def second_label_vectors(labels: List[List[str]], max_length: int, labels_dict: dict) -> np.ndarray:
    """One-hot tensor of second-sequence labels; padding uses the 'N' label."""
    n_classes = len(labels_dict)
    label_matrix = np.zeros(shape=(len(labels), max_length, n_classes), dtype='int32')
    pad_index = labels_dict.get('N')
    for row, sentence_labels in enumerate(labels):
        for col in range(max_length):
            one_hot = np.zeros(shape=n_classes, dtype='int32')
            if col >= len(sentence_labels):
                # Positions past the sentence end get the padding label.
                one_hot[pad_index] = 1
            else:
                one_hot[labels_dict.get(sentence_labels[col])] = 1
            label_matrix[row][col] = one_hot
    return label_matrix
def get_dataset(pairs: List[SequencePair], max_length, max_char_length, labels_dict):
    """Assemble (word, pos, char) inputs and (NER, relation) targets."""
    out_1 = first_label_vectors([p.first_labels for p in pairs], max_length)
    out_2 = second_label_vectors([p.second_labels for p in pairs], max_length, labels_dict)
    docs = [p.doc for p in pairs]
    word_input, pos_input = matrix_composition(docs, max_length)
    char_input = char_matrix_composition(docs, max_length, max_char_length)
    return word_input, pos_input, char_input, out_1, out_2
# Pre-processing
char_dict = get_character_dictionary()
train_pairs = double_format(test=False)
# Three first-sequence classes: O / B-* / I-*.
values_first = 3
test_pairs = double_format(test=True)
# NOTE(review): the label dictionary is built from the TEST pairs, not the
# training pairs — confirm this is intended (labels unseen in test would be
# missing at train time).
labels_dict = generate_second_labels_dict(test_pairs)
values_second = len(labels_dict.keys())
lengths = [len(p) for p in train_pairs+test_pairs]
max_length = max(lengths)
char_lenghts = [len(t) for p in train_pairs+test_pairs for t in p.word_list]
char_max = max(char_lenghts)
word, pos, char, o1, o2 = get_dataset(train_pairs, max_length, char_max, labels_dict)
t_word, t_pos, t_char, t_o1, t_o2 = get_dataset(test_pairs, max_length, char_max, labels_dict)
# Random-search space for the architecture hyper-parameters.
lstm_layers = [1, 2, 3]
lstm_dimensions = [48, 72, 96, 120, 144, 168, 200, 224, 248, 272, 296]
char_lstm_dimensions = [5, 10, 15, 20, 25]
character_bool = [True, False]
attention_bool = [True, False]
custom_layer_bool = [True, False]
date_path = '2020_03_06'
if not os.path.exists(date_path):
    os.mkdir(date_path)
# 20 random configurations; each run is saved in its own subfolder whose
# name encodes the sampled configuration.
for i in range(20):
    layers = lstm_layers[randint(0, len(lstm_layers)-1)]
    lstm_dim = lstm_dimensions[randint(0, len(lstm_dimensions)-1)]
    char_lstm = char_lstm_dimensions[randint(0, len(char_lstm_dimensions)-1)]
    character = character_bool[randint(0, 1)]
    attention = attention_bool[randint(0, 1)]
    custom = custom_layer_bool[randint(0, 1)]
    path = date_path + '/L{}_D{}'.format(layers, lstm_dim)
    model = character_network(layers, lstm_dim, char_lstm, values_first, values_second, 25, max_length, char_max,
                              True, character, attention, custom)
    input = [word, pos]
    if character:
        input += [char]
        path += '_C{}_char'.format(char_lstm)
    if attention:
        path += '_att'
    if custom:
        path += '_custom'
    if not os.path.exists(path):
        os.mkdir(path)
    # NOTE(review): epochs=1 looks like a smoke-test setting — confirm.
    history = model.fit(x=input, y={'first_crf': o1, 'second_crf': o2}, validation_split=0.2,
                        batch_size=128, epochs=1, verbose=2)
    joint_plot(path, 'plot', history)
42,697 | lucaputelli/ddi_joint | refs/heads/master | /test.py | import datetime
import pickle
from os import mkdir
from os.path import exists
from gensim.models import Word2Vec
from pre_processing_lib import *
from post_processing import plot, error_analysis, metrics
from model import neural_network
from relation_format_extraction import instances_from_prediction, joint_negative_filtering, joint_labelled_instances, JointInstance
def matrix_composition(doc_list):
    """Word/POS embedding and drug-distance tensors for *doc_list*.

    Relies on module globals: ``dim`` (padded sentence length) and the
    gensim models ``word_model`` / ``tag_model``. The word dimension is
    hard-coded to 200 here — assumed equal to ``word_model.vector_size``;
    TODO confirm.
    """
    word_matrix = np.zeros((len(doc_list), dim, 200))
    pos_matrix = np.zeros((len(doc_list), dim, tag_model.vector_size))
    d1_matrix = np.zeros((len(doc_list), dim, 1))
    d2_matrix = np.zeros((len(doc_list), dim, 1))
    for i, sent in enumerate(doc_list):
        index1 = -1
        index2 = -1
        # Locate the two masked drug placeholders in the sentence.
        for j in range(len(sent)):
            if sent[j].text == 'PairDrug1':
                index1 = j
            if sent[j].text == 'PairDrug2':
                index2 = j
        for j in range(len(sent)):
            try:
                word_matrix[i, j, :] = word_model.wv[sent[j].text]
                pos_matrix[i, j, :] = tag_model.wv[sent[j].pos_]
                # Signed distance to each drug, normalised by length.
                d1_matrix[i, j, :] = (j - index1) / len(sent)
                d2_matrix[i, j, :] = (j - index2) / len(sent)
            except KeyError:
                # OOV word: the pos/distance rows also stay zero because
                # the lookup aborts the whole try block.
                pass
    return word_matrix, pos_matrix, d1_matrix, d2_matrix
def generate_negative_labels(instance_list):
    """All-zero ('unrelated') label vector, one entry per negative instance."""
    return np.zeros(len(instance_list))
def generative_missing_labels(missing: List):
    """Map missed (id1, id2, class) triples to numeric class labels.

    Classes: unrelated=0, effect=1, mechanism=2, advise=3, int=4. Unknown
    class names are skipped, matching the original if-chain behaviour.
    Returns an int32 numpy array.
    """
    class_to_index = {'unrelated': 0, 'effect': 1, 'mechanism': 2, 'advise': 3, 'int': 4}
    labels = [class_to_index[class_val]
              for _, _, class_val in missing
              if class_val in class_to_index]
    return np.asarray(labels, dtype='int32')
def generate_predictions(model, test_set: List[np.array], test_labels, test_negative):
    """Run *model* on *test_set* and append the filtered-out negatives.

    Negatives never reach the network: their gold labels are concatenated
    unchanged and they are predicted as class 0 (unrelated).
    """
    raw = model.predict(test_set)
    predicted_classes = np.argmax(raw, axis=1)
    gold_classes = np.argmax(test_labels, axis=1)
    all_gold = np.concatenate((gold_classes, test_negative))
    negative_predictions = np.zeros(len(test_negative), dtype=np.int64)
    all_predicted = np.concatenate((predicted_classes, negative_predictions))
    return all_gold, all_predicted
def results(labels, predictions, test_name, folder, trained_model=None):
    """Persist evaluation metrics and the model architecture for one run.

    Writes three files into *folder*: a human-readable metrics report,
    the model architecture as JSON, and the raw metrics as a pickle.
    Returns the confusion matrix.

    trained_model: the Keras model to serialise. Defaults to the
        module-level ``model`` global (the original implementation read it
        implicitly from the training loop), so existing call sites that
        pass four arguments keep working.
    """
    # Metrics
    matrix, report, overall_precision, overall_recall, overall_f_score = metrics(labels,
                                                                                 predictions)
    text = 'Classification Report\n\n{}\n\nConfusion Matrix\n\n{}\n\nOverall precision\n\n{}' \
           + '\n\nOverall recall\n\n{}\n\nOverall F-score\n\n{}\n'
    # Context managers guarantee the handles are closed even on write errors.
    with open(folder + '/metrics_'+test_name+'.txt', 'w') as f:
        f.write(text.format(report, matrix, overall_precision, overall_recall, overall_f_score))
    # Model to JSON
    if trained_model is None:
        trained_model = model  # backward-compatible fallback to the global
    model_json = trained_model.to_json()
    with open(folder + '/model_'+test_name+'.json', "w") as json_file:
        json_file.write(model_json)
    # Model pickle
    with open(folder + '/metrics_'+test_name+'.pickle', 'wb') as pickle_file:
        pickle.dump([matrix, report, overall_precision, overall_recall, overall_f_score], pickle_file)
    return matrix
# Pre-processing: build gold training instances plus the right/approximate/
# wrong/missing splits recovered from a previous NER prediction run.
# sents = get_sentences('Dataset/Train/Overall')
sents = get_sentences('Train/Sample')
instances = get_instances(sents)
instances = [x for x in instances if x is not None]
instances = negative_filtering(instances)
# Keep only instances with a non-empty dependency path between the drugs.
instances = [x for x in instances if x.get_dependency_path() is not None and len(x.get_dependency_path()) > 0]
t_right, t_approximate, t_wrong, t_missing = instances_from_prediction()
right_selected, right_negative = joint_negative_filtering(t_right)
# approximate_selected, approximate_negative = joint_negative_filtering(t_approximate)
wrong_selected, wrong_negative = joint_negative_filtering(t_wrong)
right_selected = [x for x in right_selected if x.dependency_path is not None and len(x.dependency_path) > 0]
# approximate_selected = [x for x in approximate_selected if x.dependency_path and len(x.dependency_path) > 0]
wrong_selected = [x for x in wrong_selected if x.dependency_path and len(x.dependency_path) > 0]
sents, Y_train = get_labelled_instances(instances)
right_sents, right_labels = joint_labelled_instances(right_selected)
# approximate_sents, approximate_labels = joint_labelled_instances(approximate_selected)
wrong_sents, wrong_labels = joint_labelled_instances(wrong_selected)
missing_labels = generative_missing_labels(t_missing)
# sents, labels = get_labelled_instances(instances)
# Pre-trained embeddings: 200-d PubMed word2vec and a POS-tag embedding.
word_model = Word2Vec.load('pub_med_retrained_ddi_word_embedding_200.model')
tag_model = Word2Vec.load('ddi_pos_embedding.model')
# Padding length = longest sequence across all splits.
# NOTE(review): t_right/t_approximate/t_wrong hold JointInstance objects;
# this assumes len() is defined for them — confirm in relation_format_extraction.
lengths = [len(x) for x in sents+t_right+t_approximate+t_wrong]
dim = max(lengths)
X_word, X_pos, X_d1, X_d2 = matrix_composition(sents)
R_word, R_pos, R_d1, R_d2 = matrix_composition(right_sents)
# A_word, A_pos, A_d1, A_d2 = matrix_composition(approximate_sents)
W_word, W_pos, W_d1, W_d2 = matrix_composition(wrong_sents)
folder = '2020_01_31_noapproximate'
if not exists(folder):
    mkdir(folder)
# Random hyper-parameter search: 10 draws of LSTM width and dropouts.
for i in range(10):
    lstm_units = np.random.randint(6, 13)*10
    dropout = np.random.rand() * 0.2 + 0.3
    r_dropout = np.random.rand() * 0.2 + 0.4
    batch_size = 128
    epochs = 65
    name = "LSTM_%d_DROP_%.2f_RDROP_%.2f" % (lstm_units, dropout, r_dropout)
    parameters_folder = folder+'/'+name
    if not exists(parameters_folder):
        mkdir(parameters_folder)
    # Feature ablations: word only, word+POS, word+POS+offset.
    combinations = [(False, False), (True, False), (True, True)]
    for pos_tag, offset in combinations:
        combination_name = 'word'
        if pos_tag:
            combination_name += '_pos'
        if offset:
            combination_name += '_offset'
        combination_folder = parameters_folder + '/' + combination_name
        if not exists(combination_folder):
            mkdir(combination_folder)
        training_set = [X_word]
        right_set = [R_word]
        # approximate_set = [A_word]
        wrong_set = [W_word]
        if pos_tag:
            training_set += [X_pos]
            right_set += [R_pos]
            # approximate_set += [A_pos]
            wrong_set += [W_pos]
        if offset:
            training_set += [X_d1, X_d2]
            right_set += [R_d1, R_d2]
            # approximate_set += [A_d1, A_d2]
            wrong_set += [W_d1, W_d2]
        model = neural_network(dim, lstm_units, dropout, r_dropout,
                               pos_tag, offset)
        history = model.fit(training_set, Y_train,
                            validation_split=0.15,
                            batch_size=batch_size,
                            epochs=epochs, verbose=2)
        # NOTE(review): `datetime` is not imported in this file's visible
        # imports — presumably re-exported by `pre_processing_lib import *`;
        # verify, otherwise this line raises NameError.
        plot(combination_folder, 'loss_accuracy_' + datetime.datetime.now().strftime("%Y%m%d-%H%M%S"), history)
        total_right_labels, right_predictions = generate_predictions(model, right_set, right_labels,
                                                                     generate_negative_labels(right_negative))
        '''total_approximate_labels, approximate_predictions = generate_predictions(model, approximate_set, approximate_labels,
                                                              generate_negative_labels(
                                                                  approximate_negative))'''
        total_wrong_labels, wrong_predictions = generate_predictions(model, wrong_set, wrong_labels,
                                                                     generate_negative_labels(wrong_negative))
        missing_predictions = generate_negative_labels(t_missing)
        complete_labels = np.concatenate([total_right_labels, total_wrong_labels, missing_labels])
        complete_predictions = np.concatenate(
            [right_predictions, wrong_predictions, missing_predictions])
results(complete_labels, complete_predictions, 'complete', combination_folder) | {"/error_check.py": ["/relation_format_extraction.py"], "/data_model.py": ["/constants.py"], "/char_embedding_test.py": ["/model.py", "/relation_format_extraction.py", "/constants.py", "/data_model.py", "/post_processing.py"], "/test.py": ["/post_processing.py", "/model.py", "/relation_format_extraction.py"], "/model.py": ["/constants.py"], "/relation_format_extraction.py": ["/data_model.py", "/constants.py"]} |
42,698 | lucaputelli/ddi_joint | refs/heads/master | /model.py | from keras.layers import Input, Concatenate, Bidirectional, LSTM, Dense, TimeDistributed, Embedding, Lambda, Layer, RepeatVector
from keras.optimizers import Adam
from keras.models import Model
from AttentionMechanism import AttentionL
from ChainCRF import ChainCRF
import keras.backend as K
import tensorflow as tf
from keras_multi_head import MultiHeadAttention
from constants import number_of_charachters
class EntityAwareDecodingLayer(Layer):
    """Pool LSTM states and label embeddings over entity positions.

    Given [lstm_out, crf_argmax, label_embedding], builds a mask that is 1
    where the CRF predicted a non-zero (non-O) label, masks both the LSTM
    output and the label embedding with it, sums each over the time axis,
    and concatenates the two sums along the feature axis.
    """

    def __init__(self):
        super(EntityAwareDecodingLayer, self).__init__()

    def call(self, inputs, **kwargs):
        # Expects exactly three tensors, all with a shared time dimension.
        assert len(inputs) == 3
        lstm_out = inputs[0]
        crf_argmax = inputs[1]
        label_embedding = inputs[2]
        zero = tf.constant(0, dtype='int64')
        # mask[b, t] = 1.0 where the predicted label id is non-zero.
        mask = tf.cast(tf.not_equal(crf_argmax, zero), dtype='float32')
        mask = tf.expand_dims(mask, 2)  # broadcast over the feature axis
        product = tf.multiply(lstm_out, mask)
        label_product = tf.multiply(label_embedding, mask)
        # Sum over time -> one fixed-size vector per sequence, then concat.
        lstm_sum = K.sum(product, axis=1)
        label_sum = K.sum(label_product, axis=1)
        concatenate = K.concatenate([lstm_sum, label_sum], axis=1)
        return concatenate
class MyRepeatVector(Layer):
    """Repeat a (batch, features) tensor *n* times along a new time axis.

    Like keras.layers.RepeatVector, but with an explicitly declared output
    feature size so downstream shape inference works with custom inputs.
    """

    def __init__(self, n, output_dim, **kwargs):
        super(MyRepeatVector, self).__init__(**kwargs)
        self.n = n
        self.output_dim = output_dim

    def call(self, inputs, **kwargs):
        # (batch, features) -> (batch, n, features)
        return K.repeat(inputs, self.n)

    def compute_output_shape(self, input_shape):
        batch_size = input_shape[0]
        return batch_size, self.n, self.output_dim
def neural_network(input_length: int, lstm_units: int, dropout: float, recurrent_dropout: float,
                   pos_tag: bool, offset: bool):
    """Build the relation-classification model: BiLSTM + attention + softmax.

    Inputs are (input_length, 200) word vectors, optionally concatenated
    with (input_length, 20) POS vectors and two (input_length, 1) offset
    channels. Output is a 5-way softmax over DDI classes.

    NOTE(review): `offset` only takes effect when `pos_tag` is True — the
    combination (pos_tag=False, offset=True) silently ignores the offsets.
    The training script never uses that combination, so the restriction is
    currently harmless; confirm before reusing elsewhere.
    """
    input1 = Input(shape=(input_length, 200))
    lstm_input = input1
    complete_input = input1
    if pos_tag:
        pos_input1 = Input(shape=(input_length, 20))
        if offset:
            d1_input = Input(shape=(input_length, 1))
            d2_input = Input(shape=(input_length, 1))
            lstm_input = Concatenate()([input1, pos_input1, d1_input, d2_input])
            complete_input = [input1, pos_input1, d1_input, d2_input]
        else:
            lstm_input = Concatenate()([input1, pos_input1])
            complete_input = [input1, pos_input1]
    seq_sentence = Bidirectional(LSTM(lstm_units,
                                      dropout=dropout, return_sequences=True, return_state=False,
                                      recurrent_dropout=recurrent_dropout))(lstm_input)
    # Attention collapses the time axis into a single sentence vector.
    sentence_out = AttentionL(input_length)(seq_sentence)
    main_output = Dense(5, activation='softmax', name='main_output')(sentence_out)
    model = Model(inputs=complete_input, outputs=[main_output])
    algorithm = Adam(lr=0.0001, decay=0, beta_1=0.9, beta_2=0.999)
    model.compile(loss='categorical_crossentropy',
                  optimizer=algorithm,
                  metrics=['accuracy'])
    model.summary()
    return model
def character_network(lstm_layers: int, lstm_units: int, char_lstm_units: int, value_first: int, value_second: int, label_embedding_dim: int,
                      max_length, max_word_length, pos_tag: bool, character: bool,
                      attention, custom_layer: bool) -> Model:
    """Build the joint NER + relation sequence model with two chained CRFs.

    Word vectors (optionally + POS and a char-BiLSTM encoding) feed a stack
    of `lstm_layers` BiLSTMs. A first CRF predicts entity labels; their
    argmax is embedded and concatenated back onto the LSTM states (plus an
    optional entity-aware pooled summary) before a second CRF predicts the
    relation tags. Returns a compiled Model with outputs
    [first_crf, second_crf].
    """
    word_input = Input(shape=(max_length, 200), name='word_input')
    input_list = [word_input]
    lstm_list = [word_input]
    if pos_tag:
        pos_input = Input(shape=(max_length, 20), name='pos_input')
        input_list += [pos_input]
        lstm_list += [pos_input]
    if character:
        # Per-token character BiLSTM: chars -> 25-d embedding -> fixed vector.
        char_input = Input(shape=(max_length, max_word_length), name='char_input')
        char_embedding = TimeDistributed(Embedding(input_dim=number_of_charachters, output_dim=25))(char_input)
        char_lstm = TimeDistributed(Bidirectional(LSTM(char_lstm_units, return_sequences=False)))(char_embedding)
        lstm_list = input_list + [char_lstm]
        input_list += [char_input]
    if len(input_list) == 1:
        lstm_input = word_input
    else:
        lstm_input = Concatenate()(lstm_list)
    word_lstm = Bidirectional(LSTM(lstm_units, return_sequences=True,
                                   dropout=0.2, recurrent_dropout=0.2))(lstm_input)
    for i in range(lstm_layers-1):
        word_lstm = Bidirectional(LSTM(lstm_units, return_sequences=True,
                                       dropout=0.2, recurrent_dropout=0.2))(word_lstm)
    if attention:
        attention = MultiHeadAttention(head_num=8)(word_lstm)
        dense_first = TimeDistributed(Dense(value_first, activation=None))(attention)
    else:
        dense_first = TimeDistributed(Dense(value_first, activation=None))(word_lstm)
    # First CRF: entity labelling over `value_first` classes.
    crf_layer = ChainCRF(name='first_crf')
    first_output = crf_layer(dense_first)
    # Embed the hard argmax of the first CRF as extra features.
    argmax = Lambda(lambda x: K.argmax(x))(first_output)
    label_embedding = Embedding(input_dim=value_first+1, output_dim=label_embedding_dim, trainable=True)(argmax)
    # print(entity_aware_matrix.shape)
    final_input = Concatenate(axis=2)([word_lstm, label_embedding])
    # print(second_input.shape)
    if custom_layer:
        # Pool entity positions into one vector and repeat it per timestep.
        entity_aware = EntityAwareDecodingLayer()([word_lstm, argmax, label_embedding])
        entity_aware_matrix = MyRepeatVector(max_length, 2*lstm_units+label_embedding_dim)(entity_aware)
        final_input = Concatenate(axis=2)([final_input, entity_aware_matrix])
    # print(final_input.shape)
    # Second CRF: relation tagging over `value_second` classes.
    dense_second = TimeDistributed(Dense(value_second, activation=None))(final_input)
    second_crf = ChainCRF(name='second_crf')
    second_output = second_crf(dense_second)
    model = Model(inputs=input_list, outputs=[first_output, second_output])
    algorithm = Adam(lr=0.0001, decay=0, beta_1=0.9, beta_2=0.999)
    losses = {
        "first_crf": crf_layer.loss,
        "second_crf": second_crf.loss,
    }
    model.compile(loss=losses,
                  optimizer=algorithm,
                  metrics=['accuracy'])
    model.summary()
    return model
| {"/error_check.py": ["/relation_format_extraction.py"], "/data_model.py": ["/constants.py"], "/char_embedding_test.py": ["/model.py", "/relation_format_extraction.py", "/constants.py", "/data_model.py", "/post_processing.py"], "/test.py": ["/post_processing.py", "/model.py", "/relation_format_extraction.py"], "/model.py": ["/constants.py"], "/relation_format_extraction.py": ["/data_model.py", "/constants.py"]} |
42,699 | lucaputelli/ddi_joint | refs/heads/master | /relation_format_extraction.py | from typing import List
from spacy.language import Doc
from spacy.attrs import LEMMA, LOWER, POS, TAG, ENT_TYPE, IS_ALPHA, DEP, HEAD, SPACY
import networkx as nx
from pre_processing_lib import get_sentences, graph_creation
from networkx.exception import *
from spacy import displacy
from pathlib import Path
import en_core_web_sm
import numpy as np
from data_model import *
from constants import *
def clean_list_string(str_list: str):
    """Strip list-literal artifacts ('[', ']', quotes, newlines) from *str_list*."""
    for junk in ('[', ']', '\'', '\n'):
        str_list = str_list.replace(junk, '')
    return str_list
def get_tokenized_sentences(sentences_path, labels_path):
    """Parse parallel token/label dump files into Sentence objects.

    Each line in both files has the form ``<id>;: [tok1, tok2, ...]``.
    Lines are matched positionally and ids are asserted to agree.
    Returns a list of Sentence instances wrapping NERToken lists.
    """
    # `with` guarantees the handles are closed (the original leaked both).
    with open(sentences_path, 'r') as sentence_handle:
        sentence_file = sentence_handle.readlines()
    with open(labels_path, 'r') as labels_handle:
        predictions = labels_handle.readlines()
    assert len(sentence_file) == len(predictions)
    sentences = list()
    for i in range(len(sentence_file)):
        id = sentence_file[i].split(': [')[0].replace(';', '')
        sentence = sentence_file[i].split(': [')[1]
        sentence = clean_list_string(sentence)
        id_pred = predictions[i].split(': [')[0].replace(';', '')
        prediction = predictions[i].split(': [')[1]
        words = sentence.split(', ')
        labels = prediction.split(', ')
        labels = [clean_list_string(label) for label in labels]
        assert len(words) == len(labels)
        assert id == id_pred
        # Empty tokens (artifacts of the dump format) are dropped.
        tokens = [NERToken(words[j], labels[j]) for j in range(len(words)) if words[j] != '']
        ner_sentence = Sentence(id, tokens)
        sentences.append(ner_sentence)
    return sentences
def substitution(doc: Doc, pair: Pair, drugs) -> Doc:
    """Return a copy of *doc* with drug mentions replaced by placeholders.

    The two drugs of *pair* become 'PairDrug1'/'PairDrug2' (or both
    'NoPair' when they are the same word, e.g. a self-pair), and every
    other known drug becomes 'Drug'. Linguistic annotations are preserved
    by round-tripping through to_array/from_array.
    """
    index_1 = pair.e1_index
    index_2 = pair.e2_index
    name_1 = pair.e1_text
    name_2 = pair.e2_text
    # Snapshot annotations so the rebuilt Doc keeps POS/deps etc.
    np_array = doc.to_array([LEMMA, LOWER, POS, TAG, ENT_TYPE, IS_ALPHA, DEP, HEAD, SPACY])
    word_list = [t.text for i, t in enumerate(doc)]
    no_pair = False
    # NOTE(review): assumes e1_text/e2_text are spaCy tokens with .text;
    # instances_from_prediction can set them to the plain string 'NULL',
    # which would raise AttributeError here — verify upstream filtering.
    if name_1.text.lower() == name_2.text.lower():
        word_list[index_1] = 'NoPair'
        word_list[index_2] = 'NoPair'
        no_pair = True
    for index, name in drugs:
        if index != index_1 and index != index_2:
            word_list[index] = 'Drug'
    if not no_pair:
        word_list[index_1] = 'PairDrug1'
        word_list[index_2] = 'PairDrug2'
    doc2 = Doc(doc.vocab, words=word_list)
    doc2.from_array([LEMMA, LOWER, POS, TAG, ENT_TYPE, IS_ALPHA, DEP, HEAD, SPACY], np_array)
    return doc2
def generate_gold_standard(sentences: List[Sentence]) -> (List[JointInstance], List[Sentence]):
    """Build gold-standard JointInstances from tokenised sentences.

    For each sentence: reconstruct a spaCy Doc (guessing spacing around
    punctuation infixes), merge multi-token drug mentions, enumerate all
    drug pairs, and create one masked-Doc instance per pair. Finally the
    DDI class of each instance is looked up in the XML corpus.
    """
    instances = list()
    infixes = ['(', ')', '/', '-', ';', '*']
    for s in sentences:
        id = s.id
        tokens = s.original_tokens
        drug_keys = list(s.label_dict.keys())
        # No space before/after punctuation-like tokens.
        spaces = list()
        for i in range(len(tokens)-1):
            actual = tokens[i]
            next = tokens[i+1]
            if actual.word in infixes or next.word in infixes:
                space = False
            else:
                space = True
            spaces.append(space)
        spaces.append(False)
        words = [t.word for t in tokens]
        try:
            doc = Doc(nlp.vocab, words, spaces)
        except ValueError:
            doc = Doc(nlp.vocab, words)
        # Merge multi-token drugs and shift later drug offsets accordingly.
        for j in range(len(drug_keys)):
            start, end = s.label_dict.__getitem__(drug_keys[j])
            length = end - start
            if length > 1:
                # print(list(doc))
                # span = doc[start:end]
                with doc.retokenize() as retokenizer:
                    try:
                        retokenizer.merge(doc[start:end])
                    except IndexError:
                        print(words)
                for k in range(j+1, len(drug_keys)):
                    tokens = list(doc)
                    key = drug_keys[k]
                    n_start, n_end = s.label_dict.__getitem__(key)
                    n_start -= length-1
                    n_end -= length-1
                    s.label_dict.__setitem__(key, (n_start, n_end))
        pairs = list()
        drug_indexes = [s.label_dict.get(drug_keys[i]) for i in range(len(drug_keys))]
        s.merged_drug_starts = [i for (i, end) in drug_indexes]
        s.doc = doc
        # NOTE(review): if the IndexError branch below fires, `drugs` stays
        # unbound and the following `len(drugs)` raises NameError.
        try:
            drugs = [(i, doc[i]) for (i, end) in drug_indexes]
        except IndexError:
            print(drug_indexes, words)
        if len(drugs) >= 2:
            for i in range(len(drug_keys)-1):
                for j in range(i+1, len(drug_keys)):
                    drug_i, end_1 = s.label_dict.get(drug_keys[i])
                    drug_j, end_2 = s.label_dict.get(drug_keys[j])
                    try:
                        i_text = doc[drug_i]
                    except IndexError:
                        i_text = 'NoWord'
                    j_text = doc[drug_j]
                    p = Pair(drug_keys[i], drug_keys[j], drug_i, drug_j, i_text, j_text, s)
                    pairs.append(p)
        for p in pairs:
            new_doc = substitution(doc, p, drugs)
            instance = JointInstance(new_doc, doc, p)
            instances.append(instance)
    # NOTE(review): get_pairs_from_xml is defined with a required `path`
    # parameter; this zero-argument call raises TypeError as written —
    # a corpus path (presumably 'Dataset/Train/Overall') is missing.
    xml_pairs = get_pairs_from_xml()
    for i in range(len(instances)):
        print(i)
        p = instances[i].pair
        e1_id = clean_list_string(p.e1_id)
        e2_id = clean_list_string(p.e2_id)
        found = False
        for (e1, e2, class_value) in xml_pairs:
            if e1 == e1_id and e2 == e2_id:
                found = True
                instances[i].class_value = class_value
                break
    return instances, sentences
def get_pairs_from_xml(path: str):
    """Collect (e1_id, e2_id, class) triples for every <pair> in the corpus.

    Non-interacting pairs map to 'unrelated'; interacting pairs missing a
    'type' attribute map to 'int'. The result is sorted.
    """
    pairs = list()
    for xml_sentence in get_sentences(path):
        # s_id = xml_sentence.attributes['id'].value
        for pair_node in xml_sentence.getElementsByTagName('pair'):
            e1 = pair_node.attributes['e1'].value
            e2 = pair_node.attributes['e2'].value
            if pair_node.attributes['ddi'].value == 'false':
                class_value = 'unrelated'
            else:
                try:
                    class_value = pair_node.attributes['type'].value
                except KeyError:
                    class_value = 'int'
            pairs.append((e1, e2, class_value))
    pairs.sort()
    return pairs
def check_ids():
    """Debug helper: report sentences whose entity count differs between
    the XML corpus and the tokenised gold files (aligned by position)."""
    sentences = get_tokenized_sentences('DDI_Test_Sent_Gold.txt', 'DDI_Test_IOB2_Gold.txt')
    sentences = [s for s in sentences if s.original_tokens]
    # sentences.sort(key=lambda x: x.id)
    xml_sentences = get_sentences('Dataset/Test/Overall')
    wrong_list = []
    for i in range(0, len(xml_sentences)):
        xml_sentence = xml_sentences[i]
        entities = xml_sentence.getElementsByTagName('entity')
        s_id = xml_sentence.attributes['id'].value
        id = sentences[i].id  # unused; retained from the original bookkeeping
        text = xml_sentence.attributes['text'].value
        words = [token.word for token in sentences[i].original_tokens]
        xml_entity_number = len(entities)
        entity_number = len(sentences[i].label_dict.keys())
        print(text, words)
        if xml_entity_number != entity_number:
            wrong_list.append((s_id, xml_entity_number, entity_number))
    print(len(wrong_list))
    print(wrong_list)
def sentences_from_prediction(sentences_path, labels_path):
    """Attach predicted NER token sequences to the manually-checked gold
    sentences, matching on sentence id.

    Lines whose token and label counts disagree are skipped (their ids are
    collected in `wrong`); one known-broken sentence id is skipped outright.
    Returns the list of gold Sentence objects that received predictions.
    """
    gold_sentences = get_tokenized_sentences('MANUALLY_CHECKED_TOKEN.txt', 'MANUALLY_CHECKED_ID.txt')
    sentences_dict = {s.id: s for s in gold_sentences}
    # Context managers close the dump files (the original leaked both handles).
    with open(sentences_path, 'r') as sentence_handle:
        sentence_file = sentence_handle.readlines()
    with open(labels_path, 'r') as labels_handle:
        predictions = labels_handle.readlines()
    assert len(sentence_file) == len(predictions)
    sentences = list()
    wrong = list()
    for i in range(len(sentence_file)):
        id = sentence_file[i].split(': [')[0].replace('; ', '')
        sentence = sentence_file[i].split(': [')[1]
        sentence = clean_list_string(sentence)
        id_pred = predictions[i].split(': [')[0].replace('; ', '')
        # Known-malformed sentence in the prediction dump.
        if id_pred == 'DDI-DrugBank.d769.s3':
            continue
        prediction = clean_list_string(predictions[i].split(': [')[1])
        words = sentence.split(', ')
        labels = prediction.split(', ')
        labels = [clean_list_string(label) for label in labels]
        if len(words) != len(labels):
            wrong.append(id)
            continue
        # assert id == id_pred
        length = min(len(words), len(labels))
        tokens = [NERToken(words[j], labels[j]) for j in range(length) if words[j] != '']
        if not tokens:
            print(sentence_file[i])
        # NOTE(review): .get returns None for an unknown id, which would make
        # set_predictions raise AttributeError — confirm ids always match.
        ner_sentence: Sentence = sentences_dict.get(id_pred)
        ner_sentence.set_predictions(tokens)
        sentences.append(ner_sentence)
    return sentences
def instances_from_prediction():
    """Build JointInstances from the NER model's predicted entities.

    Mirrors generate_gold_standard but on predicted spans, tagging each
    pair as Correct/Approximate/Wrong ('C'/'A'/'W') based on the two
    entities' match quality, then splitting instances by that tag.
    Returns (right, approximate, wrong, missing_xml_pairs).
    """
    sentences = sentences_from_prediction('inputSent2.txt', 'predLabels2_modified.txt')
    infixes = ['(', ')', '/', '-', ';', '*']
    # A pair is Approximate when the two match-quality flags combine to one of:
    approximate = 'CA', 'AC', 'AA'
    instances = list()
    for s in sentences:
        tokens = s.predicted_tokens
        drug_indexes = s.complete_list
        # No space before/after punctuation-like tokens.
        spaces = list()
        for i in range(len(tokens) - 1):
            actual = tokens[i]
            next = tokens[i + 1]
            if actual.word in infixes or next.word in infixes:
                space = False
            else:
                space = True
            spaces.append(space)
        spaces.append(False)
        words = [t.word for t in tokens]
        try:
            doc = Doc(nlp.vocab, words, spaces)
        except ValueError:
            doc = Doc(nlp.vocab, words)
        # Merge multi-token drug spans and shift later offsets accordingly.
        for j in range(len(drug_indexes)):
            start, end, drug_id, drug_type = drug_indexes[j]
            length = end - start
            if length > 1:
                # print(list(doc))
                # span = doc[start:end]
                with doc.retokenize() as retokenizer:
                    try:
                        retokenizer.merge(doc[start:end])
                    except IndexError:
                        print(words)
                for k in range(j + 1, len(drug_indexes)):
                    tokens = list(doc)
                    n_start = drug_indexes[k][0]
                    n_end = drug_indexes[k][1]
                    id = drug_indexes[k][2]
                    type = drug_indexes[k][3]
                    n_start -= length - 1
                    n_end -= length - 1
                    if n_start < 0:
                        print(tokens)
                    s.complete_list[k] = (n_start, n_end, id, type)
        s.merged_drug_starts = [start for (start, end, id, type) in s.complete_list]
        s.doc = doc
        pairs = list()
        for i in range(len(s.complete_list) -1):
            s_i, e_i, id_i, type_i = s.complete_list[i]
            for j in range(i +1, len(s.complete_list)):
                s_j, e_j, id_j, type_j = s.complete_list[j]
                try:
                    i_text = doc[s_i]
                    j_text = doc[s_j]
                # NOTE(review): bare except hides anything beyond the intended
                # IndexError (even KeyboardInterrupt) — consider narrowing.
                except:
                    i_text = j_text = 'NULL'
                type = 'W'
                if type_i == type_j == 'C':
                    type = 'C'
                if type_i + type_j in approximate:
                    type = 'A'
                if type_i == 'W' or type_j == 'W':
                    type = 'W'
                p = Pair(id_i, id_j, s_i, s_j, i_text, j_text, s)
                p.set_type(type)
                pairs.append(p)
        drugs = [(i, doc[i]) for i in s.merged_drug_starts]
        for p in pairs:
            new_doc = substitution(doc, p, drugs)
            instance = JointInstance(new_doc, doc, p)
            instances.append(instance)
    # Assign gold classes; XML pairs never matched are reported as missing.
    xml_pairs = get_pairs_from_xml('Dataset/Test/Overall')
    missing_pairs = list()
    for e1, e2, class_value in xml_pairs:
        found = False
        for i in instances:
            id_1 = i.e1_id
            id_2 = i.e2_id
            if e1 == id_1 and e2 == id_2:
                i.class_value = class_value
                found = True
        if not found:
            missing_pairs.append((e1, e2, class_value))
    wrong_instances = [i for i in instances if i.type == 'W']
    right_instances = [i for i in instances if i.type == 'C' and i.class_value != '']
    approximate_instances = [i for i in instances if i.type == 'A' and i.class_value != '']
    return right_instances, approximate_instances, wrong_instances, missing_pairs
def joint_path(instances: List[JointInstance]):
    """Compute and store the labelled dependency path between PairDrug1
    and PairDrug2 for every instance.

    Runs the spaCy pipeline on each instance doc, builds the dependency
    graph, and stores the shortest path (as (source, target, dep-label)
    triples, with 'neg' edges injected) on the instance via
    set_dependency_path; an empty list is stored when no path exists.
    Side effect: renders each parsed sentence to sentence.html (overwritten
    every iteration).
    """
    # Pipeline with tagger and parser must be defined and executed here.
    nlp = en_core_web_sm.load()
    no_pair = 0
    no_path = 0
    for instance in instances:
        doc = instance.doc
        for name, proc in nlp.pipeline:
            doc = proc(doc)
        html = displacy.render(doc, style='dep', page=True)
        output_path = Path('sentence.html')
        # Context manager closes the handle (the original leaked one file
        # handle per instance).
        with output_path.open('w', encoding='utf-8') as html_file:
            html_file.write(html)
        myGraph = graph_creation(doc)
        string_drug1 = ''
        string_drug2 = ''
        for i in range(len(doc)):
            token = doc[i]
            text = token.text
            if text == 'PairDrug1':
                string_drug1 = text.lower() + '-' + str(i)
            if text == 'PairDrug2':
                string_drug2 = text.lower() + '-' + str(i)
        try:
            path = nx.shortest_path(myGraph, source=string_drug1, target=string_drug2)
        except NodeNotFound:
            instance.set_dependency_path(list())
            no_pair += 1
            continue
        except NetworkXNoPath:
            # No path through the syntactic tree between the two drugs.
            no_path += 1
            instance.set_dependency_path(list())
            continue
        path_with_labels = list()
        for i in range(len(path)-1):
            node = path[i]
            node_split = node.rsplit('-')
            next_node = path[i+1]
            next_split = next_node.rsplit('-')
            # Inject negation edges adjacent to path nodes.
            edges = myGraph[node]
            for j in edges:
                j_split = j.rsplit('-')
                e = edges[j]
                j_label = e['label']
                if j_label == 'neg':
                    path_with_labels.append((node_split[0], j_split[0], j_label))
            edge = myGraph[node][next_node]
            edge_label = edge['label']
            path_with_labels.append((node_split[0], next_split[0], edge_label))
        instance.set_dependency_path(path_with_labels)
def joint_negative_filtering(instances: List[JointInstance]):
    """Split *instances* into (selected, discarded) by cheap negative filters.

    Discards: pairs masked as two 'NoPair' tokens (self-pairs), and pairs
    whose dependency path contains only trivial nodes (the drug
    placeholders, 'drug', 'and').

    Bug fix: the original tested ``source != 'pairdrug1' or source != ...``,
    which is always true, so the dependency-path filter never discarded
    anything. The intended check — at least one path endpoint outside the
    trivial set — is implemented with ``not in``.
    """
    joint_path(instances)
    selected_instances = list()
    discarded_list = list()
    positive_neg = 0  # positives we discard anyway (diagnostic counter)
    trivial_nodes = ('pairdrug1', 'pairdrug2', 'and', 'drug')
    for instance in instances:
        doc = instance.doc
        text = doc.text
        class_val = instance.class_value
        nopair = text.count('NoPair')
        if nopair == 2:
            discarded_list.append(instance)
        else:
            dependency_path = instance.dependency_path
            found = False
            for (source, target, label) in dependency_path:
                if source not in trivial_nodes:
                    found = True
                if target not in trivial_nodes:
                    found = True
            if not found:
                discarded_list.append(instance)
                if class_val != 'false':
                    positive_neg += 1
            else:
                selected_instances.append(instance)
    return selected_instances, discarded_list
def joint_labelled_instances(instances: List[JointInstance]) -> (List[Doc], List[int]):
    """Extract (docs, one-hot labels) from *instances*.

    Class order: unrelated/'' -> 0, effect -> 1, mechanism -> 2,
    advise -> 3, int -> 4. Instances with any other class value contribute
    a doc but no label row, exactly like the original if-chain.
    """
    one_hot = {
        'unrelated': [1, 0, 0, 0, 0],
        '': [1, 0, 0, 0, 0],
        'effect': [0, 1, 0, 0, 0],
        'mechanism': [0, 0, 1, 0, 0],
        'advise': [0, 0, 0, 1, 0],
        'int': [0, 0, 0, 0, 1],
    }
    sents = []
    labels = []
    for instance in instances:
        sents.append(instance.doc)
        encoding = one_hot.get(instance.class_value)
        if encoding is not None:
            labels.append(encoding)
    labels_array = np.asarray(labels, dtype='int32')
    return sents, labels_array
def ner_labels_from_xml(path):
    """Return a dict mapping entity id -> entity type for the corpus at *path*."""
    labels_dict = dict()
    for xml_sentence in get_sentences(path):
        for entity in xml_sentence.getElementsByTagName('entity'):
            entity_id = entity.attributes['id'].value
            labels_dict[entity_id] = entity.attributes['type'].value
    return labels_dict
def double_format(test: bool = True):
    """Convert the corpus into head/tail paired tag sequences for joint
    NER + relation training.

    For each sentence and each head entity: build a first sequence with the
    head's NER tags (merged across heads at the end) and a second sequence
    tagging all tails related to that head with their relation class.
    Returns a list of SequencePair objects. `test` selects the test corpus
    (default) vs the training corpus.
    """
    if not test:
        id_sentences_gold = get_tokenized_sentences('TRAINING_TOKEN.txt', 'TRAINING_ID.txt')
        labels_dict = ner_labels_from_xml('Dataset/Train/Overall')
        xml_pairs = get_pairs_from_xml('Dataset/Train/Overall')
    else:
        id_sentences_gold = get_tokenized_sentences('MANUALLY_CHECKED_TOKEN.txt', 'MANUALLY_CHECKED_ID.txt')
        labels_dict = ner_labels_from_xml('Dataset/Test/Overall')
        xml_pairs = get_pairs_from_xml('Dataset/Test/Overall')
    all_pairs = list()
    complete_pairs = list()
    for i in range(len(id_sentences_gold)):
        print(i)
        s = id_sentences_gold[i]
        pairs = s.generate_pairs()
        heads = list(set([(p.head_id, p.head_interval) for p in pairs]))
        heads.sort()
        original_tokens = s.original_tokens
        head_sequences = list()
        tail_sequences = list()
        first_ids = list()
        second_ids = list()
        # Sentences without pairs still contribute an all-'O'/'N' sequence.
        if len(heads) == 0:
            seq_1 = list()
            seq_2 = list()
            for j in range(len(original_tokens)):
                token_1 = CompleteNERToken(original_tokens[j].word, 'O', 'O')
                token_2 = CompleteNERToken(original_tokens[j].word, 'N', 'N')
                seq_1.append(token_1)
                seq_2.append(token_2)
            head_sequences.append(seq_1)
            tail_sequences.append(seq_2)
        for head_id, interval in heads:
            first_ids.append(head_id)
            # First sequence: BIO tags only inside the head entity's span.
            first_sequence = list()
            label = labels_dict.get(head_id)
            for j in range(len(original_tokens)):
                word = original_tokens[j].word
                if interval.low <= j < interval.high and original_tokens[j].label != 'O':
                    if original_tokens[j].label.startswith('B'):
                        ner_label = 'B-'+label
                        ner_id = 'B-'+head_id
                    else:
                        ner_label = 'I-'+label
                        ner_id = 'I-'+head_id
                else:
                    ner_label = 'O'
                    ner_id = 'O'
                new_token = CompleteNERToken(word, ner_label, ner_id)
                first_sequence.append(new_token)
            tails = list(set([(p.tail_id, p.tail_interval) for p in pairs if p.head_id == head_id]))
            tails.sort()
            # Second sequence: relation-class tags on tokens of related tails.
            second_sequence = list()
            for k in range(len(original_tokens)):
                word = original_tokens[k].word
                if interval.low <= k < interval.high:
                    r_label = 'N'
                    r_id = 'O'
                else:
                    found = False
                    class_value = ''
                    second_id = -1
                    for tail_id, tail_interval in tails:
                        if tail_interval.low <= k <= tail_interval.high:
                            found = True
                            second_id = tail_id
                            if tail_id not in second_ids:
                                second_ids.append(tail_id)
                    if found:
                        for id1, id2, r_label in xml_pairs:
                            if id1 == head_id and id2 == second_id:
                                class_value = r_label
                        # Only mark instances that are not 'unrelated'.
                        if class_value != 'unrelated':
                            if original_tokens[k].label.startswith('B'):
                                r_label = 'B-' + class_value
                                r_id = 'B-'+second_id
                            else:
                                r_label = 'I-' + class_value
                                r_id = 'I-'+second_id
                        else:
                            r_label = 'N'
                            r_id = 'O'
                    else:
                        r_label = 'N'
                        r_id = 'O'
                second_sequence.append(CompleteNERToken(word, r_label, r_id))
            head_sequences.append(first_sequence)
            tail_sequences.append(second_sequence)
            # print([i.word + ' ' + i.label for i in first_sequence])
            # print([i.word + ' ' + i.label for i in second_sequence])
        # Merge the per-head sequences: first non-'O' tag per position wins.
        merged_first_sequence = list()
        for n in range(len(original_tokens)):
            final_label = 'O'
            final_id = 'O'
            for sequence in head_sequences:
                label = sequence[n].label
                if final_label == 'O' and label != 'O':
                    final_label = label
                id = sequence[n].id
                if final_id == 'O' and id !='O':
                    final_id = sequence[n].id
            merged_token = CompleteNERToken(original_tokens[n].word, final_label, final_id)
            merged_first_sequence.append(merged_token)
        # print([i.word + ' ' + i.label for i in merged_first_sequence])
        all_pairs += pairs
        for seq in tail_sequences:
            complete_pair = SequencePair(merged_first_sequence, seq)
            complete_pairs.append(complete_pair)
    return complete_pairs
# pairs = double_format()
# print(len(pairs))
| {"/error_check.py": ["/relation_format_extraction.py"], "/data_model.py": ["/constants.py"], "/char_embedding_test.py": ["/model.py", "/relation_format_extraction.py", "/constants.py", "/data_model.py", "/post_processing.py"], "/test.py": ["/post_processing.py", "/model.py", "/relation_format_extraction.py"], "/model.py": ["/constants.py"], "/relation_format_extraction.py": ["/data_model.py", "/constants.py"]} |
42,702 | rileythejones/lambdata | refs/heads/master | /lambdata__test.py | # import the unit test package and functions we want to test out
import unittest
from lambdata_rileythejones import CleanData
from lambdata_rileythejones.df_utils import df_null
from lambdata_rileythejones.df_utils import df_random, df_random_column
class LambdataTests(unittest.TestCase):
    # NOTE(review): the bare string literals between the methods below are
    # free-floating expression statements, not docstrings of the following
    # method; they are preserved here unchanged.
    """Test for existence of the sample data"""
    def test_df_samples(self):
        self.assertIsNotNone(df_null)
        self.assertIsNotNone(df_random)
    """Test the function that checks for nulls
    against two different dataframes"""
    def test_check_nulls(self):
        # NOTE(review): CleanData.check_nulls in this repository returns a
        # formatted string ("There are N null values..."), so comparing it
        # to the ints 1 and 0 appears to fail — verify against the package.
        dirty_data = CleanData(df_null)
        self.assertEqual(dirty_data.check_nulls(), 1)
        clean_data = CleanData(df_random)
        self.assertEqual(clean_data.check_nulls(), 0)
    """ Check that the shape of the filtered dataframe
    is different from the original"""
    def test_outlier_filter(self):
        # Random data virtually always contains at least one >2-sigma row.
        df_filtered = CleanData(df_random).outlier_filter()
        self.assertNotEqual(df_filtered.shape, df_random.shape)
    """ Test that the number of unique values
    is the same as what's specified"""
    def test_bag_tag(self):
        df_category = CleanData(df_random).bag_tag(df_random_column, 100)
        self.assertEqual(len(df_category.unique()), 100)
if __name__ == '__main__':
unittest.main()
| {"/lambdata__test.py": ["/lambdata_rileythejones/__init__.py", "/lambdata_rileythejones/df_utils.py"]} |
42,703 | rileythejones/lambdata | refs/heads/master | /lambdata_rileythejones/df_utils.py | """"
utility functions for working with DataFrames
"""
import pandas as pd
import numpy as np
# 12-row single-column frame containing exactly one NaN (null-detection fixture).
df_null = pd.DataFrame([1, 2, 3, 4, 5, 6, 7, 8, 9, np.NaN, 0, 0])
# 100x3 frame of standard-normal samples (regenerated on every import).
df_random = pd.DataFrame(np.random.randn(100, 3))
# First column of df_random, used as a Series fixture for binning tests.
df_random_column = df_random[0]
| {"/lambdata__test.py": ["/lambdata_rileythejones/__init__.py", "/lambdata_rileythejones/df_utils.py"]} |
42,704 | rileythejones/lambdata | refs/heads/master | /lambdata_rileythejones/__init__.py | """lambdata_rileythejones - a collection of data science helper functions """
import pandas as pd
import numpy as np
from scipy import stats
class CleanData:
    """Functions to clean a pandas DataFrame."""

    def __init__(self, df):
        # The DataFrame to operate on; methods never mutate it.
        self.df = df

    def check_nulls(self):
        """Return the total number of null values in the entire dataframe.

        Returns an int (the original returned a formatted string, which
        contradicted this docstring and broke the package's own unit test
        asserting equality with a plain number).
        """
        return int(self.df.isnull().sum().sum())

    def outlier_filter(self, deviations=2):
        """Remove rows with at least one value more than *deviations*
        standard deviations from its column mean."""
        return self.df[(np.abs(stats.zscore(
            self.df)) < deviations).all(axis=1)]

    def bag_tag(self, data, segments=10):
        """Bin *data* into *segments* equal-frequency quantile bins and
        return integer labels 1..segments (proportional bin rank)."""
        edge = segments + 1
        labels = range(1, edge, 1)
        return pd.qcut(data, q=segments, labels=labels)
| {"/lambdata__test.py": ["/lambdata_rileythejones/__init__.py", "/lambdata_rileythejones/df_utils.py"]} |
42,729 | CrawlAll/ZuipinTea | refs/heads/master | /zuipin_tea/spiders/goods_info.py | # -*- coding: utf-8 -*-
import re
import scrapy
from zuipin_tea.items import GoodsItem
class GoodsInfoSpider(scrapy.Spider):
    """Crawl zuipin.cn: category index -> paginated listings -> goods detail
    pages, yielding one GoodsItem per product."""
    name = 'goods_info'
    allowed_domains = ['zuipin.cn']
    # start_urls = ['https://www.zuipin.cn/goods?id=ZONGY0840-250']
    start_urls = ['https://www.zuipin.cn/']

    def parse(self, response):
        """Walk the homepage category menu and request each subcategory list."""
        big_title_list = response.xpath("//dl[@class='clearfix']")
        for big_title in big_title_list:
            title = big_title.xpath("./dt[@class='big-title float-left']/a/text()").extract_first()
            small_list = big_title.xpath(".//li/a[2]")
            for small in small_list:
                # Keep only the CJK part of the subcategory label.
                detail = small.xpath("./text()").re_first(r"[\u4e00-\u9fa5]+")
                part_url = small.xpath("./@href").extract_first()
                url = response.urljoin(part_url)
                yield scrapy.Request(url=url,
                                     callback=self.second_parse,
                                     meta={'title': title, 'detail': detail, 'page': 1, 'base_url': url})

    def second_parse(self, response):
        """Paginate a subcategory listing and request each product page."""
        total_page = response.xpath("//span[@class='endClass']/text()").re_first('\d')
        title, detail = response.meta['title'], response.meta['detail']
        page, base_url = response.meta['page'], response.meta['base_url']
        # Guard against picking up an abnormal page count.
        if total_page:
            next_page = page + 1
            if next_page <= int(total_page):
                url = base_url + '&page={}'.format(next_page)
                yield scrapy.Request(url=url,
                                     callback=self.second_parse,
                                     meta={'title': title, 'detail': detail, 'page': next_page, 'base_url': url})
            else:
                self.logger.info(f'总页数{total_page} url={response.url}')
        else:
            self.logger.error(f'没有页数了??? url={response.url}')
        info_url_list = response.xpath("//div[@class='item float-left']//a/@href").extract()
        for part_info_url in info_url_list:
            info_url = response.urljoin(part_info_url)
            yield scrapy.Request(url=info_url,
                                 callback=self.info_parse,
                                 meta={'title': title, 'detail': detail})

    def info_parse(self, response):
        """Scrape one product page into a GoodsItem."""
        # def parse(self, response):
        item = GoodsItem()
        url = response.url
        title, detail = response.meta['title'], response.meta['detail']
        goods_title = response.xpath("//h1[@class='g-title']/text()").extract_first()
        goods_desc = response.xpath("//p[@class='g-scr']/text()").extract_first()
        scj_price0 = response.xpath("//del[@class='scj']/text()").re_first("\S+")
        scj_price = f"{scj_price0}元"
        zp_price0 = response.xpath("//span[@class='zp hy ']/text()").re_first("\S+")
        zp_price = f"{zp_price0}元"
        brand = response.xpath("//ul[@class='clearfix']/li[1]/span[@class='g-con']/text()").extract_first()
        weight = response.xpath("//ul[@class='clearfix']/li[2]/span[@class='g-con']/text()").extract_first()
        goods_id = response.xpath("//ul[@class='clearfix']/li[3]/span[@class='g-con']/text()").extract_first()
        xqms = response.xpath("//dt/ul[@class='clearfix']/li")  # detailed-description rows
        detail_desc = [''.join(i.xpath('string(.)').re("\S+")) for i in xqms]
        ret = re.search(r"var proExtId = '(\d+)';", response.text)
        pl_id = ret.group(1) if ret else ret  # review/comment id (None if absent)
        # HACK: eval(field) picks up the local variable named after each item
        # field. It works because field names mirror the locals above, but an
        # explicit dict of locals would be safer and clearer.
        for field in item.fields:
            item[field] = eval(field)
        yield item
| {"/zuipin_tea/spiders/goods_info.py": ["/zuipin_tea/items.py"]} |
42,730 | CrawlAll/ZuipinTea | refs/heads/master | /zuipin_tea/items.py | # -*- coding: utf-8 -*-
import scrapy
class GoodsItem(scrapy.Item):
    """One tea product scraped from zuipin.cn.

    All fields are populated in GoodsInfoSpider.info_parse; field names must
    stay in sync with the keys assigned there.
    """
    title = scrapy.Field()        # top-level category name from the home-page menu
    detail = scrapy.Field()       # sub-category label (Chinese characters only)
    goods_title = scrapy.Field()  # product page headline
    goods_desc = scrapy.Field()   # short product blurb
    scj_price = scrapy.Field()    # struck-through price with "元" suffix (presumably market price — verify)
    zp_price = scrapy.Field()     # displayed price with "元" suffix (presumably site/member price — verify)
    brand = scrapy.Field()        # first row of the product attribute list
    weight = scrapy.Field()       # second row of the product attribute list
    goods_id = scrapy.Field()     # third row of the product attribute list
    detail_desc = scrapy.Field()  # list of spec strings from the detail table
    pl_id = scrapy.Field()        # comment-widget id parsed from inline JS (proExtId)
    url = scrapy.Field()          # product detail page URL
| {"/zuipin_tea/spiders/goods_info.py": ["/zuipin_tea/items.py"]} |
42,731 | CrawlAll/ZuipinTea | refs/heads/master | /zuipin_tea/middlewares.py | # -*- coding: utf-8 -*-
import random
class RandomUserAgentMiddleware(object):
    """Downloader middleware that stamps each request with a random User-Agent.

    The pool of user-agent strings comes from the PC_USER_AGENT setting.
    """

    def __init__(self, user_agent):
        # Sequence of user-agent strings to pick from.
        self.user_agent = user_agent

    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware from the crawler's PC_USER_AGENT setting."""
        return cls(user_agent=crawler.settings.get('PC_USER_AGENT'))

    def process_request(self, request, spider):
        """Set a randomly chosen User-Agent header unless one is already set."""
        chosen = random.choice(self.user_agent)
        request.headers.setdefault("User-Agent", chosen)
| {"/zuipin_tea/spiders/goods_info.py": ["/zuipin_tea/items.py"]} |
42,754 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /CS235Flix/adapters/memory_repository.py | import csv
import os
from datetime import datetime
from typing import List
from bisect import bisect_left, insort_left
from werkzeug.security import generate_password_hash
from CS235Flix.adapters.repository import AbstractRepository
from CS235Flix.domain.model import Actor, Director, Genre, Movie, Review, User, make_review
class MemoryRepository(AbstractRepository):
    """In-memory AbstractRepository backed by plain lists and dicts.

    Movies are kept in a list sorted by Movie's natural ordering, plus a
    rank -> Movie index for direct lookup. The year/genre/actor/director
    indexes are injected wholesale by the loader functions below.
    """

    def __init__(self):
        self._movies = list()          # Movies kept ordered via insort_left
        self._movies_index = dict()    # rank -> Movie
        self._users = list()
        self._reviews = dict()         # Movie -> [Review, ...]
        self._movie_year = dict()      # year -> [Movie, ...]
        self._movie_genre = dict()     # Genre -> [Movie, ...]
        self._movie_actor = dict()     # Actor -> [Movie, ...]
        self._movie_director = dict()  # Director -> [Movie, ...]
        self._genre_year = ['All', 'All']  # currently selected [genre, year] filters

    def update_genre_year(self, genre_year, value):
        """Store *value* as the genre filter when genre_year == 'genre',
        otherwise as the year filter; return the [genre, year] pair."""
        if genre_year == 'genre':
            self._genre_year[0] = value
        else:
            self._genre_year[1] = value
        return self._genre_year

    def add_user(self, user: User):
        self._users.append(user)

    def get_user(self, username) -> User:
        """Return the user with the given (exact) username, or None."""
        return next((user for user in self._users if user.username == username), None)

    def add_movie(self, movie: Movie):
        """Insert *movie* keeping the list sorted, and index it by rank."""
        insort_left(self._movies, movie)
        self._movies_index[movie.rank] = movie

    def add_year(self, dict_year):
        self._movie_year = dict_year

    def add_genres(self, dict_genre):
        self._movie_genre = dict_genre

    def add_director(self, dict_director):
        self._movie_director = dict_director

    def add_actor(self, dict_actor):
        self._movie_actor = dict_actor

    def get_genres(self):
        return self._movie_genre

    def get_years(self):
        return self._movie_year

    def get_actors(self):
        return self._movie_actor

    def get_directors(self):
        return self._movie_director

    def get_movies(self):
        return self._movies

    def get_movie(self, rank: int) -> Movie:
        """Return the movie with the given rank, or None when unknown."""
        movie = None
        try:
            movie = self._movies_index[rank]
        except KeyError:
            print("KeyError has been raised!")
        return movie

    def get_number_of_movies(self):
        return len(self._movies)

    def get_movies_by_year(self, year) -> List[Movie]:
        """Return the movies released in *year*, or None when unknown.

        BUG FIX: `movies` was previously left unbound when the lookup raised
        KeyError, so `return movies` crashed with UnboundLocalError. It now
        returns None, matching get_movie's convention.
        """
        movies = None
        try:
            movies = self._movie_year[int(year)]
        except KeyError:
            print("KeyError has been raised!")
        return movies

    def get_movies_by_genre(self, genre):
        """Return the movies tagged with *genre* (a genre name), or None."""
        movies = None  # BUG FIX: previously unbound on KeyError -> UnboundLocalError
        try:
            movies = self._movie_genre[Genre(genre)]
        except KeyError:
            print("KeyError has been raised!")
        return movies

    def get_movies_by_director(self, director):
        """Return the movies by *director* (a director name), or None."""
        movies = None  # BUG FIX: previously unbound on KeyError -> UnboundLocalError
        try:
            movies = self._movie_director[Director(director)]
        except KeyError:
            print("KeyError has been raised!")
        return movies

    def get_movies_by_actor(self, actor):
        """Return the movies featuring *actor* (an actor name), or None."""
        movies = None  # BUG FIX: previously unbound on KeyError -> UnboundLocalError
        try:
            movies = self._movie_actor[Actor(actor)]
        except KeyError:
            print("KeyError has been raised!")
        return movies

    def add_review(self, review: Review):
        """Append *review* to its movie's review list, creating it on first use."""
        if review.movie not in self._reviews:
            self._reviews[review.movie] = [review]
        else:
            self._reviews[review.movie].append(review)

    def get_reviews(self, rank: int):
        """Return the reviews of the movie with *rank* ([] when none exist)."""
        movie = self._movies_index[rank]
        if movie in self._reviews:
            return self._reviews[movie]
        else:
            return []

    # Helper method to return a movie's position in the sorted movie list.
    def movie_index(self, movie: Movie):
        # NOTE(review): only the year is compared after bisecting; this assumes
        # the title at the insertion point already matches -- verify.
        index = bisect_left(self._movies, movie)
        if index != len(self._movies) and self._movies[index].year == movie.year:
            return index
        raise ValueError
def read_csv_file(filename: str):
    """Yield each row of *filename* as a list of whitespace-stripped cells.

    The file is opened with the utf-8-sig codec so a leading BOM, if any,
    is transparently discarded.
    """
    with open(filename, encoding='utf-8-sig') as infile:
        for raw_row in csv.reader(infile):
            yield [cell.strip() for cell in raw_row]
def load_movies(data_path: str, repo: MemoryRepository):
    """Load Data1000Movies.csv from *data_path* into *repo*.

    Builds each Movie with its genres, actors, director and numeric
    attributes, then registers the per-year/genre/actor/director indexes
    on the repository.

    CLEANUP: the original also built four `dataset_of_*` lists that were
    never read, and used O(n) list membership where the parallel dicts
    already answered the same question; both are removed / replaced with
    `dict.setdefault` grouping.
    """
    movies_by_year = {}
    movies_by_genre = {}
    movies_by_actor = {}
    movies_by_director = {}
    for row_number, row in enumerate(read_csv_file(os.path.join(data_path, 'Data1000Movies.csv'))):
        if row_number == 0:
            continue  # skip the CSV header row
        rank = int(row[0])
        title = row[1]
        genres = row[2].split(',')
        description = row[3]
        director = Director(str(row[4]))
        actors = row[5].split(',')
        release_year = int(row[6])
        runtime_minutes = int(row[7])
        rating = float(row[8])
        votes = int(row[9])
        revenue_millions = row[10]
        metascore = row[11]
        image_hyperlink = row[12]

        # Create the movie and group it by release year.
        movie = Movie(title, release_year)
        movies_by_year.setdefault(movie.year, []).append(movie)

        # Attach the cast and group by actor.
        for actor_name in actors:
            actor_obj = Actor(actor_name)
            movie.add_actor(actor_obj)
            movies_by_actor.setdefault(actor_obj, []).append(movie)

        # Attach the director and group by director.
        movie.director = director
        movies_by_director.setdefault(director, []).append(movie)

        # Attach the genres and group by genre.
        for genre_name in genres:
            genre_obj = Genre(genre_name)
            movie.add_genre(genre_obj)
            movies_by_genre.setdefault(genre_obj, []).append(movie)

        # Remaining scalar attributes (setters validate their ranges).
        movie.description = description
        movie.runtime_minutes = runtime_minutes
        movie.rank = rank
        movie.rating = rating
        movie.votes = votes
        movie.revenue_millions = revenue_millions
        movie.metascore = metascore
        movie.image_hyperlink = image_hyperlink

        # Add the Movie to the repository.
        repo.add_movie(movie)

    repo.add_year(movies_by_year)
    repo.add_genres(movies_by_genre)
    repo.add_director(movies_by_director)
    repo.add_actor(movies_by_actor)
def load_users(data_path: str, repo: MemoryRepository):
    """Load users.csv into *repo*; return a dict mapping csv user-id -> User.

    Passwords are hashed before the User object is created.
    """
    users = dict()
    rows = read_csv_file(os.path.join(data_path, 'users.csv'))
    for index, data_row in enumerate(rows):
        if index == 0:
            continue  # header row
        new_user = User(
            username=data_row[1],
            password=generate_password_hash(data_row[2])
        )
        repo.add_user(new_user)
        users[data_row[0]] = new_user
    return users
def load_reviews(data_path: str, repo: MemoryRepository, users):
    """Load reviews.csv, wiring each review to its movie (by rank) and user (by id)."""
    rows = read_csv_file(os.path.join(data_path, 'reviews.csv'))
    for index, data_row in enumerate(rows):
        if index == 0:
            continue  # header row
        new_review = make_review(
            review_text=data_row[3],
            user=users[data_row[1]],
            movie=repo.get_movie(int(data_row[2])),
            timestamp=datetime.fromisoformat(data_row[4]),
            rating=int(data_row[5])
        )
        repo.add_review(new_review)
def populate(data_path: str, repo: MemoryRepository):
    """Fill *repo* from the CSV data directory: movies first, then users,
    then the reviews that reference both."""
    load_movies(data_path, repo)
    user_map = load_users(data_path, repo)
    load_reviews(data_path, repo, user_map)
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,755 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/data/Access to CSV/csv reader.py | import omdb
from csv import writer
from csv import reader
import ast
default_text = 'Some Text'
import requests
url = "http://www.omdbapi.com/"
# Open the input_file in read mode and output_file in write mode
with open('test.csv', 'r') as read_obj, \
open('test_output.csv', 'w', newline='') as write_obj:
# Create a csv.reader object from the input file object
csv_reader = reader(read_obj)
# Create a csv.writer object from the output file object
csv_writer = writer(write_obj)
# Read each row of the input csv file as list
for row in csv_reader:
title = row[1]
querystring = {"apikey":"15439843","t":title} # your parameters here in query string
headers = { 'Cache-Control': "no-cache", 'Connection': "keep-alive", 'cache-control': "no-cache" }
response = requests.request("GET", url, headers=headers, params=querystring)
print(title)
dict1 = ast.literal_eval(response.text)
if dict1['Response']=='True'and dict1['Poster']!='N/A':
# Append the default text in the row / list
row.append(dict1['Poster'])
# Add the updated row / list to the output file
csv_writer.writerow(row)
else:
# Append the default text in the row / list
row.append("None")
# Add the updated row / list to the output file
csv_writer.writerow(row)
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,756 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_domain_model/test_movie.py | from CS235Flix.domain.model import Movie, Genre, Actor, Director
import pytest
@pytest.fixture()
def movie():
    """Provide a deliberately-invalid Movie (empty title, year 0 -> both None)."""
    return Movie(title="", year=0)
def test_init(movie):
    """Check Movie construction, equality, ordering and hashing (cr3)."""
    # Valid construction.
    movie1 = Movie("Moana", 2016)
    assert repr(movie1) == "<Movie Moana, 2016>"
    # Years before 1900 invalidate the whole movie (title becomes None).
    movie3 = Movie("Moana", 1899)
    assert movie3.title is None
    # check for equality of two Movie object instances by comparing title and year
    movie4 = Movie("Moana", 2016)
    assert (movie1 == movie4) == True
    # implement a sorting order defined by the name
    a_list = []
    movie5 = Movie("Yasuo The Retarded", 2015)
    a_list.append(movie5)
    a_list.append(movie1)
    a_list.sort()
    assert a_list[0] == movie1
    # defines which attribute is used for computing a hash value as used in set or dictionary keys
    movies = {movie1, movie4}
    assert len(movies) == 1
    # Same title, different year -> distinct hash bucket.
    movie5 = Movie("Moana", 2015)
    movies = {movie1, movie5}
    assert len(movies) == 2
def test_properties(movie):
    """Exercise every Movie property setter on both valid and invalid input.

    Invalid values are expected to either be silently ignored (leaving the
    previous value intact) or raise ValueError, depending on the setter.
    """
    movie1 = Movie("Moana", 2016)
    movie1.title = "Hokage" # legal title
    assert repr(movie1) == "<Movie Hokage, 2016>"
    movie1.title = 1234 # illegal title
    assert repr(movie1) == "<Movie Hokage, 2016>"
    movie2 = Movie("Raikage", 2004)
    movie2.description = " Faster than speed of light for real " # legal description
    assert movie2.description == "Faster than speed of light for real"
    movie2.description = "" # illegal description
    assert movie2.description == "Faster than speed of light for real"
    movie3 = Movie("Moana", 2016)
    actor = Actor("Jacinda Adern")
    director = Director("Ron Clements")
    movie3.director = actor #illegal director
    assert movie3.director is None
    movie3.director = director #legal director
    assert repr(movie3.director) == "<Director Ron Clements>"
    actors = [Actor("Auli'i Cravalho"), Actor("Dwayne Johnson"), Actor("Rachel House"), Actor("Temuera Morrison")]
    for actor in actors:
        movie3.add_actor(actor) ##legal adding actor
    assert str(movie3.actors) == "[<Actor Auli'i Cravalho>, <Actor Dwayne Johnson>, <Actor Rachel House>, <Actor Temuera Morrison>]"
    movie3.add_actor(director) ##illegal adding actor
    assert str(movie3.actors) == "[<Actor Auli'i Cravalho>, <Actor Dwayne Johnson>, <Actor Rachel House>, <Actor Temuera Morrison>]"
    movie3.remove_actor(Actor("Rachel House")) ##legal remove actor
    assert str(movie3.actors) == "[<Actor Auli'i Cravalho>, <Actor Dwayne Johnson>, <Actor Temuera Morrison>]"
    movie3.remove_actor(director) ##illegal remove actor
    assert str(movie3.actors) == "[<Actor Auli'i Cravalho>, <Actor Dwayne Johnson>, <Actor Temuera Morrison>]"
    movie3.actors = Actor("Dwayne Johnson") ##test setter
    assert str(movie3.actors) == "[<Actor Dwayne Johnson>]"
    genres = [Genre("Comedy"), Genre("Action"), Genre("Disney"), Genre("Romantic")]
    for genre in genres:
        movie3.add_genre(genre) ##legal adding genre
    assert str(sorted(movie3.genres)) == "[<Genre Action>, <Genre Comedy>, <Genre Disney>, <Genre Romantic>]"
    movie3.add_genre(director) ##illegal adding genre
    assert str(movie3.genres) == "[<Genre Comedy>, <Genre Action>, <Genre Disney>, <Genre Romantic>]"
    movie3.remove_genre(Genre("Romantic")) ##legal remove genre
    assert str(movie3.genres) == "[<Genre Comedy>, <Genre Action>, <Genre Disney>]"
    movie3.remove_genre(director) ##illegal remove genre
    assert str(movie3.genres) == "[<Genre Comedy>, <Genre Action>, <Genre Disney>]"
    movie3.genres = Genre("Comedy") ##test setter
    assert str(movie3.genres) == "[<Genre Comedy>]"
    movie3.runtime_minutes = 107 ## legal runtime
    assert "Movie runtime: {} minutes".format(movie3.runtime_minutes) == "Movie runtime: 107 minutes"
    with pytest.raises(ValueError):
        movie3.runtime_minutes = -1 ## illegal runtime
    ###################################### test extension ######################################
    movie3.rank = 185 ## legal rank
    assert movie3.rank == 185
    with pytest.raises(ValueError):
        movie3.rank = -1 ## illegal rank
    movie3.rating = 8.1 ## legal rating
    assert movie3.rating == 8.1
    with pytest.raises(ValueError):
        movie3.rating = 11 ## illegal rating
    movie3.votes = 107583 ## legal votes
    assert movie3.votes == 107583
    with pytest.raises(ValueError):
        movie3.votes = -1 ## illegal votes
    movie3.revenue_millions = 510.365 ## legal revenue_millions
    assert movie3.revenue_millions == 510.365
    with pytest.raises(ValueError):
        movie3.revenue_millions = -510.365 ## illegal revenue_millions
    movie3.metascore = 91.6 ## legal metascore
    assert movie3.metascore == 91.6
    with pytest.raises(ValueError):
        movie3.metascore = -91.6 ## illegal metascore
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,757 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_domain_model/test_review.py | from CS235Flix.domain.model import Movie, Actor, Review, User
import pytest
from datetime import datetime
@pytest.fixture()
def review():
    """Provide a placeholder Review built from empty user/movie data."""
    now = datetime.today()
    return Review(User("", ""), Movie("", 0), "", now, 0)
def test_init(review):
    """Build a real Review, then exercise its setters and equality (cr3)."""
    user = User('Steve', 'pass123')
    movie = Movie("Moana", 2016)
    review_text = "This movie was very enjoyable."
    rating = 8
    timestamp = datetime.today()
    review = Review(user, movie, review_text, timestamp, rating)
    ## test cr3: basic construction
    assert repr(review.movie) == "<Movie Moana, 2016>"
    assert "Review: {}".format(review.review_text) == "Review: This movie was very enjoyable."
    assert "Rating: {}".format(review.rating) == "Rating: 8"
    ##test movie setter: non-Movie values are ignored
    actor = Actor("Will Smith")
    review.movie = actor ##illegal
    assert repr(review.movie) == "<Movie Moana, 2016>"
    movie = Movie("Will Smith smith Will Smith?", 1900)
    review.movie = movie ##legal
    assert repr(review.movie) == "<Movie Will Smith smith Will Smith?, 1900>"
    ##test review text setter: non-str values are ignored
    review.review_text = 1900 ##illegal
    assert review.review_text == "This movie was very enjoyable."
    review.review_text = "Will Smith will smith Will Smith" ##legal
    assert review.review_text == "Will Smith will smith Will Smith"
    ##test rating setter: only ints 1..10 are accepted
    review.rating = 10.1
    assert review.rating == 8
    review.rating = 9
    assert review.rating == 9
    ##test __eq__: equality covers movie, text, rating and timestamp
    movie = Movie("Will Smith smith Will Smith?", 1900)
    review_text = "Will Smith will smith Will Smith"
    rating = 9
    review1 = Review(user, movie, review_text, timestamp, rating)
    assert review == review1
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,758 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /CS235Flix/domain/model.py | from datetime import datetime
class Actor:
    """A film actor identified by full name.

    An empty or non-str name yields actor_full_name == None. Equality,
    ordering and hashing all use the (stripped) name.
    """

    def __init__(self, actor_full_name: str):
        # Public list of Actors this actor has worked with.
        self.colleagues = []
        if actor_full_name == "" or type(actor_full_name) is not str:
            self.__actor_full_name = None
        else:
            self.__actor_full_name = actor_full_name.strip()

    @property
    def actor_full_name(self) -> str:
        return self.__actor_full_name

    def __repr__(self):
        return f"<Actor {self.__actor_full_name}>"

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Actor used to raise AttributeError
        # (private-name access on a foreign type); now it is simply unequal.
        if not isinstance(other, Actor):
            return False
        return self.__actor_full_name == other.__actor_full_name

    def __lt__(self, other):
        # Let Python fall back / raise TypeError for non-Actor operands.
        if not isinstance(other, Actor):
            return NotImplemented
        return self.__actor_full_name < other.__actor_full_name

    def __hash__(self):
        return hash(self.__actor_full_name)

    def add_actor_colleague(self, other):
        self.colleagues.append(other)

    def check_if_this_actor_worked_with(self, other):
        return other in self.colleagues
class Director:
    """A film director identified by full name.

    An empty or non-str name yields director_full_name == None. Equality,
    ordering and hashing all use the (stripped) name.
    """

    def __init__(self, director_full_name: str):
        if director_full_name == "" or type(director_full_name) is not str:
            self.__director_full_name = None
        else:
            self.__director_full_name = director_full_name.strip()

    @property
    def director_full_name(self) -> str:
        return self.__director_full_name

    def __repr__(self):
        return f"<Director {self.__director_full_name}>"

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Director used to raise
        # AttributeError; now it is simply unequal.
        if not isinstance(other, Director):
            return False
        return self.__director_full_name == other.__director_full_name

    def __lt__(self, other):
        if not isinstance(other, Director):
            return NotImplemented
        return self.__director_full_name < other.__director_full_name

    def __hash__(self):
        return hash(self.__director_full_name)
class Genre:
    """A movie genre identified by name.

    An empty or non-str name yields genre_name == None. Equality, ordering
    and hashing all use the (stripped) name.
    """

    def __init__(self, genre_name: str):
        if genre_name == "" or type(genre_name) is not str:
            self.__genre_name = None
        else:
            self.__genre_name = genre_name.strip()

    @property
    def genre_name(self) -> str:
        return self.__genre_name

    def __repr__(self):
        return f"<Genre {self.__genre_name}>"

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Genre used to raise
        # AttributeError; now it is simply unequal.
        if not isinstance(other, Genre):
            return False
        return self.__genre_name == other.__genre_name

    def __lt__(self, other):
        if not isinstance(other, Genre):
            return NotImplemented
        return self.__genre_name < other.__genre_name

    def __hash__(self):
        return hash(self.__genre_name)
class Movie:
    """Domain object for a single movie.

    A Movie is identified by (title, year): equality, ordering and hashing
    all use that pair. Invalid constructor input (empty/non-str title, or a
    non-int year / year before 1900) leaves both title and year as None.

    CLEANUP: the original class defined the `year` property and its setter
    twice (identical dead duplicates); only one definition is kept.
    """

    def __init__(self, title: str, year: int):
        self.__description = None
        self.__director = None
        self.__actors = []
        self.__genres = []
        self.__runtime_minutes = None
        self.__rank = None
        self.__rating = None
        self.__votes = None
        self.__revenue_millions = None
        self.__metascore = None
        self.__reviews = []
        self.__image_hyperlink = None
        # BUG FIX: type checks come first so a non-int year can no longer
        # reach the `year < 1900` comparison and raise TypeError.
        if type(title) is not str or title == "" or type(year) is not int or year < 1900:
            self.__title = None
            self.__year = None
        else:
            self.__title = title.strip()
            self.__year = year

    @property
    def title(self) -> str:
        return self.__title

    @property
    def year(self) -> int:
        # Annotation corrected: the year is stored as an int (or None).
        return self.__year

    @property
    def description(self) -> str:
        return self.__description

    @property
    def director(self):
        return self.__director

    @property
    def actors(self) -> list:
        return self.__actors

    @property
    def genres(self) -> list:
        return self.__genres

    @property
    def runtime_minutes(self) -> int:
        return self.__runtime_minutes

    @property
    def image_hyperlink(self) -> str:
        return self.__image_hyperlink

    @property
    def rank(self) -> int:
        return self.__rank

    @property
    def rating(self):
        return self.__rating

    @property
    def votes(self) -> int:
        return self.__votes

    @property
    def revenue_millions(self):
        # float, or the literal string "N/A" when unknown.
        return self.__revenue_millions

    @property
    def metascore(self):
        # float in [0, 100], or the literal string "N/A" when unknown.
        return self.__metascore

    @property
    def reviews(self):
        # Exposed as an iterator so callers cannot mutate the backing list.
        return iter(self.__reviews)

    @title.setter
    def title(self, title):
        # Invalid values (empty or non-str) are silently ignored.
        if title != "" and type(title) is str:
            self.__title = title.strip()

    @year.setter
    def year(self, year):
        if type(year) is int and year >= 1900:
            self.__year = year
        else:
            raise ValueError("ValueError exception thrown")

    @description.setter
    def description(self, description):
        if description != "" and type(description) is str:
            self.__description = description.strip()

    @director.setter
    def director(self, director):
        # Non-Director values are silently ignored.
        if isinstance(director, Director):
            self.__director = director

    @actors.setter
    def actors(self, actor):
        # Replaces the whole cast with a single actor.
        if isinstance(actor, Actor):
            self.__actors = [actor]

    @genres.setter
    def genres(self, genre):
        # Replaces all genres with a single genre.
        if isinstance(genre, Genre):
            self.__genres = [genre]

    @runtime_minutes.setter
    def runtime_minutes(self, runtime_minutes):
        if type(runtime_minutes) is int and runtime_minutes > 0:
            self.__runtime_minutes = runtime_minutes
        else:
            raise ValueError("ValueError exception thrown")

    @image_hyperlink.setter
    def image_hyperlink(self, image_hyperlink: str):
        self.__image_hyperlink = image_hyperlink

    @rank.setter
    def rank(self, rank):
        if type(rank) is int and rank >= 0:
            self.__rank = rank
        else:
            raise ValueError("ValueError exception thrown")

    @rating.setter
    def rating(self, rating):
        if 0 <= rating <= 10:
            self.__rating = rating
        else:
            raise ValueError("ValueError exception thrown")

    @votes.setter
    def votes(self, votes):
        if type(votes) is int and votes >= 0:
            self.__votes = votes
        else:
            raise ValueError("ValueError exception thrown")

    @revenue_millions.setter
    def revenue_millions(self, revenue_millions):
        # Accepts the sentinel "N/A" or any non-negative number / numeric str.
        if revenue_millions == "N/A":
            self.__revenue_millions = "N/A"
        elif float(revenue_millions) >= 0:
            self.__revenue_millions = float(revenue_millions)
        else:
            raise ValueError("ValueError exception thrown")

    @metascore.setter
    def metascore(self, metascore):
        # Accepts the sentinel "N/A" or a number / numeric str in [0, 100].
        if metascore == "N/A":
            self.__metascore = "N/A"
        elif 0 <= float(metascore) <= 100:
            self.__metascore = float(metascore)
        else:
            raise ValueError("ValueError exception thrown")

    @reviews.setter
    def reviews(self, reviews):
        # Replaces all reviews with a single review.
        if isinstance(reviews, Review):
            self.__reviews = [reviews]

    def __repr__(self):
        return f"<Movie {self.__title}, {self.__year}>"

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Movie used to raise
        # AttributeError; now it is simply unequal.
        if not isinstance(other, Movie):
            return False
        return self.__title == other.__title and self.__year == other.__year

    def __lt__(self, other):
        if not isinstance(other, Movie):
            return NotImplemented
        return (self.__title, self.__year) < (other.__title, other.__year)

    def __hash__(self):
        return hash((self.__title, self.__year))

    def add_actor(self, actor):
        if isinstance(actor, Actor):
            self.__actors.append(actor)

    def remove_actor(self, actor):
        if isinstance(actor, Actor) and actor in self.__actors:
            self.__actors.pop(self.__actors.index(actor))

    def add_genre(self, genre):
        if isinstance(genre, Genre):
            self.__genres.append(genre)

    def remove_genre(self, genre):
        if isinstance(genre, Genre) and genre in self.__genres:
            self.__genres.pop(self.__genres.index(genre))

    def is_tagged_by(self, genre):
        return genre in self.__genres

    def add_review(self, review):
        if isinstance(review, Review):
            self.__reviews.append(review)
class User:
    """A registered user: credentials plus watch history, reviews and the
    total minutes of movies watched.

    Equality and hashing are case-insensitive on the username.
    """

    def __init__(self, username: str, password: str):
        self.__watched_movies = []
        self.__reviews = []
        self.__time_spent_watching_movies_minutes = 0
        self.__username: str = username
        self.__password: str = password

    @property
    def username(self):
        return self.__username

    @username.setter
    def username(self, new_name):
        # Empty or non-str names are silently ignored; valid names are stripped.
        if new_name != "" and type(new_name) is str:
            self.__username = new_name.strip()

    @property
    def password(self):
        return self.__password

    @password.setter
    def password(self, new_password):
        # Empty or non-str passwords are silently ignored.
        if new_password != "" and type(new_password) is str:
            self.__password = new_password

    @property
    def watched_movies(self):
        return self.__watched_movies

    @watched_movies.setter
    def watched_movies(self, movie):
        # Replaces the whole history with a single movie.
        if isinstance(movie, Movie):
            self.__watched_movies = [movie]

    @property
    def reviews(self):
        return self.__reviews

    @reviews.setter
    def reviews(self, review):
        # Replaces all reviews with a single review.
        if isinstance(review, Review):
            self.__reviews = [review]

    @property
    def time_spent_watching_movies_minutes(self):
        return self.__time_spent_watching_movies_minutes

    @time_spent_watching_movies_minutes.setter
    def time_spent_watching_movies_minutes(self, minutes):
        # Negative or non-int values are silently ignored.
        if type(minutes) is int and minutes >= 0:
            self.__time_spent_watching_movies_minutes = minutes

    def __repr__(self):
        return f'<User {self.__username} {self.__password}>'

    def __eq__(self, other):
        if not isinstance(other, User):
            return False
        return other.__username.lower() == self.__username.lower()

    def __lt__(self, other):
        return self.__username.lower() < other.__username.lower()

    def __hash__(self):
        return hash(self.__username.lower())

    def watch_movie(self, movie):
        """Record *movie* as watched and accumulate its runtime."""
        if isinstance(movie, Movie):
            self.__watched_movies.append(movie)
            self.__time_spent_watching_movies_minutes += movie.runtime_minutes

    def add_review(self, review):
        if isinstance(review, Review):
            self.__reviews.append(review)
class Review:
    """A single rating-plus-text review written by a User for a Movie.

    Equality compares movie, text, rating and timestamp (the author is
    deliberately excluded, matching the original behaviour).
    """

    def __init__(self, user: User, movie: Movie, review_text: str, timestamp: datetime, rating: int):
        self.__user = user
        self.__movie = movie
        # Stored stripped; assumes review_text is a str -- .strip() would
        # raise otherwise (TODO confirm callers always pass str).
        self.__review_text = review_text.strip()
        self.__rating = rating
        self.__timestamp = timestamp

    @property
    def user(self):
        return self.__user

    @property
    def movie(self):
        return self.__movie

    @property
    def review_text(self):
        return self.__review_text

    @property
    def rating(self):
        return self.__rating

    @property
    def timestamp(self):
        return self.__timestamp

    @user.setter
    def user(self, user):
        # Non-User values are silently ignored (matches other domain setters).
        if isinstance(user, User):
            self.__user = user

    @movie.setter
    def movie(self, movie):
        if isinstance(movie, Movie):
            self.__movie = movie

    @review_text.setter
    def review_text(self, review_text):
        if review_text != "" and type(review_text) is str:
            self.__review_text = review_text.strip()

    @rating.setter
    def rating(self, rating):
        # Only whole-number ratings from 1 to 10 are accepted.
        if type(rating) is int and 1 <= rating <= 10:
            self.__rating = rating

    @timestamp.setter
    def timestamp(self, timestamp):
        self.__timestamp = timestamp

    def __repr__(self):
        return f"<Review ({self.__rating}/10):{self.__review_text}>"

    def __eq__(self, other):
        # BUG FIX: comparing against a non-Review used to raise
        # AttributeError; now it is simply unequal.
        if not isinstance(other, Review):
            return False
        return self.__movie == other.__movie and self.__review_text == other.__review_text and \
            self.__rating == other.__rating and self.__timestamp == other.__timestamp
class WatchList:
    """An ordered, duplicate-free list of movies the user intends to watch.

    The object is its own iterator: __iter__ rewinds an internal cursor,
    so callers may also drive it manually with next().
    """

    def __init__(self):
        self.__movie = []

    def add_movie(self, movie):
        # Duplicates are ignored, so each movie appears at most once.
        if movie not in self.__movie:
            self.__movie.append(movie)

    def remove_movie(self, movie):
        # Only Movies already present are removed; anything else is a no-op.
        if movie in self.__movie and isinstance(movie, Movie):
            self.__movie.remove(movie)

    def select_movie_to_watch(self, index):
        """Return the movie at *index*, or None when out of range."""
        if index >= len(self.__movie):
            return None
        return self.__movie[index]

    def size(self):
        return len(self.__movie)

    def first_movie_in_watchlist(self):
        """Return the first movie, or None when the list is empty."""
        if not self.__movie:
            return None
        return self.__movie[0]

    def __iter__(self):
        # Rewind the cursor and hand back self as the iterator.
        self.__index = 0
        return self

    def __next__(self):
        if self.__index >= len(self.__movie):
            raise StopIteration
        current = self.__movie[self.__index]
        self.__index += 1
        return current
def make_review(review_text: str, user: User, movie: Movie, timestamp: datetime, rating: int):
    """Create a Review and attach it to both its author and its movie."""
    new_review = Review(user, movie, review_text, timestamp, rating)
    user.add_review(new_review)
    movie.add_review(new_review)
    return new_review
42,759 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_domain_model/test_actor.py | from CS235Flix.domain.model import Actor
import pytest
@pytest.fixture()
def actor():
    """Provide an Actor built from an empty name (its name is rejected)."""
    return Actor('')
def test_init(actor):
    """Exercise Actor construction, equality, ordering, hashing and colleagues."""
    actor1 = Actor("Angelina Jolie")
    assert repr(actor1) == "<Actor Angelina Jolie>"
    # Invalid names (empty string, non-string) leave the name as None.
    actor2 = Actor("")
    assert actor2.actor_full_name is None
    actor3 = Actor(42)
    assert actor3.actor_full_name is None
    # Two Actor instances compare equal when their names match.
    actor4 = Actor("Angelina Jolie")
    assert (actor1 == actor4) == True
    # Sorting order is defined by the name.
    a_list = []
    actor5 = Actor("Aatrox")
    a_list.append(actor1)
    a_list.append(actor5)
    a_list.sort()
    assert a_list[0] == actor5
    # The name also drives the hash, so equal actors collapse in a set.
    actors = {actor1, actor4}
    assert len(actors) == 1
    # Add a colleague and check the worked-with relationship is recorded.
    actor1.add_actor_colleague(actor5)
    assert actor1.check_if_this_actor_worked_with(actor5) == True
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,760 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_domain_model/test_director.py | from CS235Flix.domain.model import Director
import pytest
@pytest.fixture()
def director():
    """Provide a Director built from an empty name (its name is rejected)."""
    return Director('')
def test_init(director):
    """Exercise Director construction, equality, ordering and hashing."""
    director1 = Director("Taika Waititi")
    assert repr(director1) == "<Director Taika Waititi>"
    # Invalid names (empty string, non-string) leave the name as None.
    director2 = Director("")
    assert director2.director_full_name is None
    director3 = Director(42)
    assert director3.director_full_name is None
    # Two Director instances compare equal when their names match.
    director4 = Director("Taika Waititi")
    assert (director1 == director4) == True
    # Sorting order is defined by the name.
    a_list = []
    director5 = Director("Alex")
    a_list.append(director1)
    a_list.append(director5)
    a_list.sort()
    assert a_list[0] == director5
    # The name also drives the hash, so equal directors collapse in a set.
    directors = {director1, director4}
    assert len(directors) == 1
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,761 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_domain_model/test_genre.py | from CS235Flix.domain.model import Genre
import pytest
@pytest.fixture()
def genre():
    """Provide a Genre built from an empty name (its name is rejected)."""
    return Genre("")
def test_init(genre):
    """Exercise Genre construction, equality, ordering and hashing."""
    genre1 = Genre("Horror")
    assert repr(genre1) == "<Genre Horror>"
    # Invalid names (empty string, non-string) leave the name as None.
    genre2 = Genre("")
    assert genre2.genre_name is None
    genre3 = Genre(42)
    assert genre3.genre_name is None
    # Two Genre instances compare equal when their names match.
    genre4 = Genre("Horror")
    assert (genre1 == genre4) == True
    # Sorting order is defined by the name.
    a_list = []
    genre5 = Genre("History")
    a_list.append(genre1)
    a_list.append(genre5)
    a_list.sort()
    assert a_list[0] == genre5
    # The name also drives the hash, so equal genres collapse in a set.
    genre_set = {genre1, genre4}
    assert len(genre_set) == 1
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,762 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_services.py | from datetime import date
import pytest
from CS235Flix.authentication.services import AuthenticationException
from CS235Flix.authentication import services as auth_services
from CS235Flix.movies import services as movies_services
from CS235Flix.movies.services import NonExistentMovieException
from CS235Flix.domain.model import Actor, Director, Genre, Movie, Review, User, WatchList, make_review
def test_can_add_user(in_memory_repo):
    """A new user can be added and read back with an encrypted password."""
    new_username = 'jz'
    new_password = 'abcd1A23'
    auth_services.add_user(new_username, new_password, in_memory_repo)
    user_as_dict = auth_services.get_user(new_username, in_memory_repo)
    assert user_as_dict['username'] == new_username
    # Check that password has been encrypted.
    assert user_as_dict['password'].startswith('pbkdf2:sha256:')
def test_cannot_add_user_with_existing_name(in_memory_repo):
    """Adding a duplicate username raises NameNotUniqueException."""
    username = 'thorke'
    password = 'abcd1A23'
    with pytest.raises(auth_services.NameNotUniqueException):
        auth_services.add_user(username, password, in_memory_repo)
def test_authentication_with_valid_credentials(in_memory_repo):
    """Authenticating with the password just registered must not raise."""
    new_username = 'pmccartney'
    new_password = 'abcd1A23'
    auth_services.add_user(new_username, new_password, in_memory_repo)
    try:
        auth_services.authenticate_user(new_username, new_password, in_memory_repo)
    except AuthenticationException:
        assert False
def test_authentication_with_invalid_credentials(in_memory_repo):
    """A wrong password raises AuthenticationException."""
    new_username = 'pmccartney'
    new_password = 'abcd1A23'
    auth_services.add_user(new_username, new_password, in_memory_repo)
    with pytest.raises(auth_services.AuthenticationException):
        auth_services.authenticate_user(new_username, '0987654321', in_memory_repo)
def test_can_add_comment(in_memory_repo):
    """A review by a known user on a known movie is stored and retrievable."""
    comment_text = 'The loonies are stripping the supermarkets bare!'
    username = 'fmercury'
    # Call the service layer to add the review (rating 10, movie rank 2).
    movies_services.add_review(10,2,comment_text, username, in_memory_repo)
    # Retrieve the reviews for the movie from the repository.
    reviews_as_list = movies_services.get_reviews(2, in_memory_repo)
    # Check that the stored review carries the new review text.
    assert str(reviews_as_list) == "[<Review (10/10):The loonies are stripping the supermarkets bare!>]"
def test_cannot_add_comment_for_non_existent_article(in_memory_repo):
    """Reviewing a rank outside the catalogue raises NonExistentMovieException."""
    rank = 1001
    comment_text = "COVID-19 - what's that?"
    username = 'fmercury'
    # Call the service layer to attempt to add the review.
    with pytest.raises(movies_services.NonExistentMovieException):
        movies_services.add_review(10,rank,comment_text, username, in_memory_repo)
def test_cannot_add_comment_by_unknown_user(in_memory_repo):
    """Reviewing as an unregistered username raises UnknownUserException."""
    rank = 2
    comment_text = "COVID-19 - what's that?"
    username = 'gmichael'
    # Call the service layer to attempt to add the review.
    with pytest.raises(movies_services.UnknownUserException):
        movies_services.add_review(10,rank,comment_text, username, in_memory_repo)
def test_can_get_first_11_movie(in_memory_repo):
    """The home-page strip returns exactly the first 11 movies."""
    movie_list = movies_services.get_11_movie(in_memory_repo)
    assert len(movie_list)==11
def test_can_get_random_movies(in_memory_repo):
    """The 'random' condition yields 10 movies."""
    movie_list = movies_services.get_10_movie('random',in_memory_repo)
    assert len(movie_list)==10
def test_can_get_random_movies_based_on_genres(in_memory_repo):
    """Genre-filtered random picks always yield 10 movies for these genres."""
    genre1 = Genre('Fantasy')
    genre2 = Genre('Action')
    genre3 = Genre('Drama')
    genre4 = Genre('Music')
    movie_list = movies_services.get_10_movie([genre1], in_memory_repo)
    assert len(movie_list) == 10
    movie_list = movies_services.get_10_movie([genre1, genre2], in_memory_repo)
    assert len(movie_list) == 10
    movie_list = movies_services.get_10_movie([genre1, genre2, genre3], in_memory_repo)
    assert len(movie_list) == 10
    movie_list = movies_services.get_10_movie([genre1, genre2, genre3, genre4], in_memory_repo)
    assert len(movie_list) == 10
def test_can_get_movie_by_rank(in_memory_repo):
    """get_movie resolves a rank to the movie carrying that rank."""
    rank = 1
    movie_obj = movies_services.get_movie(rank,in_memory_repo)
    assert movie_obj.rank == rank
def test_can_get_review_by_rank(in_memory_repo):
    """Movie rank 1 is seeded with three reviews."""
    rank = 1
    review_list = movies_services.get_reviews(rank,in_memory_repo)
    assert len(review_list) == 3
def test_can_get_movies_by_year_and_genre(in_memory_repo):
    """get_all_movie filters by [genre, year], 'All' disabling either axis."""
    movie_list = movies_services.get_all_movie(['All','All'],in_memory_repo)
    assert len(movie_list) == 1000
    movie_list = movies_services.get_all_movie(["Fantasy", 'All'], in_memory_repo)
    assert len(movie_list) == 101
    movie_list = movies_services.get_all_movie(["All", '2016'], in_memory_repo)
    assert len(movie_list) == 297
    movie_list = movies_services.get_all_movie(["Fantasy", '2016'], in_memory_repo)
    assert len(movie_list) == 23
    # No Western in 2009 -> the 'None' sentinel string comes back.
    movie_list = movies_services.get_all_movie(["Western", '2009'], in_memory_repo)
    assert str(movie_list) == 'None'
def test_can_get_all_genres(in_memory_repo):
    """The seeded dataset contains 20 distinct genres."""
    genre_list = movies_services.get_all_genres(in_memory_repo)
    assert len(genre_list) == 20
def test_can_get_all_years(in_memory_repo):
    """The seeded dataset spans 11 distinct release years."""
    year_list = movies_services.get_all_years(in_memory_repo)
    assert len(year_list) == 11
def test_can_get_related_movie_by_director_or_actor(in_memory_repo):
    """Related-movie lookup dispatches on the 'director'/'actor' selector."""
    movie_list = movies_services.get_related_movie('director','James Gunn',in_memory_repo)
    assert len(movie_list) == 3
    movie_list = movies_services.get_related_movie('actor', 'Dwayne Johnson', in_memory_repo)
    assert len(movie_list) == 10
def test_can_search(in_memory_repo):
    """Search is case-insensitive and ignores surrounding whitespace."""
    result_dict, count = movies_services.search('jack',in_memory_repo)
    assert count == 27
    result_dict1, count = movies_services.search(' JaCk', in_memory_repo)
    assert result_dict1 == result_dict
    result_dict2, count = movies_services.search(' JaCK ', in_memory_repo)
    assert result_dict2 == result_dict
    # A nonsense query produces an empty result dict.
    result_dict2, count = movies_services.search('ashdahsdiuahsd ', in_memory_repo)
    assert result_dict2 == {}
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,763 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /Tests/unit/test_memory_repository.py | from datetime import date, datetime
from typing import List
import pytest
from CS235Flix.adapters.repository import AbstractRepository, RepositoryException
from CS235Flix.domain.model import Actor, Director, Genre, Movie, Review, User, WatchList, make_review
def test_repository_can_add_a_user(in_memory_repo):
    """An added User is returned by identity on lookup."""
    user = User('Dave', '123456789')
    in_memory_repo.add_user(user)
    assert in_memory_repo.get_user('Dave') is user
def test_repository_can_retrieve_a_user(in_memory_repo):
    """A user seeded from the data files can be retrieved by username."""
    user = in_memory_repo.get_user('fmercury')
    assert user == User('fmercury', '8734gfe2058v')
def test_repository_does_not_retrieve_a_non_existent_user(in_memory_repo):
    """Unknown usernames yield None rather than raising."""
    user = in_memory_repo.get_user('prince')
    assert user is None
def test_repository_can_retrieve_movie_count(in_memory_repo):
    """The repository reports the full catalogue size."""
    number_of_movies = in_memory_repo.get_number_of_movies()
    # Check that the query returned all movies in csv which is 1000 Movies.
    assert number_of_movies == 1000
def test_repository_can_add_movie(in_memory_repo):
    """A movie added with a fresh rank is retrievable by that rank."""
    movie = Movie(
        "I am Legend",
        1981
    )
    movie.rank = 1001
    in_memory_repo.add_movie(movie)
    assert in_memory_repo.get_movie(1001) is movie
def test_repository_can_add_year_dict(in_memory_repo):
    """add_year replaces the repository's year -> movies mapping."""
    movie = Movie(
        "I am Legend",
        1981
    )
    dict_year = {}
    dict_year[movie.year]=movie
    in_memory_repo.add_year(dict_year)
    assert in_memory_repo.get_years() is dict_year
def test_repository_can_add_genre_dict(in_memory_repo):
    """add_genres replaces the repository's genre -> movies mapping."""
    movie = Movie(
        "I am Legend",
        1981
    )
    genre = Genre("Fantasy")
    movie.genres=genre
    dict_genre = {}
    dict_genre[genre]=[movie]
    in_memory_repo.add_genres(dict_genre)
    assert in_memory_repo.get_genres() is dict_genre
def test_repository_can_add_actor_dict(in_memory_repo):
    """add_actor replaces the repository's actor -> movies mapping."""
    movie = Movie(
        "I am Legend",
        1981
    )
    actor = Actor("Will Smith")
    movie.actors=actor
    dict_actor = {}
    dict_actor[actor]=[movie]
    in_memory_repo.add_actor(dict_actor)
    assert in_memory_repo.get_actors() is dict_actor
def test_repository_can_add_director_dict(in_memory_repo):
    """add_director replaces the repository's director -> movies mapping."""
    movie = Movie(
        "I am Legend",
        1981
    )
    director = Director("James Gunn")
    movie.director=director
    dict_director = {}
    dict_director[director]=[movie]
    in_memory_repo.add_director(dict_director)
    assert in_memory_repo.get_directors() is dict_director
def test_repository_can_retrieve_movie(in_memory_repo):
    """A seeded movie comes back with its title, reviews and genres intact."""
    movie = in_memory_repo.get_movie(1)
    # Check that the Movie has the expected title.
    assert movie.title == 'Guardians of the Galaxy'
    # Check that the Movie carries its seeded reviews and their authors.
    review_one = [review for review in movie.reviews if review.review_text == "This movie is fantastic!"][0]
    review_two = [review for review in movie.reviews if review.review_text == "Yeah Freddie, good movie!"][0]
    assert review_one.user.username == 'fmercury'
    assert review_two.user.username == "thorke"
    # Check that the Movie is tagged with its expected genres.
    assert movie.is_tagged_by(Genre('Action'))
    assert movie.is_tagged_by(Genre('Adventure'))
def test_repository_does_not_retrieve_a_non_existent_movie(in_memory_repo):
    """A rank beyond the catalogue yields None rather than raising."""
    movie = in_memory_repo.get_movie(1001)
    assert movie is None
def test_repository_can_retrieve_movies_by_date(in_memory_repo):
    """Year lookup returns every movie released that year."""
    movies = in_memory_repo.get_movies_by_year(2009)
    # 51 movies from 2009 in the dataset.
    assert len(movies) == 51
    movies = in_memory_repo.get_movies_by_year(2010)
    assert len(movies) == 60
def test_repository_can_retrieve_movies_by_genre(in_memory_repo):
    """Genre lookup returns every movie tagged with that genre."""
    movies = in_memory_repo.get_movies_by_genre('Fantasy')
    # 101 Fantasy movies in the dataset.
    assert len(movies) == 101
    movies = in_memory_repo.get_movies_by_genre('Action')
    assert len(movies) == 303
def test_repository_can_retrieve_movies_by_director(in_memory_repo):
    """Director lookup returns every movie that person directed."""
    movies = in_memory_repo.get_movies_by_director('James Gunn')
    # 3 James Gunn movies in the dataset.
    assert len(movies) == 3
def test_repository_can_retrieve_movies_by_actor(in_memory_repo):
    """Actor lookup returns every movie that person appeared in."""
    movies = in_memory_repo.get_movies_by_actor('Dwayne Johnson')
    # 10 Dwayne Johnson movies in the dataset.
    assert len(movies) == 10
def test_repository_retrieve_movie_image(in_memory_repo):
    """A seeded movie exposes its poster image hyperlink."""
    movie = in_memory_repo.get_movie(1)
    hyperlink = movie.image_hyperlink
    assert hyperlink == "https://m.media-amazon.com/images/M/MV5BMTAwMjU5OTgxNjZeQTJeQWpwZ15BbWU4MDUxNDYxODEx._V1_SX300.jpg"
def test_repository_can_add_a_review(in_memory_repo):
    """A review built via make_review can be stored and read back by movie rank."""
    user = in_memory_repo.get_user('thorke')
    movie = in_memory_repo.get_movie(1)
    timestamp= datetime.today()
    review = make_review("Trump's onto it!", user, movie, timestamp,5)
    in_memory_repo.add_review(review)
    assert review in in_memory_repo.get_reviews(1)
def test_repository_can_retrieve_comments(in_memory_repo):
    """Movie rank 1 is seeded with three reviews."""
    assert len(in_memory_repo.get_reviews(1)) == 3
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,764 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /CS235Flix/movies/services.py | from typing import List, Iterable
from datetime import datetime
import random
from CS235Flix.adapters.repository import AbstractRepository
from CS235Flix.domain.model import make_review
class NonExistentMovieException(Exception):
    """Raised when a requested movie rank has no matching movie."""
    pass
class UnknownUserException(Exception):
    """Raised when a review is submitted under an unregistered username."""
    pass
def add_review(rate: int, rank: int, review_text: str, username: str, repo: AbstractRepository):
    """Record a rated review by *username* on the movie with the given rank.

    Raises NonExistentMovieException when the rank matches no movie and
    UnknownUserException when the username is not registered.
    (Leftover debug ``print()`` calls from the original removed.)
    """
    movie = repo.get_movie(int(rank))
    if movie is None:
        raise NonExistentMovieException
    user = repo.get_user(username)
    if user is None:
        raise UnknownUserException
    # Timestamp stored as a formatted string, matching the original behavior.
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    # Create the review with bidirectional user/movie links, then persist it.
    review = make_review(review_text, user, movie, timestamp, rate)
    repo.add_review(review)
def get_11_movie(repo: AbstractRepository):
movies = []
for rank in range(1, 12):
movies.append(repo.get_movie(rank))
if movies is [] or len(movies) != 11:
raise NonExistentMovieException
return movies
def get_10_movie(condition, repo: AbstractRepository):
movies = []
#generate 10 random movies
if condition == 'random':
random_rank_list = random.sample(range(12, repo.get_number_of_movies() + 1), 10)
for rank in random_rank_list:
movies.append(repo.get_movie(rank))
#all genres
else:
all_genre_movies = []
for genre in condition:
all_genre_movies += [movie for movie in repo.get_movies_by_genre(genre.genre_name) if movie not in all_genre_movies]
if len(all_genre_movies) >= 10:
random_index_list = random.sample(range(len(all_genre_movies)), 10)
else: #less than 10 movies among all the genres
random_index_list = random.sample(range(len(all_genre_movies)), len(all_genre_movies))
for index in random_index_list:
movies.append(all_genre_movies[index])
if movies is []:
raise NonExistentMovieException
return movies
def get_movie(rank, repo: AbstractRepository):
return repo.get_movie(int(rank))
def get_reviews(rank, repo: AbstractRepository):
return repo.get_reviews(int(rank))
def get_all_movie(condition, repo: AbstractRepository):
movies = []
if condition[0] != 'All' and condition[1] != 'All':
movies_genre = repo.get_movies_by_genre(condition[0])
movies_year = repo.get_movies_by_year(condition[1])
if len(movies_year) < len(movies_genre):
for movie in movies_year:
if movie in movies_genre:
movies.append(movie)
else:
for movie in movies_genre:
if movie in movies_year:
movies.append(movie)
elif condition == ['All','All']:
for rank in range(1,repo.get_number_of_movies()+1):
movies.append(repo.get_movie(int(rank)))
else:
index = condition.index('All')
if index == 0:
movies = repo.get_movies_by_year(condition[1])
else:
movies = repo.get_movies_by_genre(condition[0])
if movies == []:
return 'None'
return movies
def get_all_genres(repo: AbstractRepository):
genres = []
for genre in repo.get_genres():
genres.append(genre)
if genres is []:
raise NonExistentGenreException
return sorted(genres)
def get_all_years(repo: AbstractRepository):
years = []
for year in repo.get_years():
years.append(year)
if years is []:
raise NonExistentYearException
return sorted(years, reverse = True)
def update_genre_year(genre_year, value, repo: AbstractRepository):
return repo.update_genre_year(genre_year, value)
def get_related_movie(a_class, name, repo: AbstractRepository):
if a_class == 'director':
movies = repo.get_movies_by_director(name)
else:
movies = repo.get_movies_by_actor(name)
return movies
def search(search_text, repo: AbstractRepository):
result_dict = {}
search_text = search_text.strip().lower()
count = 0
#find related movie
for movie in repo.get_movies():
if search_text in movie.title.lower():
count+=1
if "Movie" not in result_dict:
result_dict["Movie"] = [movie]
else:
result_dict["Movie"].append(movie)
#find related actor
for actor in repo.get_actors():
if search_text in actor.actor_full_name.lower():
count += 1
if "Actor" not in result_dict:
result_dict["Actor"] = [actor]
else:
result_dict["Actor"].append(actor)
#find related director
for director in repo.get_directors():
if search_text in director.director_full_name.lower():
count += 1
if "Director" not in result_dict:
result_dict["Director"] = [director]
else:
result_dict["Director"].append(director)
return result_dict, count
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
42,765 | Steve-XD/qkan100-compsci235-a2 | refs/heads/main | /CS235Flix/adapters/repository.py | import abc
from typing import List
from CS235Flix.domain.model import Movie, Review, User
repo_instance = None  # module-level holder for the active repository; None until assigned elsewhere
class RepositoryException(Exception):
    """Base exception for repository-layer failures.

    The original __init__ accepted a message and silently discarded it
    (`pass`), so str(exc) was always empty; forward it to Exception so
    the message survives.
    """
    def __init__(self, message=None):
        if message is None:
            super().__init__()
        else:
            super().__init__(message)
class AbstractRepository(abc.ABC):
@abc.abstractmethod
def add_user(self, user: User):
"""" Adds a User to the repository. """
raise NotImplementedError
@abc.abstractmethod
def get_user(self, username) -> User:
""" Returns the User named username from the repository.
If there is no User with the given username, this method returns None.
"""
raise NotImplementedError
@abc.abstractmethod
def update_genre_year(self, genre_year, value):
""" It updates the condition when genre or year is selected."""
raise NotImplementedError
@abc.abstractmethod
def add_movie(self, movie: Movie):
"""" Adds a Movie to the repository. """
raise NotImplementedError
@abc.abstractmethod
def add_year(self, dict_year):
"""" Adds a dictionary of year to the repository.
It contains a list of movies that is related to a particular year
"""
raise NotImplementedError
@abc.abstractmethod
def add_genres(self, dict_genre):
"""" Adds a dictionary of genre to the repository.
It contains a list of movies that is related to a particular genre
"""
raise NotImplementedError
@abc.abstractmethod
def add_director(self, dict_director):
"""" Adds a dictionary of director to the repository.
It contains a list of movies that is related to a particular director
"""
raise NotImplementedError
@abc.abstractmethod
def add_actor(self, dict_actor):
"""" Adds a dictionary of actors to the repository.
It contains a list of movies that is related to a particular actor
"""
raise NotImplementedError
@abc.abstractmethod
def get_movies(self) -> List:
""" Returns a list of movies from the repository."""
raise NotImplementedError
@abc.abstractmethod
def get_movie(self) -> Movie:
""" Returns a movie by searching up the rank from the repository.
If there is no movie with the given rank, this method returns None.
"""
raise NotImplementedError
@abc.abstractmethod
def get_number_of_movies(self) -> int:
""" Returns number of the movies from the repository."""
raise NotImplementedError
@abc.abstractmethod
def get_movies_by_year(self, year):
""" Returns a list of Movies which matches the target year from the repository.
If there are no matches, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def get_movies_by_genre(self, genre):
""" Returns a list of Movies which matches the genre from the repository.
If there are no matches, this method returns an empty list.
"""
raise NotImplementedError
def get_movies_by_director(self, director):
""" Returns a list of Movies which matches the director from the repository.
If there are no matches, this method returns an empty list.
"""
raise NotImplementedError
def get_movies_by_actor(self, actor):
""" Returns a list of Movies which matches the actor from the repository.
If there are no matches, this method returns an empty list.
"""
raise NotImplementedError
@abc.abstractmethod
def add_review(self, review: Review):
""" Adds a review to the repository.
If the Review doesn't have bidirectional links with an Movie and a User, this method raises a
RepositoryException and doesn't update the repository.
"""
if review.user is None or review not in review.user.reviews:
raise RepositoryException('Review not correctly attached to a User')
if review.movie is None or review not in review.movie.reviews:
raise RepositoryException('Review not correctly attached to a Movie')
@abc.abstractmethod
def get_reviews(self):
""" Returns the Reviews stored in the repository. """
raise NotImplementedError
| {"/CS235Flix/adapters/memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_movie.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_review.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_actor.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_director.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_genre.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_services.py": ["/CS235Flix/movies/services.py", "/CS235Flix/domain/model.py"], "/Tests/unit/test_memory_repository.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/movies/services.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/domain/model.py"], "/CS235Flix/adapters/repository.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_watchlist.py": ["/CS235Flix/domain/model.py"], "/Tests/unit/test_domain_model/test_user.py": ["/CS235Flix/domain/model.py"], "/CS235Flix/movies/movies.py": ["/CS235Flix/adapters/repository.py", "/CS235Flix/movies/services.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.