index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
8,008
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/website/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-16 12:03
from __future__ import unicode_literals
from django.db import migrations, models
import django_countries.fields
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the RunnedTest table
    (one row per submitted mortality test)."""

    # First migration of the app: nothing to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='RunnedTest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ip', models.CharField(blank=True, max_length=30)),
                ('datetime', models.DateTimeField(auto_now_add=True)),
                ('input_file', models.FileField(blank=True, upload_to='uploads/', verbose_name='Blood markers file')),
                # NOTE(review): several FloatFields below use the *string* default '0'
                # while 'age' uses the int 0 — Django coerces it, but this looks
                # unintentional; confirm against the current models.py before changing
                # (altering a historical migration can desync migration state).
                ('predicted_age', models.FloatField(blank=True, default='0', verbose_name='Predicted age')),
                ('age', models.FloatField(blank=True, default=0, verbose_name='Age')),
                ('sex', models.IntegerField(blank=True, choices=[(0, 'Female'), (1, 'Male')], default=1, verbose_name='Sex')),
                ('weight', models.FloatField(blank=True, default='0', verbose_name='Weight')),
                ('height', models.FloatField(blank=True, default='0', verbose_name='Height')),
                ('bmi', models.FloatField(blank=True, default='0', verbose_name='BMI')),
                ('smoking', models.IntegerField(blank=True, choices=[(0, 'Never smoked'), (1, 'Former smoker'), (2, 'Current light smoker'), (3, 'Current heavy smoker')], default=0, verbose_name='Smoking')),
                ('alcohol', models.IntegerField(blank=True, choices=[(0, 'non-drinker'), (1, '< 1 drink/month'), (2, '0-4/week'), (3, '5-9/week'), (4, '10-24/week'), (5, 'binger')], default=0, verbose_name='Alcohol')),
                # Stored as an ISO-3166 alpha-2 code by django-countries.
                ('ethnicity', django_countries.fields.CountryField(max_length=2)),
                ('social_status', models.IntegerField(blank=True, choices=[(0, 'Poor'), (1, 'Good')], default=1, verbose_name='Social status')),
                ('activity', models.IntegerField(blank=True, choices=[(0, 'Low'), (1, 'Moderate'), (2, 'High')], default=0, verbose_name='Activity')),
                ('mental_health', models.IntegerField(blank=True, choices=[(0, 'No active illness'), (1, 'Active illness')], default=0, verbose_name='Mental health')),
            ],
        ),
    ]
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,009
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/core/algorythm.py
|
import os
import csv
# Directory of this module; data files (cntr.txt, cleaned_data/) are resolved
# relative to it so the module works regardless of the process CWD.
BASE = os.path.dirname(os.path.abspath(__file__))

# Module-level example input for ages_left(**kwargs).
# NOTE(review): appears to be a sample/default left over from development —
# website/views.py builds its own kwargs dict; verify nothing imports this one.
kwargs = {"gender":1, #0 = female, 1 = male
"country":"Germany", #country names as in 2 column of cntr.txt. For example France, Nigeria (case not sensitive)
"age":23, #integer or float 0 - 999
"height":1.65, #float height in meters
"weight":65, #float weight in kilograms
"alcohol":1, #integer. Different meaning of value for men and women:
#For men 0 = non-drinker, 1 = < 1 drink/month, 2 = 0-4/week, 3 = 5-9/week, 4 = 10-24/week, 5 = binger
#For women 0 = non-drinker, 1 = < 1 drink/month, 2 = 0-2/week, 3 = 3-5/week, 4 = 6-17/week, 5 = binger
"smoking":1, #integer 0 = never smoked, 1 = former smoker, 2 = current light smoker, 3 = current heavy smoker
"activity":1, #integer 0 = low activity, 1 = moderate, 2 = high
"social_status":True, #boolean. True = good social life, False = poor social life
"mental":False} #boolean. True = active mental illness, False = no active mental illness
def ages_left(**kwargs):
    """Estimate expected longevity (years) from demographics and lifestyle.

    Expected keys: gender (0=female, 1=male), country, age, height (m),
    weight (kg), alcohol (0-5), smoking (0-3), activity (0-2),
    social_status (truthy = good), mental (truthy = active mental illness).

    Returns the country/age/gender baseline adjusted by BMI, alcohol,
    smoking, activity, social status and mental-health effects.
    """
    expected_longevity = country_data(kwargs["country"], kwargs["age"], kwargs["gender"])
    expected_longevity += bmi_effect(kwargs["height"], kwargs["weight"], kwargs["gender"])
    expected_longevity += alcohol_effect(kwargs["alcohol"], kwargs["gender"])
    expected_longevity += smoking_effect(kwargs["smoking"], kwargs["gender"])
    expected_longevity += activity_effect(kwargs["activity"], kwargs["gender"])
    if not kwargs["social_status"]:
        expected_longevity -= 1.3  # penalty for poor social life
    if kwargs["mental"]:
        # Bug fix: the list was indexed with the literal string "gender"
        # (a guaranteed TypeError at runtime); index with the caller's gender.
        expected_longevity += [-15.9, -12.0][kwargs["gender"]]
    return expected_longevity
def activity_effect(activity, gender):
    """Years gained/lost for activity level (0 low, 1 moderate, 2 high).

    gender: 0 = female, 1 = male.
    """
    female, male = {0: (-1.4, -1.5), 1: (2.7, 1.8), 2: (3.3, 2.9)}[activity]
    return (female, male)[gender]
def smoking_effect(smoking, gender):
    """Years gained/lost for smoking status; gender 0 = female, 1 = male."""
    per_status = {
        0: (2.2, 3.2),    # never smoked
        1: (-1.9, -0.1),  # former smoker
        2: (-4.1, -4.5),  # current light smoker
        3: (-9.0, -8.6),  # current heavy smoker
    }
    female, male = per_status[smoking]
    return (female, male)[gender]
def alcohol_effect(alcohol, gender):
    """Years gained/lost for alcohol consumption level 0-5.

    Levels have different week-count meanings for men and women (see the
    module-level kwargs comments); gender 0 = female, 1 = male.
    """
    per_level = {
        0: (-1.7, -1.5),
        1: (-0.1, -0.8),
        2: (1.8, 1.0),
        3: (3.5, 2.6),
        4: (1.5, 0.5),
        5: (-1.7, -1.7),
    }
    female, male = per_level[alcohol]
    return (female, male)[gender]
def country_data(person_country, age, gender):
    """Look up baseline life expectancy for (country, age band, gender).

    cntr.txt lists known countries (tab-separated, name first); each
    cleaned_data/<Country>.txt line is "min_age<TAB>max_age<TAB>female<TAB>male".

    Raises ValueError for an unknown country (the original code crashed with
    an unrelated NameError on `country_name` instead). Returns None if no
    age band matches, which preserves the original fall-through behavior.
    """
    country_name = None
    with open(os.path.join(BASE, "cntr.txt")) as countries_file:
        for country in countries_file:
            country_entry = country.split("\t")
            if country_entry[0].lower() == person_country.lower():
                country_name = "cleaned_data/{}.txt".format(country_entry[0])
                break
    if country_name is None:
        raise ValueError("Unknown country: {!r}".format(person_country))
    with open(os.path.join(BASE, country_name)) as person_country_file:
        for line in person_country_file:
            # Strip the trailing newline, then parse the four tab-separated floats.
            splitted_line = [float(x) for x in line[:-1].split("\t")]
            line_dict = dict(zip(["min", "max", 0, 1], splitted_line))
            if line_dict["min"] <= age <= line_dict["max"]:
                return line_dict[gender]
    return None
def bmi_effect(height, weight, gender):
    """Years gained/lost for BMI; height in meters, weight in kg.

    gender: 0 = female, 1 = male.

    Bug fix: ranges previously used strict '<' on both ends, so a BMI of
    exactly 18.5, 25, 30 or 35 matched no band and the function silently
    returned None (crashing the caller's addition). Lower bounds are now
    inclusive; BMI outside (0, 250) still returns None as before.
    """
    bmi = weight / (height ** 2)
    bmi_effects = {(0, 18.5): (-2.7, -5.9), (18.5, 25): (0, 0), (25, 30): (-1, 0),
                   (30, 35): (-3.8, -1), (35, 250): (-7.8, -3.5)}
    for low, high in bmi_effects:
        if low <= bmi < high:
            return bmi_effects[(low, high)][gender]
    return None
def crop_countries():
    """One-off converter: raw CSVs in data/ -> tab-separated files in cleaned_data/.

    Keeps only rows whose first cell starts with "ex" (life-expectancy rows)
    and rewrites the age field as "min<TAB>max" via get_age().
    """
    for country_filename in os.listdir("data"):
        # Bug fixes: both files are now managed by `with` (out_file previously
        # leaked on an exception), and the header row is explicitly skipped —
        # the old code cleared the flag but still fell through to the "ex" check.
        with open("data/" + country_filename) as country_csv, \
             open("cleaned_data/" + country_filename, "w") as out_file:
            country_reader = csv.reader(country_csv, delimiter=',', quotechar='"')
            header = True
            for line in country_reader:
                if header:
                    header = False
                    continue
                if line[0][:2] == "ex":
                    out_listed_line = get_age(line[1]) + line[2:4]
                    out_file.write("\t".join(out_listed_line) + "\n")
def get_age(age_field):
    """Parse an age-range field into a [min, max] pair of strings.

    "< 1 year"    -> ["0", "1"]
    "100+ years"  -> ["100", "999"]
    "20-24 years" -> ["20", "24"]
    """
    if "<" in age_field:
        return ["0", "1"]
    if "100+" in age_field:
        return ["100", "999"]
    cleaned = age_field.replace(" years", "").strip()
    return cleaned.split("-")
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,010
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/website/views.py
|
# -*- coding: utf-8 -*-
import os
import uuid
import pandas as pd
import numpy as np
import subprocess
import json
from django.views.generic.base import TemplateView
from django.views.generic.edit import FormView
from django.conf import settings
from django.shortcuts import redirect
from django import forms
from django.utils.safestring import mark_safe
from django.core.files import File
from django_countries.fields import LazyTypedChoiceField, Country
from django_countries.widgets import CountrySelectWidget
from django_countries import countries
from core.algorythm import ages_left
from .models import RunnedTest, Article
def get_client_ip(request):
    """Return the client IP, preferring the first X-Forwarded-For hop.

    Falls back to REMOTE_ADDR when the request did not pass a proxy.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[0]
    return request.META.get('REMOTE_ADDR')
class nnBloodForm(forms.Form):
    """TOP 10 blood markers (EU / SI units) plus country.

    All marker fields are required. The active min/max bounds are wide
    population extremes; the original clinical ranges are preserved in the
    trailing comments. help_text shows the normal clinical range.
    """
    Albumen = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Serum_albumin' target='_blank'>Albumin**</a>"),
                                required=True, help_text='35 - 52 g/l', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=52.25)#min_value=35, max_value=52)
    Glucose = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Glucose' target='_blank'>Glucose**</a>"),
                                required=True, help_text='3.9 - 5.8 mmole/l', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=0.35, max_value=32)#min_value=3.9, max_value=5.8)
    Alkaline_phosphatase = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Alkaline_phosphatase' target='_blank'>Alkaline phosphatase**</a>"),
                                required=True, help_text='20 - 120 U/l', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=4337)#min_value=20, max_value=120)
    Urea = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Urea' target='_blank'>Urea**(BUN)</a>"),
                                required=True, help_text='2.5 - 6.4 mmole/l', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=0.7, max_value=84.1)#min_value=2.5, max_value=6.4)
    Erythrocytes = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Red_blood_cell' target='_blank'>Erythrocytes** (RBC)</a>"),
                                required=True, help_text=mark_safe('3.5 - 5.5 10<sup><small>6</small></sup> /mcl'), widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=0.79, max_value=9.25)#min_value=3.5, max_value=5.5)
    Cholesterol = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Cholesterol' target='_blank'>Cholesterol**</a>"),
                                required=True, help_text='3.37 - 5.96 mmole/l', widget=forms.NumberInput(attrs={'class': 'form-control '}),
                                min_value=1, max_value=20.19)#min_value=3.37, max_value=5.96)
    RDW = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Red_blood_cell_distribution_width' target='_blank'>RDW**</a>"),
                                required=True, help_text='11.5 - 14.5 %', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=44.2)#min_value=11.5, max_value=14.5)
    # NOTE(review): field name says Alpha-1 but the label/help text say
    # Alpha-2-globulins — confirm which marker is actually intended.
    Alpha_1_globulins1 = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Alpha_globulin' target='_blank'>Alpha-2-globulins**</a>"),
                                required=True, help_text='5.1 - 8.5 g/l', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=20.17)#min_value=5.1, max_value=8.5)
    Hematocrit = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Hematocrit' target='_blank'>Hematocrit**</a>"),
                                required=True, help_text='37 - 50 %', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=8, max_value=66)#min_value=37, max_value=50)
    Lymphocytes = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Lymphocyte' target='_blank'>Lymphocytes**</a>"),
                                required=True, help_text='20 - 40 %', widget=forms.NumberInput(attrs={'class': 'form-control '}),
                                min_value=0, max_value=98)#min_value=20, max_value=40)
    country = LazyTypedChoiceField(choices=countries)

    # NOTE(review): Meta.widgets is a ModelForm feature; plain forms.Form
    # ignores it, so this widget presumably never applies — verify in a template.
    class Meta:
        widgets = {'country': CountrySelectWidget(attrs={'class': 'form-control '})}
class nnBloodFormUS(forms.Form):
    """TOP 10 blood markers in US units, plus country.

    Variant of nnBloodForm served for ?m=us. Unlike the EU form every
    marker here is optional (required=False); the view substitutes
    population means for missing values and converts US units to SI.
    """
    Albumen = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Serum_albumin' target='_blank'>Albumin**</a>"),
                                required=False, help_text='3.5 - 5.5 U/L', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=0.1, max_value=7.23)#min_value=35, max_value=52)
    Glucose = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Glucose' target='_blank'>Glucose**</a>"),
                                required=False, help_text='65 - 99 mg/dL', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=6.37, max_value=581.8)#min_value=3.9, max_value=5.8)
    Alkaline_phosphatase = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Alkaline_phosphatase' target='_blank'>Alkaline phosphatase**</a>"),
                                required=False, help_text='39 - 117 IU/L', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=4337)#min_value=20, max_value=120)
    Urea = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Urea' target='_blank'>Urea**(BUN)</a>"),
                                required=False, help_text='6 - 24 mg/dL', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=235.6)#min_value=2.5, max_value=6.4)
    Erythrocytes = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Red_blood_cell' target='_blank'>Erythrocytes** (RBC)</a>"),
                                required=False, help_text=mark_safe('3.77 - 5.28 10<sup><small>6</small></sup> /uL'), widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=0.79, max_value=9.25)#min_value=3.5, max_value=5.5)
    Cholesterol = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Cholesterol' target='_blank'>Cholesterol**</a>"),
                                required=False, help_text='100 - 199 mg/dL', widget=forms.NumberInput(attrs={'class': 'form-control '}),
                                min_value=38.6, max_value=779.5)#min_value=3.37, max_value=5.96)
    RDW = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Red_blood_cell_distribution_width' target='_blank'>RDW**</a>"),
                                required=False, help_text='12.3 - 15.4 %', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=44.2)#min_value=11.5, max_value=14.5)
    # NOTE(review): field name says Alpha-1 but the label/help text say
    # Alpha-2-globulins — confirm which marker is actually intended.
    Alpha_1_globulins1 = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Alpha_globulin' target='_blank'>Alpha-2-globulins**</a>"),
                                required=False, help_text='5.1 - 8.5 g/l', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=1, max_value=20.17)#min_value=5.1, max_value=8.5)
    Hematocrit = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Hematocrit' target='_blank'>Hematocrit**</a>"),
                                required=False, help_text='37 - 50 %', widget=forms.NumberInput(attrs={'class': 'form-control'}),
                                min_value=8, max_value=66)#min_value=37, max_value=50)
    Lymphocytes = forms.FloatField( label=mark_safe("<a href='https://en.wikipedia.org/wiki/Lymphocyte' target='_blank'>Lymphocytes**</a>"),
                                required=False, help_text='20 - 40 %', widget=forms.NumberInput(attrs={'class': 'form-control '}),
                                min_value=0, max_value=98)#min_value=20, max_value=40)
    country = LazyTypedChoiceField(choices=countries)

    # NOTE(review): Meta.widgets is a ModelForm feature; plain forms.Form
    # ignores it, so this widget presumably never applies — verify in a template.
    class Meta:
        widgets = {'country': CountrySelectWidget(attrs={'class': 'form-control '})}
class IndexPage(TemplateView):
    """Landing page; injects the admin-editable start-page article.

    Cleanup: the previous dispatch() override only called super() and was
    removed — TemplateView's own dispatch behaves identically.
    """
    template_name = 'website/index.html'

    def get_context_data(self, **kwargs):
        context = super(IndexPage, self).get_context_data(**kwargs)
        # Article rows are edited via the admin; idx is the lookup key.
        context['start_page_text'] = Article.objects.get(idx='start_page_text')
        return context
class InputForm(FormView):
    """Blood-marker input page.

    On valid submission: builds a one-row DataFrame of all markers (missing
    values replaced with population means), converts US units to SI when
    needed, runs the neural-net age predictor as a subprocess, combines the
    predicted age with the actuarial model (core.algorythm.ages_left),
    persists a RunnedTest row and redirects to /result/.
    """
    template_name = 'website/input_form.html'
    form_class = nnBloodForm
    success_url = '/result/'
    # 'eu' or 'us' — selects units and which form class is served.
    metric = 'eu'

    def dispatch(self, request, *args, **kwargs):
        # Pick the form variant from the ?m= query parameter.
        # NOTE(review): the bare except silently falls back to the EU form on
        # *any* error, not just a missing parameter — consider narrowing to KeyError.
        try:
            self.metric = self.request.GET['m']
            if self.metric == 'us':
                self.form_class=nnBloodFormUS
            else:
                self.form_class=nnBloodForm
        except:
            self.form_class=nnBloodForm
        return super(InputForm, self).dispatch(request, *args, **kwargs)

    def get_context_data(self, **kwargs):
        context = super(InputForm, self).get_context_data(**kwargs)
        # Admin-editable article snippets shown around the form.
        context['how_its_done'] = Article.objects.get(idx='how_its_done')
        context['rules'] = Article.objects.get(idx='rules')
        context['desc'] = Article.objects.get(idx='desc')
        context['partner'] = Article.objects.get(idx='partner')
        context['metric'] = self.metric
        context['document'] = 'input_document'
        return context

    def form_valid(self, form):
        context = self.get_context_data()
        metric = self.request.POST['metric']
        try:
            ip = get_client_ip(self.request)
        except:
            ip = 'Undefined'
        # Demographics come from the raw POST, not form.cleaned_data —
        # presumably rendered outside the Django form; verify in the template.
        age = float(self.request.POST.get('age'))
        sex = int(self.request.POST.get('sex', 1))
        height = float(self.request.POST.get('height', 177))
        if metric == 'us':
            height = 2.54*height # convert inches to cm
        weight = float(self.request.POST.get('weight', 70.8))
        if metric == 'us':
            weight = 0.453592*weight # convert lb to kg
        # Height is in cm here, hence the *10000 factor (kg / m^2).
        bmi = format((weight/(height**2))*10000, '.2f')
        country = self.request.POST.get('country')
        objCountry = Country(country)
        country_name = str(objCountry.alpha3)
        smoke = int(self.request.POST.get('smoke', 1))
        alcohol = int(self.request.POST.get('alcohol', 1))
        activity = int(self.request.POST.get('activity', 1))
        social_status = int(self.request.POST.get('social_status', 1))
        mental = False #2 b implemented later
        """ Aging Form"""
        # One-row DataFrame with every marker the predictor expects; blanks
        # are replaced by population-mean constants.
        # NOTE(review): the '59.91' string default in the first line is dead
        # code — the else branch already supplies 59.91 for falsy values.
        df = pd.DataFrame() # DF for the full test
        df.loc[:,'Alpha-amylase'] = pd.Series(form.cleaned_data.get('Alpha_amylase', '59.91') if form.cleaned_data.get('Alpha_amylase') else 59.91)
        df.loc[:,'ESR (by Westergren)'] = pd.Series(form.cleaned_data.get('ESR') if form.cleaned_data.get('ESR') else 11.19)
        df.loc[:,'Bilirubin total'] = pd.Series(form.cleaned_data.get('Bilirubin_total') if form.cleaned_data.get('Bilirubin_total') else 13.01)
        df.loc[:,'Bilirubin direct'] = pd.Series(form.cleaned_data.get('Bilirubin_direct') if form.cleaned_data.get('Bilirubin_direct') else 4.85)
        df.loc[:,'Gamma-GT'] = pd.Series(form.cleaned_data.get('Gamma_GT') if form.cleaned_data.get('Gamma_GT') else 38.77)
        df.loc[:,'Glucose'] = pd.Series(form.cleaned_data.get('Glucose') if form.cleaned_data.get('Glucose') else 5.57)
        df.loc[:,'Creatinine'] = pd.Series(form.cleaned_data.get('Creatinine') if form.cleaned_data.get('Creatinine') else 74.72)
        df.loc[:,'Lactate dehydrogenase'] = pd.Series(form.cleaned_data.get('Lactate_dehydrogenase') if form.cleaned_data.get('Lactate_dehydrogenase') else 186.43)
        df.loc[:,'Urea'] = pd.Series(form.cleaned_data.get('Urea') if form.cleaned_data.get('Urea') else 5.17)
        df.loc[:,'Protein total'] = pd.Series(form.cleaned_data.get('Protein_total') if form.cleaned_data.get('Protein_total') else 73.01)
        df.loc[:,'Alpha-1-globulins'] = pd.Series(form.cleaned_data.get('Alpha_1_globulins') if form.cleaned_data.get('Alpha_1_globulins') else 2.92)
        df.loc[:,'Alpha-1-globulins1'] = pd.Series(form.cleaned_data.get('Alpha_1_globulins1') if form.cleaned_data.get('Alpha_1_globulins1') else 7.06)
        df.loc[:,'Beta-globulins'] = pd.Series(form.cleaned_data.get('Beta_globulins') if form.cleaned_data.get('Beta_globulins') else 7.99)
        df.loc[:,'Gamma-globulins'] = pd.Series(form.cleaned_data.get('Gamma_globulins') if form.cleaned_data.get('Gamma_globulins') else 11.47)
        df.loc[:,'Triglycerides'] = pd.Series(form.cleaned_data.get('Triglycerides') if form.cleaned_data.get('Triglycerides') else 1.36)
        df.loc[:,'Cholesterol'] = pd.Series(form.cleaned_data.get('Cholesterol') if form.cleaned_data.get('Cholesterol') else 5.48)
        df.loc[:,'HDL Cholesterol'] = pd.Series(form.cleaned_data.get('HDL_Cholesterol') if form.cleaned_data.get('HDL_Cholesterol') else 1.37)
        df.loc[:,'LDL cholesterol (by Friedewald)'] = pd.Series(form.cleaned_data.get('LDL_cholesterol') if form.cleaned_data.get('LDL_cholesterol') else 3.47)
        df.loc[:,'Alkaline phosphatase'] = pd.Series(form.cleaned_data.get('Alkaline_phosphatase') if form.cleaned_data.get('Alkaline_phosphatase') else 85.96)
        df.loc[:,'Calcium'] = pd.Series(form.cleaned_data.get('Calcium') if form.cleaned_data.get('Calcium') else 2.41)
        df.loc[:,'Chlorine'] = pd.Series(form.cleaned_data.get('Chlorine') if form.cleaned_data.get('Chlorine') else 104.86)
        df.loc[:,'Potassium'] = pd.Series(form.cleaned_data.get('Potassium') if form.cleaned_data.get('Potassium') else 4.36)
        df.loc[:,'Sodium'] = pd.Series(form.cleaned_data.get('Sodium') if form.cleaned_data.get('Sodium') else 140.09)
        df.loc[:,'Iron'] = pd.Series(form.cleaned_data.get('Iron') if form.cleaned_data.get('Iron') else 17.37)
        df.loc[:,'Hemoglobin'] = pd.Series(form.cleaned_data.get('Hemoglobin') if form.cleaned_data.get('Hemoglobin') else 13.9)
        df.loc[:,'Hematocrit'] = pd.Series(form.cleaned_data.get('Hematocrit') if form.cleaned_data.get('Hematocrit') else 40.89)
        df.loc[:,'MCH'] = pd.Series(form.cleaned_data.get('MCH') if form.cleaned_data.get('MCH') else 29.51)
        df.loc[:,'MCHC'] = pd.Series(form.cleaned_data.get('MCHC') if form.cleaned_data.get('MCHC') else 34.20)
        df.loc[:,'MCV'] = pd.Series(form.cleaned_data.get('MCV') if form.cleaned_data.get('MCV') else 86.52)
        df.loc[:,'Platelets'] = pd.Series(form.cleaned_data.get('Platelets') if form.cleaned_data.get('Platelets') else 259.77)
        df.loc[:,'Erythrocytes'] = pd.Series(form.cleaned_data.get('Erythrocytes') if form.cleaned_data.get('Erythrocytes') else 4.75)
        df.loc[:,'Leukocytes'] = pd.Series(form.cleaned_data.get('Leukocytes') if form.cleaned_data.get('Leukocytes') else 6.88)
        df.loc[:,'ALT'] = pd.Series(form.cleaned_data.get('ALT') if form.cleaned_data.get('ALT') else 27.58)
        df.loc[:,'AST'] = pd.Series(form.cleaned_data.get('AST') if form.cleaned_data.get('AST') else 24.96)
        df.loc[:,'Albumen'] = pd.Series(form.cleaned_data.get('Albumen') if form.cleaned_data.get('Albumen') else 43.57)
        df.loc[:,'Basophils, %'] = pd.Series(form.cleaned_data.get('Basophils') if form.cleaned_data.get('Basophils') else 0.32)
        df.loc[:,'Eosinophils, %'] = pd.Series(form.cleaned_data.get('Eosinophils') if form.cleaned_data.get('Eosinophils') else 2.93)
        df.loc[:,'Lymphocytes, %'] = pd.Series(form.cleaned_data.get('Lymphocytes') if form.cleaned_data.get('Lymphocytes') else 35.48)
        df.loc[:,'Monocytes, %'] = pd.Series(form.cleaned_data.get('Monocytes') if form.cleaned_data.get('Monocytes') else 8.79)
        df.loc[:,'NEUT'] = pd.Series(form.cleaned_data.get('NEUT') if form.cleaned_data.get('NEUT') else 55.10)
        df.loc[:,'RDW'] = pd.Series(form.cleaned_data.get('RDW') if form.cleaned_data.get('RDW') else 13.71)
        # US-to-SI conversions; the *1.0 lines are unit-identical markers kept
        # for symmetry with the convertible ones.
        if metric == 'us':
            df.loc[:,'Albumen'] = df.loc[:,'Albumen']*10.0
            df.loc[:,'Glucose'] = df.loc[:,'Glucose']*0.0555
            df.loc[:,'Alkaline phosphatase'] = df.loc[:,'Alkaline phosphatase']*1.0
            df.loc[:,'Urea'] = df.loc[:,'Urea']*0.357
            df.loc[:,'Erythrocytes'] = df.loc[:,'Erythrocytes']*1.0
            df.loc[:,'Cholesterol'] = df.loc[:,'Cholesterol']*0.0259
            df.loc[:,'RDW'] = df.loc[:,'RDW']*1.0
            df.loc[:,'Alpha-1-globulins1'] = df.loc[:,'Alpha-1-globulins1']*1.0
            df.loc[:,'Hematocrit'] = df.loc[:,'Hematocrit']*1.0
            df.loc[:,'Lymphocytes, %'] = df.loc[:,'Lymphocytes, %']*1.0
        # The predictor expects a single 'Alpha-1-globulins' column name.
        df.rename(columns={'Alpha-1-globulins1': 'Alpha-1-globulins'}, inplace=True)
        random_file_name = "%s.%s" % (uuid.uuid4(), 'csv')
        df.to_csv(settings.MEDIA_ROOT+"/uploads/"+random_file_name, index=False)
        # NOTE(review): this handle is handed to File(upload) below and never
        # explicitly closed.
        upload = open(settings.MEDIA_ROOT+"/uploads/"+random_file_name)
        try:
            # Run the neural-net predictor out of process; its first stdout
            # line is the predicted age.
            command = "python django_call_age.py ../../media/uploads/"+random_file_name
            pipe = subprocess.Popen(command.split(),
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    cwd=settings.MEDIA_ROOT+"/../static/nnblood/")
            stdout_data, stderr_data = pipe.communicate()
            if pipe.returncode != 0:
                raise RuntimeError("%r failed, status code %s stdout %r stderr %r" % (
                                   command, pipe.returncode, stdout_data, stderr_data))
            result = stdout_data
            # NOTE(review): on Python 3 stdout_data is bytes, so split('\n')
            # raises TypeError and the bare except below silently zeroes the
            # prediction — confirm which interpreter serves this view.
            arResult = result.split('\n')
            predicted_age = arResult[0]
            median_age = np.median([float(predicted_age), float(age)])
        except:
            # Best-effort fallback: on any predictor failure, use stated age.
            predicted_age = 0
            median_age = age
            #raise
        kwargs = {"gender": sex, #0 = female, 1 = male
                  "country": country_name, #country names as in 2 column of cntr.txt. For example France, Nigeria (case not sensitive)
                  "age":median_age, #integer or float 0 - 999
                  "height":height/100.0, #float height in meters
                  "weight":weight, #float weight in kilograms
                  "alcohol":alcohol, #integer. Different meaning of value for men and women:
                  #For men 0 = non-drinker, 1 = < 1 drink/month, 2 = 0-4/week, 3 = 5-9/week, 4 = 10-24/week, 5 = binger
                  #For women 0 = non-drinker, 1 = < 1 drink/month, 2 = 0-2/week, 3 = 3-5/week, 4 = 6-17/week, 5 = binger
                  "smoking":smoke, #integer 0 = never smoked, 1 = former smoker, 2 = current light smoker, 3 = current heavy smoker
                  "activity":activity, #integer 0 = low activity, 1 = moderate, 2 = high
                  "social_status":social_status, #boolean. True = good social life, False = poor social life
                  "mental":mental} #boolean. True = active mental illness, False = no active mental illness
        expected_longevity = ages_left(**kwargs)
        os.remove(settings.MEDIA_ROOT+"/uploads/"+random_file_name) #remove duplicate file
        #saving to DB
        new_test = RunnedTest(ip = ip,
                              metric = metric,
                              age = age,
                              sex = sex,
                              weight = weight,
                              height = height,
                              bmi = bmi,
                              smoking = smoke,
                              alcohol = alcohol,
                              ethnicity = objCountry,
                              social_status = social_status,
                              activity = activity,
                              mental_health = mental,
                              input_file = File(upload),
                              predicted_age = predicted_age,
                              expected_longevity = expected_longevity)
        new_test.save()
        self.request.session['test_id'] = new_test.id
        #raise Exception('form')
        # NOTE(review): the session key is assigned twice (debug leftover?).
        self.request.session['test_id'] = new_test.id
        return redirect(self.get_success_url())

    def form_invalid(self, form):
        # Re-render the page with the bound (invalid) form and its errors.
        return self.render_to_response(self.get_context_data(form=form))
class nnMortalityResult(TemplateView):
    """Result page: shows expected longevity for the test id in the session.

    Cleanup: removed the no-op dispatch() override (it only called super)
    and narrowed the bare except to the errors this lookup can raise.
    """
    template_name = 'website/nn_mortality_result.html'

    def get_context_data(self, **kwargs):
        context = super(nnMortalityResult, self).get_context_data(**kwargs)
        try:
            objTest = RunnedTest.objects.get(id=int(self.request.session['test_id']))
            context['test_id'] = objTest.id
            context['expected_longevity'] = objTest.expected_longevity
        except (KeyError, ValueError, TypeError, RunnedTest.DoesNotExist):
            # Direct visit without a session test id, or the row is gone.
            context['expected_longevity'] = 'Undefined'
        context['result_text'] = Article.objects.get(idx='result_text')
        return context
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,011
|
Mihkorz/mortality.ai
|
refs/heads/master
|
/website/admin.py
|
from django.contrib import admin
from django.db import models
from pagedown.widgets import AdminPagedownWidget
from .models import RunnedTest, Article
class RunnedTestAdmin(admin.ModelAdmin):
    """Admin change list showing every captured field of a submitted test."""
    list_display = (
        'id', 'ip', 'datetime', 'metric', 'age', 'predicted_age', 'sex',
        'height', 'weight', 'bmi', 'smoking', 'alcohol', 'ethnicity',
        'social_status', 'activity', 'mental_health', 'expected_longevity',
    )
class ArticleAdmin(admin.ModelAdmin):
    """Admin for editable site articles; TextFields use the pagedown editor."""
    list_display = ('idx', 'header', 'text')
    formfield_overrides = {models.TextField: {'widget': AdminPagedownWidget}}
# Expose both models in the Django admin with their customized ModelAdmins.
admin.site.register(RunnedTest, RunnedTestAdmin)
admin.site.register(Article, ArticleAdmin)
|
{"/Mortality/urls.py": ["/website/views.py"], "/website/views.py": ["/core/algorythm.py", "/website/models.py"], "/website/admin.py": ["/website/models.py"]}
|
8,094
|
yorkurt/pygame_controllers
|
refs/heads/master
|
/joy.py
|
import math
import pygame
import helpers
class Joystick_L:
    """Wrapper around pygame joystick index 0 (left stick).

    compute() refreshes x/y (radius-clamped to the unit circle, with a small
    center dead zone), throttle (axis 2 remapped to 0..1) and button states.
    If no joystick is present, joyInitL stays False and numButtons is 0.
    """
    def __init__(self):
        pygame.joystick.init()
        numJoys = pygame.joystick.get_count()
        self.joyInitL = False
        self.x = 0
        self.y = 0
        self.rad = 0
        self.throttle = 0
        if (numJoys > 0):
            self.joystick = pygame.joystick.Joystick(0)
            self.joystick.init()
            self.joyInitL = True
        else:
            print("No left joystick found")
            self.joyInitL = False
            self.numButtons = 0
            return
        self.numButtons = self.joystick.get_numbuttons()
        self.buttons = [0]*self.numButtons
        pygame.font.init()
        self.font = pygame.font.Font(pygame.font.get_default_font(),32)

    def compute(self):
        """Poll the stick: update x, y, rad, ang, throttle and buttons."""
        self.x = self.joystick.get_axis(0)
        self.y = self.joystick.get_axis(1)
        # Axis 2 reads -1..1 inverted; remap it to a 0..1 throttle.
        self.throttle = ((-1 * self.joystick.get_axis(2)) + 1) / 2
        self.rad = math.hypot(self.x,self.y)
        self.rad = helpers.limitToRange(self.rad,0,1)
        self.ang = math.atan2(self.y,self.x)
        self.x = self.rad*math.cos(self.ang)
        self.y = self.rad*math.sin(self.ang)
        #'clicks' to middle
        tab = .12
        if -tab < self.x < tab:
            self.x = 0
        if -tab < self.y < tab:
            self.y = 0
        # Bug fix: xrange is Python-2-only while this file already uses
        # Python-3-style print(); range works on both. (The long-disabled
        # draw() string was also dropped — rendering lives in main.py.)
        for i in range(self.numButtons):
            self.buttons[i] = self.joystick.get_button(i)
class Joystick_R:
    """Wrapper around pygame joystick index 1 (right stick).

    Same contract as Joystick_L but for the second attached joystick; if
    fewer than two sticks exist, joyInitR stays False and numButtons is 0.
    """
    def __init__(self):
        pygame.joystick.init()
        numJoys = pygame.joystick.get_count()
        self.joyInitR = False
        self.x = 0
        self.y = 0
        self.rad = 0
        self.throttle = 0
        if (numJoys > 1):
            self.joystick = pygame.joystick.Joystick(1)
            self.joystick.init()
            self.joyInitR = True
        else:
            print("No right joystick found")
            self.joyInitR = False
            self.numButtons = 0
            return
        self.numButtons = self.joystick.get_numbuttons()
        self.buttons = [0]*self.numButtons
        pygame.font.init()
        self.font = pygame.font.Font(pygame.font.get_default_font(),32)

    def compute(self):
        """Poll the stick: update x, y, rad, ang, throttle and buttons."""
        self.x = self.joystick.get_axis(0)
        self.y = self.joystick.get_axis(1)
        # Axis 2 reads -1..1 inverted; remap it to a 0..1 throttle.
        self.throttle = ((-1 * self.joystick.get_axis(2)) + 1) / 2
        self.rad = math.hypot(self.x,self.y)
        self.rad = helpers.limitToRange(self.rad,0,1)
        self.ang = math.atan2(self.y,self.x)
        self.x = self.rad*math.cos(self.ang)
        self.y = self.rad*math.sin(self.ang)
        #'clicks' to middle
        tab = .12
        if -tab < self.x < tab:
            self.x = 0
        if -tab < self.y < tab:
            self.y = 0
        # Bug fix: xrange is Python-2-only while this file already uses
        # Python-3-style print(); range works on both. (The long-disabled
        # draw() string was also dropped — rendering lives in main.py.)
        for i in range(self.numButtons):
            self.buttons[i] = self.joystick.get_button(i)
|
{"/joy.py": ["/helpers.py"]}
|
8,095
|
yorkurt/pygame_controllers
|
refs/heads/master
|
/helpers.py
|
def limitToRange(a, b, c):
    """Clamp a into the closed interval [b, c]."""
    return min(max(a, b), c)
|
{"/joy.py": ["/helpers.py"]}
|
8,096
|
yorkurt/pygame_controllers
|
refs/heads/master
|
/main.py
|
import math
import pygame
import joy
class Main:
    """Pygame demo: visualizes two joysticks and a 3-state mode switch.

    NOTE(review): the source was recovered with indentation stripped; the
    loop structure below is a reconstruction — confirm against the repo.
    """
    def __init__(self):
        self.SCREEN_WIDTH = 800
        self.SCREEN_HEIGHT = 450
        self.screen = pygame.display.set_mode((self.SCREEN_WIDTH,self.SCREEN_HEIGHT))
        self.objects = []
        self.mode = 1
        pygame.font.init()
        self.font = pygame.font.Font(pygame.font.get_default_font(),32)

    def setupGame(self):
        """Create clock and both joystick wrappers; size the button array."""
        self.clock = pygame.time.Clock()
        self.FPS = 60
        self.joy = joy.Joystick_L()
        self.joy2 = joy.Joystick_R()
        self.objects.append(self.joy)
        self.objects.append(self.joy2)
        # Bug fix: Joystick_L exposes numButtons as an attribute; it has no
        # get_numbuttons() method (that lives on the raw pygame joystick).
        l = self.objects[0].numButtons
        self.buttonArr1 = [0 for i in range(l)]

    def runGame(self):
        """Main loop: poll input, redraw, and handle mode-switch buttons."""
        self.gameRunning = 1
        while self.gameRunning:
            # Bug fix: this reset (and the writes below) used a bare local
            # buttonArr1, which raised UnboundLocalError; use the instance
            # attribute created in setupGame().
            self.buttonArr1 = [0 for i in range(len(self.buttonArr1))]
            self.getInput()
            self.compute()
            self.draw(self.screen)
            self.clock.tick(self.FPS)
            # Bug fix: the wrappers have no get_axis(); compute() stores the
            # clamped position in .x / .y.
            self.leftX = self.objects[0].x
            self.leftY = self.objects[0].y
            #self.rightX = self.objects[1].x
            #self.rightY = self.objects[1].y
            #handle buttons
            for event in pygame.event.get(pygame.JOYBUTTONUP): #event handling loop
                #handle mode switching - buttons 8/9 on both sticks
                print(event)
                if (event.button == 7): #button 8 increases mode
                    self.buttonArr1[7] = 1
                    if (self.mode == 3):
                        self.mode = 1
                    else:
                        self.mode = self.mode + 1
                    print("Mode is now: " + str(self.mode))
                if (event.button == 8): #button 9 decreases mode
                    self.buttonArr1[8] = 1
                    if (self.mode == 1):
                        self.mode = 3
                    else:
                        self.mode = self.mode - 1
                    print("Mode is now: " + str(self.mode))

    def getInput(self):
        """Drain the event queue (quit handling) and update the caption."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.gameRunning = 0
        pygame.display.set_caption(str(self.joy.x) + ', ' + str(self.joy.y) + ', ' + str(self.joy.rad) + ', ' + str(self.joy.throttle))

    def draw(self,surface):
        """Render button indicators and stick positions for both joysticks."""
        self.screen.fill((255,255,255))
        r = 200
        w = surface.get_width()
        h = surface.get_height()
        # Bug fix (both loops): xrange -> range for Python 3 compatibility.
        for i in range(self.joy.numButtons):
            if self.joy.buttons[i]:
                col = (0,255,0)
            else:
                col = (64,0,64)
            text = self.font.render(str(i),1,col)
            surface.blit(text,text.get_rect(centerx=w*(i+1)/(self.joy.numButtons+1),centery=h/2-30))
        x = int(round(w/2+self.joy.x*r))
        y = int(round(h/2+self.joy.y*r))
        pygame.draw.aaline(surface,(128,128,128),(w/2,h/2),(x,y),1)
        pygame.draw.circle(surface,(0,0,0),(x,y),8,4)
        for i in range(self.joy2.numButtons):
            if self.joy2.buttons[i]:
                col = (0,255,0)
            else:
                col = (64,0,64)
            text = self.font.render(str(i),1,col)
            surface.blit(text,text.get_rect(centerx=w*(i+1)/(self.joy2.numButtons+1),centery=h/2+30))
        x1 = int(round(w/2+self.joy2.x*r))
        y1 = int(round(h/2+self.joy2.y*r))
        pygame.draw.aaline(surface,(128,0,0),(w/2,h/2),(x1,y1),1)
        pygame.draw.circle(surface,(0,0,0),(x1,y1),8,4)
        pygame.draw.circle(surface,(0,255,255),(w/2,h/2),r,2)
        pygame.display.flip()

    def compute(self):
        """Refresh every registered joystick wrapper."""
        for obj in self.objects:
            obj.compute()
# Script entry: construct the app, run setup and the main loop, then shut
# pygame down cleanly once the loop exits.
m = Main()
m.setupGame()
m.runGame()
pygame.quit()
|
{"/joy.py": ["/helpers.py"]}
|
8,097
|
yorkurt/pygame_controllers
|
refs/heads/master
|
/talker.py
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import String#, Float64MultiArray
from controller.msg import FloatList, IntList
#import main
def talker():
    """Publish fixed demo axis/button values on 'controls' and 'buttons' at 10 Hz."""
    axis_pub = rospy.Publisher('controls', FloatList, queue_size=10)
    button_pub = rospy.Publisher('buttons', IntList, queue_size=10)
    rospy.init_node('controller_base', anonymous=True)
    loop_rate = rospy.Rate(10)  # 10 Hz publish cadence
    axis_msg = FloatList()
    button_msg = IntList()
    while not rospy.is_shutdown():
        # Hard-coded placeholder data; the commented-out main.* references in
        # the original file suggest real controller state was intended here.
        axis_msg.data = [1, -1, 0, 1]
        button_msg.data = [1, 0, 1, 0, 1]
        rospy.loginfo(axis_msg)
        axis_pub.publish(axis_msg)
        rospy.loginfo(button_msg)
        button_pub.publish(button_msg)
        loop_rate.sleep()
if __name__ == '__main__':
    # ROS nodes conventionally swallow the interrupt exception raised when
    # the node is shut down mid-sleep.
    try:
        talker()
    except rospy.ROSInterruptException:
        pass
|
{"/joy.py": ["/helpers.py"]}
|
8,116
|
giahy2507/convae
|
refs/heads/master
|
/submodular/main.py
|
__author__ = 'MichaelLe'
import submodular
import loadFile
from numpy import *
from numpy import *
import numpy as np
from tempfile import TemporaryFile
# Script: count the total number of sentence instances across all clusters
# stored in 'cluster_my_format.npy'.
clusters = load('cluster_my_format.npy')
sum = 0  # NOTE(review): shadows the builtin sum()
for cluster in clusters:
    if cluster != None:
        # cluster is a dictionary:
        # - 'key'  : document id
        # - 'value': list[instance]
        V = []
        P = []
        L = []
        for text_id in cluster.keys():
            list_instance = cluster[text_id] # fetch the value for key 'text_id'
            p = []
            for instance in list_instance:
                # Append a False "selected" flag, then flatten the instances.
                instance.append(False)
                p.append(instance[0])
                V.append(instance[0])
                L.append(len(instance[1].split()))
            P.append(p)
        sum = sum + len(V)
print (sum)
def insideMatrix(a, V):
    """Return True when `a` equals at least one element of `V`.

    Idiomatic replacement for the original C-style index scan; per-element
    `==` comparison semantics are unchanged.
    """
    return any(a == v for v in V)
def suma(clusters, alpha, galma, numberofWord):
    """Run the submodular summarizer on every cluster and flag selections.

    Each non-empty cluster maps a document id to a list of
    [vector, sentence_text] instances. A boolean slot is appended to every
    instance and set True when the instance's flat index is chosen by
    submodular.maximizeF. Returns the (mutated in place) clusters.
    """
    for cluster in clusters:
        if cluster != None:
            # cluster is a dictionary:
            # - 'key'  : document id
            # - 'value': list[instance]
            V = []
            P = []
            L = []
            for text_id in cluster.keys():
                list_instance = cluster[text_id] # fetch the value for key 'text_id'
                p = []
                for instance in list_instance:
                    instance.append(False)
                    p.append(instance[0])
                    V.append(instance[0])
                    L.append(len(instance[1].split()))
                P.append(p)
            summarize = sorted(submodular.maximizeF(V, P, alpha, galma, L, numberofWord))
            print (summarize)
            i = 0
            k = 0
            # Re-walk instances in the same flattening order; k is the flat index.
            for text_id in cluster.keys():
                list_instance = cluster[text_id]
                for instance in list_instance:
                    if insideMatrix(k, summarize) == True:
                        instance[2] = True
                        k = k + 1
                    else:
                        k = k + 1
    return clusters
# alpha = 0.4
# galma = 0.6
# numberofWord = 200
#
# global str
# for i in arange(0,1):
# galma = 0.4
# for j in arange(0,3):
# str1 = str(int(alpha*10))+'_'
# str2 = str(int(galma*10))+'_'
# str3 = str(numberofWord)
#
# strname = 'result' + str1 + str2 +str3
#
# clusters = load('cluster_my_format.npy')
# su = suma(clusters,alpha,galma,numberofWord)
# np.save(strname,su)
# galma = galma + 0.2
# alpha = alpha + 0.2
#
# alpha = 0.8
# for i in arange(0,1):
# galma = 0.2
# for j in arange(0,4):
# str1 = str(int(alpha*10))+'_'
# str2 = str(int(galma*10))+'_'
# str3 = str(numberofWord)
#
# strname = 'result' + str1 + str2 +str3
#
# clusters = load('cluster_my_format.npy')
# su = suma(clusters,alpha,galma,numberofWord)
# np.save(strname,su)
# galma = galma + 0.2
# alpha = alpha + 0.2
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,117
|
giahy2507/convae
|
refs/heads/master
|
/convaeclassification.py
|
__author__ = 'HyNguyen'
import theano
import theano.tensor as T
import numpy as np
from LayerClasses import MyConvLayer,FullConectedLayer,SoftmaxLayer
from tensorflow.examples.tutorials.mnist import input_data
from sys import stderr
import cPickle
import os
from scipy.misc import imread, imsave
if __name__ == "__main__":
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
print >> stderr, "readed data"
batch_size=100
number_featuremaps = 20
sentence_length = 28
embed_size = 28
learning_rate = 0.1
image_shape = (batch_size,1,sentence_length,embed_size)
filter_shape_encode = (20,1,5,28)
filter_shape_decode = (1,20,5,28)
rng = np.random.RandomState(23455)
params_save = [None]*8
if os.path.isfile("saveweight.bin"):
with open("saveweight.bin",mode="rb") as f:
params_save = cPickle.load(f)
# minibatch)
X = T.dmatrix("X") # data, presented as rasterized images
Y = T.dmatrix("Y")
layer0_encode_input = X.reshape((batch_size, 1, 28, 28))
layer0_encode = MyConvLayer(rng,layer0_encode_input,image_shape=image_shape,filter_shape=filter_shape_encode,border_mode="valid",activation = T.nnet.sigmoid, params=params_save[0:2])
layer1_encode_input = layer0_encode.output.flatten(2)
layer1_encode_input_shape = (batch_size,layer0_encode.output_shape[1] * layer0_encode.output_shape[2] * layer0_encode.output_shape[3])
layer1_encode = FullConectedLayer(layer1_encode_input,layer1_encode_input_shape[1],100,activation = T.nnet.sigmoid, params=params_save[2:4])
layer_hidden = FullConectedLayer(input=layer1_encode.output, n_in=100, n_out=50, activation= T.nnet.sigmoid)
layer_classification = SoftmaxLayer(input=layer_hidden.output, n_in=50, n_out=10)
err = layer_classification.error(Y)
cost = layer_classification.negative_log_likelihood(Y) + 0.001*(layer_classification.L2 + layer_hidden.L2)
params = layer_hidden.params + layer_classification.params
gparams = []
for param in params:
gparam = T.grad(cost, param)
gparams.append(gparam)
updates = []
for param, gparam in zip(params, gparams):
updates.append((param, param - learning_rate* gparam))
train_model = theano.function(inputs=[X,Y], outputs=[cost, err], updates=updates)
valid_model = theano.function(inputs=[X,Y], outputs=[cost, err])
predict_function = theano.function(inputs=[X], outputs=layer_classification.y_pred)
counter = 0
best_valid_err = 100
early_stop = 50
epoch_i = 0
while counter < early_stop:
epoch_i +=1
batch_number = int(mnist.train.labels.shape[0]/batch_size)
train_costs = []
train_errs = []
for batch in range(batch_number):
next_images, next_labels = mnist.train.next_batch(batch_size)
train_cost, train_err = train_model(next_images, next_labels)
train_costs.append(train_cost)
train_errs.append(train_err)
#print >> stderr, "batch "+str(batch)+" Train cost: "+ str(train_cost)
next_images, next_labels = mnist.validation.next_batch(batch_size)
valid_cost, val_err = valid_model(next_images, next_labels)
if best_valid_err > val_err:
best_valid_err = val_err
print >> stderr, "Epoch "+str(epoch_i)+" Train cost: "+ str(np.mean(np.array(train_costs)))+ "Train mae: "+ str(np.mean(np.array(train_errs))) + " Validation cost: "+ str(valid_cost)+" Validation mae "+ str(val_err) + ",counter "+str(counter)+ " __best__ "
counter = 0
with open("saveweight_caeclassification.bin", mode="wb") as f:
cPickle.dump(params,f)
else:
counter +=1
print >> stderr, "Epoch "+str(epoch_i)+" Train cost: "+ str(np.mean(np.array(train_costs)))+ "Train mae: "+ str(np.mean(np.array(train_errs))) + " Validation cost: "+ str(valid_cost)+" Validation mae "+ str(val_err) + ",counter "+str(counter)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,118
|
giahy2507/convae
|
refs/heads/master
|
/mulNN.py
|
__author__ = 'HyNguyen'
import theano
import theano.tensor as T
import numpy as np
from LayerClasses import MyConvLayer,FullConectedLayer,SoftmaxLayer
from tensorflow.examples.tutorials.mnist import input_data
import cPickle
import os
import sys
from lasagne.updates import adam
from sys import stderr
if __name__ == "__main__":
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
X = T.dmatrix("X") # data, presented as rasterized images
Y = T.dmatrix("Y")
mini_batch_size = 100
filter_shape_encode = (20,1,5,5)
filter_shape_decode = (1,20,5,5)
rng = np.random.RandomState(23455)
layer0_encode_input = X.reshape((mini_batch_size, 1, 28, 28))
layer0_encode = MyConvLayer(rng,layer0_encode_input,image_shape=(mini_batch_size, 1, 28, 28),filter_shape=filter_shape_encode,border_mode="valid")
layer1_input = layer0_encode.output.flatten(2)
n_in = layer0_encode.output_shape[1] * layer0_encode.output_shape[2] * layer0_encode.output_shape[3]
layer1 = FullConectedLayer(layer1_input,n_in,100)
layer_classification = SoftmaxLayer(input=layer1.output, n_in=100, n_out=10)
err = layer_classification.error(Y)
cost = layer_classification.negative_log_likelihood(Y) + 0.001*(layer0_encode.L2 + layer_classification.L2 + layer1.L2)
params = layer0_encode.params + layer1.params + layer_classification.params
updates = adam(cost,params)
train_model = theano.function(inputs=[X,Y], outputs=[cost, err], updates=updates)
valid_model = theano.function(inputs=[X,Y], outputs=[cost, err])
predict_function = theano.function(inputs=[X], outputs=layer_classification.y_pred)
counter = 0
best_valid_err = 100
early_stop = 20
epoch_i = 0
while counter < early_stop:
epoch_i +=1
batch_number = int(mnist.train.labels.shape[0]/mini_batch_size)
train_costs = []
train_errs = []
for batch in range(batch_number):
next_images, next_labels = mnist.train.next_batch(mini_batch_size)
train_cost, train_err = train_model(next_images, next_labels)
train_costs.append(train_cost)
train_errs.append(train_err)
#print >> stderr, "batch "+str(batch)+" Train cost: "+ str(train_cost)
next_images, next_labels = mnist.validation.next_batch(mini_batch_size)
valid_cost, val_err = valid_model(next_images, next_labels)
if best_valid_err > val_err:
best_valid_err = val_err
print >> stderr, "Epoch "+str(epoch_i)+" Train cost: "+ str(np.mean(np.array(train_costs)))+ "Train mae: "+ str(np.mean(np.array(train_errs))) + " Validation cost: "+ str(valid_cost)+" Validation mae "+ str(val_err) + ",counter "+str(counter)+ " __best__ "
counter = 0
with open("saveweight_caeclassification.bin", mode="wb") as f:
cPickle.dump(params,f)
else:
counter +=1
print >> stderr, "Epoch "+str(epoch_i)+" Train cost: "+ str(np.mean(np.array(train_costs)))+ "Train mae: "+ str(np.mean(np.array(train_errs))) + " Validation cost: "+ str(valid_cost)+" Validation mae "+ str(val_err) + ",counter "+str(counter)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,119
|
giahy2507/convae
|
refs/heads/master
|
/summary/summary.py
|
__author__ = 'MichaelLe'
from mmr import mmrelevance
from submodular import submodular
import kmean_sum
def do_summarize(V,n, P, L, alpha, galma, numberofWord, mode_str):
    """Dispatch to one of the summarization back-ends by mode name.

    Args:
        V: sentence representations for the whole cluster.
        n: number of sentences (forwarded to SubmodularFunc only).
        P: sentences grouped per document (used by the submodular method).
        L: per-sentence lengths in words.
        alpha, galma: trade-off coefficients (meaning depends on the method).
        numberofWord: word budget for the summary.
        mode_str: one of the keys of modeList below.

    Returns:
        Sorted list of selected sentence indices.

    Raises:
        KeyError: if mode_str is not a known mode name.
    """
    modeList = {"sub_cosine":0, "sub_euclid":1,"mmr_cosine":2,"mmr_euclid":3,"kmean_simple":4,
                "mmr_kmean_cosine":5,"mmr_kmean_euclid":6,"mmr_pagerank_cosine":7,
                "mmr_pagerank_euclid":8}
    mode = modeList[mode_str]
    # The (mode - base) offset passed below selects cosine (0) vs euclidean
    # (1) inside each back-end, matching the *_cosine/*_euclid mode names.
    if (mode == 0) or mode == 1: ## cosine distance
        return sorted(submodular.SubmodularFunc(V,n, P, L, alpha, galma, numberofWord, mode))
    elif mode == 2 or mode == 3:
        return sorted(mmrelevance.summaryMMR11(V, L, galma, numberofWord, mode-2))
    elif mode == 4:
        return sorted(kmean_sum.kmean_summary(V,L,numberofWord))
    elif mode == 5 or mode == 6:
        return sorted(mmrelevance.summaryMMR_centroid_kmean(V,L,galma,numberofWord,mode-5))
    elif mode == 7 or mode == 8:
        return sorted(mmrelevance.mmr_pagerank(V, L, alpha, numberofWord, mode - 7))
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,120
|
giahy2507/convae
|
refs/heads/master
|
/submodular/main2.py
|
__author__ = 'HyNguyen'
import numpy as np
import submodular
def insideMatrix(a, V):
    """Return True when `a` equals at least one element of `V`.

    Replaces the index-based scan with an idiomatic membership test;
    per-element `==` comparison semantics are unchanged.
    """
    return any(a == v for v in V)
def read_cluster_hy_format(cluster_hy_format_file):
    """Summarize every cluster in a .npy cluster file and save the result.

    Each cluster maps text_id -> list of [sentence_text, vector] instances.
    A boolean slot is appended to every instance and set True for sentences
    chosen by the submodular summarizer; the mutated clusters are written to
    'file_cluster_hy_format_2411_result.npy'.
    """
    clusters = np.load(cluster_hy_format_file)
    sum1 = 0  # NOTE(review): never updated or read afterwards
    for cluster in clusters:
        V = []
        P = []
        L = []
        if cluster !=None:
            for text_id in cluster.keys():
                p = []
                instances = cluster[text_id]
                for instance in instances:
                    #print(instance[1]) #vector (100,1)
                    instance.append(False)
                    p.append(instance[1])
                    V.append(instance[1])
                    L.append(len(instance[0].split()))
                P.append(p)
            alpha = 0.7
            galma = 0.3
            numberofWord = 200
            summarize = sorted(submodular.maximizeF(V, P, alpha, galma, L, numberofWord))
            print (summarize)
            i = 0
            k = 0
            # Re-walk instances in the same flattening order; k is the flat
            # index compared against the selected summary indices.
            for text_id in cluster.keys():
                list_instance = cluster[text_id]
                for instance in list_instance:
                    if insideMatrix(k, summarize) == True:
                        instance[2] = True
                        k = k + 1
                    else:
                        k = k + 1
    np.save('file_cluster_hy_format_2411_result.npy',clusters)
read_cluster_hy_format('file_cluster_hy_format_2411.npy')
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,121
|
giahy2507/convae
|
refs/heads/master
|
/submodular/test.py
|
__author__ = 'MichaelLe'
import numpy as np
# Scratch experiment: grow a float array one element at a time via np.append.
S = [1, 2]  # NOTE(review): apparently unused leftover
L = np.array([])
L = np.append(L,2)
L = np.append(L,40)
L = np.append(L, 60)
print L.shape  # Python 2 print statement; expected output: (3,)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,122
|
giahy2507/convae
|
refs/heads/master
|
/vector/main.py
|
__author__ = 'HyNguyen'
import time
import numpy as np
if __name__ == "__main__":
start = time.time()
A = np.load("data_processed.npy")
end = time.time()
print "load data ",end - start
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,123
|
giahy2507/convae
|
refs/heads/master
|
/preparedata4convae.py
|
__author__ = 'HyNguyen'
from vector.wordvectors import WordVectors
import time
import numpy as np
from gensim.models import word2vec
from nltk.corpus import brown
from nltk.corpus import treebank
import nltk
import xml.etree.ElementTree as ET
import os
import matplotlib.pyplot as plt
def statistic_freq():
    """Histogram sentence lengths (in tokens) over PTB, Brown and DUC2004.

    Builds freq_array where freq_array[k] counts sentences of k tokens,
    then plots the first 200 buckets with matplotlib. The word-vector model
    is loaded only so its vocabulary size can be printed.
    """
    wordvectors = WordVectors.load("model/wordvector.txt")
    freq_array = [0] * 500  # a sentence longer than 499 tokens would raise IndexError
    # Penn Tree Bank
    treebank_sents = treebank.sents()
    for i in range(len(treebank_sents)):
        senttmp = " ".join(treebank_sents[i])
        words = nltk.word_tokenize(senttmp)
        freq_array[len(words)] +=1
    # Brown
    brown_sents = brown.sents()
    for i in range(len(brown_sents)):
        senttmp = " ".join(brown_sents[i])
        words = nltk.word_tokenize(senttmp)
        freq_array[len(words)] +=1
    # DUC data
    folder_path = "/Users/HyNguyen/Documents/Research/Data/DUC20042005/duc2004/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs"
    clusters_name = os.listdir(folder_path)
    for cluster_name in clusters_name:
        if cluster_name[0] == ".":
            # skip hidden files such as .DS_Store
            continue
        files_name = os.listdir(folder_path + "/" + cluster_name)
        for file_name in files_name:
            if file_name[0] == ".":
                # skip hidden files such as .DS_Store
                continue
            file_path = folder_path + "/" + cluster_name +"/"+ file_name
            try:
                tree = ET.parse(file_path)
                root = tree.getroot()
                # NOTE(review): root._children is a private ElementTree field
                # and the TEXT element is assumed to be the 4th child — confirm.
                text_tag = root._children[3]
                if text_tag.tag == "TEXT":
                    text = text_tag.text.replace("\n", "")
                    sentences = nltk.tokenize.sent_tokenize(text)
                    for sentence in sentences:
                        words = nltk.word_tokenize(sentence)
                        freq_array[len(words)] +=1
            except:
                print "exception parse XML: ", file_name
                continue
        print("Finish cluster name:", cluster_name," , Wordvector size: ", str(wordvectors.embed_matrix.shape[0]))
    plt.plot(range(200), freq_array[:200], color='red', marker='.')
    plt.show()
def collect_data_from_ptb_brow_duc2004():
    """Gather tokenized sentences from Penn Treebank, Brown and DUC2004.

    Returns:
        list[list[str]]: one token list per sentence, in collection order.
    """
    start_collect = time.time()
    samples = []
    # Penn Tree Bank
    treebank_sents = treebank.sents()
    for i in range(len(treebank_sents)):
        senttmp = " ".join(treebank_sents[i])
        words = nltk.word_tokenize(senttmp)
        samples.append(words)
    print("Finish collecting training data from Penn Tree Bank")
    # Brown
    brown_sents = brown.sents()
    for i in range(len(brown_sents)):
        senttmp = " ".join(brown_sents[i])
        words = nltk.word_tokenize(senttmp)
        samples.append(words)
    print("Finish collecting training data from Brown")
    # DUC data
    folder_path = "/Users/HyNguyen/Documents/Research/Data/DUC20042005/duc2004/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs"
    clusters_name = os.listdir(folder_path)
    for cluster_name in clusters_name:
        if cluster_name[0] == ".":
            # skip hidden files such as .DS_Store
            continue
        files_name = os.listdir(folder_path + "/" + cluster_name)
        for file_name in files_name:
            if file_name[0] == ".":
                # skip hidden files such as .DS_Store
                continue
            file_path = folder_path + "/" + cluster_name +"/"+ file_name
            try:
                tree = ET.parse(file_path)
                root = tree.getroot()
                # NOTE(review): relies on the private root._children field and
                # a fixed element position — confirm against the DUC XML layout.
                text_tag = root._children[3]
                if text_tag.tag == "TEXT":
                    text = text_tag.text.replace("\n", "")
                    sentences = nltk.tokenize.sent_tokenize(text)
                    for sentence in sentences:
                        words = nltk.word_tokenize(sentence)
                        samples.append(words)
            except:
                print "exception parse XML: ", file_name
                continue
    print("Finish collecting training data from DUC2004")
    print("length of samples", len(samples))
    end_collect = time.time()
    print("Total time for collecting training data: " + str(end_collect - start_collect))
    return samples
if __name__ == "__main__":
wordvectors = WordVectors.load("model/wordvector.txt")
train_data = collect_data_from_ptb_brow_duc2004()
final_array = []
for i, words in enumerate(train_data):
words_array = wordvectors.cae_prepare_data_from_words(words, 10, 100)
final_array.append(words_array)
if i == 69:
break
final_array = np.array(final_array)
print(final_array.shape)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,124
|
giahy2507/convae
|
refs/heads/master
|
/summaryobject.py
|
__author__ = 'HyNguyen'
import os
import codecs
import numpy as np
import codecs
from vector.wordvectors import WordVectors
from convae import ConvolutionAutoEncoder
import xml.etree.ElementTree as ET
import nltk
class Cluster(object):
    """A document cluster for multi-document summarization.

    Bundles the cluster's source documents with its reference (gold)
    summaries; system-produced summaries are collected in my_summarys.
    """
    def __init__(self, cluster_id ,list_documents, list_references):
        self.list_documents = list_documents
        self.list_references = list_references
        self.length_documents = len(list_documents)
        self.length_references = len(list_references)
        self.cluster_id = cluster_id
        self.my_summarys = []
    @classmethod
    def load_from_folder_vietnamese_mds(cls, cluster_id , cluster_path):
        """Build a Cluster from a Vietnamese MDS folder.

        Documents are files named *.body.tok.txt; references are files whose
        name contains both ".ref" and ".tok.txt". Returns None when
        cluster_path does not exist.
        """
        if os.path.exists(cluster_path):
            files_name = os.listdir(cluster_path)
            list_documents = []
            list_references = []
            for file_name in files_name:
                file_prefix = file_name.find('.body.tok.txt')
                sentences = []
                document_id = ""
                if file_prefix > 0 :
                    document_id = file_name[:file_prefix]
                    file = codecs.open(cluster_path + '/' + file_name)
                    for line in file.readlines():
                        # remove name of authors: lines shorter than 50 chars
                        # are treated as bylines rather than sentences
                        if len(line) < 50:
                            continue
                        sentences.append(Sentence(line))
                    list_documents.append(Document(sentences,document_id))
                    file.close()
                elif file_name.find(".ref") != -1 and file_name.find(".tok.txt") != -1:
                    fi = codecs.open(cluster_path + '/' + file_name)
                    lines = fi.readlines()
                    sentences = [Sentence(line,None) for line in lines]
                    fi.close()
                    document_id = "ref"
                    list_references.append(Document(sentences,document_id))
            return Cluster(cluster_id,list_documents, list_references)
        else:
            return None
    @classmethod
    def load_from_folder_duc(cls, cluster_id, cluster_path, wordvectors):
        """Load a DUC2004 cluster from XML files, embedding each sentence.

        Each sentence vector is the additive composition of its word vectors
        (wordvectors.get_vector_addtion). Returns None on a bad path.
        """
        if os.path.exists(cluster_path):
            file_names = os.listdir(cluster_path)
            list_documents = []
            list_references = []
            for file_name in file_names:
                if file_name[0] == ".":
                    # skip hidden files such as .DS_Store
                    continue
                sentences_object = []
                file_path = cluster_path + "/" + file_name
                tree = ET.parse(file_path)
                root = tree.getroot()
                # NOTE(review): relies on the private _children field and on
                # TEXT being the 4th element of the document — confirm.
                text_tag = root._children[3]
                if text_tag.tag == "TEXT":
                    text = text_tag.text.replace("\n", "")
                    sentences = nltk.tokenize.sent_tokenize(text)
                    for sentence in sentences:
                        words = nltk.word_tokenize(sentence)
                        sent_vec = wordvectors.get_vector_addtion(words)
                        sentences_object.append(Sentence(sentence,sent_vec))
                document_id = file_name
                list_documents.append(Document(sentences_object,document_id))
            return Cluster(cluster_id, list_documents,list_references)
        else:
            print("Not a path")
            return None
    @classmethod
    def load_from_opinosis(cls, cluster_id, cluster_path, wordvectors):
        """Load an Opinosis cluster (one plain-text file, one sentence per
        line) as a single document. Returns None on a bad path."""
        if os.path.exists(cluster_path):
            list_documents = []
            list_references = []
            sentences_object = []
            with open(cluster_path, mode="r") as f:
                sentences = f.readlines()
                for sentence in sentences:
                    words = nltk.word_tokenize(sentence)
                    sent_vec = wordvectors.get_vector_addtion(words)
                    sentences_object.append(Sentence(sentence,sent_vec))
                list_documents.append(Document(sentences_object,cluster_id))
            return Cluster(cluster_id, list_documents,list_references)
        else:
            print("Not a path")
            return None
class Document(object):
    """An ordered collection of Sentence objects plus bookkeeping totals."""

    def __init__(self, list_sentences, document_id=-1):
        self.list_sentences = list_sentences
        self.document_id = document_id
        self.length = len(list_sentences)
        # Total word count over the real Sentence instances only; any other
        # entries in the list are ignored, exactly as the original did.
        total_words = 0
        for item in list_sentences:
            if isinstance(item, Sentence):
                total_words += item.length
        self.word_count = total_words
class Sentence(object):
    """A single sentence with an optional embedding vector."""

    def __init__(self, content, vector=None):
        self.content = content
        self.vector = vector
        # Space-count approximation of the sentence length (word count
        # minus one for normally spaced text).
        self.length = content.count(" ")
        # NOTE: attribute name typo ("sentece") kept for compatibility.
        self.sentece_id = -1
import numpy as np
import time
import cPickle
if __name__ == "__main__":
clusterpath = "data/vietnamesemds/cluster_1/"
vectormodel = "model/word2vec/100"
vietnamesemds_path = "data/vietnamesemds/"
start = time.time()
w2v = WordVectors.load("vector/100")
end = time.time()
convae = ConvolutionAutoEncoder.rebuild_for_testing(mini_batch_size=1,filemodel="model/CAE.model")
clusters = [None]* 201
counter = 1
for cluster_id in os.listdir(vietnamesemds_path):
_, id = cluster_id.split("_")
cluster = Cluster.load_from_folder(cluster_id, vietnamesemds_path + cluster_id + "/")
print ("Cluster ", counter)
counter+=1
for document in cluster.list_documents:
for sentence in document.list_sentences:
sentence_matrix = w2v.cae_prepare_data(sentence.content)
if sentence_matrix is None:
sentence.vector = None
continue
sentence_vector = convae.get_vector_function(sentence_matrix)
sentence.vector = sentence_vector.T
clusters[int(id)] = cluster
with open("data/vietnamesemds.pikcle", mode="wb") as f:
cPickle.dump(clusters, f)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,125
|
giahy2507/convae
|
refs/heads/master
|
/mmr/mmrelevance.py
|
__author__ = 'MichaelLe'
from vector import *
import numpy as np
from numpy import linalg
from sklearn.cluster import KMeans
import networkx as nt
def build_sim_matrix(senList, mode):
    """Build the pairwise similarity matrix for a list of sentence vectors.

    Args:
        senList: list of sentence vectors; each element must be a 1-d
            np.array (or equivalent).
        mode: distance mode forwarded to similarity().

    Returns:
        A (numSen + 1) x numSen matrix: entry [i, j] is
        similarity(senList[i], senList[j]); the extra last row holds, per
        column, the summed similarity of that sentence to the whole
        document (itself included).
    """
    numSen = np.size(senList,0)
    simM = np.ones((numSen + 1, numSen))
    for i in range(numSen):
        # Fill the upper triangle and mirror it into the lower triangle.
        for j in range(i,numSen,1):
            simM[i,j] = similarity(senList[i],senList[j], mode)
            simM[j,i] = simM[i,j]
    #centroid_vec = np.average(senList, axis = 0)
    for i in range(numSen):
        simM[numSen,i] = np.sum(simM[:numSen,i])
        #simM[numSen + 1, i] = linalg.norm(senList[i] - centroid_vec)
    return simM
def get_sim_for_set(sim_matrix, sen, set_sen):
    """Similarity between sentence `sen` and a set of sentences.

    Defined as the linear combination (plain sum) of the pairwise
    similarities sim_matrix[sen, s] over every s in set_sen.

    Args:
        sim_matrix: pairwise similarity matrix for all sentences.
        sen: index of the sentence of interest.
        set_sen: iterable of sentence indices forming the set.

    Returns:
        Sum of sim_matrix[sen, s] for s in set_sen (0 for an empty set).
    """
    # Builtin sum replaces the original manual accumulator loop.
    return sum(sim_matrix[sen, s] for s in set_sen)
def scoreMMR1(sim_matrix, sen, n, summary, lamda):
    """MMR score, version 1.

    Relevance is the sentence's average similarity to the whole document
    (read from the aggregate row n of sim_matrix); redundancy is its
    average similarity to the sentences already selected. The two are
    traded off by lamda and the absolute difference is returned.

    Args:
        sim_matrix: matrix from build_sim_matrix (last row = document sums).
        sen: candidate sentence index.
        n: number of sentences in the document.
        summary: indices already selected for the summary.
        lamda: trade-off coefficient in [0, 1].
    """
    relevance = sim_matrix[n, sen] / n
    if len(summary) > 0:
        redundancy = get_sim_for_set(sim_matrix, sen, summary) / len(summary)
    else:
        redundancy = 0
    return np.abs(lamda * relevance - (1 - lamda) * redundancy)
def get_simNorm_for_set(sen, setS):
    """Euclidean distance from sentence vector `sen` to the centroid of setS.

    Returns 0 when setS is empty. (Despite the original banner comment, the
    result is NOT divided by |setS|.)
    """
    if len(setS) == 0:
        return 0
    centroid = np.average(setS, axis=0)
    return np.linalg.norm(sen - centroid)
def stopCondition(len_sen_mat, summary, max_word):
    """Return 1 when the selected summary exceeds the word budget, else 0.

    Args:
        len_sen_mat: 1-d np.array of per-sentence word counts (must support
            fancy indexing by a list of indices).
        summary: list of selected sentence indices (may be empty).
        max_word: maximum number of words allowed in the summary.
    """
    # int(bool) keeps the historical 0/1 return contract while replacing
    # the verbose if/else.
    return int(np.sum(len_sen_mat[summary]) > max_word)
def summaryMMR11(document, len_sen_mat,lamda, max_word, mode):
    """Greedy MMR summarization using the version-1 score.

    Args:
        document: list of sentence vectors.
        len_sen_mat: 1-d np.array of per-sentence word counts.
        lamda: relevance/redundancy trade-off forwarded to scoreMMR1.
        max_word: word budget for the summary.
        mode: distance mode forwarded to build_sim_matrix.

    Returns:
        List of selected sentence indices, in selection order.
    """
    sim_matrix = build_sim_matrix(document, mode)
    n = len(document)
    summary = [ ]
    while (stopCondition(len_sen_mat,summary,max_word) == 0):
        # Score every not-yet-selected sentence; already-selected ones keep
        # a score of 0 and so are never re-picked by argmax.
        score_matrix = np.zeros(n)
        for i in range(n):
            if (i in summary) == False:
                score_matrix[i] = scoreMMR1(sim_matrix,i,n,summary, lamda)
        selected_sen = np.argmax(score_matrix)
        summary.append(selected_sen)
    return summary
# def scoreMMR2(sim_matrix_doc, pos_sen, sen, summary, lamda):
# centroid_vec = np.linalg.norm(summary)
# sim1 = sim_matrix_doc[pos_sen]
# if (len(summary) > 0):
# sim2 = get_simNorm_for_set(sen, summary)/(len(summary))
# return lamda*sim1 - (1-lamda)*sim2
# else:
# return lamda*sim1
#
# def get_sen(document, S):
# re = []
# for s in S:
# re.append(document[s])
# return re
#
def summaryMMR_centroid_kmean(document_list, len_sen_mat,lamda, max_word, mode):
    """Greedy MMR summarization seeded with the k-means (k=1) medoid.

    The sentence closest to the single k-means centroid is selected first;
    subsequent picks follow the same greedy MMR loop as summaryMMR11.
    """
    sim_matrix = build_sim_matrix(document_list, mode)
    n = len(document_list)
    # Flatten the sentence vectors into an (n, dim) matrix for sklearn.
    documet_tmp = np.array(document_list).reshape(n, document_list[0].shape[0])
    # fit_transform yields each sentence's distance to the centroid;
    # argmin over that column is the medoid sentence index.
    centroid = np.argmin(KMeans(n_clusters=1).fit_transform(documet_tmp), axis = 0)
    summary = []
    summary.append(centroid[0])
    while (stopCondition(len_sen_mat,summary,max_word) == 0):
        score_matrix = np.zeros(n)
        for i in range(n):
            if (i in summary) == False:
                score_matrix[i] = scoreMMR1(sim_matrix,i,n,summary, lamda)
        selected_sen = np.argmax(score_matrix)
        summary.append(selected_sen)
    return summary
def check_threshold_mmr_pagerank(sim_matrix, summary, s, threshold_t):
    '''
    Return 1 when sentence s is dissimilar enough from every sentence
    already in the summary, 0 otherwise.

    parameter:
        sim_matrix: matrix of similarity of all pairs of sentences
        summary: indices of sentences already selected
        s: candidate sentence index
        threshold_t: similarity threshold to check against
    return:
        1: sim(s, each sentence in summary) <= threshold_t for all of them
        0: some pair exceeds the threshold
    '''
    # Same `>` comparisons as the original loop, folded into any().
    too_similar = any(sim_matrix[s, su] > threshold_t for su in summary)
    return 0 if too_similar else 1
def mmr_pagerank(document_list,len_sen_mat, threshold_t, max_word, mode):
    """Greedy MMR-style selection ordered by PageRank centrality.

    Builds a complete similarity graph over the sentences, ranks them with
    PageRank (edge weight = pairwise similarity), then repeatedly takes the
    highest-ranked remaining sentence that is not too similar to anything
    already selected, until the word budget is exceeded.

    NOTE(review): the threshold_t parameter is effectively dead — it is
    unconditionally overwritten below with the average similarity of
    sentence 0 before it is ever read.
    """
    n = len(document_list)
    sim_matrix = build_sim_matrix(document_list, mode)
    g = nt.Graph()
    for i in range(n):
        for j in range(i+1,n,1):
            g.add_edge(i,j, distance_edge = sim_matrix[i,j])
    page_rank = nt.pagerank(g, weight = "distance_edge")
    score = []
    for i in range(n):
        score.append(page_rank[i])
    summary = []
    # Overwrites the caller-supplied threshold (see NOTE in the docstring).
    threshold_t = np.average(sim_matrix[0,:])
    while (stopCondition(len_sen_mat,summary, max_word) == 0):
        s = np.argmax(score)
        score[s] = 0 # zero out s so argmax never picks it again
        if check_threshold_mmr_pagerank(sim_matrix,summary,s,threshold_t) == 1:
            summary.append(s)
    return summary
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,126
|
giahy2507/convae
|
refs/heads/master
|
/vector/wordvectors.py
|
__author__ = 'HyNguyen'
import numpy as np
import time
from gensim.models import word2vec
import pickle
import copy
import os
class WordVectors(object):
    """In-memory word-embedding table with helpers for the conv autoencoder.

    embed_matrix row 0 is the "UNK" vector (filled with the mean of all
    loaded vectors in load()); word_index maps a token to its row index.
    """
    def __init__(self, embsize, embed_matrix, word_index):
        self.embsize = embsize
        self.embed_matrix = embed_matrix
        self.word_index = word_index
        # NOTE(review): keys() is a live view on Python 3 but a snapshot
        # list on Python 2 — later additions behave differently per version.
        self.word_list = word_index.keys()
        # Hit/miss counters maintained by wordvector().
        self.count_null_word = 0
        self.count_exist_word = 0
    def add_wordvector_from_w2vmodel(self, word2vec, words):
        """Append vectors for the given words found in a gensim w2v model.

        Words missing from the model are counted in count_null_word via the
        bare except. NOTE(review): reshape(1,300) hard-codes a 300-d model
        regardless of self.embsize — confirm this matches the model used.
        """
        for word in words:
            try:
                vector = word2vec[word]
                if word in self.word_index.keys():
                    continue
                else:
                    self.word_index[word] = len(self.word_index.keys())
                    self.embed_matrix = np.concatenate((self.embed_matrix,vector.reshape(1,300)))
                    # print("hy")
                    # print(self.embed_matrix.shape)
                    self.count_exist_word +=1
            except:
                self.count_null_word +=1
                continue
    def save_pickle(self, filename):
        """Pickle this whole object to `filename`."""
        with open(filename, mode="wb") as f:
            pickle.dump(self,f)
    @classmethod
    def load_pickle(cls, filename):
        """Unpickle a WordVectors from `filename`; prints a message and
        returns None when the file does not exist."""
        if os.path.isfile(filename):
            with open(filename, mode="rb") as f:
                return pickle.load(f)
        else:
            print("no file")
    def save_text_format(self, filename):
        """Write word2vec-style text: a header line, then 'word v1 v2 ...' rows.

        NOTE(review): reshape(300) hard-codes 300 dims; the warning print
        below ("co gi do sai sai") is Vietnamese for "something looks wrong"
        and fires when matrix rows and vocabulary size disagree.
        """
        with open(filename, mode= "w") as f:
            if self.embed_matrix.shape[0] != len(self.word_index.keys()):
                print("co gi do sai sai")
            f.write(str(self.embed_matrix.shape[0]) + " " + str(self.embsize)+ "\n")
            print(self.embed_matrix.shape)
            for key in self.word_index.keys():
                index = self.word_index[key]
                vector = self.embed_matrix[index].reshape(300)
                listnum = map(str, vector.tolist())
                f.write(key + " " + " ".join(listnum) + "\n")
    @classmethod
    def load(cls, filename):
        """Read a word2vec-style text file into a new WordVectors.

        Row 0 is reserved for "UNK" and is filled with the mean of all
        loaded vectors once reading completes.
        """
        fi = open(filename,mode="r")
        dict_size, embsize = fi.readline().split()
        dict_size, embsize = int(dict_size), int(embsize)
        embed_matrix = np.zeros((dict_size+1,embsize),dtype=np.float32)
        word_index = {"UNK":0}
        counter = 1
        for i in range(1,dict_size+1,1):
            counter +=1
            if counter % 10000 == 0:
                print("Process wordvector line: ", counter)
            elements = fi.readline().split()
            word = elements[0]
            vector = np.array(elements[1:]).reshape((1,embsize))
            word_index[word] = i
            embed_matrix[i] = vector
        fi.close()
        embed_matrix[0] = np.mean(embed_matrix[1:],axis=0,dtype=np.float32)
        return WordVectors(embsize,embed_matrix,word_index)
    def wordvector(self, word):
        """Return the embedding row for `word`, or the UNK row 0 for unknown
        words; updates the hit/miss counters as a side effect."""
        if word in self.word_list:
            self.count_exist_word +=1
            return self.embed_matrix[self.word_index[word]]
        else:
            #Null word
            self.count_null_word +=1
            return self.embed_matrix[0]
    def get_vector_addtion(self, words):
        """Additive composition: sum of the word vectors of `words`.

        The first vector is deep-copied so the in-place += below never
        mutates a row of embed_matrix.
        """
        result_vec = copy.deepcopy(self.wordvector(words[0]))
        for i in range(1,len(words)):
            result_vec += self.wordvector(words[i])
        return result_vec
    def cae_prepare_data_from_string(self, sentence, min_length=10, max_length=100):
        """Embed a whitespace-split sentence as a fixed-size padded matrix.

        Zero-pads to max_length rows and adds 5 zero rows before and after;
        returns None (after printing the sentence) when the token count is
        outside the exclusive range (min_length, max_length).
        """
        sentence = sentence.replace("\n","")
        elements = sentence.split()
        sentence_matrix = np.array([self.wordvector(word) for word in elements])
        padding = np.zeros((5,self.embsize),dtype=float)
        if sentence_matrix.shape[0] < max_length and sentence_matrix.shape[0] > min_length:
            sentence_matrix = np.concatenate((sentence_matrix,np.zeros((max_length-sentence_matrix.shape[0],self.embsize))))
        else:
            print(sentence)
            return None
        sentence_matrix_final = np.concatenate((padding,sentence_matrix,padding))
        return sentence_matrix_final
    def cae_prepare_data_from_words(self, words, min_length=10, max_length=100):
        """Like cae_prepare_data_from_string but takes a token list and uses
        inclusive length bounds; returns None when out of range."""
        sentence_matrix = np.array([self.wordvector(word) for word in words])
        padding = np.zeros((5,self.embsize),dtype=np.float32)
        if sentence_matrix.shape[0] <= max_length and sentence_matrix.shape[0] >= min_length:
            sentence_matrix = np.concatenate((sentence_matrix,np.zeros((max_length-sentence_matrix.shape[0],self.embsize))))
        else:
            # print(" ".join(words))
            return None
        sentence_matrix_final = np.concatenate((padding,sentence_matrix,padding))
        return sentence_matrix_final
if __name__ == "__main__":
    # Smoke test: load the text-format word vectors written by
    # save_text_format. The consistency check against the original Google
    # word2vec model is kept below, commented out, for reference.
    wordvector = WordVectors.load("../model/wordvector.txt")
    # w2v = word2vec.Word2Vec.load_word2vec_format("/Users/HyNguyen/Documents/Research/Data/GoogleNews-vectors-negative300.bin",binary=True)
    #
    # for key in wordvector.word_index.keys():
    #     if key == "UNK":
    #         continue
    #     A = wordvector.wordvector(key).reshape(300)
    #     B = w2v[key].reshape(300)
    #     # print A.shape
    #     # print A.dtype
    #     # print B.shape
    #     # print B.dtype
    #
    #     if np.array_equal(A,B) is False:
    #         print(key)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,127
|
giahy2507/convae
|
refs/heads/master
|
/mmr/__init__.py
|
__author__ = 'MichaelLe'
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,128
|
giahy2507/convae
|
refs/heads/master
|
/caesummarizer.py
|
__author__ = 'HyNguyen'
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
import numpy as np
from summaryobject import *
from summary import summary as smr
from vector.wordvectors import WordVectors
from convae import ConvolutionAutoEncoder
import cPickle
import os
class CAESummarizer(object):
    """Multi-document summarizer built on a convolutional auto-encoder (CAE)
    sentence encoder and a submodular sentence-selection routine."""

    def __init__(self, cae_model, word_vector_model, mode = 0):
        # cae_model: trained ConvolutionAutoEncoder used to encode sentences.
        # word_vector_model: WordVectors lookup table.
        # mode: selection-strategy id (see the modeList comment in this file).
        self.cae = cae_model
        self.wordvector = word_vector_model
        self.mode = mode

    @classmethod
    def create_my_summarizer(cls, cae_model_path , word_vector_model_path = "vector/100", mode = 0):
        """Factory: load the word vectors and rebuild the CAE for inference."""
        word_vectors = WordVectors.load(word_vector_model_path)
        convae = ConvolutionAutoEncoder.rebuild_for_testing(mini_batch_size=1, filemodel=cae_model_path)
        return cls(convae, word_vectors, mode)

    @classmethod
    def summary(cls, cluster, max_word, mode="sub_cosine"):
        """Select sentences from `cluster` (up to max_word words) with the
        submodular summarizer and return the selected sentence texts.

        Params:
            cluster: Cluster with list_documents, each holding list_sentences
                     carrying pre-computed .vector and .length attributes.
            max_word: word budget for the summary.
            mode: selection-strategy name passed to smr.do_summarize.
        Returns:
            cluster.my_summarys, the list of selected sentence strings.
        """
        V = []             # sentence vectors, in encounter order
        P = []             # per-document partition of sentence ids (diversity term)
        L = np.array([])   # per-sentence word counts
        k = 0
        # Idiom fix: `is not None` instead of `!= None`.
        if cluster is not None:
            for document in cluster.list_documents:
                p = []
                for sentence in document.list_sentences:
                    if sentence.vector is None:
                        # Sentence was rejected by the CAE length filter.
                        continue
                    p.append(k)
                    sentence.sentece_id = k
                    k = k + 1
                    V.append(sentence.vector)
                    L = np.append(L, sentence.length)
                P.append(p)
        alpha = 0.7   # coverage trade-off
        galma = 0.3   # diversity weight
        n = len(V)
        summarize = smr.do_summarize(V, n, P, L, alpha, galma, max_word, mode)
        print(summarize)
        # Emit selected sentences in document order while enforcing the word
        # budget (a sentence that would overshoot is skipped, not truncated).
        # NOTE(review): assumes every Sentence object has a sentece_id
        # attribute even when its vector was None -- confirm upstream.
        word_count = 0
        for document in cluster.list_documents:
            for sentence in document.list_sentences:
                if sentence.sentece_id in summarize:
                    word_count += sentence.length
                    if word_count > max_word:
                        word_count -= sentence.length
                        continue
                    cluster.my_summarys.append(sentence.content)
        return cluster.my_summarys
def generate_system(clusters , path_to_model , path_to_system, mode="sub_cosine"):
    """For every reference-summary file under path_to_model/<group>/, produce
    a system summary of matching word count and write it to
    path_to_system/<group>/ under the same file name."""
    # NOTE(review): the group folder names look like word-count buckets of the
    # grouped VietnameseMDS corpus -- confirm against the data layout.
    groups = [85,130,180,220,270,340]
    counter = 0
    for group in groups:
        path_to_group_model = path_to_model + "/" + str(group)
        for file_name in os.listdir(path_to_group_model):
            counter +=1
            # File names contain a "cluster_<id>" prefix plus three more
            # dot-separated fields, one of which marks ref1/ref2.
            cluster_id, _, _, _ = file_name.split(".")
            _ , cluster_id = cluster_id.split("_")
            number_count = 0
            # The word budget is taken from the matching human reference.
            if file_name.find("ref1") != -1:
                number_count = clusters[int(cluster_id)].list_references[0].word_count
            elif file_name.find("ref2") != -1:
                number_count = clusters[int(cluster_id)].list_references[1].word_count
            summary_text = CAESummarizer.summary(clusters[int(cluster_id)], number_count, mode=mode)
            path_to_system_group = path_to_system + "/" + str(group)
            if not os.path.exists(path_to_system_group):
                os.makedirs(path_to_system_group)
            print "summary ", counter , "\""+file_name+"\"", "couting_word: ",number_count
            fo = open(path_to_system_group + "/" +file_name,'w')
            fo.writelines(summary_text)
            fo.close()
        print "finished group ", group
def create_summary_format_vn():
    """Generate system summaries for the Vietnamese MDS corpus from the
    pre-encoded clusters pickle; the sentence-encoding pipeline that produced
    the pickle is kept below, commented out, for reference."""
    print("create summary format")
    # vietnamesemds_path = "data/vietnamesemds/"
    # caesummarizer = CAESummarizer.create_my_summarizer("model/CAE.model","vector/100")
    #
    # clusters = [None]* 201
    # counter = 1
    # for cluster_id in os.listdir(vietnamesemds_path):
    #     _, id = cluster_id.split("_")
    #     cluster = Cluster.load_from_folder(cluster_id, vietnamesemds_path + cluster_id + "/")
    #     print ("Cluster ", counter)
    #     counter+=1
    #     for document in cluster.list_documents:
    #         for sentence in document.list_sentences:
    #             sentence_matrix = caesummarizer.wordvector.cae_prepare_data(sentence.content)
    #             if sentence_matrix is None:
    #                 sentence.vector = None
    #                 continue
    #             sentence_vector = caesummarizer.cae.get_vector_function(sentence_matrix)
    #             sentence.vector = sentence_vector.T
    #     clusters[int(id)] = cluster
    #
    # with open("data/vietnamesemds.pikcle", mode="wb") as f:
    #     cPickle.dump(clusters, f)
    with open("data/vietnamesemds.pickle", mode="rb") as f:
        clusters = cPickle.load(f)
    generate_system(clusters, "data/VietnameseMDS-grouped/model", "data/VietnameseMDS-grouped/system", mode="sub_euclid")
# modeList = {"sub_cosine":0, "sub_euclid":1,"mmr_cosine":2,"mmr_euclid":3,"kmean_simple":4,
# "mmr_kmean_cosine":5,"mmr_kmean_euclid":6,"mmr_pagerank_cosine":7,
# "mmr_pagerank_euclid":8}
def create_summary_format_duc2004(ducpath, wordvectors_path, summary_path):
    """Summarize every DUC-2004 cluster folder under ducpath with a 100-word
    budget and write one summary file per cluster into summary_path.
    Returns the loaded Cluster objects."""
    wv = WordVectors.load(wordvectors_path)
    clusters = []
    for cluster_id in os.listdir(ducpath):
        # Skip hidden entries such as .DS_Store.
        if cluster_id.startswith("."):
            continue
        cluster = Cluster.load_from_folder_duc(cluster_id, ducpath + "/" + cluster_id, wv)
        summary = CAESummarizer.summary(cluster, 100)
        file_summary = summary_path + "/" + cluster_id[:-1].upper() + ".M.100.T.1"
        with open(file_summary, mode="w") as f:
            f.writelines(line + "\n" for line in summary)
        clusters.append(cluster)
        print("Finish loading cluster_id: ", cluster_id)
    return clusters
def create_summary_format_opinosis(opinosis_path, wordvectors_path, summary_path):
    """Summarize every Opinosis topic under opinosis_path (25-word budget,
    k-means selection) and write <id>/<id>.1.txt files into summary_path.
    Returns the loaded Cluster objects."""
    wv = WordVectors.load(wordvectors_path)
    clusters = []
    for cluster_id in os.listdir(opinosis_path):
        # Skip hidden entries such as .DS_Store.
        if cluster_id.startswith("."):
            continue
        cluster = Cluster.load_from_opinosis(cluster_id, opinosis_path + "/" + cluster_id, wv)
        summary = CAESummarizer.summary(cluster, 25, "kmean_simple")
        if len(summary) == 0:
            print("ttdt")
        # Topic file names carry two extensions; keep only the base id.
        cluster_id, _, _ = cluster_id.split(".")
        folder_summary = summary_path + "/" + cluster_id
        if not os.path.isdir(folder_summary):
            os.makedirs(folder_summary)
        file_summary = folder_summary + "/" + cluster_id + ".1.txt"
        with open(file_summary, mode="w") as f:
            f.writelines(line + "\n" for line in summary)
        clusters.append(cluster)
        print("Finish loading cluster_id: ", folder_summary)
    return clusters
if __name__ == "__main__":
    # Entry point: run the Opinosis experiment. The DUC-2004 variant is kept
    # commented out for reference.
    # ducpath = "/Users/HyNguyen/Documents/Research/Data/duc2004/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs"
    # wordvectors_path = "model/wordvector.txt"
    # summary_path = "data/peer"
    # clusters = create_summary_format_duc2004(ducpath, wordvectors_path, summary_path)
    # with open("data/duc.sumobj.pickle", mode="wb") as f:
    #     cPickle.dump(clusters,f)
    opinosis_path = "/Users/HyNguyen/Documents/Research/Data/OpinosisDataset1.0_0/topics"
    wordvectors_path = "model/wordvector.txt"
    summary_path = "data/peer"
    clusters = create_summary_format_opinosis(opinosis_path,wordvectors_path,summary_path)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,129
|
giahy2507/convae
|
refs/heads/master
|
/preparedata4convaewmpi.py
|
__author__ = 'HyNguyen'
from vector.wordvectors import WordVectors
import time
import numpy as np
from nltk.corpus import brown
from nltk.corpus import treebank
import nltk
import xml.etree.ElementTree as ET
import os
import sys
from mpi4py import MPI
# Module-level MPI context: every rank executes this file; rank 0 acts as
# the coordinator in the __main__ section below.
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
def collect_data_from_ptb_brow_duc2004():
    """Collect tokenized sentences from Penn Treebank, Brown, and the
    DUC-2004 document sets. Returns a list of word lists."""
    start_collect = time.time()
    samples = []
    # Penn Tree Bank
    treebank_sents = treebank.sents()
    for i in range(len(treebank_sents)):
        # Re-join and re-tokenize so tokenization is uniform across corpora.
        senttmp = " ".join(treebank_sents[i])
        words = nltk.word_tokenize(senttmp)
        samples.append(words)
    sys.stdout.write("Finish collecting training data from Penn Tree Bank")
    sys.stdout.flush()
    # Brown
    brown_sents = brown.sents()
    for i in range(len(brown_sents)):
        senttmp = " ".join(brown_sents[i])
        words = nltk.word_tokenize(senttmp)
        samples.append(words)
    sys.stdout.write("Finish collecting training data from Brown")
    sys.stdout.flush()
    # DUC data
    folder_path = "/Users/HyNguyen/Documents/Research/Data/duc2004/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs"
    clusters_name = os.listdir(folder_path)
    for cluster_name in clusters_name:
        if cluster_name[0] == ".":
            # except file .DStore in my macbook
            continue
        files_name = os.listdir(folder_path + "/" + cluster_name)
        for file_name in files_name:
            if file_name[0] == ".":
                # except file .DStore in my macbook
                continue
            file_path = folder_path + "/" + cluster_name +"/"+ file_name
            try:
                tree = ET.parse(file_path)
                root = tree.getroot()
                # NOTE(review): relies on the private _children list and a
                # fixed child position -- fragile across ElementTree versions.
                text_tag = root._children[3]
                if text_tag.tag == "TEXT":
                    text = text_tag.text.replace("\n", "")
                    sentences = nltk.tokenize.sent_tokenize(text)
                    for sentence in sentences:
                        words = nltk.word_tokenize(sentence)
                        samples.append(words)
            except:
                print "exception parse XML: ", file_name
                continue
    sys.stdout.write("Finish collecting training data from DUC2004")
    sys.stdout.flush()
    sys.stdout.write("length of samples" + str(len(samples)))
    sys.stdout.flush()
    end_collect = time.time()
    sys.stdout.write("Total time for collecting training data: " + str(end_collect - start_collect))
    sys.stdout.flush()
    return samples
if __name__ == "__main__":
    # Rank 0 loads the word vectors and the corpus and splits the sentence
    # list into `size` roughly equal chunks; every rank then encodes its
    # chunk, and rank 0 gathers, concatenates and saves the result.
    data_scatters = []
    start_total = 0
    if rank == 0:
        start_total = time.time()
        wordvectors = WordVectors.load("model/wordvector.txt")
        print("Finished read wordvectors ...")
        traindata = collect_data_from_ptb_brow_duc2004()
        size_sample = int(len(traindata)/size)
        for i in range(size):
            # The last chunk absorbs the remainder.
            if i* size_sample + size_sample > len(traindata):
                data_scatters.append(traindata[i*size_sample:])
            else:
                data_scatters.append(traindata[i*size_sample : i*size_sample+size_sample])
    else:
        wordvectors = None
        data_scatter = None
    wordvectors = comm.bcast(wordvectors, root = 0)
    print("Process:", rank, "broadcasted wordvectors ...")
    data_scatter = comm.scatter(data_scatters,root=0)
    print("Process:", rank, "Data scatter length: ", len(data_scatter))
    # print("Process:", rank, "Data scatter [0]: ", data_scatter[0])
    # print("Process:", rank, "Data scatter [-1]: ", data_scatter[-1])
    #work with data_scatter
    final_array = []
    for i, words in enumerate(data_scatter):
        if i != 0 and i% 1000 == 0:
            print("Process:", rank, "Preparedata line ", i)
        # Sentences outside the 10..100-word range yield None and are dropped.
        words_array = wordvectors.cae_prepare_data_from_words(words, 10, 100)
        if words_array is not None:
            final_array.append(words_array)
    final_array = np.array(final_array)
    print("Process:", rank, "Data final array shape: ", final_array.shape)
    data_matrix_gather = comm.gather(final_array, root=0)
    if rank == 0:
        # gather and save
        print("data gather")
        data_matrix_final = data_matrix_gather[0]
        for i in range(1,len(data_matrix_gather)):
            data_matrix_final = np.concatenate((data_matrix_final,data_matrix_gather[i]))
        print("Process:", rank, "data_matrix_final.shape: ", data_matrix_final.shape)
        end_total = time.time()
        print("Process:", rank, "Total time: ", end_total - start_total, "s")
        np.save("data/data.convae", data_matrix_final)
        print("Process:", rank, "Save to data/data.convae.np ")
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,130
|
giahy2507/convae
|
refs/heads/master
|
/vector/extractwordvectors.py
|
__author__ = 'HyNguyen'
from wordvectors import WordVectors
import time
import numpy as np
from gensim.models import word2vec
from nltk.corpus import brown
from nltk.corpus import treebank
import nltk
import xml.etree.ElementTree as ET
import os
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
if __name__ == "__main__":
    # Build a project-local embedding table: copy Google News word2vec
    # vectors for every word seen in PTB, Brown, and DUC-2004, then save it
    # in both text and pickle formats.
    # Load Word2Vec from Google
    w2v = word2vec.Word2Vec.load_word2vec_format("/Users/HyNguyen/Documents/Research/Data/GoogleNews-vectors-negative300.bin",binary=True)
    # Create object WordVectors
    wordvectors = WordVectors(300,np.empty((0,300),dtype=float),{})
    # wordvectors = WordVectors.load("model/wordvector.txt")
    # Penn Tree Bank
    treebank_sents = treebank.sents()
    for i in range(len(treebank_sents)):
        senttmp = " ".join(treebank_sents[i])
        words = nltk.word_tokenize(senttmp)
        wordvectors.add_wordvector_from_w2vmodel(w2v,words)
    print("Finish penn tree bank corpus, Wordvector size: ", str(wordvectors.embed_matrix.shape[0]))
    # Brown
    brown_sents = brown.sents()
    for i in range(len(brown_sents)):
        if i % 1000 == 0:
            print("brow, process line: ", i)
        senttmp = " ".join(brown_sents[i])
        words = nltk.word_tokenize(senttmp)
        wordvectors.add_wordvector_from_w2vmodel(w2v,words)
    print("Finish brow corpus, Wordvector size: ", str(wordvectors.embed_matrix.shape[0]))
    # DUC data
    folder_path = "/Users/HyNguyen/Documents/Research/Data/DUC20042005/duc2004/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs"
    clusters_name = os.listdir(folder_path)
    for cluster_name in clusters_name:
        if cluster_name[0] == ".":
            # except file .DStore in my macbook
            continue
        files_name = os.listdir(folder_path + "/" + cluster_name)
        for file_name in files_name:
            if file_name[0] == ".":
                # except file .DStore in my macbook
                continue
            file_path = folder_path + "/" + cluster_name +"/"+ file_name
            try:
                tree = ET.parse(file_path)
                root = tree.getroot()
                # NOTE(review): relies on the private _children list and a
                # fixed child position -- fragile across ElementTree versions.
                text_tag = root._children[3]
                if text_tag.tag == "TEXT":
                    text = text_tag.text.replace("\n", "")
                    sentences = nltk.tokenize.sent_tokenize(text)
                    for sentence in sentences:
                        words = nltk.word_tokenize(sentence)
                        wordvectors.add_wordvector_from_w2vmodel(w2v,words)
            except:
                print "exception: ", file_name
                continue
        print("Finish cluster name:", cluster_name," , Wordvector size: ", str(wordvectors.embed_matrix.shape[0]))
    wordvectors.save_text_format("model/wordvector.txt")
    wordvectors.save_pickle("model/wordvector.pickle")
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,131
|
giahy2507/convae
|
refs/heads/master
|
/mmr/main.py
|
__author__ = 'MichaelLe'
import numpy as np
import vector as vec
from numpy import linalg
import mmrelevance as mmr
# Tiny manual smoke test: two 3-d "sentence" vectors with word counts [3, 3]
# run through the MMR summarizer (lambda=0.3, budget=5 words, mode=1 --
# presumably Euclidean similarity; confirm against vector.similarity).
a = np.array([10,4,10])
b = np.array([1,3,13])
c = []
c.append(a)
c.append(b)
len_sen = np.array([3,3])
print (linalg.norm(a))
print mmr.summaryMMR12(c, len_sen,0.3, 5, 1)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,132
|
giahy2507/convae
|
refs/heads/master
|
/submodular/vector.py
|
__author__ = 'MichaelLe'
from numpy import *
from numpy import linalg as LA
def converArr(s):
    """Parse a space-separated string of single-character digits into a
    (1, n) float array, where n = ceil(len(s) / 2) (digits and single
    spaces alternate).

    NOTE(review): assumes every token is exactly one character -- confirm.
    """
    # BUGFIX: numpy requires integer shapes; math.ceil here returns a float,
    # which newer numpy versions reject in zeros().
    lenS = int(ceil(len(s) / 2.0))
    a = zeros((1, lenS))
    i = 0
    for c in s:
        if c != ' ':
            # numpy casts the digit character to float on assignment.
            a[0, i] = c
            i = i + 1
    return a
def dotProduct(a, b):
    """Return the inner product of 1-D vectors a and b (manual loop,
    indexed by a's length)."""
    total = 0
    for i in range(size(a, 0)):
        total += a[i] * b[i]
    return total
def cosine(a, b):
    """Cosine similarity of a and b, rescaled from [-1, 1] to [0, 1]."""
    num = dotProduct(a, b)
    den = linalg.norm(a) * linalg.norm(b)
    return (num / den + 1) / 2
def euclid(a, b):
    """Euclidean (L2) distance between vectors a and b."""
    diff = a - b
    return linalg.norm(diff)
def similarity(a, b, mode):
    """Similarity dispatch: mode 0 -> rescaled cosine, mode 1 -> Euclidean
    distance. Any other mode falls through and returns None (implicitly),
    matching the original behavior."""
    if mode == 0:
        return cosine(a, b)
    if mode == 1:
        return euclid(a, b)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,133
|
giahy2507/convae
|
refs/heads/master
|
/submodular/loadFile.py
|
__author__ = 'MichaelLe'
import string
import numpy
import vector
def loadfile(filename):
    """Read `filename` line by line, strip newlines, and parse each line with
    vector.converArr. Returns a list of (1, n) numpy arrays.

    BUGFIX: the file handle is now closed via a context manager (the
    original leaked the descriptor).
    """
    rows = []
    with open(filename, 'r') as f:
        for line in f:
            rows.append(vector.converArr(line.replace('\n', '')))
    return rows
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,134
|
giahy2507/convae
|
refs/heads/master
|
/main.py
|
__author__ = 'HyNguyen'
import numpy as np
import os
import xml.etree.ElementTree as ET
import nltk
if __name__ == "__main__":
    # Scratch script: walk the DUC-2004 document tree and tokenize every
    # sentence (the embedding-collection call is commented out).
    # DUC data
    folder_path = "/Users/HyNguyen/Documents/Research/Data/DUC20042005/duc2004/DUC2004_Summarization_Documents/duc2004_testdata/tasks1and2/duc2004_tasks1and2_docs/docs"
    clusters_name = os.listdir(folder_path)
    for cluster_name in clusters_name:
        if cluster_name[0] == ".":
            # except file .DStore in my macbook
            continue
        files_name = os.listdir(folder_path + "/" + cluster_name)
        for file_name in files_name:
            if file_name[0] == ".":
                # except file .DStore in my macbook
                continue
            file_path = folder_path + "/" + cluster_name +"/"+ file_name
            try:
                tree = ET.parse(file_path)
                root = tree.getroot()
                # NOTE(review): relies on the private _children list and a
                # fixed child position -- fragile across ElementTree versions.
                text_tag = root._children[3]
                if text_tag.tag == "TEXT":
                    text = text_tag.text.replace("\n", "")
                    sentences = nltk.tokenize.sent_tokenize(text)
                    for sentence in sentences:
                        words = nltk.word_tokenize(sentence)
                        # wordvectors.add_wordvector_from_w2vmodel(w2v,words)
            except:
                print "exception: ", cluster_name ,file_name
                continue
    # print("Finish cluster name:", cluster_name," , Wordvector size: ", str(wordvectors.embed_matrix.shape[0]))
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,135
|
giahy2507/convae
|
refs/heads/master
|
/LayerClasses.py
|
import numpy as np
from theano.tensor.nnet import conv
import theano
import theano.tensor as T
from theano.tensor.signal import downsample
# from tensorflow.examples.tutorials.mnist import input_data
from theano.tensor import shared_randomstreams
class MyConvLayer(object):
    """Theano 2-D convolution layer with optional parameter sharing.

    image_shape/filter_shape follow Theano's conv2d convention:
    (batch, channels, rows, cols) and (n_kernels, channels, rows, cols).
    Pass params=[W, b] (theano shared variables) to reuse existing weights;
    the default [None, None] is never mutated.
    """
    def __init__(self, rng, image_shape, filter_shape, border_mode = "valid", activation = T.tanh, params = [None, None]):
        self.image_shape = image_shape
        self.filter_shape = filter_shape
        # Output spatial size for a "valid" convolution.
        self.output_shape = (image_shape[0],filter_shape[0],image_shape[2]-filter_shape[2]+1,image_shape[3]-filter_shape[3]+1)
        self.activation = activation
        self.border_mode = border_mode
        # Input channel counts must agree.
        assert image_shape[1] == filter_shape[1]
        fan_in = np.prod(filter_shape[1:])
        fan_out = filter_shape[0] * np.prod(filter_shape[2:])
        # initialize weights with random weights (uniform in +-W_bound)
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        # Idiom fix: compare to None with `is` (PEP 8); also avoids relying
        # on the equality semantics of theano shared variables.
        if params[0] is None:
            self.W = theano.shared(np.asarray(
                rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
                dtype=theano.config.floatX),
                borrow=True)
            self.b = theano.shared(value=b_values, borrow=True)
        else:
            self.W, self.b = params[0], params[1]
        self.params = [self.W, self.b]
        # L1/L2 regularization terms over the kernel weights.
        self.L1 = abs(self.W).sum()
        self.L2 = (self.W**2).sum()

    def set_input(self, input, input_dropout, mini_batch_size):
        """Wire the symbolic input through conv + bias + activation."""
        self.input = input.reshape(self.image_shape)
        self.conv_out = conv.conv2d(input=self.input, filters=self.W, filter_shape=self.filter_shape, image_shape=self.image_shape, border_mode=self.border_mode)
        self.output = self.activation(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        # No dropout is applied in conv layers; kept for interface parity.
        self.output_dropout = self.output
class FullConectedLayer(object):
    """Fully-connected Theano layer with a separate dropout path.

    Note: the class-name spelling ("FullConected") is kept because other
    modules import it under this name.
    """
    def __init__(self, n_in, n_out, activation = T.tanh, p_dropout = 0.5, params = [None,None]):
        self.n_in = n_in
        self.n_out = n_out
        self.activation = activation
        self.p_dropout = p_dropout
        # Idiom fix: `is None` instead of `== None` (PEP 8); also avoids
        # relying on the equality semantics of theano shared variables.
        if params[0] is None:
            self.W = theano.shared(value= np.asarray(np.random.rand(n_in,n_out)/np.sqrt(n_in+1),dtype=theano.config.floatX),
                                   name = "W",
                                   borrow=True)
            self.b = theano.shared(value= np.asarray(np.random.rand(n_out,) ,dtype=theano.config.floatX),
                                   name ="b",
                                   borrow=True
                                   )
        else:
            self.W, self.b = params[0], params[1]
        self.params = [self.W, self.b]
        # L1/L2 regularization terms over the weights.
        self.L1 = abs(self.W).sum()
        self.L2 = (self.W**2).sum()

    def set_input(self, input, input_dropout, mini_batch_size):
        """Wire symbolic input: plain path -> output, dropout path ->
        output_dropout (used at training time)."""
        self.input = input.flatten(2)
        self.output = self.activation(T.dot(self.input,self.W) + self.b)
        self.inpt_dropout = dropout_layer(input_dropout.reshape((mini_batch_size, self.n_in)), self.p_dropout)
        self.output_dropout = self.activation(T.dot(self.inpt_dropout, self.W) + self.b)
class SoftmaxLayer(object):
    """Softmax output layer with negative-log-likelihood loss for one-hot
    targets and a misclassification-rate metric."""
    def __init__(self , n_in, n_out, params=[None, None]):
        # Idiom fix: `is None` instead of `== None` (PEP 8); also avoids
        # relying on the equality semantics of theano shared variables.
        if params[0] is None:
            self.W = theano.shared(value= np.asarray(np.random.rand(n_in,n_out)/np.sqrt(n_in+1),dtype=theano.config.floatX),
                                   name = "W",
                                   borrow=True)
            self.b = theano.shared(value= np.asarray(np.random.rand(n_out,) ,dtype=theano.config.floatX),
                                   name ="b",
                                   borrow=True
                                   )
        else:
            self.W, self.b = params[0], params[1]
        self.n_in = n_in
        self.n_out = n_out
        # parameters of the model
        self.params = [self.W, self.b]
        self.L1 = abs(self.W).sum()
        self.L2 = (self.W**2).sum()

    def set_input(self, input):
        """Wire symbolic input to class probabilities and argmax prediction."""
        self.input = input.flatten(2)
        self.p_y_given_x = T.nnet.softmax(T.dot(self.input, self.W) + self.b)
        self.y_pred = T.argmax(self.p_y_given_x, axis=1)
        self.output = self.y_pred

    def negative_log_likelihood(self,y):
        """Mean cross-entropy against one-hot targets y."""
        return -T.mean(T.log(self.p_y_given_x)*y)

    def predict(self):
        """Symbolic predicted class labels."""
        return self.y_pred

    def error(self,y):
        """Misclassification rate; y is one-hot and converted to labels."""
        y = T.argmax(y,1)
        # check if y has same dimension of y_pred
        if y.ndim != self.y_pred.ndim:
            raise TypeError(
                'y should have the same shape as self.y_pred',
                ('y', y.type, 'y_pred', self.y_pred.type)
            )
        # check if y is of the correct datatype
        if y.dtype.startswith('int'):
            # the T.neq operator returns a vector of 0s and 1s, where 1
            # represents a mistake in prediction
            return T.mean(T.neq(self.y_pred, y))
        else:
            raise NotImplementedError()
def dropout_layer(layer, p_dropout):
    # Apply dropout: zero each unit with probability p_dropout using a
    # RandomStreams seeded from a fixed RandomState(0), so the mask sequence
    # is reproducible across runs.
    srng = shared_randomstreams.RandomStreams(np.random.RandomState(0).randint(999999))
    mask = srng.binomial(n=1, p=1-p_dropout, size=layer.shape)
    return layer*T.cast(mask, theano.config.floatX)
def mask_k_maxpooling(variable, variable_shape ,axis, k):
    """
    Build a binary mask selecting the k maximum entries of a 2-D tensor
    along `axis` (repeated argmax; each winner is overwritten with a large
    negative sentinel so the next iteration finds the next maximum).
    Params:
        variable: tensor2D
        variable_shape: static shape tuple of `variable`
        axis: get k_max_pooling in axis'th dimension
        k: k loop --> k max value
    ------
    Return:
        mask : tensor2D
            1: if in position k_max
            0: else
        ex variable:
            1 2 3        0 0 1
            2 7 1  --->  0 1 0
            1 2 1        0 1 0
    """
    # NOTE: `min` shadows the builtin; it is only the blanking sentinel.
    min = -999999999
    variable_tmp = variable
    mask = T.zeros(variable_shape, dtype=theano.config.floatX)
    for i in range(k):
        max_idx = T.argmax(variable_tmp,axis=axis)
        if axis == 0:
            mask = T.set_subtensor(mask[max_idx,range(0,variable_shape[1])],1)
            variable_tmp = T.set_subtensor(variable_tmp[max_idx,range(0,variable_shape[1])],min)
        elif axis == 1:
            mask = T.set_subtensor(mask[range(0,variable_shape[0]),max_idx],1)
            variable_tmp = T.set_subtensor(variable_tmp[range(0,variable_shape[0]),max_idx],min)
    return mask
class MyConvPoolLayer(object):
    """Convolution followed by k-max pooling implemented as an elementwise
    mask: non-maximal responses are zeroed rather than removed, so the
    output keeps the post-convolution shape."""
    def __init__(self, rng, input, image_shape, filter_shape, k_pool_size, activation = T.tanh):
        self.input = input
        self.image_shape = image_shape
        self.filter_shape = filter_shape
        self.k_pool_size = k_pool_size
        assert image_shape[1] == filter_shape[1]
        fan_in = np.prod(filter_shape[1:])
        fan_out = filter_shape[0] * np.prod(filter_shape[2:])
        # initialize weights with random weights
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX),
            borrow=True)
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        self.conv_out = conv.conv2d(input=input, filters=self.W, filter_shape=filter_shape, image_shape=image_shape, border_mode="valid")
        self.mask_input = self.conv_out.flatten(2)
        # Original note (translated from Vietnamese): "bug here".
        # NOTE(review): shape_afconv[1] uses image_shape[1] (input channels);
        # for the conv output it should arguably be filter_shape[0]
        # (number of kernels) -- confirm before relying on this layer.
        shape_afconv = (image_shape[0],image_shape[1],image_shape[2]-filter_shape[2]+1,image_shape[3]-filter_shape[3]+1)
        self.mask_k_maxpooling_2D = mask_k_maxpooling(self.mask_input,(image_shape[0],shape_afconv[1]*shape_afconv[2]*shape_afconv[3]),axis=1,k=k_pool_size)
        self.mask_k_maxpooling_4D = self.mask_k_maxpooling_2D.reshape(shape_afconv)
        self.output = activation(self.mask_k_maxpooling_4D * self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.params = [self.W, self.b]
class MyUnPoolDeconvLayer(object):
    """Unpool (multiply by the stored k-max mask) then "deconvolve" with a
    full-mode convolution, restoring the pre-convolution spatial size."""
    def __init__(self, rng, input, mask_k_maxpooling_4D, input_shape, filter_shape, activation = T.tanh):
        self.input = input
        # mask4D: (batch_size, n_channel, width, height)
        self.mask_k_maxpooling_4D = mask_k_maxpooling_4D
        # input_shape: (batch_size, n_channel, width, height) e.g: (1,20,24,24)
        self.input_shape = input_shape
        # filter_shape: (n_kenel, n_channel, width, height) e.g: (1,20,5,5)
        self.filter_shape = filter_shape
        assert input_shape[1] == filter_shape[1]
        # Re-apply the k-max mask so only pooled activations propagate back.
        unpool_out = input * mask_k_maxpooling_4D
        fan_in = np.prod(filter_shape[1:])
        fan_out = filter_shape[0] * np.prod(filter_shape[2:])
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX),
            borrow=True)
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        self.conv_out = conv.conv2d(input=unpool_out, filters=self.W, filter_shape=filter_shape, image_shape=input_shape, border_mode="full")
        self.output = self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x')
        # Original note (translated from Vietnamese): should an activation be
        # applied here? tanh squashes to [-1, 1] while word-vector components
        # are often > 1, so the linear output is kept.
        # self.ouput = activation(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.params = [self.W, self.b]
class LenetConvPoolLayer(object):
    """Convolution + 2-D max-pooling layer with a tanh output."""
    def __init__(self, rng, input, image_shape, filter_shape, poolsize , border_mode ='valid' , activation = T.tanh):
        self.input = input
        self.image_shape = image_shape
        self.filter_shape = filter_shape
        self.poolsize = poolsize
        # Input and filter channel counts must agree.
        assert image_shape[1] == filter_shape[1]
        self.input = input
        fan_in = np.prod(filter_shape[1:])
        # Each unit's fan-out is divided by the pooling area.
        fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
                   np.prod(poolsize))
        # initialize weights with random weights
        W_bound = np.sqrt(6. / (fan_in + fan_out))
        self.W = theano.shared(np.asarray(
            rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
            dtype=theano.config.floatX),
            borrow=True)
        b_values = np.zeros((filter_shape[0],), dtype=theano.config.floatX)
        self.b = theano.shared(value=b_values, borrow=True)
        # convolve input feature maps with filters
        self.conv_out = conv.conv2d(input=input, filters=self.W, filter_shape=filter_shape, image_shape=image_shape, border_mode=border_mode)
        # downsample each feature map individually, using maxpooling
        pooled_out = downsample.max_pool_2d(input=self.conv_out, ds=poolsize, ignore_border=True)
        # NOTE(review): T.tanh is hard-coded here; the `activation` argument
        # is accepted but never used.
        self.output = T.tanh(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.params = [self.W, self.b]
if __name__ == "__main__":
    # Placeholder entry point; the MNIST training experiment below is kept
    # commented out for reference.
    print("main")
# mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
# # mnist.train.images.shape : (55000, 784)
# # mnist.train.labels : (55000) --> [list label ...]
#
# # next_images, next_labels = mnist.train.next_batch(100)
# # tuple: images, label : (100, 784) , (100, 10)
#
# nkerns=[20, 50]
# batch_size=100
# rng = np.random.RandomState(23455)
# # minibatch)
# x = T.dmatrix('x') # data, presented as rasterized images
# y = T.dmatrix('y') # labels, presented as 1D vector of [int] labels
#
# # construct the logistic regression class
# # Each MNIST image has size 28*28
# layer0_input = x.reshape((-1, 1, 28, 28))
#
# layer0 = LenetConvPoolLayer(rng, input=layer0_input,
# image_shape=(batch_size, 1, 28, 28),
# filter_shape=(nkerns[0], 1, 5, 5), poolsize=(2, 24))
#
# layer2_input = layer0.output.flatten(2)
#
# layer2 = FullConectedLayer(input=layer2_input, n_in=nkerns[0] * 12 * 1, n_out=500, activation=T.tanh)
#
# classifier = SoftmaxLayer(input=layer2.ouput, n_in=500, n_out=10)
#
# cost = classifier.negative_log_likelihood(y)
#
# error = classifier.error(y)
#
# params = layer0.params + layer2.params + classifier.params
#
# gparams = []
# for param in params:
# gparam = T.grad(cost, param)
# gparams.append(gparam)
#
# updates = []
# # given two list the zip A = [a1, a2, a3, a4] and B = [b1, b2, b3, b4] of
# # same length, zip generates a list C of same size, where each element
# # is a pair formed from the two lists :
# # C = [(a1, b1), (a2, b2), (a3, b3), (a4, b4)]
# for param, gparam in zip(params, gparams):
# updates.append((param, param - 0.1 * gparam))
#
# train_model = theano.function(inputs=[x,y], outputs=[cost,error,layer0.output, layer2_input],updates=updates)
#
# counter = 0
# best_valid_err = 100
# early_stop = 20
#
# batch_size = 100
#
# epoch_i = 0
#
# while counter < early_stop:
# epoch_i +=1
# batch_number = int(mnist.train.labels.shape[0]/batch_size)
# for batch in range(batch_number):
# next_images, next_labels = mnist.train.next_batch(100)
# train_cost, train_error, layer0_out, layer2_in = train_model(next_images, next_labels)
# print layer0_out.shape, layer2_in.shape
# # print train_cost, train_error
# next_images, next_labels = mnist.validation.next_batch(100)
# valid_cost, valid_error,_,_ = train_model(next_images, next_labels)
# if best_valid_err > valid_error:
# best_valid_err = valid_error
# print "Epoch ",epoch_i, " Validation cost: ", valid_cost, " Validation error: " , valid_error ," ",counter , " __best__ "
# counter = 0
# else:
# counter +=1
# print "Epoch ",epoch_i, " Validation cost: ", valid_cost, " Validation error: " , valid_error ," ",counter
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,136
|
giahy2507/convae
|
refs/heads/master
|
/submodular/submodular.py
|
__author__ = 'MichaelLe'
import numpy as np
import vector
import copy
# with S, V is the list of all sentence
def SimMatrix(senList, mode):
    # Build an (n+1, n) similarity matrix: rows 0..n-1 hold symmetric
    # pairwise sentence similarities; the extra last row stores each
    # column's sum, i.e. a sentence's total similarity to the whole set.
    numSen = np.size(senList,0)
    simM = np.ones((numSen + 1, numSen))
    for i in range(numSen):
        for j in range(i,numSen,1):
            simM[i,j] = vector.similarity(senList[i],senList[j], mode)
            simM[j,i] = simM[i,j]
    for i in range(numSen):
        # Column sum over the similarity rows only (excludes this sum row).
        simM[numSen,i] = np.sum(simM[:numSen,i])
    return simM
def countC(v, S, simM):
    """Coverage of sentence index v by the summary set S: the sum of
    similarities between v and every index already selected into S.

    v: sentence index in V; S: list of selected sentence indices;
    simM: similarity matrix from SimMatrix.
    """
    return sum(simM[v, c] for c in S)
def coverage(S, n, simM, alpha):
    """Submodular coverage term: for each of the n input sentences, take the
    smaller of (its coverage by S) and (alpha * its coverage by the whole
    input set, read from the column-sum row simM[n]).

    S: selected sentence indices; n: number of input sentences;
    simM: similarity matrix from SimMatrix; alpha: trade-off coefficient.
    """
    total = 0
    for c in range(n):
        total += min(countC(c, S, simM), alpha * simM[n, c])
    return total
def intersectionSet(a, b):
    """Return the elements common to lists *a* and *b*.

    Order follows the longer list (the original recursed with swapped
    arguments when len(a) < len(b); this keeps that ordering).
    """
    longer, shorter = (a, b) if len(a) >= len(b) else (b, a)
    return [item for item in longer if item in shorter]
def diversityEachPart(S, Pi, n, simM):
    """Importance mass (row n of simM) of summary sentences inside cluster Pi.

    S  : selected sentence indices, Pi : one cluster's indices,
    n  : number of input sentences, simM : similarity matrix with sum row.
    """
    # Inlined intersection with the same longer-list ordering as before.
    if len(S) >= len(Pi):
        members = [s for s in S if s in Pi]
    else:
        members = [p for p in Pi if p in S]
    return sum(simM[n, m] for m in members)
def diversity(S, n, P, simM):
    """Diversity reward: sqrt-saturated importance gathered per cluster,
    summed over the partition P of the input sentences."""
    total = 0
    for cluster in P:
        total += np.sqrt((1.0 / n) * diversityEachPart(S, cluster, n, simM))
    return total
def f1(S, n, P, simM, alpha, lamda):
    """Overall submodular objective: coverage plus lamda-weighted diversity."""
    cov = coverage(S, n, simM, alpha)
    div = diversity(S, n, P, simM)
    return cov + lamda * div
def isStopCon(S, number_of_word_V, max_word):
    """Stopping test for the greedy summary builder.

    S : list of selected sentence indices.
    number_of_word_V : numpy array of per-sentence word counts.
    max_word : word budget for the summary.

    Returns 1 once the selected sentences jointly exceed the budget, else 0.
    (Removed the unused ``epsilon`` local from the original.)
    """
    # Fancy-indexing with the list S picks each selected sentence's length;
    # an empty S yields an empty slice whose sum is 0.
    sum_S = np.sum(number_of_word_V[S])
    return 1 if sum_S > max_word else 0
def SubmodularFunc(V, n, P, V_word, alpha, lamda, max_word, mode):
    """Greedily maximise the submodular summary objective f1.

    V : sentence vectors; n : number of sentences; P : clustering (partition)
    of sentence indices; V_word : per-sentence word counts; alpha/lamda :
    trade-off weights; max_word : summary word budget; mode : similarity mode
    forwarded to SimMatrix.

    Returns the list of selected sentence indices.
    (Removed the dead ``V_number = range(n)`` local; replaced deepcopy of a
    flat int list with list concatenation; idiomatic membership test.)
    """
    simM = SimMatrix(V, mode)
    S = []
    # Keep adding the best-scoring unselected sentence until the word budget
    # is exceeded.
    while isStopCon(S, V_word, max_word) == 0:
        score_matrix = np.zeros(n)
        for cand in range(n):
            if cand not in S:
                trial = S + [cand]
                score_matrix[cand] = f1(trial, n, P, simM, alpha, lamda)
        # Already-selected candidates keep score 0, as in the original.
        selected_sen = np.argmax(score_matrix)
        S.append(selected_sen)
    return S
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,137
|
giahy2507/convae
|
refs/heads/master
|
/convae.py
|
__author__ = 'HyNguyen'
import theano
import theano.tensor as T
import numpy as np
from LayerClasses import MyConvLayer,FullConectedLayer
import cPickle
import os
import sys
from lasagne.updates import adam,rmsprop,adadelta
def load_np_data(path, onehost=False):
    """Load a 3-D ``.npy`` array and return an 80/20 train/validation split.

    Each split is flattened to (samples, dim1*dim2). ``onehost`` is kept for
    API compatibility and is currently unused.
    """
    data = np.load(path)
    split_at = int(data.shape[0] * 0.8)
    flat = (-1, data.shape[1] * data.shape[2])
    return data[:split_at].reshape(flat), data[split_at:].reshape(flat)
class ConvolutionAutoEncoder(object):
    """Convolutional auto-encoder assembled from pre-built layers.

    The stack is an encoder (conv + fully connected) mirrored by a decoder;
    the input of the middle layer is exposed as the learned sentence vector.

    Parameters
    ----------
    layers : list
        Pre-constructed MyConvLayer / FullConectedLayer instances, in order.
    mini_batch_size : int
        Batch size the symbolic graph is compiled for.
    params : list, optional
        Flat [w0, b0, w1, b1, ...] list to restore into the layers.
    name : str
        Checkpoint basename; weights are saved to "model/<name>.model".
    """

    def __init__(self, layers, mini_batch_size, params=None, name="CAE"):
        self.name = name
        self.layers = layers
        self.mini_batch_size = mini_batch_size
        if params is None:
            # Gather every layer's parameters for the optimizer.
            self.params = [param for layer in self.layers for param in layer.params]
        else:
            # Restore a previously saved flat (w, b per layer) parameter list.
            self.params = params
            for i in range(len(self.layers)):
                self.layers[i].w = params[i * 2]
                self.layers[i].b = params[i * 2 + 1]
        # Wire the symbolic graph: each layer consumes the previous output.
        self.X = T.dmatrix("X")
        init_layer = self.layers[0]
        init_layer.set_input(self.X, self.X, self.mini_batch_size)
        for j in xrange(1, len(self.layers)):
            prev_layer, layer = self.layers[j - 1], self.layers[j]
            layer.set_input(prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
        self.output = self.layers[-1].output
        # Bottleneck: the middle layer's input is the sentence embedding.
        self.vector_sentence = self.layers[int(len(self.layers) / 2)].input
        self.showfunction = theano.function([self.X], outputs=self.output)
        self.get_vector_function = theano.function([self.X], outputs=self.vector_sentence)

    def show(self, X_train):
        """Run the full forward pass (reconstruction) on a batch."""
        return self.showfunction(X_train)

    def load(self, filemodel="model/CAE.model"):
        """Load pickled parameters and push them into the layers."""
        with open(filemodel, mode="rb") as f:
            self.params = cPickle.load(f)
        for i in range(len(self.layers)):
            self.layers[i].w = self.params[i * 2]
            self.layers[i].b = self.params[i * 2 + 1]

    def save(self, filemodel="model/CAE.model"):
        """Pickle the current parameter list to disk."""
        with open(filemodel, mode="wb") as f:
            cPickle.dump(self.params, f)

    @classmethod
    def rebuild_for_testing(self, mini_batch_size, filemodel="model/CAE.model"):
        """Rebuild the canonical 4-layer architecture and load saved weights."""
        mini_batch_size = mini_batch_size
        number_featuremaps = 20
        sentence_length = 100
        embed_size = 100
        image_shape = (mini_batch_size, 1, sentence_length, embed_size)
        filter_shape_encode = (number_featuremaps, 1, 5, embed_size)
        filter_shape_decode = (1, number_featuremaps, 5, embed_size)
        rng = np.random.RandomState(23455)
        layer1 = MyConvLayer(rng, image_shape=image_shape, filter_shape=filter_shape_encode, border_mode="valid")
        layer2 = FullConectedLayer(n_in=layer1.output_shape[1] * layer1.output_shape[2] * layer1.output_shape[3], n_out=100)
        layer3 = FullConectedLayer(n_in=layer2.n_out, n_out=layer2.n_in)
        layer4 = MyConvLayer(rng, image_shape=layer1.output_shape, filter_shape=filter_shape_decode, border_mode="full")
        layers = [layer1, layer2, layer3, layer4]
        cae = ConvolutionAutoEncoder(layers, mini_batch_size)
        cae.load(filemodel)
        return cae

    def train(self, X_train, X_valid, early_stop_count=20, X_test=None):
        """Train with adadelta on L2-regularised reconstruction error.

        Early-stops after ``early_stop_count`` epochs without validation
        improvement; best parameters are checkpointed to model/<name>.model.
        """
        l2_norm_squared = 0.001 * sum([layer.L2 for layer in self.layers])
        # Mean euclidean distance between reconstruction and input.
        mae = T.mean(T.sqrt(T.sum(T.sqr(self.layers[-1].output.flatten(2) - self.X), axis=1)), axis=0)
        cost = mae + l2_norm_squared
        updates = adadelta(cost, self.params)
        # updates = adam(cost, self.params)
        self.train_model = theano.function(inputs=[self.X], outputs=[cost, mae], updates=updates)
        self.valid_model = theano.function(inputs=[self.X], outputs=[cost, mae])
        num_training_batches = int(X_train.shape[0] / self.mini_batch_size)
        num_validation_batches = int(X_valid.shape[0] / self.mini_batch_size)
        counter = 0
        best_valid_err = 100
        early_stop = early_stop_count
        epoch_i = 0
        train_rand_idxs = list(range(0, X_train.shape[0]))
        valid_rand_idxs = list(range(0, X_valid.shape[0]))
        while counter < early_stop:
            epoch_i += 1
            train_costs = []
            train_errs = []
            valid_costs = []
            valid_errs = []
            np.random.shuffle(train_rand_idxs)
            for batch_i in range(num_training_batches):
                mnb_X = X_train[train_rand_idxs[batch_i * self.mini_batch_size: batch_i * self.mini_batch_size + self.mini_batch_size]]
                train_cost, train_err = self.train_model(mnb_X)
                train_costs.append(train_cost)
                train_errs.append(train_err)
            np.random.shuffle(valid_rand_idxs)
            for batch_i in range(num_validation_batches):
                # Bug fix: validation previously sampled X_train with
                # train_rand_idxs, so early stopping tracked training error
                # instead of validation error.
                mnb_X = X_valid[valid_rand_idxs[batch_i * self.mini_batch_size: batch_i * self.mini_batch_size + self.mini_batch_size]]
                valid_cost, valid_err = self.valid_model(mnb_X)
                valid_costs.append(valid_cost)
                valid_errs.append(valid_err)
            train_err = np.mean(np.array(train_errs))
            train_cost = np.mean(np.array(train_costs))
            val_err = np.mean(np.array(valid_errs))
            val_cost = np.mean(np.array(valid_costs))
            if val_err < best_valid_err:
                best_valid_err = val_err
                sys.stdout.write("Epoch " + str(epoch_i) + " Train cost: " + str(train_cost) + "Train mae: " + str(train_err) + " Validation cost: " + str(val_cost) + " Validation mae " + str(val_err) + ",counter " + str(counter) + " __best__ \n")
                sys.stdout.flush()
                counter = 0
                # Checkpoint the best-so-far parameters.
                with open("model/" + self.name + ".model", mode="wb") as f:
                    cPickle.dump(self.params, f)
            else:
                counter += 1
                sys.stdout.write("Epoch " + str(epoch_i) + " Train cost: " + str(train_cost) + "Train mae: " + str(train_err) + " Validation cost: " + str(val_cost) + " Validation mae " + str(val_err) + ",counter " + str(counter) + "\n")
                sys.stdout.flush()
if __name__ == "__main__":
    # Architecture hyper-parameters for the sentence auto-encoder (kept for
    # reference; the training pipeline below is currently commented out).
    mini_batch_size = 100
    number_featuremaps = 20
    sentence_length = 100
    embed_size = 100
    image_shape = (mini_batch_size, 1, sentence_length, embed_size)
    filter_shape_encode = (20, 1, 5, embed_size)
    filter_shape_decode = (1, 20, 5, embed_size)
    # Fixed seed so layer initialisation is reproducible.
    rng = np.random.RandomState(23455)
    # 80/20 split of the pre-embedded sentence matrix.
    X_train, X_valid = load_np_data("vector/data_processed.npy")
    print("X_train.shape: ", X_train.shape)
    # NOTE(review): training pipeline disabled in the committed version.
    # layer1 = MyConvLayer(rng,image_shape=image_shape,filter_shape=filter_shape_encode, border_mode="valid")
    # layer2 = FullConectedLayer(n_in=layer1.output_shape[1] * layer1.output_shape[2] * layer1.output_shape[3],n_out=100)
    # layer3 = FullConectedLayer(n_in=layer2.n_out, n_out=layer2.n_in)
    # layer4 = MyConvLayer(rng,image_shape=layer1.output_shape, filter_shape=filter_shape_decode,border_mode="full")
    # layers = [layer1,layer2,layer3,layer4]
    # cae = ConvolutionAutoEncoder(layers, mini_batch_size)
    # cae.train(X_train,X_valid)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,138
|
giahy2507/convae
|
refs/heads/master
|
/summary/kmean_sum.py
|
__author__ = 'MichaelLe'
import numpy as np
import math
from sklearn.cluster import KMeans
def kmean_summary(V, len_sen_mat, max_word):
    """Select summary sentences by k-means clustering of sentence vectors.

    Parameters
    ----------
    V : list of vector sentence representations
    len_sen_mat : matrix of lengths of all sentences in V
    max_word : maximum number of words in the summary

    Returns
    -------
    Array with, per cluster, the index of the sentence closest to the
    cluster centroid.
    """
    # KMeans only accepts 2-D input, while V arrives as a list of column
    # vectors of shape (dim, 1); flatten to (n_sentences, dim).
    flat = np.array(V).reshape((len(V), V[0].shape[0]))
    mean_len = np.average(len_sen_mat)
    # Number of clusters ~= how many average-length sentences fit the budget.
    n_clusters = int(math.ceil(max_word / mean_len))
    distances = KMeans(n_clusters=n_clusters, n_init=100).fit_transform(flat)
    return np.argmin(distances, axis=0)
|
{"/convaeclassification.py": ["/LayerClasses.py"], "/mulNN.py": ["/LayerClasses.py"], "/summary/summary.py": ["/mmr/__init__.py"], "/summaryobject.py": ["/vector/wordvectors.py", "/convae.py"], "/convae.py": ["/LayerClasses.py"]}
|
8,147
|
stats94/championship-bot
|
refs/heads/master
|
/config.py
|
# Configuration for the api-football RapidAPI service.
# Bug fix: the bare **APIKEY** placeholder was not valid Python syntax;
# it is now a string placeholder to be replaced with a real key.
api_key = '**APIKEY**'  # substitute your RapidAPI key here
endpoint = 'https://api-football-v1.p.rapidapi.com'
league_id = 565  # league identifier used by bot.build_table
|
{"/api_service.py": ["/config.py"], "/bot.py": ["/api_service.py", "/config.py"]}
|
8,148
|
stats94/championship-bot
|
refs/heads/master
|
/api_service.py
|
import requests;
import config;
class api_service:
    """Thin wrapper around the api-football REST endpoints."""

    endpoint = config.endpoint
    api_key = config.api_key

    def get(self, url):
        """Perform an authenticated GET and unwrap the top-level 'api' element.

        Every response has the shape::

            api: {
                results: 0 -> Number of results
                fixtures/standing etc: [] -> array with data
            }
        """
        response = requests.get(url, headers={'X-RapidAPI-Key': self.api_key})
        payload = response.json()
        return payload['api']

    def get_table(self, league_id):
        """Fetch the league table for *league_id*."""
        table_url = '{}/v2/leagueTable/{}'.format(self.endpoint, league_id)
        table = self.get(table_url)
        return table['standings']
|
{"/api_service.py": ["/config.py"], "/bot.py": ["/api_service.py", "/config.py"]}
|
8,149
|
stats94/championship-bot
|
refs/heads/master
|
/bot.py
|
from api_service import api_service
import config
class bot:
    """Builds a markdown league table from the football API."""

    api_service = api_service()
    league_id = config.league_id

    def build_table(self):
        """Return the current league table as a markdown table string."""
        # The standings array is wrapped in another array
        standings = self.api_service.get_table(self.league_id)[0]
        headers = '|Pos|Team|Pl|W|D|L|Form|GD|Pts|\n:-:|:--|:-:|:-:|:-:|:-:|:--|:-:|:-:'
        # Position | Team Name | Played | Won | Drawn | Lost | Form | GD | Points |
        rows = []
        for team in standings:
            rows.append('{}|{}|{}|{}|{}|{}|{}|{}|{}'.format(
                team['rank'], team['teamName'], team['all']['matchsPlayed'],
                team['all']['win'], team['all']['draw'], team['all']['lose'],
                team['forme'], team['goalsDiff'], team['points']))
        return '{}\n{}'.format(headers, '\n'.join(rows))
|
{"/api_service.py": ["/config.py"], "/bot.py": ["/api_service.py", "/config.py"]}
|
8,150
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/admin.py
|
from django.contrib import admin
# Register your models here.
from news.models import New
class NewAdmin(admin.ModelAdmin):
    """Admin options for the New (news article) model."""
    model = New
    # Columns shown on the admin change-list page.
    list_display = ['id', 'title', 'created_at', 'is_enabled']

# Register the model in the Django admin with the options above.
admin.site.register(New, NewAdmin)
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,151
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/accounts/migrations/0004_auto_20170520_2113.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-21 02:13
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Relabel the accounts User model as 'Redactor'/'Redactores' in the admin."""

    dependencies = [
        ('accounts', '0003_auto_20170520_2112'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='user',
            options={'verbose_name': 'Redactor', 'verbose_name_plural': 'Redactores'},
        ),
    ]
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,152
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py
|
from rest_framework.pagination import PageNumberPagination
__author__ = 'lucaru9'
class StandardResultsSetPagination(PageNumberPagination):
    """Page-number pagination with a fixed page size of 5 items."""
    page_size = 5
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,153
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-21 00:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema for the news app: creates the New table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='New',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('title', models.CharField(max_length=300, verbose_name='Título')),
                ('sub_title', models.CharField(max_length=600, verbose_name='Copete')),
                ('body', models.TextField(verbose_name='Cuerpo')),
                ('image', models.ImageField(blank=True, null=True, upload_to='news/image')),
                ('is_enabled', models.BooleanField(default=True, verbose_name='Habilitado')),
            ],
        ),
    ]
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,154
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py
|
from django.db import models
# Create your models here.
class New(models.Model):
    """A news article; only enabled articles are exposed by the list API."""
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación')
    updated_at = models.DateTimeField(auto_now=True)
    title = models.CharField(max_length=300, verbose_name='Título')
    sub_title = models.CharField(max_length=600, verbose_name='Copete')
    body = models.TextField(verbose_name='Cuerpo')
    # Optional cover image stored under MEDIA_ROOT/news/image.
    image = models.ImageField(upload_to='news/image', null=True, blank=True)
    # Publication flag; the list view filters on it.
    is_enabled = models.BooleanField(default=True, verbose_name='Habilitado')

    def __str__(self):
        return self.title

    class Meta:
        verbose_name = 'Noticia'
        verbose_name_plural = 'Noticias'
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,155
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py
|
from .models import New
__author__ = 'lucaru9'
from rest_framework import serializers
class ListNewSerializer(serializers.ModelSerializer):
    """Read serializer for New articles returned by the list endpoint."""
    class Meta:
        model = New
        # NOTE: 'id' is not included in the serialized payload.
        fields = ('created_at', 'updated_at', 'title', 'sub_title', 'body', 'image', 'is_enabled')
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,156
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py
|
__author__ = 'lucaru9'
from django.conf.urls import url
from .views import *
# Single route: GET /news/ returns the paginated list of enabled articles.
urlpatterns = [
    url(r'^news/$', ListNewsAPI.as_view()),
]
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,157
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py
|
from rest_framework.generics import ListAPIView
from rest_framework.permissions import AllowAny
from .models import New
from .paginations import StandardResultsSetPagination
from .serializers import ListNewSerializer
class ListNewsAPI(ListAPIView):
    """Public, unauthenticated, paginated list of enabled news articles."""
    serializer_class = ListNewSerializer
    # No authentication required; anybody may read the list.
    authentication_classes = ()
    permission_classes = (AllowAny,)
    pagination_class = StandardResultsSetPagination

    def get_queryset(self):
        # Newest first, hidden articles excluded.
        return New.objects.filter(is_enabled=True).order_by('-created_at')
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,158
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/migrations/0003_auto_20170520_1955.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-21 00:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the Spanish verbose_name to New.created_at."""

    dependencies = [
        ('news', '0002_auto_20170520_1950'),
    ]

    operations = [
        migrations.AlterField(
            model_name='new',
            name='created_at',
            field=models.DateTimeField(auto_now_add=True, verbose_name='Fecha de creación'),
        ),
    ]
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,159
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/accounts/models.py
|
from django.contrib.auth.models import PermissionsMixin, BaseUserManager, \
AbstractBaseUser
from django.db import models
class UserManager(BaseUserManager):
    """Manager that creates users keyed by e-mail (USERNAME_FIELD is 'email')."""

    def _create_user(self, email, password, is_staff, is_superuser,
                     **extra_fields):
        # Shared factory: builds the user, hashes the password, persists it.
        user = self.model(email=email, is_active=True,
                          is_staff=is_staff, is_superuser=is_superuser,
                          **extra_fields)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_user(self, email, username, password=None, **extra_fields):
        # NOTE(review): ``username`` is accepted but never stored or forwarded
        # — presumably vestigial since authentication is by e-mail; confirm
        # with callers before removing it.
        return self._create_user(email, password, False, False,
                                 **extra_fields)

    def create_superuser(self, email, password, **extra_fields):
        # Superusers get both is_staff and is_superuser.
        return self._create_user(email, password, True, True,
                                 **extra_fields)
class User(AbstractBaseUser, PermissionsMixin):
    """Custom user identified by e-mail; labelled 'Redactor' in the admin."""
    email = models.EmailField(unique=True)
    first_name = models.CharField(max_length=100, blank=True, null=True)
    last_name = models.CharField(max_length=100, blank=True, null=True)
    is_editor = models.BooleanField(default=True, verbose_name='Redactor')
    objects = UserManager()
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)
    is_admin = models.BooleanField(default=False)
    # Authentication is performed against the e-mail address.
    USERNAME_FIELD = 'email'

    def get_full_name(self):
        # "<first> <last>"; either part may be None and then renders as 'None'.
        return '{0} {1}'.format(self.first_name, self.last_name)

    def get_short_name(self):
        return '{0}'.format(self.first_name)

    class Meta:
        verbose_name = "Redactor"
        verbose_name_plural = "Redactores"
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,160
|
WilsonJulcaMejia/GrupoRPP
|
refs/heads/master
|
/Libreria de trabajo/AINNI/Implementacion/Back/rpp/accounts/urls.py
|
from django.core.urlresolvers import reverse, reverse_lazy
from django.conf.urls import url
from .views import *
# No account-related routes are exposed yet; placeholder for future views.
urlpatterns = [
]
|
{"/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/urls.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py"], "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/views.py": ["/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/models.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/paginations.py", "/Libreria de trabajo/AINNI/Implementacion/Back/rpp/news/serializers.py"]}
|
8,161
|
boubakersalmi/projet6
|
refs/heads/master
|
/main.py
|
## debut du code partie principale intégrant la POO
## les fonctions appelés ci-dessous sont importées du fichier main.py
## cette partie du programme n'intègre pas de fonctionnalités graphique
## Dans un premier temps, nous importons les fonctions programmées dans main.py
## afin de les intégrer dans la classe Action
## puis nous importons le module logging afin de générer des fichiers de logs
from setting import *
## definition de la classe action
class Action:
    """Runs the maintenance tasks: desktop clean-up and wallpaper refresh."""

    def __init__(self):
        # Sort desktop files into per-type folders (side effect; returns None).
        self.nettoyagebureau = DesktopCleaner()
        # Apply the corporate wallpaper of the day (side effect; returns None).
        self.corporatebg = wallpaper_update()
if __name__ == '__main__':
    # Bracket the run with start/end session markers in the log file.
    logger.info(chemindetoilestart)
    Action()
    logger.info(chemindetoileend)
|
{"/main.py": ["/setting.py"]}
|
8,162
|
boubakersalmi/projet6
|
refs/heads/master
|
/setting.py
|
## ce programme est réalisé sur python 3.7, certains points doivent être adaptés aux versions antérieures de pytho##
##
## DEBUT DU PROGRAMME
## Import des modules nécessaires à l'execution du programme
import os
import json
import glob
import ctypes
import time
import logging
import sys
import csv
import socket
############ Premièren partie du code : la journalisation ########################
## lors de la génération du log, l'heure et la date d'éxecution apparaitront dans le modèle jj/mm/aaaa hh:mm:ss
## le logger a été définit afin de pouvoir faire apparaitre les éléments voulu dans le fichier de log. celui-ci peut etre adapté
# Log lines are stamped dd/mm/yyyy hh:mm:ss and written to the shared drive.
logging.basicConfig(
    filename=r'S:\Booba\configfiles\logfile.log',
    format="%(asctime)s - %(message)s",
    datefmt="%d/%m/%Y %H:%M:%S",
    level=logging.INFO
)
logger = logging.getLogger()
# Mirror every log record to stdout as well.
logger.addHandler(logging.StreamHandler(sys.stdout))
# Session delimiter messages (kept in French: they are written to the log).
chemindetoileend = "**************** FIN DE SESSION *******************"
chemindetoilestart = "**************** DEBUT DE SESSION *******************"
############ Troisieme partie du code : Définition du fond d'écran adaptatif ########################
## le fichier config_data définit l'emplacement des liens des fonds d'écran en cas d'utilisation d'un fond d'écran non adaptatif.
## celui-ci peut etre modifié pour pointer sur une autre source
## config_data = r'C:\Users\booba\Desktop\filerepo\fond_ecran.txt'
## définition d'une fonction permettant de définir le service dans lequel une personne travaille
## pour cela, la fonction fait appel auu fichier usersandservices dans lequel nous retrouvons la relation entre utilisateur et service concerné.
## le nom de l'utilisateur est celui du hostname windows afin de permettre a la fonction gethostname de la récupérer.
## un logger.info a été rajouté afin de faire apparaitre la relation entre utilisateur et service dans le fichier de log
fichierutilisateurservices = r'S:\Booba\configfiles\usersandservices.csv'
def get_hostname_service():
    """Return the service (department) mapped to this machine's hostname.

    Reads the hostname→service CSV (header row skipped) and logs nothing
    itself; raises if the current hostname is absent or the file is empty.

    Bug fix: the original error message interpolated the leaked loop
    variable ``hostname`` (i.e. the *last CSV row's* hostname), and raised
    NameError instead of the intended Exception when the file had no data
    rows. The current hostname is now reported.
    """
    current_hostname = socket.gethostname()
    with open(fichierutilisateurservices, "r") as f:
        csvreader = csv.reader(f, delimiter=',')
        next(csvreader)  # skip header
        for hostname, service in csvreader:
            if hostname == current_hostname:
                return service
    raise Exception(
        f"Impossible de trouver le service auquel '{current_hostname}' appartient.")
## Select the wallpaper-link file according to the user's department, as
## resolved from the hostname/service CSV above.
servicesconcerne = get_hostname_service()
if servicesconcerne == "Technique":
    config_data = r'S:\Booba\configfiles\fond_ecran_technique.txt'
elif servicesconcerne == "RH":
    config_data = r'S:\Booba\configfiles\fond_ecran_rh.txt'
elif servicesconcerne == "Commercial":
    config_data = r'S:\Booba\configfiles\fond_ecran_commerciaux.txt'
else:
    # NOTE(review): when no branch matches, config_data is never assigned and
    # the open(config_data) below fails with NameError — confirm whether this
    # branch should raise instead of just printing.
    print("Impossible de definir le service de l'utilisateur")
## Wallpaper update; the prints below surface progress on the console — if
## the ctypes call succeeds, both messages appear.
def change_wallpaper(wallpaper_path):
    """Set the Windows desktop wallpaper to the given image path."""
    print("Actualisation du fond d'écran")
    # SystemParametersInfoA action 20 with flag 3 — assumes
    # SPI_SETDESKWALLPAPER semantics and an ASCII-encodable path; TODO confirm
    # on non-ASCII paths.
    ctypes.windll.user32.SystemParametersInfoA(20, 0, wallpaper_path.encode("us-ascii"), 3)
    logger.info("Actualisation du fond d'écran réalisée")
## Read the wallpaper file: one image link per line (7 links, one per
## weekday), hosted on the internal network drive.
with open(config_data, "r") as f:
    mesfonddecrans = f.readlines()
# Strip the trailing '\n' (line break) from each link.
mesfonddecrans = [p[:-1] for p in mesfonddecrans]
## Pick today's image: tm_wday (index 6 of the struct_time, 0 = Monday)
## indexes into the list of links.
localtime = time.localtime(time.time())
jdls = localtime[6]
image_du_jour = mesfonddecrans[jdls]
## If a black screen appears as the wallpaper, check the links in the file.
def wallpaper_update():
    # Apply today's image (module-level value computed at import time).
    change_wallpaper(image_du_jour)
############ Third part of the code: desktop clean-up ########################
## Desktop path for the target user account.
CHEMIN_BUREAU = r'C:\Users\booba\Desktop'
## Permission bits applied to folders created during the clean-up.
## Bug fix: the original value was the *decimal* literal 777 (== 0o1411),
## which sets nonsensical mode bits; 0o777 is the intended rwxrwxrwx.
permission_octal = 0o777
## JSON file mapping destination folder names to the extensions they collect.
typeelementsconfig = r'S:\Booba\configfiles\type_fichier.json'
## Create the destination folder on first use.
def creer_dossier(chemin_dossier):
    """Create *chemin_dossier* with the configured mode unless it exists."""
    if os.path.exists(chemin_dossier):
        return
    os.makedirs(chemin_dossier, permission_octal)
## Duplicate-name handling for moved files.
def creer_version(nouveau_chemin):
    """Return a destination path that does not collide with an existing file.

    If ``nouveau_chemin`` is free it is returned unchanged; otherwise a
    version suffix is inserted before the extension (test.txt -> test-v1.txt,
    test-v2.txt, ...) so an existing file is never overwritten.

    Bug fix: the original referenced ``nom_fichier_liste`` and
    ``chemin_dossier`` before they were ever assigned, raising NameError on
    the first name collision.
    """
    dossier = os.path.dirname(nouveau_chemin)
    # Same split as before: name before the first dot, extension after it.
    nom_fichier_liste = os.path.basename(nouveau_chemin).split(".")
    version = 0
    while os.path.isfile(nouveau_chemin):
        version += 1
        nom_fichier_avec_version = "{}-v{}.{}".format(
            nom_fichier_liste[0],
            version,
            nom_fichier_liste[1]
        )
        nouveau_chemin = os.path.join(dossier, nom_fichier_avec_version)
    return nouveau_chemin
## Desktop clean-up entry point.
def DesktopCleaner():
    """Sort desktop files into per-type folders, as configured in the JSON map."""
    with open(typeelementsconfig, "r") as f:
        ## Folder name -> list of file extensions it should collect.
        dossier_et_extensions = json.load(f)
    for dossier in dossier_et_extensions.keys():
        ## Files that belong in folder 'dossier'.
        ## e.g. for dossier == 'TEXTE', 'fichiers_dossier' looks like
        ## ['monfichiertxt.txt', 'blabla.txt', ...]
        fichiers_dossier = []
        for extension in dossier_et_extensions[dossier]:
            for fichier in glob.glob(os.path.join(CHEMIN_BUREAU, "*%s" % extension)):
                fichiers_dossier.append(fichier)
        ## If at least one file matched, move the batch into the folder.
        if len(fichiers_dossier) > 0:
            ## Create the folder if it does not already exist.
            creer_dossier(os.path.join(CHEMIN_BUREAU, dossier))
            ## Move each file into the (new) folder.
            for chemin_original in fichiers_dossier:
                nom_fichier = os.path.basename(chemin_original)
                ## Confirmation messages (console + log, in French).
                print("On met le fichier '%s' dans le dossier '%s'" % (nom_fichier, dossier))
                logger.info("Le fichier nommé '%s' a été déplacé dans le dossier '%s'" % (nom_fichier, dossier))
                nouveau_chemin = os.path.join(
                    CHEMIN_BUREAU,
                    dossier,
                    nom_fichier
                )
                ## Add a -v* suffix if a file with the same name already exists.
                nouveau_chemin = creer_version(nouveau_chemin)
                ## Actually move the file into the folder.
                os.rename(chemin_original, nouveau_chemin)
        ## Report folders for which nothing had to be moved.
        else:
            print("Pas de fichiers a ranger pour le dossier %s." % dossier)
            logger.info("Aucune modification n'a été apportée au dossier %s" % dossier)
|
{"/main.py": ["/setting.py"]}
|
8,163
|
fanout/headline
|
refs/heads/master
|
/headlineapp/apps.py
|
from django.apps import AppConfig
class HeadlineappConfig(AppConfig):
    """Django application configuration for the headline app."""
    name = 'headlineapp'
|
{"/headlineapp/views.py": ["/headlineapp/models.py"]}
|
8,164
|
fanout/headline
|
refs/heads/master
|
/headlineapp/urls.py
|
from django.urls import path
from . import views
# Collection endpoint ('' — POST creates a headline) and per-item endpoint
# ('<id>/' — detail view).
urlpatterns = [
    path('', views.base, name='base'),
    path('<int:headline_id>/', views.item, name='item'),
]
|
{"/headlineapp/views.py": ["/headlineapp/models.py"]}
|
8,165
|
fanout/headline
|
refs/heads/master
|
/headlineapp/models.py
|
from django.db import models
class Headline(models.Model):
    """A headline item served as JSON by the views."""
    type = models.CharField(max_length=64)
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Refreshed on every save (auto_now=True).
    date = models.DateTimeField(auto_now=True)

    def to_data(self):
        """Serialise to a plain dict; 'title' is included only when non-empty."""
        out = {}
        out['id'] = str(self.id)
        out['type'] = self.type
        if self.title:
            out['title'] = self.title
        out['date'] = self.date.isoformat()
        out['text'] = self.text
        return out

    def __str__(self):
        # Human-readable label: type plus the first 100 chars of the text.
        return '%s: %s' % (self.type, self.text[:100])
|
{"/headlineapp/views.py": ["/headlineapp/models.py"]}
|
8,166
|
fanout/headline
|
refs/heads/master
|
/headlineapp/views.py
|
import json
import calendar
from django.http import HttpResponse, HttpResponseRedirect, \
HttpResponseNotModified, HttpResponseNotAllowed
from django.shortcuts import get_object_or_404
from gripcontrol import HttpResponseFormat, HttpStreamFormat, \
WebSocketMessageFormat
from django_grip import set_hold_longpoll, set_hold_stream, publish
from headlineapp.models import Headline
def _json_response(data):
    """Wrap *data* as a pretty-printed JSON HttpResponse."""
    payload = json.dumps(data, indent=4) + '\n'  # pretty print
    return HttpResponse(payload, content_type='application/json')
def base(request):
    """Create a new (empty) headline on POST; reject any other method."""
    if request.method != 'POST':
        return HttpResponseNotAllowed(['POST'])
    headline = Headline(type='none', title='', text='')
    headline.save()
    return _json_response(headline.to_data())
def item(request, headline_id):
    """Retrieve (GET), update (PUT), or subscribe to a single headline.

    Subscription variants:
    - WebSocket-over-HTTP (GRIP): accept and subscribe to the headline channel.
    - Server-sent events: GET with Accept: text/event-stream holds a stream.
    - Long-poll: GET with If-None-Match + Wait header holds until changed.

    PUT publishes the updated representation to all subscribers via GRIP.
    """
    h = get_object_or_404(Headline, pk=headline_id)
    hchannel = str(headline_id)
    if request.wscontext:
        # WebSocket-over-HTTP: accept new connections, subscribe them to this
        # headline's channel, and drain any incoming messages.
        ws = request.wscontext
        if ws.is_opening():
            ws.accept()
            ws.subscribe(hchannel)
        while ws.can_recv():
            message = ws.recv()
            if message is None:
                ws.close()
                break
        return HttpResponse()
    elif request.method == 'GET':
        if request.META.get('HTTP_ACCEPT') == 'text/event-stream':
            resp = HttpResponse(content_type='text/event-stream')
            set_hold_stream(request, hchannel)
            return resp
        else:
            wait = request.META.get('HTTP_WAIT')
            if wait:
                # BUG FIX: the original did `if wait < 1: wait = None` and then
                # unconditionally `if wait > 300`, which raises TypeError on
                # Python 3 when wait is None. Also guard against a non-numeric
                # Wait header instead of crashing with ValueError.
                try:
                    wait = int(wait)
                except ValueError:
                    wait = None
                else:
                    if wait < 1:
                        wait = None
                    elif wait > 300:
                        wait = 300  # cap long-poll hold at 5 minutes
            inm = request.META.get('HTTP_IF_NONE_MATCH')
            # ETag derives from the auto_now date, so any save changes it.
            etag = '"%s"' % calendar.timegm(h.date.utctimetuple())
            if inm == etag:
                resp = HttpResponseNotModified()
                if wait:
                    set_hold_longpoll(request, hchannel, timeout=wait)
            else:
                resp = _json_response(h.to_data())
            resp['ETag'] = etag
            return resp
    elif request.method == 'PUT':
        hdata = json.loads(request.read())
        h.type = hdata['type']
        h.title = hdata.get('title', '')
        h.text = hdata.get('text', '')
        h.save()
        hdata = h.to_data()
        hjson = json.dumps(hdata)
        etag = '"%s"' % calendar.timegm(h.date.utctimetuple())
        rheaders = {'Content-Type': 'application/json', 'ETag': etag}
        hpretty = json.dumps(hdata, indent=4) + '\n'
        # Publish in all three transports so every subscriber type is notified.
        formats = []
        formats.append(HttpResponseFormat(body=hpretty, headers=rheaders))
        formats.append(HttpStreamFormat('event: update\ndata: %s\n\n' % hjson))
        formats.append(WebSocketMessageFormat(hjson))
        publish(hchannel, formats)
        resp = _json_response(hdata)
        resp['ETag'] = etag
        return resp
    else:
        return HttpResponseNotAllowed(['GET', 'PUT'])
|
{"/headlineapp/views.py": ["/headlineapp/models.py"]}
|
8,168
|
sstollenwerk/roguelike_learn
|
refs/heads/main
|
/entity.py
|
from dataclasses import dataclass, asdict, replace
from actions import EscapeAction, MovementAction
from basic_types import Color
@dataclass
class Entity:
    """A drawable, movable thing on the game map."""

    x: int
    y: int
    string: str  # single display character (len == 1)
    fg: Color  # foreground color

    def move(self, action: MovementAction):
        """Shift this entity's position in place by the action's delta."""
        dx, dy = action.dx, action.dy
        self.x += dx
        self.y += dy
|
{"/entity.py": ["/basic_types.py"], "/tile_types.py": ["/basic_types.py"], "/engine.py": ["/entity.py"]}
|
8,169
|
sstollenwerk/roguelike_learn
|
refs/heads/main
|
/tile_types.py
|
import numpy as np # type: ignore
from basic_types import Color, graphic_dt, tile_dt
def new_tile(
    *,  # Enforce the use of keywords, so that parameter order doesn't matter.
    walkable: bool,
    transparent: bool,
    dark: tuple[int, Color, Color],
) -> np.ndarray:
    """Helper function for defining individual tile types.

    Args:
        walkable: True if this tile can be walked over.
        transparent: True if this tile doesn't block FOV.
        dark: (codepoint, fg RGB, bg RGB) graphic shown when out of FOV.
    """
    return np.array((walkable, transparent, dark), dtype=tile_dt)


# Walkable, see-through ground tile.
floor = new_tile(
    walkable=True,
    transparent=True,
    dark=(ord(" "), (255, 255, 255), (50, 50, 150)),
)
# Solid, opaque wall tile.
wall = new_tile(
    walkable=False,
    transparent=False,
    dark=(ord(" "), (255, 255, 255), (0, 0, 100)),
)
|
{"/entity.py": ["/basic_types.py"], "/tile_types.py": ["/basic_types.py"], "/engine.py": ["/entity.py"]}
|
8,170
|
sstollenwerk/roguelike_learn
|
refs/heads/main
|
/engine.py
|
from typing import Iterable, Any
from dataclasses import dataclass, asdict
from tcod.context import Context
from tcod.console import Console
from actions import EscapeAction, MovementAction
from entity import Entity
from input_handlers import EventHandler
from game_map import GameMap
class Engine:
    """Coordinates entities, input handling, and rendering."""

    def __init__(
        self,
        entities: list[Entity],
        event_handler: EventHandler,
        game_map: GameMap,
        player: Entity,
    ):
        # Guarantee the player is tracked like any other entity.
        if player not in entities:
            entities.append(player)
        self.entities = entities
        self.event_handler = event_handler
        self.player = player
        self.game_map = game_map

    def handle_events(self, events: Iterable[Any]) -> None:
        """Translate each event into an action and perform it on the player."""
        for event in events:
            action = self.event_handler.dispatch(event)
            if action is not None:
                action.perform(self, self.player)

    def render(self, console: Console, context: Context) -> None:
        """Draw the map and all entities, present the frame, then clear."""
        self.game_map.render(console)
        for entity in self.entities:
            console.print(**asdict(entity))
        context.present(console)
        console.clear()
|
{"/entity.py": ["/basic_types.py"], "/tile_types.py": ["/basic_types.py"], "/engine.py": ["/entity.py"]}
|
8,171
|
sstollenwerk/roguelike_learn
|
refs/heads/main
|
/basic_types.py
|
import numpy as np  # type: ignore

# RGB color triple.
Color = tuple[int, int, int]

# Tile graphics structured type compatible with Console.tiles_rgb.
graphic_dt = np.dtype(
    [
        ("ch", np.int32),  # Unicode codepoint.
        ("fg", "3B"),  # 3 unsigned bytes, for RGB colors.
        ("bg", "3B"),
    ]
)

# Tile struct used for statically defined tile data.
# FIX: np.bool was a deprecated alias for the builtin bool and was removed in
# NumPy 1.24 (AttributeError). np.bool_ keeps identical dtype semantics.
tile_dt = np.dtype(
    [
        ("walkable", np.bool_),  # True if this tile can be walked over.
        ("transparent", np.bool_),  # True if this tile doesn't block FOV.
        ("dark", graphic_dt),  # Graphics for when this tile is not in FOV.
    ]
)
|
{"/entity.py": ["/basic_types.py"], "/tile_types.py": ["/basic_types.py"], "/engine.py": ["/entity.py"]}
|
8,183
|
alexseitsinger/django-rest-framework-expandable
|
refs/heads/master
|
/src/rest_framework_expandable/serializers.py
|
from rest_framework.serializers import ModelSerializer, HyperlinkedModelSerializer
from .mixins.expandable_model_serializer import ExpandableModelSerializerMixin
class ExpandableHyperlinkedModelSerializer(
    ExpandableModelSerializerMixin, HyperlinkedModelSerializer
):
    """Hyperlinked model serializer with expandable-field support."""


class ExpandableModelSerializer(ExpandableModelSerializerMixin, ModelSerializer):
    """Plain model serializer with expandable-field support."""
|
{"/src/rest_framework_expandable/serializers.py": ["/src/rest_framework_expandable/mixins/expandable_model_serializer.py"], "/src/rest_framework_expandable/mixins/expandable_related_field.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/mixins/expandable.py": ["/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/fields.py": ["/src/rest_framework_expandable/mixins/expandable_related_field.py"], "/src/rest_framework_expandable/mixins/expandable_model_serializer.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/mixins/expandable_related_field.py"]}
|
8,184
|
alexseitsinger/django-rest-framework-expandable
|
refs/heads/master
|
/src/rest_framework_expandable/utils.py
|
import re
from django.db.models import Manager
from django.db.models.query import QuerySet
def get_class_name(obj=None):
    """Return the class name of *obj*, or "Unnamed" when obj is None."""
    if obj is None:
        return "Unnamed"
    return type(obj).__name__
class HashableList(list):
    """A list usable where a hashable is required; hashed by identity."""

    def __hash__(self):
        return id(self)
class HashableDict(dict):
    """
    Hashable Dictionary

    Hashables should be immutable -- not enforcing this but TRUSTING you not to
    mutate a dict after its first use as a key.
    https://stackoverflow.com/questions/1151658/python-hashable-dicts
    """

    def __hash__(self):
        # Stringify every value; list values contribute one string per element.
        parts = ()
        for value in self.values():
            try:
                hash(value)
            except TypeError:
                if isinstance(value, list):
                    parts += tuple(str(item) for item in value)
                else:
                    parts += (str(value),)
            else:
                parts += (str(value),)
        return hash((frozenset(self), frozenset(parts)))
def normalize_path(path):
    """Strip at most one leading and one trailing dot separator."""
    if path.startswith("."):
        path = path[1:]
    return path[:-1] if path.endswith(".") else path
def get_path_parts(obj, path, base_name=None):
    """Split *path* around its first 'word.word' segment.

    Returns a 4-tuple (prefix_field, prefix_path, suffix_field, suffix_path);
    the suffix pair is ("", "") when there is no remainder after the first
    segment. *obj* and *base_name* are accepted for interface compatibility.
    """

    def strip_dots(segment):
        # Inlined normalize_path(): drop one leading/trailing dot.
        if segment.startswith("."):
            segment = segment[1:]
        if segment.endswith("."):
            segment = segment[:-1]
        return segment

    segments = [
        strip_dots(seg)
        for seg in re.split(r"(\w+\.\w+)", path, maxsplit=1)
        if len(seg)
    ]
    pairs = []
    for segment in segments:
        try:
            field = segment.split(".")[1]
        except IndexError:
            field = segment
        pairs.append((field, segment))
    result = (pairs[0][0], pairs[0][1])
    if len(pairs) > 1:
        result += (pairs[1][0], pairs[1][1])
    else:
        result += ("", "")
    return result
def get_object(obj):
    """Collapse a Django Manager/QuerySet to a single model instance.

    Managers resolve to their full queryset, querysets to their first object;
    anything else is returned unchanged.
    """
    if isinstance(obj, Manager):
        obj = obj.all()
    if isinstance(obj, QuerySet):
        obj = obj.first()
    return obj
class DictDiffer(object):
    """
    Calculate the difference between two dictionaries as:
    (1) items added
    (2) items removed
    (3) keys same in both but changed values
    (4) keys same in both and unchanged values
    """

    def __init__(self, current_dict, past_dict):
        self.current_dict = current_dict
        self.past_dict = past_dict
        self.current_keys = set(current_dict.keys())
        self.past_keys = set(past_dict.keys())
        self.intersect = self.current_keys.intersection(self.past_keys)

    def added(self):
        """Keys present now but not before."""
        return self.current_keys - self.intersect

    def removed(self):
        """Keys present before but not now."""
        return self.past_keys - self.intersect

    def changed(self):
        """Shared keys whose values differ."""
        return {k for k in self.intersect if self.past_dict[k] != self.current_dict[k]}

    def unchanged(self):
        """Shared keys whose values are equal."""
        return {k for k in self.intersect if self.past_dict[k] == self.current_dict[k]}

    def new_or_changed(self):
        """Keys that are new or whose values changed."""
        return self.added().union(self.changed())
def remove_redundant_paths(paths):
    """
    Returns a list of unique paths.

    A path is dropped when some other path extends it (starts with it and is
    strictly longer). Comparing a path against itself is harmless because the
    strict-length condition is then False.
    """
    return [
        path
        for path in paths
        if not any(other.startswith(path) and len(other) > len(path) for other in paths)
    ]
def sort_field_paths(field_paths):
    """
    Clean up a list of field paths: deduplicate, drop redundant prefixes,
    and discard empty strings.
    """
    unique = list(set(field_paths))
    pruned = remove_redundant_paths(unique)
    return [path for path in pruned if len(path)]
|
{"/src/rest_framework_expandable/serializers.py": ["/src/rest_framework_expandable/mixins/expandable_model_serializer.py"], "/src/rest_framework_expandable/mixins/expandable_related_field.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/mixins/expandable.py": ["/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/fields.py": ["/src/rest_framework_expandable/mixins/expandable_related_field.py"], "/src/rest_framework_expandable/mixins/expandable_model_serializer.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/mixins/expandable_related_field.py"]}
|
8,185
|
alexseitsinger/django-rest-framework-expandable
|
refs/heads/master
|
/src/rest_framework_expandable/mixins/expandable_related_field.py
|
from rest_framework.relations import ManyRelatedField
from django.db.models import Manager
from django.db.models.query import QuerySet
from django.utils.module_loading import import_string
from .expandable import ExpandableMixin
from ..utils import (
get_object,
get_class_name,
get_path_parts,
DictDiffer,
HashableList,
HashableDict,
)
# TODO: Add an assertion for field names existing on the model.
# TODO: Detect and fallback to default representation for circular references instead of
# just removing the field completely on the parent.
class ExpandableRelatedFieldMixin(ExpandableMixin):
    """Related-field mixin that can expand a relation into a nested
    representation, driven by 'expand' query params and per-field settings
    (the 'expand_settings' attribute on subclasses)."""

    settings_attr = "expand_settings"
    # Kwargs popped off in __init__ and stored as instance attributes.
    initialized_attrs = ["allowed", "ignored"]
    comparison_field_name = "uuid"

    def __init__(self, *args, **kwargs):
        """Capture expandable kwargs ('allowed', 'ignored') before DRF init."""
        # When we set read_only on the related field instance, the queryset attribute
        # will raise an exception. So, to avoid this, reset the queryset attribute to
        # None to allow these instances to be read_only when specified.
        read_only = kwargs.get("read_only", False)
        if read_only is True:
            setattr(self, "queryset", None)
        for name in self.initialized_attrs:
            kwarg = kwargs.pop(name, None)
            if kwarg is not None:
                setattr(self, name, kwarg)
        super().__init__(*args, **kwargs)

    @property
    def settings(self):
        """
        Returns the settings used for this related field instance.
        """
        return getattr(self, self.settings_attr, {})

    @property
    def ignored_paths(self):
        """
        Returns a list of field paths to ignore when generating the representation of
        this field instance.
        """
        ignored_paths = []
        ignored = getattr(self, "ignored", None)
        if ignored is not None:
            for path in ignored:
                ignored_paths.append(self.get_field_path(path))
        return ignored_paths

    def is_ignored(self, path):
        """
        Returns True/False if the specified path is one of the ignored field paths. Used
        by to_representation_for_field to determine if the field is the one to expand.
        """
        if path in self.ignored_paths:
            return True
        return False

    def to_non_circular_path(self, path):
        """Drop the final path segment when it would recurse into the parent."""
        if self.is_circular(path):
            try:
                prefix, field_name = path.rsplit(".", 1)
                return prefix
            except ValueError:
                # No dot in the path; nothing to trim.
                return path
        return path

    def is_circular(self, path):
        """True when the last path segment points back at the parent model."""
        try:
            prefix, field_name = path.rsplit(".", 1)
        except ValueError:
            field_name = path
        if field_name in self.circular_field_names:
            return True
        return False

    @property
    def circular_field_names(self):
        """Names that would recurse into the parent: the parent model name and
        its default reverse accessor '<model>_set'."""
        circular_field_names = []
        # Remove circular references to the parent model.
        parent_model_name = self.model_serializer.get_model_name()
        parent_set_name = "{}_set".format(parent_model_name)
        parent_names = (parent_model_name, parent_set_name)
        for parent_name in parent_names:
            circular_field_names.append(parent_name)
        return circular_field_names

    def get_skipped_fields(self, skipped=None):
        """
        Returns a list of field paths (ignored and skipped) to pass to the serializer
        class so it doesn't return them in the representation.
        """
        skipped_fields = self.ignored_paths
        for field_name in self.circular_field_names:
            skipped_fields.append(field_name)
        if skipped is not None:
            skipped_fields.extend(skipped)
        return list(set(skipped_fields))

    @property
    def allowed_paths(self):
        """
        Returns a list of field paths that are permitted to be expanded from this
        expandable class instance.
        """
        allowed = getattr(self, "allowed", [])
        allowed_paths = [self.get_field_path(x) for x in allowed]
        return allowed_paths

    def is_allowed(self, path):
        """
        Returns True/False if the specified path is one of the allowed field paths. Used
        by to_representation_for_field to determine if the field is to be expanded.
        """
        if path.startswith(self.allowed_prefix):
            return True
        if path in self.allowed_paths:
            return True
        return False

    def assert_is_allowed(self, path):
        """
        Raises an AssertionError if the field path specified is not in the list of
        allowed field paths.
        """
        model_serializer_name = get_class_name(self.model_serializer)
        model_serializer_field_name = self.model_serializer_field_name
        related_field_class_name = get_class_name(self)
        if self.is_allowed(path) is False:
            # Drop the leading model-name segment for a friendlier message.
            path = ".".join(path.split(".")[1:])
            raise AssertionError(
                "The path '{}' is not listed as an allowed field path on {}'s {} "
                "field. Please add the path to 'allowed' kwarg on {}'s '{}' field "
                "to allow its expansion.".format(
                    path,
                    model_serializer_name,
                    model_serializer_field_name,
                    model_serializer_name,
                    model_serializer_field_name,
                )
            )

    def assert_is_specified(self, path):
        """
        Raises an AssertionError if the field path specified is not in the list of
        entries in the 'expands' attribute on the related field class instance.
        """
        if self.is_specified(path) is False:
            # if field_path.startswith(self.model_name):
            #     field_path.replace("{}.".format(self.model_name), "")
            msg = []
            indent = "\n"
            for d in self.settings.get("serializers", []):
                msg.append(
                    "{}{}{}".format(d["serializer"], indent, indent.join(d["paths"]))
                )
            raise AssertionError(
                "The field path '{field_path}' is not specified in '{attr_name}' on "
                "{related_field_class_name}.\n\nCurrently Specified:\n{specified}".format(
                    field_path=path,
                    attr_name=self.settings_attr,
                    related_field_class_name=get_class_name(self),
                    specified="\n".join(msg),
                )
            )

    def is_specified(self, path):
        """
        Returns True/False if the specified path is in any of the listed paths on the
        class isntance's 'expands' attribute.
        """
        for d in self.settings.get("serializers", []):
            if path in d.get("paths", []):
                return True
        return False

    def is_matching(self, requested_path):
        """
        Returns True/False if the requested path starts with the current
        'model_serializer_field_name'.
        """
        base_path = self.get_field_path(self.model_serializer_field_name)
        if requested_path == base_path:
            return True
        prefix = "{}.".format(base_path)
        if requested_path.startswith(prefix):
            return True
        return False

    def to_default_representation(self, obj):
        """
        Returns the default representation of the object.
        """
        return super().to_representation(obj)

    def expand_object(self, obj, path):
        """
        Method for expanding a model instance object. If a target field name is
        specified, the serializer will use that nested object to generate a
        representation.
        """
        # If the field exists, but its an empty object (no entry saved), obj will be
        # None. So, if we get None as obj, return None instead of trying to serializer
        # its representation.
        if obj is None:
            return None
        serializer = self.get_serializer(obj, path)
        representation = serializer.to_representation(obj)
        return representation

    def get_alias(self, prefix_field, prefix_path, suffix_field, suffix_path):
        """Apply any configured alias overrides for the given prefix path and
        return the (possibly rewritten) 4-tuple of path parts."""
        for d in self.settings.get("aliases", []):
            if prefix_path in d.get("paths", []):
                alias = d.get("alias", {})
                prefix_field = alias.get("prefix_field", prefix_field)
                prefix_path = alias.get("prefix_path", prefix_path)
                suffix_field = alias.get("suffix_field", suffix_field)
                suffix_path = alias.get("suffix_path", suffix_path)
        return (prefix_field, prefix_path, suffix_field, suffix_path)

    def expand(self, obj, prefix_field, prefix_path, suffix_field, suffix_path):
        """Expand *obj* along the prefix path, then (recursively) attach the
        expanded suffix path as a nested key on the result."""
        if isinstance(obj, Manager):
            obj = obj.all()
        target = obj
        target_name = get_class_name(get_object(target)).lower()
        names = (target_name, "{}_set".format(target_name))
        if len(prefix_field) and prefix_field not in names:
            target = getattr(target, prefix_field, target)
        expanded = self.expand_object(target, prefix_path)
        if len(suffix_field):
            # If our prefix path is a manytomanyfield, then use the first string in the
            # suffix path as the field name.
            if prefix_path.endswith("_set"):
                try:
                    suffix_field, _ = suffix_path.split(".", 1)
                except ValueError:
                    suffix_field = suffix_path
            expanded[suffix_field] = self.get_expanded(target, suffix_path)
        return expanded

    def get_expanded(self, obj, path):
        """
        Fascade method for expanding objects or querysets into expanded (nested)
        representations.
        """
        prefix_field, prefix_path, suffix_field, suffix_path = get_path_parts(obj, path)
        prefix_field, prefix_path, suffix_field, suffix_path = self.get_alias(
            prefix_field, prefix_path, suffix_field, suffix_path
        )
        if isinstance(obj, QuerySet):
            return [self.get_expanded(o, path) for o in obj]
        return self.expand(obj, prefix_field, prefix_path, suffix_field, suffix_path)

    def has_comparison_field(self, d1, d2):
        """
        Returns True/False if both 'd1' and 'd2' have the 'comparison_field' key,
        regardless of their respective values.
        """
        result = False
        for name in self.settings.get("comparison_fields", []):
            if result is True:
                break
            result = all([name in x for x in [d1, d2]])
        return result

    def compare_objects(self, d1, d2):
        """True when d1 and d2 agree on the first shared comparison field."""
        for name in self.settings.get("comparison_fields", []):
            if all([name in x for x in [d1, d2]]):
                return d1[name] == d2[name]
        return False

    def get_changed_field_names(self, d1, d2):
        """Names of keys whose values differ between d1 and d2."""
        return DictDiffer(d1, d2).changed()

    def get_target_field_names(self, paths):
        """Final field name of each path; for reverse-set paths, the segment
        before the last one is used instead."""
        result = []
        for path in paths:
            bits = path.split(".")
            field_name = bits[-1]
            try:
                i = bits.index(field_name)
                if bits[i - 2].endswith("_set"):
                    field_name = bits[i - 1]
            except IndexError:
                pass
            result.append(field_name)
        return result

    def to_expanded_representation(self, obj, paths):
        """
        Entry method for converting an model object instance into a representation by
        expanding the paths specified (if they are allowed and specified).
        """
        if isinstance(obj, Manager):
            obj = obj.all()
        expanded = None
        # NOTE(review): target_fields is computed but never used below.
        target_fields = self.get_target_field_names(paths)
        if len(paths) > 1:
            # expand multiple fields
            for path in paths:
                current = self.get_expanded(obj, path)
                if expanded is None:
                    expanded = current
                elif isinstance(expanded, list):
                    # Merge list results: for matching objects, prefer the
                    # smaller (URL-collapsed) value for each changed field.
                    for d1 in expanded:
                        for d2 in current:
                            if self.has_comparison_field(d1, d2):
                                if self.compare_objects(d1, d2):
                                    changed_fields = self.get_changed_field_names(
                                        d1, d2
                                    )
                                    for field_name in changed_fields:
                                        # The dict with the updated (from a url) will
                                        # have a smaller length.
                                        if len(d2[field_name]) < len(d1[field_name]):
                                            d1[field_name] = d2[field_name]
        else:
            # expand single field
            expanded = self.get_expanded(obj, paths[0])
        if isinstance(expanded, list):
            return HashableList(expanded)
        return HashableDict(expanded)

    def get_serializer_context(self):
        """Context forwarded to nested serializers."""
        return self.context

    def get_serializer(self, source, path=None, context=None):
        """
        Finds and returns the serializer class instance to use. Either imports the class
        specified in the entry on the 'expands' attribute of the ExpandableRelatedField
        instance, or re-uses the serializer class that was already imported and saved to
        the settings previously.
        """
        serializer_class = None
        if context is None:
            context = self.context
        ret = {"skipped_fields": [], "many": False, "context": context}
        if isinstance(source, Manager):
            source = source.all()
        if isinstance(source, (ManyRelatedField, QuerySet)):
            ret["many"] = True
        for d in self.settings.get("serializers", []):
            if path in d.get("paths", []):
                serializer_class = self.get_serializer_class(d["serializer"])
                ret["skipped_fields"] = self.get_skipped_fields(d.get("skipped", []))
                ret["many"] = d.get("many", ret["many"])
                if not isinstance(source, QuerySet):
                    ret["many"] = False
        # if ret["many"] is True:
        #     if not isinstance(source, (QuerySet)):
        #         source = QuerySet(source)
        if serializer_class is None:
            raise RuntimeError(
                "There is no specification for '{path}' in {class_name}.\n\n"
                "Add a dictionary to the 'expandable' list with:\n"
                "    'paths': ['{path}']".format(
                    path=path, class_name=get_class_name(self)
                )
            )
        # print("---------- get_serializer_class -----------")
        # print("path: ", path)
        # print("serializer_class: ", serializer_class.__name__)
        return serializer_class(**ret)

    def get_serializer_class(self, serializer_path):
        """
        Returns the serializer class to use for serializing the object instances.
        """
        target = None
        for d in self.settings.get("serializers", []):
            if serializer_path == d.get("serializer", ""):
                target = d
        if target is None:
            raise AttributeError(
                "Failed to find an entry for serializer '{}'.".format(serializer_path)
            )
        # Import lazily and cache the class back onto the settings entry.
        klass = target.get("serializer_class", None)
        if klass is None:
            klass = target["serializer_class"] = import_string(serializer_path)
        return klass
|
{"/src/rest_framework_expandable/serializers.py": ["/src/rest_framework_expandable/mixins/expandable_model_serializer.py"], "/src/rest_framework_expandable/mixins/expandable_related_field.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/mixins/expandable.py": ["/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/fields.py": ["/src/rest_framework_expandable/mixins/expandable_related_field.py"], "/src/rest_framework_expandable/mixins/expandable_model_serializer.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/mixins/expandable_related_field.py"]}
|
8,186
|
alexseitsinger/django-rest-framework-expandable
|
refs/heads/master
|
/src/rest_framework_expandable/mixins/expandable.py
|
from ..utils import sort_field_paths
class ExpandableMixin(object):
    """Shared request/query-param helpers for expandable serializers/fields."""

    model_name = None
    query_param = "expand"
    expanded_fields = None

    @property
    def request(self):
        """
        Returns the current request context passed from DRF.
        """
        context = getattr(self, "context", None)
        if context is None:
            raise AttributeError("Context not found.")
        request = context.get("request", None)
        if request is None:
            raise AttributeError("Request not found in context.")
        return request

    @property
    def all_query_params(self):
        """Query params of the current request (falls back to GET)."""
        return getattr(self.request, "query_params", getattr(self.request, "GET", {}))

    @property
    def params(self):
        """
        Returns a list of unique relative field paths that should be used for expanding.
        """
        requested = []
        target_param = getattr(self, "query_param", None)
        if target_param is not None:
            raw = self.all_query_params.get(target_param, "")
            requested.extend(raw.split(","))
        return sort_field_paths(requested)

    def get_model_name(self):
        """
        Returns the model name from the ModelSerializer Meta class model specified, or
        from the previously saved model name on the class.
        """
        name = getattr(self, "model_name", None)
        if name is None:
            name = self.Meta.model.__name__.lower()
            self.model_name = name
        return name

    def get_field_path(self, path):
        """Prefix *path* with the model name unless it is already prefixed."""
        prefix = "{}.".format(self.get_model_name())
        if path.startswith(prefix):
            return path
        return "{}{}".format(prefix, path)

    @property
    def requested_fields(self):
        """
        Returns a list of field paths to expand.
        Can be specified via class instance or via query params.
        """
        requested = self.params
        # Add our target fields that we specified on the class.
        if isinstance(self.expanded_fields, list):
            requested.extend(self.expanded_fields)
        return sort_field_paths(requested)
|
{"/src/rest_framework_expandable/serializers.py": ["/src/rest_framework_expandable/mixins/expandable_model_serializer.py"], "/src/rest_framework_expandable/mixins/expandable_related_field.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/mixins/expandable.py": ["/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/fields.py": ["/src/rest_framework_expandable/mixins/expandable_related_field.py"], "/src/rest_framework_expandable/mixins/expandable_model_serializer.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/mixins/expandable_related_field.py"]}
|
8,187
|
alexseitsinger/django-rest-framework-expandable
|
refs/heads/master
|
/src/rest_framework_expandable/fields.py
|
from rest_framework.serializers import SlugRelatedField, HyperlinkedRelatedField
from .mixins.expandable_related_field import ExpandableRelatedFieldMixin
class ExpandableHyperlinkedRelatedField(
    ExpandableRelatedFieldMixin,
    HyperlinkedRelatedField,
):
    """Hyperlinked related field with expandable-representation support."""


class ExpandableSlugRelatedField(ExpandableRelatedFieldMixin, SlugRelatedField):
    """Slug related field with expandable-representation support."""
|
{"/src/rest_framework_expandable/serializers.py": ["/src/rest_framework_expandable/mixins/expandable_model_serializer.py"], "/src/rest_framework_expandable/mixins/expandable_related_field.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/mixins/expandable.py": ["/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/fields.py": ["/src/rest_framework_expandable/mixins/expandable_related_field.py"], "/src/rest_framework_expandable/mixins/expandable_model_serializer.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/mixins/expandable_related_field.py"]}
|
8,188
|
alexseitsinger/django-rest-framework-expandable
|
refs/heads/master
|
/src/rest_framework_expandable/mixins/expandable_model_serializer.py
|
from django.db.models import Manager
from rest_framework.relations import ManyRelatedField
from .expandable import ExpandableMixin
from .expandable_related_field import ExpandableRelatedFieldMixin
from rest_framework_helpers.mixins import RepresentationMixin
class ExpandableModelSerializerMixin(RepresentationMixin, ExpandableMixin):
    """Serializer mixin that wires expandable related fields to their parent
    serializer and expands requested field paths during representation."""

    def __init__(self, *args, **kwargs):
        # Expansion paths may also be supplied directly when instantiating.
        self.expanded_fields = kwargs.pop("expanded_fields", None)
        super().__init__(*args, **kwargs)
        self.initialize_expandable_fields()

    def initialize_expandable_fields(self):
        """Attach back-references and allowed-path metadata to each expandable
        related field on this serializer."""
        model_name = self.get_model_name()
        for field_name, field in self.expandable_fields:
            field.model_name = model_name
            field.model_serializer = self
            field.model_serializer_field_name = field_name
            field.allowed_prefix = "{}.{}.".format(model_name, field_name)
            # The field's own name is always allowed to expand.
            field.allowed = list(set([field_name] + getattr(field, "allowed", [])))

    @property
    def expandable_fields(self):
        """
        Returns a list of all the fields that subclass ExpandableRelatedFieldMixin
        """
        fields = []
        for field_name, field in self.fields.items():
            # ManyRelatedField wraps the real related field in child_relation.
            target = (
                field.child_relation if isinstance(field, ManyRelatedField) else field
            )
            if isinstance(target, ExpandableRelatedFieldMixin):
                fields.append([field_name, target])
        return fields

    def is_expandable(self, field):
        """
        Returns True if the field is a subclass of the ExpandableRelatedFieldMixin
        """
        target = field.child_relation if isinstance(field, ManyRelatedField) else field
        for field_name, field in self.expandable_fields:
            if field == target:
                return True
        return False

    def get_matched_paths(self, expandable_field):
        """Requested paths that target *expandable_field*, each validated as
        both allowed and specified (raises AssertionError otherwise)."""
        matched = []
        for requested_path in self.requested_fields:
            if expandable_field.is_matching(requested_path):
                expandable_field.assert_is_allowed(requested_path)
                expandable_field.assert_is_specified(requested_path)
                matched.append(requested_path)
        return matched

    def to_representation_for_field(self, field, obj):
        """
        A function to customize what each field representation produces. Can be
        overwritten in sublclasses to add custom behavoir on a per-field basis.

        By default, if the field is an expandable field, it will check if it should be
        expanded, and do so if checks pass.
        """
        if isinstance(obj, Manager):
            obj = obj.all()
        if self.is_expandable(field):
            target = getattr(field, "child_relation", field)
            matched = self.get_matched_paths(target)
            if len(matched):
                return target.to_expanded_representation(obj, matched)
        return field.to_representation(obj)
|
{"/src/rest_framework_expandable/serializers.py": ["/src/rest_framework_expandable/mixins/expandable_model_serializer.py"], "/src/rest_framework_expandable/mixins/expandable_related_field.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/mixins/expandable.py": ["/src/rest_framework_expandable/utils.py"], "/src/rest_framework_expandable/fields.py": ["/src/rest_framework_expandable/mixins/expandable_related_field.py"], "/src/rest_framework_expandable/mixins/expandable_model_serializer.py": ["/src/rest_framework_expandable/mixins/expandable.py", "/src/rest_framework_expandable/mixins/expandable_related_field.py"]}
|
8,199
|
samueltenka/LearnToHack-Compiler
|
refs/heads/master
|
/Parser.py
|
import EnsureVersion3
def is_number(string):
    """Truthy when the token looks numeric (starts with a digit or '.');
    preserves the original falsy-passthrough for empty input."""
    if not string:
        return string
    return string[0] in '0123456789.'
def is_identifier(string):
    """Truthy when the token starts with a lowercase letter; preserves the
    original falsy-passthrough for empty input."""
    if not string:
        return string
    return string[0] in 'abcdefghijklmnopqrstuvwxyz'
class Parser:
def __init__(self, program_text):
    """Tokenize *program_text* (whitespace-split) and reset parser state."""
    self.tokenized = program_text.split()
    self.index = 0
    # Preset data addresses: 0 = input, 1 = output.
    self.variable_addresses = {'input':0, 'output':1}
    # Constant pool: value -> address, plus insertion-ordered list of values.
    self.number_addresses = dict([]); self.numbers = []
    # Allocation starts at 3 (address 2 appears reserved — unconfirmed).
    self.next_free_address = 3
    self.machine_code = []
def peek(self):
return self.tokenized[self.index]
def match(self, token):
assert(self.peek() == token)
self.index += 1
def at_end(self):
return self.index >= len(self.tokenized)
def gen_code(self,instr,a,r):
self.machine_code.append(instr+' '+str(a)+' '+str(r))
def use_next_free_address(self):
nfa = self.next_free_address
self.next_free_address += 1
return nfa
def write_constants_table(self):
l = len(self.machine_code)
for l in self.machine_code:
i, n, r = l.split(' ')
if i=='loadconst':
l[:] = 'load %s %s' % (int(self.number_addresses[n]) + l, r)
for n in self.numbers:
self.machine_code.append(n)
def match_number(self):
num=float(self.peek())
if num not in self.number_addresses:
self.number_addresses[num] = self.use_next_free_address()
self.numbers.append(num)
self.gen_code('loadconst',num,0)
self.match(self.peek())
def match_variable(self):
var=self.peek()
if var not in self.variable_addresses:
self.variable_addresses[var] = self.use_next_free_address()
self.next_free_address += 1
self.gen_code('load',self.variable_addresses[var],0)
self.match(self.peek())
def match_factor(self):
if is_number(self.peek()): self.match_number()
elif is_identifier(self.peek()): self.match_identifier()
else:
temp1 = self.use_next_free_address()
temp2 = self.use_next_free_address()
self.gen_code('store',temp1,1)
self.gen_code('store',temp2,2)
self.match('(')
self.match_expression()
self.match(')')
self.gen_code('load',temp1,1)
self.gen_code('load',temp2,2)
def match_term(self):
self.match_factor()
while not self.at_end() and self.peek() in ['*']:
self.match('*')
self.gen_code('swap',0,1)
self.match_factor()
self.gen_code('multiply',0,1)
def match_expression(self):
self.match_term()
while not self.at_end() and self.peek() in ['+']:
self.match('+')
self.gen_code('swap',0,2)
self.match_term()
self.gen_code('add',0,2)
def match_statement(self):
pass
def match_assignment(self):
#self.match_variable() #generates unnecessary load statement
var=self.peek(); assert(is_variable(var)); self.match(var)
self.match('=')
self.match_expression()
self.gen_code('store',self.variable_addresses[var],0)
#NOTE: notation easier to parse: expr->varname (assignment written backward)
def match_if(self):
self.match('if')
self.match('(')
self.match(')')
self.match_statement()
def match_while(self):
pass
|
{"/ParserTest.py": ["/Parser.py"], "/MachineTest.py": ["/Machine.py"]}
|
8,200
|
samueltenka/LearnToHack-Compiler
|
refs/heads/master
|
/ParserTest.py
|
from Parser import Parser

# Compile a sample arithmetic expression and dump the generated assembly.
source_text = '1 * ( 2 + 3 * 4 + 5 ) + 6 * 7'
compiler = Parser(source_text)
compiler.match_expression()
compiler.write_constants_table()
for line in compiler.machine_code:
    print(line)
|
{"/ParserTest.py": ["/Parser.py"], "/MachineTest.py": ["/Machine.py"]}
|
8,201
|
samueltenka/LearnToHack-Compiler
|
refs/heads/master
|
/Machine.py
|
#Michigan Hackers Presentation on Compilers
import EnsureVersion3
'''
instructions:
load A B R[B] <-- M[R[A]]
store A B R[B] --> M[R[A]]
copy A B R[B] <-- R[A]
set A B B --> R[A]
branchif0 A B PC <-- R[A] if R[B]==0
branchifneg A B PC <-- R[A] if R[B] < 0
jump A B PC <-- R[A] (so B is dummy var.)
add A B R[B] <-- R[B] + R[A]
sub A B R[B] <-- R[B] - R[A]
mul A B R[B] <-- R[B] * R[A]
div A B R[B] <-- R[B] / R[A]
mod A B R[B] <-- R[B] % R[A]
Note: program might also contain literal numbers in addition to instructions.
Machine halts when program counter reaches -1.
Machine Specifics:
Each memory address contains a float or program instruction.
Floats are rounded to integers when interpreted as addresses.
The program counter starts at 4.
The first 4 memory addresses are IO devices:
0 [Input, e.g. temperature sensor]
1 [Input, e.g. joystick]
2 [Output, e.g. LED]
3 [Output, e.g. motor]
4&beyond [Program then data]
'''
class Machine:
    """Interpreter for the toy register machine described in the module
    docstring.  The program counter starts at 4 (addresses 0-3 are IO
    cells); the machine halts when the program counter reaches -1.
    """
    PRECISION = 0.0001

    def __init__(self, num_addresses, num_registers, debug=True):
        # debug=True preserves the original always-print tracing;
        # pass debug=False (as MachineTest.py does) to silence it.
        self.memory = [0.0 for i in range(num_addresses)]
        self.registers = [0.0 for i in range(num_registers)]
        self.program_counter = None
        self.debug = debug

    def load_program(self, lines, inputs=(0.0, 0.0)):
        """Load instruction strings at address 4 onward and seed the two
        input cells (addresses 0 and 1).

        A bare number is accepted for *inputs* as a convenience for
        single-input programs (MachineTest.py passes a float).
        """
        if isinstance(inputs, (int, float)):
            inputs = (float(inputs), 0.0)
        self.memory[:2] = inputs
        for i in range(len(lines)):
            # a line containing a space is an instruction; anything else
            # is a numeric literal stored as data
            self.memory[4+i] = lines[i] if ' ' in lines[i] else eval(lines[i])
        self.program_counter = 4

    def print_mem(self, l=8):
        """Dump the first *l* memory cells and all registers."""
        print('memory', ' '.join(str(s).replace(' ','_') for s in self.memory[:l])+'. . .')
        print('registers', self.registers)

    def step(self):
        """Execute the instruction at the program counter, then advance."""
        instr = self.memory[self.program_counter]
        if self.debug:
            print("instr ", self.program_counter, instr)
        command, arg0, arg1 = instr.split(' ')
        getattr(self, command)(eval(arg0), eval(arg1))
        self.program_counter += 1

    def run(self, max_steps=1000000):
        """Step until the machine halts (PC == -1) or *max_steps* elapse.

        Added for consistency with MachineTest.py, which calls M.run().
        """
        steps = 0
        while not self.at_end() and steps < max_steps:
            self.step()
            steps += 1

    def at_end(self):
        return self.program_counter == -1

    def load(self, r, r_): self.registers[r_] = self.memory[int(self.registers[r])]
    def store(self, r, r_): self.memory[int(self.registers[r])] = self.registers[r_]
    def copy(self, r, r_): self.registers[r_] = self.registers[r]
    def set(self, r, f): self.registers[r] = f

    def branchif0(self, r, r_):
        if self.registers[r_] == 0.0: self.jump(r)

    def branchifneg(self, r, r_):
        if self.registers[r_] < 0.0: self.jump(r)

    def jump(self, r, dummy=None):
        # BUG FIX: the branch helpers call jump(r) with one argument, which
        # raised TypeError whenever a branch was taken; the dummy operand is
        # now optional (kept for two-operand instruction dispatch).
        # subtract 1 to counter end-of-cycle PC increment:
        self.program_counter = int(self.registers[r])-1

    def add (self, r, r_): self.registers[r_] += self.registers[r]
    def sub (self, r, r_): self.registers[r_] -= self.registers[r]
    def mul (self, r, r_): self.registers[r_] *= self.registers[r]
    def div (self, r, r_): self.registers[r_] /= self.registers[r]
    def mod (self, r, r_): self.registers[r_] = self.registers[r_] % self.registers[r]
'''Beware of floating point modulo: 0.0 != 3.50 % 0.10 == 0.09999999999999992 != 0.10'''
|
{"/ParserTest.py": ["/Parser.py"], "/MachineTest.py": ["/Machine.py"]}
|
8,202
|
samueltenka/LearnToHack-Compiler
|
refs/heads/master
|
/MachineTest.py
|
import Machine

def readfile(filename):
    """Return the whole contents of *filename* as a string."""
    with open(filename, 'r') as f:
        return f.read()

# Load the test program, dropping '#' line-comments such as this one.
raw_lines = readfile('MachineCode01.test').strip().split('\n')
lines = [line.split('#')[0].strip() for line in raw_lines]
print(lines)
# NOTE(review): the header values are forwarded in file order; confirm it
# matches Machine.__init__(num_addresses, num_registers).
num_registers, num_addresses = lines[0].split()
M = Machine.Machine(eval(num_registers), eval(num_addresses), debug=False)
M.print_mem()
M.load_program(lines[2:], float(input()))
M.run()
|
{"/ParserTest.py": ["/Parser.py"], "/MachineTest.py": ["/Machine.py"]}
|
8,203
|
samueltenka/LearnToHack-Compiler
|
refs/heads/master
|
/PrettyPrint.py
|
def pretty_print(string, minlen=10):
    """Format a value for fixed-width trace output.

    Numbers (or space-free numeric strings) are rounded to 4 decimal
    places; instruction strings like 'load 3 0' are compacted to
    'loa3;0'.  The result is right-padded with spaces to *minlen*
    (longer strings are returned unpadded).
    """
    # IDIOM FIX: isinstance() instead of comparing type() against a list
    if isinstance(string, (int, float)) or ' ' not in string:
        string = str(round(float(string), 4))
    else:
        # first 3 chars of the opcode + ';'-joined operands
        string = string[:3] + ';'.join(string.split(' ')[1:])
    return string + ' ' * (minlen - len(string))
|
{"/ParserTest.py": ["/Parser.py"], "/MachineTest.py": ["/Machine.py"]}
|
8,204
|
samueltenka/LearnToHack-Compiler
|
refs/heads/master
|
/NumberedTextboxTest.py
|
'''
Thanks to Robert@pytrash (see link below)
http://tk.unpythonic.net/wiki/A_Text_Widget_with_Line_Numbers
'''
import tkinter as tk
import NumberedTextbox
# module-level Tk root shared by demo() and the main-guard below
root = tk.Tk()
def demo(noOfLines):
    # Build a horizontal paned window holding the line-numbered editor widget.
    pane = tk.PanedWindow(root, orient=tk.HORIZONTAL, opaqueresize=True)
    ed = NumberedTextbox.EditorClass(root)
    pane.add(ed.frame)
    # Fill the editor with sample lines "line 3" .. "line noOfLines+2".
    s = 'line %s'
    s = '\n'.join( s%i for i in range(3, noOfLines+3) )
    ed.text.insert(tk.END, s)
    pane.pack(fill='both', expand=1)
    root.title("Example - Line Numbers For Text Widgets")
if __name__ == '__main__':
    # show 9 sample lines, then hand control to the Tk event loop
    demo(9)
    tk.mainloop()
|
{"/ParserTest.py": ["/Parser.py"], "/MachineTest.py": ["/Machine.py"]}
|
8,217
|
phihhim/acu-sdk
|
refs/heads/main
|
/setup.py
|
import setuptools

# Minimal packaging metadata for the acunetix SDK.
setuptools.setup(
    name="acunetix",
    version="0.0.1",
    packages=["acunetix"],
)
|
{"/acunetix/model.py": ["/acunetix/api_call.py"], "/acunetix/acunetix.py": ["/acunetix/api_call.py", "/acunetix/model.py"]}
|
8,218
|
phihhim/acu-sdk
|
refs/heads/main
|
/acunetix/model.py
|
from .api_call import APICall
class Target:
    """An Acunetix scan target (one website/host registered for scanning)."""

    def __init__(self, id, address, description="", criticality=10, continuous_mode=False,
                 manual_intervention=None, type=None, verification=None, status=None, scans=None):
        self.id = id
        self.address = address
        self.description = description
        self.criticality = criticality
        self.continuous_mode = continuous_mode
        self.manual_intervention = manual_intervention
        self.type = type
        self.verification = verification
        # BUG FIX: the default was a mutable list ([]) shared by every
        # instance, so one target's scans leaked into all others.
        self.scans = [] if scans is None else scans
        self.status = status

    def __repr__(self):
        return str(self.id)
class Scan:
    """An Acunetix scan configuration/run for a single target."""

    def __init__(self, id, profile, incremental=False,
                 max_scan_time=0, next_run=None, report=None, schedule=None, target=None, results=None):
        self.id = id
        self.profile = profile
        self.incremental = incremental
        self.max_scan_time = max_scan_time
        self.next_run = next_run
        self.report = report
        self.schedule = schedule
        self.target = target
        # BUG FIX: the original normalized `results` to [] but never stored
        # it, so instances had no .results attribute at all.
        self.results = [] if results is None else results

    def __repr__(self):
        return str(self.id)
class Result:
    """One result (a single run) belonging to a scan."""

    def __init__(self, id, start_date, scan, end_date=None, status=""):
        self.id = id
        self.start_date = start_date
        self.scan = scan
        self.end_date = end_date
        self.status = status

    def __repr__(self):
        return str(self.id)
class VulnDesciption:
    """Full description of one vulnerability type as returned by the API.

    (The class name keeps the original 'Desciption' spelling for
    compatibility with existing callers.)
    """

    def __init__(self, id, name, cvss2, cvss3, cvss_score, description, details,
                 highlights, impact, long_description, recommendation, references,
                 request, response_info, source, tags):
        self.id, self.name = id, name
        self.cvss2, self.cvss3, self.cvss_score = cvss2, cvss3, cvss_score
        self.description, self.details, self.highlights = description, details, highlights
        self.impact, self.long_description = impact, long_description
        self.recommendation, self.references = recommendation, references
        self.request, self.response_info = request, response_info
        self.source, self.tags = source, tags

    def __repr__(self):
        return str(self.id)
class Vulnerability:
    """A vulnerability instance found in one scan result."""

    def __init__(self, id, name, affects_url, affects_detail, confidence, criticality,
                 last_seen, severity, status, result):
        self.id, self.name = id, name
        self.affects_url, self.affects_detail = affects_url, affects_detail
        self.confidence, self.criticality = confidence, criticality
        self.last_seen, self.severity = last_seen, severity
        self.status, self.result = status, result

    def __repr__(self):
        return str(self.id)

    def detail(self, api, token):
        """Fetch and return the full VulnDesciption for this vulnerability."""
        endpoint = '/scans/{}/results/{}/vulnerabilities/{}'.format(
            self.result.scan.id, self.result.id, self.id)
        payload = APICall(api, token).get(endpoint)
        return VulnDesciption(
            payload['vt_id'], payload['vt_name'], payload['cvss2'], payload['cvss3'],
            payload['cvss_score'], payload['description'], payload['details'],
            payload['highlights'], payload['impact'], payload['long_description'],
            payload['recommendation'], payload['references'], payload['request'],
            payload['response_info'], payload['source'], payload['tags'])
class Location:
    """A node in a scan result's crawl tree."""

    def __init__(self, loc_id, loc_type, name, parent, path, source, tags, result):
        self.loc_id, self.loc_type, self.name = loc_id, loc_type, name
        self.parent, self.path = parent, path
        self.source, self.tags, self.result = source, tags, result

    def childrens(self, api, token):
        """Return this node's child locations; [] on any API failure."""
        try:
            endpoint = '/scans/{}/results/{}/crawldata/{}/children'.format(
                self.result.scan.id, self.result.id, self.loc_id)
            payload = APICall(api, token).get(endpoint)
            children = []
            for item in payload['locations']:
                children.append(Location(item['loc_id'], item['loc_type'], item['name'],
                                         None, item['path'], None, item['tags'], self.result))
            return children
        except:  # deliberate best-effort: any transport/shape error yields []
            return []
|
{"/acunetix/model.py": ["/acunetix/api_call.py"], "/acunetix/acunetix.py": ["/acunetix/api_call.py", "/acunetix/model.py"]}
|
8,219
|
phihhim/acu-sdk
|
refs/heads/main
|
/acunetix/api_call.py
|
import requests
import json
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class APICall:
    """Minimal HTTP wrapper for the Acunetix API.

    TLS verification is disabled on every request (verify=False), since
    appliances typically run with self-signed certificates.
    """

    def __init__(self, api, token):
        self.apibase = api
        self.apikey = token
        self.headers = {
            "X-Auth": self.apikey,
            "content-type": "application/json",
        }

    def _url(self, endpoint):
        """Join the API base with *endpoint* (defaulting to '/')."""
        return str("{}{}".format(self.apibase, endpoint if endpoint else "/"))

    def __send_request(self, method='get', endpoint='', data=None):
        """Issue a request and return the decoded JSON body; raises on
        transport or JSON errors."""
        do_request = getattr(requests, method)
        raw = do_request(
            self._url(endpoint),
            headers=self.headers,
            data=json.dumps(data),
            verify=False
        )
        return json.loads(raw.text)

    def get_raw(self, endpoint=""):
        """GET returning the raw Response object, or None on any error."""
        try:
            return requests.get(self._url(endpoint), headers=self.headers, verify=False)
        except:
            return None

    def post_raw(self, endpoint, data=None):
        """POST returning the raw Response object, or None on any error."""
        payload = {} if data is None else data
        try:
            return requests.post(self._url(endpoint), headers=self.headers,
                                 json=payload, allow_redirects=False, verify=False)
        except:
            return None

    def delete_raw(self, endpoint, data=None):
        """DELETE returning the raw Response object, or None on any error."""
        payload = {} if data is None else data
        try:
            return requests.delete(self._url(endpoint), headers=self.headers,
                                   json=payload, allow_redirects=False, verify=False)
        except:
            return None

    def get(self, endpoint=""):
        return self.__send_request("get", endpoint)

    def post(self, endpoint, data=None):
        return self.__send_request("post", endpoint, {} if data is None else data)

    def delete(self, endpoint, data=None):
        return self.__send_request("delete", endpoint, {} if data is None else data)
|
{"/acunetix/model.py": ["/acunetix/api_call.py"], "/acunetix/acunetix.py": ["/acunetix/api_call.py", "/acunetix/model.py"]}
|
8,220
|
phihhim/acu-sdk
|
refs/heads/main
|
/acunetix/acunetix.py
|
from .api_call import APICall
from .model import Target, Scan, Result, Vulnerability, Location
import re
import json
from pprint import pprint
class Acunetix:
    """High-level client for the Acunetix REST API.

    Wraps APICall with convenience methods for targets, scans, scan
    results, vulnerabilities and crawl locations.  Most methods swallow
    transport/shape errors and return None or an empty collection, per the
    original design.
    """

    def __init__(self, api: str, token: str):
        self.api = api
        self.token = token

    def __str__(self):
        return f'Acunetix: {self.api} token {self.token}'

    def __repr__(self):
        return f'Acunetix: {self.api} token {self.token}'

    def create_target(self, url, description=""):
        """Register one target URL; returns a Target, or None when the URL
        fails validation."""
        if not re.fullmatch(
                r"^(http://www\.|https://www\.|http://|https://)?[a-z0-9]+([\-.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?$",
                url, re.IGNORECASE):
            return None
        data = {
            "targets": [
                {
                    "address": url,
                    "description": description
                }
            ],
            "groups": []
        }
        new_call = APICall(self.api, self.token)
        response = new_call.post('/targets/add', data)
        target = response['targets'][0]
        id = target['target_id']
        address = target['address']
        criticality = target['criticality']
        description = target['description']
        type = target['type']
        return Target(id, address, description, criticality, type=type)

    def create_targets(self, list_target):
        """Register many targets (dicts with an 'address' key); invalid URLs
        are skipped.  Returns the created Target objects, [] on error."""
        r = re.compile(
            r"^(http://www\.|https://www\.|http://|https://)?[a-z0-9]+([\-.]{1}[a-z0-9]+)*\.[a-z]{2,5}(:[0-9]{1,5})?(/.*)?$",
            re.IGNORECASE)
        tmp_targets = []
        for i in list_target:
            url = str(i['address'])
            if r.match(url):
                tmp_targets.append(i)
        data = {
            "targets": tmp_targets,
            "groups": []
        }
        try:
            new_call = APICall(self.api, self.token)
            response = new_call.post('/targets/add', data)
            targets = []
            for target in response['targets']:
                id = target['target_id']
                address = target['address']
                criticality = target['criticality']
                description = target['description']
                type = target['type']
                targets.append(
                    Target(id, address, description, criticality, type=type))
            return targets
        except:
            return []

    def get_all_targets(self):
        """Return all targets as Target objects; None on error (note: other
        collection getters return [] -- kept for interface compatibility)."""
        try:
            new_call = APICall(self.api, self.token)
            response = new_call.get('/targets')
            targets = []
            for target in response['targets']:
                id = target['target_id']
                address = target['address']
                description = target['description']
                criticality = target['criticality']
                continuous_mode = target['continuous_mode']
                manual_intervention = target['manual_intervention']
                type = target['type']
                verification = target['verification']
                status = target['last_scan_session_status']
                new_target = Target(id, address, description, criticality, continuous_mode,
                                    manual_intervention, type, verification, status)
                targets.append(new_target)
            return targets
        except:
            return None

    def get_target_by_id(self, id):
        """Fetch one target by id (normalized to lowercase); None on error."""
        try:
            id = id.strip()
            id = id.lower()
            if len(id) > 255:
                return None
            new_call = APICall(self.api, self.token)
            target = new_call.get('/targets/{}'.format(id))
            id = target['target_id']
            address = target['address']
            description = target['description']
            criticality = target['criticality']
            continuous_mode = target['continuous_mode']
            manual_intervention = target['manual_intervention']
            type = target['type']
            verification = target['verification']
            return Target(id, address, description, criticality,
                          continuous_mode, manual_intervention, type, verification)
        except:
            return None

    def get_targets_by_ids(self, list_id):
        """Return the subset of all targets whose id is in *list_id*
        (ids are normalized in place)."""
        all_target = self.get_all_targets()
        for i in range(len(list_id)):
            list_id[i] = list_id[i].strip()
            list_id[i] = list_id[i].lower()
        targets = [x for x in all_target if x.id in list_id]
        return targets

    def delete_targets(self, ids):
        """Delete the given target ids (over-long ids are dropped); returns
        the raw Response or None."""
        ids = [x for x in ids if len(x) <= 255]
        data = {
            "target_id_list": ids
        }
        new_call = APICall(self.api, self.token)
        return new_call.post_raw('/targets/delete', data)

    # scan
    def create_scan(self, target, profile_id,
                    schedule=None):
        """Start a scan of *target* with the given profile; returns the new
        scan id (parsed from the Location header), or None on error."""
        if schedule is None:
            schedule = {"disable": False, "start_date": None, "time_sensitive": False}
        if len(profile_id) > 255:
            return None
        data = {
            "profile_id": profile_id,
            "incremental": False,
            "schedule": schedule,
            "target_id": target.id
        }
        try:
            new_call = APICall(self.api, self.token)
            res = new_call.post_raw('/scans', data)
            # the API reports the created resource via the Location header
            scan_id = res.headers['Location'].split('/')[-1]
            return scan_id
        except:
            return None

    def get_all_scans(self):
        """Return the raw scan dicts from /scans (NOT Scan objects); [] on
        error."""
        try:
            new_call = APICall(self.api, self.token)
            response = new_call.get('/scans')
            return response['scans']
        except:
            return []

    def get_scan_by_id(self, scan_id):
        """Fetch one scan by id as a Scan object; None on error."""
        try:
            scan_id = scan_id.strip()
            scan_id = scan_id.lower()
            if len(scan_id) > 255:
                return None
            new_call = APICall(self.api, self.token)
            scan = new_call.get('/scans/{}'.format(scan_id))
            id = scan['scan_id']
            profile = scan['profile_id']
            incremental = scan['incremental']
            max_scan_time = scan['max_scan_time']
            next_run = scan['next_run']
            report = scan['report_template_id']
            schedule = scan['schedule']
            return Scan(id, profile, incremental=incremental,
                        max_scan_time=max_scan_time, next_run=next_run, report=report, schedule=schedule)
        except:
            return None

    def get_scans_by_ids(self, list_id):
        """Return the raw scan dicts whose scan_id is in *list_id*.

        BUG FIX: get_all_scans() yields raw API dicts, so entries are
        filtered by their 'scan_id' key; the original read a nonexistent
        .id attribute and raised AttributeError.
        """
        all_scans = self.get_all_scans()
        for i in range(len(list_id)):
            list_id[i] = list_id[i].strip()
            list_id[i] = list_id[i].lower()
        scans = [x for x in all_scans if x.get('scan_id') in list_id]
        return scans

    def pause_scan(self, scan):
        new_call = APICall(self.api, self.token)
        return new_call.post_raw('/scans/{}/pause'.format(scan.id))

    def resume_scan(self, scan):
        new_call = APICall(self.api, self.token)
        return new_call.post_raw('/scans/{}/resume'.format(scan.id))

    def stop_scan(self, scan):
        new_call = APICall(self.api, self.token)
        return new_call.post_raw('/scans/{}/abort'.format(scan.id))

    def delete_scan(self, scan):
        id = scan.id
        if len(id) > 255:
            return None
        new_call = APICall(self.api, self.token)
        return new_call.delete_raw('/scans/{}'.format(id))

    # result
    def get_results_of_scan(self, scan_id):
        """Return the id of the scan's first (most recent) result."""
        new_call = APICall(self.api, self.token)
        response = new_call.get('/scans/{}/results'.format(scan_id))
        return response['results'][0]['result_id']

    # vulnerability
    def get_vulns_of_result(self, result_id, scan_id):
        """Return the raw vulnerabilities payload for a result; [] on error.

        BUG FIX: the endpoint is /scans/<scan>/results/<result>/..., but
        the original interpolated the two ids in the opposite order.
        """
        try:
            new_call = APICall(self.api, self.token)
            response = new_call.get('/scans/{}/results/{}/vulnerabilities'.format(scan_id, result_id))
            raw_vulns = response['vulnerabilities']  # validates payload shape; KeyError -> []
            return response
        except:
            return []

    def get_result_statistic(self, scan_id, result_id):
        new_call = APICall(self.api, self.token)
        return new_call.get('/scans/{}/results/{}/statistics'.format(scan_id, result_id))

    # location
    def get_root_location(self, result):
        """Return the root Location of a result's crawl tree; None on error."""
        try:
            new_call = APICall(self.api, self.token)
            response = new_call.get('/scans/{}/results/{}/crawldata/0/children'.format(result.scan.id, result.id))
            raw_location = response['locations'][0]
            loc_id = raw_location['loc_id']
            loc_type = raw_location['loc_type']
            name = raw_location['name']
            parent = None
            path = raw_location['path']
            source = None
            tags = raw_location['tags']
            return Location(loc_id, loc_type, name, parent, path, source, tags, result)
        except:
            return None
|
{"/acunetix/model.py": ["/acunetix/api_call.py"], "/acunetix/acunetix.py": ["/acunetix/api_call.py", "/acunetix/model.py"]}
|
8,228
|
marty-Wallace/FibbonacciServer
|
refs/heads/master
|
/Fibonacci/fib_server.py
|
from socketserver import ThreadingMixIn, TCPServer, BaseRequestHandler
class FibonacciThreadedTCPServer(ThreadingMixIn, TCPServer):
    """Concurrent TCP server for Fibonacci requests.

    Each instance owns a fib_dict lookup table that every
    FibonacciThreadedTCPRequestHandler thread shares for memoized
    calculations.
    """

    def __init__(self, server_address):
        # MRO resolves super() to TCPServer (ThreadingMixIn has no __init__)
        super().__init__(server_address, FibonacciThreadedTCPRequestHandler, bind_and_activate=True)
        self.fib_dict = {0: 0, 1: 1, 2: 1}
class FibonacciThreadedTCPRequestHandler(BaseRequestHandler):
    """Per-request handler; one instance is created for each connection.

    handle() parses an integer from the socket and replies with its
    Fibonacci value, computed via the server's shared lookup table.
    """

    def handle(self):
        """Read one integer request and respond with fib(n), or an error
        line for invalid input."""
        payload = self.request.recv(1024).strip()
        print('Serving new request, data=%s' % payload)
        try:
            n = int(payload)
            if n < 0:
                raise ValueError
        except ValueError:
            self.request.sendall(bytes('Must send a valid number >= 0\n', 'ascii'))
            return
        answer = self.calc_fib(self.server.fib_dict, n)
        self.request.sendall(bytes(str(answer) + '\n', 'ascii'))

    @staticmethod
    def calc_fib(fib_dict, n):
        """Extend the shared memo table up to *n* and return fib(n).

        The table is shared between threads, so this only ever appends new
        keys (a pure write), never a read-modify-write of an existing
        entry; an interleaved thread at worst recomputes the same value.

        :param fib_dict: memo dict shared between handler threads
        :param n: the Fibonacci index to compute
        :return: fib(n)
        """
        while len(fib_dict) <= n:
            nxt = len(fib_dict)
            fib_dict[nxt] = fib_dict[nxt - 1] + fib_dict[nxt - 2]
        return fib_dict[n]
# if module is imported this code won't run
if __name__ == '__main__':
    # port of 0 will request an open port from the kernel
    HOST, PORT = 'localhost', 0
    with FibonacciThreadedTCPServer((HOST, PORT)) as server:
        # report the kernel-assigned address so clients know where to connect
        ip, port = server.server_address
        print("Starting FibServer at %s:%d" % (ip, port))
        print("Waiting for fibonacci requests...")
        # blocks forever handling requests; interrupt to stop
        server.serve_forever()
|
{"/example_client.py": ["/Fibonacci/__init__.py"], "/example_server.py": ["/Fibonacci/__init__.py"], "/Fibonacci/__init__.py": ["/Fibonacci/fib_server.py", "/Fibonacci/fib_client.py"]}
|
8,229
|
marty-Wallace/FibbonacciServer
|
refs/heads/master
|
/Fibonacci/fib_client.py
|
import socket
import sys
import getopt
from threading import Thread
from random import randint
class FibClient(object):
    """Shared plumbing for the human and automated Fibonacci clients."""

    def __init__(self, ip, port):
        self.ip = ip
        self.port = port

    @staticmethod
    def receive_from_sock(sock, buffer_size):
        """Yield successive buffers read from *sock* until it closes.

        :param sock: the socket to receive data from
        :param buffer_size: bytes to request per read
        :return: yields each non-empty buffer as bytes
        """
        while True:
            chunk = sock.recv(buffer_size)
            if not chunk:
                break
            yield chunk

    @staticmethod
    def receive_all_from_sock(sock, buffer_size=2048):
        """Return the complete transmission from *sock* as one bytes object.

        :param sock: the socket to receive data from
        :param buffer_size: read size per chunk (default 2048)
        """
        return b''.join(FibClient.receive_from_sock(sock, buffer_size))

    def get_fibonacci_number(self, number):
        """Request fib(number) from the server.

        :param number: the Fibonacci index to request
        :return: the integer reply, or None on a socket/value error
        """
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((self.ip, self.port))
        reply = None
        try:
            conn.sendall(bytes(str(number), 'ascii'))
            reply = int(FibClient.receive_all_from_sock(conn))
        except (socket.error, ValueError) as err:
            print(err, file=sys.stderr)
        finally:
            conn.close()
        return reply
class AutoClient(FibClient):
    """Automated tester for the Fibonacci server.

    Spins up multiple threads, each requesting a random fib number and
    verifying the server's answer against a local computation.
    """

    def _test_fib(self, number, verbose, silent):
        """Request fib(number) and cross-check it locally.

        :param number: the fib index to request/test
        :param verbose: flag for high-detail printing
        :param silent: flag for errors-only printing
        :return: None
        """
        def local_fib(n):
            """Iteratively compute fib(n) for cross-checking the server."""
            a, b = 1, 1
            for _ in range(n - 1):
                a, b = b, a + b
            return a

        result = self.get_fibonacci_number(number)
        # server/transport errors surface as None
        if result is None:
            if verbose:
                print('Received None from server')
            return None
        if not silent:
            print('Received result %d from server for fib(%d)' % (result, number))
        local_result = local_fib(number)
        if verbose:
            print('Calculated local value to be %d for fib(%d)' % (local_result, number))
        if result != local_result:
            # reported even in silent mode: the server returned a wrong number
            print("Server returned %d for fib(%d) should have been %d" % (result, number, local_result))

    def connect(self, num_threads=15, fib_min=1, fib_max=2000, verbose=False, silent=False):
        """Launch *num_threads* concurrent clients requesting random fib
        numbers in [fib_min, fib_max] and checking the results.

        :param num_threads: concurrent clients to start (default 15)
        :param fib_min: smallest index to request (default 1)
        :param fib_max: largest index to request (default 2000)
        :param verbose: highest printing level
        :param silent: lowest printing level
        :return: None
        """
        workers = []
        for _ in range(num_threads):
            target_num = randint(fib_min, fib_max)
            if verbose:
                print('Starting thread with target number %d' % target_num)
            workers.append(Thread(target=self._test_fib, args=(target_num, verbose, silent)))
        for worker in workers:
            worker.start()
class HumanClient(FibClient):
    """Interactive client: repeatedly prompts for a fib index and prints
    the server's answer."""

    def __init__(self, ip, port):
        super().__init__(ip, port)

    def connect(self):
        """
        A loop that allows a human to repeatedly request fib numbers from the server.
        :return: None
        """
        while True:
            bad_input = True
            num = 0
            while bad_input:
                try:
                    num = int(input('Please enter which fibonacci number you would like: '))
                    if num <= 0:
                        print("Please enter a positive number. Negative fibonacci numbers are undefined.")
                    else:
                        bad_input = False
                except ValueError:
                    print("Please enter a number")
                    continue
            fib = self.get_fibonacci_number(num)
            if fib is None:
                # BUG FIX: previously referenced bare `ip`/`port`, which do
                # not exist in this scope and raised NameError whenever the
                # server call failed.
                print('Error: None returned by get_fibonacci_number(%s, %d, %d)' % (self.ip, self.port, num))
                continue
            print("Fib of %d is %d" % (num, fib))
            print()  # blank line
def usage(message=''):
    """Print the CLI help text (optionally prefixed by *message*) and exit
    the process via SystemExit.

    :param message: optional line shown before the help text
    :return: never returns
    """
    if message != '':
        print(message)
    for line in (
        'fib_client.py improper usage',
        'Usage: python fib_client.py --port=<portnumber> [options] ',
        'Options are:',
        ' -i, --ip= ip address of the fib server, defaults to localhost',
        ' -p, --port= port address of the server, required argument',
        ' -a, --auto sets that we are going to use the auto tester client rather than the human client',
        ' -t, --threads= only applies to the auto tester and it sets how many concurrent requests to make',
        ' -l, --low= sets the lowest fib number to randomly request for the auto client defaults to 1',
        ' -h, --high= sets the highest fib number to randomly request for the auto client defaults to 2000',
        ' -s, --silent sets the output level to silent for auto-testing (useful for large numbers)',
        ' -v, --verbose sets output level to verbose for auto-testing',
        ' --help requests this usage screen',
    ):
        print(line)
    exit()
def main():
    """
    Reads in opts and args from the command line and then takes the appropriate action
    to either start up the human client or the auto-tester client.
    """
    ip = '127.0.0.1' # ip address of the server
    port = -1 # port of the server must be set by args
    auto = False # flag to run auto_client over human_client
    threads = 15 # number of threads to run auto_client with
    low = 1 # lowest fib number to request with auto_client
    high = 2000 # highest fib number to request with auto_client
    silent = False # print nothing during auto_testing
    verbose = False # print everything during auto-testing
    # reads in all opts and args and sets appropriate variables
    # NOTE: -h is --high here, not help; --help is unknown to getopt and so
    # falls through to usage() via GetoptError.
    try:
        opts, args = getopt.getopt(sys.argv[1:], "i:p:at:l:h:sv",
                                   ["ip=", "port=", "auto", "threads=", "low=", "high=", "silent", "verbose"])
    except getopt.GetoptError:
        usage()
    for o, a in opts:
        # ip address
        if o in ('-i', '--ip'):
            ip = a
        # port number
        elif o in ('-p', '--port'):
            try:
                port = int(a)
            except ValueError:
                usage("Port must be a number")
        # auto client
        elif o in ('-a', '--auto'):
            auto = True
        # threads
        elif o in ('-t', '--threads'):
            try:
                threads = int(a)
            except ValueError:
                usage("Number of threads must be a number")
        # low value
        elif o in ('-l', '--low'):
            try:
                low = int(a)
                if low < 1:
                    raise ValueError
            except ValueError:
                usage("Low must be a number greater than 0")
        # high value
        elif o in ('-h', '--high'):
            try:
                high = int(a)
                if high < 1:
                    raise ValueError
            except ValueError:
                usage("High must be a number greater than 0")
        # verbose
        elif o in ('-v', '--verbose'):
            if silent:
                usage('Cannot set both verbose and silent to be true')
            verbose = True
        # silent
        elif o in ('-s', '--silent'):
            if verbose:
                usage('Cannot set both verbose and silent to be true')
            silent = True
        # any other args/opts show usage
        else:
            usage()
    # ensure port is set
    if port == -1:
        usage('The port number must be set')
    # make sure our numbers make sense, take low if they don't
    if high < low:
        high = low
    # dispatch to the chosen client; both block until done
    if auto:
        if verbose:
            print('Target server at %s:%d' % (ip, port))
            print('Starting %d threads requesting numbers between %d-%d' % (threads, low, high))
        AutoClient(ip, port).connect(num_threads=threads, fib_min=low, fib_max=high, verbose=verbose, silent=silent)
    else:
        HumanClient(ip, port).connect()
# Won't run if code is imported
if __name__ == '__main__':
    main()
|
{"/example_client.py": ["/Fibonacci/__init__.py"], "/example_server.py": ["/Fibonacci/__init__.py"], "/Fibonacci/__init__.py": ["/Fibonacci/fib_server.py", "/Fibonacci/fib_client.py"]}
|
8,230
|
marty-Wallace/FibbonacciServer
|
refs/heads/master
|
/example_client.py
|
from Fibonacci import HumanClient, AutoClient

'''
Example file showing how to use and how to test out the Fibonacci client
'''

# Where the Fibonacci server is listening.
ip = 'localhost'
port = int(input('Please enter the port number of the Fibonacci server: '))

# Flip to False to drive the server interactively instead of stress-testing it.
test_auto = True

if test_auto:
    # Automated client: 50 threads asking for Fibonacci numbers between 4000 and 5000.
    client = AutoClient(ip, port)
    client.connect(num_threads=50, fib_min=4000, fib_max=5000, verbose=False, silent=False)
else:
    client = HumanClient(ip, port)
    client.connect()
|
{"/example_client.py": ["/Fibonacci/__init__.py"], "/example_server.py": ["/Fibonacci/__init__.py"], "/Fibonacci/__init__.py": ["/Fibonacci/fib_server.py", "/Fibonacci/fib_client.py"]}
|
8,231
|
marty-Wallace/FibbonacciServer
|
refs/heads/master
|
/example_server.py
|
from Fibonacci import FibonacciThreadedTCPServer

'''
Example file showing how to use the Fibonacci server
'''

# Port 0 lets the OS pick any free port.
address = ('localhost', 0)
server = FibonacciThreadedTCPServer(address)
# Show the (host, port) actually bound so a client knows where to connect.
print(server.server_address)
server.serve_forever()
|
{"/example_client.py": ["/Fibonacci/__init__.py"], "/example_server.py": ["/Fibonacci/__init__.py"], "/Fibonacci/__init__.py": ["/Fibonacci/fib_server.py", "/Fibonacci/fib_client.py"]}
|
8,232
|
marty-Wallace/FibbonacciServer
|
refs/heads/master
|
/Fibonacci/__init__.py
|
from .fib_server import *
from .fib_client import *

# BUG FIX: __all__ must contain the *names* of the public API as strings,
# not the objects themselves — `from Fibonacci import *` iterates these
# strings, and non-string entries raise TypeError on a star-import.
__all__ = ['FibonacciThreadedTCPServer', 'AutoClient', 'HumanClient']
|
{"/example_client.py": ["/Fibonacci/__init__.py"], "/example_server.py": ["/Fibonacci/__init__.py"], "/Fibonacci/__init__.py": ["/Fibonacci/fib_server.py", "/Fibonacci/fib_client.py"]}
|
8,234
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0008_remove_book_owners.py
|
# Generated by Django 2.1.4 on 2018-12-25 13:05
from django.db import migrations


class Migration(migrations.Migration):
    """Auto-generated: drops the ``owners`` field from ``Book``."""

    dependencies = [
        ('core', '0007_auto_20181225_1256'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='book',
            name='owners',
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,235
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0003_auto_20181224_1059.py
|
# Generated by Django 2.1.4 on 2018-12-24 07:59
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: redefines ``Book.users_like`` as a blank-able M2M to the user model."""

    dependencies = [
        ('core', '0002_book_users_like'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='users_like',
            field=models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,236
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0011_auto_20181227_1502.py
|
# Generated by Django 2.1.4 on 2018-12-27 12:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Auto-generated: adds ``Author.patronymic`` and attaches Russian
    ``verbose_name`` labels to the existing Author/Book fields."""

    dependencies = [
        ('core', '0010_remove_book_quantity'),
    ]

    operations = [
        migrations.AddField(
            model_name='author',
            name='patronymic',
            field=models.CharField(blank=True, max_length=50, verbose_name='Отчество'),
        ),
        migrations.AlterField(
            model_name='author',
            name='date_of_birth',
            field=models.DateField(verbose_name='Дата рождения'),
        ),
        migrations.AlterField(
            model_name='author',
            name='first_name',
            field=models.CharField(max_length=100, verbose_name='Имя'),
        ),
        migrations.AlterField(
            model_name='author',
            name='last_name',
            field=models.CharField(blank=True, max_length=100, verbose_name='Фамилия'),
        ),
        migrations.AlterField(
            model_name='book',
            name='author',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Author', verbose_name='Автор'),
        ),
        migrations.AlterField(
            model_name='book',
            name='description',
            field=models.TextField(blank=True, verbose_name='Описание'),
        ),
        migrations.AlterField(
            model_name='book',
            name='photo',
            field=models.ImageField(blank=True, default='/static/images/default.jpg', upload_to='photo/%Y', verbose_name='Фото'),
        ),
        migrations.AlterField(
            model_name='book',
            name='title',
            field=models.CharField(max_length=100, verbose_name='Название'),
        ),
        migrations.AlterField(
            model_name='book',
            name='users_like',
            field=models.ManyToManyField(blank=True, related_name='book_liked', to=settings.AUTH_USER_MODEL, verbose_name='Лайки'),
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,237
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/models.py
|
from django.conf import settings
from django.db import models
from django.urls import reverse
# Create your models here.
class Author(models.Model):
    """A book author: name parts plus date of birth."""

    first_name = models.CharField(max_length=100, blank=False, null=False, verbose_name="Имя")
    patronymic = models.CharField(max_length=50, blank=True, verbose_name="Отчество")
    last_name = models.CharField(max_length=100, blank=True, verbose_name="Фамилия")
    date_of_birth = models.DateField(verbose_name="Дата рождения")

    def __str__(self):
        # Same "First Patronymic Last" rendering as the original format() call.
        return ' '.join((self.first_name, self.patronymic, self.last_name))
class Book(models.Model):
    """A catalogue entry: title, author, optional description/photo, likes."""

    title = models.CharField(max_length=100, blank=False, null=False, verbose_name="Название")
    author = models.ForeignKey(Author, on_delete=models.CASCADE, verbose_name="Автор")
    description = models.TextField(blank=True, verbose_name="Описание")
    photo = models.ImageField(upload_to='photo/%Y', default='/static/images/default.jpg',
                              blank=True, verbose_name="Фото")
    users_like = models.ManyToManyField(settings.AUTH_USER_MODEL, related_name='book_liked',
                                        blank=True, verbose_name="Лайки")

    def __str__(self):
        # Keeps the original's leading space in the rendered title.
        return ' ' + self.title

    def get_absolute_url(self):
        """Canonical detail page for this book."""
        return reverse('core:detail', args=[self.id])

    def get_like_url(self):
        return reverse("core:like-toggle", kwargs={"pk": self.pk})

    def get_api_like_url(self):
        return reverse("core:like-api-toggle", kwargs={"pk": self.pk})

    def get_book_url(self):
        return reverse("core:get-book", kwargs={"pk": self.pk})

    def get_api_book_url(self):
        return reverse("core:get-api-book", kwargs={"pk": self.pk})
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,238
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0010_remove_book_quantity.py
|
# Generated by Django 2.1.4 on 2018-12-25 20:31
from django.db import migrations


class Migration(migrations.Migration):
    """Auto-generated: drops the ``quantity`` field from ``Book``."""

    dependencies = [
        ('core', '0009_book_quantity'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='book',
            name='quantity',
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,239
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0007_auto_20181225_1256.py
|
# Generated by Django 2.1.4 on 2018-12-25 09:56
from django.db import migrations


class Migration(migrations.Migration):
    """Auto-generated: renames ``Book.quantity`` to ``Book.owners``."""

    dependencies = [
        ('core', '0006_auto_20181225_1256'),
    ]

    operations = [
        migrations.RenameField(
            model_name='book',
            old_name='quantity',
            new_name='owners',
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,240
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0004_auto_20181224_1100.py
|
# Generated by Django 2.1.4 on 2018-12-24 08:00
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: adds ``related_name='book_liked'`` to ``Book.users_like``."""

    dependencies = [
        ('core', '0003_auto_20181224_1059'),
    ]

    operations = [
        migrations.AlterField(
            model_name='book',
            name='users_like',
            field=models.ManyToManyField(blank=True, related_name='book_liked', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,241
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/account/models.py
|
from django.conf import settings
from django.db import models
from core.models import Book
# Create your models here.
class Profile(models.Model):
    """Per-user extension of the auth user: birth date, photo, and taken books."""

    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE,
                                verbose_name="Юзер")
    date_of_birth = models.DateField(blank=True, null=True, verbose_name="Дата рождения")
    photo = models.ImageField(upload_to='users/%Y/%m/%d/', blank=True, verbose_name="Фото")
    my_books = models.ManyToManyField(Book, related_name='book_got', blank=True,
                                      verbose_name="Книги")

    def __str__(self):
        # username is already a string; formatting it added nothing.
        return self.user.username
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,242
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/views.py
|
from account.models import Profile
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import get_object_or_404
from django.views.generic import ListView, DetailView, RedirectView, CreateView, UpdateView,DeleteView
from django.urls import reverse_lazy
from .forms import BookForm
from .models import Book
# Create your views here.
class BooksView(ListView):
    """Paginated catalogue of every book, three per page."""

    template_name = 'core/list.html'
    context_object_name = 'books'
    paginate_by = 3
    queryset = Book.objects.all()
class UsersBooks(ListView):
    """Books the current user has taken, read from their Profile."""

    template_name = 'core/users_books_list.html'
    context_object_name = 'users_books'

    def get_queryset(self):
        """Return the current user's books; an empty queryset if none.

        BUG FIX: the original indexed ``queryset[0]`` unconditionally, which
        raised IndexError for a user without a Profile, and returned None
        (breaking the template loop) when the profile had no books.
        """
        profile = Profile.objects.filter(user=self.request.user).first()
        if profile is None:
            return Book.objects.none()
        return profile.my_books.all()
class BookDetail(DetailView):
    """Render a single book's detail page."""

    template_name = 'core/detail.html'
    model = Book
class BookLikeToggle(RedirectView):
    """Flip the current user's 'like' on a book, then bounce back to its page."""

    def get_redirect_url(self, *args, **kwargs):
        pk = self.kwargs.get("pk")
        print(pk)
        book = get_object_or_404(Book, pk=pk)
        user = self.request.user
        if user.is_authenticated:
            # Remove the like if present, add it otherwise.
            if user in book.users_like.all():
                book.users_like.remove(user)
            else:
                book.users_like.add(user)
        return book.get_absolute_url()
class BookGet(LoginRequiredMixin, RedirectView):
    """Add a book to the logged-in user's profile, then redirect to the book."""

    login_url = '/account/login/'

    def get_redirect_url(self, *args, **kwargs):
        pk = self.kwargs.get("pk")
        print(pk)
        book = get_object_or_404(Book, pk=pk)
        user = self.request.user
        profile = get_object_or_404(Profile, user=user)
        print(user, profile, book)
        # Only add once; re-requesting an owned book is a no-op.
        if user.is_authenticated and book not in profile.my_books.all():
            profile.my_books.add(book)
        return book.get_absolute_url()
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import authentication, permissions
class BookGetAPIToggle(APIView, LoginRequiredMixin):
    """API endpoint toggling whether the current user holds a book.

    GET adds the book to the user's profile if absent, removes it otherwise,
    and returns ``{"updated": bool, "get": bool}`` where ``get`` reports the
    state after the toggle.

    NOTE(review): with APIView listed first in the MRO, LoginRequiredMixin's
    dispatch never runs; access control is effectively enforced only by
    ``permission_classes`` — confirm that is intended.
    """

    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)
    login_url = '/account/login/'

    def get(self, request, pk=None, format=None):
        # BUG FIX: removed a duplicated `Get = True` assignment, the unused
        # `url_` local, and stray debug prints from the original.
        pk = self.kwargs.get("pk")
        book = get_object_or_404(Book, pk=pk)
        user = self.request.user
        profile = get_object_or_404(Profile, user=user)
        updated = False
        got = False
        if user.is_authenticated:
            if book not in profile.my_books.all():
                got = True
                profile.my_books.add(book)
            else:
                got = False
                profile.my_books.remove(book)
            updated = True
        data = {
            "updated": updated,
            "get": got
        }
        return Response(data)
class BookLikeAPIToggle(APIView):
    """API endpoint toggling the current user's 'like' on a book."""

    authentication_classes = (authentication.SessionAuthentication,)
    permission_classes = (permissions.IsAuthenticated,)

    def get(self, request, pk=None, format=None):
        # pk = self.kwargs.get("pk")
        book = get_object_or_404(Book, pk=pk)
        url_ = book.get_absolute_url()
        user = self.request.user
        updated = False
        liked = False
        if user.is_authenticated:
            # liked reflects the state *after* the toggle.
            liked = user not in book.users_like.all()
            if liked:
                book.users_like.add(user)
            else:
                book.users_like.remove(user)
            updated = True
        return Response({
            "updated": updated,
            "liked": liked
        })
class BookCreateView(CreateView):
    """Form view for adding a new book."""

    template_name = 'core/book_create.html'
    form_class = BookForm
    queryset = Book.objects.all()

    def form_valid(self, form):
        # Log submitted data before delegating to the default save/redirect.
        print(form.cleaned_data)
        return super().form_valid(form)
class BookUpdateView(UpdateView):
    """Edit an existing book using the same form/template as creation."""

    template_name = 'core/book_create.html'
    form_class = BookForm
    queryset = Book.objects.all()

    def get_object(self):
        return get_object_or_404(Book, pk=self.kwargs.get("pk"))

    def form_valid(self, form):
        # Log submitted data before delegating to the default save/redirect.
        print(form.cleaned_data)
        return super().form_valid(form)
class BookDeleteView(DeleteView):
    """Confirm-and-delete view for a book; returns to the list afterwards."""

    model = Book
    template_name = 'core/list.html'
    success_url = reverse_lazy('core:list')
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,243
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/urls.py
|
from django.urls import path

from . import views

app_name = 'core'

urlpatterns = [
    path('', views.BooksView.as_view(), name='list'),
    path('<int:pk>/like', views.BookLikeToggle.as_view(), name='like-toggle'),
    path('api/<int:pk>/like', views.BookLikeAPIToggle.as_view(), name='like-api-toggle'),
    path('api/<int:pk>/get', views.BookGetAPIToggle.as_view(), name='get-api-book'),
    path('<int:pk>/', views.BookDetail.as_view(), name='detail'),
    path('<int:pk>/get/', views.BookGet.as_view(), name='get-book'),
    path('my_books/', views.UsersBooks.as_view(), name='get-mybook'),
    path('create/', views.BookCreateView.as_view(), name='book-create'),
    # BUG FIX: this route previously reused name='book-create', which made
    # reverse('core:book-create') ambiguous (the later registration shadows
    # the create route). The update route now has its own name.
    path('<int:pk>/update/', views.BookUpdateView.as_view(), name='book-update'),
    path('<int:pk>/delete/', views.BookDeleteView.as_view(), name='book-delete')
]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,244
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/account/migrations/0003_auto_20181227_1502.py
|
# Generated by Django 2.1.4 on 2018-12-27 12:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Auto-generated: attaches Russian ``verbose_name`` labels to Profile fields."""

    dependencies = [
        ('account', '0002_profile_my_books'),
    ]

    operations = [
        migrations.AlterField(
            model_name='profile',
            name='date_of_birth',
            field=models.DateField(blank=True, null=True, verbose_name='Дата рождения'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='my_books',
            field=models.ManyToManyField(blank=True, related_name='book_got', to='core.Book', verbose_name='Книги'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='photo',
            field=models.ImageField(blank=True, upload_to='users/%Y/%m/%d/', verbose_name='Фото'),
        ),
        migrations.AlterField(
            model_name='profile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Юзер'),
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,245
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/account/migrations/0002_profile_my_books.py
|
# Generated by Django 2.1.4 on 2018-12-25 10:40
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: adds the ``Profile.my_books`` M2M to ``core.Book``."""

    dependencies = [
        ('core', '0007_auto_20181225_1256'),
        ('account', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='profile',
            name='my_books',
            field=models.ManyToManyField(blank=True, related_name='book_got', to='core.Book'),
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,246
|
Alexxxtentancion/django-library-service
|
refs/heads/master
|
/BookShop/core/migrations/0006_auto_20181225_1256.py
|
# Generated by Django 2.1.4 on 2018-12-25 09:56
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):
    """Auto-generated: replaces the scalar ``Book.quantity`` with an M2M of
    the same name to the user model (related_name='book_got')."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0005_book_quantity'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='book',
            name='quantity',
        ),
        migrations.AddField(
            model_name='book',
            name='quantity',
            field=models.ManyToManyField(blank=True, related_name='book_got', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
{"/BookShop/core/views.py": ["/BookShop/core/models.py"]}
|
8,251
|
eekhait/1008_Project
|
refs/heads/master
|
/lrt_adj.py
|
import pandas as pd
import csv
import math
import main_graph as m_graph
lrtData = pd.read_csv('Punggol_LRT_Routing.csv', sep=',', header=None)
def round_up(n, decimals=0):
    """Round *n* up (toward +infinity) to *decimals* decimal places.

    Any fractional remainder, however small, pushes the value to the next
    representable step — e.g. 1.01 rounds to 2.0 at 0 decimals.
    """
    scale = 10 ** decimals
    return math.ceil(n * scale) / scale
def bfs_route(graph, start, end):
    """Breadth-first search over *graph* (an adjacency dict).

    Returns the first (fewest-hops) path from *start* to *end* as a list of
    node codes, or None when the queue drains without reaching *end*.
    """
    pending = [[start]]  # queue of partial paths, explored shortest-first
    while pending:
        candidate = pending.pop(0)
        tail = candidate[-1]
        if tail == end:
            return candidate
        # Extend the path by every neighbour of its last node.
        for neighbour in graph.get(tail, []):
            pending.append(candidate + [neighbour])
    # Queue exhausted: no path (implicit None, as in the original).
def cal_distance(adj_list_val, result):
    """Sum the edge weights along *result* (a list of station codes).

    *adj_list_val* maps each code to a list of one-entry dicts, each mapping
    a neighbouring code to its distance (stored as a string).
    """
    total = 0
    # Walk consecutive pairs of the path.
    for here, there in zip(result, result[1:]):
        for edge in adj_list_val[here]:
            if there in edge:
                total += int(edge[there])
    return total
def take_lrt(start_node, end_node):
    """Plan an LRT journey between two points in Punggol.

    Each endpoint may be an LRT station *name* or a postal code; postal codes
    are resolved to the nearest station and a walking leg is prepended /
    appended. Returns ``[minutes, route]`` where *route* is the ordered list
    of node codes. Timing assumes 12.5 m/s train speed, 30 s per stop, and a
    flat 5-minute wait for multi-hop journeys.
    """
    start_node = str(start_node)  # Store the start name
    end_node = str(end_node)  # Store the end name
    walk_start_node = []  # Store the array from the TAKE_WALK Function FROM START POINT TO LRT
    walk_end_node = []  # Store the array from the TAKE_WALK Function FROM LRT TO END POINT
    lrt_name = []  # Store the LRT NAME
    lrt_code = []  # Store the LRT CODE
    adj_list = {}  # Store the Adj list
    adj_list_val = {}  # Store the Adj list with value
    with open('Punggol_LRT_Routing.csv', 'r') as csv_file:
        reader = csv.reader(csv_file)
        first = True
        for row in reader:
            if (first == True):
                # Header row: the loop just flips the flag (no data is read).
                for i in range(len(row)):
                    first = False
            else:
                # for i in range(0, len(row)):
                # # key_value = {row[0]: row[2].split()} # This is to create the Adj
                lrt_name.append(row[1])  # Append the LRT NAME into the lrt_name
                lrt_code.append(row[0])  # Append the LRT CODE into the lrt_code
                keys = row[2].split(", ")
                values = row[3].split(", ")
                add_value = []
                for i in range(len(keys)):
                    add_value.append({keys[i]: values[i]})  # Create a list of dict e.g. 'PE1' : 1010
                adj_list_val[row[0]] = add_value  # Append the linked code into the list
                adj_list[row[0]] = row[2].split(", ")  # Append the linked code into the list
    # Check if start node is mrt or blocks
    if start_node in lrt_name:
        # Convert the LRT NAME INTO LRT CODE
        for i in range(len(adj_list)):
            if lrt_name[i] == start_node:
                start_node = lrt_code[i]  # Convert start_node Into LRT CODE
                break
    else:
        temp_string_start_node = start_node  # Store the postal code
        start_node = m_graph.get_nearest_lrt(start_node)  # To Store the nearest LRT station with the postal code
        walk_start_node = m_graph.take_walk(temp_string_start_node, start_node)  # Store the walking node from Start of Postal Code to LRT
    if end_node in lrt_name:
        for i in range(len(adj_list)):
            if lrt_name[i] == end_node:
                end_node = lrt_code[i]  # Convert end_node Into LRT CODE
                break
    else:
        temp_string_end_node = end_node  # Store the postal code
        end_node = m_graph.get_nearest_lrt(end_node)  # To Store the nearest LRT station with the postal code
        walk_end_node = m_graph.take_walk(end_node, temp_string_end_node)  # Store the walking node from LRT To the End of Postal code
    # if start and end are connected
    if m_graph.is_adjacent_lrt(adj_list, start_node, end_node):
        result = [start_node, end_node]
        # average SG MRT 45km/h == 12.5m/s
        # Calculate the timing Second in minutes,
        distance = cal_distance(adj_list_val, result)
        timing = round_up((distance / 12.5) / 60)
        # Check if there any array
        if len(walk_start_node) != 0:
            del result[0]  # To delete the first array as is duplicated
            result = walk_start_node[1] + result  # Combine the Walking array with result (LRT)
            timing = walk_start_node[0] + timing  # Combine the Time required
        if len(walk_end_node) != 0:
            del result[-1]  # To delete the last array as is duplicated
            result = result + walk_end_node[1]  # Combine the result (LRT) with Walking array
        return [int(timing), result]
    else:
        result = (bfs_route(adj_list, start_node, end_node))
        # average SG MRT 45km/h == 12.5m/s
        # Calculate the timing Second in minutes,
        distance = cal_distance(adj_list_val, result)
        timing = round_up((distance / 12.5) / 60)
        # average timing stop at each mrt is 30second == 0.5
        mrt_stopping = 0.5 * int(len(result) - 1)
        # Calculate the timing Second in minutes,
        timing = round_up((distance / 12.5) / 60) + mrt_stopping
        # Add another 5 min flat waiting for the train to arrival
        timing = timing + 5
        if len(walk_start_node) != 0:
            del result[0]  # To delete the first array as is duplicated
            result = walk_start_node[1] + result  # Combine the Walking array with result (LRT)
            timing = walk_start_node[0] + timing  # Combine the Time required
        if len(walk_end_node) != 0:
            del result[-1]  # To delete the last array as is duplicated
            result = result + walk_end_node[1]  # Combine the result (LRT) with Walking array
        # print([int(timing), result])
        return [int(timing), result]
# print("LRT ROUTE: ", take_lrt("828858","65009"))
|
{"/lrt_adj.py": ["/main_graph.py"], "/map.py": ["/main_graph.py", "/bus_adj.py", "/lrt_adj.py"], "/bus_adj.py": ["/main_graph.py"]}
|
8,252
|
eekhait/1008_Project
|
refs/heads/master
|
/main_graph.py
|
import pandas as pd
# THIS PART CONCERNS WITH:
# THE CSV FILES AND EXTRACTING DATA FROM THEM
# --------------------------------- #
# Indexes of the Complete_Punggol_Graph.csv file:
# Columns: 0-Code, 1-Name, 2-Type, 3-Latitude, 4-Longitude, 5-Buses, 6-ConnectedWalks, 7-ConnectedDistances
# Columns: 8-1197 to refer to nodes (+7 difference from corresponding node in rows)
# Rows: 1-1190 to refer to nodes
# --------------------------------- #
# How to use pandas dataframe (treat it as a 2D array/excel file):
# mainGraph.at[row,column]
# --------------------------------- #
# Loaded once at import time; every helper below reads this dataframe.
mainGraph = pd.read_csv('Complete_Punggol_Graph.csv', sep=',', header=None)
mainGraph[0] = mainGraph[0].apply(str) # converts column to be string-only (rather than int+str)
# Binary-search bounds over the node rows (row 0 is the header row).
startIndex = 1
endIndex = len(mainGraph.index)
# --------------------------------- #
def get_distance_to_from(point_a, point_b):
    """Distance between two node codes, read from the precomputed matrix.

    A node's *column* sits +7 to the right of its *row* index (see the header
    notes above). Values appear to be metres — the tabulator script stores
    km * 1000 — TODO confirm.
    """
    index_a = get_node_index(point_a)+7
    index_b = get_node_index(point_b)
    return int(mainGraph.at[index_b, index_a])
def get_long_lat(target):
    """Return ``[column 3, column 4]`` for *target*, rounded to 4 d.p.

    NOTE(review): per the header comment, column 3 is Latitude and 4 is
    Longitude, so this actually returns [lat, long] despite its name —
    verify against callers before renaming.
    """
    index = get_node_index(target)
    return [round(float(mainGraph.at[index, 3]), 4), round(float(mainGraph.at[index, 4]), 4)]
def get_lat_long(target):
    """Return ``[column 4, column 3]`` for *target*, rounded to 4 d.p.

    NOTE(review): mirror image of get_long_lat(); per the header comment
    this returns [long, lat] — the two names look swapped relative to the
    data, so confirm which map library consumes which order.
    """
    index = get_node_index(target)
    return [round(float(mainGraph.at[index, 4]), 4), round(float(mainGraph.at[index, 3]), 4)]
def get_node_index(target):
    """Binary-search the code column (column 0) for *target*.

    Returns the dataframe row index of the node, or -1 when absent.
    Comparison is on *string* values, so the CSV's code column must be
    sorted lexicographically — TODO confirm the file upholds that.
    """
    # Start location codes are from index 1 to 1190
    # print(type(target),target)
    target=str(target)
    low = startIndex
    high = endIndex
    mid = (startIndex+endIndex)//2
    while target != str(mainGraph.at[mid, 0]):
        if target < str(mainGraph.at[mid, 0]): # if target is in smaller half
            high = mid
            # Midpoint stopped moving: target is not present.
            if mid == (low+high)//2:
                return -1
            mid = (low+high)//2
        elif target > str(mainGraph.at[mid, 0]): # if target is in larger half
            low = mid
            if mid == (low+high)//2:
                return -1
            mid = (low+high)//2
    return mid
def get_nearest_bus_stops(target, distance):
    # TODO: unimplemented stub — intended to list bus stops within *distance* of *target*.
    pass
def get_nearest_lrt(target):
    """Return the code of the LRT station nearest to *target*.

    A 3-character target is treated as already being a station code and is
    returned unchanged. Otherwise the last 14 matrix columns (the LRT
    stations) are scanned; only stations closer than 3000 qualify, so a
    target with no station in range yields "" — callers should beware.
    """
    if len(target) == 3:
        return target
    else:
        index = get_node_index(target)
        node = ""
        distance = 3000  # running minimum, doubling as the search-radius cap
        for i in range(endIndex+7-14, endIndex+7): # start and end of LRT columns in csv
            if int(mainGraph.at[index, i]) < distance:
                node = mainGraph.at[0, i]
                distance = int(mainGraph.at[index, i])
        return str(node)
def get_adjacent_walks(start_node):
    """List of node codes walkably connected to *start_node* (matrix column 6)."""
    row = get_node_index(start_node)
    return mainGraph.at[row, 6].split(', ')
def is_adjacent_walk(start_node, end_node):
    """True if *end_node* is directly walkable from *start_node*.

    The connected-nodes cell (matrix column 6) is a ', '-separated string
    of node codes.
    """
    start_index = get_node_index(start_node)
    connected_nodes = mainGraph.at[start_index, 6].split(', ')
    # Direct membership test replaces the original if/else returning True/False.
    return end_node in connected_nodes
def is_adjacent_bus(start_node, end_node):
    # TODO: unimplemented stub — intended to mirror is_adjacent_walk() for bus routes.
    pass
def is_adjacent_lrt(adj_list, start_node, end_node):
    """Return 1 if *start_node* and *end_node* are directly connected LRT
    stations in *adj_list* (code -> list of neighbouring codes), else 0.

    BUG FIX: the original fell off the end (implicitly returning None) when
    *start_node* was not a key at all; callers only use the result as a
    boolean, so returning 0 there is equivalent but explicit.
    """
    if start_node in adj_list and end_node in adj_list[start_node]:
        return 1
    return 0
# ----------------------------------
# THIS PART CONCERNS WITH ALGORITHMS:
# ----------------------------------
class AStarStack:
    """One candidate walking route: a stack of node codes plus its cost.

    ``total_distance`` accumulates real edge distances along the path;
    ``distance_to_end`` holds the heuristic distance from the top node to
    the goal, as supplied by the caller of push().
    """

    def __init__(self):
        self.top = -1             # index of the top element (-1 == empty)
        self.data = []            # node codes, bottom to top
        self.total_distance = 0   # sum of edge distances along the path
        self.distance_to_end = 0  # heuristic distance from the top node to the target

    def show_stack(self):
        """Debug helper: print the path from bottom to top."""
        print("start")
        for i in self.data:
            print(i)
        print("end")

    def push(self, node, val):
        """Append *node*; *val* is its heuristic distance to the target."""
        self.top += 1
        self.data.append(node)
        if self.top > 0: # if there is at least two elements...
            self.total_distance += get_distance_to_from(self.data[self.top], self.data[self.top-1])
        self.distance_to_end = val

    # def pop(self):
    #     if self.top > -1:
    #         node = self.data[self.top]
    #         if self.top > 0:
    #             self.total_distance -= get_distance_to_from(self.data[self.top], self.data[self.top-1])
    #         del self.data[self.top]
    #         self.top -= 1
    #         return node

    def is_empty(self):
        if self.top < 0:
            return True
        else:
            return False

    def peek(self):
        """Top node, or None when empty."""
        if not self.is_empty():
            return self.data[self.top]

    def peek_distance(self):
        """Accumulated path distance, or None when empty."""
        if not self.is_empty():
            return self.total_distance

    def copy_from(self, a_stack):
        """Re-push every node of *a_stack* onto this stack (recomputes distances)."""
        for x in a_stack.data:
            self.push(x, a_stack.distance_to_end)
class AStarQueue:
    """Priority queue of AStarStack candidates ordered by ``distance_to_end``.

    Implemented as two parallel lists (``data`` and ``distances_to_target``)
    kept in ascending heuristic order via binary insertion.
    """

    def __init__(self):
        self.top = -1                  # index of the last element (-1 == empty)
        self.data = []                 # stacks, cheapest heuristic first
        self.distances_to_target = []  # matching heuristic values, same order

    def enqueue(self, node):
        """Insert *node* (an AStarStack) keeping both lists sorted.

        NOTE(review): the binary-insert branch searches between index 1 and
        ``top`` and relies on the two branches above to cover the front/back
        cases — verify its placement for very small queues.
        """
        temp = node.distance_to_end
        front = 1
        back = self.top
        mid = (front+back)//2
        if self.top > -1:
            # print(str(temp) + " " + str(self.distances_to_target[0]))
            if temp < self.distances_to_target[0]: # add to the front
                self.data.insert(0, node)
                self.distances_to_target.insert(0, temp)
            elif temp > self.distances_to_target[self.top]: # add to the back
                self.data.append(node)
                self.distances_to_target.append(temp)
            else:
                # Binary search for an insertion point strictly inside the list.
                while temp != self.distances_to_target[mid] and front != mid:
                    if temp < self.distances_to_target[mid]:
                        back = mid
                        mid = (front + back) // 2
                    elif temp > self.distances_to_target[mid]:
                        front = mid
                        mid = (front + back) // 2
                # if temp == self.distances_to_target[mid]
                self.data.insert(mid, node)
                self.distances_to_target.insert(mid, temp)
        elif self.top < 0:
            # First element: no ordering needed.
            self.data.append(node)
            self.distances_to_target.append(temp)
        self.top += 1
        # print("[", end='')
        # for i in self.data:
        #     print(str(i.distance_to_end) + ", ", end='')
        # print("]")
        # print(str(self.distances_to_target))

    def dequeue(self):
        """Pop and return the stack with the smallest heuristic distance."""
        if self.top > -1:
            temp = self.data[0]
            del self.data[0]
            del self.distances_to_target[0]
            self.top -= 1
            return temp

    def is_empty(self):
        if self.top < 0:
            return True
        else:
            return False

    def peek(self):
        """Front element without removing it (None when empty)."""
        if not self.is_empty():
            return self.data[0]
def take_walk(start_node, end_node):
    """Best-first walking route between two node codes.

    Returns ``[minutes, path]`` where *minutes* assumes a 5 km/h walking
    speed and *path* is the list of node codes. Directly connected nodes
    short-circuit to a flat 1-minute, two-node answer.
    """
    start_node = str(start_node)
    end_node = str(end_node)
    # if start and end are connected
    if is_adjacent_walk(start_node, end_node):
        return [1, [start_node, end_node]]
    else: # this part begins like the word ladder
        # initialization of queue and first stack (of just start node)
        # also initialization of visited nodes
        star_queue = AStarQueue()
        star_stack = AStarStack()
        star_stack.push(start_node, get_distance_to_from(start_node, end_node))
        star_queue.enqueue(star_stack)
        visited_nodes = {}   # node -> cheapest total_distance seen so far
        counter = 0
        # while end node is not reached
        while star_queue.data[0].peek() != end_node:
            # dequeue the first stack
            temp_stack = star_queue.dequeue()
            mid_node = temp_stack.peek()
            # add all adjacent nodes to mid_node in separate stacks
            # move stacks to queue
            for i in get_adjacent_walks(mid_node):
                # create new stack with each adjacent node
                temper_stack = AStarStack()
                temper_stack.copy_from(temp_stack)
                temper_stack.push(str(i), get_distance_to_from(str(i), end_node))
                # temper_stack.show_stack()
                # if node is visited before
                if i in visited_nodes:
                    # only enqueue if new path/stack is shorter than old path
                    if temper_stack.total_distance < visited_nodes[i]:
                        star_queue.enqueue(temper_stack)
                        visited_nodes[i] = temper_stack.total_distance
                # if node is new, enqueue normally
                elif i not in visited_nodes:
                    # enqueue the stack
                    star_queue.enqueue(temper_stack)
                    visited_nodes[i] = temper_stack.total_distance
        # return assumes a walking speed of 5km/h. first element is time taken in minutes
        return [round(star_queue.data[0].total_distance/5000*60), star_queue.data[0].data]
|
{"/lrt_adj.py": ["/main_graph.py"], "/map.py": ["/main_graph.py", "/bus_adj.py", "/lrt_adj.py"], "/bus_adj.py": ["/main_graph.py"]}
|
8,253
|
eekhait/1008_Project
|
refs/heads/master
|
/oldfiles/DistanceTabulator.py
|
import pandas as pd
from geopy.distance import geodesic

# Take note:
# Latitude is vertical (How 'north-south' a place is)
# Longitude is horizontal (How 'east-west' a place is)

# One-off script: fills the pairwise-distance matrix and the "nearby nodes"
# columns, then writes Complete_Punggol_Graph.csv consumed by main_graph.py.
distance_table = pd.read_csv("Punggol Coordinates.csv", header=None)

# to mimic cell selection in pandas dataframe, for iteration
# iloc[horizontal, vertical]
# iloc[1123, 1119] <--- most bottom right value
# 0,0 0,1 0,2 0,3 0,4...
# postal_code Street Latitude Longitude 820136...

# FOR EVERY ROW... (Skips header row)
for i in range(1, 1191): # 1191
    # FOR EVERY COLUMN... (Skips first 4 already-populated columns)
    nearbyNodes = []
    nearbyDistances = []
    for j in range(7, 1197):
        # Assign distance between nodes in meters (geodesic km * 1000, 3 d.p.)
        distance = 1000 * round(geodesic((distance_table.iloc[i, 2], distance_table.iloc[i, 3]),
                                         (distance_table.iloc[j-6, 2], distance_table.iloc[j-6, 3])).km, 3)
        distance_table.iloc[i, j] = distance
        # Anything within 180 m (but not the node itself) counts as "nearby".
        if 0 < distance < 180:
            nearbyNodes.append(str(distance_table.iloc[j-6, 0]))
            nearbyDistances.append(int(distance))
    distance_table.iloc[i, 5] = str(nearbyNodes)
    distance_table.iloc[i, 6] = str(nearbyDistances)
    # Prints progress of population per row
    print(round(i / 1191 * 100, 2))
# Create new csv
distance_table.to_csv('Complete_Punggol_Graph.csv', header=False, index=False)
|
{"/lrt_adj.py": ["/main_graph.py"], "/map.py": ["/main_graph.py", "/bus_adj.py", "/lrt_adj.py"], "/bus_adj.py": ["/main_graph.py"]}
|
8,254
|
eekhait/1008_Project
|
refs/heads/master
|
/oldfiles/LRT_Algorithm.py
|
from collections import deque
import csv
class Graph:
    """Thin adjacency-list wrapper: maps each node to its neighbour entries."""

    def __init__(self, lists):
        self.lists = lists

    def get_neighbours(self, i):
        """Return the adjacency entry stored for node *i*."""
        return self.lists[i]
import csv  # NOTE(review): csv is already imported above; this repeat is harmless

# One accumulator list per station; each ends up holding that station's CSV row.
cove_lrt, meridian_lrt, coraledge_lrt, riviera_lrt, kadaloor_lrt, oasis_lrt, damai_lrt, punggol_lrt, samkee_lrt, tecklee_lrt, punggolpoint_lrt, samudera_lrt, nibong_lrt, sumang_lrt, sooteck_lrt = ([] for i in range(15))

# Rows 1-15 of LRT.csv are appended, one per station, in fixed order.
with open('LRT.csv') as file:
    lrt = list(csv.reader(file))
    cove_lrt.append(lrt[1])
    meridian_lrt.append(lrt[2])
    coraledge_lrt.append(lrt[3])
    riviera_lrt.append(lrt[4])
    kadaloor_lrt.append(lrt[5])
    oasis_lrt.append(lrt[6])
    damai_lrt.append(lrt[7])
    punggol_lrt.append(lrt[8])
    samkee_lrt.append(lrt[9])
    tecklee_lrt.append(lrt[10])
    punggolpoint_lrt.append(lrt[11])
    samudera_lrt.append(lrt[12])
    nibong_lrt.append(lrt[13])
    sumang_lrt.append(lrt[14])
    sooteck_lrt.append(lrt[15])
#heuristic function for all nodes
def heuristic(self, n):
    """A* heuristic: estimated remaining cost from station *n*.

    Every station carries the same weight of 1, so the heuristic is
    uninformative and the search degenerates to uniform-cost behaviour.
    """
    station_weights = {
        'Punggol_MRT': 1,
        'SamKee_LRT': 1,
        'SooTeck_LRT': 1,
        'PunggolPoint_LRT': 1,
        'Samudera_LRT': 1,
        'Sumang_LRT': 1,
        'Nibong_LRT': 1,
        'Damai_LRT': 1,
        'Kadaloor_LRT': 1,
        'Riviera_LRT': 1,
        'CoralEdge_LRT': 1,
        'Meridian_LRT': 1,
        'Oasis_LRT': 1,
        'Cove_LRT': 1,
    }
    return station_weights[n]
def astar_algorithm(self, start_node, stop_node):
    """A* search from *start_node* to *stop_node*.

    Returns the list of nodes on the found path (start to stop), or None
    (after printing a message) when no path exists.  Uses self.heuristic
    as h(n) and edge weights from self.get_neighbours as g(n) increments.
    """
    # open_list: visited nodes whose neighbours are not all inspected yet.
    # closed_list: visited nodes whose neighbours have all been inspected.
    open_list = {start_node}
    closed_list = set()
    # cdist holds the best-known distance from start_node to each node;
    # nodes absent from the dict are conceptually at +infinity.
    cdist = {start_node: 0}
    # parents maps each node to its predecessor on the best-known path;
    # the start node is its own parent (sentinel for path reconstruction).
    parents = {start_node: start_node}
    while open_list:
        # Pick the open node minimising f(n) = g(n) + h(n).
        # (Replaces the manual scan that used `n == None` and a stray `;`.)
        n = min(open_list, key=lambda node: cdist[node] + self.heuristic(node))
        if n == stop_node:
            # Goal reached: walk the parent chain back to the start.
            reconst_path = []
            while parents[n] != n:
                reconst_path.append(n)
                n = parents[n]
            reconst_path.append(start_node)
            reconst_path.reverse()
            print('Path found: {}'.format(reconst_path))
            return reconst_path
        for (m, weight) in self.get_neighbours(n):
            if m not in open_list and m not in closed_list:
                # First time m is seen: record tentative distance and parent.
                open_list.add(m)
                parents[m] = n
                cdist[m] = cdist[n] + weight
            elif cdist[m] > cdist[n] + weight:
                # Shorter route to m found through n: update, and re-open m
                # if it had already been closed.
                cdist[m] = cdist[n] + weight
                parents[m] = n
                if m in closed_list:
                    closed_list.remove(m)
                    open_list.add(m)
        # All neighbours of n inspected: move it from open to closed.
        open_list.remove(n)
        closed_list.add(n)
    print('Path does not exist!')
    return None
# Adjacency list for the Punggol LRT network: station -> [(neighbour, km), ...].
# Edges are listed in both directions, so the graph is undirected.
lists = {
    # LRT on Punggol East
    'Punggol_MRT': [('SamKee_LRT', 0.589), ('SooTeck_LRT', 0.605), ('Damai_LRT', 0.690), ('Cove_LRT', 0.763)],
    'SamKee_LRT': [('Punggol_MRT', 0.589), ('PunggolPoint_LRT', 0.815)],
    'PunggolPoint_LRT': [('SamKee_LRT', 0.815), ('Samudera_LRT', 0.513)],
    'Samudera_LRT': [('PunggolPoint_LRT', 0.513), ('Nibong_LRT', 0.493)],
    'Nibong_LRT': [('Samudera_LRT', 0.493), ('Sumang_LRT', 0.429)],
    'Sumang_LRT': [('Nibong_LRT', 0.429), ('SooTeck_LRT', 0.478)],
    'SooTeck_LRT': [('Sumang_LRT', 0.478), ('Punggol_MRT', 0.605)],
    # LRT on Punggol West
    'Damai_LRT': [('Punggol_MRT', 0.690), ('Oasis_LRT', 0.563)],
    'Oasis_LRT': [('Damai_LRT', 0.563), ('Kadaloor_LRT', 0.515)],
    'Kadaloor_LRT': [('Oasis_LRT', 0.515), ('Riviera_LRT', 0.558)],
    'Riviera_LRT': [('Kadaloor_LRT', 0.558), ('CoralEdge_LRT', 0.386)],
    'CoralEdge_LRT': [('Riviera_LRT', 0.386), ('Meridian_LRT', 0.530)],
    'Meridian_LRT': [('CoralEdge_LRT', 0.530), ('Cove_LRT', 0.443)],
    'Cove_LRT': [('Meridian_LRT', 0.443), ('Punggol_MRT', 0.763)],
}
# Demo run: prints the found path between the two stations.
graph1 = Graph(lists)
graph1.astar_algorithm('Samudera_LRT', 'Riviera_LRT')
|
{"/lrt_adj.py": ["/main_graph.py"], "/map.py": ["/main_graph.py", "/bus_adj.py", "/lrt_adj.py"], "/bus_adj.py": ["/main_graph.py"]}
|
8,255
|
eekhait/1008_Project
|
refs/heads/master
|
/map.py
|
import folium
import io
import sys
import main_graph as m_graph
import bus_adj as bus_graph
import lrt_adj as lrt_graph
import pandas as pd
from PyQt5 import QtWidgets, QtWebEngineWidgets
import csv
# ----------------------------------
# THIS PART CONCERNS WITH UI
# EVERYTHING COMES TOGETHER HERE
# ----------------------------------
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
# Create map object, set default location, map theme & zoom
m = folium.Map(location=[1.4046357, 103.9090000], zoom_start=14.5, prefer_canvas=True)
# Global tooltip, hover info
tooltip = 'Click For More Info'
# ASKS FOR INPUT/OUTPUT HERE, EVERYTHING TAKEN IN AS STRING (Irvyn)
def check(start):
    """Look up *start* in Complete_Punggol_Graph.csv by code (col 0) or
    name (col 1).

    NOTE(review): appends every matching row into the enclosing-scope
    `location`/`name` lists (reset each iteration of the main loop below)
    rather than returning fresh lists — the returned tuple aliases them.
    A query matching several rows appends several entries; the caller's
    `len(location) != 2` check then rejects the input.
    """
    with open('Complete_Punggol_Graph.csv', 'rt') as f:
        reader = csv.reader(f, delimiter=',')
        for row in reader:
            if start == row[0] or start == row[1]:
                location.append(row[0])
                name.append(row[1])
    return location, name
def confirmation(msg):
    """Prompt with *msg* until the user answers Y or N; return the answer."""
    while True:
        reply = input(msg).upper()
        if reply in ('Y', 'N'):
            return reply
        print('Not a valid input, please try again')
def transportation(tp):
    """Prompt with *tp* until the user enters L, B, W or M; return the choice."""
    valid_modes = ('L', 'B', 'W', 'M')
    while True:
        choice = input(tp).upper()
        if choice in valid_modes:
            return choice
        print('Not a valid input, please try again')
def show_walks():
    """Render every walkable edge of the main graph and save walks.html."""
    # Used to create a file to show graph of connectivity
    # This one is just nodes that are of walkable distance
    marked = []
    for i in range(1, len(m_graph.mainGraph[0])):
        for j in list(m_graph.mainGraph.at[i, 6].split(", ")):
            # print(m_graph.mainGraph[0][i], j)
            # NOTE(review): the membership test compares [name, name] pairs,
            # but `marked` stores coordinate pairs (coords_to_add), so the
            # dedup check can never match — confirm intent.
            if [m_graph.mainGraph[0][i], j] not in marked and [j, m_graph.mainGraph[0][i]] not in marked:
                coords_to_add = [m_graph.get_long_lat(m_graph.mainGraph[0][i]), m_graph.get_long_lat(j)]
                # print(coords_to_add)
                marked.append(coords_to_add)
                folium.PolyLine(coords_to_add, color="grey", opacity=0.5, weight=0.5).add_to(m)
    m.save("walks.html")
def show_lrts():
    """Render LRT stations, their connected blocks, and save lrts.html."""
    # Used to create a file to show graph of connectivity
    # This one is just the LRTs, and what blocks are 'connected' to them
    marked = []
    # Buildings and their closest LRTs (last 14 rows appear to be the LRT
    # stations themselves, hence the -14 — TODO confirm against the CSV).
    for i in range(1, len(m_graph.mainGraph[0])-14):
        closest_lrt = m_graph.get_nearest_lrt(m_graph.mainGraph[0][i])
        # NOTE(review): dedup compares name pairs but stores coordinate
        # pairs, so this check never matches — confirm intent.
        if [m_graph.mainGraph[0][i], closest_lrt] not in marked and [closest_lrt, m_graph.mainGraph[0][i]] not in marked:
            coords_to_add = [m_graph.get_long_lat(m_graph.mainGraph[0][i]), m_graph.get_long_lat(closest_lrt)]
            marked.append(coords_to_add)
            folium.PolyLine(coords_to_add, color="grey", opacity=1, weight=1).add_to(m)
    # Markers for LRTs
    marked = []
    marked2 = []
    with open('Punggol_LRT_Routing.csv', 'rt') as lrt:
        reader = csv.reader(lrt, delimiter=',')
        next(reader)  # skip header row
        for row in reader:
            for i in row[2].split(", "): # connected nodes
                if [row, i] not in marked and [i, row] not in marked:
                    # print(row[0], i)
                    coords_to_add = [m_graph.get_long_lat(row[0]), m_graph.get_long_lat(i)]
                    marked.append(coords_to_add)
                    folium.PolyLine(coords_to_add, color="purple").add_to(m)
                    # NOTE(review): checks the pair but appends only the
                    # first coordinate to marked2 — confirm intent.
                    if coords_to_add not in marked2:
                        folium.Marker(coords_to_add[0],
                                      icon=folium.Icon(color="purple", icon="train", prefix='fa'),
                                      popup=i, tooltip=row[0]).add_to(m)
                        marked2.append(coords_to_add[0])
    # Edges between LRTs
    marked = []
    m.save("lrts.html")
def show_buses():
    """Render every bus stop and stop-to-stop edge, then save buses.html."""
    b_graph = pd.read_csv('Punggol_Bus_Routing_Type2.csv', sep=',')
    b_graph["ServiceNo"] = b_graph["ServiceNo"].apply(str) # converts column to be string-only (rather than int+str)
    b_graph["NextStop"] = b_graph["NextStop"].apply(str) # converts column to be string-only (rather than int+str)
    marked = []
    for i in range(0,len(b_graph["ServiceNo"])):
        longlats = [m_graph.get_long_lat(b_graph.at[i, "BusStopCode"]), m_graph.get_long_lat(b_graph.at[i, "NextStop"])]
        # add marker (latlong)
        # NOTE(review): the check compares the BusStopCode but `marked`
        # stores the row index `i`, so duplicate stops are re-drawn —
        # confirm intent.
        if b_graph.at[i, "BusStopCode"] not in marked:
            folium.Marker(m_graph.get_long_lat(b_graph.at[i, "BusStopCode"]),
                          icon=folium.Icon(color="green", icon="bus", prefix='fa'),popup="",tooltip="").add_to(m)
            marked.append(i)
        # add edge (longlat)
        folium.PolyLine(longlats, color="green", weight=2, opacity=0.75).add_to(m)
    m.save("buses.html")
# Optional graph-visualisation passes (disabled by default).
# show_walks()
# show_lrts()
# show_buses()
print("\nWelcome to Punggol Pathfinder")
print("Valid inputs are: \033[1m Postal codes, bus stop numbers, train station names, train station codes. \033[0m")
# Main interactive loop: prompts until a journey is confirmed and routed,
# then breaks out so the map can be plotted below.
while True:
    name = []
    # User start and end code will be stored in here
    location = []
    # User choosen mode will stored in here
    mode = []
    result_path = []
    # Prompt user for start and destination point
    start = input("\nWhere are you coming from?\n")
    end = input("Where is your destination?\n")
    # check() appends matches into the shared `location`/`name` lists above.
    check(start)
    check(end)
    # Calls function to check if input is valid by comparing with CSV
    if len(location) != 2:
        print("Location not valid, please try again\n")
        continue
    else:
        # Friendly names when the CSV provides them, else echo raw input.
        sp = name[0]
        ep = name[1]
        if sp:
            print("Start location: ", sp)
        else:
            print("Start location: ", start)
        if ep:
            print("Destination: ", ep)
        else:
            print("Destination: ", end)
        answer = confirmation("\nConfirm start location and destination? [Y/N] \n")
        if answer == 'N':
            print("Let\'s try again")
        elif answer == 'Y':
            mode = transportation("Select mode of transport: LRT (L), Bus (B), Walk (W), or Mixed (M)\n")
            if mode == 'L':
                # Call Lrt algorithm here
                result_path = lrt_graph.take_lrt(location[0], location[1])
                print("Time taken:", result_path[0], "mins")
                print("Take LRT from")
                for i in range(0, len(result_path[1])):
                    print(result_path[1][i])
                    if len(result_path[1]) - 1 != i:
                        print("to")
            elif mode == 'B':
                # Call Bus algorithm here
                result_path = bus_graph.route_finder(location[0], location[1])
                print("Time taken:", result_path[0], "mins")
                print("From")
                for i in range(0, len(result_path[1])):
                    print(result_path[1][i])
                    # result_path[2][i] flags whether this hop is by bus.
                    if (result_path[2][i]) == True:
                        print("Take bus", result_path[3], "to ")
                    else:
                        if len(result_path[1]) - 1 != i:
                            print("Walk to")
            elif mode == 'W':
                # Call Walk algorithm here
                result_path = m_graph.take_walk(location[0], location[1])
                print("Time taken:", result_path[0], "mins")
                print("Walk from")
                for i in range(0, len(result_path[1])):
                    print(result_path[1][i])
                    if len(result_path[1]) - 1 != i:
                        print("to")
            elif mode == 'M':
                # Call Mixed algorithm here
                print("Option not implemented. Please try again with a different options")
                sys.exit()
            break
# (khai)
# THIS PART IS WHERE THE MAP GETS POPULATED WITH NODES AND EDGES ---------------------------------------------
# Adding of markers and edges for Single Transport Routes
def singleTransportPlot(paths, markerColor, lineColor, markerIcon):
    """Draw one marker per node in *paths* and a single polyline through them.

    NOTE(review): tooltips read the enclosing-scope `result_path`, so this
    only renders correctly when *paths* is `result_path[1]` — confirm
    before reusing elsewhere.
    """
    marker_coords = []
    edge_coords = []
    for i in paths:
        # this loop creates a list of coordinates to add markers/nodes with
        marker_coords.append(m_graph.get_lat_long(i))
        edge_coords.append(m_graph.get_long_lat(i))
    for i in range(0, len(marker_coords)):
        # Indices swapped: folium.Marker expects [lat, lon].
        folium.Marker([marker_coords[i][1], marker_coords[i][0]],
                      icon=folium.Icon(color=markerColor, icon=markerIcon, prefix='fa'), popup=i,
                      tooltip=result_path[1][i]).add_to(m)
    folium.PolyLine(edge_coords, color=lineColor).add_to(m)
# Set icon for different transportation types
def iconMaker(length):
    """Map a node-name length to its Font Awesome icon.

    3 chars -> LRT station ("train"), 5 -> bus stop ("bus"),
    6 -> building; any other length yields None, as before.
    """
    return {3: "train", 5: "bus", 6: "building"}.get(length)
# Set color based on transportation type
def setColor(length):
    """Map a node-name length to its marker/line colour.

    3 chars -> LRT ("purple"), 5 -> bus ("green"), 6 -> building ("gray");
    any other length yields None, as before.
    """
    return {3: "purple", 5: "green", 6: "gray"}.get(length)
# Set route based on different transport
def routePlotting(MOT, paths):
    """Plot the computed route on the map for the chosen mode of transport.

    MOT is 'L', 'B', 'W' or 'M'; *paths* is the result tuple from the
    corresponding routing module (paths[1] is the node sequence, and for
    buses paths[2] flags which hops are by bus).  Edges between same-kind
    nodes get the transport colour; edges crossing a transport change are
    drawn grey (L/B) or colour-alternated via changes_Indicator (M).
    """
    changes_Indicator = 0
    if (MOT == "L"):
        marker_coords = []
        edge_coords = []
        for i in range(0, len(paths[1])):
            marker_coords.append(m_graph.get_lat_long(paths[1][i]))
            current_node = paths[1][i]
            if i+1 < len(paths[1]):
                next_node = paths[1][i+1]
                edge_coords.append(m_graph.get_long_lat(current_node))
                # Name length 3 marks an LRT station (see iconMaker/setColor).
                if len(current_node) == 3 and len(next_node) == 3:
                    folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                    edge_coords.append(m_graph.get_long_lat(next_node))
                    folium.PolyLine(edge_coords, color="purple").add_to(m)
                    edge_coords = []
                else:
                    # Walking segment at either end of the LRT leg.
                    folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                    edge_coords.append(m_graph.get_long_lat(next_node))
                    folium.PolyLine(edge_coords, color="grey").add_to(m)
                    edge_coords = []
    elif (MOT == "B"):
        marker_coords = []
        edge_coords = []
        for i in range(0, len(paths[1])):
            marker_coords.append(m_graph.get_lat_long(paths[1][i]))
            current_node = paths[1][i]
            if i+1 < len(paths[1]):
                next_node = paths[1][i+1]
                edge_coords.append(m_graph.get_long_lat(current_node))
                # Name length 5 marks a bus stop code.
                if len(current_node) == 5 and len(next_node) == 5:
                    # paths[2][i] is True when this hop is ridden on the bus.
                    if paths[2][i] == True:
                        folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                        edge_coords.append(m_graph.get_long_lat(next_node))
                        folium.PolyLine(edge_coords, color="green").add_to(m)
                        edge_coords = []
                    else:
                        folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                        edge_coords.append(m_graph.get_long_lat(next_node))
                        folium.PolyLine(edge_coords, color="grey").add_to(m)
                        edge_coords = []
                else:
                    folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color=setColor(len(current_node)),icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                    edge_coords.append(m_graph.get_long_lat(next_node))
                    folium.PolyLine(edge_coords, color="grey").add_to(m)
                    edge_coords = []
    elif (MOT == "W"):
        singleTransportPlot(paths[1], "gray", "grey", "building")
    elif (MOT == "M"):
        marker_coords = []
        edge_coords = []
        changes_Indicator = 0
        for i in range(0, len(paths[1])):
            marker_coords.append(m_graph.get_lat_long(paths[1][i]))
            current_node = paths[1][i]
            if i+1 < len(paths[1]):
                next_node = paths[1][i+1]
                edge_coords.append(m_graph.get_long_lat(current_node))
                if len(current_node) == len(next_node):
                    folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color="darkred",icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                    edge_coords.append(m_graph.get_long_lat(next_node))
                    folium.PolyLine(edge_coords, color=setColor(len(current_node))).add_to(m)
                    edge_coords = []
                elif len(current_node) != len(next_node):
                    # Transport change: changes_Indicator alternates which
                    # side's colour paints the crossing edge.
                    if changes_Indicator == 1:
                        folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color="darkred",icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                        edge_coords.append(m_graph.get_long_lat(next_node))
                        folium.PolyLine(edge_coords, color=setColor(len(next_node))).add_to(m)
                        edge_coords = []
                        changes_Indicator -= 1
                    else:
                        folium.Marker([marker_coords[i][1], marker_coords[i][0]], icon=folium.Icon(color="darkred",icon=iconMaker(len(current_node)), prefix='fa'), popup=i, tooltip=result_path[1][i]).add_to(m)
                        edge_coords.append(m_graph.get_long_lat(next_node))
                        folium.PolyLine(edge_coords, color=setColor(len(current_node))).add_to(m)
                        edge_coords = []
                        changes_Indicator +=1
# Call Set routes and pass in mode of transport and routes
# Sample Input: [Coming From: PE1], [Coming From: ]
routePlotting(mode, result_path)
# Initialization of the map: render folium HTML into an in-memory buffer
# and display it in a Qt WebEngine window.
data = io.BytesIO() # creates a temporary 'container' for html code
m.save(data, close_file=False) # folium html code is saved inside data variable
w = QtWebEngineWidgets.QWebEngineView() # then the rest of the code is the map running
w.setHtml(data.getvalue().decode())
w.resize(840, 680)
w.show()
sys.exit(app.exec_())  # hand control to the Qt event loop until the window closes
|
{"/lrt_adj.py": ["/main_graph.py"], "/map.py": ["/main_graph.py", "/bus_adj.py", "/lrt_adj.py"], "/bus_adj.py": ["/main_graph.py"]}
|
8,256
|
eekhait/1008_Project
|
refs/heads/master
|
/bus_adj.py
|
import pandas as pd
import main_graph as m_graph
import csv
import sys
import math
import numpy as np
# Module-level data for bus routing.
# Columns: 0-Code, ....... pending
busData = pd.read_csv('Punggol_Bus_Routing.csv', sep=',', header=None)
# Columns: 0-Code, ....... pending
busData2 = pd.read_csv('Punggol_Bus_Routing_Type2.csv', sep=',',header=None)
# Precomputed node graphs (codes, coordinates, nearby nodes/distances).
punggol = pd.read_csv('Complete_Punggol_Graph.csv',sep=',',header=None)
punggol1 = pd.read_csv('Punggol_complete_graph2.csv',sep=',',header=None)
# Presumably metres per minute (50 km/h) — TODO confirm units.
bus_speed = 50000/60
bus_waiting_time = 5  # minutes added to every bus journey estimate
'''
Test Cases
start = "65141"
end = "65339"
new_start = "828858"
new_end = "821266"
'''
def busStopCode1(data):
    """Boolean mask over `punggol` rows whose code column (0) equals *data*."""
    return punggol[0] == data
def busStopCode2(data):
    """Boolean mask over `punggol1` rows whose code column (0) equals *data*."""
    return punggol1[0] == data
def connected(data):
    """Return the codes of nodes near *data* (within 200, plus *data* itself
    when it looks like a 5-digit bus stop code).

    Looks the node up in `punggol` first, falling back to `punggol1`;
    columns 6/7 hold stringified lists of nearby node codes and distances.
    Exits the process when the node has no recorded neighbours.
    """
    connected1 = punggol[busStopCode1(data)]
    if connected1.empty is True:
        connected1 = punggol1[busStopCode2(data)]
    hg = []
    # Split the "a, b, c" strings into one column per neighbour.
    test = pd.DataFrame(connected1[6].str.split(',').tolist())
    test1 = pd.DataFrame((connected1[7].str.split(',').tolist()))
    if test.empty == True:
        print("no such route For Buses")
        sys.exit()
    if test1.empty == True:
        print ("no such route For buses")
        sys.exit()
    ht =[]
    # 5 characters == bus stop code; include the stop itself in the result.
    if len(data) == 5:
        ht.append(int(data))
    # print(int(test1[0].values))
    try:
        niii = max(test1.columns.values)
    except ValueError:
        niii = (test1.columns.values)
    # NOTE(review): int(test1[k].values) assumes a single matched row;
    # multiple matches would raise — confirm the CSVs have unique codes.
    for i in test.iterrows():
        for k in range (0, niii):
            if int(test1[k].values) <=200:
                #for connected nodes and distance
                hg.append(((int(test[k].values)),(int(test1[k].values))))
                #just for connected nodes
                ht.append(int((test[k].values)))
    return ht
# For finding starting bus Stop( See csv for column 1 and compare to check for bus stop code)
def busStopCode(data):
    """Boolean mask over `busData2` rows whose start-stop column (1) equals *data*."""
    return busData2[1] == data
# For finding starting bus Stop( See csv for column 2 and compare to check for bus stop code)
def endStopCode(data):
    """Boolean mask over `busData2` rows whose end-stop column (2) equals *data*."""
    return busData2[2] == data
def busNoInserter(data):
    """Boolean mask over `busData2` rows whose service-number column (0) equals *data*."""
    return busData2[0] == data
#For finding the starting point of the bus
def busStopCode_startfinder(data):
    """Return the busData2 row-slices whose start stop matches a code in *data*.

    Each element of the returned list is a non-empty DataFrame of the rows
    matching one candidate code; codes with no match contribute nothing.
    (Cleanup: removed the unused `isa` variable and the index-based loop.)
    """
    matches = []
    for code in data:
        rows = busData2[busStopCode(str(code))]
        if not rows.empty:
            matches.append(rows)
    return matches
#For findin the ending point of the bus
def busStopCode_endfinder(data):
    """Return the busData2 row-slices whose end stop matches a code in *data*.

    Mirror of busStopCode_startfinder but keyed on the end-stop column.
    (Cleanup: removed the unused `isa` variable and the index-based loop.)
    """
    matches = []
    for code in data:
        rows = busData2[endStopCode(str(code))]
        if not rows.empty:
            matches.append(rows)
    return matches
# Checking the routes taken by the buses to see if there is a route to the ending bus stop.
def take_bus(start_node, end_node,data):
    """Check whether bus service *data* links *start_node* to *end_node*.

    Returns (service, stop_sequence, est_minutes) on success, else None.
    The time estimate is distance / bus_speed + bus_waiting_time plus the
    row-index gap (lol1 - lol) — presumably one minute per intermediate
    stop; confirm against the CSV layout.
    """
    # Rows of this service that touch either endpoint.
    bus_route = (busNoInserter(data)) & ((busStopCode(start_node) | endStopCode(end_node)))
    asd =[]
    asd.append(start_node)
    bus_distance = 0
    lol = np.int64(0)
    lol1 = np.int64(0)
    #bus_route = (bus_route[0]) >= 1 & (bus_route[0] <=3)
    route = busData2[bus_route]
    # Need at least two matched rows (boarding and alighting).
    if len(route) < 2:
        pass
    else:
        if route.empty == True:
            pass
        else:
            lol = route.index.values[0]
            try:
                lol1= route.index.values[1]
            except IndexError:
                lol1 = lol
            # Accumulate distance (col 3) and stops (col 2) row by row;
            # col 6 appears to be a direction/trip marker — rows from
            # different trips are skipped.
            for i in range (lol,lol1+1):
                if busData2.at[lol,6] != busData2.at[lol1,6]:
                    pass
                else:
                    bus_distance += int(busData2.at[i,3])
                    asd.append(busData2.at[i,2])
    if len(asd) < 2:
        asd = []
        return None
    return (data,asd, math.ceil(bus_distance/bus_speed + bus_waiting_time + (lol1-lol)))
#For appending all the routes that could be taken and return the one with the least time
def route_finder(new_start, new_end):
    """Find the quickest bus route between *new_start* and *new_end*.

    Tries every service that passes a stop near either endpoint, picks the
    fastest via take_bus, then stitches walking legs onto both ends using
    m_graph.take_walk.  Returns [total_minutes, node_sequence, bus_flags,
    service_number]; exits the process when no common service exists.
    """
    starting = busStopCode_startfinder(connected(new_start))
    ending = busStopCode_endfinder(connected(new_end))
    str1 = ' '
    str2 = ' '
    k = []
    n = []
    for i in range (0,len(starting)):
        bus_to_take = starting[i][0].values
        asd = (starting[i][1].values)
        #bus_to_take , indices = np.unique(asd,return_counts=True)
        for l in bus_to_take:
            try:
                # Unique start/end stop codes for this candidate service.
                a ,indices= np.unique((starting[i][1].values),return_counts=True)
                b, indices = np.unique((ending[i][2].values),return_counts= True)
                str1 = str1.join(a)
                str2 = str2.join(b)
                # NOTE(review): take_bus is called three times with the
                # same arguments here — could be computed once.
                if take_bus(str1,str2,l) is None:
                    pass
                else:
                    p = list(take_bus(str1,str2,l))
                    n.append((take_bus(str1,str2,l))[2])
                    k.append(p)
            except IndexError:
                "Do Nothing"
    df = pd.DataFrame(k)
    if df.empty == True:
        print("No common bus nearby start and end points. Please restart with another option. ")
        sys.exit()
    # Keep only the fastest candidate(s); column 2 holds the minutes.
    route = df[2] == min(n)
    optimised_route = df[route]
    # NOTE(review): chained assignment on a filtered frame — pandas may
    # warn (SettingWithCopy); swaps the time and service-number columns.
    optimised_route[0], optimised_route[2] = optimised_route[2], optimised_route[0]
    pop = optimised_route.head(1)
    first_route = []
    lol = pd.DataFrame(pop[1].tolist())
    # Walking leg from the true start to the boarding stop.
    starting_walk = m_graph.take_walk(new_start,lol[0].values[0])
    lemon =[]
    if ((starting_walk[0]) == 0):
        pass
    else:
        first_route=starting_walk[1]
        first_route.pop(len(first_route)-1)
        for i in range(1,len(starting_walk)):
            lemon.append(False)
    # Append the bus stops; lemon flags each hop as bus (True) or walk.
    for i in range (0,len(lol)):
        for l in lol:
            first_route.append((lol[l][i]))
            if l == 0:
                pass
            else:
                lemon.append(True)
    length = max(lol)
    Last_Point = lol[length].values[0]
    # Walking leg from the alighting stop to the true destination.
    ending_walk = m_graph.take_walk(Last_Point, new_end)
    if len(ending_walk) <= 2:
        end_route = ending_walk[1]
        # print(end_route)
        first_route.append(end_route[0])
        end_route.pop(0)
        first_route.append(end_route[0])
        lemon.append(False)
    else:
        new = np.array(ending_walk[1])
        counter = 1
        for i in range(1, len(new)):
            first_route.append(new[counter])
            lemon.append(False)
            counter = counter + 1
    lemon.append(False)
    k = []
    # all route here
    for i, l in optimised_route.iterrows():
        k.append((l[0], l[1], l[2]))
    route = []
    test1 = pop
    m = test1.index.values[0]
    route.append(test1[0][m]) # time taken is fine
    route[0] += starting_walk[0]
    route[0] += ending_walk[0]
    # print("first_route:", first_route)
    route.append(first_route)
    route.append(lemon) # lemon is fine
    # print("")
    route.append(test1[2][m]) # bus number is fine
    return (route)
# print("BUS ROUTE: ", route_finder("828858","65009"))
|
{"/lrt_adj.py": ["/main_graph.py"], "/map.py": ["/main_graph.py", "/bus_adj.py", "/lrt_adj.py"], "/bus_adj.py": ["/main_graph.py"]}
|
8,266
|
owenvvv/Steam_helper
|
refs/heads/master
|
/steam-scraper/test.py
|
from scrapy.loader.processors import Compose, Join, MapCompose, TakeFirst
import pandas as pd
"""
pipi = Compose(lambda x: x[0], str.upper)
print(pipi(['iss', 'nus', 'mtech', 'ebac']))
pipi = MapCompose(lambda x: x[0], str.upper)
print(pipi(['iss', 'nus', 'mtech', 'ebac']))
"""
# Load the scraped Steam IDs and report how many are unique.
# NOTE(review): absolute Windows path — breaks on any other machine;
# consider a path relative to the repository.
steam_id = pd.read_csv("D:\\NUS BA\\class\\nlp\\Project\\steam-scraper-master\\steam\\spiders\\steam_id.csv", header=None)
steam_id = list(steam_id.iloc[:,0])
print(len(steam_id))
print(len(list(set(steam_id))))  # duplicates collapse here
#steam_id .to_csv("steam_id.csv",header=False,index=False)
|
{"/main.py": ["/intention.py"], "/intention.py": ["/slotfiller.py", "/recommendegine.py"]}
|
8,267
|
owenvvv/Steam_helper
|
refs/heads/master
|
/recommendegine.py
|
import pickle as pk
import pandas as pd
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
from scipy.spatial.distance import cosine
import torch
# Domain stopwords: English list plus words too generic for game reviews.
mystopwords = stopwords.words("English") + ['game', 'play', 'steam']
WNlemma = nltk.WordNetLemmatizer()
# POS tags kept by pre_process: nouns and cardinal numbers.
nn = ['NN', 'NNS', 'NNP', 'NNPS', 'CD']
# BERT encoder used for description similarity (eval mode: inference only).
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
model.eval()
# Precomputed data files.
# NOTE(review): pk.load(open(...)) leaves file handles unclosed.
Doc2vec = pk.load(open('./data/des2vec.pkl', 'rb'))
Aspect = pd.read_csv('./data/Ratewithaspect.csv', index_col=0)
Aspect = Aspect.reset_index()
TagSmall = pd.read_csv('./data/Tagsmall.csv')
Datasmall = pd.read_csv('./data/steam_small.csv', index_col=0)
descrip1 = pk.load(open('./data/short_descrip.pkl', 'rb'))
keywords = pd.read_excel('./data/keywords.xlsx')
# Aspect name -> list of trigger keywords (non-null column entries).
keywords_class = {'Gameplay': list(keywords[keywords['Gameplay'].isnull() == False]['Gameplay']),
                  'Market': list(keywords[keywords['Market'].isnull() == False]['Market']),
                  'Narrative': list(keywords[keywords['Narrative'].isnull() == False]['Narrative']),
                  'Social': list(keywords[keywords['Social'].isnull() == False]['Social']),
                  'Graphics': list(keywords[keywords['Graphics'].isnull() == False]['Graphics']),
                  'Technical': list(keywords[keywords['Technical'].isnull() == False]['Technical']),
                  'Audio': list(keywords[keywords['Audio'].isnull() == False]['Audio']),
                  'Content': list(keywords[keywords['Content'].isnull() == False]['Content'])}
Tagnames = []
# Share of positive ratings, used as the default ranking score.
Datasmall['avgscore'] = Datasmall.apply(
    lambda row: row.positive_ratings / (row.positive_ratings + row.negative_ratings), axis=1)
applist = Datasmall['appid']
# Human-readable tag names ("co_op" -> "co op") for substring matching.
for tag in list(TagSmall.columns):
    Tagnames.append(tag.replace('_', ' '))
def recommend(query, tags):
    """Recommend up to five game appids for *query* plus slot values *tags*.

    *tags* is the entity dict from slotfiller.extract ('genre', 'characters'
    keys are used).  Returns (appid_list, status) where status[0] encodes
    the strategy used: 1 = tag match ranked by avgscore, 2 = tag match
    ranked by the detected review aspect, 3 = keyword/description fallback.
    """
    query = query.lower()
    #print(query)
    # Which review aspects (Gameplay, Audio, ...) the query mentions.
    selectaspect = []
    for key in keywords_class.keys():
        for word in keywords_class[key]:
            if word.lower() in query.split(' '):
                selectaspect.append(key)
                print(key)
    # Fold slot values back into the query text so tag matching sees them.
    genre = tags.get('genre')
    for g in genre:
        query=query + ' '+ str(g)
    characters = tags.get('characters')
    for c in characters:
        query = query + ' '+ str(c)
    print(query)
    selecttag = []
    # NOTE(review): the loop variable shadows the *tags* parameter; the
    # parameter is not used after this point, so behaviour is unaffected.
    for tags in Tagnames:
        if tags in query:
            selecttag.append(tags)
            print(tags)
    status = []
    # Intersect the app list with every matched tag (votes > 5).
    finalids = applist
    if len(selecttag) > 0:
        for tag in selecttag:
            finalids = TagSmall[(TagSmall[tag.replace(' ', '_')] > 5) & (TagSmall['appid'].isin(finalids))]['appid']
    else:
        finalids = []
    # 1 dont have aspect
    # 2 have aspect
    # 3 dont match
    if len(finalids) > 5:
        if len(selectaspect) == 0:
            status.append(1)
            status.append(selecttag[0])
            return list(
                Datasmall[Datasmall['appid'].isin(finalids)].sort_values('avgscore', ascending=False)['appid'][
                0:5]), status
        else:
            status.append(2)
            status.append(selecttag[0])
            status.append(selectaspect[0])
            return list(
                Aspect[Aspect['gameid'].isin(finalids)].sort_values(selectaspect[0], ascending=False)['gameid'][
                0:5]), status
    else:
        # Too few tag matches: fall back to keyword counting, then to
        # BERT description similarity.
        status.append(3)
        gameids = recomend_by_keyword(demand=query, dataframe=descrip1, n=5)
        if gameids!= '':
            return gameids,status
        return list(recomend_by_description(demand=query, dataframe=Doc2vec, n=5)), status
def recomend_by_description(demand, dataframe, n):
    """Rank games by BERT embedding similarity between *demand* and stored
    description vectors; return the top-*n* appids.

    NOTE(review): scipy's `cosine` is a distance, yet results are sorted
    ascending=False — this ranks by LARGEST distance; confirm intent.
    Also mutates *dataframe* in place (adds 'cos', re-sorts).
    """
    print('use similar result')
    # Standard BERT input framing; truncate to the 512-token limit.
    marked_text = "[CLS] " + demand + " [SEP]"
    tokenized_text = tokenizer.tokenize(marked_text)
    if len(tokenized_text) > 512:
        tokenized_text = tokenized_text[:512]
    indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
    segments_ids = [1] * len(tokenized_text)
    tokens_tensor = torch.tensor([indexed_tokens])
    segments_tensors = torch.tensor([segments_ids])
    with torch.no_grad():
        encoded_layers, _ = model(tokens_tensor, segments_tensors)
    # Sentence embedding = mean of last-layer token vectors.
    token_vecs = encoded_layers[11][0]
    sentence_embedding = torch.mean(token_vecs, dim=0)
    cos = []
    for i in range(len(dataframe)):
        tmp = cosine(sentence_embedding, dataframe.iloc[i][1])
        cos.append(tmp)
    dataframe['cos'] = cos
    dataframe.sort_values(by=['cos'], inplace=True, ascending=False, )
    return dataframe[:n]['appid'].values
def pre_process(text):
    """Tokenize *text* into lemmatized, lowercased noun/number tokens with
    domain stopwords removed.

    Returns [] when tokenization fails (e.g. non-string input) so callers
    always receive a list.  (The original returned '' on failure, which is
    equivalent for the existing caller — list(set('')) == [] — but mixed
    return types.)
    """
    try:
        tokens = nltk.word_tokenize(text)
        # Keep only nouns and cardinal numbers (POS tags in module-level `nn`).
        tokens = [t[0] for t in pos_tag(tokens) if t[1] in nn]
        tokens = [WNlemma.lemmatize(t.lower()) for t in tokens]
        return [t for t in tokens if t not in mystopwords]
    except Exception:
        return []
def recomend_by_keyword(demand, dataframe, n):
    """Rank games by how often the query's keywords appear in their short
    descriptions; return the top-*n* appids, or '' when matches are too thin.

    NOTE(review): mutates the shared *dataframe* in place (adds 'nums',
    re-sorts) and the final check reads row index n (the (n+1)-th row) —
    i.e. it requires MORE than n rows with hits; confirm that's intended.
    """
    demand = list(set(pre_process(demand)))
    nums = []
    for i in range(len(dataframe)):
        num = 0
        # Count occurrences of every query keyword in this description.
        for j in range(len(demand)):
            num += dataframe.iloc[i][1].count(demand[j])
        nums.append(num)
    dataframe['nums'] = nums
    dataframe.sort_values(by=['nums'], ascending=False, inplace=True)
    if dataframe.iloc[n]['nums'] != 0:
        return list(dataframe[:n]['appid'])
    else:
        return ''
|
{"/main.py": ["/intention.py"], "/intention.py": ["/slotfiller.py", "/recommendegine.py"]}
|
8,268
|
owenvvv/Steam_helper
|
refs/heads/master
|
/main.py
|
import json
from flask import Flask, request,render_template
from geventwebsocket.handler import WebSocketHandler
from gevent.pywsgi import WSGIServer
import intention
helper_session = []
app = Flask(__name__)
@app.route('/msg')
def msg():
    """WebSocket endpoint: relay chat messages to the intention engine.

    Receives one message at a time, gets the bot reply (plus any session
    updates) from intention.response, and sends the reply list back as
    JSON {"msg": [...]}.  Session state is the module-level list.
    """
    global helper_session
    # gevent-websocket exposes the socket via the WSGI environ.
    user_socker = request.environ.get('wsgi.websocket')
    # Handle messages until the client disconnects.
    while 1:
        msg = user_socker.receive()
        result={}
        result['message']=msg
        print(msg)
        r_text, new_session = intention.response(result, helper_session)
        # If only one sentence return, change it into a list.
        r_text_return=[]
        if not isinstance(r_text, list):
            r_text_return.append(r_text)
        else:
            r_text_return=r_text
        helper_session.extend(new_session)
        # Packed in a dict
        res = {"msg" : r_text_return}
        # Sent to client
        user_socker.send(json.dumps(res))
if __name__ == '__main__':
    # WebSocket-capable WSGI server on localhost:5000.
    http_server = WSGIServer(('127.0.0.1', 5000), app, handler_class=WebSocketHandler)
    # Start Listening:
    http_server.serve_forever()
|
{"/main.py": ["/intention.py"], "/intention.py": ["/slotfiller.py", "/recommendegine.py"]}
|
8,269
|
owenvvv/Steam_helper
|
refs/heads/master
|
/slotfiller.py
|
import pycrfsuite
import en_core_web_sm
import nltk
wnl = nltk.WordNetLemmatizer()
nlp = en_core_web_sm.load()
def input_prep(text):
    """Turn each raw sentence in *text* into CRF-ready training triples.

    Every sentence becomes a list of (token, POS-tag, 'O') tuples: tokens
    are lowercased and verb-lemmatized with the module-level lemmatizer,
    and POS tags come from the module-level spaCy pipeline `nlp`.
    """
    prepared = []
    for sequence in text:
        lowered = sequence.strip().lower()
        lemmas = [wnl.lemmatize(tok.lower(), pos='v')
                  for tok in nltk.word_tokenize(lowered)]
        # Re-join and re-split, matching the original token normalisation.
        words = ' '.join(lemmas).split()
        sent = ' '.join(words)
        pos_tags = [token.tag_ for token in nlp(sent)]  # POS tag per token
        prepared.append([(word, pos_tags[idx], 'O')
                         for idx, word in enumerate(words)])
    return prepared
def word2features(sent, i): # function to create feature vector to represent each word
    """Build the CRF feature list for token *i* of *sent*.

    *sent* is a list of (word, postag, label) triples.  Features describe
    the token itself plus a one-token window on each side; sentence
    boundaries contribute 'BOS'/'EOS' markers instead of neighbours.
    """
    word, postag = sent[i][0], sent[i][1]
    features = [  # for all words
        'bias',
        f'word.lower={word.lower()}',
        # f'word[-3:]={word[-3:]}',
        f'word.isupper={word.isupper()}',
        f'word.istitle={word.istitle()}',
        f'word.isdigit={word.isdigit()}',
        f'postag={postag}',
        f'postag[:2]={postag[:2]}',  # coarse POS (first two chars)
    ]
    if i > 0:  # previous-token window
        prev_word, prev_tag = sent[i - 1][0], sent[i - 1][1]
        features += [
            f'-1:word.lower={prev_word.lower()}',
            f'-1:word.istitle={prev_word.istitle()}',
            f'-1:word.isupper={prev_word.isupper()}',
            f'-1:word.isdigit={prev_word.isdigit()}',
            f'-1:postag={prev_tag}',
            f'-1:postag[:2]={prev_tag[:2]}',
        ]
    else:
        features.append('BOS')  # beginning of statement
    if i < len(sent) - 1:  # next-token window
        next_word, next_tag = sent[i + 1][0], sent[i + 1][1]
        features += [
            f'+1:word.lower={next_word.lower()}',
            f'+1:word.istitle={next_word.istitle()}',
            f'+1:word.isupper={next_word.isupper()}',
            f'+1:word.isdigit={next_word.isdigit()}',
            f'+1:postag={next_tag}',
            f'+1:postag[:2]={next_tag[:2]}',
        ]
    else:
        features.append('EOS')
    return features
def sent2features(sent):
    """Feature vectors for every token position of *sent*."""
    return [word2features(sent, idx) for idx in range(len(sent))]
def sent2labels(sent):
    """The label column of a (word, postag, label) triple list."""
    return [lbl for _tok, _pos, lbl in sent]
def sent2tokens(sent):
    """The word column of a (word, postag, label) triple list."""
    return [tok for tok, _pos, _lbl in sent]
def extract(text):
    """Run the CRF slot-filler over *text* and collect entity values.

    Returns (sentence, entitylist): `sentence` is the tagged token triples,
    `entitylist` maps slot names (genre/age/price/rating/characters) to the
    extracted string values.
    """
    tagger = pycrfsuite.Tagger()
    tagger.open('model/recommend_game.crfsuite')
    # Split conjunctions into separate "sentences" before tagging.
    text_split = text.replace(' and', '.').split('.')
    sentence = input_prep(text_split)
    features = [sent2features(s) for s in sentence]
    tagList = [tagger.tag(s) for s in features]
    print(tagList)
    # Overwrite tagged tokens: NOTE(review) the new triple is
    # (word, old-label, predicted-tag) — the POS tag is dropped and the
    # prediction lands in slot [2], which the loops below rely on.
    for idx_sent, sent in enumerate(tagList):
        for idx_word, word in enumerate(sent):
            if word != 'O':
                words = sentence[idx_sent][idx_word]
                words_new = (words[0], words[2], word)
                sentence[idx_sent][idx_word] = words_new
    #print(sentence)
    ratingList = []
    genreList = []
    priceList = []
    ageList = []
    characterList = []
    # Bucket each predicted tag into its slot list, with light validation
    # (ages must be digits, prices lose the '$', 'free' maps to 0).
    for idx_sent, sent in enumerate(sentence):
        for idx_word, word in enumerate(sent):
            if 'genre' in word[2]:
                genreList.append(word[0])
            elif 'age' in word[2]:
                if word[0].isdigit():
                    ageList.append(word[0])
            elif 'price' in word[2]:
                if 'free' in word[0]:
                    priceList.append('0')
                else:
                    if word[0].replace('$','').isdigit():
                        priceList.append(word[0].replace('$',''))
            elif 'rating' in word[2]:
                ratingList.append(word[0])
            elif 'character' in word[2]:
                characterList.append(word[0])
    entitylist = {'genre': genreList, 'age': ageList, 'price': priceList, 'rating': ratingList, 'characters': characterList}
    #print(f"entitylist: {entitylist}")
    return sentence, entitylist
|
{"/main.py": ["/intention.py"], "/intention.py": ["/slotfiller.py", "/recommendegine.py"]}
|
8,270
|
owenvvv/Steam_helper
|
refs/heads/master
|
/intention.py
|
import pandas as pd
import pickle as pk
import re
import random
from nltk.tokenize import word_tokenize, sent_tokenize
import slotfiller as sf
import nltk
wnl = nltk.WordNetLemmatizer()
from nltk.corpus import stopwords
mystopwords = stopwords.words("english")
import recommendegine
model_filename = 'model/intent_SGDClassifier_v2.pkl'
classifier_probability_threshold = 0.35
price_words = ['cheap', 'cheaper', 'cheapest']
other_words = ['other', 'another', 'different']
# Maps intent label -> numeric class index used by the classifier.
intent_enc = {
    'commonQ.assist': 0,
    'commonQ.how': 1,
    'commonQ.name': 2,
    'commonQ.wait': 3,
    'recommend.game': 4,
    'game.age': 5,
    'game.price': 6,
    'response.abusive': 7,
    'response.negative': 8,
    'response.incorrect': 9,
    'game.release_date': 10,
    # Fixed: this key previously carried a stray trailing quote
    # ('game.platforms"'), inconsistent with the decoder mapping.
    'game.platforms': 11,
    'response.positive': 12,
    'game.details': 13
}
# Inverse mapping: class index -> intent label; -1 is the low-confidence
# fallback produced by detect_intent.
intent_dec = {
    -1: 'unknown',
    0: 'commonQ.assist',
    1: 'commonQ.how',
    2: 'commonQ.name',
    3: 'commonQ.wait',
    4: 'recommend.game',
    5: 'game.age',
    6: 'game.price',
    7: 'response.abusive',
    8: 'response.negative',
    9: 'response.incorrect',
    10: 'game.release_date',
    11: 'game.platforms',
    12: 'response.positive',
    13: 'game.details'
}
gamesDF = pd.read_csv("./data/steam_small.csv", encoding="utf-8")
def retrieve_last_session(session):
    """Return the most recent entry of *session*, or '' when the history is empty."""
    if not session:
        return ''
    return session[-1]
def clean_text(text, lemma=True):
    """Normalise free text: expand contractions, strip quotes and simple HTML
    markup, collapse whitespace, and optionally lemmatise verbs with WordNet.

    Parameters
    ----------
    text : str
        Raw input text (chat message or store description).
    lemma : bool
        When True, tokenize and lemmatise every token (verb POS, lower-cased).

    Returns
    -------
    str
        The cleaned (and optionally lemmatised) text.
    """
    # Expand common English contractions.
    text = re.sub(r"what's", "what is ", text)
    text = re.sub(r"\'s", " ", text)
    text = re.sub(r"\'ve", " have ", text)
    text = re.sub(r"can't", "can not ", text)
    text = re.sub(r"n't", " not ", text)
    text = re.sub(r"i'm", "i am ", text)
    text = re.sub(r"\'re", " are ", text)
    text = re.sub(r"\'d", " would ", text)
    text = re.sub(r"\'ll", " will ", text)
    text = re.sub(r"\'scuse", " excuse ", text)
    # Strip quotes and leftover HTML markup from store descriptions.
    text = re.sub('"', '', text)
    text = re.sub(r'<br \/>', '', text)
    # 'etc.' -> 'etc'. The dot must be escaped: the original unescaped pattern
    # ('etc' followed by ANY character) also mangled words such as 'fetch'.
    text = re.sub(r'etc\.', 'etc', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'<br>', ' ', text)
    text = re.sub(r'<strong>', '', text)
    text = re.sub(r'<\/strong>', '', text)
    text = text.strip(' ')
    if lemma:
        tokens = word_tokenize(text)
        tokens = [wnl.lemmatize(t.lower(), pos='v') for t in tokens]
        text = " ".join(tokens)
    return text
def detect_intent(query):
    """Classify the user's message into an intent class id (see `intent_dec`).

    Parameters
    ----------
    query : dict
        Request payload; the user's text is read from query['message'].

    Returns
    -------
    int
        The predicted class id, or -1 when a 'recommend.game' prediction
        (class 4) falls below `classifier_probability_threshold`.
    """
    text = [str(query['message'])]
    queryDF = pd.DataFrame(text, columns=['Query'])
    # Load the trained model once and cache it on the function object.
    # The original code unpickled the model from disk on every call and
    # never closed the file handle it opened.
    intent_model = getattr(detect_intent, '_model', None)
    if intent_model is None:
        with open(model_filename, 'rb') as model_file:
            intent_model = pk.load(model_file)
        detect_intent._model = intent_model
    result = intent_model.predict(queryDF.Query)
    result_proba = intent_model.predict_proba(queryDF.Query)
    classes = list(intent_model.classes_)
    class_proba = result_proba[0][classes.index(result[0])]
    if result[0] == 4:
        # Recommendations trigger expensive downstream work, so only accept
        # the prediction when the classifier is reasonably confident.
        if class_proba >= classifier_probability_threshold:
            intent = result[0]
        else:
            intent = -1
    else:
        intent = result[0]
    return intent
def response(query, helper_session):
    """Build the bot's reply for *query* and return (resp_text, [session]).

    *query* is a dict whose 'message' key holds the user's text;
    *helper_session* is the list of per-turn session dicts from earlier
    turns. resp_text is either a single string or a list of strings
    (multi-bubble reply); the returned single-element list is the new
    session history entry (intent, query, tags, game, candidate gameids).
    """
    # Canned sentence fragments; one of each group is picked at random so
    # the bot does not repeat itself verbatim.
    name_part1 = ['Hi, my name is Stella.', 'Hello, my name is Stella.']
    wait_part1 = ['Sure!', 'Of course!', 'No problem!', 'Okay.']
    wait_part2 = ['I will wait for you.', 'Whenever you are ready.', 'Write back when you are ready.',
                  'Just write back when you are ready.']
    assist_part1 = ['How can I help you?', 'What can I do for you today?', 'How can I assist you?',
                    'Do you need help finding games?', 'Would you like me to recommend you a game?']
    hru = ['Feeling great!', 'I am feeling awesome.', 'Feeling Good!', 'I am doing great']
    recmd_part1 = ['I found this game - ', 'You might be interested in this game - ',
                   'I can suggest this game - ', 'Maybe you will be interested in - ']
    recmd_part2 = ['I found this game about your requirement on <<reason>> -']
    recmd_part3 = ['You may like this <<genre>> game which is good on its <<aspect>> aspect -']
    recmd_part4 = ['I found this game - ',
                   'I would recommend the game because you like <<genre>> game - ']
    # NOTE(review): 'Let''s ...' is two adjacent literals, i.e. "Lets be nice..."
    # without the apostrophe — presumably "Let's" was intended; confirm.
    abusive_resp = ['Please refrain from using such language',
                    'Let''s be nice to each other and refrain from using such strong words']
    negative_part1 = ['I am sorry.', 'My apologise.']
    negative_part2 = ['Can you tell me what is wrong?', 'What did I get wrong?', 'How can I correct myself?',
                      'How can I fix this?']
    price_part1 = ['The price of the game is $<<price>>', 'It costs $<<price>>', '$<<price>>']
    ask4more = ['Is there anything else you would like to know?',
                'Would you like me to know more details about this game?']
    age_part1 = ['This game is suitable for gamers age above <<age>> years old',
                 'This is suitable for gamers age <<age>> and above', 'This is for gamers above <<age>> years old.']
    date_part = ['The release date is <<release_date>>', 'It was released on <<release_date>>', '<<release_date>>']
    platform_part = ['This game supports <<platform>>', 'You can play the game on <<platform>>']
    positive_resp = ['You are welcome :)']
    unknown_part1 = ['Unfortunately,', 'Sorry,', 'Pardon me,']
    unknown_part2 = ['I did not understand.', 'I did not get it.']
    unknown_part3 = ['Can you repeat?', 'Can we try again?', 'Can you say it again?']
    last_session = retrieve_last_session(helper_session)  # retrieve the last session details
    session_tags = {}
    session_game = {}
    session = {}
    game = {}
    resp_text = ''
    genre = ''
    # Restore the tags and recommended game accumulated in the previous turn.
    if last_session != '':
        if last_session.get("tags") is not None:
            session_tags.update(last_session['tags'])
        if last_session.get("game") is not None:
            session_game.update(last_session['game'])
    query_words = str(query['message']).lower().split(' ')
    # NOTE(review): `yeswords` is unused — the condition below repeats the literals.
    yeswords = ['yes', 'ok', 'sure']
    # Affirmative reply: continue the previous intent instead of re-classifying.
    if 'yes' in query_words or 'ok' in query_words or 'sure' in query_words:
        # NOTE(review): if there is no previous session, last_session is ''
        # and this subscript raises TypeError — confirm the caller always
        # opens with a non-affirmative message.
        last_intent = last_session['intent']
        intent = last_intent
        session.update(last_session)
        if last_intent == 'commonQ.assist':
            resp_text = 'What kind of games are you looking for? Any particular genre or price?'
        elif last_intent == 'recommend.game':
            # "yes" after a recommendation -> describe the recommended game.
            session.update({'intent': 'game.details'})
            game = last_session['game']
            resp_text = f"{game['Title']} is released on {game['release']} by {game['publisher']}."
            if game['Price'] == 0:
                resp_text = resp_text + " It is free to play and "
            else:
                resp_text = resp_text + f" It costs ${game['Price']} and "
            # NOTE(review): the first branch compares against the string '0'
            # while the second compares numerically — Age's type looks mixed; verify.
            if game['Age'] == '0':
                resp_text = resp_text + " suitable for all ages."
            elif game['Age'] < 12:
                resp_text = resp_text + f" suitable for kids age {game['Age']} and above."
            else:
                resp_text = resp_text + f" suitable for teenager age {game['Age']} and above."
            resp_temp = resp_text
            resp_text = []
            resp_text.append(resp_temp)
            resp_text.append('Would you like me to recommend you other similar games?')
        elif last_intent == 'game.details':
            # "yes" after details -> recommend another game from the stored
            # candidate list, excluding the one already shown.
            try:
                session.update({'intent': 'recommend.game'})
                last_gameid = last_session['game']
                # print(last_gameid)
                gameids = last_session.get('gameids')
                print(gameids)
                gameids.remove(last_gameid['id'])
                gameid = random.choice(gameids)
                gameTitle, gameSummary, gameURL, gamePrice, gameAge, gameRelease, gamePlatform, gamePublisher, gameImage = extract_game_summ(
                    gameid)
                resp_text = []
                resp_text.append(random.choice(recmd_part1) + gameTitle + '.')
                resp_text.append(f'<img src="{gameImage}" target="_blank" style="width:100%">' + gameSummary)
                resp_text.append(f'<a href="{gameURL}" target="_blank">{gameURL}</a>')
                resp_text.append(random.choice(ask4more))
                game = {'id': gameid, 'Title': gameTitle, 'URL': gameURL, 'Price': gamePrice, 'Age': gameAge,
                        'release': gameRelease, 'platform': gamePlatform, 'publisher': gamePublisher}
                session.update({'game': game})
            except Exception as e:
                # No candidate list left (or empty choice) -> ask the user to rephrase.
                resp_text = random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                    unknown_part3)
        else:
            resp_text = random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                unknown_part3)
    else:
        # New request: classify the message and dispatch on the intent label.
        intent = intent_dec[detect_intent(query)]
        print(intent)
        session = {'intent': intent, 'query': str(query['message'])}
        session.update({'tags': session_tags})
        session.update({'game': session_game})
        if intent == 'commonQ.how':
            resp_text = random.choice(hru)
        elif intent == 'commonQ.assist':
            resp_text = random.choice(assist_part1)
        elif intent == 'commonQ.wait':
            resp_text = random.choice(wait_part1) + ' ' + random.choice(wait_part2)
        elif intent == 'commonQ.name':
            resp_text = random.choice(name_part1) + ' ' + random.choice(assist_part1)
        elif intent == 'recommend.game':
            # Extract slots (genre/price/age/...) from the message, merge them
            # with the session history, then query the recommendation engine.
            sent_tag, tags = sf.extract(str(query['message']))
            # manual set gameid for testing purpose. Remove once recommendation model is available
            # tags = {'genre':[], 'price':[], 'age':[], 'rating':[]}
            print(tags)
            if tags.get('genre') is not None:
                if tags['genre'] != '':
                    genre = ' and '.join(str(x) for x in tags['genre'])
                for tags_word in tags['genre']:
                    # "cheaper" -> use the previously shown game's price as a ceiling.
                    if tags_word == 'cheaper':
                        price = session_game['Price']
                        tags.update({'price': [str(price)]})
            new_tags = update_tags(tags, session_tags)
            print(f"new tags: {new_tags}")
            session.update({'tags': new_tags})
            gameids, status = recommend_game(str(query['message']), tags)
            session.update({'gameids': gameids})
            resp_text = []
            if len(gameids) == 0:
                # Nothing matched: fall back to 5 random catalogue picks.
                # NOTE(review): status may be an empty list here, in which
                # case status[0] raises IndexError — confirm recommend_game
                # always returns a non-empty status.
                gameids = random.sample(list(gamesDF['appid']), 5)
                status[0] = 0  # random result
            gameid = random.choice(gameids)
            gameTitle, gameSummary, gameURL, gamePrice, gameAge, gameRelease, gamePlatform, gamePublisher, gameImage = extract_game_summ(
                gameid)
            # Phrase the recommendation according to how it was produced:
            # 1 = genre match, -1 = slot-filter match, 2 = genre + aspect, else generic.
            if status[0] == 1:
                print(status[1])
                resp_text.append((random.choice(recmd_part4)).replace('<<genre>>', status[1]) + gameTitle + '.')
            elif status[0] == -1:
                resp_text.append((random.choice(recmd_part2)).replace('<<reason>>', status[1]) + gameTitle + '.')
            elif status[0] == 2:
                resp_text.append((random.choice(recmd_part3)).replace('<<genre>>', status[1]).replace('<<aspect>>', status[2]) + gameTitle + '.')
            else:
                resp_text.append((random.choice(recmd_part1)) + gameTitle + '.')
            resp_text.append((f'<img src="{gameImage}" target="_blank" style="width:100%">' + gameSummary))
            resp_text.append(f'<a href="{gameURL}" target="_blank">{gameURL}</a>')
            resp_text.append(random.choice(ask4more))
            game = {'id': gameid, 'Title': gameTitle, 'URL': gameURL, 'Price': gamePrice, 'Age': gameAge,
                    'release': gameRelease, 'platform': gamePlatform, 'publisher': gamePublisher}
            session.update({'game': game})
        elif intent == 'game.age':
            # Follow-up question about the last recommended game's age rating.
            resp_text = []
            if session_game != '':
                age = extract_game_age(session_game['id'])
                # print(age)
                resp_text.append((random.choice(age_part1)).replace('<<age>>', str(age)))
            else:
                resp_text.append(
                    random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                        unknown_part3))
            resp_text.append(random.choice(ask4more))
        elif intent == 'game.price':
            resp_text = []
            if session_game != '':
                price = extract_game_price(session_game['id'])
                if price == 0.0:
                    resp_text.append('This is a free to play game.')
                else:
                    resp_text.append((random.choice(price_part1)).replace('<<price>>', str(price)))
            else:
                resp_text.append(
                    random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                        unknown_part3))
            resp_text.append(random.choice(ask4more))
        elif intent == 'response.abusive':
            resp_text = random.choice(abusive_resp)
        elif intent == 'response.negative':
            resp_text = random.choice(negative_part1) + ' ' + random.choice(negative_part2)
        elif intent == 'response.incorrect':
            # Two consecutive "no" turns end the conversation politely.
            last_intent = last_session['intent']
            last_query = last_session['query']
            if last_intent == 'response.incorrect' and 'no' in last_query.lower() and 'no' in str(query['message']):
                resp_text = 'Thank you for using Steam Helper. Have a nice day'
            else:
                resp_text = random.choice(assist_part1)
        elif intent == 'game.release_date':
            resp_text = []
            if session_game != '':
                date = extract_game_date(session_game['id'])
                resp_text.append((random.choice(date_part)).replace('<<release_date>>', str(date)))
            else:
                resp_text.append(
                    random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                        unknown_part3))
            resp_text.append(random.choice(ask4more))
        elif intent == 'game.platforms':
            resp_text = []
            if session_game != '':
                plateforms = extract_game_platform(session_game['id'])
                resp_text.append((random.choice(platform_part)).replace('<<platform>>', str(plateforms)))
            else:
                resp_text.append(
                    random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                        unknown_part3))
            resp_text.append(random.choice(ask4more))
        elif intent == 'response.positive':
            resp_text = random.choice(positive_resp)
        else:
            resp_text = random.choice(unknown_part1) + ' ' + random.choice(unknown_part2) + ' ' + random.choice(
                unknown_part3)
    # Change the response to a list for seperate the response
    # print(f"new >> session: {session}; intent: {intent}; resp_text: {resp_text}")
    return resp_text, [session]
def extract_about_game(text):
    """Return the first two sentences of *text* after light cleanup (no lemmatising)."""
    cleansed = clean_text(text, lemma=False)
    leading_sentences = sent_tokenize(cleansed)[:2]
    return ' '.join(leading_sentences)
def recommend_game(query, tags):
    """Return (gameids, status): candidate game ids for *query*.

    First applies the cheap price/age filters extracted by the slot filler;
    when neither filter yields candidates, delegates to the content-based
    recommendation engine. *status* encodes how the result was produced
    (e.g. [-1, 'price'] for a slot-filter hit) and is interpreted by
    `response`. On engine failure an empty id list is returned (best
    effort — the caller substitutes random picks).
    """
    status = []
    gameslist = gamesDF
    gameids = []
    # Price ceiling from the slot filler (digit strings only).
    if tags.get('price'):
        pricelimit = ' '.join(tags['price'])
        candidates = gameslist[gameslist.price < int(pricelimit)]
        candidate_ids = candidates['appid'].head(10).tolist()
        if candidate_ids:
            status.append(-1)
            status.append('price')
            gameslist = candidates
            gameids = candidate_ids
    # Age ceiling, applied on top of any price filtering above.
    if tags.get('age'):
        agelimit = ' '.join(tags['age'])
        candidates = gameslist[gameslist.required_age < int(agelimit)]
        candidate_ids = candidates['appid'].head(10).tolist()
        if candidate_ids:
            status.append(-1)
            status.append('age')
            gameslist = candidates
            gameids = candidate_ids
    if gameids:
        return gameids, status
    # No filter matched: ask the recommendation engine; swallow (but log)
    # engine errors so the chatbot can still answer with a fallback.
    try:
        gameids, status = recommendegine.recommend(query, tags)
    except Exception as e:
        print(e)
        gameids = []
    print(gameids)
    return gameids, status
# Catalogue columns include: 'appid', 'name', 'release_date', 'publisher',
# 'platforms', 'required_age', 'price', 'short_description', 'header_image', ...
def extract_game_summ(gameid):
    """Look up *gameid* in the catalogue and return its display fields.

    Returns a 9-tuple: (title, summary, store URL, price, required age,
    release date, platforms, publisher, header image URL).
    """
    record = gamesDF[gamesDF['appid'] == gameid].iloc[0]
    store_url = f'https://store.steampowered.com/app/{gameid}'
    return (
        record['name'],
        record['short_description'],
        store_url,
        record['price'],
        record['required_age'],
        record['release_date'],
        record['platforms'],
        record['publisher'],
        record['header_image'],
    )
# Function to extract price of game last recommended
def extract_game_price(gameid):
    """Return the catalogue price for *gameid*.

    Uses the module-level `gamesDF` catalogue instead of re-reading the CSV
    on every call, consistent with extract_game_summ/extract_game_platform.
    """
    gameInfo = gamesDF[gamesDF['appid'] == gameid]
    gamePrice = gameInfo.iloc[0]['price']
    return gamePrice
def extract_game_age(gameid):
    """Return the required-age rating for *gameid*.

    Uses the module-level `gamesDF` catalogue instead of re-reading the CSV
    on every call, consistent with extract_game_summ/extract_game_platform.
    """
    gameInfo = gamesDF[gamesDF['appid'] == gameid]
    gameAge = gameInfo.iloc[0]['required_age']
    return gameAge
def extract_game_date(gameid):
    """Return the release date for *gameid*.

    Uses the module-level `gamesDF` catalogue instead of re-reading the CSV
    on every call, consistent with extract_game_summ/extract_game_platform.
    """
    gameInfo = gamesDF[gamesDF['appid'] == gameid]
    gameDate = gameInfo.iloc[0]['release_date']
    return gameDate
def extract_game_platform(gameid):
    """Return the supported-platforms string for *gameid* from the catalogue."""
    matching = gamesDF[gamesDF['appid'] == gameid]
    return matching.iloc[0]['platforms']
def update_tags(tags, session_tags):
    """Merge freshly extracted *tags* into the session's accumulated tags.

    List-valued slots ('genre', 'age', 'rating', 'characters') are
    concatenated onto the session history; 'price' is replaced by the
    newest request. Returns a NEW dict and leaves both inputs (and the
    list objects they share with the stored session history) unmodified —
    the original implementation aliased *session_tags* and extended its
    lists in place, corrupting the saved history across turns. It also
    dropped a new price whenever the session had none recorded.
    """
    # Shallow-copy the dict and copy each list so .extend() below cannot
    # mutate the caller's session state.
    merged = {key: list(value) if isinstance(value, list) else value
              for key, value in session_tags.items()}
    for slot in ('genre', 'age', 'rating', 'characters'):
        if tags.get(slot) is not None:
            if merged.get(slot) is not None:
                merged[slot].extend(tags[slot])
            else:
                merged[slot] = list(tags[slot])
    # The newest price constraint always replaces the session's one.
    if tags.get('price') is not None:
        merged['price'] = list(tags['price'])
    return merged
|
{"/main.py": ["/intention.py"], "/intention.py": ["/slotfiller.py", "/recommendegine.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.