index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
66,141 | pawan7697/django | refs/heads/main | /products/migrations/0003_auto_20210615_1842.py | # Generated by Django 3.2.3 on 2021-06-15 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the big/small image-name columns to products."""

    dependencies = [
        ('products', '0002_auto_20210614_1848'),
    ]

    operations = [
        migrations.AddField(
            model_name='products',
            name='product_img_nameB',
            field=models.CharField(blank=True, max_length=100),
        ),
        migrations.AddField(
            model_name='products',
            name='product_img_nameS',
            field=models.CharField(blank=True, max_length=100),
        ),
    ]
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,142 | pawan7697/django | refs/heads/main | /subcategory/views.py | from django.shortcuts import render,redirect
# Create your views here.
from django.http import HttpResponse
from .models import category,subcategory
#from django.contrib.auth.models import User
# Create your views here.
def addSubcategory(request):
    """Render the add-subcategory admin form with every category to choose from."""
    all_categories = category.objects.all()
    return render(request, 'admin/addSubcategory.html', {'categorys': all_categories})
def submitSubcategory(request):
    """Persist a new subcategory posted from the admin form, then redirect to the list."""
    if request.method != "POST":
        return HttpResponse('fail')
    parent_id = int(request.POST.get('category'))
    sub_name = request.POST.get('subcategory')
    subcategory.objects.create(category_name_id=parent_id, subcategory_name=sub_name, status=1)
    return redirect('/subcategoryView/')
def subcategoryView(request):
    """List all subcategories together with their parent category names."""
    rows = subcategory.objects.all().values('subcategory_name', 'id', 'category_name__category_name')
    return render(request, 'admin/subcateoryview.html', {'all_data': rows})
def subcategoryEdit(request, ids):
    """Show the edit form for one subcategory, identified by primary key."""
    row = subcategory.objects.filter(id=ids).values()
    all_categories = category.objects.all()
    return render(request, 'admin/subcategoryEdit.html', {'all_data': row, 'idss': ids, 'categorys': all_categories})
def subcategoryUpadte(request):
    """Apply posted edits to an existing subcategory.

    The misspelled name is kept: urls.py routes to it by this identifier.
    """
    if request.method != "POST":
        return HttpResponse('fail')
    row_id = int(request.POST.get('idss'))
    parent_id = request.POST.get('categoryID')
    new_name = request.POST.get('subcategoryname')
    subcategory.objects.filter(id=row_id).update(category_name_id=parent_id, subcategory_name=new_name)
    return redirect('/subcategoryView/')
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,143 | pawan7697/django | refs/heads/main | /products/migrations/0001_initial.py | # Generated by Django 3.2.3 on 2021-06-13 14:42
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: create the products table.

    Note: category/subcategory/supercategory names are stored as Postgres
    ArrayFields of strings rather than foreign keys.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='products',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('category_name', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, size=None)),
                ('subcategory_name', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, size=None)),
                ('supercategory_name', django.contrib.postgres.fields.ArrayField(base_field=models.CharField(max_length=100), blank=True, size=None)),
                ('product_name', models.CharField(blank=True, max_length=100)),
                ('product_desc', models.CharField(blank=True, max_length=500)),
                ('product_Smallimg', models.CharField(blank=True, max_length=100)),
                ('product_Bigimg', models.CharField(blank=True, max_length=100)),
                ('product_code', models.CharField(blank=True, max_length=100)),
                ('product_price', models.CharField(blank=True, max_length=50)),
                ('product_sell_price', models.CharField(blank=True, max_length=50)),
                ('status', models.IntegerField()),
            ],
        ),
    ]
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,144 | pawan7697/django | refs/heads/main | /supercategory/views.py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import category,subcategory,Supercategory
# Create your views here.
def addSupercateory(request):
    """Render the add-supercategory form with category and subcategory choices.

    (Misspelled name kept for URL-conf compatibility.)
    """
    cat_rows = category.objects.all()
    subcat_rows = subcategory.objects.all()
    return render(request, 'admin/addSupercateory.html', {'catData': cat_rows, 'subcatData': subcat_rows})
def ajaxsubcategory(request):
    """AJAX endpoint: render the subcategories belonging to the posted category.

    Bug fixed: the old fall-through `return HttpResponse(catids)` ran only on
    non-POST requests, where `catids` was never bound, so every GET raised a
    NameError (HTTP 500). Non-POST requests now get an explicit 'fail'
    response, matching the other views in this project.
    """
    if request.method == "POST":
        catids = request.POST.get('categoryID')
        subcat_rows = subcategory.objects.filter(category_name_id=catids).values()
        return render(request, 'ajax/subcategory.html', {'Cdata': subcat_rows})
    return HttpResponse('fail')
def SupercategorySubmit(request):
    """Create a supercategory from the posted category/subcategory/name.

    Bug fixed: a non-POST request previously fell off the end and returned
    None, which Django rejects with "view didn't return an HttpResponse";
    it now gets an explicit 'fail' response like the sibling submit views.
    """
    if request.method == "POST":
        category_ids = int(request.POST.get('category_id'))
        subcatID_ids = int(request.POST.get('subcategory_id'))
        sup = request.POST.get('Super')
        Supercategory.objects.create(category_name_id=category_ids, subcategory_name_id=subcatID_ids,
                                     supercategory_name=sup, status=1)
        return redirect('/SupercategoryView/')
    return HttpResponse('fail')
def SupercategoryView(request):
    """List all supercategories with their parent category/subcategory names."""
    rows = Supercategory.objects.all().values('supercategory_name', 'subcategory_name__subcategory_name', 'id', 'category_name__category_name')
    return render(request, 'admin/supercatView.html', {'supercat': rows})
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,145 | pawan7697/django | refs/heads/main | /products/views.py | from django.shortcuts import render
from django.http import HttpResponse
from category.models import category
from subcategory.models import subcategory
from supercategory.models import Supercategory
from .models import products
#from .models import UploadFileForm
from .forms import ImageForm
# Create your views here.
def addproducts(request):
    """Render the add-product form with all three category levels preloaded."""
    cat_rows = category.objects.all()
    subcat_rows = subcategory.objects.all()
    super_rows = Supercategory.objects.all()
    return render(request, 'newadmin/addproducts.html', {'cat': cat_rows, 'subcat': subcat_rows, 'supers': super_rows})
def productSubmit(request):
    """Create a product row from the posted form data and uploaded image.

    Bug fixed: non-POST requests previously returned None (Django raises
    ValueError for views that return None); they now get a 'fail' response.
    """
    if request.method == "POST":
        cat_values = request.POST.getlist('cat[]')
        sub_values = request.POST.getlist('subcats[]')
        super_values = request.POST.getlist('supercat[]')
        pname = request.POST.get('pname')
        pprice = request.POST.get('pprice')
        sellprice = request.POST.get('sellprice')
        desc = request.POST.get('desc')
        simg = request.FILES['simg']  # raises MultiValueDictKeyError when no file was uploaded
        # NOTE(review): product_img_nameB and product_code are hard-coded
        # placeholders carried over from the original implementation.
        products.objects.create(category_name=cat_values, subcategory_name=sub_values,
                                supercategory_name=super_values, product_name=pname,
                                product_desc=desc, product_Smallimg=simg,
                                product_img_nameB='m2', product_code='axe13',
                                product_price=pprice, product_sell_price=sellprice,
                                status=1)
        return HttpResponse('success')
    return HttpResponse('fail')
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,146 | pawan7697/django | refs/heads/main | /products/forms.py | from django import forms
from .models import products
class ImageForm(forms.ModelForm):
class Meta:
model= products
fields= ["product_img_nameS", "product_Smallimg"] | {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,147 | pawan7697/django | refs/heads/main | /products/models.py | from django.db import models
from django.conf import settings
from django.contrib.postgres.fields import ArrayField
# Create your models here.
class products(models.Model):
    """Catalog product.

    Category membership is denormalized: names are kept as Postgres text
    arrays instead of foreign keys into the category tables.
    """
    category_name = ArrayField(models.CharField(max_length=100), blank=True)
    subcategory_name = ArrayField(models.CharField(max_length=100), blank=True)
    supercategory_name = ArrayField(models.CharField(max_length=100), blank=True)
    product_name = models.CharField(max_length=100, blank=True)
    product_desc = models.CharField(max_length=500, blank=True)
    # Stored file names for the small/big images (separate from the uploads below).
    product_img_nameS = models.CharField(max_length=100, blank=True)
    product_img_nameB = models.CharField(max_length=100, blank=True)
    product_Smallimg = models.ImageField(upload_to='myupload/')
    product_Bigimg = models.ImageField(upload_to='fullimages/')
    product_code = models.CharField(max_length=100, blank=True)
    # Prices are stored as strings, not decimals.
    product_price = models.CharField(max_length=50, blank=True)
    product_sell_price = models.CharField(max_length=50, blank=True)
    status = models.IntegerField()
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,148 | pawan7697/django | refs/heads/main | /subcategory/urls.py | from django.urls import path
from .import views
urlpatterns = [
path('addSubcategory/', views.addSubcategory, name='addSubcategory'),
path('submitSubcategory/', views.submitSubcategory, name='submitSubcategory'),
path('subcategoryView/', views.subcategoryView, name='subcategoryView'),
path('subcategoryEdit/<int:ids>', views.subcategoryEdit, name='subcategoryEdit'),
path('subcategoryUpadte/', views.subcategoryUpadte, name='subcategoryUpadte'),
] | {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,149 | pawan7697/django | refs/heads/main | /subcategory/models.py | from django.db import models
from django.conf import settings
from category.models import category
# Create your models here.
class subcategory(models.Model):
    """Second-level category, owned by a top-level category."""
    category_name = models.ForeignKey(category, on_delete=models.CASCADE)
    subcategory_name = models.CharField(max_length=50, blank=True)
    status = models.IntegerField()
    # def __str__(self):
    # return self.subcategory_name
    # class Meta:
    # ordering = ['subcategory_name']
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,150 | pawan7697/django | refs/heads/main | /products/migrations/0004_alter_products_product_smallimg.py | # Generated by Django 3.2.3 on 2021-06-16 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: move product_Smallimg uploads from thumb/ to myupload/."""

    dependencies = [
        ('products', '0003_auto_20210615_1842'),
    ]

    operations = [
        migrations.AlterField(
            model_name='products',
            name='product_Smallimg',
            field=models.ImageField(upload_to='myupload/'),
        ),
    ]
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,151 | pawan7697/django | refs/heads/main | /products/urls.py | from django.urls import path
from .import views
urlpatterns = [
path('addproducts/', views.addproducts, name='addproducts'),
path('productSubmit/', views.productSubmit, name='productSubmit'),
] | {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,152 | pawan7697/django | refs/heads/main | /category/views.py | from django.shortcuts import render,redirect
from django.http import HttpResponse
from .models import category
# Create your views here.
def categorys(request):
    """Render the empty add-category form."""
    return render(request, 'admin/category.html', {})
def submitcategory(request):
    """Create a category from the posted form and return to the list view."""
    if request.method != 'POST':
        return HttpResponse('fail')
    name = request.POST.get('category')
    category.objects.create(category_name=name, status=1)
    return redirect('/categoryView/')
def categoryView(request):
    """List every category."""
    rows = category.objects.all()
    return render(request, 'admin/category_view.html', {'all_data': rows})
def categoryEdit(request, ids):
    """Show the edit form for one category, identified by primary key."""
    row = category.objects.filter(id=ids).values()
    return render(request, 'admin/categoryEdit.html', {'ids': row})
def categoryUpdate(request):
    """Apply a posted rename to an existing category.

    Bug fixed: non-POST requests previously fell off the end and returned
    None, which Django rejects with a ValueError; they now get an explicit
    'fail' response, consistent with submitcategory().
    """
    if request.method == "POST":
        category_name = request.POST.get('category')
        idss = request.POST.get('cat_ids')
        category.objects.filter(id=idss).update(category_name=category_name)
        return redirect('/categoryView/')
    else:
        return HttpResponse('fail')
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,153 | pawan7697/django | refs/heads/main | /products/migrations/0002_auto_20210614_1848.py | # Generated by Django 3.2.3 on 2021-06-14 18:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: convert the product image columns to ImageFields."""

    dependencies = [
        ('products', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='products',
            name='product_Bigimg',
            field=models.ImageField(upload_to='fullimages/'),
        ),
        migrations.AlterField(
            model_name='products',
            name='product_Smallimg',
            field=models.ImageField(upload_to='thumb/'),
        ),
    ]
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,154 | pawan7697/django | refs/heads/main | /supercategory/migrations/0002_auto_20210612_1907.py | # Generated by Django 3.2.3 on 2021-06-12 19:07
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: add supercategory_name and link subcategory_name as a FK."""

    dependencies = [
        ('subcategory', '0001_initial'),
        ('supercategory', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='supercategory',
            name='supercategory_name',
            field=models.CharField(blank=True, max_length=50),
        ),
        migrations.AlterField(
            model_name='supercategory',
            name='subcategory_name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='subcategory.subcategory'),
        ),
    ]
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,155 | pawan7697/django | refs/heads/main | /category/urls.py | from django.urls import path
from .import views
urlpatterns = [
path('categorys/', views.categorys, name='categorys'),
path('submitcategory/', views.submitcategory, name='submitcategory'),
path('categoryView/', views.categoryView, name='categoryView'),
path('categoryEdit/<int:ids>', views.categoryEdit, name='categoryEdit'),
path('categoryUpdate/', views.categoryUpdate, name='categoryUpdate'),
] | {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,156 | pawan7697/django | refs/heads/main | /dashbord/apps.py | from django.apps import AppConfig
class DashbordConfig(AppConfig):
    """App config for the 'dashbord' (sic) dashboard app."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'dashbord'
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,157 | pawan7697/django | refs/heads/main | /supercategory/models.py | from django.db import models
from django.conf import settings
from category.models import category
from subcategory.models import subcategory
# Create your models here.
class Supercategory(models.Model):
    """Third-level category, linked to both a category and a subcategory."""
    category_name = models.ForeignKey(category, on_delete=models.CASCADE)
    subcategory_name = models.ForeignKey(subcategory, on_delete=models.CASCADE)
    supercategory_name = models.CharField(max_length=50, blank=True)
    status = models.IntegerField()
    # def __str__(self):
    # return self.subcategory_name
    # class Meta:
    # ordering = ['subcategory_name']
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,158 | pawan7697/django | refs/heads/main | /subcategory/admin.py | from django.contrib import admin
from .models import subcategory
class SubCategoryAdmin(admin.ModelAdmin):
    """Admin list view showing parent category, subcategory name and status."""
    list_display = ('category_name', 'subcategory_name', 'status')

# Register your models here.
admin.site.register(subcategory, SubCategoryAdmin)
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,159 | pawan7697/django | refs/heads/main | /dashbord/views.py | from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def dashbord(request):
    """Render the admin dashboard landing page (no context needed)."""
    #text={}
    return render(request, 'admin/index.html')
| {"/subcategory/views.py": ["/subcategory/models.py"], "/supercategory/views.py": ["/supercategory/models.py"], "/products/views.py": ["/subcategory/models.py", "/supercategory/models.py", "/products/models.py", "/products/forms.py"], "/products/forms.py": ["/products/models.py"], "/supercategory/models.py": ["/subcategory/models.py"], "/subcategory/admin.py": ["/subcategory/models.py"]} |
66,161 | goryfigment/inventory | refs/heads/master | /inventory/models.py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
from django_mysql.models import JSONField
import time
def get_utc_epoch_time():
    """Return the current time as whole epoch seconds."""
    now = time.time()
    return int(round(now))
def default_link_columns():
    """Fresh default mapping for Store.link_columns: every link disabled."""
    return dict.fromkeys(('quantity', 'price', 'cost', 'name'), False)
def default_transaction_filter():
    """Fresh default filter list: include every payment type."""
    return list(('ALL',))
class Store(models.Model):
    """A single storefront: its inventory table layout, data and view settings."""
    name = models.CharField(max_length=100)
    tax = models.CharField(default='0.00', max_length=12)  # stored as a string, e.g. '8.25'
    link_columns = JSONField(default=default_link_columns)
    include_columns = JSONField()
    columns = JSONField(default=list)
    picture_column = models.CharField(max_length=100, blank=True)
    inventory = JSONField()
    # settings
    order_by = models.CharField(max_length=100, default='none')  # 'none' disables column sorting
    reverse = models.BooleanField(default=False)
    transaction_filter = JSONField(default=default_transaction_filter)

    class Meta:
        db_table = "store"
class Business(models.Model):
    """A named business owning any number of stores."""
    stores = models.ManyToManyField(Store)
    name = models.CharField(max_length=100)

    class Meta:
        db_table = "business"
class Settings(models.Model):
    """Per-boss preferences: reporting window and receipt-printer options."""
    start_time = models.IntegerField(default=0, blank=True)  # epoch seconds
    date_range = models.CharField(max_length=15, default='*')
    # RECEIPT SETTINGS
    ip_address = models.CharField(max_length=100, default='192.168.0.0')  # receipt printer address
    header = JSONField()
    footer = JSONField()

    class Meta:
        db_table = "settings"
class Boss(models.Model):
    """Owner account: one settings record and one business."""
    settings = models.OneToOneField(Settings, on_delete=models.CASCADE)
    business = models.OneToOneField(Business, on_delete=models.CASCADE)

    class Meta:
        db_table = "boss"
class Employee(models.Model):
    """Links a worker to a boss and a store with a permission level."""
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0;
    # this codebase appears to target that era (it also uses Python 2 idioms).
    boss = models.ForeignKey(Boss, default=None)
    type = models.CharField(choices=(('admin', 'admin'), ('employee', 'employee'), ('read', 'read')), max_length=255, default='read')
    store = models.ForeignKey(Store, default=None)

    class Meta:
        db_table = "employee"
class User(AbstractBaseUser):
    """Custom auth user; logs in by username and points at a Boss or an Employee role."""
    email = models.EmailField(max_length=255, unique=True, blank=True, null=True)
    username = models.CharField(max_length=15, unique=True)
    first_name = models.CharField(max_length=255)
    last_name = models.CharField(max_length=255)
    # Password-reset token and its issue time (epoch seconds); None when unused.
    reset_link = models.CharField(default=None, null=True, max_length=255)
    reset_date = models.IntegerField(default=None, blank=True, null=True)
    is_staff = models.BooleanField(default=True)
    is_superuser = models.BooleanField(default=True)
    # Exactly one of boss/employee is expected to be set, depending on role.
    boss = models.OneToOneField(Boss, default=None, null=True, on_delete=models.CASCADE)
    employee = models.OneToOneField(Employee, default=None, null=True, on_delete=models.CASCADE)
    # password = models.CharField(max_length=255)
    # last_login = models.DateTimeField(default=timezone.now, blank=True)
    USERNAME_FIELD = 'username'

    def __unicode__(self):
        # Python 2 string representation (codebase predates __str__-only Python 3).
        return self.email

    def get_short_name(self):
        """Short display name used by Django admin."""
        return self.first_name

    def has_perm(self, perm, obj=None):
        """Grant every permission to superusers; deny otherwise."""
        return self.is_superuser

    def has_module_perms(self, app_label):
        """Grant app-level access to superusers; deny otherwise."""
        return self.is_superuser

    class Meta:
        db_table = "user"
class ItemLog(models.Model):
    """Audit record for an inventory change (receive, damage, price/cost reset)."""
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0.
    user = models.ForeignKey(User, default=None)
    business = models.ForeignKey(Business, null=True, default=None)
    store = models.ForeignKey(Store, null=True, default=None)
    action = models.CharField(max_length=255, blank=True)
    operation = models.CharField(choices=(('Received', 'Received'), ('Damaged', 'Damaged'), ('Reset Cost', 'Reset Cost'), ('Reset Price', 'Reset Price')), max_length=255, default='Received')
    item_name = models.CharField(max_length=255, blank=True)
    change = models.CharField(max_length=255, blank=True)
    previous_value = models.CharField(max_length=255, blank=True)
    date = models.IntegerField(default=get_utc_epoch_time, blank=True)  # epoch seconds at creation
    details = JSONField()

    class Meta:
        db_table = "item_log"
class Transaction(models.Model):
    """A completed sale: the items sold, who sold them, and payment details."""
    # NOTE(review): ForeignKey without on_delete is only valid on Django < 2.0.
    boss = models.ForeignKey(Boss, default=None)
    seller = models.ForeignKey(User, default=None)
    store = models.ForeignKey(Store, null=True, default=None)
    items = JSONField()
    payment_type = models.CharField(choices=(('Cash', 'Cash'), ('American Express', 'American Express'), ('Discover', 'Discover'), ('MasterCard', 'MasterCard'), ('Visa', 'Visa')), max_length=255, default='Cash')
    # Monetary values stored as strings, e.g. '0.00'.
    tax = models.CharField(default='0.00', max_length=12)
    subtotal = models.CharField(max_length=255)
    memo = models.CharField(max_length=255, blank=True)
    date = models.IntegerField(default=get_utc_epoch_time, blank=True)  # epoch seconds at creation

    def __unicode__(self):
        # Python 2 string representation: "<seller first name>: <subtotal>".
        return self.seller.first_name + ': ' + str(self.subtotal)

    class Meta:
        db_table = "transaction"
| {"/inventory/controllers/transaction.py": ["/inventory/modules/base.py", "/inventory/models.py"], "/inventory/controllers/store.py": ["/inventory/models.py", "/inventory/modules/base.py"]} |
66,162 | goryfigment/inventory | refs/heads/master | /inventory/urls.py | """inventory URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from inventory.controllers import site, account_handler, store, inventory_handler, transaction
# Route table for the inventory project: page views, then JSON/form handlers
# grouped by feature area (account, store, inventory, transaction, employee).
urlpatterns = [
    url(r'^$', site.home, name='home'),
    url(r'^admin/', admin.site.urls),
    url(r'^register/$', site.register, name='register_page'),
    url(r'^login/$', site.login, name='login_page'),
    url(r'^forgot_password/$', site.forgot_password, name='forgot_password'),
    url(r'^inventory/$', site.inventory, name='inventory_page'),
    url(r'^transaction/$', site.transaction, name='transaction_page'),
    url(r'^overview/$', site.overview, name='overview_page'),
    url(r'^employee/$', site.employee, name='employee_page'),
    # Account Handler
    url(r'^account/register/$', account_handler.register, name='register'),
    url(r'^account/login/$', account_handler.user_login, name='login'),
    # url(r'^account/settings/$', account_handler.settings, name='settings'),
    # url(r'^account/save_settings/$', account_handler.save_settings, name='save_settings'),
    url(r'^account/reset_password/$', account_handler.reset_password, name='reset_password'),
    url(r'^account/change_password/$', account_handler.change_password, name='change_password'),
    url(r'^logout/$', account_handler.user_logout, name='logout'),
    # Store
    url(r'^store/create_store/$', store.create_store, name='create_store'),
    url(r'^store/edit_store/$', store.edit_store, name='edit_store'),
    url(r'^store/delete_store/$', store.delete_store, name='delete_store'),
    # Inventory
    url(r'^inventory/add_column/$', inventory_handler.add_column, name='add_column'),
    url(r'^inventory/add_row/$', inventory_handler.add_row, name='add_row'),
    url(r'^inventory/edit_column/$', inventory_handler.edit_column, name='edit_column'),
    url(r'^inventory/edit_row/$', inventory_handler.edit_row, name='edit_item'),
    url(r'^inventory/delete_column/$', inventory_handler.delete_column, name='delete_column'),
    url(r'^inventory/delete_row/$', inventory_handler.delete_row, name='delete_item'),
    url(r'^inventory/read_excel/$', inventory_handler.read_excel, name='read_excel'),
    url(r'^inventory/import_submit/$', inventory_handler.import_submit, name='import_submit'),
    url(r'^inventory/export_submit/$', inventory_handler.export_submit, name='export_submit'),
    url(r'^inventory/drop_table/$', inventory_handler.drop_table, name='drop_table'),
    # Inventory Operation
    url(r'^inventory/received/$', inventory_handler.received, name='received'),
    url(r'^inventory/damaged/$', inventory_handler.damaged, name='damaged'),
    url(r'^inventory/reset_cost/$', inventory_handler.reset_cost, name='reset_cost'),
    url(r'^inventory/reset_price/$', inventory_handler.reset_price, name='reset_price'),
    # Transaction
    url(r'^transaction/link_columns/$', transaction.linked_columns, name='link_columns'),
    url(r'^transaction/search/$', transaction.inventory_search, name='inventory_search'),
    url(r'^transaction/create/$', transaction.create_transaction, name='create_transaction'),
    url(r'^transaction/print_receipt/$', transaction.print_receipt, name='print_receipt'),
    url(r'^transaction/save_receipt/$', transaction.save_receipt_settings, name='save_receipt'),
    # Employee
    url(r'^employee/register/$', account_handler.register_employee, name='register_employee'),
    url(r'^employee/edit/$', account_handler.edit_employee, name='edit_employee'),
    url(r'^employee/delete/$', account_handler.delete_employee, name='delete_employee'),
]
| {"/inventory/controllers/transaction.py": ["/inventory/modules/base.py", "/inventory/models.py"], "/inventory/controllers/store.py": ["/inventory/models.py", "/inventory/modules/base.py"]} |
66,163 | goryfigment/inventory | refs/heads/master | /inventory/modules/base.py | import json, math, bcrypt, re, time
from django.conf import settings
from django.http import HttpResponse
from django.core import serializers
from django.http import HttpResponseBadRequest
from inventory.models import Transaction, Store, ItemLog
def get_base_url():
    """Expose the project's configured BASE_URL setting."""
    base_url = settings.BASE_URL
    return base_url
def render_json(data):
    """Serialize *data* to JSON and wrap it in an application/json response."""
    payload = json.dumps(data)
    return HttpResponse(payload, 'application/json')
def decimal_format(f, n, round_decimal):
    """Format *f* with *n* decimal places, rounding or truncating toward -inf.

    *f* may be anything float() accepts; the result is always a string.
    """
    scale = 10 ** n
    template = '{:0.' + str(n) + 'f}'
    if round_decimal:
        value = round(float(f) * scale) / scale
    else:
        value = math.floor(float(f) * scale) / scale
    return template.format(value)
def bad_request(message, data):
    """Return an HTTP 400 JSON response carrying an error message and extra data."""
    # NOTE(review): the key 'error_msg:' has a stray trailing colon — looks like
    # a typo, but the front end may already depend on this exact key; confirm
    # with consumers before renaming it to 'error_msg'.
    data = {'success': False, 'error_msg:': message, 'data': data}
    return HttpResponseBadRequest(json.dumps(data), 'application/json')
def model_to_dict(model):
    """Serialize one Django model instance to a plain dict of its fields plus 'id'.

    Returns None when serialization fails instead of raising.
    Bug fixed: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    narrowed to `except Exception`.
    """
    try:
        serial_obj = serializers.serialize('json', [model])
        obj_as_dict = json.loads(serial_obj)[0]['fields']
        obj_as_dict['id'] = model.pk
        return obj_as_dict
    except Exception:
        return None
def models_to_dict(model_list):
    """Serialize an iterable of model instances, dropping any that fail to serialize."""
    serialized = (model_to_dict(item) for item in list(model_list))
    return [entry for entry in serialized if entry]
def transaction_name_regex(string, item):
    """Expand every `{{key}}` placeholder in *string* with item[key].

    Fix: the pattern is now a raw string — the old '\\{{.*?\\}}' literal relied
    on Python passing unknown escapes through and triggers an invalid-escape
    DeprecationWarning on Python 3.6+. The matched pattern is unchanged.
    """
    key_list = re.findall(r'\{\{.*?\}\}', string)
    for key in key_list:
        item_key = key.replace('{{', '').replace('}}', '')
        string = string.replace(key, item[item_key])
    return string
def transaction_total(transactions):
    """Annotate each transaction dict in-place with formatted timestamp,
    tax, discount and total strings, and accumulate cash / credit / grand
    totals across all of them.

    Returns {'total': {'cash', 'credit', 'total'}, 'transactions': ...}
    with every monetary value rendered as a 2-decimal string.
    """
    cash_sum = 0
    credit_sum = 0
    grand_sum = 0
    for trans in transactions:
        discount_sum = 0
        trans['total'] = 0
        trans['timestamp'] = epoch_strftime(trans['date'], '%b %#d, %Y %I:%M%p')
        # Per-item discounts are summed across the whole transaction.
        for item in trans['items']:
            discount_sum += float(item['discount'])
        # Tax is rounded to cents before being added to the subtotal.
        computed_tax = round(float(trans['tax']) * float(trans['subtotal']) * 100) / 100
        computed_total = float(trans['subtotal']) + computed_tax - float(discount_sum)
        trans['tax'] = '{0:.2f}'.format(computed_tax)
        trans['discount'] = '{0:.2f}'.format(discount_sum)
        trans['total'] = '{0:.2f}'.format(computed_total)
        # Anything that is not 'Cash' counts toward the credit bucket.
        if trans['payment_type'] == 'Cash':
            cash_sum += computed_total
        else:
            credit_sum += computed_total
        grand_sum += computed_total
    return {
        'total': {
            'cash': '{0:.2f}'.format(cash_sum),
            'credit': '{0:.2f}'.format(credit_sum),
            'total': '{0:.2f}'.format(grand_sum),
        },
        'transactions': transactions,
    }
def get_utc_epoch_time(days=0):
    """Current epoch time in whole seconds, optionally shifted back *days* days."""
    offset_seconds = int(days) * 86400
    return int(round(time.time() - offset_seconds))
def epoch_strftime(utc_time, regex):
return time.strftime(regex, time.localtime(int(utc_time)))
def get_transactions(boss_id, start_time=None, end_time=None, order='date'):
    """Fetch a boss's transactions as plain dicts, optionally restricted to
    the inclusive (start_time, end_time) date range, ordered by *order*.
    """
    query = Transaction.objects.filter(boss=boss_id)
    if start_time and end_time:
        query = query.filter(date__range=(start_time, end_time))
    return models_to_dict(query.order_by(order))
def validate_password(password, hashed_password):
    """True when *password* bcrypt-hashes (salted by *hashed_password*) back
    to *hashed_password*.
    """
    rehashed = bcrypt.hashpw(password.encode('utf8'), hashed_password.encode('utf8'))
    return rehashed == hashed_password
def create_password(password):
    """Bcrypt-hash *password* with a freshly generated salt."""
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(password.encode('utf8'), salt)
def get_boss(current_user):
    """Resolve the Boss record for *current_user*.

    A boss account carries its own ``boss`` reference; employee accounts
    reach theirs through ``employee.boss``.
    """
    return current_user.boss if current_user.boss else current_user.employee.boss
def sort_inventory(store, user_inventory):
    """Return the inventory dict as a sorted list of (item_id, item) pairs.

    Sorted by the store's configured column (store.order_by, honoring
    store.reverse), or by numeric item id ascending when order_by is 'none'.

    FIX: the sort keys previously used tuple parameter unpacking
    (``lambda (k, v):``), a Python-2-only form removed by PEP 3113; the
    subscripting equivalent below behaves identically on Python 2 and 3.
    """
    if store.order_by != 'none':
        return sorted(user_inventory.items(),
                      key=lambda pair: pair[1][store.order_by],
                      reverse=store.reverse)
    return sorted(user_inventory.items(), key=lambda pair: int(pair[0]), reverse=False)
def check_req_data(required_data, request):
    """Verify that every key in *required_data* is present in the mapping
    *request* (typically request.POST).

    Returns an HttpResponseBadRequest JSON payload when a key is missing,
    and None (implicitly) when all keys are present.

    NOTE(review): the caller must ``return`` this function's result for the
    400 to actually reach the client; the call sites in account_handler.py
    discard the return value, so a missing field falls through to a
    KeyError instead — verify whether that is intended.
    """
    # Check if all necessary data is present
    for data in required_data:
        if data not in request:
            # The loop variable is deliberately reused for the payload here.
            data = {'success': False, 'error_msg': 'Data not set.'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
def inventory_operation(request, action, operation, link_column, callback_function):
    """Apply *callback_function* to one linked column of a single inventory
    item, record the change as an ItemLog entry, and return the store
    (inventory re-sorted, full item log grouped by day) as a dict.

    POST params read: 'id' (store pk), 'item_id', 'change_value', 'details'.
    *action*/*operation* are stored verbatim on the log entry;
    *link_column* names a key of store.link_columns (e.g. 'quantity');
    *callback_function(old_value, change_value)* produces the new value.
    """
    current_user = request.user
    store = Store.objects.get(id=request.POST['id'])
    linked_columns = store.link_columns
    changing_column = linked_columns[link_column]
    name_column = linked_columns['name']
    item = store.inventory[request.POST['item_id']]
    previous_value = item[changing_column]
    # Do operation
    item[changing_column] = callback_function(item[changing_column], request.POST['change_value'])
    store.save()
    created_item_log = ItemLog.objects.create(
        user=current_user,
        action=action,
        operation=operation,
        item_name=item[name_column],
        change=request.POST['change_value'],
        previous_value=previous_value,
        details={"notes": request.POST['details']}
    )
    # Store is attached after create, requiring a second save.
    created_item_log.store = store
    created_item_log.save()
    item_logs = list(ItemLog.objects.filter(store=store).order_by('-date').values(
        'user__first_name', 'user__last_name', 'action', 'operation', 'item_name', 'change', 'previous_value',
        'date', 'details', 'id'))
    # Replaces the inventory dict with a sorted list of (id, item) pairs for
    # serialization — this happens after store.save(), so only the returned
    # dict (not the DB row) carries the list form.
    store.inventory = sort_inventory(store, store.inventory)
    store_dict = model_to_dict(store)
    # Group the (already date-descending) logs into one bucket per calendar day.
    ordered_logs = []
    datetime_holder = ''
    l_dict = None
    for l in item_logs:
        current_datetime = epoch_strftime(l['date'], "%A %B %d, %Y")
        # Data: Tax, Discount, Total
        # NOTE(review): '%#d' (day without leading zero) is a Windows
        # strftime extension — verify behavior on the deployment OS.
        l['timestamp'] = epoch_strftime(l['date'], '%b %#d, %Y %I:%M%p')
        # Split different dates
        if datetime_holder == current_datetime:
            l_dict['logs'].append(l)
        else:
            if l_dict is not None:
                ordered_logs.append(l_dict)
            l_dict = {'datetime': current_datetime, 'logs': []}
            datetime_holder = current_datetime
            l_dict['logs'].append(l)
    # Append the last date
    if l_dict is not None:
        ordered_logs.append(l_dict)
    store_dict['item_log'] = ordered_logs
    return {'store': store_dict}
| {"/inventory/controllers/transaction.py": ["/inventory/modules/base.py", "/inventory/models.py"], "/inventory/controllers/store.py": ["/inventory/models.py", "/inventory/modules/base.py"]} |
66,164 | goryfigment/inventory | refs/heads/master | /inventory/controllers/site.py | import time
import json
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.forms.models import model_to_dict
from base import get_base_url
from inventory.modules.base import get_boss, models_to_dict, epoch_strftime
from inventory.models import User, Transaction, ItemLog, Employee
def error_page(request):
    """Render the site-wide 404 page."""
    context = {'base_url': get_base_url()}
    return render(request, '404.html', context)
def server_error(request):
    """Render the site-wide 500 page."""
    context = {'base_url': get_base_url()}
    return render(request, '500.html', context)
def home(request):
    """Landing page; already-authenticated visitors go straight to /inventory/."""
    if request.user.is_authenticated():
        return HttpResponseRedirect('/inventory/')
    return render(request, 'home.html', {'base_url': get_base_url()})
def register(request):
    """Registration page; already-authenticated visitors go to /inventory/."""
    if request.user.is_authenticated():
        return HttpResponseRedirect('/inventory/')
    return render(request, 'register.html', {'base_url': get_base_url()})
def login(request):
    """Login page; already-authenticated visitors go to /inventory/."""
    if request.user.is_authenticated():
        return HttpResponseRedirect('/inventory/')
    return render(request, 'login.html', {'base_url': get_base_url()})
def forgot_password(request):
    """Render the password-recovery page, flagging expired reset links.

    When a ?code= query parameter is present, the matching user's reset
    timestamp is checked; links older than 24 hours render as expired.

    NOTE(review): an unknown code makes User.objects.get raise DoesNotExist
    (a 500) instead of showing an error page — verify this is intended.
    """
    data = {
        'base_url': get_base_url(),
        'expired': False
    }
    if 'code' in request.GET:
        current_user = User.objects.get(reset_link=request.GET['code'])
        # 86400 seconds = 24-hour validity window for a reset link.
        if (int(round(time.time())) - current_user.reset_date) > 86400:
            data['expired'] = True
    # If user is login redirect to overview
    if request.user.is_authenticated():
        return HttpResponseRedirect('/inventory/')
    return render(request, 'forgot_password.html', data)
def overview(request):
    """Render the overview dashboard.

    For every store of the current boss's business, builds that store's
    transactions grouped by calendar day (newest first), each annotated with
    formatted timestamp/tax/discount/total strings, and hands the whole
    structure to 'overview.html' as JSON.
    """
    current_user = request.user
    # If not login go to login page
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    current_boss = get_boss(current_user)
    user_business = current_boss.business
    # user_settings['business_tax'] = decimal_format(float(user_business.tax)*100, 3, False)
    stores = user_business.stores.all().values()
    store_dict = {}
    for current_store in stores:
        store_id = str(current_store['id'])
        store_dict[store_id] = current_store
        store_dict[store_id]['transactions'] = []
        transactions = models_to_dict(Transaction.objects.filter(store_id=store_id).order_by('-date'))
        ordered_transactions = []
        datetime_holder = ''
        t_dict = None
        for t in transactions:
            # NOTE(review): item_discount stays 0 here — unlike
            # transaction_total() in modules/base.py, per-item discounts are
            # not summed in this view; verify whether that is intentional.
            item_discount = 0
            current_datetime = epoch_strftime(t['date'], "%A %B %d, %Y")
            epoch_date = time.mktime(time.strptime(current_datetime, "%A %B %d, %Y"))
            # Calculations
            t_tax = round(float(t['tax'])*float(t['subtotal'])*100)/100
            t_total = float(t['subtotal']) + t_tax - float(item_discount)
            # Data: Tax, Discount, Total
            t['timestamp'] = epoch_strftime(t['date'], '%b %#d, %Y %I:%M%p')
            t['tax'] = '{0:.2f}'.format(t_tax)
            t['discount'] = '{0:.2f}'.format(item_discount)
            t['total'] = '{0:.2f}'.format(t_total)
            # Split different dates
            if datetime_holder == current_datetime:
                t_dict['transactions'].append(t)
            else:
                if t_dict is not None:
                    ordered_transactions.append(t_dict)
                t_dict = {'datetime': current_datetime, 'epoch': epoch_date, 'transactions': []}
                datetime_holder = current_datetime
                t_dict['transactions'].append(t)
        # Append the last date
        if t_dict is not None:
            ordered_transactions.append(t_dict)
        store_dict[store_id]['transactions'] = ordered_transactions
    data = {
        'base_url': get_base_url(),
        'name': current_user.first_name + " " + current_user.last_name,
        'username': current_user.username,
        'business_id': user_business.id,
        'business_name': user_business.name,
        'stores': json.dumps(store_dict)
    }
    # if len(user_business.inventory):
    #     user_settings['example_item'] = next(iter(user_business.inventory.items()))[1]
    return render(request, 'overview.html', data)
def inventory(request):
    """Render the inventory page: every store of the boss's business with
    its inventory sorted per the store's settings and its item log grouped
    by calendar day.

    NOTE(review): the sort lambdas below use Python 2 tuple parameter
    unpacking (``lambda (k, v):``), so this module is Python 2 only; the
    same logic also exists as sort_inventory() in modules/base.py.
    """
    current_user = request.user
    # Only go to overview if user is logged in
    if not current_user.is_authenticated():
        return HttpResponseRedirect('/login/')
    current_boss = get_boss(current_user)
    # Employees inherit their permission level from Employee.type.
    user_type = 'boss'
    if not current_user.boss:
        user_type = current_user.employee.type
    user_business = current_boss.business
    stores = user_business.stores.all().values()
    store_dict = {}
    # The first store (if any) is pre-selected in the UI.
    if len(stores):
        active_store = str(stores.first()['id'])
    else:
        active_store = ''
    for current_store in stores:
        store_id = str(current_store['id'])
        store_dict[store_id] = current_store
        store_inventory = current_store['inventory']
        # Replace the inventory dict with a sorted list of (id, item) pairs.
        if current_store['order_by'] != 'none':
            current_store['inventory'] = sorted(store_inventory.items(), key=lambda (k, v): v[current_store['order_by']], reverse=current_store['reverse'])
        else:
            current_store['inventory'] = sorted(store_inventory.items(), key=lambda (k, v): int(k), reverse=False)
        store_log = list(ItemLog.objects.filter(store_id=store_id).order_by('-date').values(
            'user__first_name', 'user__last_name', 'action', 'operation', 'item_name', 'change', 'previous_value',
            'date', 'details', 'id'))
        # Group the date-descending logs into one bucket per calendar day.
        ordered_logs = []
        datetime_holder = ''
        l_dict = None
        for l in store_log:
            current_datetime = epoch_strftime(l['date'], "%A %B %d, %Y")
            # Data: Tax, Discount, Total
            # NOTE(review): this timestamp format omits the year, unlike the
            # equivalent loops elsewhere in the project — verify.
            l['timestamp'] = epoch_strftime(l['date'], '%b %#d, %I:%M%p')
            # Split different dates
            if datetime_holder == current_datetime:
                l_dict['logs'].append(l)
            else:
                if l_dict is not None:
                    ordered_logs.append(l_dict)
                l_dict = {'datetime': current_datetime, 'logs': []}
                datetime_holder = current_datetime
                l_dict['logs'].append(l)
        # Append the last date
        if l_dict is not None:
            ordered_logs.append(l_dict)
        current_store['item_log'] = ordered_logs
    data = {
        'base_url': get_base_url(),
        'business_id': user_business.id,
        'business_name': user_business.name,
        'active_store': active_store,
        'name': current_user.first_name + " " + current_user.last_name,
        'username': current_user.username,
        'stores': json.dumps(store_dict),
        'user_type': user_type
    }
    return render(request, 'inventory.html', data)
def transaction(request):
    """Render the transaction page: receipt settings plus every store's
    transactions grouped by calendar day, with formatted monetary strings.
    """
    current_user = request.user
    # If not login go to login page
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    current_boss = get_boss(current_user)
    # Employees inherit their permission level from Employee.type.
    user_type = 'boss'
    if not current_user.boss:
        user_type = current_user.employee.type
    user_settings = model_to_dict(current_boss.settings)
    user_business = current_boss.business
    # user_settings['business_tax'] = decimal_format(float(user_business.tax)*100, 3, False)
    # The printer IP is split on '.' into its segments for the template.
    user_settings['ip_address'] = user_settings['ip_address'].split('.')
    stores = user_business.stores.all().values()
    store_dict = {}
    for current_store in stores:
        store_id = str(current_store['id'])
        store_dict[store_id] = current_store
        store_dict[store_id]['transactions'] = []
        transactions = models_to_dict(Transaction.objects.filter(store_id=store_id).order_by('-date'))
        ordered_transactions = []
        datetime_holder = ''
        t_dict = None
        for t in transactions:
            # NOTE(review): item_discount stays 0 here — per-item discounts
            # are not summed in this view (cf. transaction_total); verify.
            item_discount = 0
            current_datetime = epoch_strftime(t['date'], "%A %B %d, %Y")
            # Calculations
            t_tax = round(float(t['tax'])*float(t['subtotal'])*100)/100
            t_total = float(t['subtotal']) + t_tax - float(item_discount)
            # Data: Tax, Discount, Total
            t['timestamp'] = epoch_strftime(t['date'], '%b %#d, %Y %I:%M%p')
            t['tax'] = '{0:.2f}'.format(t_tax)
            t['discount'] = '{0:.2f}'.format(item_discount)
            t['total'] = '{0:.2f}'.format(t_total)
            # Split different dates
            if datetime_holder == current_datetime:
                t_dict['transactions'].append(t)
            else:
                if t_dict is not None:
                    ordered_transactions.append(t_dict)
                t_dict = {'datetime': current_datetime, 'transactions': []}
                datetime_holder = current_datetime
                t_dict['transactions'].append(t)
        # Append the last date
        if t_dict is not None:
            ordered_transactions.append(t_dict)
        store_dict[store_id]['transactions'] = ordered_transactions
    data = {
        'base_url': get_base_url(),
        'name': current_user.first_name + " " + current_user.last_name,
        'username': current_user.username,
        'business_id': user_business.id,
        'business_name': user_business.name,
        'stores': json.dumps(store_dict),
        'start_point': user_settings['start_time'],
        'date_range': user_settings['date_range'],
        'settings': json.dumps(user_settings),
        'all': 'ALL',
        'user_type': user_type
    }
    # if len(user_business.inventory):
    #     user_settings['example_item'] = next(iter(user_business.inventory.items()))[1]
    return render(request, 'transaction.html', data)
def employee(request):
    """Render the employee-management page: every store of the boss's
    business with a dict of its employees (name, type, username, email).
    """
    current_user = request.user
    # If user is login redirect to overview
    if not request.user.is_authenticated():
        return HttpResponseRedirect('/login/')
    current_boss = get_boss(current_user)
    # Employees inherit their permission level from Employee.type.
    user_type = 'boss'
    if not current_user.boss:
        user_type = current_user.employee.type
    user_business = current_boss.business
    stores = user_business.stores.all().values()
    store_dict = {}
    for current_store in stores:
        store_id = str(current_store['id'])
        store_dict[store_id] = current_store
        employees = Employee.objects.filter(boss=current_boss, store=store_id).order_by('-type')
        employees_dict = {}
        for current_employee in employees:
            # Each Employee row has a matching User row holding the identity.
            employee_user = User.objects.get(employee_id=current_employee.id)
            employee_id = current_employee.id
            employees_dict[employee_id] = {'first_name': employee_user.first_name, 'last_name': employee_user.last_name,
                                           'type': current_employee.type, 'username': employee_user.username, 'email': employee_user.email,
                                           'id': employee_id}
        store_dict[store_id]['employees'] = employees_dict
    data = {
        'base_url': get_base_url(),
        'name': current_user.first_name + " " + current_user.last_name,
        'stores': json.dumps(store_dict),
        'user_type': user_type,
        'username': current_user.username
    }
    return render(request, 'employee.html', data)
| {"/inventory/controllers/transaction.py": ["/inventory/modules/base.py", "/inventory/models.py"], "/inventory/controllers/store.py": ["/inventory/models.py", "/inventory/modules/base.py"]} |
66,165 | goryfigment/inventory | refs/heads/master | /inventory/controllers/transaction.py | import re
from django.http import HttpResponse, JsonResponse, HttpResponseBadRequest
from django.forms.models import model_to_dict
from inventory.modules.base import decimal_format, get_boss, epoch_strftime, models_to_dict
from inventory.models import Store, Transaction
from inventory.decorators import login_required, data_required, user_permission
from inventory.modules.receipt_printer import receipt_printer
@login_required
@user_permission('boss_only')
@data_required(['store_id', 'link_columns'], 'BODY')
def linked_columns(request):
    """Save the store's column-role mapping (BODY 'link_columns') and
    normalize existing inventory values to match: price/cost columns become
    2-decimal strings ('0.00' when not numeric), quantity columns become
    ints (0 when not numeric).
    """
    store = Store.objects.get(id=request.BODY['store_id'])
    store_inventory = store.inventory
    link_columns = request.BODY['link_columns']
    for link_type, column in link_columns.iteritems():
        if link_type == 'price' or link_type == 'cost': # Turn all data to float values
            for item_id, item in store_inventory.iteritems():
                current_price = item[column]
                # A single embedded '.' is allowed; anything else resets to 0.00.
                if current_price.replace('.', '', 1).isdigit():
                    item[column] = decimal_format(float(current_price), 2, False)
                else:
                    item[column] = '0.00'
        elif link_type == 'quantity': # Turn all data to int values
            for key, item in store_inventory.iteritems():
                current_quantity = item[column]
                if str(current_quantity).isdigit():
                    item[column] = int(current_quantity)
                else:
                    item[column] = 0
    store.link_columns = link_columns
    store.save()
    return JsonResponse(model_to_dict(store), safe=False)
@login_required
@data_required(['search_value', 'id'], 'GET')
def inventory_search(request):
    """Search a store's inventory and return matches as a JSON list of
    {'price', 'name', 'id'} dicts.

    The query and each candidate field value are stripped of non-word
    characters and lower-cased before the substring test; the columns
    searched come from store.transaction_filter ('ALL' means every column
    of the first inventory item).
    """
    store = Store.objects.get(id=request.GET['id'])
    # BUG FIX: candidate values are lower-cased below, but the query was
    # not, so any query containing an uppercase letter could never match.
    # Normalize both sides the same way.
    search_value = re.sub(r'[^\w]', '', request.GET['search_value']).lower()
    search_results = []
    # Get inventory
    user_inventory = store.inventory
    link_columns = store.link_columns
    name_key = link_columns['name']
    price_key = link_columns['price']
    # Get filters
    filters = store.transaction_filter
    if 'ALL' in filters:
        filters = user_inventory.values()[0].keys()
    # Loop through inventory
    for key, item in user_inventory.iteritems():
        # Loop through filters
        for data in filters:
            # Check if 'search' matches!
            current_data = re.sub(r'[^\w]', '', str(item[data])).lower()
            if search_value in current_data:
                # Create new data defined by the user; stop at first matching column.
                new_data = {'price': item[price_key], 'name': item[name_key], 'id': key}
                search_results.append(new_data)
                break
    return JsonResponse(search_results, safe=False)
@login_required
@user_permission('transaction')
@data_required(['store_id', 'items', 'payment_type', 'tax', 'subtotal', 'memo'], 'BODY')
def create_transaction(request):
    """Record a sale: decrement the store's inventory quantities, create a
    Transaction row, and return it along with the store's full transaction
    history grouped by calendar day.
    """
    current_user = request.user
    current_boss = get_boss(current_user)
    store_id = request.BODY['store_id']
    store = Store.objects.get(id=store_id)
    user_inventory = store.inventory
    quantity_column = store.link_columns['quantity']
    cost_column = store.link_columns['cost']
    transaction_items = request.BODY['items']
    if not len(transaction_items):
        return HttpResponseBadRequest('Must have at least one item per transaction.', 'application/json')
    # Stamp each posted item with its inventory id and current cost.
    for key, item in transaction_items.iteritems():
        item['id'] = key
        item['cost'] = user_inventory[key][cost_column]
    item_list = []
    # Subtract from inventory
    for key, item in transaction_items.iteritems():
        inventory_item = user_inventory[key]
        inventory_qty = int(inventory_item[quantity_column])
        transaction_qty = int(item['quantity'])
        inventory_qty -= transaction_qty
        # Quantities are clamped at zero rather than going negative.
        if inventory_qty < 0:
            inventory_qty = 0
        user_inventory[key][quantity_column] = inventory_qty
        item_list.append(item)
    store.save()
    transaction = Transaction.objects.create(
        boss=current_boss,
        seller=current_user,
        store=store,
        payment_type=request.BODY['payment_type'],
        subtotal=request.BODY['subtotal'],
        tax=request.BODY['tax'],
        memo=request.BODY['memo'],
        items=item_list
    )
    transactions = models_to_dict(store.transaction_set.all().order_by('-date'))
    # Group the date-descending history into one bucket per calendar day.
    ordered_transactions = []
    datetime_holder = ''
    t_dict = None
    for t in transactions:
        # NOTE(review): item_discount stays 0 here — per-item discounts are
        # not summed in this listing (cf. transaction_total); verify.
        item_discount = 0
        current_datetime = epoch_strftime(t['date'], "%A %B %d, %Y")
        # Calculations
        t_tax = round(float(t['tax'])*float(t['subtotal'])*100)/100
        t_total = float(t['subtotal']) + t_tax - float(item_discount)
        # Data: Tax, Discount, Total
        t['timestamp'] = epoch_strftime(t['date'], '%b %#d, %Y %I:%M%p')
        t['tax'] = '{0:.2f}'.format(t_tax)
        t['discount'] = '{0:.2f}'.format(item_discount)
        t['total'] = '{0:.2f}'.format(t_total)
        # Split different dates
        if datetime_holder == current_datetime:
            t_dict['transactions'].append(t)
        else:
            if t_dict is not None:
                ordered_transactions.append(t_dict)
            t_dict = {'datetime': current_datetime, 'transactions': []}
            datetime_holder = current_datetime
            t_dict['transactions'].append(t)
    # Append the last date
    if t_dict is not None:
        ordered_transactions.append(t_dict)
    return JsonResponse({'transaction': model_to_dict(transaction), 'store_transactions': ordered_transactions, 'success': True}, safe=False)
@login_required
@data_required(['transaction'], 'BODY')
def print_receipt(request):
    """Send the posted transaction to the boss's configured receipt printer."""
    boss = get_boss(request.user)
    # Print receipt
    receipt_printer(boss.settings, request.BODY['transaction'])
    return JsonResponse({'success': True}, safe=False)
@login_required
@user_permission('boss_only')
@data_required(['ip_address', 'header', 'footer'], 'BODY')
def save_receipt_settings(request):
    """Persist the boss's receipt-printer settings (IP, header, footer)."""
    boss_settings = get_boss(request.user).settings
    for field in ('ip_address', 'header', 'footer'):
        setattr(boss_settings, field, request.BODY[field])
    boss_settings.save()
    return JsonResponse({'transaction_settings': model_to_dict(boss_settings)}, safe=False)
| {"/inventory/controllers/transaction.py": ["/inventory/modules/base.py", "/inventory/models.py"], "/inventory/controllers/store.py": ["/inventory/models.py", "/inventory/modules/base.py"]} |
66,166 | goryfigment/inventory | refs/heads/master | /inventory/controllers/account_handler.py | import json
import re
import uuid
import time
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from django.http import HttpResponseRedirect, HttpResponseBadRequest, JsonResponse
from django.contrib.auth import authenticate, login, logout
from inventory.settings_secret import GMAIL, GMAIL_PASSWORD
from inventory.modules.base import render_json, get_boss, model_to_dict
import inventory.modules.base as helper
from inventory.models import User, Boss, Business, Settings, Employee, Store
from inventory.decorators import data_required, user_permission
def register(request):
    """Create a boss account: validates the POSTed fields, creates the
    Settings/Business/Boss/User rows, authenticates, and opens a session.

    NOTE(review): check_req_data's response is discarded (here and at the
    other call sites) — a missing field falls through to a KeyError instead
    of a 400; verify. The bare ``print username`` statements below are
    Python 2 debug leftovers.
    """
    helper.check_req_data(['username', 'email', 'password', 'first_name', 'last_name', 'business_name'], request.POST)
    username = request.POST['username'].strip().lower()
    email = request.POST['email'].strip().lower()
    password = request.POST['password']
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    business_name = request.POST['business_name']
    # Check first name
    if not len(first_name):
        print username
        data = {'success': False, 'error_msg': 'Must have a first name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check last name
    if not len(last_name):
        print username
        data = {'success': False, 'error_msg': 'Must have a last name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check business name
    if not len(business_name):
        print username
        data = {'success': False, 'error_msg': 'Must have a business name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check username
    if len(username) <= 2 or len(username) >= 16:
        print username
        data = {'success': False, 'error_msg': 'Username must be between 3 to 15 characters.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check Email
    if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
        data = {'success': False, 'error_msg': 'Invalid email.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if valid password: Must be 8 or more characters and contain a combo of letters and numbers
    if not len(password) >= 8:
        data = {'success': False, 'error_msg': 'Password must be 8 characters or more.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    if not bool(re.search(r'\d', password)) or not bool(re.search(r'[a-zA-Z]', password)):
        data = {'success': False, 'error_msg': 'Invalid password.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if email exist in the database
    if User.objects.filter(username=username).exists():
        data = {'success': False, 'error_msg': 'Username exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if email exist in the database
    if User.objects.filter(email=email).exists():
        data = {'success': False, 'error_msg': 'Email exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # A boss account owns one Settings row and one Business row.
    user_settings = Settings.objects.create()
    business = Business.objects.create(name=business_name)
    boss = Boss.objects.create(settings=user_settings, business=business)
    User.objects.create(
        username=username,
        email=email,
        password=helper.create_password(password),
        first_name=first_name,
        last_name=last_name,
        boss=boss
    )
    # Validate password
    auth_user = authenticate(email=email, password=password)
    # Login user
    login(request, auth_user)
    return render_json({'success': True})
def user_login(request):
    """Authenticate by username or e-mail (POST 'username'/'password') and
    open a session.

    Returns a JSON success payload, or a 400 with an error message when the
    identifier is malformed or the account does not exist.
    NOTE(review): check_req_data's response is discarded here, as at other
    call sites — a missing field raises KeyError instead of a 400; verify.
    """
    helper.check_req_data(['username', 'password'], request.POST)
    username = request.POST['username'].strip().lower()
    # BUG FIX: the password was previously passed through .strip().lower(),
    # but register() hashes the raw password — so any password containing
    # an uppercase letter could never authenticate. Use it as submitted.
    password = request.POST['password']
    if '@' in username:
        # Check Email
        if not re.match(r"[^@]+@[^@]+\.[^@]+", username):
            data = {'success': False, 'error_msg': 'Invalid email'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
        # Check if the user exist first
        if not User.objects.filter(email=username).exists():
            data = {'success': False, 'error_msg': 'User does not exists.'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
        # Validate password
        user = authenticate(email=username, password=password)
    else:
        # Check if username is over 15 characters
        if len(username) > 15:
            data = {'success': False, 'error_msg': 'Username to long.'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
        # Check if the user exist first
        if not User.objects.filter(username=username).exists():
            data = {'success': False, 'error_msg': 'User does not exists.'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
        # Validate password
        user = authenticate(username=username, password=password)
    login(request, user)
    return render_json({'success': True})
def user_logout(request):
    """End the current session, then send the visitor to the login screen."""
    logout(request)
    login_url = '/login/'
    return HttpResponseRedirect(login_url)
@data_required(['username', 'base_url'], 'POST')
def reset_password(request):
    """Start password recovery: stamp the user with a one-time reset code
    and e-mail them a recovery link (plain-text + HTML alternatives) via
    Gmail SMTP.

    NOTE(review): the bare ``except:`` below also hides unexpected errors
    (not just a missing user) behind 'User does not exists.' — verify.
    """
    username = request.POST['username']
    try:
        # The identifier may be either an e-mail address or a username.
        if '@' in username:
            current_user = User.objects.get(email=username)
        else:
            current_user = User.objects.get(username=username)
    except:
        data = {'success': False, 'error_msg': 'User does not exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # One-time code; reset_date bounds its 24h validity (see forgot_password).
    reset_link = uuid.uuid4().hex
    current_user.reset_link = reset_link
    current_user.reset_date = int(round(time.time()))
    current_user.save()
    from_email = "whey2ez@noreply.com"
    to_email = current_user.email
    name = current_user.first_name
    link = request.POST['base_url'] + '/forgot_password?code=' + reset_link
    # Create message container - the correct MIME type is multipart/alternative.
    msg = MIMEMultipart('alternative')
    msg['Subject'] = "Whey2eZ - Forgotten Password"
    msg['From'] = from_email
    msg['To'] = to_email
    # Create the body of the message (a plain-text and an HTML version).
    text = "Hi " + name + "!\nWe received a request to reset your Whey2eZ password.\n\n" \
           "Click the link to change your password: " + link
    html = """\
    <html>
    <head></head>
    <body>
    <div>
    <p>Hi """ + name + """!<br><br>
    We received a request to reset your Whey2eZ password.<br><br>
    <a href='""" + link + """'>Click here to change your password.</a>
    </p>
    </body>
    </html>
    """
    # Record the MIME types of both parts - text/plain and text/html.
    part1 = MIMEText(text, 'plain')
    part2 = MIMEText(html, 'html')
    msg.attach(part1)
    msg.attach(part2)
    # Send the message via local SMTP server.
    s = smtplib.SMTP('smtp.gmail.com', 587)
    s.ehlo()
    s.starttls()
    s.login(GMAIL, GMAIL_PASSWORD)
    # sendmail function takes 3 arguments: sender's address, recipient's address
    s.sendmail(from_email, to_email, msg.as_string())
    s.quit()
    return JsonResponse({'success': True}, safe=False)
@data_required(['password1', 'password2', 'code'], 'POST')
def change_password(request):
    """Complete password recovery: given a valid, unexpired reset code and
    two matching passwords, store the new bcrypt hash and clear the code.

    NOTE(review): an unknown code makes User.objects.get raise DoesNotExist
    (a 500) rather than returning an error payload — verify.
    """
    password1 = request.POST['password1']
    password2 = request.POST['password2']
    current_user = User.objects.get(reset_link=request.POST['code'])
    # 86400 seconds = the 24-hour validity window stamped by reset_password.
    if (int(round(time.time())) - current_user.reset_date) > 86400:
        data = {'success': False, 'error_msg': 'Password recovery expired.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    if password1 == password2:
        # Same password policy as registration: >= 8 chars, letters + digits.
        if not len(password1) >= 8:
            data = {'success': False, 'error_msg': 'Password must be 8 characters or more.'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
        if not bool(re.search(r'\d', password1)) or not bool(re.search(r'[a-zA-Z]', password1)):
            data = {'success': False, 'error_msg': 'Invalid password.'}
            return HttpResponseBadRequest(json.dumps(data), 'application/json')
        current_user.password = helper.create_password(password1)
        # Invalidate the one-time code after use.
        current_user.reset_link = ''
        current_user.reset_date = 0
        current_user.save()
    else:
        data = {'success': False, 'error_msg': 'Both passwords do not match.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    return JsonResponse({'success': True}, safe=False)
@user_permission('boss_only')
def register_employee(request):
    """Create an employee account under the current boss for one store,
    then return that store with its refreshed employee roster.

    NOTE(review): check_req_data's response is discarded — a missing field
    falls through to a KeyError instead of a 400; verify. The bare
    ``print username`` statements are Python 2 debug leftovers.
    """
    helper.check_req_data(['username', 'email', 'password', 'first_name', 'last_name', 'type', 'store'], request.POST)
    username = request.POST['username'].strip().lower()
    email = request.POST['email'].strip().lower()
    password = request.POST['password']
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    user_type = request.POST['type']
    store = Store.objects.get(id=request.POST['store'])
    boss = get_boss(request.user)
    # Check first name
    if not len(first_name):
        print username
        data = {'success': False, 'error_msg': 'Must have a first name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check last name
    if not len(last_name):
        print username
        data = {'success': False, 'error_msg': 'Must have a last name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check username
    if len(username) <= 2 or len(username) >= 16:
        print username
        data = {'success': False, 'error_msg': 'Username must be between 3 to 15 characters.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check Email
    if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
        data = {'success': False, 'error_msg': 'Invalid email.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if valid password: Must be 8 or more characters and contain a combo of letters and numbers
    if not len(password) >= 8:
        data = {'success': False, 'error_msg': 'Password must be 8 characters or more.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    if not bool(re.search(r'\d', password)) or not bool(re.search(r'[a-zA-Z]', password)):
        data = {'success': False, 'error_msg': 'Invalid password.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if email exist in the database
    if User.objects.filter(username=username).exists():
        data = {'success': False, 'error_msg': 'Username exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if email exist in the database
    if User.objects.filter(email=email).exists():
        data = {'success': False, 'error_msg': 'Email exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    employee = Employee.objects.create(boss=boss, type=user_type, store=store)
    User.objects.create(
        username=username,
        email=email,
        password=helper.create_password(password),
        first_name=first_name,
        last_name=last_name,
        employee=employee
    )
    # Rebuild the store's employee roster for the response payload.
    employees = Employee.objects.filter(boss=boss, store=store).order_by('-type')
    employees_dict = {}
    store = model_to_dict(store)
    for current_employee in employees:
        employee_user = User.objects.get(employee_id=current_employee.id)
        employee_id = current_employee.id
        employees_dict[employee_id] = {'first_name': employee_user.first_name, 'last_name': employee_user.last_name,
                                       'type': current_employee.type, 'username': employee_user.username, 'email': employee_user.email,
                                       'id': employee_id}
    store['employees'] = employees_dict
    return render_json({'store': store, 'success': True})
@user_permission('boss_only')
def edit_employee(request):
    """Update an employee's account details and role.

    Expects POST data: username, email, password, first_name, last_name,
    type, store and employee (ids). Validates every field, rejects
    username/email collisions with *other* accounts, then saves the
    Employee and its backing User. Returns the store's refreshed
    employee roster as JSON.
    """
    helper.check_req_data(['username', 'email', 'password', 'first_name', 'last_name', 'type', 'store', 'employee'], request.POST)
    username = request.POST['username'].strip().lower()
    email = request.POST['email'].strip().lower()
    password = request.POST['password']
    first_name = request.POST['first_name']
    last_name = request.POST['last_name']
    user_type = request.POST['type']
    store = Store.objects.get(id=request.POST['store'])
    employee = Employee.objects.get(id=request.POST['employee'])
    boss = get_boss(request.user)
    # Check first name (debug `print username` statements removed: they were
    # Python-2 syntax and leaked user input to stdout)
    if not len(first_name):
        data = {'success': False, 'error_msg': 'Must have a first name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check last name
    if not len(last_name):
        data = {'success': False, 'error_msg': 'Must have a last name.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check username
    if len(username) <= 2 or len(username) >= 16:
        data = {'success': False, 'error_msg': 'Username must be between 3 to 15 characters.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check Email
    if not re.match(r"[^@]+@[^@]+\.[^@]+", email):
        data = {'success': False, 'error_msg': 'Invalid email.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # Check if valid password: Must be 8 or more characters and contain a combo of letters and numbers
    if not len(password) >= 8:
        data = {'success': False, 'error_msg': 'Password must be 8 characters or more.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    if not bool(re.search(r'\d', password)) or not bool(re.search(r'[a-zA-Z]', password)):
        data = {'success': False, 'error_msg': 'Invalid password.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    # BUG FIX: only *other* accounts may block the username/email. The old
    # check also matched the employee being edited, so saving without
    # changing both fields always failed with 'Username exists.'
    if User.objects.filter(username=username).exclude(employee=employee).exists():
        data = {'success': False, 'error_msg': 'Username exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    if User.objects.filter(email=email).exclude(employee=employee).exists():
        data = {'success': False, 'error_msg': 'Email exists.'}
        return HttpResponseBadRequest(json.dumps(data), 'application/json')
    employee.type = user_type
    employee.save()
    user = User.objects.get(employee=employee)
    user.username = username
    user.email = email
    user.password = helper.create_password(password)
    user.first_name = first_name
    user.last_name = last_name
    user.save()
    # Rebuild the roster payload for the client.
    employees = Employee.objects.filter(boss=boss, store=store).order_by('-type')
    employees_dict = {}
    store = model_to_dict(store)
    for current_employee in employees:
        employee_user = User.objects.get(employee_id=current_employee.id)
        employee_id = current_employee.id
        employees_dict[employee_id] = {'first_name': employee_user.first_name, 'last_name': employee_user.last_name,
            'type': current_employee.type, 'username': employee_user.username, 'email': employee_user.email,
            'id': employee_id}
    store['employees'] = employees_dict
    return render_json({'store': store, 'success': True})
@user_permission('boss_only')
@data_required(['employee', 'store'], 'POST')
def delete_employee(request):
    """Remove an employee (and its backing User) and return the store's
    refreshed roster as JSON."""
    target = Employee.objects.get(id=request.POST['employee'])
    target_user = User.objects.get(employee=target)
    store = Store.objects.get(id=request.POST['store'])
    boss = get_boss(request.user)
    # Drop both the Employee row and the User account that backs it.
    target.delete()
    target_user.delete()
    remaining = Employee.objects.filter(boss=boss, store=store).order_by('-type')
    roster = {}
    for emp in remaining:
        acct = User.objects.get(employee_id=emp.id)
        roster[emp.id] = {
            'first_name': acct.first_name,
            'last_name': acct.last_name,
            'type': emp.type,
            'username': acct.username,
            'email': acct.email,
            'id': emp.id,
        }
    payload = model_to_dict(store)
    payload['employees'] = roster
    return render_json({'store': payload, 'success': True})
66,167 | goryfigment/inventory | refs/heads/master | /inventory/controllers/store.py | from django.forms.models import model_to_dict
from django.http import JsonResponse, HttpResponseBadRequest
from inventory.models import Store
from inventory.decorators import login_required, user_permission, data_required
from inventory.modules.base import get_boss
@login_required
@user_permission('boss_only')
@data_required(['store_name'], 'POST')
def create_store(request):
    """Create a store for the boss's business and return it as JSON."""
    boss = get_boss(request.user)
    business = boss.business
    store_name = request.POST['store_name']
    if store_name == '':
        return HttpResponseBadRequest('This field is required.', 'application/json')
    # Names must be unique within this business.
    if any(existing.name == store_name for existing in business.stores.all()):
        return HttpResponseBadRequest('Name already exist.', 'application/json')
    store = Store.objects.create(name=store_name)
    # ADD TO BUSINESS STORE LIST
    business.stores.add(store)
    return JsonResponse(model_to_dict(store), safe=False)
@login_required
@user_permission('boss_only')
@data_required(['id', 'store_name'], 'POST')
def edit_store(request):
    """Rename one of the boss's stores and return the updated store as JSON.

    Rejects empty names and names already used by a *different* store in
    the same business.
    """
    boss = get_boss(request.user)
    business = boss.business
    store_name = request.POST['store_name']
    if store_name == '':
        return HttpResponseBadRequest('This field is required.', 'application/json')
    store = Store.objects.get(id=request.POST['id'])
    # Consistency/security fix: verify ownership, like delete_store does —
    # previously any boss could rename any store by id.
    if store not in business.stores.all():
        return HttpResponseBadRequest('Store does not exist.', 'application/json')
    # BUG FIX: exclude the store being edited from the duplicate check;
    # the old loop matched the store's own current name, so re-saving
    # without a rename always failed with 'Name already exist.'
    for user_store in business.stores.all():
        if user_store.name == store_name and user_store.id != store.id:
            return HttpResponseBadRequest('Name already exist.', 'application/json')
    store.name = store_name
    store.save()
    return JsonResponse(model_to_dict(store), safe=False)
@login_required
@user_permission('boss_only')
@data_required(['id'], 'POST')
def delete_store(request):
    """Delete one of the boss's stores and echo back its id as JSON."""
    boss = get_boss(request.user)
    owned_stores = boss.business.stores.all()
    store_id = request.POST['id']
    store = Store.objects.get(id=store_id)
    # Refuse to touch stores outside this boss's business.
    if store not in owned_stores:
        return HttpResponseBadRequest('Store does not exist.', 'application/json')
    store.delete()
    return JsonResponse({'id': store_id}, safe=False)
| {"/inventory/controllers/transaction.py": ["/inventory/modules/base.py", "/inventory/models.py"], "/inventory/controllers/store.py": ["/inventory/models.py", "/inventory/modules/base.py"]} |
66,205 | david-a-wheeler/hello | refs/heads/master | /test_hello_unittest.py | #!/usr/bin/python3
import unittest, io, contextlib, os
import hello
class TestHello(unittest.TestCase):
    """Test our program"""

    def test_output(self):
        """Ensure program produces correct output"""
        f = io.StringIO()  # Create pseudo-file where output will be sent
        with contextlib.redirect_stdout(f):  # Redirect output
            hello.print_hello()
        # BUG FIX: print() always terminates with '\n', and io.StringIO does
        # no newline translation, so comparing against os.linesep made this
        # test fail on platforms where os.linesep is '\r\n'.
        self.assertEqual(f.getvalue(), 'Hello, world!\n')
# Allow running this test module directly: `python test_hello_unittest.py`.
if __name__ == '__main__':
    unittest.main()
| {"/test_hello_unittest.py": ["/hello.py"]} |
66,206 | david-a-wheeler/hello | refs/heads/master | /hello.py | #!/usr/bin/python3
"""This is a trivial module to say 'Hello, world!'"""
def print_hello():
    """Write the canonical greeting to standard output."""
    greeting = "Hello, world!"
    print(greeting)
if __name__ == "__main__":
print_hello()
| {"/test_hello_unittest.py": ["/hello.py"]} |
66,207 | JamesSibbit/k-means | refs/heads/master | /k_means.py | from sklearn.cluster import KMeans
def k_means_clustering(data, n_clusters=2):
    """Fit a k-means model to *data*.

    Parameters
    ----------
    data : array-like of shape (n_samples, n_features)
        Points to cluster.
    n_clusters : int, optional
        Number of clusters to form. Defaults to 2, preserving the
        original hard-coded behaviour.

    Returns
    -------
    sklearn.cluster.KMeans
        The fitted estimator.
    """
    k_mean = KMeans(n_clusters=n_clusters)
    return k_mean.fit(data)
| {"/test.py": ["/k_means.py"]} |
66,208 | JamesSibbit/k-means | refs/heads/master | /test.py | from k_means import k_means_clustering
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from scipy.stats import norm
# Create some random data using two normal distributions and plot to see the separation.
# Cluster parameters are supplied interactively by the user.
mu_one = float(input("Enter mean for first normal cluster: "))
mu_two = float(input("Enter mean for second normal cluster: "))
var_one = float(input("Enter variance for first normal cluster: "))
var_two = float(input("Enter variance for second normal cluster: "))
# 100 (x, y) samples per cluster. NOTE(review): `scale` in scipy's norm.rvs
# is a standard deviation, not a variance — the prompt wording is loose.
X1 = np.array(norm.rvs(loc=mu_one, scale =var_one, size=100))
Y1 = np.array(norm.rvs(loc=mu_one, scale =var_one, size=100))
X2 = np.array(norm.rvs(loc=mu_two, scale =var_two, size=100))
Y2 = np.array(norm.rvs(loc=mu_two, scale =var_two, size=100))
# Stack into one (100, 2) array per class, then one (200, 2) dataset.
data_zero = np.vstack((X1,Y1)).T
data_one = np.vstack((X2,Y2)).T
data = np.vstack((data_zero,data_one))
# Visualise the two classes before clustering.
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.scatter(data_zero[:,0], data_zero[:, 1], label="Class 1")
ax1.scatter(data_one[:,0], data_one[:, 1], label="Class 2")
plt.legend(loc='upper left')
plt.show()
# Fit k-means (k=2) and report the learned centroids.
k_mean = k_means_clustering(data)
centroids = k_mean.cluster_centers_
print("K-means centroid output is as follows.")
print("Centroid of class one is x="+str(centroids[0][0])+", y="+str(centroids[0][1]))
print("Centroid of class two is x="+str(centroids[1][0])+", y="+str(centroids[1][1]))
# Classify a user-supplied point against the fitted clusters.
print("Now enter a test sample.")
value_x = float(input("Enter x value of data point: "))
value_y = float(input("Enter y value of data point: "))
test_sample = np.array([value_x, value_y]).reshape(1,-1)
result = k_mean.predict(test_sample)
print("Value belongs to cluster "+str(result[0]+1))
| {"/test.py": ["/k_means.py"]} |
66,211 | daramg-suminlee/multitask-learning-pytorch | refs/heads/main | /utils/losses.py |
import torch.nn as nn
import torch.nn.functional as F
class MultiTaskLoss(nn.Module):
    """Sum of per-task MSE losses over parallel prediction/target lists."""

    def __init__(self):
        super().__init__()

    def forward(self, yhat_list: list, y_list: list):
        """Return the summed MSE across tasks.

        Each target is reshaped to a column vector so it matches the
        corresponding (N, 1) prediction.
        """
        per_task = [F.mse_loss(pred, target.view(-1, 1))
                    for pred, target in zip(yhat_list, y_list)]
        return sum(per_task, 0)
| {"/model/shared_bottem.py": ["/model/mlp.py"], "/model/__init__.py": ["/model/mlp.py", "/model/shared_bottem.py", "/model/cgc.py"], "/model/cgc.py": ["/model/mlp.py"]} |
66,212 | daramg-suminlee/multitask-learning-pytorch | refs/heads/main | /model/shared_bottem.py |
import torch.nn as nn
from model.mlp import SingleLayerPerception as SLP
from model.mlp import MultiLayerPerceptron as MLP
class SharedBottom(nn.Module):
    """Shared-bottom multi-task network: a single shared SLP representation
    feeding one MLP tower per task."""

    def __init__(
        self,
        input_size: int,
        shared_size: int,
        tower_size: int,
        num_tasks: int
    ):
        super().__init__()
        self.num_tasks = num_tasks
        # One representation shared by every task.
        self.shared_layer = SLP(input_size, shared_size)
        # One independent tower head per task.
        towers = [MLP(shared_size, tower_size) for _ in range(num_tasks)]
        self.tower_layer = nn.ModuleList(towers)

    def forward(self, x):
        """Return a list with one output tensor per task."""
        common = self.shared_layer(x)
        outs = []
        for head in self.tower_layer:
            outs.append(head(common))
        return outs
| {"/model/shared_bottem.py": ["/model/mlp.py"], "/model/__init__.py": ["/model/mlp.py", "/model/shared_bottem.py", "/model/cgc.py"], "/model/cgc.py": ["/model/mlp.py"]} |
66,213 | daramg-suminlee/multitask-learning-pytorch | refs/heads/main | /model/__init__.py |
from model.mlp import SingleLayerPerception as SLP
from model.mlp import MultiLayerPerceptron as MLP
from model.omoe import OnegateMixtureOfExperts as OMOE
from model.mmoe import MultigateMixtureOfExperts as MMOE
from model.shared_bottem import SharedBottom
from model.cgc import CustomizedGateControl as CGC | {"/model/shared_bottem.py": ["/model/mlp.py"], "/model/__init__.py": ["/model/mlp.py", "/model/shared_bottem.py", "/model/cgc.py"], "/model/cgc.py": ["/model/mlp.py"]} |
66,214 | daramg-suminlee/multitask-learning-pytorch | refs/heads/main | /model/mlp.py |
import torch.nn as nn
class SingleLayerPerception(nn.Module):
    """A single linear layer whose outputs are log-softmax normalised
    over dimension 1."""

    def __init__(self, input_size: int, output_size: int):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Linear(input_size, output_size),
            nn.LogSoftmax(1),
        )

    def forward(self, x):
        """Map (N, input_size) inputs to (N, output_size) log-probabilities."""
        return self.layer(x)
class MultiLayerPerceptron(nn.Module):
    """Two-layer perceptron producing one log-sigmoid output per sample."""

    def __init__(self, input_size: int, hidden_size: int):
        super().__init__()
        self.layer = nn.Sequential(
            nn.Linear(input_size, hidden_size),
            nn.ReLU(),
            nn.Linear(hidden_size, 1),
            nn.LogSigmoid(),
        )

    def forward(self, x):
        """Map (N, input_size) inputs to (N, 1) log-sigmoid activations."""
        return self.layer(x)
| {"/model/shared_bottem.py": ["/model/mlp.py"], "/model/__init__.py": ["/model/mlp.py", "/model/shared_bottem.py", "/model/cgc.py"], "/model/cgc.py": ["/model/mlp.py"]} |
66,215 | daramg-suminlee/multitask-learning-pytorch | refs/heads/main | /model/cgc.py |
import torch
import torch.nn as nn
from model.mlp import SingleLayerPerception as SLP
from model.mlp import MultiLayerPerceptron as MLP
class CustomizedGateControl(nn.Module):
    """Customized Gate Control (CGC) multi-task layer: per-task private
    experts plus shared experts, mixed by per-task gates and fed into
    per-task towers.

    NOTE(review): the gate weights are used as raw linear scores — no
    softmax is applied before mixing — and they are initialised to zeros,
    so the initial mixture is all-zeros. Confirm both are intentional.
    """
    def __init__(
        self,
        input_size: int,
        expert_size: int,
        tower_size: int,
        num_tasks: int,
        num_shared_experts: int,
        num_task_experts: list
    ):
        """num_task_experts[i] is the number of experts private to task i."""
        super(CustomizedGateControl, self).__init__()
        self.expert_size = expert_size
        self.num_shared_experts = num_shared_experts
        self.num_task_experts = num_task_experts
        self.num_tasks = num_tasks
        # Experts visible to every task.
        self.shared_expert_layer = nn.ModuleList(
            [SLP(input_size, expert_size) for _ in range(num_shared_experts)]
        )
        # One private expert group per task.
        self.task_expert_layer = nn.ModuleList([nn.ModuleList(
            [SLP(input_size, expert_size) for _ in range(num_experts)]
        ) for num_experts in num_task_experts])
        # Gate i scores task i's shared + private experts from the raw input.
        self.task_gate = nn.ParameterList(
            [nn.Parameter(torch.zeros(input_size, num_shared_experts + num_task_experts[i]), \
                requires_grad=True) for i in range(num_tasks)]
        )
        self.tower_layer = nn.ModuleList(
            [MLP(expert_size, tower_size) for _ in range(num_tasks)]
        )

    def forward(self, x):
        """Return one tower output per task for x of shape (batch, input_size)."""
        # Per-task gate scores: one (batch, n_shared + n_private_t) tensor each.
        task_gates = [torch.matmul(x, gate) for gate in self.task_gate]
        shared_experts = [self.shared_expert_layer[i](x) for i in range(self.num_shared_experts)]
        shared_infos = []
        for t in range(self.num_tasks):
            num_task_experts = self.num_task_experts[t]
            task_expert_layer = self.task_expert_layer[t]
            task_experts = [task_expert_layer[i](x) for i in range(num_task_experts)]
            gates = task_gates[t]
            # Gate-weighted sum of expert outputs, assembled one output
            # feature at a time: shared experts first, then private ones.
            shared_info = torch.zeros(gates.size()[0], self.expert_size)
            for i in range(self.expert_size):
                tmp = 0
                for j in range(self.num_shared_experts):
                    tmp += gates[:,j] * shared_experts[j][:,i]
                for k in range(self.num_task_experts[t]):
                    tmp += gates[:,k+self.num_shared_experts] * task_experts[k][:,i]
                shared_info[:,i] = tmp
            shared_infos.append(shared_info)
        outs = [self.tower_layer[i](shared_infos[i]) for i in range(self.num_tasks)]
        return outs
| {"/model/shared_bottem.py": ["/model/mlp.py"], "/model/__init__.py": ["/model/mlp.py", "/model/shared_bottem.py", "/model/cgc.py"], "/model/cgc.py": ["/model/mlp.py"]} |
66,216 | daramg-suminlee/multitask-learning-pytorch | refs/heads/main | /data/synthetic_data.py |
import numpy as np
import torch
from torch.utils.data import Dataset
class SyntheticDataset(Dataset):
    """Synthetic two-task regression dataset.

    Two label functions share an input X; the cosine similarity of their
    weight vectors equals *task_corr*, which controls how correlated the
    two tasks are. Each label is a linear term plus a sum of *sin_param*
    random sinusoids plus Gaussian noise.
    """

    def __init__(self, num_data, feature_dim, task_corr=0.9, scale=0.5, sin_param=10, seed=1):
        self.num_data = num_data
        self.feature_dim = feature_dim
        self.task_corr = task_corr
        # Seed BOTH RNGs: previously only torch was seeded, so the numpy
        # draws (noise, sinusoid parameters) were not reproducible.
        torch.manual_seed(seed)
        np.random.seed(seed)
        # generate two orthogonal unit vectors u1 and u2
        u1, u2 = torch.rand(feature_dim), torch.rand(feature_dim)
        u1 -= u1.dot(u2) * u2 / torch.linalg.norm(u2)**2
        u1 /= torch.linalg.norm(u1)
        u2 /= torch.linalg.norm(u2)
        # generate two weight vectors w1 and w2 with cos(w1, w2) == task_corr
        w1 = scale * u1
        w2 = scale * (task_corr*u1 + np.sqrt((1-task_corr**2))*u2)
        # randomly sample the input data points
        self.X = torch.normal(0, 1, size=(num_data, feature_dim))
        # generate two labels y1 and y2 for the two tasks
        eps1, eps2 = np.random.normal(0, 0.01), np.random.normal(0, 0.01)
        sum1, sum2 = 0, 0
        for i in range(sin_param):
            alpha, beta = np.random.normal(0, 0.01), np.random.normal(0, 0.01)
            sum1 += torch.sin(alpha*torch.matmul(self.X, w1) + beta)
            sum2 += torch.sin(alpha*torch.matmul(self.X, w2) + beta)
        self.y1 = torch.matmul(self.X, w1) + sum1 + eps1
        # BUG FIX: y2 previously reused sum1, leaving sum2 dead code and
        # giving both tasks the same non-linear component; y2 must use the
        # sinusoids computed from w2.
        self.y2 = torch.matmul(self.X, w2) + sum2 + eps2
        # (num_data, 2) matrix with one column per task.
        self.y = torch.transpose(
            torch.reshape(torch.cat((self.y1, self.y2)), (2, -1)), -1, 0
        )

    def __len__(self):
        """Number of samples."""
        return self.num_data

    def __getitem__(self, index):
        """Return (X, (y1, y2)) for one sample."""
        X = self.X[index]
        y1 = self.y1[index]
        y2 = self.y2[index]
        return X, (y1, y2)
66,261 | webenable-ie/soicrm | refs/heads/master | /configtables/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-04 12:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ClubType, Region and
    Sport lookup tables (each a single CharField plus the auto id)."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ClubType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('club_type', models.CharField(max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Region',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('region', models.CharField(max_length=250)),
            ],
        ),
        migrations.CreateModel(
            name='Sport',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('sport', models.CharField(max_length=100)),
            ],
        ),
    ]
| {"/clubs/urls.py": ["/clubs/views.py"], "/clubs/views.py": ["/clubs/models.py"], "/clubs/models.py": ["/configtables/models.py"]} |
66,262 | webenable-ie/soicrm | refs/heads/master | /clubs/migrations/0003_club_region.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-11-04 19:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds the nullable Club.region foreign key
    (CASCADE on delete) pointing at configtables.Region."""

    dependencies = [
        ('configtables', '0001_initial'),
        ('clubs', '0002_auto_20171104_1256'),
    ]

    operations = [
        migrations.AddField(
            model_name='club',
            name='region',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='clubs', to='configtables.Region'),
        ),
    ]
| {"/clubs/urls.py": ["/clubs/views.py"], "/clubs/views.py": ["/clubs/models.py"], "/clubs/models.py": ["/configtables/models.py"]} |
66,263 | webenable-ie/soicrm | refs/heads/master | /clubs/urls.py | from django.conf.urls import url
from .views import ClubListView, ClubDetailsView
# URL routes for the clubs app: list view at the root, detail view by slug.
# (Repaired: the closing bracket line was corrupted by a trailing
# extraction artifact that made the module a syntax error.)
urlpatterns=[
    url(r'^$', ClubListView.as_view(), name="club_list"),
    url(r'^(?P<slug>[-\w]+)/$', ClubDetailsView.as_view(), name="club_details")
]
66,264 | webenable-ie/soicrm | refs/heads/master | /clubs/views.py | from django.shortcuts import render
from django.views.generic import ListView, DetailView
from .models import Club
# Create your views here.
class ClubListView(ListView):
    """List every Club; template/context names follow ListView defaults."""

    def get_queryset(self):
        """Expose the full club table as this view's queryset."""
        queryset = Club.objects.all()
        return queryset
class ClubDetailsView(DetailView):
    """Detail page for one Club, resolved by slug via DetailView."""
    model = Club

    def get_context_data(self, **kwargs):
        """Return the default DetailView context (hook kept for future extras)."""
        context = super(ClubDetailsView, self).get_context_data(**kwargs)
        # Repaired: the original return line was corrupted by a trailing
        # extraction artifact ('| {...} |') that made it a syntax error.
        return context
66,265 | webenable-ie/soicrm | refs/heads/master | /clubs/models.py | from django.db import models
from django.utils.text import slugify
from django.db.models.signals import pre_save
from configtables.models import Region
# Create your models here.
class Club(models.Model):
    """A sports club; its slug is auto-filled from `name` by the pre_save
    signal registered in this module."""
    name = models.CharField(max_length=250)
    slug = models.SlugField(null=True, blank=True)
    # BUG FIX: on_delete=None is invalid (Django >= 2.0 raises TypeError and
    # None was never a supported deletion handler). CASCADE matches what the
    # 0003_club_region migration already recorded for this field.
    region = models.ForeignKey(Region, null=True, blank=True, on_delete=models.CASCADE, related_name='clubs')

    def __str__(self):
        return self.name
def create_slug(instance, new_slug=None):
    """Return a slug for *instance* that is unique among Club rows.

    Starts from slugify(instance.name); while a Club already uses the
    candidate, appends the id of the newest clashing row and recurses.
    `new_slug` carries the candidate between recursive calls.
    """
    slug = slugify(instance.name)
    if new_slug is not None:
        slug = new_slug
    qs = Club.objects.filter(slug=slug).order_by("-id")
    exists = qs.exists()
    if exists:
        # Disambiguate with the newest clashing row's id and retry.
        new_slug = "%s-%s" %(slug, qs.first().id)
        return create_slug(instance, new_slug=new_slug)
    return slug
def pre_save_club_receiver(sender, instance, *args, **kwargs):
    """pre_save signal handler: fill in a unique slug when none is set."""
    if not instance.slug:
        instance.slug = create_slug(instance)
# Register the slug-filling handler for Club saves. (Repaired: the line was
# corrupted by a trailing extraction artifact.)
pre_save.connect(pre_save_club_receiver, sender=Club)
66,266 | webenable-ie/soicrm | refs/heads/master | /configtables/models.py | from django.db import models
# Create your models here.
class Sport(models.Model):
    """Lookup table of sports."""
    sport = models.CharField(max_length=100)

    def __str__(self):
        # Consistent with Club.__str__ so admin/shell listings are readable.
        return self.sport
class Region(models.Model):
    """Lookup table of regions (referenced by Club.region)."""
    region = models.CharField(max_length=250)

    def __str__(self):
        # Consistent with Club.__str__ so admin/shell listings are readable.
        return self.region
class ClubType(models.Model):
    """Lookup table of club types. (Repaired: the field line was corrupted
    by a trailing extraction artifact.)"""
    club_type = models.CharField(max_length=100)

    def __str__(self):
        # Consistent with Club.__str__ so admin/shell listings are readable.
        return self.club_type
66,271 | jhammarstedt/chatbot | refs/heads/master | /app.py | from flask import Flask,request
import random
from pymessenger.bot import Bot
import json
from brain import Dialog
# Credentials live in tokens.json; expects {"tokens": [{"Access": ..., "Verify": ...}]}.
with open('tokens.json') as f:
    data = json.load(f)
app = Flask(__name__)
# Facebook Page access token (for sending) and webhook verification token.
ACCESS_TOKEN =data['tokens'][0]['Access']
VERIFY_TOKEN = data['tokens'][0]['Verify']
bot = Bot(ACCESS_TOKEN)
COUNT = 0 #message count, shared across all senders
dialog = Dialog() #dialog class; its use in the webhook is still commented out
@app.route('/',methods= ['GET','POST'])
def receive_messeage():
    """Facebook Messenger webhook endpoint.

    GET: webhook verification handshake. POST: one or more user events;
    each text message gets a greeting, a goodbye, or a demo reply.
    NOTE(review): COUNT is one module-level counter shared by every
    sender, so the greeting/goodbye state machine only behaves for a
    single concurrent user — confirm before multi-user use.
    """
    global COUNT
    if request.method == 'GET':
        # confirms that all requests that your bot receives came from fb
        token_sent = request.args.get("hub.verify_token")
        return verify_fb_token(token_sent)
    else: # if the request was not get, it will be a POST - fb is sending the bot a message sent by the user.
        # Get our message that the user sent the bot
        output = request.get_json()
        for event in output['entry']:
            messaging = event['messaging']
            for message in messaging:
                if message.get('message'): # if a message exists here
                    # Fb messenger ID for user so we know where to send reponse back to
                    recipient_id = message['sender']['id']
                    if message['message'].get('text'): # if there is text here
                        #Here we get the message and will handle how we respond:
                        user_message = message['message'].get('text')
                        print(user_message)
                        if COUNT == 0: #quick fix for initial message
                            initial_response = 'Hi, I am the Movie Bot! What type of movies do you seek today?'
                            send_message(recipient_id,initial_response)
                            COUNT += 1
                        elif user_message in ['bye','Bye','Goodbye']: #end dialog
                            goodbye = 'Hope you enjoy the movie, bye!'
                            COUNT = 0
                            send_message(recipient_id,goodbye)
                        else: #if we're still in dialog
                            #dialog.process_message_txt(user_message)
                            bot_answer_text = demo_message() #now we just send back a random message
                            send_message(recipient_id, bot_answer_text)
                    # if users sends something else than text (gif, photo etc..)
                    if message['message'].get('attachments'):
                        response_sent_notext = demo_message()
                        send_message(recipient_id, response_sent_notext)
    return "Message Processed"
def send_message(recipient_id, response):
    """Push *response* as a Messenger text message to the given user id."""
    bot.send_text_message(recipient_id, response)
    return "success"
def demo_message():
    """Return one canned placeholder reply, chosen uniformly at random."""
    samples = ['I only recommend the best!', 'I swear you will be happy with me!', 'My role here is to serve you!']
    chosen = random.choice(samples)
    return chosen
def verify_fb_token(token_sent):
    """Handle Facebook's webhook handshake.

    Echo back hub.challenge when the supplied token matches ours;
    otherwise report an invalid token.
    """
    if token_sent != VERIFY_TOKEN:
        return "Invalid verification token"
    return request.args.get("hub.challenge")
# Run the Flask development server with debugging when launched directly.
if __name__=='__main__':
    app.debug = True
    app.run()
| {"/app.py": ["/brain.py"], "/main.py": ["/brain.py", "/app.py"]} |
66,272 | jhammarstedt/chatbot | refs/heads/master | /brain.py | # Here is where the NLP magic will happen!
import json
import tmdbsimple as tm
def start_api():
    """Read the TMDB API key from tokens.json and configure tmdbsimple."""
    with open('tokens.json') as token_file:
        config = json.load(token_file)
    tm.API_KEY = config['tokens'][0]['TMDB']
def get_data():
    """Load the keyword maps from keyword_mapper.json.

    Returns (genres, countries, popularity) — note the return order
    differs from the order the keys are read.
    """
    with open('keyword_mapper.json') as mapper_file:
        mapping = json.load(mapper_file)
    return mapping['genre'], mapping['countries'], mapping['popularity']
class Dialog():
    """
    Tracks the state of one movie-recommendation dialog: the keyword slots
    extracted so far, the current movie selection, and the next filtering
    action to apply.
    """
    def __init__(self):
        # Slots filled in by process_message_txt() as the user talks.
        self.keywords = {'genre': None,
                         'actor': None,  # is there even
                         'country': None,
                         'rating_threshold': None,
                         'popular': False}
        self.next_action = None
        self.current_movies = []  # list of currently relevant movies
        # Keyword maps loaded from keyword_mapper.json.
        self.kw_genres, self.kw_countries, self.kw_pop = get_data()
        self.current_chat = []

    def get_popular(self, pages=5):
        """Populate current_movies with popular titles, or re-rank the
        existing selection by vote average."""
        if len(self.current_movies) == 0:  # if we don't have any previous recs
            d = tm.Discover()  # Setting discovery mode for the API
            movies = []
            for i in range(1, pages):
                movies.append(
                    d.movie(sort_by=['vote_count.desc'], page=i)['results'])  # pages with most vote counts
            # We get a list of lists of dicts, so flatten to one list of dicts.
            flatten = [item for sublist in movies for item in sublist]
            flatten = sorted(flatten, key=lambda k: k['vote_average'], reverse=True)  # sort by vote_average
            self.current_movies = flatten
        else:  # just sort the current selection by popularity
            self.current_movies = sorted(self.current_movies, key=lambda k: k['vote_average'],
                                         reverse=True)

    def filter_by_genre(self, pages=5):
        """Populate current_movies from the requested genre, or narrow the
        existing selection to that genre."""
        if len(self.current_movies) == 0:  # first request to filter on
            movies = []
            for i in range(1, pages):
                # NOTE(review): tmdbsimple Genres usage unverified — confirm
                # this endpoint/positional argument against the library docs.
                movies.append(tm.Genres(
                    self.kw_genres[self.keywords['genre']], page=i)['results'])
            flatten = [item for sublist in movies for item in sublist]
            self.current_movies = flatten
        else:
            # Keep only movies whose genre_ids contain the requested genre id.
            selection = []
            for movie in self.current_movies:
                if self.kw_genres[self.keywords['genre']] in movie['genre_ids']:
                    selection.append(movie)
            self.current_movies = selection  # update our selection

    def process_message_txt(self, msg: str):
        """Scan each word of *msg* for known keywords and set next_action."""
        split = msg.split(' ')
        for word in split:
            if word in self.kw_pop:
                self.keywords['popular'] = True
                self.next_action = 'popular'
            # BUG FIX: was `word in list(self.kw_genres.keys)` — `.keys`
            # without parentheses is a method object, so list() raised
            # TypeError; membership on the dict itself tests its keys.
            elif word in self.kw_genres:
                # BUG FIX: store the genre *word* itself. filter_by_genre()
                # does self.kw_genres[self.keywords['genre']], and the
                # previous [word, id] list is not a valid dict key.
                self.keywords['genre'] = word
                self.next_action = 'genre'
            elif word in self.kw_countries:
                self.keywords['country'] = word
                self.next_action = 'country'

    def act_on_message(self):
        """Apply the most recently detected keyword to current_movies."""
        if self.next_action == 'popular':  # filter on popular movies
            self.get_popular()  # sort or create a list of popular movies
        elif self.next_action == 'genre':
            self.filter_by_genre()  # filter or create a list based on genre
66,273 | jhammarstedt/chatbot | refs/heads/master | /main.py | import brain
if __name__ == '__main__':
    brain.start_api()  # set the TMDB API key before the Flask app is imported
    from app import *  # importing app runs its module-level setup (Flask, bot, Dialog)
| {"/app.py": ["/brain.py"], "/main.py": ["/brain.py", "/app.py"]} |
66,292 | TongMoNumb/DeepRL | refs/heads/master | /deep_rl/crossEntropy.py | #######################################################################
# Copyright (C) kgmills@ualberta.ca, June 2019 #
# Keith G. Mills, Dept. Electrical and Computer Engineering, Ualberta #
# Permission to modify so long as you keep this declaration #
#######################################################################
import numpy as np
from math import inf
class CETuner:
    """Cross-entropy-method tuner for the final (actor output) layer of a
    DeepRL agent.

    Each generation samples a population of candidate weight vectors from
    per-weight Gaussians, evaluates each candidate by running the agent,
    keeps the top `rho` fraction (the elite) and moves the Gaussians'
    means/stds toward the elite statistics, smoothed by `alpha`.
    """
    def __init__(self, agent):
        """Set CE hyper-parameters and initialise the sampling distribution.

        agent: DeepRL agent exposing network.actor_body.feature_dim,
        network.action_dim, finalWeightHistory, and the final-layer
        get/set helpers used in executeGeneration().
        """
        # Population size, alpha (parameter for matrices) and rho (used for determining best candidates)
        self.rho = 0.01
        self.alpha = 0.6
        # TODO make this like a config with insertable popSize
        self.popSize = max(1000, int(1 / self.rho))
        self.nElite = int(self.popSize * self.rho)
        self.agent = agent
        # Commented out, but these are the relevant weights
        # self.agent.network.fc_action.parameters()
        # Best row seen so far; once a real row is stored its last three
        # slots are (mean return, std, mean - std).
        self.bestParams = [-inf, -inf, -inf]
        self.bestGen = 0
        # Final-layer parameter count: weight matrix entries plus one bias
        # per action (the trailing `+ action_dim` term).
        self.nFeatures = agent.network.actor_body.feature_dim * \
            agent.network.action_dim + agent.network.action_dim
        # Weight matrix - One more column is added for the scores
        # NOTE(review): executeGeneration() reallocates this with
        # nFeatures + 3 columns, so the +1 shape here is never used.
        self.weightsAndScores = np.zeros((self.popSize, self.nFeatures + 1),)
        # Means and standard deviations of each weight (as well as the rewards)
        if agent.finalWeightHistory is None:
            self.Mus = np.zeros(self.nFeatures + 3,)
            self.Sigmas = np.ones(self.nFeatures + 3,) * 50
            self.vanillaWeights = agent.getFinalActorWeights()
        else:
            self.Mus = np.append(np.mean(agent.finalWeightHistory, axis = 0), [0, 0, 0])
            self.Sigmas = np.append(np.std(agent.finalWeightHistory, axis = 0), [0, 0, 0])
        self.muList = None
        self.sigList = None

    # A single generation
    def executeGeneration(self, gen):
        """Sample, evaluate and select one CE generation.

        gen: generation index, recorded when a new global best is found.
        """
        # Generate all the weights and dummy scores at once. The last three
        # columns are placeholders overwritten per candidate below with
        # (mean return, std, mean - std).
        self.weightsAndScores = np.random.normal(self.Mus, self.Sigmas,
                                                 (self.popSize, self.nFeatures + 3))
        # Run for each individual
        for i in range(self.popSize):
            if self.agent.finalWeightHistory is None:
                # Candidates are perturbations of the agent's original weights.
                self.agent.setAgentFinalLayerWeights(
                    np.add(self.vanillaWeights, self.weightsAndScores[i, :-3])
                )
            else:
                self.agent.setAgentFinalLayerWeights(self.weightsAndScores[i, :-3])
            # Run for the number of episodes specified
            for j in range(self.agent.config.tunerEpisodes):
                self.agent.stepNoUpdate()
                self.agent.switch_task()
            rewardDict = self.agent.eval_episodes(log = False, tune = True)
            self.weightsAndScores[i, -3] = rewardDict['episodic_return_test']
            self.weightsAndScores[i, -2] = rewardDict['test_std']
            # Selection score: pessimistic return (mean minus one std).
            self.weightsAndScores[i, -1] = rewardDict['episodic_return_test'] - rewardDict['test_std']
        # https://stackoverflow.com/questions/2828059/sorting-arrays-in-numpy-by-column
        # Sort the matrix of weights and scores by the scores, in descending order.
        self.weightsAndScores = self.weightsAndScores[self.weightsAndScores[:, -1].argsort()[::-1]]
        # Select the top performing weight vectors
        eliteWeights = self.weightsAndScores[:self.nElite, :]
        # TODO fix this for early generations
        # Compute Mu and Sigma for each weight as well as the reward
        self.Mus = np.add((1 - self.alpha) * self.Mus,
                          self.alpha * np.mean(eliteWeights, axis = 0))
        self.Sigmas = np.add((1 - self.alpha) * self.Sigmas,
                             self.alpha * np.std(eliteWeights, axis = 0))
        # Check if the best is better than the global best (by mean return)
        if eliteWeights[0, -3] > self.bestParams[-3]:
            self.bestParams = eliteWeights[0, :]
            self.bestGen = gen
        self.recordMuSigma()

    def recordMuSigma(self):
        """Append this generation's return means/stds as new columns of the
        history matrices (rows = population rank, columns = generation)."""
        if self.muList is None:
            self.muList = np.asmatrix(self.weightsAndScores[:, -3]).T
            self.sigList = np.asmatrix(self.weightsAndScores[:, -2]).T
        else:
            self.muList = np.append(self.muList, np.asmatrix(
                self.weightsAndScores[:, -3]).T, axis = 1)
            self.sigList = np.append(self.sigList, np.asmatrix(
                self.weightsAndScores[:, -2]).T, axis = 1)

    def saveMats(self):
        """Dump the per-generation mean/std histories to CSV files named
        after the agent's config tag."""
        print(self.agent.config.tag)
        fileName = "mu" + str(self.agent.config.tag) + ".csv"
        np.savetxt(fileName, self.muList, delimiter = ',')
        fileName = "sig" + str(self.agent.config.tag) + ".csv"
        np.savetxt(fileName, self.sigList, delimiter = ',')
| {"/deep_rl/utils/misc.py": ["/deep_rl/crossEntropy.py"]} |
66,293 | TongMoNumb/DeepRL | refs/heads/master | /examples.py | #######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
# Modified by Keith Mills
# Dept. Electrical and Computer Engineering
# University of Alberta
from deep_rl import *
import argparse
# DQN
def dqn_feature(**kwargs):
    """Configure and train a DQN agent (fully-connected net) on a feature task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: VanillaNet(config.action_dim, FCBody(config.state_dim))
    # config.network_fn = lambda: DuelingNet(config.action_dim, FCBody(config.state_dim))
    # config.replay_fn = lambda: Replay(memory_size=int(1e4), batch_size=10)
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e4), batch_size=10)
    config.random_action_prob = LinearSchedule(1.0, 0.1, 1e4)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 1000
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.max_steps = 1e5
    config.async_actor = False
    run_steps(DQNAgent(config))
def dqn_pixel(**kwargs):
    """Configure and train a DQN agent with the Nature conv net on pixel input.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(
        params, lr=0.00025, alpha=0.95, eps=0.01, centered=True)
    config.network_fn = lambda: VanillaNet(config.action_dim, NatureConvBody(in_channels=config.history_length))
    # config.network_fn = lambda: DuelingNet(config.action_dim, NatureConvBody(in_channels=config.history_length))
    config.random_action_prob = LinearSchedule(1.0, 0.01, 1e6)
    # config.replay_fn = lambda: Replay(memory_size=int(1e6), batch_size=32)
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e6), batch_size=32)
    config.batch_size = 32
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.exploration_steps = 50000
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.history_length = 4
    # config.double_q = True
    config.double_q = False
    config.max_steps = int(2e7)
    run_steps(DQNAgent(config))
# QR DQN
def quantile_regression_dqn_feature(**kwargs):
    """Configure and train a QR-DQN agent on a feature-based task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: QuantileNet(config.action_dim, config.num_quantiles, FCBody(config.state_dim))
    # config.replay_fn = lambda: Replay(memory_size=int(1e4), batch_size=10)
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e4), batch_size=10)
    config.random_action_prob = LinearSchedule(1.0, 0.1, 1e4)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 100
    # num_quantiles is read lazily by network_fn, so setting it here is safe
    config.num_quantiles = 20
    config.gradient_clip = 5
    config.sgd_update_frequency = 4
    config.eval_interval = int(5e3)
    config.max_steps = 1e5
    run_steps(QuantileRegressionDQNAgent(config))
def quantile_regression_dqn_pixel(**kwargs):
    """Configure and train a QR-DQN agent on pixel input (Nature conv net).

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00005, eps=0.01 / 32)
    config.network_fn = lambda: QuantileNet(config.action_dim, config.num_quantiles, NatureConvBody())
    config.random_action_prob = LinearSchedule(1.0, 0.01, 1e6)
    # config.replay_fn = lambda: Replay(memory_size=int(1e6), batch_size=32)
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e6), batch_size=32)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.exploration_steps = 50000
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.num_quantiles = 200
    config.max_steps = int(2e7)
    run_steps(QuantileRegressionDQNAgent(config))
# C51
def categorical_dqn_feature(**kwargs):
    """Configure and train a C51 (categorical DQN) agent on a feature task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: CategoricalNet(config.action_dim, config.categorical_n_atoms, FCBody(config.state_dim))
    config.random_action_prob = LinearSchedule(1.0, 0.1, 1e4)
    # config.replay_fn = lambda: Replay(memory_size=10000, batch_size=10)
    config.replay_fn = lambda: AsyncReplay(memory_size=10000, batch_size=10)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 100
    # Support of the value distribution: n_atoms atoms on [v_min, v_max]
    config.categorical_v_max = 100
    config.categorical_v_min = -100
    config.categorical_n_atoms = 50
    config.gradient_clip = 5
    config.sgd_update_frequency = 4
    config.eval_interval = int(5e3)
    config.max_steps = 1e5
    run_steps(CategoricalDQNAgent(config))
def categorical_dqn_pixel(**kwargs):
    """Configure and train a C51 (categorical DQN) agent on pixel input.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.Adam(params, lr=0.00025, eps=0.01 / 32)
    config.network_fn = lambda: CategoricalNet(config.action_dim, config.categorical_n_atoms, NatureConvBody())
    config.random_action_prob = LinearSchedule(1.0, 0.01, 1e6)
    # config.replay_fn = lambda: Replay(memory_size=int(1e6), batch_size=32)
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e6), batch_size=32)
    config.discount = 0.99
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.target_network_update_freq = 10000
    config.exploration_steps = 50000
    # Classic C51 setting: 51 atoms on [-10, 10]
    config.categorical_v_max = 10
    config.categorical_v_min = -10
    config.categorical_n_atoms = 51
    config.sgd_update_frequency = 4
    config.gradient_clip = 0.5
    config.max_steps = int(2e7)
    run_steps(CategoricalDQNAgent(config))
# A2C
def a2c_feature(**kwargs):
    """Configure and train an A2C agent (5 workers) on a feature-based task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.num_workers = 5
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.optimizer_fn = lambda params: torch.optim.Adam(params, 0.001)
    config.network_fn = lambda: CategoricalActorCriticNet(
        config.state_dim, config.action_dim, FCBody(config.state_dim))
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 0.95
    config.entropy_weight = 0.01
    config.rollout_length = 5
    config.gradient_clip = 0.5
    run_steps(A2CAgent(config))
def a2c_pixel(**kwargs):
    """Configure and train an A2C agent (16 workers) on pixel input.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.num_workers = 16
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=1e-4, alpha=0.99, eps=1e-5)
    config.network_fn = lambda: CategoricalActorCriticNet(config.state_dim, config.action_dim, NatureConvBody())
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 1.0
    config.entropy_weight = 0.01
    config.rollout_length = 5
    config.gradient_clip = 5
    config.max_steps = int(2e7)
    run_steps(A2CAgent(config))
def a2c_continuous(**kwargs):
    """Configure and train an A2C agent with a Gaussian policy for continuous actions.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.num_workers = 16
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.0007)
    config.network_fn = lambda: GaussianActorCriticNet(
        config.state_dim, config.action_dim,
        actor_body=FCBody(config.state_dim), critic_body=FCBody(config.state_dim))
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 1.0
    config.entropy_weight = 0.01
    config.rollout_length = 5
    config.gradient_clip = 5
    config.max_steps = int(2e7)
    run_steps(A2CAgent(config))
# N-Step DQN
def n_step_dqn_feature(**kwargs):
    """Configure and train an n-step DQN agent on a feature-based task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    # The lambda reads num_workers lazily, so assigning it two lines below is safe.
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.num_workers = 5
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: VanillaNet(config.action_dim, FCBody(config.state_dim))
    config.random_action_prob = LinearSchedule(1.0, 0.1, 1e4)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.rollout_length = 5
    config.gradient_clip = 5
    run_steps(NStepDQNAgent(config))
def n_step_dqn_pixel(**kwargs):
    """Configure and train an n-step DQN agent on pixel input.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    # The lambda reads num_workers lazily, so assigning it two lines below is safe.
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.num_workers = 16
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=1e-4, alpha=0.99, eps=1e-5)
    config.network_fn = lambda: VanillaNet(config.action_dim, NatureConvBody())
    config.random_action_prob = LinearSchedule(1.0, 0.05, 1e6)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.rollout_length = 5
    config.gradient_clip = 5
    config.max_steps = int(2e7)
    run_steps(NStepDQNAgent(config))
# Option-Critic
def option_critic_feature(**kwargs):
    """Configure and train an Option-Critic agent (2 options) on a feature task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.num_workers = 5
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: OptionCriticNet(FCBody(config.state_dim), config.action_dim, num_options=2)
    config.random_option_prob = LinearSchedule(1.0, 0.1, 1e4)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.rollout_length = 5
    config.termination_regularizer = 0.01
    config.entropy_weight = 0.01
    config.gradient_clip = 5
    run_steps(OptionCriticAgent(config))
def option_critic_pixel(**kwargs):
    """Configure and train an Option-Critic agent (4 options) on pixel input.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    # The lambda reads num_workers lazily, so assigning it two lines below is safe.
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.num_workers = 16
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=1e-4, alpha=0.99, eps=1e-5)
    config.network_fn = lambda: OptionCriticNet(NatureConvBody(), config.action_dim, num_options=4)
    config.random_option_prob = LinearSchedule(0.1)
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.target_network_update_freq = 10000
    config.rollout_length = 5
    config.gradient_clip = 5
    config.max_steps = int(2e7)
    config.entropy_weight = 0.01
    config.termination_regularizer = 0.01
    run_steps(OptionCriticAgent(config))
# PPO
def ppo_feature(**kwargs):
    """Configure and train a PPO agent (5 workers) on a feature-based task.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.num_workers = 5
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.001)
    config.network_fn = lambda: CategoricalActorCriticNet(config.state_dim, config.action_dim, FCBody(config.state_dim))
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 0.95
    config.entropy_weight = 0.01
    config.gradient_clip = 5
    config.rollout_length = 128
    config.optimization_epochs = 10
    # mini batch sized as 32 transitions per worker
    config.mini_batch_size = 32 * 5
    config.ppo_ratio_clip = 0.2
    config.log_interval = 128 * 5 * 10
    run_steps(PPOAgent(config))
def ppo_pixel(**kwargs):
    """Configure and train a PPO agent (8 workers) on pixel input.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    # The lambda reads num_workers lazily, so assigning it two lines below is safe.
    config.task_fn = lambda: Task(config.game, num_envs=config.num_workers)
    config.eval_env = Task(config.game)
    config.num_workers = 8
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, lr=0.00025, alpha=0.99, eps=1e-5)
    config.network_fn = lambda: CategoricalActorCriticNet(config.state_dim, config.action_dim, NatureConvBody())
    config.state_normalizer = ImageNormalizer()
    config.reward_normalizer = SignNormalizer()
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 0.95
    config.entropy_weight = 0.01
    config.gradient_clip = 0.5
    config.rollout_length = 128
    config.optimization_epochs = 3
    config.mini_batch_size = 32 * 8
    config.ppo_ratio_clip = 0.1
    config.log_interval = 128 * 8
    config.max_steps = int(2e7)
    run_steps(PPOAgent(config))
def ppo_continuous(**kwargs):
    """Configure and train a PPO agent with a Gaussian policy for continuous actions.

    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.network_fn = lambda: GaussianActorCriticNet(
        config.state_dim, config.action_dim, actor_body=FCBody(config.state_dim, gate=torch.tanh),
        critic_body=FCBody(config.state_dim, gate=torch.tanh))
    config.optimizer_fn = lambda params: torch.optim.Adam(params, 3e-4, eps=1e-5)
    config.discount = 0.99
    config.use_gae = True
    config.gae_tau = 0.95
    config.gradient_clip = 0.5
    config.rollout_length = 2048
    config.optimization_epochs = 10
    config.mini_batch_size = 64
    config.ppo_ratio_clip = 0.2
    config.log_interval = 2048
    config.max_steps = 1e6
    config.state_normalizer = MeanStdNormalizer()
    run_steps(PPOAgent(config))
# DDPG
def ddpg_continuous_setup(iters = 1e6, folder = 'log', **kwargs):
    """Build and return a DDPG Config for continuous-control tasks.

    iters: maximum training steps (cast to int).
    folder: log subdirectory stored on the config.
    kwargs must contain 'game'; every entry is merged into the Config.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.max_steps = int(iters)
    config.eval_interval = int(1e4)
    config.eval_episodes = 20
    config.folder = folder
    # Standard DDPG architecture: (400, 300) hidden layers, separate optimizers
    # with a slower actor (1e-4) than critic (1e-3).
    config.network_fn = lambda: DeterministicActorCriticNet(
        config.state_dim, config.action_dim,
        actor_body=FCBody(config.state_dim, (400, 300), gate=F.relu),
        critic_body=TwoLayerFCBodyWithAction(
            config.state_dim, config.action_dim, (400, 300), gate=F.relu),
        actor_opt_fn=lambda params: torch.optim.Adam(params, lr=1e-4),
        critic_opt_fn=lambda params: torch.optim.Adam(params, lr=1e-3))
    config.replay_fn = lambda: Replay(memory_size=int(1e6), batch_size=64)
    config.discount = 0.99
    # Ornstein-Uhlenbeck exploration noise on the action vector
    config.random_process_fn = lambda: OrnsteinUhlenbeckProcess(
        size=(config.action_dim,), std=LinearSchedule(0.2))
    config.warm_up = int(1e4)
    config.target_network_mix = 1e-3
    return config
def ddpg_continuous(iters = 1e6, ce = False, **kwargs):
    """Train a DDPG agent; with ce truthy, follow training with Cross Entropy tuning."""
    config = ddpg_continuous_setup(iters, **kwargs)
    agent = DDPGAgent(config)
    runner = run_CE_steps if ce else run_steps
    runner(agent)
if __name__ == '__main__':
    # Command-line driver: picks games from gameDict and launches DDPG runs.
    parser = argparse.ArgumentParser()
    parser.add_argument('-f', "--folder",
                        help = "Subfolder for logs")
    parser.add_argument("-g", "--game",
                        help = "Game experiments to be performed")
    parser.add_argument("-i", "--iter",
                        help = "Maximum number of iterations")
    parser.add_argument("-c", "--crossEntropy",
                        help = "Use Cross Entropy after training")
    parser.add_argument("-j", "--jiter",
                        help = "Maximum number of Cross Entropy generations")
    parser.add_argument("-w", "--window",
                        help = "Size of the window for previous weight history using Cross Entropy")
    parser.add_argument("-e", "--episode",
                        help = "Number of episodes to run for Cross Entropy candidates")
    args = parser.parse_args()
    # Fail fast with a usage message instead of `'test' in None` raising TypeError.
    if args.game is None:
        parser.error("-g/--game is required (e.g. -g test or -g all)")
    if args.folder is not None:
        myDir = 'log/' + args.folder
    else:
        myDir = 'log'
    mkdir(myDir)
    mkdir('tf_' + myDir)
    set_one_thread()
    random_seed()
    select_device(-1)  # 0 or -1
    # Fill in defaults for any tuning options the user did not pass.
    if args.iter is None:
        args.iter = 1e6
    if args.jiter is None:
        args.jiter = 50
    if args.window is None:
        args.window = 0
    if args.episode is None:
        args.episode = 500
    gameDict = { #'r': 'RoboschoolReacher-v1',
                 #'h': 'RoboschoolHopper-v1',
                 #'w': 'RoboschoolWalker2d-v1',
                 'c': 'RoboschoolHalfCheetah-v1',
                 'a': 'RoboschoolAnt-v1',
               }
    # NOTE: args.crossEntropy is a string flag; any non-empty value enables CE.
    if 'test' in args.game:
        print("Debug Test! Running", 'RoboschoolPong-v1', "for", str(args.iter), "iterations:")
        ddpg_continuous(iters = float(args.iter),
                        folder = myDir,
                        ce = args.crossEntropy,
                        game = 'RoboschoolPong-v1',
                        generations = int(args.jiter),
                        window = int(args.window),
                        tunerEpisodes = int(args.episode))
    elif 'all' in args.game:
        for gameChar in gameDict.keys():
            print("Running", gameDict[gameChar], "for", str(args.iter), "iterations:")
            ddpg_continuous(iters = float(args.iter),
                            folder = myDir,
                            ce = args.crossEntropy,
                            game = gameDict[gameChar],
                            generations = int(args.jiter),
                            window = int(args.window),
                            tunerEpisodes = int(args.episode))
| {"/deep_rl/utils/misc.py": ["/deep_rl/crossEntropy.py"]} |
66,294 | TongMoNumb/DeepRL | refs/heads/master | /deep_rl/agent/DDPG_agent.py | #######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from ..network import *
from ..component import *
from .BaseAgent import *
import torchvision
import torch as t
import roboschool
class DDPGAgent(BaseAgent):
    """Deep Deterministic Policy Gradient agent with extra hooks for
    Cross Entropy tuning of the actor's final linear layer."""
    def __init__(self, config):
        BaseAgent.__init__(self, config)
        self.config = config
        self.task = config.task_fn()
        self.network = config.network_fn()
        self.target_network = config.network_fn()
        # Start the target network as an exact copy of the online network.
        self.target_network.load_state_dict(self.network.state_dict())
        self.replay = config.replay_fn()
        self.random_process = config.random_process_fn()
        self.total_steps = 0
        self.state = None
        # Rows of final-layer actor weights captured near the end of training.
        self.finalWeightHistory = None
    def soft_update(self, target, src):
        """Polyak-average src's parameters into target with rate target_network_mix."""
        for target_param, param in zip(target.parameters(), src.parameters()):
            target_param.detach_()
            target_param.copy_(target_param * (1.0 - self.config.target_network_mix) +
                               param * self.config.target_network_mix)
    def eval_step(self, state):
        """Deterministic (noise-free) action for evaluation episodes."""
        self.config.state_normalizer.set_read_only()
        state = self.config.state_normalizer(state)
        action = self.network(state)
        self.config.state_normalizer.unset_read_only()
        return to_np(action)
    def step(self):
        """Collect one environment transition and, past warm-up, do one DDPG update."""
        config = self.config
        if self.state is None:
            self.random_process.reset_states()
            self.state = self.task.reset()
            self.state = config.state_normalizer(self.state)
        if self.total_steps < config.warm_up:
            # Warm-up: act uniformly at random to seed the replay buffer.
            action = [self.task.action_space.sample()]
        else:
            action = self.network(self.state)
            action = to_np(action)
            # Exploration noise from the Ornstein-Uhlenbeck-style process.
            action += self.random_process.sample()
        action = np.clip(action, self.task.action_space.low, self.task.action_space.high)
        next_state, reward, done, info = self.task.step(action)
        next_state = self.config.state_normalizer(next_state)
        self.record_online_return(info)
        reward = self.config.reward_normalizer(reward)
        experiences = list(zip(self.state, action, reward, next_state, done))
        self.replay.feed_batch(experiences)
        if done[0]:
            self.random_process.reset_states()
        self.state = next_state
        self.total_steps += 1
        if self.replay.size() >= config.warm_up:
            experiences = self.replay.sample()
            states, actions, rewards, next_states, terminals = experiences
            states = tensor(states)
            actions = tensor(actions)
            rewards = tensor(rewards).unsqueeze(-1)
            next_states = tensor(next_states)
            # mask zeroes the bootstrap term on terminal transitions.
            mask = tensor(1 - terminals).unsqueeze(-1)
            phi_next = self.target_network.feature(next_states)
            a_next = self.target_network.actor(phi_next)
            q_next = self.target_network.critic(phi_next, a_next)
            q_next = config.discount * mask * q_next
            q_next.add_(rewards)
            q_next = q_next.detach()
            phi = self.network.feature(states)
            q = self.network.critic(phi, actions)
            # Critic: 0.5 * squared TD error against the target network.
            critic_loss = (q - q_next).pow(2).mul(0.5).sum(-1).mean()
            self.network.zero_grad()
            critic_loss.backward()
            self.network.critic_opt.step()
            phi = self.network.feature(states)
            action = self.network.actor(phi) # Need the weights of this network.
            # Actor: ascend the critic's value of the actor's own action.
            policy_loss = -self.network.critic(phi.detach(), action).mean()
            self.network.zero_grad()
            policy_loss.backward()
            self.network.actor_opt.step()
            self.soft_update(self.target_network, self.network)
    # Function used ONLY during the Cross Entropy tuning process
    # Runs an episode, but does not update any weights or network optimization
    # Mostly copy-pasted from step(self) with code for logging removed
    def stepNoUpdate(self):
        """Environment interaction only: no learning, no logging (CE tuning)."""
        config = self.config
        if self.total_steps < config.warm_up:
            action = [self.task.action_space.sample()]
        else:
            action = self.network(self.state)
            action = to_np(action)
            action += self.random_process.sample()
        action = np.clip(action, self.task.action_space.low, self.task.action_space.high)
        next_state, reward, done, info = self.task.step(action)
        next_state = self.config.state_normalizer(next_state)
        reward = self.config.reward_normalizer(reward)
        experiences = list(zip(self.state, action, reward, next_state, done))
        self.replay.feed_batch(experiences)
        if done[0]:
            self.random_process.reset_states()
        self.state = next_state
        self.total_steps += 1
    def getFinalActorWeights(self):
        """Return the actor output layer's weights (row-major flattened) + biases as one list."""
        weights = self.network.fc_action.state_dict()['weight'].tolist()
        weights = [item for sublist in weights for item in sublist]
        bias = self.network.fc_action.state_dict()['bias'].tolist()
        return weights + bias
    def preserveWeightHistory(self):
        """During the last config.window training steps, snapshot the final-layer weights."""
        if (self.total_steps - (self.config.max_steps - self.config.window)) >= 0:
            weights = self.getFinalActorWeights()
            if self.finalWeightHistory is None:
                self.finalWeightHistory = np.zeros((1, len(weights)),)
                self.finalWeightHistory[0, :] = weights
            else:
                self.finalWeightHistory = np.append(self.finalWeightHistory, [weights], axis = 0)
    # Function loads a weight vector to the model's actual parameters
    # self.agent.network.fc_action.state_dict()['weight'].data.copy_(t.from_numpy
    # (weight.reshape(action_dim, feature_dim)))
    # Include separate update for the biases
    # https://discuss.pytorch.org/t/how-can-i-modify-certain-layers-weight-and-bias/11638
    def setAgentFinalLayerWeights(self, weightVec):
        """Load a flat weight+bias vector (as produced by getFinalActorWeights)
        back into the actor's output layer."""
        # The last action_dim entries are the biases; the rest are weights.
        index = len(weightVec) - self.network.action_dim
        weights = t.from_numpy(weightVec[:index].reshape(self.network.action_dim,
                                                         self.network.actor_body.feature_dim))
        bias = t.from_numpy(weightVec[index:])
        self.network.fc_action.state_dict()['weight'].data.copy_(weights)
        self.network.fc_action.state_dict()['bias'].data.copy_(bias)
| {"/deep_rl/utils/misc.py": ["/deep_rl/crossEntropy.py"]} |
66,295 | TongMoNumb/DeepRL | refs/heads/master | /deep_rl/utils/misc.py | #######################################################################
# Copyright (C) 2017 Shangtong Zhang(zhangshangtong.cpp@gmail.com) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
import datetime
import time
from .torch_utils import *
from pathlib import Path
from ..crossEntropy import CETuner
def run_CE_steps(agent):
    """Train the agent normally, then tune its final actor layer with Cross Entropy.

    Runs run_steps() with wrapping=True (so the agent stays open), then drives
    a CETuner for config.generations generations, logs timing/reward summaries,
    saves the tuner's mu/sigma history and closes the agent.
    """
    start = time.time()
    run_steps(agent, wrapping = True)
    end = time.time()
    trainTime = end - start
    agent.logger.info("Time to train normally: %.2f; %.2f step/sec" %
                      (trainTime, agent.config.max_steps/trainTime))
    tuner = CETuner(agent)
    startTune = time.time()
    agent.logger.info("Running Cross Entropy for %d generations; %d episodes per individual" \
                      % (agent.config.generations, agent.config.tunerEpisodes))
    for i in range(1, agent.config.generations + 1):
        startGen = time.time()
        tuner.executeGeneration(i)
        agent.logger.info("Generation %d: Mean Reward = %.3f(%.3f); Time: %.2f" %
                          (i, tuner.Mus[-3], tuner.Sigmas[-3], time.time() - startGen))
    end = time.time()
    agent.logger.info("Best episodic return test found through tuning: %.3f(%.3f), in generation %d", tuner.bestParams[-3],
                      tuner.bestParams[-2], tuner.bestGen)
    agent.logger.info("Best episodic return test found through training: %.3f", agent.config.bestTestReturn)
    agent.logger.info("Time to tune: %.2f; total time to execute: %.2f" % (end - startTune, end - start))
    tuner.saveMats()
    agent.close()
def run_steps(agent, wrapping = False):
    """Main training loop: step the agent until config.max_steps, with periodic
    save/log/eval hooks.

    wrapping=True skips agent.close() on exit so a caller (run_CE_steps) can
    keep using the agent afterwards.
    """
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    while True:
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            agent.logger.info('steps %d, %.2f steps/s' % (agent.total_steps, config.log_interval / (time.time() - t0)))
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % config.eval_interval:
            meanReward = agent.eval_episodes()['episodic_return_test']
            # Record the best mean reward found, report at the end
            if meanReward > agent.config.bestTestReturn:
                agent.config.bestTestReturn = meanReward
        if config.max_steps and agent.total_steps >= config.max_steps:
            if not wrapping:
                agent.close()
            break
        # NOTE(review): preserveWeightHistory is only defined on DDPGAgent -
        # presumably BaseAgent provides a no-op for other agents; confirm.
        agent.preserveWeightHistory()
        agent.step()
        agent.switch_task()
def get_time_str():
    """Current local time formatted as yymmdd-HHMMSS (used to tag log dirs)."""
    now = datetime.datetime.now()
    return now.strftime("%y%m%d-%H%M%S")
def get_default_log_dir(name):
    """Default log directory: ./log/<name>-<timestamp>."""
    return './log/{}-{}'.format(name, get_time_str())
def mkdir(path):
    """Create *path* including missing parents; silently succeed if it exists."""
    Path(path).mkdir(exist_ok=True, parents=True)
def close_obj(obj):
    """Call obj.close() if the object provides one; otherwise do nothing."""
    closer = getattr(obj, 'close', None)
    if closer is not None:
        closer()
def random_sample(indices, batch_size):
    """Yield random mini-batches drawn without replacement from *indices*.

    Full batches of size batch_size are yielded first; any remainder forms
    one final shorter batch. *indices* is anything np.random.permutation
    accepts (an int n means range(n)).
    """
    shuffled = np.asarray(np.random.permutation(indices))
    full = len(shuffled) // batch_size
    yield from shuffled[:full * batch_size].reshape(-1, batch_size)
    leftover = len(shuffled) % batch_size
    if leftover:
        yield shuffled[-leftover:]
def generate_tag(params):
    """Derive params['tag'] from the game name, sorted hyperparameters and run id.

    Mutates *params* in place: 'folder' is removed, 'run' defaults to 0, and
    'tag' is set to '<game>-<k_v>-...-run-<run>' unless already present.
    'game' and 'run' remain in the dict afterwards.
    """
    # Added by kgmills
    if 'folder' in params.keys():
        del params['folder']
    if 'tag' in params.keys():
        return
    game = params['game']
    params.setdefault('run', 0)
    run = params['run']
    del params['game']
    del params['run']
    # Renamed from 'str' to avoid shadowing the builtin.
    parts = ['%s_%s' % (k, v) for k, v in sorted(params.items())]
    tag = '%s-%s-run-%d' % (game, '-'.join(parts), run)
    params['tag'] = tag
    params['game'] = game
    params['run'] = run
def translate(pattern):
    """Escape the literal dots in *pattern* for use inside a regex.

    The original built this via split/join with the string '\\.' written as
    an invalid escape sequence ('\.'), which triggers a DeprecationWarning;
    a raw string with str.replace gives the identical result.
    """
    return pattern.replace('.', r'\.')
def split(a, n):
    """Split sequence *a* into n contiguous chunks of near-equal size.

    The first len(a) % n chunks get one extra element. Returns a generator
    of slices of *a*.
    """
    base, extra = divmod(len(a), n)
    def _chunks():
        start = 0
        for i in range(n):
            end = start + base + (1 if i < extra else 0)
            yield a[start:end]
            start = end
    return _chunks()
| {"/deep_rl/utils/misc.py": ["/deep_rl/crossEntropy.py"]} |
66,296 | FransValentino/ProyekAkhir_AI | refs/heads/main | /Main.py | from Jalur import Jalur
# Prompt for the departure and destination landmarks, then normalise both to
# lowercase with all whitespace removed so they match Jalur's landmark strings.
# NOTE(review): importing Jalur above runs the same prompts at Jalur's module
# level as well - presumably an unintended double prompt; confirm intent.
depature = input("Keberangkatan: ")  # NOTE(review): variable name is a typo for 'departure'
destination = input("Tujuan : ")
departure = depature.lower()
destination = destination.lower()
dept = "".join(departure.split())
dest = "".join(destination.split())
| {"/Main.py": ["/Jalur.py"]} |
66,297 | FransValentino/ProyekAkhir_AI | refs/heads/main | /Jalur.py | Mangkubumi1 = ("tugu", "pantiwilosoprojo", "hotelarjuna" )
Mangkubumi2 = ("klinikmediksa", "bcapusat", "gkiwongsodirjan")
Malioboro = ("grandina", "malioboromall", "bappeda", "smpn3", "ramaimall", "malioboro")
Sudirman1 = ("uiversitaskristendutawacana", "klitren", "bethesda", "galeria",'ukdw')
Sudirman2 = ("ojk", "tugu", "gramedia")
AhmadYani = ("museumbenteng", "ramayana", "bringharjo", "alunalun")
Senopati = ("tamanpintar", "titiknol", "smapangudiluhur")
TamanPintar = ("tamanpintar", "titiknol", "smapangudiluhur")
Katamso2 = ("smpmariaimakulata", "xiaomiservice", "brikatamso")
Katamso1 = ("arenasport", "dinaskomunikasiinformasi", "bnnp", "kantorpertanahan")
YosSudarso = ("kridosono", "kotabaru", "rssoetarto", "smpn5")
MandalaKrida = ("universitasahmaddahlan", "dinaskebudayaan","pengadilannegriyogya")
KHADahlan2 = ("bniahmaddahlan", "moneychanger", "jlahmaddahlan")
MTHaryono1 = ("sman7", "gerejahatikudus", "pasarsepeda")
MTHaryono2 = ("sman7", "opposervicecenter", "pasarsepeda")
Tejoklusuman = MTHaryono1
kusumanegara3 = ("bpnjogja","universitassarjanawiyatatamansiswa","masjidalbadar")
kusumanegara4 = ("bpnjogja","universitassarjanawiyatatamansiswa","masjidalbadar")
kusumanegara = ("gembiraloka","masjidbaitulhamdi","sdngedongkuning")
lempuyangan = ("stasiunlempuyangan","pasarlempuyangan","kantorsicepatlempuyangan")
apmd1 = ("sekolahtinggipembangunanmasyarakatdesa","ciclektimoho29b","timohopetsshop")
apmd2 = ("sekolahtinggipembangunanmasyarakatdesa","messkotemtimoho","gmahktimohojogja")
debrito = ("smadebrito","administrasiuin","wismapu")
gedungwanita = ("smadebrito", "administrasiuin","mandalabhaktiwanitatama")
soloambarukmo = ("grandambarukmoplaza", "ambarukmoplaza", "masjidjamialiman")
JantiBawah = ("flyoverjanti","gorlanudadisucipto","masjidalmukhlishun")
janti = ("mirotakampurbabarsari","upnvybabarsari","jnebabarsari")
maguwotrasmart = ("grandorchidhotel","transmartmaguwo","lionparcel")
Maguwoharjo = ("maguwolama","bungabangsamedika","kesehatanpelabuhanjogja")
AdiSucipto = ("imigrasikelasitpijogja","spbupertaminaadisucipto","bankbnikalasan", "adisucipto")
Samirono = ("sanatadharma","samirono","uny")
sanatadharma = ("uny", "samirono", "sanatadharma")
santren = ("depok","dazzlegejayan","hartonomall")
CondongCatur = ("vivoapartemensenturanjogja","pelayananpajakpratamasleman","condongcatur")
kusumanegara2 = ("balaikerajinandanbatik","smksmtijogja","gedungkeuangannegarajogja")
kusumanegara1 = ("balaikerajinandanbatik","makampahlawannasionalkusumanegara","gedungkeuangannegarajogja")
pakualaman = ("museumsasmitalokapanglimabesarjendralsudirman","pasarsentuljogja","bebadanpuropakualaman")
museumbiologi = ("museumsasmitalokapanglimabesarjendralsudirman","superindosultanagung","gerejakatoliksantoyusuf")
Ngabean= ("khususibudananakrachmi","univesitasaisyiyahjogja","klinikprismadiagnostika")
upy = ("universitaspgri","universitaspgriunit2","universitaspgriunit3")
kotabaru = ("mueseumsandi","balaibahasa","ramintenkitchen")
UIN = ("uin", "bniuin", "maxmarastoretimoho")
GedongKuning = ("jec", "grandrohan", "grhapradipta")
Babarsari = ("dinasperhubungan", "universitasatmajaya", "upnveteran")
KopmaUGM = ("mirotakampus", "kfcugm", "smkbopkri")
RRU = ("upnveteran", "amikom", "ringroadutara", "banksinarmas")
UNY = ("tokomerah", "sanatadharma", "uny")
Giwangan = ("pasargiwangan", "terminalgiwangan", "dinasperhubunganyogya")
Monjali = ("asuransijiwakresna", "tamanpelangi", "terminalmonjali", "monumenjogja")
Samsat = ("vivobike", "samsatkota", "smpn14")
Tegalgendu = ("bnikotagede", "planetban", "sdmuhamadiyahkleco")
SGM = ("smkn5", "smkydppmm52", "masjidalislah")
AtmaJaya = ("universitasatmajaya", "babarsari", "bcajwalkmall")
Halte = {
'Mangkubumi1': Mangkubumi1,
'Mangkubumi2': Mangkubumi2,
'Malioboro': Malioboro,
'Katamso1': Katamso1,
'Katamso2': Katamso2,
'kusumanegara': kusumanegara,
'kusumanegara1': kusumanegara1,
'kusumanegara2': kusumanegara2,
'kusumanegara3': kusumanegara3,
'kusumanegara4': kusumanegara4,
'kotabaru': kotabaru,
'KopmaUGM': KopmaUGM,
'SGM': SGM,
'AtmaJaya': AtmaJaya,
'UIN': UIN,
'UNY': UNY,
'CondongCatur': CondongCatur,
'pakualaman': pakualaman,
'Samirono': Samirono,
'santren': santren,
'sanatadharma': sanatadharma,
'AdiSucipto': AdiSucipto,
'Maguwoharjo': Maguwoharjo,
'maguwotrasmart': maguwotrasmart,
'janti': janti,
'JantiBawah': JantiBawah,
'soloambarukmo': soloambarukmo,
'gedungwanita': gedungwanita,
'museumbiologi': museumbiologi,
'Ngabean':Ngabean,
'upy': upy,
'GedongKuning': GedongKuning,
'KHADahlan2': KHADahlan2,
'MTHaryono1': MTHaryono1,
'MTHaryono2': MTHaryono2,
'AhmadYani': AhmadYani,
'Senopati': Senopati,
'Sudirman1': Sudirman1,
'Sudirman2': Sudirman2,
'TamanPintar': TamanPintar,
'YosSudarso': YosSudarso,
'MandalaKrida': MandalaKrida,
'Tejoklusuman': Tejoklusuman,
'lempuyangan': lempuyangan,
'apmd1': apmd1,
'apmd2': apmd2,
'debrito': debrito,
'RRU': RRU,
'Monjali': Monjali,
'soloambarukmo': soloambarukmo,
'Babarsari': Babarsari,
'Giwangan': Giwangan,
'Samsat': Samsat
}
# --- Route data -----------------------------------------------------------
# Each aN/bN tuple is a Trans Jogja route: an ordered sequence of halte keys
# ('A' and 'B' suffixes name opposite directions of the same line).
# NOTE(review): a few entries ('ukdw', 'tugu', 'malioboro' in a2/a1) are raw
# landmark strings rather than Halte keys - presumably data-entry slips, since
# Find() matches route entries against BusStop()'s halte keys; confirm.
a1= ('AdiSucipto', 'Maguwoharjo', 'JantiBawah', 'UIN', 'Sudirman2', 'Mangkubumi1', 'Mangkubumi2', 'Malioboro', 'AhmadYani', 'TamanPintar', 'pakualaman', 'SGM','kusumanegara', 'GedongKuning', 'janti')
b1= ('AdiSucipto','Maguwoharjo', 'Babarsari', 'JantiBawah', 'GedongKuning','kusumanegara', 'SGM', 'pakualaman', 'TamanPintar', 'AhmadYani','KHADahlan2', 'Malioboro', 'Samsat', 'Mangkubumi1', 'Sudirman2', 'Samirono', 'gedungwanita', 'CondongCatur', 'UIN')
a2 = ('Mangkubumi1', 'Mangkubumi2', 'Malioboro', 'AhmadYani', 'Senopati', 'GedongKuning', 'kusumanegara', 'SGM', 'MandalaKrida', 'santren', 'lempuyangan', 'YosSudarso','ukdw', 'Sudirman1', 'Sudirman2', 'tugu','Samirono', 'CondongCatur', 'RRU', 'Monjali', 'malioboro')
b2 = ('RRU', 'CondongCatur', 'Samirono', 'Sudirman2', 'YosSudarso', 'JantiBawah','Katamso2', 'MandalaKrida', 'SGM', 'kusumanegara', 'GedongKuning', 'Senopati', 'AhmadYani', 'KHADahlan2', 'Ngabean', 'KHADahlan2', 'Samsat', 'Monjali')
a3 = ('Giwangan', 'Tegalgendu','janti', 'AdiSucipto', 'Maguwoharjo', 'CondongCatur', 'RRU', 'KopmaUGM', 'Samsat', 'Malioboro', 'Ngabean')
b3 = ('Giwangan', 'Tegalgendu', 'Ngabean', 'KHADahlan2', 'Samsat', 'Mangkubumi1', 'Sudirman2', 'KopmaUGM', 'RRU', 'CondongCatur', 'Maguwoharjo', 'AdiSucipto', 'janti', 'GedongKuning', 'Tegalgendu')
a4 = ('Giwangan', 'lempuyangan', 'KopmaUGM', 'UNY','KopmaUGM', 'UNY', 'gedungwanita', 'UIN', 'SGM', 'kusumanegara3')
b4 = ('Giwangan', 'kusumanegara1', 'SGM', 'kusumanegara4', 'UIN', 'Sudirman1', 'KopmaUGM', 'UNY', 'Sudirman2', 'lempuyangan', 'kusumanegara1', 'KHADahlan2')
a5 = ('RRU', 'Samirono', 'gedungwanita', 'RRU', 'CondongCatur', 'RRU', 'Monjali')
b5 = ('RRU', 'Monjali', 'RRU', 'CondongCatur', 'RRU', 'janti', 'gedungwanita', 'Samirono', 'YosSudarso', 'KopmaUGM', 'lempuyangan')
a6 = ('Ngabean', 'Tejoklusuman', 'RRU', 'lempuyangan')
b6 = ('Ngabean', 'Tejoklusuman', 'lempuyangan', 'RRU', 'MTHaryono2')
a7 = ('Giwangan', 'KHADahlan2', 'GedongKuning', 'janti', 'Babarsari', 'AtmaJaya', 'janti', 'KHADahlan2')
a8 = ('RRU', 'KopmaUGM', 'Malioboro', 'Ngabean', 'MTHaryono2', 'Ngabean', 'KHADahlan2', 'Malioboro')
a9 = ('Giwangan', 'Ngabean', 'Samsat', 'RRU', 'Samsat', 'Ngabean')
a10 = ('Giwangan', 'Tegalgendu', 'SGM', 'apmd1', 'Katamso2', 'Katamso1', 'Ngabean','Katamso1', 'Mangkubumi2', 'Samirono', 'JantiBawah', 'apmd2', 'SGM', 'Tegalgendu')
a11 = ('Giwangan', 'MTHaryono2', 'Ngabean', 'Samsat', 'Sudirman1', 'KopmaUGM', 'Samirono', 'CondongCatur','Samirono', 'Samirono', 'Sudirman2', 'Samsat', 'Ngabean', 'MTHaryono2')
# Registry of route name -> halte sequence, scanned by Find().
Jalur = {"1A":a1, "1B":b1, "2A":a2, "2B":b2, "3A": a3, "3B":b3, "4A":a4, "4B":b4, "5A": a5, "5B":b5, "6A":a6, "6B":b6, "7":a7, "8":a8, "9":a9, "10":a10, "11":a11}
# Prompt for the departure/destination landmarks and normalise them to
# lowercase with whitespace removed, matching the landmark strings above.
# NOTE(review): this duplicates the prompting done in Main.py, which imports
# this module - presumably only one of the two was meant to prompt; confirm.
depature = input("Keberangkatan: ")
destination = input("Tujuan : ")
departure = depature.lower()
destination = destination.lower()
dept = "".join(departure.split())
dest = "".join(destination.split())
def BusStop(x):
    """Return the canonical stop name whose alias group in ``Halte`` contains *x*.

    Falls back to ``None`` when no group matches, matching the original
    function's implicit return.
    """
    return next((stop for stop, aliases in Halte.items() if x in aliases), None)
def iter(tup, x, y):  # NOTE: shadows the builtin iter(); name kept because Find() calls it
    """Number of stops travelled from *x* to *y* along route tuple *tup*.

    Forward travel costs the index difference; "backward" travel is modelled
    as x_index + y_index (looping back through the route's start).
    Returns 0 when both stops are the same — the original returned None here,
    which made the ``short > count`` comparison in Find() raise TypeError.
    """
    x_index = tup.index(x)
    y_index = tup.index(y)
    if x_index < y_index:
        return y_index - x_index
    elif x_index > y_index:
        return x_index + y_index
    return 0  # same stop: zero distance
def Find(x, y):
    """Pick the route serving both stops with the fewest stops travelled.

    x, y : normalised stop names (resolved to canonical stops via BusStop/Halte).
    Returns [boarding_stop, alighting_stop, route_key]; route_key is "" when
    no single route in Jalur serves both stops.
    """
    start = BusStop(x)
    stop = BusStop(y)
    short = float("inf")  # was the magic number 100; inf can never wrongly exclude a long route
    track = ""
    for key, value in Jalur.items():
        if (start in value) and (stop in value):
            count = iter(value, start, stop)
            # guard: iter() historically returns None when start == stop
            if count is not None and short > count:
                short = count
                track = key
    return [start, stop, track]
# Report the journey; reuse `result` instead of recomputing Find() a second
# time as the original final print did (identical output, half the work).
result = Find(dept, dest)
print("naik halte ", result[0])
print("turun halte ", result[1])
print("jalur ", result[2])
print(result)
| {"/Main.py": ["/Jalur.py"]} |
66,298 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/align/suffix_tree/hash/sliding_framer.py | from src.utils.global_settings import GlobalSettings as Settings
class SlidingHashFramer:
    """Rolling base-4 hash over a nucleotide sequence.

    Maintains TREE_DEPTH consecutive HashFrame windows of CHUNK_LEN
    nucleotides each; sliding by one nucleotide cascades the dropped base-4
    digit from the newest frame down through the older ones.
    """

    class HashFrame:
        """Base-4 hash of one CHUNK_LEN-long window.

        The first nucleotide occupies the least-significant digit.
        """

        # Idiomatic lookup table replacing the original if/elif chain.
        _NUCLEO_CODES = {'A': 0, 'C': 1, 'G': 2, 'T': 3}

        def __init__(self, initial_subsequence: str):
            if len(initial_subsequence) == Settings.CHUNK_LEN:
                self.value = sum(
                    [self.__nucleo_to_num(initial_subsequence[i]) * (4 ** i)
                     for i in range(len(initial_subsequence))])
            else:
                raise Exception('Cannot hash init subsequence which len is not as in AlignGlobalSettings')

        def slide_by_value(self, add_value: int) -> int:
            """Drop the lowest base-4 digit, append *add_value* as the highest digit.

            Returns the dropped digit so it can be fed to the next frame.
            """
            _remainder = self.value % 4
            self.value //= 4
            self.value += add_value * (4 ** (Settings.CHUNK_LEN - 1))
            return _remainder

        def slide_by_nucleo(self, add_nucleo: str) -> int:
            """Slide by one nucleotide character; return the dropped base-4 digit."""
            return self.slide_by_value(self.__nucleo_to_num(add_nucleo))

        @property
        def complimentary(self) -> int:
            # Digit-wise complement in base 4: every digit d becomes 3 - d,
            # i.e. A<->T and C<->G, which hashes the complement strand.
            return 4 ** Settings.CHUNK_LEN - 1 - self.value

        @staticmethod
        def __nucleo_to_num(nucleo: str) -> int:
            """Map a nucleotide character to its base-4 digit; raise on anything else."""
            try:
                return SlidingHashFramer.HashFrame._NUCLEO_CODES[nucleo]
            except KeyError:
                raise Exception('Unknown nucleo \'{}\''.format(nucleo))

    def __init__(self, initial_subsequence: str):
        """Build TREE_DEPTH frames covering CHUNK_LEN * TREE_DEPTH nucleotides."""
        if len(initial_subsequence) == Settings.CHUNK_LEN * Settings.TREE_DEPTH:
            self.__hash_path = [
                self.HashFrame(initial_subsequence[_i * Settings.CHUNK_LEN:(_i + 1) * Settings.CHUNK_LEN])
                for _i in range(Settings.TREE_DEPTH)]
        else:
            raise Exception('Cannot hash init subsequence which len is not as in AlignGlobalSettings')

    def slide_by_nucleo(self, add_nucleo: str):
        """Advance the whole window by one nucleotide, cascading carries between frames."""
        _value_additive = self.__hash_path[-1].slide_by_nucleo(add_nucleo)
        for _i in range(len(self.__hash_path) - 2, -1, -1):
            _value_additive = self.__hash_path[_i].slide_by_value(_value_additive)

    def slide_by_frame(self, initial_subsequence: str):
        """Advance by one whole CHUNK_LEN frame: drop the oldest, hash the new."""
        self.__hash_path.pop(0)
        self.__hash_path.append(self.HashFrame(initial_subsequence))

    @property
    def values(self):
        """Hash values of all frames, oldest first."""
        return [_h.value for _h in self.__hash_path]

    @property
    def complimentary(self):
        """Complement-strand hash values of all frames."""
        return [_h.complimentary for _h in self.__hash_path]

    def __getitem__(self, item):
        return self.__hash_path[item].value

    def __len__(self):
        return len(self.__hash_path)
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,299 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/representation.py | from src.utils.global_settings import GlobalSettings
from src.affinity_structure.bacterial import BacterialAffinityStructure
# Configure the alignment globals, then compare the first two genome pairs
# found in the 'grouptest' data folder.
GlobalSettings.init(tree_depth=9, segment_min_size=int(2.5e3))
_structure = BacterialAffinityStructure('grouptest')
for _round in range(2):
    _structure.handle_next_genome()
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,300 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/align/approximate.py | from src.align.suffix_tree.query import QuerySuffixTree
from src.utils.fasta import FastaContent
from src.align.suffix_tree.hash.sliding_framer import SlidingHashFramer as HashFramer
from src.utils.global_settings import GlobalSettings as Settings
class ApproximateAlign:
    """First-pass alignment: slides a rolling hash window over the target
    sequence (forward and reversed) and records, per query sequence, the
    target offsets whose chunk hashes hit leaves in the query suffix tree.
    """

    def __init__(self, target: FastaContent.FastaSequence, query_tree: QuerySuffixTree):
        # One {entry_index: [query positions]} dict per query sequence description.
        self.__chunk_matches = {_description: dict() for _description in query_tree.descriptions}
        self.__compare_seq_with_tree(target, query_tree)

    def __compare_seq_with_tree(self, target: FastaContent.FastaSequence, query_tree: QuerySuffixTree) -> None:
        """Scan *target* once per orientation, probing the tree at every offset.

        NOTE(review): entry indices from the reversed pass are stored negated
        (see `_entry_index *= -1`), apparently so consumers can tell the
        orientation apart by sign — confirm against SegmentalAlign, which
        takes abs() of both keys and values.
        """
        for _reversed in [False, True]:
            print('Comparing {} sequence : '.format('reversed' if _reversed else 'initial'), end='')
            _hash_framer = HashFramer(target[:Settings.CHUNK_LEN * Settings.TREE_DEPTH])
            self.__handle_hash_framer(_hash_framer, 0, query_tree)
            for _i in range(Settings.CHUNK_LEN * Settings.TREE_DEPTH, len(target)):
                if _i % 400000 == 0:
                    print(f'{_i // 100000}e+5..', end='')  # coarse progress ticker
                _hash_framer.slide_by_nucleo(target[_i])
                # Offset of the window start within the (possibly reversed) target.
                _entry_index = _i - Settings.CHUNK_LEN * Settings.TREE_DEPTH + 1
                if _reversed:
                    # Map back to original-orientation coordinates, then negate
                    # to mark the hit as coming from the reversed pass.
                    _entry_index = len(target) - 1 - _entry_index
                    _entry_index *= -1
                self.__handle_hash_framer(_hash_framer, _entry_index, query_tree)
            del _hash_framer
            # In-place flip: the second loop iteration scans the reverse strand,
            # and the flip after that iteration restores the original order.
            target.reverse()
            print()

    def __handle_hash_framer(self, hash_framer: HashFramer, entry_index: int, query_tree: QuerySuffixTree):
        """Probe the tree with both the direct and complementary hash paths and
        accumulate any leaf hits under *entry_index*."""
        for _hash_path in [hash_framer.values, hash_framer.complimentary]:
            _query_leaf = query_tree.get_leaf(_hash_path)
            if _query_leaf:
                for _leaf_description in _query_leaf.keys():
                    if entry_index not in self.__chunk_matches[_leaf_description].keys():
                        self.__chunk_matches[_leaf_description][entry_index] = list()
                    self.__chunk_matches[_leaf_description][entry_index] += _query_leaf[_leaf_description]

    @property
    def keys(self) -> list:
        # Descriptions of all query sequences known to the tree.
        return list(self.__chunk_matches.keys())

    def __getitem__(self, item) -> dict:
        # Matches for one query sequence: {entry_index: [query chunk positions]}.
        return self.__chunk_matches[item]
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,301 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/utils/global_settings.py | class GlobalSettings:
    # Nucleotides per hash chunk; populated by init().
    CHUNK_LEN = None
    # Number of chained chunks hashed per suffix-tree path; populated by init().
    TREE_DEPTH = None
    # Minimum dot count a candidate segment needs to survive filtering; populated by init().
    MIN_CONSIDERING_SEGMENT_LEN = None
    # Minimum segment extent in nucleotides (parameter scaled by CHUNK_LEN); populated by init().
    SEGMENT_MIN_SIZE = None
    # Maximum dot-to-dot gap (nucleotides) merged into one segment; populated by init().
    SEGMENTS_JOIN_SIZE = None
    # Sample every DOT_SKIP_RATE-th x column during segment search.
    DOT_SKIP_RATE = 1
    # Root folder containing the FASTA input data.
    DATA_FOLDER = './data/'

    @staticmethod
    def init(chunk_len=14, tree_depth=7, min_considering_segment_amount=int(1e2), segment_min_size=int(1e4)):
        """Populate all tunable settings; must be called before any alignment work.

        segment_min_size (and the derived join size) are given in chunks by
        the caller and stored scaled to nucleotides via CHUNK_LEN.
        """
        GlobalSettings.CHUNK_LEN = chunk_len
        GlobalSettings.TREE_DEPTH = tree_depth
        GlobalSettings.MIN_CONSIDERING_SEGMENT_LEN = min_considering_segment_amount
        GlobalSettings.SEGMENT_MIN_SIZE = segment_min_size * GlobalSettings.CHUNK_LEN
        GlobalSettings.SEGMENTS_JOIN_SIZE = (segment_min_size + 3) * GlobalSettings.CHUNK_LEN
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,302 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/analysis/segmental.py | from src.align.segmental import SegmentalAlign
class SegmentalAnalysis:
    """Analysis stage over a completed SegmentalAlign result (placeholder)."""

    def __init__(self, segmental_align: SegmentalAlign):
        self.__run(segmental_align)

    def __run(self, segmental_align: SegmentalAlign):
        # TODO: analysis of the aligned segments is not implemented yet.
        pass
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,303 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/utils/fasta.py | from src.utils.global_settings import GlobalSettings
from copy import copy
class FastaContent:
    """Parsed FASTA file: an ordered collection of (description, sequence) records."""

    class FastaSequence:
        """One FASTA record; behaves like an indexable, in-place reversible string."""

        def __init__(self, _description: str, _sequence: str):
            self.__description = _description
            self.__sequence = _sequence

        def reverse(self):
            """Reverse the nucleotide sequence in place."""
            self.__sequence = self.__sequence[::-1]

        @property
        def description(self):
            # str is immutable, so copy() is effectively a no-op kept for
            # interface stability with the rest of the codebase.
            return copy(self.__description)

        def __getitem__(self, item): return self.__sequence[item]

        def __len__(self): return len(self.__sequence)

    def __init__(self, filename: str):
        """Parse *filename* (relative to GlobalSettings.DATA_FOLDER) into records.

        Each '>' block becomes one FastaSequence: the first line is the
        description and the remaining lines are concatenated into the sequence.
        """
        self.__sequences = list()
        with open(GlobalSettings.DATA_FOLDER + filename, 'r') as _f:
            # read() replaces the original ''.join(_f.readlines()):
            # identical text, one allocation instead of a list of lines.
            for _raw_seq in _f.read().split('>')[1:]:
                _splitted = _raw_seq.split('\n')
                self.__sequences.append(FastaContent.FastaSequence(_splitted[0], ''.join(_splitted[1:])))

    def __getitem__(self, item): return self.__sequences[item]

    def __len__(self): return len(self.__sequences)

    @property
    def first_seq(self):
        """The first record in the file."""
        return self.__sequences[0]
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,304 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/align/suffix_tree/query.py | from src.utils.fasta import FastaContent
from src.utils.global_settings import GlobalSettings as Settings
from src.align.suffix_tree.hash.sliding_framer import SlidingHashFramer as HashFramer
from copy import copy, deepcopy
class QuerySuffixTree:
    """Depth-TREE_DEPTH hash trie mapping chunk-hash paths to leaves of the
    form {sequence description: [entry indices]}."""

    def __init__(self):
        self.__tree = dict()
        self.__seqs_descriptions = list()

    # Annotation stringified: FastaContent is a project type resolved lazily.
    def supply(self, sequence: 'FastaContent.FastaSequence') -> None:
        """Index every CHUNK_LEN-aligned window of *sequence* into the trie."""
        print('Building suffix tree : ', end='')  # TODO Delete after debugging
        self.__seqs_descriptions.append(sequence.description)
        _hash_framer = HashFramer(sequence[:Settings.CHUNK_LEN * Settings.TREE_DEPTH])
        self.__update_leaf(_hash_framer.values, 0, sequence.description)
        for _i in range(Settings.CHUNK_LEN * Settings.TREE_DEPTH, len(sequence) - Settings.CHUNK_LEN,
                        Settings.CHUNK_LEN):
            if _i % 100000 == 0:
                print(f'{_i // 100000}e+5..', end='')  # coarse progress ticker
            _hash_framer.slide_by_frame(sequence[_i:_i + Settings.CHUNK_LEN])
            self.__update_leaf(
                _hash_framer.values, _i - Settings.CHUNK_LEN * (Settings.TREE_DEPTH - 1), sequence.description)
        print()
        del _hash_framer

    def __update_leaf(self, _hash_path: list, entry_index: int, seq_description: str) -> None:
        """Walk/create the node path for *_hash_path* and record *entry_index*
        under *seq_description* at the leaf (deduplicated)."""
        _current_node = self.__tree
        for _h in _hash_path:
            # setdefault replaces the original `if _h not in node.keys()` dance.
            _current_node = _current_node.setdefault(_h, dict())
        _positions = _current_node.setdefault(seq_description, list())
        if entry_index not in _positions:
            _positions.append(entry_index)

    def get_leaf(self, _hash_path: list) -> dict:
        """Return a deep copy of the leaf at *_hash_path*, or {} when the path
        is absent (deep copy protects the trie from caller mutation)."""
        _current_node = self.__tree
        for _h in _hash_path:
            if _h not in _current_node:
                return dict()
            _current_node = _current_node[_h]
        return deepcopy(_current_node)

    @property
    def descriptions(self) -> list:
        """Descriptions of all sequences indexed so far (copied)."""
        return copy(self.__seqs_descriptions)

    def is_empty(self) -> bool:
        return len(self.__seqs_descriptions) == 0

    def __repr__(self) -> str:
        return str(self.__tree)
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,305 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/affinity_structure/bacterial.py | from src.utils.global_settings import GlobalSettings as Settings
from src.utils.fasta import FastaContent
from src.align.segmental import SegmentalAlign
from src.align.suffix_tree.query import QuerySuffixTree
import os
class BacterialAffinityStructure:
    """Sequentially aligns each genome file in a folder against a growing
    suffix tree of every previously processed genome."""

    def __init__(self, folder_name: str):
        _folder_path = Settings.DATA_FOLDER + folder_name
        self.__filenames = [folder_name + '/' + _name for _name in os.listdir(_folder_path)]
        self.__query_tree = QuerySuffixTree()

    def handle_next_genome(self) -> None:
        """Compare the next genome against the tree, then fold it into the tree."""
        if self.__query_tree.is_empty():
            # First call only: seed the tree with the very first genome.
            self.__build_in_tree_next(FastaContent(self.__filenames.pop(0)))
        _compared_fasta = self.__compare_with_tree_next()
        print('Comparing : {}'.format(_compared_fasta.first_seq.description))  # TODO delete after debugging
        self.__build_in_tree_next(_compared_fasta)
        del _compared_fasta

    def __build_in_tree_next(self, fasta: FastaContent) -> None:
        """Index the first sequence of *fasta* into the query tree."""
        self.__query_tree.supply(fasta.first_seq)

    def __compare_with_tree_next(self) -> FastaContent:
        """Align the next genome file against the tree, plot, and return it."""
        _fasta = FastaContent(self.__filenames.pop(0))
        _segmental = SegmentalAlign(_fasta.first_seq, self.__query_tree)
        _segmental.plot()
        del _segmental
        return _fasta
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,306 | xhiggs/BacterialGenomesAligner | refs/heads/main | /src/align/segmental.py | from src.align.suffix_tree.query import QuerySuffixTree
from src.align.approximate import ApproximateAlign
from src.utils.global_settings import GlobalSettings as Settings
from src.utils.fasta import FastaContent
from matplotlib import pyplot as plt
class SegmentalAlign:
    """Clusters approximate chunk matches into straight-line segments
    (candidate syntenic blocks) and exposes them per query sequence."""

    class Segment:
        """A straight-line cluster of match dots in (target, query) coordinates."""

        def __init__(self, start_x=None, start_y=None, end_x=None, end_y=None, dots=None):
            self.start_x = start_x
            self.start_y = start_y
            self.end_x = end_x
            self.end_y = end_y
            # None sentinel avoids the shared-mutable-default trap.
            self.dots = dots if dots is not None else list()

        @property
        def coords(self):
            return self.start_x, self.start_y, self.end_x, self.end_y

        @property
        def center_x(self):
            return (self.start_x + self.end_x) // 2

        @property
        def center_y(self):
            return (self.start_y + self.end_y) // 2

        @property
        def size_x(self):
            return abs(self.start_x - self.end_x)

        @property
        def size_y(self):
            return abs(self.start_y - self.end_y)

        def is_tilted_correctly(self):
            # True for segments running upwards (or flat) left-to-right.
            return self.start_y <= self.end_y

        @property
        def k(self):
            """Slope of the segment (rise over run)."""
            return (self.end_y - self.start_y) / (self.end_x - self.start_x)

        @property
        def b(self):
            """Y-intercept of the segment's line."""
            return self.end_y - self.end_x * self.k

        def cope_coords(self):
            """New Segment sharing only this one's endpoint coordinates (no dots).
            NOTE(review): name looks like a typo for copy_coords; kept for callers."""
            return SegmentalAlign.Segment(self.start_x, self.start_y, self.end_x, self.end_y, dots=[])

        def shift(self, dx=0, dy=0):
            """Translate the segment and all of its dots in place; returns self."""
            self.start_x += dx
            self.start_y += dy
            self.end_x += dx
            self.end_y += dy
            for _i in range(len(self.dots)):
                self.dots[_i][0] += dx
                self.dots[_i][1] += dy
            return self

        def rotate_y(self, rotation_center, segment=True, dots=None):
            """Mirror y-coordinates around *rotation_center*; returns self.

            segment=True mirrors the endpoints; any non-None *dots* value
            additionally mirrors self.dots (the parameter acts only as a
            flag — TODO confirm that was intended).
            """
            if segment:
                self.start_y -= (self.start_y - rotation_center) * 2
                self.end_y -= (self.end_y - rotation_center) * 2
            if dots is not None:
                for i in range(len(self.dots)):
                    self.dots[i][1] -= (self.dots[i][1] - rotation_center) * 2
            return self

        @staticmethod
        def linear_approx_dots(dots):
            """Least-squares line fit through *dots*; returns (slope, intercept).

            Raises ZeroDivisionError when all dots share a single x value.
            """
            _n, _sum_x, _sum_y, _sum_x2, _sum_xy = len(dots), 0, 0, 0, 0
            for _x, _y in dots:
                _sum_x += _x
                _sum_y += _y
                _sum_x2 += _x ** 2
                _sum_xy += _x * _y
            _k = (_n * _sum_xy - _sum_x * _sum_y) / (_n * _sum_x2 - _sum_x * _sum_x)
            return _k, (_sum_y - _k * _sum_x) / _n

        @staticmethod
        def distance2(x1, y1, x2, y2):
            """Squared Euclidean distance (comparisons stay sqrt-free)."""
            return (x1 - x2) ** 2 + (y1 - y2) ** 2

        def __repr__(self):
            return "Segment(start_x={}, start_y={}, end_x={}, end_y={}, dots=[{}])".format(
                self.start_x, self.start_y, self.end_x, self.end_y, len(self.dots))

    # Annotations stringified: project types resolved lazily.
    def __init__(self, target_sequence: 'FastaContent.FastaSequence', query_tree: 'QuerySuffixTree'):
        """Run the approximate alignment, then cluster its matches into segments."""
        self.__seqs_segments = dict()
        _approx = ApproximateAlign(target_sequence, query_tree)
        self.__plot_approx(_approx)
        for _seq_d in _approx.keys:
            # graph[x] = matching query positions for target offset x
            # (abs() folds the reversed-pass sign convention away).
            _graph = [list() for _ in range(len(target_sequence) + 1)]
            for _k in _approx[_seq_d].keys():
                for _v in _approx[_seq_d][_k]:
                    _graph[abs(_k)].append(abs(_v))
            self.__seqs_segments[_seq_d] = self.__find_segments(_graph)
            del _graph
        del _approx

    @staticmethod
    def __find_segments(graph: list) -> list:
        """Greedy-cluster match dots into segments, line-fit each, and drop
        segments that are too short or too sparse."""
        print('Searching for segments ...')
        _all_segments = list()
        _segments_join_size = Settings.SEGMENTS_JOIN_SIZE ** 2
        _segment_min_size = Settings.SEGMENT_MIN_SIZE ** 2
        for _x in range(0, len(graph), Settings.DOT_SKIP_RATE):
            for _y in graph[_x]:
                # Attach the dot to the first segment whose last (and, when
                # available, second-to-last) dot lies within the join radius.
                for _segment in _all_segments:
                    if SegmentalAlign.Segment.distance2(_x, _y, *_segment.dots[-1]) <= _segments_join_size and \
                            (len(_segment.dots) == 1 or SegmentalAlign.Segment.distance2(_x, _y, *_segment.dots[-2]) <=
                             _segments_join_size):
                        _segment.dots.append([_x, _y])
                        break
                else:
                    _all_segments.append(SegmentalAlign.Segment(dots=[[_x, _y]]))
        for _segment in _all_segments:
            _segment.dots.sort()
            _segment.start_x, _segment.start_y = _segment.dots[0]
            _segment.end_x, _segment.end_y = _segment.dots[-1]
            if len(_segment.dots) >= 2:
                # Replace raw endpoint ys with the least-squares fit.
                k, b = SegmentalAlign.Segment.linear_approx_dots(_segment.dots)
                _segment.start_y = int(k * _segment.start_x + b)
                _segment.end_y = int(k * _segment.end_x + b)
        _all_segments = [_segment for _segment in _all_segments if SegmentalAlign.Segment.distance2(
            _segment.start_x, _segment.start_y, _segment.end_x, _segment.end_y) >= _segment_min_size]
        _all_segments.sort(key=lambda _segment: (_segment.start_x, _segment.start_y))
        # BUG FIX: the original removed items from _all_segments while iterating
        # over it, which skips the element after every removal; filter instead.
        _all_segments = [_segment for _segment in _all_segments
                         if len(_segment.dots) >= Settings.MIN_CONSIDERING_SEGMENT_LEN]
        return _all_segments

    def __len__(self) -> int:
        return len(self.__seqs_segments.keys())

    def __getitem__(self, item) -> list:
        return self.__seqs_segments[item]

    def plot(self):  # TODO delete after debugging
        """Draw each query's segments as line plots, one figure per query."""
        for _s_q in self.__seqs_segments.keys():
            plt.figure(figsize=(8, 6))
            for _segment in self.__seqs_segments[_s_q]:
                plt.plot([_segment.start_x, _segment.end_x], [abs(_segment.start_y), abs(_segment.end_y)])
            plt.grid()
            plt.title(_s_q)
            plt.show()

    @staticmethod
    def __plot_approx(approx: 'ApproximateAlign'):
        """Scatter-plot the raw approximate matches, one figure per query."""
        for _seq_q in approx.keys:
            plt.figure(figsize=(8, 6))
            _x, _y = list(), list()
            for _k in approx[_seq_q]:
                for _v in approx[_seq_q][_k]:
                    _x.append(abs(_k))
                    _y.append(abs(_v))
            plt.plot(_x, _y, '.')
            plt.title(_seq_q)
            plt.grid()
            plt.show()
| {"/src/align/suffix_tree/hash/sliding_framer.py": ["/src/utils/global_settings.py"], "/src/representation.py": ["/src/utils/global_settings.py", "/src/affinity_structure/bacterial.py"], "/src/align/approximate.py": ["/src/align/suffix_tree/query.py", "/src/utils/fasta.py", "/src/align/suffix_tree/hash/sliding_framer.py", "/src/utils/global_settings.py"], "/src/analysis/segmental.py": ["/src/align/segmental.py"], "/src/utils/fasta.py": ["/src/utils/global_settings.py"], "/src/align/suffix_tree/query.py": ["/src/utils/fasta.py", "/src/utils/global_settings.py", "/src/align/suffix_tree/hash/sliding_framer.py"], "/src/affinity_structure/bacterial.py": ["/src/utils/global_settings.py", "/src/utils/fasta.py", "/src/align/segmental.py", "/src/align/suffix_tree/query.py"], "/src/align/segmental.py": ["/src/align/suffix_tree/query.py", "/src/align/approximate.py", "/src/utils/global_settings.py", "/src/utils/fasta.py"]} |
66,309 | sansice/stardust | refs/heads/master | /book_search/__init__.py | from flask import Flask
# Flask application singleton; static assets and templates come from the
# pre-built web bundle shipped inside the package.
app = Flask(__name__, static_folder='./web/dist', template_folder="./web/html")
# Imported only after `app` exists — presumably to avoid a circular import
# with book_search.serve.serve; confirm before reordering.
from book_search.serve.serve import serve_blueprint
# register the blueprints
app.register_blueprint(serve_blueprint)
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,310 | sansice/stardust | refs/heads/master | /book_search/process/utils/const.py | # home_dir = "D:\sans\OneDrive - HCL Technologies Ltd\work\HCL\projects\stardust"
import os
project_name = 'stardust'
sub_project = 'book_search'
# Paths derived from this file's location, so the package works from any checkout dir.
file_path = os.path.dirname(os.path.abspath(__file__))
sub_project_home = os.path.dirname(os.path.dirname(file_path))
project_home = os.path.dirname(os.path.dirname(os.path.dirname(file_path)))
data_path = os.path.join(sub_project_home, 'data')
work_dir = os.path.join(sub_project_home, 'work_dir')
# BX-Books data set: CSV locations and the settings used to read/model it.
bx_books_csv_path = os.path.join(data_path, 'bxbooks')
bx_books_info_csv = os.path.join(bx_books_csv_path, 'BX-Books.csv')
bx_books_users_csv = os.path.join(bx_books_csv_path, 'BX-Users.csv')
bx_books_ratings_csv = os.path.join(bx_books_csv_path, 'BX-Book-Ratings.csv')
bx_books_encoding = 'latin-1'
# kNN configuration consumed by RecommendBooks (see find_similar_users).
bx_books_algorithm = 'brute'
bx_books_metrics = 'cosine'
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,311 | sansice/stardust | refs/heads/master | /tests/test_book_search/test_process/test_churner/test_recommend_books.py | import os
import unittest
from book_search.process.churner.process_bk_books_data import ProcessBXBooksData
from book_search.process.churner.recommend_books import RecommendBooks
class TestRecommendBooks(unittest.TestCase):
    """Exercises RecommendBooks against the skimmed BX-Books data set
    (requires the CSV files on disk; these are integration-style tests)."""

    def setUp(self):
        # Build the recommender on freshly skimmed data for every test.
        bk_books_data_processor = ProcessBXBooksData()
        bk_books_data_processor.skim_data()
        self.recommend_books = RecommendBooks(bk_books_data_processor)

    def tearDown(self) -> None:
        pass

    def test_get_popular_books(self):
        """get_popular_items() should produce a JSON records string of top-rated books."""
        # self.bk_books_data_processor.print_data()
        return_str = self.recommend_books.get_popular_items()
        print(return_str)

    # The experiments below are kept disabled; they call recommender methods
    # (find_similar_users / predict_* / recommend_item) interactively.
    # def find_similar_users(self):
    #     # self.bk_books_data_processor.print_data()
    #     similarities, indices = self.recommend_books.find_similar_users(11676)
    #     print(similarities)
    #     print(indices)

    # def test_predict_userbased(self):
    #     # self.bk_books_data_processor.print_data()
    #     prediction = self.recommend_books.predict_userbased(11676, '0001056107')
    #     print(prediction)

    # def test_predict_itembased(self):
    #     # self.bk_books_data_processor.print_data()
    #     prediction = self.recommend_books.predict_itembased(11676, '0001056107')
    #     print(prediction)

    # def test_recommend_item(self):
    #     # self.bk_books_data_processor.print_data()
    #     # prediction = self.recommend_books.recommend_item(4385)
    #     # print(prediction)
    #     # prediction = self.recommend_books.recommend_item(4385, False)
    #     # print(prediction)
    #     prediction = self.recommend_books.recommend_item(4385, True, metric='correlation')
    #     print(prediction)
    #     # prediction = self.recommend_books.recommend_item(4385, False, metric='correlation')
    #     # print(prediction)
if __name__ == '__main__':
    # The original instantiated TestRecommendBooks() and called
    # test_recommend_item(), which is commented out above and would raise
    # AttributeError; let unittest discover and run the active tests instead.
    unittest.main()
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,312 | sansice/stardust | refs/heads/master | /tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py | import os
import unittest
from book_search.process.churner.process_bk_books_data import ProcessBXBooksData
class TestProcessBXBooksData(unittest.TestCase):
    """Exercises the BX-Books data processor (integration-style; needs the CSVs)."""

    def setUp(self):
        # Fresh processor instance per test.
        self.bk_books_data_processor = ProcessBXBooksData()

    def tearDown(self) -> None:
        pass

    def test_skim_data(self):
        """skim_data() should run cleanly and leave the data printable/sparsity-checkable."""
        # self.bk_books_data_processor.print_data()
        self.bk_books_data_processor.skim_data()
        self.bk_books_data_processor.print_data()
        sparsity = self.bk_books_data_processor.check_sparcity()
        print(sparsity)

    # def test_check_sparcity(self):
    #     pass
    #     self.bk_books_data_processor.plot_ratings()

    def test_get_explicit_ratings(self):
        """get_explicit_ratings() should return the explicit-ratings subset
        (presumably non-zero ratings — confirm in ProcessBXBooksData)."""
        explicit_ratings = self.bk_books_data_processor.get_explicit_ratings()
        print(explicit_ratings)


if __name__ == '__main__':
    unittest.main()
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,313 | sansice/stardust | refs/heads/master | /book_search/process/churner/recommend_books.py | import json
import numpy
import pandas
from sklearn.neighbors import NearestNeighbors
from book_search.process.utils import const
import book_search.process.utils.utils as utils
class RecommendBooks(metaclass=utils.Singleton):
    """k-NN collaborative-filtering recommender for the Book-Crossing data.

    *data_object* must expose ``get_explicit_ratings()`` (userID / ISBN /
    bookRating rows) and a ``books_info`` DataFrame (titles, authors, ...).
    Singleton: one recommender instance is shared application-wide.
    """

    def __init__(self, data_object):
        self.data_object = data_object
        # Dense userID x ISBN matrix of explicit ratings; 0 marks "unrated".
        self.rating_matrix = self._get_rating_matrix()

    def get_popular_items(self):
        """Return the ten books with the highest summed rating as JSON records."""
        ratings_explicit = self.data_object.get_explicit_ratings()
        ratings_count = pandas.DataFrame(ratings_explicit.groupby(['ISBN'])['bookRating'].sum())
        top10 = ratings_count.sort_values('bookRating', ascending=False).head(10)
        print("Following books are recommended")
        formatted_data = top10.merge(self.data_object.books_info, left_index=True, right_on='ISBN')
        formatted_data.columns = ["ratings", "isbn", "name", "author", "yop", "publisher"]
        # Stringify every cell so the JSON payload has a uniform value type.
        formatted_data = formatted_data.applymap(str)
        return formatted_data.to_json(orient='records')

    def _get_rating_matrix(self):
        """Build the user x item matrix the k-NN models operate on.

        Keeps only users with >= 100 explicit ratings and rating values that
        occur >= 100 times (to limit sparsity); missing cells become 0.
        """
        ratings_explicit = self.data_object.get_explicit_ratings()
        counts1 = ratings_explicit['userID'].value_counts()
        ratings_explicit = ratings_explicit[ratings_explicit['userID'].isin(counts1[counts1 >= 100].index)]
        counts = ratings_explicit['bookRating'].value_counts()
        ratings_explicit = ratings_explicit[ratings_explicit['bookRating'].isin(counts[counts >= 100].index)]
        ratings_matrix = ratings_explicit.pivot(index='userID', columns='ISBN', values='bookRating')
        ratings_matrix.fillna(0, inplace=True)
        ratings_matrix = ratings_matrix.astype(numpy.int32)
        return ratings_matrix

    def find_similar_users(self, user_id, metric, k):
        """Return (similarities, indices) of the k nearest users to *user_id*."""
        ratings = self.rating_matrix
        model_knn = NearestNeighbors(metric=metric, algorithm=const.bx_books_algorithm)
        model_knn.fit(ratings)
        loc = ratings.index.get_loc(user_id)
        # k + 1 because the query user is always its own nearest neighbour.
        distances, indices = model_knn.kneighbors(ratings.iloc[loc, :].values.reshape(1, -1), n_neighbors=k + 1)
        similarities = 1 - distances.flatten()
        return similarities, indices

    def find_simialr_items(self, item_id, metric, k):
        """Return (similarities, indices) of the k nearest items to *item_id*.

        (Method name keeps its historical misspelling for caller compatibility.)
        """
        # Transpose so rows are items and the same k-NN machinery applies.
        ratings = self.rating_matrix.T
        loc = ratings.index.get_loc(item_id)
        model_knn = NearestNeighbors(metric=metric, algorithm=const.bx_books_algorithm)
        model_knn.fit(ratings)
        distances, indices = model_knn.kneighbors(ratings.iloc[loc, :].values.reshape(1, -1), n_neighbors=k + 1)
        similarities = 1 - distances.flatten()
        print("similarities, indices", similarities, indices)
        return similarities, indices

    def predict_userbased(self, user_id, item_id, metric, k):
        """Predict *user_id*'s rating of *item_id* from similar users' ratings."""
        ratings = self.rating_matrix
        user_loc = ratings.index.get_loc(user_id)
        item_loc = ratings.columns.get_loc(item_id)
        similarities, indices = self.find_similar_users(user_id, metric, k)  # similar users based on cosine similarity
        mean_rating = ratings.iloc[user_loc, :].mean()
        # Exclude the query user's own similarity (always 1) from the weights.
        sum_wt = numpy.sum(similarities) - 1
        wtd_sum = 0
        for i in range(0, len(indices.flatten())):
            if indices.flatten()[i] == user_loc:
                continue
            else:
                # Mean-centred neighbour rating, weighted by its similarity.
                ratings_diff = ratings.iloc[indices.flatten()[i], item_loc] - numpy.mean(ratings.iloc[indices.flatten()[i], :])
                product = ratings_diff * (similarities[i])
                wtd_sum = wtd_sum + product
        prediction = int(round(mean_rating + (wtd_sum / sum_wt)))
        # Clamp to the valid 1..10 rating scale.
        if prediction <= 0:
            prediction = 1
        elif prediction > 10:
            prediction = 10
        print('\nPredicted rating for user {0} -> item {1}: {2}'.format(user_id, item_id, prediction))
        return prediction

    def predict_itembased(self, user_id, item_id, metric, k):
        """Predict *user_id*'s rating of *item_id* from similar items' ratings."""
        ratings = self.rating_matrix
        wtd_sum = 0
        user_loc = ratings.index.get_loc(user_id)
        item_loc = ratings.columns.get_loc(item_id)
        similarities, indices = self.find_simialr_items(item_id, metric, k)
        sum_wt = numpy.sum(similarities) - 1
        for i in range(0, len(indices.flatten())):
            if indices.flatten()[i] == item_loc:
                continue
            else:
                product = ratings.iloc[user_loc, indices.flatten()[i]] * (similarities[i])
                wtd_sum = wtd_sum + product
        prediction = int(round(wtd_sum / sum_wt))
        # In very sparse datasets the correlation metric can yield ratings
        # outside the scale, so clamp to the valid 1..10 range.
        if prediction <= 0:
            prediction = 1
        elif prediction > 10:
            prediction = 10
        print('\nPredicted rating for user {0} -> item {1}: {2}'.format(user_id, item_id, prediction))
        return prediction

    def recommend_item(self, user_id, item_based=True, metric='cosine', k=10):
        """Recommend up to ten books for *user_id*; returns a JSON string.

        Items the user has NOT yet rated get a predicted rating; already
        rated items are tagged -1 so they can never reach the top 10.
        NOTE(review): the invalid-user-id branches return the placeholder
        list itself rather than a JSON string — verify callers handle both.
        """
        ratings = self.rating_matrix
        recommended_books = []
        recommended_book = {"name": "", "ratings": "", "isbn": "", "author": "", "yop": "", "publisher": ""}
        recommended_books.append(recommended_book)
        if not str(user_id).isdigit():
            print("User id is not digit")
            return recommended_books
        else:
            user_id = int(user_id)
            if user_id not in ratings.index.values:
                print("User id is not valid")
                return recommended_books
            else:
                prediction = []
                for i in range(ratings.shape[1]):
                    # BUGFIX: was `!= 0`, which predicted only for books the
                    # user had ALREADY rated — contradicting both the inline
                    # "not rated already" comment and the -1 sentinel below.
                    # 0 means "unrated" (see _get_rating_matrix's fillna(0)).
                    if ratings[str(ratings.columns[i])][user_id] == 0:  # not rated yet
                        if item_based:
                            prediction.append(self.predict_itembased(user_id, str(ratings.columns[i]), metric, k))
                        else:
                            prediction.append(self.predict_userbased(user_id, str(ratings.columns[i]), metric, k))
                    else:
                        prediction.append(-1)  # already rated: never recommend
                prediction = pandas.Series(prediction)
                prediction = prediction.sort_values(ascending=False)
                recommended = prediction[:10]
                for i in range(len(recommended)):
                    # NOTE(review): recommended.index[i] is a positional index
                    # over rating_matrix columns but is used here as a row
                    # label of books_info — confirm these actually line up.
                    recommended_book = {"name": self.data_object.books_info.bookTitle[recommended.index[i]],
                                        "ratings": "*****",
                                        "isbn": str(self.data_object.books_info.ISBN[recommended.index[i]]),
                                        "author": self.data_object.books_info.bookAuthor[recommended.index[i]],
                                        "yop": str(self.data_object.books_info.yearOfPublication[recommended.index[i]]),
                                        "publisher": self.data_object.books_info.publisher[recommended.index[i]]}
                    recommended_books.append(recommended_book)
                return json.dumps(recommended_books)
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,314 | sansice/stardust | refs/heads/master | /book_search/process/churner/churn_data.py |
import book_search.process.utils.const as const
import book_search.process.utils.utils as utils
from book_search.process.churner.data_factory import DataFactory
class ChurnData(metaclass=utils.Singleton):
    """Facade tying a dataset processor to its recommender.

    Singleton: constructed once per process, then shared by every route.
    """

    def __init__(self, data_type):
        factory = DataFactory()
        self.data_object = factory.get_data_object(data_type)
        self.recommender = factory.get_recommender_object(data_type, self.data_object)
        # Clean the raw frames once, immediately after loading.
        self.data_object.skim_data()

    def get_popular_items(self):
        """Delegate: JSON of the ten most-rated books."""
        return self.recommender.get_popular_items()

    def recommend_item(self, user_id):
        """Delegate: personalised recommendations for *user_id*."""
        return self.recommender.recommend_item(user_id)
# print(books_raw.shape)
# print(users.shape)
# print(ratings.shape)
#
# print(books_raw.head())
# print(books_info.head())
#
# print(ratings.bookRating.unique())
# ratings_new = ratings[ratings.ISBN.isin(books_info.ISBN)]
# print(ratings_new)
# ratings = ratings[ratings.userID.isin(users.userID)]
#
# print(books_info.yearOfPublication.unique())
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,315 | sansice/stardust | refs/heads/master | /tests/test_book_search/test_process/test_churner/test_churn_data.py | import os
import unittest
import book_search.process.churner.churn_data as const
from book_search.process.churner.churn_data import ChurnData
class TestChurnData(unittest.TestCase):
    """Smoke tests for the ChurnData facade."""

    def setUp(self):
        # ChurnData is a singleton, so this is cheap after the first test.
        self.churn_data = ChurnData('bx_books')

    def tearDown(self) -> None:
        pass

    def test_get_popular_items(self):
        popular = self.churn_data.get_popular_items()
        print(popular)
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,316 | sansice/stardust | refs/heads/master | /book_search/start.py | import sys
import os
# sys.path.append(os.path.abspath(os.path.dirname(__file__)))
# Put the repository root on sys.path so the `book_search` package resolves
# when this file is executed directly (python book_search/start.py).
sys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(__file__))))
from book_search import app

if __name__ == '__main__':
    # Dev entry point: load the development config, then start Flask's server.
    app.config.from_object('configurations.DevelopmentConfig')
    app.run()
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,317 | sansice/stardust | refs/heads/master | /book_search/process/utils/test_imports.py | import re
import os
import sys
import pandas
import matplotlib.pyplot as plt
import sklearn.metrics as metrics
import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.spatial.distance import correlation
from sklearn.metrics.pairwise import pairwise_distances
import ipywidgets as widgets
from IPython.display import display, clear_output
from contextlib import contextmanager
import warnings
# warnings.filterwarnings('ignore')
import numpy as np
import seaborn as sns | {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,318 | sansice/stardust | refs/heads/master | /book_search/process/utils/utils.py |
class Singleton(type):
    """Metaclass caching exactly one instance per class.

    The first call to ``Cls()`` constructs the instance; every later call
    returns that same object.
    """

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # EAFP: the common case (instance exists) is a single dict lookup.
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
def string_to_file(string, file):
    """Write ``str(string)`` to the path *file*, overwriting existing content.

    Fix: the original opened and closed the handle manually, leaking it if
    ``write`` raised; ``with`` guarantees the file is closed either way.
    """
    with open(file, "w") as text_file:
        text_file.write(str(string))
def file_to_string(file_locaiton, as_lines=False):
    """Read text from the path *file_locaiton*.

    With ``as_lines=True`` returns the whole file with newlines removed;
    by default returns only the first line (including its newline).
    NOTE(review): this flag's behaviour looks inverted relative to its name
    — confirm against callers before changing it. The parameter name keeps
    its historical misspelling for keyword-argument compatibility.
    """
    with open(file_locaiton, 'r') as handle:
        if as_lines:
            return handle.read().replace('\n', '')
        return handle.readline()
66,319 | sansice/stardust | refs/heads/master | /book_search/process/churner/data_factory.py | from book_search.process.churner.process_bk_books_data import ProcessBXBooksData
from book_search.process.churner.recommend_books import RecommendBooks
class DataFactory:
    """Maps a data-type key to its processor / recommender implementation."""

    @staticmethod
    def get_data_object(data_type):
        """Return the dataset processor for *data_type* (None if unknown)."""
        data_type = data_type.lower()
        if data_type == 'bx_books':
            # Direct reference instead of the original `globals()['...']()`
            # lookup — same object, but resolvable by linters and IDEs.
            return ProcessBXBooksData()

    @staticmethod
    def get_recommender_object(data_type, data_object):
        """Return the recommender for *data_type* (None if unknown)."""
        data_type = data_type.lower()
        if data_type == 'bx_books':
            return RecommendBooks(data_object)
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,320 | sansice/stardust | refs/heads/master | /book_search/process/churner/process_data.py |
class ProcessData(object):
    """Minimal base class for dataset processors."""

    def __init__(self):
        """No shared state; subclasses load their own data."""

    def skim_data(self):
        """Hook for subclasses: clean/trim the loaded data. Default: no-op."""
66,321 | sansice/stardust | refs/heads/master | /book_search/serve/serve.py | # from search.word_cloud.word_cloud import WordClouds
from flask import render_template, Blueprint, request, jsonify
from book_search.process.churner.churn_data import ChurnData
# Flask blueprint holding this module's routes; registered by the app factory.
serve_blueprint = Blueprint('serve', __name__)
@serve_blueprint.route('/')
def index():
    """Home page: render the template seeded with the most popular books."""
    churner = ChurnData('bx_books')
    popular = churner.get_popular_items()
    return render_template("index.html", url="localhost", port="5000", items=popular)
@serve_blueprint.route('/process', methods=['POST', 'GET'])
def index_post():
    """Search endpoint: popular books for blank queries, else per-user recs."""
    churner = ChurnData('bx_books')
    search_text = request.args.get('search_text', None)
    if search_text is None or search_text.strip() == "":
        payload = churner.get_popular_items()
    else:
        payload = churner.recommend_item(search_text)
    return str(payload)
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,322 | sansice/stardust | refs/heads/master | /tests/test_book_search/test_process/test_utils/test_const.py | import os
import unittest
import book_search.process.utils.const as const
class TestUnittestGenerator(unittest.TestCase):
    """Existence checks for every filesystem path constant in `const`."""

    def setUp(self):
        pass

    def tearDown(self) -> None:
        pass

    def test_project_path(self):
        print(const.project_home)
        self.assertTrue(os.path.exists(const.project_home))

    def test_sub_project_path(self):
        print(const.sub_project_home)
        self.assertTrue(os.path.exists(const.sub_project_home))

    def test_project_data_path(self):
        print(const.data_path)
        self.assertTrue(os.path.exists(const.data_path))

    def test_bx_books_csv_path(self):
        print(const.bx_books_csv_path)
        self.assertTrue(os.path.exists(const.bx_books_csv_path))

    def test_bx_books_ratings_csv(self):
        print(const.bx_books_ratings_csv)
        self.assertTrue(os.path.isfile(const.bx_books_ratings_csv))
        self.assertTrue(os.path.exists(const.bx_books_ratings_csv))

    def test_bx_books_info_csv(self):
        print(const.bx_books_info_csv)
        self.assertTrue(os.path.isfile(const.bx_books_info_csv))
        self.assertTrue(os.path.exists(const.bx_books_info_csv))

    def test_bx_books_users_csv(self):
        print(const.bx_books_users_csv)
        self.assertTrue(os.path.isfile(const.bx_books_users_csv))
        self.assertTrue(os.path.exists(const.bx_books_users_csv))
if __name__ == '__main__':
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| {"/book_search/__init__.py": ["/book_search/serve/serve.py"], "/tests/test_book_search/test_process/test_churner/test_recommend_books.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/tests/test_book_search/test_process/test_churner/test_process_bk_books_data.py": ["/book_search/process/churner/process_bk_books_data.py"], "/book_search/process/churner/recommend_books.py": ["/book_search/process/utils/utils.py"], "/book_search/process/churner/churn_data.py": ["/book_search/process/utils/const.py", "/book_search/process/utils/utils.py", "/book_search/process/churner/data_factory.py"], "/tests/test_book_search/test_process/test_churner/test_churn_data.py": ["/book_search/process/churner/churn_data.py"], "/book_search/start.py": ["/book_search/__init__.py"], "/book_search/process/churner/data_factory.py": ["/book_search/process/churner/process_bk_books_data.py", "/book_search/process/churner/recommend_books.py"], "/book_search/serve/serve.py": ["/book_search/process/churner/churn_data.py"], "/tests/test_book_search/test_process/test_utils/test_const.py": ["/book_search/process/utils/const.py"], "/book_search/process/churner/process_bk_books_data.py": ["/book_search/process/churner/process_data.py"]} |
66,323 | sansice/stardust | refs/heads/master | /book_search/process/churner/process_bk_books_data.py | import copy
import numpy
import pandas
import logging
import seaborn as sns
import matplotlib.pyplot as plt
from book_search.process.churner.process_data import ProcessData
from book_search.process.utils import const
logger = logging.getLogger()
class ProcessBXBooksData(ProcessData):
    """Loader and in-place cleaner for the Book-Crossing (BX) CSV dataset.

    Construction reads the books, users and ratings CSVs; ``skim_data``
    then applies the cleanup steps (drop image columns, fix known bad
    rows, normalise years and ages).
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): error_bad_lines is deprecated since pandas 1.3 and
        # removed in 2.0 (replaced by on_bad_lines) — confirm the pinned
        # pandas version before upgrading.
        self.books_raw = pandas.read_csv(const.bx_books_info_csv, sep=';', error_bad_lines=False, encoding=const.bx_books_encoding)
        self.books_raw.columns = ['ISBN', 'bookTitle', 'bookAuthor', 'yearOfPublication', 'publisher', 'imageUrlS', 'imageUrlM', 'imageUrlL']
        # Keep the raw frames untouched; clean only the deep copies.
        self.books_info = copy.deepcopy(self.books_raw)
        self.users = pandas.read_csv(const.bx_books_users_csv, sep=';', error_bad_lines=False, encoding=const.bx_books_encoding)
        self.users.columns = ['userID', 'Location', 'Age']
        self.ratings_raw = pandas.read_csv(const.bx_books_ratings_csv, sep=';', error_bad_lines=False, encoding=const.bx_books_encoding)
        self.ratings_raw.columns = ['userID', 'ISBN', 'bookRating']
        self.ratings = copy.deepcopy(self.ratings_raw)

    def skim_data(self):
        """Run all in-place cleanup steps on books_info and users."""
        # The image URL columns are never used by the recommender.
        self.books_info.drop(['imageUrlS', 'imageUrlM', 'imageUrlL'], axis=1, inplace=True)
        self._correct_yop()
        self._correct_pub()
        self._correct_age()

    def get_explicit_ratings(self):
        """Return only rows with an explicit rating (bookRating != 0)."""
        ratings_explicit = self.ratings[self.ratings.bookRating != 0]
        return ratings_explicit

    def get_implicit_ratings(self):
        """Return only implicit-feedback rows (bookRating == 0)."""
        ratings_implicit = self.ratings[self.ratings.bookRating == 0]
        return ratings_implicit

    def _correct_pub(self):
        # Two known rows with missing publisher get the placeholder 'other'.
        self.books_info.loc[(self.books_info.ISBN == '193169656X'), 'publisher'] = 'other'
        self.books_info.loc[(self.books_info.ISBN == '1931696993'), 'publisher'] = 'other'

    def _correct_ratings(self):
        # Keep only ratings whose ISBN exists in the books table.
        self.ratings = self.ratings_raw[self.ratings_raw.ISBN.isin(self.books_info.ISBN)]

    def check_sparcity(self):
        """Return the rating-matrix sparsity as a percentage (0..100)."""
        ratings_new = self.ratings[self.ratings.ISBN.isin(self.books_info.ISBN)]
        n_books = len(self.books_info)
        n_users = len(self.users)
        sparsity = 1.0 - len(ratings_new) / float(n_users * n_books)
        return sparsity * 100

    def _correct_age(self):
        # Ages outside 5..90 are treated as data errors and imputed with the mean.
        self.users.loc[(self.users.Age > 90) | (self.users.Age < 5), 'Age'] = numpy.nan
        self.users.Age = self.users.Age.fillna(self.users.Age.mean())
        self.users.Age = self.users.Age.astype(numpy.int32)

    def _correct_yop(self):
        """Fix known-bad book rows, then normalise yearOfPublication."""
        # Hand-corrected rows where the CSV columns were shifted at the source.
        self.books_info.loc[self.books_info.ISBN == '0789466953', 'yearOfPublication'] = 2000
        self.books_info.loc[self.books_info.ISBN == '0789466953', 'bookAuthor'] = "James Buckley"
        self.books_info.loc[self.books_info.ISBN == '0789466953', 'publisher'] = "DK Publishing Inc"
        self.books_info.loc[
            self.books_info.ISBN == '0789466953', 'bookTitle'] = "DK Readers: Creating the X-Men, How Comic Books Come to Life (Level 4: Proficient Readers)"
        self.books_info.loc[self.books_info.ISBN == '078946697X', 'yearOfPublication'] = 2000
        self.books_info.loc[self.books_info.ISBN == '078946697X', 'bookAuthor'] = "Michael Teitelbaum"
        self.books_info.loc[self.books_info.ISBN == '078946697X', 'publisher'] = "DK Publishing Inc"
        self.books_info.loc[
            self.books_info.ISBN == '078946697X', 'bookTitle'] = "DK Readers: Creating the X-Men, How It All Began (Level 4: Proficient Readers)"
        self.books_info.loc[self.books_info.ISBN == '2070426769', 'yearOfPublication'] = 2003
        # NOTE(review): the author string below contains mojibake ("Cl�©zio")
        # — presumably "Le Clézio"; left as-is since it is stored data.
        self.books_info.loc[self.books_info.ISBN == '2070426769', 'bookAuthor'] = "Jean-Marie Gustave Le Cl�©zio"
        self.books_info.loc[self.books_info.ISBN == '2070426769', 'publisher'] = "Gallimard"
        self.books_info.loc[self.books_info.ISBN == '2070426769', 'bookTitle'] = "Peuple du ciel, suivi de 'Les Bergers"
        # Non-numeric years become NaN; future (> 2006) and zero years too,
        # then all NaNs are imputed with the rounded mean year.
        self.books_info.yearOfPublication = pandas.to_numeric(self.books_info.yearOfPublication, errors='coerce')
        self.books_info.loc[(self.books_info.yearOfPublication > 2006) | (self.books_info.yearOfPublication == 0), 'yearOfPublication'] = numpy.NAN
        self.books_info.yearOfPublication.fillna(round(self.books_info.yearOfPublication.mean()), inplace=True)
        self.books_info.yearOfPublication = self.books_info.yearOfPublication.astype(numpy.int32)

    def plot_ratings(self):
        """Show a bar chart of explicit rating frequencies (blocking)."""
        sns.countplot(data=self.get_explicit_ratings(), x='bookRating')
        plt.show()

    def print_data(self):
        """Print null counts for year of publication, publisher and user age."""
        books_yop_null = self.books_info.yearOfPublication.isnull().sum()
        print('The number of year of pub null are - {books_yop_null}'.format(books_yop_null=books_yop_null))
        books_pub_null = len(self.books_info.loc[self.books_info.publisher.isnull(), :])
        print('The number of pub null are - {books_pub_null}'.format(books_pub_null=books_pub_null))
        users_age_null = self.users.Age.isnull().sum()
        print('The number of users with age null are - {users_age_null}'.format(users_age_null=users_age_null))
66,352 | newlife591/learngit | refs/heads/master | /clik.py | ##!/usr/bin/python
# open chrome in windows
import pyautogui as pg
import pyperclip as pc
import sys
import time
#def open_explor(str="chrome")
## This programe have 5 Jobs ##
def open_explor(expname):
    """Launch an application from the Windows Run dialog (Win+R).

    expname: program name typed into the dialog; callers pass it with a
    leading space (e.g. ' chrome').
    """
    pg.hotkey('win', 'r')  # open the Run dialog
    pg.PAUSE = 2           # global delay between subsequent pyautogui calls
    # Fix: pyautogui.write()/typewrite() accepts no 'pause' keyword (the
    # fail-safe decorator only understands '_pause'), so the original call
    # raised TypeError — TODO confirm against the pinned pyautogui version.
    pg.write(expname, interval=0.2)
    pg.press('enter', interval=1)
def open_new_url(url):
    """Open *url* in a new tab of the currently focused browser."""
    pg.hotkey('ctrl', 't')  # new tab
    pg.PAUSE = 1
    # Fix: removed the bare 'pg.getPointOnLine' statement — an attribute
    # lookup that was never called, a no-op left over from debugging.
    pg.write(url)
    pg.press('enter', interval=1)
    pg.PAUSE = 1
def max_windows():
    # Maximize the focused window via the system menu: hold Alt+Space to
    # open the menu, press 'x' (Maximize) while both modifiers are held,
    # then release them.  keyDown/keyUp is used instead of hotkey() so the
    # exact press order is preserved.
    pg.keyDown('alt')
    pg.keyDown('space')
    pg.press('x',presses=1)
    pg.keyUp('space')
    pg.keyUp('alt')
    pg.PAUSE = 0.1
if __name__ == "__main__":
    print('==== 打开浏览器 Press Ctrl-C to quit====')
    time_start = time.time()
    # Fix: pre-initialise the timestamps read in the summary after the
    # try/except.  In the original, an early Ctrl-C raised NameError
    # because time_2/time_end were only assigned inside the try block.
    time_2 = time_start
    time_end = time_start
    exp_name = ' chrome'  # browser name; the leading space is deliberate
    login_dir = 'www.zhibugongzuo.com/login'
    job1_dir = 'www.zhibugongzuo.com'
    # Article pages for JOB2 (open each, then post a comment).
    job2_dirs = (
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=129724224566fc2714a48723d741bf67c3e677660d83e0eee367d1b07a099457&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=63dd774c8e4bebd7273c8eb4f2d83eeb97cae0777b94214e188ab3ed0a9c2d9e&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=049c3f45aa680543e9c146a87e70fb11afe76e58060018a8d08a31e814bb5720&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=9e5fe9f6124634f4f08a318aa87875fc1ad137e9d7aa51138777b633df753243&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=170d44e93738220dd0418590e17d4e9461e58532e238a278e26bdd617a1b12cd&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=e1db726bf9a732b409a11dfd5683a88e0061084e5a728c58713bd38881a9380f&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=2e8d404e204c88a1c774e9dc50d33826a3f4d2d63af58992300050fbc39a09d9&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=4d8058f0c6224c1e38b0f3340002589b09505e728b111d3f7d8a74fd935ad2a4&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=20bbd8f9f3f098455043b937e2e6bf62b6e2037929b118ae8fb139554f3f61a2&template=1',
        ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=8fec49fc3a634bc87956816c0a04392d03d86c6f7af1479b37df4062e73b742d&template=1',
    )
    job2_txt = '学习'
    job3_dir = ' https://www.zhibugongzuo.com/study#/materialDetail/b30bbe82e55773d4cec1cce82fe339e6'
    job3_txt = '经过全国上下和广大人民群众艰苦努力,疫情防控取得阶段性重要成效,经济社会秩序加快恢复,彰显了中国共产党领导和中国特色社会主义制度的显著优势'
    job3_counter = 20  # number of scroll cycles while "reading" the material
    job4_dir = ' https://www.zhibugongzuo.com/moments#/index'
    job4_txt = '经过全国上下和广大人民群众艰苦努力,疫情防控取得阶段性重要成效,经济社会秩序加快恢复,彰显了中国共产党领导和中国特色社会主义制度的显著优势'
    try:
        # Launch Chrome and open the login page; 20 s grace period for the
        # user to complete the sign-in manually.
        open_explor(exp_name)
        max_windows()
        pg.write(login_dir)
        pg.press('enter')
        time.sleep(20)
        # JOB1: daily sign-in (+5 points).  All coordinates below are
        # screen-resolution specific — TODO confirm on the target machine.
        open_new_url(job1_dir)
        pg.moveTo(1142, 328, 0.1)
        pg.click()
        pg.click()
        pg.click()
        pg.hotkey('ctrl', 'F4')
        # JOB2: open each article (+10) and post a comment (+5).  The CJK
        # text is pasted via the clipboard because typing it is unreliable.
        for job2_dir in job2_dirs:
            open_new_url(job2_dir)
            pg.moveTo(304, 193, 0.0)
            pg.doubleClick()
            pc.copy(job2_txt)
            pg.hotkey('ctrl', 'v')
            pg.moveTo(1024, 668, 0.0)
            pg.doubleClick()
            pg.PAUSE = 2
            pg.hotkey('ctrl', 'F4')
        # JOB4: publish a moment (+2).
        open_new_url(job4_dir)
        pg.moveTo(430, 302, 0.1)
        pg.doubleClick()
        pc.copy(job4_txt)
        pg.hotkey('ctrl', 'v')
        pg.moveTo(935, 392, 0.1)
        pg.click()
        pg.PAUSE = 2
        pg.hotkey('ctrl', 'F4')
        # JOB3: online study (+5) plus a note (+2); scroll up and down to
        # accumulate reading time.
        open_new_url(job3_dir)
        pg.moveTo(323, 289, 0.1)
        pg.click()
        pg.moveTo(463, 289, 0.1)
        pg.click()
        pc.copy(job3_txt)
        pg.hotkey('ctrl', 'v')
        pg.moveTo(1051, 415, 0.1)
        pg.click()
        pg.PAUSE = 2
        time_2 = time.time()
        # Fix: dropped the redundant 'job3_counter -= 1' inside the loop —
        # the for/range already bounds the iterations.
        for i in range(job3_counter):
            pg.middleClick()
            pg.move(0, 400, duration=1)
            pg.middleClick()
            pg.middleClick()
            pg.move(0, -400, duration=1)
            pg.middleClick()
        pg.hotkey('ctrl', 'F4')
        # JOB5 (practise, +6 points) is not automated yet.
        time_end = time.time()
        pg.hotkey('alt', 'F4')
    except KeyboardInterrupt:
        print('\n')
    time_count = time_end - time_start
    read_count = time_end - time_2
    # Fix: '/n' was a typo for the newline escape '\n'; 'totle' -> 'total'.
    print('Time total cost:', time_count, 's\nReading cost:', read_count, 's')
| {"/zbgzlogin.py": ["/baitu_rec.py"]} |
66,353 | newlife591/learngit | refs/heads/master | /learning1.py | #!python
# open chrome in windows
import pyautogui as pg
import sys

print('==== 打开浏览器 Press Ctrl-C to quit====')
liulanqi_name = ' chrome'  # browser name ('liulanqi' = browser); leading space deliberate
mb_direct1 = ' https://www.zhibugongzuo.com/news#/workinfodetail?act_id=129724224566fc2714a48723d741bf67c3e677660d83e0eee367d1b07a099457&template=1'
try:
    # Open a new tab, type the article URL and confirm.
    pg.hotkey('ctrl', 't')
    pg.PAUSE = 2
    # Fix: removed the bare 'pg.getPointOnLine' statement — an attribute
    # lookup that was never called, a no-op.
    pg.write(mb_direct1)
    pg.press('enter', interval=1)
    pg.PAUSE = 1
    # Drag-select on the page, then double-click (coordinates are
    # screen-specific — TODO confirm on the target machine).
    pg.dragTo(304, 193, 0.5, button='left')
    pg.doubleClick()
except KeyboardInterrupt:
    # Fix: the original 'try' had no except/finally clause at all, which is
    # a SyntaxError; Ctrl-C is the documented way to abort this script.
    print('\n')
| {"/zbgzlogin.py": ["/baitu_rec.py"]} |
66,354 | newlife591/learngit | refs/heads/master | /hello.py | #print('hello python')
print ('nihao\\ womenhao')
a=60
b=13
c=0
c=a&b;
print("1-c的值为",c)
c=a|b;
print("2-c的值为", c)
c=a^b;
print("3-c的值为",c)
c=~a;
print("4-c的值为",c)
c=a<<2;
print("5-c的值为",c)
c=a>>2;
print("6-c的值为",c) | {"/zbgzlogin.py": ["/baitu_rec.py"]} |
66,355 | newlife591/learngit | refs/heads/master | /zbgzlogin.py | #登录支部工作 获取验证码 并另存图片到.\easy_img 目录下
#
#
from selenium import webdriver
from PIL import Image
from selenium.webdriver.common.keys import Keys
from aip import AipOcr
import os,time
import requests
import base64
from img_optimization import *
from baitu_rec import *
def main_login():
    """Log in to zhibugongzuo.com with Selenium: fill in the credentials,
    screenshot the captcha, OCR it via Baidu, and submit the form.

    NOTE(review): credentials are hard-coded in source — move them to a
    config file or environment variables.
    NOTE(review): the relative paths use unescaped backslashes
    ('.\easy_img\...'); they work only because '\e' and '\c' are not
    escape sequences — prefer raw strings or os.path.join.
    """
    uname='13718759896'
    pwd='820121'
    url ='http://www.zhibugongzuo.com/login'
    browser=webdriver.Chrome()
    browser.get(url)
    browser.implicitly_wait(5)
    browser.maximize_window()
    # Switch to the "account & password" login tab.
    ZHMM=browser.find_element_by_xpath("//li[contains(text(),'账号密码')]")
    ZHMM.click()
    # Fill in the username.
    browser.find_element_by_id("uname").clear()
    browser.find_element_by_id("uname").send_keys(uname)
    browser.find_element_by_id("uname").send_keys(Keys.TAB)
    time.sleep(2)
    # Fill in the password.
    browser.find_element_by_id("pwd").send_keys(pwd)
    browser.find_element_by_id("pwd").send_keys(Keys.TAB)
    time.sleep(2)
    # Screenshot the captcha element and save it for preprocessing.
    png=browser.find_element_by_id("captcha-img")
    png.screenshot('.\easy_img\capt.png')
    # Preprocess the captcha image (grayscale etc., see img_optimization).
    img_main()
    # Recognise the captcha text with Baidu OCR.
    str_code=baidu_rec_main(".\easy_img\capt-grey.jpg")
    # Log the recognised code.
    print('code is:',str_code)
    # Fill in the captcha field.
    browser.find_element_by_id("captcha").send_keys(str_code)
    #browser.find_element_by_id("captcha").send_keys(ENTER)
    time.sleep(3)
    DL=browser.find_element_by_xpath("//button[contains(text(),'登录')]")
    DL.click()
    time.sleep(3)


if __name__ == "__main__":
    main_login()
| {"/zbgzlogin.py": ["/baitu_rec.py"]} |
66,356 | newlife591/learngit | refs/heads/master | /baitu_rec.py | # encoding:utf-8
##使用百度aip进行图像识别
##图片保存在.\out_img目录下
import requests
import json
from aip import AipOcr
def get_file_content(filePath):
    """Return the raw bytes of the file at *filePath*."""
    with open(filePath, 'rb') as handle:
        return handle.read()
def baidu_rec_main(PNG_filePath):
    """Run Baidu accurate OCR on the image at *PNG_filePath* and return the
    recognised text with spaces and punctuation stripped out.

    Returns None when the OCR service yields no result.
    """
    # Credentials obtained from the Baidu AI console.
    APP_ID='19131433'
    API_KEY='kSvXEj1rR3xa9SI1vsKOaGFj'
    SECRET_KEY='pyY7PrDGZClczOxniBhTqPmKfWzjMjg4'
    OPTIONS = {
        'language_type':'ENG',
        'detect_direction':'true',
        'detect_language':'true'
    }
    client = AipOcr(APP_ID,API_KEY,SECRET_KEY)
    Result = client.basicAccurate(get_file_content(PNG_filePath), OPTIONS)
    if not Result:
        return None
    # Dump the raw response for debugging, then concatenate the words.
    print(json.dumps(Result))
    print('---------------')
    captcha = ''
    for word in Result['words_result']:
        print(word,'\n')
        captcha = captcha + word['words']
    print('===>'+captcha)
    # Drop whitespace and common punctuation the OCR tends to invent.
    captcha = captcha.replace(" ", "")
    for mark in ',.。;;??<>-+()!@#$%^&*[]{}:':
        captcha = captcha.replace(mark, "")
    print('识别结果:' + captcha)
    Answer_str = captcha
    print("answer is:",Answer_str)
    return Answer_str


if __name__ == "__main__":
    str_1=baidu_rec_main(".\easy_img\capt-grey.jpg")
    print(str_1)
| {"/zbgzlogin.py": ["/baitu_rec.py"]} |
66,362 | anyasidr/my-repository | refs/heads/master | /windows.py | import shelve
import os
import indexer
import re
from moytokenizer import Tokenizer
from indexer import Position_with_lines
class ContextWindow(object):
    """
    This class is used to store context windows data
    """
    def __init__(self, line, position, start, end):
        """
        method creates an instance of ContextWindow class
        params:
            line: string (the full source line) that contains the word
            position: list of word positions covered by the window
            start: position of the first character of the context window
            end: position after the last character of the context window
        """
        self.line = line
        self.position = position
        self.start = start
        self.end = end

    @classmethod
    def find_window(cls, filename, position, size):
        """
        method creates an instance of class ContextWindow loading from file
        @param filename: path to the file with the word
        @param position: position of the searching word in context window
        @param size: number of context words on each side of the match
        """
        t = Tokenizer()
        # Fix: pre-initialise so an empty file raises the documented
        # ValueError below instead of NameError on undefined loop variables.
        i = -1
        line = ''
        with open(filename) as f:
            for i, line in enumerate(f):
                if i == position.line:
                    break
        if i != position.line:
            raise ValueError('Inappropriate number')
        line = line.strip("\n")
        positions = [position]
        right = line[position.start:]
        left = line[:position.end][::-1]
        # Fix: default the window to the matched word itself.  Previously,
        # with size == 0 (or no neighbouring tokens) the loops below broke
        # before assigning start/end, raising NameError.
        start = position.start
        end = position.end
        for i, token in enumerate(t.for_index_tokenize(left)):
            if i == size:
                break
            start = position.end - token.position - len(token.text)
        for i, token in enumerate(t.for_index_tokenize(right)):
            if i == size:
                break
            end = position.start + token.position + len(token.text)
        return cls(line, positions, start, end)

    def is_cross(self, wnd):
        """
        Check cross of two context windows (overlap on the same line)
        @param wnd: context window to check
        """
        return (self.start <= wnd.end and
                self.end >= wnd.start and
                wnd.line == self.line)

    def join_cont(self, wnd):
        """
        Join context windows and set the result to self (in place)
        @param wnd: context window to join
        """
        for position in wnd.position:
            if position not in self.position:
                self.position.append(position)
        self.start = min(self.start, wnd.start)
        self.end = max(self.end, wnd.end)

    def expand_cont(self):
        """
        Expand context window boundaries to the enclosing sentence
        """
        first = re.compile(r'[.!?]\s[A-ZА-Яa-zа-я]')
        last = re.compile(r'[A-ZА-Яa-zа-я]\s[.!?]')
        right = self.line[self.end:]
        left = self.line[:self.start+1][::-1]
        if left:
            # Fix: catch only AttributeError (search() returned None — no
            # sentence boundary found) instead of a bare except that also
            # swallowed KeyboardInterrupt/SystemExit.
            try:
                self.start = self.start - last.search(left).start()
            except AttributeError:
                pass
        if right:
            try:
                self.end += first.search(right).start() + 1
            except AttributeError:
                pass

    def highlight(self):
        """
        Creates a string with highlighted words in search query
        """
        highlighted = self.line[self.start:self.end]
        # Insert tags starting from the rightmost match so that the
        # offsets of earlier matches stay valid.
        for pos in self.position[::-1]:
            end = pos.end - self.start
            start = pos.start - self.start
            highlighted = highlighted[:end] + '</strong>' + highlighted[end:]
            highlighted = highlighted[:start] + '<strong>' + highlighted[start:]
        return highlighted

    def __eq__(self, wnd):
        """
        Check if two context windows are equal
        @param wnd: context window to check
        """
        return ((self.position == wnd.position) and
                (self.line == wnd.line) and
                (self.start == wnd.start) and
                (self.end == wnd.end))

    def __repr__(self):
        """
        Represents ContextWindow instance to string
        """
        return str(self.position) + ', ' + str(self.start) + ', ' \
            + str(self.end) + ', ' + self.line
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,363 | anyasidr/my-repository | refs/heads/master | /testSearchEngine.py | import unittest
import make_db
import shelve
import os
from indexer import Indexator, Position, Position_with_lines
from searchengine import SearchEngine
from windows import ContextWindow
# Fixture texts and the expected inverted index built from them:
# word -> {filename: [Position_with_lines(start, end, line_number)]}.
test1 = "this is my test"
test2 = "my test"
database = {'this': {'test1.txt': [Position_with_lines(0, 4, 0)]},
            'is': {'test1.txt': [Position_with_lines(5, 7, 0)]},
            'my': {'test1.txt': [Position_with_lines(8, 10, 0)],
                   'test2.txt': [Position_with_lines(0, 2, 0)]},
            'test': {'test1.txt': [Position_with_lines(11, 15, 0)],
                     'test2.txt': [Position_with_lines(3, 7, 0)]}}
class TestContextWindow(unittest.TestCase):
    """Tests for ContextWindow: lookup from file, joining and highlighting."""

    def setUp(self):
        # Write the fixture files that windows are read back from.
        with open("test1.txt", 'w') as file:
            file.write(test1)
        with open("test2.txt", 'w') as file:
            file.write(test2)

    # def test_input(self):
    #    with self.assertRaises(ValueError):
    #        ContextWindow.find_window(0, 0, 50)

    def test_wrong_line(self):
        # A line index past the end of the file must raise ValueError.
        with self.assertRaises(ValueError):
            ContextWindow.find_window("test1.txt", Position_with_lines(0, 4, 3), 3)

    def test_one(self):
        # One context word on each side of 'is'.
        result = ContextWindow.find_window("test1.txt", Position_with_lines(5, 7, 0), 1)
        self.assertEqual(result.position, [Position_with_lines(5, 7, 0)])
        self.assertEqual(result.start, 0)
        self.assertEqual(result.end, 10)
        self.assertEqual(result.line, test1)

    def test_no_context(self):
        # size == 0: the window should cover exactly the matched word.
        result = ContextWindow.find_window("test1.txt", Position_with_lines(5, 7, 0), 0)
        self.assertEqual(result.position, [Position_with_lines(5, 7, 0)])
        self.assertEqual(result.start, 5)
        self.assertEqual(result.end, 7)
        self.assertEqual(result.line, test1)

    def test_join(self):
        # Joining two overlapping windows widens the first one in place.
        query1 = ContextWindow.find_window('test1.txt', Position_with_lines(5, 7, 0), 1)
        query2 = ContextWindow.find_window('test1.txt', Position_with_lines(11, 15, 0), 1)
        # NOTE(review): join_cont mutates query1 and returns None, so the
        # 'result' variable below is unused.
        result = query1.join_cont(query2)
        self.wnd = ContextWindow('this is my test', [Position_with_lines(5, 7, 0), Position_with_lines(11, 15, 0)], 0, 15)
        self.assertEqual(query1.start, self.wnd.start)
        self.assertEqual(query1.end, self.wnd.end)
        self.assertEqual(query1.line, self.wnd.line)
        os.remove('test1.txt')

    def test_highlight(self):
        # The matched word must be wrapped in <strong> tags.
        query = ContextWindow.find_window('test1.txt', Position_with_lines(5, 7, 0), 1)
        result = query.highlight()
        text = 'this <strong>is</strong> my'
        self.assertEqual(result, text)

    def tearDown(self):
        # Remove fixture files (test_join may already have deleted one).
        if 'test1.txt' in os.listdir(os.getcwd()):
            os.remove('test1.txt')
        if 'test2.txt' in os.listdir(os.getcwd()):
            os.remove('test2.txt')
class TestDB(unittest.TestCase):
    """Tests for the make_db helper (shelve database creation)."""

    # NOTE(review): the method name lacks the 'test_' prefix, so the
    # unittest runner never executes it.  The raw-text comparison with the
    # shelve '.dir' file below also depends on the dbm backend and on the
    # platform — confirm it still holds before renaming/enabling this test.
    def make_db_test(self):
        with open("test1.txt", 'w') as file:
            file.write(test1)
        with open("test2.txt", 'w') as file:
            file.write(test2)
        make_db.make(['test1.txt', 'test2.txt'], 'db_name')
        result = open('db_name.dir', 'r').read()
        self.assertEqual(result, "'this', (0, 107)\n'is', (512, 107)\n'my', (1024, 152)\n'test', (1536, 152)")

    def tearDown(self):
        # Delete the generated database files and the fixtures.
        for filename in os.listdir(os.getcwd()):
            if filename == 'db_name' or filename.startswith('db_name'):
                os.remove(filename)
        if 'test1.txt' in os.listdir(os.getcwd()):
            os.remove('test1.txt')
        if 'test2.txt' in os.listdir(os.getcwd()):
            os.remove('test2.txt')
class TestSearchEngine(unittest.TestCase):
    """Tests for SearchEngine query methods (plain, multiword, paginated)."""

    def setUp(self):
        # Seed the engine's shelve with the precomputed index and recreate
        # the fixture files that the context-window code reads from disk.
        self.engine = SearchEngine('db_name')
        self.engine.database.update(database)
        with open("test1.txt", 'w') as file:
            file.write(test1)
        with open("test2.txt", 'w') as file:
            file.write(test2)

    def test_empty(self):
        # An empty query yields an empty result dictionary.
        result = self.engine.search_one('')
        self.assertEqual(result, {})

    def test_search_one(self):
        # Single-word lookup returns positions per file.
        result = self.engine.search_one('test')
        self.assertEqual(result, {'test1.txt': [Position_with_lines(11, 15, 0)],
                                  'test2.txt': [Position_with_lines(3, 7, 0)]})

    def test_search_many_one(self):
        # search_many on a one-word query matches search_one.
        result = self.engine.search_many('test')
        self.assertEqual(result, {'test1.txt': [Position_with_lines(11, 15, 0)],
                                  'test2.txt': [Position_with_lines(3, 7, 0)]})

    def test_search_many_two(self):
        # Multiword query: positions of all words, per common file.
        result = self.engine.search_many('my test')
        self.assertEqual(result, {'test1.txt': [Position_with_lines(8, 10, 0),
                                                Position_with_lines(11, 15, 0)],
                                  'test2.txt': [Position_with_lines(0, 2, 0),
                                                Position_with_lines(3, 7, 0)]})

    def test_search_limit_offset_default(self):
        # Default doclimit=0 selects no documents: files present, no quotes.
        result = self.engine.search_limit_offset('test')
        self.assertEqual(result, {'test1.txt': [], 'test2.txt': []})

    def test_search_limit_offset_all(self):
        # Both documents, both quotes, highlighted.
        result = self.engine.search_limit_offset('test', doclimit=2, docoffset=0, limits=[2, 2], offsets=[0, 0])
        self.assertEqual(result, {'test1.txt': ['this is my <strong>test</strong>'], 'test2.txt': ['my <strong>test</strong>']})

    def test_search_limit_offset_one(self):
        # doclimit=1 keeps quotes only for the first document.
        result = self.engine.search_limit_offset('test', doclimit=1, docoffset=0, limits=[2, 2], offsets=[0, 0])
        self.assertEqual(result, {'test1.txt': ['this is my <strong>test</strong>'], 'test2.txt': []})

    def test_search_limit_offset_shift(self):
        # docoffset=1 skips the first document.
        result = self.engine.search_limit_offset('test', doclimit=2, docoffset=1, limits=[2, 2], offsets=[0, 0])
        self.assertEqual(result, {'test1.txt': [], 'test2.txt': ['my <strong>test</strong>']})

    def test_search_many_limit_offset_one(self):
        result = self.engine.search_many_limit_offset('test', limit=1, offset=0, limits=[2, 2], offsets=[0, 0])
        self.assertEqual(result, {'test1.txt': [Position_with_lines(11, 15, 0)]})

    def test_search_many_limit_offset_shift(self):
        result = self.engine.search_many_limit_offset('test', limit=1, offset=1, limits=[2, 2], offsets=[0, 0])
        self.assertEqual(result, {'test2.txt': [Position_with_lines(3, 7, 0)]})

    def test_search_many_limit_offset_all(self):
        result = self.engine.search_many_limit_offset('test', limit=2, offset=0, limits=[2, 2], offsets=[0, 0])
        self.assertEqual(result, {'test1.txt': [Position_with_lines(11, 15, 0)],
                                  'test2.txt': [Position_with_lines(3, 7, 0)]})

    def test_generator(self):
        # The generator merges several position lists in ascending order,
        # skipping empty lists.
        result = self.engine.generator([
            [Position_with_lines(12, 13, 1), Position_with_lines(3, 7, 0)],
            [Position_with_lines(11, 15, 0), Position_with_lines(3, 7, 0)],
            []
        ])
        a = []
        for r in result:
            a.append(r)
        self.assertEqual(a, [Position_with_lines(11, 15, 0),
                             Position_with_lines(3, 7, 0),
                             Position_with_lines(12, 13, 1),
                             Position_with_lines(3, 7, 0)])

    def test_search_many_limit_offset_gen_one(self):
        # Generator-based variant: values must be iterated to compare.
        result = self.engine.search_many_limit_offset_gen('test', limit=1, offset=0, limits=[2, 2], offsets=[0, 0])
        result_keys = list(result.keys())
        self.assertEqual(result_keys, ['test1.txt'])
        for key in result.keys():
            for data in result[key]:
                self.assertEqual(data, Position_with_lines(11, 15, 0))

    def test_search_many_limit_offset_gen_shift(self):
        result = self.engine.search_many_limit_offset_gen('test', limit=1, offset=1, limits=[2, 2], offsets=[0, 0])
        result_keys = list(result.keys())
        self.assertEqual(result_keys, ['test2.txt'])
        for key in result.keys():
            for data in result[key]:
                self.assertEqual(data, Position_with_lines(3, 7, 0))

    def test_search_many_limit_offset_gen_all(self):
        result = self.engine.search_many_limit_offset_gen('test', limit=2, offset=0, limits=[2, 2], offsets=[0, 0])
        result_keys = list(result.keys())
        self.assertEqual(result_keys, ['test1.txt', 'test2.txt'])
        for key in result.keys():
            for data in result[key]:
                self.assertEqual(data, database['test'][key][0])

    def tearDown(self):
        # Close/release the engine before deleting its backing files.
        del self.engine
        for filename in os.listdir(os.getcwd()):
            if filename == 'db_name' or filename.startswith('db_name'):
                os.remove(filename)
        if 'test1.txt' in os.listdir(os.getcwd()):
            os.remove('test1.txt')
        if 'test2.txt' in os.listdir(os.getcwd()):
            os.remove('test2.txt')


if __name__ == '__main__':
    unittest.main()
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,364 | anyasidr/my-repository | refs/heads/master | /searchengine.py | import shelve
import os
import indexer
import re
import windows
from moytokenizer import Tokenizer
from indexer import Position_with_lines
class SearchEngine(object):
    """
    This class is used for searching of
    positions of tokens in a given database.
    """
    def __init__(self, dbname):
        """
        This method creates an example of
        class SearchEngine.
        """
        # writeback=True keeps written entries cached so in-place updates
        # (e.g. database.update(...)) persist on close/sync.
        self.database = shelve.open(dbname, writeback=True)
        self.tokenizer = Tokenizer()

    def search_one(self, query):
        """
        This method searches in a database. The method uses
        a key that is a tokens, returns all the positions
        of the token.
        """
        if not isinstance(query, str):
            raise ValueError
        return self.database.get(query, {})

    def search_many(self, query):
        """
        This method uses tokenization. The method searches in a database,
        finds tokens in a tokenized string. Returns a dictionary where
        the tokens are keys with their positions in all given files.

        NOTE(review): self.database[word.text] raises KeyError when a
        query word is absent from the index — confirm whether that is the
        intended contract (search_one uses .get with a default instead).
        """
        if not isinstance(query, str):
            raise ValueError
        if query == '':
            return {}
        tokenizer = Tokenizer()  # using tokenizer for extracting tokens
        words = list(tokenizer.for_index_tokenize(query))
        results = []  # per-word position dictionaries
        for word in words:
            results.append(self.database[word.text])
        files = set(results[0])  # converting tuple into set
        for result in results:
            files &= set(result)  # intersecting sets of documents
        positions = {}  # creating a dictionary with positions
        for file in files:
            for result in results:
                positions.setdefault(file, []).extend(result[file])
        return positions

    def get_window(self, in_dict, size=3):
        """
        Create dictionary of files and context windows
        """
        if not (isinstance(in_dict, dict) and
                isinstance(size, int)):
            raise ValueError
        conts_dict = {}
        for f, positions in in_dict.items():
            for position in positions:
                cont = windows.ContextWindow.find_window(f, position, size)
                conts_dict.setdefault(f, []).append(cont)
        joined_conts_dict = self.join_windows(conts_dict)
        return joined_conts_dict

    def join_windows(self, in_dict):
        """
        Join cross windows in a dictionary of files
        @param in_dict: dict to join

        NOTE(review): the sentinel below passes [] where ContextWindow's
        signature expects 'line' and "" where it expects 'position' — the
        arguments look swapped.  It still works because is_cross() compares
        lines ([] never equals a real line string), but verify and fix the
        argument order.
        """
        conts_dict = {}
        empty = windows.ContextWindow([], "", 0, 0)
        for f, conts in in_dict.items():
            previous_cont = empty
            for cont in conts:
                if previous_cont.is_cross(cont):
                    previous_cont.join_cont(cont)
                else:
                    # Flush the previous (non-sentinel) window before
                    # starting a new run.
                    if previous_cont is not empty:
                        conts_dict.setdefault(f, []).append(previous_cont)
                    previous_cont = cont
            conts_dict.setdefault(f, []).append(previous_cont)
        return conts_dict

    def search_to_window(self, query, size=3):
        """
        Search query words in database
        """
        positions_dict = self.search_many(query)
        cont_dict = self.get_window(positions_dict, size)
        return cont_dict

    def search_to_sentence(self, query, size=3):
        """
        Search multiword query in database
        """
        context_dict = self.search_to_window(query, size)
        for contexts in context_dict.values():
            for context in contexts:
                context.expand_cont()
        # Expansion may make neighbouring windows overlap: re-join them.
        sentence_dict = self.join_windows(context_dict)
        return sentence_dict

    def search_to_highlight(self, query, size=3):
        """
        Search multiword query in database and highlighting them with
        <strong> tag
        """
        sentence_dict = self.search_to_sentence(query, size)
        quote_dict = {}
        for f, conts in sentence_dict.items():
            for cont in conts:
                quote_dict.setdefault(f, []).append(cont.highlight())
        return quote_dict

    def search_limit_offset(self, query, size=3, doclimit=0, docoffset=0, limits=[1, 1, 1, 1], offsets=[0, 0, 0, 0]):
        '''
        filter result
        :param query:
        :param size:
        :param doclimit: documents limit (0..4)
        :param docoffset: documents offset (0..4)
        :param limits: list of per-document quote limits
        :param offsets: list of per-document quote offsets

        NOTE(review): limits/offsets are indexed by the document counter j,
        so more than len(limits) matching documents raises IndexError —
        TODO confirm callers always pass one entry per document.
        NOTE(review): mutable default arguments are shared across calls;
        safe only while never mutated.
        '''
        r = self.search_to_highlight(query, size)
        j = 0
        myres = {}
        key_list = list(r.keys())
        key_list.sort()
        for key in key_list:
            # Every matched file appears in the result, possibly with an
            # empty quote list when filtered out by doclimit/docoffset.
            myres[key] = []
            if (j >= int(docoffset)) and (j < int(docoffset) + int(doclimit)):
                i = 0
                for val in r[key]:
                    if (i >= int(offsets[j])) and (i < int(offsets[j]) + int(limits[j])):
                        myres[key].append(val)
                    i = i + 1
            j = j + 1
        return myres

    # task acc0 - add to all functions limit and offset parameters
    def search_many_limit_offset(self, query, limit=0, offset=0, limits=[1, 1, 1, 1], offsets=[0, 0, 0, 0]):
        '''
        this function for filtering result search many with limit and offset parameters
        (task acc0)
        :param query: multiword query
        :param limit: limit of documents
        :param offset: offset of documents
        :return: dict file -> sliced position lists

        NOTE(review): below, per-document slices are taken with
        k = i + offset where i counts only the kept files — verify the
        intended alignment of limits/offsets with document indices.
        '''
        if not isinstance(query, str):
            raise ValueError
        if not isinstance(limit, int):
            raise ValueError
        if not isinstance(offset, int):
            raise ValueError
        for lim in limits:
            if not isinstance(lim, int):
                raise ValueError
        for of in offsets:
            if not isinstance(of, int):
                raise ValueError
        if query == '':
            return {}
        if offset < 0:
            offset = 0
        if limit < 0:
            limit = 0
        tokenizer = Tokenizer()  # using tokenizer for extracting tokens
        words = list(tokenizer.for_index_tokenize(query))
        results = []  # per-word position dictionaries
        for word in words:
            results.append(self.database[word.text])
        files = sorted(set(results[0]))  # converting tuple into set
        # Keep only the documents inside the [offset, offset+limit) slice.
        i = 0
        filtered = set([])
        for file in files:
            if (i >= int(offset)) and (i < (int(offset) + int(limit))):
                filtered.add(file)
            i = i + 1
        files = filtered
        for result in results:
            files &= set(result)  # intersecting sets of documents
        files = sorted(files)
        positions = {}  # creating a dictionary with positions
        i = 0
        for file in files:
            for result in results:
                k = i + offset
                positions.setdefault(file, []).extend(result[file][offsets[k]: limits[k] + offsets[k]])
            i = i + 1
        return positions

    def search_many_limit_offset_gen(self, query, limit=0, offset=0, limits=[1, 1, 1, 1], offsets=[0, 0, 0, 0]):
        # Generator-based variant: values of the returned dict are lazy
        # merged streams of positions rather than materialised lists.
        if not isinstance(query, str):
            raise ValueError
        if not isinstance(limit, int):
            raise ValueError
        if not isinstance(offset, int):
            raise ValueError
        for lim in limits:
            if not isinstance(lim, int):
                raise ValueError
        for of in offsets:
            if not isinstance(of, int):
                raise ValueError
        if query == '':
            return {}
        if offset < 0:
            offset = 0
        if limit < 0:
            limit = 0
        tokenizer = Tokenizer()
        # Keep only alphabetic ('a') and digit ('d') tokens of the query.
        searchlist = []
        for token in tokenizer.gen_type_tokenize(query):
            if token.typ == 'a' or token.typ == 'd':
                searchlist.append(token.text)
        results = []
        for token in searchlist:
            results.append(set(self.search_one(token)))
        files = results[0]
        for f in results:
            files = files & f
        final_dict = {}
        files = sorted(files)
        i = 0
        for f in files:
            if (i >= offset) and (i < (limit + offset)):
                lists = []
                for token in searchlist:
                    lists.append(self.database[token][f][offsets[i]: limits[i] + offsets[i]])
                final_dict[f] = self.generator(lists)
            i = i + 1
        return final_dict

    def generator(self, lists):
        # K-way merge: repeatedly yield the smallest head among the
        # non-empty iterators (ascending order overall).
        itr = [iter(lst) for lst in lists if len(lst) > 0]
        firsts = [next(it) for it in itr]
        while len(firsts) > 0:
            minimal = min(firsts)
            yield minimal
            min_position = firsts.index(minimal)
            try:
                firsts[min_position] = next(itr[min_position])
            except StopIteration:
                itr.pop(min_position)
                firsts.pop(min_position)

    def search_to_window_limit_offset(self, query, size=3, limit=0, offset=0, limits=[1, 1, 1, 1], offsets=[0, 0, 0, 0]):
        """
        Search query words in database with limit and offset parameters
        """
        positions_dict = self.search_many_limit_offset_gen(query, limit, offset, limits, offsets)
        cont_dict = self.get_window(positions_dict, size)
        return cont_dict

    def search_to_sentence_limit_offset(self, query, size=3, limit=0, offset=0, limits=[1, 1, 1, 1], offsets=[0, 0, 0, 0]):
        """
        Search multiword query in database with limit and offset parameters
        """
        context_dict = self.search_to_window_limit_offset(query, size, limit, offset, limits, offsets)
        for contexts in context_dict.values():
            for context in contexts:
                context.expand_cont()
        sentence_dict = self.join_windows(context_dict)
        return sentence_dict

    def search_to_highlight_limit_offset(self, query, size=3, limit=0, offset=0, limits=[1, 1, 1, 1], offsets=[0, 0, 0, 0]):
        """
        Search multiword query in database and highlighting them with
        <strong> tag

        NOTE(review): limit/offset values arrive as strings from the web
        layer, hence the int() conversions below.  The hard-coded 'books\\'
        directory couples this method to the web server's layout — TODO
        confirm/parameterize.
        """
        int_limits = []
        for lim in limits:
            int_limits.append(int(lim))
        int_offsets = []
        for of in offsets:
            int_offsets.append(int(of))
        sentence_dict = self.search_to_sentence_limit_offset(query, size, int(limit), int(offset), int_limits, int_offsets)
        quote_dict = {}
        for f, conts in sentence_dict.items():
            for cont in conts:
                quote_dict.setdefault(f, []).append(cont.highlight())
        # List every file in books\ so the UI shows empty entries too.
        files = os.listdir('books\\')
        for f in files:
            if not(('books\\'+f) in quote_dict.keys()):
                quote_dict['books\\'+f] = []
        return quote_dict

    def close(self):
        """
        method closes database.
        """
        self.database.close()
def main():
    """Smoke-test: build an index over two throwaway files, run a
    highlighted search, print the result, then clean everything up."""
    i = indexer.Indexator('db_name')
    file1 = open('test1.txt', 'w')
    file1.write('Да, это пустые слова, здесь нет ничего полезного. привет как твои дела ? у меня все хорошо, я хочу домой приди ко мне! но ты же не свободна?')
    file1.close()
    file2 = open('test2.txt', 'w')
    file2.write('да я хочу сказать тебе . привет и все, но зачем все привет эти слова? я хочу быть счастливым! И точка')
    file2.close()
    i.indextie_with_lines('test1.txt')
    i.indextie_with_lines('test2.txt')
    # Deleting the indexer closes/flushes its shelve before searching.
    del i
    search_engine = SearchEngine('db_name')
    #result = search_engine.search_many('my test')
    #print(result)
    # Search for the Russian word 'привет' with a 4-word context window.
    r = search_engine.search_to_highlight('привет', 4)
    print(r)
    # Disabled experiment against a larger corpus, kept as a no-op string
    # literal:
    """i = indexer.Indexator('tolstoy')
    i.indextie_with_lines('tolstoy1.txt')
    del i
    search_engine = SearchEngine('tolstoy')
    r = search_engine.search_to_highlight('Анна', 4)
    for key in r.keys():
        for val in r[key]:
            print (val)"""
    # Cleanup: close the engine and remove the fixtures and db files.
    del search_engine
    if 'test1.txt' in os.listdir(os.getcwd()):
        os.remove('test1.txt')
    if 'test2.txt' in os.listdir(os.getcwd()):
        os.remove('test2.txt')
    for filename in os.listdir(os.getcwd()):
        if filename == 'db_name' or filename.startswith('db_name.'):
            os.remove(filename)


if __name__=='__main__':
    main()
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,365 | anyasidr/my-repository | refs/heads/master | /unittest.py | import unittest
from moytokenizer import Tokenizer
from search_engine import SearchEngine
class Test(unittest.TestCase):
def setUp(self):
self.Tokenizer = Tokenizer()
# unittest for method tokenize
def test_type_output(self):
result = self.Tokenizer.tokenize('text')
self.assertIsInstance(result, list)
def test_type_input_notlist(self):
with self.assertRaises(ValueError):
self.Tokenizer.tokenize(['eto', 'ne', 'spisok'])
def test_type_input_number(self):
with self.assertRaises(ValueError):
self.Tokenizer.tokenize(5)
def test_result_words(self):
result = self.Tokenizer.tokenize('we ^&* are testing- *&$^ this thing')
self.assertEqual(len(result), 5)
self.assertEqual(result[0].text, 'we')
self.assertEqual(result[0].position, 0)
self.assertEqual(result[4].text, 'thing')
self.assertEqual(result[4].position, 30)
def test_result_characters_beginning(self):
result = self.Tokenizer.tokenize('$%$we ^&* are testing- *&$^ this thing')
self.assertEqual(len(result), 5)
self.assertEqual(result[0].text, 'we')
self.assertEqual(result[0].position, 3)
self.assertEqual(result[4].text, 'thing')
self.assertEqual(result[4].position, 33)
def test_result_characters_end(self):
result = self.Tokenizer.tokenize('we ^&* are testing- *&$^ this thing()(')
self.assertEqual(len(result), 5)
self.assertEqual(result[0].text, 'we')
self.assertEqual(result[0].position, 0)
self.assertEqual(result[4].text, 'thing')
self.assertEqual(result[4].position, 30)
def test_result_characters_begin_end(self):
result = self.Tokenizer.tokenize('720@!we ^&* are testing- *&$^ this thing*%@3')
self.assertEqual(len(result), 5)
self.assertEqual(result[0].text, 'we')
self.assertEqual(result[0].position, 5)
self.assertEqual(result[4].text, 'thing')
self.assertEqual(result[4].position, 35)
# unittest for method gen_tokenize
def gen_test_type_input_notlist(self):
with self.assertRaises(ValueError):
self.Tokenizer.gen_tokenize(['eto', 'ne', 'spisok'])
def gen_test_type_input_number(self):
with self.assertRaises(ValueError):
self.Tokenizer.gen_tokenize(5)
def gen_test_result_words(self):
result = self.Tokenizer.gen_tokenize('we ^&* are testing- *&$^ this thing')
self.assertEqual(len(result), 5)
self.assertEqual(result[0].text, 'we')
self.assertEqual(result[0].position, 0)
self.assertEqual(result[4].text, 'thing')
self.assertEqual(result[4].position, 30)
def gen_test_result_characters_beginning(self):
    """Leading punctuation is skipped by gen_tokenize.

    Bug fix: materialise the generator with list() before len()/indexing.
    """
    result = list(self.Tokenizer.gen_tokenize('$%$we ^&* are testing- *&$^ this thing'))
    self.assertEqual(len(result), 5)
    self.assertEqual(result[0].text, 'we')
    self.assertEqual(result[0].position, 3)
    self.assertEqual(result[4].text, 'thing')
    self.assertEqual(result[4].position, 33)
def gen_test_result_characters_end(self):
    """Trailing punctuation yields no extra token from gen_tokenize.

    Bug fix: materialise the generator with list() before len()/indexing.
    """
    result = list(self.Tokenizer.gen_tokenize('we ^&* are testing- *&$^ this thing()('))
    self.assertEqual(len(result), 5)
    self.assertEqual(result[0].text, 'we')
    self.assertEqual(result[0].position, 0)
    self.assertEqual(result[4].text, 'thing')
    self.assertEqual(result[4].position, 30)
def gen_test_result_characters_begin_end(self):
    """Noise at both ends is ignored by gen_tokenize.

    Bug fix: materialise the generator with list() before len()/indexing.
    """
    result = list(self.Tokenizer.gen_tokenize('720@!we ^&* are testing- *&$^ this thing*%@3'))
    self.assertEqual(len(result), 5)
    self.assertEqual(result[0].text, 'we')
    self.assertEqual(result[0].position, 5)
    self.assertEqual(result[4].text, 'thing')
    self.assertEqual(result[4].position, 35)
# unittest for method gen_type_tokenize
def gen_type_test_list(self):
    """gen_type_tokenize must reject non-string input (a list here).

    Bug fix: consume the generator with list(); a generator function
    raises nothing until it is iterated, so the bare call never raised.
    """
    with self.assertRaises(ValueError):
        list(self.Tokenizer.gen_type_tokenize(['eto', 'ne', 'spisok']))
def gen_type_test_number(self):
    """gen_type_tokenize must reject numeric input.

    Bug fixes: renamed from 'gen_test_type_input_number', which silently
    shadowed the identically-named method defined earlier in this class;
    also consume the generator with list() so the ValueError actually
    fires (generator bodies run only on iteration).
    """
    with self.assertRaises(ValueError):
        list(self.Tokenizer.gen_type_tokenize(5))
def test_type(self):
    """gen_type_tokenize tags runs as alpha ('a'), space ('s'), punct ('p')."""
    result = self.Tokenizer.gen_type_tokenize('Test - thats right')
    # generator must be materialised before len()/indexing
    sequence = list(result)
    self.assertEqual(len(sequence), 7)
    self.assertEqual(sequence[0].text, 'Test')
    self.assertEqual(sequence[0].position, 0)
    self.assertEqual(sequence[0].typ, "a")
    self.assertEqual(sequence[1].text, ' ')
    self.assertEqual(sequence[1].position, 4)
    self.assertEqual(sequence[1].typ, "s")
    self.assertEqual(sequence[2].text, '-')
    self.assertEqual(sequence[2].position, 5)
    self.assertEqual(sequence[2].typ, "p")
def test_type_notlatin(self):
    """Non-Latin scripts (Japanese, Cyrillic) are tokenized and typed too."""
    result = self.Tokenizer.gen_type_tokenize('大好きです。 Мне это нравится')
    sequence = list(result)
    self.assertEqual(len(sequence), 8)
    self.assertEqual(sequence[0].text, '大好きです')
    self.assertEqual(sequence[0].position, 0)
    self.assertEqual(sequence[0].typ, "a")
    self.assertEqual(sequence[1].text, '。')
    self.assertEqual(sequence[1].position, 5)
    self.assertEqual(sequence[1].typ, "p")
    self.assertEqual(sequence[2].text, ' ')
    self.assertEqual(sequence[2].position, 6)
    self.assertEqual(sequence[2].typ, "s")
    self.assertEqual(sequence[3].text, 'Мне')
    self.assertEqual(sequence[3].position, 7)
    self.assertEqual(sequence[3].typ, "a")
def test_type_other(self):
    """Digits ('d') and other symbols ('o') get their own type codes.

    Expected tokens: '...', ' ', 'ой', '6', 'ой', ' ', '+', ' ', '@'.
    """
    result = self.Tokenizer.gen_type_tokenize('... ой6ой + @')
    sequence = list(result)
    self.assertEqual(len(sequence), 9)
    self.assertEqual(sequence[0].text, '...')
    self.assertEqual(sequence[0].position, 0)
    self.assertEqual(sequence[0].typ, "p")
    self.assertEqual(sequence[3].text, '6')
    self.assertEqual(sequence[3].position, 6)
    self.assertEqual(sequence[3].typ, "d")
    self.assertEqual(sequence[6].text, '+')
    self.assertEqual(sequence[6].position, 10)
    self.assertEqual(sequence[6].typ, "o")
class IndexerTest(unittest.TestCase):
    """Integration tests for Indexator against a real shelve database file."""
    def setUp(self):
        # a fresh index database for every test
        self.indexer = Indexator("database")
    def tearDown(self):
        # Indexator.__del__ closes the shelve; then remove its file(s)
        # (some shelve backends create database.db/.dir/.dat variants)
        del self.indexer
        for filename in os.listdir(os.getcwd()):
            if (filename == "database" or filename.startswith("database.")):
                os.remove(filename)
        if "text.txt" in os.listdir(os.getcwd()):
            os.remove("text.txt")
    def test_wrong_input(self):
        """A path that does not exist raises FileNotFoundError."""
        with self.assertRaises(FileNotFoundError):
            self.indexer.indextie("i am not a document")
    def test_error_wrong_input_wrong_path(self):
        """A non-existing .txt path raises FileNotFoundError."""
        with self.assertRaises(FileNotFoundError):
            self.indexer.indextie("текст.txt")
    def test_two_words(self):
        """Two words are indexed with their start/end offsets.

        NOTE(review): the shelve handle opened for the comparison is never
        closed -- harmless here but worth tidying.
        """
        test = open("text.txt", 'w' )
        test.write("my test")
        test.close()
        self.indexer.indextie("text.txt")
        words1 = dict(shelve.open("database"))
        words2 = {
            "my":{"text.txt": [Position(0, 2)]},
            "test":{"text.txt": [Position(3, 7)]
        }}
        self.assertEqual(words1, words2)
# run this test module's suite when executed directly
if __name__ == '__main__':
    unittest.main()
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,366 | anyasidr/my-repository | refs/heads/master | /make_db.py | import indexer
import os
def make(dir = 'books', files=None, db_name='mydb'):
    """Index the given files (with line numbers) into a shelve database.

    @param dir: accepted for interface compatibility but currently unused
    @param files: iterable of file paths to index (default: none)
    @param db_name: name of the shelve database to create

    Bug fix: the default for `files` was a mutable list literal, which is
    shared across calls; use None and create a fresh list instead.
    """
    if files is None:
        files = []
    idx = indexer.Indexator(db_name)
    for f in files:
        print(f)
        idx.indextie_with_lines(f)
def make_from_dir(dir='books', db_name='database\\mydb'):
    """Index every file found in `dir` into a shelve database.

    Improvement: build paths with os.path.join instead of hard-coding the
    Windows separator "\\", so the script also works on POSIX systems
    (identical output on Windows).
    """
    idx = indexer.Indexator(db_name)
    for f in os.listdir(dir):
        path = os.path.join(dir, f)
        print(path)
        idx.indextie_with_lines(path)
make_from_dir('books', 'database\\database')
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,367 | anyasidr/my-repository | refs/heads/master | /testIndexer.py | import unittest
import moytokenizer
import os
import shelve
from indexer import Indexator, Position
class TestIndexator(unittest.TestCase):
    """End-to-end tests for Indexator: bad input, one word, many words, two files."""
    def setUp(self):
        # fresh index database per test; tearDown removes its files
        self.indexator = Indexator('database')
    def test_digit(self):
        """Non-string filenames are rejected with TypeError."""
        with self.assertRaises(TypeError):
            self.indexator.indextie(123456)
    def test_input(self):
        """A missing file raises FileNotFoundError."""
        with self.assertRaises(FileNotFoundError):
            self.indexator.indextie('lalala')
    def test_filename(self):
        """A missing .txt file raises FileNotFoundError."""
        with self.assertRaises(FileNotFoundError):
            self.indexator.indextie('lalala.txt')
    def test_one_word(self):
        """A single word maps to one Position covering the whole file."""
        file = open('test.txt', 'w')
        file.write('indexator')
        file.close()
        self.indexator.indextie('test.txt')
        # NOTE(review): this shelve handle is never closed
        data_dict = dict(shelve.open('database'))
        dictionary = {'indexator': {'test.txt': [Position(0, 9)]}}
        self.assertEqual(data_dict, dictionary)
    def test_many_words(self):
        """Each of three words gets its own entry with correct offsets."""
        file = open('test.txt', 'w')
        file.write('testing my indexator')
        file.close()
        self.indexator.indextie('test.txt')
        data_dict = dict(shelve.open('database'))
        dictionary = {
            'testing': {
                'test.txt': [Position(0, 7)]
            },
            'my': {
                'test.txt': [Position(8, 10)]
            },
            'indexator': {
                'test.txt': [Position(11, 20)]}}
        self.assertEqual(data_dict, dictionary)
    def test_two_files(self):
        """Words shared by two files are recorded under both filenames."""
        file1 = open('test1.txt', 'w')
        file1.write('file number one')
        file1.close()
        self.indexator.indextie('test1.txt')
        test2 = open('test2.txt', 'w')
        test2.write('file number two')
        test2.close()
        self.indexator.indextie('test2.txt')
        data_dict = dict(shelve.open('database'))
        dictionary = {
            'file': {
                'test1.txt': [Position(0, 4)],
                'test2.txt': [Position(0, 4)]
            },
            'number': {
                'test1.txt': [Position(5, 11)],
                'test2.txt': [Position(5, 11)]
            },
            'one': {
                'test1.txt': [Position(12, 15)]
            },
            'two': {
                'test2.txt': [Position(12, 15)]}}
        self.assertEqual(data_dict, dictionary)
    def tearDown(self):
        # close the shelve via __del__, then delete all database files
        del self.indexator
        for filename in os.listdir(os.getcwd()):
            if filename == 'database' or filename.startswith('database.'):
                os.remove(filename)
        if 'test.txt' in os.listdir(os.getcwd()):
            os.remove('test.txt')
        if 'test1.txt' in os.listdir(os.getcwd()):
            os.remove('test1.txt')
        if 'test2.txt' in os.listdir(os.getcwd()):
            os.remove('test2.txt')
if __name__=='__main__':
    unittest.main()
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,368 | anyasidr/my-repository | refs/heads/master | /moytokenizer.py | """This module is used for tokenizing strings.
A string is split into tokens of consecutive alphabetic characters."""
import re
"""
importing the module of regular expressions
"""
import unicodedata
class Token(object):
    """
    A single alphabetic token: its text and where it starts in the source.
    """
    def __init__(self, position, text):
        """
        position -- offset of the token's first character in the source string
        text -- the token string itself
        """
        self.text = text
        self.position = position
class TokenwithType(Token):
    """
    A Token that also records the character type of its text.
    """
    def __init__(self, position, text, typ):
        """
        position -- offset of the token's first character
        text -- the token string
        typ -- one-letter type code ('a', 'd', 's', 'p' or 'o')
        """
        # reuse the base-class initialiser instead of duplicating it
        super().__init__(position, text)
        self.typ = typ
class Tokenizer(object):
    """
    Splits a string into tokens.

    tokenize/gen_tokenize extract runs of alphabetic characters;
    gen_type_tokenize yields every run of same-type characters together
    with a one-letter type code (a/d/s/p/o).
    """
    def __init__(self):
        """
        Compile the token pattern once per instance.
        """
        # raw string avoids invalid-escape warnings; [^\W\d]+ means
        # "word characters that are not digits", i.e. letters (and underscore)
        self.pattern = re.compile(r"[^\W\d]+")
    def tokenize(self, text):
        """
        Divide a string into tokens of alphabetic symbols.
        @param text: string that'll be divided into tokens
        @return: list of Token
        @raise ValueError: if text is not a string
        """
        if not isinstance(text, str):
            raise ValueError
        tokens = []
        # extract each alphabetic run with its start offset
        for match in self.pattern.finditer(text):
            tokens.append(Token(match.start(), match.group()))
        return tokens
    def gen_tokenize(self, text):
        """
        Lazily yield Token objects for every alphabetic run in text.
        @param text: string that'll be divided into tokens
        @return: generator of Token
        @raise ValueError: if text is not a string (raised on first
            iteration, as with any generator function)
        """
        if not isinstance(text, str):
            raise ValueError
        for match in self.pattern.finditer(text):
            yield Token(match.start(), match.group())
    @staticmethod
    def Type(c):
        """
        Return a one-letter type code for character c:
        'a' alpha, 'd' digit, 's' space, 'p' punctuation, 'o' other.
        """
        if c.isalpha():
            return 'a'
        if c.isdigit():
            return 'd'
        if c.isspace():
            return 's'
        if unicodedata.category(c)[0] == 'P':
            return 'p'
        return 'o'
    def gen_type_tokenize(self, text):
        """
        Yield TokenwithType objects covering the whole of text; consecutive
        characters of the same type are grouped into one token.
        @param text: string that'll be divided into tokens
        @return: generator of TokenwithType

        Bug fix: the previous implementation compared the first character
        against text[-1] (the *last* character), emitting a spurious empty
        token whenever the first and last characters differed in type.
        The previous type is now tracked explicitly instead.
        """
        if not isinstance(text, str):
            raise ValueError
        if text == "":
            return
        pos = 0
        prev_type = self.Type(text[0])
        for index, character in enumerate(text):
            ctype = self.Type(character)
            if ctype != prev_type:
                # the run text[pos:index] of prev_type characters is complete
                yield TokenwithType(pos, text[pos:index], prev_type)
                pos = index
                prev_type = ctype
        # flush the final run
        yield TokenwithType(pos, text[pos:], prev_type)
    def for_index_tokenize(self, string):
        """Yield only the alphabetic ('a') and digit ('d') tokens of string."""
        for word in self.gen_type_tokenize(string):
            if word.typ == 'a' or word.typ == 'd':
                yield word
if __name__ == '__main__':
    # demo: exercise all three tokenizing modes on one sample string
    text = "доброе утро44 !!! - ++ 6&13 **( спокойной темно-синий 441 ночи привет. Стол - это предмет мебели"
    words = Tokenizer().tokenize(text)
    for token in words:
        print(token.text, token.position)
    gen_words = Tokenizer().gen_tokenize(text)
    for token in gen_words:
        print(token.text, token.position)
    gen_type_words = Tokenizer()
    tokens = list(gen_type_words.gen_type_tokenize(text))
    for token in tokens:
        print(token.text, token.position, token.typ)
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,369 | anyasidr/my-repository | refs/heads/master | /indexer.py | from moytokenizer import Tokenizer
import shelve
import os
class Position(object):
    """
    The start and past-the-end offsets of a token inside a file.
    """
    def __init__(self, start, end):
        """
        Remember the first (`start`) and past-the-end (`end`) offsets.
        """
        self.start = start
        self.end = end
    def __eq__(self, obj):
        """
        Positions are equal when both offsets coincide.
        """
        if self.start != obj.start:
            return False
        return self.end == obj.end
    def __repr__(self):
        """
        Render as '(start; end)'.
        """
        return '({0}; {1})'.format(self.start, self.end)
class Position_with_lines(object):
    """
    Token position: start/end offsets plus the zero-based line number.
    """
    def __init__(self, start, end, line):
        """
        Store the offsets of the token and the line it occurs on.
        """
        self.start = start
        self.end = end
        self.line = line
    def __eq__(self, obj):
        """
        Equal when offsets and line number all coincide.
        """
        if self.start != obj.start:
            return False
        if self.end != obj.end:
            return False
        return self.line == obj.line
    def __repr__(self):
        """
        Render as '(start, end,line)' (spacing kept from the original).
        """
        return '({0}, {1},{2})'.format(self.start, self.end, self.line)
class Indexator(object):
    """
    Indexes text files: records the position of every alpha/digit token
    in a shelve database keyed by token text, then by filename.
    """
    def __init__(self, db_name):
        """
        Open (or create) the shelve database backing this index.
        """
        self.database = shelve.open(db_name, writeback=True)
    def indextie(self, filename):
        """
        Index the whole file at `filename`, storing Position entries.
        @raise TypeError: if filename is not a string
        @raise FileNotFoundError: if the file does not exist

        Improvement: the file is now opened with a with-statement, so it
        is closed even if tokenizing raises.
        """
        if not isinstance(filename, str):
            raise TypeError('Inappropriate type')
        tokenizer = Tokenizer()
        with open(filename) as text:
            for word in tokenizer.for_index_tokenize(text.read()):
                self.database.setdefault(word.text, {}).setdefault(filename, []).append(
                    Position(word.position, word.position + len(word.text)))
        self.database.sync()
    def indextie_with_lines(self, filename):
        """
        Index the file line by line, storing Position_with_lines entries
        (offsets are relative to the line, plus the line number).
        @raise TypeError: if filename is not a string
        @raise FileNotFoundError: if the file does not exist
        """
        if not isinstance(filename, str):
            raise TypeError('Inappropriate type')
        tokenizer = Tokenizer()
        with open(filename) as text:
            for number, line in enumerate(text):
                for word in tokenizer.for_index_tokenize(line):
                    self.database.setdefault(word.text, {}).setdefault(filename, []).append(
                        Position_with_lines(word.position, word.position + len(word.text), number))
        self.database.sync()
    def __del__(self):
        """
        Close the database when the indexer is garbage-collected.

        Robustness: guard against __init__ having failed before
        self.database existed.
        """
        db = getattr(self, 'database', None)
        if db is not None:
            db.close()
def main():
    """Smoke test: index a throwaway file, print the database, then clean up."""
    indexator = Indexator('database')
    file = open('text.txt', 'w')
    file.write('well well well')
    file.close()
    indexator.indextie_with_lines('text.txt')
    # deleting the indexer closes the shelve before it is reopened below
    del indexator
    os.remove('text.txt')
    print(dict(shelve.open('database')))
    # remove all shelve backing files (some backends add suffixes)
    for filename in os.listdir(os.getcwd()):
        if filename == 'database' or filename.startswith('database.'):
            os.remove(filename)
if __name__=='__main__':
    main()
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,370 | anyasidr/my-repository | refs/heads/master | /webserver.py | from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import unquote
import urllib.parse as urllib
import os
import re
from searchengine import SearchEngine
"""
This is a response, that server sends back to the client
1st peace without from and data
"""
resp = """<html>
<head>
<title>ASXER (Anya's Super indeXER)</title>
<style>
body{background-color: #2F4F4F;font-family: sans-serif; color: #B8860B;}
h1{border-bottom: 3px solid #DAA520;padding-bottom: 5px;}
input{font-size: 14px; border: 3px solid #C71585;border-radius: 20px;padding: 6px; background-color: #2F4F4F;color:#FFB6C1;;width: 70%}
input:focus{outline: none;}
input[type=submit]{background-color: #C71585;width: auto;}
strong{color:#DC143C;}
ol{text-align: left;}
</style>
</head>"""
data="""<body>
<div align="center">
<form method="post">
<h1>Enter query to search</h1>
<input type="text" name="query" value="{0}"><br>
<input type="submit" value="SEARCH"><br>
{1}
</form>
<br><br>
<sub>© ASXER (Anya's Super indeXER)</sub>
</div>
</body>
</html>
"""
class WebServer(BaseHTTPRequestHandler):
    """
    This class is used for request handling in our searchengine.

    NOTE(review): the action value 'perv' (reset offset to 0) looks like a
    typo for 'prev'/'first'; it is used consistently in both the HTML and
    the handler, so renaming would need a coordinated change.
    """
    def do_GET(self):
        """
        Defaut get request from client to get site
        """
        self.send_response(200)
        self.send_header("Content-type", "text/html; charset=utf-8")
        self.end_headers()
        response = """
        Documents Limit<br><input type="text" name="limit" value="0"><br>
        Documents Offset<br><input type="text" name="offset" value="0"><br>
        """
        # list every .txt document in the working directory and emit
        # per-document limit/offset controls plus paging buttons
        files = os.listdir(".\\")
        i = 0
        for file in files:
            if re.match(".*\.txt", file):
                response += (file + "<br>")
                response += 'Limit<br><input type="text" name=doc'+str(i)+'limit value="0"><br>'
                response += 'Offset<br><input type="text" name=doc'+str(i)+'offset value="0"><br>'
                response += '<input type="submit" name=action'+str(i)+' value="perv">'
                response += '<input type="submit" name=action'+str(i)+' value="back">'
                response += '<input type="submit" name=action'+str(i)+' value="next"> <br>'
                i = i + 1
        self.wfile.write(bytes((resp + data.format('', response)), "utf-8"))
    def get_new_offset_limit(self, action='', action_doc='', offsets=[], limits=[]):
        '''
        function for getting next/prev results of research
        :param action: next or back or perv
        :param action_doc: for which document
        :param offsets: offsets list
        :param limits: limits list
        :return: new offsets list

        NOTE(review): mutable default arguments; harmless as currently
        called (always with explicit lists) but worth replacing with None.
        '''
        # the clicked button is named e.g. 'action3' -> document index 3
        doc_num = int(action_doc.replace('action', ''))
        print(action)
        if action == 'next':
            offsets[doc_num] = str(int(offsets[doc_num]) + int(limits[doc_num]))
        if action == 'back':
            offsets[doc_num] = str(int(offsets[doc_num]) - int(limits[doc_num]))
            if int(offsets[doc_num]) < 0:
                offsets[doc_num] = str(0)
        if action == 'perv':
            offsets[doc_num] = str(0)
        return offsets
    def parse_url(self, body=''):
        '''
        function for parsing request string
        :param body: string with parameters of request
        :return: parsed parameters (query, limit, offset, per-doc limits,
            per-doc offsets, action, action_doc, action_exists)
        :raise TypeError: if any limit/offset is negative or non-numeric
        '''
        # strip the bytes-literal wrapper (body comes from rfile via str())
        s = unquote(urllib.urlparse(body)[2], "utf-8").replace("b'", "").replace("'", "").replace("\"", '')
        query_data = urllib.parse_qs(s)
        print("data = " + str(query_data))
        query = str(query_data['query'][0])
        limit = str(query_data['limit'][0])
        offset = str(query_data['offset'][0])
        if (re.match('\D', limit)) or (re.match('\D', offset)):
            raise TypeError
        if int(limit) < 0 or int(offset) < 0:
            raise TypeError
        action = ''
        action_doc = ''
        limits = []
        offsets = []
        action_exists = False
        # collect per-document fields: actionN, docNlimit, docNoffset
        for key in query_data.keys():
            if re.match('action.', key):
                action = str(query_data[key][0])
                action_doc = str(key)
                action_exists = True
            if re.match('doc.limit', key):
                if (re.match('\D', query_data[key][0])) or (int(query_data[key][0]) < 0):
                    raise TypeError
                limits.append(query_data[key][0])
            if re.match('doc.offset', key):
                if (re.match('\D', query_data[key][0])) or (int(query_data[key][0]) < 0):
                    raise TypeError
                offsets.append(query_data[key][0])
        return query, limit, offset, limits, offsets, action, action_doc, action_exists
    def do_POST(self):
        """
        POST handler for query
        """
        try:
            content_length = int(self.headers['Content-Length'])
            body = str(self.rfile.read(content_length))
            print("body = " + body)
            query, limit, offset, limits, offsets, action, action_doc, action_exists = self.parse_url(body)
            print("query = " + query)
            print("doclimit = " + limit)
            print("docoffset = " + offset)
            print("action = " + action)
            print("actiondoc = " + action_doc)
            # a paging button was pressed: recompute that document's offset
            if action_exists:
                offsets = self.get_new_offset_limit(action, action_doc, offsets, limits)
            print('limits = ' + str(limits))
            print('offsets = ' + str(offsets))
            search_engine = SearchEngine('database')
            r = search_engine.search_limit_offset(query, 4, limit, offset, limits, offsets)
            myresp = ''
            myresp += 'Documents Limit<br><input type="text" name="limit" value="' + str(limit) + '"><br>'
            myresp += 'Documents Offset<br><input type="text" name="offset" value="' + str(offset) + '"><br>'
            key_list = list(r.keys())
            key_list.sort()
            j = 0
            # render one <ol> per matching document with its quote snippets
            for key in key_list:
                myresp += '<ol>\n'
                myresp += '<li>' + key + '</li>\n<ul>'
                myresp += 'Limit<br><input type="text" name="doc' + str(j) + 'limit" value="' + limits[j] + '"><br>'
                myresp += 'Offset<br><input type="text" name=doc' + str(j) + 'offset" value="' + offsets[j] + '"><br>'
                myresp += '<input type="submit" name=action' + str(j) + ' value="perv">'
                myresp += '<input type="submit" name=action' + str(j) + ' value="back">'
                myresp += '<input type="submit" name=action' + str(j) + ' value="next"> <br>'
                for val in r[key]:
                    myresp += '<li>'+val+'</li>'
                myresp += '</ul>'
                j = j + 1
            myresp += '</ol>'
            self.send_response(200)
            self.send_header("Content-type", "text/html; charset=utf-8")
            self.end_headers()
            self.wfile.write(bytes((resp + data.format(query, myresp)), "utf-8"))
        except TypeError:
            # validation failure from parse_url
            response = 'fields "limit" and "offset" can not take a negative or fractional values'
            self.wfile.write(bytes((resp + data.format('', response)), "utf-8"))
        except Exception as ex:
            # any other failure: show the error plus a fresh search form
            response = '<br>Uuups. Something went wrong. Error message: ' + str(ex) + '<br>'
            self.send_response(200)
            self.send_header("Content-type", "text/html; charset=utf-8")
            self.end_headers()
            files = os.listdir(".\\")
            i = 0
            response += 'Documents Limit<br><input type="text" name="limit" value="0"><br>'
            response += 'Documents Offset<br><input type="text" name="offset" value="0"><br>'
            for f in files:
                if re.match(".*\.txt", f):
                    response += (f + "<br>")
                    response += 'Limit<br><input type="text" name=doc' + str(i) + 'limit value="0"><br>'
                    response += 'Offset<br><input type="text" name=doc' + str(i) + 'offset value="0"><br>'
                    response += '<input type="submit" name=action' + str(i) + ' value="perv">'
                    response += '<input type="submit" name=action' + str(i) + ' value="back">'
                    response += '<input type="submit" name=action' + str(i) + ' value="next"> <br>'
                    i = i + 1
            self.wfile.write(bytes((resp + data.format('', 'Not Found<br>' + response)), "utf-8"))
# Bind on all interfaces, port 80 (typically needs elevated privileges)
ws = HTTPServer(('0.0.0.0', 80), WebServer)
# Server running until Ctrl-C pressed
try:
    ws.serve_forever()
except KeyboardInterrupt:
    pass
ws.server_close()
| {"/windows.py": ["/indexer.py", "/moytokenizer.py"], "/testSearchEngine.py": ["/unittest.py", "/make_db.py", "/indexer.py", "/searchengine.py", "/windows.py"], "/searchengine.py": ["/indexer.py", "/windows.py", "/moytokenizer.py"], "/unittest.py": ["/moytokenizer.py"], "/make_db.py": ["/indexer.py"], "/testIndexer.py": ["/unittest.py", "/moytokenizer.py", "/indexer.py"], "/indexer.py": ["/moytokenizer.py"], "/webserver.py": ["/searchengine.py"]} |
66,452 | stuyspec/flask-api | refs/heads/develop | /app/views.py | from flask import render_template, flash, redirect
from flask import request, session, url_for, jsonify, make_response
from app import app, db, models
#---------------------------------------------- Error Handlers
@app.errorhandler(400)
def bad_request(error):
    """Return a JSON 400 response.

    Bug fix: renamed from `not_found`, which collided with the 404 handler
    of the same name below and was silently shadowed at module level.
    """
    return make_response(jsonify( { 'error': 'Bad request' } ), 400)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON body (instead of Flask's default HTML page) for 404s."""
    return make_response(jsonify( { 'error': 'Not found' } ), 404)
#---------------------------------------------- Section and Article Endpoints
@app.route('/sections/<string:section_slug>/' + \
           'subsection/<string:subsection_slug>' )
def get_section_by_slug(section_slug,subsection_slug):
    """Return the description of a section ("main") or of a subsection.

    Robustness fix: unknown slugs, or a subsection whose parent_slug does
    not match section_slug, previously fell off the end of the function
    and returned None (HTTP 500); they now yield a JSON 404.
    """
    if subsection_slug == "main":
        target = models.Section.query.filter(
            models.Section.slug == section_slug
        ).first()
    else:
        target = models.Subsection.query.filter(
            models.Subsection.slug == subsection_slug
        ).first()
        # a subsection only counts if it belongs to the requested section
        if target is not None and target.parent_slug != section_slug:
            target = None
    if target is None:
        return make_response(jsonify({'error': 'Not found'}), 404)
    return jsonify({"description": target.description})
# One route serves both a whole (sub)section listing and a single article.
article_route = '''/sections/<string:section_slug>/subsection/<string:subsection_slug>/articles/'''
@app.route(article_route , defaults={'article_slug': None})
@app.route(article_route + '<string:article_slug>' )
def get_section_articles(section_slug,subsection_slug,article_slug):
    """Return articles for a section/subsection, or one article by slug."""
    # NOTE(review): the literal string "None" is treated like a missing
    # slug -- presumably some client sends it as text; confirm with callers.
    if article_slug != None and article_slug != "None":
        articles = [models.Article.query.filter(
            models.Article.slug == article_slug
        ).first()]
    elif subsection_slug == "main":
        # "main" means the whole section rather than one subsection
        section = models.Section.query.filter(
            models.Section.slug == section_slug
        ).first()
        articles = models.Article.query.filter(
            models.Article.section == section
        ).all()
    else:
        subsection = models.Subsection.query.filter(
            models.Subsection.slug == subsection_slug
        ).first()
        articles = models.Article.query.filter(
            models.Article.subsection == subsection
        ).all()
    # serialize to plain dicts, exposing only these whitelisted fields
    secure_articles = []
    for article in articles:
        article_dict = {
            "content": article.content,
            "datetime": article.datetime,
            "id": article.id,
            "is_draft": article.is_draft,
            "issue": article.issue,
            "section_id": article.section_id,
            "slug": article.slug,
            "subsection_id": article.subsection_id,
            "title": article.title,
            "volume": article.volume
        }
        secure_articles.append(article_dict)
    return jsonify({"articles": secure_articles})
@app.route('/newspaper/<int:volume>/<int:issue>' )
def get_issue_articles(volume,issue):
    """Return every article of one newspaper issue plus its Issuu embed code.

    Bug fixes:
    * the response referenced the undefined name `secure_articles`
      (NameError -> HTTP 500); it now returns `converted_articles`.
    * `filter(a and b)` does not translate to SQL AND -- Python's `and`
      evaluates the clauses as booleans.  Both criteria are now passed to
      filter() as separate arguments, which SQLAlchemy combines with AND.
    * a missing Issuu row previously crashed with AttributeError; the
      embed code is now null in that case.
    """
    articles = models.Article.query.filter(
        models.Article.volume == volume,
        models.Article.issue == issue).all()
    converted_articles = []  # articles converted to plain dicts for jsonify
    for article in articles:
        article_dict = {
            "content": article.content,
            "datetime": article.datetime,
            "id": article.id,
            "is_draft": article.is_draft,
            "issue": article.issue,
            "section_id": article.section_id,
            "slug": article.slug,
            "subsection_id": article.subsection_id,
            "title": article.title,
            "volume": article.volume
        }
        converted_articles.append(article_dict)
    issuu_entry = models.Issuu.query.filter(
        models.Issuu.volume == volume,
        models.Issuu.issue == issue).first()
    issuu_code = issuu_entry.code if issuu_entry is not None else None
    return jsonify({"issuu_code": issuu_code, "articles": converted_articles})
@app.route('/list_articles/articles/' )
def get_all_articles():
    """Return every article, optionally truncated by a ?limit= query param."""
    articlesInIssue = models.Article.query.all()
    # serialize to plain dicts, exposing only these whitelisted fields
    secure_articles = []
    for article in articlesInIssue:
        article_dict = {
            "content": article.content,
            "datetime": article.datetime,
            "id": article.id,
            "is_draft": article.is_draft,
            "issue": article.issue,
            "section_id": article.section_id,
            "slug": article.slug,
            "subsection_id": article.subsection_id,
            "title": article.title,
            "volume": article.volume
        }
        secure_articles.append(article_dict)
    limit = request.args.get('limit')
    if limit is not None:
        # NOTE(review): a non-numeric ?limit= raises ValueError -> HTTP 500
        secure_articles = secure_articles[:int(limit)]
    return jsonify( {"articles": secure_articles} )
#---------------------------------------------- User data endpoints
@app.route('/user/<int:user_id>')
def get_user(user_id):
    """Return public profile data for one user.

    Robustness fix: an unknown user_id previously crashed with
    AttributeError (HTTP 500); it now returns a JSON 404.
    """
    user = models.User.query.get(user_id)
    if user is None:
        return make_response(jsonify({'error': 'Not found'}), 404)
    user_data = {
        "description": user.description,
        "email": user.email,
        "firstname": user.firstname,
        "id": user.id,
        "lastname": user.lastname,
        "username": user.username
    }
    return jsonify( {"user_data": user_data} )
#----------------------------------------------
| {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,453 | stuyspec/flask-api | refs/heads/develop | /db_repository/versions/.#004_migration.py | nicholas@Nicholass-MacBook-Pro.local.65212 | {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,454 | stuyspec/flask-api | refs/heads/develop | /db_repository/versions/002_migration.py | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# pre_meta describes the schema before this migration, post_meta after it.
pre_meta = MetaData()
post_meta = MetaData()
user = Table('user', pre_meta,
    Column('id', INTEGER, primary_key=True, nullable=False),
    Column('fname', VARCHAR(length=128)),
    Column('lname', VARCHAR(length=128)),
    Column('nickname', VARCHAR(length=128)),
    Column('username', VARCHAR(length=128)),
    Column('password', VARCHAR(length=1024)),
    Column('email', VARCHAR(length=1024)),
    Column('permissions', VARCHAR(length=1024)),
)
# NOTE(review): several columns below are marked primary_key=True, which
# creates a composite primary key -- likely auto-generated; verify intent.
advertisement = Table('advertisement', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('url', String(length=200), primary_key=True, nullable=False),
    Column('name', String(length=200), primary_key=True, nullable=False),
    Column('importance', Integer, primary_key=True, nullable=False),
)
issuu = Table('issuu', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('code', Integer, primary_key=True, nullable=False),
)
def upgrade(migrate_engine):
    """Drop the old user table; create advertisement and issuu."""
    # Upgrade operations go here. Don't create your own engine; bind
    # migrate_engine to your metadata
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    pre_meta.tables['user'].drop()
    post_meta.tables['advertisement'].create()
    post_meta.tables['issuu'].create()
def downgrade(migrate_engine):
    """Reverse of upgrade: restore user, drop the two new tables."""
    # Operations to reverse the above upgrade go here.
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    pre_meta.tables['user'].create()
    post_meta.tables['advertisement'].drop()
    post_meta.tables['issuu'].drop()
66,455 | stuyspec/flask-api | refs/heads/develop | /db_repository/__init__.py | from flask import Flask
# Bug fix: the "flask.ext" namespace was deprecated and later removed from
# Flask; import the extension package directly (matches app/__init__.py).
from flask_sqlalchemy import SQLAlchemy
from werkzeug.contrib.fixers import ProxyFix
app = Flask(__name__)
# honour X-Forwarded-* headers when running behind a reverse proxy
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object('config')
db = SQLAlchemy(app)
# imported last for its route-registration side effects
import views
66,456 | stuyspec/flask-api | refs/heads/develop | /app/__init__.py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from werkzeug.contrib.fixers import ProxyFix
# Flask application, wrapped so X-Forwarded-* headers from a reverse
# proxy are honoured.
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
# NOTE(review): TRACK_MODIFICATIONS adds overhead and is usually False;
# confirm the modification-event system is actually used.
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
app.config.from_object('config')
db = SQLAlchemy(app)
# imported for its route-registration side effects (circular by design)
import views
66,457 | stuyspec/flask-api | refs/heads/develop | /config.py | import os
# The base directory (DO NOT CHANGE, unless you know EXACTLY what you are doing)
basedir = os.path.abspath(os.path.dirname(__file__))
# The base directory for the database (DO NOT CHANGE, unless you know EXACTLY what you are doing)
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
# More sql setup, reference previous comment
SQLALCHEMY_MIGRATE_REPO = os.path.join(basedir, 'db_repository')
# WTForms Config
# Enable the security on the forms
WTF_CSRF_ENABLED = True
# Secret key for the hashes
SECRET_KEY = 'THIS IS A SECRET KEY'
| {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,458 | stuyspec/flask-api | refs/heads/develop | /db_repository/versions/004_migration.py | from sqlalchemy import *
from migrate import *
from migrate.changeset import schema
# Auto-generated by sqlalchemy-migrate: ``pre_meta`` describes the schema
# before this migration, ``post_meta`` the schema after it.
pre_meta = MetaData()
post_meta = MetaData()
# Tables introduced by this migration; created in upgrade(), dropped in
# downgrade().
article = Table('article', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('title', String(length=500)),
    Column('titleSlug', String(length=500)),
    Column('content', Text),
    Column('p_index', Integer),
    Column('timestamp', DateTime),
    Column('volume', Integer),
    Column('issue', Integer),
    Column('section_id', Integer),
    Column('subsection_id', Integer),
)
# Join table linking articles to tags.
article_tag = Table('article_tag', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('article_id', Integer),
    Column('tag_id', Integer),
)
media = Table('media', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('user_id', Integer),
    Column('article_id', Integer),
    Column('url', String(length=600)),
    Column('title', String(length=500)),
    Column('caption', String(length=500)),
    Column('isFeatured', Boolean),
    Column('isPhoto', Boolean),
)
role = Table('role', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('title', String(length=200)),
)
# Join table linking users to roles.
role_user = Table('role_user', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('user_id', Integer),
    Column('role_id', Integer),
)
section = Table('section', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('name', String(length=500)),
    Column('description', Text),
)
# Join table linking users (authors) to articles.
user_article = Table('user_article', post_meta,
    Column('id', Integer, primary_key=True, nullable=False),
    Column('user_id', Integer),
    Column('article_id', Integer),
)
def upgrade(migrate_engine):
    """Create every table introduced by this migration.

    Binds ``migrate_engine`` to the module metadata -- never construct a
    separate engine here.
    """
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    for table_name in ('article', 'article_tag', 'media', 'role',
                       'role_user', 'section', 'user_article'):
        post_meta.tables[table_name].create()
def downgrade(migrate_engine):
    """Drop every table created by :func:`upgrade`, reversing the migration.

    Binds ``migrate_engine`` to the module metadata -- never construct a
    separate engine here.
    """
    pre_meta.bind = migrate_engine
    post_meta.bind = migrate_engine
    for table_name in ('article', 'article_tag', 'media', 'role',
                       'role_user', 'section', 'user_article'):
        post_meta.tables[table_name].drop()
| {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,459 | stuyspec/flask-api | refs/heads/develop | /app/models.py | from app import db
from app import app
from werkzeug.security import generate_password_hash, check_password_hash
class Article(db.Model):
    """A newspaper article, owned by one Section and one Subsection."""
    id = db.Column(db.Integer, primary_key = True)
    title = db.Column(db.String(500))
    # URL-friendly identifier (seed data uses snake_case, e.g. "george_thingy").
    slug = db.Column(db.String(500))
    content = db.Column(db.Text)
    # Seeded with datetime.today() -- presumably creation/publication time.
    date_time = db.Column(db.DateTime)
    volume = db.Column(db.Integer)
    issue = db.Column(db.Integer)
    is_draft = db.Column(db.Boolean)
    # Owning section/subsection; the backrefs declared on those models expose
    # `article.section` and `article.subsection`.
    section_id = db.Column(db.Integer, db.ForeignKey('section.id'))
    subsection_id = db.Column(db.Integer, db.ForeignKey('subsection.id'))
class Section(db.Model):
    """A top-level department of the paper (seed data: "humor")."""
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(500))
    slug = db.Column(db.String(500))
    description = db.Column(db.Text)
    parent_slug = db.Column(db.String(500))
    # NOTE(review): despite the name, this is a relationship that yields
    # Article objects (with backref `article.section`), not an id column.
    article_id = db.relationship('Article', backref='section', lazy='dynamic')
class Subsection(db.Model):
    """A secondary grouping for articles (seed data: "year_review")."""
    id = db.Column(db.Integer, primary_key = True)
    name = db.Column(db.String(500))
    slug = db.Column(db.String(500))
    description = db.Column(db.Text)
    # NOTE(review): as on Section, this is a relationship yielding Article
    # objects (backref `article.subsection`), not an id column.
    article_id = db.relationship('Article', backref='subsection', lazy='dynamic')
class Issuu(db.Model):
    """Issuu code for a printed issue; seed data formats it as "volume/issue"
    (e.g. "111/12")."""
    id = db.Column(db.Integer, primary_key = True)
    code = db.Column(db.String(20))
    volume = db.Column(db.Integer)
    issue = db.Column(db.Integer)
class User(db.Model):
    """A staff account.

    NOTE(review): `password` is stored in plain text here and in db_seed.py;
    the werkzeug hash helpers imported at the top of this module are never
    used in the visible code -- hashing should be applied before this ships.
    """
    id = db.Column(db.Integer, primary_key = True)
    first_name = db.Column(db.String(200))
    last_name = db.Column(db.String(200))
    username = db.Column(db.String(200))
    password = db.Column(db.String(200))
    email = db.Column(db.String(200))
    description = db.Column(db.Text)
| {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,460 | stuyspec/flask-api | refs/heads/develop | /run.py | #!flask/bin/python
from app import app
# NOTE(review): importing db_seed executes it -- the seed script does
# module-level session.add()/commit() calls, so rows are re-inserted on EVERY
# startup. Consider guarding the seed behind a function or CLI flag.
import db_seed
# Start the development server. debug=True enables the interactive debugger
# and verbose error pages -- disable before deploying.
if __name__ == "__main__":
    app.run(debug = True, host='0.0.0.0', port=8000) # Can change back debug
| {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,461 | stuyspec/flask-api | refs/heads/develop | /db_seed.py | #!flask/bin/python
from app import models, db
from datetime import datetime

# Seed the database with sample sections, articles, issuu codes and users.
# NOTE(review): this runs at import time (run.py does `import db_seed`), so
# the same rows are re-inserted on every startup.
# NOTE(review): user passwords below are seeded in plain text.

# --- section / subsection ---------------------------------------------------
humor_section = models.Section(
    name="humor",
    slug="humorstuff",
    description="this is the humor department",
    parent_slug="humor",
)
year_review = models.Subsection(
    name="year_review",
    slug="more_humor_stuff",
    description="this is humor department",
)

# --- first batch of articles (volume 111, issue 12) -------------------------
george_article = models.Article(
    title="george thingy",
    slug="george_thingy",
    content="good riddance and thank god",
    date_time=datetime.today(),
    volume=111,
    issue=12,
    is_draft=False,
    section=humor_section,
    subsection=year_review,
)
jason_article = models.Article(
    title="jason thingy",
    slug="jason_thingy",
    content="gasdsaood riddance and thank god",
    date_time=datetime.today(),
    volume=111,
    issue=12,
    is_draft=False,
    section=humor_section,
    subsection=year_review,
)
db.session.add_all([humor_section, year_review, george_article, jason_article])
db.session.commit()

# --- second batch of articles (volume 5, issue 112) -------------------------
potato_article = models.Article(
    title="geoasdsadrge potato",
    slug="geoasfe_thingy",
    content="good fsafagod",
    date_time=datetime.today(),
    volume=5,
    issue=112,
    is_draft=False,
    section=humor_section,
    subsection=year_review,
)
draft_article = models.Article(
    title="jasonasnj thingy",
    slug="jasond_thingy",
    content="gsfaafagod",
    date_time=datetime.today(),
    volume=5,
    issue=112,
    is_draft=True,
    section=humor_section,
    subsection=year_review,
)
db.session.add_all([potato_article, draft_article])
db.session.commit()

# --- issuu codes ("volume/issue") -------------------------------------------
db.session.add_all([
    models.Issuu(code="111/12", volume=111, issue=12),
    models.Issuu(code="5/112", volume=5, issue=112),
])
db.session.commit()

# --- users -------------------------------------------------------------------
db.session.add_all([
    models.User(
        first_name="jason",
        last_name="kao",
        username="jkao",
        password="donut",
        email="jkao@stuy.edu",
        description="avocado",
    ),
    models.User(
        first_name="geprge",
        last_name="zheng",
        username="gz",
        password="asad",
        email="gzhen@stuy.edu",
        description="peanut",
    ),
])
db.session.commit()
| {"/app/views.py": ["/app/__init__.py"], "/app/models.py": ["/app/__init__.py"], "/run.py": ["/app/__init__.py", "/db_seed.py"], "/db_seed.py": ["/app/__init__.py"]} |
66,470 | PorterDalton1/Text_Adventure | refs/heads/master | /main.py | """
Driver file for the game. This is the file that gets started and
runs everything.
"""
from window_GUI import WindowBase
| {"/main.py": ["/window_GUI.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.