index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
77,819 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/migrations/0010_eye.py | # Generated by Django 3.0.4 on 2020-05-04 08:09
from django.db import migrations, models
# Auto-generated Django migration: adds the `eye` (eye-doctor) listing model.
class Migration(migrations.Migration):

    # Must run after migration 0009, which created the car-service models.
    dependencies = [
        ('Webapp', '0009_caracesseries_carrepair_cartyres_carwash_motercyclerepair_newcars'),
    ]

    operations = [
        migrations.CreateModel(
            name='eye',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): images upload to 'denistsdoctors' — looks copied
                # from the dentists model; confirm the intended directory.
                ('drimg', models.ImageField(upload_to='denistsdoctors')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='denistsdoctors')),
                ('department', models.CharField(max_length=100)),
                # max_length on TextField only affects form widgets, not the DB column.
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
    ]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,820 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/account/urls.py | from django.conf.urls import url
from django.urls import path
from . import views
# from .views import ActivateAccount
# Account-app routes: registration, session login/logout, and profile view.
# Note: paths carry no trailing slash, so "/register/" relies on APPEND_SLASH.
urlpatterns = [
    path("register", views.register, name="register"),
    path("login",views.login, name="login"),
    path("logout",views.logout,name="logout"),
    # path("bloodd",views.bloodd,name="blood"),
    path("view_profile", views.view_profile, name="view_profile"),
]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,821 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/viewprofile/urls.py | from django.urls import path
from . import views
# Profile-detail routes. Each pattern embeds the pk directly after the prefix.
# NOTE(review): because there is no separator before <int:pk>, earlier routes
# shadow later ones — e.g. "/vh102" matches "vh1<int:pk>" (pk=02) before
# "vh10<int:pk>" is ever tried, and "/vh12" matches vh1 rather than the plain
# hotel route "vh<int:pk>". Consider "vh1/<int:pk>"-style paths.
urlpatterns = [
    #automobile//////
    path("vh1<int:pk>", views.vh1, name="vh1"),
    path("vh2<int:pk>", views.vh2, name="vh2"),
    path("vh3<int:pk>", views.vh3, name="vh3"),
    path("vh4<int:pk>", views.vh4, name="vh4"),
    path("vh5<int:pk>", views.vh5, name="vh5"),
    path("vh6<int:pk>", views.vh6, name="vh6"),
    path("vh7<int:pk>", views.vh7, name="vh7"),
    path("vh8<int:pk>", views.vh8, name="vh8"),
    path("vh9<int:pk>", views.vh9, name="vh9"),
    path("vh10<int:pk>", views.vh10, name="vh10"),
    #doctor//////////
    path("vd1<int:pk>", views.vd1, name="vd1"),
    path("vd2<int:pk>", views.vd2, name="vd2"),
    path("vd3<int:pk>", views.vd3, name="vd3"),
    path("vd4<int:pk>", views.vd4, name="vd4"),
    #////hotel
    path("vh<int:pk>", views.vh, name="vh"),
    #////resturnts
    path("vr<int:pk>", views.vr, name="vr"),
    #////electricians
    path("ve<int:pk>", views.ve, name="ve"),
    #////automobiles
    path("va1<int:pk>", views.va1, name="va1"),
    path("va2<int:pk>", views.va2, name="va2"),
    path("va3<int:pk>", views.va3, name="va3"),
    path("va4<int:pk>", views.va4, name="va4"),
    path("va5<int:pk>", views.va5, name="va5"),
    #///////plumber
    path("vp1<int:pk>", views.vp1, name="vp1"),
    path("vp2<int:pk>", views.vp2, name="vp2"),
    path("vp3<int:pk>", views.vp3, name="vp3"),
    path("vp4<int:pk>", views.vp4, name="vp4"),
]
77,822 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/migrations/0011_blooddonor.py | # Generated by Django 3.0.4 on 2020-05-04 11:07
from django.db import migrations, models
# Auto-generated Django migration: adds the `blooddonor` registry model.
class Migration(migrations.Migration):

    dependencies = [
        ('Webapp', '0010_eye'),
    ]

    operations = [
        migrations.CreateModel(
            name='blooddonor',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=254)),
                ('age', models.IntegerField()),
                ('gender', models.CharField(max_length=20)),
                ('blood_group', models.CharField(max_length=20)),
                # mobile_no is widened to CharField(15) later, in migration 0012.
                ('mobile_no', models.IntegerField()),
                ('address', models.CharField(max_length=100)),
                ('city', models.CharField(max_length=50)),
            ],
        ),
    ]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,823 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/migrations/0003_automobile.py | # Generated by Django 3.0.4 on 2020-04-24 14:18
from django.db import migrations, models
# Auto-generated Django migration: adds the `automobile` listing model.
class Migration(migrations.Migration):

    dependencies = [
        ('Webapp', '0002_destination_price'),
    ]

    operations = [
        migrations.CreateModel(
            name='automobile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='drpics')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField()),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
    ]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,824 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/viewprofile/views.py | from django.shortcuts import render
# Create your views here.
from Webapp.models import hospital
from Webapp.models import childhospital
from Webapp.models import eyehospital
from Webapp.models import publichospital
from Webapp.models import ENThospital
from Webapp.models import privatehospital
from Webapp.models import cancerhospital
from Webapp.models import mentalhospital
from Webapp.models import multisuperhospital
from Webapp.models import orthrohospital
from Webapp.models import carrepair
from Webapp.models import cartyres
from Webapp.models import carwash
from Webapp.models import caracesseries
from Webapp.models import motercyclerepair
from Webapp.models import dentists
from Webapp.models import eye
from Webapp.models import bone
from Webapp.models import dentists
from Webapp.models import dermatology
from Webapp.models import plumbercont
from Webapp.models import plumberproducts
from Webapp.models import plumberinstall
from Webapp.models import plumberservice
from Webapp.models import electrician
from Webapp.models import hotel
from Webapp.models import reasurant
# ---- hospital profile views ----
# Each view fetches one record of its hospital model by primary key and
# renders the shared profile template with it bound to 'v'.

def vh1(request, pk=None):
    """Profile of a general hospital."""
    return render(request, 'profile.html', {'v': hospital.objects.get(pk=pk)})

def vh2(request, pk=None):
    """Profile of a children's hospital."""
    return render(request, 'profile.html', {'v': childhospital.objects.get(pk=pk)})

def vh3(request, pk=None):
    """Profile of an eye hospital."""
    return render(request, 'profile.html', {'v': eyehospital.objects.get(pk=pk)})

def vh4(request, pk=None):
    """Profile of a public hospital."""
    return render(request, 'profile.html', {'v': publichospital.objects.get(pk=pk)})

def vh5(request, pk=None):
    """Profile of an ENT hospital."""
    return render(request, 'profile.html', {'v': ENThospital.objects.get(pk=pk)})

def vh6(request, pk=None):
    """Profile of a private hospital."""
    return render(request, 'profile.html', {'v': privatehospital.objects.get(pk=pk)})

def vh7(request, pk=None):
    """Profile of a cancer hospital."""
    return render(request, 'profile.html', {'v': cancerhospital.objects.get(pk=pk)})

def vh8(request, pk=None):
    """Profile of a mental-health hospital."""
    return render(request, 'profile.html', {'v': mentalhospital.objects.get(pk=pk)})

def vh9(request, pk=None):
    """Profile of a multi-superspeciality hospital."""
    return render(request, 'profile.html', {'v': multisuperhospital.objects.get(pk=pk)})

def vh10(request, pk=None):
    """Profile of an orthopaedic hospital."""
    return render(request, 'profile.html', {'v': orthrohospital.objects.get(pk=pk)})
# ---- doctor profile views ----

def vd1(request, pk=None):
    """Profile of a dentist."""
    return render(request, 'profile.html', {'v': dentists.objects.get(pk=pk)})

def vd2(request, pk=None):
    """Profile of an eye doctor."""
    return render(request, 'profile.html', {'v': eye.objects.get(pk=pk)})

def vd3(request, pk=None):
    """Profile of a dermatologist."""
    return render(request, 'profile.html', {'v': dermatology.objects.get(pk=pk)})

def vd4(request, pk=None):
    """Profile of a bone (orthopaedic) doctor."""
    return render(request, 'profile.html', {'v': bone.objects.get(pk=pk)})
# ---- hotel / restaurant / electrician profile views ----

def vh(request, pk=None):
    """Profile of a hotel."""
    return render(request, 'profile.html', {'v': hotel.objects.get(pk=pk)})

def vr(request, pk=None):
    """Profile of a restaurant."""
    return render(request, 'profile.html', {'v': reasurant.objects.get(pk=pk)})

def ve(request, pk=None):
    """Profile of an electrician."""
    return render(request, 'profile.html', {'v': electrician.objects.get(pk=pk)})
# ---- automobile-service profile views ----
# BUG FIX: all five views previously fetched from the `electrician` model
# (copy-paste error — the automobile models were imported but never used).
# The mapping now mirrors the listing views a1–a5 in Webapp/views.py
# (a1=carrepair, a2=caracesseries, a3=carwash, a4=cartyres,
#  a5=motercyclerepair).

def va1(request, pk=None):
    """Profile of a car-repair shop."""
    return render(request, 'profile.html', {'v': carrepair.objects.get(pk=pk)})

def va2(request, pk=None):
    """Profile of a car-accessories shop."""
    return render(request, 'profile.html', {'v': caracesseries.objects.get(pk=pk)})

def va3(request, pk=None):
    """Profile of a car-wash service."""
    return render(request, 'profile.html', {'v': carwash.objects.get(pk=pk)})

def va4(request, pk=None):
    """Profile of a car-tyre shop."""
    return render(request, 'profile.html', {'v': cartyres.objects.get(pk=pk)})

def va5(request, pk=None):
    """Profile of a motorcycle-repair shop."""
    return render(request, 'profile.html', {'v': motercyclerepair.objects.get(pk=pk)})
# ---- plumber profile views ----

def vp1(request, pk=None):
    """Profile of a plumber service provider."""
    return render(request, 'profile.html', {'v': plumberservice.objects.get(pk=pk)})

def vp2(request, pk=None):
    """Profile of a plumbing-products seller."""
    return render(request, 'profile.html', {'v': plumberproducts.objects.get(pk=pk)})

def vp3(request, pk=None):
    """Profile of a plumbing contractor."""
    return render(request, 'profile.html', {'v': plumbercont.objects.get(pk=pk)})

def vp4(request, pk=None):
    """Profile of a plumbing-installation provider."""
    return render(request, 'profile.html', {'v': plumberinstall.objects.get(pk=pk)})
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,825 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/admin.py | from django.contrib import admin
from .models import Destination
from .models import automobile
from .models import dentists
from .models import plumbercont
from .models import plumberinstall
from .models import plumberproducts
from .models import plumberservice
from .models import electrician
from .models import hotel
from .models import reasurant
from .models import hospital
from .models import childhospital
from .models import eyehospital
from .models import publichospital
from .models import privatehospital
from .models import ENThospital
from .models import cancerhospital
from .models import mentalhospital
from .models import multisuperhospital
from .models import orthrohospital
from .models import newcars
from .models import carrepair
from .models import caracesseries
from .models import carwash
from .models import cartyres
from .models import motercyclerepair
from .models import eye
from .models import blooddonor
from .models import bone
from .models import dermatology
from .models import Requestaddservice
# Register your models here.
# Register every model with the default admin site. Iterating a tuple keeps
# the registration order identical to the original one-call-per-model form
# while making the list easy to scan and extend.
for _model in (
    Destination, automobile, dentists, plumbercont, plumberservice,
    plumberproducts, plumberinstall, electrician, hotel, reasurant,
    hospital, childhospital, eyehospital, publichospital, privatehospital,
    ENThospital, cancerhospital, mentalhospital, multisuperhospital,
    orthrohospital, newcars, carrepair, caracesseries, carwash, cartyres,
    motercyclerepair, eye, blooddonor, bone, dermatology, Requestaddservice,
):
    admin.site.register(_model)
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,826 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/views.py | from django.http import HttpResponse
from django.shortcuts import render, redirect
from django.template import loader
from .models import Destination, hotel
from .models import automobile
from .models import dentists
from .models import plumbercont
from .models import plumberinstall
from .models import plumberproducts
from .models import plumberservice
from .models import reasurant
from .models import blooddonor
from .models import hospital
from .models import childhospital
from Webapp.models import eyehospital
from Webapp.models import publichospital
from Webapp.models import ENThospital
from Webapp.models import privatehospital
from Webapp.models import cancerhospital
from Webapp.models import mentalhospital
from Webapp.models import multisuperhospital
from Webapp.models import orthrohospital
from .models import carrepair
from Webapp.models import cartyres
from Webapp.models import carwash
from Webapp.models import caracesseries
from Webapp.models import motercyclerepair
from .models import bone
from .models import eye
from .models import dermatology
from .models import electrician
from .models import Requestaddservice
# Create your views here.
def viewprofile(request, pk=None):
    """Render profile.html for the dentist with the given pk.

    NOTE(review): the no-pk fallback reads ``request.dentists``, which is not
    a standard HttpRequest attribute and will raise AttributeError unless
    middleware sets it — confirm intent (account.views.view_profile uses
    ``request.user`` for the analogous pattern).
    """
    if pk:
        v = dentists.objects.get(pk=pk)
    else:
        v = request.dentists
    return render(request, 'profile.html', {'v': v})
def bloodd(request):
    """Blood-donor sign-up: save a donor on POST, show the form otherwise."""
    if request.method != 'POST':
        return render(request, "blood/blooddonate.html")
    # Build the donor record straight from the submitted form fields.
    donor = blooddonor(
        name=request.POST['name'],
        email=request.POST['email'],
        age=request.POST['age'],
        gender=request.POST['gender'],
        blood_group=request.POST['bg'],
        mobile_no=request.POST['mobileno'],
        address=request.POST['address'],
        city=request.POST['city'],
    )
    donor.save()
    return redirect('table')
def adds(request):
    """Save an add-service request on POST, otherwise show the request form.

    NOTE(review): ``img`` is read from request.POST, but Requestaddservice.img
    is an ImageField — uploaded files normally arrive in request.FILES;
    confirm the form really submits a plain string here.
    """
    if request.method == 'POST':
        Category = request.POST['cat']
        Name = request.POST['fn']
        Speciality = request.POST['sp']
        Department= request.POST['dp']
        Address = request.POST['ad']
        ServiceDescription = request.POST['des']
        img = request.POST['img']
        # Owner fields are currently not persisted (left commented out).
        # Ownername = request.POST['n']
        # Ownermobno= request.POST['mb']
        x = Requestaddservice(Category=Category,Name=Name ,Speciality =Speciality ,Department=Department ,Address=Address , ServiceDescription= ServiceDescription ,img=img )
        x.save()
        return redirect('adddone')
    else:
        return render(request,"addservice.html")
# ---- static pages and simple listings ----

def index(request):
    """Site home page."""
    return render(request, "index.html")

def test(request):
    """Forgot-password page."""
    return render(request, "forgotpassword.html")

# ---- add-service pages ----
def addservices(request):
    """Form for requesting that a new service be listed."""
    return render(request, "addservice.html")

def adddone(request):
    """Confirmation page shown after an add-service request."""
    return render(request, "adddone.html")

# ---- category landing pages ----
def doctor(request):
    """Doctor-category landing page."""
    return render(request, "doctors.html")

def resutrants(request):
    """List every restaurant."""
    return render(request, "reasurants/reasurants.html", {'pss': reasurant.objects.all()})

def plumbers(request):
    """Plumber-category landing page."""
    return render(request, "plumber.html")

def ele(request):
    """List every electrician."""
    return render(request, "electricians/electricians.html", {'pss': electrician.objects.all()})

def automobiles(request):
    """Automobile-category landing page."""
    return render(request, "automobile.html")

def hotels(request):
    """List every hotel."""
    return render(request, "hotels/hotels.html", {'pss': hotel.objects.all()})

def hospitals(request):
    """Hospital-category landing page."""
    return render(request, "hospitals.html")

def aboutus(request):
    """About page."""
    return render(request, "about.html")

def contactus(request):
    """Contact page."""
    return render(request, "contact.html")

def blooddonate(request):
    """Blood-donation landing page."""
    return render(request, "blood.html")

# ---- blood-donor table ----
def table(request):
    """Tabular list of all registered blood donors."""
    return render(request, "blood/table.html", {'pss': blooddonor.objects.all()})

# ---- blood donate / find ----
def db(request):
    """Blood-donation form page."""
    return render(request, "blood/blooddonate.html")

def fb(request):
    """Find-blood page."""
    return render(request, "blood/findblood.html")
# ---- doctor speciality listings ----

def d1(request):
    """List all dentists."""
    return render(request, "dr/denist.html", {'dens': dentists.objects.all()})

def d2(request):
    """List all eye doctors."""
    return render(request, "dr/eye.html", {'dens': eye.objects.all()})

def d3(request):
    """List all dermatologists."""
    return render(request, "dr/dermatology.html", {'dens': dermatology.objects.all()})

def d4(request):
    """List all bone (orthopaedic) doctors."""
    return render(request, "dr/bone.html", {'dens': bone.objects.all()})

# ---- plumber service listings ----

def pservice(request):
    """List plumber service providers."""
    return render(request, "plumbers/plumber1.html", {'pss': plumberservice.objects.all()})

def pproduct(request):
    """List plumbing-product sellers."""
    return render(request, "plumbers/plumber2.html", {'pss': plumberproducts.objects.all()})

def pcontractors(request):
    """List plumbing contractors."""
    return render(request, "plumbers/plumber3.html", {'dens': plumbercont.objects.all()})

def pinstalltion(request):
    """List plumbing-installation providers."""
    return render(request, "plumbers/plumber4.html", {'dens': plumberinstall.objects.all()})
# ---- hospital listings (one view per category, templates h1..h10) ----

def h1(request):
    """List general hospitals."""
    return render(request, "hospitals/h1.html", {'pss': hospital.objects.all()})

def h2(request):
    """List children's hospitals."""
    return render(request, "hospitals/h2.html", {'pss': childhospital.objects.all()})

def h3(request):
    """List eye hospitals."""
    return render(request, "hospitals/h3.html", {'pss': eyehospital.objects.all()})

def h4(request):
    """List public hospitals."""
    return render(request, "hospitals/h4.html", {'pss': publichospital.objects.all()})

def h5(request):
    """List ENT hospitals."""
    return render(request, "hospitals/h5.html", {'pss': ENThospital.objects.all()})

def h6(request):
    """List private hospitals."""
    return render(request, "hospitals/h6.html", {'pss': privatehospital.objects.all()})

def h7(request):
    """List cancer hospitals."""
    return render(request, "hospitals/h7.html", {'pss': cancerhospital.objects.all()})

def h8(request):
    """List mental-health hospitals."""
    return render(request, "hospitals/h8.html", {'pss': mentalhospital.objects.all()})

def h9(request):
    """List multi-superspeciality hospitals."""
    return render(request, "hospitals/h9.html", {'pss': multisuperhospital.objects.all()})

def h10(request):
    """List orthopaedic hospitals."""
    return render(request, "hospitals/h10.html", {'pss': orthrohospital.objects.all()})
# ---- automobile service listings (templates a1..a5) ----

def a1(request):
    """List car-repair shops."""
    return render(request, "automobile/a1.html", {'pss': carrepair.objects.all()})

def a2(request):
    """List car-accessories shops."""
    return render(request, "automobile/a2.html", {'pss': caracesseries.objects.all()})

def a3(request):
    """List car-wash services."""
    return render(request, "automobile/a3.html", {'pss': carwash.objects.all()})

def a4(request):
    """List car-tyre shops."""
    return render(request, "automobile/a4.html", {'pss': cartyres.objects.all()})

def a5(request):
    """List motorcycle-repair shops."""
    return render(request, "automobile/a5.html", {'pss': motercyclerepair.objects.all()})
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,827 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/migrations/0009_caracesseries_carrepair_cartyres_carwash_motercyclerepair_newcars.py | # Generated by Django 3.0.4 on 2020-05-03 11:01
from django.db import migrations, models
# Auto-generated Django migration: adds six structurally identical
# car-service listing models (caracesseries, carrepair, cartyres, carwash,
# motercyclerepair, newcars). Every model shares the same field set and
# uploads images to the 'automobile' directory.
class Migration(migrations.Migration):

    dependencies = [
        ('Webapp', '0008_auto_20200501_1111'),
    ]

    operations = [
        migrations.CreateModel(
            name='caracesseries',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='automobile')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='automobile')),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='carrepair',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='automobile')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='automobile')),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='cartyres',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='automobile')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='automobile')),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='carwash',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='automobile')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='automobile')),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='motercyclerepair',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='automobile')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='automobile')),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
        migrations.CreateModel(
            name='newcars',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('drimg', models.ImageField(upload_to='automobile')),
                ('name', models.CharField(max_length=100)),
                ('speciality', models.CharField(max_length=100)),
                ('deprtimg', models.ImageField(upload_to='automobile')),
                ('department', models.CharField(max_length=100)),
                ('location', models.TextField(max_length=100)),
                ('mobNo', models.CharField(max_length=15)),
            ],
        ),
    ]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,828 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/paygate/views.py |
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from . import checksum
# NOTE(security): merchant key and MID are credentials hard-coded into source
# control; the WEBSITE value "WEBSTAGING" indicates Paytm's staging setup.
# Move these to settings/environment variables before production use.
MERCHANT_KEY = "6smUhPYx3kvX&iV0"

def paymentMode(request):
    """Build the Paytm transaction parameters and render the payment page.

    The parameter dict (merchant id, order/customer ids, amount, callback)
    is signed with the merchant key via checksum.generate_checksum so Paytm
    can verify the request's integrity.
    """
    # NOTE(review): ORDER_ID, CUST_ID and TXN_AMOUNT are fixed values here —
    # presumably placeholders; a real flow would derive them per transaction.
    param_dict = {
        "MID": "ANnlmg05342462072571",
        "ORDER_ID": "15362",
        "CUST_ID": "1434",
        "TXN_AMOUNT": "5",
        "CHANNEL_ID": "WEB",
        "INDUSTRY_TYPE_ID": "Retail",
        "WEBSITE": "WEBSTAGING",
        #"CALLBACK_URL": "http/127.0.0.1:8000/handleRequest/",
        "CALLBACK_URL":"https://merchant.com/callback/"
    }
    param_dict['CHECKSUMHASH'] = checksum.generate_checksum(param_dict,MERCHANT_KEY)
    return render(request,'paytm.html',{'params':param_dict})
@csrf_exempt
def handlerequest(request):
    """Paytm posts the transaction result here; we just redirect to a thanks page.

    NOTE(review): the posted checksum is not verified before redirecting —
    confirm whether verification is handled elsewhere.
    """
    #paytm will send you post request here
    return redirect('/Thanks')
77,829 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/migrations/0012_auto_20200504_1657.py | # Generated by Django 3.0.4 on 2020-05-04 11:27
from django.db import migrations, models
# Auto-generated Django migration: widens blooddonor.mobile_no from an
# IntegerField (see migration 0011) to CharField(15), so leading zeros and
# formatting survive.
class Migration(migrations.Migration):

    dependencies = [
        ('Webapp', '0011_blooddonor'),
    ]

    operations = [
        migrations.AlterField(
            model_name='blooddonor',
            name='mobile_no',
            field=models.CharField(max_length=15),
        ),
    ]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,830 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/account/views.py | from django.shortcuts import render
# Create your views here.
from django.conf.global_settings import EMAIL_HOST_USER
from django.contrib.sites.shortcuts import get_current_site
from django.core.mail import send_mail
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.models import User, auth
# from .models import blooddonation
# Create your views here.
from django.template.loader import render_to_string
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_decode, urlsafe_base64_encode
from django.views.generic.base import View
from account.tokens import account_activation_token
def login(request):
    """Authenticate a user; on bad credentials flash a message and retry."""
    if request.method != 'POST':
        return render(request, 'login.html')
    user = auth.authenticate(
        username=request.POST['username'],
        password=request.POST['password'],
    )
    if user is None:
        messages.info(request, 'invalid username or Password !')
        return redirect('login')
    auth.login(request, user)
    return redirect("/")
def register(request):
    """Create an inactive account and e-mail an activation link.

    Flow: ensure the two passwords match and the username/e-mail are unused,
    create the user with is_active=False, then send a tokenised activation
    e-mail rendered from activate_account.html. All failure paths flash a
    message and redirect back to the registration form.
    """
    if request.method != 'POST':
        return render(request, 'register.html')

    first_name = request.POST['first_name']
    username = request.POST['username']
    password1 = request.POST['password1']
    password2 = request.POST['password2']
    email = request.POST['email']

    # Guard clauses replace the original nested if/else pyramid.
    if password1 != password2:
        messages.info(request, 'password not matching..')
        return redirect('register')
    if User.objects.filter(username=username).exists():
        messages.info(request, 'Username Taken')
        return redirect('register')
    if User.objects.filter(email=email).exists():
        messages.info(request, 'Email Taken')
        return redirect('register')

    user = User.objects.create_user(username=username, password=password1,
                                    email=email, first_name=first_name)
    # The account stays inactive until the e-mailed token is confirmed
    # (see ActivateAccount).
    user.is_active = False
    user.save()

    current_site = get_current_site(request)
    subject = 'Activate Your Account'
    message = render_to_string('activate_account.html', {
        'user': user,
        'domain': current_site.domain,
        'uid': urlsafe_base64_encode(force_bytes(user.pk)),
        'token': account_activation_token.make_token(user),
    })
    send_mail(
        subject,
        message,
        EMAIL_HOST_USER,
        [email],
        fail_silently=False,
    )
    # Fixed: removed an unreachable `return redirect('/')` that followed
    # branches which all returned.
    return redirect('login')
# ///////////////Logout////////////////////////
def logout(request):
    """End the current session and return to the home page."""
    auth.logout(request)
    return redirect('/')
# ////////////////////activate Acc///////////////////
class ActivateAccount(View):
    """Confirm an account-activation link (uid + token) sent by register()."""

    def get(self, request, uidb64, token, *args, **kwargs):
        """Decode the uid, verify the token, then activate and log in the user."""
        try:
            uid = urlsafe_base64_decode(uidb64).decode()
            user = User.objects.get(pk=uid)
        except (TypeError, ValueError, OverflowError, User.DoesNotExist):
            user = None
        if user is not None and account_activation_token.check_token(user, token):
            user.is_active = True
            user.save()
            # Fixed: previously this called the module-level login *view* as
            # ``login(request)``, which only re-rendered the login page and
            # never authenticated anyone; log the user in directly instead.
            auth.login(request, user)
            messages.success(request, ('Your account have been confirmed.'))
            return render(request, 'index.html')
        else:
            messages.warning(request, ('The confirmation link was invalid, possibly because it has already been used.'))
            return redirect('/Thanks')
def view_profile(request, pk=None):
    """Show a profile: the user with ``pk`` when given, else the requester."""
    profile_user = User.objects.get(pk=pk) if pk else request.user
    return render(request, 'snippets/profile.html', {'user': profile_user})
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,831 | Akashpb07/Chdproject | refs/heads/master | /chadigarh Dial/Webapp/migrations/0014_requestaddservice.py | # Generated by Django 3.0.4 on 2020-05-06 06:20
from django.db import migrations, models
# Auto-generated Django migration: adds the `Requestaddservice` model that
# stores user-submitted requests to list a new service (see Webapp.views.adds).
class Migration(migrations.Migration):

    dependencies = [
        ('Webapp', '0013_auto_20200504_1907'),
    ]

    operations = [
        migrations.CreateModel(
            name='Requestaddservice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Category', models.CharField(max_length=100)),
                ('Name', models.CharField(max_length=50)),
                ('Speciality', models.CharField(max_length=50)),
                ('Department', models.CharField(max_length=20)),
                ('Address', models.TextField(max_length=200)),
                ('ServiceDescription', models.TextField(max_length=200)),
                ('img', models.ImageField(upload_to='req add')),
                ('Ownername', models.CharField(max_length=50)),
                ('Ownermobno', models.CharField(max_length=15)),
            ],
        ),
    ]
| {"/chadigarh Dial/Webapp/admin.py": ["/chadigarh Dial/Webapp/models.py"], "/chadigarh Dial/Webapp/views.py": ["/chadigarh Dial/Webapp/models.py"]} |
77,890 | cht33/questionaire | refs/heads/master | /api/models.py | import random
import json
# class Questions:
# '''
# 读取所有问题的类
# '''
# def __init__(self, fileuser_id, QUESTION_SHUFFLE):
# with open(fileuser_id, 'r', encoding='utf8') as f:
# questions = f.readlines()
# questions = [t.strip('\r\n').split('\t') for t in questions]
# self.questions = questions
# self.question_shuffle = QUESTION_SHUFFLE
# # 返回从开始位置往后的num个问题及问题序号
# def get_questions(self, start_pos, num):
# end_pos = min(start_pos + num, len(self.questions))
# questions_id = list(range(start_pos, end_pos))
# if self.question_shuffle:
# random.shuffle(questions_id)
# questions = [self.questions[i] for i in questions_id]
# return questions_id, questions
class QueryModel:
    """In-memory questionnaire state shared by all users.

    Attributes:
        questions_id: ids (positions in the source data) of the survey's questions.
        questions:    the question payloads themselves.
        user_data:    per-user progress, keyed by user id:
                      user_data[uid]['ans_list']  answers (-1 = unanswered)
                      user_data[uid]['time_cost'] per-question time spent (-1 = n/a)
                      user_data[uid]['curr_id']   index of the next question to serve
    """

    def __init__(self, start_pos=0, num=0, all_questions=None):
        self.questions_id = []
        self.questions = []
        self.user_data = {}
        # Idiom fix: compare to None with `is not`, not `!=`.
        if all_questions is not None:
            self.reset_questions(start_pos, num, all_questions)

    def get_question(self, index):
        """Return the index-th question of the survey, or None when out of range."""
        if 0 <= index < len(self.questions):
            return self.questions[index]
        return None

    def reset_questions(self, start_pos, num, all_questions):
        """Re-seed the survey with `num` questions starting at `start_pos`."""
        self.questions_id, self.questions = all_questions.get_questions(start_pos, num)

    def has_user(self, user_id):
        """Return True if the user has already started this survey."""
        return self.user_data.get(user_id) is not None

    def get_user_ques_id(self, user_id):
        """Return the current question index of an existing user."""
        return self.user_data[user_id]['curr_id']

    def add_new_user(self, user_id):
        """Create progress state for a new user; -1 marks unanswered questions."""
        self.user_data[user_id] = {
            'ans_list': [-1] * len(self.questions),
            'time_cost': [-1] * len(self.questions),
            'curr_id': 0
        }

    def set_ans(self, user_id, index, ans, t1):
        """Record answer `ans` and time `t1` for question `index`.

        curr_id only advances when the answered question is the current one,
        so re-answering an earlier question does not skip ahead.
        """
        user = self.user_data[user_id]
        user['ans_list'][index] = ans
        user['time_cost'][index] = t1
        if index == user['curr_id']:
            user['curr_id'] = index + 1

    def __len__(self):
        """Total number of questions in the survey."""
        return len(self.questions)

    def save(self, user_id, filepath=None):
        """Dump a user's answers as "id<TAB>answer<TAB>time" lines.

        Prints to stdout when no filepath is given; otherwise appends to
        `<filepath><user_id>.txt` so repeated submissions are all kept.
        """
        ans_list = self.user_data[user_id]['ans_list']
        t1 = self.user_data[user_id]['time_cost']
        # Idiom/perf fix: build with join instead of quadratic `s += ...`.
        s = ''.join('{}\t{}\t{}\n'.format(self.questions_id[i], ans_list[i], t1[i])
                    for i in range(len(self.questions)))
        if filepath is None:
            print(s)
        else:
            filename = filepath + user_id + '.txt'
            with open(filename, 'a', encoding='utf8') as fout:
                print(s, file=fout)
class Questions:
    """Load questionnaire tasks from a data file.

    File format, per task:
        line 1: "rank, num" or "rank, num, val"
        line 2: tab-separated question list
        line 3: tab-separated time points (the first entry is dropped)
        next `num` lines: one JSON poi list each
    Tasks whose question list is longer than 20 entries are skipped
    (their poi lines are still consumed so the stream stays aligned).
    """

    def __init__(self, fileuser_id, QUESTION_SHUFFLE, sample=False):
        sess_lens = []
        questions = []
        random.seed(233)  # fixed seed so any later shuffling is reproducible
        with open(fileuser_id, 'r', encoding='utf8') as f:
            for line in f:
                col = line.strip('\r\n').split(', ')
                rank, num = int(col[0]), int(col[1])
                # NOTE(review): `val` stays the *string* '-1' when absent but
                # becomes an int when present — callers must cope with both.
                val = '-1'
                if len(col) == 3: val = int(col[2])
                q_list = f.readline().strip('\r\n').split('\t')
                time_points = f.readline().strip('\r\n').split('\t')
                time_points = [int(t) for t in time_points]
                time_points = time_points[1:]
                poi_lists = [json.loads(f.readline().strip('\r\n')) for _ in range(num)]
                sess_len = len(q_list)
                if sess_len > 20: continue
                # Fixed: previously `sess_lens.append(sess_lens)` appended the
                # list to itself instead of recording the session length.
                sess_lens.append(sess_len)
                questions.append({
                    'rank': rank,
                    'val': val,
                    'q_list': q_list,
                    'time_points': time_points,
                    'poi_lists': poi_lists
                })
        self.questions = questions
        self.question_shuffle = QUESTION_SHUFFLE

    def get_questions(self, start_pos, num):
        """Return (ids, questions) for up to `num` questions from `start_pos`,
        shuffled when question_shuffle is set."""
        end_pos = min(start_pos + num, len(self.questions))
        questions_id = list(range(start_pos, end_pos))
        if self.question_shuffle:
            random.shuffle(questions_id)
        questions = [self.questions[i] for i in questions_id]
        return questions_id, questions
| {"/api/views.py": ["/api/models.py"]} |
77,891 | cht33/questionaire | refs/heads/master | /api/views.py | from django.views.decorators.http import require_http_methods
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
from .models import QueryModel, Questions
# Index of the first question within the data set
QUSETION_START_POS = 0
# Total number of questions served (identifier typo 'QUSETION' kept -- it is code)
QUSETION_NUM = 10000
# Whether to shuffle the question order per user
QUESTION_SHUFFLE = False
# Data-set location and result save path
# QUESTION_DATA = 'data/tasks_shuffle.txt'
QUESTION_DATA = 'data/final_tasks.txt'
SAVE_PATH = 'data/results/'
# Module-level singletons shared by every request; user state is in-memory
# only and is lost on process restart (apart from explicit save() calls).
all_questions = Questions(QUESTION_DATA, QUESTION_SHUFFLE)
model = QueryModel(QUSETION_START_POS, QUSETION_NUM, all_questions)
# Create your views here.
@require_http_methods(["POST"])
@csrf_exempt
def login(request):
    """Log a user in by name and return JSON ``{qid, questionNum}``.

    Known users resume from their current question id; new users are
    registered in the in-memory model and start at question 0.
    """
    # FIX: removed leftover debug print (which also did a redundant POST lookup).
    userName = request.POST.get('userName')
    if model.has_user(userName):
        qid = model.get_user_ques_id(userName)
    else:
        model.add_new_user(userName)
        qid = 0
    questionNum = len(model)
    return JsonResponse({ 'qid': qid, 'questionNum': questionNum })
@require_http_methods(["POST"])
@csrf_exempt
def question(request):
    """Record the grade for the previous question (if sent) and return the
    next question as JSON.

    Responses: ``{'repost': True}`` for stale/duplicate submissions,
    ``{'ended': True}`` once past the last question (results are saved),
    otherwise the question payload with the user's current qid attached.
    """
    userName = request.POST.get('userName')
    curr_qid = model.get_user_ques_id(userName)
    qid = int(request.POST.get('qid'))
    grade = request.POST.get('grade')
    if grade is not None:  # FIX: identity test instead of '!= None'
        timeCost = request.POST.get('timeCost')
        # The grade belongs to the *previous* question (qid - 1).
        model.set_ans(userName, qid - 1, int(grade), timeCost)
    if qid <= curr_qid:
        # Re-submission of an already-answered question.
        return JsonResponse({ 'repost': True })
    t = model.get_question(qid)
    if t is None:
        # Past the last question: persist this user's results.
        model.save(userName, SAVE_PATH)
        return JsonResponse({ 'ended': True })
    else:
        t['qid'] = model.get_user_ques_id(userName)
        return JsonResponse(t)
77,892 | clvsit/text_proofreading | refs/heads/master | /checker/views.py | import json
import time
from django.http import HttpResponse
from django.shortcuts import render
from django.template import loader
from django.views.decorators.csrf import csrf_exempt
def index(request):
    """Serve the checker front page."""
    template_name = "index.html"
    return render(request, template_name)
@csrf_exempt
def get_service(request):
    """Mock proofreading endpoint: return a canned correction list as JSON.

    Accepts POST only; any other method gets an error payload. A short sleep
    simulates remote-service latency for the frontend.
    """
    # FIX: removed leftover debug print of request.method and the
    # commented-out sample payload entries.
    time.sleep(0.5)  # simulated service latency
    if request.method == "POST":
        resp = {
            'code': 1,
            'msg': '调用服务成功',
            'data': {
                "answerList": [
                    {"id": "b16845bfc12744f591b9c4ed22ec5f74", "start": 0, "end": 3, "answer": "考虑", "source": "考虑考虑",
                     "type": "1"}
                ]
            }
        }
    else:
        resp = {"code": 0, "msg": "请求方法有误!", "data": {}}
    return HttpResponse(json.dumps(resp), content_type="application/json")
@csrf_exempt
def get_judge(request):
    """Mock judging endpoint: return a canned correction list as JSON.

    BUGFIX: the ``id`` and ``type`` keys were previously *unquoted*, i.e. the
    builtin functions ``id`` and ``type`` were used as dict keys, which made
    ``json.dumps`` raise ``TypeError: keys must be str, int, float, bool or
    None``. They are now proper string keys.
    """
    resp = {'code': 1, 'msg': '调用服务成功', 'data': {
        "answerList": [
            {"id": "60e38bfe-7af1-4d7c-96c5-86cc3443a7f7", "start": 0, "end": 3, "answer": "成都", "source": "程度", "type": 0},
            {"id": "96873e8d-cf07-4405-963b-6f9b9ab02dc6", "start": 0, "end": 3, "answer": "篮", "source": "蓝", "type": 0},
            {"id": "b16845bf-c127-44f5-91b9-c4ed22ec5f74", "start": 0, "end": 3, "answer": "考虑", "source": "考虑考虑", "type": 1},
        ]
    }}
    return HttpResponse(json.dumps(resp), content_type="application/json")
| {"/checker/admin.py": ["/checker/models.py"]} |
77,893 | clvsit/text_proofreading | refs/heads/master | /manager/urls.py | from django.urls import path
from . import views
# URL routes for the manager app.
urlpatterns = [
    path('', views.index, name="index"),
    # AJAX endpoint returning the stored proofreading results.
    path('data/get', views.get_result_list, name="get_result_list"),
]
| {"/checker/admin.py": ["/checker/models.py"]} |
77,894 | clvsit/text_proofreading | refs/heads/master | /checker/models.py | from django.db import models
from mongoengine import Document, StringField, IntField
class FeedBack(Document):
    """User feedback on a single correction suggestion (MongoDB via mongoengine)."""
    # NOTE(review): overriding ``id`` replaces the document's default ObjectId
    # primary key with a plain string field -- confirm this is intentional.
    id = StringField(max_length=36)
    document = StringField()
    # NOTE(review): mongoengine's IntField has no ``max_length`` parameter
    # (it uses min_value/max_value) -- verify these kwargs actually validate.
    start = IntField(max_length=8)
    end = IntField(max_length=8)
    answer = StringField(max_length=24)
    confidence = StringField(max_length=10)
    # Correction category; the views send 0/1 -- presumably spell vs repeat,
    # TODO confirm against the frontend.
    type = IntField(max_length=1)
    # Length 19 suggests 'YYYY-MM-DD HH:MM:SS' -- TODO confirm the format.
    date = StringField(max_length=19)
    remark1 = StringField()
    remark2 = StringField()
class AdminRole(models.Model):
    """Back-office role with a coarse permission string."""
    id = models.AutoField("角色ID", primary_key=True)
    name = models.CharField('角色名称', max_length=12)
    authority = models.CharField("角色权限", max_length=24)
    brief = models.CharField("简要介绍", max_length=64)
    # BUGFIX: auto_now rewrites the field on *every* save; a creation date
    # must use auto_now_add (set once on insert). Requires a migration.
    create_date = models.DateField("创建日期", auto_now_add=True)
    modify_date = models.DateField("修改日期", auto_now=True)
    reason = models.CharField("修改原因", max_length=64)
    remark1 = models.CharField("备注1", max_length=64, blank=True)
    remark2 = models.CharField("备注2", max_length=64, blank=True)

    def __str__(self):
        return self.name
class AdminUser(models.Model):
    """Back-office user account, linked to an AdminRole."""
    account = models.CharField('用户账号', primary_key=True, max_length=32)
    # NOTE(review): stored as a plain CharField -- passwords should be hashed
    # (e.g. via django.contrib.auth) before this goes to production.
    password = models.CharField('用户密码', max_length=32)
    name = models.CharField('用户姓名', max_length=4)
    # NOTE: the attribute is named ``role_id`` although it is a ForeignKey, so
    # Django's generated column is ``role_id_id`` (kept for compatibility).
    role_id = models.ForeignKey(AdminRole, on_delete=models.CASCADE, default=2)
    # BUGFIX: auto_now rewrites the field on *every* save; a creation date
    # must use auto_now_add (set once on insert). Requires a migration.
    create_date = models.DateField("创建日期", auto_now_add=True)
    modify_date = models.DateField("修改日期", auto_now=True)
    reason = models.CharField("修改原因", max_length=64)
    remark1 = models.CharField("备注1", max_length=64, blank=True)
    remark2 = models.CharField("备注2", max_length=64, blank=True)

    def __str__(self):
        return self.name
| {"/checker/admin.py": ["/checker/models.py"]} |
77,895 | clvsit/text_proofreading | refs/heads/master | /checker/admin.py | from django.contrib import admin
from .models import AdminUser, AdminRole
# Register your models here.
# Expose the back-office account/role tables in the Django admin site.
admin.site.register(AdminUser)
admin.site.register(AdminRole)
| {"/checker/admin.py": ["/checker/models.py"]} |
77,896 | clvsit/text_proofreading | refs/heads/master | /checker/urls.py | from django.urls import path
from . import views
# URL routes for the checker app.
urlpatterns = [
    path('', views.index, name="index"),
    # NOTE(review): both routes below share the same view AND the same route
    # name, so reverse("get_service") is ambiguous -- confirm intended.
    path('service/spell', views.get_service, name="get_service"),
    path('service/repeat', views.get_service, name="get_service"),
    path('service/judge', views.get_judge, name="get_judge"),
]
| {"/checker/admin.py": ["/checker/models.py"]} |
77,898 | kirimaks/data_plot | refs/heads/master | /grab_data.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import time
time_begin = time.time()
import tools
import argparse
import task_types
import ConfigParser
def arguments_analysis():
    """Parse the command-line options and return the argparse namespace."""
    args = argparse.ArgumentParser(description=u'Calculate data from files and write to database.')
    args.add_argument(u'-v', '--version', action=u'version', version='%(prog)s 2.0')
    args.add_argument(u'-d', dest=u'debug_mode', action=u'store_true', help=u'Debug mode (default mode is INFO).')
    args.add_argument(u'-c', dest=u'config_file', metavar=u'config.cfg', required=True, help=u'Configuration file.')
    # FIX: help-text typo 'Delte' -> 'Delete'.
    args.add_argument(u'--initdb', dest=u'initdb', action=u'store_true', help=u'Delete old and create new database file.')
    # BUGFIX: the options were parsed twice; the first result was discarded.
    return args.parse_args()
def config_analysis(config_file):
    """Load the INI-style configuration file and return the parser object."""
    parser = ConfigParser.RawConfigParser()
    parser.read(config_file)
    return parser
# ---------------------------------------------------------------------------
# Script entry point: read every configured data source once and append the
# sampled values to the SQLite database (draw_data.py is the plotting side).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    #------------- Preparations. ----------------------------------------------------------
    cmdargs = arguments_analysis()
    conf = config_analysis(cmdargs.config_file)
    log_tool = tools.Log_tool(cmdargs.debug_mode)
    # Db_tool (re)creates the SQLite file under [Basic] workdir when needed.
    db_tool = tools.Db_tool(db_dir = conf.get(u'Basic', u'workdir'), log_tool = log_tool, config_file = conf, recreate_db = cmdargs.initdb)
    #--------------------------------------------------------------------------------------
    # Every config section after [Basic] is one measurement task.
    for cur_task in conf.sections()[1:]:
        log_tool.debug([u'Processing for [%s]', cur_task])
        ### Processing for network interface. ###
        # NOTE(review): substring containment, not an exact match -- a task
        # named "eth" would also match "eth0"; confirm this is intended.
        if cur_task in conf.get(u'Basic', u'network_interfaces'):
            net_task = task_types.Network_Task( cur_task, conf, log_tool, db_tool )
            net_task.reading_data_from_file()
            net_task.write_data_to_db()
        ### Processing for regular file. ###
        else:
            reg_task = task_types.Regular_Task( cur_task, conf, log_tool, db_tool )
            reg_task.reading_data_from_file()
            reg_task.write_data_to_db()
    log_tool.debug(['(%s) execution time: [%s]', __file__, time.time() - time_begin])
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,899 | kirimaks/data_plot | refs/heads/master | /tools/drawing.py | import matplotlib.pyplot as plt
import os.path
class Drawing(object):
    """Render one task's time series to a PNG with matplotlib.

    Shared by every task type; the caller supplies a data dict (one list per
    series plus a 'Time' list of timestamp strings) and how many of the
    newest minutes to plot.
    """

    def __init__(self, task_name, data, conf, log_tool, minuts):
        self.__log_tool = log_tool
        self.__task_name = task_name
        self.__data = data
        self.__output_file = os.path.join(conf.get(u'Basic', u'workdir'), conf.get(task_name, u'graph_file'))
        # BUGFIX: normalise to int once -- draw_data.py passes the -m option
        # straight from argparse, and a string here crashed the range() math
        # in create_graph().
        self.__minuts = int(minuts)
        if self.__minuts <= 5:
            self.__log_tool.crit([u'Too few minutes. Exit.....'])  # crit() exits the process

    def create_graph(self):
        """Plot every non-Time series of the data dict and save the figure."""
        self.__log_tool.debug( [u'Write data for [%s] to [%s]', self.__task_name, self.__output_file] )
        # Clamp to however many samples are actually available.
        if len(self.__data[u'Time']) < self.__minuts:
            self.__minuts = len(self.__data[u'Time'])
        x_points = list(range(self.__minuts))
        time_labels = self.prepare_time(self.__data[u'Time'])
        ax = plt.gca()
        # COMPAT FIX: '//' keeps the step an int (plain '/' broke on Python 3),
        # and list() makes the result assignable on both Python 2 and 3.
        time_range = list(range(-1, self.__minuts, self.__minuts // 4))
        time_range[0] = 0
        ax.set_xticks(time_range)
        ax.set_xticklabels(time_labels)
        import random  # FIX: hoisted out of the per-series loop below
        colors_list = [ u'b', u'g', u'r', u'c', u'm', u'y', u'k' ]
        for series in self.__data.keys():
            if series != u'Time':
                # BUGFIX: random.choice can pick any colour; the previous
                # randrange(0, len-1) could never select the last entry.
                plt.plot(x_points, self.__data[series], random.choice(colors_list), label=series)
        plt.title(self.__task_name)
        plt.grid(True)
        plt.legend(loc=u'upper left', shadow=True)
        plt.xlabel(u'[%d] : minuts' % self.__minuts)
        plt.savefig( self.__output_file, dpi=70 )
        plt.close()

    def prepare_time(self, t):
        """Pick five evenly spaced labels: first, quarter, middle,
        three-quarter and last entry of *t*."""
        last_elem = len(t) - 1
        mid_elem = last_elem // 2   # '//' keeps indices integral on Py2 and Py3
        first_part = mid_elem // 2
        last_part = mid_elem + first_part
        return [t[0], t[first_part], t[mid_elem], t[last_part], t[last_elem]]
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,900 | kirimaks/data_plot | refs/heads/master | /task_types/__init__.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from regular import Regular_Task
from network import Network_Task
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,901 | kirimaks/data_plot | refs/heads/master | /task_types/network.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import time
import tools
import os.path
import regular # Write_data function.
class Network_File(regular.CpuTemp):
    """DB helpers for per-interface network statistics.

    Inherits write_data from CpuTemp; adds interface-id lookup/creation and
    the SELECT that reshapes stored rows back into per-series lists.
    """
    # Need write_data method from CpuTemp.

    @staticmethod
    def get_interface_id(interface, db_tool, log_tool ):
        """Return the (Name, Id) row for *interface*, inserting it on first use."""
        data = db_tool.select_data_where( u'Network_Interfaces', u'Name, Id', u'Name', interface ).fetchone()
        if data == None:
            # Create a record about interface.
            log_tool.debug([u'Create record for [%s] interface.', interface])
            field = u'(Name)'
            # NOTE(review): SQL built by string concatenation -- the value comes
            # from the local config file, but parameterised queries would be safer.
            value = u'("' + unicode(interface) + u'")'
            db_tool.insert_into(u'Network_Interfaces', field, value)
            data = db_tool.select_data_where( u'Network_Interfaces', u'Name, Id', u'Name', interface ).fetchone()
            if len(data) == 0:
                log_tool.crit([u"Can't get information from database."])  # crit() exits
        return data

    @staticmethod
    def read_db_data( db_tool, conf, interface ):
        ''' Reading data from database, and create dict with data (For network). '''
        # Build the SELECT column list from the configured statistic types.
        network_data = { u'Time' : [] }
        fields = u'Time'
        stat_types = conf.get( u'Basic', u'network_stat_types' ).split()
        for stat_type in stat_types:
            if u'bytes' in stat_type:
                fields += u',rx_bytes,tx_bytes'
                network_data[u'rx_bytes'] = []
                network_data[u'tx_bytes'] = []
            elif u'packets' in stat_type:
                fields += u',rx_packets,tx_packets'
                network_data[u'rx_packets'] = []
                network_data[u'tx_packets'] = []
            elif u'errors' in stat_type:
                fields += u',rx_errors,tx_errors'
                network_data[u'rx_errors'] = []
                network_data[u'tx_errors'] = []
        fields += u',Network_Statistic.Id'
        fields_list = fields.rsplit(u',')
        # Rows come back newest-first (ORDER BY Id DESC); insert(0, ...)
        # restores chronological order. The Id column itself is not stored.
        for row in db_tool.select_for_interface( fields, interface ):
            n = 0
            for field in fields_list:
                if field != u'Network_Statistic.Id':
                    network_data[field].insert(0,row[n])
                n += 1
        return network_data
class Network_Task(object):
    """One measurement task for a single network interface.

    Samples /sys/class/net/<iface>/statistics counters twice, one second
    apart, to derive per-second deltas; stores them via Network_File and can
    later retrieve and plot them.
    """
    known_types = [u'bytes', u'packets', u'errors']

    def __init__( self, cur_task, config_file, log_tool, db_tool ):
        self.__interface = cur_task
        self.__log_tool = log_tool
        self.__db_tool = db_tool
        self.__config = config_file
        self.__network_data = {}

    #------------------ Store data. ------------------
    def reading_data_from_file(self):
        """Sample rx/tx counters for every configured statistic type."""
        list_of_stat_types = self.__config.get(u'Basic', u'network_stat_types').split(u',')
        for stat_type in list_of_stat_types:
            stat_type = stat_type.strip()
            if stat_type not in self.known_types:
                self.__log_tool.crit( [u'Unknown statistic type [%s]', stat_type] )  # crit() exits
            for io_path in [ u'rx_', u'tx_' ]:
                full_path = os.path.join(u'/sys/class/net/', self.__interface, u'statistics', io_path + stat_type)
                data1 = data2 = 0
                try:
                    with open(full_path) as fp:
                        # Two reads one second apart give a per-second delta.
                        data1 = int(fp.readline())
                        fp.seek(0)
                        time.sleep(1)
                        data2 = int(fp.readline())
                except IOError as Exc:
                    self.__log_tool.crit([u'[%s], %s', Exc.filename, Exc.args[1] ])
                data = int(data2 - data1)
                # Counters can wrap or reset; clamp negative deltas to zero.
                data = 0 if data < 0 else data
                # Bytes are reported as KiB/s; packets/errors stay raw counts.
                self.__network_data[io_path + stat_type] = data / 1024 if stat_type == u'bytes' else data

    def write_data_to_db(self):
        """Persist the sampled deltas, creating the interface row if needed."""
        Interface_data = Network_File.get_interface_id( self.__interface, self.__db_tool, self.__log_tool )
        self.__network_data[u'InterfaceId'] = Interface_data[1]
        Network_File.write_data( u'Network_Statistic', self.__network_data, self.__db_tool, self.__log_tool )

    #--------- Retrieve and draw data. ---------
    def retrive_data(self):
        """Load this interface's stored series from the database."""
        self.__log_tool.debug([u'Retrive data for [%s]', self.__interface])
        self.__cur_data = Network_File.read_db_data( self.__db_tool, self.__config, self.__interface )

    def draw_data(self):
        """Plot the retrieved series to the configured PNG."""
        # FIX: log-message typo ('Rrawing' -> 'Drawing').
        self.__log_tool.debug( [u'Drawing data for [%s]', self.__interface] )
        minuts = self.__db_tool.minuts_limit
        figure = tools.Drawing( self.__interface, self.__cur_data, self.__config, self.__log_tool, minuts )
        figure.create_graph()
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,902 | kirimaks/data_plot | refs/heads/master | /tools/db_tool.py | import sqlite3
import os, os.path
import re
class Db_tool(object):
    """SQLite persistence layer: creates the schema and runs all queries.

    All SQL is assembled by string concatenation from config-derived values;
    acceptable for a single-user local tool, but not injection-safe (flagged
    below). Python 2 code (iteritems, unicode).
    """
    # TODO: Add truncate_db method (keep database small size).

    def __init__(self, db_dir, log_tool, config_file, recreate_db = False, min_limit = 10):
        # min_limit: how many of the newest rows the SELECT helpers return.
        self.__log_tool = log_tool
        self.__db_file = u'data.db' # Default database file name (no reason to set in config file).
        self.__db_path = os.path.join(db_dir, self.__db_file)
        self.__conf = config_file
        self.__minuts_limit = min_limit
        # (Re)create the database when it is missing or explicitly requested.
        if not os.path.isfile(self.__db_path) or not os.path.isdir(db_dir) or recreate_db:
            self.initdb(db_dir, self.__db_path)

    def initdb(self, db_dir, db_path):
        """Create the working directory, (truncate) the DB file, and all tables."""
        self.__log_tool.info( [u'Create new database in [%s]', db_path] )
        #------------- Create working directory and database file. ----------------------------
        try:
            self.__log_tool.debug([u'Creating working directory [%s]', db_dir])
            os.mkdir(db_dir)
        except Exception as Exc:
            # An already-existing directory is fine; anything else is fatal.
            # NOTE(review): matching on the errno message string is fragile --
            # comparing Exc.errno against errno.EEXIST would be safer.
            if Exc.args[1] == u'File exists':
                self.__log_tool.error([u'Working direcotry [%s] exist.', db_dir])
            else:
                self.__log_tool.crit([u'[%s], exit...', Exc.args[1]])
        try:
            self.__log_tool.debug([u'Creating database file %s/[%s]', db_dir, self.__db_file])
            # Opening with 'w' truncates an existing file, wiping old data.
            open(db_path, u'w').close()
        except Exception as Exc:
            self.__log_tool.crit([u'[%s], exit...', Exc.args[1]])
        #--------------------------------------------------------------------------------------
        #-------------- Create necessary tables. ----------------------------------------------
        #------ Cpu Temp: one REAL column per sensorN entry in the config. ----
        db_string = { u'Id' : u'INTEGER PRIMARY KEY', u'Time' : u'TEXT' }
        # TODO: test about sensorN lower and upper case.
        sensors_pattern = re.compile(u'sensor\d')
        for item in self.__conf.items(u'CpuTemp'):
            if sensors_pattern.search(item[0]):
                db_string[item[0]] = u'REAL'
        self.create_table( u'CpuTemp', **db_string )
        #------ Load Average. -------------------------
        self.create_table( u'LoadAverage',
            Id = u'INTEGER PRIMARY KEY',
            Load_1min = u'REAL',
            Load_5min = u'REAL',
            Load_15min = u'REAL',
            Time = u'TEXT'
        )
        #---- Network Interfaces (name -> id mapping). ----
        self.create_table( u'Network_Interfaces',
            Id = u'INTEGER PRIMARY KEY',
            Name = u'TEXT'
        )
        #------ Network Statistic (one row per sample, FK on InterfaceId). ----
        self.create_table( u'Network_Statistic',
            Id = u'INTEGER PRIMARY KEY',
            InterfaceId = u'INTEGER',
            rx_bytes = u'INTEGER',
            tx_bytes = u'INTEGER',
            rx_packets = u'INTEGER',
            tx_packets = u'INTEGER',
            rx_errors = u'INTEGER',
            tx_errors = u'INTEGER',
            Time = u'TEXT'
        )
        #--------------------------------------------------------------------------------------

    @property
    def db_path(self):
        # NOTE(review): despite the name, this returns a fresh sqlite3
        # *connection*, not a path -- every query opens a new connection.
        return sqlite3.connect(self.__db_path)

    def create_table(self, tab_name, **fields):
        """CREATE TABLE *tab_name* with the given {column: type} keywords."""
        self.__log_tool.debug([ u'Create table [%s]', tab_name ])
        #----------- Create string with fields and types. ------------
        field_string = '('
        for (Field, Type) in fields.iteritems():
            if len(field_string) > 1:
                field_string += ', '
            field_string += Field + ' ' + Type
        field_string += ')'
        #-------------------------------------------------------------
        cmd = u'CREATE TABLE ' + tab_name + u' ' + field_string
        self.__log_tool.debug( [ u'[%s]', cmd ] )
        with self.db_path as conn:
            cur = conn.cursor()
            cur.execute(cmd)

    def add_column(self, tab_name, col_name, col_type):
        # NOTE(review): unfinished stub -- executes an empty statement and
        # appears unused; parameters are ignored.
        with self.db_path as conn:
            cur = conn.cursor()
            cur.execute('')

    def insert_into(self, tab_name, fields, values):
        """INSERT the pre-formatted '(cols)' / '(vals)' strings into *tab_name*.

        NOTE(review): values are spliced into the SQL text, not bound as
        parameters -- safe only because they come from local config/sysfs.
        """
        with self.db_path as conn:
            cur = conn.cursor()
            cmd = u'INSERT INTO ' + tab_name + fields + u' VALUES' + values
            self.__log_tool.debug(['%s', cmd])
            try:
                cur.execute(cmd)
            except sqlite3.OperationalError as Exc:
                # Typically a schema mismatch after a config change.
                self.__log_tool.crit([u'%s. Use --initdb for recreate database.', Exc.message])

    def select_data_where( self, tab_name, col, where_col, where_pattern):
        """Run SELECT *col* FROM *tab_name* WHERE *where_col* == "<pattern>";
        returns the live cursor."""
        with self.db_path as conn:
            cur = conn.cursor()
            cmd = u'SELECT ' + col + u' FROM ' + tab_name + u' WHERE ' + where_col + u' == ' + '"' + where_pattern + '"'
            self.__log_tool.debug(['%s', cmd])
            return cur.execute(cmd)

    def select( self, tab_name, cols ):
        """Yield the newest minuts_limit rows of *cols*, newest first."""
        cmd = u'SELECT ' + cols + u' FROM ' + tab_name + u' ORDER BY Id DESC LIMIT ' + unicode(self.__minuts_limit)
        self.__log_tool.debug(['%s', cmd])
        with self.db_path as conn:
            cur = conn.cursor()
            cur.execute(cmd)
            while True:
                data = cur.fetchone()
                if data == None:
                    break
                yield data

    @property
    def minuts_limit(self):
        # How many of the newest samples callers should plot/fetch.
        return self.__minuts_limit

    def select_for_interface(self, cols, interface_name ):
        """Yield the newest minuts_limit Network_Statistic rows joined with
        Network_Interfaces for the named interface, newest first."""
        cmd = u'SELECT ' + cols + u' FROM ' + u'Network_Statistic' + u' JOIN Network_Interfaces ON Network_Statistic.InterfaceId == Network_Interfaces.Id WHERE Network_Interfaces.Name == ' + u'"' + interface_name + u'"' + u' ORDER BY Network_Statistic.Id DESC LIMIT ' + unicode(self.__minuts_limit)
        self.__log_tool.debug(['%s', cmd])
        with self.db_path as conn:
            cur = conn.cursor()
            cur.execute(cmd)
            while True:
                data = cur.fetchone()
                if data == None:
                    break
                yield data
#if __name__ == u'__main__':
# print u'Test'
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,903 | kirimaks/data_plot | refs/heads/master | /task_types/regular.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import re
import tools
class CpuTemp(object):
    ''' Super class for everything. Many things inherit from here. '''
    # Reading and preparing data for CpuTemp task. Python 2 code (iteritems).

    @staticmethod
    def get_data(config_file, log_tool):
        ''' Reading data from file. '''
        #---------- Create list of sensors (sensorN entries in [CpuTemp]). ------
        sensors = {}
        sensors_pattern = re.compile(u'sensor\d')
        for section in list(config_file.items(u'CpuTemp')):
            if sensors_pattern.search(section[0]):
                sensors[section[0]] = section[1]
        #------------------------------------------------------------------------
        #----------- Reading for every sensors and store data. ------------------
        temp_data = {}
        for (sensor_name, sensor_file) in sensors.iteritems():
            try:
                with open(sensor_file) as sf:
                    # Inserting '.' after two digits turns e.g. '47250' into
                    # '47.250' -- presumably millidegrees C from a hwmon file;
                    # TODO confirm the sensor file format.
                    data = list(sf.readline().rstrip())
                    data.insert(2,u'.')
                    temp_data[sensor_name] = ''.join(data)
            except Exception as Exc:
                # NOTE(review): Exc.filename only exists on IOError -- a
                # different exception here would crash the handler itself.
                log_tool.crit([u'[%s] %s, exit...', Exc.filename, Exc.args[1] ])
        #------------------------------------------------------------------------
        return temp_data

    @staticmethod
    def write_data(tab_name, cur_data, db_tool, log_tool): # Inherited in LoadAverage.
        ''' Write data to database. '''
        #----------- Build '(col, ...)' / '(val, ...)' strings for INSERT ------
        fields = u'('
        values = u'('
        for k in cur_data.keys():
            if len(fields) > 1:
                fields += u', '
            if len(values) > 1:
                values += u', '
            fields += k
            #values += cur_data[k]
            # NOTE(review): values are unquoted in the SQL text, so they must
            # be numeric-looking strings (they are, for temp/load data).
            values += unicode(cur_data[k])
        # The Time column is filled by SQLite itself (local time).
        fields += u', Time'
        values += u", time('now', 'localtime')"
        fields += u')'
        values += u')'
        #------------------------------------------------------------
        db_tool.insert_into( tab_name, fields, values )

    @staticmethod
    def read_db_data(db_tool, conf):
        ''' Reading data from database, and create dict with data (For CpuTemp only). '''
        #--- Create string with fields and prepare data dict. ----------
        cpu_temp_data = {u'Time' : [] }
        fields = u'Time'
        sensors_pattern = re.compile(u'sensor\d')
        for section in list(conf.items(u'CpuTemp')):
            if sensors_pattern.search(section[0]):
                fields += ','
                fields += section[0]
                cpu_temp_data[section[0]] = []
        ### Generate list of fields ###
        fields_list = fields.rsplit(u',')
        #---------------------------------------------------------------
        ### Rows arrive newest-first; insert(0, ...) restores order. ###
        ### The trailing Id column is selected but never stored. ###
        for row in db_tool.select(u'CpuTemp', fields + u',Id'):
            n = 0
            for field in fields_list:
                cpu_temp_data[field].insert(0,row[n]) # Add data for particular dict value.
                n += 1
        return cpu_temp_data
class LoadAverage(CpuTemp):
    """Read /proc-style load averages; shares write_data with CpuTemp."""

    @staticmethod
    def get_data(config_file, log_tool):
        ''' Read data from file. '''
        load_avg_data = {}
        try:
            with open(config_file.get( u'LoadAverage', u'load_file' )) as LoadFile:
                data = LoadFile.readline()
        except IOError as Exc:
            # BUGFIX: was 'except Exception', but the handler reads
            # Exc.filename, which only I/O errors carry (now consistent with
            # network.py). crit() exits the process.
            log_tool.crit([u'[%s] %s, exit...', Exc.filename, Exc.args[1] ])
        # The first three space-separated fields are the 1/5/15-minute averages.
        data = data.split(' ')
        load_avg_data[u'Load_1min'] = data[0]
        load_avg_data[u'Load_5min'] = data[1]
        load_avg_data[u'Load_15min'] = data[2]
        return load_avg_data

    @staticmethod
    def read_db_data(db_tool, conf):
        """Fetch stored rows and rebuild chronological per-series lists."""
        load_avg_data = { u'Time' : [], u'Load_1min' : [], u'Load_5min' : [], u'Load_15min' : [] }
        fields = u'Time,Load_1min,Load_5min,Load_15min'
        fields_list = fields.rsplit(u',')
        # Rows arrive newest-first; insert(0, ...) restores order. The Id
        # column is selected only for ordering and never stored.
        for row in db_tool.select(u'LoadAverage', fields + u',Id'):
            n = 0
            for field in fields_list:
                load_avg_data[field].insert(0, row[n])
                n += 1
        return load_avg_data
class Regular_Task(object):
    """A single-file measurement task: CpuTemp or LoadAverage.

    Dispatches to the matching helper class for reading, storing,
    retrieving and plotting.
    """
    known_regular_tasks = [u'CpuTemp', u'LoadAverage']

    def __init__(self, task_name, config, log_tool, db_tool):
        if task_name not in Regular_Task.known_regular_tasks:
            log_tool.crit([u'Unknown task: [%s].', task_name])  # crit() exits
        self.__task_name = task_name
        self.__config = config
        self.__log_tool = log_tool
        self.__db_tool = db_tool

    #----------------- Methods for store data to database. -----------------
    def reading_data_from_file(self):
        """Read the task's current values from its source file."""
        self.__log_tool.debug([u'Reading data for [%s] task.', self.__task_name])
        if self.__task_name == u'CpuTemp':
            self.__cur_data = CpuTemp.get_data(self.__config, self.__log_tool) # Holds the dictionary with data.
        elif self.__task_name == u'LoadAverage':
            self.__cur_data = LoadAverage.get_data(self.__config, self.__log_tool)

    def write_data_to_db(self):
        """Append the freshly read values to the task's table."""
        self.__log_tool.debug([u'Write data for [%s] task.', self.__task_name])
        if self.__task_name == u'CpuTemp':
            CpuTemp.write_data( u'CpuTemp', self.__cur_data, self.__db_tool, self.__log_tool )
        elif self.__task_name == u'LoadAverage':
            LoadAverage.write_data( u'LoadAverage', self.__cur_data, self.__db_tool, self.__log_tool )

    #----------------- Methods for retrieve data from database. -----------------
    def retrive_data(self):
        """Load the stored series for this task from the database."""
        self.__log_tool.debug( [u'Retrive data for [%s] task.', self.__task_name] )
        if self.__task_name == u'CpuTemp':
            self.__cur_data = CpuTemp.read_db_data( self.__db_tool, self.__config )
        elif self.__task_name == u'LoadAverage':
            self.__cur_data = LoadAverage.read_db_data( self.__db_tool, self.__config )

    def draw_data(self):
        """Plot the retrieved series to the task's configured PNG."""
        self.__log_tool.debug( [u'Drawing data for [%s] task.', self.__task_name] )
        minuts = self.__db_tool.minuts_limit
        # FIX: the two branches here were identical except for a literal task
        # name that always equals self.__task_name (validated in __init__).
        figure = tools.Drawing( self.__task_name, self.__cur_data, self.__config, self.__log_tool, minuts )
        figure.create_graph()
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,904 | kirimaks/data_plot | refs/heads/master | /tools/__init__.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from db_tool import Db_tool
from log_tool import Log_tool
from drawing import Drawing
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,905 | kirimaks/data_plot | refs/heads/master | /tools/log_tool.py | import sys
import logging
class Log_tool(object):
    """Thin wrapper around the stdlib logging module.

    Every method takes a single list: the format string followed by its
    lazy %-style arguments. ``crit`` logs and then terminates the process.
    """

    def __init__(self, debug_mode):
        # DEBUG (10) when requested, otherwise INFO (20).
        level = logging.DEBUG if debug_mode else logging.INFO
        logging.basicConfig( level=level, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M:%S' )
        if debug_mode:
            self.debug([u'Starting with debug mode.'])

    def info(self, args):
        logging.info(*args)

    def debug(self, args):
        logging.debug(*args)

    def error(self, args):
        logging.error(*args)

    def crit(self, args, exit_code = 1):
        logging.critical(*args)
        sys.exit(exit_code)
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,906 | kirimaks/data_plot | refs/heads/master | /draw_data.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time
time_begin = time.time()
import tools
import argparse
import task_types
import ConfigParser
def arguments_analysis():
    """Parse the command-line options and return the argparse namespace."""
    args = argparse.ArgumentParser(description=u'Reading database and create graphics.')
    args.add_argument(u'-v', '--version', action=u'version', version='%(prog)s 2.0' )
    args.add_argument(u'-d', dest=u'debug_mode', action=u'store_true', help=u'Debug mode (default mode is INFO).' )
    args.add_argument(u'-c', dest=u'config_file', metavar=u'config.cfg', required=True, help=u'Configuration file.' )
    # BUGFIX: -m is used in arithmetic downstream (tools.Drawing range math),
    # so parse it as an int instead of leaving it a string.
    args.add_argument(u'-m', dest=u'minuts_limit', metavar=u'M', type=int, default=10, help=u'Minuts limit (default 10).' )
    # BUGFIX: the options were parsed twice; the first result was discarded.
    return args.parse_args()
def config_analysis(config_file):
    """Load the INI-style configuration file and return the parser object."""
    parser = ConfigParser.RawConfigParser()
    parser.read(config_file)
    return parser
# Script entry point: read the stored samples back from SQLite and render one
# PNG per configured task (grab_data.py is the collection side).
if __name__ == '__main__':
    #------------- Preparations. ----------------------------------------------------------
    cmdargs = arguments_analysis()
    conf = config_analysis(cmdargs.config_file)
    log_tool = tools.Log_tool(cmdargs.debug_mode)
    # min_limit: how many of the newest samples to fetch and plot.
    db_tool = tools.Db_tool(db_dir = conf.get(u'Basic', u'workdir'), log_tool = log_tool, config_file = conf, min_limit = cmdargs.minuts_limit )
    #--------------------------------------------------------------------------------------
    # Every config section after [Basic] is one plotting task.
    for cur_task in conf.sections()[1:]:
        log_tool.debug([u'Processing for [%s]', cur_task])
        ### Processing for network interface. ###
        # NOTE(review): substring containment, not an exact match -- confirm.
        if cur_task in conf.get(u'Basic', u'network_interfaces'):
            net_task = task_types.Network_Task( cur_task, conf, log_tool, db_tool )
            net_task.retrive_data()
            net_task.draw_data()
        ### Processing for regular file. ###
        else:
            reg_task = task_types.Regular_Task( cur_task, conf, log_tool, db_tool )
            reg_task.retrive_data()
            reg_task.draw_data()
    log_tool.debug(['(%s) execution time: [%s]', __file__, time.time() - time_begin])
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,907 | kirimaks/data_plot | refs/heads/master | /drawing.py | import tools
import matplotlib
matplotlib.use(u'Agg')
import matplotlib.pyplot as plt
import os.path
import config
def prepare_time(t):
    """Pick five evenly spaced 'HH:MM' labels from a list of timestamp strings.

    Each entry of *t* is expected to look like 'YYYY-MM-DD HH:MM:SS'; the
    slice [11:-3] keeps the 'HH:MM' part. Labels are taken at the first,
    quarter, middle, three-quarter and last positions.
    """
    last_elem = len(t) - 1
    # COMPAT FIX: '//' keeps the indices integral on Python 3 as well
    # (identical to '/' for ints on Python 2).
    mid_elem = last_elem // 2
    first_part = mid_elem // 2
    last_part = mid_elem + first_part
    time = []
    time.append(t[0][11:-3])
    time.append(t[first_part][11:-3])
    time.append(t[mid_elem][11:-3])
    time.append(t[last_part][11:-3])
    time.append(t[last_elem][11:-3])
    return time
def draw_data(data, task, minuts, tab_col = None ):
    """Legacy plotting helper (superseded by the tools.Drawing class).

    Renders one task's series to a PNG under config.workdir. *data* maps
    'Time' plus generic series keys 'f1'/'f2'/'f3' to equally long lists;
    *task* is a dict with at least 'title' and 'graph_file'; *tab_col* names
    the interface for network plots. Python 2 code (integer '/' division).
    """
    save_destination = None
    # Lenght of graph.
    #x = [ t for t in range(minuts) ]
    #x = None
    #if len(data[u'Time']) < minuts:
    #    x = [ t for t in range(len(data[u'Time'])) ]
    #else:
    #    x = [ t for t in range(minuts) ]
    # Clamp to however many samples are actually available.
    if len(data[u'Time']) < minuts:
        minuts = len(data[u'Time'])
    x = [ t for t in range(minuts) ]
    #print len(data[u'Time'])
    #print minuts
    # Calculate time labels (five evenly spaced HH:MM strings).
    time = prepare_time(data[u'Time'])
    ax = plt.gca()
    # NOTE: 'minuts/4' relies on Python 2 integer division.
    time_range = range(-1, minuts, minuts/4)
    time_range[0] = 0
    ax.set_xticks(time_range)
    ax.set_xticklabels(time)
    titleis = task[u'title']
    # Per-task styling: series keys f1..f3 mean different things per task.
    if task[u'title'] == u'cpu_temp':
        save_destination = os.path.join(config.workdir, task[u'graph_file'])
        ylabelis = u'Temperature C'
        plt.plot(x, data[u'f1'], u'k', label=u'Temperature')
        plt.fill_between(x, data[u'f1'], 0, color=u'red', alpha='0.8')
        plt.yticks(range(5,101, 5))
    elif task[u'title'] == u'load_average':
        save_destination = os.path.join(config.workdir, task[u'graph_file'])
        ylabelis = u'Load Average'
        plt.plot(x, data[u'f1'], u'r', label=u'1 minut')
        plt.plot(x, data[u'f2'], u'b', label=u'5 minuts')
        plt.plot(x, data[u'f3'], u'g', label=u'15 minuts')
        # Fill for 15 minuts.
        plt.fill_between(x, data[u'f3'], 0, color=u'green', alpha='0.8')
    elif task[u'title'] == u'network_statistic':
        # Output file is derived from the interface name, not the config.
        save_destination = os.path.join(config.workdir, tab_col + '_io.png')
        titleis = task[u'title'] + ': [' + tab_col + ']'
        ylabelis = u'kB/s: [%s]' % tab_col
        plt.plot(x, data[u'f1'], u'g-', label=u'RX')
        plt.plot(x, data[u'f2'], u'b-', label=u'TX')
        # Fill rx.
        plt.fill_between(x, data[u'f1'], 0, color=u'green', alpha='0.6')
        plt.fill_between(x, data[u'f2'], 0, color=u'blue', alpha='0.4')
    #plt.title(task[u'title'])
    plt.title(titleis)
    plt.grid(True)
    plt.legend(loc=u'upper left', shadow=True)
    plt.xlabel(u'[%d] : minuts' % minuts)
    plt.ylabel(ylabelis)
    plt.savefig(save_destination, dpi=70)
    plt.close()
    tools.log.debug(u'Drawing data for [%s] to (%s)\n', task[u'title'], save_destination)
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,908 | kirimaks/data_plot | refs/heads/master | /tools/tools.py | #import logging as log
#log.basicConfig( level=log.DEBUG, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M:%S' )
#log.basicConfig( level=config.debug_level, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%I:%M:%S' )
import time

# Wall-clock timestamp taken at import; the __main__ block below reports the
# elapsed time since it.
time_begin = time.time()
#if __name__ == '__main__':
#    log.debug('Hello from (%s)', __file__)
# Dead code kept for reference: the sqlite helpers this module provided
# before the logging rework.
#def create_table(cur, tab_name):
#    log.debug('Create table [%s]', tab_name)
#    cur.execute('CREATE TABLE ' + tab_name + '(Id INTEGER PRIMARY KEY, Time TEXT)')
#def add_column(cur, tab_name, col_name, col_type):
#    log.debug('Add column [%s] to table [%s]', col_name, tab_name)
#    cur.execute('ALTER TABLE ' + tab_name + ' ADD COLUMN ' + col_name + ' ' + col_type)
#def insert_into(cur, tab_name, col_name, data):
#    log.debug('Insert into table [%s], column [%s] - (%s)', tab_name, col_name, data)
#    cur.execute('INSERT INTO ' + tab_name + '(Time,' + col_name + ') ' + 'VALUES(datetime("now", "localtime"), ' + data + ')')
#def select_data(cur, tab_name, cols, rows_limit):
#    log.debug('Select %s from %s with limit: %d', cols, tab_name, rows_limit)
#    last_col = cols.split(',')[-1]
#    cur.execute('SELECT ' + cols + ' FROM ' + tab_name + ' WHERE ' + last_col + ' IS NOT Null' + ' ORDER BY ' + 'Id ' + 'DESC LIMIT ' + unicode(rows_limit))
#log.info('(%s) execution time: [%s]\n', __file__, time.time() - time_begin)
# Smoke test: exercise the logging helper when the module is run directly.
# NOTE(review): Log_tool is neither defined nor imported anywhere in this
# chunk; running the file directly would raise NameError unless Log_tool is
# defined above this point -- confirm.
if __name__ == u'__main__':
    log_tool = Log_tool(10)
    log_tool.info(['(%s) execution time: [%s]\n', __file__, time.time() - time_begin])
    log_tool.debug(['Select %s from %s with limit: %s', u'cols', u'tab_name', u'rows_limit'])
| {"/grab_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/task_types/network.py": ["/tools/__init__.py"], "/task_types/regular.py": ["/tools/__init__.py"], "/tools/__init__.py": ["/drawing.py"], "/draw_data.py": ["/tools/__init__.py", "/task_types/__init__.py"], "/drawing.py": ["/tools/__init__.py"]} |
77,922 | davidklaing/py_524 | refs/heads/master | /setup.py | from distutils.core import setup
# Packaging metadata for the py_524 statistics helpers.
# NOTE(review): '0.1py_524' is not a PEP 440-compliant version string --
# consider plain '0.1'.  The README handle opened inline is never closed
# (harmless in a short-lived setup run, but worth tidying).
setup(
    name='py_524',
    version='0.1py_524',
    packages=['py_524'],
    license='Creative Commons Attribution-Noncommercial-Share Alike license',
    long_description=open('README.txt').read(), requires=['pytest']
)
| {"/py_524/tests/test_py_524.py": ["/py_524/__init__.py"]} |
77,923 | davidklaing/py_524 | refs/heads/master | /py_524/tests/test_py_524.py | import pytest
from py_524 import utils
def test_sd_math():
    # Sample standard deviation of [0, 1] is sqrt(0.5).
    assert utils.standard_deviation([0, 1]) == 0.7071067811865476
def test_sd_at_least_length_three():
    # Works for more than two values: SD of [0, 1, 2] is exactly 1.
    assert utils.standard_deviation([0, 1, 2]) == 1
def test_sd_neg_numbers():
    # Negative inputs are handled like any other numbers.
    assert utils.standard_deviation([-1, 0, 1]) == 1
def test_sd_same_element():
    # Zero spread -> standard deviation of 0.
    assert utils.standard_deviation([100, 100, 100]) == 0
def test_sd_too_small():
    # Fewer than two values divides by (n - 1) == 0.
    with pytest.raises(ZeroDivisionError):
        utils.standard_deviation([0])
def test_sd_is_list():
    # A non-sequence argument fails at len().
    with pytest.raises(TypeError):
        utils.standard_deviation(0)
def test_sd_null():
    # The argument is mandatory.
    with pytest.raises(TypeError):
        utils.standard_deviation()
def test_sd_string_convert():
    # Strings are not coerced to numbers; arithmetic on them raises.
    with pytest.raises(TypeError):
        utils.standard_deviation(["0", "1"])
def test_se_math():
    # SE of [0, 1]: sqrt(0.5) / sqrt(2) == 0.5 exactly.
    assert utils.standard_error([0, 1]) == 0.5
def test_se_at_least_length_three():
    # SE of [0, 1, 2]: 1 / sqrt(3).
    assert utils.standard_error([0, 1, 2]) == 0.5773502691896258
def test_se_neg_numbers():
    # Negative inputs are handled like any other numbers.
    assert utils.standard_error([-1, 0, 1]) == 0.5773502691896258
def test_se_same_element():
    # Zero spread -> standard error of 0.
    assert utils.standard_error([100, 100, 100]) == 0
def test_se_too_small():
    # Propagates standard_deviation's ZeroDivisionError for n < 2.
    with pytest.raises(ZeroDivisionError):
        utils.standard_error([0])
def test_se_is_list():
    # A non-sequence argument fails at len().
    with pytest.raises(TypeError):
        utils.standard_error(0)
def test_se_null():
    # The argument is mandatory.
    with pytest.raises(TypeError):
        utils.standard_error()
def test_se_string_convert():
    # Strings are not coerced to numbers; arithmetic on them raises.
    with pytest.raises(TypeError):
        utils.standard_error(["0", "1"])
| {"/py_524/tests/test_py_524.py": ["/py_524/__init__.py"]} |
77,924 | davidklaing/py_524 | refs/heads/master | /py_524/__init__.py | from py_524 import utils
| {"/py_524/tests/test_py_524.py": ["/py_524/__init__.py"]} |
def standard_deviation(x):
    """
    Calculate the sample standard deviation of *x*.

    Uses the n - 1 (Bessel-corrected) denominator, so at least two values
    are required.

    :param x: a sequence of at least two numbers
    :return: the sample standard deviation
    :raises ZeroDivisionError: if *x* has fewer than two elements
    :raises TypeError: if *x* is not a sequence of numbers

    >>> standard_deviation([1, 2, 3])
    1.0
    """
    # Fix: the original doctest called standard_error() and claimed a result
    # of 1, which is wrong for that function; it now demonstrates this one.
    n = len(x)
    mean = sum(x) / n
    ssq = sum((x_i - mean) ** 2 for x_i in x)
    standard_dev = (ssq / (n - 1)) ** 0.5
    return standard_dev


def standard_error(x):
    """
    Calculate the standard error of the mean of *x*.

    :param x: a sequence of at least two numbers
    :return: ``standard_deviation(x) / sqrt(len(x))``
    :raises ZeroDivisionError: if *x* has fewer than two elements
    :raises TypeError: if *x* is not a sequence of numbers

    >>> standard_error([1, 2, 3])
    0.5773502691896258
    """
    # Fix: the original doctest expected ...257, but the value this function
    # (and its own test suite) actually produces ends in ...258.
    return standard_deviation(x) / len(x) ** 0.5
| {"/py_524/tests/test_py_524.py": ["/py_524/__init__.py"]} |
78,035 | Briles/gruvbox | refs/heads/master | /main.py | #!/usr/bin/env python
# coding: utf-8
from .src import *
| {"/main.py": ["/src/__init__.py"], "/src/__init__.py": ["/src/documentation.py", "/src/support.py", "/src/gruvbox.py"]} |
78,036 | Briles/gruvbox | refs/heads/master | /src/documentation.py | #!/usr/bin/env python
# coding: utf-8
import sublime
import sublime_plugin
import webbrowser
PACKAGE_NAME = 'gruvbox'
def status_msg(msg):
    """Display *msg* in the Sublime Text status bar, prefixed with the package name."""
    prefix = PACKAGE_NAME + ': '
    sublime.status_message(prefix + msg)
def plugin_loaded():
    # Sublime hook, called once the plugin host has loaded this package.
    # Import deferred: package_control may not be importable at module
    # import time -- presumably that is why it is inside the function; confirm.
    from package_control import events
    if events.install(PACKAGE_NAME):
        status_msg('Installed %s' % events.install(PACKAGE_NAME))
    elif events.post_upgrade(PACKAGE_NAME):
        status_msg('Upgraded to %s' % events.post_upgrade(PACKAGE_NAME))
def plugin_unloaded():
    # Sublime hook, called before the package is unloaded; reports whether an
    # upgrade or a removal is in progress via Package Control's event API.
    from package_control import events
    if events.pre_upgrade(PACKAGE_NAME):
        status_msg('Upgrading from %s' % events.pre_upgrade(PACKAGE_NAME))
    elif events.remove(PACKAGE_NAME):
        status_msg('Removing %s' % events.remove(PACKAGE_NAME))
class GruvboxChangelog(sublime_plugin.TextCommand):
    # Render the package CHANGELOG.md in a new read-only scratch view,
    # using mdpopups to turn the markdown into a phantom.

    def run(self, edit):
        import mdpopups
        md = sublime.load_resource('Packages/' + PACKAGE_NAME + '/CHANGELOG.md')
        v = sublime.active_window().new_file()
        v.set_name(PACKAGE_NAME + ': CHANGELOG')
        v.settings().set('gutter', False)
        mdpopups.add_phantom(v, 'changelog', sublime.Region(0), md, sublime.LAYOUT_INLINE, wrapper_class='gruvbox-docs', on_navigate=self.on_navigate)
        # Read-only scratch view: no modified marker, no save prompt on close.
        v.set_read_only(True)
        v.set_scratch(True)

    def is_visible(self):
        # Hide the command entirely when mdpopups is missing, too old, or the
        # Sublime build predates the phantom API this relies on.
        try:
            import mdpopups
        except Exception as e:
            return False
        return (mdpopups.version() >= (1, 7, 3)) and (int(sublime.version()) >= 3118)

    def on_navigate(self, href):
        # Links inside the rendered changelog open in the default browser.
        webbrowser.open_new_tab(href)
class GruvboxReadme(sublime_plugin.TextCommand):
    """Open the gruvbox README on GitHub in the default browser."""

    def run(self, edit):
        readme_url = 'https://github.com/Briles/gruvbox#readme'
        webbrowser.open_new_tab(readme_url)
| {"/main.py": ["/src/__init__.py"], "/src/__init__.py": ["/src/documentation.py", "/src/support.py", "/src/gruvbox.py"]} |
78,037 | Briles/gruvbox | refs/heads/master | /src/support.py | #!/usr/bin/env python
# coding: utf-8
import sublime
import sublime_plugin
import json
import webbrowser
PACKAGE_NAME = 'gruvbox'
PACKAGE_VERSION = None
def format_version(module, attr, call=False):
    """Return *module*'s version as a string.

    Reads ``getattr(module, attr)`` (calling it first when *call* is true);
    tuple/list versions are joined with dots.  Any failure is printed and a
    placeholder message is returned instead.
    """
    fallback = 'Version could not be acquired!'
    try:
        raw = getattr(module, attr)
        version = raw() if call else raw
    except Exception as e:
        print(e)
        version = fallback
    if isinstance(version, str):
        return version
    return '.'.join(str(part) for part in version)
def get_support_info():
    # Build a markdown-formatted environment report (package, Sublime build,
    # dependency versions) for pasting into a GitHub issue.
    pc_settings = sublime.load_settings('Package Control.sublime-settings')
    # String, not bool: the value is interpolated directly into the report.
    is_installed_by_pc = str(PACKAGE_NAME in set(pc_settings.get('installed_packages', [])))
    info = {}
    info['channel'] = sublime.channel()
    info['version'] = sublime.version()
    info['platform'] = sublime.platform()
    info['arch'] = sublime.arch()
    info['package_name'] = PACKAGE_NAME
    info['package_version'] = PACKAGE_VERSION
    info['pc_install'] = is_installed_by_pc
    # Each dependency is optional; a missing import degrades to a placeholder.
    try:
        import mdpopups
        info['mdpopups_version'] = format_version(mdpopups, 'version', call=True)
    except Exception:
        info['mdpopups_version'] = 'Version could not be acquired!'
    try:
        import markdown
        info['markdown_version'] = format_version(markdown, 'version')
    except Exception:
        info['markdown_version'] = 'Version could not be acquired!'
    try:
        import jinja2
        info['jinja_version'] = format_version(jinja2, '__version__')
    except Exception:
        info['jinja_version'] = 'Version could not be acquired!'
    try:
        import pygments
        info['pygments_version'] = format_version(pygments, '__version__')
    except Exception:
        info['pygments_version'] = 'Version could not be acquired!'
    return '''%(package_name)s:\n\n* version: %(package_version)s\n* installed via Package Control: %(pc_install)s\n\nSublime Text:\n\n* channel: %(channel)s\n* version: %(version)s\n* platform: %(platform)s\n* architecture: %(arch)s\n\nDependency versions:\n\n* mdpopups: %(mdpopups_version)s\n* markdown: %(markdown_version)s\n* pygments: %(pygments_version)s\n* jinja2: %(jinja_version)s''' % info
def plugin_loaded():
    # Sublime hook: read this package's package.json once at load time and
    # cache the version for get_support_info().
    pkg = json.loads(sublime.load_resource("Packages/" + PACKAGE_NAME + "/package.json"))
    global PACKAGE_VERSION
    PACKAGE_VERSION = pkg["version"]
class GruvboxIssues(sublime_plugin.TextCommand):
    # After user confirmation, copy the environment report to the clipboard
    # and open the GitHub issue tracker.

    def run(self, edit):
        if sublime.ok_cancel_dialog('Override current clipboard with support info and open browser to report issue?'):
            sublime.set_clipboard(get_support_info())
            webbrowser.open_new_tab('https://github.com/Briles/gruvbox/issues')
class GruvboxContributing(sublime_plugin.TextCommand):
    # Render the package CONTRIBUTING.md in a new read-only scratch view,
    # using mdpopups to turn the markdown into a phantom.

    def run(self, edit):
        import mdpopups
        md = sublime.load_resource('Packages/' + PACKAGE_NAME + '/CONTRIBUTING.md')
        v = sublime.active_window().new_file()
        v.set_name(PACKAGE_NAME + ': CONTRIBUTING')
        v.settings().set('gutter', False)
        mdpopups.add_phantom(v, 'contributing', sublime.Region(0), md, sublime.LAYOUT_INLINE, wrapper_class='gruvbox-docs', on_navigate=self.on_navigate)
        # Read-only scratch view: no modified marker, no save prompt on close.
        v.set_read_only(True)
        v.set_scratch(True)

    def is_visible(self):
        # Hide the command entirely when mdpopups is missing, too old, or the
        # Sublime build predates the phantom API this relies on.
        try:
            import mdpopups
        except Exception as e:
            return False
        return (mdpopups.version() >= (1, 7, 3)) and (int(sublime.version()) >= 3118)

    def on_navigate(self, href):
        # Links inside the rendered document open in the default browser.
        webbrowser.open_new_tab(href)
| {"/main.py": ["/src/__init__.py"], "/src/__init__.py": ["/src/documentation.py", "/src/support.py", "/src/gruvbox.py"]} |
78,038 | Briles/gruvbox | refs/heads/master | /src/gruvbox.py | import math
import sublime
import sublime_plugin
class GruvboxSelect(sublime_plugin.TextCommand):
    """Quick-panel chooser for the gruvbox color-scheme variants.

    Lists every gruvbox ``*.sublime-color-scheme``, live-previews the
    highlighted entry, and persists the selection (plus the gruvbox theme)
    to the user's Preferences on confirmation.
    """

    def run(self, action):
        color_schemes = sublime.find_resources("gruvbox*.sublime-color-scheme")
        color_themes = sublime.find_resources("gruvbox.sublime-theme")
        temp_schemes = []
        self.themes = []
        self.schemes = []
        for scheme in color_schemes:
            if 'Packages/gruvbox/' in scheme:
                # Strip the 'Packages/gruvbox/' prefix (17 chars) and the
                # '.sublime-color-scheme' suffix (21 chars).
                temp_schemes.append(scheme[17:-21])
        # Swap each adjacent pair so the variants alternate in the panel.
        for i in range(len(temp_schemes)):
            if (i % 2) == 0:
                self.schemes.insert(i + 1, temp_schemes[i])
            else:
                self.schemes.insert(i - 1, temp_schemes[i])
        for theme in color_themes:
            if 'Packages/gruvbox/' in theme:
                self.themes.append(theme[17:])
        self.show_panel()

    def show_panel(self):
        self.view.window().show_quick_panel(self.schemes, self.on_done, on_highlight=self.on_highlighted)

    def on_done(self, index):
        # Fix: show_quick_panel invokes on_done with -1 when the panel is
        # cancelled (Esc); the original then applied *and saved* schemes[-1],
        # silently switching the user to the last scheme on cancel.
        if index == -1:
            return
        self.set_scheme('Packages/gruvbox/' + self.schemes[index] + '.sublime-color-scheme')
        self.set_theme(self.themes[0])
        self.save_settings(self.schemes[index])

    def on_highlighted(self, index):
        # Live preview while the user moves through the panel; guard against
        # a -1 index for the same reason as on_done.
        if index == -1:
            return
        self.set_scheme('Packages/gruvbox/' + self.schemes[index] + '.sublime-color-scheme')
        self.set_theme(self.themes[0])

    def set_scheme(self, scheme):
        self.get_settings().set('color_scheme', scheme)

    def set_theme(self, theme):
        self.get_settings().set('theme', theme)

    def get_settings(self):
        return sublime.load_settings('Preferences.sublime-settings')

    def save_settings(self, theme):
        # Persist the in-memory preference changes and announce the choice.
        sublime.save_settings('Preferences.sublime-settings')
        sublime.status_message('gruvbox: ' + theme)
        print('')
        print('[gruvbox] ' + theme)
        print('')
| {"/main.py": ["/src/__init__.py"], "/src/__init__.py": ["/src/documentation.py", "/src/support.py", "/src/gruvbox.py"]} |
78,039 | Briles/gruvbox | refs/heads/master | /color_scheme_tests/dark_medium/color_scheme_test.py | # COLOR SCHEME TEST "gruvbox/gruvbox (Dark) (Medium).sublime-color-scheme" "Python" # flake8: noqa
# This indented comment is to the preceding whitespace.
# ^ fg=#928374 fs=italic
# ^^^^ fg=#928374 fs=italic
# ^^^^^^^^ fg=#928374 fs=italic
# ^^^^^^^ fg=#928374 fs=italic
# ^^ fg=#928374 fs=italic
# ^^ fg=#928374 fs=italic
# ^^^ fg=#928374 fs=italic
# ^^^^^^^^^ fg=#928374 fs=italic
# ^^^^^^^^^^^ fg=#928374 fs=italic
import os
# ^^^^ fg=#fb4934 fs=
# ^^ fg=#ebdbb2 fs=
import path from os
# ^^^^ fg=#fb4934 fs=
# ^^^^ fg=#ebdbb2 fs=
# ^^^^ fg=#ebdbb2 fs=
# ^^ fg=#ebdbb2 fs=
__all__
# ^^^^^ fg=#fabd2f fs=
__file__
# ^^^^^^ fg=#fabd2f fs=
__missing__
# ^^^^^^^^^ fg=#8ec07c fs=
__bool__
# ^^^^^^ fg=#8ec07c fs=
__debug__
# ^^^^^^^ fg=#d3869b fs=
abc = 'x'
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
BC = 'x'
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
x = ABC
# ^ fg=#8ec07c fs=
# ^^^ fg=#fabd2f fs=
x = "_\x00_\xaa_\'_%s_"
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^^^ fg=#fb4934 fs=
# ^ fg=#b8bb26 fs=
# ^^^^ fg=#fb4934 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#fb4934 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#8ec07c fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
x = '_\m_\\m_'
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 bg=#fb4934 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#fb4934 fs=
# ^^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
x = b'x'
# ^ fg=#8ec07c fs=
# ^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
'ab'.upper()
# ^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
# ^^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
x = '|'.join(sorted(x))
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
# ^^^^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^^^^^^ fg=#8ec07c fs=
# ^^^^ fg=#ebdbb2 fs=
x = f"{x}"
# ^ fg=#8ec07c fs=
# ^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#83a598 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
def x():
# ^ fg=#8ec07c fs=
# ^ fg=#b8bb26 fs=
# ^^^ fg=#ebdbb2 fs=
pass
# ^^^^ fg=#fb4934 fs=
def x():
"""x"""
# ^^^^^^^ fg=#928374 fs=italic
pass
def x():
"""
# ^^^ fg=#928374 fs=italic
x
# ^ fg=#928374 fs=italic
"""
# ^^^ fg=#928374 fs=italic
# pass
def x():
# ^ fg=#8ec07c fs=
# ^ fg=#b8bb26 fs=
# ^^^ fg=#ebdbb2 fs=
abc = 'x'
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
call(x, 'y', True, False)
# ^^^^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
# ^^^^ fg=#d3869b fs=
# ^ fg=#ebdbb2 fs=
# ^^^^^ fg=#d3869b fs=
# ^ fg=#ebdbb2 fs=
call(x=y)
# ^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
if isinstance(var, list):
# ^^ fg=#fb4934 fs=
# ^^^^^^^^^^ fg=#8ec07c fs=
# ^^^^^ fg=#ebdbb2 fs=
# ^^^^ fg=#fabd2f fs=
# ^^ fg=#ebdbb2 fs=
arr = []
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
arr.append('x')
# ^^^^ fg=#ebdbb2 fs=
# ^^^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
arr.sort()
# ^^^^ fg=#ebdbb2 fs=
# ^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
if len(x):
# ^^ fg=#fb4934 fs=
# ^^^ fg=#8ec07c fs=
# ^^^^ fg=#ebdbb2 fs=
print('Hi')
# ^^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
fmt = 'x={}'.format(s['y'])
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^^ fg=#b8bb26 fs=
# ^^^^ fg=#ebdbb2 fs=
# ^^^^^^ fg=#8ec07c fs=
# ^^^^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^^ fg=#ebdbb2 fs=
x = u'x%s' % y
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
x = "x {y} z".format(y=z)
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
# ^^^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
x = re.match('^.+\\.x$')
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
# ^^^^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^^^ fg=#b8bb26 fs=
# ^^ fg=#fb4934 fs=
# ^^^ fg=#b8bb26 fs=
# ^^ fg=#ebdbb2 fs=
@requires_x
# ^^^^^^^^^ fg=#83a598 fs=
def f_name(arg1='', arg2=0):
# ^ fg=#8ec07c fs=
# ^^^^^^ fg=#b8bb26 fs=
# ^^^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
# ^^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^ fg=#d3869b fs=
# ^^ fg=#ebdbb2 fs=
if a > b: # x
# ^^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^ fg=#928374 fs=italic
# ^ fg=#928374 fs=italic
print 'a\'b'
# ^^^^^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^^ fg=#fb4934 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
abc = d[0]
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
# ^ fg=#d3869b fs=
# ^ fg=#ebdbb2 fs=
abc.d(e)
# ^^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
return None
# ^^^^^^ fg=#fb4934 fs=
# ^^^^ fg=#d3869b fs=
class X():
# ^^^ fg=#fb4934 fs=
# ^ fg=#fabd2f fs=
# ^^^ fg=#ebdbb2 fs=
pass
# ^^^^ fg=#fb4934 fs=
class X(Y):
# ^^^ fg=#fb4934 fs=
# ^ fg=#fabd2f fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#fabd2f fs=
# ^^ fg=#ebdbb2 fs=
def __init__(self):
# ^^^ fg=#8ec07c fs=
# ^^^^^^^^ fg=#8ec07c fs=
# ^^^^^^^ fg=#ebdbb2 fs=
self.x = 123
# ^^^^ fg=#d3869b fs=
# ^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^^ fg=#d3869b fs=
self.x()
# ^^^^ fg=#d3869b fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
self.x.y()
# ^^^^ fg=#d3869b fs=
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
abc(y)
# ^^^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
def __str__(self)
# ^^^ fg=#8ec07c fs=
# ^^^^^^^ fg=#8ec07c fs=
# ^^^^^^ fg=#ebdbb2 fs=
return 'x'
# ^^^^^^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^ fg=#b8bb26 fs=
# ^ fg=#ebdbb2 fs=
def z(self, a, b):
# ^^^ fg=#8ec07c fs=
# ^ fg=#b8bb26 fs=
# ^^^^^^ fg=#ebdbb2 fs=
# ^^ fg=#ebdbb2 fs=
# ^^^ fg=#ebdbb2 fs=
if a == b:
# ^^ fg=#fb4934 fs=
# ^ fg=#ebdbb2 fs=
# ^^ fg=#8ec07c fs=
# ^^ fg=#ebdbb2 fs=
if fcall(a, b):
# ^^ fg=#fb4934 fs=
# ^^^^^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
# ^^^ fg=#ebdbb2 fs=
return True
# ^^^^^^ fg=#fb4934 fs=
# ^^^^ fg=#d3869b fs=
return None
# ^^^^^^ fg=#fb4934 fs=
# ^^^^ fg=#d3869b fs=
@zyx
# ^ fg=#ebdbb2 fs=
# ^^^ fg=#83a598 fs=
def x(self):
pass
# ^^^^ fg=#fb4934 fs=
>>> msg = '''interpreter
# ^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
# ^ fg=#8ec07c fs=
# ^^^ fg=#ebdbb2 fs=
# ^^^^^^^^^^^ fg=#b8bb26 fs=
... prompt'''
# ^ fg=#b8bb26 fs=
# ^^^^^^ fg=#b8bb26 fs=
# ^^^ fg=#ebdbb2 fs=
| {"/main.py": ["/src/__init__.py"], "/src/__init__.py": ["/src/documentation.py", "/src/support.py", "/src/gruvbox.py"]} |
78,040 | Briles/gruvbox | refs/heads/master | /src/__init__.py | #!/usr/bin/env python
# coding: utf-8
from .documentation import *
from .support import *
from .gruvbox import *
| {"/main.py": ["/src/__init__.py"], "/src/__init__.py": ["/src/documentation.py", "/src/support.py", "/src/gruvbox.py"]} |
78,045 | nishntr/django-react-SA-app | refs/heads/main | /backend/django/sentiment/apps.py | from django.apps import AppConfig
from tensorflow.keras import models
from tensorflow.keras.models import model_from_json
from keras_bert import get_custom_objects
import pandas as pd
import pickle
import ktrain
class SentimentConfig(AppConfig):
    """Django app config that loads the BERT sentiment model once at startup.

    The model architecture (JSON), weights (H5) and the ktrain preprocessing
    pipeline are loaded at class-definition time so views can reuse
    ``SentimentConfig.loaded_model`` / ``SentimentConfig.features`` without
    reloading per request.

    NOTE(review): the 'sentiment/model/...' paths are relative to the process
    working directory -- confirm the server is always started from the
    project root.
    """
    name = 'sentiment'

    # Fix: use context managers so both file handles are always closed
    # (the original never closed the pickle handle and closed the JSON
    # handle manually).
    with open('sentiment/model/tf_model.preproc', 'rb') as preproc_file:
        features = pickle.load(preproc_file)
    with open("sentiment/model/model.json", 'r') as json_file:
        loaded_model_json = json_file.read()
    loaded_model = model_from_json(loaded_model_json, custom_objects=get_custom_objects())
    loaded_model.load_weights("sentiment/model/model.h5")
| {"/backend/django/sentiment/views.py": ["/backend/django/sentiment/apps.py"], "/backend/django/sentiment/urls.py": ["/backend/django/sentiment/views.py"]} |
78,046 | nishntr/django-react-SA-app | refs/heads/main | /backend/django/sentiment/views.py | from django.shortcuts import render
from rest_framework import status
from rest_framework.decorators import api_view
from rest_framework.response import Response
from rest_framework.views import APIView
from .apps import SentimentConfig
class Sentiment_View(APIView):
    """POST endpoint: classify the submitted text as Positive or Negative."""

    def post(self, request, format=None):
        # The payload carries the text under key '0'.
        payload = request.data
        model = SentimentConfig.loaded_model
        features = SentimentConfig.features
        text = payload['0']
        # predict() returns one [negative, positive] score pair per input.
        scores = model.predict(features.preprocess([text]))
        negative_score = scores[0][0]
        positive_score = scores[0][1]
        label = "Negative" if negative_score > positive_score else "Positive"
        return Response({"res": label}, status=200)
78,047 | nishntr/django-react-SA-app | refs/heads/main | /backend/django/sentiment/urls.py | from django.urls import path
from .views import Sentiment_View
# Route POST /sentiment/ to the classifier endpoint.
urlpatterns = [
    path('sentiment/',Sentiment_View.as_view(),name='sentiment')
]
78,048 | Code-Institute-Submissions/carprowler | refs/heads/master | /cars/models.py | from django.db import models
import datetime
from django.utils import timezone
class Manufacturer(models.Model):
    # A car maker; Car rows reference it via a ForeignKey.
    name = models.CharField(max_length=100)

    def __str__(self):
        # Human-readable label used by the admin and template rendering.
        return self.name
class Car(models.Model):
    # A car listing; deleting the manufacturer cascades to its cars.
    manufacturer = models.ForeignKey(Manufacturer, on_delete=models.CASCADE)
    model = models.CharField(max_length=200)
    year = models.IntegerField(default=1990)
    price = models.IntegerField(default=0)

    def __str__(self):
        # Human-readable label used by the admin and template rendering.
        return self.model
| {"/cars/views.py": ["/cars/models.py"]} |
78,049 | Code-Institute-Submissions/carprowler | refs/heads/master | /carprowler/settings.py | import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to version control -- rotate it and
# load it from the environment before any deployment.
SECRET_KEY = '1$(46qw^uc2q&c)gad(*4^y)a8g2^dbr$%)nlvyf3jygfbv70('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    # - The Django admin system
    'django.contrib.admin',
    # - The authentication system
    'django.contrib.auth',
    # - Framework for content types
    'django.contrib.contenttypes',
    # - Session Framework
    'django.contrib.sessions',
    # - Message Framework
    'django.contrib.messages',
    # - Manages static files
    'django.contrib.staticfiles',
    # Project apps.
    'cars.apps.CarsConfig',
    'signup.apps.SignupConfig',
]
# NOTE(review): a leftover block of Django-tutorial shell instructions
# (Question/Choice examples) was removed here -- it did not describe any
# model in this project.
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'carprowler.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'carprowler.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
# Default SQLite3 backend; other engines (mysql/postgresql/oracle) would
# additionally need USER, PASSWORD and HOST.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
# - Change the time zone to yours using
# - https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
TIME_ZONE = 'America/New_York'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
# - Add a path for static files
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_STORAGE = 'whitenoise.django.GzipManifestStaticFilesStorage'
78,050 | Code-Institute-Submissions/carprowler | refs/heads/master | /cars/views.py | # 1 Create the cars app inside our project
# 1 python3 manage.py startapp cars
# 1 You can have multiple apps in your project
# 1 Now we will create a view
from django.http import HttpResponse
from .models import Car
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
from django.shortcuts import render, redirect, get_object_or_404
def index(request):
all_cars_list = Car.objects.all()
context = {
'all_cars_list': all_cars_list,
}
return render(request, 'cars/index.html', context)
def detail(request, car_id):
car = get_object_or_404(Car, pk=car_id)
return render(request, 'cars/detail.html', {'car': car})
| {"/cars/views.py": ["/cars/models.py"]} |
78,052 | gj686/finalmaster | refs/heads/master | /main.py | #Author Andrea Sessa, 2016
import os, logging
from telegram.ext import Updater, CommandHandler, Job
from twitter import *
from user import *
# Polling interval passed to the repeating Job, in seconds.
# NOTE(review): the original comment said "15 mins" but the value is 1 --
# confirm which was intended.
INTERVAL = 1 #15 mins
# SECURITY NOTE(review): all of the credentials below are committed to the
# repository; they should be revoked/rotated and loaded from environment
# variables instead.
# Telegram TOKEN
TOKEN = '695404392:AAHt5Th2xSiD-lGkiN4tOA9IcbN0xoicWqg'
# Twitter access data
# Consumer Key (API Key)
CONS_KEY = 'eB1aTOyqYKtvVQc2iVlyg1CL3'
# Consumer Secret (API Secret)
CONS_SECRET = 'hqEkZaJk0yittXWYkWa2Hx72YophngEL6z7nPWBFQGfEBuwxlv'
# Access Token
ACCESS_TOKEN = '941030573128040448-oR3K81rCUTO54ZZOg5Z5WA3SoTrYhEq'
# Access Token Secret
ACCESS_TOKEN_SECRET = 'PLG99Rq2dGgfegADHzFQrJo40nCqD9ah5ygEHWgdKhiWI'
# Enable logging
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Monitored users: default Twitter accounts watched from startup.
users = [User('atm_informa'), User('TRENORD_treVA')]
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update, job_queue):
    # /start handler: greet the user, list the currently monitored accounts,
    # and schedule the periodic tweet check.
    chat_id = update.message.chat_id
    bot.sendMessage(update.message.chat_id, text='Hi! Use /add [username] to monitor a new user')
    if len(users) != 0:
        bot.sendMessage(update.message.chat_id, text='Starting monitoring for: ')
        for u in users:
            bot.sendMessage(update.message.chat_id, text=u.name)
    # Poll every INTERVAL seconds; the chat id travels in job.context so
    # getLastTweets knows where to deliver new tweets.
    job = Job(getLastTweets, INTERVAL, repeat=True, context=chat_id)
    job_queue.put(job)
# Add a new twitter user to the monitored user list
def add(bot, update, args):
    """Handle ``/add [username]``: start monitoring a new Twitter account.

    Fix: with ``pass_args=True`` the handler receives only the words *after*
    the command, so '/add someuser' yields ``args == ['someuser']``.  The
    original read ``args[1]`` and raised IndexError on every valid call; the
    username is ``args[0]``.  A missing argument now produces a usage message
    instead of an exception.
    """
    chat_id = update.message.chat_id
    if not args:
        bot.sendMessage(chat_id, text='Usage: /add [username]')
        return
    users.append(User(args[0]))
def help_handler(bot, update):
    """Reply to /help with one message per available command."""
    chat_id = update.message.chat_id
    usage_lines = (
        'Use /start to start(or restart) the bot',
        'Use /add [username] to start monitoring a new user',
        'Use /help to get some help :)',
    )
    for line in usage_lines:
        bot.sendMessage(chat_id, text=line)
def error(bot, update, error):
    """Log dispatcher errors (registered via add_error_handler).

    Fix: ``Logger.warn`` is a deprecated alias of ``warning``; use
    ``warning`` with lazy %-style arguments so formatting only happens when
    the record is actually emitted.
    """
    logger.warning('Update "%s" caused error "%s"', update, error)
def getLastTweets(bot, job):
    # Periodic job: fetch each monitored account's timeline and forward any
    # tweets not seen before to the chat stored in job.context.
    # Log into twitter
    t = Twitter(auth=OAuth(ACCESS_TOKEN, ACCESS_TOKEN_SECRET, CONS_KEY, CONS_SECRET))
    for u in users:
        # Reversed so tweets are delivered oldest-first.
        tweets = list(reversed(t.statuses.user_timeline(screen_name=u.name)))
        for tweet in tweets:
            if not(tweet['id'] in u.last_tweets):
                bot.sendMessage(job.context, text=tweet['text'])
                # Remember the id; User.last_tweets is a bounded deque, so
                # very old ids eventually fall off.
                u.last_tweets.append(tweet['id'])
def startTelegramBot():
    # Wire up the command handlers and run the bot's polling loop until
    # the process is interrupted.
    updater = Updater(TOKEN)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", start, pass_job_queue=True))
    dp.add_handler(CommandHandler("add", add, pass_args=True))
    dp.add_handler(CommandHandler("help", help_handler))
    # log all errors
    dp.add_error_handler(error)
    # Start the Bot
    updater.start_polling()
    # Block until the you presses Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
def main():
    # Entry point: run the Telegram bot until interrupted.
    startTelegramBot()
if __name__ == '__main__':
main()
| {"/main.py": ["/user.py"]} |
78,053 | gj686/finalmaster | refs/heads/master | /user.py | #Author Andrea Sessa, 2016
from collections import deque
class User:
    """A monitored Twitter account plus the ids of its recently seen tweets."""

    def __init__(self, name):
        self.name = name
        # Bounded to the 20 most recent ids; older entries fall off the left.
        self.last_tweets = deque(maxlen=20)
| {"/main.py": ["/user.py"]} |
78,056 | arielespinosa/pronostico | refs/heads/master | /security/migrations/0001_initial.py | # Generated by Django 2.2.5 on 2020-06-24 14:57
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated by Django's makemigrations (initial migration for the
# security app: AppUser, ForecastCenter, AppUserContact).  Do not hand-edit
# applied migrations; create a new migration for schema changes.
class Migration(migrations.Migration):
    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='AppUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=30)),
                ('avatar', models.ImageField(null=True, upload_to='user_avatar')),
                ('lastname1', models.CharField(max_length=30)),
                ('lastname2', models.CharField(max_length=30)),
                ('ocupation', models.CharField(max_length=30)),
                ('category', models.CharField(choices=[('Dr.', 'Doctor'), ('Dra.', 'Doctora'), ('Msc.', 'Master en Ciencias'), ('Lic.', 'Licenciado'), ('Ing.', 'Ingeniero'), ('Téc.', 'Técnico')], max_length=50, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='ForecastCenter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=30, null=True)),
                ('latitud', models.FloatField(blank=True, null=True)),
                ('longitud', models.FloatField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'Centros',
            },
        ),
        migrations.CreateModel(
            name='AppUserContact',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('contact_type', models.CharField(choices=[('EMAIL', 'Correo electrónico'), ('PHONE', 'Teléfono'), ('CELLPHONE', 'Celular')], max_length=20, null=True)),
                ('contact', models.CharField(max_length=20, null=True)),
                ('appuser', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='security.AppUser')),
            ],
        ),
        migrations.AddField(
            model_name='appuser',
            name='forecast_center',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='security.ForecastCenter'),
        ),
        migrations.AddField(
            model_name='appuser',
            name='user',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,057 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/migrations/0006_auto_20200624_1124.py | # Generated by Django 2.2.5 on 2020-06-24 15:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: re-declares the `type_of_phenomena` choices.

    NOTE(review): this AlterField only re-orders the choices list — churn
    that comes from `Phenomena.TYPE_OF_PHENOMENA` being declared as a set
    literal on the model, whose iteration order varies between processes.
    """
    dependencies = [
        ('national_forecast_center', '0005_auto_20200624_1108'),
    ]
    operations = [
        migrations.AlterField(
            model_name='phenomena',
            name='type_of_phenomena',
            field=models.CharField(blank=True, choices=[('TT', 'Tormenta Tropical'), ('CT', 'Ciclón Tropical'), ('DT', 'Depresión Tropical')], max_length=255, null=True),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,058 | arielespinosa/pronostico | refs/heads/master | /security/urls.py | from django.urls import path
from django.contrib.auth import views as auth_views
from . import views, forms
#app_name = 'security'
# URL routes for the security (authentication/profile) app.
urlpatterns = [
    # Account creation and session management.
    path('signup/', views.signup_user_view, name='signup'),
    path('', views.AppLoginView.as_view(), name='login'),
    path('logout/', auth_views.LogoutView.as_view(template_name='logout.html'), name='logout'),
    # Password-reset flow (stock auth views with custom templates).
    path('password-reset/', auth_views.PasswordResetView.as_view(template_name='password_reset.html'), name='password_reset'),
    path('password-reset/done/', auth_views.PasswordResetDoneView.as_view(template_name='password_reset_done.html'), name='password_reset_done'),
    path('password-reset-confirm/<uidb64>/<token>/', auth_views.PasswordResetConfirmView.as_view(template_name='password_reset_confirm.html'), name='password_reset_confirm'),
    # User profile pages.
    path('profile/<int:id>', views.AppUserProfile.as_view(), name='user_profile_view'),
    path('update_appuser/<int:pk>', views.AppUserUpdateView.as_view(), name='update_appuser'),
    # NOTE(review): this route is named 'login2' but points at JoinFormView — confirm.
    path('join/', views.JoinFormView.as_view(), name='login2'),
    path('index/', views.index, name='index'),
]
78,059 | arielespinosa/pronostico | refs/heads/master | /security/models.py | from django.db import models
from django.contrib.auth.models import User
from django.urls import reverse
class ForecastCenter(models.Model):
    """A forecast center (office), with an optional geographic position."""
    name = models.CharField(max_length=30, null=True, blank=True)
    latitud = models.FloatField(null=True, blank=True)
    longitud = models.FloatField(null=True, blank=True)

    class Meta:
        verbose_name_plural = 'Centros'

    def __str__(self):
        # Fix: `name` is nullable; __str__ must always return a str
        # (returning None crashes the admin and any str() call).
        return self.name or ''
#--------------------------------------------------------------------------------------
class AppUser(models.Model):
    """Application user profile, linked one-to-one to Django's auth User."""

    # Scientific/academic title options for `category`.
    SCIENTIFIC_CATEGORY = [
        ('Dr.', 'Doctor'),
        ('Dra.', 'Doctora'),
        ('Msc.', 'Master en Ciencias'),
        ('Lic.', 'Licenciado'),
        ('Ing.', 'Ingeniero'),
        ('Téc.', 'Técnico'),
    ]

    user = models.OneToOneField(User, null=True, blank=True, on_delete=models.CASCADE)
    name = models.CharField(max_length=30)
    avatar = models.ImageField(upload_to='user_avatar', null=True)
    lastname1 = models.CharField(max_length=30)
    lastname2 = models.CharField(max_length=30)
    ocupation = models.CharField(max_length=30)
    category = models.CharField(max_length=50, choices=SCIENTIFIC_CATEGORY, null=True)
    forecast_center = models.ForeignKey(ForecastCenter, on_delete=models.CASCADE, blank=True, null=True)

    def __str__(self):
        return self.name

    def full_name(self):
        """Given name followed by both surnames."""
        return self.name + ' ' + self.lastname1 + ' ' + self.lastname2

    def sign_name(self):
        """Full name prefixed by the scientific category, when one is set.

        Fix: `category` is nullable; the original unconditionally did
        `self.category + ' ' + ...`, raising TypeError for users without one.
        """
        if self.category:
            return self.category + ' ' + self.full_name()
        return self.full_name()

    def get_absolute_url(self):
        return reverse("user_profile_view", kwargs={"id": self.id})
#--------------------------------------------------------------------------------------
class AppUserContact(models.Model):
    """A single contact entry (phone, cellphone or e-mail) for an AppUser."""
    # Fix: declared as a tuple (was a set literal). Set iteration order is
    # nondeterministic, which reorders `choices` between processes and
    # produces spurious AlterField migrations.
    CONTACT_CHOICES = (
        ('PHONE', 'Teléfono'),
        ('CELLPHONE', 'Celular'),
        ('EMAIL', 'Correo electrónico'),
    )
    appuser = models.ForeignKey(AppUser, null=True, blank=True, on_delete=models.CASCADE)
    contact_type = models.CharField(max_length=20, null=True, choices=CONTACT_CHOICES)
    contact = models.CharField(max_length=20, null=True)
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,060 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/views.py | # Python library
import json
# Django framework
from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, JsonResponse, HttpResponseRedirect
from django.views.generic.list import ListView
from django.views.generic.edit import FormView
from django.urls import reverse_lazy
from django.core.paginator import Paginator
from django.contrib.auth.models import User
# Thirds projects
# bootstrap_modal_forms
from bootstrap_modal_forms.generic import (BSModalLoginView,
BSModalCreateView,
BSModalUpdateView,
BSModalReadView,
BSModalDeleteView)
# Django-notifications-hq
from django.db.models.signals import post_save
from notifications.signals import notify
# CNP Project
# Security APP
from security.models import AppUser
# This App
from . import forms
from .mixins import BSModalAjaxFormMixin
from .models.documents import *
from .process_docx import handle_docx_file
from datetime import date
from django.utils.translation import activate, get_language
from django.utils.translation import ugettext
from cnp import settings
import os
app_name = 'national_forecast_center'
def forecast(request):
    """Render the landing page: a paginated list (10 per page) of every
    issued document of every type, plus the requesting user's five most
    recent unread notifications and an empty upload form.
    """
    # Gather every document type into one flat list, preserving the
    # original type order. (Removed a dead, commented-out i18n experiment.)
    document_models = (AE, NI, PT5, PTM, PTHOY, PTRD, PTT, DP10, PTTN, EGT, ACT)
    all_documents = [doc for model in document_models for doc in model.objects.all()]

    notifications = request.user.notifications.unread()[:5]

    paginator = Paginator(all_documents, 10)
    page = request.GET.get('page')

    context = {
        'documents_issues': None,
        'notifications': notifications,
        'form': forms.InputFileForm(),
    }
    # Only hand the template a page object when there is something to show.
    if paginator.count > 0:
        context['documents_issues'] = paginator.get_page(page)
    return render(request, 'forecast.html', context)
def notifications(request):
    """Placeholder endpoint: returns an empty response tagged with the
    non-standard 'cnp/reports' content type."""
    return HttpResponse(content_type="cnp/reports")
def reports(request):
    """Placeholder endpoint: returns an empty response tagged with the
    non-standard 'cnp/reports' content type."""
    return HttpResponse(content_type="cnp/reports")
def documents(request):
    """Placeholder endpoint: returns an empty response tagged with the
    non-standard 'cnp/reports' content type."""
    return HttpResponse(content_type="cnp/reports")
def upload_file(request):
    """Handle the document-upload form.

    On a valid POST, parse the uploaded .docx via handle_docx_file() and
    redirect home if parsing produced data; otherwise fall through and
    re-render the page (bound form with errors on an invalid/failed POST,
    fresh unbound form on GET).
    """
    if request.method == 'POST':
        form = forms.InputFileForm(request.POST, request.FILES)
        if form.is_valid():
            data = handle_docx_file(request)
            # Fix: removed leftover debug print(data).
            if data:
                return HttpResponseRedirect("/cnp")
    else:
        form = forms.InputFileForm()
    return render(request, 'forecast.html', {'form': form})
class UploadFileView(FormView):
    '''
    Uploads a file to the server: the form processes it (handle_file),
    the raw bytes are persisted under settings.MEDIA_FILES, and a
    notification is broadcast to every user.
    '''
    template_name = "upload_file.html"
    form_class = forms.FormUpload
    success_url = '/cnp'
    def get(self, request, *args, **kwargs):
        # NOTE(review): passes the form *class*, not an instance, to the
        # template — confirm the template instantiates/handles this.
        data = {'form': self.form_class}
        return render(request, self.template_name, data)
    def post(self, request, *args, **kwargs):
        form = forms.FormUpload(request.POST, request.FILES)
        if form.is_valid():
            if 'file' in request.FILES:
                file = request.FILES['file']
                # Let the form interpret the document, then persist the raw upload.
                form.handle_file(file, request.user)
                self.savefile(file)
                # Broadcast a notification to all users about the new document.
                center = self.request.user.appuser.forecast_center
                verb = '{} emitió un {}.'.format(center, form.filetype)
                notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
                return self.form_valid(form, **kwargs)
            else:
                # Valid form but no file part in the request.
                return self.form_invalid(form, **kwargs)
        else:
            return self.form_invalid(form, **kwargs)
    def savefile(self, file):
        # Write the uploaded file chunk-by-chunk into the media directory.
        with open(os.path.join(settings.MEDIA_FILES, file.name), 'wb+') as destination:
            for chunk in file.chunks():
                destination.write(chunk)
class NoticeListView(ListView):
    """List view exposing all AE and ACT documents to the forecast template.

    NOTE(review): no `model`/`queryset` is declared — Django's ListView
    raises ImproperlyConfigured without one when dispatched; confirm this
    view is actually routed/used.
    """
    template_name = 'forecast.html'
    def get_context_data(self, *args, **kwargs):
        context = super(NoticeListView, self).get_context_data(**kwargs)
        context['ae'] = AE.objects.all()
        context['act'] = ACT.objects.all()
        return context
class DocumentCreateView(BSModalAjaxFormMixin, BSModalCreateView):
    """Shared base for the per-document-type modal create views.

    Subclasses set template_name/form_class/success_* and typically add a
    post() that broadcasts a notification.
    """
    def form_valid(self, form):
        # Default the main author to the logged-in user when left unset.
        if form.instance.main_author is None:
            form.instance.main_author = self.request.user.appuser
        return super().form_valid(form)
    def get(self, request, *args, **kwargs):
        # Restrict author choices to colleagues from the requester's own
        # forecast center, excluding the requester themselves.
        form = self.get_form(self.form_class)
        form.fields['main_author'].queryset = AppUser.objects.filter(forecast_center=request.user.appuser.forecast_center).exclude(id=request.user.appuser.id)
        form.fields['authors'].queryset = form.fields['main_author'].queryset
        return render(request, self.template_name, {'form':form})
# ACT CRUD
class ACTCreateView(BSModalAjaxFormMixin, BSModalCreateView):
    """Modal create view for an ACT document."""
    template_name = 'additional/add_act.html'
    form_class = forms.FormACT
    success_message = 'El ACT se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def form_valid(self, form):
        # NOTE(review): sibling create views assign `main_author`; confirm
        # ACT really has an `author1` field.
        form.instance.author1 = self.request.user.appuser
        return super().form_valid(form)
    def get(self, request, *args, **kwargs):
        # Limit author choices to colleagues from the requester's center.
        form = self.get_form(self.form_class)
        form.fields['main_author'].queryset = AppUser.objects.filter(forecast_center=request.user.appuser.forecast_center).exclude(id=request.user.appuser.id)
        return render(request, self.template_name, {'form':form})
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un ACT.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class ACTReadView(BSModalReadView):
    """Modal read-only detail view for an ACT."""
    model = ACT
    template_name = 'additional/view_act.html'
class ACTUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing ACT."""
    model = ACT
    template_name = 'additional/update_act.html'
    form_class = forms.FormACT
    success_message = 'El ACT fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class ACTDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for an ACT."""
    model = ACT
    template_name = 'additional/delete_element.html'
    success_message = 'El ACT fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class ACTListView(ListView):
    """Plain list of all ACT documents."""
    model = ACT
    template_name = 'act_listview.html'
    context_object_name = 'act'
# EGT00 CRUD
class EGTCreateView(BSModalAjaxFormMixin, BSModalCreateView):
    """Modal create view for an EGT document."""
    template_name = 'additional/add_egt.html'
    form_class = forms.FormEGT
    success_message = 'El EGT se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def form_valid(self, form):
        form.instance.main_author = self.request.user.appuser
        return super().form_valid(form)
    def get(self, request, *args, **kwargs):
        # Author choices limited to the requester's own forecast center;
        # secondary authors additionally exclude the requester.
        form = self.get_form(self.form_class)
        form.fields['main_author'].queryset = AppUser.objects.filter(forecast_center=request.user.appuser.forecast_center)
        form.fields['authors'].queryset = AppUser.objects.filter(forecast_center=request.user.appuser.forecast_center).exclude(id=request.user.appuser.id)
        return render(request, self.template_name, {'form':form})
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un EGT.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user, action_object=form.instance)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class EGTReadView(BSModalReadView):
    """Modal read-only detail view for an EGT."""
    model = EGT
    template_name = 'additional/view_egt.html'
class EGTUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing EGT."""
    model = EGT
    template_name = 'additional/update_egt.html'
    form_class = forms.FormEGT
    success_message = 'El EGT fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class EGTDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for an EGT."""
    model = EGT
    template_name = 'additional/delete_element.html'
    success_message = 'El EGT fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class EGTListView(ListView):
    """Plain list of all EGT documents."""
    model = EGT
    template_name = 'egt00_listview.html'
    context_object_name = 'egt00'
# AE CRUD
class AECreateView(DocumentCreateView):
    """Modal create view for a special notice (AE)."""
    template_name = 'additional/add_special_notice.html'
    form_class = forms.FormAE
    success_message = 'El aviso especial se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un AE.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class AEReadView(BSModalReadView):
    """Modal read-only detail view for a special notice (AE)."""
    model = AE
    template_name = 'additional/view_special_notice.html'
class AEUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing special notice (AE)."""
    model = AE
    template_name = 'additional/update_special_notice.html'
    form_class = forms.FormAE
    success_message = 'El aviso especial fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class AEDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a special notice (AE)."""
    model = AE
    template_name = 'additional/delete_element.html'
    success_message = 'El aviso especial fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class AEListView(ListView):
    """Plain list of all special notices (AE)."""
    model = AE
    template_name = 'special_notice_listview.html'
    context_object_name = 'special_notice'
# NI CRUD
class NICreateView(DocumentCreateView):
    """Modal create view for a meteorological note (NI)."""
    template_name = 'additional/add_meteorological_notice.html'
    form_class = forms.FormNI
    success_message = 'La nota meteorológica se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = 'El {} emitió una NM.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class NIReadView(BSModalReadView):
    """Modal read-only detail view for a meteorological note (NI)."""
    model = NI
    template_name = 'additional/view_meteorological_notice.html'
class NIUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing meteorological note (NI)."""
    model = NI
    template_name = 'additional/update_meteorological_notice.html'
    form_class = forms.FormNI
    success_message = 'La nota meteorológica fue modificada satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class NIDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a meteorological note (NI)."""
    model = NI
    template_name = 'additional/delete_element.html'
    success_message = 'La nota meteorológica fue eliminada satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# PT5 CRUD
class PT5CreateView(DocumentCreateView):
    """Modal create view for a 5-day forecast (PT5)."""
    template_name = 'additional/add_pt5.html'
    form_class = forms.FormPT5
    success_message = 'El PT5 se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def get(self, request, *args, **kwargs):
        # Pre-fill days 1-4 from the previous PT5 (its days 2-5), i.e. shift
        # the forecast window forward one day.
        # Fix: QuerySet.last() returns None when no PT5 exists yet — the
        # original crashed with AttributeError on the very first PT5.
        last_pt5 = PT5.objects.last()
        _initial = {}
        if last_pt5 is not None:
            _initial = {
                'day1': last_pt5.day2,
                'day2': last_pt5.day3,
                'day3': last_pt5.day4,
                'day4': last_pt5.day5,
            }
        form = self.form_class(initial=_initial)
        # Limit author choices to colleagues from the requester's center.
        form.fields['main_author'].queryset = AppUser.objects.filter(forecast_center=request.user.appuser.forecast_center).exclude(id=request.user.appuser.id)
        return render(request, self.template_name, {'form':form})
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un PT5.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PT5ReadView(BSModalReadView):
    """Modal read-only detail view for a PT5."""
    model = PT5
    template_name = 'additional/view_pt5.html'
class PT5UpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing PT5."""
    model = PT5
    template_name = 'additional/update_pt5.html'
    form_class = forms.FormPT5
    success_message = 'El PT5 fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class PT5DeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a PT5."""
    model = PT5
    template_name = 'additional/delete_element.html'
    success_message = 'El PT5 fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# PTM CRUD
class PTMCreateView(DocumentCreateView):
    """Modal create view for a PTM document."""
    template_name = 'additional/add_ptm.html'
    form_class = forms.FormPTM
    success_message = 'El PTM se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un PTM.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PTMReadView(BSModalReadView):
    """Modal read-only detail view for a PTM."""
    model = PTM
    template_name = 'additional/view_ptm.html'
class PTMUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing PTM."""
    model = PTM
    template_name = 'additional/update_ptm.html'
    form_class = forms.FormPTM
    success_message = 'El PTM fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class PTMDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a PTM."""
    model = PTM
    template_name = 'additional/delete_element.html'
    success_message = 'El PTM fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# PTHOY CRUD
class PTHOYCreateView(DocumentCreateView):
    """Modal create view for a PTHOY document."""
    template_name = 'additional/add_pthoy.html'
    form_class = forms.FormPTHOY
    success_message = 'El PTHOY se emitió satisfactoriamente'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un PTHOY.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PTHOYReadView(BSModalReadView):
    """Modal read-only detail view for a PTHOY."""
    model = PTHOY
    template_name = 'additional/view_pthoy.html'
class PTHOYUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing PTHOY."""
    model = PTHOY
    template_name = 'additional/update_pthoy.html'
    form_class = forms.FormPTHOY
    success_message = 'El PTHOY fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class PTHOYDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a PTHOY."""
    model = PTHOY
    template_name = 'additional/delete_element.html'
    success_message = 'El PTHOY fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# PTRD CRUD
class PTRDCreateView(DocumentCreateView):
    """Modal create view for a PTRD document."""
    template_name = 'additional/add_ptrd.html'
    form_class = forms.FormPTRD
    success_message = 'El PTRD se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un PTRD.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PTRDReadView(BSModalReadView):
    """Modal read-only detail view for a PTRD."""
    model = PTRD
    template_name = 'additional/view_ptrd.html'
class PTRDUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing PTRD, restricted to its authors."""
    model = PTRD
    template_name = 'additional/update_ptrd.html'
    form_class = forms.FormPTRD
    success_message = 'El PTRD fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    error_message = 'No tiene permisos para modificar el PTRD.'
    def get(self, request, *args, **kwargs):
        # Permission gate: only the document's authors may open the edit form.
        # Fix: the original compared CLASS attributes (self.model.author1 /
        # self.model.main_author) — field descriptors, never equal to a user —
        # so the check could never pass. Compare against the fetched instance.
        self.object = self.get_object()
        form = self.get_form(self.form_class)
        if self.object.author1 == request.user.appuser or self.object.main_author == request.user.appuser:
            return render(request, self.template_name, {'form':form})
        else:
            return super().form_invalid(form)
class PTRDDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a PTRD."""
    model = PTRD
    template_name = 'additional/delete_element.html'
    success_message = 'El PTRD fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# PTT CRUD
class PTTCreateView(DocumentCreateView):
    """Modal create view for a PTT document."""
    template_name = 'additional/add_ptt.html'
    form_class = forms.FormPTT
    success_message = 'El PTT se emitió satisfactoriamente.'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un PTT.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PTTReadView(BSModalReadView):
    """Modal read-only detail view for a PTT."""
    model = PTT
    template_name = 'additional/view_ptt.html'
class PTTUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing PTT."""
    model = PTT
    template_name = 'additional/update_ptt.html'
    form_class = forms.FormPTT
    success_message = 'El PTT fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class PTTDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a PTT."""
    model = PTT
    template_name = 'additional/delete_element.html'
    success_message = 'El PTT fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# DP10 CRUD
class DP10CreateView(DocumentCreateView):
    """Modal create view for a DP10 (mid-range discussion) document."""
    template_name = 'additional/add_dp10.html'
    form_class = forms.FormDP10
    success_message = 'El DP10 se emitió satisfactoriamente'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un DP10.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class DP10ReadView(BSModalReadView):
    """Modal read-only detail view for a DP10."""
    model = DP10
    template_name = 'additional/view_dp10.html'
class DP10UpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing DP10."""
    model = DP10
    template_name = 'additional/update_dp10.html'
    form_class = forms.FormDP10
    success_message = 'El DP10 fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class DP10DeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a DP10."""
    model = DP10
    template_name = 'additional/delete_element.html'
    success_message = 'El DP10 fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
# PTTN CRUD
class PTTNCreateView(DocumentCreateView):
    """Modal create view for a PTTN document."""
    template_name = 'additional/add_pttn.html'
    form_class = forms.FormPTTN
    success_message = 'El PTTN se emitió satisfactoriamente'
    success_url = reverse_lazy('forecast')
    def post(self, request, *args, **kwargs):
        form = self.get_form(self.form_class)
        if form.is_valid():
            # Broadcast a notification to all users (sent before save).
            center = self.request.user.appuser.forecast_center
            verb = '{} emitió un PTTN.'.format(center)
            notify.send(self.request.user, recipient=User.objects.all(), verb=verb, target=self.request.user)
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
class PTTNReadView(BSModalReadView):
    """Modal read-only detail view for a PTTN."""
    model = PTTN
    template_name = 'additional/view_pttn.html'
class PTTNUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Modal edit view for an existing PTTN."""
    model = PTTN
    template_name = 'additional/update_pttn.html'
    form_class = forms.FormPTTN
    success_message = 'El PTTN fue modificado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
class PTTNDeleteView(BSModalAjaxFormMixin, BSModalDeleteView):
    """Modal confirm-and-delete view for a PTTN."""
    model = PTTN
    template_name = 'additional/delete_element.html'
    success_message = 'El PTTN fue eliminado satisfactoriamente.'
    success_url = reverse_lazy('forecast')
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,061 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/apps.py | from django.apps import AppConfig
class NationalForecastCenterConfig(AppConfig):
    """Django app configuration for the national_forecast_center app."""
    name = 'national_forecast_center'
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,062 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/admin.py | from django.contrib import admin
from .models.documents import *
# Register every forecast-document model (and Phenomena) with the
# default admin site, in the same order as before.
for _model in (
    Phenomena, AE, NI, PT5, PTM, PTHOY,
    PTRD, PTT, PTTN, EGT, DP10, ACT,
):
    admin.site.register(_model)
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,063 | arielespinosa/pronostico | refs/heads/master | /security/forms.py | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from django.contrib.auth import authenticate
from django.utils.translation import gettext_lazy as _
from bootstrap_modal_forms.mixins import PopRequestMixin, CreateUpdateAjaxMixin
from bootstrap_modal_forms.forms import BSModalForm
from .models import AppUser
class UserAuthenticationForm(AuthenticationForm):
    """Login form with Spanish error messages.

    After `authenticate()` rejects the credentials, distinguishes an
    inactive account from a plain bad username/password combination
    (non-existent users are reported as the latter).
    """
    username = forms.CharField(min_length=1, label='Usuario', widget=forms.TextInput())
    password = forms.CharField(min_length=1, label='Contraseña', widget=forms.PasswordInput(render_value=True))

    error_messages = {
        'invalid_login': _("No se reconoce la combinación de nombre de usuario y contraseña. "
                    "Note que ambos campos pueden ser sensibles a las mayúsculas."),
        'inactive': _("Su cuenta está inactiva. Póngase en contacto con el administrador para activarla."),
    }

    def clean(self):
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username is not None and password:
            self.user_cache = authenticate(self.request, username=username, password=password)
            if self.user_cache is None:
                # Credentials rejected: look the account up so an inactive
                # user gets the dedicated message.
                # Fix: narrowed the bare `except:` clauses and removed the
                # leftover debug print() calls of the original.
                try:
                    user_temp = User.objects.get(username=username)
                except (User.DoesNotExist, User.MultipleObjectsReturned):
                    user_temp = None
                if user_temp is not None and not user_temp.is_active:
                    raise forms.ValidationError(
                        self.error_messages['inactive'],
                        code='inactive',
                        params={'username': self.username_field.verbose_name},
                    )
                raise forms.ValidationError(
                    self.error_messages['invalid_login'],
                    code='invalid_login',
                    params={'username': self.username_field.verbose_name},
                )
        return self.cleaned_data
class UserRegistrationForm(UserCreationForm):
    """Sign-up form restricted to e-mail addresses of the insmet.cu domain."""
    username = forms.CharField(min_length=1, label='Nombre de usuario', widget=forms.TextInput(), error_messages={'unique': 'El usuario ya existe'})
    password1 = forms.CharField(min_length=1, label='Contraseña', widget=forms.PasswordInput(render_value=True))
    password2 = forms.CharField(min_length=2, label='Confirmar contraseña', widget=forms.PasswordInput(render_value=True))
    email = forms.EmailField(label='E-mail', widget=forms.TextInput())
    agree = forms.BooleanField(required=True)

    class Meta:
        model = User
        fields = ('username', 'password1', 'password2', 'email')

    def clean_password2(self):
        """Validate that both password entries match."""
        password1 = self.cleaned_data.get("password1")
        password2 = self.cleaned_data.get("password2")
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Las contraseñas no coinciden")
        return password2

    def clean_email(self):
        """Validate the e-mail domain.

        Fix: the original substring test (`'@insmet.cu' in address`) accepted
        spoofed domains such as 'x@insmet.cu.evil.com'; require the address
        to actually end with '@insmet.cu'.
        """
        email_address = self.cleaned_data['email']
        if not email_address.endswith('@insmet.cu'):
            raise forms.ValidationError('La dirección de correo debe ser del dominio insmet.cu')
        return email_address
class JoinForm(forms.Form):
    """Simple join/contact form capturing an e-mail address and a name."""
    email = forms.EmailField()
    name = forms.CharField(max_length=120)
class FormAppUser(BSModalForm):
    """Modal form exposing every AppUser field.

    NOTE(review): `fields = '__all__'` also exposes the `user` FK —
    confirm that is intended.
    """
    class Meta:
        model = AppUser
        fields = '__all__'
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,064 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/models/documents.py | from datetime import timedelta
from django.db import models
from security.models import AppUser
from django.utils import timezone
import pytz
# Phenomena
class Phenomena(models.Model):
    """Tropical weather phenomenon referenced by cyclone advisories (ACT)."""

    # Field.choices must be an ordered sequence of (value, label) pairs; the
    # original set literal gave a nondeterministic order in forms and admin.
    TYPE_OF_PHENOMENA = [
        ('DT', 'Depresión Tropical'),
        ('TT', 'Tormenta Tropical'),
        ('CT', 'Ciclón Tropical'),
    ]
    name = models.CharField(max_length=255, blank=True, null=True)
    type_of_phenomena = models.CharField(max_length=255, choices=TYPE_OF_PHENOMENA, blank=True, null=True)
# Document
class Document(models.Model):
    """Base class for every forecast product (AE, NI, PT5, ...).

    Child document types inherit from it via Django multi-table inheritance.
    """

    # When the user created the forecast record.
    creation_date = models.DateTimeField(default=timezone.now, blank=True, null=True)
    # The datetime the document refers to.
    emision_date = models.DateTimeField(default=timezone.now, blank=True, null=True)
    emision_date_utc = models.DateTimeField(default=timezone.now, blank=True, null=True)
    name = models.CharField(max_length=250, blank=True, null=True)
    title = models.CharField(max_length=250, blank=True, null=True)
    leyend = models.CharField(max_length=250, blank=True, null=True)  # TODO: purpose unclear — confirm usage
    content = models.TextField(blank=True, null=True)
    notes = models.TextField(blank=True, null=True)
    main_author = models.ForeignKey(AppUser, related_name='main_author', on_delete=models.CASCADE, blank=True, null=True)
    authors = models.ManyToManyField(AppUser, related_name='secondary_author', blank=True)

    class Meta:
        verbose_name_plural = 'Documentos'

    def __str__(self):
        return self.name

    def emision_date_in_utc(self):
        """Return emision_date converted to a fixed display timezone.

        NOTE(review): despite the name, this converts to 'America/Bogota'
        (UTC-5), not UTC — confirm whether the name or the zone is wrong.
        """
        #TZ_GMT0 = pytz.timezone('Etc/GMT-0')
        return self.emision_date.astimezone(pytz.timezone('America/Bogota'))
# DP10
class DP10(Document):
    """Medium-range discussion ('Discusión de Plazo Medio')."""

    code = models.CharField(default='FECU42 MUHV', max_length=20, blank=True, null=True)

    class Meta:
        verbose_name_plural = 'Discusión de Plazo Medio'

    def __str__(self):
        return 'Discusión de Plazo Medio'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'DP10'

    def vaild_timespace(self):  # (sic) name kept for existing callers
        """Validity window: from day 2 through day 11 after emission."""
        start = self.emision_date + timedelta(days=2)
        end = self.emision_date + timedelta(days=11)
        return {"initial": start, "end": end}
# PTTN
class PTTN(Document):
    """Afternoon/evening forecast ('Pronóstico del Tiempo para la Tarde y la Noche')."""

    code = models.CharField(default='FECU42 MUHV', max_length=20, blank=True, null=True)

    class Meta:
        verbose_name_plural = 'Pronóstico del Tiempo para la Tarde y la Noche'

    def typeof(self):
        # NOTE(review): unlike its sibling documents, PTTN defines no __str__.
        return 'PTTN'
# EGT00
class EGT(Document):
    """General weather state document ('Estado General del Tiempo')."""

    code = models.CharField(default='AXCU40 MUHV', max_length=20, blank=True, null=True)

    class Meta:
        verbose_name_plural = 'Estado General del Tiempo'

    def __str__(self):
        return 'Estado General del Tiempo'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'EGT'

    def vaild_timespace(self):  # (sic) name kept for existing callers
        """EGT documents carry no validity window."""
        return None
# ACT
class ACT(Document):
    """Tropical cyclone advisory ('Aviso de Ciclón Tropical')."""

    code = models.CharField(default='WOCU31 MUHV', max_length=255)
    # Advisory sequence number. The original bare models.AutoField() is
    # rejected by Django's system checks (fields.E100: AutoField must set
    # primary_key=True), and the implicit parent link to Document already
    # provides the pk — so this is a plain nullable integer, mirroring AE.no.
    no = models.IntegerField(blank=True, null=True)
    phenomena = models.ForeignKey(Phenomena, on_delete=models.CASCADE, blank=True, null=True)

    class Meta:
        verbose_name_plural = 'Avisos de Ciclones Tropicales'

    def __str__(self):
        return 'Aviso de Ciclón Tropical No. ' + str(self.pk)

    def typeof(self):
        return 'ACT'
# AE
class AE(Document):
    """Special advisory ('Aviso Especial')."""

    no = models.IntegerField(blank=True, null=True)
    code = models.CharField(default='FECU42 MUHV 121530', max_length=1000, blank=True, null=True)

    class Meta:
        verbose_name_plural = 'Avisos Especiales'

    def __str__(self):
        return f'Aviso Especial No. {self.no}'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'AE'
# NI
class NI(Document):
    """Informative note ('Nota Informativa')."""

    class Meta:
        verbose_name_plural = 'Notas Informativas'

    def __str__(self):
        return f'Nota Informativa No.{self.pk}'

    def typeof(self):
        # NOTE(review): siblings return a short tag ('AE', 'PT5'); NI returns
        # the full name — kept as-is since callers may match on it.
        return 'Nota Informativa'
# PT5
class PT5(Document):
    """Five-day forecast: one free-text field per day plus a synopsis."""

    sinopsis = models.CharField(max_length=250, blank=True, null=True)
    day1 = models.TextField(blank=True, null=True)
    day2 = models.TextField(blank=True, null=True)
    day3 = models.TextField(blank=True, null=True)
    day4 = models.TextField(blank=True, null=True)
    day5 = models.TextField(blank=True, null=True)

    class Meta:
        verbose_name_plural = 'PT5'

    def __str__(self):
        return f'PT5 No. {self.pk}'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'PT5'
# PTM
class PTM(Document):
    """Morning weather forecast document."""

    interest_aditional_info = models.TextField(blank=True, null=True)

    class Meta:
        verbose_name_plural = 'PTM'

    def __str__(self):
        return f'PTM No. {self.pk}'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'PTM'
# PTHOY
class PTHOY(Document):
    """Today's weather forecast document."""

    interest_aditional_info = models.TextField(blank=True, null=True)

    class Meta:
        verbose_name_plural = 'PTHOY'

    def __str__(self):
        return f'PTHOY {self.pk}'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'PTHOY'
# PTRD
class PTRD(Document):
    """Radio weather forecast document."""

    class Meta:
        verbose_name_plural = 'PTRD'

    def __str__(self):
        return f'PTRD No. {self.pk}'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'PTRD'
# PTT
class PTT(Document):
    """PTT forecast document."""

    class Meta:
        verbose_name_plural = 'PTT'

    def __str__(self):
        return f'PTT No. {self.pk}'

    def typeof(self):
        """Short document-type tag used by the upload workflow."""
        return 'PTT'
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,065 | arielespinosa/pronostico | refs/heads/master | /security/migrations/0009_auto_20200712_0832.py | # Generated by Django 2.2.5 on 2020-07-12 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: redefines the choices of
    # AppUserContact.contact_type. Do not edit applied migrations by hand.

    dependencies = [
        ('security', '0008_auto_20200712_0829'),
    ]

    operations = [
        migrations.AlterField(
            model_name='appusercontact',
            name='contact_type',
            field=models.CharField(choices=[('PHONE', 'Teléfono'), ('EMAIL', 'Correo electrónico'), ('CELLPHONE', 'Celular')], max_length=20, null=True),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,066 | arielespinosa/pronostico | refs/heads/master | /configuration/forms.py | from django.http import JsonResponse
from django.views.generic.edit import CreateView
from django.utils.translation import gettext_lazy as _
from django import forms
from django.contrib.auth.models import Group
from bootstrap_modal_forms.forms import BSModalForm
class FormGroup(BSModalForm):
    """Bootstrap-modal form exposing every field of Django's auth Group."""

    class Meta:
        model = Group
        fields = '__all__'
"""
labels = {
'title': _('Título'),
'content': _('Contenido'),
'notes': _('Notas'),
'author2': _('Segundo autor'),
}
help_texts = {
'title': _('El debe ser lo más describtivo posible'),
}
error_messages = {
'title': {
'max_length': _("This writer's name is too long."),
},
}
widgets = {
'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
'notes' : forms.Textarea(attrs={'cols': 80, 'rows': 5}),
}
"""
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,067 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/mixins.py | from django.http import JsonResponse
from time import sleep
class AjaxFormMixin(object):
    """Form-view mixin that answers AJAX requests with JSON payloads."""

    def form_invalid(self, form):
        """On AJAX, report the field errors as JSON (HTTP 400); otherwise
        fall back to the default invalid-form response."""
        fallback = super(AjaxFormMixin, self).form_invalid(form)
        if not self.request.is_ajax():
            return fallback
        return JsonResponse(form.errors, status=400)

    def form_valid(self, form):
        """On AJAX, save the form and answer with a status message;
        otherwise fall back to the default valid-form response."""
        fallback = super(AjaxFormMixin, self).form_valid(form)
        if not self.request.is_ajax():
            return fallback
        payload = {
            'message': "No se añadio el usuario"
        }
        if form.is_valid():
            form.save()
            payload = {
                'message': "Se añadio el usuario"
            }
        return JsonResponse(payload)
class BSModalAjaxFormMixin(object):
    """Mixin for bootstrap-modal form views that reply to AJAX requests with
    a JSON notification instead of the default redirect/render.

    Host views must define ``success_message`` and ``error_message``.
    """

    def get_data_as_json(self, form):
        """Expose the validated form data for JSON serialization."""
        return form.cleaned_data

    def form_invalid(self, form):
        """AJAX: answer with the configured error notification.

        Non-AJAX requests fall back to the normal invalid-form handling —
        the original returned an undefined local (`response`), which raised
        NameError.
        """
        if self.request.is_ajax():
            datos = {
                'title': "Notificación",
                'message': self.error_message,
            }
            return JsonResponse(datos)
        return super(BSModalAjaxFormMixin, self).form_invalid(form)

    def form_valid(self, form):
        """AJAX: answer with the configured success notification."""
        response = super(BSModalAjaxFormMixin, self).form_valid(form)
        if self.request.is_ajax():
            if form.is_valid():
                # NOTE(review): the unsaved instance returned by
                # save(commit=False) is discarded and author1 is assigned on
                # the *form*, not the instance — looks unfinished; confirm.
                form.save(commit=False)
                form.author1 = self.request.user.appuser
                datos = {
                    'title': "Notificación",
                    'message': self.success_message,
                }
                return JsonResponse(datos)
        else:
            return response
class DocumentAjaxFormMixin(object):
    """Mixin for document form views that reply to AJAX requests with a JSON
    notification. Host views must define ``success_message`` and
    ``error_message``.
    """

    def get_data_as_json(self, form):
        """Expose the validated form data for JSON serialization."""
        return form.cleaned_data

    def form_invalid(self, form):
        """AJAX: answer with the configured error notification.

        Non-AJAX requests fall back to the normal invalid-form handling —
        the original returned an undefined local (`response`), raising
        NameError.
        """
        if self.request.is_ajax():
            datos = {
                'title': "Notificación",
                'message': self.error_message,
            }
            return JsonResponse(datos)
        return super(DocumentAjaxFormMixin, self).form_invalid(form)

    def form_valid(self, form):
        """AJAX: answer with the configured success notification."""
        # The original called super(BSModalAjaxFormMixin, ...), which fails
        # with TypeError unless the view also inherits that mixin.
        response = super(DocumentAjaxFormMixin, self).form_valid(form)
        if self.request.is_ajax():
            if form.is_valid():
                # NOTE(review): the unsaved instance returned by
                # save(commit=False) is discarded and author1 is assigned on
                # the *form*, not the instance — looks unfinished; confirm.
                form.save(commit=False)
                form.author1 = self.request.user.appuser
                datos = {
                    'title': "Notificación",
                    'message': self.success_message,
                }
                return JsonResponse(datos)
        else:
            return response
78,068 | arielespinosa/pronostico | refs/heads/master | /security/views.py | from django.shortcuts import render, get_object_or_404, redirect
from django.views.generic.edit import CreateView
from django.views.generic import DetailView
from django.contrib import messages
from django.contrib.auth.views import LoginView
from . import forms
from .models import AppUser
from .mixins import AjaxFormMixin, BSModalAjaxFormMixin
from django.urls import reverse_lazy
from django.http import JsonResponse
from bootstrap_modal_forms.generic import (BSModalLoginView,
BSModalCreateView,
BSModalUpdateView,
BSModalReadView,
BSModalDeleteView)
from django.contrib.auth.models import User
import time
from django.contrib.auth.decorators import login_required
def index(request):
    """Render the site landing page."""
    return render(request, 'index.html')
def signup_user_view(request):
    """Register a new (initially inactive) user account.

    GET renders an empty registration form; POST validates the submission
    and, on success, saves the inactive user and redirects to the home page.
    """
    if request.method != 'POST':
        empty_form = forms.UserRegistrationForm()
        return render(request, 'signup.html', {'form': empty_form})
    form = forms.UserRegistrationForm(request.POST, instance=User(is_active=False))
    if form.is_valid():
        form.save()
        return redirect('/')
    # Re-render with the bound form so field errors are shown.
    return render(request, 'signup.html', {'form': form})
class AppUserProfile(DetailView):
    """Read-only profile page for an AppUser, looked up by the 'id' kwarg."""

    template_name = "user_profile.html"

    def get_object(self):
        # Renamed local: the original shadowed the builtin `id`.
        user_pk = self.kwargs.get("id")
        return get_object_or_404(AppUser, id=user_pk)
# -----------------------------------------
class AppUserUpdateView(BSModalAjaxFormMixin, BSModalUpdateView):
    """Bootstrap-modal update view for an AppUser's profile data."""

    model = AppUser
    template_name = 'additional/update_appuser.html'
    form_class = forms.FormAppUser
    # Message returned in the AJAX JSON notification on success.
    success_message = 'Su información personal fue modificada satisfactoriamente.'
    #success_url = reverse_lazy('forecast')
# -----------------------------------------
class AppLoginView(LoginView):
    """Login view using the project's custom authentication form."""

    template_name = 'login.html'
    authentication_form = forms.UserAuthenticationForm
# -----------------------------------------
class JoinFormView(AjaxFormMixin, CreateView):
    """AJAX-aware user creation view.

    NOTE(review): a ModelForm over User with a raw 'password' field stores
    the password unhashed — confirm this view is not used in production.
    """

    model = User
    fields = ['username', 'password']
    template_name = 'ajax.html'
    success_url = '/form-success/'
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,069 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/forms.py | from django.utils.translation import activate, gettext_lazy as _
from django import forms
from .models.documents import *
from security.models import AppUser
from django.db.models import Q
from bootstrap_modal_forms.mixins import PopRequestMixin, CreateUpdateAjaxMixin
from bootstrap_modal_forms.forms import BSModalForm
from docx import Document
from cnp import settings
from datetime import datetime, date
class FormSecundaryAuthors(forms.Form):
    # Placeholder form: 'authors' is presumably meant to become a
    # secondary-authors field — TODO confirm intent. Class name keeps the
    # original 'Secundary' spelling for compatibility with references.
    authors = None
class FormAE(BSModalForm):
    """Bootstrap-modal form for AE ('Aviso Especial') documents."""

    class Meta:
        model = AE
        fields = ['no','emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'no': _('No'),
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        help_texts = {
            'title': _('El título debe ser lo más describtivo posible'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormNI(BSModalForm):
    """Bootstrap-modal form for NI ('Nota Informativa') documents."""

    class Meta:
        model = NI
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        # NOTE(review): 'no' is not in `fields` (NI has no such field), so
        # Django ignores these two entries — leftover from a copied form?
        help_texts = {
            'no': _('El valor debe ser único'),
        }
        error_messages = {
            'no': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormPT5(BSModalForm):
    """Bootstrap-modal form for PT5 (five-day forecast) documents."""

    class Meta:
        model = PT5
        fields = ['title', 'sinopsis', 'content', 'day1', 'day2', 'day3', 'day4', 'day5', 'notes', 'main_author']
        labels = {
            'title': _('Título'),
            'sinopsis':_('Sinopsis'),
            'day1': _('Día 1'),
            'day2': _('Día 2'),
            'day3': _('Día 3'),
            'day4': _('Día 4'),
            'day5': _('Día 5'),
            'content': _('Contenido'),
            'notes': _('Notas'),
            'main_author': _('Author principal'),
        }
        help_texts = {
            'title': _('El valor debe ser único'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'day1': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
            'day2': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
            'day3': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
            'day4': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
            'day5': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
            'notes' : forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormPTM(BSModalForm):
    """Bootstrap-modal form for PTM (morning forecast) documents."""

    class Meta:
        model = PTM
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors', 'notes']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        # NOTE(review): 'no' is not in `fields`, so Django ignores these two
        # entries — leftover from a copied form?
        help_texts = {
            'no': _('El valor debe ser único'),
        }
        error_messages = {
            'no': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormPTHOY(BSModalForm):
    """Bootstrap-modal form for PTHOY (today's forecast) documents."""

    class Meta:
        model = PTHOY
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormPTRD(BSModalForm):
    """Bootstrap-modal form for PTRD (radio forecast) documents."""

    class Meta:
        model = PTRD
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormPTT(BSModalForm):
    """Bootstrap-modal form for PTT documents."""

    class Meta:
        model = PTT
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormDP10(BSModalForm):
    """Bootstrap-modal form for DP10 (medium-range discussion) documents."""

    class Meta:
        model = DP10
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormPTTN(BSModalForm):
    """Bootstrap-modal form for PTTN (afternoon/evening forecast) documents."""

    class Meta:
        model = PTTN
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class FormEGT(BSModalForm):
    """Bootstrap-modal form for EGT ('Estado General del Tiempo') documents."""

    class Meta:
        model = EGT
        exclude = ['creation_date']
        # NOTE(review): 'nombre' is not an EGT field (the model field is
        # 'name'), so that label entry is ignored by Django — confirm intent.
        labels = {
            'code': _('Código'),
            'emision_date_utc': _('Fecha en UTC'),
            'emision_date': _('Fecha'),
            'nombre': _('Nombre'),
            'title': _('Título'),
            'content': _('Contenido'),
            'notes': _('Notas'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
            'authors': {
                'error': _("Pepe no está."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }

    # Commented-out validator kept for reference (disabled in the original).
    """
    def clean_authors(self):
        authors = self.cleaned_data.get('authors')
        print(authors)
        if "Pepe" not in authors:
            raise forms.ValidationError("Pepe no está")
        return authors
    """
class FormACT(BSModalForm):
    """Bootstrap-modal form for ACT (tropical cyclone advisory) documents."""

    class Meta:
        model = ACT
        fields = ['emision_date', 'title', 'content', 'main_author', 'authors']
        labels = {
            'emision_date': _('Fecha'),
            'title': _('Título'),
            'content': _('Contenido'),
            'main_author': _('Autor principal'),
            'authors': _('Autores secundarios'),
        }
        error_messages = {
            'title': {
                'max_length': _("This writer's name is too long."),
            },
        }
        widgets = {
            'content': forms.Textarea(attrs={'cols': 80, 'rows': 5}),
        }
class InputFileForm(forms.Form):
    """Minimal single-file upload form."""

    file = forms.FileField()
class FormUpload(forms.Form):
    """Upload form that imports forecast .docx files into Document models.

    ``handle_file`` dispatches on the uploaded filename to the matching
    ``proccess_*`` parser (method-name typo kept for compatibility), builds
    the corresponding model instance and persists it together with its
    secondary authors. ``filetype`` records the ``typeof()`` tag of the
    document that was created.

    Fixes over the original: the identical duplicate definition of
    ``proccess_pth`` was removed; every parser is now called with ``user``
    (previously omitted for most document types, raising TypeError); EGT
    files instantiate the existing ``EGT`` model (``EGT00``/``EGT12`` were
    undefined names); unknown filenames raise ValueError instead of
    crashing with UnboundLocalError. The per-layout parsers share a
    ``_base_fields`` helper.
    """

    file = forms.FileField()
    # Set by handle_file() after a successful import.
    filetype = None

    def __init__(self, *args, **kwargs):
        super(FormUpload, self).__init__(*args, **kwargs)

    def datestring(self, dstring):
        """Parse a Spanish header line into a naive datetime.

        Expected shape: ``Fecha: <d> de <month> de <Y>. Hora: <h>:<mm> am/pm``
        with the month name in lowercase Spanish.
        """
        dstring = dstring.replace("Fecha: ", "")
        dstring = dstring.replace("Hora: ", "")
        dstring = dstring.split(".")
        h = dstring[1].strip()[:2]
        m = dstring[1].strip()[3:5]
        # Any 'a'/'A' in the hour part is read as AM; otherwise PM.
        med = "AM" if dstring[1].count("a") > 0 or dstring[1].count("A") > 0 else "PM"
        creation = dstring[0].split("de")
        months = {
            "enero": "01",
            "febrero": "02",
            "marzo": "03",
            "abril": "04",
            "mayo": "05",
            "junio": "06",
            "julio": "07",
            "agosto": "08",
            "septiembre": "09",
            "octubre": "10",
            "noviembre": "11",
            "diciembre": "12",
        }
        date = creation[0].strip().zfill(2) + months[creation[1].strip()] + creation[2].strip().zfill(2) + h.zfill(2) + m + med
        return datetime.strptime(date, "%d%m%Y%I%M%p")

    def _base_fields(self, paragraphs, user):
        """Extract the parts shared by every document layout.

        Returns ``(content, emision_date, authors)``: the body text
        (paragraphs 8 .. n-2), the emission datetime parsed from
        paragraph 4, and the AppUser list resolved from the '/'-separated
        authors line (last paragraph, entries shaped like 'X. Lastname').
        """
        content = ""
        for paragraph in paragraphs[8:len(paragraphs) - 1]:
            content += paragraph.text + "\n"
        emision_date = self.datestring(paragraphs[4].text)
        doc_authors = paragraphs[-1].text.split("/")
        authors = [
            self.find_userapp(author.split(".")[1].lstrip(), user.appuser.forecast_center)
            for author in doc_authors
        ]
        return content, emision_date, authors

    def handle_file(self, filename, user):
        """Parse *filename* (a .docx upload) and persist the matching document.

        Raises:
            ValueError: if the filename matches no known document type.
        """
        document = Document(filename)
        paragraphs = [paragraph for paragraph in document.paragraphs if paragraph.text != ""]
        name = str(filename)
        if "PTH" in name:
            data = self.proccess_pth(paragraphs, user)
            notice = PTHOY(
                emision_date=data["emision_date"],
                title=data["title"],
                content=data["content"],
                notes=data["notes"],
                main_author=data["main_author"],
            )
        elif "AE" in name:
            data = self.proccess_ae(paragraphs, user)
            notice = AE(
                no=data["no"],
                emision_date=data["emision_date"],
                title=data["title"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "ACT" in name:
            data = self.proccess_act(paragraphs, user)
            notice = ACT(
                emision_date=data["emision_date"],
                title=data["title"],
                phenomena=data["phenomena"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "DP10" in name:
            data = self.proccess_dp10(paragraphs, user)
            notice = DP10(
                emision_date=data["emision_date"],
                notes=data["notes"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "EGT00" in name:
            data = self.proccess_egt00(paragraphs, user)
            notice = EGT(
                emision_date=data["emision_date"],
                notes=data["notes"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "EGT12" in name:
            data = self.proccess_egt12(paragraphs, user)
            notice = EGT(
                emision_date=data["emision_date"],
                notes=data["notes"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "P5" in name:
            # TODO: proccess_p5 is not implemented and returns None, so this
            # branch still fails; kept to preserve the original control flow.
            data = self.proccess_p5(paragraphs, user)
            notice = PT5(
                emision_date=data["emision_date"],
                notes=data["notes"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "PTM" in name:
            data = self.proccess_ptm(paragraphs, user)
            notice = PTM(
                emision_date=data["emision_date"],
                title=data["title"],
                content=data["content"],
                notes=data["notes"],
                main_author=data["main_author"],
            )
        elif "PTRD" in name:
            data = self.proccess_ptrd(paragraphs, user)
            notice = PTRD(
                emision_date=data["emision_date"],
                title=data["title"],
                content=data["content"],
                notes=data["notes"],
                main_author=data["main_author"],
            )
        elif "PTT" in name and "PTTN" not in name:
            data = self.proccess_ptt(paragraphs, user)
            notice = PTT(
                emision_date=data["emision_date"],
                content=data["content"],
                main_author=data["main_author"],
            )
        elif "PTTN" in name:
            data = self.proccess_pttn(paragraphs, user)
            notice = PTTN(
                emision_date=data["emision_date"],
                title=data["title"],
                content=data["content"],
                notes=data["notes"],
                main_author=data["main_author"],
            )
        else:
            raise ValueError("Unknown document type for file: %s" % name)
        self.filetype = notice.typeof()
        # First save gives the instance a pk so the M2M authors can be set.
        notice.save(False)
        notice.authors.set(data["authors"])
        notice.save()

    def proccess_pth(self, paragraphs, user):
        """PTHOY layout: title in paragraph 6, notes in paragraph 5."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "title": paragraphs[6].text,
            "content": content,
            "notes": paragraphs[5].text,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_ae(self, paragraphs, user):
        """AE layout: advisory number in paragraph 5, title in paragraph 6."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "no": int(paragraphs[5].text.split(".")[-1]),
            "emision_date": emision_date,
            "title": paragraphs[6].text,
            "content": content,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_act(self, paragraphs, user):
        """ACT layout: title in paragraph 5; paragraph 6 names the cyclone."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        phenomena = Phenomena(
            name=paragraphs[6].text,
            type_of_phenomena="CT"
        )
        phenomena.save()
        return {
            "emision_date": emision_date,
            "title": paragraphs[5].text,
            "phenomena": phenomena,
            "content": content,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_dp10(self, paragraphs, user):
        """DP10 layout: notes in paragraph 5, no title."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "notes": paragraphs[5].text,
            "content": content,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_egt00(self, paragraphs, user):
        """00Z EGT layout: title in paragraph 3, notes in paragraph 5."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "title": paragraphs[3].text,
            "notes": paragraphs[5].text,
            "content": content,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_egt12(self, paragraphs, user):
        """12Z EGT layout: same shape as the 00Z layout."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "title": paragraphs[3].text,
            "notes": paragraphs[5].text,
            "content": content,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_p5(self, paragraphs, user):
        # TODO: PT5 parsing is not implemented yet.
        return None

    def proccess_ptm(self, paragraphs, user):
        """PTM layout: title in paragraph 6, notes in paragraph 5."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "title": paragraphs[6].text,
            "content": content,
            "notes": paragraphs[5].text,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_ptrd(self, paragraphs, user):
        """PTRD layout: title in paragraph 7, notes in paragraph 5."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "title": paragraphs[7].text,
            "content": content,
            "notes": paragraphs[5].text,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_ptt(self, paragraphs, user):
        """PTT layout: body, date and authors only."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "content": content,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def proccess_pttn(self, paragraphs, user):
        """PTTN layout: title in paragraph 7, notes in paragraph 5."""
        content, emision_date, authors = self._base_fields(paragraphs, user)
        return {
            "emision_date": emision_date,
            "title": paragraphs[7].text,
            "content": content,
            "notes": paragraphs[5].text,
            "main_author": authors[0],
            "authors": authors[1:],
        }

    def find_userapp(self, lastname, center):
        """Resolve an AppUser by either last name within a forecast center."""
        return AppUser.objects.get(
            Q(forecast_center__name=center),
            Q(lastname1=lastname) | Q(lastname2=lastname))
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,070 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/urls.py | from django.urls import path
from . import views
# URL routes for the national forecast center app.  Each document type
# (AE, NI, PT5, PTM, PTHOY, PTRD, PTT, DP10, PTTN, EGT, ACT) gets a full
# CRUD quadruple of class-based views: create_*, view_*, update_*, delete_*.
urlpatterns = [
    #path('', views.dashboard, name='dashboard'),
    path('', views.forecast, name='forecast'),
    path('reportes', views.reports, name='reports'),
    path('upload_file', views.upload_file, name='upload_file'),
    path('upload_docx_file', views.UploadFileView.as_view(), name='upload_docx_file'),

    # CRUD views
    # Create
    path('create_ae/', views.AECreateView.as_view(), name='create_ae'),
    path('create_ni/', views.NICreateView.as_view(), name='create_ni'),
    path('create_pt5/', views.PT5CreateView.as_view(), name='create_pt5'),
    path('create_ptm/', views.PTMCreateView.as_view(), name='create_ptm'),
    path('create_pthoy/', views.PTHOYCreateView.as_view(), name='create_pthoy'),
    path('create_ptrd/', views.PTRDCreateView.as_view(), name='create_ptrd'),
    path('create_ptt/', views.PTTCreateView.as_view(), name='create_ptt'),
    path('create_dp10/', views.DP10CreateView.as_view(), name='create_dp10'),
    path('create_pttn/', views.PTTNCreateView.as_view(), name='create_pttn'),
    path('create_egt/', views.EGTCreateView.as_view(), name='create_egt'),
    path('create_act/', views.ACTCreateView.as_view(), name='create_act'),

    # Read
    path('view_ae/<int:pk>', views.AEReadView.as_view(), name='view_ae'),
    path('view_ni/<int:pk>', views.NIReadView.as_view(), name='view_ni'),
    path('view_pt5/<int:pk>', views.PT5ReadView.as_view(), name='view_pt5'),
    path('view_ptm/<int:pk>', views.PTMReadView.as_view(), name='view_ptm'),
    path('view_pthoy/<int:pk>', views.PTHOYReadView.as_view(), name='view_pthoy'),
    path('view_ptrd/<int:pk>', views.PTRDReadView.as_view(), name='view_ptrd'),
    path('view_ptt/<int:pk>', views.PTTReadView.as_view(), name='view_ptt'),
    path('view_dp10/<int:pk>', views.DP10ReadView.as_view(), name='view_dp10'),
    path('view_pttn/<int:pk>', views.PTTNReadView.as_view(), name='view_pttn'),
    path('view_egt/<int:pk>', views.EGTReadView.as_view(), name='view_egt'),
    path('view_act/<int:pk>', views.ACTReadView.as_view(), name='view_act'),

    # Update
    path('update_ae/<int:pk>', views.AEUpdateView.as_view(), name='update_ae'),
    path('update_ni/<int:pk>', views.NIUpdateView.as_view(), name='update_ni'),
    path('update_pt5/<int:pk>', views.PT5UpdateView.as_view(), name='update_pt5'),
    path('update_ptm/<int:pk>', views.PTMUpdateView.as_view(), name='update_ptm'),
    path('update_pthoy/<int:pk>', views.PTHOYUpdateView.as_view(), name='update_pthoy'),
    path('update_ptrd/<int:pk>', views.PTRDUpdateView.as_view(), name='update_ptrd'),
    path('update_ptt/<int:pk>', views.PTTUpdateView.as_view(), name='update_ptt'),
    path('update_dp10/<int:pk>', views.DP10UpdateView.as_view(), name='update_dp10'),
    path('update_pttn/<int:pk>', views.PTTNUpdateView.as_view(), name='update_pttn'),
    path('update_egt/<int:pk>', views.EGTUpdateView.as_view(), name='update_egt'),
    path('update_act/<int:pk>', views.ACTUpdateView.as_view(), name='update_act'),

    # Delete
    path('delete_ae/<int:pk>', views.AEDeleteView.as_view(), name='delete_ae'),
    path('delete_ni/<int:pk>', views.NIDeleteView.as_view(), name='delete_ni'),
    path('delete_pt5/<int:pk>', views.PT5DeleteView.as_view(), name='delete_pt5'),
    path('delete_ptm/<int:pk>', views.PTMDeleteView.as_view(), name='delete_ptm'),
    path('delete_pthoy/<int:pk>', views.PTHOYDeleteView.as_view(), name='delete_pthoy'),
    path('delete_ptrd/<int:pk>', views.PTRDDeleteView.as_view(), name='delete_ptrd'),
    path('delete_ptt/<int:pk>', views.PTTDeleteView.as_view(), name='delete_ptt'),
    path('delete_dp10/<int:pk>', views.DP10DeleteView.as_view(), name='delete_dp10'),
    path('delete_pttn/<int:pk>', views.PTTNDeleteView.as_view(), name='delete_pttn'),
    path('delete_egt/<int:pk>', views.EGTDeleteView.as_view(), name='delete_egt'),
    path('delete_act/<int:pk>', views.ACTDeleteView.as_view(), name='delete_act'),
]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,071 | arielespinosa/pronostico | refs/heads/master | /configuration/views.py | from django.shortcuts import render
from django.urls import reverse_lazy
from django.contrib.auth.models import Group
from bootstrap_modal_forms.generic import (BSModalCreateView,
BSModalUpdateView,
BSModalReadView,
BSModalDeleteView)
from .mixins import BSModalAjaxFormMixin
from . import forms
app_name = 'configuration'
def configuration(request):
    """Render the configuration landing page (no extra context)."""
    return render(request, 'configuration_users.html', {})
def users(request):
    """Render the user-management page listing every auth Group."""
    context = {'groups': Group.objects.all()}
    return render(request, 'configuration_users.html', context)
class GroupCreateView(BSModalAjaxFormMixin, BSModalCreateView):
    """Bootstrap-modal view that creates a new auth Group."""
    template_name = 'additional/add_group.html'
    form_class = forms.FormGroup
    success_message = 'El grupo se creó satisfactoriamente.'
    success_url = reverse_lazy('configuration')

    def post(self, request, *args, **kwargs):
        """Validate the submitted form and dispatch to the usual
        form_valid/form_invalid handlers.

        Fix: removed a leftover debug ``print(request)`` that spammed the
        server log on every successful submission.
        """
        form = self.get_form(self.form_class)
        if form.is_valid():
            return self.form_valid(form)
        else:
            return self.form_invalid(form)
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,072 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/migrations/0001_initial.py | # Generated by Django 2.2.5 on 2020-06-24 14:57
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial schema for the national_forecast_center app:
    # a Document base model, a Phenomena model, and multi-table-inheritance
    # children (AE, DP10, EGT, NI, PT5, PTHOY, PTM, PTRD, PTT, PTTN, ACT).
    # Do not hand-edit once applied; later migrations (e.g. 0004) already
    # amend fields such as the authors ManyToManyField.

    initial = True

    dependencies = [
        ('security', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creation_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('emision_date', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('emision_date_utc', models.DateTimeField(blank=True, default=django.utils.timezone.now, null=True)),
                ('name', models.CharField(blank=True, max_length=250, null=True)),
                ('title', models.CharField(blank=True, max_length=250, null=True)),
                ('leyend', models.CharField(blank=True, max_length=250, null=True)),
                ('content', models.TextField(blank=True, null=True)),
                ('notes', models.TextField(blank=True, null=True)),
                ('authors', models.ManyToManyField(blank=True, null=True, related_name='secondary_author', to='security.AppUser')),
                ('main_author', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='main_author', to='security.AppUser')),
            ],
            options={
                'verbose_name_plural': 'Documentos',
            },
        ),
        migrations.CreateModel(
            name='Phenomena',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=255, null=True)),
                ('type_of_phenomena', models.CharField(blank=True, choices=[('TT', 'Tormenta Tropical'), ('DT', 'Depresión Tropical'), ('CT', 'Ciclón Tropical')], max_length=255, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='AE',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('no', models.IntegerField(blank=True, null=True)),
                ('code', models.CharField(blank=True, default='FECU42 MUHV 121530', max_length=1000, null=True)),
            ],
            options={
                'verbose_name_plural': 'Avisos Especiales',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='DP10',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('code', models.CharField(blank=True, default='FECU42 MUHV', max_length=1000, null=True)),
            ],
            options={
                'verbose_name_plural': 'Discusión de Plazo Medio',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='EGT',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('code', models.CharField(blank=True, default='AXCU40 MUHV', max_length=20, null=True)),
            ],
            options={
                'verbose_name_plural': 'Estado General del Tiempo',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='NI',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
            ],
            options={
                'verbose_name_plural': 'Notas Informativas',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='PT5',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('sinopsis', models.CharField(blank=True, max_length=250, null=True)),
                ('day1', models.TextField(blank=True, null=True)),
                ('day2', models.TextField(blank=True, null=True)),
                ('day3', models.TextField(blank=True, null=True)),
                ('day4', models.TextField(blank=True, null=True)),
                ('day5', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'PT5',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='PTHOY',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('interest_aditional_info', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'PTHOY',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='PTM',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('interest_aditional_info', models.TextField(blank=True, null=True)),
            ],
            options={
                'verbose_name_plural': 'PTM',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='PTRD',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
            ],
            options={
                'verbose_name_plural': 'PTRD',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='PTT',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
            ],
            options={
                'verbose_name_plural': 'PTT',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='PTTN',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
            ],
            options={
                'verbose_name_plural': 'PTTN',
            },
            bases=('national_forecast_center.document',),
        ),
        migrations.CreateModel(
            name='ACT',
            fields=[
                ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='national_forecast_center.Document')),
                ('phenomena', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='national_forecast_center.Phenomena')),
            ],
            options={
                'verbose_name_plural': 'Avisos de Ciclones Tropicales',
            },
            bases=('national_forecast_center.document',),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,073 | arielespinosa/pronostico | refs/heads/master | /test.py | from datetime import date
from django.utils.translation import activate
# NOTE(review): ugettext_lazy is deprecated (removed in Django 4.0);
# gettext_lazy is the modern spelling — confirm the Django version in use.
from django.utils.translation import ugettext_lazy as _

# Scratch script: checks that a lazy translation string can be formatted
# with the current month/year and printed under a switched locale.
today = date.today()
print(today.strftime("%B %Y"))
# Switching the active language requires configured Django settings.
activate('ru')
subject = _("Topics for {date}").format(date=today.strftime("%B %Y"))
print(subject)
78,074 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/migrations/0003_auto_20200624_1104.py | # Generated by Django 2.2.5 on 2020-06-24 15:04
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: shrinks DP10.code from max_length=1000 to 20.
    # Do not hand-edit once applied.

    dependencies = [
        ('national_forecast_center', '0002_auto_20200624_1057'),
    ]

    operations = [
        migrations.AlterField(
            model_name='dp10',
            name='code',
            field=models.CharField(blank=True, default='FECU42 MUHV', max_length=20, null=True),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,075 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/migrations/0004_auto_20200624_1106.py | # Generated by Django 2.2.5 on 2020-06-24 15:06
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: drops the ineffective null=True from the
    # Document.authors ManyToManyField (null has no effect on M2M fields).
    # Do not hand-edit once applied.

    dependencies = [
        ('national_forecast_center', '0003_auto_20200624_1104'),
    ]

    operations = [
        migrations.AlterField(
            model_name='document',
            name='authors',
            field=models.ManyToManyField(blank=True, related_name='secondary_author', to='security.AppUser'),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,076 | arielespinosa/pronostico | refs/heads/master | /security/admin.py | from django.contrib import admin
# Explicit imports instead of a wildcard: only these three models are
# registered, and named imports keep the module linter-checkable.
from .models import AppUser, AppUserContact, ForecastCenter

admin.site.register(ForecastCenter)
admin.site.register(AppUser)
admin.site.register(AppUserContact)
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,077 | arielespinosa/pronostico | refs/heads/master | /configuration/urls.py | from django.urls import path
from . import views
# Namespace for {% url 'configuration:...' %} reversing.
app_name = 'configuration'

# Routes for the configuration area: landing page, user management,
# and the bootstrap-modal group-creation view.
urlpatterns = [
    path('', views.configuration, name='configuration'),
    path('users', views.users, name='users'),
    path('create_group/', views.GroupCreateView.as_view(), name='create_group'),
]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,078 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/migrations/0008_auto_20200712_0832.py | # Generated by Django 2.2.5 on 2020-07-12 12:32
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: gives PTTN a verbose name and a 'code' field, and
    # re-records the Phenomena choices ordering. Do not hand-edit once applied.

    dependencies = [
        ('national_forecast_center', '0007_auto_20200712_0829'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='pttn',
            options={'verbose_name_plural': 'Pronóstico del Tiempo para la Tarde y la Noche'},
        ),
        migrations.AddField(
            model_name='pttn',
            name='code',
            field=models.CharField(blank=True, default='FECU42 MUHV', max_length=20, null=True),
        ),
        migrations.AlterField(
            model_name='phenomena',
            name='type_of_phenomena',
            field=models.CharField(blank=True, choices=[('TT', 'Tormenta Tropical'), ('CT', 'Ciclón Tropical'), ('DT', 'Depresión Tropical')], max_length=255, null=True),
        ),
    ]
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,079 | arielespinosa/pronostico | refs/heads/master | /national_forecast_center/process_docx.py | from docx import Document
from docx.shared import Inches
from django.db.models import Q
from .models.documents import *
from security.models import AppUser
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import activate
def handle_docx_file(request):
    """Dispatch an uploaded .docx forecast document to the matching parser.

    The document type is inferred from the uploaded file name.  Returns
    whatever the matching proccess_* helper returns, or None when the
    name matches no known document type.

    Fixes: the "PTTN" branch was unreachable because "PTT" (a substring
    of every PTTN name) was tested first, and the proccess_pttn call was
    commented out — PTTN uploads silently went to the PTT stub.  Longer
    codes are now tested before their prefixes, and debug prints removed.
    """
    filename = str(request.FILES['file'])
    user = request.user
    document = Document(request.FILES['file'])
    # The parsers index into this list, so drop empty paragraphs up front.
    paragraphs = [paragraph for paragraph in document.paragraphs if paragraph.text != ""]

    if "PTH" in filename:
        return proccess_pth(user, paragraphs)
    elif "ACT" in filename:
        return proccess_act(user, paragraphs)
    elif "DP10" in filename:
        return proccess_dp10(user, paragraphs)
    elif "EGT00" in filename:
        return proccess_egt00(user, paragraphs)
    elif "EGT12" in filename:
        return proccess_egt12(user, paragraphs)
    elif "P5" in filename:
        return proccess_p5(user, paragraphs)
    elif "PTM" in filename:
        return proccess_ptm(user, paragraphs)
    elif "PTRD" in filename:
        return proccess_ptrd(user, paragraphs)
    elif "PTTN" in filename:
        # Must precede the "PTT" test: "PTT" is a substring of "PTTN".
        return proccess_pttn(user, paragraphs)
    elif "PTT" in filename:
        return proccess_ptt(user, paragraphs)
    else:
        return None
def proccess_pth(user, paragraphs):
    """Parse a PTHOY docx and persist it as a PTHOY document.

    Assumed layout (per the indexing below): paragraph 2 ends with the
    forecast-center name, 5 holds the notes, 6 the title, 8..-2 the body
    text and the last paragraph the "Title. Lastname/Title. Lastname"
    signatures.  Returns True when accepted, None otherwise.
    """
    center = paragraphs[2].text.split(",")[-1].lstrip().replace(".", "")
    authors = paragraphs[-1].text.split("/")
    author1 = find_userapp(authors[0].split(".")[1].lstrip(), center)
    author2 = find_userapp(authors[1].split(".")[1].lstrip(), center)
    content = str()
    # Accept only when the uploader is one of the signing authors and both
    # belong to the center named in the header.
    # NOTE(review): find_userapp may return None, which would raise
    # AttributeError on the next line — confirm that is acceptable.
    if (author1.user == user or author2.user == user) and (author1.forecast_center.name == author2.forecast_center.name == center):
        for i in range(8, len(paragraphs)-1):
            content += paragraphs[i].text + "\n"
        data = {
            #"emision_date":paragraphs[4].text,
            "title":paragraphs[6].text,
            "content":content,
            "notes":paragraphs[5].text,
            "author1":author1,
            "author2":author2,
        }
        # NOTE(review): the initial migration declares main_author/authors
        # on Document, not author1/author2 — confirm PTHOY accepts these
        # keyword arguments against models/documents.py.
        pthoy = PTHOY(
            title=data["title"],
            content=data["content"],
            notes=data["notes"],
            author1=data["author1"],
            author2=data["author2"])
        pthoy.save()
        return True
    else:
        return None
def proccess_act(user, paragraphs):
    """Parse an ACT (tropical cyclone advisory) docx and persist it.

    Returns the parsed field dict.  Fix: removed an unreachable
    ``return None`` that followed ``return data``.

    NOTE(review): unlike the sibling parsers, the resolved authors are
    returned in the dict but never stored on the saved ACT, and the
    uploading user is not checked against the signatures — confirm
    whether that is intentional.
    """
    center = paragraphs[2].text.split(",")[-1].lstrip().replace(".", "")
    authors = paragraphs[-1].text.split("/")
    author1 = find_userapp(authors[0].split(".")[1].lstrip(), center)
    author2 = find_userapp(authors[1].split(".")[1].lstrip(), center)

    # Body text spans paragraphs 8 .. second-to-last.
    content = str()
    for i in range(8, len(paragraphs)-1):
        content += paragraphs[i].text + "\n"

    data = {
        #"emision_date":paragraphs[4].text,
        "title":paragraphs[6].text,
        "content":content,
        "notes":paragraphs[5].text,
        "author1":author1,
        "author2":author2,
    }
    act = ACT(
        title=data["title"],
        content=data["content"],
        notes=data["notes"])
    act.save()
    return data
def proccess_dp10(user, paragraphs):
    """Parse a DP10 (medium-range discussion) docx and persist it.

    Body text spans paragraphs 10..-2 (two more header paragraphs than
    the PTH layout).  Accepted only when the uploader is one of the two
    signing authors and both belong to the center named in the header.
    Returns True when accepted, None otherwise.
    """
    center = paragraphs[2].text.split(",")[-1].lstrip().replace(".", "")
    authors = paragraphs[-1].text.split("/")
    author1 = find_userapp(authors[0].split(".")[1].lstrip(), center)
    author2 = find_userapp(authors[1].split(".")[1].lstrip(), center)
    content = str()
    # NOTE(review): find_userapp may return None → AttributeError here.
    if (author1.user == user or author2.user == user) and (author1.forecast_center.name == author2.forecast_center.name == center):
        for i in range(10, len(paragraphs)-1):
            content += paragraphs[i].text + "\n"
        data = {
            #"emision_date":paragraphs[4].text,
            "title":paragraphs[6].text,
            "content":content,
            "notes":paragraphs[5].text,
            "author1":author1,
            "author2":author2,
        }
        # NOTE(review): the initial migration declares main_author/authors
        # on Document, not author1/author2 — confirm the model kwargs.
        dp10 = DP10(
            content=data["content"],
            notes=data["notes"],
            author1=data["author1"],
            author2=data["author2"])
        dp10.save()
        return True
    else:
        return None
def proccess_egt00(user, paragraphs):
    """Parse a 00Z Estado General del Tiempo docx and persist it.

    Accepted only when the uploader is one of the two signing authors and
    both belong to the center named in the header.  Returns True when
    accepted, None otherwise.

    Fixes: this function used to build and save a DP10 instance (a
    copy-paste from proccess_dp10); it now saves an EGT.  It also guards
    against unresolved authors instead of crashing with AttributeError.
    """
    center = paragraphs[2].text.split(",")[-1].lstrip().replace(".", "")
    authors = paragraphs[-1].text.split("/")
    author1 = find_userapp(authors[0].split(".")[1].lstrip(), center)
    author2 = find_userapp(authors[1].split(".")[1].lstrip(), center)
    if author1 is None or author2 is None:
        return None
    content = str()
    if (author1.user == user or author2.user == user) and (author1.forecast_center.name == author2.forecast_center.name == center):
        for i in range(10, len(paragraphs)-1):
            content += paragraphs[i].text + "\n"
        data = {
            #"emision_date":paragraphs[4].text,
            "title":paragraphs[6].text,
            "content":content,
            "notes":paragraphs[5].text,
            "author1":author1,
            "author2":author2,
        }
        # NOTE(review): author1/author2 kwargs follow the sibling parsers;
        # the initial migration declares main_author/authors — confirm.
        egt = EGT(
            content=data["content"],
            notes=data["notes"],
            author1=data["author1"],
            author2=data["author2"])
        egt.save()
        return True
    else:
        return None
def proccess_egt12(user, paragraphs):
    # TODO: parsing of the 12Z Estado General del Tiempo is not implemented.
    return None
def proccess_p5(user, paragraphs):
    # TODO: parsing of the five-day forecast (PT5) is not implemented.
    return None
def proccess_ptm(user, paragraphs):
    """Parse a PTM (tomorrow's forecast) docx and persist it.

    Same layout as proccess_pth (body in paragraphs 8..-2).  Accepted only
    when the uploader is one of the two signing authors and both belong to
    the center named in the header.  Returns True when accepted, None
    otherwise.
    """
    center = paragraphs[2].text.split(",")[-1].lstrip().replace(".", "")
    authors = paragraphs[-1].text.split("/")
    author1 = find_userapp(authors[0].split(".")[1].lstrip(), center)
    author2 = find_userapp(authors[1].split(".")[1].lstrip(), center)
    content = str()
    # NOTE(review): find_userapp may return None → AttributeError here.
    if (author1.user == user or author2.user == user) and (author1.forecast_center.name == author2.forecast_center.name == center):
        for i in range(8, len(paragraphs)-1):
            content += paragraphs[i].text + "\n"
        data = {
            #"emision_date":paragraphs[4].text,
            "title":paragraphs[6].text,
            "content":content,
            "notes":paragraphs[5].text,
            "author1":author1,
            "author2":author2,
        }
        # NOTE(review): author1/author2 kwargs vs migration's
        # main_author/authors — confirm against models/documents.py.
        ptm = PTM(
            title=data["title"],
            content=data["content"],
            notes=data["notes"],
            author1=data["author1"],
            author2=data["author2"])
        ptm.save()
        return True
    else:
        return None
def proccess_ptrd(user, paragraphs):
    # TODO: parsing of the PTRD document is not implemented.
    return None
def proccess_ptt(user, paragraphs):
    # TODO: parsing of the PTT document is not implemented.
    return None
def proccess_pttn(user, paragraphs):
    """Parse a PTTN (afternoon/evening forecast) docx and persist it.

    Body text spans paragraphs 10..-2.  Accepted only when the uploader is
    one of the two signing authors and both belong to the center named in
    the header.  Returns True when accepted, None otherwise.

    Fixes: removed leftover debug prints; unresolved authors now return
    None instead of crashing with AttributeError.
    """
    center = paragraphs[2].text.split(",")[-1].lstrip().replace(".", "")
    authors = paragraphs[-1].text.split("/")
    author1 = find_userapp(authors[0].split(".")[1].lstrip(), center)
    author2 = find_userapp(authors[1].split(".")[1].lstrip(), center)
    if author1 is None or author2 is None:
        return None
    content = str()
    if (author1.user == user or author2.user == user) and (author1.forecast_center.name == author2.forecast_center.name == center):
        for i in range(10, len(paragraphs)-1):
            content += paragraphs[i].text + "\n"
        data = {
            #"emision_date":paragraphs[4].text,
            "title":paragraphs[6].text,
            "content":content,
            "notes":paragraphs[5].text,
            "author1":author1,
            "author2":author2,
        }
        # NOTE(review): author1/author2 kwargs vs migration's
        # main_author/authors — confirm against models/documents.py.
        pttn = PTTN(
            content=data["content"],
            notes=data["notes"],
            author1=data["author1"],
            author2=data["author2"])
        pttn.save()
        return True
    else:
        return None
def find_userapp(lastname, center):
    """Return the AppUser at forecast center *center* whose first or second
    surname equals *lastname*, or None when the lookup fails.

    Fix: the bare ``except:`` caught everything (typos, KeyboardInterrupt,
    SystemExit); only the ORM lookup failures are swallowed now.
    """
    try:
        return AppUser.objects.get(
            Q(forecast_center__name=center),
            Q(lastname1=lastname) | Q(lastname2=lastname))
    except (AppUser.DoesNotExist, AppUser.MultipleObjectsReturned):
        return None
| {"/national_forecast_center/views.py": ["/security/models.py", "/national_forecast_center/mixins.py", "/national_forecast_center/models/documents.py", "/national_forecast_center/process_docx.py"], "/national_forecast_center/admin.py": ["/national_forecast_center/models/documents.py"], "/security/forms.py": ["/security/models.py"], "/national_forecast_center/models/documents.py": ["/security/models.py"], "/security/views.py": ["/security/models.py"], "/national_forecast_center/forms.py": ["/national_forecast_center/models/documents.py", "/security/models.py"], "/security/admin.py": ["/security/models.py"], "/national_forecast_center/process_docx.py": ["/national_forecast_center/models/documents.py", "/security/models.py"]} |
78,081 | vovka643/remember_it | refs/heads/master | /main.py | from MyBot import MyBot
from Translator import Translator
from User import User
from DataBase import DataBase
import const
import datetime
import traceback
import pymongo
def get_user(users, id):
    """Return the User with *id* from *users*; create, append and return
    a fresh one when no match exists."""
    existing = next((u for u in users if u.id == id), None)
    if existing is not None:
        return existing
    fresh = User(id)
    users.append(fresh)
    return fresh
def help_handler(user_id, bot):
    """Send the /help description of the bot to *user_id*."""
    bot.send_message(
        user_id,
        'Этот бот помогает запоминать английские слова, продбирая к ним словосочетание и повторяя их тебе. Просто введи незнакомое английское слово, остальное сделает этот бот. Переводчик - yandex translator',
    )
translator = Translator()
bot_token = const.bot_token #justReadBooktmp2_bot
bot = MyBot(bot_token)

# Persistent storage of user state.
db = DataBase()
# Full list of users restored from the data base.
users = db.get_all_users()

upd_offset = 0

# Outer loop restarts the poll loop after a fatal error, giving up after
# five crashes.
special_i = 0
while special_i < 5:
    special_i = special_i + 1
    try:
        # Wait for a first update to anchor the message-id/offset bookkeeping.
        updates = bot.get_updates(offset=upd_offset)
        while (len(updates)==0):
            updates = bot.get_updates(offset=upd_offset)
        update = updates[-1]
        last_message_id = update.message.message_id
        # NOTE(review): offset is decremented, not incremented, so already
        # seen updates are re-fetched and skipped via last_message_id.
        upd_offset = update.update_id - 1

        # Dispatcher: walk new messages newest-to-oldest until the
        # previously handled message is reached.
        while True:
            updates = bot.get_updates(offset=upd_offset)
            i = -1
            if (len(updates)>0):
                mes_id = updates[i].message.message_id
                if updates[i].message:
                    while ((mes_id != last_message_id) and (-i <= len(updates))):
                        user_id = updates[i].message.from_user.id
                        user = get_user(users, user_id)
                        mes = updates[i].message.text
                        if (mes == '/help'):
                            help_handler(user.id, bot)
                        else:
                            user.answer(mes, translator, bot)
                        #todo: logging
                        db.update(user)
                        i = i-1
                        if (-i <= len(updates)):
                            mes_id = updates[i].message.message_id
                    last_message_id = updates[-1].message.message_id
                    upd_offset = updates[-1].update_id - 1

            # Send any repetition questions that have come due.
            now = round((datetime.datetime.now() - datetime.datetime(1970,1,1)).total_seconds())
            for us in users:
                if now >= us.next_word['next_time'] and (not us.qflag) :
                    us.send_question(bot)
                    #todo: logging
                    # BUG FIX: persist the user just modified ("us"),
                    # not the stale/undefined "user" from the dispatch loop.
                    db.update(us)
    except Exception:
        # Top-level boundary: record the traceback and retry the loop.
        with open('error.log', 'w') as log:
            log.write('FATAL ERROR\n')
            # log.write(date_str())
            log.write(traceback.format_exc())
            log.write('end of error')
78,082 | vovka643/remember_it | refs/heads/master | /User.py | import numpy as np
import datetime
class User:
    """One bot user: vocabulary history, spaced-repetition schedule, and
    the state of the currently pending quiz question.

    Note: ``next_word`` aliases a dict inside ``schedule`` — the interval
    methods mutate it in place, so the ordering of calls matters.
    """
    def __init__(self, id = 0):
        # Repetition intervals in seconds; the final huge value (~100 years)
        # effectively retires a word.
        self.intervals = [10, 600, 18000, 86400, 432000, 2160000, 3153600000] # in seconds
        # self.intervals = [60, 60, 60, 60, 60, 3153600000] # in seconds
        # Sentinel entry keeps the schedule non-empty so next_word always exists.
        self.schedule = [{'word':'Hello', 'next_time': 3153600000+self.get_now(), 'interval_number':-1,'pair_transl':'Hello', 'pair': 'Hello there'} ] # 3153600000 seconds in 100 years
        self.next_word = self.schedule[0]
        self.id = id
        # word -> list of (unix_time, pair, pair_transl) tuples
        self.history = {}
        self.qflag = False #question flag: True while an answer is awaited
        self.correct_answer = -1   # index of the right option in current_answers
        self.current_answers = []

    def process_word (self, message_text, translator, bot): # change name of function
        """Translate a new word, show it with a sample phrase, and add it
        to the history and the repetition schedule."""
        bot.send_message(self.id, message_text + ' - ' + translator.translate(message_text))
        #todo if message text in one word
        word = message_text
        #get pair
        pair = translator.get_phrase(message_text)
        pair_transl = translator.translate(pair)
        bot.send_message(self.id, pair + ' - ' + pair_transl)
        self.add_to_history(word, pair, pair_transl)
        self.add_to_schedule(word, pair, pair_transl)
        pass

    def answer(self, message_text, translator, bot): # change name of function
        """Handle an incoming user message: either the answer to the
        pending quiz question or a brand-new word to learn.

        Only user-facing messages are expected here; service commands are
        filtered out by the caller.
        """
        if self.qflag:
            if (message_text in set(self.current_answers)):
                #check it with correct answer
                if (message_text == self.current_answers[self.correct_answer]):
                    bot.send_message(self.id, 'Correct!')
                    self.change_current_interval(1)
                else:
                    # Wrong choice: reveal the answer and shorten the interval.
                    bot.send_message(self.id, self.next_word['pair'] + ' - ' + self.next_word['pair_transl'])
                    self.change_current_interval(-1)
                self.qflag = False
                self.set_next_word()
            else:
                # Not one of the offered options: treat it as a new word,
                # then re-ask the pending question.
                self.process_word(message_text, translator, bot)
                self.send_question(bot)
        else:
            self.process_word(message_text, translator, bot)

    def send_question(self, bot):
        """Send a four-option multiple-choice question for the next
        scheduled word and advance its repetition interval."""
        random_pairs = self.get_random_pairs()
        answers = [rp[2] for rp in random_pairs]
        self.qflag = True
        self.increase_word_interval()
        # Insert the correct translation at a random position among the
        # three distractors.
        self.correct_answer = np.random.randint(3)
        answers.insert(self.correct_answer, self.next_word['pair_transl'])
        self.current_answers = answers
        bot.send_message(self.id, self.next_word['pair'], answers)

    def get_random_pairs(self):
        """Return three (time, pair, translation) distractors drawn from
        the history, or fixed fallbacks while the history is small."""
        result = []
        if len(self.history) > 3:
            words = list(self.history.keys())
            for i in range(3):
                # NOTE(review): sampling with replacement — duplicates (and
                # the current word itself) may appear among the options.
                result.append(self.history[words[np.random.randint(len(self.history))]][0])
        else:
            result = [(0, 'green house', 'зеленый дом'),
            (0, 'white snow', 'белый снег'),
            (0, 'long snake', 'длинная змея')]
        return result

    def add_to_history(self, word, pair, pair_transl):
        """Append a (now, pair, translation) record to the word's history."""
        now = self.get_now()
        if word in self.history.keys():
            self.history[word].append((now, pair, pair_transl))
        else:
            self.history[word] = [(now, pair, pair_transl)]

    def change_current_interval(self, upper):
        """Lengthen (upper=1) or shorten (upper=-1) by 5% the shared
        interval the current word just used — this adapts the pacing for
        ALL words, not just the current one."""
        interval_number = self.next_word['interval_number']
        self.intervals[interval_number] = self.intervals[interval_number]*(1+upper*0.05)

    def set_next_word(self):
        """Point next_word at the schedule entry with the earliest next_time."""
        next_word = self.schedule[0]
        for word in self.schedule:
            if (next_word['next_time'] > word['next_time']):
                next_word = word
        self.next_word = next_word

    def add_to_schedule(self, word, pair, pair_transl):
        """Schedule a new word at the first repetition interval and
        recompute next_word."""
        self.schedule.append({'word':word, 'pair':pair, 'pair_transl':pair_transl, 'interval_number':0, 'next_time': self.get_now()+self.intervals[0]})
        self.set_next_word()

    def increase_word_interval(self):
        """Advance the current word to its next repetition interval; a word
        already at the last (huge) interval is left untouched."""
        if (self.next_word['interval_number'] < len(self.intervals)-1):
            self.next_word['interval_number'] += 1
            self.next_word['next_time'] = self.get_now() + self.intervals[self.next_word['interval_number']]
            #self.set_current_word()
        else:
            #self.schedule.remove(self.next_word)
            pass

    def get_now(self):
        """Return the current unix time (naive local clock), rounded to
        whole seconds."""
        return round((datetime.datetime.now() - datetime.datetime(1970,1,1)).total_seconds())
# import graph: {"/main.py": ["/MyBot.py", "/Translator.py", "/User.py", "/DataBase.py"], "/DataBase.py": ["/User.py"]}
# 78,083 | vovka643/remember_it | refs/heads/master | /DataBase.py
import pymongo
from User import User
class DataBase:
    """Thin persistence layer that mirrors User objects in a local MongoDB."""

    # User attributes persisted verbatim, in document order.
    _FIELDS = ('intervals', 'schedule', 'next_word', 'id', 'history',
               'qflag', 'correct_answer', 'current_answers')

    def __init__(self):
        self.client = pymongo.MongoClient()
        self.db = self.client.jrdb          # database
        self.users_db = self.db.users       # collection holding user documents

    def get_all_users(self):
        """Load every stored document and rebuild the corresponding User objects."""
        users = []
        for doc in self.users_db.find():
            user = User()
            for field in self._FIELDS:
                setattr(user, field, doc[field])
            users.append(user)
        return users

    def update(self, user):
        """Upsert the given user's full state, keyed by its id."""
        voc = {field: getattr(user, field) for field in self._FIELDS}
        self.users_db.update_one({"id": user.id}, {"$set": voc}, upsert=True)
# import graph: {"/main.py": ["/MyBot.py", "/Translator.py", "/User.py", "/DataBase.py"], "/DataBase.py": ["/User.py"]}
# 78,084 | vovka643/remember_it | refs/heads/master | /MyBot.py
import telebot
class MyBot:
    """Minimal wrapper around telebot.TeleBot for sending messages and polling."""

    def __init__(self, token):
        self.bot = telebot.TeleBot(token)

    def send_message(self, user_id, message, answers=None):
        """Send *message* to *user_id*; if *answers* is given, attach a reply
        keyboard with the answers laid out two per row, otherwise remove any
        existing keyboard."""
        user_markup = telebot.types.ReplyKeyboardMarkup(True, False, row_width=1)
        if answers is not None:
            for i in range(len(answers) // 2):
                user_markup.row(answers[i * 2], answers[i * 2 + 1])
            if len(answers) % 2 > 0:
                # Odd answer count: the last answer gets its own row.
                user_markup.add(answers[-1])
        else:
            user_markup = telebot.types.ReplyKeyboardRemove()  # hideBoard
        self.bot.send_message(user_id, message, reply_markup=user_markup)

    def get_updates(self, offset=-1):
        """Poll Telegram for updates; on failure write the traceback to
        error.log and return an empty list."""
        updates = []
        try:
            updates = self.bot.get_updates(offset=offset)
        except Exception:
            # Bug fix: `traceback` was referenced without ever being imported,
            # so the handler itself raised NameError and masked the real error.
            import traceback
            # `with` guarantees the log file is closed even if a write fails.
            with open('error.log', 'w') as file:
                file.write('error in MyBot.get_updates\n')
                file.write(traceback.format_exc())
                file.write('end of error')
        return updates
# 78,085 | vovka643/remember_it | refs/heads/master | /Translator.py
from yandex_translate import YandexTranslate
import json
import numpy as np
import const
class Translator:
    """Translates text to Russian via Yandex and serves example phrases
    loaded from the local phrases.txt JSON dictionary."""

    def __init__(self):
        self.trans = YandexTranslate(const.yandex_token)
        with open('phrases.txt') as json_file:
            # word -> list of example phrases
            self.dictionary = json.load(json_file)

    def translate(self, phrase):
        """Return the Russian translation of *phrase*, trailing whitespace stripped."""
        result = self.trans.translate(phrase, 'ru')
        return result['text'][0].rstrip()

    def get_phrase(self, word):
        """Return a random example phrase for *word*, or *word* itself when
        the dictionary has no entry for it."""
        if word in self.dictionary:
            candidates = self.dictionary[word]
            return candidates[np.random.randint(len(candidates))]
        return word
# 78,089 | Sirzhangsheng/Taobao | refs/heads/master | /gtyfg.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import base64
# Base64-encode the hard-coded password and print the resulting bytes.
password = '021794'
encoded = base64.b64encode(password.encode('utf8'))
print(encoded)
# 78,090 | Sirzhangsheng/Taobao | refs/heads/master | /test2.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import re
import requests
import time
import base64
import copy
import random
import datetime
class YiDong(object):
    """Scraper for China Mobile (10086) self-service: logs in via QR code,
    authenticates against the billing system (service password + captcha +
    SMS code) and downloads six months of itemized bills."""
    def __init__(self):
        self.session = requests.session()
        # Bill-type code -> human-readable category (strings kept in Chinese:
        # they are matched/printed against the live site).
        self.detail_dict = {
            '01': '套餐及固定费',
            '02': '通话详单',
            '03': '短信和彩信详单',
            '04': '上网详单',
            '05': '增值业务详单',
            '06': '代收业务详单',
            '07': '其他',
        }
    def login(self):
        """Drive the full QR-code login flow, then kick off bill parsing."""
        url2 = 'https://login.10086.cn/genqr.htm'
        header = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate, br',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Host': 'shop.10086.cn',
            'Referer': 'Referer: https://login.10086.cn/login.html?\
            channelID=12003&backUrl=https://shop.10086.cn/i/?f=home',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome\
            /65.0.3325.181 Safari/537.36',
        }
        header.update({'Host': 'login.10086.cn'})
        # Fetch the QR-code image
        res3 = self.session.get(url=url2, verify=False, headers=header)
        cookies = res3.headers.get("Set-Cookie")
        lgtoken = re.findall(re.compile(r"lgToken=(.*?);"), cookies)
        if lgtoken:
            lgtoken = lgtoken[0]
            # Save the QR-code image for the operator to scan
            with open('yidong.png', 'wb') as f:
                f.write(res3.content)
            print('请扫描验证码')
            # Poll the QR-code login status
            url_check = 'https://login.10086.cn/chkqr.htm'
            for i in range(33):
                check_response = self.session.post(url=url_check, verify=False, headers=header,
                                                   data={"lgToken": lgtoken, 'targetChannelID': '12003',
                                                         'backUrl': 'https://shop.10086.cn/i/?f=home'})
                code = re.findall(re.compile(r'"resultCode":"(\d+)",'), check_response.text)
                try:
                    if '0000' in code:
                        print(check_response.text)
                        print('二维码轮询的cookie{}'.format(self.session.cookies))
                        artifact1 = re.findall(re.compile(r'"artifact":"(.*?)"'), check_response.text)
                        if artifact1:
                            artifact = artifact1[0]
                            success_url = 'https://shop.10086.cn/i/v1/auth/getArtifact?backUrl=https://shop.10086.cn/i/?f=home&artifact={}'.format(
                                artifact)
                            header.update({'Host': 'shop.10086.cn'})
                            # Auth step 1: follow the artifact redirect
                            redirect_res0 = self.session.get(url=success_url, headers=header, verify=False,
                                                             allow_redirects=False)
                            redirect_url = redirect_res0.headers['Location']
                            redirect_res1 = self.session.get(url=redirect_url, headers=header, verify=False)
                            # Fetch profile info to learn the phone number
                            telephone = self.obtain_telephone(redirect_res1=redirect_res1, header=header)
                            if not telephone:
                                break
                            # Check whether the query feature is available
                            acoount_url = 'https://shop.10086.cn/i/v1/res/funcavl?_={}'.format(
                                int(time.time() * 1000))
                            acoount_res = self.session.get(url=acoount_url, headers=header, verify=False)
                            if '成功' in acoount_res.text:
                                # First billing identity authentication attempt
                                if '认证成功' in self.auth_user(telephone=telephone, header=header):
                                    # Parse the bill details
                                    self.parse_detail(telephone=telephone, header=header)
                                else:
                                    # Second authentication attempt after a cooldown
                                    time.sleep(60)
                                    auth_second = self.auth_user(telephone=telephone, header=header)
                                    if '认证成功' in auth_second:
                                        # Parse the bill details
                                        self.parse_detail(telephone=telephone, header=header)
                            else:
                                print('查询功能不正常')
                            break
                    elif '8020' in code:
                        print(check_response.text + "二维码失效!!!")
                        break
                    else:
                        time.sleep(2)
                        print('请扫码并确认!!')
                except Exception as e:
                    print("出错{}".format(e))
    def auth_image(self, telephone, header):
        """Download the image captcha, prompt the operator for its value, and
        verify it server-side; recurses until the captcha is accepted."""
        image_url = 'https://shop.10086.cn/i/authImg'
        image_res = self.session.get(url=image_url, headers=header, verify=False)
        with open('yanzheng.png', 'wb') as f:
            f.write(image_res.content)
        yanzheng = input("请输入验证码")
        # Server-side pre-check of the captcha value
        preckeck_url = 'https://shop.10086.cn/i/v1/res/precheck/{}?captchaVal={}&_={}' \
            .format(telephone, yanzheng, int(time.time() * 1000))
        preckeck_res = self.session.get(url=preckeck_url, headers=header, verify=False)
        print(preckeck_res.text)
        if '输入正确,校验成功' in preckeck_res.text:
            print('图片验证码输入正确!!')
            return yanzheng
        else:
            # NOTE(review): recursion result is not returned here, so a retry's
            # captcha value is lost to the caller — confirm intended behavior.
            print('校验失败,请重新输入图片验证码!!')
            self.auth_image(telephone=telephone, header=header)
    def send_message(self, telephone, header):
        """Request an SMS verification code and prompt the operator for it;
        retries after 60 s on throttling or failure."""
        duanxin_url = 'https://shop.10086.cn/i/v1/fee/detbillrandomcodejsonp/{}?_={}'
        duanxin_res = self.session.get(url=duanxin_url.format(
            telephone, int(time.time() * 1000)), headers=header, verify=False)
        # Did the SMS go out successfully?
        if 'success' in duanxin_res.text:
            print('发送成功!')
            duanxin = input("请输入短信验证码")
            return duanxin
        if '次数过多' in duanxin_res.text:
            print('单位时间内下发短信次数过多,请稍后再使用!')
            time.sleep(60)
            self.send_message(telephone=telephone, header=header)
        else:
            print('发送短信失败,正在重新发送!')
            time.sleep(60)
            self.send_message(telephone=telephone, header=header)
    def obtain_telephone(self, redirect_res1, header):
        """Fetch the logged-in profile and return the phone number, or '' on failure."""
        referer = redirect_res1.url
        header.update({'Referer': referer})
        ur4 = 'https://shop.10086.cn/i/v1/auth/loginfo?_={}'.format(int(time.time() * 1000))
        successauth_response1 = self.session.get(url=ur4, headers=header, verify=False)
        print("成功验证???{}".format(successauth_response1.text))
        if 'loginValue' in successauth_response1.text:
            telephone = re.findall(re.compile(r'"loginValue":"(\d+)",'), successauth_response1.text)
            if telephone:
                telephone = telephone[0]
                print('這是用户手机号{}'.format(telephone))
                if_url = 'https://shop.10086.cn/i/v1/cust/mergecust/{}?_={}'.format(
                    telephone, int(time.time() * 1000))
                inf0_res = self.session.get(url=if_url, headers=header, verify=False)
                print('這是用户信息{}'.format(inf0_res.text))
                return telephone
            else:
                print('手机号获取失败!2')
                telephone = ''
                return telephone
        else:
            print('手机号获取失败!1')
            telephone = ''
            return telephone
    def auth_user(self, telephone, header):
        """Run the billing identity authentication (service password + image
        captcha + SMS code); returns '认证成功!' or '认证失败!'."""
        # Ask the operator for the service password
        password = input("请输入服务密码:")
        # Ask for the image captcha
        yanzheng = self.auth_image(telephone=telephone, header=header)
        # Ask for the SMS verification code
        duanxin = self.send_message(telephone=telephone, header=header)
        # Base64-encode the service password and SMS code as the API expects
        pwdtempsercode = base64.b64encode(bytes(password.encode('utf8')))
        pwdtemprandcode = base64.b64encode(bytes(duanxin.encode('utf8')))
        # Billing identity authentication request
        zhangdan_url = 'https://shop.10086.cn/i/v1/fee/detailbilltempidentjsonp/{}?pwdTempSerCode={}&pwdTempRandCode={}&captchaVal={}&_={}'
        zhangdan_res = self.session.get(url=zhangdan_url.format(
            telephone, pwdtempsercode.decode('utf-8'), pwdtemprandcode.decode('utf-8'),
            yanzheng, int(time.time() * 1000)), headers=header, verify=False)
        print(zhangdan_res.text)
        if '认证成功' in zhangdan_res.text:
            print('认证成功!')
            return '认证成功!'
        else:
            print('认证失败!')
            return '认证失败!'
    def parse_detail(self, telephone, header):
        """Download itemized bills for every bill type over the last six months.

        bill_type codes: 01 plan & fixed fees, 02 call records, 03 SMS/MMS,
        04 data usage, 05 value-added services, 06 collected-on-behalf, 07 other.
        """
        for x in range(1, 8):
            bill_type = '0' + str(x)
            time.sleep(random.randint(2, 3))
            tem1 = int(time.strftime('%Y%m'))
            detail_time = copy.deepcopy(tem1)
            # Iterate backwards over the last six billing months
            for v in range(6):
                zhangdan_url2 = 'https://shop.10086.cn/i/v1/fee/detailbillinfojsonp/{}?curCuror=1&step=1000&qryMonth={}&billType={}&_={}'
                zhangdan_res2 = self.session.get(url=zhangdan_url2.format(
                    telephone, detail_time, bill_type, int(time.time() * 1000)),
                    headers=header, verify=False)
                time.sleep(random.randint(2, 3))
                print('用户的{}月{}信息{}'.format(
                    detail_time, self.detail_dict.get(bill_type), zhangdan_res2.text))
                un_date = (datetime.datetime.now() + datetime.timedelta(days=-365)).date()
                start_date = un_date.strftime('%Y%m%d')
                end_date = datetime.datetime.now().strftime('%Y%m%d')
                # NOTE(review): YYYYMM minus 1 is wrong across a year boundary
                # (e.g. 202001 - 1 = 202000) — confirm intended month stepping.
                detail_time -= 1
                # Re-authenticate when the temporary credential expires from
                # overly frequent access
                if '临时身份凭证不存在' in zhangdan_res2.text:
                    if '认证成功' in self.auth_user(telephone=telephone, header=header):
                        zhangdan_res2 = self.session.get(url=zhangdan_url2.format(
                            telephone, detail_time, bill_type, int(time.time() * 1000)),
                            headers=header, verify=False)
                        if '临时身份凭证不存在' in zhangdan_res2.text:
                            print(zhangdan_res2.text)
                            print('爬虫失败!!!!!!')
                            break
                    else:
                        break
            # Payment history (note the hard-coded phone number in the URL)
            if x == 4:
                pay_record = self.session.get(
                    url='https://shop.10086.cn/i/v1/cust/his/15934117585?startTime={}&endTime={}&_={}'.format(
                        start_date, end_date, int(time.time() * 1000)
                    ))
                print('用户缴费记录{}'.format(pay_record.text))
if __name__ == '__main__':
    # Run the full QR-login + bill-scraping flow interactively.
    yi_dong = YiDong()
    yi_dong.login()
# import graph: {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]}
# 78,091 | Sirzhangsheng/Taobao | refs/heads/master | /TaobaoSpider/models.py
import datetime
from sqlalchemy import Column, String, create_engine, Integer, DateTime, TEXT
from sqlalchemy.ext.declarative import declarative_base
from TaobaoSpider.settings import db_host, db_user, db_pawd, db_name, db_port
# Declarative base class shared by all ORM models below:
Base = declarative_base()
# Taobao order
class TbOrderModel(Base):
    """ORM model for one scraped Taobao order (delivery + consignee details)."""
    # Table name:
    __tablename__ = 'taobaov1_tborder'
    # Table structure:
    id = Column(Integer, primary_key=True)
    token = Column(String(64), default='')
    orderId = Column(String(200), )
    orderTime = Column(String(200), )
    orderAmt = Column(String(200), )
    orderStatus = Column(String(200), )
    deliverType = Column(String(200), )
    deliverCompany = Column(String(200), )
    deliverNo = Column(String(200), )
    consignee = Column(String(200), )
    consigneeMobile = Column(String(200), )
    consigneeAddress = Column(String(200), )
    add_time = Column(DateTime, default=datetime.datetime.now)
# User basic-info table
class TbBsinfoModel(Base):
    """ORM model for a Taobao account's profile and credit information."""
    # Table name:
    __tablename__ = 'taobaov1_tbbasicinfo'
    # Table structure:
    id = Column(Integer, primary_key=True)
    token = Column(String(64), default='')
    username = Column(String(300), )
    nickName = Column(String(300), )
    gender = Column(String(300), )
    birthday = Column(String(300), )
    name = Column(String(300), )
    identityNo = Column(String(300), )
    identityChannel = Column(String(300), )
    email = Column(String(300), )
    mobile = Column(String(300), )
    vipLevel = Column(String(300), )
    growthValue = Column(String(300), )
    creditPoint = Column(String(300), )
    favorableRate = Column(String(300), )
    securityLevel = Column(String(300), )
    add_time = Column(DateTime, default=datetime.datetime.now)
# Shipping-address table
class TbAddressModel(Base):
    """ORM model for one saved shipping address of a Taobao account."""
    # Table name:
    __tablename__ = 'taobaov1_tbaddresses'
    # Table structure:
    id = Column(Integer, primary_key=True)
    token = Column(String(300), default='')
    name = Column(String(300), )
    address = Column(String(300), )
    mobile = Column(String(300), )
    zipCode = Column(String(300), )
    isDefault = Column(String(300), )
    add_time = Column(DateTime, default=datetime.datetime.now)
# Item (goods) info table
class TbGoodsModel(Base):
    """ORM model for one purchased item line, linked to its order by orderId."""
    # Table name:
    __tablename__ = 'taobaov1_tbitem'
    # Table structure:
    id = Column(Integer, primary_key=True)
    token = Column(String(300), default='')
    itemId = Column(String(300), )
    itemName = Column(String(300), )
    itemUrl = Column(String(300), )
    itemPrice = Column(String(300), )
    itemQuantity = Column(String(300), )
    orderId = Column(String(300), )
    add_time = Column(DateTime, default=datetime.datetime.now)
# Taobao login-info table
class TbLoginModel(Base):
    """ORM model tracking a crawl job's credentials, cookies, captcha image
    and the login/crawl state machine flags."""
    # Table name:
    __tablename__ = 'taobaov1_tblogin'
    # Table structure:
    id = Column(Integer, primary_key=True)
    token = Column(String(300), default='')
    username = Column(String(300), )
    password = Column(String(300), )
    identityNo = Column(String(300), )
    name = Column(String(300), )
    uid = Column(String(300), )
    accessType = Column(String(300), )
    loginType = Column(String(300), )
    cookie = Column(TEXT, )
    login_state = Column(String(300), )
    crawl_status = Column(String(300), )
    create_data = Column(String(300), )
    target_crawl = Column(String(300), )
    msg_code = Column(String(300), )
    image_base64 = Column(TEXT, )
    image_save_time = Column(Integer, )
    add_time = Column(DateTime, )
# Log-record table
class TbLogModel(Base):
    """ORM model for crawler log entries persisted to the database."""
    # Table name:
    __tablename__ = "taobaov1_tblog"
    # Table structure:
    id = Column(Integer, unique=True, primary_key=True)
    uid = Column(String(255), )
    token = Column(String(255), )
    file_name = Column(String(255), )
    line_no = Column(String(255), )
    message = Column(TEXT, )
    log_time = Column(DateTime, )
if __name__ == "__main__":
    # Running this module directly creates all tables defined above.
    engine = create_engine('mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8'
                           .format(db_user, db_pawd, db_host, db_port, db_name), max_overflow=500)
    Base.metadata.create_all(engine)
# import graph: {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]}
# 78,092 | Sirzhangsheng/Taobao | refs/heads/master | /TaobaoSpider/items.py
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TaobaospiderItem(scrapy.Item):
    """Placeholder item generated by the scrapy project template (unused)."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
# User basic info
class BasicinfoItem(scrapy.Item):
    """Scrapy item carrying an account's profile fields (maps to TbBsinfoModel)."""
    token = scrapy.Field()
    username = scrapy.Field()
    nickName = scrapy.Field()
    gender = scrapy.Field()
    birthday = scrapy.Field()
    name = scrapy.Field()
    identity_no = scrapy.Field()
    identity_channel = scrapy.Field()
    email = scrapy.Field()
    mobile = scrapy.Field()
    vip_level = scrapy.Field()
    growth_value = scrapy.Field()
    credit_point = scrapy.Field()
    favorable_rate = scrapy.Field()
    security_level = scrapy.Field()
# Shipping-address info
class AddressItem(scrapy.Item):
    """Scrapy item for one shipping address (maps to TbAddressModel)."""
    token = scrapy.Field()
    name = scrapy.Field()
    address = scrapy.Field()
    mobile = scrapy.Field()
    zipCode = scrapy.Field()
    isDefault = scrapy.Field()
# Order info
class OrdersItem(scrapy.Item):
    """Scrapy item for one order (maps to TbOrderModel)."""
    token = scrapy.Field()
    order_id = scrapy.Field()
    order_createtime = scrapy.Field()
    order_rmb = scrapy.Field()
    order_status = scrapy.Field()
    deliver_type = scrapy.Field()
    deliver_company = scrapy.Field()
    deliver_no = scrapy.Field()
    consignee = scrapy.Field()
    consignee_mobile = scrapy.Field()
    consignee_address = scrapy.Field()
# Goods info
class GoodsItem(scrapy.Item):
    """Scrapy item for one purchased item line (maps to TbGoodsModel)."""
    token = scrapy.Field()
    goods_id = scrapy.Field()
    goods_name = scrapy.Field()
    goods_url = scrapy.Field()
    goods_price = scrapy.Field()
    goods_nums = scrapy.Field()
    order_id = scrapy.Field()
# import graph: {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]}
# 78,093 | Sirzhangsheng/Taobao | refs/heads/master | /TaobaoSpider/use_proxy.py
import requests
import json
import redis
from .settings import REDIS_HOST, REDIS_PORT, REDIS_DB_PROXY, REDIS_DB_KEY, USE_PROXY
class ProxyPoolInfo(object):
    """Redis-backed proxy pool: hands out proxy 'ip:port' strings and retires dead ones."""

    def __init__(self):
        pool = redis.ConnectionPool(host=REDIS_HOST, port=REDIS_PORT, db=REDIS_DB_PROXY, )
        self.redis_conn = redis.Redis(connection_pool=pool,)

    # Obtain a proxy IP
    def create_proxy(self):
        """Return a proxy 'ip:port' string, or '' when USE_PROXY is disabled.

        Pops one from the Redis pool when available; otherwise fetches a fresh
        proxy from the xdaili API and records it in the pool before returning.
        """
        # Bug fix: the original wrapped this in `while True:` although every
        # path returned on the first iteration — the loop was dead code.
        if not USE_PROXY:
            return ""
        # Any proxies left in the pool? scard returns the set's cardinality.
        if self.redis_conn.scard(REDIS_DB_KEY):
            # Pop an arbitrary element from the pool.
            return self.redis_conn.spop(REDIS_DB_KEY).decode('utf-8')
        # NOTE(review): API credentials are hard-coded in this URL — move them
        # into settings.py alongside the other configuration.
        proxy_url = "http://api.xdaili.cn/xdaili-api/greatRecharge/getGreatIp" \
                    "?spiderId=d882f2dedc1741e087d228c208060a36" \
                    "&orderno=YZ20181087213QEydHG" \
                    "&returnType=2" \
                    "&count=1"
        proxy_resp = requests.get(proxy_url)
        print("代理ip:{}".format(proxy_resp.text))
        procy_text = json.loads(proxy_resp.text)
        proxy_ip = procy_text["RESULT"][0]["ip"]
        proxy_port = procy_text["RESULT"][0]["port"]
        proxy_ip_port = proxy_ip + ":" + proxy_port
        # Record the freshly purchased proxy in the pool.
        self.redis_conn.sadd(REDIS_DB_KEY, proxy_ip_port)
        return proxy_ip_port

    # Remove an unusable proxy IP
    def remove_proxy(self, proxy):
        """Drop a dead proxy from the Redis pool."""
        self.redis_conn.srem(REDIS_DB_KEY, proxy)
# import graph: {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]}
# 78,094 | Sirzhangsheng/Taobao | refs/heads/master | /TaobaoSpider/pipelines.py
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import time
import datetime
import logging
from .items import OrdersItem, BasicinfoItem, GoodsItem, AddressItem
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from TaobaoSpider.models import TbOrderModel, TbBsinfoModel, TbGoodsModel, TbAddressModel, TbLoginModel, TbLogModel
from TaobaoSpider.settings import db_host, db_user, db_pawd, db_name, db_port
# Declarative base class (unused here; the models module defines its own):
Base = declarative_base()
# Taobao pipeline
class TaobaospiderPipeline(object):
    """Persists scraped items into MySQL via SQLAlchemy and exposes helpers
    for login/crawl state, captcha image storage and database logging."""
    def __init__(self):  # 'dialect+driver://user:password@host:port/dbname'
        engine = create_engine('mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'
                               .format(db_user, db_pawd, db_host, db_port, db_name), max_overflow=500)
        # Create the DBSession class and open one session for the pipeline:
        db_session = sessionmaker(bind=engine)
        self.session = db_session()
    def process_item(self, item, spider):
        """Map each known Item subclass to its ORM model row and insert it."""
        if isinstance(item, OrdersItem):
            info = TbOrderModel(
                token=item['token'],
                orderId=item['order_id'],
                orderTime=item['order_createtime'],
                orderAmt=item['order_rmb'],
                orderStatus=item['order_status'],
                deliverType=item['deliver_type'],
                deliverCompany=item['deliver_company'],
                deliverNo=item['deliver_no'],
                consignee=item['consignee'],
                consigneeMobile=item['consignee_mobile'],
                consigneeAddress=item['consignee_address'],
                add_time=datetime.datetime.now(),
            )
            # self.order_nums = self.order_nums + 1
            # logging.info('用户{}订单数{}'.format(item['token'], self.order_nums))
        elif isinstance(item, BasicinfoItem):
            info = TbBsinfoModel(
                token=item['token'],
                username=item['username'],
                nickName=item['nickName'],
                gender=item['gender'],
                birthday=item['birthday'],
                name=item['name'],
                identityNo=item['identity_no'],
                identityChannel=item['identity_channel'],
                email=item['email'],
                mobile=item['mobile'],
                vipLevel=item['vip_level'],
                growthValue=item['growth_value'],
                creditPoint=item['credit_point'],
                favorableRate=item['favorable_rate'],
                securityLevel=item['security_level'],
                add_time=datetime.datetime.now()
            )
        elif isinstance(item, GoodsItem):
            info = TbGoodsModel(
                token=item['token'],
                itemId=item['goods_id'],
                itemName=item['goods_name'],
                itemUrl=item['goods_url'],
                itemPrice=item['goods_price'],
                itemQuantity=item['goods_nums'],
                orderId=item['order_id'],
                add_time=datetime.datetime.now()
            )
        elif isinstance(item, AddressItem):
            info = TbAddressModel(
                token=item['token'],
                name=item['name'],
                address=item['address'],
                mobile=item['mobile'],
                zipCode=item['zipCode'],
                isDefault=item['isDefault'],
                add_time=datetime.datetime.now()
            )
        else:
            # NOTE(review): unknown item types fall through with info = '' and
            # session.add('') below relies on the except to roll back — confirm.
            info = ''
            logging.info('数据yield失败')
        try:
            self.session.add(info)
            self.session.commit()
        except Exception as e:
            logging.error("[UUU] 淘宝插入数据异常 Error :{}".format(e))
            self.session.rollback()
        return item
    # Update login state
    # (0: login succeeded, 1: login failed, 2: waiting to log in, -1: queued)
    '''
    crawl_state 爬虫工作状态。0:登陆成功 1:登陆失败 2:登陆等待中 -1:队列等待登陆
    :param token:
    '''
    def change_login_state(self, token, login_state):
        """Set TbLoginModel.login_state for the row matching *token*."""
        try:
            self.session.query(TbLoginModel).filter(TbLoginModel.token == token).update(
                {TbLoginModel.login_state: login_state})
            self.session.commit()
        except Exception as e:
            self.session.rollback()
            logging.error('更改登陆状态失败:{}'.format(e))
    # Update crawl status
    # (0: not yet crawled, 1: crawling, 2: crawl finished OK, -1: crawl failed)
    '''
    crawl_state 爬虫工作状态。0:未爬过的用户 1:正在爬取数据 2:数据爬取结束且成功返回 -1:爬取失败
    :param token:
    '''
    def change_crawl_status(self, token, crawl_status):
        """Set TbLoginModel.crawl_status for the row matching *token*."""
        try:
            self.session.query(TbLoginModel).filter(TbLoginModel.token == token).update(
                {TbLoginModel.crawl_status: crawl_status})
            self.session.commit()
        except Exception as e:
            self.session.rollback()
            logging.error('更改爬虫状态失败:{}'.format(e))
    # Store a captcha image (base64) together with its save timestamp
    def insert_image_base64(self, token, image_base64):
        """Persist the base64 captcha image and the current Unix timestamp."""
        try:
            self.session.query(TbLoginModel).filter(TbLoginModel.token == token).update(
                {TbLoginModel.image_base64: image_base64, TbLoginModel.image_save_time: int(time.time())})
            self.session.commit()
        except Exception as e:
            self.session.rollback()
            logging.error('图片插入失败:{}'.format(e))
    # Look up the user to crawl and return its identifying fields
    def select_crawl_user(self, token):
        """Return (id, username, crawl_status, uid) for *token*, or None if absent."""
        try:
            result = self.session.query(TbLoginModel).filter(TbLoginModel.token == token, ).first()
            if result:
                user_id = result.id
                uid = result.uid
                username = result.username
                crawl_status = result.crawl_status
                return user_id, username, crawl_status, uid
            else:
                logging.info('没有结果')
                return None
        except Exception as e:
            logging.error("数据库查询异常:{}".format(e))
    # Store an exception log record in the database
    def insert_log(self, uid, token, file_name, line_no, message):
        """Insert a TbLogModel row capturing where and when an error happened."""
        try:
            adds = TbLogModel(
                uid=uid,
                token=token,
                file_name=file_name,
                line_no=line_no,
                message=message,
                log_time=datetime.datetime.now()
            )
            self.session.add(adds)
            self.session.commit()
        except Exception as e:
            self.session.rollback()
            logging.info("将日志存入数据库中异常:{}".format(e))
# import graph: {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]}
# 78,095 | Sirzhangsheng/Taobao | refs/heads/master | /test.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
from selenium import webdriver
import time
import copy
import re
import requests
import datetime
def get_cookie():
    """Open the Taobao login page in PhantomJS and harvest its cookies into
    cookies_success (complete set for the final login step) and cookies
    (pruned generic set), then fetch the same page with requests for comparison."""
    # Build the two cookie dicts we need: cookies_success and cookies
    # (older selenium-based umid_token extraction kept below for reference)
    # driver = webdriver.PhantomJS()
    # driver.implicitly_wait(120)
    # driver.get('https://login.taobao.com/member/login.jhtml')
    # time.sleep(2)
    # if 'login-box no-longlogin module-quick' in driver.page_source:
    #     umid_token = re.findall(re.compile(r';umid_token=(.*?);'), driver.page_source)
    # else:
    #     driver.find_element_by_id('J_Static2Quick').click()
    #     # 必须为2秒!!
    #     time.sleep(2)
    #     umid_token = re.findall(re.compile(r'&umid_token=(.*?)&'), driver.page_source)
    # if len(umid_token) > 0:
    #     print(umid_token[0].replace('&', ''))
    # Build the two cookie dicts we need: cookies_success and cookies
    driver = webdriver.PhantomJS()
    driver.implicitly_wait(120)
    driver.get('https://login.taobao.com/member/login.jhtml')
    cookies_success = dict()
    for cook in driver.get_cookies():
        cookies_success[cook["name"]] = cook["value"]  # cookie set used for the final login step
    cookies = copy.deepcopy(cookies_success)  # generic cookie set used for later logins
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
    }
    response = requests.get(url='https://login.taobao.com/member/login.jhtml', headers=headers)
    print(response.cookies)
    try:
        # Strip tracking cookies that must not be reused across sessions.
        print('cookies_success里面的数据为{}'.format(cookies_success))
        cookies.pop('_uab_collina')
        cookies.pop('cookieCheck')
        cookies.pop('um')
        cookies.pop('_umdata')
        cookies.pop('isg')
    except:
        pass
    print('cookies里面的数据为{}'.format(cookies))
    # (commented-out QR-code URL / umid_token extraction kept for reference)
    # if 'login-box no-longlogin module-quick' in driver.page_source:
    #     driver.find_element_by_id('J_Quick2Static').click()
    #     url1 = re.findall(re.compile(r'<script src="(.*?)" async=""></script>'), driver.page_source)
    # else:
    #     # 必须为2秒!!
    #     time.sleep(2)
    #     url1 = re.findall(re.compile(r'<script src="(.*?)" async=""></script>'), driver.page_source)
    # if len(url1) > 0:
    #     url = url1[-3].replace(';', '&')
    #     umid_token = re.findall(re.compile(r'&umid_token=(.*?)&'), url)
    #     if umid_token:
    #         umid_token = umid_token[0]
    #         print(umid_token)
    #     else:
    #         self.logger.error("获取umid_token重要数据出错!")
    # else:
    #     url = ''
    #     umid_token = ""
    #     self.logger.error("获取产生二维码地址出错!")
if __name__ == '__main__':
    # Manual experiment: harvest cookies, then print timestamp-derived IDs.
    get_cookie()
    print('C{}{}'.format(int(time.time() * 100000000000000), int(time.time() * 1000000)))
    print(int(time.time() * 1000000))
# def lian_tong():
# url = 'http://uac.10010.com/oauth2/genqr?timestamp={}'.format(int(time.time() * 1000))
# headers = {
# 'Accept': 'image/webp,image/apng,image/*,*/*;q=0.8',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
# }
# headers2 = {
#
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
# }
# response = requests.get(url=url, headers=headers)
# unisecid = response.cookies.get('unisecid')
# print(unisecid)
# with open('liantong.png', 'wb') as f:
# f.write(response.content)
# print('请扫描验证码')
# print(response.status_code)
# url_check = 'http://uac.10010.com/qrcode/qrcode_hbt?secsnid={}&_={}'
# print(url_check)
#
# for i in range(200):
# response2 = requests.get(url=url_check.format(unisecid, int(time.time() * 1000)), headers=headers2)
# try:
# code = "".join(re.findall(re.compile(r'resultcode":"(\d+)"'), response2.text))
# print(response2.text)
# if '00' in code:
# time.sleep(2)
# print(code)
#
# elif '10' in code:
# time.sleep(2)
# print(code + "等待用户确认.......")
# elif '11' in code:
# pass
# else:
# print("check二维码出现错误或者失效")
# break
#
# except Exception as e:
# print("出错{}".format(e))
# def yi_dong():
# url2 = 'http://login.10086.cn/genqr.htm'
# header = {
# 'Accept': '*/*',
# 'Accept-Encoding': 'gzip, deflate, br',
# 'Accept-Language': 'zh-CN,zh;q=0.9',
# 'Connection': 'keep-alive',
# 'Host': 'shop.10086.cn',
# 'Referer': 'https://login.10086.cn/login.html?channelID=12034&backUrl=http%3A%2F%2Fwww.10086.cn%2Findex%2Fsx%2Findex_351_354.html',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
# }
# session = requests.session()
# header.update({'Host': 'login.10086.cn'})
# # 获取二维码图片
# res3 = session.get(url=url2, headers=header)
# cookies = res3.headers.get("Set-Cookie")
# lgToken = re.findall(re.compile(r"lgToken=(.*?);"), cookies)
# if lgToken:
# lgToken = lgToken[0]
#
# # 保存图片
# with open('yidong.png', 'wb') as f:
# f.write(res3.content)
# print('请扫描验证码')
#
# # 二维码轮询
# url_check = 'https://login.10086.cn/chkqr.htm'
# for i in range(33):
# response1 = session.post(url=url_check, headers=header,
# data={"lgToken": lgToken,
# 'targetChannelID': '12034',
# 'backUrl': 'http%3A%2F%2Fwww.10086.cn%2Findex%2Fsx%2Findex_351_354.html'})
#
# code = re.findall(re.compile(r'"resultCode":"(\d+)",'), response1.text)
# try:
# if '0000' in code:
# print(response1.text)
# print('二维码轮询的cookie{}'.format(session.cookies))
# artifact1 = re.findall(re.compile(r'"artifact":"(.*?)"'), response1.text)
# if artifact1:
# artifact = artifact1[0]
# success_url = 'http://www1.10086.cn/web-Center/authCenter/receiveArtifact.do?backUrl=http%3A%2F%2Fwww.10086.cn%2Findex%2Fsx%2Findex_351_354.html&artifact={}'.format(
# artifact)
# header.update({'Host': 'www1.10086.cn'})
# # 验证1
# redirect_res0 = session.get(url=success_url, headers=header, allow_redirects=False)
# redirect_url = redirect_res0.headers['Location']
# header.update({'Referer': ''})
# redirect_res1 = session.get(url=redirect_url, headers=header, )
# # redirect_res1.encoding = 'utf-8'
# # print(redirect_res1.text)
# # print(session.cookies)
# # 验证2
# header.update({'Host': 'login.10086.cn'})
# header.update({'Referer': 'https://shop.10086.cn/i/?f=home'})
#
# ur3 = 'https://login.10086.cn/SSOCheck.action?channelID=12034&backUrl=https://shop.10086.cn/i/?f=home'
# auth_response1 = session.get(url=ur3, headers=header, verify=False, allow_redirects=False)
# print(auth_response1.text)
# artifact2 = re.findall(re.compile(r'artifact=(.*?)&'), auth_response1.text)
# if artifact2:
# header.update({'Host': 'shop.10086.cn'})
# redirect_url2 = 'https://shop.10086.cn/i/v1/auth/getArtifact?artifact={}&backUrl=https%3A%2F%2Fshop.10086.cn%2Fi%2F%3Ff%3Dhome'.format(
# artifact2[0])
# redirect_res2 = session.get(url=redirect_url2, headers=header, verify=False)
#
# # 成功验证??
# Referer = redirect_res2.url
# header.update({'Referer': Referer})
# header.update({'Host': 'shop.10086.cn'})
# print(header)
# ur4 = 'https://shop.10086.cn/i/v1/auth/loginfo?_={}'.format(int(time.time() * 1000))
# successauth_response1 = session.get(url=ur4, headers=header, verify=False)
# print("成功验证???{}".format(successauth_response1.text))
#
# # 获取个人信息
#
# break
# elif '8020' in code:
# print(response1.text + "二维码失效!!!")
# break
# else:
# time.sleep(2)
# print('请扫码并确认!!')
# except Exception as e:
# print("出错{}".format(e))
#
#
# if __name__ == '__main__':
# yi_dong()
# print(str(datetime.datetime.now().date()).replace('-', ''))
# print(time.strftime('%Y%m'))
# print(int(time.strftime('%Y%m'))-2)
# time1 = int(time.strftime('%Y%m'))
# tem = copy.deepcopy(time1)
# for i in range(5):
# tem = tem - 1
# print(tem)
# zhangdan_url2 = 'https://shop.10086.cn/i/v1/fee/detailbillinfojsonp/{}?\
# curCuror=1&step=100&qryMonth={}&billType=01&_={}'
# print(zhangdan_url2.format(123, int(time.strftime('%Y%m')), int(time.time() * 1000)))
# detail_time = 201810
# for v in range(6):
# zhangdan_url2 = 'https://shop.10086.cn/i/v1/fee/detailbillinfojsonp/{}?curCuror=1&step=1000&qryMonth={}&billType={}&_={}'
# url = zhangdan_url2.format(
# 1, 1, detail_time, int(time.time() * 1000)),
# detail_time -= 1
# print(url)
# a = """null({"data":null,"retCode":"520001","retMsg":"临时身份凭证不存在。","sOperTime":null})"""
# print('临时身份凭证不存在' in a)
# print(int(time.strftime('%Y%m%d')))
# print(int(time.strftime('%Y%m%d')))
# t=(datetime.datetime.now() + datetime.timedelta(days=-365)).date()
# print(t.strftime('%Y%m%d'))
# import graph: {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]}
# 78,096 | Sirzhangsheng/Taobao | refs/heads/master | /redis_push.py
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import redis
def main():
    """Seed the `taobao` Redis list that crawler workers consume from."""
    client = redis.Redis(host='127.0.0.1', port=6379, db=0)
    # Push a single start token onto the queue (originally meant to hold
    # the URLs for pages 1-100).
    client.lpush("taobao", 'jianjian')


if __name__ == '__main__':
    main()
| {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]} |
78,097 | Sirzhangsheng/Taobao | refs/heads/master | /TaobaoSpider/settings.py | # -*- coding: utf-8 -*-
# Scrapy settings for TaobaoSpider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# https://doc.scrapy.org/en/latest/topics/settings.html
# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
# https://doc.scrapy.org/en/latest/topics/spider-middleware.html
BOT_NAME = 'TaobaoSpider'

SPIDER_MODULES = ['TaobaoSpider.spiders']
NEWSPIDER_MODULE = 'TaobaoSpider.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = 'TaobaoSpider (+http://www.yourdomain.com)'

# Remote debugging database (Aliyun RDS MySQL).
# DOWNLOAD_DELAY = 2
# SECURITY NOTE(review): live database credentials are committed here in
# plain text — move them to environment variables / a secrets store and
# rotate them. `db_pawd` (sic) keeps its misspelled name because other
# modules (e.g. models.py / pipelines.py) import it under this name.
db_host = 'rm-bp1582z2vc8ca63txo.mysql.rds.aliyuncs.com'
db_user = 'shengdun'
db_pawd = 'SdPQ_!)@$-1024'
db_name = 'shengdun'
db_port = 3306

# REDIS_HOST = '47.98.205.93'
REDIS_HOST = '127.0.0.1'
REDIS_PORT = 6379

# Proxy-pool related configuration.
USE_PROXY = False
REDIS_DB_PROXY = 15  # Redis DB index used by the proxy IP pool.
REDIS_DB_KEY = "proxy_key"  # Redis key under which the proxy IPs are stored.

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Redis-related configuration: distributed crawling via scrapy-redis.
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
SCHEDULER_PERSIST = True
# NOTE(review): the first import below is immediately shadowed by the second,
# and neither `Scheduler` name appears to be used in this settings module —
# confirm before removing either line.
from scrapy.core.scheduler import Scheduler
from scrapy_redis.scheduler import Scheduler

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 1
# CONCURRENT_REQUESTS_PER_DOMAIN = 1
# CONCURRENT_REQUESTS_PER_IP = 1

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# The download delay setting will honor only one of:

# Disable cookies (enabled by default)
# COOKIES_ENABLED = True
# HTTPERROR_ALLOWED_CODES = [302, ]

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
# }

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'TaobaoSpider.middlewares.TaobaospiderSpiderMiddleware': 543,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'TaobaoSpider.middlewares.TaobaospiderDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'TaobaoSpider.pipelines.TaobaospiderPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
| {"/TaobaoSpider/models.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/use_proxy.py": ["/TaobaoSpider/settings.py"], "/TaobaoSpider/pipelines.py": ["/TaobaoSpider/items.py", "/TaobaoSpider/models.py", "/TaobaoSpider/settings.py"]} |
78,098 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /launchers/start_ngrok.py | import time
from pyngrok import ngrok
# Open an HTTP tunnel to local port 8888 and keep the process alive so the
# tunnel stays up.
tunnel = ngrok.connect(8888, "http")
# Emit the public URL immediately so anything tailing this process's output
# can pick it up.
print(tunnel.public_url, flush=True)

# Block forever; the tunnel is torn down when the process is killed.
while True:
    time.sleep(10)
| {"/main.py": ["/task_specific.py"], "/notebooks/display_generation_input.py": ["/generation.py", "/task_specific.py"], "/generation.py": ["/task_specific.py"]} |
78,099 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /main.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Training script for the retrieval solution.
"""
import concurrent.futures
import functools
import itertools
import json
import logging
import operator
import os
import queue
import shlex
import socket
import subprocess
import tempfile
import time
from typing import Any, Callable, Dict, List, Optional
from absl import app
from absl import flags
from absl import logging as absl_logging
import colorama
import constants
import numpy as np
from rich import console
from rich import table
import task_specific
import tensor2tensor.utils.adafactor
import tensorflow as tf
import tensorflow.python.distribute.values as values
import tensorflow.python.framework.ops as ops
import tf_utils
import toolz
import transformers
import utils
# assert tf.__version__.strip() == "2.5.0", tf.__version__
# Allow Hugging Face tokenizers to use multiple threads.
os.environ["TOKENIZERS_PARALLELISM"] = "true"

LOGGER = logging.getLogger(__name__)
# Directory containing this script; used to locate bundled config files
# (e.g. the default ScaNN config below).
SCRIPT_DIRECTORY = os.path.realpath(os.path.dirname(__file__))

LOGGER.debug(
    "############################################################"
    ">>>>>>>>>>>>>>> Tensorflow version: %s <<<<<<<<<<<<<<<<"
    "############################################################",
    str(tf.__version__)
)
################################################################################
# Flag Definitions
################################################################################
FLAGS = flags.FLAGS

# It is now recommended that one uses the return values of DEFINE_* calls
# because they can be pytype-checked and the intellisense/linter can know
# if the wrong variable name is called, contrarily to the FLAGS.* case.
# NOTE(review): flag names mix underscores ("batch_size") and dashes
# ("save-period-min") — kept as-is for command-line compatibility.
FLAG_ALPHA_MODE = flags.DEFINE_bool(
    "alpha_mode",
    False,
    "",
)
FLAG_APPROACH_TYPE = flags.DEFINE_enum(
    "approach_type",
    None,
    constants.ApproachTypeChoices.choices(),
    "Type of approach to use.\n"
)
FLAG_MODEL_KEY = flags.DEFINE_string(
    "model_key",
    None,
    "Hugging Face key associated to the pre-trained model."
)
FLAG_RUN_NAME = flags.DEFINE_string(
    "run_name",
    None,
    "Name of the run. Can be anything."
)
FLAG_OUTPUT_DIR = flags.DEFINE_string(
    "output_dir",
    None,
    "Where to save the results to."
)
FLAG_BATCH_SIZE = flags.DEFINE_integer(
    "batch_size",
    None,
    "Inference batch size."
)
FLAG_BATCH_SPLIT = flags.DEFINE_integer(
    "batch_split",
    None,
    "Used for manual_improved. Sub-batch size."
)
FLAG_TASK = flags.DEFINE_enum(
    "task",
    constants.TaskChoices.train,
    constants.TaskChoices.choices(),
    "Whether to train or to evaluate the mode."
)
FLAG_RANDOM_SEED = flags.DEFINE_integer(
    "random_seed", 0,
    "Random seed used used for the random elements of the script."
)
FLAG_DB_PATH = flags.DEFINE_string(
    "db_path",
    None,
    "Path to the h5 file containing the dataset prepared with query_cacher.py"
)

# TPU Specific Args
FLAG_EXPERIMENTAL_COMPILE = flags.DEFINE_bool(
    "experimental_compile",
    False,
    "Whether to use experimental compile with the train and eval functions."
)
FLAG_DISTRIBUTE_MODE = flags.DEFINE_enum(
    "distribute_mode",
    constants.DistributeModeChoices.onedevicestrategy,
    constants.DistributeModeChoices.choices(),
    "What type of infrastructure to use to distribute the work."
)
FLAG_NUM_REPLICAS = flags.DEFINE_integer(
    "num_replicas",
    1,
    "Number of replicas to use fordata parallelism."
)

# Training specific flags
FLAG_MODEL_OUTPUT_PATH = flags.DEFINE_string(
    "model_output_path",
    None,
    "Where to save the model."
)
FLAG_LEARNING_RATE = flags.DEFINE_float(
    "learning_rate",
    None,
    "Learning rate for the optimizer."
)
FLAG_BATCHES_BETWEEN_EVALS = flags.DEFINE_integer(
    "batches_between_evals",
    5,
    "Number of batches between eval passes."
)
FLAG_NUMBER_EVAL_BATCHES = flags.DEFINE_integer(
    "number_eval_batches",
    1,
    "Number of eval batches when doing an eval pass."
)
FLAG_USE_HELPER_WORDS = flags.DEFINE_boolean(
    "use_helper_words",
    True,
    "Whether to add guiding words in the inputs, like `Question:`,"
    " `Answer:` and `Context:`. "
)

# Retriever specific flags
FLAG_QUERY_END = flags.DEFINE_integer(
    "query_end",
    256,
    "When querying once, length of the query being taken from the inputs."
)
# FLAG_RETRIEVER_CONFIG_PATH = flags.DEFINE_string(
#     "retriever_config_path",
#     None,
#     "Path to the configuration file for the retrievers."
# )
FLAG_SCANN_CONFIG_PATH = flags.DEFINE_string(
    "scann_config_path",
    os.path.join(
        SCRIPT_DIRECTORY, "configs", "scann_configs", "default_config.json"
    ),
    "Configuration file for the ScaNN MIPS library."
)
FLAG_NUM_RETRIEVALS = flags.DEFINE_integer(
    "num_retrievals",
    None,
    "Number of neighbors to get with each retrieval."
)
FLAG_RETRIEVAL_TEMPERATURE = flags.DEFINE_float(
    "retrieval_temperature",
    None,
    "Temperature to be used with the sampling in the softmax of certain "
    "retrievers (just retrievers.FullyCacherRetriever currently)."
)
FLAG_FULLYCACHED_H5_PATH = flags.DEFINE_string(
    "fullycached_h5_path",
    None,
    "Path to the .h5 file to be used by the fully cached retriever."
)
FLAG_RETRIEVAL_BANK_SIZE = flags.DEFINE_integer(
    "retrieval_bank_size",
    10,
    "Number of segments to sample from for the retrievals."
)

# Dataset specific flags
FLAG_DATASET_DEBUG = flags.DEFINE_boolean(
    "dataset_debug",
    False,
    "Whether to enable costly runtime checks for the dataset."
)
# NOTE(review): the flag name has a trailing "d" ("input_fixed_sized") —
# likely a typo, but kept because changing it would break existing
# command lines.
FLAG_INPUT_FIXED_SIZE = flags.DEFINE_boolean(
    "input_fixed_sized",
    True,
    "Whether to pad all inputs to the same size.")
FLAG_DATASET_NAME = flags.DEFINE_enum(
    "dataset_name",
    None,
    constants.DatasetNameChoices.choices(),
    "Name or TFDS key of the dataset we want"
)
FLAG_USE_SUBSET = flags.DEFINE_bool(
    "use_subset",
    False,
    "Whether to just use a subset of the data."
)
FLAG_SUBSET_SIZE = flags.DEFINE_integer(
    "subset_size",
    1000,
    "If we are using a subset of the data number of samples to use."
)
FLAG_DATASET_TYPE = flags.DEFINE_enum(
    "dataset_type",
    constants.DatasetTypeChoices.tfr,
    constants.DatasetTypeChoices.choices(),
    "Use TFR. Used to have more choices."
)
FLAG_QTY_SHUFFLE = flags.DEFINE_integer(
    "qty_shuffle",
    100,
    "Shuffle how many samples every time."
)
FLAG_TFR_PREFIX = flags.DEFINE_string(
    "tfr_prefix",
    None,
    "Prefix of the location of the tf record dataset.",
)
FLAG_MAX_LENGTH_GENERATION = flags.DEFINE_integer(
    "max_length_generation",
    None,
    "Maximum length of the generation."
)
FLAG_SAVE_PERIOD_MIN = flags.DEFINE_integer(
    "save-period-min",
    20,
    "How many minutes to wait between saves."
)
FLAG_TPU_NAME = flags.DEFINE_string(
    "tpu-name",
    socket.gethostname(),
    "Name of the TPU to use."
)
FLAG_OPTIMIZER_TYPE = flags.DEFINE_enum(
    "optimizer_type",
    None,
    constants.OptimizerTypes.choices(),
    "Which optimizer to use."
)
FLAG_LOG_SAMPLES = flags.DEFINE_boolean(
    "log_samples",
    None,
    "Whether to log the values of the samples. Very Costly!"
)
FLAG_DO_RESUME = flags.DEFINE_boolean(
    "do-resume",
    False,
    "Whether to resume training from a checkpoint."
)
FLAG_RESUME_PATH = flags.DEFINE_string(
    "resume-path",
    "",
    "From which path to resume from."
)
FLAG_TRAIN_ON_INPUT = flags.DEFINE_boolean(
    "train-on-input",
    False,
    "Whether to also train over the questions and the retrievals."
)
FLAG_TPU_IS_LOCAL = flags.DEFINE_boolean(
    "tpu-is-local",
    True,
    "Whether the TPU is on the same machine as the python interpreter, ie, "
    "whether we are using a one-vm machine.",
)
################################################################################
# Training and evaluation step functions.
################################################################################
# With tf.function, one can't pass non-tensor objects. This makes it so all
# non-tensor objects need to be passed through non-local references, making
# the step functions closures. In order to make our code cleaner / make
# dependencies more explicit, we build the closures with builder functions that
# explicitly show each step function's dependencies.
def build_regular_training_step(
    model,
    optimizer,
    strategy,
    tf_function_kwargs = None
):
  """Build the training step that is used in all cases but vertical mod. par.

  Args:
    model: Model whose `trainable_variables` are updated.
    optimizer: Optimizer used to apply the gradients.
    strategy: `tf.distribute` strategy, used to average a per-replica loss.
    tf_function_kwargs: Extra keyword arguments forwarded to `tf.function`.

  Returns:
    A `tf.function` taking `(input_ids, label_ids)` and returning the scalar
    training loss of the batch.
  """
  tf_function_kwargs = {} if tf_function_kwargs is None else tf_function_kwargs

  @tf.function(**tf_function_kwargs)
  def training_step(input_ids, label_ids):
    """Computes the loss, backpropagates gradients, updates weights."""
    with tf.GradientTape() as tape:
      partial_loss = model(
          input_ids,
          labels=label_ids,
          training=True,
          return_dict=True).loss
      if isinstance(partial_loss, values.PerReplica):
        # Cross-replica mean when the loss is sharded over replicas.
        average_loss = strategy.reduce(
            tf.distribute.ReduceOp.MEAN, partial_loss, axis=None
        )
      else:
        average_loss = tf.math.reduce_mean(partial_loss)
    grads = tape.gradient(average_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))
    # Fix: the previous version appended `average_loss` to a single-element
    # `losses` list and returned `tf.math.reduce_mean(losses)` — a leftover
    # from a removed multi-batch loop. Returning the scalar directly is
    # numerically identical and simpler.
    return average_loss

  return training_step
def build_evaluation_step(
    model,
    tf_function_kwargs = None,
):
  """Build the evaluation step: mean loss over the batch, one sub-batch at a time.

  Args:
    model: Model evaluated with `training=False`.
    tf_function_kwargs: Extra keyword arguments forwarded to `tf.function`.

  Returns:
    A `tf.function` taking `(input_ids, label_ids)` and returning the mean loss.
  """
  # Mutable default arguments are a bad practice; build the dict lazily.
  if tf_function_kwargs is None:
    tf_function_kwargs = {}

  @tf.function(**tf_function_kwargs)
  def fn(input_ids, label_ids):
    # The full batch is processed in `batch_size // batch_split` slices of
    # `batch_split` samples each, to bound peak memory.
    sub_batch = FLAG_BATCH_SPLIT.value
    num_sub_batches = FLAG_BATCH_SIZE.value // sub_batch
    sub_losses = []
    for sub_idx in range(num_sub_batches):
      lo = sub_idx * sub_batch
      hi = lo + sub_batch
      sub_losses.append(
          model(
              input_ids[lo:hi],
              labels=label_ids[lo:hi],
              training=False,
              return_dict=True).loss
      )
    return tf.math.reduce_mean(sub_losses)

  return fn
class Saver:
  """Persists model/optimizer checkpoints under the instance output directory.

  Thin wrapper around `tf.train.CheckpointManager`; also logs the flags
  alongside (via the directory chosen by the caller).
  """

  def __init__(self, instance_output_dir: str, checkpoint: tf.train.Checkpoint):
    utils.check_not_none(instance_output_dir)
    utils.check_operator(operator.gt, len(instance_output_dir), 0)
    # Normalize the target to a trailing-slash directory path.
    target_dir = str(instance_output_dir)
    if not target_dir.endswith("/"):
      target_dir += "/"
    self._instance_output_dir = target_dir
    self._checkpoint = checkpoint
    # max_to_keep=None: keep every checkpoint ever written.
    self._checkpoint_manager = tf.train.CheckpointManager(
        checkpoint=checkpoint,
        directory=self._instance_output_dir,
        max_to_keep=None,
    )
    # Single-worker pool retained for (currently unused) async-upload support.
    self._pool = concurrent.futures.ThreadPoolExecutor(1)
    self._futures = []

  def save_model(
      self,
      train_steps: int,
      model_or_replicas,
      optimizer,
  ):
    """Writes a checkpoint numbered by `train_steps`.

    `model_or_replicas` and `optimizer` are currently unused: both objects
    already live inside the `tf.train.Checkpoint` captured at construction.
    """
    self._checkpoint_manager.save(checkpoint_number=tf.constant(train_steps))

  def __del__(self):
    self._pool.shutdown()
def main(argv):
  """Trains/evaluates the retrieval-augmented LM according to the flags."""
  ##############################################################################
  # Initial Setup. Logging, Flags, Random seeds.
  ##############################################################################
  if len(argv) > 1:
    raise app.UsageError("Too many command-line arguments.")
  absl_logging.use_python_logging()
  # Snapshot of all flag values defined by this module, for logging/saving.
  flags_dict = {
      flag.name: flag.value
      for flag in FLAGS.flags_by_module_dict()[argv[0]]
  }

  if FLAGS.use_subset:
    message = (f"{colorama.Back.RED}{colorama.Fore.WHITE}"
               f"{colorama.Style.BRIGHT}USING A SUBSET OF THE DATASET"
               f"{colorama.Style.RESET_ALL}")
    LOGGER.warning(
        message
    )

  utils.log_module_args(LOGGER, argv[0])
  if not FLAGS.output_dir.startswith("gs://"):
    utils.check_exists(FLAG_OUTPUT_DIR.value)
    if not tf.io.gfile.isdir(FLAG_OUTPUT_DIR.value):
      raise RuntimeError("Output dir needs to be a directory.")

  tf.random.set_seed(FLAG_RANDOM_SEED.value)
  np.random.seed(FLAG_RANDOM_SEED.value)

  # Prepare the instance output directory path and save the config there
  # Prepare the path
  folder_name = time.strftime(
      f"{FLAG_RUN_NAME.value}_{FLAG_APPROACH_TYPE.value}_%Y%m%d-%H%M%S"
  )
  instance_output_dir = os.path.join(
      FLAG_OUTPUT_DIR.value, folder_name
  ).strip()
  if not instance_output_dir.endswith("/"):
    instance_output_dir += "/"
  json_target = os.path.join(instance_output_dir, "training_params.json")
  # Make the folder if we're not on gcloud
  if not json_target.strip().startswith("gs://"):
    subprocess.check_call(["mkdir", "-p", instance_output_dir])
  # Save the config file
  utils.to_json_file(json_target, flags_dict)

  ##############################################################################
  # Initialization and Configuration of the Devices.
  ##############################################################################
  tpu_setup = None

  accel = tf_utils.current_accelerator_type()
  if FLAG_TPU_IS_LOCAL.value:
    assert accel == "TPU", accel
  if accel == "TPU":
    assert FLAG_TPU_IS_LOCAL.value, FLAG_TPU_IS_LOCAL.value

  if tf_utils.current_accelerator_type() in {"CPU", "TPU"}:
    tpu_setup = tf_utils.init_tpus(
        tpu_name=FLAG_TPU_NAME.value,
        local=FLAG_TPU_IS_LOCAL.value
    )

  LOGGER.debug("Devices we are computing on:\n%s",
               utils.wrap_iterable(map(str, tf_utils.devices_to_use())))
  LOGGER.debug("All devices:")
  LOGGER.debug(tf_utils.device_mapping())

  if tf_utils.current_accelerator_type() == "GPU":
    tf.config.set_soft_device_placement(True)

  if tf_utils.current_accelerator_type() != "TPU":
    tf.debugging.set_log_device_placement(True)

  utils.check_operator(
      operator.ne, tf_utils.current_accelerator_type(), "CPU"
  )

  assert FLAG_TPU_NAME.value == socket.gethostname(), (
      "This is a configuration choice. You can remove this. "
      "There will be no side effects.")

  if FLAG_DISTRIBUTE_MODE.value in constants.PURE_DATA_PARALLEL_STRATEGIES:
    actual_num_replicas = len(tf_utils.devices_to_use())
  elif FLAG_DISTRIBUTE_MODE.value in constants.DATA_PARALLEL_DMC:
    actual_num_replicas = FLAG_NUM_REPLICAS.value
  else:
    actual_num_replicas = 1

  ##############################################################################
  # We load the retriever model if it is needed.
  ##############################################################################
  # Not currently used. See old commits.
  retriever = None

  ##############################################################################
  # Distributed training task
  ##############################################################################
  if FLAG_TASK.value == constants.TaskChoices.train:
    with utils.log_duration(LOGGER, "main", "Load model"):
      utils.print_mem("before loading model", LOGGER)
      model_specific = task_specific.load_model(
          FLAG_MODEL_KEY.value,
          FLAG_DISTRIBUTE_MODE.value,
          tpu_setup,
          FLAG_NUM_REPLICAS.value
      )
      utils.print_mem("after loading model", LOGGER)
    model = model_specific.model
    # Annotation-only statements below: they only narrow the type for
    # readers and tooling, they have no runtime effect.
    if isinstance(model, list):
      model: List[transformers.TFGPT2LMHeadModel]
    else:
      model: transformers.TFGPT2LMHeadModel
    tokenizer = model_specific.tokenizer

    def make_optimizer():
      # Builds the optimizer selected by --optimizer_type.
      if FLAG_OPTIMIZER_TYPE.value == constants.OptimizerTypes.adafactor:
        return tensor2tensor.utils.adafactor.AdafactorOptimizer(
            learning_rate=FLAG_LEARNING_RATE.value
        )
      elif FLAG_OPTIMIZER_TYPE.value == constants.OptimizerTypes.adam:
        return tf.keras.optimizers.Adam(
            learning_rate=FLAG_LEARNING_RATE.value
        )
      else:
        raise ValueError(FLAG_OPTIMIZER_TYPE.value)

    if model_specific.strategy:
      with model_specific.strategy.scope():
        optimizer = make_optimizer()
    else:
      optimizer = make_optimizer()

    ############################################################################
    # Prepare the dataset functions
    ############################################################################
    rg = np.random.default_rng(FLAG_RANDOM_SEED.value)

    def call_lm_preproc(
        repeat,
        split,
        random_seed
    ):
      """Using functools.partial prevents the linter from doing its job."""
      if FLAG_DATASET_NAME.value == constants.DatasetNameChoices.kilt_eli5:
        return task_specific.create_lm_ds_kilt_eli5(
            tokenizer=tokenizer,
            context_window_size=model.config.n_positions,
            dataset_name=FLAG_DATASET_NAME.value,
            # Batches are split over the replicas:
            batch_size=FLAG_BATCH_SIZE.value * actual_num_replicas,
            db_path=FLAG_DB_PATH.value,
            random_seed=random_seed,
            use_subset=FLAG_USE_SUBSET.value,
            subset_size=FLAG_SUBSET_SIZE.value,
            use_helper_words=FLAG_USE_HELPER_WORDS.value,
            approach_type=FLAG_APPROACH_TYPE.value,
            num_retrievals=FLAG_NUM_RETRIEVALS.value,
            retrieval_temperature=FLAG_RETRIEVAL_TEMPERATURE.value,
            retriever=retriever,
            repeat=repeat,
            split=split,
            enable_debug_checks=FLAG_DATASET_DEBUG.value,
            retrieval_bank_size=FLAG_RETRIEVAL_BANK_SIZE.value,
            dataset_type=FLAG_DATASET_TYPE.value,
            qty_shuffle=FLAG_QTY_SHUFFLE.value,
            tfr_prefix=FLAG_TFR_PREFIX.value,
            max_length_generation=FLAG_MAX_LENGTH_GENERATION.value,
        )
      else:
        raise NotImplementedError(
            f"FLAG_DATASET_NAME.value unsupported: `{FLAG_DATASET_NAME.value}`"
        )

    make_training_dataset: Callable[..., tf.data.Dataset] = functools.partial(
        call_lm_preproc,
        split="train",
        repeat=False,
    )
    make_eval_dataset: Callable[..., tf.data.Dataset] = functools.partial(
        call_lm_preproc,
        split="eval",
        repeat=True,
    )

    ############################################################################
    # Prepare the step functions
    ############################################################################
    utils.check_contained(
        FLAG_DISTRIBUTE_MODE.value, constants.DistributeModeChoices.choices()
    )
    tf_function_flags = dict(
        experimental_compile=FLAG_EXPERIMENTAL_COMPILE.value,
        experimental_relax_shapes=not FLAG_INPUT_FIXED_SIZE.value
    )

    training_step = build_regular_training_step(
        model,
        optimizer,
        strategy=model_specific.strategy,
        tf_function_kwargs=tf_function_flags
    )

    evaluation_step = build_evaluation_step(
        model, tf_function_flags
    )

    timestamp_last_ckpt_secs = time.time()
    # Model checkpoints are saved to the tmp_directory and then rsynced to GCS

    ############################################################################
    # Prepare the statistics and the logging facilities.
    ############################################################################
    # Tensorboard
    with model_specific.strategy.scope():
      checkpoint = tf.train.Checkpoint(
          optimizer=optimizer, model=model
      )
    saver = Saver(instance_output_dir, checkpoint)
    train_log_dir = os.path.join(instance_output_dir, "tensorboard", "train")
    eval_log_dir = os.path.join(instance_output_dir, "tensorboard", "eval")
    flags_log_dir = os.path.join(instance_output_dir, "tensorboard", "params")
    writers = dict(
        train=tf.summary.create_file_writer(train_log_dir),
        eval=tf.summary.create_file_writer(eval_log_dir),
        flags=tf.summary.create_file_writer(flags_log_dir)
    )
    with writers["flags"].as_default():
      tf.summary.text(
          "Flags",
          # Tensorboard takes Markdown:
          json.dumps(flags_dict, indent=4).replace("\n", "\n\n"),
          step=0
      )

    # Different information to log.
    ma_loss = dict(
        train=utils.MovingAverage(0.9),
        eval=utils.MovingAverage(0.9)
    )
    step_counters = dict(train=0, eval=0)
    batch_counters = dict(train=0, eval=0)
    prev_batch_end = time.time()

    ############################################################################
    # Create the Eval DS object.
    # ==========================================================================
    # The eval ds has no real concept of epoch, repeats forever, shuffling
    # each time it reaches its end.
    ############################################################################
    # Create
    with utils.log_duration(LOGGER, "main", "All of make_eval_dataset"):
      eval_ds_instance = make_eval_dataset(
          random_seed=rg.integers(-2**63, 2**63 - 1),
      )
    # Maybe distribute
    LOGGER.debug("Distributing the eval dataset to the replicas.")
    if FLAG_DATASET_TYPE.value == "tfr":
      eval_ds_instance = (
          model_specific.strategy.experimental_distribute_dataset(
              eval_ds_instance
          )
      )
    # Start the iteration. We step by calling `next(...)`.
    LOGGER.debug("Done distributing the eval dataset to the replicas.")
    eval_ds_instance = iter(eval_ds_instance)
    step_function = dict(train=training_step, eval=evaluation_step)

    ############################################################################
    # Training Loop
    # ==========================================================================
    # Create a new training dataset object that lasts for one epoch.
    # This is different from the eval training dataset object, which loops
    # forever.
    ############################################################################
    for epoch in itertools.count():
      ##########################################################################
      # Epoch Setup
      ##########################################################################
      LOGGER.debug("EPOCH %d START", epoch)
      # Shuffle differently every epoch
      with utils.log_duration(
          LOGGER, "main", "All of make_training_dataset"
      ):
        train_ds_instance = make_training_dataset(
            random_seed=rg.integers(-2**63, 2**63 - 1),
        )
      LOGGER.debug(
          "Attempting to distribute the training dataset to the replicas."
      )
      if FLAG_DATASET_TYPE.value == "tfr":
        train_ds_instance = (
            model_specific.strategy.experimental_distribute_dataset(
                train_ds_instance
            )
        )
      LOGGER.debug(
          "Done distributing the training dataset to the replicas."
      )
      train_ds_instance = iter(train_ds_instance)

      # To change splits, we use `itertools.islice` over the dataset generator.
      # When the training dataset generator is done, a new loop of the following
      # while loop occurs, but no training batch is done because we are taking
      # an `islice` of a generator that is done.
      did_at_least_one_training_batch = True
      split = "eval"
      while did_at_least_one_training_batch:
        utils.check_operator(
            operator.ne, tf_utils.current_accelerator_type(), "CPU"
        )

        # Invert split
        if split == "train":
          split = "eval"
        else:
          split = "train"

        # Prepare to test if we did at least one training batch
        if split == "train":
          did_at_least_one_training_batch = False

        ########################################################################
        # Take slices from the dataset iterator
        # ======================================================================
        # We only want to do a certain number of batches before switching splits
        # We do this by using an `itertools.islice` of the dataset iterators.
        ########################################################################
        if split == "train":
          dataset_iterator = toolz.take(
              FLAG_BATCHES_BETWEEN_EVALS.value, train_ds_instance
          )
        else:
          # The evaluation dataset generator is infinite, reshuffles everytime
          # it gets to its end.
          # Still, we take a fixed size slice form that infinite generator.
          dataset_iterator = toolz.take(
              FLAG_NUMBER_EVAL_BATCHES.value, eval_ds_instance
          )

        LOGGER.debug("Batching")
        for batch in dataset_iterator:
          if FLAG_LOG_SAMPLES.value:
            ####################################################################
            # Print elements of the dataset
            ####################################################################
            # Make ourselves resistant to values possibly being a PerReplica
            # object
            # NOTE(review): the f-prefix on the string below is inert (it has
            # no replacement fields); the %-style mapping arg does the
            # formatting.
            LOGGER.warning(
                f"%(red)sLOGGING SAMPLES. THIS IS VERY SLOW.%(reset)s",
                dict(
                    red=colorama.Fore.RED,
                    reset=colorama.Style.RESET_ALL,
                )
            )
            is_distributed = isinstance(
                batch["input_ids"], values.PerReplica
            )
            for in_batch_idx in range(FLAG_BATCH_SIZE.value):
              for replica_idx in (
                  range(actual_num_replicas) if is_distributed
                  else [0]
              ):
                if is_distributed:
                  sample = {k: batch[k].values[replica_idx] for k in batch}
                else:
                  sample = batch

                # input_sentence = tokenizer.decode(
                #     [x for x in sample["input_ids"][i] if x != tokenizer.eos_token_id]
                # )

                # LOGGER.debug(
                #     "%sInput [%d / %d]%s:\n\"%s\"",
                #     colorama.Fore.GREEN,
                #     replica_idx + 1,
                #     actual_num_replicas,
                #     colorama.Style.RESET_ALL,
                #     input_sentence,
                # )
                #
                # answer = tokenizer.decode(
                #     [(x if x != -100 else 0) for x in sample["label_ids"][i]]
                # )
                # LOGGER.debug(
                #     "%sLabel [%d / %d]%s:\n\"%s\"",
                #     colorama.Fore.GREEN,
                #     replica_idx + 1,
                #     actual_num_replicas,
                #     colorama.Style.RESET_ALL,
                #     answer,
                # )

                cons = console.Console()
                sentences = table.Table()
                sentences.add_column("BPE Index", justify="center")
                sentences.add_column("Inputs", justify="center")
                sentences.add_column("Labels", justify="center")
                for bpe_idx, (x, y) in enumerate(itertools.zip_longest(
                    sample["input_ids"][in_batch_idx].numpy(),
                    sample["label_ids"][in_batch_idx].numpy(),
                    fillvalue=None,
                )):
                  x_w = tokenizer.decode([x]) if x >= 0 else f"[ {x} ]"
                  y_w = tokenizer.decode([y]) if y >= 0 else f"[ {y} ]"
                  sentences.add_row(str(bpe_idx), x_w, y_w)

                cons.print(sentences)

          # We only care about training epochs as, obviously, we don't train
          # over eval samples; the number of eval samples seen only
          # contributes to lowering the variance in the evaluation of when to
          # do early stopping.
          if split == "train":
            did_at_least_one_training_batch = True

          input_ids = batch["input_ids"]
          label_ids = batch["label_ids"]

          # Per split step counter
          step_counters[split] += FLAG_BATCH_SIZE.value * actual_num_replicas
          batch_counters[split] += 1

          ######################################################################
          # Model step function.
          ######################################################################
          step_function_kwargs = dict(
              input_ids=input_ids,
              label_ids=label_ids,
          )

          utils.print_mem(f"[{split}] - Mem before `strategy.run`", LOGGER)
          LOGGER.debug("[%s] - Calling `strategy.run`", split)
          loss = model_specific.strategy.run(
              step_function[split],
              kwargs=step_function_kwargs
          )
          LOGGER.debug("[%s] - Done `strategy.run`", split)
          utils.print_mem(f"[{split}] - Mem after `strategy.run`", LOGGER)

          ####################################################################
          # End of logging step code / Logging and saving the model.
          ####################################################################
          if (FLAG_DISTRIBUTE_MODE.value in
              constants.PURE_DATA_PARALLEL_STRATEGIES):
            utils.check_equal(len(loss.values), actual_num_replicas)
            LOGGER.debug(
                "[%s] - Real num replicas: %s", split, actual_num_replicas
            )
            average_loss = float(tf.math.reduce_mean(loss.values).numpy())
            LOGGER.debug("[%s] - Loss: %s", str(split), str(average_loss))
          else:
            average_loss = float(loss.numpy())

          tf.debugging.check_numerics(
              loss.values if isinstance(loss, values.PerReplica) else loss,
              "Numerics failed."
          )

          now = time.time()
          batch_duration = now - prev_batch_end
          prev_batch_end = now

          ma_loss[split].update(average_loss)
          LOGGER.info("[%s] - Epoch: # %d", split, epoch)
          LOGGER.info("[%s] - Tensorboard_dir: %s", split, instance_output_dir)
          LOGGER.info("[%s] - Batch: # %d", split, batch_counters[split])
          LOGGER.info("[%s] - Step: # %d", split, step_counters[split])
          if FLAG_USE_SUBSET.value:
            LOGGER.warning(">> USING A SUBSET OF THE DATASET <<")
          LOGGER.info(
              "[%(split)s] - Batch loss: %(metric)f",
              dict(split=split, metric=average_loss)
          )
          LOGGER.info(
              "[%(split)s] - Moving average loss: %(metric)f",
              dict(split=split, metric=ma_loss[split].average)
          )
          LOGGER.info(
              "[%(split)s] - Moving average ppl: %(metric)f",
              dict(split=split, metric=np.exp(ma_loss[split].average))
          )
          LOGGER.info(
              "[%(split)s] - Batch duration: %(duration)s",
              dict(
                  split=split,
                  duration=utils.TimeStamp.from_seconds(
                      batch_duration).format()
              )
          )

          # Write to Tensorboard
          with writers[split].as_default():
            tf.summary.scalar(
                f"Loss/{split}", average_loss, step_counters[split]
            )
            tf.summary.scalar(
                f"PPL/{split}", np.exp(average_loss), step_counters[split]
            )
          writers[split].flush()

          ######################################################################
          # Save every `FLAG_SAVE_PERIOD_MIN.value` minutes.
          ######################################################################
          delta_sec = time.time() - timestamp_last_ckpt_secs
          utils.check_operator(operator.gt, delta_sec, 0)
          period_sec = 60 * FLAG_SAVE_PERIOD_MIN.value
          utils.check_operator(operator.gt, period_sec, 0)
          ratio = delta_sec / period_sec
          LOGGER.info(
              "[%(split)s] - RATIO: %(ratio)s",
              dict(
                  split=split,
                  ratio=str(ratio)
              )
          )
          LOGGER.info(
              "[%(split)s] - Target: %(target)s, Present: %(present)s",
              dict(
                  split=split,
                  target=str(period_sec),
                  present=str(delta_sec),
              )
          )

          if ratio >= 1:
            dur = delta_sec / 60
            timestamp_last_ckpt_secs = time.time()
            LOGGER.debug("SAVING MODEL - CAUSE: DURATION - %0.2f min", dur)
            # checkpoint.save(ckpt_prefix)
            saver.save_model(
                train_steps=step_counters["train"],
                model_or_replicas=model,
                optimizer=optimizer,
            )

  ############################################################################
  # Post Training Cleanup
  ############################################################################
  # NOTE(review): the epoch loop above is `itertools.count()` with no break,
  # so this cleanup is only reached if the loop raises — confirm intended.
  for writer in writers.values():
    writer.close()


if __name__ == "__main__":
  app.run(main)
| {"/main.py": ["/task_specific.py"], "/notebooks/display_generation_input.py": ["/generation.py", "/task_specific.py"], "/generation.py": ["/task_specific.py"]} |
78,100 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /task_specific.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset and model specific code.
"""
import logging
import numpy as np
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from absl import flags
import constants
import dataclasses
import rich.console
import rich.panel
print = rich.console.Console(color_system="256").print
import tensorflow as tf
import tf_utils
import transformers
import utils
# tf.config.run_functions_eagerly(True)
FLAGS = flags.FLAGS
LOGGER = logging.getLogger(__name__)
TokenizerType = Union[transformers.PreTrainedTokenizer,
transformers.PreTrainedTokenizerFast]
################################################################################
# Model Specific
################################################################################
@dataclasses.dataclass
class CreateModelReturn:
  """Bundle returned by `load_model` and the MODEL_FACTORIES entries."""
  # Tokenizer matching the loaded model.
  tokenizer: TokenizerType
  # A single model, or one model per replica.
  model: Union[transformers.PreTrainedModel, List[transformers.PreTrainedModel]]
  # Factories leave this as None; `load_model` fills it in.
  strategy: Optional[tf.distribute.Strategy]
def load_model(
    model_key,
    distribute_mode,
    tpu_setup,
    num_replicas,
):
  """Builds the distribution strategy, then loads the model under it.

  Args:
    model_key: Key used to select the correct model loading function from
      the MODEL_FACTORIES dict.
    distribute_mode: A string describing how the model is distributed; must
      be one of `constants.DistributeModeChoices.choices()`.
    tpu_setup: TPU configuration information (provides `.resolver`); may be
      falsy, in which case a default TPUStrategy is built.
    num_replicas: Number of data parallelism replicas. Not forwarded here;
      the strategy itself creates the replicas.

  Returns:
    A `CreateModelReturn` containing the tokenizer, the model and the
    strategy.

  Raises:
    ValueError: If `distribute_mode` is not a known choice.
    NotImplementedError: If `distribute_mode` is a known choice this
      function does not handle.
  """
  if distribute_mode not in constants.DistributeModeChoices.choices():
    raise ValueError(f"Unsupported distribute_mode: `{distribute_mode}`")

  if distribute_mode == constants.DistributeModeChoices.tpustrategy:
    if tpu_setup:
      strategy = tf.distribute.TPUStrategy(
          tpu_setup.resolver,
      )
    else:
      strategy = tf.distribute.TPUStrategy()
  elif distribute_mode == constants.DistributeModeChoices.onedevicestrategy:
    # Test mode with a single device, possibly a CPU.
    strategy = tf.distribute.OneDeviceStrategy(tf_utils.devices_to_use()[0])
  else:
    raise NotImplementedError(distribute_mode)

  # The factory must run inside the strategy scope so variables are placed
  # on the distributed devices.
  with strategy.scope():
    config: CreateModelReturn = MODEL_FACTORIES[model_key](
        model_key,
        distribute_mode,
        None  # The replicas are created by the tf.distribute.Strategy obj
    )
    config.strategy = strategy
  return config
def _create_gpt2(
    model_name,
    distribute_mode,
    num_replicas  # pylint: disable=unused-argument
):
  """Loads the tokenizer and the weights for a GPT-2 family model.

  Args:
    model_name: Hugging Face model key, e.g. `gpt2-xl` or `distilgpt2`.
    distribute_mode: How the model is distributed. Unused here; the caller
      (`load_model`) has already entered the strategy scope.
    num_replicas: Number of data parallelism replicas. Unused here.

  Returns:
    A `CreateModelReturn` with the tokenizer and the model; the `strategy`
    field is left as `None` for `load_model` to fill in.
  """
  ##############################################################################
  # Load the tokenizer
  ##############################################################################
  LOGGER.debug("Loading the weights: `%s`", model_name)
  tokenizer = transformers.GPT2TokenizerFast.from_pretrained(model_name)
  LOGGER.debug("Done loading the tokenizer.")

  ##############################################################################
  # Load the model
  ##############################################################################
  LOGGER.debug("Loading the model weights.")
  with utils.log_duration(LOGGER, "main", "Loading the model."):
    model = transformers.TFGPT2LMHeadModel.from_pretrained(
        model_name,
    )
  # Fix: use the module-level LOGGER, consistent with the other log calls in
  # this function. The original called the root `logging.debug`, which
  # bypasses this module's logger configuration.
  LOGGER.debug("Done loading the %s model.", model_name)
  return CreateModelReturn(
      tokenizer=tokenizer,
      model=model,
      strategy=None,
  )
def make_parse_fn(split: str, context_window_size: int) -> Callable:
  """Builds the tf.Example parsing function for one dataset split.

  Every field is serialized as a string tensor; each one is deserialized
  with `tf.io.parse_tensor` using a per-field dtype, then given its static
  shape. The answer field only exists outside of the test split.
  """
  # Per-field (dtype, static shape) of the serialized tensors.
  specs: Dict[str, Tuple[Any, Tuple[int, Ellipsis]]] = {
      constants.CTH5Fields.distances:
          (tf.float32, (10,)),
      constants.CTH5Fields.gpt2_retrieved_ids:
          (tf.int32, (10, context_window_size,)),
      constants.CTH5Fields.gpt2_question_ids_inputs:
          (tf.int32, (context_window_size,)),
  }
  if split != constants.SplitChoices.test:
    specs[constants.CTH5Fields.gpt2_answer_ids_inputs] = (
        tf.int32, (context_window_size,)
    )

  # Each field arrives as a serialized (string) scalar feature.
  description = {
      field: tf.io.FixedLenFeature((), tf.string) for field in specs
  }

  # @tf.function
  def parse(sample):
    example = tf.io.parse_single_example(sample, description)
    parsed = {}
    for field, serialized in example.items():
      dtype, shape = specs[field]
      tensor = tf.io.parse_tensor(serialized, out_type=dtype)
      tensor.set_shape(shape)
      parsed[field] = tensor
    return parsed

  return parse
################################################################################
# Dataset Specific
################################################################################
# Plain-text markers inserted between the question / retrieved-context /
# answer segments when helper words are enabled. The values are tokenized
# per batch inside `_make_maybe_retrieve_and_merge_fn`.
_HELPER_TEXT = {
    "question": "Question:\n",
    "context": "\nContext:\n",
    "answer": "\nAnswer:\n"
}
def create_lm_ds_kilt_eli5(
    *,
    tokenizer,
    context_window_size,
    dataset_name,  # pylint: disable=unused-argument
    batch_size,
    split,
    db_path,  # pylint: disable=unused-argument
    random_seed,
    use_subset,  # pylint: disable=unused-argument
    subset_size,  # pylint: disable=unused-argument
    repeat,
    use_helper_words,
    approach_type,
    retriever,
    num_retrievals,
    retrieval_temperature,
    enable_debug_checks,
    retrieval_bank_size,  # pylint: disable=unused-argument
    dataset_type,
    qty_shuffle,
    tfr_prefix,
    max_length_generation,
):
  """Dataset preparation function for the Kilt version of the ELI5 dataset.

  This is for when the dataset is consumed by language models.

  Args:
    tokenizer: Tokenizer of the reader model.
    context_window_size: Size of the context window of the reader model.
    dataset_name: Exact name of the dataset. Unused here.
    batch_size: Size of the batch for the reader model.
    split: The train, evaluation or test split.
    db_path: Path to the retrieval database. Unused here.
    random_seed: Seed used to shuffle the dataset. Should change at each epoch.
    use_subset: Whether to use a subset of the data. Unused here.
    subset_size: Size of the subset. Unused here.
    repeat: Whether to repeat the dataset.
    use_helper_words: Whether to add helper words in the merged samples.
    approach_type: Type of overall solution we are using.
    retriever: Object that does the retrieval.
    num_retrievals: Number of retrievals to do.
    retrieval_temperature: For the retrieval methods that do sampling, what
      temperature to use.
    enable_debug_checks: Whether to add tf.Assert-style runtime checks.
    retrieval_bank_size: Size of the retrieval bank. Unused here.
    dataset_type: Storage format of the dataset; must be TFRecord here.
    qty_shuffle: Size of the tf.data shuffle buffer.
    tfr_prefix: Directory containing the TFRecord shards.
    max_length_generation: Number of tokens reserved for generation.

  Returns:
    A tf.data.Dataset object that generates input_ids and label_ids for the
    generator model.

  Raises:
    RuntimeError: If we didn't find any files with the glob pattern.
    RuntimeError: If we are using a dataset type that is not supported.
  """
  maybe_retrieve_and_merge = _make_maybe_retrieve_and_merge_fn(
      tokenizer=tokenizer,
      context_size=context_window_size,
      retriever=retriever,
      temperature=retrieval_temperature,
      num_retrievals=num_retrievals,
      ds_split=split,
      approach_type=approach_type,  # FLAG_APPROACH_TYPE.value
      use_helper_words=use_helper_words,  # FLAG_USE_HELPER_WORDS
      enable_debug_checks=enable_debug_checks,
      max_length_generation=max_length_generation,
  )

  # Only the TFRecord storage format is supported by this loader.
  utils.check_equal(dataset_type, constants.DatasetTypeChoices.tfr)
  glob_pattern = os.path.join(tfr_prefix, f"{split}*")
  filenames = list(tf.io.gfile.glob(glob_pattern))
  if not filenames:
    raise RuntimeError(
        f"filnames is empty. Glob pattern was: {glob_pattern}"
    )

  parse = make_parse_fn(split, context_window_size)

  ds = tf.data.TFRecordDataset(
      filenames=filenames,
      num_parallel_reads=tf.data.experimental.AUTOTUNE,
  )

  ds = ds.map(
      parse,
      num_parallel_calls=tf.data.experimental.AUTOTUNE,
      deterministic=False,
  )

  if repeat:
    ds = ds.repeat()

  utils.check_not_none(random_seed)
  utils.check_not_none(qty_shuffle)
  ds = ds.shuffle(qty_shuffle, seed=random_seed)

  ds = ds.batch(
      batch_size,
      # Partial final batches are kept only for the test split.
      drop_remainder=split != constants.SplitChoices.test,
  )

  # We can't use parallel calls here, the huggingface Rust fast tokenizer
  # breaks with multiple threads. It seems to still be worth it over their
  # slow one though, vs using parallel threads.
  ds = ds.map(maybe_retrieve_and_merge)
  # return map(maybe_retrieve_and_merge, ds)
  return ds
  # return ds.prefetch(tf.data.experimental.AUTOTUNE)
def _make_maybe_retrieve_and_merge_fn(
    *,
    tokenizer,
    context_size,
    ds_split,
    approach_type,  # FLAG_APPROACH_TYPE.value
    use_helper_words,  # FLAG_USE_HELPER_WORDS
    retriever,  # pylint: disable=unused-argument
    temperature,
    num_retrievals,
    enable_debug_checks,
    max_length_generation,
    tf_function_kwargs=None,
):
  """Build the `maybe_retrieve_and_merge` closure.

  The closure takes a parsed + batched feature dict (see `make_parse_fn`)
  and produces the final `input_ids` / `label_ids` dense tensors of width
  `context_size` for the reader model. Labels are masked (PPL_MASK_ID /
  -100) everywhere except over the answer tokens; the test split has no
  labels at all.
  """
  tf_function_kwargs = {} if tf_function_kwargs is None else tf_function_kwargs
  not_test_split = ds_split != constants.SplitChoices.test

  # @tf.function(**tf_function_kwargs)
  def maybe_retrieve_and_merge(
      batch,
  ):
    """Retrieve if needed, then finalize the prep. for model consumption."""
    batch_size = tf.shape(batch[
        constants.CTH5Fields.gpt2_question_ids_inputs
    ])[0]

    # Prepare the question ids inputs.
    # RAGGED_PADDING_ID marks unused positions in the fixed-width tensors.
    question_ids_inputs = batch[constants.CTH5Fields.gpt2_question_ids_inputs]
    question_ids_inputs = tf.RaggedTensor.from_tensor(
        question_ids_inputs,
        padding=constants.RAGGED_PADDING_ID
    )

    # Prepare the answer ids inputs (absent for the test split).
    answer_ids_inputs = None
    answer_ids_labels = None
    if not_test_split:
      answer_ids_inputs = batch[constants.CTH5Fields.gpt2_answer_ids_inputs]
      answer_ids_inputs = tf.RaggedTensor.from_tensor(
          answer_ids_inputs,
          padding=constants.RAGGED_PADDING_ID
      )
      answer_ids_labels = answer_ids_inputs

    ############################################################################
    # Prepare the helper words
    ############################################################################
    # Tokenize each _HELPER_TEXT entry and tile it over the batch.
    helper_word_token_ids = None
    if use_helper_words:
      helper_word_token_ids = {}
      for k in _HELPER_TEXT:
        ids = tf.constant(tokenizer.encode(_HELPER_TEXT[k]), dtype=tf.int32)
        ids = tf.repeat(tf.expand_dims(ids, 0), batch_size, axis=0)
        helper_word_token_ids[k] = ids
      # Prepend "Question:" to the question tokens.
      question_ids_inputs = tf.concat(
          [helper_word_token_ids["question"], question_ids_inputs],
          axis=1
      )

    ##########################################################################
    # W/ Cached Retrievals
    ##########################################################################
    label_ids = None
    if approach_type == constants.ApproachTypeChoices.cached_pretok:
      bpe_indices_gpt2 = batch[constants.CTH5Fields.gpt2_retrieved_ids]
      bpe_indices_gpt2 = tf.RaggedTensor.from_tensor(
          bpe_indices_gpt2,
          ragged_rank=2,
          padding=constants.RAGGED_PADDING_ID
      )

      distances = batch[constants.CTH5Fields.distances]
      input_ids, label_ids = _prepare_samples_w_retrieval(
          split=ds_split,
          batch_size=batch_size,
          question_ids_inputs=question_ids_inputs,
          answer_ids_inputs=(
              answer_ids_inputs if not_test_split else None
          ),
          gpt2_tokenized_retrieved=bpe_indices_gpt2,
          num_retrievals_to_use=num_retrievals,
          temperature=temperature,
          context_size=context_size,
          enable_debug_checks=enable_debug_checks,
          distances=distances,
          max_generation_length=max_length_generation,
          helper_word_token_ids=helper_word_token_ids,
          # NOTE(review): the helper word mode is hard-coded to `multiple`
          # here rather than derived from `use_helper_words` — confirm this
          # is intentional.
          use_helper_words=constants.HelperWordModeChoices.multiple,
      )

    elif approach_type == constants.ApproachTypeChoices.naked_lm:
      ##########################################################################
      # Without Retrievals
      ##########################################################################
      if use_helper_words:
        # Append "Answer:" after the question.
        question_ids_inputs = tf.concat([
            question_ids_inputs,
            helper_word_token_ids["answer"],
        ], axis=1)

      # Question tokens never contribute to the LM loss.
      question_ids_labels = tf.ones_like(
          question_ids_inputs
      ) * constants.PPL_MASK_ID

      if not_test_split:
        input_ids = tf.concat((question_ids_inputs, answer_ids_inputs),
                              axis=1)
        label_ids = tf.concat((question_ids_labels, answer_ids_labels),
                              axis=1)
      else:
        input_ids = question_ids_inputs
    else:
      raise RuntimeError("Unnsupported approach_type value"
                         f" {approach_type}")

    ############################################################################
    # Finalize the preparation
    ############################################################################
    # Convert to dense tensors, padding inputs with EOS.
    input_ids = input_ids.to_tensor(tokenizer.eos_token_id)
    if not_test_split:
      # Append one final EOS to the labels before densifying.
      final_eos = tf.RaggedTensor.from_tensor(
          tokenizer.eos_token_id * tf.ones([batch_size, 1], dtype=tf.int32)
      )
      label_ids = tf.concat([label_ids, final_eos], axis=1)
      label_ids = label_ids.to_tensor(constants.PPL_MASK_ID)

    # All samples need to have at least one token != -100 (PPL_MASK_ID)
    if enable_debug_checks and not_test_split:
      not_any_padding = tf.reduce_any(
          label_ids != constants.PPL_MASK_ID, axis=1
      )
      none_has_padding = tf.math.reduce_all(
          not_any_padding
      )
      qty_doesnt_have_padding = tf.reduce_sum(
          tf.cast(not_any_padding))

      check_no_padding = tf.Assert(
          none_has_padding,
          [qty_doesnt_have_padding]
      )
      with tf.control_dependencies([check_no_padding]):
        label_ids = tf.identity(label_ids)

    # Limit size to the model's context window.
    input_ids = input_ids[:, :context_size]
    if not_test_split:
      label_ids = label_ids[:, :context_size]

    ############################################################################
    # Pad `input_ids` and `label_ids` to context_size
    ############################################################################
    # Prepare the ones
    pad_qty = tf.math.maximum(
        0, tf.constant(context_size) - tf.shape(input_ids)[1]
    )
    padding_ones = tf.ones(
        [batch_size, pad_qty],
        dtype=input_ids.dtype
    )

    # Pad the inputs with EOS tokens.
    input_padding = tokenizer.eos_token_id * padding_ones
    input_ids = tf.concat((input_ids, input_padding), axis=1)

    # Pad the labels with -100 (ignored by the loss).
    if not_test_split:
      pad_qty = tf.math.maximum(
          0, tf.constant(context_size) - tf.shape(label_ids)[1]
      )
      padding_ones = tf.ones(
          [batch_size, pad_qty],
          dtype=input_ids.dtype
      )
      label_padding = -100 * padding_ones
      label_ids = tf.concat((label_ids, label_padding), axis=1)

    # Make checks
    if enable_debug_checks:
      control_dependencies = []
      control_dependencies.append(tf.Assert(
          tf.math.reduce_all(input_ids != -1),
          [input_ids],
          name="NoMinusOnesInputs"
      ))
      if not_test_split:
        control_dependencies.append(tf.Assert(
            tf.math.reduce_all(label_ids != -1),
            [label_ids],
            name="NoMinusOnesLabel"
        ))
        control_dependencies.append(tf.Assert(
            tf.logical_not(
                tf.math.reduce_any(
                    tf.math.reduce_all(label_ids != -100, axis=1)
                )
            ),
            [label_ids],
            name="NotAllMinusOneHundred"
        ))
      with tf.control_dependencies(control_dependencies):
        input_ids = tf.identity(input_ids)

    return dict(
        input_ids=input_ids,
        label_ids=label_ids if not_test_split else None
    )

  return maybe_retrieve_and_merge
# @tf.function
def _tokenize_and_concat_while_loop(
    all_retrieved_contexts: tf_utils.TFTensorType,
    selected_context_indices: tf_utils.TFTensorType,
    num_retrievals_to_use: tf_utils.TFTensorType,
    batch_size: tf_utils.TFTensorType,
    helper_word_mode: constants.HelperWordModeChoices,
    context_helper_word_tokens: tf_utils.TFTensorType,
):
  """Concatenates the selected pre-tokenized retrievals, per batch unit.

  Runs a `tf.while_loop` over the `num_retrievals_to_use` selected context
  indices, gathering each context's token ids and concatenating them
  (optionally preceded by the "context" helper-word tokens) to an
  initially empty ragged accumulator.

  Args:
    all_retrieved_contexts: Token ids of every cached retrieval, per sample.
    selected_context_indices: Indices of the retrievals to use, per sample.
    num_retrievals_to_use: How many retrievals to concatenate.
    batch_size: Number of samples in the batch.
    helper_word_mode: Whether helper words are added once overall
      (`once`, handled by the caller) or before every context (`multiple`).
    context_helper_word_tokens: Pre-tokenized "context" helper words,
      tiled over the batch.

  Returns:
    A ragged int32 tensor with the concatenated contexts, one row per
    sample.

  Raises:
    RuntimeError: If `helper_word_mode` is unsupported, or if `batch_size`
      is None.
  """
  # (Fix: this docstring used to appear *after* the two checks below, where
  # it was a discarded expression rather than the function's docstring.)
  tf_utils.check_tf_tensor(all_retrieved_contexts)
  tf_utils.check_tf_tensor(selected_context_indices)

  def condition(
      loop_index: tf.Tensor,
      _,  # pylint: disable=unused-argument
  ):
    """While we have concatenated fewer contexts than `num_retrievals_to_use`."""
    return tf.less(loop_index, num_retrievals_to_use)

  def body(
      loop_index,
      previously_concat_contexts: tf.RaggedTensor,
  ):
    """Appends the `loop_index`-th selected context to the accumulator."""
    # Take the retrieved contexts associated to the context index associated
    # to the current loop index.
    context_to_concat: tf.RaggedTensor = tf.gather(
        all_retrieved_contexts,
        selected_context_indices[:, loop_index],
        batch_dims=1
    )
    # Concatenate the tokens of the new context to the previously concatenated
    # contexts. Possibly add helper words.
    if helper_word_mode == constants.HelperWordModeChoices.once:
      previously_concat_contexts = tf.concat([
          previously_concat_contexts,
          context_to_concat
      ], axis=1)
    elif helper_word_mode == constants.HelperWordModeChoices.multiple:
      previously_concat_contexts = tf.concat([
          previously_concat_contexts,
          context_helper_word_tokens,
          context_to_concat
      ], axis=1)
    else:
      # `helper_word_mode` is a Python value, so this raises at trace time.
      raise RuntimeError(f"Unsupported helper_word_mode: {helper_word_mode}")
    # Increment the counter.
    return loop_index + 1, previously_concat_contexts

  if batch_size is None:
    raise RuntimeError("batch_size is `None`. This should not happen.")

  return tf.while_loop(
      condition, body, [
          0,  # loop index
          tf.RaggedTensor.from_tensor(
              tf.zeros(
                  shape=(batch_size, 0),
                  dtype=tf.int32
              ),
          )  # previously concatenated contexts
      ])[1]
def _print_info(
    concat_retrieved_: tf.RaggedTensor, title, tokenizer, helper_word_token_ids,
):
  """Debug helper: prints stats about the concatenated retrieved contexts.

  Shows the ragged tensor's shape and row lengths, plus, per batch element,
  how many whitespace-separated words of the decoded text equal the decoded
  "context" helper word — an approximate count of how many contexts were
  concatenated. NOTE(review): the count only works if the decoded helper
  text is a single whitespace-delimited word; confirm for the tokenizer used.
  """
  panel_text = []
  panel_text += [f"{concat_retrieved_.shape = }"]
  panel_text += [f"{concat_retrieved_.row_lengths(axis=-1) = }"]
  for batch_idx in range(concat_retrieved_.shape[0]):
    whole_text = tokenizer.decode(concat_retrieved_[batch_idx])
    text_array = np.array(whole_text.split())
    helper_text = tokenizer.decode(helper_word_token_ids['context'][0]).strip()
    num_context_tokens = np.sum(text_array == helper_text)
    panel_text += [f"{num_context_tokens = }"]
  # `print` is the module-level rich console print.
  print(rich.panel.Panel("\n\n".join(panel_text), title=title))
# @tf.function
def _prepare_samples_w_retrieval(
    split,
    batch_size,
    question_ids_inputs: tf_utils.TFTensorType,
    answer_ids_inputs: tf_utils.TFTensorType,
    gpt2_tokenized_retrieved: tf_utils.TFTensorType,
    distances,
    num_retrievals_to_use,
    temperature,
    context_size,
    enable_debug_checks,
    use_helper_words,
    helper_word_token_ids,
    max_generation_length
):
  """Prepares the samples that use retrieval.

  Samples `num_retrievals_to_use` contexts from the cached retrievals,
  concatenates them (with helper words depending on `use_helper_words`),
  truncates them so that question + contexts + reserved generation length
  fit in `context_size`, then assembles the final input / label sequences.

  Args:
    split: Which dataset split this is for; only the test split lacks
      answers.
    batch_size: Number of samples in the batch.
    question_ids_inputs: Token ids of the questions (ragged, or dense with
      RAGGED_PADDING_ID padding).
    answer_ids_inputs: Token ids of the answers; None for the test split.
    gpt2_tokenized_retrieved: Pre-tokenized retrieved contexts, per sample.
    distances: Retrieval scores used to sample which contexts to keep.
    num_retrievals_to_use: How many retrieved contexts to concatenate.
    temperature: Temperature applied to `distances` before sampling.
    context_size: Context window size of the reader model.
    enable_debug_checks: Whether to add tf.Assert runtime checks.
    use_helper_words: A `constants.HelperWordModeChoices` mode.
    helper_word_token_ids: Dict of pre-tokenized helper words, tiled over
      the batch.
    max_generation_length: Number of tokens reserved for generation.

  Returns:
    A `(input_ids, label_ids)` pair of ragged tensors; `label_ids` is None
    for the test split. Labels are -100 everywhere except on the answer.
  """
  # Fix: the original re-loaded a `gpt2-xl` tokenizer from disk here on
  # every call — i.e. for every traced batch of the tf.data pipeline — only
  # to feed commented-out debug prints. The load (and the dead debug code)
  # has been removed.
  utils.check_contained(
      use_helper_words,
      constants.HelperWordModeChoices.choices()
  )
  # The test split is exactly the split without gold answers.
  assert (split == constants.SplitChoices.test) == (
      answer_ids_inputs is None
  ), (split == constants.SplitChoices.test, answer_ids_inputs)

  is_not_test = split != constants.SplitChoices.test

  if not isinstance(question_ids_inputs, tf.RaggedTensor):
    question_ids_inputs = tf.RaggedTensor.from_tensor(
        question_ids_inputs,
        padding=constants.RAGGED_PADDING_ID
    )

  if enable_debug_checks:
    asserts = []
    asserts.append(
        tf.Assert(
            tf.math.reduce_all(
                question_ids_inputs != constants.RAGGED_PADDING_ID,
            ),
            [question_ids_inputs.to_tensor()]
        )
    )
    if is_not_test:
      asserts.append(
          tf.Assert(
              tf.math.reduce_all(
                  answer_ids_inputs != constants.RAGGED_PADDING_ID,
              ),
              [answer_ids_inputs.to_tensor()]
          )
      )
    with tf.control_dependencies(asserts):
      question_ids_inputs = tf.identity(question_ids_inputs)

  # These checks are at graph composition time, so OK
  utils.check_isinstance(question_ids_inputs, tf.RaggedTensor)
  if is_not_test:
    utils.check_isinstance(answer_ids_inputs, tf.RaggedTensor)

  ##############################################################################
  # Sample from the possible retrievals
  ##############################################################################
  # Choose the indices
  selected_context_indices = tf_utils.sample_without_replacement(
      distances / temperature, num_retrievals_to_use
  )

  # Concatenate the retrievals
  utils.check_isinstance(helper_word_token_ids, dict)
  utils.check_isinstance(
      helper_word_token_ids['context'],
      tuple([np.ndarray] + list(tf_utils.TfTensorTypeTuple))
  )
  concat_retrieved = _tokenize_and_concat_while_loop(
      gpt2_tokenized_retrieved,
      selected_context_indices=selected_context_indices,
      batch_size=batch_size,
      num_retrievals_to_use=num_retrievals_to_use,
      helper_word_mode=use_helper_words,
      context_helper_word_tokens=helper_word_token_ids['context'],
  )
  if use_helper_words == constants.HelperWordModeChoices.once:
    # In `once` mode a single "Context:" marker precedes all contexts.
    concat_retrieved = tf.concat([
        helper_word_token_ids["context"],
        concat_retrieved,
    ], axis=1)

  # Cut the lengths down to max_lens_retrieval.
  # The eventual length of the ["question"] helper_tokens is included in
  # question_ids_inputs.
  if is_not_test:
    max_lens_retrieval = (
        context_size * tf.ones(
            shape=(batch_size,),
            dtype=tf.int64,
        )
        - (question_ids_inputs.row_lengths() +
           # We always generate the same length of text.
           max_generation_length +  # answer_ids_inputs.row_lengths() +
           (helper_word_token_ids["answer"].shape[1] if use_helper_words else 0)
           )
    )
  else:
    max_lens_retrieval = (
        context_size * tf.ones(
            shape=(batch_size,),
            dtype=tf.int64,
        ) - (question_ids_inputs.row_lengths() +
             max_generation_length +
             (helper_word_token_ids["answer"].shape[1]
              if use_helper_words else 0
              )
             )
    )

  # Keep only the first `max_lens_retrieval[i]` context tokens of row i.
  concat_retrieved = tf.ragged.boolean_mask(
      concat_retrieved,
      (
          tf.ragged.range(concat_retrieved.row_lengths()) <
          tf.expand_dims(max_lens_retrieval, axis=1)
      )
  )

  if enable_debug_checks:
    asserts = [
        tf.Assert(
            tf.math.reduce_all(max_lens_retrieval < context_size),
            [max_lens_retrieval, context_size]
        ),
    ]
    with tf.control_dependencies(asserts):
      concat_retrieved = tf.identity(concat_retrieved)

  # Assemble question + contexts (+ "Answer:" helper) (+ answer), and the
  # matching labels (-100 everywhere except on the answer tokens).
  if use_helper_words:
    if is_not_test:
      new_input_ids = tf.concat(
          [question_ids_inputs,
           concat_retrieved,
           helper_word_token_ids["answer"],
           answer_ids_inputs
           ],
          axis=1
      )
      new_label_ids = tf.concat(
          [-100 * tf.ones_like(question_ids_inputs),
           -100 * tf.ones_like(concat_retrieved),
           -100 * tf.ones_like(helper_word_token_ids["answer"]),
           answer_ids_inputs
           ],
          axis=1
      )
    else:
      new_input_ids = tf.concat(
          [question_ids_inputs,
           concat_retrieved,
           helper_word_token_ids["answer"],
           ],
          axis=1
      )
  else:
    if is_not_test:
      new_input_ids = tf.concat(
          [question_ids_inputs,
           concat_retrieved,
           answer_ids_inputs
           ],
          axis=1
      )
      new_label_ids = tf.concat(
          [-100 * tf.ones_like(question_ids_inputs),
           -100 * tf.ones_like(concat_retrieved),
           answer_ids_inputs
           ],
          axis=1
      )
    else:
      new_input_ids = tf.concat(
          [question_ids_inputs,
           concat_retrieved,
           ],
          axis=1
      )

  new_input_ids: tf.RaggedTensor
  return new_input_ids, new_label_ids if is_not_test else None
################################################################################
# Varia
################################################################################
# Number of samples in each split of each supported dataset.
DATASET_CARDINALITIES = {
    constants.DatasetNameChoices.kilt_eli5: {
        "train": 272637,
        "eval": 1507,
        "test": 600,
    }
}

# Pick the correct model creation function from the Hugging Face Model key.
# All currently supported keys are GPT-2 variants and share `_create_gpt2`.
MODEL_FACTORIES = {
    "gpt2": _create_gpt2,
    "gpt2-medium": _create_gpt2,
    "gpt2-large": _create_gpt2,
    "gpt2-xl": _create_gpt2,
    "distilgpt2": _create_gpt2,
}
| {"/main.py": ["/task_specific.py"], "/notebooks/display_generation_input.py": ["/generation.py", "/task_specific.py"], "/generation.py": ["/task_specific.py"]} |
78,101 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /notebooks/notebook_to_script.py | import json
import os
import sys
DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.dirname(DIR))
import utils
def main(in_target: utils.PathType, out_target: utils.PathType):
  """Concatenates the code cells of a Jupyter notebook into a .py script."""
  # Input must exist; output's parent must exist; output must not.
  utils.check_exists(in_target)
  out_parent = os.path.dirname(os.path.abspath(out_target))
  utils.check_exists(out_parent)
  utils.check_exists(out_target, inverse=True)

  with open(in_target) as fin:
    notebook = json.load(fin)

  # Each code cell's source lines, joined, separated by blank lines.
  chunks = [
      "".join(cell["source"]) + "\n\n"
      for cell in notebook["cells"]
      if cell["cell_type"] == "code"
  ]

  with open(out_target, "w") as fout:
    fout.write("".join(chunks))
if __name__ == "__main__":
  # CLI: notebook_to_script.py IN_NOTEBOOK [OUT_SCRIPT]
  assert 2 <= len(sys.argv) <= 3, len(sys.argv)
  if len(sys.argv) == 2:
    # Default: write next to the input, with `.py` appended.
    output_path = sys.argv[1] + ".py"
  elif len(sys.argv) == 3:
    output_path = sys.argv[2]
  else:
    # Unreachable given the assert above; kept as a guard.
    raise RuntimeError()
  main(sys.argv[1].strip(), output_path)
78,102 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /notebooks/display_generation_input.py | print("stdlib")
import itertools
import logging
import os
import sys
print("third party")
import numpy as np
import rich
import rich.console
import tensorflow as tf
import transformers
import tqdm
DIR = os.getcwd()
# Add project dir to PYTHONPATH
sys.path.append(os.path.dirname(DIR))
print("first party")
import constants
import generation
import task_specific
import utils
print("done")
LOGGER = logging.getLogger(__name__)
# Args
# Script configuration.
APPROACH_TYPE = constants.ApproachTypeChoices.cached_pretok
SPLIT = constants.SplitChoices.eval
BATCH_SIZE = 3
NUM_ENTRIES = 4  # Number of batches to display.
DATA_PATH = "../../data/cached_pretok"
assert os.path.exists(DATA_PATH)

# Tokenizer of the reader model; used below to decode inputs for display.
tokenizer = transformers.AutoTokenizer.from_pretrained("gpt2-xl")

# Build the same dataset the generation code consumes.
ds = generation.prep_ds_for_generation(dict(
    tokenizer=tokenizer,
    context_window_size=1024,
    dataset_name="kilt_eli5",
    batch_size=BATCH_SIZE,  # >> We set our own batch size elsewhere
    db_path=None,  # None,
    random_seed=0,
    use_subset=False,
    subset_size=-1,
    use_helper_words=constants.HelperWordModeChoices.multiple,
    approach_type=APPROACH_TYPE,
    num_retrievals=5,  # Will never change
    retrieval_temperature=1.,
    retriever=None,  # Cached retrievals don't need a retriever
    repeat=False,  # Will never change
    split=SPLIT,
    enable_debug_checks=False,
    retrieval_bank_size=10,  # Will never change
    dataset_type=constants.DatasetTypeChoices.tfr,
    tfr_prefix=DATA_PATH,
    qty_shuffle=1,  # Will never change
    max_length_generation=350
), tokenizer, BATCH_SIZE, SPLIT)

num_entries_in_split = (
    task_specific.DATASET_CARDINALITIES["kilt_eli5"][SPLIT]
)
# NOTE(review): `entries_counter` is created but never updated below.
entries_counter = tqdm.tqdm(total=num_entries_in_split)
for batch_no, batch in enumerate(itertools.islice(ds, NUM_ENTRIES)):
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Display the inputs and outputs.
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  rich_console = rich.console.Console(color_system="256")
  print_sample = generation.make_print_sample()

  # Sanity check: two samples of the same batch should differ.
  assert not np.all(batch[0] == batch[1]), batch[0] == batch[1]
  with utils.log_duration(
      LOGGER, "main", "all of tokenizer.decode for a batch."
  ):
    for i in range(batch.shape[0]):
      print(f"{batch.shape = }")
      utils.check_equal(len(batch.shape), 2)
      utils.check_equal(batch.shape[0], BATCH_SIZE)
      tokens = batch.numpy()[i]
      input_text = tokenizer.decode(tokens)
      print(f"Batch {batch_no}, Sample {i} / {BATCH_SIZE} of batch:")
      print(f"\tNum tokens: {len(tokens)}")
      print_sample(
          input_text, f"input batch_no {batch_no}", rich_console
      )
78,103 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /launchers/launch-instance.py | r"""
This scripts assumes that we are running on Google Cloud Compute.
pytype launchers/launch-instance.py -P . --check-variable-types \
--check-container-types \
--check-parameter-types --precise-return && \
python check_flags.py launchers/launch-instance.py && \
FLAGS="$(python json_to_args.py configs/launcher_configs/query_cacher_tfrecord.json)" && \
python launchers/launch-instance.py $FLAGS
"""
import colored_traceback.auto
import pathlib
import operator
import os
from rich import print
import shlex
import subprocess
import sys
import time
import yaml
from absl import flags
from absl import app
import git
_SCRIPT_DIRECTORY = pathlib.Path(__file__).resolve().parent
sys.path.append(str(_SCRIPT_DIRECTORY.parent))
import utils
_ONEVM_RUNTIME_VERSION = "v2-alpha"
_FLAG_ZONE = flags.DEFINE_string(
"gcloud-zone",
"europe-west4-a",
"Which Google Cloud zone to use.",
)
_FLAG_RUN_SCRIPT = flags.DEFINE_boolean(
"run-script",
True,
"Whether or not to run the training script at the end."
)
_FLAG_BOOT_DISK_SIZE = flags.DEFINE_integer(
"boot-disk-size",
250,
"Size of the boot disk, in gigabytes"
)
_FLAG_IMAGE_FAMILY = flags.DEFINE_string(
"image-family",
"tf2-2-4-cpu",
"See https://cloud.google.com/ai-platform/deep-learning-vm/docs/images"
)
_FLAG_INSTANCE_NAME = flags.DEFINE_string(
"instance-name",
"jules",
"Name of the VM and TPU instances.",
)
_FLAG_INSTANCE_TYPE = flags.DEFINE_string(
"instance-type",
None,
"See https://cloud.google.com/compute/docs/machine-types for details."
)
_FLAG_PREEMPTIBLE_TPU = flags.DEFINE_boolean(
"preemptible-tpu",
False,
"Whether or not we want the TPU instance to be preemtible."
)
_FLAG_PREEMPTIBLE_VM = flags.DEFINE_boolean(
"preemptible-vm",
False,
"Whether or not we want the VM instance to be preemtible."
)
_FLAG_SLEEP_TIME = flags.DEFINE_integer(
"sleep-time",
10,
"How long to sleep between retries in seconds. "
"Is also the duration of the sleep between major "
"commands that take time."
)
_FLAG_TF_VERSION = flags.DEFINE_enum(
"tf-version",
"2.4.0",
["2.4.0"],
"",
)
_FLAG_TPU_ONLY = flags.DEFINE_boolean(
"tpu-only",
False,
"",
)
_FLAG_TPU_QTY = flags.DEFINE_enum(
"tpu-qty",
"8",
["8"],
"Size of the TPU group. This currently should always "
"be 8.",
)
_FLAG_TPU_TYPE = flags.DEFINE_enum(
"tpu-type",
"v3",
["v2", "v3"],
"",
)
_FLAG_USE_TPUS = flags.DEFINE_boolean(
"use-tpus",
False,
"Whether to create a TPU."
)
_FLAG_USER_NAME = flags.DEFINE_string(
"username",
"jules",
"The gcloud username. "
)
_FLAG_VM_ONLY = flags.DEFINE_boolean(
"vm-only",
False,
"Whether to only create a VM and not reserve TPUs."
"Great for running other tasks that don't require a TPU, "
"but that still require a similar setup.",
)
_FLAG_NGROK_CONFIG_PATH = flags.DEFINE_string(
"ngrok-config-path",
None,
"Path of the user configuration file for ngrok."
)
_FLAG_USE_ONE_VM = flags.DEFINE_boolean(
"use-one-vm",
False,
"Whether to use the 1VM setup, for IE jax."
)
def h1(text):
  """Prints a top-level banner: full-width '#' rules around the title."""
  print("\n" + "#" * utils.term_size())
  print("# " + "[green bold]" + text + "[/]")
  print("#" * utils.term_size())
def h2(text):
  """Prints a second-level heading in blue bold italics (rich markup)."""
  print(f"[blue bold italic]{text}[/]")
def h3(text):
  """Prints a third-level heading: plain, unstyled text."""
  print(text)
def try_command(command, title, sleep_time, shell=False):
  """Runs a gcloud command, retrying forever until it succeeds.

  Sleeps `sleep_time` seconds between attempts. Useful for resources (TPUs,
  VMs) that are only intermittently available.
  """
  while True:
    try:
      run_gcloud_command(command, shell=shell)
      print("")
      break
    except subprocess.SubprocessError as err:
      print("")
      print(f"Got error: `{err}`")
      print(f"Sleeping for {sleep_time} seconds.")
      time.sleep(sleep_time)
      print("")
      h2(f"Retrying {title}.")
def validate_instance_type_flag():
  """Checks that --instance-type looks like e.g. `n1-standard-8`."""
  # Validate the value:
  instance_tuple = _FLAG_INSTANCE_TYPE.value.strip().split("-")
  utils.check_equal(len(instance_tuple), 3)
  utils.check_contained(instance_tuple[0], {"n1", "n2"})
  utils.check_contained(instance_tuple[1], {"standard", "highmem"})
  # The CPU count must be an int in [0, 64].
  num_cpus = int(instance_tuple[2])
  utils.check_operator(operator.le, num_cpus, 64)
  utils.check_operator(operator.ge, num_cpus, 0)
def run_gcloud_command(command, shell=False):
  """Echoes then executes a gcloud command, raising on non-zero exit."""
  print(f"Running gcloud command (with shell={shell}):\n\t{command}")
  subprocess.run(command, shell=shell, check=True)
def create_vm():
  """Creates and then starts the Compute Engine VM instance.

  Raises:
    ValueError: If --instance-type was not provided.
  """
  if not _FLAG_INSTANCE_TYPE.value:
    raise ValueError(
        "Using the full gcloud launcher is useless "
        "without an instance type."
    )
  validate_instance_type_flag()

  positional = [
      "gcloud", "compute", "instances", "create", _FLAG_INSTANCE_NAME.value,
  ]
  if _FLAG_PREEMPTIBLE_VM.value:
    positional.append("--preemptible")

  named_flags = {
      "--zone": _FLAG_ZONE.value,
      "--image-family": _FLAG_IMAGE_FAMILY.value,
      "--image-project": "deeplearning-platform-release",
      "--machine-type": _FLAG_INSTANCE_TYPE.value,
      "--boot-disk-size": f"{_FLAG_BOOT_DISK_SIZE.value}GB",
      "--scopes": "cloud-platform",
  }
  # Sanity-check the flag dict before quoting it into the command line.
  for key, value in named_flags.items():
    utils.check_isinstance(value, str)
    utils.check_isinstance(key, str)
  for key in named_flags:
    assert key.startswith("--"), key

  h2("Creating the VM instance.")
  command = positional + [
      f"{k}={shlex.quote(v)}" for k, v
      in named_flags.items()
  ]
  run_gcloud_command(command)
  print("")
  time.sleep(_FLAG_SLEEP_TIME.value)

  h2("Starting the instance.")
  command = [
      "gcloud", "compute", "instances", "start", _FLAG_INSTANCE_NAME.value
  ]
  run_gcloud_command(command)
  print("")
  time.sleep(_FLAG_SLEEP_TIME.value)
def create_one_vm_vm():
  """Create a one-VM (alpha) Cloud TPU VM instance."""
  runtime = _ONEVM_RUNTIME_VERSION
  if runtime == "v2-alpha":
    # The v2-alpha runtime only supports 8-core slices here.
    utils.check_equal(_FLAG_TPU_QTY.value, "8")
  command = [
      "gcloud", "alpha", "compute", "tpus", "tpu-vm", "create",
      f"{_FLAG_INSTANCE_NAME.value}",
      f"--zone={_FLAG_ZONE.value}",
      f"--accelerator-type={make_accelerator_type()}",
      f"--version={runtime}",
  ]
  run_gcloud_command(command)
def make_accelerator_type() -> str:
  """Return the TPU accelerator string, e.g. "v3-8".

  Only v3-8, non-preemptible, is currently allowed.
  """
  tpu_type = _FLAG_TPU_TYPE.value
  tpu_qty = _FLAG_TPU_QTY.value
  utils.check_equal(tpu_type, "v3")
  utils.check_equal(tpu_qty, "8")
  assert not _FLAG_PREEMPTIBLE_TPU.value, _FLAG_PREEMPTIBLE_TPU.value
  return f"{tpu_type}-{tpu_qty}"
def create_tpu_using_gcloud():
  """Create a (non one-VM) Cloud TPU with gcloud.

  Bug fix: the original did `positional_cmd += "--preemptible"`, which
  extends the list with the *characters* of the string ('-', '-', 'p', ...);
  `append` adds the flag as a single argument.
  """
  positional_cmd = [
      "gcloud", "compute", "tpus", "create", _FLAG_INSTANCE_NAME.value
  ]
  if _FLAG_PREEMPTIBLE_TPU.value:
    positional_cmd.append("--preemptible")
  named_arguments = {
      "--version": "2.4.1",
      "--accelerator-type": make_accelerator_type(),
  }
  cmd = positional_cmd + [
      f"{k}={shlex.quote(v)}" for k, v in named_arguments.items()
  ]
  h2("Starting the TPUs.")
  run_gcloud_command(cmd)
def git_is_dirty(directory=_SCRIPT_DIRECTORY) -> bool:
  """Whether the repo containing *directory* has uncommitted tracked changes."""
  os.chdir(directory)
  toplevel = subprocess.check_output(
      ["git", "rev-parse", "--show-toplevel"]
  ).decode().strip()
  return git.Repo(toplevel).is_dirty(untracked_files=False)
def git_is_pushed(directory=_SCRIPT_DIRECTORY) -> bool:
  """Heuristic: True when `git status` says the branch matches its remote."""
  os.chdir(directory)
  toplevel = subprocess.check_output(
      ["git", "rev-parse", "--show-toplevel"]
  ).decode().strip()
  status_text = git.Repo(toplevel).git.status()
  return "Your branch is up to date with" in status_text
def git_get_commit_id(directory=_SCRIPT_DIRECTORY) -> str:
  """Return the repo's current HEAD commit hash."""
  os.chdir(directory)
  raw = subprocess.check_output(["git", "rev-parse", "HEAD"])
  return raw.decode().strip()
def send_file(input_file, target):
  """Copy a local file to the remote instance, retrying forever on failure.

  One-VM TPU instances go through `gcloud alpha compute tpus tpu-vm ssh`,
  streaming the file into a remote `cat`; regular VMs use plain
  `gcloud compute scp`.
  """
  if _FLAG_USE_ONE_VM.value:
    filename = os.path.basename(input_file)
    target = os.path.join(target, filename)
    # Remote side runs `cat > <target>`; quoted twice because it travels as a
    # single --command= argument through the local shell.
    internal_command = shlex.quote(f"cat > {shlex.quote(target)}")
    command = "gcloud alpha compute tpus tpu-vm ssh "
    command += (f"{shlex.quote(_FLAG_USER_NAME.value)}@"
                f"{shlex.quote(_FLAG_INSTANCE_NAME.value)} "
                f"--command={internal_command}")
    # Pipe the local file's contents into the remote `cat`.
    command = f"cat {shlex.quote(input_file)} | " + command
    helper_text = f"Copying file `{input_file}`."
    try_command(
        command, helper_text, shell=True, sleep_time=_FLAG_SLEEP_TIME.value
    )
  else:
    try_command(
        [
            "gcloud", "compute", "scp",
            input_file,
            f"{_FLAG_USER_NAME.value}@{_FLAG_INSTANCE_NAME.value}:{target}",
        ], f"Copying `{input_file}`", sleep_time=_FLAG_SLEEP_TIME.value
    )
def ssh_command(command: str, helper_text: str, retry: bool = False) -> None:
  """Run *command* on the remote instance over gcloud ssh.

  Args:
    command: Shell command to execute remotely.
    helper_text: Banner text (also used by the retry loop).
    retry: When True, retry forever via `try_command`.
  """
  if _FLAG_USE_ONE_VM.value:
    prefix = ["gcloud", "alpha", "compute", "tpus", "tpu-vm", "ssh"]
  else:
    prefix = ["gcloud", "compute", "ssh",]
  h1(helper_text)
  full_command = prefix + [
      f"{_FLAG_USER_NAME.value}@{_FLAG_INSTANCE_NAME.value}",
      f"--command={command}"
  ]
  if retry:
    try_command(
        full_command,
        helper_text,
        sleep_time=_FLAG_SLEEP_TIME.value,
    )
  else:
    run_gcloud_command(full_command, shell=False)
def main(argv):
  """Entry point: create the VM/TPU, copy setup files, and launch training."""
  if len(argv) > 1:
    raise RuntimeError(argv)
  # Refuse to launch from a dirty / unpushed tree: the remote checks out the
  # commit id sent below, so local-only changes would be silently ignored.
  if git_is_dirty() or not git_is_pushed():
    raise RuntimeError(
        "The git directory is dirty. Push the changes before running."
    )
  remote_home_dir = f"/home/{_FLAG_USER_NAME.value}/"
  h1("Module args:")
  args = utils.get_module_args(argv[0])
  print(args)
  print("")
  if not subprocess.check_output(["which", "gcloud"]).strip():
    raise RuntimeError("`gcloud` is not in the path. `ctpu` won't work.")
  # Separate TPU creation only applies in the classic (non one-VM) setup.
  if (_FLAG_USE_TPUS.value
      and not _FLAG_VM_ONLY.value
      and not _FLAG_USE_ONE_VM.value):
    create_tpu_using_gcloud()
  if _FLAG_TPU_ONLY.value:
    return
  ###########################################################################
  # Beginning of the VM-only stuff
  ###########################################################################
  if _FLAG_USE_ONE_VM.value:
    create_one_vm_vm()
  else:
    create_vm()
  ###########################################################################
  # Copying files over
  ###########################################################################
  h1("Copying bashrc")
  send_file(
      f"{_SCRIPT_DIRECTORY}/bashrc",
      remote_home_dir,
  )
  h1("Copying setup.sh")
  send_file(
      f"{_SCRIPT_DIRECTORY}/setup.sh",
      remote_home_dir,
  )
  h1("Copying start_notebooks.sh")
  send_file(
      f"{_SCRIPT_DIRECTORY}/start_notebooks.sh",
      remote_home_dir,
  )
  ##############################################################################
  # Running setup.sh
  ##############################################################################
  # Build Screen Command
  project_dir = (
      f"{remote_home_dir}eli5_retrieval_large_lm/"
  )
  training_script_uri = (
      f"launchers/scripts/training.sh"
  )
  # `exec bash` keeps the screen session alive after training exits.
  training_command = shlex.quote(
      f"cd {project_dir} && bash {training_script_uri}; exec bash"
  )
  with open(_FLAG_NGROK_CONFIG_PATH.value) as f_in:
    ngrok_auth = yaml.load(f_in, Loader=yaml.Loader)["authtoken"]
  setup_command_list = [
      f"source",
      f"{remote_home_dir}setup.sh",
      f"{git_get_commit_id()}",  # Argument 1: git commit id
      str(_FLAG_USE_ONE_VM.value),  # Argument 2: whether we are using one vm
      ngrok_auth,  # Argument 3: ngrok auth token
      _FLAG_INSTANCE_NAME.value,  # Argument 4: Instance name
  ]
  # Build Setup Command
  setup_command = shlex.join(setup_command_list)
  ssh_command(setup_command, "Running setup.sh", retry=False)
  if _FLAG_RUN_SCRIPT.value:
    # Detached screen session so training survives the ssh disconnect.
    screen_command = f"screen -S training -dm bash -c {training_command}"
    ssh_command(screen_command, "Running training", retry=False)
  h1("All done.")


if __name__ == "__main__":
  app.run(main)
| {"/main.py": ["/task_specific.py"], "/notebooks/display_generation_input.py": ["/generation.py", "/task_specific.py"], "/generation.py": ["/task_specific.py"]} |
78,104 | JulesGM/eli5_retrieval_large_lm | refs/heads/main | /generation.py | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the samples from the models."""
import logging
import operator
import os
import re
import subprocess
import tempfile
import time
from typing import Dict
import absl.app as app
import absl.flags as flags
import absl.logging as absl_logging
import colored_traceback.auto
import rich
import rich.console
import rich.panel
import rich.style
import tensorflow as tf
import tqdm
import transformers
import constants
import task_specific
import tf_utils
import utils
LOGGER = logging.getLogger(__name__)

# The only approach types this generation script supports.
_ACCEPTABLE_APPROACHES = frozenset([
    constants.ApproachTypeChoices.naked_lm,
    constants.ApproachTypeChoices.cached_pretok
])

# Exactly one of --h5_path / --ckpt_path must be set (enforced in main()).
_FLAG_H5_MODEL_PATH = flags.DEFINE_string(
    "h5_path",
    None,
    "Path to the model save."
)
_FLAG_CKPT_MODEL_PATH = flags.DEFINE_string(
    "ckpt_path",
    None,
    "Path to the model save."
)
# NOTE(review): the help text below looks copy-pasted from the model-path
# flags; it describes the approach type, not a path.
_FLAG_APPROACH_TYPE = flags.DEFINE_enum(
    "approach_type",
    None,
    _ACCEPTABLE_APPROACHES,
    "Path to the model save."
)
_FLAG_OUTPUT_PATH = flags.DEFINE_string(
    "output_path",
    None,
    "Where to save the generations. A json file. Can be on Google Cloud."
)
_FLAG_DATASET_TYPE = flags.DEFINE_enum(
    "dataset_type",
    "tfr",
    constants.DatasetTypeChoices.choices(),
    "Whether to use the hdf5 or the tfr pipeline."
)
# Only meaningful for the tfr pipeline; forwarded to the dataset builder.
_FLAG_TFR_PREFIX = flags.DEFINE_string(
    "tfr_prefix",
    None,
    "Glob prefix of the tfr files."
)
# Per-device batch size; multiplied by the replica count in main().
_FLAG_BATCH_SIZE = flags.DEFINE_integer(
    "batch_size",
    None,
    "Size of the batch PER DEVICE."
)
_FLAG_SPLIT = flags.DEFINE_enum(
    "split",
    "test",
    {"eval", "test"},
    "Which split to generate from."
)
_FLAG_GENERATION_LENGTH_LIMIT = flags.DEFINE_integer(
    "generation_length_limit",
    None,
    "Number of tokens to reserve for generation at the end."
)
# No flag necessary
_FLAG_IS_LOCAL_TPU = flags.DEFINE_bool(
    "tpu-is-local",
    True,
    "Whether we are using a one-vm TPU.",
)
# No flag necessary
_FLAG_TPU_NAME = flags.DEFINE_string(
    "tpu-name",
    "",
    "Name of the TPU to use."
)
# No flag necessary
_FLAG_HF_MODEL_KEY = flags.DEFINE_string(
    "hf-model-key",
    "gpt2-xl",
    "Used when loading the model with checkpoints.",
)
def make_further_prep_generate(eos_token_id, split):
  """Build a per-batch map fn that keeps only the prompt ("setup") tokens.

  Positions whose label is -100 are non-target (prompt) positions; outside
  the test split, eos (padding) tokens are dropped as well.
  """
  def further_prep_generate(
      batch: Dict[str, tf.Tensor],
  ) -> tf.Tensor:
    """
    -> Removes the answer tokens.
    -> Removes the padding tokens.
    All samples should have the same size.
    """
    print(f"further_prep_generate: {batch['input_ids'].shape = }")
    if split == "test":
      setup_tokens = batch["label_ids"] == -100
    else:
      setup_tokens = tf.logical_and(
          batch["label_ids"] == -100, batch["input_ids"] != eos_token_id
      )
    # Expect a rank-2 [batch, seq] tensor.
    assert len(batch["input_ids"].shape) == 2, batch["input_ids"].shape
    # NOTE(review): tf.boolean_mask with a 2-D mask flattens the result to
    # 1-D; downstream appears to rely on per-call batch_size == 1 — confirm.
    batch = tf.boolean_mask(batch["input_ids"], setup_tokens)
    return batch
  return further_prep_generate
def make_model_tf(path: str, mode: str) -> transformers.TFGPT2LMHeadModel:
  """Prepare the model for generation.

  Loads the model architecture from the huggingface pre-trained model, then
  loads a checkpoint.

  Args:
    path: hfh5 mode: directory holding `config.json` and `tf_model.h5`.
      ckpt mode: unused; the checkpoint path comes from --ckpt_path.
    mode: One of `constants.SaveModeChoices.{hfh5, ckpt}`.

  Raises:
    RuntimeError: For an unsupported save mode.

  TODO: There must be a way to just load from config + checkpoint, no pretrained
  weights.
  """
  with utils.log_duration(LOGGER, make_model_tf.__name__, "Load model."):
    if mode == constants.SaveModeChoices.hfh5:
      config_path = os.path.join(path, "config.json")
      model_path = os.path.join(path, "tf_model.h5")
      utils.check_exists(config_path)
      utils.check_exists(model_path)
      config = transformers.GPT2Config.from_pretrained(config_path)
      return transformers.TFGPT2LMHeadModel.from_pretrained(
          model_path,
          config=config
      )
    elif mode == constants.SaveModeChoices.ckpt:
      # Instantiate the architecture from the HF hub key, then overwrite the
      # weights in place from the TF checkpoint.
      model = transformers.TFGPT2LMHeadModel.from_pretrained(
          _FLAG_HF_MODEL_KEY.value,
      )
      ckpt = tf.train.Checkpoint(model=model)
      ckpt.restore(_FLAG_CKPT_MODEL_PATH.value)
    else:
      raise RuntimeError(f"Unsupported Save Mode: {mode}")
  # Only reached in ckpt mode; the hfh5 branch returns above.
  return model
def make_print_sample():
  """Build a closure that pretty-prints a generated sample with rich."""
  # Monokai
  title_color = "#6c99bb"
  normal_color = "#d6d6d6"
  background_color = "#2e2e2e"
  titles = ["Question:", "Answer:", "Context:"]

  def print_sample(sample, panel_title, console):
    """Pretty print samples using Python rich.

    The parsing is pretty frail, but that's not a big deal.
    """
    for section_title in titles:
      # Collapse the newlines right after a section title, then re-insert a
      # colored, padded version of the title.
      sample = re.sub(re.escape(section_title) + "\n+", section_title, sample)
      sample = sample.replace(
          section_title, f"\n\n[{title_color} bold]{section_title}[/]\n"
      )
    panel_style = rich.style.Style(
        bgcolor=background_color, color=normal_color
    )
    console.print(
        rich.panel.Panel(sample.strip(), title=panel_title, style=panel_style)
    )

  return print_sample
def prep_ds_for_generation(args, tokenizer, split):
  """Build the ELI5 LM dataset and strip each batch down to its prompt tokens."""
  dataset = task_specific.create_lm_ds_kilt_eli5(**args)
  prep_fn = make_further_prep_generate(tokenizer.eos_token_id, split)
  return map(prep_fn, dataset)
def main(argv):
  """Generate answers with a saved GPT-2 model and dump them to a JSON file."""
  if len(argv) > 1:
    raise RuntimeError(argv[1:])
  absl_logging.use_python_logging()
  utils.check_contained(_FLAG_APPROACH_TYPE.value, _ACCEPTABLE_APPROACHES)
  # Exactly one of the two model-path flags must be set.
  utils.check_operator(
      operator.xor,
      bool(_FLAG_H5_MODEL_PATH.value),
      bool(_FLAG_CKPT_MODEL_PATH.value)
  )
  if _FLAG_H5_MODEL_PATH.value:
    model_path = _FLAG_H5_MODEL_PATH.value
    mode = constants.SaveModeChoices.hfh5
  elif _FLAG_CKPT_MODEL_PATH.value:
    model_path = _FLAG_CKPT_MODEL_PATH.value
    mode = constants.SaveModeChoices.ckpt
  else:
    raise RuntimeError("Logically should never happen.")
  utils.check_exists(model_path)
  device_type = tf_utils.devices_to_use()[0].device_type
  # ONLY GPU IS SUPPORTED
  utils.check_equal(device_type, "GPU")
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Build the distribution strategy
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  if device_type == "TPU":
    # ONLY LOCAL TPU IS "SUPPORTED"
    utils.check_isinstance(_FLAG_IS_LOCAL_TPU.value, bool)
    assert _FLAG_IS_LOCAL_TPU.value
    tpu_config = tf_utils.init_tpus(local=True)
    utils.check_isinstance(tpu_config, tf_utils.TpuConfigType)
    utils.check_not_none(tpu_config)
    strategy = tf.distribute.TPUStrategy(tpu_config.resolver)
  elif device_type == "GPU":
    strategy = tf.distribute.MirroredStrategy(
        devices=tf.config.experimental.list_logical_devices('GPU')
    )
  else:
    raise RuntimeError(device_type)
  # ONLY GPU IS SUPPORTED
  print(tf.config.list_logical_devices())
  utils.check_isinstance(strategy, tf.distribute.MirroredStrategy)
  ##############################################################################
  # Load Model
  ##############################################################################
  with utils.log_duration(LOGGER, main.__name__, "All of model preparation"):
    with strategy.scope():
      # HF isn't able to read directly from GCS
      if (model_path.startswith("gs://")
          and mode == constants.SaveModeChoices.hfh5):
        with utils.log_duration(
            LOGGER,
            main.__name__,
            "Download model from GS"
        ):
          with tempfile.TemporaryDirectory() as td:
            # Trailing separator so gsutil treats `td` as a directory.
            td += os.path.sep
            if os.path.exists("/root/google-cloud-sdk/bin/gsutil"):
              exec_ = "/root/google-cloud-sdk/bin/gsutil"
            else:
              exec_ = "gsutil"
            command = [
                exec_,
                "-m",
                "cp",
                "-r",
                os.path.join(model_path, "*"),
                td,
            ]
            LOGGER.debug("Running bash command: %s", " ".join(command))
            subprocess.check_call(command)
            LOGGER.debug(
                "Files at the temp dir(%s): %s", td, str(os.listdir(td))
            )
            model = make_model_tf(td, mode=mode)
      else:
        model = make_model_tf(model_path, mode=mode)
  utils.check_not_none(model)
  ##############################################################################
  # Load Dataset Pipeline
  ##############################################################################
  utils.check_contained(_FLAG_APPROACH_TYPE.value, {
      constants.ApproachTypeChoices.naked_lm,
      constants.ApproachTypeChoices.cached_pretok
  })
  devices = tf_utils.devices_to_use()
  num_replicas = (
      len(devices) if devices[0].device_type in {"GPU", "TPU"} else 1
  )
  utils.check_equal(devices[0].device_type, "GPU")
  # Only a batch size of 1 is currently supported. We need attention masks
  batch_size = _FLAG_BATCH_SIZE.value * num_replicas
  approach_type = _FLAG_APPROACH_TYPE.value
  logging.debug("Loading dataset.")
  tokenizer = transformers.GPT2TokenizerFast.from_pretrained("gpt2-xl")
  ds = prep_ds_for_generation(dict(
      tokenizer=tokenizer,
      context_window_size=1024,
      dataset_name="kilt_eli5",
      batch_size=1,  # >> We set our own batch size elsewhere
      db_path=None,  # None,
      random_seed=0,
      use_subset=False,
      subset_size=-1,
      use_helper_words=True,
      approach_type=approach_type,
      num_retrievals=5,  # Will never change
      retrieval_temperature=1.,
      retriever=None,  # Cached retrievals don't need a retriever
      repeat=False,  # Will never change
      split=_FLAG_SPLIT.value,
      enable_debug_checks=False,
      retrieval_bank_size=5,  # Will never change
      dataset_type=_FLAG_DATASET_TYPE.value,
      tfr_prefix=_FLAG_TFR_PREFIX.value,
      qty_shuffle=1,  # Will never change
      max_length_generation=350
  ), tokenizer, _FLAG_SPLIT.value)
  ds = strategy.experimental_distribute_dataset(ds)
  ##############################################################################
  # Generate
  ##############################################################################
  LOGGER.debug("Generating.")
  generations = []
  num_entries_in_split = (
      task_specific.DATASET_CARDINALITIES["kilt_eli5"][_FLAG_SPLIT.value]
  )
  entries_counter = tqdm.tqdm(total=num_entries_in_split)
  for batch_no, batch in enumerate(ds):
    # Calling model.generate. We should make a config file with the
    # hyperparameters for generation, or make a facility in the one we already
    # have. I feel like a separate one would be better, separating concerns.
    output = strategy.run(model.generate, kwargs=dict(
        input_ids=batch,
        max_length=_FLAG_GENERATION_LENGTH_LIMIT.value,
        use_cache=True,
        attention_mask=tf.cast(batch != tokenizer.eos_token_id, tf.int32),
        repetition_penalty=2.,
        num_beams=5,
    ))
    output = tf_utils.process_strat_output(
        strategy_outputs=output,
        current_batch_size=batch_size,
        strategy=strategy,
        name="generations"
    )
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Display the inputs and outputs.
    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    rich_console = rich.console.Console(color_system="256")
    print_sample = make_print_sample()
    with utils.log_duration(
        LOGGER, "main", "all of tokenizer.decode for a batch."
    ):
      for i in range(batch_size):
        input_text = tokenizer.decode(batch.numpy()[i])
        output_text = tokenizer.decode(output.numpy()[i])
        print("#" * 1000)
        print(f"Batch {batch_no} Generation {i}")
        print_sample(
            input_text, f"input batch_no {batch_no}", rich_console
        )
        print_sample(
            output_text, f"output batch_no {batch_no}", rich_console
        )
        generations.append(output_text)
        print("#" * 1000)
    entries_counter.update(batch.shape[0])
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Save the output to a JSON File.
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # Timestamped filename keeps repeated runs from clobbering each other;
  # the active flag values are saved alongside the generations.
  utils.to_json_file(
      os.path.join(
          _FLAG_OUTPUT_PATH.value,
          _FLAG_SPLIT.value,
          _FLAG_APPROACH_TYPE.value,
          time.strftime("%Y%m%d-%H%M%S.json")
      ),
      dict(
          flags={
              flag.name: flag.value
              for flag in flags.FLAGS.flags_by_module_dict()[argv[0]]
          },
          generations=generations
      )
  )
  logging.debug("Saved to: %s", _FLAG_OUTPUT_PATH.value)


if __name__ == "__main__":
  app.run(main)
| {"/main.py": ["/task_specific.py"], "/notebooks/display_generation_input.py": ["/generation.py", "/task_specific.py"], "/generation.py": ["/task_specific.py"]} |
78,115 | luis2arm/vlearn | refs/heads/master | /vlearn/classifiers/cnn3d_opt_frmwrks.py | import pdb
import numpy as np
from .cnn3d_architectures import CNN3DArchs
from sklearn.model_selection import ParameterGrid, StratifiedKFold
import tensorflow as tf
class OptFrmWrk(CNN3DArchs):
    """
    Optimization frameworks to explore and find best models.

    Bug fixes relative to the original:
        * Indices yielded by the inner `StratifiedKFold.split` are positions
          *within* the outer-training subset; they were previously used to
          index the full arrays, leaking outer-test samples into the inner
          folds. They now index the outer-training subset.
        * A fresh model is built for every inner fold, so trained weights no
          longer carry over from one fold (or parameter setting) to the next.

    Todo:
        * Use single cv inside nested cv
    """

    def __init__(self, Xtr, ytr):
        """
        Args:
            Xtr (np_array): A numpy array having training samples
            ytr (np_array): A numpy array having labels
        """
        self._Xtr = Xtr
        self._ytr = ytr

    def nested_cv(self, params, split):
        """
        Optimizes for best parameters and model using nested cv.

        Args:
            params (dict): Dictionary of parameters to optimize
            split (tuple): (inner split, outer split) fold counts.

        Returns:
            (list[dict], list[float]): One (best-parameters, outer-test
            performance) pair per outer fold.
        """
        var_param, stat_param = self._get_var_params(params)
        param_grid = ParameterGrid(var_param)
        in_cv = StratifiedKFold(split[0])
        out_cv = StratifiedKFold(split[1])
        # Outer cross validation loop
        best_perfs = []
        best_params_lst = []
        for out_tr_idx, out_tst_idx in out_cv.split(self._Xtr, self._ytr):
            print("Outer CV")
            # Materialize the outer-training subset once; all inner-CV
            # indices below are relative to these arrays.
            X_out_tr = self._Xtr[out_tr_idx]
            y_out_tr = self._ytr[out_tr_idx]
            # Parameter loop
            param_best_perf = -np.inf
            best_params = None
            for cparams in param_grid:
                print("\tParameters loop")
                all_cparams = {**cparams, **stat_param}
                epochs_ = all_cparams["epochs"]
                batch_size_ = all_cparams["batch_size"]
                # Inner cross validation loop
                in_perfs = []
                for in_tr_idx, in_tst_idx in in_cv.split(X_out_tr, y_out_tr):
                    # Fresh graph and fresh weights for each inner fold.
                    tf.keras.backend.clear_session()
                    model = CNN3DArchs(all_cparams,
                                       X_out_tr[in_tr_idx],
                                       y_out_tr[in_tr_idx]).build_model()
                    model.fit(
                        X_out_tr[in_tr_idx],
                        y_out_tr[in_tr_idx],
                        epochs=epochs_,
                        validation_split=0.2,
                        batch_size=batch_size_,
                        verbose=0,
                    )
                    in_loss, in_perf = model.evaluate(
                        X_out_tr[in_tst_idx], y_out_tr[in_tst_idx], verbose=0
                    )
                    in_perfs.append(in_perf)
                    print("\t\tInner CV ", str(in_perf))
                # Mean inner performance
                in_mean_perf = np.mean(in_perfs)
                print("\t\tMean performance ", str(in_mean_perf))
                if in_mean_perf > param_best_perf:
                    param_best_perf = in_mean_perf
                    best_params = cparams
            print("\tInner best parameters ", best_params)
            print("\tMean Best performance ", param_best_perf)
            # Performance of best parameters on outer split: refit on the
            # whole outer-training subset, score on the held-out outer test.
            all_cparams = {**best_params, **stat_param}
            tf.keras.backend.clear_session()
            model = CNN3DArchs(all_cparams, X_out_tr, y_out_tr).build_model()
            model.fit(
                X_out_tr,
                y_out_tr,
                epochs=all_cparams["epochs"],
                validation_split=0.2,
                batch_size=all_cparams["batch_size"],
                verbose=0,
            )
            out_loss, out_perf = model.evaluate(
                self._Xtr[out_tst_idx], self._ytr[out_tst_idx], verbose=0
            )
            print("Best parameters ", best_params)
            print("Performance on outer testing ", str(out_perf))
            # Storing best parameters for outer loop
            best_params_lst.append({**stat_param, **best_params})
            best_perfs.append(out_perf)
        return best_params_lst, best_perfs

    def _get_var_params(self, params):
        """
        Splits `params` into grid-searchable and static entries.

        Args:
            params (dict): Maps each parameter name to a list of candidates.

        Returns:
            (dict, dict): (multi-valued params kept as lists, single-valued
            params with the value unwrapped from its list).
        """
        var_dict = {}
        static_dict = {}
        for key, values in params.items():
            if len(values) > 1:
                var_dict[key] = values
            else:
                static_dict[key] = values[0]
        return var_dict, static_dict
| {"/vlearn/classifiers/cnn3d_opt_frmwrks.py": ["/vlearn/classifiers/cnn3d_architectures.py"], "/vlearn/classifiers/cnn3d.py": ["/vlearn/classifiers/cnn3d_opt_frmwrks.py", "/vlearn/classifiers/cnn3d_architectures.py"]} |
78,116 | luis2arm/vlearn | refs/heads/master | /vlearn/roa/__init__.py | from .optical_flow import OptFlw
from .object_detection import ObjDet
__all__ = ["OptFlw", "ObjDet"]
| {"/vlearn/classifiers/cnn3d_opt_frmwrks.py": ["/vlearn/classifiers/cnn3d_architectures.py"], "/vlearn/classifiers/cnn3d.py": ["/vlearn/classifiers/cnn3d_opt_frmwrks.py", "/vlearn/classifiers/cnn3d_architectures.py"]} |
78,117 | luis2arm/vlearn | refs/heads/master | /vlearn/classifiers/cnn3d_architectures.py | import sys
import pdb
import tensorflow as tf
import tensorflow.keras.layers as tfkr_layers
class CNN3DArchs:
    """
    Contains methods to build 3D CNN architectures. The architecture is a
    tf.keras model and supports the functional api of tf.keras.
    """

    def __init__(self, params, X, y):
        """
        Stores the build parameters and data.

        Args:
            params (dict): Architecture/compilation parameters.
            X (np_array): Training samples; only `X.shape[1:]` is read here,
                to size the input layer.
            y (np_array): Labels; stored but not used while building.
        """
        self._X = X
        self._y = y
        self._params = params

    def build_model(self):
        """
        Builds a compiled tf.keras model based on the parameters.

        Returns:
            tf.keras.Model: The compiled model.
        """
        arch_name = self._params["arch_name"]
        if arch_name == "flat":
            model = self._build_flat_model()
        else:
            # NOTE(review): print + sys.exit() kills the whole process;
            # raising ValueError would let callers recover — confirm intent.
            print("Architecture not supported ", arch_name)
            sys.exit()
        return model

    def _build_flat_model(self):
        """
        Builds a flat cnn3d model using tensorflow 2. It has the same
        number of convolutional kernels throughout.

        Reads from self._params: num_conv_layers, num_kernels, kernel_size,
        activation, data_format, pool_size, batch_norm, num_dense_layers,
        dense_units, final_activation, loss, optimizer, metric.

        Returns:
            tf.keras.Model: Compiled binary classifier (single output unit).
        """
        # Extracting architecture parameters from dictionary
        num_conv_layers_ = self._params["num_conv_layers"]
        num_kernels_ = self._params["num_kernels"]
        kernel_size_ = self._params["kernel_size"]
        activation_ = self._params["activation"]
        data_format_ = self._params["data_format"]
        pool_size_ = self._params["pool_size"]
        batch_norm_ = self._params["batch_norm"]
        num_dense_layers_ = self._params["num_dense_layers"]
        final_activation_ = self._params["final_activation"]
        loss_ = self._params["loss"]
        optimizer_ = self._params["optimizer"]
        dense_units_ = self._params["dense_units"]
        metric_ = self._params["metric"]
        # Input Layer (per-sample shape, i.e. without the batch dimension)
        sample_shape = self._X.shape[1:]
        input_layer = tfkr_layers.Input(sample_shape)
        # First convolution and pooling layers
        conv_layer = tfkr_layers.Conv3D(
            filters=num_kernels_,
            kernel_size=kernel_size_,
            activation=activation_,
            data_format=data_format_,
        )(input_layer)
        pool_layer = tfkr_layers.MaxPool3D(
            pool_size=pool_size_, data_format=data_format_
        )(conv_layer)
        # Remaining convolution and pooling layers
        for layer_idx in range(1, num_conv_layers_):
            conv_layer = tfkr_layers.Conv3D(
                filters=num_kernels_,
                kernel_size=kernel_size_,
                activation=activation_,
                data_format=data_format_,
            )(pool_layer)
            pool_layer = tfkr_layers.MaxPool3D(
                pool_size=pool_size_, data_format=data_format_
            )(conv_layer)
        # Batch Normalization (optional, applied once after the conv stack)
        if batch_norm_:
            pool_layer = tfkr_layers.BatchNormalization()(pool_layer)
        # Flatten
        flat_layer = tfkr_layers.Flatten()(pool_layer)
        # dense layers
        dense_layer = tfkr_layers.Dense(units=dense_units_, activation=activation_)(
            flat_layer
        )
        for layer_idx in range(1, num_dense_layers_):
            dense_layer = tfkr_layers.Dense(units=dense_units_, activation=activation_)(
                dense_layer
            )
        # output layer, sigmoid for binary classification
        output_layer = tfkr_layers.Dense(units=1, activation=final_activation_)(
            dense_layer
        )
        # Return model
        model = tf.keras.Model(inputs=input_layer, outputs=output_layer)
        model.compile(loss=loss_, optimizer=optimizer_, metrics=[metric_])
        return model
| {"/vlearn/classifiers/cnn3d_opt_frmwrks.py": ["/vlearn/classifiers/cnn3d_architectures.py"], "/vlearn/classifiers/cnn3d.py": ["/vlearn/classifiers/cnn3d_opt_frmwrks.py", "/vlearn/classifiers/cnn3d_architectures.py"]} |
78,118 | luis2arm/vlearn | refs/heads/master | /vlearn/classifiers/cnn3d.py | import os
import sys
import pdb
import numpy as np
import pandas as pd
from .cnn3d_opt_frmwrks import OptFrmWrk
from .cnn3d_architectures import CNN3DArchs
from sklearn.model_selection import ParameterGrid, StratifiedKFold
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
class CNN3D(OptFrmWrk):
    """
    The following class provides an intuitive way to build custom neural
    networks using tensorflow 2 for activity detection in trimmed videos.

    Todo:
        * Divide parameters into static and dynamic in this class
        * After nested cross validation use n fold cross validation to get
          best of the best model.
    """

    def __init__(self, arch_params, training_params):
        """
        Args:
            arch_params (dict): Parameters that define architecture.
            training_params (dict): Training parameter dictionary.
        """
        self._arch_params = arch_params
        self._training_params = training_params

    def get_best_model(self, Xtr, ytr, method="nestedcv", ncv_split=(3, 3)):
        """
        Optimizes for best parameters and model using nested cross validation.

        Args:
            Xtr (nparray): An array having samples for training
            ytr (nparray): An array having labels corresponding to each
                sample in Xtr
            method (str): Parameter tuning method; only "nestedcv" is
                supported. Anything else prints a message and exits.
            ncv_split (tuple): Cross validation split for nestedcv,
                (inner split, outer split). Default is (3, 3).

        Returns:
            (dict, tf.keras.Model): The best parameters and a model refit
            with them on the full training data.
        """
        # Getting optimal architecture and training parameters
        opt = OptFrmWrk(Xtr, ytr)
        if method == "nestedcv":
            params = {**self._arch_params, **self._training_params}
            # Bug fix: honor the caller-supplied ncv_split (it used to be
            # hard-coded to (3, 3), silently ignoring the argument).
            ncv_best_params, perfs = opt.nested_cv(params, ncv_split)
            best_params = ncv_best_params[np.argmax(perfs)]
            best_model = CNN3DArchs(best_params, Xtr, ytr).build_model()
            best_model.fit(
                Xtr,
                ytr,
                epochs=best_params["epochs"],
                batch_size=best_params["batch_size"],
                validation_split=0.2,
                verbose=1,
            )
        else:
            print("Parameter tuning not supported")
            sys.exit()
        return best_params, best_model
| {"/vlearn/classifiers/cnn3d_opt_frmwrks.py": ["/vlearn/classifiers/cnn3d_architectures.py"], "/vlearn/classifiers/cnn3d.py": ["/vlearn/classifiers/cnn3d_opt_frmwrks.py", "/vlearn/classifiers/cnn3d_architectures.py"]} |
78,119 | pumpkinduo/ELMO_sentence_encoder | refs/heads/master | /ELMO_sentence/data_util.py | from collections import Counter
import json
import random
def get_vocab():
    """Collect the deduplicated, lowercased vocabulary of the PICO data.

    Reads both the train and validation JSON-lines files; each line's first
    element is a token list.

    Returns:
        list[str]: Unique lowercased tokens (order unspecified, as before).
    """
    sentence_vocab = []
    # Fix: the original opened both files without ever closing them and
    # duplicated the read loop; `with` guarantees the handles are released.
    for path in ("../PICO/trainPICO.json", "../PICO/valiPICO.json"):
        with open(path, 'r') as f:
            for line in f:
                sample = json.loads(line)
                sentence_vocab.extend(word.lower() for word in sample[0])
    vocab = list(set(sentence_vocab))
    return vocab
def _genVocabFile(vocabFile):
    """Write the ELMo vocab file: special tokens, then words by frequency.

    Args:
        vocabFile (str): Output path; one token per line, most frequent
            words first, preceded by <S>, </S> and <UNK>.
    """
    wordCount = Counter(get_vocab())  # 统计词频 (count word frequencies)
    # most_common() == sorted by count, descending, ties in insertion order —
    # identical ordering to the original manual sorted(..., reverse=True).
    words = [word for word, _ in wordCount.most_common()]
    allTokens = ['<S>', '</S>', '<UNK>'] + words
    with open(vocabFile, 'w') as fout:
        fout.write('\n'.join(allTokens))
    print("vocabfileget")
def get_data():
    """Load the token lists (first element of each JSON line) from the train file.

    Returns:
        list[list[str]]: One token list per training example.
    """
    dataset = []
    # Fix: the original left the file handle open (and shadowed it with its
    # own readlines() result); `with` closes it deterministically.
    with open("../PICO/trainPICO.json") as f:
        for line in f:
            dataset.append(json.loads(line)[0])
    print("数据集Get")
    return dataset
def padSentence(datasets):
    """Right-pad every token list with "" to the length of the longest one.

    Args:
        datasets (list[list[str]]): Token lists of varying lengths.

    Returns:
        list[list[str]]: New lists, all of equal length. [] for empty input
        (the original crashed on max() of an empty sequence).
    """
    if not datasets:
        return []
    max_source_length = max(len(sample) for sample in datasets)
    dataset = [
        sample + [""] * (max_source_length - len(sample))
        for sample in datasets
    ]
    print("paddingget")
    return dataset
| {"/ELMO_sentence/train.py": ["/ELMO_sentence/data_util.py"]} |
78,120 | pumpkinduo/ELMO_sentence_encoder | refs/heads/master | /ELMO_sentence/train.py | import tensorflow as tf
from ELMO_sentence.data_util import get_data,_genVocabFile,padSentence
import os
from ELMO_sentence.bilm import TokenBatcher, BidirectionalLanguageModel, weight_layers, dump_token_embeddings
import json
import numpy as np
# Append-mode output: one JSON-encoded sentence embedding per line.
k = open("PICO_elmo_train.json","a")
# Dump the token embeddings to a file. Run this once for your dataset.
token_embedding_file = 'elmo_token_embeddings.hdf5'
vocab_file = "../ELMO_sentence/vocab.txt"
_genVocabFile(vocab_file)
options_file = "../ELMO_sentence/elmo_options.json"
weight_file = "../ELMO_sentence/elmo_weights.hdf5"
dump_token_embeddings(
    vocab_file, options_file, weight_file, token_embedding_file
)
tf.reset_default_graph()
# Build the biLM graph.
bilm = BidirectionalLanguageModel(
    options_file,
    weight_file,
    use_character_inputs=False,
    embedding_weight_file=token_embedding_file
)
# Placeholder for a batch of padded token-id sequences.
context_token_ids = tf.placeholder(tf.int32,[None,None],"context_token_ids")
# Get ops to compute the LM embeddings.
context_embeddings_op = bilm(context_token_ids)
elmo_context_input = weight_layers('input', context_embeddings_op, l2_coef=0.0)
# run
dataset = get_data()
data = padSentence(dataset)
batcher = TokenBatcher(vocab_file)
with tf.Session() as sess:
    # It is necessary to initialize variables once before running inference.
    sess.run(tf.global_variables_initializer())
    # Create batches of data.
    # NOTE(review): the first 200 sentences are skipped here — confirm the
    # offset is intentional (e.g. resuming a previous partial run).
    batchdata = batcher.batch_sentences(data[200:])
    step = 1
    # Process the corpus in chunks of 128 sentences.
    for i in range(0, len(batchdata), 128):
        elmo_input = []
        # Compute ELMo representations (here for the input only, for simplicity).
        elmo_context_input_ = sess.run(
            [elmo_context_input['weighted_op']],
            feed_dict={context_token_ids: batchdata[i:min(i+128,len(batchdata))]}
        )
        print(step)
        # Mean-pool over the token axis: one vector per sentence.
        for input in elmo_context_input_[0]:
            elmo_input.append(np.mean(input,axis=0))
        step+=1
        # Append this chunk's sentence embeddings to the output file,
        # one JSON list per line. NOTE(review): this block reuses the loop
        # name `i`; indentation was reconstructed — confirm it belongs
        # inside the chunk loop.
        count = 0
        sentenceembedding = {}
        for i in elmo_input:
            sentenceembedding[count] = i.tolist()
            json.dump(sentenceembedding[count], k)
            k.write("\n")
            count+=1
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.