index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
78,423 | yzgyyang/wsl-tray | refs/heads/master | /src/systray/win32_adapter.py | import ctypes
import ctypes.wintypes
import locale
import sys
RegisterWindowMessage = ctypes.windll.user32.RegisterWindowMessageA
LoadCursor = ctypes.windll.user32.LoadCursorA
LoadIcon = ctypes.windll.user32.LoadIconA
LoadImage = ctypes.windll.user32.LoadImageA
RegisterClass = ctypes.windll.user32.RegisterClassA
CreateWindowEx = ctypes.windll.user32.CreateWindowExA
UpdateWindow = ctypes.windll.user32.UpdateWindow
DefWindowProc = ctypes.windll.user32.DefWindowProcA
GetSystemMetrics = ctypes.windll.user32.GetSystemMetrics
InsertMenuItem = ctypes.windll.user32.InsertMenuItemA
PostMessage = ctypes.windll.user32.PostMessageA
PostQuitMessage = ctypes.windll.user32.PostQuitMessage
SetMenuDefaultItem = ctypes.windll.user32.SetMenuDefaultItem
GetCursorPos = ctypes.windll.user32.GetCursorPos
SetForegroundWindow = ctypes.windll.user32.SetForegroundWindow
TrackPopupMenu = ctypes.windll.user32.TrackPopupMenu
CreatePopupMenu = ctypes.windll.user32.CreatePopupMenu
CreateCompatibleDC = ctypes.windll.gdi32.CreateCompatibleDC
GetDC = ctypes.windll.user32.GetDC
CreateCompatibleBitmap = ctypes.windll.gdi32.CreateCompatibleBitmap
GetSysColorBrush = ctypes.windll.user32.GetSysColorBrush
FillRect = ctypes.windll.user32.FillRect
DrawIconEx = ctypes.windll.user32.DrawIconEx
SelectObject = ctypes.windll.gdi32.SelectObject
DeleteDC = ctypes.windll.gdi32.DeleteDC
DestroyWindow = ctypes.windll.user32.DestroyWindow
GetModuleHandle = ctypes.windll.kernel32.GetModuleHandleA
GetMessage = ctypes.windll.user32.GetMessageA
TranslateMessage = ctypes.windll.user32.TranslateMessage
DispatchMessage = ctypes.windll.user32.DispatchMessageA
Shell_NotifyIcon = ctypes.windll.shell32.Shell_NotifyIcon
DestroyIcon = ctypes.windll.user32.DestroyIcon
NIM_ADD = 0
NIM_MODIFY = 1
NIM_DELETE = 2
NIF_ICON = 2
NIF_MESSAGE = 1
NIF_TIP = 4
MIIM_ID = 2
MIIM_SUBMENU = 4
MIIM_STRING = 64
MIIM_BITMAP = 128
WM_DESTROY = 2
WM_CLOSE = 16
WM_COMMAND = 273
WM_USER = 1024
WM_LBUTTONDBLCLK = 515
WM_RBUTTONUP = 517
WM_LBUTTONUP = 514
WM_NULL = 0
CS_VREDRAW = 1
CS_HREDRAW = 2
IDC_ARROW = 32512
COLOR_WINDOW = 5
WS_OVERLAPPED = 0
WS_SYSMENU = 524288
CW_USEDEFAULT = -2147483648
LR_LOADFROMFILE = 16
LR_DEFAULTSIZE = 64
IMAGE_ICON = 1
IDI_APPLICATION = 32512
TPM_LEFTALIGN = 0
SM_CXSMICON = 49
SM_CYSMICON = 50
COLOR_MENU = 4
DI_NORMAL = 3
MFT_SEPARATOR = 0x00000800
WPARAM = ctypes.wintypes.WPARAM
LPARAM = ctypes.wintypes.LPARAM
HANDLE = ctypes.wintypes.HANDLE
# LRESULT is pointer-sized on Windows: pick whichever C integer type
# matches the platform pointer width (32- vs 64-bit builds).
if ctypes.sizeof(ctypes.c_long) == ctypes.sizeof(ctypes.c_void_p):
    LRESULT = ctypes.c_long
elif ctypes.sizeof(ctypes.c_longlong) == ctypes.sizeof(ctypes.c_void_p):
    LRESULT = ctypes.c_longlong
# Size of the fixed szTip tooltip buffer in NOTIFYICONDATA.
SZTIP_MAX_LENGTH = 128
# Encoding used for all text handed to the ANSI ("A") Win32 entry points.
LOCALE_ENCODING = locale.getpreferredencoding()
def encode_for_locale(s):
    """Encode *s* for the system locale; fall back to ASCII on failure."""
    try:
        encoded = s.encode(LOCALE_ENCODING, 'ignore')
    except (AttributeError, UnicodeDecodeError):
        # *s* was already a byte string (bytes has no .encode, raising
        # AttributeError) -- squeeze it through ASCII first.
        encoded = s.decode('ascii', 'ignore').encode(LOCALE_ENCODING)
    return encoded
# Aliases for common Win32 structs, plus the window-procedure callback
# signature: LRESULT WndProc(HWND, UINT, WPARAM, LPARAM).
POINT = ctypes.wintypes.POINT
RECT = ctypes.wintypes.RECT
MSG = ctypes.wintypes.MSG
LPFN_WNDPROC = ctypes.CFUNCTYPE(LRESULT, HANDLE, ctypes.c_uint, WPARAM, LPARAM)
class WNDCLASS(ctypes.Structure):
    # Mirrors the Win32 WNDCLASSA struct; field order and types must
    # match the ABI exactly. Used with RegisterClass to back the hidden
    # message-only window behind the tray icon.
    _fields_ = [("style", ctypes.c_uint),
                ("lpfnWndProc", LPFN_WNDPROC),
                ("cbClsExtra", ctypes.c_int),
                ("cbWndExtra", ctypes.c_int),
                ("hInstance", HANDLE),
                ("hIcon", HANDLE),
                ("hCursor", HANDLE),
                ("hbrBackground", HANDLE),
                ("lpszMenuName", ctypes.c_char_p),
                ("lpszClassName", ctypes.c_char_p),
                ]
class MENUITEMINFO(ctypes.Structure):
    # Mirrors the Win32 MENUITEMINFOA struct used by InsertMenuItem;
    # field order and types must match the ABI exactly.
    _fields_ = [("cbSize", ctypes.c_uint),
                ("fMask", ctypes.c_uint),
                ("fType", ctypes.c_uint),
                ("fState", ctypes.c_uint),
                ("wID", ctypes.c_uint),
                ("hSubMenu", HANDLE),
                ("hbmpChecked", HANDLE),
                ("hbmpUnchecked", HANDLE),
                ("dwItemData", ctypes.c_void_p),
                ("dwTypeData", ctypes.c_char_p),
                ("cch", ctypes.c_uint),
                ("hbmpItem", HANDLE),
                ]
class NOTIFYICONDATA(ctypes.Structure):
    # Mirrors the Win32 NOTIFYICONDATAA struct passed to Shell_NotifyIcon;
    # field order and buffer sizes must match the ABI exactly.
    _fields_ = [("cbSize", ctypes.c_uint),
                ("hWnd", HANDLE),
                ("uID", ctypes.c_uint),
                ("uFlags", ctypes.c_uint),
                ("uCallbackMessage", ctypes.c_uint),
                ("hIcon", HANDLE),
                ("szTip", ctypes.c_char * SZTIP_MAX_LENGTH),
                ("dwState", ctypes.c_uint),
                ("dwStateMask", ctypes.c_uint),
                ("szInfo", ctypes.c_char * 256),
                ("uTimeout", ctypes.c_uint),
                ("szInfoTitle", ctypes.c_char * 64),
                ("dwInfoFlags", ctypes.c_uint),
                ("guidItem", ctypes.c_char * 16),
                ]
    # Windows 2000+ (major version >= 5) appends hBalloonIcon; this runs
    # at class-creation time, before ctypes freezes the field layout.
    if sys.getwindowsversion().major >= 5:
        _fields_.append(("hBalloonIcon", HANDLE))
def PackMENUITEMINFO(text=None, hbmpItem=None, wID=None, hSubMenu=None):
    """Build a MENUITEMINFO struct for InsertMenuItem.

    The sentinel caption "-----" produces a separator item; otherwise
    fMask flags are accumulated for whichever of bitmap / id / caption /
    submenu were supplied.
    """
    res = MENUITEMINFO()
    res.cbSize = ctypes.sizeof(res)
    if text == "-----":
        # NOTE(review): fMask is left zero here, and per the MENUITEMINFO
        # contract fType is only honored when MIIM_FTYPE (0x100) is set in
        # fMask -- confirm the separator actually renders on Windows.
        res.fType = MFT_SEPARATOR
        return res
    res.fMask = 0
    if hbmpItem is not None:
        res.fMask |= MIIM_BITMAP
        res.hbmpItem = hbmpItem
    if wID is not None:
        res.fMask |= MIIM_ID
        res.wID = wID
    if text is not None:
        # Captions must be locale-encoded byte strings for the "A" API.
        text = encode_for_locale(text)
        res.fMask |= MIIM_STRING
        res.dwTypeData = text
    if hSubMenu is not None:
        res.fMask |= MIIM_SUBMENU
        res.hSubMenu = hSubMenu
    return res
def LOWORD(w):
    """Return the low-order 16 bits of *w* (Win32 LOWORD macro)."""
    return w % 0x10000
def PumpMessages():
    """Run a standard Win32 message loop until GetMessage returns <= 0.

    GetMessage returns 0 on WM_QUIT and -1 on error; both end the loop.
    """
    msg = MSG()
    while GetMessage(ctypes.byref(msg), None, 0, 0) > 0:
        TranslateMessage(ctypes.byref(msg))
        DispatchMessage(ctypes.byref(msg))
def NotifyData(hWnd=0, uID=0, uFlags=0, uCallbackMessage=0, hIcon=0, szTip=""):
    """Build a NOTIFYICONDATA struct for Shell_NotifyIcon.

    The tooltip is encoded for the system locale and truncated to fit
    the fixed-size szTip buffer.
    """
    data = NOTIFYICONDATA()
    data.cbSize = ctypes.sizeof(data)
    data.hWnd = hWnd
    data.uID = uID
    data.uFlags = uFlags
    data.uCallbackMessage = uCallbackMessage
    data.hIcon = hIcon
    data.szTip = encode_for_locale(szTip)[:SZTIP_MAX_LENGTH]
    return data
| {"/src/app.py": ["/src/systray/win32_adapter.py"]} |
78,436 | nekeal/product | refs/heads/master | /cars/models.py | from django.db import models
# Create your models here.
class Producer(models.Model):
    """A car manufacturer, identified by a unique name."""
    name = models.CharField(max_length=20, unique=True)
    def __str__(self):
        return self.name
class CarModel(models.Model):
    """A model line offered by a Producer (e.g. "Mustang - Sedan")."""
    name = models.CharField(max_length=20)
    producer = models.ForeignKey(Producer, on_delete=models.CASCADE)
    type = models.CharField(max_length=20)
    def __str__(self):
        return f'{self.name} - {self.type}'
    class Meta:
        # A producer may not list the same (name, type) combination twice.
        unique_together = ('name', 'producer', 'type')
class Car(models.Model):
    """A physical vehicle belonging to a CarModel."""
    # Stored value -> human-readable label for the category field.
    CATEGORY_CHOICES = (
        ('eco', 'Economic'),
        ('buss', 'Business'),
        ('first', 'First class'),
    )
    # NOTE(review): registration_number is not unique=True -- confirm
    # whether duplicate registrations are intended.
    registration_number = models.CharField(max_length=20)
    people_capacity = models.PositiveIntegerField()
    manufacture_year = models.PositiveIntegerField()
    model = models.ForeignKey(CarModel, on_delete=models.CASCADE)
    # blank=True: category is optional on forms/serializers.
    category = models.CharField(max_length=10,choices=CATEGORY_CHOICES, blank=True)
    hybrid_or_electric = models.BooleanField(default=False)
    def __str__(self):
        return self.registration_number
| {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,437 | nekeal/product | refs/heads/master | /cars/migrations/0004_auto_20190510_1347.py | # Generated by Django 2.2.1 on 2019-05-10 13:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: moves the category / hybrid_or_electric
    # fields from CarModel down onto Car. Do not edit by hand.

    dependencies = [
        ('cars', '0003_auto_20190510_1313'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='carmodel',
            name='category',
        ),
        migrations.RemoveField(
            model_name='carmodel',
            name='hybrid_or_electric',
        ),
        migrations.AddField(
            model_name='car',
            name='category',
            field=models.CharField(blank=True, choices=[('eco', 'Economic'), ('buss', 'Business'), ('first', 'First class')], max_length=10),
        ),
        migrations.AddField(
            model_name='car',
            name='hybrid_or_electric',
            field=models.BooleanField(default=False),
        ),
    ]
| {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,438 | nekeal/product | refs/heads/master | /cars/serializers.py |
from rest_framework import serializers
from rest_framework.validators import ValidationError
from .models import Car, CarModel, Producer
class ProducerModelSerializer(serializers.HyperlinkedModelSerializer):
    """Exposes a producer's name plus its hyperlinked detail URL."""
    class Meta:
        model = Producer
        fields = ('name','url')
class CarModelModelSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for CarModel.

    Reads render the producer as its string form; writes take a producer
    primary key via the write-only producer_id field.
    """
    producer = serializers.CharField(read_only=True)
    producer_id = serializers.PrimaryKeyRelatedField(write_only=True, queryset=Producer.objects.all())
    def validate_type(self, value):
        # Normalize free-text capitalization, e.g. "sedan" -> "Sedan".
        return value.capitalize()
    def create(self, validated_data):
        # PrimaryKeyRelatedField resolves to a Producer instance; the
        # model kwarg wants the raw id.
        validated_data['producer_id'] = validated_data['producer_id'].id
        return super(CarModelModelSerializer, self).create(validated_data)
    class Meta:
        model = CarModel
        fields = ('name', 'producer', 'type', 'producer_id', 'url')
class CarModelSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer for Car.

    A car can be created either against an existing CarModel (via the
    write-only ``model_id``) or with inline data for a brand-new one
    (via nested ``model``); ``validate`` enforces exactly one of the two.
    """
    model = CarModelModelSerializer(required=False)
    # Accepts/emits the display label; create() maps it back to the key.
    category = serializers.ChoiceField(source='get_category_display', choices=Car.CATEGORY_CHOICES, required=False)
    model_id = serializers.PrimaryKeyRelatedField(queryset=CarModel.objects.all(), required=False, write_only=True)
    class Meta:
        model = Car
        fields = ('registration_number', 'people_capacity', 'manufacture_year', 'category', 'hybrid_or_electric', 'model', 'model_id', 'url')
    def __init__(self, *args, **kwargs):
        super(CarModelSerializer, self).__init__(*args, **kwargs)
        # Fix: the serializer may be constructed without a context, in
        # which case kwargs['context'] raised KeyError; fall back safely.
        fields = kwargs.get('context', {}).pop('fields', None)
        if fields:
            # Fields outside the whitelist become read-only (rather than
            # being dropped) so output keeps its full shape.
            allowed = set(fields)
            existing = set(self.fields.keys())
            for field in existing - allowed:
                self.fields[field].read_only = True
    def validate(self, attrs):
        # Exactly one of (inline model data, existing model id) required.
        if not(any([attrs.get('model'), attrs.get('model_id')])) or all([attrs.get('model'), attrs.get('model_id')]):
            raise ValidationError("Provide one of this: new model data, existing model id")
        return super(CarModelSerializer, self).validate(attrs)
    def update(self, instance, validated_data):
        # NOTE(review): assumes model_id was supplied -- an update sent
        # with inline "model" data instead would KeyError here. Confirm
        # updates are restricted to model_id (see the view's context).
        validated_data['model_id'] = validated_data['model_id'].id
        return super(CarModelSerializer, self).update(instance, validated_data)
    def create(self, validated_data):
        car_model = validated_data.pop('model_id', None) or validated_data.pop('model', None)
        # ChoiceField(source=...) lands under the source key; move it back
        # to the real model field name.
        validated_data['category'] = validated_data.pop('get_category_display')
        if isinstance(car_model, CarModel):
            car = Car.objects.create(**validated_data, model_id=car_model.id)
        else:
            # car_model is the nested dict: materialize the CarModel first.
            car_model['producer'] = car_model.pop('producer_id')
            car_model_instance = CarModel.objects.create(**car_model)
            car = Car.objects.create(**validated_data, model=car_model_instance)
        return car
class FileSerializer(serializers.Serializer):
file = serializers.FileField() | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,439 | nekeal/product | refs/heads/master | /product/settings/production.py | from .base import *
DEBUG = False
DATABASES = {
    # sqlite fallback kept for convenience; "default" below is what
    # Django actually uses in production.
    'local': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    },
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': 'product',
        'USER': 'product',
        # SECURITY(review): credential committed in plain text -- move it
        # to an environment variable / secrets store and rotate it.
        'PASSWORD': '12SDdv@7F?lJ',
        'HOST': 'localhost',
    }
}
MEDIA_ROOT = '/var/www/product/media/' | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,440 | nekeal/product | refs/heads/master | /cars/migrations/0005_auto_20190515_1409.py | # Generated by Django 2.2.1 on 2019-05-15 14:09
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds the (name, producer, type) uniqueness
    # constraint to CarModel. Do not edit by hand.

    dependencies = [
        ('cars', '0004_auto_20190510_1347'),
    ]

    operations = [
        migrations.AlterUniqueTogether(
            name='carmodel',
            unique_together={('name', 'producer', 'type')},
        ),
    ]
| {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,441 | nekeal/product | refs/heads/master | /cars/views.py | import csv
import io
from django.shortcuts import render
from rest_framework.viewsets import ModelViewSet
from rest_framework.decorators import action
from rest_framework.response import Response
from .models import Car, CarModel, Producer
from .serializers import CarModelSerializer, CarModelModelSerializer, ProducerModelSerializer, FileSerializer
class CarModelViewSet(ModelViewSet):
    '''
    Create form does not work properly.
    Valid data can be found in test_views.py file.
    '''
    serializer_class = CarModelSerializer
    queryset = Car.objects.all()
    def get_serializer_class(self):
        # The CSV-import action takes a file upload, not car data.
        if self.action == 'import_models':
            return FileSerializer
        return super(CarModelViewSet, self).get_serializer_class()
    def get_serializer_context(self):
        context = super(CarModelViewSet, self).get_serializer_context()
        if self.action == 'update':
            # Restrict updates to these fields; everything else is made
            # read-only by CarModelSerializer.__init__.
            context['fields'] = ('registration_number', 'people_capacity', 'manufacture_year',
                                 'category', 'hybrid_or_electric', 'model_id')
        return context
    @action(detail=False, methods=['post',], url_path='importmodels')
    def import_models(self, request):
        """Bulk-import car models from an uploaded CSV-like file.

        Expected line format after a skipped header line:
        ``producer, model name, type``. Stops and returns serializer
        errors on the first invalid row.
        """
        file = request.FILES['file']
        text = file.read().decode('utf-8')
        for row in text.split('\n')[1:]:
            # Guard against blank / truncated trailing lines.
            if len(row) < 3:
                continue
            row = row.split(', ')
            producer,_ = Producer.objects.get_or_create(name=row[0])
            car_model_serializer = CarModelModelSerializer(data={'producer_id': producer.id, 'name':row[1],
                                                                 'type':row[2]})
            if car_model_serializer.is_valid():
                car_model_serializer.save()
            else:
                return Response(car_model_serializer.errors)
        return Response({'Succes':True})
class CarModelModelViewSet(ModelViewSet):
    """Plain CRUD endpoints for CarModel."""
    serializer_class = CarModelModelSerializer
    queryset = CarModel.objects.all()
class ProducerModelViewSet(ModelViewSet):
serializer_class = ProducerModelSerializer
queryset = Producer.objects.all() | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,442 | nekeal/product | refs/heads/master | /cars/admin.py | from django.contrib import admin
from .models import Car, CarModel, Producer
# Register your models here.
# Stock ModelAdmin is sufficient for these simple models.
admin.site.register(Car)
admin.site.register(CarModel)
admin.site.register(Producer) | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,443 | nekeal/product | refs/heads/master | /cars/tests/test_views.py | import pytest
import json
from model_mommy import mommy
from django.shortcuts import reverse
from rest_framework import status
from ..views import CarModelViewSet
from ..serializers import CarModelSerializer
from ..models import Car, CarModel, Producer
class TestCarModelViewSet:
    """APIRequestFactory-driven tests for CarModelViewSet CRUD endpoints."""

    @pytest.mark.django_db
    def test_list_car_view(self, api_rf):
        # GET list returns every car, serialized with the request context.
        request = api_rf.get(reverse('car-list'))
        car = mommy.make(Car,2)
        response = CarModelViewSet.as_view({'get':'list'})(request)
        assert response.status_code == status.HTTP_200_OK
        assert response.data == CarModelSerializer(car, context={'request': request}, many=True).data

    @pytest.mark.django_db
    def test_create_car_with_id(self, api_rf):
        # POST with model_id links the car to an existing CarModel.
        car_model = mommy.make(CarModel)
        data = {
            "registration_number": "1258",
            "people_capacity": 5,
            "manufacture_year": 2005,
            "category": "eco",
            "hybrid_or_electric": False,
            "model_id": car_model.id,
        }
        request = api_rf.post(reverse('car-list'), data=data)
        response = CarModelViewSet.as_view({'post':'create'})(request)
        car = Car.objects.first()
        serializer = CarModelSerializer(car, context={'request':request})
        assert response.status_code == status.HTTP_201_CREATED
        assert response.data == serializer.data

    @pytest.mark.django_db
    def test_create_car_with_new_model(self, api_rf):
        # POST with nested "model" data creates the CarModel inline.
        producer = mommy.make(Producer, name='Ford')
        data = {
            "registration_number": "1258",
            "people_capacity": 5,
            "manufacture_year": 2005,
            "category": "eco",
            "hybrid_or_electric": False,
            "model": {
                'name': 'Mustang',
                'producer_id': producer.id,
                'type': 'Sedan',
            },
        }
        request = api_rf.post(reverse('car-list'), data=data)
        response = CarModelViewSet.as_view({'post':'create'})(request)
        car = Car.objects.first()
        serializer = CarModelSerializer(car, context={'request':request})
        assert response.status_code == status.HTTP_201_CREATED
        assert response.data == serializer.data

    @pytest.mark.django_db
    def test_create_car_with_bad_data(self, api_rf):
        # Supplying both "model" and "model_id" must be rejected.
        producer = mommy.make(Producer, name='Ford')
        car_model = mommy.make(CarModel)
        bad_data = {
            "registration_number": "1258",
            "people_capacity": 5,
            "manufacture_year": 2005,
            "category": "eco",
            "hybrid_or_electric": False,
            "model": {
                'name': 'Mustang',
                'producer_id': 1,
                'type': 'Sedan',
            },
            "model_id": 1
        }
        request = api_rf.post(reverse('car-list'), data=bad_data)
        response = CarModelViewSet.as_view({'post': 'create'})(request)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert "Provide one of this" in str(response.data)
        # NOTE(review): this second payload is byte-identical to the first;
        # it was presumably meant to cover the "neither model nor model_id"
        # case -- confirm and differentiate.
        bad_data = {
            "registration_number": "1258",
            "people_capacity": 5,
            "manufacture_year": 2005,
            "category": "eco",
            "hybrid_or_electric": False,
            "model": {
                'name': 'Mustang',
                'producer_id': 1,
                'type': 'Sedan',
            },
            "model_id": 1
        }
        request = api_rf.post(reverse('car-list'), data=bad_data)
        response = CarModelViewSet.as_view({'post': 'create'})(request)
        assert response.status_code == status.HTTP_400_BAD_REQUEST
        assert "Provide one of this" in str(response.data)

    @pytest.mark.django_db
    def test_update_car_with_model_id(self, api_rf):
        # PUT swaps the car onto a different existing CarModel.
        car = mommy.make(Car, people_capacity=3)
        car_model = mommy.make(CarModel)
        data = {
            "registration_number": "1258",
            "people_capacity": 5,
            "manufacture_year": 2005,
            "category": "eco",
            "hybrid_or_electric": False,
            "model_id": car_model.id
        }
        request = api_rf.put(reverse('car-detail', args=(car.id,)), data=data)
        response = CarModelViewSet.as_view({'put': 'update'})(request, pk=car.id)
        car.refresh_from_db()
        assert response.status_code == status.HTTP_200_OK
        assert car.model_id == car_model.id
@pytest.mark.django_db
def test_delete_car_view(self, api_rf):
car = mommy.make(Car)
request = api_rf.delete(reverse('car-detail', args=(car.id,)))
response = CarModelViewSet.as_view({'delete': 'destroy'})(request, pk=car.id)
assert response.status_code == status.HTTP_204_NO_CONTENT
assert response.data is None
assert Car.objects.first() is None | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,444 | nekeal/product | refs/heads/master | /cars/utils.py | from .models import Car
def models_import():
print(Car.objects.all()) | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,445 | nekeal/product | refs/heads/master | /cars/urls.py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from .views import CarModelViewSet, CarModelModelViewSet, ProducerModelViewSet
# DefaultRouter generates the standard list/detail routes for each viewset.
router = DefaultRouter()
router.register('cars', CarModelViewSet)
router.register('carmodels', CarModelModelViewSet)
router.register('producers', ProducerModelViewSet)
urlpatterns = [
path('', include(router.urls))
] | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,446 | nekeal/product | refs/heads/master | /conftest.py | import pytest
from rest_framework.test import APIRequestFactory
@pytest.fixture()
def api_rf():
return APIRequestFactory() | {"/cars/serializers.py": ["/cars/models.py"], "/cars/views.py": ["/cars/models.py", "/cars/serializers.py"], "/cars/admin.py": ["/cars/models.py"], "/cars/tests/test_views.py": ["/cars/views.py", "/cars/serializers.py", "/cars/models.py"], "/cars/utils.py": ["/cars/models.py"], "/cars/urls.py": ["/cars/views.py"]} |
78,447 | AustinDumm/automeno | refs/heads/master | /main.py | from Automeno.Types import *
from Automeno.DefaultComponents import *
from Automeno.ComponentFactory import *
from Automeno.Macheno import *
from Automeno.Interactive import *
from midiutil import MIDIFile
# NOTE(review): this MIDIFile and tempo are never used or written out --
# Macheno.run() constructs its own MIDIFile; likely leftover setup.
midi_file = MIDIFile(numTracks=1, removeDuplicates=True, eventtime_is_ticks=True)
midi_file.addTempo(0, 0, 120)
# Drop straight into the interactive authoring loop on an empty graph.
interactive(Macheno())
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,448 | AustinDumm/automeno | refs/heads/master | /Automeno/Macheno.py | from Automeno.Types import DictSerializable
from Automeno.Component import *
from Automeno.ComponentFactory import AutomenoComponentFactory
from midiutil import MIDIFile
from functools import reduce
import json
class Macheno(DictSerializable):
    """Graph of named components/channels that renders its output to MIDI."""

    # General MIDI reserves channel 9 for percussion.
    percussion_channel = 9

    def __init__(self):
        self.components = {}    # name -> Component (channels included)
        self.channels_keys = [] # names of the components that are channels

    def find_component(self, name):
        """Return the component registered under *name*, or None."""
        if name in self.components:
            return self.components[name]
        return None

    def add_component(self, name, component):
        self.components[name] = component

    def add_channel(self, name, channel):
        # Channels live alongside the other components but are indexed
        # separately so run() can iterate them in insertion order.
        self.components[name] = channel
        self.channels_keys.append(name)

    def num_tracks_needed(self):
        """Tracks required: one per percussion channel, else 16 channels/track."""
        # NOTE(review): this checks parameters["Track"], but run() keys
        # percussion off parameters["Channel"] -- confirm which is right.
        num_drums_tracks = len([key for key in self.channels_keys
                                if "Track" in self.components[key].parameters
                                and self.components[key].parameters["Track"] == Macheno.percussion_channel])
        return max(num_drums_tracks, (len(self.components) // 16) + 1)

    def run(self, file_name):
        """Evaluate every channel tick by tick and write a MIDI file.

        Fix: debug print() statements removed from this method.
        """
        midi_file = MIDIFile(numTracks=self.num_tracks_needed(), removeDuplicates=True, eventtime_is_ticks=True)
        next_drum_track = 0
        for index, key in enumerate(self.channels_keys):
            channel = self.components[key]
            track = 0
            if channel.parameters["Channel"] == Macheno.percussion_channel:
                # Each percussion channel gets a dedicated track.
                track = next_drum_track
                next_drum_track += 1
            else:
                track = index // 16
            midi_file.addProgramChange(track, channel.parameters["Channel"], 0, channel.parameters["Program"])
        for tick in range(0, 960 * 30, 960 // 2):
            for channel_key in self.channels_keys:
                channel = self.components[channel_key]
                notes_groups = channel.evaluate(tick)
                for group in notes_groups:
                    for note in group:
                        # NOTE(review): notes always go to track 0 even
                        # though program changes above used per-channel
                        # tracks -- confirm intended.
                        midi_file.addNote(0, channel.parameters["Channel"], note.pitch.midi_pitch(), tick, note.tick_length, note.volume.volume)
        for component in self.components.values():
            component.reset()
        with open(file_name, "wb") as f:
            midi_file.writeFile(f)

    def update_self(self, dictionary):
        """Replace this graph's contents with those deserialized from *dictionary*."""
        new_macheno = Macheno.deserialize(dictionary)
        self.components = new_macheno.components
        self.channels_keys = new_macheno.channels_keys

    def serialized_links(self):
        """Flatten every connection to an "outComp:outPort->inComp:inPort" string.

        Fix: removed a dead first definition of serialized_links_for_inport
        that was immediately shadowed by the reassignment below.
        """
        format_link = lambda outport, inport: "{}:{}->{}:{}".format(outport.component.name, outport.name, inport.component.name, inport.name)
        pairs_for_inport = lambda inport: list(map(lambda outport: (outport, inport), inport.connected_outports))
        serialized_links_for_inport = lambda inport: list(map(lambda pair: format_link(*pair), pairs_for_inport(inport)))
        serialized_links_for_component = lambda component: list(reduce(lambda acc, inport: acc + serialized_links_for_inport(inport), component.inports.values(), []))
        return list(reduce(lambda acc, component: acc + serialized_links_for_component(component), self.components.values(), []))

    def serialize(obj):
        # Plain function (no self); invoked as Macheno.serialize(obj) to
        # mirror Component.serialize.
        return { "components": dict(map(lambda name_component: (name_component[0], Component.serialize(name_component[1])), obj.components.items())),
                 "channels_keys": obj.channels_keys,
                 "links": obj.serialized_links() }

    def deserialize(dictionary):
        """Rebuild a Macheno from its serialized dict (components/channels/links)."""
        macheno = Macheno()
        for key, value in dictionary["components"].items():
            if key in dictionary["channels_keys"]:
                macheno.add_channel(key, Component.deserialize(value))
            else:
                macheno.add_component(key, Component.deserialize(value))
        for link in dictionary["links"]:
            # Link format: "outComp:outPort->inComp:inPort".
            link_split = link.split("->")
            outport_split = link_split[0].split(":")
            inport_split = link_split[1].split(":")
            macheno.components[inport_split[0]].inports[inport_split[1]].connect_outport(macheno.components[outport_split[0]].outports[outport_split[1]])
        return macheno
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,449 | AustinDumm/automeno | refs/heads/master | /Automeno/ComponentFactory.py |
# Global registry mapping delegate names to their implementing classes.
_AUTOMENO_COMPONENT_DELEGATES = {}


def AutomenoComponentDelegate(name):
    """Class decorator that registers the decorated class under *name*."""
    def _register(cls):
        # Record the class and hand it back unchanged, so the decorator
        # is transparent to its users.
        _AUTOMENO_COMPONENT_DELEGATES[name] = cls
        return cls
    return _register
def AutomenoComponentFactory(name, delegate_name, parameters):
    """Create a Component wired to the delegate registered as *delegate_name*.

    Raises:
        Exception: if no delegate was registered under *delegate_name*.
    """
    # Imported here to avoid a circular import with Automeno.Component,
    # which itself imports this module.
    from Automeno.Component import Component
    if delegate_name not in _AUTOMENO_COMPONENT_DELEGATES:
        raise Exception("Delegate {} does not exist".format(delegate_name))
    return Component(name, _AUTOMENO_COMPONENT_DELEGATES[delegate_name], delegate_name, parameters)
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,450 | AustinDumm/automeno | refs/heads/master | /Automeno/DefaultComponents.py | from Automeno.Types import *
from Automeno.ComponentFactory import AutomenoComponentDelegate
from Automeno.Component import AutomenoComponentProtocol
@AutomenoComponentDelegate("FileCharacter")
class FileCharacterComponentDelegate(AutomenoComponentProtocol):
def inports():
return {}
def outports():
return { "Character": str }
def parameters_types():
return { "FileName": str }
def evaluate_generator(inports, parameters):
while True:
with open(parameters["FileName"], "r") as f:
character = f.read(1)
while character:
yield { "Character": character }
character = f.read(1)
@AutomenoComponentDelegate("NoteGenerator")
class NoteGeneratorDelegate(AutomenoComponentProtocol):
def inports():
return { "On": bool }
def outports():
return { "Notes": [Note] }
def parameters_types():
return { "PlayNote": Note,
"NeedAllOn": bool }
def evaluate_generator(inports, parameters):
tick = 0
while True:
if parameters["NeedAllOn"] and all(inports["On"].evaluate(tick)):
tick = yield { "Notes": [parameters["PlayNote"]] }
elif not parameters["NeedAllOn"] and any(inports["On"].evaluate(tick)):
tick = yield { "Notes": [parameters["PlayNote"]] }
else:
tick = yield { "Notes": [] }
@AutomenoComponentDelegate("WordExists")
class WordExistsDelegate(AutomenoComponentProtocol):
def inports():
return { "Word": str }
def outports():
return { "Exists": bool }
def parameters_types():
return { "WordToCheck": str,
"NeedAllMatch": bool }
def evaluate_generator(inports, parameters):
tick = 0
while True:
check = inports["Word"].evaluate(tick)
if parameters["NeedAllMatch"]:
tick = yield { "Exists": all(map(lambda word: word in parameters["WordToCheck"], check) )}
else:
tick = yield { "Exists": any(map(lambda word: word in parameters["WordToCheck"], check) )}
@AutomenoComponentDelegate("RhythmGenerator")
class RhythmGeneratorDelegate(AutomenoComponentProtocol):
def inports():
return {}
def outports():
return { "On": bool }
def parameters_types():
return { "Equation": str }
def evaluate_generator(inports, parameters):
tick = 0
while True:
equation = parameters["Equation"]
value = eval(equation, { "tick": tick })
tick = yield { "On": value }
@AutomenoComponentDelegate("Channel")
class ChannelSinkComponentDelegate(AutomenoComponentProtocol):
def inports():
return { "Notes": [Note] }
def outports():
return {}
def parameters_types():
return { "Channel": int, "Program": int }
def evaluate_generator(inports, parameters):
tick = 0
while True:
tick = yield inports["Notes"].evaluate(tick)
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,451 | AustinDumm/automeno | refs/heads/master | /Automeno/Interactive.py | from Automeno.Macheno import Macheno
from Automeno.Types import *
from Automeno.ComponentFactory import _AUTOMENO_COMPONENT_DELEGATES
from Automeno.ComponentFactory import AutomenoComponentFactory
from collections import defaultdict
from functools import reduce
import json
# Dispatch table for interactive commands; unknown names fall back to
# _command_not_found.
_command_dictionary = defaultdict(lambda: _command_not_found)


def _command_not_found():
    # NOTE(review): takes no arguments, so dispatching an unknown command
    # *with* arguments would raise TypeError -- confirm against the REPL
    # loop (not visible in this file).
    return "Command not found"


def AutomenoInteractiveCommand(name):
    """Decorator registering the decorated function as command *name*."""
    def _register(fn):
        _command_dictionary[name] = fn
        return fn
    return _register
def AutomenoInteractiveArguments(arguments):
    """Decorator validating argc/argv for an interactive command.

    *arguments* is a list of (label, type) pairs describing the command's
    positional arguments after the implicit leading Macheno. On mismatch
    the wrapper returns an error string instead of calling the command;
    a None argument value skips type checking.
    """
    def process_function(fn):
        def run_function(*args):
            if type(args[0]) != Macheno:
                raise Exception("AutomenoInteractiveCommand failed to provide Macheno")
            if len(arguments) + 1 != len(args):
                # Fix: "commmand" typo in the user-facing message.
                return "Invalid number of command arguments: {}".format(arguments)
            for i, (_label, expected_type) in enumerate(arguments):
                value = args[i + 1]
                # Fix: "!= None" -> "is not None". The exact-type check
                # (rejecting subclasses) is kept deliberately.
                if value is not None and type(value) != expected_type:
                    return "Invalid command arguments: {}".format(arguments)
            return fn(*args)
        return run_function
    return process_function
@AutomenoInteractiveCommand("help")
def _command_help(*args):
    # Returns a live dict_keys view of all registered command names
    # (printed directly by the REPL loop).
    return _command_dictionary.keys()
@AutomenoInteractiveCommand("list")
@AutomenoInteractiveArguments([("Item Type", str)])
def _command_list(*args):
    """List components, channels, or port links of the current graph."""
    macheno = args[0]
    item_type = args[1]
    if item_type == "components":
        return str(macheno.components.keys())
    elif item_type == "channels":
        return str(macheno.channels_keys)
    elif item_type == "links":
        # One line per component, then one indented line per inbound link.
        list_string = ""
        for component_key in macheno.components.keys():
            list_string += "{}\n".format(component_key)
            component = macheno.components[component_key]
            for inport_key in component.inports.keys():
                inport = component.inports[inport_key]
                for outport in inport.connected_outports:
                    list_string += "\t{}->{}\n".format(outport.name, inport.name)
        return list_string
    else:
        return "Invalid item type [component | channels | links]"
@AutomenoInteractiveCommand("create")
@AutomenoInteractiveArguments([("[component | channel]", str), ("name", str), ("<component_delegate>", str)])
def _command_create(*args):
    """Interactively create a component or channel, prompting per parameter."""
    macheno = args[0]
    item_type = args[1]
    item_name = args[2]
    delegate_name = args[3]
    if item_type == "component":
        if delegate_name not in _AUTOMENO_COMPONENT_DELEGATES:
            return "Unknown delegate name"
        delegate = _AUTOMENO_COMPONENT_DELEGATES[delegate_name]
        parameters = delegate.parameters_types()
        parameter_values = {}
        for key, value in parameters.items():
            # SECURITY: eval() on raw interactive input executes arbitrary
            # code. Presumably intentional at a local REPL so rich parameter
            # objects can be typed in — never expose this to untrusted input.
            parameter_values[key] = eval(input("{} ({}):".format(key, value)))
        component = AutomenoComponentFactory(item_name, delegate_name, parameter_values)
        macheno.add_component(item_name, component)
    elif item_type == "channel":
        delegate = _AUTOMENO_COMPONENT_DELEGATES["Channel"]
        parameters = delegate.parameters_types()
        parameter_values = {}
        for key, value in parameters.items():
            # Channels coerce input via the declared type instead of eval().
            parameter_values[key] = value(input("{} ({}):".format(key, value)))
        channel = AutomenoComponentFactory(item_name, "Channel", parameter_values)
        macheno.add_channel(item_name, channel)
    else:
        return "item_type must be \"component\" or \"channel\""
    return "{} \"{}\" added".format(item_type, item_name)
@AutomenoInteractiveCommand("delete")
def _command_delete(*args):
    # TODO: not implemented — returns None, which the REPL prints as "None".
    pass
@AutomenoInteractiveCommand("link")
@AutomenoInteractiveArguments([("outport", str), ("inport", str)])
def _command_link(*args):
    """Connect an outport to an inport; each is given as 'component:port'."""
    macheno = args[0]
    source_path = args[1].split(":")
    target_path = args[2].split(":")
    target_port = macheno.find_component(target_path[0]).inports[target_path[1]]
    source_port = macheno.find_component(source_path[0]).outports[source_path[1]]
    target_port.connect_outport(source_port)
    return "{} -> {} created".format(source_path, target_path)
@AutomenoInteractiveCommand("unlink")
def _command_unlink(*args):
    # TODO: not implemented — returns None, which the REPL prints as "None".
    pass
@AutomenoInteractiveCommand("run")
@AutomenoInteractiveArguments([("file_name", str)])
def _command_run(*args):
    """Render the current Macheno graph into the named output file."""
    macheno, file_name = args[0], args[1]
    macheno.run(file_name)
    return "Created {}".format(file_name)
@AutomenoInteractiveCommand("export")
@AutomenoInteractiveArguments([("file_name", str)])
def _command_export(*args):
    """Serialize the Macheno graph to a pretty-printed JSON file."""
    macheno, file_name = args[0], args[1]
    serialized = json.dumps(macheno.serialize(), indent=2, sort_keys=True)
    with open(file_name, "w") as out_file:
        out_file.write(serialized)
    return f'Exported to file {file_name}'
@AutomenoInteractiveCommand("import")
@AutomenoInteractiveArguments([("file_name", str)])
def _command_import(*args):
    """Load a previously exported JSON file into the Macheno graph."""
    macheno, file_name = args[0], args[1]
    with open(file_name, "r") as in_file:
        imported = json.loads(in_file.read())
    macheno.update_self(imported)
    return f'Imported file {file_name}'
def interactive(initial_macheno):
    """Run the command REPL against `initial_macheno` until 'exit' is typed.

    Each line is split on spaces; the first token selects a handler from
    _command_dictionary and the rest are passed through as string arguments.
    """
    while True:
        command = input("m:").split(" ")
        if not command[0]:
            # Blank input would otherwise dispatch an empty command name
            # through the zero-argument fallback handler and crash the loop.
            continue
        if command[0] == "exit":
            return
        print_value = _command_dictionary[command[0]](initial_macheno, *command[1:])
        print(print_value)
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,452 | AustinDumm/automeno | refs/heads/master | /Automeno/Types.py | from enum import IntEnum
class DictSerializable:
    """Informal interface: convert objects to/from JSON-compatible values."""
    def serialize(obj):
        """Turn `obj` into a plain JSON-compatible value. Must be overridden."""
        raise NotImplementedError("Must subclass DictSerializable")
    def deserialize(dictionary):
        """Rebuild an instance from a serialized value. Must be overridden."""
        raise NotImplementedError("Must subclass DictSerializable")
class Key(DictSerializable, IntEnum):
    """Chromatic pitch classes as semitone offsets from C (0-11)."""
    C = 0
    C_SHARP = 1
    D = 2
    D_SHARP = 3
    E = 4
    F = 5
    F_SHARP = 6
    G = 7
    G_SHARP = 8
    A = 9
    A_SHARP = 10
    B = 11
    def serialize(obj):
        # Serialized form is the bare semitone integer.
        return int(obj)
    def deserialize(dictionary):
        # Accepts the integer produced by serialize().
        return Key(dictionary)
class Pitch(DictSerializable):
    """A concrete pitch: a Key (pitch class) plus an octave number."""
    # Offset so that octave 0's C maps to MIDI note 12.
    pitch_offset = 12
    def __init__(self, key: Key, octave: int):
        self.key = key
        self.octave = octave
    def midi_pitch(self) -> int:
        """Return this pitch's MIDI note number."""
        return self.pitch_offset + 12 * self.octave + self.key
    def serialize(obj):
        return {
            "key": Key.serialize(obj.key),
            "octave": obj.octave,
        }
    def deserialize(dictionary):
        return Pitch(Key.deserialize(dictionary["key"]), dictionary["octave"])
class Volume(DictSerializable):
    """A MIDI volume value, clamped into the valid 0-127 range."""
    min_volume = 0
    max_volume = 127
    def __init__(self, volume: int):
        # Out-of-range values are clamped rather than rejected.
        self.volume = min(self.max_volume, max(self.min_volume, volume))
    def serialize(obj):
        return obj.volume
    def deserialize(dictionary):
        return Volume(dictionary)
class Note(DictSerializable):
    """A playable note: pitch, volume and duration in ticks."""
    def __init__(self, pitch: Pitch, volume: Volume, tick_length: int):
        self.pitch = pitch
        self.volume = volume
        self.tick_length = tick_length
    def __str__(self):
        return "({}, {}, {}, {})".format(str(self.pitch.key), self.pitch.octave, self.volume.volume, self.tick_length)
    def from_string(string):
        """Parse the __str__ round-trip format back into a Note.

        Fixes the original implementation, which subscripted the split
        method (`.split[","]` — a TypeError at runtime) and passed raw
        strings where Pitch/Volume expect integers.
        """
        sections = [section.strip() for section in string.strip("()").split(",")]
        # str(Key.C) renders as "Key.C" on most interpreter versions and as
        # "0" on others; accept the member name (qualified or bare) or the int.
        key_token = sections[0].split(".")[-1]
        if key_token in Key.__members__:
            key = Key[key_token]
        else:
            key = Key(int(key_token))
        return Note(Pitch(key, int(sections[1])), Volume(int(sections[2])), int(sections[3]))
    def serialize(obj):
        return { "pitch": Pitch.serialize(obj.pitch),\
                 "volume": Volume.serialize(obj.volume),\
                 "tick_length": obj.tick_length }
    def deserialize(dictionary):
        return Note(Pitch.deserialize(dictionary["pitch"]),\
                    Volume.deserialize(dictionary["volume"]),\
                    dictionary["tick_length"])
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,453 | AustinDumm/automeno | refs/heads/master | /Automeno/Component.py | import json
from Automeno.Types import *
from Automeno.ComponentFactory import _AUTOMENO_COMPONENT_DELEGATES
from functools import reduce
class AutomenoComponentProtocol:
    """Abstract delegate interface every component implementation supplies."""
    def inports():
        """Return {port_name: port_type} describing inputs."""
        raise NotImplementedError("Must subclass AutomenoComponentProtocol")
    def outports():
        """Return {port_name: port_type} describing outputs."""
        raise NotImplementedError("Must subclass AutomenoComponentProtocol")
    def parameters_types():
        """Return {parameter_name: expected_type} for construction-time parameters."""
        raise NotImplementedError("Must subclass AutomenoComponentProtocol")
    def evaluate_generator(inports, parameters):
        """Return a generator that, once primed, maps sent ticks to evaluations."""
        raise NotImplementedError("Must subclass AutomenoComponentProtocol")
class Port:
    """A named, typed connection point owned by a component."""
    def __init__(self, name, port_type, component):
        self.name = name
        self.port_type = port_type
        self.component = component  # owning Component instance
    def evaluate(self, tick):
        """Return this port's value at `tick`; subclasses must override."""
        raise NotImplementedError("Must subclass Port")
class OutPort(Port):
    # Output port: evaluation is delegated back to the owning component,
    # which selects this port's slice of its full evaluation.
    def evaluate(self, tick):
        return self.component.evaluate_outport(self.name, tick)
class InPort(Port):
    """An input port aggregating values from zero or more connected outports."""
    def __init__(self, name, port_type, component):
        super().__init__(name, port_type, component)
        self.connected_outports = []
    def connect_outport(self, outport):
        """Attach `outport`; raises when the port types do not match."""
        if self.port_type != outport.port_type:
            raise Exception("Mismatched In/OutPort Type: {} != {}".format(self.port_type, outport.port_type))
        self.connected_outports.append(outport)
    def evaluate(self, tick):
        """Return a list with each connected outport's value at `tick`.

        Results are collected, not flattened. Replaces the original's
        redundant list(reduce(...)) accumulation and removes dead
        commented-out flattening code referencing a nonexistent variable.
        """
        return [outport.evaluate(tick) for outport in self.connected_outports]
class Component(DictSerializable):
    """A graph node: typed in/out ports driven by a delegate's evaluation
    generator, memoizing one evaluation per reset() cycle, with dict
    (de)serialization of its construction parameters."""
    def __init__(self, name, delegate, delegate_name, parameters):
        self.name = name
        self.delegate_name = delegate_name
        # One InPort/OutPort per (name, type) pair declared by the delegate.
        self.inports = dict(map(lambda key_value: (key_value[0], InPort(key_value[0], key_value[1], self)), delegate.inports().items()))
        self.outports = dict(map(lambda key_value: (key_value[0], OutPort(key_value[0], key_value[1], self)), delegate.outports().items()))
        self.parameters = parameters
        if not self.valid(delegate.parameters_types()):
            raise Exception("Invalid Component Parameters: {}".format(self.parameters))
        # Prime the generator so it is parked at its first yield, ready for send().
        self.evaluate_generator = delegate.evaluate_generator(self.inports, parameters)
        next(self.evaluate_generator)
        # Memoized result for the current cycle; cleared by reset().
        self.current_evaluation = None
    def valid(self, parameters_types):
        """True when every supplied parameter is declared with that exact type."""
        for key, value in self.parameters.items():
            if key not in parameters_types or parameters_types[key] != type(value):
                return False
        return True
    def reset(self):
        """Drop the memoized evaluation so the next evaluate() advances the generator."""
        self.current_evaluation = None
    def evaluate(self, tick):
        # `is None` rather than `== None` (idiom fix; None is a singleton).
        # The cached value is reused until reset() clears it.
        if self.current_evaluation is None:
            self.current_evaluation = self.evaluate_generator.send(tick)
        return self.current_evaluation
    def evaluate_outport(self, outport_name, tick):
        """Evaluate this component at `tick` and select one outport's value."""
        return self.evaluate(tick)[outport_name]
    def serialize(obj):
        parameters = {}
        for name, value in obj.parameters.items():
            # Rich parameter objects serialize themselves; primitives pass through.
            if callable(getattr(type(value), "serialize", None)):
                parameters[name] = type(value).serialize(value)
            else:
                parameters[name] = value
        return { "name": obj.name,\
                 "delegate_name": obj.delegate_name,\
                 "parameters": parameters }
    def deserialize(dictionary):
        delegate = _AUTOMENO_COMPONENT_DELEGATES[dictionary["delegate_name"]]
        parameter_types = delegate.parameters_types()
        raw_parameters = dictionary["parameters"]
        finished_parameters = {}
        for key, parameter_type in parameter_types.items():
            # Primitives arrive in final form; rich types are rebuilt.
            if parameter_type == type(raw_parameters[key]):
                finished_parameters[key] = raw_parameters[key]
            else:
                parameter_object = parameter_type.deserialize(raw_parameters[key])
                finished_parameters[key] = parameter_object
        return Component(dictionary["name"], delegate, dictionary["delegate_name"], finished_parameters)
| {"/main.py": ["/Automeno/Types.py", "/Automeno/DefaultComponents.py", "/Automeno/ComponentFactory.py", "/Automeno/Macheno.py", "/Automeno/Interactive.py"], "/Automeno/Macheno.py": ["/Automeno/Types.py", "/Automeno/Component.py", "/Automeno/ComponentFactory.py"], "/Automeno/ComponentFactory.py": ["/Automeno/Component.py"], "/Automeno/DefaultComponents.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py", "/Automeno/Component.py"], "/Automeno/Interactive.py": ["/Automeno/Macheno.py", "/Automeno/Types.py", "/Automeno/ComponentFactory.py"], "/Automeno/Component.py": ["/Automeno/Types.py", "/Automeno/ComponentFactory.py"]} |
78,459 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /tests/test_authentication.py | import pytest
from galaxy.api.types import Authentication, NextStep
from plugin import AUTH_PARAMS, AUTH_REDIRECT_URL
@pytest.mark.asyncio
async def test_no_stored_credentials(plugin, http_client, backend_client, account_id, refresh_token, display_name):
    """Fresh login: authenticate() yields a NextStep, then the exchange-code
    flow completes and produces an Authentication."""
    assert await plugin.authenticate() == NextStep("web_session", AUTH_PARAMS)
    exchange_code = "EXCHANGE_CODE"
    # Arrange ALL mock return values before driving the login flow. The
    # original set retrieve_exchange_code.return_value AFTER the
    # pass_login_credentials call it was meant to influence, so the
    # exchange-code assertion compared against a bare MagicMock.
    http_client.retrieve_exchange_code.return_value = exchange_code
    backend_client.get_users_info.return_value = {
        "id": account_id,
        "displayName": display_name,
        "externalAuths": {}
    }
    backend_client.get_display_name.return_value = display_name
    assert await plugin.pass_login_credentials(None, {"end_uri": AUTH_REDIRECT_URL}, None)\
        == Authentication(account_id, display_name)
    http_client.authenticate_with_exchange_code.assert_called_once_with(exchange_code)
    backend_client.get_users_info.assert_called_once_with([account_id])
    backend_client.get_display_name.assert_called_once_with(backend_client.get_users_info.return_value)
@pytest.mark.asyncio
async def test_stored_credentials(plugin, http_client, backend_client, account_id, refresh_token, display_name):
    """A stored refresh token authenticates directly, with no NextStep."""
    http_client.authenticate_with_refresh_token.return_value = None
    backend_client.get_display_name.return_value = display_name
    stored_refresh_token = "STORED_TOKEN"
    assert await plugin.authenticate({"refresh_token": stored_refresh_token}) ==\
        Authentication(account_id, display_name)
    http_client.authenticate_with_refresh_token.assert_called_once_with(stored_refresh_token)
    backend_client.get_users_info.assert_called_once_with([account_id])
@pytest.mark.asyncio
async def test_auth_lost(authenticated_plugin, http_client):
    """Invoking the registered auth-lost callback notifies the plugin."""
    http_client.set_auth_lost_callback.assert_called()
    # First positional argument of the registration call is the callback.
    callback = http_client.set_auth_lost_callback.call_args[0][0]
    callback()
    authenticated_plugin.lost_authentication.assert_called_once_with()
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,460 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/consts.py | import os
import sys
import re
from enum import EnumMeta
class System(EnumMeta):
    # NOTE(review): subclassing EnumMeta (a metaclass) is almost certainly
    # unintended — these are plain int class attributes, not enum members.
    # Likely meant Enum/IntEnum. It works as used because the rest of the
    # code only does `SYSTEM == System.XXX` comparisons on the ints;
    # confirm before changing.
    WINDOWS = 1
    MACOS = 2
    LINUX = 3
# Platform detection: resolve host OS and the Epic launcher's data locations.
_program_data = ''
SYSTEM = None
if sys.platform == 'win32':
    SYSTEM = System.WINDOWS
    _program_data = os.getenv('PROGRAMDATA')
    # Registry key (relative to HKEY_CLASSES_ROOT) holding the launcher's
    # URL-protocol open command.
    EPIC_WINREG_LOCATION = r"com.epicgames.launcher\shell\open\command"
    LAUNCHER_WINREG_LOCATION = r"Computer\HKEY_CLASSES_ROOT\com.epicgames.launcher\shell\open\command"
    LAUNCHER_PROCESS_IDENTIFIER = 'EpicGamesLauncher.exe'
elif sys.platform == 'darwin':
    SYSTEM = System.MACOS
    _program_data = os.path.expanduser('~/Library/Application Support')
    EPIC_MAC_INSTALL_LOCATION = "/Applications/Epic Games Launcher.app"
    LAUNCHER_PROCESS_IDENTIFIER = 'Epic Games Launcher'
# NOTE(review): on any other platform SYSTEM stays None and the paths below
# are built from an empty _program_data — Linux appears unsupported.
LAUNCHER_INSTALLED_PATH = os.path.join(_program_data, 'Epic', 'UnrealEngineLauncher', 'LauncherInstalled.dat')
GAME_MANIFESTS_PATH = os.path.join(_program_data, 'Epic', 'EpicGamesLauncher', 'Data', 'Manifests')
AUTH_URL = r"https://www.epicgames.com/id/login"
AUTH_REDIRECT_URL = r"https://epicgames.com/account/personal"
def regex_pattern(regex):
    """Wrap a literal string into a regex matching it anywhere in a URI."""
    return ".*{}.*".format(re.escape(regex))
# Parameters for Galaxy's embedded login web view.
AUTH_PARAMS = {
    "window_title": "Login to Epic\u2122",
    "window_width": 580,
    "window_height": 750,
    "start_uri": AUTH_URL,
    # Login is considered finished when the browser reaches this URI.
    "end_uri_regex": regex_pattern(AUTH_REDIRECT_URL)
}
78,461 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/version.py | __version__ = "0.49.1"
__changelog__ = {
"0.49.1": """
- fix for local size's import
""",
"0.49.0": """
- get local size's by parsing game manifests
""",
"0.48.3": """
- optimize getting local size
- disable getting local size until Galaxy really use it
"""
} | {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,462 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /tests/test_owned_games.py | import pytest
from unittest.mock import Mock
from galaxy.api.errors import AuthenticationRequired, UnknownBackendResponse
from galaxy.api.consts import LicenseType
from galaxy.api.types import Game, LicenseInfo
from backend import EpicClient
from definitions import Asset, CatalogItem
import json
@pytest.fixture
def mock_get_catalog_item():
    """Factory fixture: a resolver mapping known catalog ids to CatalogItems,
    raising UnknownBackendResponse for anything else."""
    known_items = [
        CatalogItem("4fe75bbc5a674f4f9b356b5c90567da5", "Fortnite", ["games", "applications"]),
        CatalogItem("fb39bac8278a4126989f0fe12e7353af", "Hades", ["games", "applications"])
    ]
    def func(catalog_id):
        for item in known_items:
            if catalog_id == item.id:
                return item
        raise UnknownBackendResponse
    return func
@pytest.mark.asyncio
async def test_not_authenticated(plugin, backend_client):
    """get_owned_games propagates AuthenticationRequired from the backend."""
    backend_client.get_owned_games.side_effect = AuthenticationRequired()
    with pytest.raises(AuthenticationRequired):
        await plugin.get_owned_games()
def test_empty_json():
    """An empty catalog dict is rejected as an unknown backend response."""
    items = {}
    with pytest.raises(UnknownBackendResponse):
        EpicClient._parse_catalog_item(items)
@pytest.mark.asyncio
async def test_simple(authenticated_plugin, backend_client):
    """Two owned game records parse into Game entries with SinglePurchase licenses."""
    backend_client.get_owned_games.return_value = json.loads("""
    {'data': {'Launcher': {'libraryItems': {'records': [
    {
        'catalogItemId': '4fe75bbc5a674f4f9b356b5c90567da5',
        'namespace': 'fn',
        'appName': 'Fortnite',
        'catalogItem': {
            'id': '4fe75bbc5a674f4f9b356b5c90567da5',
            'namespace': 'fn',
            'title': 'Fortnite',
            'categories': [{
                'path': 'games'
            }, {
                'path': 'applications'
            }
            ],
            'releaseInfo': [{
                'platform': ['Windows', 'Mac']
            }
            ],
            'dlcItemList': None,
            'mainGameItem': None
        }},
    {
        'catalogItemId': 'fb39bac8278a4126989f0fe12e7353af',
        'namespace': 'min',
        'appName': 'Min',
        'catalogItem': {
            'id': 'fb39bac8278a4126989f0fe12e7353af',
            'namespace': 'min',
            'title': 'Hades',
            'categories': [{
                'path': 'games'
            }, {
                'path': 'applications'
            }
            ],
            'releaseInfo': [{
                'platform': ['Windows', 'Win32']
            }
            ],
            'dlcItemList': None,
            'mainGameItem': None
        }
    }
    ]
    }
    }
    }
    }""".replace("'",'"').replace("None", "null"))
    games = await authenticated_plugin.get_owned_games()
    assert games == [
        Game("Fortnite", "Fortnite", [], LicenseInfo(LicenseType.SinglePurchase, None)),
        Game("Min", "Hades", [], LicenseInfo(LicenseType.SinglePurchase, None))
    ]
@pytest.mark.asyncio
async def test_filter_not_games(authenticated_plugin, backend_client):
    """DLC records (non-null mainGameItem) are filtered out of owned games."""
    backend_client.get_owned_games.return_value = json.loads("""
    {'data': {'Launcher': {'libraryItems': {'records': [
    {
        'catalogItemId': '3df83c606f01446c9d0d126c4c15c367',
        'namespace': 'calluna',
        'appName': 'CallunaDLC001',
        'catalogItem': {
            'id': '3df83c606f01446c9d0d126c4c15c367',
            'namespace': 'calluna',
            'title': 'Control DLC001',
            'categories': [{
                'path': 'games'
            }, {
                'path': 'applications'
            }
            ],
            'releaseInfo': [{
                'platform': ['Windows']
            }
            ],
            'dlcItemList': None,
            'mainGameItem': {
                'id': '9afb582e90b74bdd9e2146fb79c78589'
            }
        }
    }
    ],
    'dlcItemList': None,
    'mainGameItem': None
    }
    }
    }
    }""".replace("'",'"').replace("None", "null"))
    games = await authenticated_plugin.get_owned_games()
    assert games == []
@pytest.mark.asyncio
async def test_add_game(authenticated_plugin, backend_client):
    """A game bought after the first fetch is reported via add_game()."""
    authenticated_plugin.add_game = Mock()
    backend_client.get_owned_games.return_value = json.loads("""
    {'data': {'Launcher': {'libraryItems': {'records': [
    {
        'catalogItemId': '4fe75bbc5a674f4f9b356b5c90567da5',
        'namespace': 'fn',
        'appName': 'Fortnite',
        'catalogItem': {
            'id': '4fe75bbc5a674f4f9b356b5c90567da5',
            'namespace': 'fn',
            'title': 'Fortnite',
            'categories': [{
                'path': 'games'
            }, {
                'path': 'applications'
            }
            ],
            'releaseInfo': [{
                'platform': ['Windows', 'Mac']
            }
            ],
            'dlcItemList': None,
            'mainGameItem': None
        }}
    ]
    }
    }
    }
    }""".replace("'", '"').replace("None", "null"))
    games = await authenticated_plugin.get_owned_games()
    assert games == [
        Game("Fortnite", "Fortnite", [], LicenseInfo(LicenseType.SinglePurchase, None)),
    ]
    # buy game meanwhile
    bought_game = Game("Min", "Hades", [], LicenseInfo(LicenseType.SinglePurchase, None))
    backend_client.get_owned_games.return_value = json.loads("""
    {'data': {'Launcher': {'libraryItems': {'records': [
    {
        'catalogItemId': '4fe75bbc5a674f4f9b356b5c90567da5',
        'namespace': 'fn',
        'appName': 'Fortnite',
        'catalogItem': {
            'id': '4fe75bbc5a674f4f9b356b5c90567da5',
            'namespace': 'fn',
            'title': 'Fortnite',
            'categories': [{
                'path': 'games'
            }, {
                'path': 'applications'
            }
            ],
            'releaseInfo': [{
                'platform': ['Windows', 'Mac']
            }
            ],
            'dlcItemList': None,
            'mainGameItem': None
        }},
    {
        'catalogItemId': 'fb39bac8278a4126989f0fe12e7353af',
        'namespace': 'min',
        'appName': 'Min',
        'catalogItem': {
            'id': 'fb39bac8278a4126989f0fe12e7353af',
            'namespace': 'min',
            'title': 'Hades',
            'categories': [{
                'path': 'games'
            }, {
                'path': 'applications'
            }
            ],
            'releaseInfo': [{
                'platform': ['Windows', 'Win32']
            }
            ],
            'dlcItemList': None,
            'mainGameItem': None
        }
    }
    ]
    }
    }
    }
    }""".replace("'", '"').replace("None", "null"))
    await authenticated_plugin._check_for_new_games(0)
    authenticated_plugin.add_game.assert_called_with(bought_game)
78,463 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /tests/test_http_client.py | from unittest.mock import MagicMock
import pytest
from galaxy.api.errors import AuthenticationRequired
from galaxy.unittest.mock import AsyncMock
from http_client import AuthenticatedHttpClient
@pytest.fixture
def http_request(mocker):
    # Patches aiohttp's low-level request so no real network I/O happens.
    return mocker.patch("aiohttp.ClientSession.request", new_callable=AsyncMock)
@pytest.fixture
async def http_client():
    """Yield a real AuthenticatedHttpClient (mock credential store); closed on teardown."""
    store_credentials = MagicMock()
    client = AuthenticatedHttpClient(store_credentials)
    yield client
    await client.close()
@pytest.fixture
def access_token():
    # Fixed token value shared by the oauth_response fixture and assertions.
    return "ACCESS_TOKEN"
@pytest.fixture
def user_agent():
    # User-Agent the client is expected to send (mimics the real Epic launcher).
    return (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "EpicGamesLauncher/9.11.2-5710144+++Portal+Release-Live "
        "UnrealEngine/4.21.0-5710144+++Portal+Release-Live "
        "Safari/537.36"
    )
@pytest.fixture
def oauth_response(access_token, refresh_token, account_id):
    """A canned successful OAuth token response (status 200, async .json())."""
    response = MagicMock()
    response.status = 200
    response.json = AsyncMock()
    response.json.return_value = {
        "access_token": access_token,
        "refresh_token": refresh_token,
        "account_id": account_id
    }
    return response
@pytest.mark.asyncio
async def test_not_authenticated(http_client):
    """An unauthenticated client exposes no tokens and refuses requests."""
    assert not http_client.authenticated
    assert http_client.refresh_token is None
    assert http_client.account_id is None
    with pytest.raises(AuthenticationRequired):
        await http_client.get("url")
@pytest.mark.asyncio
async def test_authenticate_with_exchange_code(
    http_client,
    http_request,
    access_token,
    refresh_token,
    account_id,
    user_agent,
    oauth_response
):
    """Exchange-code auth stores tokens and sends them on subsequent GETs."""
    http_request.return_value = oauth_response
    await http_client.authenticate_with_exchange_code("CODE")
    assert http_client.authenticated
    assert http_client.refresh_token == refresh_token
    assert http_client.account_id == account_id
    http_request.assert_called_once()
    http_request.reset_mock()
    url = "url"
    headers = {
        "Authorization": "bearer " + access_token,
        "User-Agent": user_agent
    }
    await http_client.get(url)
    http_request.assert_called_once_with("GET", url, headers=headers)
@pytest.mark.asyncio
async def test_authenticate_with_refresh_token(
    http_client,
    http_request,
    access_token,
    refresh_token,
    account_id,
    user_agent,
    oauth_response
):
    """Refresh-token auth stores the rotated tokens and authorizes GETs."""
    http_request.return_value = oauth_response
    await http_client.authenticate_with_refresh_token("OLD_REFRESH_TOKEN")
    assert http_client.authenticated
    assert http_client.refresh_token == refresh_token
    assert http_client.account_id == account_id
    http_request.assert_called_once()
    http_request.reset_mock()
    url = "url"
    headers = {
        "Authorization": "bearer " + access_token,
        "User-Agent": user_agent
    }
    await http_client.get(url)
    http_request.assert_called_once_with("GET", url, headers=headers)
@pytest.mark.asyncio
async def test_refresh_token(http_client, http_request, oauth_response):
    """A 401-style failure triggers one re-auth, then the request is retried."""
    http_request.return_value = oauth_response
    await http_client.authenticate_with_refresh_token("TOKEN")
    http_request.reset_mock()
    authorized_response = MagicMock()
    authorized_response.status = 200
    http_client._authorized_get = AsyncMock()
    # First call fails with auth error, the retry succeeds.
    http_client._authorized_get.side_effect = [AuthenticationRequired(), authorized_response]
    http_client._authenticate = AsyncMock()
    response = await http_client.get("url")
    assert response.status == authorized_response.status
@pytest.mark.asyncio
async def test_auth_lost(http_client, http_request, oauth_response):
    """Auth loss surfaces when both the request and the token refresh fail."""
    http_request.return_value = oauth_response
    await http_client.authenticate_with_refresh_token("TOKEN")
    http_request.reset_mock()
    http_client._authorized_get = MagicMock()
    http_client._authorized_get.side_effect = AuthenticationRequired()
    # The original assigned `http_client.__refresh_tokens`, but name mangling
    # only happens inside a class body — the real private method
    # (`_AuthenticatedHttpClient__refresh_tokens`) was never overridden, and
    # the `side_effects` (typo) attribute was silently ignored by MagicMock.
    http_client._AuthenticatedHttpClient__refresh_tokens = MagicMock(
        side_effect=AuthenticationRequired())
    with pytest.raises(AuthenticationRequired):
        await http_client.get("url")
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,464 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /tests/test_parser.py | import pytest
from local import LauncherInstalledParser
@pytest.fixture
def load_file():
    """A stand-in for LauncherInstalledParser._load_file returning one entry."""
    def func():
        return {
            "InstallationList": [
                {
                    "InstallLocation": "C:\\Program Files\\Epic Games\\Transistor",
                    "AppName": "Dill",
                    "AppID": 0,
                    "AppVersion": "1.50473-x64"
                }
            ]
        }
    return func
def test_launcher_installed_parser(load_file):
    """parse() maps AppName -> InstallLocation from the loaded registry file."""
    parser = LauncherInstalledParser()
    # Bypass disk access by substituting the loader.
    parser._load_file = load_file
    assert parser.parse() == {'Dill': 'C:\\Program Files\\Epic Games\\Transistor'}
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,465 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/local.py | import asyncio
import subprocess
import json
import logging as log
from collections import defaultdict
import os.path
from galaxy.api.types import LocalGameState
from consts import LAUNCHER_INSTALLED_PATH, SYSTEM, System, LAUNCHER_PROCESS_IDENTIFIER, GAME_MANIFESTS_PATH
from process_watcher import ProcessWatcher
if SYSTEM == System.WINDOWS:
import winreg
import ctypes
from consts import EPIC_WINREG_LOCATION
elif SYSTEM == System.MACOS:
from consts import EPIC_MAC_INSTALL_LOCATION
from AppKit import NSWorkspace
import time
def parse_manifests() -> dict:
    """Load every *.item manifest under GAME_MANIFESTS_PATH, keyed by AppName."""
    manifests = {}
    for entry in os.listdir(GAME_MANIFESTS_PATH):
        full_path = os.path.join(GAME_MANIFESTS_PATH, entry)
        if not full_path.endswith('.item'):
            continue
        with open(full_path, 'r') as manifest_file:
            manifest = json.load(manifest_file)
        manifests[manifest['AppName']] = manifest
    return manifests
class LauncherInstalledParser:
    """Watches and parses Epic's LauncherInstalled.dat install registry."""
    def __init__(self):
        self._path = LAUNCHER_INSTALLED_PATH
        self._last_modified = None  # mtime observed on the previous check
    def file_has_changed(self):
        """Return True when the file's mtime differs from the last check.

        A missing file reports False; any other stat failure is logged and
        re-raised as RuntimeError.
        """
        try:
            stat = os.stat(self._path)
        except FileNotFoundError:
            return False
        except Exception as e:
            log.exception(f'Stating {self._path} has failed: {str(e)}')
            raise RuntimeError('Stating failed:' + str(e))
        else:
            if stat.st_mtime != self._last_modified:
                self._last_modified = stat.st_mtime
                return True
            return False
    def _load_file(self):
        """Load the JSON registry; returns {} when the file is absent."""
        content = {}
        try:
            with open(self._path, 'r') as f:
                content = json.load(f)
        except FileNotFoundError as e:
            log.debug(str(e))
        return content
    def parse(self):
        """Map AppName -> InstallLocation for installed games.

        Skips Unreal Engine entries ('UE' prefix) and malformed records
        missing AppName or InstallLocation (the original raised KeyError
        when InstallLocation was absent).
        """
        installed_games = {}
        for entry in self._load_file().get('InstallationList', []):
            app_name = entry.get('AppName')
            if not app_name or app_name.startswith('UE'):
                continue
            install_location = entry.get('InstallLocation')
            if install_location is None:
                continue
            installed_games[app_name] = install_location
        return installed_games
class LocalGamesProvider:
    """Tracks which Epic games are installed and running, exposing the deltas
    via consume_updated_games() for Galaxy to pick up."""
    def __init__(self):
        self._parser = LauncherInstalledParser()
        self._ps_watcher = ProcessWatcher(LAUNCHER_PROCESS_IDENTIFIER)
        # game_id -> LocalGameState bit flags; unknown ids default to None_.
        self._games = defaultdict(lambda: LocalGameState.None_)
        self._updated_games = set()
        self._was_installed = dict()
        self._was_running = set()
        self._first_run = True
        self._status_updater = None  # asyncio.Task created by setup()
    @property
    def is_client_running(self):
        # On macOS query the workspace's running apps; elsewhere defer to the
        # process watcher.
        if SYSTEM == System.MACOS:
            workspace = NSWorkspace.sharedWorkspace()
            activeApps = workspace.runningApplications()
            for app in activeApps:
                if app.localizedName() == "Epic Games Launcher":
                    return True
            return False
        else:
            return self._ps_watcher.is_launcher_running()
    @property
    def first_run(self):
        return self._first_run
    @property
    def games(self):
        return self._games
    async def search_process(self, game_id, timeout):
        # NOTE(review): sint/lint look like short/long polling intervals in
        # seconds — confirm against ProcessWatcher.
        await self._ps_watcher.pool_until_game_start(game_id, timeout, sint=0.5, lint=2)
    def is_game_running(self, game_id):
        return self._ps_watcher._is_app_tracked_and_running(game_id)
    def consume_updated_games(self):
        # Hand back pending changes and clear them atomically for this caller.
        tmp = self._updated_games.copy()
        self._updated_games.clear()
        return tmp
    def setup(self):
        """Prime installed/running state and start the background checker."""
        log.info('Running local games provider setup')
        self.check_for_installed()
        self.check_for_running()
        loop = asyncio.get_event_loop()
        self._status_updater = loop.create_task(self._endless_status_checker())
        self._first_run = False
    async def _endless_status_checker(self):
        """Poll install/running state forever; heavier scans run less often."""
        log.info('Starting endless status checker')
        counter = 0
        while True:
            try:
                self.check_for_installed()
                # Every 21st tick: full process scan; every 7th: also look
                # for games newly spawned under the launcher.
                if 0 == counter % 21:
                    await self.parse_all_procs_if_needed()
                elif 0 == counter % 7:
                    self.check_for_running(check_for_new=True)
                self.check_for_running()
            except Exception as e:
                log.error(e)
            finally:
                counter += 1
                await asyncio.sleep(1)
    def check_for_installed(self):
        # Cheap mtime check first; only reparse when the file actually changed.
        if not self._parser.file_has_changed():
            return
        log.debug(f'{self._parser._path} file has been found/changed. Parsing')
        installed = self._parser.parse()
        self._update_game_statuses(set(self._was_installed), set(installed), LocalGameState.Installed)
        self._ps_watcher.watched_games = installed
        self._was_installed = installed
    def get_installed_paths(self):
        return self._parser.parse()
    async def parse_all_procs_if_needed(self):
        # NOTE(review): reads the module-global `local_client` (defined at
        # file bottom); on Linux it is never created — confirm unsupported.
        if local_client._is_installed is True:
            if len(self._was_installed) > 0 and len(self._was_running) == 0:
                await self._ps_watcher._search_in_all_slowly(interval=0.015)
    def check_for_running(self, check_for_new=False):
        running = self._ps_watcher.get_running_games(check_under_launcher=check_for_new)
        self._update_game_statuses(self._was_running, running, LocalGameState.Running)
        self._was_running = running
    def _update_game_statuses(self, previous, current, status):
        # Set the flag for newly-seen ids, XOR it off for vanished ones;
        # changes are queued for Galaxy except during the initial scan.
        for id_ in (current - previous):
            self._games[id_] |= status
            if not self._first_run:
                self._updated_games.add(id_)
        for id_ in (previous - current):
            self._games[id_] ^= status
            if not self._first_run:
                self._updated_games.add(id_)
class ClientNotInstalled(Exception):
    """Raised when an action needs the Epic client but it is not installed."""
class _MacosLauncher:
    """macOS adapter for launching/stopping the Epic client via `open`."""
    _OPEN = 'open'
    def __init__(self):
        # Tri-state cache: True/False once a launch attempt proved it, else None.
        self._was_client_installed = None
    @property
    def _is_installed(self):
        """:returns: bool or None if not known """
        # in case we have tried to run it previously
        if self._was_client_installed is not None:
            return self._was_client_installed
        # else we assume that is installed under /Applications
        if os.path.exists(EPIC_MAC_INSTALL_LOCATION):
            return True
        else: # probably not but we don't know for sure
            return None
    async def exec(self, cmd, prefix_cmd=True):
        """Run a shell command (optionally prefixed with `open`).

        Raises ClientNotInstalled when the command exits non-zero, and
        records the installed/not-installed outcome for _is_installed.
        """
        if prefix_cmd:
            cmd = f"{self._OPEN} {cmd}"
        log.info(f"Executing shell command: {cmd}")
        proc = await asyncio.create_subprocess_shell(cmd)
        status = None
        try:
            # A hang here is treated as "probably fine" (see warning below).
            status = await asyncio.wait_for(proc.wait(), timeout=2)
        except asyncio.TimeoutError:
            log.warning('Calling Epic Launcher timeouted. Probably it is fresh installed w/o executable permissions.')
        else:
            if status != 0:
                log.debug(f'Calling Epic Launcher failed with code {status}. Assuming it is not installed')
                self._was_client_installed = False
                raise ClientNotInstalled
            else:
                self._was_client_installed = True
    async def shutdown_platform_client(self):
        # Ask the app to quit gracefully via AppleScript.
        await self.exec("osascript -e 'quit app \"Epic Games Launcher\"'", prefix_cmd=False)
    async def prevent_epic_from_showing(self):
        """Poll briefly after launch and hide the Epic window if it pops up."""
        client_popup_wait_time = 5
        check_frequency_delay = 0.02
        # NOTE(review): runningApplications() is snapshotted once before the
        # loop; a launcher that appears mid-wait may be missed — confirm.
        workspace = NSWorkspace.sharedWorkspace()
        activeApps = workspace.runningApplications()
        end_time = time.time() + client_popup_wait_time
        while time.time() <= end_time:
            for app in activeApps:
                if app.isActive() and app.localizedName() == "Epic Games Launcher":
                    app.hide()
                    return
            await asyncio.sleep(check_frequency_delay)
        log.info("Timed out on prevent epic from showing")
class _WindowsLauncher:
    """Windows adapter for launching/stopping the Epic client via `start`."""
    _OPEN = 'start'
    @staticmethod
    def _parse_winreg_path(path):
        # Registry value looks like '"C:\...\Launcher.exe" %1'; strip quotes
        # and the trailing argument placeholder.
        return path.replace('"', '').partition('%')[0].strip()
    @property
    def _is_installed(self):
        """True when the registry's launcher executable path exists on disk."""
        try:
            reg = winreg.ConnectRegistry(None, winreg.HKEY_CLASSES_ROOT)
            with winreg.OpenKey(reg, EPIC_WINREG_LOCATION) as key:
                path = self._parse_winreg_path(winreg.QueryValueEx(key, "")[0])
                return os.path.exists(path)
        except OSError:
            return False
    async def exec(self, cmd, prefix_cmd=True):
        """Run a shell command; raises ClientNotInstalled when Epic is absent."""
        if not self._is_installed:
            raise ClientNotInstalled
        if prefix_cmd:
            cmd = f"{self._OPEN} {cmd}"
        log.info(f"Executing shell command: {cmd}")
        # Fire-and-forget: exit status is intentionally not awaited.
        subprocess.Popen(cmd, shell=True)
    async def shutdown_platform_client(self):
        await self.exec("taskkill.exe /im \"EpicGamesLauncher.exe\"", prefix_cmd=False)
    async def prevent_epic_from_showing(self):
        """Poll briefly after launch and close the Epic window if it appears."""
        client_popup_wait_time = 5
        check_frequency_delay = 0.02
        end_time = time.time() + client_popup_wait_time
        hwnd = None
        try:
            while time.time() < end_time:
                # Cache the window handle once found; then wait for visibility.
                hwnd = hwnd or ctypes.windll.user32.FindWindowW(None, "Epic Games Launcher")
                if hwnd and ctypes.windll.user32.IsWindowVisible(hwnd):
                    ctypes.windll.user32.CloseWindow(hwnd)
                    break
                await asyncio.sleep(check_frequency_delay)
            else:
                log.info("Timed out closing epic popup")
        except Exception as e:
            log.error(f"Exception when checking if window is visible {repr(e)}")
# Select the platform-specific launcher adapter at import time.
# NOTE(review): on any other platform (System.LINUX / None) `local_client`
# is never defined, so code referencing it will raise NameError — confirm
# those platforms are intentionally unsupported.
if SYSTEM == System.WINDOWS:
    local_client = _WindowsLauncher()
elif SYSTEM == System.MACOS:
    local_client = _MacosLauncher()
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,466 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/backend.py | import asyncio
import logging as log
from galaxy.api.errors import UnknownBackendResponse
from definitions import Asset, CatalogItem
class EpicClient:
    """Thin async wrapper over Epic's public REST and GraphQL APIs."""
    def __init__(self, http_client):
        # http_client: AuthenticatedHttpClient providing authorized get/post.
        self._http_client = http_client

    def get_display_name(self, user_info):
        """Return the display name from a users-info response (list of account dicts)."""
        return user_info[0]["displayName"]
async def get_users_info(self, account_ids):
url = (
"https://account-public-service-prod03.ol.epicgames.com"
"/account/api/public/account?"
)
for account_id in account_ids:
url = url + "&accountId=" + account_id
response = await self._http_client.get(url)
result = await response.json()
try:
return result
except KeyError:
log.exception("Can not parse backend response")
raise UnknownBackendResponse()
async def get_assets(self):
    """Return owned launcher assets, merged and deduplicated across platforms."""
    # merge assets from different platforms
    platforms = ["Windows", "Mac"]
    params = {
        "label": "Live"
    }
    requests = []
    for platform in platforms:
        url = (
            "https://launcher-public-service-prod06.ol.epicgames.com"
            "/launcher/api/public/assets/" + platform
        )
        requests.append(self._http_client.get(url, params=params))
    # Fetch both platforms concurrently.
    responses = await asyncio.gather(*requests)
    assets = set()  # Asset tuples are hashable; set dedupes cross-platform titles
    for response in responses:
        items = await response.json()
        assets.update(self._parse_assets(items))
    return list(assets)
async def get_catalog_items_with_id(self, namespace, catalog_id):
    """Fetch a single catalog item by namespace/id and return a CatalogItem.

    Raises UnknownBackendResponse when the payload cannot be parsed.
    """
    url = (
        "https://catalog-public-service-prod06.ol.epicgames.com"
        "/catalog/api/shared/namespace/{}/bulk/items"
    ).format(namespace)
    params = {
        "id": catalog_id,
        "country": "US",
        "locale": "en-US"
    }
    response = await self._http_client.get(url, params=params)
    items = await response.json()
    try:
        item = self._parse_catalog_item(items)
    except UnknownBackendResponse:
        # Log the offending payload before re-raising for the caller.
        log.exception(f"Can not parse backend response for {url} for {catalog_id}: {items}")
        raise UnknownBackendResponse
    else:
        return item

async def get_friends_list(self):
    """Return the raw friends list (dicts with 'accountId') of the logged-in account."""
    url = (
        "https://friends-public-service-prod06.ol.epicgames.com/friends/api/public/"
        "friends/{}"
    ).format(self._http_client.account_id)
    response = await self._http_client.get(url)
    items = await response.json()
    return items
@staticmethod
def _parse_assets(items):
    """Convert raw asset dicts to Asset tuples; raise UnknownBackendResponse on missing keys."""
    result = []
    for item in items:
        try:
            result.append(Asset(item["namespace"], item["appName"], item["catalogItemId"]))
        except KeyError as e:
            log.exception(f"Can not parse assets backend response: {e}")
            raise UnknownBackendResponse()
    return result

@staticmethod
def _parse_catalog_item(items):
    """Parse the first entry of a bulk-items payload into a CatalogItem."""
    try:
        # Bulk response is keyed by item id; only one item is ever requested.
        item = list(items.values())[0]
        categories = [category["path"] for category in item["categories"]]
        return CatalogItem(item["id"], item["title"], categories)
    except (IndexError, KeyError) as e:
        log.warning(f"Could not parse catalog item in {items}, error {repr(e)}")
        raise UnknownBackendResponse()
async def get_product_store_info(self, query):
    """Search store catalog offers matching *query*; returns the decoded GraphQL JSON."""
    data = {"query": '''\n    query searchQuery($namespace: String!, $locale: String!, $query: String!, $country: String!) {
        Catalog {
            catalogOffers(namespace: $namespace, locale: $locale, params: {keywords: $query, country: $country}) {
                elements {
                    title
                    productSlug
                    linkedOfferNs
                    categories {
                        path
                    }
                }
            }
        }
    }''',
            "variables": {"country": "US",
                          "locale": "en-US",
                          "namespace": "epic",
                          "query": query}
    }
    response = await self._http_client.post("https://graphql.epicgames.com/graphql", json=data)
    response = await response.json()
    return response

async def get_playtime(self):
    """Return total playtime per artifact for the logged-in account."""
    data = {"query": '''\n    query playtimeTrackingQuery($accountId: String!){
        PlaytimeTracking {
            total(accountId: $accountId) {
                artifactId
                totalTime
            }
        }
    }''',
            "variables": {"accountId": f"{self._http_client.account_id}"}
    }
    # graph=True: the http client validates GraphQL errors and returns the decoded dict.
    response = await self._http_client.post("https://graphql.epicgames.com/graphql", json=data, graph=True)
    return response

async def get_productmapping(self):
    """Return the namespace -> store-page-slug mapping."""
    response = await self._http_client.get("https://store-content.ak.epicgames.com/api/content/productmapping")
    response = await response.json()
    return response
async def get_owned_games(self, cursor=""):
    """Fetch the whole game library via GraphQL, following pagination recursively.

    Records from subsequent pages are spliced into the first response, which
    is returned as a single payload. UE namespace items are excluded.
    """
    data = {"query": '''\n    query libraryQuery($locale: String, $cursor: String, $excludeNs: [String])
    {
        Launcher
        {
            libraryItems
            (
                cursor: $cursor, params: {excludeNs: $excludeNs})
            {
                records
                {
                    catalogItemId
                    namespace
                    appName
                    catalogItem(locale:$locale)
                    {
                        id
                        namespace
                        title
                        categories
                        {
                            path
                        }
                        releaseInfo
                        {
                            platform
                        }
                        dlcItemList
                        {
                            id
                        }
                        mainGameItem
                        {
                            id
                        }
                        customAttributes
                        {
                            key
                            value
                        }
                    }
                }
                responseMetadata
                {
                    nextCursor
                }
            }
        }
    }''',
            "variables": {"locale": "en-US", "cursor": cursor, "excludeNs": ["ue"]}
    }
    response = await self._http_client.post("https://graphql.epicgames.com/graphql", json=data, graph=True)
    log.info(response)
    cursor = response['data']['Launcher']['libraryItems']['responseMetadata']['nextCursor']
    if cursor:
        # Recurse for the next page and merge its records into this response.
        next_page = await self.get_owned_games(cursor)
        response['data']['Launcher']['libraryItems']['records'].extend(next_page['data']['Launcher']['libraryItems']['records'])
    return response
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,467 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/plugin.py | import asyncio
import json
import sys
import logging as log
import webbrowser
from galaxy.api.plugin import Plugin, create_and_run_plugin, JSONEncoder
from galaxy.api.consts import Platform, LicenseType
from galaxy.api.types import Authentication, Game, LicenseInfo, FriendInfo, LocalGame, NextStep, LocalGameState, GameTime, Dlc
from galaxy.api.errors import (
InvalidCredentials, BackendTimeout, BackendNotAvailable,
BackendError, NetworkError, UnknownError, FailedParsingManifest
)
from backend import EpicClient
from http_client import AuthenticatedHttpClient
from version import __version__
from local import LocalGamesProvider, local_client, ClientNotInstalled, parse_manifests
from consts import System, SYSTEM, AUTH_REDIRECT_URL, AUTH_PARAMS
from definitions import GameInfo, EpicDlc
class EpicPlugin(Plugin):
    """GOG Galaxy plugin integrating the Epic Games store and launcher."""
    def __init__(self, reader, writer, token):
        super().__init__(Platform.Epic, __version__, reader, writer, token)
        self._http_client = AuthenticatedHttpClient(store_credentials_callback=self.store_credentials)
        self._epic_client = EpicClient(self._http_client)
        self._local_provider = LocalGamesProvider()
        self._local_client = local_client
        self._owned_games = {}        # game_id -> Game, filled by get_owned_games
        self._game_info_cache = {}    # app_name -> GameInfo, persisted via plugin cache
        self._encoder = JSONEncoder()
        self._refresh_owned_task = None  # background task that looks for new purchases

    async def _do_auth(self):
        """Finish authentication: resolve display name and hook the auth-lost callback."""
        user_info = await self._epic_client.get_users_info([self._http_client.account_id])
        display_name = self._epic_client.get_display_name(user_info)
        self._http_client.set_auth_lost_callback(self.lost_authentication)
        return Authentication(self._http_client.account_id, display_name)
async def authenticate(self, stored_credentials=None):
    """Galaxy auth entry: reuse a stored refresh token or start the web login flow."""
    if not stored_credentials:
        return NextStep("web_session", AUTH_PARAMS)
    refresh_token = stored_credentials["refresh_token"]
    try:
        await self._http_client.authenticate_with_refresh_token(refresh_token)
    except (BackendNotAvailable, BackendError, BackendTimeout, NetworkError, UnknownError) as e:
        # Transient backend problems propagate as-is so Galaxy can retry.
        raise e
    except Exception:
        # Anything else means the stored token is no longer usable.
        raise InvalidCredentials()
    return await self._do_auth()

async def pass_login_credentials(self, step, credentials, cookies):
    """Finish web login: import browser cookies and exchange them for OAuth tokens."""
    try:
        if cookies:
            cookiez = {}
            for cookie in cookies:
                cookiez[cookie['name']] = cookie['value']
            self._http_client.update_cookies(cookiez)
        exchange_code = await self._http_client.retrieve_exchange_code()
        await self._http_client.authenticate_with_exchange_code(exchange_code)
    except (BackendNotAvailable, BackendError, BackendTimeout, NetworkError, UnknownError) as e:
        raise e
    except Exception as e:
        log.error(repr(e))
        raise InvalidCredentials()
    return await self._do_auth()
def handshake_complete(self):
    """Restore the persisted game-info cache once Galaxy finishes the handshake."""
    self._game_info_cache = {
        k: GameInfo(**v) for k, v
        in json.loads(self.persistent_cache.get('game_info', '{}')).items()
    }

def _store_cache(self, key, obj):
    """Serialize *obj* into the persistent cache and push it to Galaxy."""
    self.persistent_cache[key] = self._encoder.encode(obj)
    self.push_cache()

def store_credentials(self, credentials: dict):
    """Prevents losing credentials on `push_cache`"""
    self.persistent_cache['credentials'] = self._encoder.encode(credentials)
    super().store_credentials(credentials)
def _get_dlcs(self, products):
    """Collect EpicDlc links for every library record that declares a mainGameItem."""
    dlcs = []
    for game in products['data']['Launcher']['libraryItems']['records']:
        try:
            if 'mainGameItem' in game['catalogItem'] and game['catalogItem']['mainGameItem']:
                dlcs.append(EpicDlc(game['catalogItem']['mainGameItem']['id'], game['catalogItemId'], game['catalogItem']['title']))
        except (TypeError, KeyError) as e:
            # Malformed records are logged and skipped, not fatal.
            log.error(f"Exception while trying to parse product {repr(e)}\nProduct {game}")
    return dlcs

def _parse_owned_product(self, game, dlcs):
    """Turn one library record into a Game (with attached Dlc list), or None when skipped.

    A record is skipped when it is itself a DLC, or when it lacks either the
    'games' or the 'applications' category.
    """
    games_dlcs = []
    is_game = False
    is_application = False
    for category in game['catalogItem']['categories']:
        if category['path'] == 'games':
            is_game = True
        if category['path'] == 'applications':
            is_application = True
    # NOTE(review): this requires BOTH categories to be present; confirm that
    # `or` (rather than `and`) is the intended filter.
    if not is_game or not is_application:
        return
    for dlc in dlcs:
        if game['catalogItemId'] == dlc.parent_id:
            games_dlcs.append(Dlc(dlc.dlc_id, dlc.dlc_title, LicenseInfo(LicenseType.SinglePurchase)))
        if game['catalogItemId'] == dlc.dlc_id:
            # product is a dlc, skip
            return
    self._game_info_cache[game['appName']] = GameInfo(game['namespace'], game['appName'], game['catalogItem']['title'])
    return Game(game['appName'], game['catalogItem']['title'], games_dlcs, LicenseInfo(LicenseType.SinglePurchase))
async def _get_owned_games(self):
    """Fetch and parse the owned-games library; refreshes the persisted game-info cache."""
    parsed_games = []
    owned_products = await self._epic_client.get_owned_games()
    dlcs = self._get_dlcs(owned_products)
    product_mapping = await self._epic_client.get_productmapping()
    for product in owned_products['data']['Launcher']['libraryItems']['records']:
        try:
            parsed_game = self._parse_owned_product(product, dlcs)
            if parsed_game:
                # _parse_owned_product just cached this entry, so .get() is non-None here.
                cached_game_info = self._game_info_cache.get(parsed_game.game_id)
                # Only report games that have a store product mapping.
                if cached_game_info.namespace in product_mapping:
                    parsed_games.append(parsed_game)
        except (TypeError, KeyError) as e:
            log.error(f"Exception while trying to parse product {repr(e)}\nProduct {product}")
    self._store_cache('game_info', self._game_info_cache)
    return parsed_games

async def get_owned_games(self):
    """Galaxy entry point: return owned games and start the periodic refresh task."""
    games = await self._get_owned_games()
    for game in games:
        self._owned_games[game.game_id] = game
    self._refresh_owned_task = asyncio.create_task(self._check_for_new_games(300))
    return games

async def get_local_games(self):
    """Return locally installed games known to the local provider."""
    if self._local_provider.first_run:
        self._local_provider.setup()
    return [
        LocalGame(app_name, state)
        for app_name, state in self._local_provider.games.items()
    ]
async def _get_store_slug(self, game_id):
    """Best-effort lookup of the store page slug for *game_id*; '' on any failure."""
    cached_game_info = self._game_info_cache.get(game_id)
    try:
        if cached_game_info:
            title = cached_game_info.title
            namespace = cached_game_info.namespace
        else:  # extra safety fallback in case of dealing with removed game
            assets = await self._epic_client.get_assets()
            for asset in assets:
                if asset.app_name == game_id:
                    if game_id in self._owned_games:
                        title = self._owned_games[game_id].game_title
                    else:
                        details = await self._epic_client.get_catalog_items_with_id(asset.namespace, asset.catalog_id)
                        title = details.title
                    namespace = asset.namespace
        # NOTE(review): if no asset matched, `title`/`namespace` are unbound here;
        # the broad except below turns that into the '' fallback.
        product_store_info = await self._epic_client.get_product_store_info(title)
        if "data" in product_store_info:
            for product in product_store_info["data"]["Catalog"]["catalogOffers"]["elements"]:
                if product["linkedOfferNs"] == namespace:
                    return product['productSlug']
        return ""
    except Exception as e:
        log.error(repr(e))
        return ""

async def open_epic_browser(self, store_slug=None):
    """Open the Epic store page for *store_slug*, or the generic download page."""
    if store_slug:
        url = f"https://www.epicgames.com/store/install/{store_slug}"
    else:
        url = "https://www.epicgames.com/store/download"
    log.info(f"Opening Epic website {url}")
    webbrowser.open(url)
def _is_game_installed(self, game_id):
    """Return True when the local provider reports *game_id* as installed."""
    # Unknown ids and any non-Installed state both count as "not installed".
    state = self._local_provider.games.get(game_id)
    return state is LocalGameState.Installed
async def launch_game(self, game_id):
    """Launch a game through the Epic launcher URI; fall back to the web store."""
    if self._local_provider.is_game_running(game_id):
        log.info(f'Game already running, game_id: {game_id}.')
        return
    if SYSTEM == System.WINDOWS:
        # `^&` escapes the ampersand for cmd.exe's `start` verb.
        cmd = f"com.epicgames.launcher://apps/{game_id}?action=launch^&silent=true"
    elif SYSTEM == System.MACOS:
        cmd = f"'com.epicgames.launcher://apps/{game_id}?action=launch&silent=true'"
    try:
        await self._local_client.exec(cmd)
    except ClientNotInstalled:
        await self.open_epic_browser()
    else:
        # Wait (up to 30s) for the game process so Galaxy can mark it running.
        await self._local_provider.search_process(game_id, timeout=30)

async def uninstall_game(self, game_id):
    """Open the launcher's library view so the user can uninstall the game."""
    if not self._is_game_installed(game_id):
        log.warning("Received uninstall command on a not installed game")
        return
    cmd = "com.epicgames.launcher://store/library"
    try:
        await self._local_client.exec(cmd)
    except ClientNotInstalled:
        await self.open_epic_browser(await self._get_store_slug(game_id))

async def install_game(self, game_id):
    """Open the launcher's library view to install; launch instead if already installed."""
    if self._is_game_installed(game_id):
        log.warning(f"Game {game_id} is already installed")
        return await self.launch_game(game_id)
    cmd = "com.epicgames.launcher://store/library"
    try:
        await self._local_client.exec(cmd)
    except ClientNotInstalled:
        await self.open_epic_browser(await self._get_store_slug(game_id))
async def get_friends(self):
    """Return FriendInfo for every friend of the logged-in account.

    Friend details are requested in batches of 50 ids so individual requests
    stay small (50 is arbitrary, tune if needed).
    """
    ids = await self._epic_client.get_friends_list()
    account_ids = []
    friends = []
    prev_slice = 0
    for index, entry in enumerate(ids):
        account_ids.append(entry["accountId"])
        # BUG FIX: `index + 1 % 50` parsed as `index + (1 % 50)` and never hit
        # the batch boundary; parenthesize so every 50th entry flushes a batch.
        if (index + 1) % 50 == 0 or index == len(ids) - 1:
            friends.extend(await self._epic_client.get_users_info(account_ids[prev_slice:]))
            # BUG FIX: was `prev_slice = index`, which re-sent the boundary id
            # at the start of the next batch.
            prev_slice = index + 1
    friend_infos = []
    for friend in friends:
        if "id" in friend and "displayName" in friend:
            friend_infos.append(FriendInfo(user_id=friend["id"], user_name=friend["displayName"]))
        elif "id" in friend:
            # Account exists but has no public display name.
            friend_infos.append(FriendInfo(user_id=friend["id"], user_name=""))
    return friend_infos
def _update_local_game_statuses(self):
    """Push pending install/run state changes from the local provider to Galaxy."""
    updated = self._local_provider.consume_updated_games()
    for id_ in updated:
        new_state = self._local_provider.games[id_]
        log.debug(f'Updating game {id_} state to {new_state}')
        self.update_local_game_status(LocalGame(id_, new_state))

async def _check_for_new_games(self, interval):
    """After *interval* seconds, re-fetch the library and report newly bought games."""
    await asyncio.sleep(interval)
    log.info("Checking for new games")
    refreshed_owned_games = await self._get_owned_games()
    for game in refreshed_owned_games:
        if game.game_id not in self._owned_games:
            log.info(f"Found new game, {game}")
            self.add_game(game)
            self._owned_games[game.game_id] = game

async def prepare_game_times_context(self, game_ids):
    """Context for get_game_time: the whole playtime payload, fetched once."""
    return await self._epic_client.get_playtime()

async def get_game_time(self, game_id, context):
    """Return tracked playtime (minutes) for *game_id*; minutes is None when untracked."""
    if context:
        playtime = context
    else:
        playtime = await self.prepare_game_times_context(None)
    time_played = None
    for item in playtime['data']['PlaytimeTracking']['total']:
        if item['artifactId'] == game_id and 'totalTime' in item:
            # totalTime appears to be seconds (divided by 60 -> minutes); confirm.
            time_played = int(item['totalTime']/60)
            break
    return GameTime(game_id, time_played, None)
async def prepare_local_size_context(self, game_ids) -> dict:
    """Context for get_local_size: parsed launcher install manifests keyed by app name."""
    return parse_manifests()

async def get_local_size(self, game_id, context) -> int:
    """Installed size read from the game's manifest 'InstallSize' field."""
    try:
        game_manifest = context[game_id]
        return int(game_manifest['InstallSize'])
    except (KeyError, ValueError) as e:
        raise FailedParsingManifest(repr(e))

async def launch_platform_client(self):
    """Start the Epic launcher (hidden) unless it is already running."""
    if self._local_provider.is_client_running:
        log.info("Epic client already running")
        return
    cmd = "com.epicgames.launcher:"
    await self._local_client.exec(cmd)
    # Hide the launcher window that pops up on start.
    asyncio.create_task(self._local_client.prevent_epic_from_showing())

async def shutdown_platform_client(self):
    """Ask the OS-specific launcher wrapper to terminate the Epic client."""
    await self._local_client.shutdown_platform_client()

def tick(self):
    """Periodic Galaxy callback: push local state changes, keep the refresh task alive."""
    if not self._local_provider.first_run:
        self._update_local_game_statuses()
    if self._refresh_owned_task and self._refresh_owned_task.done():
        # Interval set to 8 minutes because that makes the request number just below galaxy's own calls
        # and still maintains the functionality
        self._refresh_owned_task = asyncio.create_task(self._check_for_new_games(60*8))

async def shutdown(self):
    """Cancel background work and close the HTTP session."""
    if self._local_provider._status_updater:
        self._local_provider._status_updater.cancel()
    if self._http_client:
        await self._http_client.close()
def main():
    """Plugin entry point: hand control to the Galaxy plugin runner."""
    create_and_run_plugin(EpicPlugin, sys.argv)

if __name__ == "__main__":
    main()
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,468 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/definitions.py | from collections import namedtuple
import dataclasses
# Raw launcher asset: one owned app on one platform.
Asset = namedtuple("Asset", ["namespace", "app_name", "catalog_id"])
# Parsed catalog entry: id, human-readable title and list of category paths.
CatalogItem = namedtuple("CatalogItem", ["id", "title", "categories"])

@dataclasses.dataclass
class GameInfo:
    """Per-game metadata cached between plugin runs."""
    namespace: str
    app_name: str
    title: str

@dataclasses.dataclass
class EpicDlc:
    """Link between a DLC offer and its parent game."""
    parent_id: str
    dlc_id: str
    dlc_title: str
78,469 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/process_watcher.py | import asyncio
import psutil
import logging as log
import time
from typing import Dict, Iterable
from dataclasses import dataclass
from collections import defaultdict
@dataclass
class WatchedApp:
    """A watched application (launcher or game) identified by its id.

    Equality and hashing use the id only, so an entry can also be looked
    up by its plain string id.
    """
    id: str
    dir: str
    is_game: bool = True

    def __eq__(self, other):
        if isinstance(other, WatchedApp):
            return self.id == other.id
        # isinstance instead of `type(other) == str` also accepts str subclasses.
        if isinstance(other, str):
            return self.id == other
        raise TypeError(f"Trying to compare {type(self)} with {type(other)}")

    def __hash__(self):
        return hash(self.id)
class _ProcessWatcher:
    """Low level methods"""
    def __init__(self):
        # Maps each watched app to the set of live psutil.Process objects matched to it.
        self._watched_apps = defaultdict(set)  # {WatchedApp: set([proc1, proc2, ...])}
        self._cache = {}  # child-process cache, see _search_in_children

    @property
    def watched_games(self):
        """Watched apps filtered down to games (launcher entry excluded)."""
        return {k: v for k, v in self._watched_apps.items() if k.is_game}

    @watched_games.setter
    def watched_games(self, to_watch: Dict[str, str]):
        """Replace the watched game set with *to_watch* ({game_id: install_dir})."""
        # remove games not present in to_watch
        for app in list(self._watched_apps.keys()):
            if app.is_game and app.id not in to_watch:
                del self._watched_apps[app]
        # add games from to_watch keeping its processes if already present
        for game_id, path in to_watch.items():
            self._watched_apps.setdefault(WatchedApp(game_id, path), set())
def _get_running_games(self):
    """Drop dead processes, then return ids of watched games that still have live ones."""
    self.__remove_processes_if_dead()
    return set([game.id for game, procs in self.watched_games.items() if procs])

def _is_app_tracked_and_running(self, app):
    """True when *app* is watched and at least one of its recorded processes is alive."""
    if app in self._watched_apps:
        for proc in self._watched_apps[app]:
            # BUG FIX: `proc.is_running` (bound method, always truthy) -> call it.
            if proc.is_running():
                return True
    return False
def _search_in_all(self):
    """Fat check"""
    # Synchronous full scan of every process on the system.
    log.debug(f'Performing check for all processes')
    for proc in psutil.process_iter(ad_value=''):
        self.__match_process(proc)

async def _search_in_all_slowly(self, interval=0.02):
    """Fat check with async intervals; 0.02 lasts a few seconds"""
    log.debug(f'Performing async check in all processes; interval: {interval}')
    for proc in psutil.process_iter(ad_value=''):
        self.__match_process(proc)
        # Yield to the event loop between processes to avoid blocking it.
        await asyncio.sleep(interval)
def _search_in_children(self, procs: Iterable[psutil.Process], recursive=True):
    """Match watched apps against children of *procs*; True when any match was made.

    Cache only child processes because process_iter has its own module level cache.
    """
    found = False
    for proc in procs.copy():
        try:
            for child in proc.children(recursive=recursive):
                if child in self._cache:
                    found |= self.__match_process(self._cache[child])
                else:
                    found |= self.__match_process(child)
                    self._cache[child] = child
        except (psutil.AccessDenied, psutil.NoSuchProcess) as e:
            # log.warn is a deprecated alias of log.warning.
            log.warning(f'Getting children of {proc} has failed: {e}')
    return found
def __match_process(self, proc):
    """Attach *proc* to the first watched app whose directory contains its exe path.

    Returns True when a match was recorded.
    """
    for game in self._watched_apps:
        try:
            path = proc.exe()
        except (psutil.AccessDenied, psutil.NoSuchProcess):
            pass
        else:
            if not path:
                # NOTE(review): returns immediately on an empty exe path, skipping
                # the remaining watched apps -- presumably fine since the path
                # would be empty on every iteration; confirm.
                return False
            elif game.dir in path:
                self._watched_apps[game].add(proc)
                return True
    return False

def __remove_processes_if_dead(self):
    """Forget processes that have exited or turned into zombies."""
    for game, processes in self._watched_apps.items():
        # work on copy to avoid adding processes during iteration
        for proc in processes.copy():
            if not proc.is_running() or proc.status() == psutil.STATUS_ZOMBIE:
                log.debug(f'Process {proc} is dead')
                self._watched_apps[game].remove(proc)
class ProcessWatcher(_ProcessWatcher):
    """Watches the launcher process and the game processes spawned under it."""
    _LAUNCHER_ID = '__launcher__'

    def __init__(self, launcher_identifier):
        super().__init__()
        # Bare defaultdict lookup deliberately inserts the launcher entry
        # (is_game=False) with an empty process set.
        self._watched_apps[WatchedApp(self._LAUNCHER_ID, launcher_identifier, False)]
        self._launcher_children_cache = set()
        # self._search_in_all()

    @property
    def _launcher(self):
        # String-key lookup works because WatchedApp hashes/compares by id.
        return self._watched_apps[self._LAUNCHER_ID]

    def _is_launcher_tracked_and_running(self):
        return self._is_app_tracked_and_running(self._LAUNCHER_ID)

    def is_launcher_running(self):
        """True when the launcher is alive; rescans all processes if not yet tracked."""
        if self._is_launcher_tracked_and_running():
            return True
        self._search_in_all()
        return self._is_launcher_tracked_and_running()

    async def _pool_until_launcher_start(self, timeout, long_interval):
        """Poll (up to *timeout* s) until the launcher process is found; True on success."""
        start = time.time()
        while time.time() - start < timeout:
            if self._is_launcher_tracked_and_running():
                return True
            self._search_in_all()
            await asyncio.sleep(long_interval)
        return False
async def pool_until_game_start(self, game_id, timeout, sint, lint):
    """Wait up to *timeout* seconds for the game's process to appear.

    :param sint interval between checking launcher children
    :param lint (longer) interval between checking if launcher exists
    :return: True when a process was matched, False on timeout
    """
    log.debug(f'Starting wait for game {game_id} process')
    start = time.time()
    while time.time() - start < timeout:
        found = await self._pool_until_launcher_start(timeout, lint)
        if found:
            self._search_in_children(self._launcher)
        if self._watched_apps[game_id]:
            log.debug(f'Game process found in {time.time() - start}s')
            return True
        await asyncio.sleep(sint)
    # Final fallback: scan every process once before giving up.
    self._search_in_all()
    if self._watched_apps[game_id]:
        log.debug(f'Game process found in the final fallback parsing all processes')
        return True
    # Was an implicit None; make the timeout result explicit (still falsy).
    return False
def get_running_games(self, check_under_launcher):
    """Return set of ids of currently running games.
    Note: does not actively look for launcher
    """
    # Optionally refresh matches from the launcher's child processes first.
    if check_under_launcher and self._is_launcher_tracked_and_running():
        self._search_in_children(self._launcher, recursive=True)
    return self._get_running_games()
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,470 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /tests/test_process_watcher.py | from process_watcher import WatchedApp
def test_watched_games_setter(process_watcher):
    """Setter drops games absent from the new mapping, keeps processes of retained ones."""
    process_watcher._watched_apps = {
        WatchedApp("Dill", "C:\\Games\\Transtor"): set(),
        WatchedApp("Min", "C:\\Games\\Minit"): set([1, 2])
    }
    process_watcher.watched_games = {"Min": "C:\\Games\\Minit", "Abu": "D:\\Games\\Rome"}
    expected = {
        WatchedApp("Min", "C:\\Games\\Minit"): set([1, 2]),
        WatchedApp("Abu", "D:\\Games\\Rome"): set()
    }
    assert expected == process_watcher.watched_games
78,471 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /tests/test_is_installed.py | from src.local import _WindowsLauncher
def test_parse_winreg_paths():
    """Registry command values in various shapes all reduce to the bare exe path."""
    cases = (
        (r'"C:\Program Files (x86)\Epic Games\Launcher\Portal\Binaries\Win64\EpicGamesLauncher.exe" %1',
         r'C:\Program Files (x86)\Epic Games\Launcher\Portal\Binaries\Win64\EpicGamesLauncher.exe'),
        (r'"C:\Program Files\Epic Games\Launcher\Portal\Binaries\Win32\EpicGamesLauncher.exe" %1',
         r'C:\Program Files\Epic Games\Launcher\Portal\Binaries\Win32\EpicGamesLauncher.exe'),
        (r'D:\EpicGames\Launcher\Portal\Binaries\Win32\EpicGamesLauncher.exe %1',
         r'D:\EpicGames\Launcher\Portal\Binaries\Win32\EpicGamesLauncher.exe'),
        (r'"C:\Program Files (x86)\moreargs.exe" %1 %2',
         r'C:\Program Files (x86)\moreargs.exe'),
        (r'"D:\noargs.exe"',
         r'D:\noargs.exe'),
    )
    for raw_value, expected_path in cases:
        assert _WindowsLauncher._parse_winreg_path(raw_value) == expected_path
78,472 | FriendsOfGalaxy/galaxy-integration-epic | refs/heads/master | /src/http_client.py | import logging
import aiohttp
import asyncio
from base64 import b64encode
from galaxy.http import handle_exception, create_client_session
from yarl import URL
from galaxy.api.errors import (
AuthenticationRequired, UnknownBackendResponse
)
def basic_auth_credentials(login, password):
    """Encode login:password for an HTTP Basic Authorization header (base64, ASCII)."""
    raw = f"{login}:{password}".encode()
    return b64encode(raw).decode("ascii")
class CookieJar(aiohttp.CookieJar):
    """aiohttp cookie jar that notifies a callback whenever cookies change."""
    def __init__(self):
        super().__init__()
        self._cookies_updated_callback = None

    def set_cookies_updated_callback(self, callback):
        self._cookies_updated_callback = callback

    def update_cookies(self, cookies, url=URL()):
        super().update_cookies(cookies, url)
        # Only notify on a non-empty update, and only once a callback is set.
        if cookies and self._cookies_updated_callback:
            self._cookies_updated_callback(list(self))
class AuthenticatedHttpClient:
    """aiohttp-based client handling Epic OAuth token lifecycle and authorized requests."""
    # NOTE(review): hardcoded OAuth client credentials -- appear to be the
    # launcher's client id/secret used for Basic auth on the token endpoint.
    _LAUNCHER_LOGIN = "34a02cf8f4414e29b15921876da36f9a"
    _LAUNCHER_PASSWORD = "daafbccc737745039dffe53d94fc76cf"
    _BASIC_AUTH_CREDENTIALS = basic_auth_credentials(_LAUNCHER_LOGIN, _LAUNCHER_PASSWORD)
    _OAUTH_URL = "https://account-public-service-prod03.ol.epicgames.com/account/api/oauth/token"
    # Impersonates the Epic launcher's user agent on every request.
    LAUNCHER_USER_AGENT = (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
        "AppleWebKit/537.36 (KHTML, like Gecko) "
        "EpicGamesLauncher/9.11.2-5710144+++Portal+Release-Live "
        "UnrealEngine/4.21.0-5710144+++Portal+Release-Live "
        "Safari/537.36"
    )

    def __init__(self, store_credentials_callback):
        self._refresh_token = None
        self._access_token = None
        self._account_id = None
        self._auth_lost_callback = None
        self._store_credentials = store_credentials_callback  # persists the refresh token
        self._cookie_jar = CookieJar()
        self._session = create_client_session(cookie_jar=self._cookie_jar)
        self._session.headers = {}
        self._session.headers["User-Agent"] = self.LAUNCHER_USER_AGENT
        self._refreshing_task = None  # single shared in-flight token refresh

    def set_cookies_updated_callback(self, callback):
        self._cookie_jar.set_cookies_updated_callback(callback)

    def update_cookies(self, cookies):
        self._cookie_jar.update_cookies(cookies)

    def set_auth_lost_callback(self, callback):
        # Invoked when a token refresh fails and the user must re-authenticate.
        self._auth_lost_callback = callback
async def retrieve_exchange_code(self):
    """After web login, rebuild session cookies and generate an OAuth exchange code."""
    xsrf_token = None
    old_cookies_values = [cookie.value for cookie in self._session.cookie_jar]
    # These two calls refresh the session and CSRF cookies.
    await self.request('GET', "https://www.epicgames.com/id/api/authenticate")
    await self.request('GET', "https://www.epicgames.com/id/api/csrf")
    cookies = [cookie for cookie in self._session.cookie_jar]
    cookies_to_set = dict()
    for new_cookie in cookies:
        # Prefer freshly issued values over stale pre-login ones.
        if new_cookie.key in cookies_to_set and new_cookie.value in old_cookies_values:
            continue
        cookies_to_set[new_cookie.key] = new_cookie.value
        if new_cookie.key == 'XSRF-TOKEN':
            xsrf_token = new_cookie.value
    # Start over with a clean jar containing only the selected cookies.
    self._cookie_jar = CookieJar()
    self._session = create_client_session(cookie_jar=self._cookie_jar)
    self.update_cookies(cookies_to_set)
    headers = {
        "X-Epic-Event-Action": "login",
        "X-Epic-Event-Category": "login",
        "X-Epic-Strategy-Flags": "guardianKwsFlowEnabled=false;minorPreRegisterEnabled=false;registerEmailPreVerifyEnabled=false;guardianEmailVerifyEnabled=true;guardianEmbeddedDocusignEnabled=true",
        "X-Requested-With": "XMLHttpRequest",
        "X-XSRF-TOKEN": xsrf_token,
        "Referer": "https://www.epicgames.com/id/login/welcome"
    }
    response = await self.request('POST', "https://www.epicgames.com/id/api/exchange/generate", headers=headers)
    response = await response.json()
    return response['code']

async def authenticate_with_exchange_code(self, exchange_code):
    """First-time auth: trade a web-session exchange code for OAuth tokens."""
    await self._authenticate("exchange_code", exchange_code)

async def authenticate_with_refresh_token(self, refresh_token):
    """Re-auth with a previously stored refresh token."""
    self._refresh_token = refresh_token
    await self._refresh_tokens()
async def request(self, *args, **kwargs):
    """Raw (unauthorized) request; galaxy's handle_exception maps HTTP errors."""
    with handle_exception():
        return await self._session.request(*args, **kwargs)

@property
def account_id(self):
    return self._account_id

@property
def authenticated(self):
    # Considered authenticated once an access token has been obtained.
    return self._access_token is not None

@property
def refresh_token(self):
    return self._refresh_token

async def _validate_graph_response(self, response):
    """Decode a GraphQL response; map embedded 401 errors to AuthenticationRequired."""
    response = await response.json()
    if "errors" in response:
        for error in response["errors"]:
            # GraphQL returns HTTP 200 with errors in the body; detect auth failures.
            if '401' in error["message"]:
                raise AuthenticationRequired()
    return response
async def do_request(self, method, *args, **kwargs):
    """Perform an authorized request, refreshing tokens and retrying once on failure.

    `graph=True` in kwargs additionally validates the GraphQL body and
    returns the decoded dict instead of the raw response.
    """
    if not self.authenticated:
        raise AuthenticationRequired()
    try:
        if 'graph' in kwargs:
            return await self._validate_graph_response(await method(*args, **kwargs))
        return await method(*args, **kwargs)
    except Exception as e:
        logging.exception(f"Received exception on authorized request: {repr(e)}")
        try:
            # Share a single refresh between concurrently failing requests.
            if not self._refreshing_task or self._refreshing_task.done():
                self._refreshing_task = asyncio.create_task(self._refresh_tokens())
            await self._refreshing_task
            # NOTE(review): the task is already finished after the await above,
            # so this loop never iterates; dead code kept as-is.
            while not self._refreshing_task.done():
                await asyncio.sleep(0.2)
        except AuthenticationRequired as e:
            logging.exception(f"Failed to refresh tokens, received: {repr(e)}")
            if self._auth_lost_callback:
                self._auth_lost_callback()
            raise
        except Exception as e:
            logging.exception(f"Got exception {repr(e)}")
            raise
        # Retry once with the refreshed token.
        if 'graph' in kwargs:
            return await self._validate_graph_response(await method(*args, **kwargs))
        return await method(*args, **kwargs)
async def get(self, *args, **kwargs):
    """Authorized GET with automatic token refresh."""
    return await self.do_request(self._authorized_get, *args, **kwargs)

async def post(self, *args, **kwargs):
    """Authorized POST with automatic token refresh."""
    return await self.do_request(self._authorized_post, *args, **kwargs)

async def close(self):
    """Close the underlying aiohttp session."""
    await self._session.close()
    logging.debug('http client session closed')

async def _refresh_tokens(self):
    """Exchange the stored refresh token for a fresh access token."""
    logging.info("Refreshing token")
    await self._authenticate("refresh_token", self._refresh_token)
async def _authenticate(self, grant_type, secret):
    """POST to the OAuth token endpoint and store the resulting tokens.

    :param grant_type: "exchange_code" or "refresh_token"
    :param secret: the corresponding code/token value
    Raises AuthenticationRequired on auth failure, UnknownBackendResponse on
    an unparsable payload.
    """
    headers = {
        "Authorization": "basic " + self._BASIC_AUTH_CREDENTIALS,
        "User-Agent": self.LAUNCHER_USER_AGENT
    }
    data = {
        "grant_type": grant_type,
        "token_type": "eg1"
    }
    data[grant_type] = secret
    try:
        with handle_exception():
            try:
                response = await self._session.request("POST", self._OAUTH_URL, headers=headers, data=data)
            except aiohttp.ClientResponseError as e:
                logging.error(e)
                if e.status == 400:  # override 400 meaning for auth purpose
                    raise AuthenticationRequired()
                # BUG FIX: non-400 errors were previously swallowed, which led to
                # an UnboundLocalError on `response` below; re-raise so
                # handle_exception can map them.
                raise
    except AuthenticationRequired as e:
        logging.exception(f"Authentication failed, grant_type: {grant_type}, exception: {repr(e)}")
        raise AuthenticationRequired()
    result = await response.json()
    try:
        self._access_token = result["access_token"]
        self._refresh_token = result["refresh_token"]
        self._account_id = result["account_id"]
        # Persist only the refresh token; the access token is short-lived.
        credentials = {"refresh_token": self._refresh_token}
        self._store_credentials(credentials)
    except KeyError:
        logging.exception("Could not parse backend response when authenticating")
        raise UnknownBackendResponse()
def set_authorization_headers(self, **kwargs):
    """Inject the bearer token and launcher user agent into request kwargs."""
    headers = kwargs.setdefault("headers", {})
    headers["Authorization"] = "bearer " + self._access_token
    headers["User-Agent"] = self.LAUNCHER_USER_AGENT
    return kwargs

async def _authorized_get(self, *args, **kwargs):
    kwargs = self.set_authorization_headers(**kwargs)
    if 'graph' in kwargs:
        kwargs.pop('graph')  # internal flag, not an aiohttp parameter
    return await self._session.request("GET", *args, **kwargs)

async def _authorized_post(self, *args, **kwargs):
    kwargs = self.set_authorization_headers(**kwargs)
    if 'graph' in kwargs:
        kwargs.pop('graph')  # internal flag, not an aiohttp parameter
    return await self._session.request("POST", *args, **kwargs)

def _auth_lost(self):
    """Drop tokens and notify the plugin that re-authentication is needed."""
    self._access_token = None
    self._account_id = None
    if self._auth_lost_callback:
        self._auth_lost_callback()
| {"/tests/test_is_installed.py": ["/src/local.py"]} |
78,473 | jeannotalpin/pyRaspKodi | refs/heads/master | /pykodi/__init__.py | ## pyKodi
| {"/action-kodi.py": ["/pykodi/kodi.py"]} |
78,474 | jeannotalpin/pyRaspKodi | refs/heads/master | /action-kodi.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from snipsTools import SnipsConfigParser
from hermes_python.hermes import Hermes
from hermes_python.ontology import *
from pykodi.kodi import Kodi
import io
CONFIG_INI = "config.ini"

# If this skill is supposed to run on the satellite,
# please get this mqtt connection info from <config.ini>
# Hint: MQTT server is always running on the master device
MQTT_IP_ADDR = "localhost"
MQTT_PORT = 1883
MQTT_ADDR = "{}:{}".format(MQTT_IP_ADDR, str(MQTT_PORT))

# Spoken responses, keyed by message id then by language code ("fr"/"gb").
VOCABULARY = {
    "doneSoundLevel": {
        "fr": u"C'est fait. Le son est maintenant a {}",
        "gb": "Done! The sound level is now {}"
    },
    "noActivePlayer": {
        "fr": u"Désolé, il n'y a pas de lecteur actif.",
        "gb": "Sorry, there is no active player!"
    },
    "clarify": {
        "fr": u"Clarifie tes intentions !",
        "gb": "Clarify your intentions!"
    },
    "previous": {
        "fr": u"Je reviens à la précédente...",
        "gb": "Coming back..."
    },
    "next": {
        "fr": u"Je passe à la suivante",
        "gb": "Playing next song"
    },
    "oops": {
        "fr": u"Oula, je n'ai pas pu faire cette action. Pardon maitre !",
        "gb": "Oops, sorry: something went wrong..."
    },
    "unknown": {
        "fr": u"Je ne connais pas cette intention : {}.",
        "gb": "I don't know this intent: {}"
    }
}
class Template(object):
    """Wrap the Kodi voice actions with an MQTT (Hermes) connection.

    Reads host/port/language from config.ini, builds a language-specific
    sentence table from VOCABULARY, then blocks on the MQTT intent loop.
    """
    def __init__(self):
        # Read the configuration; it is mandatory for everything below.
        try:
            self.config = SnipsConfigParser.read_configuration_file(CONFIG_INI)
        except Exception:
            self.config = None
            # The old code swallowed the error here and crashed one line
            # later with an opaque AttributeError on self.config.get();
            # re-raise so the real parsing error surfaces instead.
            raise
        ## Prepare Kodi
        host = self.config.get("secret").get("kodihost")
        port = int(self.config.get("secret").get("kodiport"))
        self.kodi = Kodi(host, port)
        ## Language
        self.lang = self.config.get("secret").get("lang")
        # Flatten VOCABULARY into {message id: sentence} for the language.
        self.vocal = {}
        for v in VOCABULARY:
            self.vocal[v] = VOCABULARY[v][self.lang]
        # Start listening to MQTT (blocks forever).
        self.start_blocking()
    def intent_volumeUpDownReceived(self, hermes, intent_message):
        """Increase or decrease volume. If the user gives an increment it is
        used, otherwise a fixed default of 20 is applied.
        """
        inc = 20
        # NOTE(review): if several slots are present, the last one wins.
        for (slot_value, slot) in intent_message.slots.items():
            inc = int(slot[0].slot_value.value.value)
        direction = "up" if "up" in intent_message.intent.intent_name.lower() else "down"
        newL = self.kodi.incrementalVolumeChange(direction=direction, increment=inc)
        hermes.publish_end_session(intent_message.session_id, self.vocal["doneSoundLevel"].format(newL))
    def intent_playPause(self, hermes, intent_message):
        """Toggle the player (pause/play).

        Says something in return only when there is no active player.
        """
        if self.kodi.toggle_player():
            text = None
        else:
            text = self.vocal["noActivePlayer"]
        hermes.publish_end_session(intent_message.session_id, text)
    def intent_prevNext(self, hermes, intent_message):
        """Skip to the next or previous song based on the intent name."""
        intent_name = intent_message.intent.intent_name.lower()
        if "previous" in intent_name:
            direction = "previous"
        elif "next" in intent_name:
            direction = "next"
        else:
            # Neither direction recognised: ask the user to clarify.
            hermes.publish_end_session(intent_message.session_id, self.vocal["clarify"])
            return False
        if self.kodi.goPrevNext(direction=direction):
            text = self.vocal["previous"] if "previous" in direction else self.vocal["next"]
        else:
            text = self.vocal["oops"]
        hermes.publish_end_session(intent_message.session_id, text)
    def intent_unknown(self, hermes, intent_message):
        """Voice feedback for an unhandled intent (currently unused)."""
        hermes.publish_end_session(intent_message.session_id, self.vocal["unknown"].format(intent_message.intent.intent_name.lower()))
    # --> Master callback function, triggered every time an intent is recognized
    def master_intent_callback(self, hermes, intent_message):
        coming_intent = intent_message.intent.intent_name
        if "volumeup" in coming_intent.lower() or "volumedown" in coming_intent.lower():
            self.intent_volumeUpDownReceived(hermes, intent_message)
        elif "speakerinterrupt" in coming_intent.lower() or "resumemusic" in coming_intent.lower():
            self.intent_playPause(hermes, intent_message)
        elif "nextsong" in coming_intent.lower() or "previoussong" in coming_intent.lower():
            self.intent_prevNext(hermes, intent_message)
        else:
            # Deliberately ignore unrecognised intents.
            #self.intent_unknown(hermes, intent_message)
            pass
        # more callback and if condition goes here...
    # --> Register callback function and start MQTT
    def start_blocking(self):
        with Hermes(MQTT_ADDR) as h:
            h.subscribe_intents(self.master_intent_callback).start()
if __name__ == "__main__":
    # Constructing Template blocks forever on the MQTT intent loop.
    Template()
| {"/action-kodi.py": ["/pykodi/kodi.py"]} |
78,475 | jeannotalpin/pyRaspKodi | refs/heads/master | /pykodi/kodi.py | #/usr/bin/env python2
import requests
import json
import logging
#class Error(Exception):
# """Base error class"""
# pass
#
#class GetError(Error):
# """Error raised when GET returns a code != 200"""
# def __init__(self, expression, message):
# self.expression = expression
# self.message = message
#
#class NoActivePlayer(Error):
# def __init__(self, message="There is no active player on Kodi"):
# self.message = message
class Kodi(object):
    """Thin client for the Kodi JSON-RPC HTTP API.

    Every call is a GET against ``http://host:port/jsonrpc?request=<json>``.
    """
    def __init__(self, host, port, logger = None):
        """Store the endpoint address and set up logging.

        :param host: Kodi host name or IP address.
        :param port: Kodi HTTP port (int or numeric string).
        :param logger: optional pre-configured logger; when omitted a
            DEBUG-level stderr logger is attached to the root logger.
        """
        self.address="http://%s:%i/jsonrpc?request=" %(host, int(port))
        # Template payload; "method" and "params" are filled per call
        # on a shallow copy.
        self.data = {
            "jsonrpc" : "2.0",
            "id" : 1,
            "method" : None, ## to be added for action
            "params" : {}, ## same
        }
        if logger is None:
            self.logger = logging.getLogger()
            handler = logging.StreamHandler()
            formatter = logging.Formatter(
                '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
            handler.setFormatter(formatter)
            self.logger.addHandler(handler)
            self.logger.setLevel(logging.DEBUG)
            self.logger.info("Starting logging")
        else:
            self.logger = logger
    def send(self, data):
        """ Send a GET request to Kodi and return the decoded JSON reply.

        Raises ValueError when the HTTP status is not 200.
        """
        req = "%s%s" %(self.address, json.dumps(data))
        resp = requests.get(req)
        if resp.status_code != 200:
            # This means something went wrong.
            raise ValueError('GET returned {}'.format(resp.status_code))
        return resp.json()
    def get_active_player(self):
        """ Return the active player dict, or [] when there is none.

        NOTE(review): when an audio/video player is found this returns
        js["result"][0] (the FIRST player), not the matched one; kept
        as-is to preserve existing behaviour. Also returns None (not [])
        when players exist but none is audio/video.
        """
        data = self.data.copy()
        data["method"]= "Player.GetActivePlayers"
        js = self.send(data)
        if len(js["result"])>0:
            for player in js["result"]:
                if player["type"] == "audio" or player["type"] == "video":
                    return js["result"][0] ## [{u'playerid': 0, u'type': u'audio'}]
        else:
            self.logger.warning("There is no active player")
            return []
    def toggle_player(self):
        """ Unpause the music if the player is paused or pause it if it is playing.
        Does nothing when no active player is found.
        Returns True if the toggle happened, False otherwise (for voice feedback).
        """
        player = self.get_active_player()
        if player != []:
            data = self.data.copy()
            data["method"] = "Player.PlayPause"
            data["params"] = {"playerid":player["playerid"]}
            js = self.send(data)
            toreturn = True
            self.logger.debug("Successfully tooggled")
        else:
            toreturn = False
        return toreturn
    def stop_player(self):
        """ Stop the music.
        Does nothing when no active player is found.
        Returns True if the stop happened, False otherwise (for voice feedback).
        """
        player = self.get_active_player()
        if player != []:
            data = self.data.copy()
            data["method"] = "Player.Stop"
            data["params"] = {"playerid":player["playerid"]}
            js = self.send(data)
            toreturn = True
            self.logger.debug("Successfully stopped")
        else:
            # BUGFIX: was `logger.warning(...)` which raised NameError --
            # only self.logger exists in this scope.
            self.logger.warning("There is no active player")
            toreturn = False
        return toreturn
    def incrementalVolumeChange(self, direction="up", increment=20):
        """ Increment or decrement the volume, based on the current level.

        The result is clamped to [10, 100] (lower bound only applies when
        going down). Returns the level reported back by Kodi.
        """
        ## Get current level
        data=self.data.copy()
        data["method"] = "Application.GetProperties"
        data["params"] = {"properties":["volume"]}
        js = self.send(data)
        curlevel = int(js["result"]["volume"])
        self.logger.debug("Current sound level is %i"%curlevel)
        newlevel = min(100, curlevel+increment) if direction=="up" else max(10, curlevel-increment)
        data = self.data.copy()
        data["method"] = "Application.SetVolume"
        data["params"] = {"volume":newlevel}
        js = self.send(data)
        returnedLevel = int(js["result"])
        self.logger.debug("New sound level is %i"%returnedLevel)
        return returnedLevel
    def goPrevNext(self, direction="next"):
        """ Go to the next song (direction="next") or the previous one
        (direction="previous").
        Returns True or False depending on the success.
        """
        player = self.get_active_player()
        if player != []:
            data = self.data.copy()
            data["method"] = "Player.GoTo"
            data["params"] = {
                "playerid":player["playerid"],
                "to" : direction
            }
            js = self.send(data)
            if js["result"] == "OK":
                toreturn = True
                self.logger.debug("Successfully switched")
            else :
                toreturn = False
                self.logger.warning("There has been a problem while switching: {}".format(js["result"]))
        else:
            toreturn = False
        return toreturn
    def getSongs(self):
        """ Debug helper: print every song label and exit the process when
        a label containing "quitte pas" is found.
        """
        import sys  # local import: only this debug helper needs it
        data=self.data.copy()
        data["method"] = "AudioLibrary.GetSongs"
        data["params"] = {}
        js = self.send(data)
        for s in js["result"]["songs"]:
            # BUGFIX: was a Python-2 print statement and `sys` was never
            # imported, so this method raised SyntaxError/NameError on
            # Python 3. print(x) is valid on both interpreters.
            print(s["label"])
            if "quitte pas" in s["label"]:
                print("Found!")
                sys.exit()
if __name__ == "__main__":
    # Ad-hoc manual test: attach a DEBUG handler to the root logger and
    # exercise the client against a hard-coded LAN Kodi instance.
    logger = logging.getLogger()
    handler = logging.StreamHandler()
    formatter = logging.Formatter(
        '%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)
    # Development box address -- adjust for your own network.
    kod = Kodi("192.168.1.26", 8080, logger)
    #print kod.toggle_player()
    #print kod.stop_player()
    #print kod.incrementalVolumeChange("up")
    kod.getSongs()
| {"/action-kodi.py": ["/pykodi/kodi.py"]} |
78,476 | festelle/Bot-a_tu_toma | refs/heads/master | /Bot-a tu Toma/one_file.py | # front end, back end and main in one file to be able to create only one .exe program
##### FRONT END #####
import sys
import optparse
from PyQt5.QtCore import pyqtSignal, Qt, QThread, QTimer, QRect, QTime
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QPushButton, QLineEdit, QHBoxLayout, QVBoxLayout, QComboBox, \
QDateTimeEdit, QTimeEdit)
from PyQt5.QtGui import QIcon
import random
import time
import datetime
class main_window(QWidget):
    """Main form: collects UC credentials, up to three course NRCs and the
    registration start time, then emits everything to the back end.
    """
    # Signal the back end uses to talk to this window (front-end inbox).
    frontEnd_signal = pyqtSignal(list)
    def __init__(self, *args, **kwargs):
        # BUGFIX: was super().__init__(*args, *kwargs), which unpacks the
        # kwargs DICT as extra positional arguments (its keys) instead of
        # forwarding them as keyword arguments.
        super().__init__(*args, **kwargs)
        self.init_gui()
        self.backEnd_signal = None
        self.frontEnd_signal.connect(self.index)
    def init_gui(self):
        """Build all widgets with fixed geometry (no layout manager)."""
        # Main window frame.
        self.setWindowTitle('Bot-a tu toma')
        self.setWindowIcon(QIcon('Bot-a.png'))
        self.setGeometry(100, 100, 400, 450)
        # Column x-offsets shared by every row of the form.
        col1 = 25
        col2 = 100
        col3 = 205
        self.userLabel = QLabel('Usuario:', self)
        self.userLabel.move(col1, 50)
        self.userEdit = QLineEdit('', self)
        self.userEdit.setGeometry(col2, 50, 100, 20)
        self.userLabelInstruccions = QLabel('(Solo usuario, no mail UC)', self)
        self.userLabelInstruccions.move(col3, 50)
        self.passwordLabel = QLabel('Contraseña:', self)
        self.passwordLabel.move(col1, 90)
        self.passwordEdit = QLineEdit('', self)
        self.passwordEdit.setGeometry(col2, 90, 100, 20)
        self.passwordEdit.setEchoMode(QLineEdit.Password)
        self.passwordButton = QPushButton('ver', self)
        self.passwordButton.setCheckable(True)
        self.passwordButton.clicked.connect(self.view_password)
        self.passwordButton.setGeometry(col3, 90, 45, 20)
        self.labelNRC1 = QLabel('Ramo 1 (NRC):', self)
        self.labelNRC1.move(col1, 170)
        self.editNRC1 = QLineEdit('', self)
        self.editNRC1.setGeometry(col2, 170, 100, 20)
        self.labelNRC2 = QLabel('Ramo 2 (NRC):', self)
        self.labelNRC2.move(col1, 210)
        self.editNRC2 = QLineEdit('', self)
        self.editNRC2.setGeometry(col2, 210, 100, 20)
        self.labelNRC3 = QLabel('Ramo 3 (NRC):', self)
        self.labelNRC3.move(col1, 250)
        self.editNRC3 = QLineEdit('', self)
        self.editNRC3.setGeometry(col2, 250, 100, 20)
        self.startTimeLabel = QLabel('Hora de toma:', self)
        self.startTimeLabel.move(col1, 290)
        self.startTimeEdit = QTimeEdit(self)
        # Renamed from `time` to avoid shadowing the `time` module.
        default_time = QTime()
        default_time.setHMS(8, 30, 0)
        self.startTimeEdit.setTime(default_time)
        self.startTimeEdit.setGeometry(col2, 290, 100, 20)
        self.startTimeInstruccions = QLabel('(Formato 24 horas. Ej: 2 pm -> 14:00)', self)
        self.startTimeInstruccions.move(col3, 290)
        self.startButton = QPushButton('&Empezar', self)
        self.startButton.resize(self.startButton.sizeHint())
        self.startButton.clicked.connect(self.button_click)
        self.startButton.setGeometry(col2, 340, 100, 40)
        self.countdownLabel = QLabel('', self)
        self.countdownLabel.move(75, 400)
        linkTemplate = '<a href={0}>{1}</a>'
        self.repositoryLabel = QLabel('', self)
        self.repositoryLabel.setOpenExternalLinks(True)
        self.repositoryLabel.setText(linkTemplate.format('https://github.com/festelle/Bot-a_tu_toma', \
            'Ver código en github'))
        # Integer division: setGeometry expects ints, (400-100)/2 was a float.
        self.repositoryLabel.setGeometry((400 - 100) // 2, 400, 100, 20)
        self.authorLabel = QLabel('Creado por F. Estelle', self)
        self.authorLabel.setGeometry((400 - 100) // 2, 425, 100, 20)
    def index(self, data):
        # Interpret commands sent by the back end (none handled yet).
        pass
    def button_click(self):
        """Lock the form, hide the window and hand the data to the back end."""
        # Prevent further edits to the data.
        self.deactivate()
        # Hide the window (showing progress would require threading).
        self.hide()
        # Ship the form contents to the back end and wait for it to start.
        data = ['start_process', self.userEdit.text(), self.passwordEdit.text(), self.editNRC1.text(), \
            self.editNRC2.text(), self.editNRC3.text(), self.startTimeEdit.text(), self.startTimeEdit.time().toPyTime()]
        self.backEnd_signal.emit(data)
    def view_password(self):
        """Toggle between masked and clear-text password display."""
        if self.passwordButton.isChecked():
            self.passwordButton.setText('ocultar')
            self.passwordEdit.setEchoMode(QLineEdit.Normal)
        else:
            self.passwordButton.setText('ver')
            self.passwordEdit.setEchoMode(QLineEdit.Password)
    def deactivate(self):
        """Disable every input so the submitted data cannot change."""
        self.userEdit.setDisabled(True)
        self.passwordEdit.setDisabled(True)
        self.editNRC1.setDisabled(True)
        self.editNRC2.setDisabled(True)
        self.editNRC3.setDisabled(True)
        self.startTimeEdit.setDisabled(True)
        self.startButton.setDisabled(True)
##### BACK END #####
from selenium import webdriver
import time
import datetime
import functools
from PyQt5.QtCore import QObject, pyqtSignal, QUrl, QTimer, QThread
from webdriver_manager.chrome import ChromeDriverManager
import threading
class main_window_logic(QObject):
    """Back end: receives the form data via a Qt signal and drives
    Selenium/Chrome through the UC course-registration flow.
    """
    # Signal the front end uses to trigger back-end work.
    backEnd_signal = pyqtSignal(list)
    def __init__(self):
        super().__init__()
        self.backEnd_signal.connect(self.index)
        self.frontEnd_signal = None
        self.user = None
        self.password = None
        self.NRC1 = None
        self.NRC2 = None
        self.NRC3 = None
        self.start_time = None
    def index(self, data):
        # The first element of the list is the command to execute.
        order = data[0]
        if order == 'start_process':
            self.start_process(data)
    def start_process(self, data):
        """Unpack the form data, open Chrome and run the registration."""
        self.user, self.password, self.NRC1 = data[1], data[2], data[3]
        self.NRC2, self.NRC3, self.start_time, self.start_time_obj = data[4], data[5], data[6], data[7]
        # One browser instance for the whole flow.
        driver = webdriver.Chrome(ChromeDriverManager().install())
        prepare = self.prepare_account(driver)
        if prepare != 'error':
            # With the account ready, register the courses.
            self.take_classes(driver)
    def prepare_account(self, driver):
        """Log into Banner and sleep until the registration time.

        Returns 'error' on bad credentials, None otherwise.
        """
        driver.get('https://ssb.uc.cl/ERPUC/twbkwbis.P_WWWLogin')
        # Begin the login 3 minutes before the registration opens.
        time_start = datetime.datetime.combine(datetime.date.today(), self.start_time_obj) - datetime.timedelta(minutes=3)
        now = datetime.datetime.now()
        if (time_start-now).total_seconds() > 0:
            # Show a countdown message in the user box while waiting.
            userBox = driver.find_element_by_id("UserID")
            userBox.click()
            userBox.send_keys(f'Proceso comenzará a las {time_start.time().hour}:{time_start.time().minute}')
            time.sleep((time_start-now).total_seconds())
        # Fill in the credentials and submit.
        userBox = driver.find_element_by_id("UserID")
        userBox.click()
        userBox.clear()
        userBox.send_keys(self.user)
        passBox = driver.find_element_by_name("PIN")
        passBox.click()
        passBox.send_keys(self.password)
        submitButton = driver.find_element_by_xpath("/html/body/div[3]/form/p/input")
        submitButton.click()
        time.sleep(1)
        try:
            botonAgregarClases = driver.find_element_by_link_text("Agregar o Eliminar Clases")
            botonAgregarClases.click()
        except Exception:  # narrowed from bare except
            print('\n\n USUARIO O CLAVE UC INCORRECTA \n\n')
            return 'error'
        # Sleep until the exact registration time.
        time_start = datetime.datetime.combine(datetime.date.today(), self.start_time_obj)
        now = datetime.datetime.now()
        if (time_start-now).total_seconds() > 0:
            time.sleep((time_start-now).total_seconds())
    def take_classes(self, driver):
        """Select the study plan, type the three NRCs and submit."""
        # BUGFIX: time.clock() was removed in Python 3.8; perf_counter()
        # is the documented replacement for elapsed-time measurement.
        initial_time = time.perf_counter()
        # Select the current registration period (first form button).
        submitButton = driver.find_element_by_xpath("/html/body/div[3]/form/input")
        submitButton.click()
        try:
            # Select the study plan.
            planMenu = driver.find_element_by_id("st_path_id")
            # ONLY clicks the SECOND option of the menu.
            #### CHECK whether this works for the second semester ####
            planMenu.find_element_by_xpath("/html/body/div[3]/form/table/tbody/tr[2]/td/select/option[2]").click()
            sendButton = driver.find_element_by_xpath("/html/body/div[3]/form/input[19]")
            sendButton.click()
            # Enter the courses to register.
            inputClass1 = driver.find_element_by_id("crn_id1")
            inputClass1.send_keys(self.NRC1)
            inputClass2 = driver.find_element_by_id("crn_id2")
            inputClass2.send_keys(self.NRC2)
            inputClass3 = driver.find_element_by_id("crn_id3")
            inputClass3.send_keys(self.NRC3)
            # Submit the changes.
            sendButton = driver.find_element_by_xpath('/html/body/div[3]/form/input[19]')
            sendButton.click()
            # Report the elapsed time.
            final_time = time.perf_counter() - initial_time
            print('Tiempo demorado en tomar los ramos:', final_time)
        except Exception:  # narrowed from bare except
            print('\n\n OCURRIÓ UN ERROR (ES LA HORA CORRECTA?) \n\n')
###### main.py ######
import sys
from PyQt5.QtWidgets import QApplication
if __name__ == '__main__':
    app = QApplication([])
    # Create the front-end and back-end instances.
    mainWindow = main_window()
    mainWindowLogic = main_window_logic()
    # Cross-wire the signals so each side can message the other.
    mainWindow.backEnd_signal = mainWindowLogic.backEnd_signal
    mainWindowLogic.frontEnd_signal = mainWindow.frontEnd_signal
    # Show the main window and enter the Qt event loop.
    mainWindow.show()
    sys.exit(app.exec_())
78,477 | festelle/Bot-a_tu_toma | refs/heads/master | /backEnd.py | ##### BACK END #####
from selenium import webdriver
import time
import datetime
import functools
from PyQt5.QtCore import QObject, pyqtSignal, QUrl, QTimer, QThread
from webdriver_manager.chrome import ChromeDriverManager
import threading
class main_window_logic(QObject):
    """Back end: receives the form data via a Qt signal and drives
    Selenium/Chrome through the UC course-registration flow.
    """
    # Signal the front end uses to trigger back-end work.
    backEnd_signal = pyqtSignal(list)
    def __init__(self):
        super().__init__()
        self.backEnd_signal.connect(self.index)
        self.frontEnd_signal = None
        self.user = None
        self.password = None
        self.NRC1 = None
        self.NRC2 = None
        self.NRC3 = None
        self.start_time = None
    def index(self, data):
        # The first element of the list is the command to execute.
        order = data[0]
        if order == 'start_process':
            self.start_process(data)
    def start_process(self, data):
        """Unpack the form data, open Chrome and run the registration."""
        self.user, self.password, self.NRC1 = data[1], data[2], data[3]
        self.NRC2, self.NRC3, self.start_time, self.start_time_obj = data[4], data[5], data[6], data[7]
        # One browser instance for the whole flow.
        driver = webdriver.Chrome(ChromeDriverManager().install())
        prepare = self.prepare_account(driver)
        if prepare != 'error':
            # With the account ready, register the courses.
            self.take_classes(driver)
    def prepare_account(self, driver):
        """Log into Banner and wait until the registration time.

        Returns 'error' on bad credentials, None otherwise.
        """
        driver.get('https://ssb.uc.cl/ERPUC/twbkwbis.P_WWWLogin')
        # Begin the login 3 minutes before the registration opens.
        time_start = datetime.datetime.combine(datetime.date.today(), self.start_time_obj) - datetime.timedelta(minutes=3)
        now = datetime.datetime.now()
        if (time_start-now).total_seconds() > 0:
            # Show a countdown message in the user box while waiting.
            userBox = driver.find_element_by_id("UserID")
            userBox.click()
            userBox.send_keys(f'Proceso comenzará a las {time_start.time().hour}:{time_start.time().minute}')
            time.sleep((time_start-now).total_seconds())
        # Fill in the credentials and submit.
        userBox = driver.find_element_by_id("UserID")
        userBox.click()
        userBox.clear()
        userBox.send_keys(self.user)
        passBox = driver.find_element_by_name("PIN")
        passBox.click()
        passBox.send_keys(self.password)
        submitButton = driver.find_element_by_xpath("/html/body/div[3]/form/p/input")
        submitButton.click()
        time.sleep(1)
        try:
            botonAgregarClases = driver.find_element_by_link_text("Agregar o Eliminar Clases")
            botonAgregarClases.click()
        except Exception:  # narrowed from bare except
            print('\n\n USUARIO O CLAVE UC INCORRECTA \n\n')
            return 'error'
        # Sleep until the exact registration time.
        time_start = datetime.datetime.combine(datetime.date.today(), self.start_time_obj)
        now = datetime.datetime.now()
        if (time_start-now).total_seconds() > 0:
            time.sleep((time_start-now).total_seconds())
        # Busy-wait double check that it is exactly the start minute.
        # NOTE(review): this compares "YYYY-MM-DD HH:MM" against the raw
        # QTimeEdit text; if the widget's text format differs (locale,
        # single-digit hour) the loop may spin forever -- confirm the
        # time format before relying on it.
        now = str(datetime.datetime.now())[0:16]
        start_time = str(datetime.datetime.now())[0:11] + self.start_time
        while now != start_time:
            now = str(datetime.datetime.now())[0:16]
    def take_classes(self, driver):
        """Select the study plan, type the three NRCs and submit."""
        # BUGFIX: time.clock() was removed in Python 3.8; perf_counter()
        # is the documented replacement for elapsed-time measurement.
        initial_time = time.perf_counter()
        # Select the current registration period (first form button).
        submitButton = driver.find_element_by_xpath("/html/body/div[3]/form/input")
        submitButton.click()
        try:
            # Select the study plan.
            planMenu = driver.find_element_by_id("st_path_id")
            # ONLY clicks the SECOND option of the menu.
            #### CHECK whether this works for the second semester ####
            planMenu.find_element_by_xpath("/html/body/div[3]/form/table/tbody/tr[2]/td/select/option[2]").click()
            sendButton = driver.find_element_by_xpath("/html/body/div[3]/form/input[19]")
            sendButton.click()
            # Enter the courses to register.
            inputClass1 = driver.find_element_by_id("crn_id1")
            inputClass1.send_keys(self.NRC1)
            inputClass2 = driver.find_element_by_id("crn_id2")
            inputClass2.send_keys(self.NRC2)
            inputClass3 = driver.find_element_by_id("crn_id3")
            inputClass3.send_keys(self.NRC3)
            # Submit the changes.
            sendButton = driver.find_element_by_xpath('/html/body/div[3]/form/input[19]')
            sendButton.click()
            # Report the elapsed time.
            final_time = time.perf_counter() - initial_time
            print('Tiempo demorado en tomar los ramos:', final_time)
        except Exception:  # narrowed from bare except
            print('\n\n OCURRIÓ UN ERROR (ES LA HORA CORRECTA?) \n\n')
78,478 | festelle/Bot-a_tu_toma | refs/heads/master | /main.py | import sys
import backEnd
import frontEnd
from PyQt5.QtWidgets import QApplication
if __name__ == '__main__':
app = QApplication([])
#Se crean las instancias de mainWindow
mainWindow = frontEnd.main_window()
mainWindowLogic = backEnd.main_window_logic()
#Se crean y conectan las señales de mainWindow
mainWindow.backEnd_signal = mainWindowLogic.backEnd_signal
mainWindowLogic.frontEnd_signal = mainWindow.frontEnd_signal
#Se abre la ventana principal
mainWindow.show()
sys.exit(app.exec_()) | {"/main.py": ["/backEnd.py", "/frontEnd.py"]} |
78,479 | festelle/Bot-a_tu_toma | refs/heads/master | /frontEnd.py | ##### FRONT END #####
import sys
import optparse
from PyQt5.QtCore import pyqtSignal, Qt, QThread, QTimer, QRect, QTime
from PyQt5.QtWidgets import (QApplication, QWidget, QLabel, QPushButton, QLineEdit, QHBoxLayout, QVBoxLayout, QComboBox, \
QDateTimeEdit, QTimeEdit)
from PyQt5.QtGui import QIcon
import random
import time
import datetime
class main_window(QWidget):
    """Main form: collects UC credentials, up to three course NRCs and the
    registration start time, then emits everything to the back end.
    """
    # Signal the back end uses to talk to this window (front-end inbox).
    frontEnd_signal = pyqtSignal(list)
    def __init__(self, *args, **kwargs):
        # BUGFIX: was super().__init__(*args, *kwargs), which unpacks the
        # kwargs DICT as extra positional arguments (its keys) instead of
        # forwarding them as keyword arguments.
        super().__init__(*args, **kwargs)
        self.init_gui()
        self.backEnd_signal = None
        self.frontEnd_signal.connect(self.index)
    def init_gui(self):
        """Build all widgets with fixed geometry (no layout manager)."""
        # Main window frame.
        self.setWindowTitle('Bot-a tu toma')
        self.setWindowIcon(QIcon('Bot-a.png'))
        self.setGeometry(100, 100, 400, 450)
        # Column x-offsets shared by every row of the form.
        col1 = 25
        col2 = 100
        col3 = 205
        self.userLabel = QLabel('Usuario:', self)
        self.userLabel.move(col1, 50)
        self.userEdit = QLineEdit('', self)
        self.userEdit.setGeometry(col2, 50, 100, 20)
        self.userLabelInstruccions = QLabel('(Solo usuario, no mail UC)', self)
        self.userLabelInstruccions.move(col3, 50)
        self.passwordLabel = QLabel('Contraseña:', self)
        self.passwordLabel.move(col1, 90)
        self.passwordEdit = QLineEdit('', self)
        self.passwordEdit.setGeometry(col2, 90, 100, 20)
        self.passwordEdit.setEchoMode(QLineEdit.Password)
        self.passwordButton = QPushButton('ver', self)
        self.passwordButton.setCheckable(True)
        self.passwordButton.clicked.connect(self.view_password)
        self.passwordButton.setGeometry(col3, 90, 45, 20)
        self.labelNRC1 = QLabel('Ramo 1 (NRC):', self)
        self.labelNRC1.move(col1, 170)
        self.editNRC1 = QLineEdit('', self)
        self.editNRC1.setGeometry(col2, 170, 100, 20)
        self.labelNRC2 = QLabel('Ramo 2 (NRC):', self)
        self.labelNRC2.move(col1, 210)
        self.editNRC2 = QLineEdit('', self)
        self.editNRC2.setGeometry(col2, 210, 100, 20)
        self.labelNRC3 = QLabel('Ramo 3 (NRC):', self)
        self.labelNRC3.move(col1, 250)
        self.editNRC3 = QLineEdit('', self)
        self.editNRC3.setGeometry(col2, 250, 100, 20)
        self.startTimeLabel = QLabel('Hora de toma:', self)
        self.startTimeLabel.move(col1, 290)
        self.startTimeEdit = QTimeEdit(self)
        # Renamed from `time` to avoid shadowing the `time` module.
        default_time = QTime()
        default_time.setHMS(8, 30, 0)
        self.startTimeEdit.setTime(default_time)
        self.startTimeEdit.setGeometry(col2, 290, 100, 20)
        self.startTimeInstruccions = QLabel('(Formato 24 horas. Ej: 2 pm -> 14:00)', self)
        self.startTimeInstruccions.move(col3, 290)
        self.startButton = QPushButton('&Empezar', self)
        self.startButton.resize(self.startButton.sizeHint())
        self.startButton.clicked.connect(self.button_click)
        self.startButton.setGeometry(col2, 340, 100, 40)
        self.countdownLabel = QLabel('', self)
        self.countdownLabel.move(75, 400)
        linkTemplate = '<a href={0}>{1}</a>'
        self.repositoryLabel = QLabel('', self)
        self.repositoryLabel.setOpenExternalLinks(True)
        self.repositoryLabel.setText(linkTemplate.format('https://github.com/festelle/Bot-a_tu_toma', \
            'Ver código en github'))
        # Integer division: setGeometry expects ints, (400-100)/2 was a float.
        self.repositoryLabel.setGeometry((400 - 100) // 2, 400, 100, 20)
        self.authorLabel = QLabel('Creado por F. Estelle', self)
        self.authorLabel.setGeometry((400 - 100) // 2, 425, 100, 20)
    def index(self, data):
        # Interpret commands sent by the back end (none handled yet).
        pass
    def button_click(self):
        """Lock the form, hide the window and hand the data to the back end."""
        # Prevent further edits to the data.
        self.deactivate()
        # Hide the window (showing progress would require threading).
        self.hide()
        # Ship the form contents to the back end and wait for it to start.
        data = ['start_process', self.userEdit.text(), self.passwordEdit.text(), self.editNRC1.text(), \
            self.editNRC2.text(), self.editNRC3.text(), self.startTimeEdit.text(), self.startTimeEdit.time().toPyTime()]
        self.backEnd_signal.emit(data)
    def view_password(self):
        """Toggle between masked and clear-text password display."""
        if self.passwordButton.isChecked():
            self.passwordButton.setText('ocultar')
            self.passwordEdit.setEchoMode(QLineEdit.Normal)
        else:
            self.passwordButton.setText('ver')
            self.passwordEdit.setEchoMode(QLineEdit.Password)
    def deactivate(self):
        """Disable every input so the submitted data cannot change."""
        self.userEdit.setDisabled(True)
        self.passwordEdit.setDisabled(True)
        self.editNRC1.setDisabled(True)
        self.editNRC2.setDisabled(True)
        self.editNRC3.setDisabled(True)
        self.startTimeEdit.setDisabled(True)
        self.startButton.setDisabled(True)
78,486 | aggarabedian/remember_when | refs/heads/main | /main_app/views.py | from django.shortcuts import get_object_or_404, render, redirect
from django.views import View
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic.detail import DetailView
from django.urls import reverse
from django.contrib.auth import login
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.contrib.auth.mixins import UserPassesTestMixin
# Imports Models
from .models import Journal, Memory, Album, Photo
from django.contrib.auth.models import User
# Create your views here.
class Home(TemplateView):
    """Static landing page."""
    template_name = "home.html"
class About(TemplateView):
    """Static about page."""
    template_name = "about.html"
class PublicList(TemplateView):
    """List every memory flagged as public (no login required)."""
    template_name = "memories_public.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["memories"] = Memory.objects.filter(is_public=True)
        return ctx
@method_decorator(login_required, name='dispatch')
class MemoryDetail(TemplateView):
    """Show one memory; requires an authenticated user."""
    model = Memory
    template_name = "memory_detail.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["memory"] = Memory.objects.get(pk=kwargs["pk"])
        return ctx
@method_decorator(login_required, name='dispatch')
class JournalCreate(CreateView):
    """Create a journal owned by the logged-in user."""
    model = Journal
    fields = ['name', 'birthdate']
    template_name = "journal_create.html"
    success_url = "/journals/"

    def form_valid(self, form):
        # Stamp ownership on the unsaved instance before the default save.
        journal = form.instance
        journal.user = self.request.user
        return super().form_valid(form)
class RegisterView(View):
    """Sign-up view: render the user-creation form and process it."""

    def get(self, request):
        return render(request, "registration/register.html",
                      {"form": UserCreationForm()})

    def post(self, request):
        form = UserCreationForm(request.POST)
        if not form.is_valid():
            # Re-render with the bound form so field errors are shown.
            return render(request, "registration/register.html", {"form": form})
        # Valid: create the account, log the new user in, go home.
        login(request, form.save())
        return redirect("home")
@method_decorator(login_required, name='dispatch')
class MemoryCreate(View):
    """Two-step memory creation: render the form, then save the POST."""

    def get(self, request):
        return render(request, 'memory_create.html',
                      {'journals': Journal.objects.all()})

    def post(self, request):
        # Checkbox semantics: present as 'on' means public, anything else not.
        journal = Journal.objects.get(pk=request.POST.get('journal'))
        new_memory = Memory.objects.create(
            title=request.POST.get('title'),
            content=request.POST.get('content'),
            is_public=request.POST.get('is_public') == 'on',
            journal=journal,
        )
        return redirect('memory_detail', pk=new_memory.id)
@method_decorator(login_required, name='dispatch')
class MemoryUpdate(UserPassesTestMixin, UpdateView):
    """Edit a memory; only the owner of its journal may pass."""
    model = Memory
    fields = ['title', 'content', 'is_public']
    template_name = "memory_update.html"

    def test_func(self):
        memory = get_object_or_404(Memory, pk=self.kwargs["pk"])
        return memory.journal.user == self.request.user

    def get_success_url(self):
        return reverse("memory_detail", kwargs={'pk': self.object.pk})
@method_decorator(login_required, name='dispatch')
class JournalDetail(UserPassesTestMixin, TemplateView):
    """Show one journal; only its owner may view it."""
    model = Journal
    template_name = "journal_detail.html"

    def test_func(self):
        journal = get_object_or_404(Journal, pk=self.kwargs["pk"])
        return journal.user == self.request.user

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx["journal"] = Journal.objects.get(pk=kwargs["pk"])
        return ctx
@method_decorator(login_required, name='dispatch')
class MemoryDelete(UserPassesTestMixin, DeleteView):
    """Delete a memory after confirmation; journal owner only."""
    model = Memory
    template_name = "memory_delete_confirmation.html"

    def test_func(self):
        memory = get_object_or_404(Memory, pk=self.kwargs["pk"])
        return memory.journal.user == self.request.user

    def get_success_url(self):
        # Return to the journal the memory belonged to.
        return reverse("journal_detail", kwargs={'pk': self.object.journal.pk})
@method_decorator(login_required, name='dispatch')
class JournalDelete(UserPassesTestMixin, DeleteView):
    """Delete a journal after confirmation; owner only."""
    model = Journal
    template_name = "journal_delete_confirmation.html"
    success_url = "/journals/"

    def test_func(self):
        journal = get_object_or_404(Journal, pk=self.kwargs["pk"])
        return journal.user == self.request.user
@method_decorator(login_required, name='dispatch')
class JournalList(TemplateView):
    """List the journals owned by the logged-in user."""
    template_name = "journal_list.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # login_required guarantees an authenticated user here, so the old
        # `if user != None ... else: return redirect('/')` branch was dead
        # code -- and returning a redirect from get_context_data would not
        # redirect anyway (it would be used as the template context).
        context["journals"] = Journal.objects.filter(user=self.request.user)
        return context
@method_decorator(login_required, name='dispatch')
class JournalUpdate(UserPassesTestMixin, UpdateView):
    """Edit a journal's name/birthdate; owner only."""
    model = Journal
    fields = ['name', 'birthdate']
    template_name = "journal_update.html"

    def test_func(self):
        journal = get_object_or_404(Journal, pk=self.kwargs["pk"])
        return journal.user == self.request.user

    def get_success_url(self):
        return reverse("journal_detail", kwargs={'pk': self.object.pk})
@method_decorator(login_required, name='dispatch')
class AlbumList(TemplateView):
    """List the albums owned by the logged-in user."""
    template_name = "album_list.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # login_required guarantees an authenticated user here, so the old
        # `if user != None ... else: return redirect('/')` branch was dead
        # code -- and returning a redirect from get_context_data would not
        # redirect anyway (it would be used as the template context).
        context["albums"] = Album.objects.filter(user=self.request.user)
        return context
@method_decorator(login_required, name='dispatch')
class PhotoDetail(TemplateView):
    """Show a single photo."""
    model = Photo
    template_name = "photo_detail.html"

    # NOTE(review): unlike the other detail views there is no ownership
    # check here - confirm whether any logged-in user should see any photo.
    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # 404 instead of an unhandled DoesNotExist (500) for a bad pk.
        context["photo"] = get_object_or_404(Photo, pk=kwargs["pk"])
        return context
@method_decorator(login_required, name='dispatch')
class AlbumDetail(TemplateView):
    """Show a single album."""
    model = Album
    template_name = "album_detail.html"

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # 404 instead of an unhandled DoesNotExist (500) for a bad pk.
        context["album"] = get_object_or_404(Album, pk=kwargs["pk"])
        return context
@method_decorator(login_required, name='dispatch')
class AlbumCreate(CreateView):
    """Create a new album owned by the logged-in user."""
    model = Album
    fields = ['title', 'description']
    template_name = "album_create.html"
    success_url = "/albums/"

    def form_valid(self, form):
        # Stamp ownership on the instance before the default save runs.
        form.instance.user = self.request.user
        return super().form_valid(form)
@method_decorator(login_required, name='dispatch')
class AlbumUpdate(UserPassesTestMixin, UpdateView):
    """Edit an album's title/description; restricted to its owner."""
    model = Album
    fields = ['title', 'description']
    template_name = "album_update.html"

    def test_func(self):
        # Only the album's owner may edit it.
        target = get_object_or_404(Album, pk=self.kwargs["pk"])
        return target.user == self.request.user

    def get_success_url(self):
        # Back to the detail page of the album that was edited.
        return reverse("album_detail", kwargs={'pk': self.object.pk})
@method_decorator(login_required, name='dispatch')
class AlbumDelete(UserPassesTestMixin, DeleteView):
    """Confirm and delete an album; restricted to its owner."""
    model = Album
    template_name = "album_delete_confirmation.html"
    success_url = "/albums/"

    def test_func(self):
        # Only the album's owner may delete it.
        target = get_object_or_404(Album, pk=self.kwargs["pk"])
        return target.user == self.request.user
# @method_decorator(login_required, name='dispatch')
# class PhotoCreate(CreateView):
# model = Photo
# fields = ['album', 'title', 'description', 'is_public', 'picture']
# template_name = "photo_create.html"
# def get_success_url(self):
# return reverse("photo_detail", kwargs={'pk': self.object.pk})
@method_decorator(login_required, name='dispatch')
class PhotoCreate(View):
    """Upload a photo into one of the current user's albums."""

    def get(self, request):
        # Only offer the user's own albums.  Listing Album.objects.all()
        # exposed every user's albums and let photos be filed into them,
        # contradicting the ownership checks used everywhere else.
        context = {'albums': Album.objects.filter(user=request.user)}
        return render(request, 'photo_create.html', context)

    def post(self, request):
        picture = request.FILES.get('picture')
        title = request.POST.get('title')
        description = request.POST.get('description')
        # Checkbox inputs submit 'on' when ticked and are absent otherwise.
        is_public = request.POST.get('is_public') == 'on'
        # 404 instead of an unhandled DoesNotExist (500) for a bogus album id.
        album = get_object_or_404(Album, pk=request.POST.get('album'))
        new_photo = Photo.objects.create(
            picture=picture,
            title=title,
            description=description,
            is_public=is_public,
            album=album,
        )
        return redirect('photo_detail', pk=new_photo.id)
@method_decorator(login_required, name='dispatch')
class PhotoUpdate(UserPassesTestMixin, UpdateView):
    """Edit a photo; only the owner of its album may do so."""
    model = Photo
    fields = ['album', 'title', 'description', 'is_public', 'picture']
    template_name = "photo_update.html"

    def test_func(self):
        # Access is granted to the owner of the album containing the photo.
        target = get_object_or_404(Photo, pk=self.kwargs["pk"])
        return target.album.user == self.request.user

    def get_success_url(self):
        # Back to the detail page of the photo that was edited.
        return reverse("photo_detail", kwargs={'pk': self.object.pk})
@method_decorator(login_required, name='dispatch')
class PhotoDelete(UserPassesTestMixin, DeleteView):
model = Photo
template_name = "photo_delete_confirmation.html"
success_url = "/albums/"
def test_func(self):
photo = get_object_or_404(Photo, pk = self.kwargs["pk"])
return self.request.user == photo.album.user | {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,487 | aggarabedian/remember_when | refs/heads/main | /main_app/migrations/0007_album_photo.py | # Generated by Django 3.1.2 on 2021-09-14 02:26
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: introduces the Album table and the
    # Photo table (FK to Album).  Do not edit the operations by hand.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('main_app', '0006_auto_20210909_1202'),
    ]

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=300)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='albums', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['created_at'],
            },
        ),
        migrations.CreateModel(
            name='Photo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('picture', models.ImageField(upload_to='media/')),
                ('title', models.CharField(max_length=200)),
                ('description', models.CharField(max_length=300)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_public', models.BooleanField(default=False)),
                ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='photos', to='main_app.album')),
            ],
            options={
                'ordering': ['created_at'],
            },
        ),
    ]
| {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,488 | aggarabedian/remember_when | refs/heads/main | /main_app/migrations/0005_auto_20210909_1146.py | # Generated by Django 3.1.2 on 2021-09-09 18:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes Memory.photo an optional FileField (blank/null).

    dependencies = [
        ('main_app', '0004_auto_20210909_1142'),
    ]

    operations = [
        migrations.AlterField(
            model_name='memory',
            name='photo',
            field=models.FileField(blank=True, null=True, upload_to='media/'),
        ),
    ]
| {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,489 | aggarabedian/remember_when | refs/heads/main | /main_app/migrations/0002_auto_20210908_1642.py | # Generated by Django 3.1.2 on 2021-09-08 23:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: allows NULL on Memory.photo.

    dependencies = [
        ('main_app', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='memory',
            name='photo',
            field=models.FileField(null=True, upload_to='media/'),
        ),
    ]
| {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,490 | aggarabedian/remember_when | refs/heads/main | /main_app/migrations/0001_initial.py | # Generated by Django 3.1.2 on 2021-09-08 19:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Journal and Memory
    # tables (Memory has an FK to Journal; Journal an FK to the auth user).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Journal',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('birthdate', models.DateField()),
                ('total_memories', models.IntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='journals', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Memory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('content', models.TextField(max_length=500)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('is_public', models.BooleanField(default=False)),
                ('photo', models.FileField(upload_to='media/')),
                ('journal', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='memories', to='main_app.journal')),
            ],
            options={
                'ordering': ['created_at'],
            },
        ),
    ]
| {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,491 | aggarabedian/remember_when | refs/heads/main | /main_app/migrations/0003_auto_20210909_1135.py | # Generated by Django 3.1.2 on 2021-09-09 18:35
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: converts Memory.photo to an optional ImageField.

    dependencies = [
        ('main_app', '0002_auto_20210908_1642'),
    ]

    operations = [
        migrations.AlterField(
            model_name='memory',
            name='photo',
            field=models.ImageField(blank=True, null=True, upload_to='media/'),
        ),
    ]
| {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,492 | aggarabedian/remember_when | refs/heads/main | /main_app/models.py | from django.db.models import Model, CharField, TextField, BooleanField, ForeignKey
from django.db.models.fields import DateField, DateTimeField, IntegerField
from django.db.models.deletion import CASCADE
from django.contrib.auth.models import User
from django.db.models.fields.files import FileField, ImageField
# Create your models here.
class Journal(Model):
    """A named journal with a birthdate, owned by a user; memories hang off it."""
    name = CharField(max_length=100)
    birthdate = DateField(auto_now=False, auto_now_add=False)
    # Denormalised counter; NOTE(review): nothing in the visible code keeps
    # it in sync with the related Memory rows - confirm before relying on it.
    total_memories = IntegerField(default=0)
    created_at = DateTimeField(auto_now_add=True)
    user = ForeignKey(User, on_delete=CASCADE, related_name='journals')

    def __str__(self):
        return self.name

    class Meta:
        # Alphabetical default ordering for journal listings.
        ordering = ['name']
class Memory(Model):
    """A titled entry (text plus optional photo) belonging to a journal."""
    title = CharField(max_length=200)
    content = TextField(max_length=500)
    created_at = DateTimeField(auto_now_add=True)
    # Public memories are presumably shown outside the owner's account -
    # confirm against the templates/views that read this flag.
    is_public = BooleanField(default=False)
    photo = ImageField(upload_to='media/', blank=True, null=True)
    journal = ForeignKey(Journal, on_delete=CASCADE, related_name='memories')

    def __str__(self):
        return self.title

    class Meta:
        # Oldest-first default ordering.
        ordering = ['created_at']
class Album(Model):
    """A photo album owned by a user."""
    title = CharField(max_length=200)
    description = CharField(max_length=300)
    created_at = DateTimeField(auto_now_add=True)
    user = ForeignKey(User, on_delete=CASCADE, related_name='albums')

    def __str__(self):
        return self.title

    class Meta:
        # Oldest-first default ordering.
        ordering = ['created_at']
class Photo(Model):
picture = ImageField(upload_to='media/')
title = CharField(max_length=200)
description = CharField(max_length=300)
created_at = DateTimeField(auto_now_add=True)
is_public = BooleanField(default=False)
album = ForeignKey(Album, on_delete=CASCADE, related_name='photos')
def __str__(self):
return self.title
class Meta:
ordering = ['created_at'] | {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,493 | aggarabedian/remember_when | refs/heads/main | /main_app/admin.py | from django.contrib import admin
from .models import Journal, Memory, Photo, Album
# Register your models here.
admin.site.register(Journal)
admin.site.register(Memory)
admin.site.register(Album)
admin.site.register(Photo) | {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,494 | aggarabedian/remember_when | refs/heads/main | /main_app/urls.py | from django.urls import path
from . import views
urlpatterns = [
path('', views.Home.as_view(), name='home'),
path('about/', views.About.as_view(), name='about'),
path('public/', views.PublicList.as_view(), name='public'),
path('accounts/register/', views.RegisterView.as_view(), name='register'),
path('memory/<int:pk>/', views.MemoryDetail.as_view(), name='memory_detail'),
path('journals/new/', views.JournalCreate.as_view(), name='journal_create'),
path('memory/new/', views.MemoryCreate.as_view(), name='memory_create'),
path('memory/<int:pk>/update/', views.MemoryUpdate.as_view(), name='memory_update'),
path('journals/<int:pk>/', views.JournalDetail.as_view(), name='journal_detail'),
path('memory/<int:pk>/delete/', views.MemoryDelete.as_view(), name='memory_delete'),
path('journals/<int:pk>/delete/', views.JournalDelete.as_view(), name='journal_delete'),
path('journals/', views.JournalList.as_view(), name='journal_list'),
path('journals/<int:pk>/update/', views.JournalUpdate.as_view(), name='journal_update'),
path('albums/', views.AlbumList.as_view(), name='albums_list'),
path('albums/<int:pk>/', views.AlbumDetail.as_view(), name='album_detail'),
path('albums/new/', views.AlbumCreate.as_view(), name='album_create'),
path('albums/<int:pk>/update/', views.AlbumUpdate.as_view(), name='album_update'),
path('albums/<int:pk>/delete/', views.AlbumDelete.as_view(), name='album_delete'),
path('photo/<int:pk>/', views.PhotoDetail.as_view(), name='photo_detail'),
path('photo/new/', views.PhotoCreate.as_view(), name='photo_create'),
path('photo/<int:pk>/update/', views.PhotoUpdate.as_view(), name='photo_update'),
path('photo/<int:pk>/delete/', views.PhotoDelete.as_view(), name='photo_delete'),
] | {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,495 | aggarabedian/remember_when | refs/heads/main | /main_app/migrations/0004_auto_20210909_1142.py | # Generated by Django 3.1.2 on 2021-09-09 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: makes Memory.photo a required ImageField again,
    # backfilling existing rows with the literal default 1.

    dependencies = [
        ('main_app', '0003_auto_20210909_1135'),
    ]

    operations = [
        migrations.AlterField(
            model_name='memory',
            name='photo',
            field=models.ImageField(blank=True, default=1, upload_to='media/'),
            preserve_default=False,
        ),
    ]
| {"/main_app/views.py": ["/main_app/models.py"], "/main_app/admin.py": ["/main_app/models.py"]} |
78,498 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/views.py | import runpy
from django.shortcuts import render,redirect
from django.http import HttpResponse
from django.contrib.auth import authenticate, login , get_user_model
from .models import *
from django.db.models import Q
from django.contrib.messages import warning
from django.shortcuts import get_object_or_404
from .forms import RegistrationFormUser , image_data
import uuid,os,shutil
from PIL import Image
from farming import sn
from django.contrib.auth import logout
def logout_view(request):
    """Log the current user out and show the public home page."""
    logout(request)
    # Note: this renders home.html directly rather than issuing a redirect,
    # so the browser URL stays on the logout endpoint after logging out.
    return render(request, 'home.html', {})
def home(request):
    """Render the public landing page."""
    empty_context = {}
    return render(request, 'home.html', empty_context)
def display_info(request):
    """Show the profile details of the logged-in user."""
    user_info = Person.objects.filter(user=request.user)
    # Guard against a missing Person row: the old code indexed
    # user_info.values()[0] and crashed with IndexError on an empty
    # queryset.  (The debug print was removed as well.)
    person = user_info.first()
    name = person.name if person is not None else ""
    return render(request, 'display_info.html', {'details': user_info, 'name': name})
def login1(request):
    """Authenticate a user from the login form and show their profile.

    Bug fix: the previous version crashed on every successful login by
    reading ``user_info.name`` on a QuerySet, which has no ``name``
    attribute; the name is now taken from the first Person row.
    """
    if request.method == "POST":
        email = request.POST['username']
        password = request.POST['password']
        user = authenticate(email=email, password=password)
        if user is not None:
            if user.is_active:
                login(request, user)
                user_info = Person.objects.filter(user=request.user)
                person = user_info.first()
                name = person.name if person is not None else ""
                return render(request, 'display_info.html', {'details': user_info, 'name': name})
            else:
                return render(request, 'home.html', {'error_message': 'Your account has been disabled'})
        else:
            return render(request, 'home.html', {'error_message': 'Invalid login'})
    return render(request, 'home.html', {})
def check_new(request):
    """Show the image-upload page for a fresh analysis."""
    empty_context = {}
    return render(request, 'add_image.html', empty_context)
def check_new2(request):
    """Handle the photo-upload form, run disease detection, store the result.

    On a valid POST the uploaded image is written to detect/pic.jpg, the
    classifier (sn.openphoto) is run, and a photo_data row is created with
    the diagnosis; otherwise the upload form is (re)rendered.
    """
    form = image_data
    dis = "HEALTHY!!"
    tret = "HEALTHY!!"
    acc = 0  # the old duplicate `acc = ""` assignment was dead code
    mail = Person.objects.filter(user=request.user).values()[0]['email']
    if request.method == 'POST':
        form = image_data(request.POST, request.FILES)
        if form.is_valid():
            img = Image.open(form.cleaned_data['photo'])
            # Drop the uploaded image where the classifier expects it.
            img.save("detect/pic.jpg")
            dis, tret, acc = sn.openphoto()
            # num.txt holds a monotonically increasing id for photo_data
            # rows.  Context managers replace the manual open/close pairs,
            # which leaked the handle on any exception in between (the old
            # `if f is not None` check was dead: open() never returns None).
            with open("num.txt", "r") as f:
                idd = f.read()
            with open("num.txt", "w") as f:
                f.write(str(int(idd) + 1))
            a = photo_data(id=idd, email1=mail, disease=dis, suggestion=tret,
                           photo=form.cleaned_data['photo'])
            a.save()
            return render(request, 'result.html', {'a': a, 'acc': acc})
    else:
        form = image_data()
    return render(request, 'add_image.html', {'form': form})
def check_old(request):
    """List the current user's previously analysed photos, if any."""
    person_qs = Person.objects.filter(user=request.user)
    # Guard against a missing Person row: the old values()[0] lookup raised
    # IndexError on an empty queryset.  Debug prints removed.
    person = person_qs.first()
    if person is None:
        return render(request, 'empty.html', {})
    image_rows = photo_data.objects.filter(email1=person.email)
    if image_rows:
        return render(request, 'display_photo.html', {'data': image_rows})
    return render(request, 'empty.html', {})
def delete_pics(request):
    """Delete every analysed photo belonging to the current user."""
    person_qs = Person.objects.filter(user=request.user)
    # Guard against a missing Person row (old values()[0] raised IndexError
    # on an empty queryset); debug print removed.
    person = person_qs.first()
    name = person.name if person is not None else ""
    if person is not None:
        photo_data.objects.filter(email1=person.email).delete()
    return render(request, 'display_info.html', {'details': person_qs, 'name': name})
def register(request):
    """Create a Django auth user plus the linked Person profile row."""
    title = "Adds Users"
    form = RegistrationFormUser
    if request.method == "POST":
        form = RegistrationFormUser(request.POST)
        if form.is_valid():
            data = form.cleaned_data
            # The e-mail doubles as the username for the auth user;
            # create_user() hashes the password and saves, so the old
            # extra user.save() call was redundant.
            user = get_user_model().objects.create_user(
                data['email'], data['email'], data['password'])
            Person.objects.create(
                user=user,
                name=data['name'],
                email=data['email'],
                address=data['address'],
                age=data['age'],
                gender=data['gender'],
                profession=data['profession'],
            )
            message = "New USER is added"
            return render(request, 'stuff_added.html', {'display_text': message})
    return render(request, 'register.html', {'form': form, 'title': title})
| {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,499 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/sn.py | import tkinter as tk
from tkinter.filedialog import askopenfilename
import shutil
import os
import sys
from PIL import Image, ImageTk
def bact():
    """Return the advice text shown for a Bacterial Spot diagnosis."""
    heading = "The remedies for Bacterial Spot are:\n\n "
    advice = " Discard or destroy any affected plants. \n Do not compost them. \n Rotate yoour tomato plants yearly to prevent re-infection next year. \n Use copper fungicites"
    return heading + advice
def vir():
    """Return the advice text shown for a Yellow leaf curl virus diagnosis."""
    heading = "The remedies for Yellow leaf curl virus are: "
    advice = " Monitor the field, handpick diseased plants and bury them. \n Use sticky yellow plastic traps. \n Spray insecticides such as organophosphates, carbametes during the seedliing stage. \n Use copper fungicites"
    return heading + advice
def latebl():
    """Return the advice text shown for a Late Blight diagnosis."""
    heading = "The remedies for Late Blight are: "
    advice = " Monitor the field, remove and destroy infected leaves. \n Treat organically with copper spray. \n Use chemical fungicides,the best of which for tomatoes is chlorothalonil."
    return heading + advice
def analysis():
    """Classify the image(s) in testpicture/ with the saved tflearn CNN.

    Returns a (disease, treatment, accuracy_text) tuple; the "HEALTHY !!"
    defaults below are returned unchanged when the winning class is healthy.
    Side effects: saves verify_data.npy and empties both the testpicture/
    and detect/ working directories before returning.
    """
    dis = "HEALTHY !!"
    tret = "HEALTHY !!"
    acc = ""
    print("reached analysis() $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$\n\n")
    # Imports are local so the heavy ML stack only loads when an analysis
    # is actually requested.
    import cv2  # working with, mainly resizing, images
    import numpy as np  # dealing with arrays
    import os  # dealing with directories
    from random import shuffle  # mixing up or currently ordered data that might lead our network astray in training.
    from tqdm import tqdm  # a nice pretty percentage bar for tasks
    verify_dir = 'testpicture'
    IMG_SIZE = 50  # every image is resized to 50x50 before prediction
    LR = 1e-3
    MODEL_NAME = 'farming/healthyvsunhealthy-{}-{}.model'.format(LR, '2conv-basic')

    def process_verify_data():
        # Load every image in testpicture/ as a [pixels, base-name] pair.
        verifying_data = []
        for img in tqdm(os.listdir(verify_dir)):
            path = os.path.join(verify_dir, img)
            img_num = img.split('.')[0]
            img = cv2.imread(path, cv2.IMREAD_COLOR)
            img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
            verifying_data.append([np.array(img), img_num])
        np.save('verify_data.npy', verifying_data)
        return verifying_data

    verify_data = process_verify_data()
    import tflearn
    from tflearn.layers.conv import conv_2d, max_pool_2d
    from tflearn.layers.core import input_data, dropout, fully_connected
    from tflearn.layers.estimator import regression
    import tensorflow as tf
    # NOTE(review): reset_default_graph is TensorFlow 1.x API - this module
    # presumably requires a TF1-compatible install; confirm the environment.
    tf.reset_default_graph()
    # The topology below must match the one used at training time so the
    # checkpoint weights in MODEL_NAME can be loaded.
    convnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 128, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 32, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = conv_2d(convnet, 64, 3, activation='relu')
    convnet = max_pool_2d(convnet, 3)
    convnet = fully_connected(convnet, 1024, activation='relu')
    convnet = dropout(convnet, 0.8)
    # Four output classes: healthy / bacterial / viral / lateblight.
    convnet = fully_connected(convnet, 4, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=LR, loss='categorical_crossentropy', name='targets')
    model = tflearn.DNN(convnet, tensorboard_dir='log')
    if os.path.exists('{}.meta'.format(MODEL_NAME)):
        model.load(MODEL_NAME)
        print('model loaded!\n \n')
    import matplotlib.pyplot as plt
    fig = plt.figure()
    for num, data in enumerate(verify_data):
        img_num = data[1]
        img_data = data[0]
        y = fig.add_subplot(3, 4, num + 1)
        orig = img_data
        data = img_data.reshape(IMG_SIZE, IMG_SIZE, 3)
        pred = model.predict([data])
        model_out = pred[0]
        ec = np.argmax(pred[0])  # index of the winning class
        print("The Result is Predicted with : {:0.2f}".format(list(pred[0])[ec]*100), "percent success !\n \n")
        acc = "The Result is Predicted with : {:0.2f}".format(list(pred[0])[ec]*100)
        acc = acc + " percent success"
        if np.argmax(model_out) == 0:
            str_label = 'healthy'
        elif np.argmax(model_out) == 1:
            str_label = 'bacterial'
        elif np.argmax(model_out) == 2:
            str_label = 'viral'
        elif np.argmax(model_out) == 3:
            str_label = 'lateblight'
        if str_label == 'healthy':
            status = "HEALTHY"
        else:
            status = "UNHEALTHY"
        # Map the winning label onto a disease name and its remedy text.
        if str_label == 'bacterial':
            dis = "Bacterial Spot "
            tret = bact()
        elif str_label == 'viral':
            dis = "Yellow leaf curl "
            tret = vir()
        elif str_label == 'lateblight':
            dis = "Late Blight "
            tret = latebl()
        else:
            print("FOUND HEALTHY !!!!!!!!!!!!!!!!!!!\n")
    # Clear both working directories so the next upload starts clean.
    folder = verify_dir
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    folder = 'detect'
    for filename in os.listdir(folder):
        file_path = os.path.join(folder, filename)
        if os.path.isfile(file_path) or os.path.islink(file_path):
            os.unlink(file_path)
        elif os.path.isdir(file_path):
            shutil.rmtree(file_path)
    return (dis, tret, acc)
def openphoto():
    """Copy the pending upload from detect/ into testpicture/, then classify.

    Returns the (disease, treatment, accuracy_text) tuple from analysis().
    """
    fileName = "detect"
    # NOTE(review): fileName is re-joined onto itself every iteration, so
    # this only behaves correctly when detect/ holds a single file - with
    # several files the second pass would build a nested, nonexistent path.
    for file in os.listdir(fileName):
        print(file)
        fileName = os.path.join(fileName, file)
        print(fileName)
        dst = "testpicture"
        shutil.copy(fileName, dst)
    dis, tret, acc = analysis()
    return (dis, tret, acc)
| {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,500 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/urls.py | from django.conf.urls import url
from . import views
from django.contrib.auth import views as auth_views
from django.urls import path
app_name = 'farming'
urlpatterns=[
url(r'^$' , views.home , name='home'),
url(r'^display_info$' , views.display_info , name='display_info'),
url(r'^register$' , views.register , name='register'),
path('login/', auth_views.LoginView.as_view(), name='login1'),
url(r'^check_new$' , views.check_new , name='check_new'),
url(r'^del_pics$' , views.delete_pics , name='delete_pics'),
url(r'^check_new2$' , views.check_new2 , name='check_new2'),
url(r'^check_old$' , views.check_old , name='check_old'),
url(r'^logout$' , views.logout_view , name='logout_view'),
] | {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,501 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/admin.py | from django.contrib import admin
# Register your models here.
from .models import Person , photo_data
# Register your models here.
admin.site.register(Person)
admin.site.register(photo_data) | {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,502 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /website/urls.py |
from django.contrib import admin
from django.conf.urls import url,include
from farming import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
url(r'^admin/',admin.site.urls),
url(r'^', include('farming.urls')),
]+static(settings.MEDIA_URL,document_root=settings.MEDIA_ROOT) | {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,503 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/models.py | from django.db import models
from django.contrib.auth.models import Permission, User
# Create your models here.
class Person(models.Model):
    """Profile row with farmer details, linked 1:1 to the Django auth user."""
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=255)
    # The e-mail is the primary key, so it must be unique per person.
    email = models.EmailField(max_length=100, primary_key=True)
    address = models.CharField(max_length=255)
    age = models.IntegerField()
    # Integer-coded gender choices shown as labels in forms/admin.
    GENDER_CHOICES = (
        (0, 'male'),
        (1, 'female'),
        (2, 'not specified'),
    )
    gender = models.IntegerField(choices=GENDER_CHOICES)
    profession = models.CharField(max_length=255)
class photo_data(models.Model):
    """One analysed plant photo plus its diagnosis, keyed by owner e-mail."""
    # NOTE(review): email1 stores the Person e-mail as plain text rather
    # than a ForeignKey, so referential integrity is not enforced.
    email1 = models.CharField(max_length=255)
    disease = models.CharField(max_length=255)
    suggestion = models.CharField(max_length=1000)
    photo = models.ImageField(upload_to='photo_datas')
| {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,504 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/migrations/0001_initial.py | # Generated by Django 3.0 on 2020-08-08 20:42
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the photo_data table and
    # the Person table (e-mail primary key, 1:1 link to the auth user).

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='photo_data',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('email1', models.CharField(max_length=255)),
                ('disease', models.CharField(max_length=255)),
                ('suggestion', models.CharField(max_length=1000)),
                ('photo', models.ImageField(upload_to='photo_datas')),
            ],
        ),
        migrations.CreateModel(
            name='Person',
            fields=[
                ('name', models.CharField(max_length=255)),
                ('email', models.EmailField(max_length=100, primary_key=True, serialize=False)),
                ('address', models.CharField(max_length=255)),
                ('age', models.IntegerField()),
                ('gender', models.IntegerField(choices=[(0, 'male'), (1, 'female'), (2, 'not specified')])),
                ('profession', models.CharField(max_length=255)),
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,505 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /farming/forms.py | from django import forms
from django.contrib.auth.models import User
from .models import Person,photo_data
class RegistrationFormUser(forms.ModelForm):
    """Registration form: the Person fields plus a masked password input."""
    # The password is a plain form field (not on the Person model); the
    # register view passes it to create_user(), which hashes it.
    password = forms.CharField(max_length=32, widget=forms.PasswordInput)

    class Meta:
        model = Person
        fields = ['name', 'email', 'address', 'age', 'gender', 'password', 'profession']
class image_data(forms.ModelForm):
class Meta:
model = photo_data
fields = ['photo'] | {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,506 | jagdeep227/Web-App-for-plant-Disease-Detection-in-Django | refs/heads/master | /aaa.py | from farming import sn
print("heyyyy")
sn.openphoto | {"/farming/views.py": ["/farming/models.py", "/farming/forms.py"], "/farming/admin.py": ["/farming/models.py"], "/farming/forms.py": ["/farming/models.py"]} |
78,517 | miyou995/marchesa | refs/heads/master | /core/migrations/0013_alter_product_couleur.py | # Generated by Django 3.2.5 on 2021-08-09 14:19
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: switches Product.couleur to a ManyToMany relation
    # onto atributes.Couleur.

    dependencies = [
        ('atributes', '0001_initial'),
        ('core', '0012_alter_photos_big_slide'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='couleur',
            field=models.ManyToManyField(related_name='couleur', to='atributes.Couleur'),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,518 | miyou995/marchesa | refs/heads/master | /core/views.py | from django.shortcuts import get_object_or_404, render
from django.http import JsonResponse
from .forms import ContactForm
from delivery.models import Wilaya, Commune
from django.views.generic import TemplateView, DetailView, ListView, CreateView
from .models import Product, Category, SubCategory, Photos
from cart.forms import CartAddProductForm
class IndexView(TemplateView):
    """Landing page: top products, three categories and the hero slides."""
    template_name = "index.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # Featured products are those both active and flagged as top.
        ctx["products"] = Product.objects.filter(actif=True, top=True)
        ctx["top_3"] = Category.objects.filter(actif=True)[:3]
        # One big hero image and up to two small side slides.
        ctx["home_slide"] = Photos.objects.filter(actif=True, is_big=True)[:1]
        ctx["small_slide"] = Photos.objects.filter(actif=True, is_small=True)[:2]
        return ctx
# STATIC - informational pages with no view logic.
class ContactView(TemplateView):
    """Static contact page."""
    template_name = "Contact.html"


class AboutView(TemplateView):
    """Static about page."""
    template_name = "about.html"


# PAIEMENT - static payment-information pages.
class VirementBancaireView(TemplateView):
    """Bank-transfer payment information."""
    template_name = "paiement/virement-bancaire.html"


class CarteBancaireView(TemplateView):
    """Card payment information."""
    template_name = "paiement/carte-bancaire.html"


class PaiementView(TemplateView):
    """General payment information."""
    template_name = "paiement/paiement.html"


class PaiementEspecesView(TemplateView):
    """Cash payment information."""
    template_name = "paiement/paiement-especes.html"


# LIVRAISON - static delivery-information pages.
class EchangeView(TemplateView):
    """Exchange policy page."""
    template_name = "livraison/echange.html"


class LivraisonView(TemplateView):
    """Delivery information page."""
    template_name = "livraison/livraison.html"


class RetourView(TemplateView):
    """Returns policy page."""
    template_name = "livraison/retours.html"
class CategoryProductsView(ListView):
    """Active products of one category or sub-category, resolved from the slug."""
    context_object_name = 'products'
    model = Product
    paginate_by = 15
    template_name = "products.html"

    def get_queryset(self, *args, **kwargs):
        products = Product.objects.filter(actif=True)
        # The slug may name either a Category or a SubCategory.  The old
        # implementation wrapped get_object_or_404 in a bare `except:`,
        # which swallowed every exception (including programming errors).
        # Probing with filter(...).first() keeps the same outcome - a 404
        # only when neither model matches - without a bare except.
        category = Category.objects.filter(slug=self.kwargs['slug']).first()
        if category is not None:
            return products.filter(sous_category__category=category)
        sub_category = get_object_or_404(SubCategory, slug=self.kwargs['slug'])
        return products.filter(sous_category=sub_category)

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context["categories"] = Category.objects.all()
        context["sous_categories"] = SubCategory.objects.all()
        return context
class ProductDetailView(DetailView):
    """Product page with four random related products and the wilaya list."""
    model = Product
    context_object_name = 'product'
    template_name = "product-detail.html"

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        # order_by('?') asks the database for a random sample.
        ctx["related"] = Product.objects.all().order_by('?')[:4]
        ctx["wilayas"] = Wilaya.objects.all().order_by('name')
        return ctx
class ProductsView(ListView):
    """Product catalogue with optional GET filters.

    Recognised query parameters:
      * ``min`` / ``max`` — price range (the filter fires when ``max`` is
        present; ``min`` is passed through as the lower bound, matching
        the original behaviour).
      * ``new`` / ``top`` — restrict to flagged products.
      * ``q``             — fuzzy name search.

    NOTE(review): the flag filters use ``available=True`` while other
    views in this module filter on ``actif`` — confirm which field the
    current Product model actually defines.
    """
    context_object_name = 'products'
    model = Product
    paginate_by = 15
    template_name = "products.html"

    def get_queryset(self):
        params = self.request.GET
        query = params.get('q')
        price_min = params.get('min')  # renamed: no longer shadows builtins
        price_max = params.get('max')
        flag_new = params.get('new')
        flag_top = params.get('top')

        if price_max or flag_new or flag_top:
            # Chain the filters incrementally instead of enumerating every
            # parameter combination, as the previous if/elif ladder did.
            qs = Product.objects.filter(available=True)
            if price_max:
                qs = qs.filter(price__range=[price_min, price_max])
            if flag_new:
                qs = qs.filter(new=True)
            if flag_top:
                qs = qs.filter(top=True)
            return qs
        if query:
            return self._search(query)
        return Product.objects.all()

    def _search(self, query):
        """Fuzzy name search used by the ``q`` parameter.

        The term is split into 2-character chunks; the first chunk is
        AND-combined with each following chunk, falling back to OR when
        the AND match comes back empty.  Debug prints were removed.
        NOTE(review): only the last chunk's queryset survives the loop —
        preserved as-is from the original; confirm it is intentional.
        """
        if len(query) <= 2:
            return Product.objects.filter(name__icontains=query)
        chunks = [query[i:i + 2] for i in range(0, len(query), 2)]
        head, rest = chunks[0], chunks[1:]
        results = Product.objects.none()
        for part in rest:
            results = Product.objects.filter(
                Q(name__icontains=head) & Q(name__icontains=part)
            )
            if not len(results):
                results = Product.objects.filter(
                    Q(name__icontains=head) | Q(name__icontains=part)
                )
        return results

    def get_context_data(self, **kwargs):
        """Add the sub-category list for the sidebar."""
        context = super().get_context_data(**kwargs)
        context["sous_categories"] = SubCategory.objects.all()
        return context
class ContactView(CreateView):
    """Contact form page.

    NOTE(review): this definition shadows the earlier
    ``ContactView(TemplateView)`` in this module.  Also note the success
    path renders ``other/contact.html`` while failures render
    ``contact.html`` — confirm both templates exist.
    """
    template_name = 'contact.html'
    form_class = ContactForm

    def post(self, request, *args, **kwargs):
        """Validate and persist the contact message, re-rendering the page
        with a success or failure flash message (messages are in French).

        The previous no-op get_context_data override, the debug prints and
        the unreachable trailing return were removed.
        """
        form = ContactForm(request.POST)
        try:
            if form.is_valid():
                form.save()
                return render(request, 'other/contact.html', {
                    'message': 'Votre message a bien été envoyé!',
                    'success': True,
                })
        except Exception:
            # Persisting is best-effort: fall through to the error page
            # instead of surfacing a 500 (the original used a bare except).
            pass
        return render(request, 'contact.html', {
            'message': 'Une erreur est survenue, veuillez réessayer.',
            'failure': True,
        })
# Create your views here.
def get_json_view(request):
    """Echo selected request metadata (headers and path) back as JSON."""
    payload = {
        'received_headers': dict(request.headers.items()),
        'path': request.path,
    }
    return JsonResponse(payload)
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,519 | miyou995/marchesa | refs/heads/master | /atributes/migrations/0001_initial.py | # Generated by Django 3.2.5 on 2021-08-02 09:14
import colorfield.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``atributes`` app
    (Collection, Couleur, Pointure, Taille).  Do not hand-edit —
    create a follow-up migration instead."""
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Collection',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('annee', models.CharField(max_length=50, verbose_name='Année')),
            ],
        ),
        migrations.CreateModel(
            name='Couleur',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='nom')),
                ('hex_value', colorfield.fields.ColorField(default='#FFFFFF', max_length=7, verbose_name='Valeur hexadécimale')),
            ],
        ),
        migrations.CreateModel(
            name='Pointure',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='nom')),
            ],
        ),
        migrations.CreateModel(
            name='Taille',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='nom')),
            ],
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,520 | miyou995/marchesa | refs/heads/master | /core/models.py | from django.db import models
from django.utils.text import slugify
# Create your models here.
from atributes.models import Collection, Taille, Pointure, Couleur
from django.db import models
from django.urls import reverse
from tinymce.widgets import TinyMCE
from tinymce import models as tinymce_models
# Create your models here.
from django.utils.translation import gettext_lazy as _
# Product status choices: (db value, translated label).
STATUS_PRODUIT = (
    ('N', _('Nouveau')),
    ('R', _('Rupture')),
    ('P', _('Promotion')),
)
class Category(models.Model):
    """Top-level product category."""
    name = models.CharField(max_length=150, verbose_name='Nom')
    slug = models.SlugField(max_length=150, unique=True, verbose_name='URL')
    actif = models.BooleanField(verbose_name='actif', default=True)

    class Meta:
        ordering = ('id',)
        verbose_name = 'Catégorie'
        verbose_name_plural = '1. Categories'

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Persist the category, deriving ``slug`` from ``name`` + pk.

        On the very first save ``self.id`` is still ``None``, which
        previously produced slugs like ``"name-none"`` (and unique-slug
        clashes for same-named rows).  Save once to obtain a pk, then
        save again with the final slug.
        """
        if self.id is None:
            super(Category, self).save(*args, **kwargs)
            # The follow-up save must be an UPDATE of the row just created.
            args = ()
            kwargs.pop('force_insert', None)
        self.slug = slugify(self.name + '-' + str(self.id))
        return super(Category, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse("core:prod-by-cat", args=[self.slug])
class SubCategory(models.Model):
    """Second-level category, attached to a parent Category."""
    name = models.CharField(max_length=150, verbose_name='Nom')
    slug = models.SlugField(max_length=150, unique=True, verbose_name='URL')
    actif = models.BooleanField(verbose_name='actif', default=True)
    category = models.ForeignKey(Category, verbose_name="Catégorie", related_name="sub_categories", on_delete=models.CASCADE)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('id',)
        verbose_name = 'Sous Catégorie'
        verbose_name_plural = '2. Sous Catégorie'

    def save(self, *args, **kwargs):
        """Persist the row, deriving ``slug`` from ``name`` + pk.

        On the first save ``self.id`` is still ``None``, which previously
        produced slugs ending in ``"-none"``; save once to obtain a pk,
        then save again with the final slug.
        """
        if self.id is None:
            super(SubCategory, self).save(*args, **kwargs)
            # The follow-up save must be an UPDATE of the row just created.
            args = ()
            kwargs.pop('force_insert', None)
        self.slug = slugify(self.name + '-' + str(self.id))
        return super(SubCategory, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse("core:prod-by-sub-cat", args=[self.slug])
class Product(models.Model):
    """A sellable product with pricing, attributes and gallery photos."""
    name = models.CharField(max_length=150, verbose_name='Nom')
    slug = models.SlugField(max_length=150, unique=True, verbose_name='URL')
    sous_category = models.ForeignKey(SubCategory, verbose_name="Sous Catégorie", related_name="products", on_delete=models.CASCADE)
    description = tinymce_models.HTMLField(verbose_name='Déscription', blank=True, null=True)
    price = models.DecimalField(max_digits=10, decimal_places=2, default=1)
    old_price = models.DecimalField(max_digits=10, decimal_places=2, verbose_name="Ancien prix", blank=True, null=True)
    collection = models.ForeignKey(Collection, on_delete=models.CASCADE, related_name="collections", blank=True, null=True)
    taille = models.ManyToManyField(Taille, blank=True, related_name="tailles")
    pointure = models.ManyToManyField(Pointure, blank=True, related_name="pointures")
    couleur = models.ManyToManyField(Couleur, related_name="couleurs")
    actif = models.BooleanField(verbose_name='actif', default=True)
    new = models.BooleanField(verbose_name='Nouveau', default=True)
    top = models.BooleanField(verbose_name='Meilleur vente', default=True)
    status = models.CharField(choices=STATUS_PRODUIT, max_length=1, default='N', blank=True, null=True, verbose_name='Status')
    # NOTE(review): there is no ``photo5`` — the gap is kept intentionally
    # to stay compatible with the existing database schema.
    photo1 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo2 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo3 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo4 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo6 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo7 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo8 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo9 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    photo10 = models.ImageField(upload_to='images/produits', blank=True, null=True)
    created = models.DateTimeField(verbose_name='Date de Création', auto_now_add=True)
    updated = models.DateTimeField(verbose_name='Date de dernière mise à jour', auto_now=True)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ('id',)
        verbose_name = 'Produit'
        verbose_name_plural = '3. Produits'

    def save(self, *args, **kwargs):
        """Persist the product, deriving ``slug`` from ``name`` + pk.

        On the first save ``self.id`` is still ``None``, which previously
        produced slugs ending in ``"-none"``; save once to obtain a pk,
        then save again with the final slug.
        """
        if self.id is None:
            super(Product, self).save(*args, **kwargs)
            # The follow-up save must be an UPDATE of the row just created.
            args = ()
            kwargs.pop('force_insert', None)
        self.slug = slugify(self.name + '-' + str(self.id))
        return super(Product, self).save(*args, **kwargs)

    def get_absolute_url(self):
        return reverse("core:productDetail", args=[self.slug])
class ContactForm(models.Model):
    """Stored contact-page message.

    NOTE(review): despite its name, this is a *model* that persists
    submitted messages, not a ``django.forms`` class — confirm it is
    distinct from the ContactForm used as ``form_class`` in the views.
    """
    name = models.CharField(verbose_name=_('Nom complet'), max_length=100)
    phone = models.CharField(verbose_name=_("Téléphone"), max_length=25)
    email = models.EmailField(verbose_name=_("Email"), null=True, blank=True)
    subject = models.CharField(verbose_name=_("Sujet"), max_length=50, blank=True)
    message = models.TextField(verbose_name=_("Message"), blank=True, null=True)
    date_sent = models.DateTimeField(verbose_name=_("Date"), auto_now_add=True)

    def __str__(self):
        return self.name
class Photos(models.Model):
    """Home-page slide image: a photo serves as the hero slide when
    ``is_big`` is set and as a side slide when ``is_small`` is set."""
    big_slide = models.ImageField(upload_to='images/slides', height_field=None, width_field=None, max_length=None, verbose_name='URL image ')
    actif = models.BooleanField(verbose_name='Active', default=False)
    is_big = models.BooleanField(verbose_name='Grande photo (1920 x 570)', default=False)
    is_small = models.BooleanField(verbose_name='Medium photo (720 x 540)', default=False)
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,521 | miyou995/marchesa | refs/heads/master | /core/migrations/0001_initial.py | # Generated by Django 3.2.5 on 2021-08-02 09:14
from django.db import migrations, models
import django.db.models.deletion
import tinymce.models
class Migration(migrations.Migration):
    """Auto-generated initial schema for the ``core`` app (Category,
    SubCategory, Product).  Do not hand-edit."""
    initial = True

    dependencies = [
        ('atributes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='Nom')),
                ('slug', models.SlugField(max_length=150, unique=True, verbose_name='URL')),
            ],
            options={
                'verbose_name': 'Catégorie',
                'verbose_name_plural': '1. Categories',
                'ordering': ('id',),
            },
        ),
        migrations.CreateModel(
            name='SubCategory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='Nom')),
                ('slug', models.SlugField(max_length=150, unique=True, verbose_name='URL')),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sub_categories', to='core.category', verbose_name='Catégorie')),
            ],
            options={
                'verbose_name': 'Sous Catégorie',
                'verbose_name_plural': '3. Sous Catégorie',
                'ordering': ('id',),
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=150, verbose_name='Nom')),
                ('slug', models.SlugField(max_length=150, unique=True, verbose_name='URL')),
                ('description', tinymce.models.HTMLField(blank=True, null=True, verbose_name='Déscription')),
                ('price', models.DecimalField(decimal_places=2, default=1, max_digits=10)),
                ('old_price', models.DecimalField(blank=True, decimal_places=2, max_digits=10, null=True, verbose_name='Ancien prix')),
                ('available', models.BooleanField(default=True, verbose_name='Disponible')),
                ('new', models.BooleanField(default=True, verbose_name='Nouveau')),
                ('top', models.BooleanField(default=True, verbose_name='Meilleur vente')),
                ('status', models.CharField(blank=True, choices=[('N', 'Nouveau'), ('R', 'Rupture'), ('P', 'Promotion')], default='N', max_length=1, null=True, verbose_name='Status')),
                ('photo1', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo2', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo3', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo4', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo6', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo7', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo8', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo9', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('photo10', models.ImageField(blank=True, null=True, upload_to='images/produits')),
                ('created', models.DateTimeField(auto_now_add=True, verbose_name='Date de Création')),
                ('updated', models.DateTimeField(auto_now=True, verbose_name='Date de dernière mise à jour')),
                ('collection', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='atributes.collection')),
                ('couleur', models.ManyToManyField(to='atributes.Couleur')),
                ('pointure', models.ManyToManyField(to='atributes.Pointure')),
                ('sous_category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='products', to='core.subcategory', verbose_name='Sous Catégorie')),
                ('taille', models.ManyToManyField(to='atributes.Taille')),
            ],
            options={
                'verbose_name': 'Produit',
                'verbose_name_plural': '4. Produits',
                'ordering': ('id',),
            },
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,522 | miyou995/marchesa | refs/heads/master | /core/migrations/0010_auto_20210803_1530.py | # Generated by Django 3.2.5 on 2021-08-03 14:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated: relax Product attribute relations (blank=True,
    related_name fixes).  Do not hand-edit."""
    dependencies = [
        ('atributes', '0001_initial'),
        ('core', '0009_auto_20210803_1511'),
    ]

    operations = [
        migrations.AlterField(
            model_name='product',
            name='collection',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='collections', to='atributes.collection'),
        ),
        migrations.AlterField(
            model_name='product',
            name='couleur',
            field=models.ManyToManyField(blank=True, related_name='couleur', to='atributes.Couleur'),
        ),
        migrations.AlterField(
            model_name='product',
            name='pointure',
            field=models.ManyToManyField(blank=True, related_name='pointure', to='atributes.Pointure'),
        ),
        migrations.AlterField(
            model_name='product',
            name='taille',
            field=models.ManyToManyField(blank=True, related_name='tailles', to='atributes.Taille'),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,523 | miyou995/marchesa | refs/heads/master | /cart/context_processors.py | from .cart import Cart
def cart(request):
    """Context processor exposing the session cart as ``{{ cart }}``.

    Django invokes context processors with the request as the single
    positional argument, so correcting the old misspelled parameter name
    (``resquest``) is safe for all callers.
    """
    return {'cart': Cart(request)}
78,524 | miyou995/marchesa | refs/heads/master | /core/migrations/0007_auto_20210803_1009.py | # Generated by Django 3.2.5 on 2021-08-03 09:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add ``actif`` flags to Category and SubCategory.
    Do not hand-edit."""
    dependencies = [
        ('core', '0006_alter_product_collection'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='actif',
            field=models.BooleanField(default=True, verbose_name='actif'),
        ),
        migrations.AddField(
            model_name='subcategory',
            name='actif',
            field=models.BooleanField(default=True, verbose_name='actif'),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,525 | miyou995/marchesa | refs/heads/master | /core/admin.py | from django.contrib import admin
from .models import Product, SubCategory, Category, ContactForm, Photos
from django.contrib.auth.models import Group, User
from django.utils.html import format_html
admin.autodiscover()
# Hide the collapsible admin sidebar and drop the stock auth Group admin,
# which this project does not use.
admin.site.enable_nav_sidebar = False
admin.site.unregister(Group)
class SubCategoryLinesAdmin(admin.TabularInline):
    """Read-only inline listing of a category's sub-categories."""
    model = SubCategory
    readonly_fields = ('name', 'slug')
class CategoryAdmin(admin.ModelAdmin):
    """Admin for Category with inline editing of sub-categories."""
    list_display = ('id', 'name', 'actif')
    prepopulated_fields = {"slug": ("name",)}
    list_display_links = ('id', 'name')
    list_per_page = 40
    list_editable = ['actif']
    search_fields = ('id', 'name',)
    # The original ``exlude = ['slug']`` was a misspelling of ``exclude``
    # and therefore silently ignored by Django.  It is dropped rather than
    # corrected: excluding ``slug`` would clash with ``prepopulated_fields``,
    # which requires the field to be present on the form.
    inlines = [SubCategoryLinesAdmin, ]
class SubCategoryAdmin(admin.ModelAdmin):
    """Admin for SubCategory with in-list editing of parent and flag."""
    list_display = ('id', 'name', 'category', 'actif')
    prepopulated_fields = {"slug": ("name",)}
    list_display_links = ('id', 'name')
    list_per_page = 40
    list_filter = ('category',)
    list_editable = ['category', 'actif']
    search_fields = ('name',)
    # Dropped the misspelled ``exlude = ['slug']``: it was silently ignored
    # by Django, and a corrected ``exclude`` would conflict with
    # ``prepopulated_fields``, which needs the field on the form.
class ProductAdmin(admin.ModelAdmin):
    """Admin for Product with heavy in-list editing and save-as-copy."""
    list_display = ('id', 'name', 'sous_category', 'old_price', 'price', 'new', 'top', 'actif', 'collection', 'status')
    prepopulated_fields = {"slug": ("name",)}
    list_display_links = ('id', 'name')
    list_per_page = 40
    list_filter = ('name', 'sous_category', 'price', 'new')
    list_editable = ['sous_category', 'price', 'new', 'top', 'actif', 'collection', 'old_price', 'status']
    search_fields = ('name',)
    # Dropped the misspelled ``exlude = ['slug']``: it was silently ignored
    # by Django, and a corrected ``exclude`` would conflict with
    # ``prepopulated_fields``, which needs the field on the form.
    save_as = True
# Contact
class ContactFormAdmin(admin.ModelAdmin):
    """Read-mostly admin for stored contact messages."""
    list_display = ('id', 'name', 'phone', 'email', 'subject', 'date_sent')
    list_display_links = ('id',)
    list_per_page = 40
    list_filter = ('name', 'phone', 'email',)
    search_fields = ('id', 'phone', 'email')
class PhotosAdmin(admin.ModelAdmin):
    """Admin for slide photos with an inline thumbnail preview column."""

    def image_tag(self):
        # NOTE(review): although written like a method, this function is
        # referenced directly (not via a name string) in ``list_display``,
        # so Django calls it with the *Photos instance* as its only
        # argument — ``self`` here is the row object, not the ModelAdmin.
        # It will raise if ``big_slide`` holds no file; confirm acceptable.
        return format_html('<img src="{}" height="150" />'.format(self.big_slide.url))
    image_tag.short_description = 'Image'
    # ``allow_tags`` has been ignored since Django 2.0; format_html already
    # marks the output safe — kept only to avoid any behavioural change.
    image_tag.allow_tags = True

    list_display = ('id', image_tag, 'actif', 'is_big', 'is_small', 'big_slide')
    list_editable = ['actif', 'is_big', 'is_small', 'big_slide']
    list_display_links = ('id', image_tag)
    list_per_page = 40
# Attach each ModelAdmin to the default admin site.
admin.site.register(Product, ProductAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(SubCategory, SubCategoryAdmin)
admin.site.register(ContactForm, ContactFormAdmin)
admin.site.register(Photos, PhotosAdmin)
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,526 | miyou995/marchesa | refs/heads/master | /core/migrations/0002_alter_product_options.py | # Generated by Django 3.2.5 on 2021-08-02 09:16
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: update Product verbose-name options.  Do not
    hand-edit."""
    dependencies = [
        ('core', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='product',
            options={'ordering': ('id',), 'verbose_name': 'Produit', 'verbose_name_plural': '3. Produits'},
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,527 | miyou995/marchesa | refs/heads/master | /core/migrations/0012_alter_photos_big_slide.py | # Generated by Django 3.2.5 on 2021-08-04 10:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add a verbose name to Photos.big_slide.  Do not
    hand-edit."""
    dependencies = [
        ('core', '0011_photos'),
    ]

    operations = [
        migrations.AlterField(
            model_name='photos',
            name='big_slide',
            field=models.ImageField(upload_to='images/slides', verbose_name='URL image '),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,528 | miyou995/marchesa | refs/heads/master | /core/migrations/0011_photos.py | # Generated by Django 3.2.5 on 2021-08-04 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the Photos slide model.  Do not hand-edit."""
    dependencies = [
        ('core', '0010_auto_20210803_1530'),
    ]

    operations = [
        migrations.CreateModel(
            name='Photos',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('big_slide', models.ImageField(upload_to='images/slides')),
                ('actif', models.BooleanField(default=False, verbose_name='Active')),
                ('is_big', models.BooleanField(default=False, verbose_name='Grande photo (1920 x 570)')),
                ('is_small', models.BooleanField(default=False, verbose_name='Medium photo (720 x 540)')),
            ],
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,529 | miyou995/marchesa | refs/heads/master | /core/migrations/0003_alter_subcategory_options.py | # Generated by Django 3.2.5 on 2021-08-02 09:17
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: update SubCategory verbose-name options.  Do not
    hand-edit."""
    dependencies = [
        ('core', '0002_alter_product_options'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='subcategory',
            options={'ordering': ('id',), 'verbose_name': 'Sous Catégorie', 'verbose_name_plural': '2. Sous Catégorie'},
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,530 | miyou995/marchesa | refs/heads/master | /atributes/models.py | from django.db import models
from colorfield.fields import ColorField
# Create your models here.
class Collection(models.Model):
    """Product collection, identified by its year label."""
    annee = models.CharField(verbose_name="Année", max_length=50)

    def __str__(self):
        return self.annee
class Taille(models.Model):
    """Clothing size attribute (e.g. S, M, L)."""
    name = models.CharField(verbose_name="nom", max_length=50)

    def __str__(self):
        return self.name
class Pointure(models.Model):
    """Shoe size attribute."""
    name = models.CharField(verbose_name="nom", max_length=50)

    def __str__(self):
        return self.name
class Couleur(models.Model):
    """Colour attribute with a display name and its hex value."""
    name = models.CharField(verbose_name="nom", max_length=50)
    # ColorField renders a colour picker in the admin; stores "#RRGGBB".
    hex_value = ColorField(max_length=7, verbose_name="Valeur hexadécimale")

    def __str__(self):
        return self.name
78,531 | miyou995/marchesa | refs/heads/master | /core/migrations/0008_contactform.py | # Generated by Django 3.2.5 on 2021-08-03 14:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: create the ContactForm message model.  Do not
    hand-edit."""
    dependencies = [
        ('core', '0007_auto_20210803_1009'),
    ]

    operations = [
        migrations.CreateModel(
            name='ContactForm',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='Nom complet')),
                ('phone', models.CharField(max_length=25, verbose_name='Téléphone')),
                ('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='Email')),
                ('subject', models.CharField(max_length=50, verbose_name='Sujet')),
                ('message', models.TextField(verbose_name='Message')),
                ('date_sent', models.DateTimeField(auto_now_add=True, verbose_name='Date')),
            ],
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,532 | miyou995/marchesa | refs/heads/master | /core/context_processors.py | from django.shortcuts import render , get_object_or_404
from .models import Category
def category(request):
    """Context processor: expose active categories to every template."""
    return {'categories': Category.objects.filter(actif=True)}
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,533 | miyou995/marchesa | refs/heads/master | /cart/views.py | from django.http.response import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404, reverse
from django.views.generic import TemplateView
from django.views.decorators.http import require_POST
from .cart import Cart
from core.models import Product
from delivery.models import Wilaya
from django.http import HttpResponse
from django.contrib.admin.views.decorators import staff_member_required
from .forms import CartAddProductForm
from django.template.loader import render_to_string
from django.contrib import messages
from django.core import serializers
# class CheckoutView(TemplateView):
# template_name = "checkout.html"
def cart_detail(request):
    """Render the cart page, attaching a quantity-update form to each line.

    Each item gets a ``CartAddProductForm`` pre-filled with its current
    quantity and ``override=True`` so a submitted form replaces the
    quantity instead of adding to it.  Debug prints and dead commented
    code were removed.
    """
    cart = Cart(request)
    for item in cart:
        item['update_quantity_form'] = CartAddProductForm(
            initial={'quantity': item['quantity'], 'override': True}
        )
    return render(request, 'cart.html', {'cart': cart})
def cart_add_one_product(request):
    """Add one unit of a product to the session cart (AJAX GET endpoint).

    NOTE(review): this view looks experimental — it is full of debug
    prints, relies on bare ``except`` blocks (catching NameError) to pick
    which ``cart.add`` signature to call, and the final ``JsonResponse``
    serialises a single model instance with ``serializers.serialize``,
    which expects an iterable — confirm whether this endpoint actually
    works before cleaning it up.
    """
    cart = Cart(request)
    # product_id = request.POST['product_id']
    # Get the product that we want to add
    p_id = request.GET.get('product_id')
    print('request', p_id)
    product = get_object_or_404(Product, id=p_id, actif=True)
    tailles = product.taille.all()
    colors = product.couleur.all()
    print('les couleurs', colors)
    pointures = product.pointure.all()
    # ``taille``/``pointure`` are only bound when the product has any —
    # the try/excepts further down depend on the resulting NameError.
    if tailles:
        taille = tailles[0]
    color = colors.first()
    print('les couleurs', color)
    if pointures:
        pointure = pointures[0]
    if product:
        quantity = 1
    try:
        print('pointure',pointure)
    except:
        pass
    try:
        print('taille', taille)
    except:
        pass
    try:
        print(color, 'taille, pointure')
    except:
        pass
    try:
        # Preferred variant: product sized by pointure.
        cart.add(
            product=product,
            quantity=quantity,
            pointure = pointure,
            color = color
        )
    except:
        try:
            # Fallback variant: product sized by taille.
            cart.add(
                product=product,
                quantity=quantity,
                taille = taille,
                color = color
            )
        except:
            # Last resort: colour only.
            cart.add(
                product=product,
                quantity=quantity,
                color = color
            )
    # if tailles:
    #     taille = tailles[0]
    #     cart.add(taille = taille)
    # else:
    #     taille = False
    # if colors:
    #     color = colors[0]
    #     cart.add(color = color)
    # else:
    #     color = False
    # if pointures:
    #     pointure = pointures[0]
    #     cart.add(pointure = pointure)
    # else:
    #     pointure = False
    return JsonResponse(serializers.serialize('json', color), safe=True)
    # else:
    #     return redirect('cart:cart_detail')
@require_POST
def cart_add(request, product_id):
    """Add ``product_id`` to the cart from the product-detail form (POST).

    NOTE(review): ``cd['override']``/``cd['taille']``/``cd['pointure']``/
    ``cd['color']`` are read unconditionally, yet the visible
    CartAddProductForm declares only ``quantity`` — if those fields are
    missing, the KeyError is swallowed by the bare ``except`` below and
    the user is bounced to the home page.  Also, an invalid form falls
    through without returning a response.  Confirm the real form fields
    before tightening this handler.
    """
    cart = Cart(request)
    product = get_object_or_404(Product, id=product_id)
    form = CartAddProductForm(request.POST)
    try:
        if form.is_valid():
            cd = form.cleaned_data
            print('cd', cd)
            cart.add(
                product=product,
                quantity=cd['quantity'],
                override_quantity=cd['override'],
                taille=cd['taille'],
                pointure=cd['pointure'],
                color =cd['color']
            )
            print('the Cart two', cart)
            return redirect('cart:cart_detail')
    except:
        return redirect('/')
@require_POST
def cart_update(request, product_id):
    """Replace a cart line's quantity with the POSTed value (POST only)."""
    basket = Cart(request)
    target = get_object_or_404(Product, id=product_id)
    new_quantity = int(request.POST.get('quantity'))
    basket.update(product=target, quantity=new_quantity)
    return redirect('cart:cart_detail')
@require_POST
def cart_remove(request, product_id):
    """Delete a product line from the session cart (POST only)."""
    basket = Cart(request)
    target = get_object_or_404(Product, id=product_id)
    basket.remove(target)
    return redirect('cart:cart_detail')
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,534 | miyou995/marchesa | refs/heads/master | /atributes/admin.py | from django.contrib import admin
from .models import Collection, Taille, Pointure, Couleur
@admin.register(Collection)
class CouponAdmin(admin.ModelAdmin):
    # NOTE(review): despite its name this is the *Collection* admin — the
    # class name looks like a copy/paste leftover.  Renaming is deferred
    # in case the name is imported elsewhere.
    list_display = ['id', 'annee']
@admin.register(Taille)
class TailleAdmin(admin.ModelAdmin):
    """Admin list for clothing sizes."""
    list_display = ['id', 'name']
@admin.register(Pointure)
class PointureAdmin(admin.ModelAdmin):
    """Admin list for shoe sizes."""
    list_display = ['id', 'name']
@admin.register(Couleur)
class CouleurAdmin(admin.ModelAdmin):
    """Admin list for colours including their hex value."""
    list_display = ['id', 'name', 'hex_value']
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,535 | miyou995/marchesa | refs/heads/master | /coupons/migrations/0002_alter_coupon_used.py | # Generated by Django 3.2.5 on 2021-08-10 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: change Coupon.used into a remaining-count integer.
    Do not hand-edit."""
    dependencies = [
        ('coupons', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='coupon',
            name='used',
            field=models.IntegerField(default=0, verbose_name='Coupons restant'),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,536 | miyou995/marchesa | refs/heads/master | /core/migrations/0005_auto_20210803_0857.py | # Generated by Django 3.2.5 on 2021-08-03 07:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make Product's couleur / pointure / taille
    many-to-many fields optional (blank=True). Do not hand-edit."""

    dependencies = [
        ('atributes', '0001_initial'),
        ('core', '0004_auto_20210802_1038'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='couleur',
            field=models.ManyToManyField(blank=True, to='atributes.Couleur'),
        ),
        migrations.AlterField(
            model_name='product',
            name='pointure',
            field=models.ManyToManyField(blank=True, to='atributes.Pointure'),
        ),
        migrations.AlterField(
            model_name='product',
            name='taille',
            field=models.ManyToManyField(blank=True, to='atributes.Taille'),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,537 | miyou995/marchesa | refs/heads/master | /core/migrations/0009_auto_20210803_1511.py | # Generated by Django 3.2.5 on 2021-08-03 14:11
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax ContactForm fields — message becomes
    optional (blank/null) and subject optional with max_length 50."""

    dependencies = [
        ('core', '0008_contactform'),
    ]
    operations = [
        migrations.AlterField(
            model_name='contactform',
            name='message',
            field=models.TextField(blank=True, null=True, verbose_name='Message'),
        ),
        migrations.AlterField(
            model_name='contactform',
            name='subject',
            field=models.CharField(blank=True, max_length=50, verbose_name='Sujet'),
        ),
    ]
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,538 | miyou995/marchesa | refs/heads/master | /cart/forms.py | from django import forms
from django.forms import NumberInput
# from django.core import validators
class CartAddProductForm(forms.Form):
    """Form for adding a product (with variant choices) to the cart."""
    # min quantity is 1; the widget's 'max': 20 attr is client-side only —
    # NOTE(review): no server-side upper bound is enforced; confirm intended
    quantity = forms.IntegerField( min_value=1, widget=NumberInput(attrs={'class': 'form-control text-center','value': 1, 'max':20 }))
    # hidden flag; presumably tells the cart view to overwrite the stored
    # quantity rather than add to it — verify against the cart view
    override = forms.BooleanField(required=False, initial=False, widget=forms.HiddenInput)
    # optional variant attributes (size / shoe size); color is mandatory
    taille = forms.CharField(max_length=20, required=False)
    pointure = forms.CharField(max_length=20, required=False)
    color = forms.CharField(max_length=20, required=True)
class CartAddProductQuantityForm(forms.Form):
    """Minimal form for updating the quantity of an item already in the cart."""
    quantity = forms.IntegerField(min_value=1)
| {"/core/views.py": ["/core/models.py", "/cart/forms.py"], "/core/models.py": ["/atributes/models.py"], "/core/admin.py": ["/core/models.py"], "/core/context_processors.py": ["/core/models.py"], "/cart/views.py": ["/core/models.py", "/cart/forms.py"], "/atributes/admin.py": ["/atributes/models.py"]} |
78,549 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /data_loader/TacredDataset.py | import torch
import torch.utils.data as data
from utils import constant
import json
import random
import numpy as np
class TacredDataset(data.Dataset):
    """
    TACRED relation extraction dataset.

    Loads examples from a JSON file, masks entity spans, maps every field to
    ids, and pre-chunks the examples into batches: ``__getitem__`` returns one
    whole *batch* of padded tensors, not a single example.
    """
    def __init__(self, filename, batch_size, opt, vocab, evaluation=False):
        """
        Load data from a json file, preprocess and prepare batches.

        :param filename: file path
        :param batch_size: int
        :param opt: options dict (uses 'lower' and 'word_dropout' here)
        :param vocab: Vocab object
        :param evaluation: boolean, False for training (enables shuffling and
            word dropout), True for evaluation
        """
        self.batch_size = batch_size
        self.opt = opt
        self.vocab = vocab
        self.eval = evaluation
        self.label2id = constant.LABEL_TO_ID
        with open(filename) as infile:
            data = json.load(infile)
        self.raw_data = data
        data = self.preprocess(data, vocab, opt)
        # shuffle for training only, so evaluation order stays deterministic
        if not evaluation:
            indices = list(range(len(data)))
            random.shuffle(indices)
            data = [data[i] for i in indices]
        self.id2label = dict([(v, k) for k, v in self.label2id.items()])
        # gold labels are collected *after* shuffling so they stay aligned
        # with the batch order produced by __getitem__
        self.labels = [self.id2label[d[-1]] for d in data]
        self.num_example = len(data)
        # chunk into batches; the last batch may be smaller than batch_size
        data = [data[i: i+batch_size] for i in range(0, len(data), batch_size)]
        self.data = data
        print('{} batches created for {}'.format(len(data), filename))

    def gold(self):
        """
        Return the gold labels (strings) in batch order.
        """
        return self.labels

    def __len__(self):
        # number of batches, not number of examples
        return len(self.data)

    def __getitem__(self, item):
        """
        Get one padded batch by index.

        :param item: int batch index
        :return: tuple (words, masks, pos, ner, deprel, head, subj_position,
            obj_position, subj_type, obj_type, relations, orig_idx); fields
            are length-sorted and orig_idx undoes the sorting
        :raises TypeError: if item is not an int
        :raises IndexError: if item is out of range
        """
        if not isinstance(item, int):
            raise TypeError
        # use >= so item == len(self.data) fails here with an explicit
        # IndexError instead of slipping past the guard to the list access
        if item < 0 or item >= len(self.data):
            raise IndexError
        batch = self.data[item]
        batch_size = len(batch)
        batch = list(zip(*batch))
        assert len(batch) == 10
        # sort all fields by token length for easy RNN packing later
        lens = [len(x) for x in batch[0]]
        batch, orig_idx = sort_all(batch, lens)
        # word dropout (training only)
        if not self.eval:
            words = [word_dropout(sent, self.opt['word_dropout']) for sent in batch[0]]
        else:
            words = batch[0]
        # convert to tensors
        words = get_long_tensor(words, batch_size)
        # masks are True where the token id is 0 — i.e. at padding positions,
        # assuming constant.PAD_ID == 0 (the fill used by get_long_tensor)
        masks = torch.eq(words, 0)
        pos = get_long_tensor(batch[1], batch_size)
        ner = get_long_tensor(batch[2], batch_size)
        deprel = get_long_tensor(batch[3], batch_size)
        head = get_long_tensor(batch[4], batch_size)
        subj_position = get_long_tensor(batch[5], batch_size)
        obj_position = get_long_tensor(batch[6], batch_size)
        subj_type = get_long_tensor(batch[7], batch_size)
        obj_type = get_long_tensor(batch[8], batch_size)
        relations = torch.LongTensor(batch[9])
        return (words, masks, pos, ner, deprel, head, subj_position, obj_position, subj_type, obj_type, relations, orig_idx)

    def __iter__(self):
        for i in range(self.__len__()):
            yield self.__getitem__(i)

    def preprocess(self, data, vocab, opt):
        """
        Convert raw examples into id tuples.

        :param data: list of raw example dicts
        :param vocab: Vocab object (uses vocab.WordsToIdx)
        :param opt: options dict
        :return: list of tuples (tokens, pos, ner, deprel, head,
            subj_positions, obj_positions, subj_type, obj_type, relation)
        """
        processed = []
        for d in data:
            tokens = d['token']
            if opt['lower']:
                tokens = [t.lower() for t in tokens]
            # replace each entity span by its typed mask token, e.g. SUBJ-PERSON
            ss, se = d['subj_start'], d['subj_end']
            os, oe = d['obj_start'], d['obj_end']
            tokens[ss: se + 1] = ['SUBJ-' + d['subj_type']] * (se - ss + 1)
            tokens[os: oe + 1] = ['OBJ-' + d['obj_type']] * (oe - os + 1)
            tokens = map_to_ids(tokens, vocab.WordsToIdx)
            pos = map_to_ids(d['stanford_pos'], constant.POS_TO_ID)
            ner = map_to_ids(d['stanford_ner'], constant.NER_TO_ID)
            deprel = map_to_ids(d['stanford_deprel'], constant.DEPREL_TO_ID)
            head = list(map(int, d['stanford_head']))
            l = len(tokens)
            subj_positions = get_position(d['subj_start'], d['subj_end'], l)
            obj_positions = get_position(d['obj_start'], d['obj_end'], l)
            subj_type = [constant.SUBJ_NER_TO_ID[d['subj_type']]]
            obj_type = [constant.OBJ_NER_TO_ID[d['obj_type']]]
            relation = self.label2id[d['relation']]
            processed += [(tokens, pos, ner, deprel, head, subj_positions, obj_positions, subj_type, obj_type, relation)]
        return processed
def map_to_ids(tokens, vocab):
    """
    Map tokens to ids; out-of-vocabulary tokens fall back to constant.UNK_ID.

    :param tokens: list of tokens
    :param vocab: dict mapping token -> id
    :return: list of ids
    """
    ids = []
    for token in tokens:
        # EAFP: a single dict lookup instead of `in` followed by indexing
        try:
            ids.append(vocab[token])
        except KeyError:
            ids.append(constant.UNK_ID)
    return ids
def get_position(start, end, length):
    """
    Build relative-position markers for an entity mention.

    Positions inside [start, end] get 0; positions before the mention get
    negative offsets and positions after it get positive offsets.

    :param start: mention start index (0-based)
    :param end: mention end index (inclusive)
    :param length: total number of tokens
    :return: list of ints of length `length`
    """
    left = [offset for offset in range(-start, 0)]
    span = [0 for _ in range(start, end + 1)]
    right = [offset for offset in range(1, length - end)]
    return left + span + right
def word_dropout(tokens, dropout):
    """
    Randomly replace token ids with <UNK> for regularization.

    :param tokens: list of token ids
    :param dropout: float in [0, 1], replacement probability
    :return: list of token ids, some replaced by constant.UNK_ID
    """
    dropped = []
    for tok in tokens:
        # the RNG is consulted only for tokens that are not already <UNK>,
        # matching the original short-circuit evaluation order
        if tok != constant.UNK_ID and np.random.random() < dropout:
            dropped.append(constant.UNK_ID)
        else:
            dropped.append(tok)
    return dropped
def sort_all(batch, lens):
    """
    Sort every field of *batch* by descending length, keeping the original
    indices so predictions can be un-sorted later.

    :param batch: sequence of parallel fields (each a list over examples)
    :param lens: list of lengths, one per example
    :return: (sorted fields as list of lists, original indices)
    """
    # prepend (length, original_index) to each row: sorting then orders by
    # length first and breaks ties deterministically on the index
    rows = list(zip(lens, range(len(lens)), *batch))
    rows.sort(reverse=True)
    columns = [list(col) for col in zip(*rows)]
    return columns[2:], columns[1]
def get_long_tensor(token_list, batch_size):
    """
    Convert a list of id sequences into a padded LongTensor.

    Used for tokens as well as POS, dependency, position fields, etc.

    :param token_list: list of lists of ints
    :param batch_size: int, number of rows in the output
    :return: LongTensor of shape (batch_size, longest sequence length),
        padded with constant.PAD_ID
    """
    longest = max(len(seq) for seq in token_list)
    padded = torch.full((batch_size, longest), constant.PAD_ID, dtype=torch.long)
    # copy each sequence into the left part of its row; the tail stays PAD
    for row, seq in enumerate(token_list):
        padded[row, :len(seq)] = torch.LongTensor(seq)
    return padded
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,550 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /modules/GCNRelationModel.py | import torch
import torch.nn as nn
from utils import constant
from utils import torch_utils
from .gcn import GCN
import numpy as np
from .tree_utils import head_to_tree, tree_to_adj
from torch.autograd import Variable
class GCNRelationModel(nn.Module):
    """GCN-based relation extraction model: embeddings -> (optional RNN inside
    GCN) -> GCN over the pruned dependency tree -> pooling -> MLP features."""
    def __init__(self, opt, emb_matrix=None):
        """
        :param opt: hyperparameter dict
        :param emb_matrix: pretrained embedding matrix (GloVe), or None
        """
        super(GCNRelationModel, self).__init__()
        self.opt = opt
        self.emb_matrix = emb_matrix
        # create embedding layers (pos/ner embeddings are optional)
        self.emb = nn.Embedding(opt['vocab_size'], opt['emb_dim'], padding_idx=constant.PAD_ID)
        self.pos_emb = nn.Embedding(len(constant.POS_TO_ID), opt['pos_dim']) if opt['pos_dim'] > 0 else None
        self.ner_emb = nn.Embedding(len(constant.NER_TO_ID), opt['ner_dim']) if opt['ner_dim'] > 0 else None
        embeddings = (self.emb, self.pos_emb, self.ner_emb)
        self.init_embeddings()
        # gcn layer
        self.gcn = GCN(self.opt, embeddings, opt['hidden_dim'], opt['num_layers'])
        # feed-forward head on top of the GCN output;
        # in_dim = 3 * hidden: concatenation of [subject, sentence, object] pooled vectors
        in_dim = opt['hidden_dim'] * 3
        layers = [nn.Linear(in_dim, opt['hidden_dim']), nn.ReLU()]
        for _ in range(self.opt['mlp_layers'] - 1):
            layers += [nn.Linear(opt['hidden_dim'], opt['hidden_dim']), nn.ReLU()]
        self.out_mlp = nn.Sequential(*layers)

    def init_embeddings(self):
        """
        Initialize word embeddings from emb_matrix if given, otherwise
        uniformly at random (row 0, the PAD row, is skipped on that path),
        then decide which rows stay trainable.
        """
        if self.emb_matrix is None:
            self.emb.weight.data[1:, :].uniform_(-1.0, 1.0)
        else:
            self.emb.weight.data.copy_(self.emb_matrix)
        # decide finetuning
        if self.opt['topn'] <= 0:
            print('Do not finetune word embedding layer.')
            self.emb.weight.requires_grad = False
        elif self.opt['topn'] < self.opt['vocab_size']:
            print('Finetune top {} word embeddings.'.format(self.opt['topn']))
            # hook zeroes the gradient of all but the topn embedding rows
            self.emb.weight.register_hook(lambda grad : torch_utils.keep_partial_grad(grad, self.opt['topn']))
        else:
            print('Finetune all embeddings.')

    def forward(self, inputs):
        """
        :param inputs: tuple of batch tensors (see TacredDataset.__getitem__)
        :return: (outputs, h_out) — MLP features and the pooled sentence vector
        """
        words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs
        l = (masks.data.cpu().numpy() == 0).astype(np.int64).sum(1) # token count of each sentence (PAD excluded)
        maxlen = max(l) # max length among all sentences
        def inputs_to_tree_reps(head, words, l, prune, subj_pos, obj_pos):
            """
            Build the batched adjacency matrix of the pruned dependency trees.
            :param head: tensor (batch_size, maxlen), head index per token
            :param words: tensor (batch_size, maxlen), token ids
            :param l: per-sentence token lengths (PAD excluded)
            :param prune: int pruning distance
            :param subj_pos: tensor (batch_size, maxlen), subject positions
            :param obj_pos: tensor (batch_size, maxlen), object positions
            :return: adjacency tensor of shape (batch_size, maxlen, maxlen)
            """
            trees = [head_to_tree(head[i], words[i], l[i], prune, subj_pos[i], obj_pos[i]) for i in range(len(l))]
            adj = [tree_to_adj(maxlen, tree, directed=False, self_loop=False).reshape(1, maxlen, maxlen) for tree in trees]
            adj = np.concatenate(adj, axis=0) # first dimension is batch_size
            adj = torch.from_numpy(adj)
            return Variable(adj)
        adj = inputs_to_tree_reps(head.data, words.data, l, self.opt['prune_k'], subj_pos.data, obj_pos.data)
        h, pool_mask = self.gcn(adj, inputs)
        # pooling: positions are 0 exactly at entity tokens; the double .eq(0)
        # yields a mask that is True everywhere *except* the entity tokens
        subj_mask, obj_mask = subj_pos.eq(0).eq(0).unsqueeze(2), obj_pos.eq(0).eq(0).unsqueeze(2)
        pool_type = self.opt['pooling']
        h_out = pool(h, pool_mask, type=pool_type)
        subj_out = pool(h, subj_mask, type=pool_type)
        obj_out = pool(h, obj_mask, type=pool_type)
        outputs = torch.cat([subj_out, h_out, obj_out], dim=1)
        outputs = self.out_mlp(outputs)
        return outputs, h_out
def pool(h, mask, type='max'):
    """
    Pool GCN outputs over the sequence dimension.

    :param h: tensor (batch_size, max_len, mem_dim), GCN output
    :param mask: tensor (batch_size, max_len, 1), True at positions to ignore
    :param type: 'max', 'avg', or anything else for plain sum
    :return: tensor (batch_size, mem_dim)
    """
    # guard-clause dispatch: each branch fills masked positions with a value
    # that is neutral for its reduction
    if type == 'max':
        filled = h.masked_fill(mask, -constant.INFINITY_NUMBER)
        return torch.max(filled, 1)[0]
    if type == 'avg':
        filled = h.masked_fill(mask, 0)
        # divide by the number of unmasked positions per example
        return filled.sum(1) / (mask.size(1) - mask.float().sum(1))
    filled = h.masked_fill(mask, 0)
    return filled.sum(1)
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,551 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /trainers/trainer.py | """
A trainer class.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from utils import torch_utils
from models.GCNClassifier import GCNClassifier
class Trainer(object):
    """
    Abstract base trainer: subclasses must build self.model / self.optimizer
    and implement update() and predict(); load/save/update_lr are shared.
    """
    def __init__(self, opt, emb_matrix=None):
        raise NotImplementedError

    def update(self, batch):
        """
        Update the parameters with one batch of data.
        """
        raise NotImplementedError

    def predict(self, batch):
        """
        Predict one batch.
        """
        raise NotImplementedError

    def update_lr(self, new_lr):
        """
        Change the optimizer's learning rate to new_lr.
        :param new_lr: float
        """
        torch_utils.change_lr(self.optimizer, new_lr)

    def load(self, filename):
        """
        Load model weights and config from a checkpoint; exits the process
        on failure (script-style behavior kept for callers).
        :param filename: checkpoint file path
        """
        try:
            checkpoint = torch.load(filename)
        # narrowed from BaseException: a bare BaseException would also swallow
        # KeyboardInterrupt / SystemExit
        except Exception:
            print('Cannot load model from {}'.format(filename))
            exit()
        self.model.load_state_dict(checkpoint['model'])
        self.opt = checkpoint['config']

    def save(self, filename, epoch):
        """
        Save model weights and config to filename; failures are logged and
        otherwise ignored (best-effort checkpointing).
        :param filename: destination file path
        :param epoch: int, epoch number (kept for interface compatibility;
            not stored in the checkpoint)
        """
        # save model parameters and hyperparameters together
        params = {
            'model': self.model.state_dict(),
            'config': self.opt
        }
        try:
            torch.save(params, filename)
            print('model saved to {}'.format(filename))
        except Exception:
            print('[Warning] Saving failed... continuing anyway.')
def unpack_batch(batch):
    """
    Split one batch tuple into model inputs and bookkeeping fields.

    :param batch: sequence of 12 tensors (see TacredDataset.__getitem__)
    :return: (inputs, labels, tokens, head, subj_pos, obj_pos, lens)
    """
    wrapped = [Variable(field) for field in batch[:10]]
    gold = Variable(batch[10])
    # raw fields kept around for inspection by callers
    tokens, head = batch[0], batch[5]
    subj_pos, obj_pos = batch[6], batch[7]
    # batch[1] is the PAD mask; counting its zeros gives the number of real
    # tokens per sentence
    seq_lens = batch[1].eq(0).long().sum(1).squeeze()
    return wrapped, gold, tokens, head, subj_pos, obj_pos, seq_lens
class GCNTrainer(Trainer):
    """Concrete trainer for the GCN relation classifier."""
    def __init__(self, opt, emb_matrix=None):
        """
        :param opt: hyperparameter dict
        :param emb_matrix: word embedding matrix, torch tensor (or None)
        """
        self.opt = opt
        self.emb_matrix = emb_matrix
        self.model = GCNClassifier(opt, emb_matrix=emb_matrix)
        self.criterion = nn.CrossEntropyLoss()
        # only optimize parameters that require grad (e.g. frozen embedding
        # rows are excluded by the model itself)
        self.parameters = [p for p in self.model.parameters() if p.requires_grad]
        self.optimizer = torch_utils.get_optimizer(opt['optim'], self.parameters, opt['lr'])

    def update(self, batch):
        """
        Update the parameters with one batch of data.
        :param batch: batch tensors (see TacredDataset.__getitem__)
        :return: loss_val: float, the regularized training loss
        """
        inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch)
        # step forward
        self.model.train() # set the train mode
        # clear existing gradients before computing new ones
        self.optimizer.zero_grad()
        logits, pooling_output = self.model(inputs)
        loss = self.criterion(logits, labels)
        # l2 decay on all conv (GCN) layers, if enabled
        if self.opt.get('conv_l2', 0) > 0:
            loss += self.model.conv_l2() * self.opt['conv_l2']
        # l2 penalty on the pooled output representations, if enabled
        if self.opt.get('pooling_l2', 0) > 0:
            loss += self.opt['pooling_l2'] * (pooling_output ** 2).sum(1).mean()
        loss_val = loss.item()
        # backward
        loss.backward()
        # clip the gradient norm to stabilize training
        torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.opt['max_grad_norm'])
        self.optimizer.step()
        return loss_val

    def predict(self, batch):
        """
        Predict one batch.
        :param batch: batch tensors; index 11 holds orig_idx
        :return: (predictions, probs, loss): list, list, float — predictions
            and probs are restored to pre-sorting example order via orig_idx
        """
        inputs, labels, tokens, head, subj_pos, obj_pos, lens = unpack_batch(batch)
        orig_idx = batch[11]
        # forward
        self.model.eval()
        logits, _ = self.model(inputs)
        loss = self.criterion(logits, labels)
        probs = F.softmax(logits, dim=1).data.cpu().numpy().tolist()
        predictions = np.argmax(logits.data.cpu().numpy(), axis=1).tolist()
        # undo the length-sorting applied by the data loader
        _, predictions, probs = [list(t) for t in zip(*sorted(zip(orig_idx, predictions, probs)))]
        return predictions, probs, loss.item()
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,552 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /train.py | from configs import config
import torch
import numpy as np
import random
from utils import constant
from utils import util
import os
from utils.vocab import Vocab
from data_loader.TacredDataset import TacredDataset
from trainers.trainer import GCNTrainer
from utils import helper
from utils import torch_utils
import time
from datetime import datetime
from utils import metrics
args = config.parses_args()
# fix RNG seeds for reproducibility
torch.manual_seed(args.seed)
np.random.seed(args.seed)
# NOTE(review): the stdlib RNG is seeded with a hard-coded 1234 instead of
# args.seed — confirm this asymmetry is intentional
random.seed(1234)
# make opt: expose all CLI args as a plain dict
opt = vars(args)
label2id = constant.LABEL_TO_ID
opt['num_class'] = len(label2id)
# load vocab (build it from train/dev/test if it does not exist yet)
print('='*10 + 'Build Vocab' + '=' * 10)
if not os.path.exists(opt['vocab_dir']):
    os.makedirs(opt['vocab_dir'])
vocab_file = opt['vocab_dir'] + '/vocab.txt'
if not os.path.isfile(vocab_file):
    # min_freq of -1 keeps every token seen in the corpora
    util.build_vocab([opt['data_dir'] + '/train.json', opt['data_dir'] + '/dev.json',
                      opt['data_dir'] + '/test.json'], vocab_file, -1)
vocab = Vocab(vocab_file, special_words=constant.VOCAB_PREFIX)
opt['vocab_size'] = vocab.size
print('Build Vocab Done !')
# load embedding: reuse the cached tensor if present, else build from GloVe
print('='*10 + 'Loading Embedding' + '='*10)
emb_file = opt['data_dir'] + '/tacred.pth'
if os.path.isfile(emb_file):
    emb = torch.load(emb_file)
else:
    glove_emb_file = opt['emb_dir'] + '/glove.840B.300d.txt'
    glove_vocab, glove_vector = util.load_glove_vector(glove_emb_file)
    emb = util.get_embedding(vocab, glove_vector, glove_vocab)
    assert glove_vector.size(1) == opt['emb_dim']
    torch.save(emb, emb_file)
# load data and define data loader
print('=' * 10 + 'Loading Data' + '=' * 10)
print('batch_size:', opt['batch_size'])
train_batch = TacredDataset(opt['data_dir'] + '/train.json', opt['batch_size'], opt, vocab,
                            evaluation=False)
dev_batch = TacredDataset(opt['data_dir'] + '/dev.json', opt['batch_size'], opt, vocab,
                          evaluation=True)
# zero-pad single-character model ids so save dirs sort consistently
model_id = opt['id'] if len(opt['id']) > 1 else '0' + opt['id']
model_save_dir = opt['save_dir'] + '/' + model_id
opt['model_save_dir'] = model_save_dir
if not os.path.exists(model_save_dir):
    os.makedirs(model_save_dir)
helper.save_config(opt, model_save_dir + '/config.json', verbose=True)
# Define the model: fresh, or resumed from a checkpoint
if not opt['load']:
    trainer = GCNTrainer(opt, emb_matrix=emb)
else:
    # load pretrained model; keep the checkpoint's config but allow the
    # optimizer choice to be overridden from the command line
    model_file = opt['model_file']
    print('Loading model from {}'.format(model_file))
    model_opt = torch_utils.load_config(model_file)
    model_opt['optim'] = opt['optim']
    trainer = GCNTrainer(model_opt)
    trainer.load(model_file)
id2label = dict([(v,k) for k,v in label2id.items()])
dev_score_history = []
current_lr = opt['lr']
global_step = 0
global_start_time = time.time()
format_str = '{}: step {}/{} (epoch {}/{}), loss = {:.6f} ({:.3f} sec/batch), lr: {:.6f}'
max_steps = len(train_batch) * opt['num_epoch']
# start training
for epoch in range(1, opt['num_epoch'] + 1):
    train_loss = 0
    for i, batch in enumerate(train_batch):
        start_time = time.time()
        global_step += 1
        loss = trainer.update(batch)
        train_loss += loss
        if global_step % opt['log_step'] == 0:
            duration = time.time() - start_time
            print(format_str.format(datetime.now(), global_step, max_steps, epoch,\
                  opt['num_epoch'], loss, duration, current_lr))
    # eval on dev after each epoch
    print('Evaluating on dev set...')
    predictions = []
    dev_loss = 0
    for i, batch in enumerate(dev_batch):
        preds, _, loss = trainer.predict(batch)
        predictions += preds
        dev_loss += loss
    predictions = [id2label[p] for p in predictions]
    train_loss = train_loss / train_batch.num_example * opt['batch_size'] # avg loss per batch
    dev_loss = dev_loss / dev_batch.num_example * opt['batch_size']
    dev_p, dev_r, dev_f1 = metrics.score(dev_batch.gold(), predictions)
    print("epoch {}: train_loss = {:.6f}, dev_loss = {:.6f}, dev_f1 = {:.4f}".format(epoch,\
          train_loss, dev_loss, dev_f1))
    dev_score = dev_f1
    # save a checkpoint every epoch, but only keep those on the save_epoch grid
    model_file = model_save_dir + '/checkpoint_epoch_{}.pt'.format(epoch)
    trainer.save(model_file, epoch)
    if epoch % opt['save_epoch'] != 0:
        os.remove(model_file)
    # lr schedule: decay when the dev score stops improving (SGD-family only)
    if len(dev_score_history) > opt['decay_epoch'] and dev_score <= dev_score_history[-1] and \
            opt['optim'] in ['sgd', 'adagrad', 'adadelta']:
        current_lr *= opt['lr_decay']
        trainer.update_lr(current_lr)
    dev_score_history += [dev_score]
    print("")
print("Training ended with {} epochs.".format(epoch))
78,553 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /modules/gcn.py | import torch
import torch.nn as nn
from utils import constant
from torch.autograd import Variable
import torch.nn.functional as F
class GCN(nn.Module):
    """
    A GCN / contextualized-GCN (optional Bi-LSTM front-end) module operating
    on dependency-graph adjacency matrices.
    """
    def __init__(self, opt, embeddings, mem_dim, num_layers):
        """
        :param opt: hyperparameter dict
        :param embeddings: tuple (word emb, pos emb, ner emb); pos/ner may be None
        :param mem_dim: GCN state dimension
        :param num_layers: number of GCN layers
        """
        super(GCN, self).__init__()
        self.opt = opt
        self.layers = num_layers # the number of GCN layers
        self.use_cuda = opt['cuda']
        self.mem_dim = mem_dim # GCN state dimension
        # input dimension: concatenation of word / pos / ner embeddings
        self.in_dim = opt['emb_dim'] + opt['pos_dim'] + opt['ner_dim']
        self.emb, self.pos_emb, self.ner_emb = embeddings
        # optional contextualizing Bi-LSTM in front of the GCN
        if self.opt.get('rnn', False):
            input_size = self.in_dim
            self.rnn = nn.LSTM(input_size, opt['rnn_hidden'], opt['rnn_layers'],
                               batch_first=True, dropout=opt['rnn_dropout'], bidirectional=True)
            self.in_dim = opt['rnn_hidden'] * 2 # Bi-LSTM output becomes the GCN input
            self.rnn_drop = nn.Dropout(opt['rnn_dropout']) # dropout on the LSTM output
        self.in_drop = nn.Dropout(opt['input_dropout']) # dropout on the embeddings
        self.gcn_drop = nn.Dropout(opt['gcn_dropout']) # dropout after every GCN layer except the last
        # one linear transform per GCN layer
        self.W = nn.ModuleList()
        for layer in range(self.layers):
            input_dim = self.in_dim if layer == 0 else self.mem_dim
            self.W.append(nn.Linear(input_dim, self.mem_dim))

    def conv_l2(self):
        """
        Sum of squared weights and biases of all GCN layers (l2 regularizer).
        :return: scalar tensor
        """
        conv_weights = []
        for w in self.W:
            conv_weights += [w.weight, w.bias]
        return sum([x.pow(2).sum() for x in conv_weights])

    def encode_with_rnn(self, rnn_inputs, masks, batch_size):
        """
        Encode the sentences with the Bi-LSTM.
        :param rnn_inputs: tensor (batch_size, max_length, input_dim)
        :param masks: tensor (batch_size, max_length); nonzero marks PAD
        :param batch_size: int
        :return: rnn_outputs, padded back to (batch_size, max_length, 2*rnn_hidden)
        """
        # number of real tokens per sentence (masks equal PAD_ID at non-PAD
        # positions iff PAD_ID == 0; the data loader builds masks that way)
        seq_lens = list(masks.data.eq(constant.PAD_ID).long().sum(1).squeeze())
        h0, c0 = rnn_zero_state(batch_size, self.opt['rnn_hidden'], self.opt['rnn_layers'])
        # pack so the LSTM skips PAD steps; requires length-sorted batches,
        # which the data loader guarantees via sort_all
        rnn_inputs = nn.utils.rnn.pack_padded_sequence(rnn_inputs, seq_lens, batch_first=True)
        rnn_outputs, (ht, ct) = self.rnn(rnn_inputs, (h0, c0))
        rnn_outputs, _ = nn.utils.rnn.pad_packed_sequence(rnn_outputs, batch_first=True)
        return rnn_outputs

    def forward(self, adj, inputs):
        """
        :param adj: adjacency tensor (batch, maxlen, maxlen)
        :param inputs: batch tensors (see TacredDataset.__getitem__)
        :return: (gcn_outputs, mask) — mask is True at tokens with no edge in
            adj, i.e. tokens off the pruned dependency tree (used by pooling)
        """
        words, masks, pos, ner, deprel, head, subj_pos, obj_pos, subj_type, obj_type = inputs
        word_embs = self.emb(words)
        embs = [word_embs]
        if self.opt['pos_dim'] > 0:
            embs += [self.pos_emb(pos)]
        if self.opt['ner_dim'] > 0:
            embs += [self.ner_emb(ner)]
        embs = torch.cat(embs, dim=2)
        embs = self.in_drop(embs)
        # optional rnn layer
        if self.opt.get('rnn', False):
            gcn_inputs = self.rnn_drop(self.encode_with_rnn(embs, masks, words.size()[0]))
        else:
            gcn_inputs = embs
        # gcn layers
        # normalization term: node degree + 1 (the +1 accounts for the self loop)
        denom = adj.sum(2).unsqueeze(2) + 1
        # tokens with no incoming or outgoing edge are off the dependency
        # path; mark them so pooling can ignore them later
        mask = (adj.sum(2) + adj.sum(1)).eq(0).unsqueeze(2)
        # zero out adj for the no-graph ablation
        if self.opt.get('no_adj', False):
            adj = torch.zeros_like(adj)
        for l in range(self.layers):
            Ax = adj.bmm(gcn_inputs)
            AxW = self.W[l](Ax)
            AxW = AxW + self.W[l](gcn_inputs) # self loop
            AxW = AxW / denom
            gAxW = F.relu(AxW)
            gcn_inputs = self.gcn_drop(gAxW) if l < self.layers - 1 else gAxW
        return gcn_inputs, mask
def rnn_zero_state(batch_size, hidden_dim, num_layers, bidirectional=True, use_cuda=True):
    """
    Build zero-initialized (h0, c0) states for an LSTM.

    NOTE(review): use_cuda is accepted but never used here — presumably the
    caller was expected to move the states to GPU; confirm before relying on it.

    :param batch_size: int
    :param hidden_dim: RNN hidden state dimension
    :param num_layers: number of RNN layers
    :param bidirectional: doubles the layer count when True
    :param use_cuda: unused (see note above)
    :return: (h0, c0), each of shape (total_layers, batch_size, hidden_dim)
    """
    layer_count = num_layers * 2 if bidirectional else num_layers
    # a single shared zero tensor serves as both states, as in the original
    zeros = Variable(torch.zeros(layer_count, batch_size, hidden_dim), requires_grad=False)
    return zeros, zeros
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,554 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /modules/tree_utils.py | from .tree import Tree
import numpy as np
def head_to_tree(head, tokens, length, prune, subj_pos, obj_pos):
    """
    Convert a sequence of head indexes into a Tree object.

    :param head: tensor of 1-based head indexes (0 marks the sentence root)
    :param tokens: tensor of token ids, including PAD
    :param length: number of real tokens (PAD excluded)
    :param prune: prune distance from the dependency path; < 0 keeps the full tree
    :param subj_pos: tensor, zero exactly at subject token positions
    :param obj_pos: tensor, zero exactly at object token positions
    :return: root Tree node (the entities' LCA when pruning, else the sentence root)
    """
    # remove PAD from the sentence and convert tensors to plain lists
    tokens = tokens[:length].tolist()
    head = head[:length].tolist()
    root = None
    # full tree: just materialize every node and hook up the parent links
    if prune < 0:
        nodes = [Tree() for _ in head]
        for i in range(len(nodes)):
            h = head[i]
            nodes[i].idx = i
            nodes[i].dist = -1  # distance to the dependency path is unused here
            if h == 0:
                root = nodes[i]
            else:
                nodes[h - 1].add_child(nodes[i])
    # pruned tree: keep only nodes within `prune` hops of the subj-obj path
    else:
        subj_pos = [i for i in range(length) if subj_pos[i] == 0]
        obj_pos = [i for i in range(length) if obj_pos[i] == 0]
        cas = None
        subj_ancestors = set(subj_pos)
        # when an entity spans several tokens, every token's ancestor chain is
        # collected, not only the last token's
        for s in subj_pos:
            h = head[s]
            tmp = [s]
            while h > 0:
                tmp += [h-1]
                subj_ancestors.add(h-1)
                h = head[h-1]
            # cas accumulates the ancestors common to all entity tokens
            if cas is None:
                cas = set(tmp)
            else:
                cas.intersection_update(tmp)
        obj_ancestors = set(obj_pos)
        for o in obj_pos:
            h = head[o]
            # BUGFIX: start the ancestor chain from the object token itself
            # (0-based index o), not from the raw 1-based head pointer h —
            # mirrors the subject loop above; the old `tmp = [h]` injected an
            # off-by-one index into the common-ancestor intersection
            tmp = [o]
            while h > 0:
                tmp += [h - 1]
                obj_ancestors.add(h - 1)
                h = head[h - 1]
            cas.intersection_update(tmp)
        # find the lowest common ancestor: the only common ancestor that has
        # no child inside the common-ancestor set
        if len(cas) == 1:
            lca = list(cas)[0]
        else:
            child_count = {k:0 for k in cas}
            for ca in cas:
                if head[ca] > 0 and head[ca] - 1 in cas:
                    child_count[head[ca] - 1] += 1
            for ca in cas:
                if child_count[ca] == 0:
                    lca = ca
                    break
        # the dependency path = both ancestor chains minus the shared part,
        # plus the LCA itself
        path_nodes = subj_ancestors.union(obj_ancestors).difference(cas)
        path_nodes.add(lca)
        # compute each node's distance to the path
        dist = [-1 if i not in path_nodes else 0 for i in range(length)]
        for i in range(length):
            if dist[i] < 0:
                stack = [i]  # node i and its ancestors, walked towards the root
                while stack[-1] >= 0 and stack[-1] not in path_nodes:
                    stack.append(head[stack[-1]] - 1)
                # if node i reaches the path, unwind the stack recording hop counts
                if stack[-1] in path_nodes:
                    for d, j in enumerate(reversed(stack)):
                        dist[j] = d
                # otherwise give it a distance large enough that any sane
                # prune value drops it
                else:
                    for j in stack:
                        if j>=0 and dist[j] < 0:
                            dist[j] = int(1e4)
        highest_node = lca
        # keep only nodes within `prune` hops of the path; the LCA is the root
        nodes = [Tree() if dist[i] <= prune else None for i in range(length)]
        for i in range(len(nodes)):
            if nodes[i] is None:
                continue
            h = head[i]
            nodes[i].idx = i
            nodes[i].dist = dist[i]
            if h > 0 and i != highest_node:
                assert nodes[h-1] is not None
                nodes[h-1].add_child(nodes[i])
        root = nodes[highest_node]
    assert root is not None
    return root
def tree_to_adj(sent_len, tree, directed=True, self_loop=False):
    """
    Convert a Tree object into a numpy adjacency matrix.

    :param sent_len: matrix side length (padded max sentence length)
    :param tree: root Tree node (needs .idx and .children)
    :param directed: keep only parent->child edges when True
    :param self_loop: add a 1 on the diagonal for every visited node
    :return: float32 numpy array of shape (sent_len, sent_len)
    """
    ret = np.zeros((sent_len, sent_len), dtype=np.float32)
    # breadth-first traversal using a read pointer instead of list slicing
    frontier = [tree]
    visited = []
    cursor = 0
    while cursor < len(frontier):
        node = frontier[cursor]
        cursor += 1
        visited.append(node.idx)
        for child in node.children:
            ret[node.idx, child.idx] = 1
        frontier.extend(node.children)
    # symmetrize when edge direction should be ignored
    if not directed:
        ret = ret + ret.T
    if self_loop:
        for i in visited:
            ret[i, i] = 1
    return ret
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,555 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /modules/tree.py |
class Tree(object):
    """
    A generic tree node, used for dependency / constituency trees.
    """
    def __init__(self):
        # no parent until attached via add_child
        self.parent = None
        # index of the word this node represents
        self.idx = None
        # direct child Tree nodes
        self.children = []

    def add_child(self, child):
        """Attach *child* under this node and set its back-pointer."""
        child.parent = self
        self.children.append(child)

    def num_child(self):
        """Number of direct children."""
        return len(self.children)
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,556 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /utils/util.py | import json
from collections import Counter
import torch
import os
from .vocab import Vocab
from . import constant
def build_vocab(filenames, vocabFile, min_freq):
    """
    Build the vocabulary from *filenames* (train, dev, test) and save it to
    *vocabFile*, one token per line.

    :param filenames: list of dataset file paths (train, dev, test)
    :param vocabFile: path the vocabulary file is written to
    :param min_freq: int; tokens with frequency < min_freq are dropped
    :return: None
    """
    tokens = []
    for filename in filenames:
        tokens += load_tokens(filename)
    # Counter consumes the iterable directly; the original wrapped it in a
    # redundant generator expression
    counter = Counter(tokens)
    # keep sufficiently frequent tokens, most frequent first
    vocab = sorted([t for t in counter if counter.get(t) >= min_freq],
                   key=counter.get, reverse=True)
    # special words and entity-mask tokens go first; raw entity tokens were
    # already masked out by load_tokens, so they never reach the vocabulary
    vocab = constant.VOCAB_PREFIX + entity_mask() + vocab
    with open(vocabFile, 'w') as f:
        for v in vocab:
            f.write(v + '\n')
def entity_mask():
    """
    Build the list of entity-mask tokens ('SUBJ-<TYPE>' then 'OBJ-<TYPE>').

    :return: masks: list of str
    """
    # skip the first two reserved entries of each NER mapping
    subj_types = list(constant.SUBJ_NER_TO_ID.keys())[2:]
    obj_types = list(constant.OBJ_NER_TO_ID.keys())[2:]
    # subject masks first, then object masks (same order as before)
    return ['SUBJ-' + t for t in subj_types] + ['OBJ-' + t for t in obj_types]
def load_tokens(filename):
    """
    Load all non-entity tokens from a TACRED-style json file.

    :param filename: file path
    :return: tokens: flat list of words with entity spans removed
    """
    with open(filename, 'r', encoding='utf8', errors='ignore') as infile:
        data = json.load(infile)
    tokens = []
    for d in data:
        ts = d['token']
        ss, se = d['subj_start'], d['subj_end']
        os_, oe = d['obj_start'], d['obj_end']
        # blank out both entity spans so entity words never enter the vocab
        ts[ss:se + 1] = ['<PAD>'] * (se - ss + 1)
        ts[os_:oe + 1] = ['<PAD>'] * (oe - os_ + 1)
        tokens += [w for w in ts if w != '<PAD>']
    print('{} tokens from {} examples loaded from {}.'.format(len(tokens), len(data), filename))
    return tokens
def load_glove_vector(path):
    """
    Load the GLOVE word vectors at *path*, caching the parsed word list and
    the tensor under ./data/glove/ so later calls skip the slow text parse.

    :param path: GLOVE word-vector text file path
    :return: (glove_vocab, glove_vector): a Vocab object and a float tensor
        of shape (words_num, word_dim)
    """
    # cache file names are derived from the basename of *path*
    # NOTE(review): the cache directory './data/glove/' is hard-coded and
    # independent of where *path* lives — confirm it matches the project layout
    base = os.path.splitext(os.path.basename(path))[0]
    glove_word_path = os.path.join('./data/glove/', base + '.vocab')
    glove_vector_path = os.path.join('./data/glove/', base + '.pth')
    if os.path.isfile(glove_word_path) and os.path.isfile(glove_vector_path):
        # cache hit: load the previously saved vocab file and tensor
        print('======> File found, loading memory !')
        glove_vocab = Vocab(glove_word_path)
        glove_vector = torch.load(glove_vector_path)
        return glove_vocab, glove_vector
    print('======>Loading glove word vector<======')
    # first pass: read one line to learn the vector dimension, then count lines
    with open(path, 'r', encoding='utf8', errors='ignore') as f:
        content = f.readline().rstrip('\n').split(' ')
        word_dim = len(content[1:])
        word_count = 1
        for _ in f:
            word_count += 1
    # pre-allocate so the second pass can fill entries by index
    glove_tokens = [None] * word_count
    glove_vector = torch.zeros(word_count, word_dim, dtype=torch.float)
    # second pass: parse every line into its token and its vector
    with open(path, 'r', encoding='utf8', errors='ignore') as f:
        count = 0
        for content in f:
            content = content.rstrip('\n').split(' ')
            glove_tokens[count] = content[0]
            vectors = list(map(float, content[1:]))
            glove_vector[count] = torch.tensor(vectors, dtype=torch.float)
            count += 1
    # persist both caches for the next run
    with open(glove_word_path, 'w') as f:
        for token in glove_tokens:
            f.write(token + '\n')
    torch.save(glove_vector, glove_vector_path)
    glove_vocab = Vocab(glove_word_path)
    return glove_vocab, glove_vector
def get_embedding(vocab, pre_embedding, pre_vocab):
    """
    Build the embedding matrix for *vocab*: words present in the pretrained
    vocabulary copy their pretrained (glove) vector, all other words keep a
    random uniform(-1, 1) vector, and the <pad> row is forced to all zeros.

    :param vocab: Vocab object
    :param pre_embedding: pretrained embedding tensor (glove in this project)
    :param pre_vocab: Vocab object of the pretrained embedding
    :return: embedding: torch tensor of shape (vocab size, word dim)
    """
    num_words = len(vocab.WordsToIdx)
    dim = pre_embedding.size(1)
    embedding = torch.zeros(num_words, dim, dtype=torch.float)
    embedding.uniform_(-1, 1)  # random init for out-of-glove words
    embedding[constant.PAD_ID].zero_()  # padding row must stay all zero
    for word in vocab.WordsToIdx:
        pre_idx = pre_vocab.get_index(word)
        if pre_idx is not None:
            embedding[vocab.get_index(word)] = pre_embedding[pre_idx]
    return embedding
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,557 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /models/GCNClassifier.py | """
GCN model for relation extraction.
"""
import torch
import torch.nn as nn
import torch.functional as F
from torch.autograd import Variable
import numpy as np
from modules.GCNRelationModel import GCNRelationModel
from modules.gcn import GCN
class GCNClassifier(nn.Module):
    """A thin classification head wrapped around GCNRelationModel."""

    def __init__(self, opt, emb_matrix=None):
        """
        :param opt: hyperparameter dict ('hidden_dim', 'num_class', ...)
        :param emb_matrix: optional pretrained embedding (glove here)
        """
        super(GCNClassifier, self).__init__()
        self.opt = opt
        self.gcn_model = GCNRelationModel(opt, emb_matrix)
        self.classifier = nn.Linear(opt['hidden_dim'], opt['num_class'])

    def conv_l2(self):
        """L2 penalty of the GCN convolution weights (delegated)."""
        return self.gcn_model.gcn.conv_l2()

    def forward(self, input):
        """Run the relation model and project its output to class logits."""
        outputs, pooling_output = self.gcn_model(input)
        return self.classifier(outputs), pooling_output
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,558 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /test.py | print('=' * 80 + 'print' + '=' *80) | {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,559 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /utils/helper.py | import json
def save_config(config, path, verbose=True):
    """Serialize *config* as pretty-printed JSON at *path* and return it."""
    with open(path, 'w') as fh:
        json.dump(config, fh, indent=2)
    if verbose:
        print("Config saved to file {}".format(path))
    return config
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,560 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /utils/torch_utils.py | import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import Optimizer
def keep_partial_grad(grad, topk):
    """
    Zero the gradient of every row past the first *topk*, so only the top-k
    rows (e.g. the most frequent embedding vectors) are ever updated.

    :param grad: grad tensor, modified in place
    :param topk: int, number of leading rows whose gradient is kept
    :return: grad: the same tensor, for chaining
    """
    assert topk < grad.size(0)
    grad.data[topk:].fill_(0)  # fill_(0) == zero_(): in-place, same storage
    return grad
def change_lr(optimizer, new_lr):
    """
    Set the learning rate of every parameter group to *new_lr*.

    :param optimizer: a torch optimizer
    :param new_lr: the new learning rate
    :return: None
    """
    for group in optimizer.param_groups:
        group['lr'] = new_lr
### torch specific functions
def get_optimizer(name, parameters, lr, l2=0):
    """Construct a torch optimizer by name ('sgd', 'adam', 'adamax', 'adadelta')."""
    # adam / adamax intentionally keep the library-default learning rate
    factories = {
        'sgd': lambda: torch.optim.SGD(parameters, lr=lr, weight_decay=l2),
        'adam': lambda: torch.optim.Adam(parameters, weight_decay=l2),
        'adamax': lambda: torch.optim.Adamax(parameters, weight_decay=l2),
        'adadelta': lambda: torch.optim.Adadelta(parameters, lr=lr, weight_decay=l2),
    }
    if name not in factories:
        raise Exception("Unsupported optimizer: {}".format(name))
    return factories[name]()
def load_config(filename):
    """
    Load a checkpoint saved with torch.save and return its 'config' entry.

    The original caught BaseException, printed a message and then crashed
    with NameError on the unbound ``dump``; now the failure is logged and
    the original exception propagates to the caller.

    :param filename: checkpoint file path
    :return: the checkpoint's 'config' dict
    """
    try:
        dump = torch.load(filename)
    except Exception:
        print("[ Fail: model loading failed. ]")
        raise
    return dump['config']
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,561 | yin-hong/gcn-over-pruned-trees | refs/heads/master | /scripts/preprocess.py |
def load_file(filename, dstfile):
    """
    Load a train/dev/test file and save it to *dstfile*.

    NOTE(review): not implemented yet — the body is empty, so callers get
    None and no destination file is written.

    :param filename: train/dev/test file path
    :param dstfile: destination file of train/dev/test
    :return: None
    """
| {"/modules/GCNRelationModel.py": ["/modules/gcn.py", "/modules/tree_utils.py"], "/trainers/trainer.py": ["/models/GCNClassifier.py"], "/train.py": ["/data_loader/TacredDataset.py", "/trainers/trainer.py"], "/modules/tree_utils.py": ["/modules/tree.py"], "/models/GCNClassifier.py": ["/modules/GCNRelationModel.py", "/modules/gcn.py"]} |
78,578 | ewertonhm/RBUpdater | refs/heads/master | /pasta.py | from pathlib import *
def create_routeros_folder():
    """Ensure ./routeros exists and return the current working directory.

    NOTE: the caller (menu.version_menu) appends "\\routeros\\" to the
    returned value, so this deliberately returns the cwd, not the folder.
    If ./routeros exists but is a regular file, nothing is created (same
    silent behavior as before).
    """
    folder = Path('./routeros')
    # a non-existent path cannot be a directory either, so the original
    # `not exists and not is_dir` reduces to this single check
    if not folder.exists():
        folder.mkdir()
    return Path.cwd()
78,579 | ewertonhm/RBUpdater | refs/heads/master | /config.py | ###############
# Default credentials and ports tried against every device; main.py lets the
# operator add more entries interactively at startup.
Debug = True  # verbose console output

Users = ['admin', 'root']
Passwords = ['glock9mm']
Ports = ['22']
| {"/main.py": ["/relatorio.py", "/rb_class.py", "/menu.py", "/txt_rw.py", "/config.py"], "/relatorio.py": ["/txt_rw.py"], "/menu.py": ["/txt_rw.py", "/pasta.py"], "/rb_class.py": ["/txt_rw.py"]} |
78,580 | ewertonhm/RBUpdater | refs/heads/master | /main.py | from datetime import datetime
from relatorio import *
from rb_class import *
from menu import *
from txt_rw import *
import argparse
from config import Users, Passwords, Ports, Debug
"""
# Initialize parser
parser = argparse.ArgumentParser(description='Automatização de backups e atualizações de dispositivos Routerbard')
# Adding arguments
parser.add_argument('-t', '--targets', nargs='+', help = '<Required> Lista de hosts', required=True)
parser.add_argument('-u', '--users', nargs='+', help = '<Optional> Usuários')
parser.add_argument('-s', '--passwords', nargs='+', help = '<Optional> Senhas')
parser.add_argument('-p', '--ports', nargs='+', help = '<Optional> Portas')
parser.add_argument('-d', '--debug', help = '<Optional> Debug mode')
# Read arguments from command line
args = parser.parse_args()
print(args)
"""
LTS = 'version: 6.47.10 (long-term)'
FW = 'current-firmware: 6.47.10'
Hosts = [] #args.targets
#Ports = [22]
#Users = ['admin']
#Passwords = ['glock9mm']
"""
if args.users != None:
for user in args.users:
Users.append(user)
if args.passwords != None:
for pwd in args.passwords:
Passwords.append(pwd)
if args.ports != None:
for port in args.ports:
Ports.append(port)
Host_connection = []
Host_connection_datetime = []
Host_pppoe = []
Host_identity = []
Host_version = []
Host_fw_version = []
Host_backup_name = []
Host_updated = []
Host_fw_updated = []
"""
if __name__ == '__main__':

    def _extend_cleaned(dest, values):
        """Strip one trailing newline from each value and append it to *dest*."""
        for value in values:
            dest.append(str(value.removesuffix("\n")))

    def _collect(dest, option, manual_prompt):
        """Fill *dest* from a txt file ('1') or from manual input ('2').

        Returns False when *option* is neither, True otherwise; echoes the
        resulting list when Debug is on.
        """
        if option == '1':
            _extend_cleaned(dest, read_file(menu_txt()))
        elif option == '2':
            _extend_cleaned(dest, menu_manual(manual_prompt))
        else:
            return False
        if Debug:
            print(dest)
        return True

    ################################################################
    # Hosts are mandatory: an invalid choice aborts the program.
    if not _collect(Hosts, hosts_menu(), 'Insira um endereço de IP, Insira zero para parar: '):
        print("Opção inválida")
        exit()
    print()
    # Extra users / passwords are optional; an invalid choice just skips.
    _collect(Users, users_menu(Users), 'Insira um Usuário, Insira zero para parar: ')
    print()
    _collect(Passwords, passwords_menu(Passwords), 'Insira uma senha, Insira zero para parar: ')
    print()
    # Extra ports only support manual entry.
    if ports_menu(Ports) == '1':
        _extend_cleaned(Ports, menu_manual('Insira uma porta, Insira zero para parar: '))
        if Debug:
            print(Ports)
    print()

    def _blank_state():
        """Nine fresh bookkeeping lists, one set per main-menu action."""
        return [[] for _ in range(9)]

    def _connect(host, counter, attempts):
        """Announce progress, try to reach *host* and stamp the attempt time."""
        print("Host {0}/{1}".format(counter + 1, len(Hosts)))
        ssh = try_connect(host, Ports, Users, Passwords, attempts)
        Host_connection_datetime.append(datetime.now().strftime('%d/%m/%Y %H:%M'))
        return ssh

    ################################################################
    # Main menu loop.  Every action starts from a blank set of lists, walks
    # all hosts, then (where applicable) prints the matching report.
    while True:
        option = main_menu()
        (Host_connection, Host_connection_datetime, Host_pppoe, Host_identity,
         Host_version, Host_fw_version, Host_backup_name, Host_updated,
         Host_fw_updated) = _blank_state()

        if option == '1':
            # Backup every reachable host.
            for counter, host in enumerate(Hosts):
                ssh = _connect(host, counter, 1)
                if ssh != False:
                    Host_connection.append(True)
                    Host_pppoe.append(ssh.get_pppoe())
                    Host_identity.append(ssh.get_identity())
                    Host_backup_name.append('{0}_{1}_{2}'.format(Host_identity[counter], Host_pppoe[counter], host))
                    ssh.backup(Host_backup_name[counter])
                else:
                    Host_connection.append(False)
                    Host_pppoe.append('#')
                    Host_identity.append('#')
                    Host_backup_name.append('#')
            print()
            print_relatorio_backup(Hosts, Host_connection, Host_connection_datetime,
                                   Host_pppoe, Host_identity, Host_backup_name)
        elif option == '2':
            # Report the RouterOS version of every reachable host.
            for counter, host in enumerate(Hosts):
                ssh = _connect(host, counter, 1)
                if ssh != False:
                    Host_connection.append(True)
                    Host_version.append(ssh.get_version())
                else:
                    Host_connection.append(False)
            print()
            print_relatorio_version(Hosts, Host_connection, Host_connection_datetime, Host_version)
        elif option == '3':
            # Report the firmware version of every reachable host.
            for counter, host in enumerate(Hosts):
                ssh = _connect(host, counter, 1)
                if ssh != False:
                    Host_connection.append(True)
                    Host_fw_version.append(ssh.get_fw_version())
                else:
                    Host_connection.append(False)
            print()
            print_relatorio_fw_version(Hosts, Host_connection, Host_connection_datetime, Host_fw_version)
        elif option == '4':
            # Backup plus both version checks in one pass (3 retries).
            for counter, host in enumerate(Hosts):
                ssh = _connect(host, counter, 3)
                if ssh != False:
                    Host_connection.append(True)
                    Host_pppoe.append(ssh.get_pppoe())
                    Host_identity.append(ssh.get_identity())
                    Host_backup_name.append('{0}_{1}_{2}'.format(Host_identity[counter], Host_pppoe[counter], host))
                    Host_version.append(ssh.get_version())
                    Host_fw_version.append(ssh.get_fw_version())
                    ssh.backup(Host_backup_name[counter])
                    if Debug:
                        print(Host_version[counter])
                        print(Host_fw_version[counter])
                    ssh.close_connection()
                else:
                    Host_connection.append(False)
                    Host_pppoe.append('#')
                    Host_identity.append('#')
                    Host_backup_name.append('#')
                    Host_version.append('#')
                    Host_fw_version.append('#')
                    Host_updated.append('#')
                    Host_fw_updated.append('#')
            print_relatorio(Hosts, Host_connection, Host_connection_datetime, Host_pppoe,
                            Host_identity, Host_version, Host_fw_version, Host_backup_name)
        elif option == '5':
            sub = update_menu()
            if sub == '1':
                # Online update of RouterOS to the latest LTS release.
                for counter, host in enumerate(Hosts):
                    ssh = _connect(host, counter, 1)
                    if ssh != False:
                        Host_connection.append(True)
                        ssh.online_update()
                        Host_updated.append(True)
                    else:
                        Host_connection.append(False)
                        Host_updated.append(False)
                print()
                print_relatorio_version_updated(Hosts, Host_connection, Host_connection_datetime)
            elif sub == '2':
                # Update via local .npk files: only the connectivity check is
                # implemented so far (the upload step was never written).
                version = version_menu()
                for counter, host in enumerate(Hosts):
                    ssh = _connect(host, counter, 1)
                    Host_connection.append(ssh != False)
        elif option == '6':
            # Firmware upgrade on every reachable host.
            for counter, host in enumerate(Hosts):
                ssh = _connect(host, counter, 1)
                if ssh != False:
                    Host_connection.append(True)
                    ssh.update_fw()
                    Host_fw_updated.append(True)
                else:
                    Host_connection.append(False)
            print()
            print_relatorio_version_fw_updated(Hosts, Host_connection, Host_connection_datetime)
        elif option in ('7', '8', '9'):
            # VPLS / VPN / IPv6 actions are stubs: they only probe connectivity.
            for counter, host in enumerate(Hosts):
                ssh = _connect(host, counter, 1)
                Host_connection.append(ssh != False)
        elif option == '0':
            break
    # (The old monolithic backup+update pass that trailed this file inside a
    # triple-quoted string was dead code and has been removed.)
78,581 | ewertonhm/RBUpdater | refs/heads/master | /relatorio.py | from txt_rw import write_to_log
def print_relatorio_full(
        Hosts,
        Host_connection,
        Host_connection_datetime,
        Host_pppoe,
        Host_identity,
        Host_version,
        Host_fw_version,
        Host_backup_name,
        Host_updated,
        Host_fw_updated):
    """Print the complete per-host report to the console (nothing is logged).

    All list parameters are parallel to *Hosts*.  The unused locals
    ``s``/``v``/``f`` of the original were removed.
    """
    counter = 0
    for host in Hosts:
        print(' ')
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Identificação do Host: {0}'.format(Host_identity[counter]))
            print('## Usuário PPPoE: {0}'.format(Host_pppoe[counter]))
            print('## Backup realizado, arquivo salvo com o nome: {0}.backup'.format(Host_backup_name[counter]))
            # [9:] strips 'version: ', [18:] strips 'current-firmware: '
            print('## Versão do RouterOS no momento do acesso: {0}'.format(Host_version[counter][9:]))
            print('## versão do Firmware no momento do acesso: {0}'.format(Host_fw_version[counter][18:]))
            print('## Realizado atualização do RouterOS: {0}'.format('Sim' if Host_updated[counter] else 'Não'))
            print('## Realizado atualização do Firmware: {0}'.format('Sim' if Host_fw_updated[counter] else 'Não'))
        else:
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print(' ')
        counter = counter + 1
def print_relatorio(
        Hosts,
        Host_connection,
        Host_connection_datetime,
        Host_pppoe,
        Host_identity,
        Host_version,
        Host_fw_version,
        Host_backup_name):
    """Print the backup+version report to the console and mirror it to log.txt.

    All list parameters are parallel to *Hosts*; failed hosts carry '#'
    placeholders filled in by the caller (main.py).
    """
    write_to_log("## RELATORIO")
    counter = 0
    for host in Hosts:
        write_to_log('## Host: {0}'.format(host))
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            # log every line first, then echo the same lines to the console
            write_to_log('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            write_to_log('## Identificação do Host: {0}'.format(Host_identity[counter]))
            write_to_log('## Usuário PPPoE: {0}'.format(Host_pppoe[counter]))
            write_to_log('## Backup realizado, arquivo salvo com o nome: {0}.backup'.format(Host_backup_name[counter]))
            # [9:] strips 'version: ', [18:] strips 'current-firmware: '
            write_to_log('## Versão do RouterOS: {0}'.format(Host_version[counter][9:]))
            write_to_log('## versão do Firmware: {0}'.format(Host_fw_version[counter][18:]))
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Identificação do Host: {0}'.format(Host_identity[counter]))
            print('## Usuário PPPoE: {0}'.format(Host_pppoe[counter]))
            print('## Backup realizado, arquivo salvo com o nome: {0}.backup'.format(Host_backup_name[counter]))
            print('## Versão do RouterOS: {0}'.format(Host_version[counter][9:]))
            print('## versão do Firmware: {0}'.format(Host_fw_version[counter][18:]))
        else:
            write_to_log('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print()
        counter = counter + 1
def print_relatorio_backup(
        Hosts,
        Host_connection,
        Host_connection_datetime,
        Host_pppoe,
        Host_identity,
        Host_backup_name):
    """Print the backup report to the console and mirror it to log.txt.

    Fixes two copy-paste defects of the original: the host identity was
    logged twice while the backup file name (which IS printed) was never
    logged, and an unused local ``s`` was assigned every iteration.
    """
    write_to_log("## RELATORIO BACKUP")
    counter = 0
    for host in Hosts:
        write_to_log('## Host: {0}'.format(host))
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            write_to_log('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            write_to_log('## Identificação do Host: {0}'.format(Host_identity[counter]))
            write_to_log('## Usuário PPPoE: {0}'.format(Host_pppoe[counter]))
            write_to_log('## Backup realizado, arquivo salvo com o nome: {0}.backup'.format(Host_backup_name[counter]))
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Identificação do Host: {0}'.format(Host_identity[counter]))
            print('## Usuário PPPoE: {0}'.format(Host_pppoe[counter]))
            print('## Backup realizado, arquivo salvo com o nome: {0}.backup'.format(Host_backup_name[counter]))
        else:
            write_to_log('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print()
        counter = counter + 1
def print_relatorio_version(
        Hosts,
        Host_connection,
        Host_connection_datetime,
        Host_version):
    """Print the RouterOS-version report to the console and to log.txt.

    The unused per-iteration ``s = None`` of the original was removed.
    """
    # NOTE(review): the header still says "RELATORIO BACKUP" (copy-pasted
    # from print_relatorio_backup); kept as-is so existing log consumers
    # don't break — confirm before renaming.
    write_to_log("## RELATORIO BACKUP")
    counter = 0
    for host in Hosts:
        write_to_log('## Host: {0}'.format(host))
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            write_to_log('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            # [9:] strips the 'version: ' prefix of get_version() output
            write_to_log('## Versão do RouterOS no momento do acesso: {0}'.format(Host_version[counter][9:]))
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Versão do RouterOS no momento do acesso: {0}'.format(Host_version[counter][9:]))
        else:
            write_to_log('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print()
        counter = counter + 1
def print_relatorio_fw_version(
        Hosts,
        Host_connection,
        Host_connection_datetime,
        Host_fw_version):
    """Print the firmware-version report to the console and to log.txt.

    Fixes of the original: the logged line used slice [9:] (leaving half of
    the 'current-firmware: ' prefix in the log) while the console used [18:];
    both messages said "RouterOS" although they show the firmware version;
    the unused ``s = None`` and two stray module-level statements (a spurious
    "## RELATORIO BACKUP" log entry written at import time plus a dangling
    ``counter = 0``) were removed.
    """
    write_to_log("## RELATORIO BACKUP")  # NOTE(review): copy-pasted header label
    counter = 0
    for host in Hosts:
        write_to_log('## Host: {0}'.format(host))
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            write_to_log('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            # [18:] strips the 'current-firmware: ' prefix of get_fw_version()
            write_to_log('## Versão do Firmware no momento do acesso: {0}'.format(Host_fw_version[counter][18:]))
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Versão do Firmware no momento do acesso: {0}'.format(Host_fw_version[counter][18:]))
        else:
            write_to_log('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print()
        counter = counter + 1
def print_relatorio_version_updated(
        Hosts,
        Host_connection,
        Host_connection_datetime):
    """Print the RouterOS-update report to the console and to log.txt.

    The update status is hard-coded to 'Sim' for every reachable host (the
    caller only reaches this after triggering online_update on each one).
    The unused per-iteration ``s = None`` of the original was removed.
    """
    write_to_log("## RELATORIO BACKUP")  # NOTE(review): copy-pasted header label
    counter = 0
    for host in Hosts:
        write_to_log('## Host: {0}'.format(host))
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            write_to_log('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            write_to_log('## Versão do RouterOS atualizada: Sim')
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Versão do RouterOS atualizada: Sim')
        else:
            write_to_log('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print()
        counter = counter + 1
def print_relatorio_version_fw_updated(
        Hosts,
        Host_connection,
        Host_connection_datetime):
    """Print the firmware-update report to the console and to log.txt.

    Fixes of the original: the logged success line said "Versão do RouterOS
    atualizada" while the console printed the firmware message — the log now
    matches the console; the unused ``s = None`` was removed.
    """
    write_to_log("## RELATORIO BACKUP")  # NOTE(review): copy-pasted header label
    counter = 0
    for host in Hosts:
        write_to_log('## Host: {0}'.format(host))
        print('## Host: {0}'.format(host))
        if Host_connection[counter]:
            write_to_log('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            write_to_log('## Versão da firmware atualizada: Sim')
            print('## Tentativa de conexão em: {0} : Sucessful'.format(Host_connection_datetime[counter]))
            print('## Versão da firmware atualizada: Sim')
        else:
            write_to_log('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
            print('## Tentativa de conexão em: {0} : Failed'.format(Host_connection_datetime[counter]))
        print()
        counter = counter + 1
| {"/main.py": ["/relatorio.py", "/rb_class.py", "/menu.py", "/txt_rw.py", "/config.py"], "/relatorio.py": ["/txt_rw.py"], "/menu.py": ["/txt_rw.py", "/pasta.py"], "/rb_class.py": ["/txt_rw.py"]} |
78,582 | ewertonhm/RBUpdater | refs/heads/master | /menu.py | from txt_rw import *
from pathlib import *
from pasta import create_routeros_folder
# import consolemenu
def main_menu():
    """Show the action menu and return the raw option the user typed."""
    for line in (
        "[1] Realizar Backup.",
        "[2] Verificar a versão do RouterOS.",
        "[3] Verificar a versão do Firmware.",
        "[4] Realizar Backup e verificar a versão do RouterOS e Firmware",
        "[5] Atualizar RouterOS.",
        "[6] Atualizar Firmware.",
        "[0] Close.",
    ):
        print(line)
    return input('Escolha uma opção: ')
def hosts_menu():
    """Ask how the target IPs will be provided; return the chosen option."""
    print("Como deseja inserir os IPs:\n[1] Ler arquivo txt\n[2] Inserir manualmente\n")
    return input('Escolha uma opção: ')
def menu_txt():
    """Prompt for and return the name of a txt file to read from."""
    return input("Insira o nome do arquivo: ")
def menu_manual(mensagem):
    """Repeatedly prompt with *mensagem* and collect the entries.

    Typing '0' or 'zero' stops the loop; the sentinel itself is not stored.
    Returns the collected list.
    """
    entries = []
    while True:
        entry = input(mensagem)
        if entry == 'zero' or entry == '0':
            return entries
        entries.append(entry)
def users_menu(Users):
    """List the configured users and ask whether more should be added."""
    print("Tentativas de conexão serão feitas utilizando o(s) seguinte(s) usuário(s):")
    for username in Users:
        print(username)
    print()
    for line in ("Deseja inserir mais algum usuário?",
                 "[1] Ler arquivo txt",
                 "[2] Inserir manualmente",
                 "[0] Não"):
        print(line)
    return input('Escolha uma opção: ')
def passwords_menu(Passwords):
    """List the configured passwords and ask whether more should be added."""
    print("Tentativas de conexão serão feitas utilizando a(s) seguinte(s) senha(s):")
    for secret in Passwords:
        print(secret)
    print()
    for line in ("Deseja inserir mais alguma senha?",
                 "[1] Ler arquivo txt",
                 "[2] Inserir manualmente",
                 "[0] Não"):
        print(line)
    return input('Escolha uma opção: ')
def ports_menu(Ports):
    """List the configured ports and ask whether more should be added."""
    print("Tentativas de conexão serão feitas utilizando a(s) seguinte(s) porta(s):")
    for port_number in Ports:
        print(port_number)
    # no blank line here, matching the original layout
    for line in ("Deseja inserir mais alguma porta?",
                 "[1] Sim",
                 "[0] Não"):
        print(line)
    return input('Escolha uma opção: ')
def update_menu():
    """Ask how the update should be performed; return the selection."""
    for line in ("Como deseja realizar a atualização:",
                 "[1] Atualizar para a ultima versão LTS via internet",
                 "[2] Enviar arquivos"):
        print(line)
    print()
    return input('Escolha uma opção: ')
def version_menu():
    """Ask for the RouterOS version to install.

    Points the user at the folder where the 'npk' files must be placed
    manually before continuing.
    """
    npk_dir = str(create_routeros_folder()) + "\\routeros\\"
    print("Obs: Necessário inserir manualmente os arquivos 'npk' na pasta {0}".format(npk_dir))
    return input("Insira a versão que você irá instalar (exemplo: 6.47.10): ")
78,583 | ewertonhm/RBUpdater | refs/heads/master | /txt_rw.py | from datetime import datetime
def read_file(file_name):
    """Return every line of *file_name* as a list (newlines preserved).

    Fixes a resource leak: the original opened the file and never closed
    the handle; ``with`` guarantees it is closed even if readlines() raises.
    """
    with open(file_name, 'r') as f:
        return f.readlines()
def write_to_log(string):
    """Append *string* to log.txt, prefixed with the current timestamp.

    A newline is written first whenever the file already has content, so
    each call lands on its own line.
    """
    stamped = datetime.now().strftime('%d/%m/%Y %H:%M') + ': ' + string
    # 'a+' creates the file if missing and lets us both read and append.
    with open("log.txt", "a+") as log:
        log.seek(0)
        # A non-empty probe read means the log already has entries.
        if log.read(100):
            log.write("\n")
        log.write(stamped)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.