index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
51,000 | HeylonNHP/RIFE-Colab | refs/heads/main | /Globals/EncoderConfig.py | class EncoderConfig:
# Encoder configuration defaults (class-level; shadowed per-instance by the setters).
_nvencGPUID = 0  # GPU index passed to NVENC
_useNvenc = False  # hardware (NVENC) vs software (libx26x) encoding
_useH265 = False  # HEVC vs H.264
_encodingPreset = "veryslow"
_encodingProfile = "high"
_encodingCRF = 20
_pixelFormat = "yuv420p"
# Valid profile/preset names accepted by the corresponding setters.
_availableProfilex264 = ['baseline', 'main', 'high', 'high444p']
_availableProfilex265 = ['main', 'main10']
_availableEncodingPresetsx26x = ['ultrafast', 'superfast', 'veryfast', 'faster', 'fast', 'medium', 'slow', 'slower',
                                 'veryslow', 'placebo']
_availableEncodingPresetsNvenc = ['fast', 'medium', 'slow', 'lossless']
_crfRange = [0, 51]  # inclusive bounds checked by set_encoding_crf
_enableFFmpegOutputFPS = False
_FFmpegOutputFPS = 60
_lossless = False
# Looping
_preferredLoopLength = 10  # presumably seconds -- TODO confirm unit against callers
_maxLoopLength = 15
# Enable looping the output
loopRepetitionsEnabled = True
def __init__(self):
    """No per-instance setup; all defaults live on the class attributes."""
    pass
def set_looping_options(self, preferred_length: float, max_length: float, looping_enabled: bool):
    """Configure output looping: preferred and maximum loop lengths, plus on/off flag."""
    (self._preferredLoopLength,
     self._maxLoopLength,
     self.loopRepetitionsEnabled) = (preferred_length, max_length, looping_enabled)
def get_looping_options(self):
    """Return [preferred_length, max_length, looping_enabled]."""
    opts = [self._preferredLoopLength, self._maxLoopLength]
    opts.append(self.loopRepetitionsEnabled)
    return opts
def set_nvenc_gpu_id(self, gpu_id: int):
    """Select which GPU NVENC should use; negative IDs are rejected."""
    if gpu_id < 0:
        raise Exception("GPU ID out of range")
    self._nvencGPUID = gpu_id
def enable_nvenc(self, enable: bool):
    # Toggle NVENC hardware encoding; affects preset validation and get_encoder().
    self._useNvenc = enable
def enable_h265(self, enable: bool):
    # Toggle HEVC output; affects profile validation and get_encoder().
    self._useH265 = enable
def set_encoding_preset(self, preset: str):
    """Set the encoder preset after validating it against the active encoder's list."""
    allowed = self._availableEncodingPresetsNvenc if self._useNvenc else self._availableEncodingPresetsx26x
    if preset not in allowed:
        raise Exception("Preset doesn't exist")
    self._encodingPreset = preset
def set_encoding_profile(self, profile: str):
    """Set the encoder profile after validating it against the active codec's list."""
    allowed = self._availableProfilex265 if self._useH265 else self._availableProfilex264
    if profile not in allowed:
        raise Exception("Profile doesn't exist")
    self._encodingProfile = profile
def set_encoding_crf(self, crf: float):
    """Set the CRF value; must lie within the inclusive [low, high] range."""
    low, high = self._crfRange
    if crf > high or crf < low:
        raise Exception("CRF out of range")
    self._encodingCRF = crf
def set_pixel_format(self, pixel_format: str):
    # NOTE(review): no validation here -- any string is accepted and passed on to FFmpeg.
    self._pixelFormat = pixel_format
def set_ffmpeg_output_fps(self, enable: bool, value: float):
    # Enable/disable forcing FFmpeg's output FPS and set the target value.
    self._enableFFmpegOutputFPS = enable
    self._FFmpegOutputFPS = value
def get_nvenc_gpu_id(self):
    # NVENC GPU index previously set via set_nvenc_gpu_id (default 0).
    return self._nvencGPUID
def nvenc_enabled(self):
    # True when NVENC hardware encoding is selected.
    return self._useNvenc
def h265_enabled(self):
    # True when HEVC output is selected.
    return self._useH265
def get_encoding_preset(self):
    """Effective preset: NVENC in lossless mode forces the 'lossless' preset."""
    if not (self._lossless and self._useNvenc):
        return self._encodingPreset
    return "lossless"
def get_encoding_profile(self):
    # Profile previously validated and stored by set_encoding_profile.
    return self._encodingProfile
def get_encoding_crf(self):
    """Effective CRF: lossless mode forces 0, otherwise the stored value."""
    return 0 if self._lossless else self._encodingCRF
def get_pixel_format(self):
    # FFmpeg pixel format string (default "yuv420p").
    return self._pixelFormat
def get_encoder(self):
    """Map the (use NVENC, use H.265) flags to an FFmpeg encoder name."""
    table = {
        (True, True): 'hevc_nvenc',
        (True, False): 'h264_nvenc',
        (False, True): 'libx265',
        (False, False): 'libx264',
    }
    return table[(bool(self._useNvenc), bool(self._useH265))]
def ffmpeg_output_fps_enabled(self):
    # True when a fixed FFmpeg output FPS has been requested.
    return self._enableFFmpegOutputFPS
def ffmpeg_output_fps_value(self):
    # Target output FPS (only meaningful when ffmpeg_output_fps_enabled()).
    return self._FFmpegOutputFPS
def set_lossless_encoding(self, enable: bool):
    # Lossless mode overrides CRF (forced to 0) and, with NVENC, the preset.
    self._lossless = enable
def get_lossless_encoding(self):
    # True when lossless output was requested.
    return self._lossless
| {"/install_dependencies.py": ["/addInstalldirToPath.py", "/Globals/BuildConfig.py"], "/mainGuiUi.py": ["/pyqtHeaders/FileEdit.py"], "/QueuedFrames/SaveFramesList.py": ["/QueuedFrames/FrameFile.py"], "/runInterpolationIndividualSteps.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/generalInterpolationProceedures.py": ["/QueuedFrames/SaveFramesList.py", "/QueuedFrames/queuedFrameList.py", "/QueuedFrames/queuedFrame.py", "/QueuedFrames/FrameFile.py", "/autoEncoding.py", "/runAndPrintOutput.py", "/FFmpegFunctions.py", "/frameChooser.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/Globals/InterpolatorConfig.py", "/rifeFunctions.py", "/rifeInterpolationFunctions.py"], "/main_gui.py": ["/mainGuiUi.py", "/Globals/MachinePowerStatesHandler.py", "/generalInterpolationProceedures.py"], "/FFmpegFunctions.py": ["/Globals/GlobalValues.py"], "/runInterpolationAllSteps.py": ["/addInstalldirToPath.py", "/Globals/EncoderConfig.py", "/generalInterpolationProceedures.py"], "/rifeInterpolationFunctions.py": ["/QueuedFrames/FrameFile.py"], "/Globals/MachinePowerStatesHandler.py": ["/runAndPrintOutput.py"], "/runInterpolationBatch.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/autoEncoding.py": ["/frameChooser.py", "/runAndPrintOutput.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/FFmpegFunctions.py"], "/QueuedFrames/queuedFrameList.py": ["/QueuedFrames/InterpolationProgress.py"], "/frameChooser.py": ["/Globals/GlobalValues.py"], "/rifeFunctions.py": ["/Globals/BuildConfig.py"]} |
51,001 | HeylonNHP/RIFE-Colab | refs/heads/main | /frameChooser.py | import os
from Globals.GlobalValues import GlobalValues
def choose_frames(frames_folder, desired_fps):
    """Write a 'framesCFR.txt' concat list mapping source frames to a constant FPS.

    Frame filenames in *frames_folder* encode their timecode (extension assumed to
    be exactly 4 characters, e.g. '.png'); timecodes are in GlobalValues.timebase
    units. For every output tick at *desired_fps*, the nearest source frame at or
    after the previous tick is chosen and appended as an FFmpeg concat entry.

    Side effects: prints the generated list and writes it next to the frames.
    """
    frame_files = os.listdir(frames_folder)
    frame_files.sort()
    # Last frame's timecode marks the end of the clip.
    last_file = int(frame_files[-1][:-4])
    desired_frame_spacing = (1 / desired_fps) * GlobalValues.timebase
    timecodes_file_string = ""
    current_time = desired_frame_spacing
    count = 1
    current_list_index = 0
    while (current_time - desired_frame_spacing) <= last_file:
        current_frame = int(frame_files[current_list_index][:-4])
        # Advance to the first source frame at/after the previous output tick.
        while current_frame < round(current_time - desired_frame_spacing):
            if current_list_index >= len(frame_files) - 1:
                break
            current_list_index += 1
            current_frame = int(frame_files[current_list_index][:-4])
        # Build timecodes file
        frame_file = frames_folder + os.path.sep + frame_files[current_list_index]
        timecodes_file_string += ("file '" + frame_file + "'\n")
        count += 1
        current_time = ((1 / desired_fps) * count) * GlobalValues.timebase
    print(timecodes_file_string)
    # BUG FIX: the file was opened without a with-statement, leaking the handle
    # if write() raised; `with` guarantees it is closed.
    with open(frames_folder + os.path.sep + 'framesCFR.txt', 'w') as out_file:
        out_file.write(timecodes_file_string)
def choose_frames_list(frame_files, desired_fps, start_time=0, start_count=0):
    """Select one source frame per constant-FPS output tick from *frame_files*.

    Filenames encode timecodes (4-char extension stripped) in GlobalValues.timebase
    units. start_time/start_count let a caller resume mid-stream across batches.

    Returns (chosen_frames, covered_duration, next_current_time, next_count).
    NOTE: sorts *frame_files* in place as a side effect.
    """
    chosen_frame_list: list = []
    # frameFiles = os.listdir(framesFolder)
    frame_files.sort()
    last_file_number = int(frame_files[-1][:-4])
    desired_frame_spacing = (1 / desired_fps) * GlobalValues.timebase
    current_time = desired_frame_spacing
    count = 1
    # Resume state supplied by the caller overrides the defaults.
    if not start_time == 0:
        current_time = start_time
    if not start_count == 0:
        count = start_count
    current_list_index = 0
    # For when the first frame doesn't start from 0ms
    # Advance current time to the first frame's timecode
    while current_time < int(frame_files[0][:-4]):
        count += 1
        current_time = ((1 / desired_fps) * count) * GlobalValues.timebase
    while (current_time - desired_frame_spacing) <= last_file_number:
        current_frame = int(frame_files[current_list_index][:-4])
        # Advance to the first source frame at/after the previous output tick.
        while current_frame < round(current_time - desired_frame_spacing):
            if current_list_index < len(frame_files) - 1:
                current_list_index += 1
            else:
                break
            current_frame = int(frame_files[current_list_index][:-4])
        frame_file = frame_files[current_list_index]
        chosen_frame_list.append(frame_file)
        count += 1
        current_time = ((1 / desired_fps) * count) * GlobalValues.timebase
    return chosen_frame_list, (int(frame_files[-1][:-4]) - int(frame_files[0][:-4])), current_time, count
| {"/install_dependencies.py": ["/addInstalldirToPath.py", "/Globals/BuildConfig.py"], "/mainGuiUi.py": ["/pyqtHeaders/FileEdit.py"], "/QueuedFrames/SaveFramesList.py": ["/QueuedFrames/FrameFile.py"], "/runInterpolationIndividualSteps.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/generalInterpolationProceedures.py": ["/QueuedFrames/SaveFramesList.py", "/QueuedFrames/queuedFrameList.py", "/QueuedFrames/queuedFrame.py", "/QueuedFrames/FrameFile.py", "/autoEncoding.py", "/runAndPrintOutput.py", "/FFmpegFunctions.py", "/frameChooser.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/Globals/InterpolatorConfig.py", "/rifeFunctions.py", "/rifeInterpolationFunctions.py"], "/main_gui.py": ["/mainGuiUi.py", "/Globals/MachinePowerStatesHandler.py", "/generalInterpolationProceedures.py"], "/FFmpegFunctions.py": ["/Globals/GlobalValues.py"], "/runInterpolationAllSteps.py": ["/addInstalldirToPath.py", "/Globals/EncoderConfig.py", "/generalInterpolationProceedures.py"], "/rifeInterpolationFunctions.py": ["/QueuedFrames/FrameFile.py"], "/Globals/MachinePowerStatesHandler.py": ["/runAndPrintOutput.py"], "/runInterpolationBatch.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/autoEncoding.py": ["/frameChooser.py", "/runAndPrintOutput.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/FFmpegFunctions.py"], "/QueuedFrames/queuedFrameList.py": ["/QueuedFrames/InterpolationProgress.py"], "/frameChooser.py": ["/Globals/GlobalValues.py"], "/rifeFunctions.py": ["/Globals/BuildConfig.py"]} |
51,002 | HeylonNHP/RIFE-Colab | refs/heads/main | /Globals/GlobalValues.py | import os
import subprocess
class GlobalValues:
    """Project-wide constants and helpers shared by the interpolation pipeline."""

    # Fixed timebase used to convert between frame-filename timecodes and seconds.
    timebase = 100000

    def getFFmpegPath(self, ffprobe=False):
        """Return a runnable ffmpeg (or ffprobe) command.

        Tries the executable on PATH first, then a bundled '<name>.exe' one
        directory above this file. Returns the command string on success and
        implicitly None when neither candidate runs -- callers must handle that.
        """
        executableName = 'ffprobe' if ffprobe else 'ffmpeg'
        path = os.path.realpath(__file__)
        path = path[:path.rindex(os.path.sep)]
        orig_path = os.getcwd()
        try:
            os.chdir(path)
        except OSError:
            # This will break when this code is packaged by pyInstaller.
            pass
        try:
            # BUG FIX: bare `except:` also swallowed KeyboardInterrupt/SystemExit;
            # a missing/unrunnable executable raises OSError (FileNotFoundError etc.).
            subprocess.run([executableName], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
            os.chdir(orig_path)
            return executableName
        except OSError:
            print("Global ffmpeg doesn't exist")
            # Look for a bundled Windows build one directory up.
            path = path[:path.rindex(os.path.sep)]
            path = path + os.path.sep + executableName + '.exe'
            try:
                subprocess.run([path], stdout=subprocess.DEVNULL, stderr=subprocess.STDOUT)
                os.chdir(orig_path)
                return path
            except OSError:
                print("Can't find local ffmpeg either :(")
                os.chdir(orig_path)
| {"/install_dependencies.py": ["/addInstalldirToPath.py", "/Globals/BuildConfig.py"], "/mainGuiUi.py": ["/pyqtHeaders/FileEdit.py"], "/QueuedFrames/SaveFramesList.py": ["/QueuedFrames/FrameFile.py"], "/runInterpolationIndividualSteps.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/generalInterpolationProceedures.py": ["/QueuedFrames/SaveFramesList.py", "/QueuedFrames/queuedFrameList.py", "/QueuedFrames/queuedFrame.py", "/QueuedFrames/FrameFile.py", "/autoEncoding.py", "/runAndPrintOutput.py", "/FFmpegFunctions.py", "/frameChooser.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/Globals/InterpolatorConfig.py", "/rifeFunctions.py", "/rifeInterpolationFunctions.py"], "/main_gui.py": ["/mainGuiUi.py", "/Globals/MachinePowerStatesHandler.py", "/generalInterpolationProceedures.py"], "/FFmpegFunctions.py": ["/Globals/GlobalValues.py"], "/runInterpolationAllSteps.py": ["/addInstalldirToPath.py", "/Globals/EncoderConfig.py", "/generalInterpolationProceedures.py"], "/rifeInterpolationFunctions.py": ["/QueuedFrames/FrameFile.py"], "/Globals/MachinePowerStatesHandler.py": ["/runAndPrintOutput.py"], "/runInterpolationBatch.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/autoEncoding.py": ["/frameChooser.py", "/runAndPrintOutput.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/FFmpegFunctions.py"], "/QueuedFrames/queuedFrameList.py": ["/QueuedFrames/InterpolationProgress.py"], "/frameChooser.py": ["/Globals/GlobalValues.py"], "/rifeFunctions.py": ["/Globals/BuildConfig.py"]} |
51,003 | HeylonNHP/RIFE-Colab | refs/heads/main | /rifeFunctions.py | from googleDriveDownloader import *
import shutil
import glob
import os
from Globals.BuildConfig import BuildConfig
RIFEPATH = 'arXiv2020RIFE'
def download_rife(install_path, on_windows, force_download_models=False):
    """Clone the Practical-RIFE repo (non-PyInstaller builds) and fetch its model files.

    Side effects: changes CWD to install_path, shells out to git and 7z,
    downloads from Google Drive, and moves the extracted model files into
    arXiv2020RIFE/train_log. force_download_models re-fetches existing models.
    """
    # Run if not previously setup
    os.chdir(install_path)
    if not BuildConfig().isPyInstallerBuild():
        if not os.path.exists('arXiv2020RIFE'):
            os.system(r'git clone https://github.com/hzwer/Practical-RIFE arXiv2020RIFE')
    # Check model files are downloaded
    # model_files = ['contextnet.pkl','flownet.pkl','unet.pkl']
    model_files = ['flownet.pkl']
    model_files_missing = False
    for modelFile in model_files:
        model_path = install_path + os.path.sep + RIFEPATH + os.path.sep + 'train_log' + os.path.sep + modelFile
        # Deleting forces the missing-file branch below to re-download.
        if force_download_models and os.path.exists(model_path):
            os.remove(model_path)
        if not os.path.exists(model_path):
            model_files_missing = True
    # If they are missing, grab them
    if model_files_missing:
        # Older model revisions kept for reference:
        # download_file_from_google_drive('11l8zknO1V5hapv2-Ke4DG9mHyBomS0Fc', 'RIFE_trained_model_new.zip')
        # download_file_from_google_drive('1wsQIhHZ3Eg4_AfCXItFKqqyDMB4NS0Yd', 'RIFE_trained_model_new.zip')
        # 3.8
        # download_file_from_google_drive('1O5KfS3KzZCY3imeCr2LCsntLhutKuAqj', 'RIFE_trained_model_new.zip')
        # 3.1
        # download_file_from_google_drive('1xn4R3TQyFhtMXN2pa3lRB8cd4E1zckQe', 'RIFE_trained_model_new.zip')
        # 4.5
        download_file_from_google_drive('17Bl_IhTBexogI9BV817kTjf7eTuJEDc0', 'RIFE_trained_model_new.zip')
        seven_zip = "7z"
        if on_windows:
            seven_zip = r"C:\Program Files\7-Zip\7z.exe"
        # 7z 'e' extracts flat; -aoa overwrites without prompting.
        os.system('"' + seven_zip + '"' + r' e RIFE_trained_model_new.zip -aoa')
        if not os.path.exists(install_path + '/arXiv2020RIFE'):
            os.mkdir(install_path + '/arXiv2020RIFE')
        if not os.path.exists(install_path + '/arXiv2020RIFE/train_log'):
            os.mkdir(install_path + '/arXiv2020RIFE/train_log')
        for data in glob.glob("*.pkl"):
            shutil.move(data, install_path + "/arXiv2020RIFE/train_log/")
        shutil.move('IFNet_HDv3.py', install_path + '/arXiv2020RIFE/train_log/IFNet_HDv3.py')
        shutil.move('RIFE_HDv3.py', install_path + '/arXiv2020RIFE/train_log/RIFE_HDv3.py')
        shutil.move('refine.py', install_path + '/arXiv2020RIFE/train_log/refine.py')
        os.remove(install_path + "/RIFE_trained_model_new.zip")
| {"/install_dependencies.py": ["/addInstalldirToPath.py", "/Globals/BuildConfig.py"], "/mainGuiUi.py": ["/pyqtHeaders/FileEdit.py"], "/QueuedFrames/SaveFramesList.py": ["/QueuedFrames/FrameFile.py"], "/runInterpolationIndividualSteps.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/generalInterpolationProceedures.py": ["/QueuedFrames/SaveFramesList.py", "/QueuedFrames/queuedFrameList.py", "/QueuedFrames/queuedFrame.py", "/QueuedFrames/FrameFile.py", "/autoEncoding.py", "/runAndPrintOutput.py", "/FFmpegFunctions.py", "/frameChooser.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/Globals/InterpolatorConfig.py", "/rifeFunctions.py", "/rifeInterpolationFunctions.py"], "/main_gui.py": ["/mainGuiUi.py", "/Globals/MachinePowerStatesHandler.py", "/generalInterpolationProceedures.py"], "/FFmpegFunctions.py": ["/Globals/GlobalValues.py"], "/runInterpolationAllSteps.py": ["/addInstalldirToPath.py", "/Globals/EncoderConfig.py", "/generalInterpolationProceedures.py"], "/rifeInterpolationFunctions.py": ["/QueuedFrames/FrameFile.py"], "/Globals/MachinePowerStatesHandler.py": ["/runAndPrintOutput.py"], "/runInterpolationBatch.py": ["/addInstalldirToPath.py", "/generalInterpolationProceedures.py"], "/autoEncoding.py": ["/frameChooser.py", "/runAndPrintOutput.py", "/Globals/GlobalValues.py", "/Globals/EncoderConfig.py", "/FFmpegFunctions.py"], "/QueuedFrames/queuedFrameList.py": ["/QueuedFrames/InterpolationProgress.py"], "/frameChooser.py": ["/Globals/GlobalValues.py"], "/rifeFunctions.py": ["/Globals/BuildConfig.py"]} |
51,004 | TechTarun/Corona-Dashboard | refs/heads/master | /login/urls.py | from django.urls import path
from .views import *
urlpatterns = [
path('login', getform, name='get_login_form'),
path('login/verify', verifylogin, name='verify_login_details')
] | {"/login/urls.py": ["/login/views.py"]} |
51,005 | TechTarun/Corona-Dashboard | refs/heads/master | /login/views.py | from django.shortcuts import render
# Create your views here.
def getform(request):
    """Render the login form with an empty status message."""
    context = {'message': ""}
    return render(request, 'login/login.html', context)
def verifylogin(request):
    """Check submitted credentials; render home on success, login page with an error otherwise.

    NOTE(review): verification is not implemented yet -- is_verified is
    hard-coded to False, so every request currently hits the error branch.
    """
    is_verified = False
    # logic for verification
    if is_verified:  # idiomatic truthiness test instead of '== True'
        return render(request, 'home/home.html')
    else:
        return render(request, 'login/login.html', {'message': "Credentials are wrong!!"})
| {"/login/urls.py": ["/login/views.py"]} |
51,027 | amanuel/Django-To-Do-App | refs/heads/master | /todos/migrations/0001_initial.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-13 02:17
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 1.9.2 (2016-02-13). Do not hand-edit field
    # definitions; run `makemigrations` to produce follow-up migrations instead.

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Todo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('order', models.PositiveIntegerField(blank=True, null=True)),
                ('done', models.BooleanField(default=False, verbose_name='Done')),
                ('done_date', models.DateTimeField(blank=True, null=True)),
                ('deleted', models.BooleanField(default=False, verbose_name='Deleted')),
            ],
            options={
                'ordering': ['order'],
                'verbose_name_plural': 'Todo Items',
            },
        ),
        migrations.CreateModel(
            name='TodoList',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('order', models.PositiveIntegerField(blank=True, null=True)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'ordering': ['order'],
                'verbose_name_plural': 'Todo Lists',
            },
        ),
        migrations.AddField(
            model_name='todo',
            name='todolist',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='todo_items', to='todos.TodoList'),
        ),
        migrations.AddField(
            model_name='todo',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| {"/todos/admin.py": ["/todos/models.py"], "/todos/serializers.py": ["/todos/models.py"], "/todos/views.py": ["/todos/serializers.py", "/todos/models.py"]} |
51,028 | amanuel/Django-To-Do-App | refs/heads/master | /todos/admin.py | from django.contrib import admin
from todos.models import Todo, TodoList
class TodoListAdmin(admin.ModelAdmin):
    """Admin for TodoList; the 'done' column reads the model's computed property."""
    list_display = ['order', 'name', 'user', 'done']
    list_editable = ['order']  # allow reordering straight from the changelist
    search_fields = ['name']
    filter_horizontal = []
    ordering = ['order']
class TodoAdmin(admin.ModelAdmin):
    """Admin for Todo items, filterable by parent list name and done state."""
    list_display = ['order', 'name', 'done', 'todolist', 'user']
    list_editable = ['order']  # allow reordering straight from the changelist
    list_filter = ['todolist__name', 'done']
    search_fields = ['name', 'todolist__name']
    ordering = ['order']
    filter_horizontal = []
# Register both models with their customised admin classes.
admin.site.register(Todo, TodoAdmin)
admin.site.register(TodoList, TodoListAdmin)
| {"/todos/admin.py": ["/todos/models.py"], "/todos/serializers.py": ["/todos/models.py"], "/todos/views.py": ["/todos/serializers.py", "/todos/models.py"]} |
51,029 | amanuel/Django-To-Do-App | refs/heads/master | /todos/models.py | from __future__ import unicode_literals
from django.db import models
from django.conf import settings
from django.core.urlresolvers import reverse
from django.utils.timezone import now
class TodoList(models.Model):
    """model for todo list."""
    name = models.CharField(max_length=255)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False)
    order = models.PositiveIntegerField(editable=True, blank=True, null=True)

    def save(self, *args, **kwargs):
        # On first save, default `order` to (last row's id + 1) so new lists sort
        # last; getattr handles the empty-table case (objects.last() is None).
        if not self.pk:
            self.order = self.order or int(getattr(TodoList.objects.last(), 'id', 0)) + 1
        super(TodoList, self).save(*args, **kwargs)

    @property
    def done(self):
        # A list is done when it has no unfinished items (vacuously True when empty).
        return self.todo_items.filter(done=False).count() == 0

    class Meta:
        ordering = ['order']
        verbose_name_plural = 'Todo Lists'

    def __str__(self):
        return self.name
class Todo(models.Model):
    """model for todo items."""
    name = models.CharField(max_length=255)
    todolist = models.ForeignKey(TodoList, blank=False, related_name='todo_items')
    user = models.ForeignKey(settings.AUTH_USER_MODEL, blank=False)
    order = models.PositiveIntegerField(editable=True, blank=True, null=True)
    done = models.BooleanField('Done', default=False)
    done_date = models.DateTimeField(blank=True, null=True)
    deleted = models.BooleanField('Deleted', default=False)

    def save(self, *args, **kwargs):
        # On first save, default `order` to (last row's id + 1) so new items sort last.
        if not self.pk:
            self.order = self.order or int(getattr(Todo.objects.last(), 'id', 0)) + 1
        # BUG FIX: the original `self.done_date = self.done_date or now() if self.done else None`
        # parsed as `self.done_date or (now() if self.done else None)`, so a stale
        # done_date was never cleared when an item was marked not-done again.
        self.done_date = (self.done_date or now()) if self.done else None
        super(Todo, self).save(*args, **kwargs)

    def __str__(self):
        return self.name

    class Meta:
        ordering = ['order']
        verbose_name_plural = 'Todo Items'
| {"/todos/admin.py": ["/todos/models.py"], "/todos/serializers.py": ["/todos/models.py"], "/todos/views.py": ["/todos/serializers.py", "/todos/models.py"]} |
51,030 | amanuel/Django-To-Do-App | refs/heads/master | /todos/serializers.py | from rest_framework import serializers
from todos.models import Todo, TodoList
class FilterRelatedMixin(object):
    """
    DRF doesn't properly filter related fields under some situations.
    This Mixin provides ability to do additional filtering in a serializer
    by defining a method filter_<fieldname> in a serializer.
    see https://github.com/tomchristie/django-rest-framework/issues/1985
    """

    def __init__(self, *args, **kwargs):
        super(FilterRelatedMixin, self).__init__(*args, **kwargs)
        # BUG FIX: dict.iteritems() exists only on Python 2; items() works on
        # both Python 2 (list of pairs) and Python 3 (view), so this no longer
        # crashes under Python 3.
        for name, field in self.fields.items():
            if isinstance(field, serializers.RelatedField):
                method_name = 'filter_%s' % name
                try:
                    func = getattr(self, method_name)
                except AttributeError:
                    # No filter hook declared for this field -- leave it alone.
                    pass
                else:
                    # Narrow the related field's queryset via the hook.
                    field.queryset = func(field.queryset)
class UserFilteredModelSerializer(FilterRelatedMixin, serializers.ModelSerializer):
    """Base serializer that limits the 'user' related field to the requester."""

    def filter_user(self, queryset):
        # Restrict user to only see/select themselves in DRF pages unless they are staff
        request = self.context.get('request')
        if request:
            user = request.user
            return queryset if user.is_staff else queryset.filter(pk=user.id)
        # NOTE(review): implicitly returns None when no request is in context
        # (manual serializer use); the related field's queryset then becomes
        # None -- confirm this is intended.
class TodoSerializer(UserFilteredModelSerializer):
    """Serializer for Todo items; 'todolist' choices limited to the requester's lists."""

    def filter_todolist(self, queryset):
        # Restrict user to only see their todo lists unless they are staff
        request = self.context.get('request')
        if request:
            user = request.user
            return queryset if user.is_staff else queryset.filter(user=request.user)

    class Meta:
        model = Todo  # old-style DRF Meta: no explicit `fields`, all implied
class TodoListSerializer(UserFilteredModelSerializer):
    """Serializer for TodoList with its items nested read-only via related_name."""
    todo_items = TodoSerializer(read_only=True, many=True)

    class Meta:
        model = TodoList
| {"/todos/admin.py": ["/todos/models.py"], "/todos/serializers.py": ["/todos/models.py"], "/todos/views.py": ["/todos/serializers.py", "/todos/models.py"]} |
51,031 | amanuel/Django-To-Do-App | refs/heads/master | /todos/views.py | from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from todos.serializers import TodoSerializer, TodoListSerializer
from todos.models import Todo, TodoList
class TodosViewSet(viewsets.ModelViewSet):
    """CRUD API for Todo items, scoped to the requesting user unless staff."""
    queryset = Todo.objects.all().order_by('order')
    serializer_class = TodoSerializer
    filter_fields = ('deleted', 'done')  # enables ?deleted=&done= query filtering
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        """Hide soft-deleted items on list (unless ?show_all=) and scope to the user."""
        queryset = self.queryset
        if self.action == 'list':
            # Soft-deleted items are hidden by default; ?show_all= reveals them.
            if not self.request.query_params.get('show_all'):
                queryset = queryset.filter(deleted=False)
        # Show only todo items that belong to user
        # NOTE(review): nesting reconstructed -- user scoping assumed to apply
        # to all actions, not just 'list'; confirm against original file.
        if self.request.user.is_staff:
            queryset = queryset.order_by('order')
        else:
            user = self.request.user
            queryset = queryset.filter(user=user).order_by('order')
        return queryset
class TodoListsViewSet(viewsets.ModelViewSet):
    """CRUD API for TodoLists, scoped to the requesting user unless staff."""
    queryset = TodoList.objects.all()
    serializer_class = TodoListSerializer
    permission_classes = [IsAuthenticated]

    def get_queryset(self):
        # Show only todo lists that belong to user unless they are staff
        if self.request.user.is_staff:
            return TodoList.objects.all().order_by('order')
        else:
            user = self.request.user
            return user.todolist_set.all().order_by('order')
| {"/todos/admin.py": ["/todos/models.py"], "/todos/serializers.py": ["/todos/models.py"], "/todos/views.py": ["/todos/serializers.py", "/todos/models.py"]} |
51,033 | basicworld/mengbao | refs/heads/master | /settings.py | # -*- coding: utf-8 -*-
"""
"""
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')  # Python 2 only: force utf-8 as the default str encoding
# Absolute path of the directory containing this settings file.
ROOT_DIR = os.path.realpath(os.path.dirname(__file__))
if __name__ == '__main__':
    print ROOT_DIR
| {"/text_content_parse.py": ["/Account.py", "/Juhe.py"], "/main.py": ["/text_content_parse.py"]} |
51,034 | basicworld/mengbao | refs/heads/master | /Account.py | # -*- coding: utf-8 -*-
# filename: main.py
"""
一些账户类
"""
import sys
reload(sys)
sys.setdefaultencoding('utf8') # 编译环境utf8
import MySQLdb as mdb
import sys
import chardet
# SQL/display templates filled via Python %-interpolation.
# NOTE(review): lookup values are spliced straight into the SQL text -- SQL
# injection risk if input is ever untrusted; prefer cursor.execute(sql, params).
sql_query_words_model = """
SELECT *
from Words w
where w.word='%(word)s';"""
sql_query_phone_model = """
select m.MobileNumber,
m.MobileType,
m.MobileArea,
m.AreaCode
from Mobile m
where TRUE
and m.MobileNumber='%(phone)s';
"""
# Display template for phone lookups (fields: number, type, area, area code).
str_query_phone_model = u"""\
手机号字段: %s
手机号类型: %s
手机号归属地: %s
归属地区号: %s\
"""
sql_query_idcard_model = u'''
select i.`Zone`, i.`Desc`
from IDCard i
where TRUE
and i.Zone='%(idcard)s';'''
# Display template for ID-card lookups (fields: zone, description).
str_query_idcard_model = u"""\
身份证字段: %s
身份证归属地: %s
"""
# Fallback message when a query returns no rows ("no related info found").
sql_error_model = "未查询到相关信息"
class MysqlQuery(object):
    """Thin MySQLdb helper for the 'mengbao' DB: phone-prefix, ID-card-prefix
    and dictionary-word lookups.

    NOTE: Python 2 code (`except mdb.Error, e` syntax; print statements).
    """

    def __init__(self, host='localhost', user='wechat', passwd='', dbname='mengbao', charset='utf8'):
        # try:
        self.con = mdb.connect(host, user, passwd, dbname)
        #self.con = mdb.connect(host, user, passwd, dbname, charset=charset)
        self.cur = self.con.cursor()
        # Earlier charset experiments kept for reference:
        #self.con.set_character_set('utf8')
        #self.cur.execute('SET NAMES utf8;')
        #self.cur.execute('SET CHARACTER SET utf8;')
        #self.cur.execute('SET character_set_connection=utf8;')

    def __del__(self):
        self._close()

    def __exit__(self):
        # NOTE(review): a context-manager __exit__ normally takes
        # (exc_type, exc_val, exc_tb); this arity would fail inside a `with`
        # block -- confirm intended usage.
        self._close()

    def _close(self):
        # Close the DB connection if it was opened.
        if self.con:
            self.con.close()

    def query_phone(self, phone):
        """Return carrier/region info for a phone number (first 7 digits used)."""
        phone = self._get_head(phone)
        sql = sql_query_phone_model % {'phone': phone}
        try:
            self.cur.execute(sql)
            que = self.cur.fetchone()
            return (str_query_phone_model % que) if que else sql_error_model
        except mdb.Error, e:
            return "Error %d: %s" % (e.args[0], e.args[1])

    def query_idcard(self, idcard):
        """Return region info for an ID-card number (first 6 digits used)."""
        idcard = self._get_head(idcard, 6)
        sql = sql_query_idcard_model % {'idcard': idcard}
        try:
            self.cur.execute(sql)
            que = self.cur.fetchone()
            return (str_query_idcard_model % que) if que else sql_error_model
        except mdb.Error, e:
            return "Error %d: %s" % (e.args[0], e.args[1])

    def query_word(self, word):
        """English-Chinese dictionary lookup; returns the definition or False."""
        # If the input is not purely alphabetic, it cannot be an English word.
        #print word.isalpha()
        if (not word.isalpha()) or word.isdigit():
            return False
        try:
            sql = sql_query_words_model % {'word': word}
            print sql
            self.cur.execute(sql)
            que = self.cur.fetchone()
            if que:
                # Last column holds the definition text.
                #print que[-1].decode('utf8')
                #print chardet.detect(que[-1])
                return que[-1]
            # Disabled lowercase-retry fallback kept for reference:
            #elif not word.islower():
            #    sql = sql_query_words_model % {'word': word.lower()}
            #    self.cur.execute(sql)
            #    que = self.cur.fetchone()
            #    #print que[-1] if que else 'testwlfei'
            #    return (str_query_words_model % que) if que else False
            else:
                return False
        except mdb.Error, e:
            return "Error %d: %s" % (e.args[0], e.args[1])

    def _test(self):
        """Return the MySQL server version string (connectivity smoke test)."""
        self.cur.execute("SELECT VERSION()")
        ver = self.cur.fetchone()
        return "Database version : %s " % ver

    @staticmethod
    def _get_head(phone, head=7):
        # Keep only the leading `head` characters (the prefix used for lookups).
        return phone[:head]
if __name__ == '__main__':
    # Manual smoke test (Python 2 print statements) against a live local DB.
    my = MysqlQuery()
    print my._test()
    print my.query_phone('15311447009')
    print my.query_phone('1299999')
    print my.query_phone('234111113')
    print my.query_phone('153')
    print my.query_idcard('sfz110100')
    print my.query_idcard('sfz999999')
    print my.query_idcard('110100199212292314')
    print my.query_word('110100199212292314')
    print my.query_word('english')
    print my.query_word('你好')
    del my
51,035 | basicworld/mengbao | refs/heads/master | /text_content_parse.py | # -*- coding: utf-8 -*-
"""
解析文本消息内容
可能是:英汉翻译、天气、笑话、手机归属地、身份证归属地、快递单号
"""
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8') # 编译环境utf8
help_info = u'''\
功能示例
1. 查询手机归属地
15311448899
手机15311448899
sj15311448899
(至少输入手机号前7位)
2. 查询身份证归属地
身份证110100199212303410
sfz110100199212303410
(至少输入身份证前6位)
3. 看笑话
输入任一关键词:x、xh、笑话
4. 牛津词典
输入单词, 如: english
'''
# 5. 机器人
# 问答机器人, 如: 你好
from Account import MysqlQuery
from Juhe import Juhe
import chardet
def text_parse(content, **kwargs):
    """Dispatch an incoming text message: joke keywords, dictionary words,
    phone-number lookup, ID-card lookup -- otherwise return the help text.

    NOTE: Python 2 code (str/unicode mixing relies on setdefaultencoding).
    """
    content = content.strip()
    content = content.encode('utf8')
    #print chardet.detect(content)
    my = MysqlQuery()
    # Joke keywords are served from the Juhe API.
    if content in [u'笑话', 'xh', 'x']:
        try:
            ju = Juhe()
            jokes = ju.get_joke('randn')
            del ju
            return jokes if jokes else 'text_parse() error<002>'
        except:
            return 'text_parse() error<001>'
        # with open('error_juhe.log', 'a+') as f:
        #     f.write('resp_code:')
    else:
        # Dictionary lookup first; fall through to the other parsers on a miss.
        try:
            # my = MysqlQuery()
            #print content
            resp = my.query_word(content)
            if resp:
                # Dictionary hit -- return the definition directly.
                del my
                return resp
        except:
            raise
            # NOTE(review): unreachable -- `raise` above re-raises first.
            return 'text_parse() error<006>'
            pass
    # Phone-number lookup: "手机"/"sj" prefix, optional +86, digits, >= 7 chars.
    _copy = content.lower()
    if _copy.startswith(u'手机') or _copy.startswith(u'sj'):
        _copy = _copy[2:].strip()
        if _copy.startswith('+86'):
            _copy = _copy[3:]
        _copy = re.sub(r' |-', '', _copy)
        if _copy.isdigit() and len(_copy) >= 7 and _copy[0] == '1':
            # my = MysqlQuery()
            resp = my.query_phone(_copy)
            del my
            return resp
    # ID-card lookup: "身份证"/"sfz" prefix, digits only, >= 6 chars.
    _copy = content.lower()
    if _copy.startswith(u'身份证') or _copy.startswith(u'sfz'):
        _copy = _copy[3:].strip()
        _copy = re.sub(r' |-', '', _copy)
        if _copy.isdigit() and len(_copy) >= 6:
            # my = MysqlQuery()
            resp = my.query_idcard(_copy)
            del my
            return resp
    # Q&A robot disabled while being debugged:
    # if content not in ['help', 'h']:
    #     ju = Juhe()
    #     return ju.get_robot(content)
    #     del ju
    return help_info
| {"/text_content_parse.py": ["/Account.py", "/Juhe.py"], "/main.py": ["/text_content_parse.py"]} |
51,036 | basicworld/mengbao | refs/heads/master | /Juhe.py | # -*- coding: utf-8 -*-
# filename: main.py
"""
聚合数据接口
"""
import sys
reload(sys)
sys.setdefaultencoding('utf8') # 编译环境utf8
# import time
# from purl import URL
import requests
import json
from mengbao_private_conf import juhe_url_randn_joke, juhe_url_new_joke
from mengbao_private_conf import juhe_url_robot_model
import random
class Juhe(object):
    """Client for the Juhe (聚合数据) public APIs: jokes and a Q&A robot.

    URL/key templates come from mengbao_private_conf.
    """

    def __init__(self):
        pass

    def get_robot(self, info):
        """Ask the Juhe Q&A robot and return the answer text.

        Sample success payload:
        {
            "reason": "...",
            "result": {"code": 100000, "text": "..."},
            "error_code": 0
        }
        """
        # http://op.juhe.cn/robot/index?info=你好&key=YOUR_APPKEY
        url = juhe_url_robot_model % {'info': info}
        try:
            resp = requests.get(url)
            if resp.ok:
                answer = json.loads(resp.content)
                return answer['result']['text']
            else:
                # Non-2xx: surface the raw body for debugging.
                return resp.content
        except:
            return 'get_robot()请求错误<005>'

    def get_joke(self, typ='new'):
        """Fetch jokes: typ='new' joins the latest batch; typ='randn' picks one at random."""
        try:
            if typ == 'new':
                resp = requests.get(juhe_url_new_joke)
                if resp.ok:
                    jokes = self._parse_new_joke(resp.content)
                    return jokes
                else:
                    return 'get_joke()解析错误<003>: %s' % resp.status_code
            elif typ == 'randn':
                resp = requests.get(juhe_url_randn_joke)
                if resp.ok:
                    jokes = self._parse_randn_joke(resp.content)
                    return jokes
                else:
                    return 'get_joke()解析错误<005>: %s' % resp.status_code
        except:
            # raise
            return 'get_joke()请求错误<004>'

    @staticmethod
    def _parse_randn_joke(resp_content):
        """Pick one random joke from the random-joke API response body."""
        try:
            ret = json.loads(resp_content)
            if ret['reason'].lower() == 'success':
                return random.choice(ret['result'])['content'].strip()
                # return '\n----------\n'.join([i['content'] for i in results])
            else:
                return u'_parse_randn_joke()解析错误<003>'
        except:
            raise
            # NOTE(review): unreachable -- `raise` above re-raises first.
            return u'_parse_randn_joke()解析错误<004>'

    @staticmethod
    def _parse_new_joke(resp_content):
        """Join all jokes in the latest-jokes API response with a divider line."""
        try:
            ret = json.loads(resp_content)
            if ret['reason'].lower() == 'success':
                return '\n----------\n'.join([i['content'].strip() for i in ret['result']['data']])
            else:
                return u'_parse_new_joke()解析错误<001>'
        except:
            return u'_parse_new_joke()解析错误<002>'
if __name__ == '__main__':
    # Smoke test (Python 2 print statements): both joke modes and the robot.
    ju = Juhe()
    jokes = ju.get_joke('new')
    print jokes
    jokes = ju.get_joke('randn')
    print jokes
    answer = ju.get_robot('你好')
    print answer
    answer = ju.get_robot('北京天气')
    print answer
    del ju
| {"/text_content_parse.py": ["/Account.py", "/Juhe.py"], "/main.py": ["/text_content_parse.py"]} |
51,037 | basicworld/mengbao | refs/heads/master | /main.py | # -*- coding: utf-8 -*-
# filename: main.py
"""
主框架,主文件
"""
import os
import sys
reload(sys)
sys.setdefaultencoding('utf8')  # force utf-8 default encoding (Python 2 only)
THIS_DIR = os.path.realpath(os.path.dirname(__file__))
sys.path.insert(0, THIS_DIR)  # make sibling modules importable when run from elsewhere
from text_content_parse import text_parse
# wechat-sdk setting start
from mengbao_private_conf import *
from wechat_sdk import WechatConf
conf = WechatConf(
    token=mbtoken,
    appid=mbappid,
    appsecret=mbappsecret,
    encrypt_mode=mbencrypt_mode,  # one of normal/compatible/safe (plaintext/compatibility/secure modes)
    encoding_aes_key=mbencoding_aes_key,  # if supplied, token and appid must be supplied as well
)
from wechat_sdk import WechatBasic
from wechat_sdk.exceptions import ParseError  # used when receiving messages
# Message types:
from wechat_sdk.messages import TextMessage  # (text message class),
# from wechat_sdk.messages import ImageMessage  # (image message class),
# from wechat_sdk.messages import VideoMessage  # (video message class),
# from wechat_sdk.messages import LocationMessage  # (location message class),
# from wechat_sdk.messages import LinkMessage  # (link message class),
# from wechat_sdk.messages import EventMessage  # (event message class),
# from wechat_sdk.messages import VoiceMessage  # (voice message class)
wechat = WechatBasic(conf=conf)
# wechat-sdk setting end
# flask setting start
from flask import Flask
from flask import request
app = Flask(__name__)
@app.route('/wx', methods=['GET', 'POST'])
def handle():
    """WeChat webhook endpoint.

    GET  -- server-validation handshake: echo ``echostr`` back when the
            signature checks out, otherwise return 'Wrong'.
    POST -- incoming message: parse the XML body and reply with
            WeChat-formatted XML (text messages go through text_parse;
            everything else gets the default reply).
    """
    # Source validation / API handshake (WeChat pings this with GET).
    if request.method == 'GET':
        try:
            signature = request.args.get('signature', '')
            timestamp = request.args.get('timestamp', '')
            nonce = request.args.get('nonce', '')
            echostr = request.args.get('echostr', '')
            if wechat.check_signature(signature, timestamp, nonce):
                # Echoing this value back confirms the endpoint to WeChat.
                return echostr
            else:
                return 'Wrong'
        except Exception:
            # Narrowed from a bare ``except:``.
            return 'Wrong'
    # Message reply.
    if request.method == 'POST':
        response_content = u'我现在只认识文字,别难为宝宝了(づ ̄ 3 ̄)づ'
        try:
            body_text = request.data
            wechat.parse_data(body_text)
            # After parse_data, wechat.message also exposes: .id, .target,
            # .source, .time, .type and .raw (the original XML).
            if isinstance(wechat.message, TextMessage):
                # Text message: delegate to the content parser.
                receive_content = wechat.message.content
                response_content = text_parse(receive_content)
            else:
                # Non-text message types are not handled yet; fall back to
                # the default reply above.
                pass
        except ParseError:
            response_content = 'Invalid Body Text'
        # Build the WeChat-style XML reply.
        xml = wechat.response_text(content=response_content)
        return xml
if __name__ == '__main__':
    # Development server only. The port must be an int -- the old string
    # '8008' relied on implicit conversion that werkzeug does not perform
    # when binding the socket.
    app.run(port=8008)
| {"/text_content_parse.py": ["/Account.py", "/Juhe.py"], "/main.py": ["/text_content_parse.py"]} |
51,038 | basicworld/mengbao | refs/heads/master | /test.py | # -*- coding: utf-8 -*-
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8') # 编译环境utf8
from text_content_parse import text_parse
if __name__ == '__main__':
    # Manual smoke test for the text parser. print() call form works under
    # both Python 2 and Python 3, unlike the old ``print x`` statements.
    print(text_parse('test'))
    print(text_parse('你好'))
| {"/text_content_parse.py": ["/Account.py", "/Juhe.py"], "/main.py": ["/text_content_parse.py"]} |
51,051 | gunjanswitchco/gcloud-rest | refs/heads/master | /tests/unit/taskqueue/manager_test.py | import gcloud.rest.taskqueue.manager as manager # pylint: disable=unused-import
def test_importable():
    """The module-level import of gcloud.rest.taskqueue.manager is the real
    assertion; reaching this body means the import already succeeded."""
    assert True
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,052 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/taskqueue/queue.py | import threading
import requests
from gcloud.rest.auth import Token
# v2beta2 REST endpoint for Cloud Tasks.
API_ROOT = 'https://cloudtasks.googleapis.com/v2beta2'
# Default queue location; override per-instance via TaskQueue(location=...).
LOCATION = 'us-central1'
# OAuth scopes requested for the access token.
SCOPES = [
    'https://www.googleapis.com/auth/cloud-tasks',
]
class TaskQueue(object):
    """Thin synchronous client for the Cloud Tasks v2beta2 pull-queue REST
    API.

    All HTTP calls are serialized through ``google_api_lock`` and raise
    ``requests.exceptions.HTTPError`` on non-2xx responses (except where
    noted in ``drain``).
    """

    def __init__(self, project, taskqueue, creds=None, google_api_lock=None,
                 location=LOCATION):
        # pylint: disable=too-many-arguments
        self.api_root = '{}/projects/{}/locations/{}/queues/{}'.format(
            API_ROOT, project, location, taskqueue)
        self.google_api_lock = google_api_lock or threading.RLock()
        # Token stringifies to a currently-valid OAuth access token.
        self.access_token = Token(creds=creds,
                                  google_api_lock=self.google_api_lock,
                                  scopes=SCOPES)
        self.default_header = {
            'Accept': 'application/json',
            'Content-Length': '0',
        }

    def headers(self):
        """Return request headers including a fresh Bearer token."""
        header = dict(self.default_header)
        header.update({
            'Authorization': 'Bearer {}'.format(self.access_token)
        })
        return header

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/acknowledge
    def ack(self, task):
        """Acknowledge (complete) a leased task dict."""
        url = '{}/{}:acknowledge'.format(API_ROOT, task['name'])
        body = {
            # scheduleTime proves we still hold the lease.
            'scheduleTime': task['scheduleTime'],
        }
        with self.google_api_lock:
            resp = requests.post(url, headers=self.headers(), json=body)
        resp.raise_for_status()
        return resp.json()

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/cancelLease
    def cancel(self, task):
        """Cancel the lease on a task dict so it becomes leasable again."""
        url = '{}/{}:cancelLease'.format(API_ROOT, task['name'])
        body = {
            'scheduleTime': task['scheduleTime'],
            'responseView': 'BASIC',
        }
        with self.google_api_lock:
            resp = requests.post(url, headers=self.headers(), json=body)
        resp.raise_for_status()
        return resp.json()

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/delete
    def delete(self, tname):
        """Delete a task by its fully-qualified name string."""
        url = '{}/{}'.format(API_ROOT, tname)
        with self.google_api_lock:
            resp = requests.delete(url, headers=self.headers())
        resp.raise_for_status()
        return resp.json()

    def drain(self):
        """Delete every task currently in the queue, page by page.

        Individual delete failures (e.g. a task already gone) are ignored.
        The previous implementation assumed every list() response carried
        'tasks' and 'nextPageToken' keys and raised KeyError on the final
        page; both are optional in the API response.
        """
        tasks = self.list(page_size=1000)
        while tasks:
            for task in tasks.get('tasks', []):
                try:
                    self.delete(task['name'])
                except requests.exceptions.HTTPError:
                    pass
            page_token = tasks.get('nextPageToken')
            if not page_token:
                break
            tasks = self.list(page_size=1000, page_token=page_token)

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/get
    def get(self, tname, full=False):
        """Fetch one task by name; full=True includes the payload."""
        url = '{}/{}'.format(API_ROOT, tname)
        params = {
            'responseView': 'FULL' if full else 'BASIC',
        }
        with self.google_api_lock:
            resp = requests.get(url, headers=self.headers(), params=params)
        resp.raise_for_status()
        return resp.json()

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/create
    def insert(self, payload, tag=None):
        """Create a pull task with the given (base64) payload and tag."""
        url = '{}/tasks'.format(self.api_root)
        body = {
            'task': {
                'pullMessage': {
                    'payload': payload,
                    'tag': tag,
                },
            },
            'responseView': 'FULL',
        }
        with self.google_api_lock:
            resp = requests.post(url, headers=self.headers(), json=body)
        resp.raise_for_status()
        return resp.json()

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/lease
    def lease(self, num_tasks=1, lease_duration=10, task_filter=None):
        """Lease up to num_tasks (API caps at 1000) for lease_duration
        seconds, optionally restricted by a tag filter."""
        url = '{}/tasks:lease'.format(self.api_root)
        body = {
            'maxTasks': min(num_tasks, 1000),
            'leaseDuration': '{}s'.format(lease_duration),
            'responseView': 'FULL',
        }
        if task_filter:
            body['filter'] = task_filter
        with self.google_api_lock:
            resp = requests.post(url, headers=self.headers(), json=body)
        resp.raise_for_status()
        return resp.json()

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/list
    def list(self, full=False, page_size=1000, page_token=''):
        """List tasks in the queue, one page per call."""
        url = '{}/tasks'.format(self.api_root)
        params = {
            'responseView': 'FULL' if full else 'BASIC',
            'pageSize': page_size,
            'pageToken': page_token,
        }
        with self.google_api_lock:
            resp = requests.get(url, headers=self.headers(), params=params)
        resp.raise_for_status()
        return resp.json()

    # https://cloud.google.com/cloud-tasks/docs/reference/rest/v2beta2/projects.locations.queues.tasks/renewLease
    def renew(self, task, lease_duration=10):
        """Extend the lease on a task dict by lease_duration seconds."""
        url = '{}/{}:renewLease'.format(API_ROOT, task['name'])
        body = {
            'scheduleTime': task['scheduleTime'],
            'leaseDuration': '{}s'.format(lease_duration),
            'responseView': 'FULL',
        }
        with self.google_api_lock:
            resp = requests.post(url, headers=self.headers(), json=body)
        resp.raise_for_status()
        return resp.json()
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,053 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/auth/__init__.py | from pkg_resources import get_distribution
__version__ = get_distribution('gcloud-rest').version
from gcloud.rest.auth.token import Token
__all__ = ['__version__', 'Token']
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,054 | gunjanswitchco/gcloud-rest | refs/heads/master | /tests/unit/storage/bucket_test.py | import gcloud.rest.storage.bucket as bucket # pylint: disable=unused-import
def test_importable():
    """The module-level import of gcloud.rest.storage.bucket is the real
    assertion; reaching this body means the import already succeeded."""
    assert True
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,055 | gunjanswitchco/gcloud-rest | refs/heads/master | /tests/integration/taskqueue/queue_test.py | from __future__ import print_function
from gcloud.rest.taskqueue import encode
from gcloud.rest.taskqueue import TaskQueue
def test_lifecycle(project, creds, pull_queue_name):
    """End-to-end pull-task lifecycle against the live Cloud Tasks API:
    insert -> get -> list -> lease -> renew -> delete.

    Fixtures (project, creds, pull_queue_name) are presumably provided by
    conftest -- confirm there.
    """
    payload = 'do-the-lifecycle'
    tq = TaskQueue(project, pull_queue_name, creds=creds)
    # drain old test tasks
    tq.drain()
    inserted = tq.insert(encode(payload), tag=encode('gcloud-rest-queue-test'))
    print(inserted)
    # A FULL get must round-trip exactly what insert returned.
    got = tq.get(inserted['name'], full=True)
    print(got)
    assert inserted == got
    listed = tq.list(full=True)
    print(listed)
    assert listed and listed['tasks']
    assert inserted in listed['tasks']
    # Sentinel whose 'name' lookup is valid so the while condition can be
    # evaluated before the first lease.
    leased = {'name': {'whyIsThisADict': 'subscriptableLinting'}}
    # Keep leasing single tasks until we pull back the one inserted above.
    while leased['name'] != inserted['name']:
        leased_list = tq.lease(num_tasks=1)
        print(leased_list)
        assert len(leased_list['tasks']) == 1
        leased = leased_list['tasks'][0]
    print(leased)
    # Leasing rewrites scheduleTime and adds a status with one dispatch
    # attempt; every other field must survive unchanged.
    for k, v in leased.items():
        if k == 'scheduleTime':
            assert inserted[k] != v
        elif k == 'status':
            assert not inserted.get(k)
            assert v['attemptDispatchCount'] == 1
        else:
            assert inserted[k] == v
    # Renewal must only change scheduleTime.
    renewed = tq.renew(leased)
    print(renewed)
    for k, v in renewed.items():
        if k == 'scheduleTime':
            assert leased[k] != v
        else:
            assert leased[k] == v
    # ack?
    # cancel?
    tq.delete(renewed['name'])
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,056 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/kms/__init__.py | from pkg_resources import get_distribution
__version__ = get_distribution('gcloud-rest').version
from gcloud.rest.kms.client import KMS
__all__ = ['__version__', 'KMS']
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,057 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/taskqueue/manager.py | import datetime
import json
import logging
import multiprocessing
import time
import traceback
import requests
from gcloud.rest.core import backoff
from gcloud.rest.core.util import decode
from gcloud.rest.taskqueue.error import FailFastError
from gcloud.rest.taskqueue.queue import TaskQueue
log = logging.getLogger(__name__)
class BrokenTaskManagerException(Exception):
    """Raised when a lease-renewal subprocess can no longer be spawned;
    find_tasks_forever re-raises it instead of swallowing it like other
    errors."""
def lease_manager(project, taskqueue, creds, google_api_lock, event, task,
                  lease_seconds, data):
    # pylint: disable=too-many-arguments
    """
    This function extends the Pull Task Queue lease to make sure no other
    workers pick up the same task. This is force-killed after the task work
    is complete.

    Runs in a separate process; ``data`` is a manager-shared dict used to
    exchange the latest ``scheduleTime`` with the owner.
    """
    tq = TaskQueue(project, taskqueue, creds=creds,
                   google_api_lock=google_api_lock)
    while not event.is_set():
        # Wait half the lease duration, waking immediately if the owner
        # signals completion. This replaces the old 0.1s sleep-poll loop.
        event.wait(timeout=lease_seconds / 2.0)
        if event.is_set():
            # The old code still performed one renewal here after the event
            # was set -- a wasted (and error-logging) call on a task that is
            # already acked or deleted.
            break
        try:
            log.info('extending lease for %s', task['name'])
            # renew() needs the *current* scheduleTime as proof of lease
            # ownership, so read the latest one from the shared dict.
            task['scheduleTime'] = data['scheduleTime']
            renewed = tq.renew(task, lease_duration=lease_seconds)
            data['scheduleTime'] = renewed['scheduleTime']
        except Exception as e:  # pylint: disable=broad-except
            log.error('failed to autorenew task: %s', task['name'], exc_info=e)
            event.set()
    log.debug('closing lease_manager for task %s', task['name'])
class TaskManager(object):
    # pylint: disable=too-many-instance-attributes
    """Polls a pull task queue, runs ``worker`` over batches of leased task
    payloads, and acks / retries / deadletters each task from the results.

    ``worker`` is called with a list of decoded payloads and must return a
    same-length list of results; an Exception instance as a result marks
    that task failed (a FailFastError result skips retries entirely).
    """

    def __init__(self, project, taskqueue, worker, backoff_base=2,
                 backoff_factor=1.1, backoff_max_value=60, batch_size=1,
                 deadletter_insert_function=None, google_api_lock=None,
                 lease_seconds=60, retry_limit=None, service_file=None):
        # pylint: disable=too-many-arguments
        self.project = project
        self.taskqueue = taskqueue
        self.worker = worker
        self.creds = service_file
        # Exponential backoff generator used when the queue is idle/erroring.
        self.backoff = backoff(base=backoff_base, factor=backoff_factor,
                               max_value=backoff_max_value)
        self.batch_size = max(batch_size, 1)
        self.deadletter_insert_function = deadletter_insert_function
        self.lease_seconds = lease_seconds
        self.retry_limit = retry_limit
        self.manager = multiprocessing.Manager()
        self.stop_event = multiprocessing.Event()
        self.google_api_lock = google_api_lock or multiprocessing.RLock()
        self.tq = TaskQueue(project, taskqueue, creds=self.creds,
                            google_api_lock=self.google_api_lock)

    def find_tasks_forever(self):
        """Loop until stop(): lease work, process it, back off while idle."""
        while not self.stop_event.is_set():
            try:
                churning = self.find_and_process_work()
            except BrokenTaskManagerException:
                # Subprocess spawning is broken; do not swallow this one.
                raise
            except Exception as e:  # pylint: disable=broad-except
                log.error('swallowing exception in find_and_process_work()',
                          exc_info=e)
                continue
            if churning:
                # No work (or lease errors): sleep per the backoff schedule.
                time.sleep(next(self.backoff))
            else:
                # Work was processed: reset the backoff generator.
                self.backoff.send(None)
                self.backoff.send('reset')

    def find_and_process_work(self):
        """
        Query the Pull Task Queue REST API for work every N seconds. If work
        found, block and perform work while asynchronously updating the lease
        deadline.

        Returns True when the caller should back off (no work, lease error),
        False when a batch was processed.

        http://stackoverflow.com/a/17071255
        """
        # pylint: disable=too-many-locals
        try:
            task_lease = self.tq.lease(num_tasks=self.batch_size,
                                       lease_duration=self.lease_seconds)
        except requests.exceptions.HTTPError as e:
            # 429 just means the queue is saturated with lease calls.
            if e.response.status_code != 429:
                log.error('got error attempting to lease tasks, retrying',
                          exc_info=e)
            return True
        if not task_lease:
            return True
        tasks = task_lease.get('tasks')
        if not tasks:
            # Guard: a non-empty response without a 'tasks' list used to
            # crash on len(None) below.
            return True
        log.info('grabbed %d tasks', len(tasks))
        leasers = []
        payloads = []
        active_tasks = []  # the tasks actually handed to the worker
        for task in tasks:
            payload = json.loads(
                decode(task['pullMessage']['payload']).decode())
            retries = int(task['status']['attemptDispatchCount'])
            # NOTE(review): the 2x multiplier gives tasks extra slack beyond
            # retry_limit before force-failing -- confirm this is intended.
            if self.retry_limit is not None and retries > 2 * self.retry_limit:
                log.warning('force failing task %s with %d/%d retries',
                            task['name'], retries, self.retry_limit)
                self.fail_task(payload, 'force failed after too many attempts')
                self.tq.delete(task['name'])
                continue
            payloads.append(payload)
            try:
                data = self.manager.dict()
                data['scheduleTime'] = task['scheduleTime']
                event = multiprocessing.Event()
                lm = multiprocessing.Process(
                    target=lease_manager,
                    args=(self.project, self.taskqueue, self.creds,
                          self.google_api_lock, event, task,
                          self.lease_seconds, data))
                lm.daemon = True
                lm.start()
            except Exception as e:  # pylint: disable=broad-except
                log.error('got error while scheduling task', exc_info=e)
                raise BrokenTaskManagerException('broken process pool')
            leasers.append((event, lm, data))
            # Track the task too so the result zip below stays aligned even
            # when earlier tasks were force-failed and skipped. The old code
            # zipped against the full `tasks` list, misattributing results
            # after any skipped task.
            active_tasks.append(task)
        try:
            results = self.worker(payloads)
        except Exception:
            # Ensure subprocesses die. N.B. doing this in multiple loops is
            # overall faster, since we don't care about the renewed tasks.
            for (e, _, _) in leasers:
                e.set()
            for (_, lm, _) in leasers:
                lm.join()
            raise
        for ((e, lm, data), task, payload, result) in zip(leasers,
                                                          active_tasks,
                                                          payloads, results):
            e.set()
            lm.join()
            self.check_task_result(task, data, payload, result)
        return False

    def check_task_result(self, task, data, payload, result):
        """Ack, retry or deadletter a single task based on its result."""
        # Use the freshest scheduleTime from the lease-renewal process: the
        # API requires it as proof of lease ownership.
        task['scheduleTime'] = data['scheduleTime']
        if isinstance(result, FailFastError):
            log.error('[FailFastError] failed to process task: %s', payload,
                      exc_info=result)
            self.fail_task(payload, result)
            # Fixed: delete() takes the task *name*; the old code passed the
            # whole task dict, building a bogus URL.
            self.tq.delete(task['name'])
            return
        if isinstance(result, Exception):
            log.error('failed to process task: %s', payload, exc_info=result)
            if self.retry_limit is None:
                # No limit configured: release the lease for another try.
                self.tq.cancel(task)
                return
            retries = int(task['status']['attemptDispatchCount'])
            if retries < self.retry_limit:
                log.info('%d retries for task %s is below limit %d', retries,
                         task['name'], self.retry_limit)
                self.tq.cancel(task)
                return
            log.warning('retry_limit exceeded, failing task %s at %d',
                        task['name'], retries)
            self.fail_task(payload, result)
            self.tq.delete(task['name'])
            return
        log.info('successfully processed task: %s', task['name'])
        self.tq.ack(task)

    def fail_task(self, payload, exception):
        """Record a permanently-failed task via the deadletter callback,
        if one was configured."""
        if not self.deadletter_insert_function:
            return
        properties = {
            'error': str(exception),
            'generation': None,
            'metageneration': None,
            'payload': payload,
            'time_created': datetime.datetime.now(),
            'traceback': traceback.format_exc(),
            'update': None,
        }
        self.deadletter_insert_function(payload.get('name'), properties)

    def stop(self):
        """Signal find_tasks_forever() to exit after the current batch."""
        self.stop_event.set()
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,058 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/storage/__init__.py | from pkg_resources import get_distribution
__version__ = get_distribution('gcloud-rest').version
from gcloud.rest.storage.bucket import Bucket
__all__ = ['__version__', 'Bucket']
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,059 | gunjanswitchco/gcloud-rest | refs/heads/master | /tests/integration/taskqueue/manager_test.py | import json
import time
import pytest
from gcloud.rest.taskqueue import encode
from gcloud.rest.taskqueue import TaskManager
@pytest.mark.xfail
def test_lifecycle(caplog, mocker, project, creds, pull_queue_name):
    """Live-queue happy path: insert a batch, run one
    find_and_process_work() pass, assert the worker received exactly the
    decoded payload batch and nothing was logged at ERROR."""
    tasks = [
        {'test_idx': 1},
        {'test_idx': 2},
        {'test_idx': 3},
        {'test_idx': 4},
    ]
    worker = mocker.Mock()
    # One 'ok' per task so every task gets acked.
    worker.return_value = ['ok' for _ in tasks]
    tm = TaskManager(project, pull_queue_name, worker, batch_size=len(tasks),
                     lease_seconds=10, service_file=creds)
    # drain old test tasks
    tm.tq.drain()
    # insert new ones
    for task in tasks:
        tm.tq.insert(encode(json.dumps(task)),
                     tag=encode('gcloud-rest-manager-test-lifecycle'))
    tm.find_and_process_work()
    # Exactly one worker call, with the whole payload batch.
    assert worker.mock_calls == [mocker.call(tasks)]
    for record in caplog.records:
        assert record.levelname != 'ERROR'
@pytest.mark.slow
@pytest.mark.xfail
def test_multiple_leases(caplog, mocker, project, creds, pull_queue_name):
    """The worker sleeps (10s) past the lease (4s), so the background
    lease_manager must renew the lease at least once for the batch to be
    processed without ERROR logs."""
    tasks = [
        {'test_idx': 1},
        {'test_idx': 2},
    ]
    # Worker deliberately outlives the initial lease before succeeding.
    def succeed_after_multiple_leases(ts):
        time.sleep(10)
        return ['ok' for _ in ts]
    worker = mocker.Mock()
    worker.side_effect = succeed_after_multiple_leases
    tm = TaskManager(project, pull_queue_name, worker, batch_size=len(tasks),
                     lease_seconds=4, service_file=creds)
    # drain old test tasks
    tm.tq.drain()
    # insert new ones
    for task in tasks:
        tm.tq.insert(encode(json.dumps(task)),
                     tag=encode('gcloud-rest-manager-test-multilease'))
    caplog.clear()
    tm.find_and_process_work()
    assert worker.mock_calls == [mocker.call(tasks)]
    for record in caplog.records:
        assert record.levelname != 'ERROR'
@pytest.mark.slow
@pytest.mark.xfail
def test_multiple_leases_churn(caplog, mocker, project, creds,
                               pull_queue_name):
    """Same as test_multiple_leases, but the worker burns CPU instead of
    sleeping -- exercises lease renewal while the main process is busy."""
    tasks = [
        {'test_idx': 1},
        {'test_idx': 2},
    ]
    # CPU-bound busy work that outlives the 4s lease.
    def succeed_after_multiple_leases(ts):
        _ = [x**2 for x in range(40000000)]
        return ['ok' for _ in ts]
    worker = mocker.Mock()
    worker.side_effect = succeed_after_multiple_leases
    tm = TaskManager(project, pull_queue_name, worker, batch_size=len(tasks),
                     lease_seconds=4, service_file=creds)
    # drain old test tasks
    tm.tq.drain()
    # insert new ones
    for task in tasks:
        tm.tq.insert(encode(json.dumps(task)),
                     tag=encode('gcloud-rest-manager-test-multilease'))
    caplog.clear()
    tm.find_and_process_work()
    assert worker.mock_calls == [mocker.call(tasks)]
    for record in caplog.records:
        assert record.levelname != 'ERROR'
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,060 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/auth/token.py | import datetime
import json
import logging
import os
import threading
import time
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
# N.B. the cryptography library is required when calling jwt.encrypt() with
# algorithm='RS256'. It does not need to be imported here, but this allows us
# to throw this error at load time rather than lazily during normal operations,
# where plumbing this error through will require several changes to otherwise-
# good error handling.
import cryptography # pylint: disable=unused-import
import jwt
import requests
# Google OAuth2 token-exchange endpoint.
TOKEN_URI = 'https://accounts.google.com/o/oauth2/token'
# Default HTTP timeout (seconds) for the token request.
TIMEOUT = 60
log = logging.getLogger(__name__)
class Token(object):
    """Service-account OAuth2 access token, lazily acquired and refreshed.

    ``str(token)`` returns a currently-valid access token, acquiring or
    refreshing it (a network call) as needed.
    """

    def __init__(self, creds=None, google_api_lock=None, scopes=None,
                 timeout=TIMEOUT):
        self.creds = creds or os.getenv('GOOGLE_APPLICATION_CREDENTIALS')
        if not self.creds:
            raise Exception('could not load service credentials')
        self.google_api_lock = google_api_lock or threading.RLock()
        self.scopes = scopes or []
        self.timeout = timeout
        self.age = datetime.datetime.now()
        self.expiry = 60  # seconds; replaced by the server's expires_in
        self.value = None  # the access-token string, once acquired

    def __str__(self):
        self.ensure()
        return str(self.value)

    def assertion(self):
        """Build the signed JWT assertion for the token exchange."""
        with open(self.creds, 'r') as f:
            credentials = json.load(f)

        # N.B. the below exists to avoid using this private method:
        # return ServiceAccountCredentials._generate_assertion()
        now = int(time.time())
        payload = {
            'aud': TOKEN_URI,
            'exp': now + 3600,
            'iat': now,
            'iss': credentials['client_email'],
            'scope': ' '.join(self.scopes),
        }
        return jwt.encode(payload, credentials['private_key'],
                          algorithm='RS256')

    def acquire(self):
        """Exchange the JWT assertion for an access token (network call)."""
        headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        body = urlencode((
            ('grant_type', 'urn:ietf:params:oauth:grant-type:jwt-bearer'),
            ('assertion', self.assertion()),
        ))
        with self.google_api_lock:
            response = requests.post(TOKEN_URI, data=body, headers=headers,
                                     timeout=self.timeout)
        content = response.json()
        if 'error' in content:
            raise Exception('{}'.format(content))
        self.age = datetime.datetime.now()
        self.expiry = int(content['expires_in'])
        self.value = content['access_token']

    def ensure(self):
        """Acquire a token if missing, or refresh once half its lifetime
        has elapsed, so callers never hold an expired token."""
        if not self.value:
            log.debug('acquiring initial token')
            self.acquire()
            return
        now = datetime.datetime.now()
        delta = (now - self.age).total_seconds()
        if delta > self.expiry / 2:
            # "requiring" in the old message was a typo.
            log.debug('reacquiring token with expiry %d of %d / 2', delta,
                      self.expiry)
            self.acquire()
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,061 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/core/util.py | import base64
import random
def backoff(base=2, factor=1.1, max_value=None):
    """
    Exponential-decay generator for polling back-off.

    The Google docs warn to back off from polling their API if there is no
    work available in a task queue; this generator yields the sleep values.

        my_backoff = backoff(...)
        ...
        if no_items_in_queue:
            time.sleep(next(my_backoff))
        else:
            my_backoff.send(None)
            my_backoff.send('reset')

    Sending ``None`` first guarantees the generator has begun iterating;
    otherwise sending the 'reset' event may raise a TypeError.
    Re-initializing the generator is an equivalent alternative to 'reset'.

    Params:
        base: the mathematical base of the exponentiation operation
        factor: multiplier applied to each exponentiated value
        max_value: ceiling for yielded values; once the true exponential
                   sequence passes it, a slightly-jittered max_value is
                   yielded forever after
    """
    exponent = 0
    while True:
        candidate = factor * base ** exponent
        if max_value is not None and candidate >= max_value:
            # At the ceiling: yield max_value minus up to 10% jitter.
            received = (yield max_value - random.random() * max_value / 10)
        else:
            exponent += 1
            received = (yield candidate)
        if received == 'reset':
            # Callers discard the output consumed by send('reset'), so drop
            # to -1: the advance performed by that very send lands back so
            # the *next* next() yields the exponent-0 value again.
            exponent = -1
def decode(payload):
    """
    https://en.wikipedia.org/wiki/Base64#URL_applications

    Modified Base64 for URL variants exist, where the + and / characters
    of standard Base64 are respectively replaced by - and _.

    Uses the stdlib's urlsafe decoder instead of hand-rolled replace()
    calls; unlike the old version this also accepts bytes input.
    Returns bytes.
    """
    return base64.urlsafe_b64decode(payload)
def encode(payload):
    """
    https://en.wikipedia.org/wiki/Base64#URL_applications modified Base64
    for URL variants exist, where the + and / characters of standard
    Base64 are respectively replaced by - and _.

    str input is UTF-8 encoded first; returns a str. Uses the stdlib's
    urlsafe encoder instead of hand-rolled replace() calls.
    """
    if not isinstance(payload, bytes):
        payload = payload.encode('utf-8')
    return base64.urlsafe_b64encode(payload).decode('utf-8')
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,062 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/taskqueue/__init__.py | from pkg_resources import get_distribution
__version__ = get_distribution('gcloud-rest').version
from gcloud.rest.taskqueue.error import FailFastError
from gcloud.rest.taskqueue.manager import TaskManager
from gcloud.rest.taskqueue.queue import TaskQueue
# TODO: deprecate
from gcloud.rest.core.util import decode
from gcloud.rest.core.util import encode
__all__ = ['__version__', 'FailFastError', 'TaskManager', 'TaskQueue',
'decode', 'encode']
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,063 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/core/__init__.py | from pkg_resources import get_distribution
__version__ = get_distribution('gcloud-rest').version
from gcloud.rest.core.util import backoff
from gcloud.rest.core.util import decode
from gcloud.rest.core.util import encode
__all__ = ['__version__', 'backoff', 'decode', 'encode']
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,064 | gunjanswitchco/gcloud-rest | refs/heads/master | /gcloud/rest/taskqueue/error.py | class FailFastError(Exception):
pass
| {"/tests/unit/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/manager.py"], "/gcloud/rest/taskqueue/queue.py": ["/gcloud/rest/auth/__init__.py"], "/gcloud/rest/auth/__init__.py": ["/gcloud/rest/auth/token.py"], "/tests/integration/taskqueue/queue_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/manager.py": ["/gcloud/rest/core/__init__.py", "/gcloud/rest/core/util.py", "/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/queue.py"], "/tests/integration/taskqueue/manager_test.py": ["/gcloud/rest/taskqueue/__init__.py"], "/gcloud/rest/taskqueue/__init__.py": ["/gcloud/rest/taskqueue/error.py", "/gcloud/rest/taskqueue/manager.py", "/gcloud/rest/taskqueue/queue.py", "/gcloud/rest/core/util.py"], "/gcloud/rest/core/__init__.py": ["/gcloud/rest/core/util.py"]} |
51,066 | fabianofalco/tech_test | refs/heads/master | /test_stocks.py | '''
Unit tests
'''
import unittest
import time
from datetime import datetime
from stocks import Stock, Calculator, Trade, EnumStockTypes, EnumBuySellIndicator
class Test_test_stocks(unittest.TestCase):
    '''
    Unit tests for the stocks module (Stock, Trade, Calculator).
    '''
    # Per-test fixtures; all of these are (re)assigned in setUp().
    common_stock = None
    preferred_stock = None
    calculator = None
    common_trade = None
    preferred_trade = None
    # BUG FIX: this used to be a class-level `list()` shared by every test,
    # so each setUp() call appended six more trades and the list grew across
    # tests (the expected averages only survived by coincidence, because
    # duplicating every trade leaves both means unchanged).  It is now
    # rebuilt from scratch in setUp().
    list_of_trades = None
    def setUp(self):
        '''
        Setting data for unit tests
        '''
        self.common_stock = Stock('ABCD', EnumStockTypes.COMMON, 100, 8, None, 100)
        self.preferred_stock = Stock('EFGH', EnumStockTypes.PREF, 100, 8, 0.02, 100)
        self.common_stock_division_by_zero = Stock('ABCD', EnumStockTypes.COMMON, 0, 8, None, 100)
        self.preferred_stock_division_by_zero = Stock('EFGH', EnumStockTypes.PREF, 100, 8, 0.02, 0)
        self.calculator = Calculator()
        self.common_trade = Trade(self.common_stock, datetime.strptime("09/09/18 16:30:31", "%d/%m/%y %H:%M:%S") , 200, EnumBuySellIndicator.BUY.value , 150)
        self.preferred_trade = Trade(self.preferred_stock, datetime.strptime("09/09/18 16:30:32", "%d/%m/%y %H:%M:%S"), 300, EnumBuySellIndicator.SELL.value, 200)
        # Fresh list for every test (see class-attribute note above).
        self.list_of_trades = []
        self.list_of_trades.append(Trade(self.common_stock, datetime.strptime("09/09/18 16:30:29", "%d/%m/%y %H:%M:%S") , 200, EnumBuySellIndicator.BUY, 150))
        self.list_of_trades.append(Trade(self.common_stock, datetime.strptime("09/09/18 16:30:30", "%d/%m/%y %H:%M:%S") , 200, EnumBuySellIndicator.BUY, 150))
        self.list_of_trades.append(Trade(self.common_stock, datetime.strptime("09/09/18 16:30:31", "%d/%m/%y %H:%M:%S") , 200, EnumBuySellIndicator.BUY, 150))
        self.list_of_trades.append(Trade(self.preferred_stock, datetime.strptime("09/09/18 16:45:30", "%d/%m/%y %H:%M:%S"), 300, EnumBuySellIndicator.SELL, 200))
        self.list_of_trades.append(Trade(self.preferred_stock, datetime.strptime("09/09/18 16:45:30", "%d/%m/%y %H:%M:%S"), 300, EnumBuySellIndicator.SELL, 200))
        self.list_of_trades.append(Trade(self.preferred_stock, datetime.strptime("09/09/18 16:45:30", "%d/%m/%y %H:%M:%S"), 300, EnumBuySellIndicator.SELL, 200))
    def test_calculate_divident_yield_for_common_stock(self):
        '''Common stock: yield = last_dividend / market_price = 8 / 100.'''
        #expected result
        expected_result = 0.08
        #actual result
        actual_result = self.calculator.calculate_dividend_yield(self.common_stock)
        #assertion
        self.assertEqual(actual_result, expected_result, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_divident_yield_for_preferred_stock(self):
        '''Preferred stock: yield = fixed_dividend * par_value / market_price.'''
        #expected result
        expected_result = 0.02
        #actual result
        actual_result = self.calculator.calculate_dividend_yield(self.preferred_stock)
        #assertion
        self.assertEqual(actual_result, expected_result, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_divident_yield_division_by_zero(self):
        '''A zero denominator must yield 0 for both stock types.'''
        #expected result
        expected_result = 0
        #actual result
        actual_result_for_common = self.calculator.calculate_dividend_yield(self.common_stock_division_by_zero)
        # BUG FIX: the preferred-stock case previously re-tested the common
        # stock fixture (copy-paste error), leaving the preferred path untested.
        actual_result_for_preferred = self.calculator.calculate_dividend_yield(self.preferred_stock_division_by_zero)
        #assertion
        self.assertEqual(actual_result_for_common, expected_result, msg='FAIL. Actual value: ' + str(actual_result_for_common) + 'Expected: ' + str(expected_result))
        self.assertEqual(actual_result_for_preferred, expected_result, msg='FAIL. Actual value: ' + str(actual_result_for_preferred) + 'Expected: ' + str(expected_result))
    def test_calculate_pe_ratio(self):
        '''P/E ratio = market_price / dividend yield for both stock types.'''
        #expected result
        expected_result_for_common_stock = 1250
        expected_result_for_preferred_stock = 5000
        #actual result
        actual_result_for_common_stock = self.calculator.calculate_pe_ratio(self.common_stock)
        actual_result_for_preferred_stock = self.calculator.calculate_pe_ratio(self.preferred_stock)
        #assertion
        self.assertEqual(actual_result_for_common_stock, expected_result_for_common_stock, msg='FAIL. Actual value: ' + str(actual_result_for_common_stock) + 'Expected: ' + str(expected_result_for_common_stock))
        self.assertEqual(actual_result_for_preferred_stock, expected_result_for_preferred_stock, msg='FAIL. Actual value: ' + str(actual_result_for_preferred_stock) + 'Expected: ' + str(expected_result_for_preferred_stock))
    def test_calculate_pe_ratio_division_by_zero(self):
        '''A zero denominator must give a P/E of 0 for both stock types.'''
        #expected result
        expected_result = 0
        #actual result
        actual_result_for_common_stock = self.calculator.calculate_pe_ratio(self.common_stock_division_by_zero)
        # BUG FIX: previously re-tested the common stock fixture here too.
        actual_result_for_preferred_stock = self.calculator.calculate_pe_ratio(self.preferred_stock_division_by_zero)
        #assertion
        self.assertEqual(actual_result_for_common_stock, expected_result, msg='FAIL. Actual value: ' + str(actual_result_for_common_stock) + 'Expected: ' + str(expected_result))
        self.assertEqual(actual_result_for_preferred_stock, expected_result, msg='FAIL. Actual value: ' + str(actual_result_for_preferred_stock) + 'Expected: ' + str(expected_result))
    def test_record_trade(self):
        '''return_print() must render the recorded trade fields verbatim.'''
        #expected result
        expected_result_for_common_stock = 'Stock Symbol: ABCD - Time: 2018-09-09 16:30:31 - Quantity: 200 - Indicator: Buy - Price: 150'
        expected_result_for_preferred_stock = 'Stock Symbol: EFGH - Time: 2018-09-09 16:30:32 - Quantity: 300 - Indicator: Sell - Price: 200'
        #actual result
        actual_result_for_common_stock = self.common_trade.return_print()
        actual_result_for_preferred_stock = self.preferred_trade.return_print()
        #assertion
        self.assertEqual(actual_result_for_common_stock, expected_result_for_common_stock, msg='FAIL. Actual value: ' + actual_result_for_common_stock + 'Expected: ' + expected_result_for_common_stock)
        self.assertEqual(actual_result_for_preferred_stock, expected_result_for_preferred_stock, msg='FAIL. Actual value: ' + actual_result_for_preferred_stock + 'Expected: ' + expected_result_for_preferred_stock)
    def test_calculate_volume_weighted_stock_price(self):
        '''VWSP over the trades of the last 15 minutes (one trade falls out).'''
        #expected result
        expected_result = 184.62
        #actual result
        actual_result = self.calculator.calculate_volume_weighted_stock_price(self.list_of_trades, datetime.strptime("09/09/18 16:45:30", "%d/%m/%y %H:%M:%S"))
        #assertion
        self.assertAlmostEqual(actual_result, expected_result, 2, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_volume_weighted_stock_price_with_empty_list(self):
        '''An empty trade list must produce 0.'''
        #expected result
        expected_result = 0
        actual_result = self.calculator.calculate_volume_weighted_stock_price(list(), datetime.strptime("09/09/18 16:45:30", "%d/%m/%y %H:%M:%S"))
        #assertion
        self.assertAlmostEqual(actual_result, expected_result, 2, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_volume_weighted_stock_price_no_list(self):
        '''Passing None instead of a list must produce 0.'''
        #expected result
        expected_result = 0
        #actual result
        actual_result = self.calculator.calculate_volume_weighted_stock_price(None, datetime.strptime("09/09/18 16:45:30", "%d/%m/%y %H:%M:%S"))
        #assertion
        self.assertAlmostEqual(actual_result, expected_result, 2, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_gbce_all_share_index(self):
        '''Geometric mean of all six trade prices: sqrt(150 * 200) ~ 173.21.'''
        #expected result
        expected_result = 173.21
        #actual result
        actual_result = self.calculator.calculate_gbce_all_share_index(self.list_of_trades)
        #assertion
        self.assertAlmostEqual(actual_result, expected_result, 2, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_gbce_all_share_index_with_empty_list(self):
        '''An empty trade list must produce 0.'''
        #expected result
        expected_result = 0
        #actual result
        actual_result = self.calculator.calculate_gbce_all_share_index(list())
        #assertion
        self.assertAlmostEqual(actual_result, expected_result, 2, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
    def test_calculate_gbce_all_share_index_with_no_list(self):
        '''Passing None instead of a list must produce 0.'''
        #expected result
        expected_result = 0
        #actual result
        actual_result = self.calculator.calculate_gbce_all_share_index(None)
        #assertion
        self.assertAlmostEqual(actual_result, expected_result, 2, msg='FAIL. Actual value: ' + str(actual_result) + 'Expected: ' + str(expected_result))
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| {"/test_stocks.py": ["/stocks.py"]} |
51,067 | fabianofalco/tech_test | refs/heads/master | /stocks.py | '''
General notes:
1 - Use Python 3 to run
2 - Amount in pennies
3 - I am not sure about the formula for PE Ratio. I am not clear about the "Dividend". In the document it is stated like PE Ration = Market Price / Dividend
'''
import time
from datetime import datetime
from enum import Enum
class CustomListException(Exception):
    """Raised internally by Calculator when a trade list is missing or empty."""
    def __init__(self, message):
        super().__init__(message)
class EnumStockTypes(Enum):
    """The two stock categories, each with a distinct dividend-yield formula."""
    COMMON = 'Common'
    PREF = 'Preferred'
class EnumBuySellIndicator(Enum):
    """Direction of a trade as recorded on a Trade object."""
    BUY = 'Buy'
    SELL = 'Sell'
class Stock():
    """Read-only record describing one listed stock.

    Amounts are expressed in pennies.  Every constructor argument is stored
    privately and exposed through a property, so instances behave as
    immutable value objects.
    """

    def __init__(self, symbol, type, market_price, last_dividend, fixed_dividend, par_value):
        self._symbol = symbol
        self._type = type
        self._market_price = market_price
        self._last_dividend = last_dividend
        self._fixed_dividend = fixed_dividend
        self._par_value = par_value

    @property
    def symbol(self):
        """Ticker symbol, e.g. 'ABCD'."""
        return self._symbol

    @property
    def type(self):
        """Stock category (compared against EnumStockTypes members)."""
        return self._type

    @property
    def market_price(self):
        """Current market price in pennies."""
        return self._market_price

    @property
    def last_dividend(self):
        """Most recent dividend, in pennies."""
        return self._last_dividend

    @property
    def fixed_dividend(self):
        """Fixed dividend rate (preferred stocks only; may be None)."""
        return self._fixed_dividend

    @property
    def par_value(self):
        """Par value in pennies."""
        return self._par_value
class Trade():
    """Read-only record of one recorded trade against a Stock."""

    def __init__(self, stock, time, quantity, indicator, price):
        self._stock = stock
        self._time = time
        self._quantity = quantity
        self._indicator = indicator
        self._price = price

    @property
    def stock(self):
        """The Stock object this trade was made against."""
        return self._stock

    @property
    def time(self):
        """Timestamp at which the trade was recorded."""
        return self._time

    @property
    def quantity(self):
        """Number of shares traded."""
        return self._quantity

    @property
    def indicator(self):
        """Direction of the trade as a string ('Buy' or 'Sell')."""
        return self._indicator

    @property
    def price(self):
        """Trade price in pennies."""
        return self._price

    def return_print(self):
        """Build the single-line human-readable description of this trade."""
        text = 'Stock Symbol: ' + self._stock.symbol
        text += ' - Time: ' + str(self._time)
        text += ' - Quantity: ' + str(self._quantity)
        text += ' - Indicator: ' + self._indicator
        text += ' - Price: ' + str(self._price)
        return text

    def print_trade(self):
        """Print the trade description to stdout."""
        print(self.return_print())
class Calculator():
    """Stateless implementation of the Super Simple Stocks formulas.

    Error-reporting contract (kept from the original API, relied on by the
    unit tests): every method returns 0 for empty input / division by zero,
    and the string 'Error: <detail>' for any other unexpected failure.
    """

    def calculate_dividend_yield(self, stock):
        '''
        Returns the dividend yield for a specific type of stock.
        In case of division by zero, returns zero.
        It returns the error description in case of other exceptions.
        For Common stocks: last_dividend / market_price
        For Preferred stocks: fixed_dividend * par_value / market_price
        '''
        try:
            if stock.type == EnumStockTypes.COMMON:
                return stock.last_dividend / stock.market_price
            # Preferred stock.  BUG FIX: the original expression divided by
            # par_value (fixed_dividend * par_value / par_value), cancelling
            # the par value and ignoring the market price entirely; the
            # documented formula divides by market_price.
            return stock.fixed_dividend * stock.par_value / stock.market_price
        except ZeroDivisionError:
            return 0
        except Exception as e:
            return 'Error: ' + str(e)

    def calculate_pe_ratio(self, stock):
        '''
        Returns the PE Ratio for a given stock. It is independent of the stock type.
        In case of division by zero, returns zero.
        It returns the error description in case of other exceptions.
        Formula: market_price / dividend yield
        '''
        try:
            return stock.market_price / self.calculate_dividend_yield(stock)
        except ZeroDivisionError:
            return 0
        except Exception as e:
            # Also reached when calculate_dividend_yield returned an error
            # string: dividing a number by a str raises TypeError.
            return 'Error: ' + str(e)

    def calculate_volume_weighted_stock_price(self, list_of_trades, now=None):
        '''
        Returns the Volume Weighted Stock Price for a given list of trades and time.
        If no time is provided, the current time is used.
        In case of division by zero and empty list, it returns zero.
        It returns the error description in case of other exceptions.
        Formula: Sum(trade price * quantity) / sum(quantity)
        '''
        try:
            # If no list is provided or it is empty, raise a custom exception
            # that is mapped to the 0 return value below.
            if list_of_trades is None or len(list_of_trades) == 0:
                raise CustomListException('No list provided')
            # BUG FIX: the default used to be `now=time.time()`, a float
            # evaluated once at class-definition time; subtracting a datetime
            # from it always raised TypeError.  A None sentinel now resolves
            # to the current time on every call.
            if now is None:
                now = datetime.now()
            # Only trades recorded within the last 15 minutes are counted.
            window_seconds = 15 * 60
            sum_trade_price_times_quantity = 0
            count_quantity = 0
            for trade in list_of_trades:
                # NOTE(review): timedelta.seconds ignores whole days, so a
                # trade older than 24h could wrap back into the window —
                # total_seconds() is probably intended; behavior kept as-is.
                time_difference_in_seconds = (now - trade.time).seconds
                if time_difference_in_seconds <= window_seconds:
                    sum_trade_price_times_quantity += trade.quantity * trade.price
                    count_quantity += trade.quantity
            # If no trade falls inside the window, count_quantity is 0 and the
            # ZeroDivisionError handler below maps the result to 0.
            return sum_trade_price_times_quantity / count_quantity
        except ZeroDivisionError:
            return 0
        except CustomListException:
            return 0
        except Exception as e:
            return 'Error: ' + str(e)

    def calculate_gbce_all_share_index(self, list_of_trades):
        '''
        Returns the geometric mean of all trade prices.
        In case of division by zero and empty list, it returns zero.
        It returns the error description in case of other exceptions.
        Formula: n-th root of (p1 * p2 * p3 * ... pn)
        '''
        try:
            # If no list is provided or it is empty, raise a custom exception
            # that is mapped to the 0 return value below.
            if list_of_trades is None or len(list_of_trades) == 0:
                raise CustomListException('No list provided')
            # Multiply all trade prices together.
            product_of_prices = 1
            for trade in list_of_trades:
                product_of_prices *= trade.price
            # n-th root of the product of the n prices.
            return product_of_prices ** (1 / len(list_of_trades))
        except ZeroDivisionError:
            return 0
        except CustomListException:
            return 0
        except Exception as e:
            return 'Error: ' + str(e)
51,068 | VictoriaMR/pyHandle | refs/heads/master | /test.py | from mouse import Mouse
Mouse.moveTo(1292, 509) | {"/test.py": ["/mouse.py"], "/server.py": ["/mouse.py"]} |
51,069 | VictoriaMR/pyHandle | refs/heads/master | /server.py | # -*- coding: utf-8 -*-
import web
import json
import time
import random
from mouse import Mouse
# URL mapping: every request to '/' is handled by the `index` class below.
urls = ('/', 'index')
# Mutable module-level state shared by all requests (single-process server):
clientInfo = {'extid':''}   # id of the client currently holding the lock
isFree = True               # whether the controller is currently idle
countTime = 0               # actions performed by the current client so far
intervalTime = 0            # unix timestamp of the last performed action
app = web.application(urls,globals())
class index:
    """web.py handler for '/': dispatches remote mouse-control commands.

    GET and POST accept the same parameters (action, x, y, w, value, extid)
    and delegate to doAct().  A crude lock built from the module-level
    globals (clientInfo / countTime / intervalTime) serialises clients.
    """
    def GET(self):
        # GET and POST are intentionally identical.
        web.header('content-type', 'text/json;charset=utf-8')
        params = web.input(act='',x=0,y=0,w=0,nc=False,value='',extid='')
        return self.doAct(params)
    def POST(self):
        web.header('content-type', 'text/json;charset=utf-8')
        params = web.input(act='',x=0,y=0,w=0,nc=False,value='',extid='')
        return self.doAct(params)
    def error(self, msg='', code=-1):
        # JSON envelope for failed calls.
        return json.dumps({'code':code,'data':'','msg':msg})
    def success(self, data, msg='',code=0):
        # JSON envelope for successful calls.
        return json.dumps({'code':code,'data':data,'msg':msg})
    def check(self, params):
        # Return True when this client is allowed to act.
        # NOTE(review): web.input() above declares a default for 'act', not
        # 'action', so params['action'] raises when the query string omits
        # it — confirm which key the clients actually send.
        if not params['action'] == 'end':
            global clientInfo, isFree, intervalTime
            if 'extid' in params:
                if not clientInfo['extid'] == '':
                    if not params['extid'] == clientInfo['extid']:
                        # NOTE(review): intervalTime holds a *past* timestamp,
                        # so intervalTime - now is negative and this stale-lock
                        # takeover can never fire; presumably
                        # time.time() - intervalTime > 60 was intended — confirm.
                        if intervalTime > 0 and intervalTime - int(time.time()) > 60:
                            self.clearInfo()
                            return True
                        return False
        return True
    def clearInfo(self):
        # Reset all module-level lock state back to idle.
        print('重置参数')
        global clientInfo, isFree, countTime, intervalTime
        clientInfo['extid'] = ''
        isFree = True
        rst = True  # unused; kept as in the original
        countTime = 0
        intervalTime = 0
        return True
    def doAct(self, params):
        # Parse the request and dispatch one action to the Mouse controller.
        act = params['action']
        x = float(params['x'])
        y = float(params['y'])
        w = float(params['w'])
        value = params['value']
        # action dispatch
        global clientInfo, isFree, countTime, intervalTime
        rst = False
        print(act, clientInfo, isFree, countTime, intervalTime)
        if act != 'end':
            rst = self.check(params)
            if not rst:
                return self.error('客户端正在工作中, 请稍后再试')
            countTime = countTime + 1;
            # After a random 10-20 actions, force the client to start over.
            if countTime >= int(random.randint(0, 10) + 10):
                self.clearInfo()
                return self.error('尝试次数过多, 请稍后再试', -4)
            intervalTime = int(time.time())
        # click action
        if act == 'click':
            rst = Mouse.click(x, y)
        # refresh action
        elif act == 'flush':
            rst = Mouse.flush(x, y)
        # drag action
        elif act == 'slider':
            rst = Mouse.slider(x, y, w)
        # typing action
        elif act == 'input':
            rst = Mouse.input(x, y, value)
        # acquire the start lock
        elif act == 'start':
            self.clearInfo()
            isFree = False
            clientInfo['extid'] = params['extid']
        # release the lock
        elif act == 'end':
            if clientInfo['extid'] == params['extid']:
                self.clearInfo()
        if rst:
            return self.success(rst, '操作成功')
        else:
            return self.error('不能正确解析动作')
# Program entry point: start the web.py HTTP server.
if __name__ == '__main__':
    app.run()
51,070 | VictoriaMR/pyHandle | refs/heads/master | /mouse.py | # -*- coding: utf-8 -*-
import pyautogui
import random
import time
class Mouse:
    """Human-like mouse/keyboard automation built on pyautogui.

    All public methods are written without `self` and are invoked through
    the class (e.g. Mouse.click(x, y)), effectively as static methods.
    NOTE(review): because of that, instantiating Mouse() and calling a
    method on the instance would fail — confirm the class is only ever
    used via the class object.  Random sleeps and jittered movements are
    inserted throughout to mimic a human operator.
    """
    def __init__(self):
        # Cached screen size; unused by the static-style methods below.
        self.size = pyautogui.size();
    def click(x, y):
        """Move to (x, y) with jitter and left-click; False if off-screen."""
        time.sleep(Mouse.time())
        rst = Mouse.moveTo(x, y)
        if not rst:
            return False
        time.sleep(Mouse.time())
        pyautogui.click()
        time.sleep(Mouse.time())
        return True
    def moveTo(x, y):
        """Walk the cursor to (x, y) in random 30-100px steps.

        Returns False when the target is outside the screen, True otherwise.
        """
        time.sleep(Mouse.time())
        if not pyautogui.onScreen(x, y):
            return False
        # Move toward the target position step by step.
        while True:
            # Current cursor coordinates.
            mx, my = pyautogui.position()
            if mx > x:
                mx = mx - random.randint(30, 100)
                if mx < x:
                    mx = x
            else:
                mx = mx + random.randint(30, 100)
                if mx > x:
                    mx = x
            if my > y:
                my = my - random.randint(30, 100)
                if my < y:
                    my = y
            else:
                my = my + random.randint(30, 100)
                if my > y:
                    my = y
            # Occasionally use an easing curve for a more natural motion.
            if random.randint(0, 10) >= 6:
                pyautogui.moveTo(mx, my, Mouse.time(random.randint(2200,4400)), Mouse.getMoveType())
            else:
                pyautogui.moveTo(mx, my, Mouse.time(random.randint(2200,4400)))
            if mx == x and my == y:
                break
        time.sleep(Mouse.time())
        return True
    def getMoveType():
        """Pick a random pyautogui easing function for the next move."""
        moveList = [pyautogui.easeInQuad, pyautogui.easeOutQuad, pyautogui.easeInOutQuad, pyautogui.easeInElastic]
        moveLen = len(moveList) - 1
        return moveList[random.randint(0, moveLen)]
    def flush(x, y):
        """Refresh the page 3-5 times, either via (Ctrl+)F5 or by clicking (x, y)."""
        time.sleep(Mouse.time());
        arr = [3, 3, 4, 4, 5, 5, 5, 5, 5];
        index = random.randint(0, len(arr) - 1)
        count = arr[index];
        if random.randint(0, 5) >= 4:
            # Keyboard path: press F5 `count` times, sometimes holding Ctrl.
            ctrl = False;
            if random.randint(0, 5) >= 4:
                ctrl = True
            if ctrl:
                pyautogui.keyDown('ctrl')
                time.sleep(Mouse.time())
            while count > 0:
                pyautogui.press('f5');
                time.sleep(Mouse.time())
                count = count - 1
            if ctrl:
                pyautogui.keyUp('ctrl')
                time.sleep(Mouse.time())
        else:
            # Mouse path: move to (x, y) and click `count` times.
            Mouse.moveTo(x, y);
            time.sleep(Mouse.time())
            while count > 0:
                pyautogui.click()
                time.sleep(Mouse.time())
                count = count - 1
        return True
    def slider(x, y, w):
        """Drag from (x, y) horizontally by ~w pixels (e.g. a slider captcha).

        NOTE(review): the step size random.randint(14, w//1) assumes w >= 14;
        smaller widths would raise — confirm callers guarantee that.
        """
        # Move to the drag start position.
        rst = Mouse.moveTo(x, y)
        if not rst:
            return False
        # Press the left button down.
        pyautogui.mouseDown(button='left')
        time.sleep(Mouse.time(500))
        # Drag in random horizontal steps with slight vertical jitter.
        relx = x;
        while True:
            x = x + random.randint(14, w//1)
            y = y + random.randint(-2, 3)
            if x - relx > w:
                x = relx + w
            if random.randint(0, 10) >= 5:
                pyautogui.moveTo(x, y, Mouse.time(random.randint(112, 582)), Mouse.getMoveType())
            else:
                pyautogui.moveTo(x, y, Mouse.time(random.randint(64, 782)))
            if x - relx >= w - 5:
                break
            time.sleep(Mouse.time(40))
        # Release the button.
        pyautogui.mouseUp(button='left')
        return True
    def input(x, y, value):
        """Click the field at (x, y), clear it, then type `value`."""
        # Move to the target position and click to focus the field.
        rst = Mouse.click(x, y)
        if not rst:
            return False
        Mouse.backspace()
        # Type the new value.
        pyautogui.typewrite(value)
        return True
    def backspace():
        """Press backspace 20 times to clear the focused input field."""
        time.sleep(Mouse.time())
        cycleTime = 20;
        while cycleTime > 0:
            pyautogui.typewrite(["backspace"])
            time.sleep(Mouse.time(1000))
            cycleTime = cycleTime - 1;
        return True
    def time(time=100):
        """Random delay in seconds: randint(10, randint(66, 110)) / divisor.

        Shadows the `time` module name inside this def only; module-level
        `time.sleep` used elsewhere still resolves to the imported module.
        """
        return random.randint(10, random.randint(66, 110))/time
51,074 | Lechatelia/heart-sound | refs/heads/master | /my_inference.py | import tensorflow as tf
import numpy as np
# Network architecture constants.
INPUT_NODE = 577    # total length of the concatenated feature vector
OUTPUT_NODE = 5     # number of heart-sound classes
LAYER1_NODE = 300   # width of layer1 = NODE1+...+NODE6 (70+30+80+20+20+80)
NODE1=70            # hidden units for feature slice [0:40]
NODE2=30            # hidden units for feature slice [40:52]
NODE3=80            # hidden units for feature slice [52:180]
NODE4=20            # hidden units for feature slice [180:187]
NODE5=20            # hidden units for feature slice [187:193]
NODE6=80            # hidden units for feature slice [193:]
LAYER2_NODE = 100   # width of fully connected layer2 and the residual block
LAYER3_NODE = 5     # width of layer3, feeding the 5-way output layer
def get_weight_variable(shape, regularizer):
    """Create/fetch the 'weights' variable in the current variable scope.

    Args:
        shape: shape of the weight matrix.
        regularizer: optional callable; when given, its value for these
            weights is added to the 'losses' collection.

    Returns:
        The tf variable holding the weights (truncated-normal init, stddev 0.1).
    """
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # `is not None` identity test instead of `!= None` (PEP 8).
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights
def my_get_weight_variable(name,shape, regularizer):
    """Like get_weight_variable, but with an explicit variable name.

    Args:
        name: variable name within the current variable scope.
        shape: shape of the weight matrix.
        regularizer: optional callable; when given, its value for these
            weights is added to the 'losses' collection.

    Returns:
        The tf variable holding the weights (truncated-normal init, stddev 0.1).
    """
    weights = tf.get_variable(name, shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    # `is not None` identity test instead of `!= None` (PEP 8).
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights
def inference(input_tensor, regularizer,keep_prob=1):
    """Build the forward pass of the heart-sound classifier.

    The 577-dim input is split into six feature groups (widths 40, 12, 128,
    7, 6, 384), each passed through its own dense+ReLU layer; the six
    outputs are concatenated into a 300-dim layer1, followed by a dense
    layer2 (100), a two-layer residual block, layer3 (5) and a linear
    output layer4 (OUTPUT_NODE logits).

    Args:
        input_tensor: batch of feature vectors; indexing below assumes
            shape (batch, 577).
        regularizer: optional weight regularizer passed to the helpers.
        keep_prob: dropout keep probability (1 = no dropout, i.e. inference).

    Returns:
        Unscaled logits of shape (batch, OUTPUT_NODE).
    """
    # layer1: one independent dense+ReLU sub-layer per feature group.
    with tf.variable_scope('layer1'):
        #weights1_1 = tf.get_variable("weights1", [40,10], initializer=tf.truncated_normal_initializer(stddev=0.1))
        weights1_1=my_get_weight_variable("weights1", [40,NODE1],regularizer)
        biases1_1 = tf.get_variable("biases1", [NODE1], initializer=tf.constant_initializer(1.0))
        layer1_1 = tf.nn.relu(tf.matmul(input_tensor[: ,0:40], weights1_1) + biases1_1)
        layer1_1=tf.nn.dropout(layer1_1,keep_prob)
        #weights1_2 = tf.get_variable("weights2", [12,5], initializer=tf.truncated_normal_initializer(stddev=0.1))
        weights1_2 = my_get_weight_variable("weights2", [12, NODE2], regularizer)
        biases1_2 = tf.get_variable("biases2", [NODE2], initializer=tf.constant_initializer(1.0))
        layer1_2 = tf.nn.relu(tf.matmul(input_tensor[:,40:52], weights1_2) + biases1_2)
        layer1_2 = tf.nn.dropout(layer1_2, keep_prob)
        #weights1_3 = tf.get_variable("weights3", [128,20], initializer=tf.truncated_normal_initializer(stddev=0.1))
        weights1_3 = my_get_weight_variable("weights3", [128, NODE3], regularizer)
        biases1_3 = tf.get_variable("biases3", [NODE3], initializer=tf.constant_initializer(1.0))
        layer1_3 = tf.nn.relu(tf.matmul(input_tensor[:,52:180], weights1_3) + biases1_3)
        layer1_3 = tf.nn.dropout(layer1_3, keep_prob)
        #weights1_4 = tf.get_variable("weights4", [7,5], initializer=tf.truncated_normal_initializer(stddev=0.1))
        weights1_4 = my_get_weight_variable("weights4", [7, NODE4], regularizer)
        biases1_4 = tf.get_variable("biases4", [NODE4], initializer=tf.constant_initializer(0.0))
        layer1_4 = tf.nn.relu(tf.matmul(input_tensor[:,180:187], weights1_4) + biases1_4)
        layer1_4 = tf.nn.dropout(layer1_4, keep_prob)
        #weights1_5 = tf.get_variable("weights5", [6,5], initializer=tf.truncated_normal_initializer(stddev=0.1))
        weights1_5 = my_get_weight_variable("weights5", [6, NODE5], regularizer)
        biases1_5 = tf.get_variable("biases5", [NODE5], initializer=tf.constant_initializer(0.0))
        layer1_5 = tf.nn.relu(tf.matmul(input_tensor[:,187:193], weights1_5) + biases1_5)
        layer1_5 = tf.nn.dropout(layer1_5, keep_prob)
        #weights1_6 =tf.get_variable("weights6", [384,40], initializer=tf.truncated_normal_initializer(stddev=0.1))
        weights1_6 = my_get_weight_variable("weights6", [384,NODE6], regularizer)
        biases1_6 = tf.get_variable("biases6", [NODE6], initializer=tf.constant_initializer(0.0))
        layer1_6 = tf.nn.relu(tf.matmul(input_tensor[:,193: ], weights1_6) + biases1_6)
        layer1_6 = tf.nn.dropout(layer1_6, keep_prob)
        # Concatenate the six group outputs: 70+30+80+20+20+80 = LAYER1_NODE.
        layer1 = tf.concat([layer1_1,layer1_2,layer1_3,layer1_4,layer1_5,layer1_6],1)
    # with tf.variable_scope('layer2'):
    # weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
    # biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(1.0))
    # layer2 = tf.matmul(layer1, weights) + biases
    # layer2 = tf.nn.dropout(layer2, keep_prob)
    # layer2: dense 300 -> 100 with ReLU and dropout.
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, LAYER2_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER2_NODE], initializer=tf.constant_initializer(0.0))
        layer2 = tf.nn.relu(tf.matmul(layer1, weights) + biases)
        layer2 = tf.nn.dropout(layer2, keep_prob)
    # Residual block: two 100 -> 100 dense+ReLU layers plus a skip connection.
    with tf.variable_scope('layer_res'):
        weights = tf.get_variable("weights_res", [LAYER2_NODE,LAYER2_NODE],initializer=tf.truncated_normal_initializer(stddev=0.1))
        # weights = get_weight_variable([LAYER2_NODE, LAYER2_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER2_NODE], initializer=tf.constant_initializer(0.0))
        layer3_1 = tf.nn.relu(tf.matmul(layer2, weights) + biases)
        weights = tf.get_variable("weights_res_1", [LAYER2_NODE, LAYER2_NODE],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        # weights = get_weight_variable([LAYER2_NODE, LAYER2_NODE], regularizer)
        biases = tf.get_variable("biases_1", [LAYER2_NODE], initializer=tf.constant_initializer(0.0))
        layer3_1 = tf.nn.relu(tf.matmul(layer3_1, weights) + biases)
        # layer3_1 = tf.nn.dropout(layer3_1, keep_prob)
        # Skip connection: add the block input back onto its output.
        layer3_1+=layer2
    # layer3: dense 100 -> 5 with ReLU and dropout.
    with tf.variable_scope('layer3'):
        weights = get_weight_variable([LAYER2_NODE, LAYER3_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER3_NODE], initializer=tf.constant_initializer(0.0))
        layer3 = tf.nn.relu(tf.matmul(layer3_1, weights) + biases)
        layer3 = tf.nn.dropout(layer3, keep_prob)
    # layer4: final linear layer producing the class logits (no activation).
    with tf.variable_scope('layer4'):
        weights = get_weight_variable([LAYER3_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE], initializer=tf.constant_initializer(1.0))
        layer4 = tf.matmul(layer3, weights) + biases
        layer4 = tf.nn.dropout(layer4, keep_prob)
    return layer4
51,075 | Lechatelia/heart-sound | refs/heads/master | /librosa-core-stft-1.py | import numpy as np
import librosa
import librosa.display
# Demo script: compute and display the STFT power spectrogram of one recording.
y, sr = librosa.load('dataset/artifact__201012172012.wav')
# Complex short-time Fourier transform (frequency bins x frames).
D = librosa.stft(y)
# Use left-aligned frames, instead of centered frames
D_left = librosa.stft(y, center=False)
# Use a shorter hop length
D_short = librosa.stft(y, hop_length=64)
# Display a spectrogram
import matplotlib.pyplot as plt
librosa.display.specshow(librosa.amplitude_to_db(librosa.magphase(D)[0],
                                                 ref=np.max),
                         y_axis='log', x_axis='time')
plt.title('Power spectrogram')
plt.colorbar(format='%+2.0f dB')
plt.tight_layout()
plt.show()
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,076 | Lechatelia/heart-sound | refs/heads/master | /my_train.py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
from my_dataset import My_DataSet
import my_inference
import os
import xlrd
import numpy as np
import audio_processing
# Data and training hyper-parameters.
input_number=577                  # length of one feature vector
output_number = 5                 # number of classes (one-hot label length)
wav_dir='dataset/'                # directory scanned by load_wav_data()
BATCH_SIZE = 200
My_keep_prob=0.6                  # dropout keep probability during training
LEARNING_RATE_BASE = 0.2
LEARNING_RATE_DECAY = 0.95
REGULARIZATION_RATE = 0.0001
#REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 120000
MOVING_AVERAGE_DECAY = 0.9999
MODEL_SAVE_PATH="model0726/"      # checkpoint directory
MODEL_NAME="hs_model"             # checkpoint file prefix
def vectorized_result(j):
    """Convert class index j into a one-hot vector of length output_number."""
    one_hot = np.zeros(output_number)
    one_hot[int(j)] = 1.0
    return np.array(one_hot)
def load_data():
    """Read feature rows from write.xlsx and split them into train/val/test.

    Column 2 of each spreadsheet row holds the class label; columns 3..
    hold the input_number feature values.  About 90% of rows go to training
    (a random ~15% of those are mirrored into the validation set); the
    remaining rows form the held-out test set.

    Returns:
        (traindata, trainlabel, valdata, vallabel, testdata, testlabel)
        as numpy arrays; labels are one-hot vectors from vectorized_result().

    Raises:
        ValueError: if a row carries an unknown class label.
    """
    # Label -> class index mapping (same indices as the original elif chain).
    label_to_class = {
        'normal': 0,
        'extrahls': 1,
        'artifact': 2,
        'extrastole': 3,
        'murmur': 4,
    }
    traindata, trainlabel = [], []
    valdata, vallabel = [], []
    testdata, testlabel = [], []
    workbook = xlrd.open_workbook(r'write.xlsx')
    print(workbook.sheet_names())
    sheet = workbook.sheet_by_index(0)
    print(sheet.nrows)
    for i in range(1, sheet.nrows):  # skip the header row
        rows = sheet.row_values(i)
        label = str(rows[2])
        if label not in label_to_class:
            # BUG FIX: the original printed a warning and returned None,
            # which made the caller crash later with an opaque unpacking
            # error; fail fast with a clear message instead.
            raise ValueError('unknown class label: ' + label)
        catedata = label_to_class[label]
        linedata = rows[3:]
        if np.random.rand(1) < 0.9:
            traindata.append(np.reshape(np.array(linedata), (input_number)))
            trainlabel.append(vectorized_result(catedata))
            # A random ~15% slice of the training rows doubles as validation.
            if np.random.rand(1) < 0.15:
                valdata.append(np.reshape(np.array(linedata), (input_number)))
                vallabel.append(vectorized_result(catedata))
        else:
            testdata.append(np.reshape(np.array(linedata), (input_number)))
            testlabel.append(vectorized_result(catedata))
    traindata = np.array(traindata)
    trainlabel = np.array(trainlabel)
    valdata = np.array(valdata)
    vallabel = np.array(vallabel)
    testdata = np.array(testdata)
    testlabel = np.array(testlabel)
    print('traindata_number='+str(traindata.shape)+'\n')
    print('trainlabel_number='+str(trainlabel.shape)+'\n')
    print('valdata=' + str(len(valdata)) + '\n')
    print('testdata=' + str(len(testdata))+ '\n')
    return traindata,trainlabel, valdata,vallabel, testdata,testlabel
# (start, end) column ranges of the concatenated feature vector that are
# min-max normalized independently; end=None means "through the end".
_FEATURE_SEGMENTS = [(0, 40), (40, 52), (52, 180), (180, 187), (187, 193), (193, None)]


def _normalize_feature_segments(linedata):
    """Min-max normalize each feature segment of *linedata* in place.

    Each segment is first shifted so its minimum becomes 0, then divided by
    its (original) max-min span so its maximum becomes 1.  The two-pass
    write-back order of the original duplicated code is preserved.
    """
    for start, end in _FEATURE_SEGMENTS:
        segment = linedata[start:end]
        low = min(segment)
        linedata[start:end] = [x - low for x in segment]
        segment = linedata[start:end]
        span = max(segment) - min(segment)  # min is 0 after the shift
        linedata[start:end] = [x / span for x in segment]


def load_wav_data():
    """Read wav files directly from wav_dir, extract features and split them.

    The class label comes from the file-name prefix (before '_').  Roughly
    90% of the samples are placed in the training set; of those, ~15% are
    additionally copied into the validation set and the rest into the test
    set.  Samples outside the 90% draw are dropped (original behavior).

    Returns:
        (traindata, trainlabel, valdata, vallabel, testdata, testlabel)
        as numpy arrays, or None if a file has an unknown class prefix.
    """
    # Prefix -> integer class id used by vectorized_result.
    class_ids = {'normal': 0, 'extrahls': 1, 'artifact': 2, 'extrastole': 3, 'murmur': 4}
    traindata = []
    trainlabel = []
    valdata = []
    vallabel = []
    testdata = []
    testlabel = []
    for file in os.listdir(wav_dir):
        if file.split('.')[-1] != 'wav':
            continue
        cls = file.split('_')[0]
        if cls not in class_ids:
            # Unknown class prefix aborts the whole load (original behavior).
            print("catedata is wrong\n")
            return
        catedata = class_ids[cls]
        print(wav_dir+file)
        linedata = audio_processing.extract_feature(wav_dir+file)
        linedata = np.concatenate(linedata, 0)
        # Normalize each feature group (MFCC, chroma, mel, ...) separately.
        _normalize_feature_segments(linedata)
        if (np.random.rand(1) < 0.9):
            traindata.append((np.reshape(np.array(linedata), (input_number))))
            trainlabel.append(vectorized_result(catedata))
            if (np.random.rand(1) < 0.15):
                valdata.append(np.reshape(np.array(linedata), (input_number)))
                vallabel.append(vectorized_result(catedata))
            else:
                testdata.append(np.reshape(np.array(linedata), (input_number)))
                testlabel.append(vectorized_result(catedata))
    traindata = np.array(traindata)
    trainlabel = np.array(trainlabel)
    valdata = np.array(valdata)
    vallabel = np.array(vallabel)
    testdata = np.array(testdata)
    testlabel = np.array(testlabel)
    print('traindata_number='+str(traindata.shape)+'\n')
    print('trainlabel_number='+str(trainlabel.shape)+'\n')
    print('valdata=' + str(len(valdata)) + '\n')
    print('testdata=' + str(len(testdata))+ '\n')
    return traindata,trainlabel, valdata,vallabel, testdata,testlabel
def train(mnist, valdata, vallabel, testdata, testlabel):
    """Build the classification graph and train it with SGD.

    A checkpoint is saved whenever the test accuracy improves.

    Args:
        mnist: batching object exposing next_batch(BATCH_SIZE)
               (a My_DataSet wrapping the training split, despite the name).
        valdata, vallabel: validation split (evaluation currently commented out).
        testdata, testlabel: test split, evaluated every 500 steps.
    """
    x = tf.placeholder(tf.float32, [None, my_inference.INPUT_NODE], name='x-input')
    y_ = tf.placeholder(tf.float32, [None, my_inference.OUTPUT_NODE], name='y-input')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')
    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = my_inference.inference(x, regularizer, keep_prob)
    predictions = tf.nn.softmax(y)
    # Expose the softmax op so prediction scripts can fetch it after restoring
    # the graph from a checkpoint (see pridict() in my_pridict.py).
    tf.add_to_collection("predicts", predictions)
    correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # accuracy metric
    # tf.summary.scalar('accuracy',accuracy)
    global_step = tf.Variable(0, trainable=False)
    variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Labels are one-hot, so argmax converts them to sparse class indices.
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1))
    cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # Total loss = cross entropy + L2 regularization terms collected by inference().
    loss = cross_entropy_mean + tf.add_n(tf.get_collection('losses'))
    # Exponentially decayed learning rate, stepped every 1000 global steps.
    learning_rate = tf.train.exponential_decay(
        LEARNING_RATE_BASE,
        global_step,
        1000, LEARNING_RATE_DECAY,
        staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    # Group the optimizer step with the moving-average update.
    with tf.control_dependencies([train_step, variables_averages_op]):
        train_op = tf.no_op(name='train')
    saver = tf.train.Saver(max_to_keep=10)
    with tf.Session() as sess:
        # To resume training instead: import_meta_graph + restore latest checkpoint.
        tf.global_variables_initializer().run()
        max_test_accuracy = 0.0
        max_acc_index = 0
        for i in range(TRAINING_STEPS):
            xs, ys = mnist.next_batch(BATCH_SIZE)
            _, loss_value, step, acc_train, lr = sess.run([train_op, loss, global_step, accuracy, learning_rate], feed_dict={x: xs, y_: ys, keep_prob: My_keep_prob})
            if i % 500 == 0:
                # Evaluate on the test split with dropout disabled (keep_prob=1.0);
                # this feed does not participate in back-propagation.
                acc_2 = sess.run([accuracy], feed_dict={x: testdata, y_: testlabel, keep_prob: 1.0})
                if (float(acc_2[0]) > max_test_accuracy):
                    max_test_accuracy = float(acc_2[0])
                    max_acc_index = i
                    # Checkpoint only when the test accuracy improves.
                    saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)
                print("After %d step,lr %g, train_loss %g, train_acc is %g, test_acc is %g ,max_acc is %g,index: %d" % (step, lr, loss_value, float(acc_train), float(acc_2[0]), max_test_accuracy, max_acc_index))
def main(argv=None):
    """Entry point: load the heart-sound dataset and train the network."""
    traindata, trainlabel, valdata, vallabel, testdata, testlabel = load_data()
    # Wrap the training split in the batching helper expected by train().
    dataset = My_DataSet(traindata, trainlabel)
    train(dataset, valdata, vallabel, testdata, testlabel)
# Run training when executed as a script.
if __name__ == '__main__':
    main()
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,077 | Lechatelia/heart-sound | refs/heads/master | /my_pridict.py | import my_inference
import audio_processing
import tensorflow as tf
import my_train
import numpy as np
import os
test_ckpt_path="model0726/hs_model-52501.meta"
ckpt_path="model0726/hs_model-52501"
predictions_map=['normal','extrahls','artifact','extrastole','murmur']
# saver = tf.train.import_meta_graph('MNIST_model/mnist_model-240001.meta')
# saver.restore(sess, tf.train.latest_checkpoint('MNIST_model/'))
def pridict(sess, features):
    """Run the restored model on pre-processed feature rows.

    Assumes the graph was already restored into *sess* (see __main__ below),
    so the placeholders and the "predicts" collection exist.

    :param sess: tf.Session holding the restored graph.
    :param features: 2D numpy array with shape [num_pridict, 577]
    :return: softmax probabilities of the FIRST row only (1-D array of 5 values).
    """
    # Look up the placeholders by name in the restored graph.
    graph = tf.get_default_graph()
    x_in = graph.get_operation_by_name('x-input').outputs[0]
    y_in = graph.get_operation_by_name('y-input').outputs[0]
    keep_in = graph.get_operation_by_name('keep_prob').outputs[0]
    # Softmax op stored via tf.add_to_collection("predicts", ...) at training time.
    predictions = tf.get_collection("predicts")[0]
    # y_in is only fed to satisfy the placeholder; a dummy zero label is enough.
    # keep_prob=1 disables dropout at inference time.
    pre = sess.run(predictions, feed_dict={x_in: features, y_in: (np.array([0, 0, 0, 0, 0])).reshape(-1, 5), keep_in: 1})[0]
    return pre
def predict_wav(sess, filename):
    """Extract features from one wav file and return its class probabilities."""
    raw = audio_processing.extract_feature(filename)
    flat = np.concatenate(raw, 0)
    # One row of 577 features per prediction.
    batch = np.reshape(flat, [-1, 577])
    return pridict(sess, batch)
# (start, end) slices of the feature vector that are normalized independently;
# end=None means "through the end of the vector".
_SEGMENTS = [(0, 40), (40, 52), (52, 180), (180, 187), (187, 193), (193, None)]


def guiyi(linedata):
    """Min-max normalize each feature segment of *linedata* in place.

    Every segment in _SEGMENTS is shifted so its minimum becomes 0 and then
    scaled by its span so its maximum becomes 1.  This replaces twelve
    copy-pasted lines (one shift + one scale per hard-coded slice) with a
    single data-driven loop; the two-pass write-back order of the original
    is preserved exactly.

    Args:
        linedata: mutable sequence (list or 1-D numpy array) with at least
            194 feature values.

    Returns:
        The same *linedata* object, normalized in place.
    """
    for start, end in _SEGMENTS:
        segment = linedata[start:end]
        low = min(segment)
        # First pass: shift so the segment minimum is 0.
        linedata[start:end] = [x - low for x in segment]
        segment = linedata[start:end]
        # Second pass: scale by the span (min is 0 after the shift).
        span = max(segment) - min(segment)
        linedata[start:end] = [x / span for x in segment]
    return linedata
def predict_wav_list(sess, wav_list):
    """Classify each wav in *wav_list*, printing probabilities and top class."""
    for wav in wav_list:
        probs = predict_wav(sess, wav).tolist()
        best = probs.index(max(probs))
        print("hear sound::" + wav + "\tprob:\t" + str(probs))
        print("prindictions:\t" + predictions_map[best])
def predict_wav_indir(sess, dir, first=False):
    """Classify every file in *dir* and write results to 'pre_pro1.txt'.

    Args:
        sess: tf.Session holding the restored model graph.
        dir: directory path (with trailing separator) whose files are classified.
        first: when True the report file is truncated, otherwise appended.
    """
    mode = 'w' if first else 'a+'
    # Fix: the original leaked the file handle (never closed); the context
    # manager guarantees the report is flushed and closed.
    with open('pre_pro1.txt', mode) as out_file:
        for file in os.listdir(dir):
            pre_pro = predict_wav(sess, dir + file).tolist()
            if (max(pre_pro) < 0.35):
                # Low-confidence predictions fall back to class 2 ('artifact').
                index = 2
            else:
                index = pre_pro.index(max(pre_pro))
            out_file.write(file + '\t\t' + predictions_map[index] + "\tprob:\t" + str(pre_pro) + "\n")
if __name__ == '__main__':
    with tf.Session() as sess:
        # Restore the trained graph structure and weights from the checkpoint.
        saver = tf.train.import_meta_graph(test_ckpt_path)
        saver.restore(sess, ckpt_path)
        print('Model restored from: '+test_ckpt_path)
        # Classify every wav in each class folder; the report file is
        # truncated on the first folder and appended to afterwards.
        dir = ['../dataset3/extrastole/','../dataset3/murmur/','../dataset3/artifact/','../dataset3/normal/','../dataset3/extrahls/']
        for i in range(len(dir)):
            predict_wav_indir(sess, dir[i], first=(i == 0))
        # To classify specific individual recordings instead, use e.g.:
        # predict_wav_list(sess,
        #                  [
        #                      "wav/normal__201105011626.wav",
        #                      "wav/extrahls__201101161027.wav",
        #                      "wav/artifact__201106040947.wav",
        #                      "wav/murmur__201101051114.wav"
        #                  ])
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,078 | Lechatelia/heart-sound | refs/heads/master | /mysql.py | # coding:utf-8
import pymysql
import datetime
table_name = 'USE_INF'
cols = {'username': 'USE_NAME', 'user_age': 'USE_AGE', 'user_sex': 'USE_SEX', 'user_birthdar': 'USE_BIR'}
Server = ["127.0.0.1", "root", "970327", "renesas"]
# Server=["132.232.3.244", "root", "970327", "renesa"]
# NOTE:
# "VALUES ('%s', %d ,%d ,%d)" \
# the quotes around '%s' are required, otherwise the value is not recognized as a string
class Mysql():
    """Thin wrapper around a pymysql connection for the heart-sound app.

    Tables used: USE_INFMATION (user records), CONCLUSION (diagnoses) and a
    legacy USE_INF table.  connect() must be called before any other method.

    NOTE(review): all SQL statements are built with %-string formatting,
    which is vulnerable to SQL injection; parameterized queries (passing
    args to cursor.execute) would be safer — confirm before hardening.
    """

    def __init__(self):
        # Table/column names kept for reference; methods hard-code their SQL.
        self.table = table_name
        self.col = cols

    def connect(self, ip, users, password, sqlname):
        """Open the database connection (host, user, password, db name)."""
        self.db = pymysql.connect(ip, users, password, sqlname)

    def show_version(self):
        """Print the MySQL server version."""
        # cursor() creates a cursor object for executing statements.
        cursor = self.db.cursor()
        # execute() runs the SQL query.
        cursor.execute("SELECT VERSION()")
        # fetchone() retrieves a single row.
        data = cursor.fetchone()
        print("Database version : %s " % data)

    def show_Info(self):
        """Print every user row, newest USE_ID first."""
        cursor = self.db.cursor()
        sql = "SELECT * FROM USE_INFMATION order by USE_ID DESC"
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            print("--------------------------------------------------")
            for row in results:
                print("|%d\t\t%s\t\t|%s\t\t|%d\t\t|%d\t\t|" % \
                      (row[0], row[1], row[2], row[3], row[4]))
                # (row[0], row[1], row[2].strftime("%Y-%m-%d"), row[3],row[4]))
            print("--------------------------------------------------")
        except:
            print("Error: unable to fetch data")

    def Get_all_user_info_return_str(self):
        """Return every user row formatted via Info2str (None on failure)."""
        cursor = self.db.cursor()
        info_list = []
        sql = "SELECT * FROM USE_INFMATION "
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            for row in results:
                info_list.append(self.Info2str(row))
            return info_list
        except:
            print("Error: unable to fetch data")

    def show_conclusion(self):
        """Print every diagnosis row from CONCLUSION."""
        cursor = self.db.cursor()
        sql = "SELECT * FROM CONCLUSION "
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            print("--------------------------------------------------")
            for row in results:
                print("|%d\t\t|%s\t\t|%5f\t\t|%s\t\t\t|%s\t\t|" % \
                      (row[0], row[1], row[2], row[3], row[4]))
            print("--------------------------------------------------")
        except:
            print("Error: unable to fetch data")

    def Insert_info_old(self, info):
        """Insert into the legacy USE_INF table: info = [name, age, sex, birthday]."""
        cursor = self.db.cursor()
        # The quotes around '%s' are required so the value is treated as a string.
        sql = "INSERT INTO " \
              "USE_INF(USE_NAME,USE_AGE, USE_SEX,USE_BIR) " \
              "VALUES ('%s', %d ,%d ,%d)" \
              % ((info[0]), info[1], info[2], info[3])
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            # Roll back on any error.
            self.db.rollback()

    def Insert_User_Info(self, info):
        """Insert a user row: info = [id, name, 'YYYY-MM-DD' birthday, sex, tel]."""
        cursor = self.db.cursor()
        sql = "INSERT INTO USE_INFMATION (USE_ID,USE_NAME,USE_BRI, USE_SEX,USE_TEL) VALUES (%d,'%s', '%s' ,%d,%d)" % (
            info[0], info[1], info[2], info[3], info[4])
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            # Roll back on any error.
            self.db.rollback()

    def Insert_Diagnosis(self, info):
        """Insert a diagnosis: info = [id, result, probability, wav path]; timestamped with NOW()."""
        cursor = self.db.cursor()
        sql = "INSERT INTO CONCLUSION (USE_ID,RESULT,POSSIBLITY, DATETIME,WAV_INF) VALUES (%d,'%s','%f',NOW(),'%s')" % (
            info[0], info[1], info[2], info[3])
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            # Roll back on any error.
            self.db.rollback()

    def Delete_by_name_old(self, name):
        """Delete rows from the legacy USE_INF table matching *name*."""
        cursor = self.db.cursor()
        sql = "DELETE FROM USE_INF WHERE USE_NAME = '%s'" % (name)
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            self.db.rollback()

    def Delete_Info_by_ID(self, ID):
        """Delete the user row with the given USE_ID."""
        cursor = self.db.cursor()
        sql = "DELETE FROM USE_INFMATION WHERE USE_ID = '%s'" % (ID)
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            self.db.rollback()

    def Return_Info_by_ID(self, ID):
        """Return all user rows with the given USE_ID (tuple of tuples)."""
        cursor = self.db.cursor()
        sql = "SELECT * FROM USE_INFMATION WHERE USE_ID = '%s'" % (ID)
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            return results
        except:
            # On failure roll back and implicitly return None.
            self.db.rollback()

    def Return_all_ID_Name(self):
        """Return the five newest (USE_ID, USE_NAME) pairs."""
        cursor = self.db.cursor()
        sql = "SELECT USE_ID,USE_NAME FROM USE_INFMATION ORDER by USE_ID DESC limit 5 "
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            return results
        except:
            print("Error: unable to fetch data")

    def Return_all_ID(self):
        """Return every USE_ID as a flat list."""
        cursor = self.db.cursor()
        id_list = []
        sql = "SELECT USE_ID FROM USE_INFMATION "
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            for row in results:
                id_list.append(row[0])
            return id_list
        except:
            print("Error: unable to fetch data")

    def Return_max_id(self):
        """Return the largest USE_ID currently stored."""
        return max(self.Return_all_ID())

    def Return_dianosis_by_id(self, ID):
        """Return up to 10 most recent diagnosis rows for *ID*."""
        cursor = self.db.cursor()
        sql = "SELECT * FROM CONCLUSION WHERE USE_ID = '%s' ORDER by DATETIME DESC limit 10 " % (ID)
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            return results
        except:
            print("Error: unable to fetch data")

    def ID_Name_list_to_str(self, list_idname):
        """Format (id, name) pairs as NUL-padded fixed-width strings (10 + 20 chars)."""
        str_list = []
        for idname in list_idname:
            str_list.append('{0[0]:\0<10}{0[1]:\0<20}'.format(idname))
        return str_list

    def Info2str(self, infolist):
        """Format a 5-field user row as one NUL-padded fixed-width string."""
        if len(infolist) == 5:
            return '{0[0]:\0<10}{0[1]:\0<20}{0[2]}{0[3]:\0<1}{0[4]:\0<11}'.format(infolist)
        else:
            # Returns None after printing (original behavior).
            print('info format error')

    def Diagnosis2str(self, diaglist):
        """Format a 5-field diagnosis row as one NUL-padded fixed-width string."""
        if len(diaglist) == 5:
            return '{0[0]:\0<10}{0[1]:\0<20}{0[2]:\0<10}{0[3]}{0[4]:\0<30}'.format(diaglist)
        else:
            # Returns None after printing (original behavior).
            print('diagnosis format error')

    def Diagnosis_list_to_str(self, results):
        """Format many diagnosis rows via Diagnosis2str."""
        str_list = []
        for row in results:
            str_list.append(self.Diagnosis2str(row))
        return str_list

    def Delete_Diagnosis_by_ID(self, ID):
        """Delete all diagnosis rows with the given USE_ID."""
        cursor = self.db.cursor()
        sql = "DELETE FROM CONCLUSION WHERE USE_ID = '%s'" % (ID)
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            self.db.rollback()

    def Update_birth_by_name(self, name, changr):
        """Update USE_BIR in the legacy USE_IN F table for the user named *name*."""
        cursor = self.db.cursor()
        sql = " UPDATE USE_INF SET USE_BIR='%d' WHERE USE_NAME='%s' " % (changr, name)
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            self.db.rollback()

    def Add_ALTER(self):
        """One-off schema tweak; edit the SQL below before using."""
        cursor = self.db.cursor()
        sql = "ALTER TABLE USE_INF ADD USE_Time DATETIME()"
        # sql="ALTER TABLE USE_INF ALTER COLUMN USE_BIR INT(8)"
        try:
            cursor.execute(sql)
            self.db.commit()
        except:
            self.db.rollback()

    def close_sql(self):
        """Close the database connection."""
        self.db.close()
def Add_info_to_SQL(ID=1234567890, Name='XJTUer', Birthday='1997-03-27', sex=1, tel=13712345678):
    """Open a connection, insert one user-info row, then disconnect."""
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    db.Insert_User_Info([ID, Name, Birthday, sex, tel])
    db.close_sql()
def Add_info_to_SQL_no_id(Name='XJTUer', Birthday='1997-03-27', sex=1, tel=13712345678):
    """Insert a user row, auto-assigning the next free USE_ID."""
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    next_id = db.Return_max_id() + 1
    db.Insert_User_Info([next_id, Name, Birthday, sex, tel])
    db.close_sql()
def Add_Diagnosis_to_SQL(ID=1234567890, Result='normal', Possi=0.512345, wav_dir='./normal.wav'):
    """Insert one diagnosis row (timestamped server-side) and disconnect."""
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    db.Insert_Diagnosis([ID, Result, Possi, wav_dir])
    db.close_sql()
def Acquire_Info_by_ID(id):
    """Look up one user by *id* and return the fixed-width info string.

    Note: the connection is deliberately left open, matching the original.
    """
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    record = db.Return_Info_by_ID(id)[0]
    return db.Info2str(record)
def Update_all_name_id_by_str():
    """Return the five newest (id, name) pairs as fixed-width strings."""
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    pairs = db.Return_all_ID_Name()
    return db.ID_Name_list_to_str(pairs)
def get_all_info():
    """Return every user row formatted as a fixed-width string."""
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    return db.Get_all_user_info_return_str()
def get_diagnosis_by_id(id):
    """Return the most recent diagnosis rows for *id*, formatted as strings."""
    db = Mysql()
    db.connect(Server[0], Server[1], Server[2], Server[3])
    rows = db.Return_dianosis_by_id(id)
    return db.Diagnosis_list_to_str(rows)
def SQL_test():
    """Smoke test: re-insert a known user and diagnosis, then dump both tables."""
    # NOTE: currently unused; kept from earlier experiments.
    time = str(datetime.datetime.now()).split('.')[0]
    mysql = Mysql()
    mysql.connect(Server[0], Server[1], Server[2], Server[3])
    # Make the insert idempotent by deleting any previous copy first.
    mysql.Delete_Info_by_ID('1234567890')
    mysql.Insert_User_Info([1234567890, 'boy1', '1997-03-27', 1, 13772052853])
    mysql.Delete_Diagnosis_by_ID(1234567890)
    mysql.Insert_Diagnosis([123124312, "normal", 0.57801119, './normal.wav'])
    # info=mysql.Return_Info_by_ID(1234567890)[0]
    # info=mysql.Info2str(info)
    # idname=mysql.Return_all_ID_Name()
    # idname=mysql.ID_Name_list_to_str(idname)
    #
    # id=mysql.Return_all_ID()
    # id_max=mysql.Return_max_id()
    # print(mysql.Diagnosis_list_to_str(mysql.Return_dianosis_by_id(123456521)))
    mysql.show_Info()
    mysql.show_conclusion()
    mysql.close_sql()
# Ad-hoc manual checks; most example calls are left commented out.
if __name__ == '__main__':
    # Add_info_to_SQL(1234888821,'Peter','1972-01-02',0,13785266548)
    # Add_Diagnosis_to_SQL(123456521, 'murmur', 0.8558, wav_dir='{name}.wav'.format(name=str(str(datetime.datetime.now()).split('.')[0])))
    #
    # print(Acquire_Info_by_ID(1234567890))
    # a = get_all_info()
    # print(a)
    print(get_diagnosis_by_id(1234567890))
    # Add_info_to_SQL_no_id()
    SQL_test()
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,079 | Lechatelia/heart-sound | refs/heads/master | /filter.py | from numpy import sin, arange, pi
from scipy.signal import lfilter, firwin,freqz
from pylab import figure, plot, grid, show
import librosa
import matplotlib.pyplot as plt
import librosa.display
import librosa.output
import numpy as np
import os
import wave
#you should install ffmpeg packet
is_real_wav=False
# True when the input is a well-formed (official dataset) wav that librosa can read;
# False when it is our own raw capture, which must be read by the custom wav_open() below.
filt_dir='wav/'
filt_store_dir='wav1/'
Cutoff_hz = [10.0,1000.0]
# Cutoff_hz = 1000
Numtaps = 499
Sample_rate=16000
def filt_wav_store(dir, filename, filt_store_dirs):
    """Band-pass filter one wav file and write the result to *filt_store_dirs*.

    Fixes two defects in the original:
    - Cutoff_hz is a list, so `Cutoff_hz / nyq_rate` raised TypeError; the
      band edges are now scaled element-wise and a band-pass filter is built
      (pass_zero=False), matching filter_wav_test below.
    - The output previously ignored the *filt_store_dirs* parameter and
      wrote to the global filt_store_dir instead.

    Args:
        dir: input directory (path ending with a separator).
        filename: name of the wav file inside *dir*.
        filt_store_dirs: output directory for the filtered wav.
    """
    print(filename)
    X, sample_rate = librosa.load(dir + filename)
    nyq_rate = sample_rate / 2.
    # Normalize the cutoff frequencies to the Nyquist rate.
    band = [c / nyq_rate for c in Cutoff_hz]
    fir_coeff = firwin(Numtaps, band, pass_zero=False)
    filtered_X = lfilter(fir_coeff, 1.0, X)
    librosa.output.write_wav(filt_store_dirs + filename, filtered_X, sample_rate, norm=True)
def filt_dir_all_wav(dir, filt_store_dir):
    """Filter every file in *dir*, writing results into *filt_store_dir*."""
    # Create the output directory on first use.
    if not os.path.exists(filt_store_dir):
        os.mkdir(filt_store_dir)
    for name in os.listdir(dir):
        filt_wav_store(dir, name, filt_store_dir)
def wav_open(filename):
    """Read a raw 8-bit wav capture and return its samples scaled to [0, 1].

    The file is read with the stdlib wave module; the unsigned 8-bit frames
    are divided by 255 to map them onto [0, 1].  Channel count, sample width
    and frame rate from the header are read but not validated — assumes a
    mono, 1-byte-per-sample capture (TODO confirm against the recorder).

    Fixes vs. the original:
    - numpy.fromstring (removed in NumPy 2.0) replaced by numpy.frombuffer;
      the /255 division copies the data, so the result stays writable.
    - the unused `import pylab` removed (its only uses were commented out,
      and it made every call require matplotlib).

    Returns:
        1-D numpy float array with one value per frame.
    """
    import wave
    import numpy
    # wave.open returns a Wave_read exposing the header and frame data.
    f = wave.open(filename, "rb")
    # getparams(): (nchannels, sampwidth(bytes), framerate, nframes, comptype, compname);
    # only uncompressed data is supported by the wave module.
    params = f.getparams()
    nchannels, sampwidth, framerate, nframes = params[:4]
    # Read every frame as raw bytes.
    str_data = f.readframes(nframes)
    f.close()
    # Interpret the bytes as unsigned 8-bit samples and scale to [0, 1].
    wave_data = numpy.frombuffer(str_data, dtype=numpy.uint8) / 255
    wave_data.shape = -1
    return wave_data
def wav_save(file_name, wave_data):
    """Write *wave_data* to *file_name* as a mono 8-bit wav at Sample_rate.

    Args:
        file_name: output path.
        wave_data: numpy array of samples; written byte-for-byte, so uint8
            data is expected (TODO confirm with callers).
    """
    f = wave.open(file_name, "wb")
    # Header: mono, 1 byte per sample, module-level sample rate.
    f.setnchannels(1)
    f.setsampwidth(1)
    f.setframerate(Sample_rate)
    # Fix: ndarray.tostring() was removed in NumPy 2.0; tobytes() is the
    # drop-in replacement with identical output.
    f.writeframes(wave_data.tobytes())
    f.close()
def filter_wav_test(filename):
    """Band-pass filter one wav and plot the filter response and spectrograms.

    Reads the file either with librosa (official wavs) or the custom 8-bit
    reader (our own captures), depending on the module flag is_real_wav.
    Writes `<name>save.wav` (raw input) and `<name>filt.wav` (filtered).
    """
    if is_real_wav:
        X, sample_rate = librosa.load(filename, sr=Sample_rate)
    else:
        # Our own captures are not library-readable; use the custom reader.
        X = wav_open(filename)
        sample_rate = Sample_rate
    librosa.output.write_wav(filename.replace('.wav', 'save.wav'), X, Sample_rate, norm=False)
    nyq_rate = sample_rate / 2.
    cutoff_hz = Cutoff_hz
    # Length of the filter (number of coefficients, i.e. the filter order + 1).
    numtaps = Numtaps
    # Band edges normalized to the Nyquist rate; pass_zero=False => band-pass.
    band = [i / nyq_rate for i in cutoff_hz]
    fir_coeff = firwin(numtaps, band, pass_zero=False)
    # Plot the filter's frequency response in Hz.
    w, h = freqz(fir_coeff)
    plt.title('Digital filter frequency response')
    plt.plot(w * nyq_rate / pi, 20 * np.log10(abs(h)), 'b')
    plt.ylabel('Amplitude [dB]', color='b')
    # plt.xlabel('Frequency [rad/sample]')
    plt.xlabel('Frequency [HZ]')
    plt.grid()
    plt.axis('tight')
    plt.xlim(0, 2000)
    plt.show()
    filtered_X = lfilter(fir_coeff, 1.0, X)
    librosa.output.write_wav(filename.replace('.wav', 'filt.wav'), filtered_X, sr=sample_rate, norm=True)
    D = librosa.stft(X.astype(np.float))
    D1 = librosa.stft(filtered_X)
    # Use left-aligned frames, instead of centered frames.
    D_left = librosa.stft(X.astype(np.float), center=False)
    # Use a shorter hop length.
    D_short = librosa.stft(filtered_X, hop_length=64)
    # Display a spectrogram of the raw signal...
    librosa.display.specshow(librosa.amplitude_to_db(librosa.magphase(D)[0], np.max), y_axis='log', x_axis='time')
    plt.title('Power spectrogram')
    plt.colorbar(format='%+2.0f dB')
    plt.tight_layout()
    plt.show()
    # ...and of the filtered signal.
    librosa.display.specshow(librosa.amplitude_to_db(librosa.magphase(D1)[0], np.max), y_axis='log', x_axis='time')
    plt.title('Power spectrogram after filted')
    plt.colorbar(format='%+2.0f dB')
    plt.tight_layout()
    plt.show()
def filter_test():
    """Demo: build a 1 kHz + 15 kHz test tone, low-pass it at 6 kHz, plot it,
    and print signal/coefficients/output as C float32_t array initializers."""
    # ------------------------------------------------
    # Create a signal for demonstration.
    # ------------------------------------------------
    # 320 samples of (1000Hz + 15000 Hz) at 48 kHz
    sample_rate = 48000.
    nsamples = 320
    F_1KHz = 1000.
    A_1KHz = 1.0
    F_15KHz = 15000.
    A_15KHz = 0.5
    t = arange(nsamples) / sample_rate
    signal = A_1KHz * sin(2 * pi * F_1KHz * t) + A_15KHz * sin(2 * pi * F_15KHz * t)
    # ------------------------------------------------
    # Create a FIR filter and apply it to signal.
    # ------------------------------------------------
    # The Nyquist rate of the signal.
    nyq_rate = sample_rate / 2.
    # The cutoff frequency of the filter: 6KHz
    cutoff_hz = 6000.0
    # Length of the filter (number of coefficients, i.e. the filter order + 1)
    numtaps = 29
    # Use firwin to create a lowpass FIR filter
    fir_coeff = firwin(numtaps, cutoff_hz / nyq_rate)
    # Use lfilter to filter the signal with the FIR filter
    filtered_signal = lfilter(fir_coeff, 1.0, signal)
    # ------------------------------------------------
    # Plot the original and filtered signals.
    # ------------------------------------------------
    # The first N-1 samples are "corrupted" by the initial conditions
    warmup = numtaps - 1
    # The phase delay of the filtered signal
    delay = (warmup / 2) / sample_rate
    figure(1)
    # Plot the original signal
    plot(t, signal, 'b')
    # Plot the filtered signal, shifted to compensate for the phase delay
    plot(t - delay, filtered_signal, 'r-')
    # Plot just the "good" part of the filtered signal.  The first N-1
    # samples are "corrupted" by the initial conditions.
    plot(t[warmup:] - delay, filtered_signal[warmup:], 'g', linewidth=4)
    grid(True)
    figure(2)
    plot(t[warmup:] - delay, filtered_signal[warmup:], 'g', linewidth=4)
    # figure(3)
    plot(t, filtered_signal, 'r-')
    show()
    # Dump everything in a form pastable into embedded C code.
    print_values('signal', signal)
    print_values('fir_coeff', fir_coeff)
    print_values('filtered_signal', filtered_signal)
# ------------------------------------------------
# Print values
# ------------------------------------------------
def print_values(label, values):
    """Print *values* as a C `float32_t` array initializer named *label*."""
    declaration = "float32_t %s[%d]" % (label, len(values))
    initializer = ", ".join("%+.10f" % v for v in values)
    print("%-30s = {%s}" % (declaration, initializer))
#
# def waveletSmooth(x, wavelet="db4", level=1, title=None):
# # calculate the wavelet coefficients
# coeff = pywt.wavedec(x, wavelet, mode="per")
# # calculate a threshold
# sigma = mad(coeff[-level])
# # changing this threshold also changes the behavior,
# # but I have not played with this very much
# uthresh = sigma * np.sqrt(2 * np.log(len(x)))
# coeff[1:] = (pywt.threshold(i, value=uthresh, mode="soft") for i in coeff[1:])
# # reconstruct the signal using the thresholded coefficients
# y = pywt.waverec(coeff, wavelet, mode="per")
# f, ax = plt.subplots()
# plot(x, color="b", alpha=0.5)
# plot(y, color="b")
# if title:
# ax.set_title(title)
# ax.set_xlim((0, len(y)))
# Manual filtering experiments; earlier trial calls are left commented out.
if __name__ == '__main__':
    # filter_wav_test('wav/normal__201105011626.wav')
    # filt_dir_all_wav(filt_dir,filt_store_dir)
    # X = wav_open('2018-08-11 11_16_19.wav')
    # print(librosa.util.normalize([-4.0,0.0 , 1.0]))
    filter_wav_test('2018-08-11 21_35_29.wav')
    filter_wav_test('normal_0112filt.wav')
    # wav_open('2018-08-11 11_16_19.wav')
    filter_wav_test('/home/lechatelia/Desktop/Codes/dataset2/filt/extrastole/extrastole_0001filt.wav')
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,080 | Lechatelia/heart-sound | refs/heads/master | /server_predict.py | import my_inference
import audio_processing
import tensorflow as tf
import mnist_train
import numpy as np
import socket
import time
import my_pridict
from sever import Server
import mysql
test_ckpt_path="model0726/hs_model-52501.meta"
ckpt_path="model0726/hs_model-52501"
predictions_map=['normal','extrahls','artifact','extrastole','murmur']
predict_wav_dir=[]
# saver = tf.train.import_meta_graph('MNIST_model/mnist_model-240001.meta')
# saver.restore(sess, tf.train.latest_checkpoint('MNIST_model/'))
import socket
import time
import threading
import datetime
# import server_predict
#
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 创建socket (AF_INET:IPv4, AF_INET6:IPv6) (SOCK_STREAM:面向流的TCP协议)
#
# # s.bind(('192.168.1.103', 6666)) # 绑定本机IP和任意端口(>1024)
# s.bind(('127.0.0.1', 6666)) # 绑定本机IP和任意端口(>1024)
#
# s.listen(1) # 监听,等待连接的最大数目为1
#
# print('Server is running...')
class Server():
    """TCP server that receives heart-sound wav uploads and answers client commands.

    The protocol (see event_judge) is line-less: the client sends a command
    token ('wav_start', 'acquire_info', 'get_time', 'ID_update'), the server
    echoes it, then both sides exchange fixed handshake tokens.
    """

    def __init__(self, ip, port, sess):
        # Expected byte length of one uploaded wav recording.
        self.wav_length = 32812
        # self.wav_length=10
        # TF session with the restored model, used when classifying uploads.
        self.sess = sess
        # AF_INET: IPv4; SOCK_STREAM: TCP.
        self.s = socket.socket(socket.AF_INET,
                               socket.SOCK_STREAM)
        # Allow quick restarts on the same port.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # Bind to the given local IP and a port (>1024).
        self.s.bind((ip, port))
        self.s.listen(1)  # at most one pending connection
        print('Server is running...waitting a connection')
        # Block until a client connects.
        self.sock, self.addr = self.s.accept()
        print('Accept new connection from %s:%s.' % self.addr)

    def reset(self, ip, port, sess):
        """Re-create the listening socket and wait for a new client."""
        self.s = socket.socket(socket.AF_INET,
                               socket.SOCK_STREAM)
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.s.bind((ip, port))
        self.s.listen(1)
        print('Server is reset...waitting a connection')
        self.sock, self.addr = self.s.accept()
        print('Accept new connection from %s:%s.' % self.addr)

    def receive_wav(self, wav_name):
        """Receive exactly self.wav_length bytes and store them in *wav_name*.

        On a socket timeout the missing tail is zero-padded; on a reset the
        partial file is kept.  After each chunk (except the last) the server
        sends a 'get' acknowledgement.
        """
        Length = self.wav_length
        length = 0
        counter = 0
        with open(wav_name, 'wb') as f:
            while (length < Length):
                try:
                    data = self.sock.recv(1024)
                except socket.timeout:
                    # Pad the file with NUL bytes up to the expected length.
                    print("time out")
                    for i in range(Length - length):
                        f.write(chr(0x00).encode())
                    print(str(length))
                    print(str(len(data)))
                    break
                except ConnectionResetError:
                    self.sock.close()
                    print('ConnectionResetError')
                    print(str(length))
                    print(str(len(data)))
                    break
                if data:
                    print("receiced:\t")
                    print(len(data))
                    f.write(data)
                    length += len(data)
                    if length == Length:
                        f.close()
                        break
                    else:
                        # Acknowledge the chunk so the client sends the next one.
                        self.sock.send('get'.encode())
                        print('get')
                else:
                    # Empty recv => client closed the connection.
                    self.sock.close()
                    print('Connection from %s:%s closed.' % self.addr)
                    break
        print('transmit ok')

    def close(self):
        """Close the listening socket."""
        self.s.close()

    def event_judge(self, pridict=False):
        """Main command loop: dispatch on the client's command token.

        Args:
            pridict: when True, classify each received wav, send the result
                back as 'result<class><prob>' and log it to the database.
        """
        while (1):
            try:
                data = self.sock.recv(1024)
            except OSError:
                print('Connection Error from socket %s:%s .' % self.addr)
                self.sock.close()
                break
            else:
                if not data:
                    print('Connection from %s:%s closed.' % self.addr)
                    self.sock.close()
                    break
                elif data.decode('utf-8') == 'wav_start':
                    self.sock.send('wav_start'.encode())
                    print('wav_start')
                    # Receive the user ID (skipping any repeated 'wav_start').
                    id = self.sock.recv(1024)
                    while (id.decode('utf-8') == 'wav_start'):
                        id = self.sock.recv(1024)
                    self.sock.send(id)
                    # Receive the wav into a timestamp-named file.
                    time = str(datetime.datetime.now()).split('.')[0].replace(':', '_')
                    self.receive_wav('{name}.wav'.format(name=time))
                    if pridict:
                        pre_pro = predict_wav(self.sess, '{name}.wav'.format(name=time))
                        diagnosis = predictions_map[pre_pro.index(max(pre_pro))]
                        print(diagnosis)
                        # Cap a perfect score so the 4-digit encoding below fits.
                        if (max(pre_pro) == 1):
                            possibility = 0.9999
                        else:
                            possibility = max(pre_pro)
                        # 'result' + 1-based class index + probability*10000, '*'-padded to 4 chars.
                        result = 'result{0}{1:*<4}'.format(str(pre_pro.index(max(pre_pro)) + 1), str(possibility * 10000).split('.')[0])
                        print(result)
                        try:
                            self.sock.send(result.encode())
                        except OSError:
                            print('不够就结束啦')
                            break
                        print(result)
                        print(time)
                        # Not reachable from inside a LAN-only deployment.
                        mysql.Add_Diagnosis_to_SQL(int(id), diagnosis, max(pre_pro), wav_dir='{name}.wav'.format(name=time))
                    if (self.sock.recv(1024).decode('utf-8') == 'wav_end'):
                        self.sock.send('wav_end'.encode())
                        print('wav end')
                    else:
                        print("communication error: wav_end")
                elif data.decode('utf-8') == 'acquire_info':
                    # Send the fixed-width info string for the requested user ID.
                    print('acquire_info start')
                    self.sock.send('acquire_info'.encode())
                    id = int(self.sock.recv(1024))
                    self.sock.send(mysql.Acquire_Info_by_ID(id).encode())
                    if (self.sock.recv(1024).decode('utf-8') == 'info_end'):
                        self.sock.send('info_end'.encode())
                        print('info end')
                    else:
                        print("communication error: info_end")
                elif data.decode('utf-8') == 'get_time':
                    # Send the server's current wall-clock time.
                    print('get_time start')
                    self.sock.send(str(datetime.datetime.now()).encode())
                    if (self.sock.recv(1024).decode('utf-8') == 'time_end'):
                        self.sock.send('time_end'.encode())
                        print('time end')
                    else:
                        print("communication error: info_end")
                elif data.decode('utf-8') == 'ID_update':
                    # Send the newest (id, name) records as one concatenated string.
                    print('ID update')
                    idname = mysql.Update_all_name_id_by_str()
                    self.sock.send(('ID_' + str(len(idname))).encode())
                    if (self.sock.recv(1024).decode('utf-8') == 'ready'):
                        self.sock.send(self.strlist_2_one_str(idname).encode())
                    else:
                        print("no ready end")
                    if (self.sock.recv(1024).decode('utf-8') == 'update_end'):
                        self.sock.send('update_end'.encode())
                        print('ID update end')
                    else:
                        print("communication error: update_end")
                else:
                    print('unknown messsge:\t{mess}'.format(mess=data.decode('utf-8')))

    def strlist_2_one_str(self, list):
        """Concatenate a list of strings; returns 'blank' for an empty list.

        NOTE(review): the parameter shadows builtin `list` and the local
        shadows builtin `str`; renaming would be a behavior-neutral cleanup.
        """
        str = ''
        if len(list) == 0:
            return 'blank'
        for i in list:
            str = str + i
        return str
def pridict(sess, features):
    """Run the restored TensorFlow graph on pre-processed feature rows.

    :param sess: an active tf.Session whose graph was restored from a checkpoint
    :param features: 2D numpy array with shape [num_pridict, 577]
    :return: softmax probabilities for the first sample
    """
    # Look up the placeholders by the names they were given at training time.
    g = tf.get_default_graph()
    x_placeholder = g.get_operation_by_name('x-input').outputs[0]
    y_placeholder = g.get_operation_by_name('y-input').outputs[0]
    keep_placeholder = g.get_operation_by_name('keep_prob').outputs[0]
    softmax_op = tf.get_collection("predicts")[0]
    feed = {
        x_placeholder: features,
        # The label placeholder is unused at inference time; feed zeros.
        y_placeholder: (np.array([0, 0, 0, 0, 0])).reshape(-1, 5),
        # Disable dropout during prediction.
        keep_placeholder: 1,
    }
    return sess.run(softmax_op, feed_dict=feed)[0]
def predict_wav(sess, filename):
    """Extract audio features from *filename* and return the class
    probabilities as a plain Python list."""
    feature_groups = audio_processing.extract_feature(filename)
    # Flatten the six feature vectors into a single [1, 577] row.
    flat = np.concatenate(feature_groups, 0)
    flat = np.reshape(flat, [-1, 577])
    probabilities = pridict(sess, flat)
    return probabilities.tolist()
# def guiyi(linedata):
# linedata[0:40] = [x - min(linedata[0:40]) for x in linedata[0:40]]
# linedata[0:40] = [x / (max(linedata[0:40]) - min(linedata[0:40])) for x in linedata[0:40]]
# linedata[40:52] = [x - min(linedata[40:52]) for x in linedata[40:52]]
# linedata[40:52] = [x / (max(linedata[40:52]) - min(linedata[40:52])) for x in linedata[40:52]]
# linedata[52:180] = [x - min(linedata[52:180]) for x in linedata[52:180]]
# linedata[52:180] = [x / (max(linedata[52:180]) - min(linedata[52:180])) for x in linedata[52:180]]
# linedata[180:187] = [x - min(linedata[180:187]) for x in linedata[180:187]]
# linedata[180:187] = [x / (max(linedata[180:187]) - min(linedata[180:187])) for x in linedata[180:187]]
# linedata[187:193] = [x - min(linedata[187:193]) for x in linedata[187:193]]
# linedata[187:193] = [x / (max(linedata[187:193]) - min(linedata[187:193])) for x in linedata[187:193]]
# linedata[193:] = [x - min(linedata[193:]) for x in linedata[193:]]
# linedata[193:] = [x / (max(linedata[193:]) - min(linedata[193:])) for x in linedata[193:]]
# return linedata
def predict_wav_list(sess, wav_list):
    """Classify a list of wav files.

    :param sess: active tf.Session with the trained graph restored
    :param wav_list: iterable of wav file paths
    :return: list of predicted label names (entries of predictions_map)
    """
    end = []
    for wav in wav_list:
        # predict_wav already returns a plain Python list; the original
        # called .tolist() on it again, which raised AttributeError.
        pre_pro = predict_wav(sess, wav)
        print("hear sound::" + wav + "\tprob:\t" + str(pre_pro))
        print("prindictions:\t" + predictions_map[pre_pro.index(max(pre_pro))])
        end.append(predictions_map[pre_pro.index(max(pre_pro))])
    return end
def TCP(sock, addr, sess):
    """Per-connection server loop: each incoming message is a wav path that
    gets classified; the predicted label is sent back to the client."""
    print('Accept new connection from %s:%s.' % addr)
    while True:
        data = sock.recv(2048)
        time.sleep(1)  # throttle the loop
        # Empty payload or an explicit 'quit' ends the session.
        if not data or data.decode() == 'quit':
            break
        wav_path = data.decode('utf-8')
        print(wav_path)
        predict_wav_dir.append(wav_path)
        labels = predict_wav_list(sess, [wav_path])
        for label in labels:
            sock.send(label.encode())
    sock.close()
    print('Connection from %s:%s closed.' % addr)
if __name__=='__main__':
    # Restore the trained TensorFlow graph once, then serve clients forever.
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(test_ckpt_path)
        saver.restore(sess, ckpt_path)
        print('Model restored from: '+test_ckpt_path)
        # Old hand-rolled socket loop, kept for reference:
        # s = socket.socket(socket.AF_INET,
        #                   socket.SOCK_STREAM)  # create socket (AF_INET: IPv4) (SOCK_STREAM: TCP)
        # s.bind(('127.0.0.1', 6666))  # bind local IP and a port (>1024)
        # s.listen(1)  # listen; at most 1 queued connection
        # print('Server is running...')
        #
        # while True:
        #     sock, addr = s.accept()  # accept a new connection
        #     TCP(sock, addr,sess)  # handle the connection
        # Each Server instance accepts and serves exactly one client
        # connection, then is torn down and a fresh listener is created.
        while True:
            my_server = Server('192.168.1.102', 6666,sess)
            # my_server = Server('localhost', 6666,sess)
            my_server.event_judge(pridict=True)
            print('you can now disconnect connection ')
            my_server.close()
            time.sleep(1)
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,081 | Lechatelia/heart-sound | refs/heads/master | /audio_processing.py | # Beat tracking example
from __future__ import print_function
import os
import glob
import ntpath
import numpy as np
import librosa
import matplotlib.pyplot as plt
import librosa.display
import os
import xlwt
import csv
from scipy.signal import lfilter, firwin,freqz
from filter import is_real_wav
import filter
# Log file recording which wav files were already processed (dedup log).
txt_name_for_chongfu='./chongfu1.txt'
# Whether to band-pass filter audio before feature extraction.
is_filter=True
Cutoff_hz = 1000.0
# NOTE(review): the assignment above is immediately overwritten -- the
# effective value is the [low, high] band below; the scalar line is dead.
Cutoff_hz = [10.0,1000.0]
# Cutoff_hz = 1000
# Number of taps for the FIR band-pass filter.
Numtaps = 499
# Sample rate used when loading wav files.
Sample_rate=16000
def extract_feature(file_name,outfile=None):
    """Load one wav file, optionally FIR band-pass filter it, and return six
    time-averaged, peak-normalised feature vectors:
    (mfccs, chroma, mel, contrast, tonnetz, tempogram).

    :param file_name: path of the wav file to process
    :param outfile: optional open file handle for the dedup log -- currently
        unused because the write below is commented out
    """
    min_data =32000
    # try:
    # print(file_name)
    # X, sample_rate = librosa.load(file_name)
    # except :
    # else:
    # NOTE(review): is_real_wav is a name imported from filter and tested
    # without being called. If it is a function this branch is always taken;
    # confirm it is meant to be a module-level flag.
    if is_real_wav:
        X, sample_rate = librosa.load(file_name,sr=Sample_rate)
    else:
        # Fall back to the project's own wav reader at the fixed sample rate.
        X=filter.wav_open(file_name)
        sample_rate=Sample_rate
    # X, sample_rate1 = librosa.load(file_name, offset=8.1)
    # if len(X) >= min_data:
    # offset = int(np.random.randint(0, high=len(X) - min_data))
    # X = X[offset:offset + min_data]
    # if(outfile!=None):
    # # print("!!!{file_name}".format(file_name=file_name))
    # outfile.write(file_name+'\n')
    # Whether to filter before extraction (band-pass per Cutoff_hz).
    if is_filter:
        nyq_rate = sample_rate / 2.
        # Normalise the cutoff frequencies to the Nyquist rate as firwin expects.
        band = [i / nyq_rate for i in Cutoff_hz]
        fir_coeff = firwin(Numtaps, band, pass_zero=False)
        # fir_coeff = firwin(Numtaps, Cutoff_hz / nyq_rate)
        X = lfilter(fir_coeff, 1.0, X)
    # else:
    # pad = (int((min_data - len(X)) / 2), min_data - len(X) - int((min_data - len(X)) / 2))
    # X = np.pad(X, pad_width=pad, mode='constant', constant_values=0)
    # Each feature matrix is averaged over time (axis 0 after .T) and then
    # peak-normalised so every component lies in [-1, 1].
    stft = np.abs(librosa.stft(X))
    mfccs = librosa.util.normalize(np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0))
    chroma = librosa.util.normalize(np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0))
    mel = librosa.util.normalize(np.mean(librosa.feature.melspectrogram(X, sr=sample_rate).T, axis=0))
    contrast = librosa.util.normalize(np.mean(librosa.feature.spectral_contrast(S=stft, sr=sample_rate).T, axis=0))
    tonnetz = librosa.util.normalize(np.mean(librosa.feature.tonnetz(y=librosa.effects.harmonic(X), sr=sample_rate).T, axis=0))
    # Compute local onset autocorrelation
    # # hop_length = 512
    oenv = librosa.onset.onset_strength(y=X, sr=sample_rate)
    tempogram = librosa.util.normalize(np.mean(librosa.feature.tempogram(onset_envelope=oenv, sr=sample_rate).T, axis=0))
    # # Compute global onset autocorrelation
    # ac_global = librosa.autocorrelate(oenv, max_size=tempogram.shape[0])
    # ac_global = librosa.util.normalize(ac_global)
    #
    # rmse = np.mean(librosa.feature.rmse(y=X).T, axis=0)
    # cent = np.mean(librosa.feature.spectral_centroid(y=X, sr=sample_rate).T, axis=0)
    # spec_bw = np.mean(librosa.feature.spectral_bandwidth(y=X, sr=sample_rate).T, axis=0)
    # print("tempogram:%d, ac_global:%d, rmse:%d, stft:%d, mel:%d" % (len(tempogram), len(ac_global), len(rmse), len(stft), len(mel)))
    # print(rmse)
    # print(cent)
    # print(spec_bw)
    #
    # onset_env = librosa.onset.onset_strength(y=X, sr=sample_rate,
    #                                          hop_length=512,
    #                                          aggregate=np.median)
    # peaks = librosa.util.peak_pick(onset_env, 3, 3, 3, 5, 0.5, 10)
    # print(stft)
    # print(tonnetz)
    # print(peaks)
    return mfccs, chroma, mel, contrast, tonnetz, tempogram
def extract_feature_2D(file_name):
    """Extract frame-level (2D) features from a wav file.

    Unlike extract_feature(), the per-frame matrices are returned without
    averaging over time, so each result has one row per analysis frame.
    """
    y, sr = librosa.load(file_name)
    spectrum = np.abs(librosa.stft(y))
    mfccs = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=40).T
    chroma = librosa.feature.chroma_stft(S=spectrum, sr=sr).T
    mel = librosa.feature.melspectrogram(y, sr=sr).T
    contrast = librosa.feature.spectral_contrast(S=spectrum, sr=sr).T
    tonnetz = librosa.feature.tonnetz(y=librosa.effects.harmonic(y), sr=sr).T
    onset_env = librosa.onset.onset_strength(y=y, sr=sr)
    tempogram = librosa.feature.tempogram(onset_envelope=onset_env, sr=sr).T
    # Debug output: one shape per feature matrix, in return order.
    for matrix in (mfccs, chroma, mel, contrast, tonnetz, tempogram):
        print(str(matrix.shape))
    return mfccs, chroma, mel, contrast, tonnetz, tempogram
def parse_audio_files(parent_dir, sub_dirs, file_ext='*.wav'):
    """Walk parent_dir/<sub_dir> for wav files and extract their features.

    :param parent_dir: root data directory
    :param sub_dirs: iterable of sub-directory names to scan
    :param file_ext: glob pattern for audio files
    :return: (features, labels, saveft) where each features row is
        [filename, folder, label, feature values...] (np.vstack coerces the
        mixed row to a common dtype), labels holds integer indices into
        labels_map, and saveft is the same data as a list of lists with a
        header row prepended for csv export.
    """
    labels_map = ['artifact', 'extrahls', 'normal', 'murmur', 'extrastole', 'Aunlabelledtest', 'Bunlabelledtest']
    # print(enumerate(sub_dirs))
    # filenames, folders = []
    saveft = []
    header = []
    # 580 columns = 3 metadata fields + the concatenated feature vectors.
    features, labels = np.empty((0, 580)), np.empty(0)
    for sub_dir in sub_dirs:  # enumerate(sub_dirs):
        for fn in glob.glob(os.path.join(parent_dir, sub_dir, file_ext)):
            file_name = ntpath.basename(fn)
            # The label is encoded as the prefix of the file name before '_'.
            file_attrs = file_name.split("_")
            xi_class = file_attrs[0].strip()
            # if xi_class != "Bunlabelledtest":
            print('Processing file: %s'.ljust(30) % file_name, end='\r')
            mfccs, chroma, mel, contrast, tonnetz, tempogram = extract_feature(fn)
            print(mfccs.shape)
            print(mel.shape)
            print(chroma.shape)
            print(tonnetz.shape)
            print(contrast.shape)
            print(tempogram.shape)
            # print(ac_global.shape)
            ext_features = np.hstack([file_name, sub_dir, xi_class, mfccs, chroma, mel, contrast, tonnetz, tempogram])
            # Header is rebuilt on every file (same value each time) so it is
            # only meaningful once the loop has run at least once.
            header = ['filename', 'folder', 'label']
            header.extend(['mfcc'] * len(mfccs))
            header.extend(['chroma'] * len(chroma))
            header.extend(['mel'] * len(mel))
            header.extend(['contrast'] * len(contrast))
            header.extend(['tonnetz'] * len(tonnetz))
            header.extend(['tempogram'] * len(tempogram))
            # header.extend(['ac_global']*len(ac_global))
            tmp = [file_name, sub_dir, xi_class]
            tmp.extend(mfccs)
            tmp.extend(chroma)
            tmp.extend(mel)
            tmp.extend(contrast)
            tmp.extend(tonnetz)
            tmp.extend(tempogram)
            # tmp.extend(ac_global)
            # print(len(ext_features))
            # NOTE: vstack in a loop is O(n^2); acceptable for these dataset sizes.
            features = np.vstack([features, ext_features])
            # print(labels_map[file_attrs[0].strip()])
            labels = np.append(labels, labels_map.index(xi_class))
            # filenames.append(file_name)
            # folders.append(sub_dir)
            saveft.append(tmp)
            # exit(-1)
            # print(mfccs.tolist())
            # with open('some.csv', 'w') as f:
            # writer = csv.writer(f)
            # writer.writerow(mfccs.tolist())
            # writer.writerow(chroma.tolist())
            # writer.writerow(mel.tolist())
    saveft.insert(0, header)
    return np.array(features), np.array(labels, dtype=np.int), saveft
def one_hot_encode(labels):
    """One-hot encode an integer label vector.

    :param labels: 1D array-like of non-negative integer class indices
    :return: 2D float array of shape [len(labels), max(labels) + 1]

    The original sized the matrix by len(np.unique(labels)), which raised an
    IndexError whenever label values were non-contiguous (e.g. one class is
    absent from the batch). Sizing by the largest label value fixes that and
    produces identical output for the usual contiguous 0..k case.
    """
    labels = np.asarray(labels, dtype=int)
    n_labels = len(labels)
    n_classes = int(labels.max()) + 1 if n_labels else 0
    encoded = np.zeros((n_labels, n_classes))
    encoded[np.arange(n_labels), labels] = 1
    return encoded
def pre_precessing():
    """One-off pre-processing driver: runs beat tracking on a sample file,
    extracts features for every wav under data/set_a and data/set_b, one-hot
    encodes the labels, and dumps the feature table to fea3.csv.

    Side effects: reads "fea4.wav" and the data/ tree, prints progress, and
    writes fea3.csv in the working directory.
    """
    # Full label set across both data sets.
    labels_map = ['artifact', 'extrahls', 'normal', 'murmur', 'extrastole', 'Aunlabelledtest', 'Bunlabelledtest']
    filename = "fea4.wav"
    data = {}
    # Load the sample waveform and run the default beat tracker on it.
    y, sr = librosa.load(filename)
    tempo, beat_frames = librosa.beat.beat_track(y=y, sr=sr)
    data['beat_track'] = librosa.frames_to_time(beat_frames, sr=sr)
    beat_times = librosa.frames_to_time(beat_frames, sr=sr)
    print("beat_times" + str(beat_times))
    parent_dir = 'data'
    sub_dirs = ['set_a', 'set_b']
    features, labels, saveft = parse_audio_files(parent_dir, sub_dirs)
    print(labels)
    print(features)
    print(saveft)
    labels = one_hot_encode(labels)
    print(labels)
    # The original passed filename as a second print() argument, so the %s
    # was never substituted; format the string explicitly.
    print('Processing file: %s' % filename, end='\r')
    mfccs, chroma, mel, contrast, tonnetz, tempogram = extract_feature(filename)
    print(mfccs.shape)
    print(mel.shape)
    print(chroma.shape)
    print(tonnetz.shape)
    print(contrast.shape)
    print(tempogram.shape)
    print(len(saveft))
    import csv
    # newline='' prevents blank rows on Windows (matches the other csv writers).
    with open('fea3.csv', 'w', newline='') as csvfile:
        spamwriter = csv.writer(csvfile)
        spamwriter.writerows(saveft)
def write_features_into_excel(dir, write_txt=True, first=False):
    """Append one feature row per wav file in *dir* to write.csv.

    :param dir: directory containing .wav files; must end with a path
        separator because it is concatenated with the file name directly
    :param write_txt: unused; kept for backward compatibility with callers
    :param first: when True, write.csv is created/truncated; otherwise rows
        are appended
    """
    write_data = []
    mode = 'w' if first else 'a+'
    # Both files are opened with context managers; the original leaked the
    # dedup-log handle (opened but never closed).
    with open(txt_name_for_chongfu, 'a+') as out_file:
        with open('write.csv', mode, newline='') as csv_file:
            csv_writer = csv.writer(csv_file)
            for file in os.listdir(dir):
                if file.split('.')[-1] == 'wav':
                    features = extract_feature(dir + file, out_file)
                    features = np.concatenate(features, 0)
                    # Row layout: filename, (blank), label parsed from the
                    # name prefix, then the 577 feature values.
                    write_data[0:3] = [file, '', file.split('_')[0]]
                    write_data[3:] = features
                    csv_writer.writerow(write_data)
def write_features_intxt_into_excel(dir):
    """Read wav paths (one per line) from the txt file *dir* and append one
    feature row per path to write.csv.

    Processing stops at the first blank line or at end of file.
    """
    row = []
    processed = 0
    with open('write.csv', 'a+', newline='') as csv_file:
        csv_writer = csv.writer(csv_file)
        with open(dir, 'r') as file_to_read:
            while True:
                line = file_to_read.readline().strip('\n')
                if not line:
                    # Blank line or EOF terminates processing.
                    break
                feats = np.concatenate(extract_feature(line), 0)
                # Row layout: filename, (blank), label from the name prefix,
                # then the feature values.
                name = line.split('/')[1]
                row[0:3] = [name, '', name.split('_')[0]]
                row[3:] = feats
                csv_writer.writerow(row)
                processed += 1
    print("txt2csv number:\t{num}".format(num=processed))
if __name__ == '__main__':
    # extract_feature('dataset/artifact__201012172012.wav')
    # Class directories to process; each ends with '/' because the path is
    # concatenated with file names directly inside write_features_into_excel.
    dir=['../dataset3/extrastole/','../dataset3/murmur/','../dataset3/artifact/','../dataset3/normal/','../dataset3/extrahls/']
    # num=[13,4,13,3,28]
    # How many passes to run per directory (oversampling knob).
    num=[1,1,1,1,1]
    # num=[1,1,1,1,1]
    for i in range(len(dir)):
        for j in range(num[i]):
            print("i: {inum},\tj:{jnum}".format(inum=i,jnum=j))
            # first=True only on the very first (i=0, j=0) pass, which
            # truncates write.csv; every later pass appends.
            write_features_into_excel(dir[i],write_txt=False,first=((i+j)==0))
    # write_features_into_excel('dataset/')
    # write_features_intxt_into_excel(txt_name_for_chongfu)
| {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,082 | Lechatelia/heart-sound | refs/heads/master | /client.py | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
from socket import *
HOST = '192.168.1.102'  # server address on the LAN
# HOST = 'localhost'
PORT = 6666
BUFFSIZE = 2048
ADDR = (HOST, PORT)
# Sample wav used for manual testing.
wav_name=["wav/normal__201105011626.wav"]
# Canned protocol messages; fields are CRLF-separated.
message1='update\r\n45454\r\nzhujinguo\r\n1997-03-27\r\n1\r\n13772052853'
message2='use_list_get'
message3='use_diagnosis_get\r\n1564221354'
# NOTE(review): the assignment below overwrites the one above, so only the
# id-123 query is ever sent.
message3='use_diagnosis_get\r\n123'
if __name__=='__main__':
    # Interactive test client: type a canned-message alias (mess1..mess3)
    # or any raw text; each send is followed by one blocking receive.
    tctimeClient = socket(AF_INET, SOCK_STREAM)
    tctimeClient.connect(ADDR)
    print('connect to %s:%s sucessfully!' % ADDR)
    while True:
        print("输入发送数据")
        try:
            data = input(">")
        except KeyboardInterrupt:
            # Ctrl-C closes the socket and exits the loop.
            print('sock.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, 1 )over')
            tctimeClient.close()
            break
        else:
            if not data:
                break
            # if data=='send':
            elif data=='mess1':
                tctimeClient.send(message1.encode())
                print('receive:\t{num}'.format(num=tctimeClient.recv(BUFFSIZE).decode()))
            elif data=='mess2':
                tctimeClient.send(message2.encode())
                print('receive:\t{num}'.format(num=tctimeClient.recv(BUFFSIZE).decode()))
            elif data=='mess3':
                tctimeClient.send(message3.encode())
                print('receive:\t{num}'.format(num=tctimeClient.recv(BUFFSIZE).decode()))
            else:
                # Anything else is forwarded verbatim.
                tctimeClient.send(data.encode())
                print('receive:\t{num}'.format(num=tctimeClient.recv(BUFFSIZE).decode()))
            # tctimeClient.send(data.encode())
            # predict = tctimeClient.recv(BUFFSIZE).decode()
            # if not data:
            #     break
            # print(predict)
    tctimeClient.close()
51,083 | Lechatelia/heart-sound | refs/heads/master | /ceshi.py | import numpy as np
import tensorflow as tf
import os
import xlwt
# Scratch/test script: only the normalize() check below is active code.
dir='dataset/'
import csv
import audio_processing
from skimage import io
import matplotlib.pyplot as plt
import time as clock
import datetime
import librosa
# out_file = open('./chongfu.txt', 'a+')
# out_file.write("jsoasladasass\n")
# a=[32 ,8]
# b=[i/2 for i in a ]
# print(b)
# print(str(32%4))
# Quick sanity check of librosa's peak-normalisation helper.
print(librosa.util.normalize([1.,2.,3.]))
#
# with open('test.wav', 'wb') as f:
# for i in range(100):
# f.write(chr(0x00).encode())
# f.close()
#Python3.4以后的新方式,解决空行问题
#
# pre_pro=[0.1,0.2,0.4,0.3]
# result='result{0}{1:*<4}'.format(str(pre_pro.index(max(pre_pro))),str(max(pre_pro)*10000).split('.')[0])
# print(result)
#
# a='98343*****peterdfds**'
# print(a[0:10].strip('*'))
# print(a[10:].strip('*'))
#
#
# time =str(datetime.datetime.now()).split('.')[0]
# print(str(time))
# clock.sleep(0.1)
# time =str(datetime.datetime.now()).split('.')[0]
# print(str(time))
# img=io.imread('123.jpg')
# plt.subplot(2,2,1)
# plt.subplot(2, 2, 1) # 将窗口分为两行两列四个子图,则可显示四幅图片
# plt.title('origin image') # 第一幅图片标题
# plt.imshow(img) # 绘制第一幅图片
#
# print(img.shape)
# print(img[:, :, 0].shape)
#
#
#
# plt.subplot(2, 2, 2) # 第二个子图
# plt.title('R channel') # 第二幅图片标题
# plt.imshow(img[:, :, 0], plt.cm.gray) # 绘制第二幅图片,且为灰度图
# plt.axis('off') # 不显示坐标尺寸
#
# plt.subplot(2, 2, 3) # 第三个子图
# plt.title('G channel') # 第三幅图片标题
# plt.imshow(img[:, :, 1], plt.cm.gray) # 绘制第三幅图片,且为灰度图
# plt.axis('off') # 不显示坐标尺寸
#
# plt.subplot(2, 2, 4) # 第四个子图
# plt.title('B channel') # 第四幅图片标题
# plt.imshow(img[:, :, 2], plt.cm.gray) # 绘制第四幅图片,且为灰度图
# plt.axis('off') # 不显示坐标尺寸
#
#
# plt.savefig('hah.png')
# plt.show()
# i=0
# with open('write.csv', 'w', newline='') as csv_file:
# csv_writer = csv.writer(csv_file)
# for file in os.listdir(dir):
# # if os.path.isfile(file):
# if file.split('.')[-1]=='wav':
# print(file.split('_')[0])
# i=i+1
# csv_writer.writerow([file,'',file.split('_')[0]]) # 其中的'0-行, 0-列'指定表中的单元,'EnglishName'是向该单元写入的内容
'''
array_full=['aaa','bbb','ccc']
array_full=np.array(array_full)
print(array_full[1])
print(type(array_full))
print(array_full.shape)
a=[2]
#print(type(a))
#print(float(a[0]))
t1 = [[1, 2, 3], [4, 5, 6]]
t2 = [[7, 8, 9], [10, 11, 12]]
print(tf.concat( [t1, t2],0) )# [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
print(tf.concat( [t1, t2],1))# [[1, 2, 3, 7, 8, 9], [4, 5, 6, 10, 11, 12]]
arr=np.arange(0,20)
arr=np.reshape(arr,(4,5))
print(str(arr.shape))
print(arr)
print(arr[:,3:4])
num=20
a=range(0,num)
b=range(num,2*num)
a=np.array(a)
b=np.array(b)
perm0 = np.arange(num)
np.random.shuffle(perm0)
a = a[perm0]
b= b[perm0]
print(perm0)
print(a)
print(b)
print(np.concatenate((a,b),0))
''' | {"/my_train.py": ["/my_inference.py", "/audio_processing.py"], "/my_pridict.py": ["/my_inference.py", "/audio_processing.py", "/my_train.py"], "/server_predict.py": ["/my_inference.py", "/audio_processing.py", "/my_pridict.py", "/sever.py", "/mysql.py"], "/audio_processing.py": ["/filter.py"], "/ceshi.py": ["/audio_processing.py"], "/sever.py": ["/mysql.py"]} |
51,084 | Lechatelia/heart-sound | refs/heads/master | /sever.py | import socket
import time
import threading
import datetime
import mysql
# import server_predict
#
# s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # 创建socket (AF_INET:IPv4, AF_INET6:IPv6) (SOCK_STREAM:面向流的TCP协议)
#
# # s.bind(('192.168.1.103', 6666)) # 绑定本机IP和任意端口(>1024)
# s.bind(('127.0.0.1', 6666)) # 绑定本机IP和任意端口(>1024)
#
# s.listen(1) # 监听,等待连接的最大数目为1
#
# print('Server is running...')
class Server():
    """Single-connection TCP server for the heart-sound client app.

    Construction binds/listens and BLOCKS until one client connects;
    event_judge() then services CRLF-delimited text commands from that
    client until it disconnects.
    """
    def __init__(self,ip,port,sess=None):
        # Expected size in bytes of one uploaded wav recording.
        self.wav_length=32812
        # self.wav_length=10
        # Optional TensorFlow session (used by the prediction server variant).
        self.sess=sess
        self.s = socket.socket(socket.AF_INET,
                               socket.SOCK_STREAM)  # create socket (AF_INET: IPv4, AF_INET6: IPv6) (SOCK_STREAM: TCP stream)
        # Allow immediate rebinding of the address after a restart.
        self.s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        # s.bind(('192.168.1.103', 6666))  # bind local IP and a port (>1024)
        self.s.bind((ip, port))  # bind local IP and a port (>1024)
        self.s.listen(1)  # listen; at most 1 queued connection
        print('Server is running...waitting a connection')
        self.sock, self.addr = self.s.accept()  # blocks until a client connects
        print('Accept new connection from %s:%s.' % self.addr)  # accepted the new connection request
        # TCP(sock, addr)  # handle the connection
    def receive_wav(self,wav_name):
        """Receive exactly self.wav_length bytes and write them to *wav_name*."""
        # Length=32000
        Length = self.wav_length
        length = 0
        with open(wav_name, 'wb') as f:
            while (length < Length):
                data = self.sock.recv(5000)  # receive one chunk
                if data:
                    # if not data or data.decode() == 'quit':  # exit if empty or 'quit'
                    #     break
                    print("receiced:\t")
                    print(len(data))
                    f.write(data)
                    length += len(data)
                    if length == Length:
                        # Redundant with the context manager, but harmless.
                        f.close()
                        break
                    # print(data.decode('utf-8'))
                    # sock.send(data.decode('utf-8').upper().encode())
                else:
                    # Empty read: the peer closed before the full file arrived.
                    self.sock.close()
                    print('Connection from %s:%s closed.' % self.addr)
                    break
        # NOTE(review): printed even when the transfer was cut short above.
        print('transmit ok')
    def test_loop(self):
        """Echo loop used for connectivity testing: send back whatever arrives."""
        while (1):
            try:
                data = self.sock.recv(1024)
            except OSError:
                print('Connection Error from socket %s:%s .' % self.addr)
                self.sock.close()
                break
            else:
                if not data:
                    # Empty read means the client disconnected.
                    print('Connection from %s:%s closed.' % self.addr)
                    self.sock.close()
                    break
                else :
                    print(data.decode('utf-8'))
                    self.sock.send(data)
    def close(self):
        # Close the listening socket (the per-client socket is closed in the loops).
        self.s.close()
    def strlist_2_one_str(self,list):
        """Concatenate a list of strings; return 'blank' for an empty list,
        which the client interprets as "no data"."""
        str=''
        if len(list)==0:
            return 'blank'
        for i in list:
            str=str+i
        return str
    #
    def event_judge(self,pridict=False):
        """Dispatch loop for client commands ('update', 'use_list_get',
        'use_diagnosis_get'); runs until the client disconnects.

        :param pridict: unused in this variant; kept for interface parity
            with the prediction server.
        """
        while(1):
            try:
                data=self.sock.recv(1024)
            except OSError:
                print('Connection Error from socket %s:%s .' % self.addr)
                self.sock.close()
                break
            else:
                if not data:
                    print('Connection from %s:%s closed.' % self.addr)
                    self.sock.close()
                    break
                else:
                    # Protocol: command and arguments are CRLF-separated.
                    data_list=data.decode('utf-8').split('\r\n')
                    print(data_list)
                    if data_list[0] == 'update':
                        # 'update' carries: id, name, birthday, sex, phone.
                        if len(data_list)==6:
                            mysql.Add_info_to_SQL_no_id(data_list[2],data_list[3],int(data_list[4]),tel=int(data_list[5]))
                            # mysql.Add_info_to_SQL_no_id('zhujin','1997-03-27',1,tel=13772052853)
                            self.sock.send('over'.encode())
                        else:
                            print(data_list)
                            print('update information error from APP')
                    elif data_list[0]=='use_list_get':
                        # Send the whole user list as one concatenated string.
                        info_list=mysql.get_all_info()
                        # for i in range(len(info_list)):
                        #     self.sock.send(info_list[i].encode())
                        self.sock.send(self.strlist_2_one_str(info_list).encode())
                    elif data_list[0]=='use_diagnosis_get':
                        # Look up diagnoses for the given user id.
                        results=mysql.get_diagnosis_by_id(int(data_list[1]))
                        if len(results)==0:
                            results.append('blank')
                        print(len(results[0]))
                        # for i in results:
                        #     self.sock.send(i.encode())
                        self.sock.send(self.strlist_2_one_str(results).encode())
                        # print(self.strlist_2_one_str(results))
                    else:
                        print('unknown messsge:\t{mess}'.format(mess=data.decode('utf-8')))
def TCP(sock, addr):
    """Debug handler: dump every chunk received on *sock* until the peer
    closes the connection, then close our side as well."""
    while True:
        data = sock.recv(3000)
        if len(data) == 0:
            # Empty read => peer closed; shut down and stop.
            sock.close()
            print('Connection from %s:%s closed.' % addr)
            break
        time.sleep(1)  # throttle
        print("receiced:\t")
        print(data)
def receive_wav(sock, addr):
    """Receive exactly `Length` bytes from *sock* and save them to test.wav.

    Stops early (closing the socket) if the peer disconnects before the
    full payload has arrived.
    """
    # Length=32000
    Length = 10
    received = 0
    with open('test.wav', 'wb') as f:
        while received < Length:
            chunk = sock.recv(5000)
            if not chunk:
                # Empty read: the peer closed before the full file arrived.
                sock.close()
                print('Connection from %s:%s closed.' % addr)
                break
            print("receiced:\t")
            print(len(chunk))
            f.write(chunk)
            received += len(chunk)
            if received == Length:
                break
    print('transmit ok')
if __name__ == '__main__':
    # Serve forever: each Server instance accepts and handles exactly one
    # client connection, after which the listener is recreated.
    while True:
        # my_server=Server('localhost', 7777)
        my_server=Server('192.168.1.102', 7777)
        # my_server.test_loop()
        my_server.event_judge()
        print('you can now disconnect connection ')
        my_server.close()
        time.sleep(0.1)
51,085 | Lechatelia/heart-sound | refs/heads/master | /rename.py | import xml.etree.ElementTree as ET
import pickle
import os
from os import listdir, getcwd
from os.path import join
#此脚本用于Windows下文件夹中只存在图片时批量更名,若需在ubuntu下使用,只需将文件符\\换成/即可 共三处
def rename():
    """Batch-rename every file under ./<relative_dir> to
    <relative_dir>/<relative_dir>_NNNN.wav, numbering from a user-supplied
    starting value.

    NOTE(review): os.walk also descends into subdirectories, but the rename
    below builds paths relative to relative_dir only, so files in nested
    folders would fail -- presumably the directory is expected to be flat;
    confirm before running on nested trees.
    """
    relative_dir = input('intput relative_dir:\n')
    n = int(input('intput the first number:\n'))  # starting sequence number
    profix = relative_dir
    wd = getcwd()
    for root,dirs,files in os.walk('%s/%s'%(wd,relative_dir)): #
        for file in files:
            newname=relative_dir+'/'+profix+'_'+str("%04d" %(n))+'.wav'  # new file-name pattern #2
            # Rename the file in place using os.rename.
            os.rename('%s/%s'%(relative_dir,file),newname) #3
            n+=1
def rename_old():
    """Earlier variant of rename(): renames files to PicNNNN.JPG.

    Same caveat as rename(): os.walk recurses, but the rename paths are
    relative to relative_dir only.
    """
    n=12  # starting number; this value is overwritten by the prompt below
    # relative_dir='pic2'  # relative path holding the images
    relative_dir=input('intput relative_dir:\n')  # relative path holding the images
    n=int(input('intput the first number:\n'))  # starting sequence number
    wd=getcwd()
    for root,dirs,files in os.walk('%s/%s'%(wd,relative_dir)): #1
        for file in files:
            newname=relative_dir+'/Pic'+str("%04d" %(n))+'.JPG'  # new file-name pattern #2
            # Rename the file in place using os.rename.
            os.rename('%s/%s'%(relative_dir,file),newname) #3
            n+=1
if __name__=='__main__':
    # Interactive entry point: prompts for a directory and a start number.
    rename()
51,086 | Lechatelia/heart-sound | refs/heads/master | /multi_threading.py | import threading
# coding:utf-8
import threading
import time
def action(arg):
    """Worker body: sleep, report this thread's name and argument, sleep again."""
    time.sleep(1)
    thread_name = threading.currentThread().getName()
    print('sub thread start!the thread name is:%s\r' % thread_name)
    print('the arg is:%s\r' % arg)
    time.sleep(1)
# Spawn four worker threads, each running action() with its index.
for i in range(4):
    t =threading.Thread(target=action,args=(i,))
    t.start()
# NOTE(review): printed before the workers finish -- no join() is performed,
# so this line typically appears before the sub-thread output.
print ('main_thread end!')
51,105 | sjaishanker/Cryptanalysis-Using-Deep-Neural-Networks | refs/heads/master | /FileHandle.py | from Des import DES_encrypt
import os
def textToBits(text, encoding='utf-8', errors='surrogatepass'):
    """Encode *text* to bytes and return its bits as a '0'/'1' string,
    zero-padded on the left to a whole number of bytes."""
    encoded = text.encode(encoding, errors)
    raw = bin(int.from_bytes(encoded, 'big'))[2:]
    # Round the width up to the next multiple of 8.
    width = 8 * ((len(raw) + 7) // 8)
    return raw.zfill(width)
def textFromBits(bits, encoding='utf-8', errors='surrogatepass'):
    """Inverse of textToBits: turn a '0'/'1' string back into text.

    Returns '\\0' when the bits decode to the empty string.
    """
    value = int(bits, 2)
    nbytes = (value.bit_length() + 7) // 8
    decoded = value.to_bytes(nbytes, 'big').decode(encoding, errors)
    return decoded or '\0'
def writeToFile(filename, message):
    """Append *message* to *filename*, creating the file if it does not exist.

    Mode 'a' already creates a missing file, so the original
    os.path.exists check (a race-prone look-before-you-leap pattern)
    was unnecessary; the context manager guarantees the handle is
    closed even if the write raises.
    """
    with open(filename, 'a') as f:
        f.write(message)
def readFromFile(file_location):
    """Read *file_location* and return its contents as a list of
    8-character strings; the final chunk is right-padded with spaces
    to 8 characters.

    :param file_location: path of the text file to slice
    :return: list of 8-character strings

    The original char-by-char loop silently dropped any trailing partial
    chunk (it padded a local variable but never appended it to the result)
    and mis-counted the padding; reading the whole file and slicing fixes
    both while keeping identical output for inputs whose length is a
    multiple of 8.
    """
    with open(file_location, "r") as f:
        text = f.read()
    batches = []
    for start in range(0, len(text), 8):
        chunk = text[start:start + 8]
        # ljust pads the last (possibly short) chunk with spaces.
        batches.append(chunk.ljust(8))
    return batches
def decToBin(x):
    """Converts Decimal to Binary (the binary digits returned as an int,
    e.g. 5 -> 101)."""
    binary_digits = bin(x)[2:]
    return int(binary_digits)
def fileLength(fname):
    """Return the number of lines in *fname* (0 for an empty file)."""
    with open(fname) as f:
        return sum(1 for _ in f)
def makeDataset(key, input_file):
    """Build (plain_text, cipher_text) training pairs from *input_file*.

    Each 8-character batch becomes one row of character ordinals
    (plain text) and one row of 0/1 ints taken from the DES encryption
    of its bit representation (cipher text).
    """
    plain_text = []
    cipher_text = []
    for batch in readFromFile(input_file):
        plain_text.append([ord(ch) for ch in batch])
        encrypted_bits = DES_encrypt(textToBits(batch), key)
        # '0' maps to 0; any other character maps to 1, as in the original.
        cipher_text.append([0 if bit == '0' else 1 for bit in encrypted_bits])
    return plain_text, cipher_text
| {"/Gui.py": ["/utils.py"]} |
51,106 | sjaishanker/Cryptanalysis-Using-Deep-Neural-Networks | refs/heads/master | /utils.py | import random
from keras.models import load_model
import numpy as np
def makePrediction(tokens):
    """Takes in the tokens generated and returns the output string.

    :param tokens: iterable of 64-character '0'/'1' strings, one per
        encrypted 64-bit block
    :return: decoded string, 8 characters per token

    NOTE(review): the randomness below makes the output non-deterministic
    for low-scoring indices; presumably this substitutes a random printable
    character when the model picks a control-character slot -- confirm.
    """
    output = ""
    # Load the trained Keras model from disk on every call.
    model = load_model('model.h5')
    for x in tokens:
        # Convert the 64-char bit string into a 0/1 feature vector.
        temp = []
        for i in range(64):
            if x[i] == '1':
                temp.append(1)
            else:
                temp.append(0)
        array = np.array([temp])
        prediction = model.predict(array)
        # The model output is treated as 8 groups of 128 scores, one group
        # per output character position.
        for i in range(8):
            temp = prediction[0][128 * i:(128 * i) + 128].tolist()
            index = np.argmax(temp)
            if index%128 < 31:
                # Replace non-printable picks with a random alphanumeric-range code.
                index = random.randint(48,122)
                # NOTE(review): chr(index+1) shifts every code point by one --
                # looks deliberate (mirrors the branch below) but confirm
                # against the encoder.
                output += chr(index+1)
            else:
                output += chr((index%128) + 1)
    return output
51,107 | sjaishanker/Cryptanalysis-Using-Deep-Neural-Networks | refs/heads/master | /Gui.py | from utils import makePrediction
import re
import os
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk, Gdk
class MainWindow(Gtk.Window):
    """
    Class for the main window of the GUI. Uses the file glade_file.glade to build the UI.
    """
    # Paths of the currently selected input (encrypted) and output
    # (decrypted) files; empty string means "not selected".
    encryptedFilePath = ""
    decryptedFilePath = ""
    def __init__(self):
        """
        Create all the GUI elements from the glade file on initialization.
        """
        self.gladeFile = "res/glade_file.glade"
        self.builder = Gtk.Builder()
        self.builder.add_from_file(self.gladeFile)
        # Wire the glade-declared signals to methods on this instance.
        self.builder.connect_signals(self)
        self.window = self.builder.get_object("window1")
        self.window.set_size_request(550, 500)
        self.window.connect("destroy", Gtk.main_quit)
        self.header = Gtk.HeaderBar()
        self.header.props.title = "Integrated Approach for Cryptology using Deep Neural Network"
        self.header.set_show_close_button(True)
        self.decryptButton = self.builder.get_object("decrypt")
        self.decryptButton.connect("clicked", self.decrypt)
        # Text views and their backing buffers for input/output text.
        self.encryptedText = self.builder.get_object("encrypted_text")
        self.encryptedTextBuffer = self.encryptedText.get_buffer()
        self.decryptedText = self.builder.get_object("decrypted_text")
        self.decryptedTextBuffer = self.decryptedText.get_buffer()
        self.window.set_titlebar(self.header)
        self.window.show_all()
    def decrypt(self, widget):
        """
        Called on clicking the "Decrypt" button. Performs decryption of the encrypted text.
        Displays the output to the decrypted text window and if a text file is selected,
        stores the data to the text file.
        """
        # NOTE(review): this compares a Gtk.TextBuffer object to "" -- it is
        # always False, so the empty-input dialog can never appear. The
        # intent was presumably to test the buffer's *text*; confirm and fix.
        if self.encryptedTextBuffer == "":
            dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "ERROR")
            dialog.format_secondary_text("Please write the encrypted data to decrypt or select file!")
            dialog.run()
            dialog.destroy()
        else:
            start_iter = self.encryptedTextBuffer.get_start_iter()
            end_iter = self.encryptedTextBuffer.get_end_iter()
            text = self.encryptedTextBuffer.get_text(start_iter, end_iter, True)
            # Each line of the buffer is one token; the trailing element
            # produced by the final newline is dropped with [:-1].
            tokens = text.split('\n')
            output = makePrediction(tokens[:-1])
            self.decryptedTextBuffer.set_text(output)
            # Persist the result only when an output file was chosen.
            if self.decryptedFilePath != "":
                with open(self.decryptedFilePath,"w") as f:
                    f.write(output)
    def encryptedFileSelect(self, widget):
        """
        Called on selecting the encrypted file to decrypt. Reads the file and displays the
        text to the encrypted text window.
        :param widget: The current widget object
        """
        self.encryptedFilePath = widget.get_filename()
        if not re.search('.txt$', self.encryptedFilePath): #Check to ensure that the input file is of type .txt
            dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "ERROR")
            dialog.format_secondary_text("Please select a \"Text\" file!")
            dialog.run()
            dialog.destroy()
            self.encryptedFilePath = ""
            widget.set_filename("None")
        else:
            with open(self.encryptedFilePath) as file:
                data = file.read()
            self.encryptedTextBuffer.set_text(data)
    def decryptedFileSelect(self, widget):
        """
        Called on selecting the file to decrypt to. Stores the path of the file.
        :param widget: The current widget object
        """
        self.decryptedFilePath = widget.get_filename()
        if not re.search('.txt$', self.decryptedFilePath): #Ensure that the selected file is a text file.
            dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.INFO, Gtk.ButtonsType.OK, "ERROR")
            dialog.format_secondary_text("Please select a \"Text\" file!")
            dialog.run()
            dialog.destroy()
            self.decryptedFilePath = ""
            widget.set_filename("None")
        elif not os.stat(self.decryptedFilePath).st_size == 0: #Handle case when selected file is not empty.
            dialog = Gtk.MessageDialog(None, 0, Gtk.MessageType.QUESTION, Gtk.ButtonsType.YES_NO, "WARNING")
            dialog.format_secondary_text("The file where the decrypted data is to be written is not empty."
                                         "\nAre you sure you want to continue?")
            response = dialog.run()
            if response == Gtk.ResponseType.NO:
                dialog1 = Gtk.FileChooserDialog("Please choose a new file", None,
                                                Gtk.FileChooserAction.OPEN, (Gtk.STOCK_OPEN, Gtk.ResponseType.OK))
                response1 = dialog1.run()
                if response1 == Gtk.ResponseType.OK:
                    self.decryptedFilePath = dialog1.get_filename()
                    # NOTE(review): no method named decrypted_file_file_set_cb
                    # is defined on this class -- this call would raise
                    # AttributeError. It presumably should re-run the
                    # validation in decryptedFileSelect; confirm.
                    self.decrypted_file_file_set_cb(self, widget)
                dialog1.destroy()
            dialog.destroy()
# Start the GUI only when run as a script, so importing this module for its
# MainWindow class does not open a window and block on the Gtk main loop.
if __name__ == "__main__":
    window = MainWindow()
    Gtk.main()
| {"/Gui.py": ["/utils.py"]} |
51,109 | albertsennk/pokeball-hunt | refs/heads/master | /tile.py | """
Noah Albertsen
3/31/2017
Reused code from Project 2, modified with the new tile types
"""
class Tile(object):
    """One map tile: an image plus passability/collectability flags."""

    # Paths for the tile images, indexed by kind (Processing.py data files).
    grass = "grassTile.png"
    wave = "waterTile.png"
    mountain = "boulder.png"
    sand = "grassTile.png"
    coin = "treasureTile.png"
    gold = "treasureTile.png"
    tileTypes = [grass, wave, mountain, sand, coin, gold]

    # type char -> (index into tileTypes, passable, collectable).
    # Replaces the original if/elif ladder that repeated the same three
    # assignments per branch.
    _SPECS = {
        'g': (0, True, False),   # grass
        'w': (1, False, False),  # water/wave
        'm': (2, False, False),  # mountain/boulder
        's': (3, True, False),   # sand
        'c': (4, True, True),    # coin
        'e': (5, True, True),    # gold bar
    }

    def __init__(self, type, r, c):
        """
        Create a tile of kind `type` ('g','w','m','s','c','e') at row r, col c.
        Raises ValueError for an unknown kind.
        """
        try:
            index, self.passable, self.collectable = Tile._SPECS[type]
        except KeyError:
            # BUG FIX: the original silently left img/passable/collectable
            # unset for unknown type chars, causing AttributeError much later;
            # fail fast at construction instead.
            raise ValueError("unknown tile type: {!r}".format(type))
        # loadImage is a Processing.py builtin.
        self.img = loadImage(Tile.tileTypes[index])
        self.type = type
        self.tSize = 48  # edge length of a tile in pixels
        self.r = r
        self.c = c

    def display(self):
        """Draw the tile; row/col are scaled by tSize to pixel coordinates."""
        image(self.img, self.c * self.tSize, self.r * self.tSize)

    def getSize(self):
        """Return the pixel length of one tile edge."""
        return self.tSize

    def isPassable(self):
        """True when the player can walk onto this tile (grass/sand, etc.)."""
        return self.passable

    def getR(self):
        """Return the tile's row (y index)."""
        return self.r

    def getC(self):
        """Return the tile's column (x index)."""
        return self.c

    def getRight(self):
        """Return the x pixel value of the right edge."""
        return self.c * self.tSize + self.tSize

    def getLeft(self):
        """Return the x pixel value of the left edge."""
        return self.c * self.tSize

    def getTop(self):
        """Return the y pixel value of the top edge."""
        return self.r * self.tSize

    def getBottom(self):
        """Return the y pixel value of the bottom edge."""
        return self.r * self.tSize + self.tSize

    def isCollectable(self):
        """True when the tile holds a pickup (coin or gold)."""
        return self.collectable

    def getEmptyType(self):
        """
        Return the tile kind left behind after collection: grass for coins,
        sand for gold bars, and the tile's own kind otherwise.
        """
        if self.type == 'c':
            return 'g'
        if self.type == 'e':
            return 's'
        return self.type
# to string method, used for testing purposes
def __str__(self):
str = "Tile type: {}\nRow: {!s} Col: {!s}".format(self.type, self.r, self.c)
return str | {"/mapClass.py": ["/tile.py"]} |
51,110 | albertsennk/pokeball-hunt | refs/heads/master | /monster.py | """
Noah Albertsen, modifying examples from class
"""
# importing the random class
import random
# Chase-state constants for the monster's two-state machine.
WANDER = 0
CHASE = 1
# Direction-of-travel constants (grid moves, one tile at a time).
MOVE_NONE = 0
MOVE_LEFT = 1
MOVE_RIGHT = 2
MOVE_UP = 3
MOVE_DOWN = 4
# Maximum Manhattan distance (in tiles) between player and monster at which
# the monster enters the CHASE state.
CHASE_DISTANCE = 6
class Monster:
    """Enemy that wanders the tile map and chases the player when close.

    Positions are in tile coordinates (row, col); movement is one tile per
    move() call.
    """

    def __init__(self, tileMap, player):
        self.row = 1
        self.col = 1
        self.state = WANDER
        # BUG FIX: the rest of the class reads self.passableMoves, but the
        # original initialised self.possibleMoves, causing AttributeError on
        # the first chooseState()/chooseDirection() call.
        self.passableMoves = self.getPassableMoves(tileMap, self.row, self.col)
        # BUG FIX: chooseWanderDir stores the direction on self and returns
        # None; the original assigned its return value, so self.direction
        # started out as None instead of a MOVE_* constant.
        self.chooseWanderDir(self.passableMoves)
        self.tileMap = tileMap
        self.img = loadImage("teamRocketSprite.png")  # Processing.py builtin
        self.size = 48  # tile edge length in pixels
        self.chaseDist = CHASE_DISTANCE  # was a duplicated literal 6
        self.player = player

    def getPassableMoves(self, tileMap, row, col):
        """Return the MOVE_* directions whose adjacent tile exists and is
        passable from (row, col); empty list when boxed in."""
        passableDirList = []
        # Check order (up, down, left, right) matches the original.
        for moveDir, (dr, dc) in ((MOVE_UP, (-1, 0)), (MOVE_DOWN, (1, 0)),
                                  (MOVE_LEFT, (0, -1)), (MOVE_RIGHT, (0, 1))):
            tile = tileMap.getTileAt(row + dr, col + dc)
            # getTileAt returns -1 for out-of-range coordinates.
            if tile != -1 and tile.isPassable():
                passableDirList.append(moveDir)
        return passableDirList

    def getMoveRowCol(self, startRow, startCol, moveDir):
        """Return [row, col] after making one move in moveDir (a MOVE_*
        constant); unchanged coordinates for MOVE_NONE/unknown."""
        if moveDir == MOVE_RIGHT:
            return [startRow, startCol + 1]
        elif moveDir == MOVE_LEFT:
            return [startRow, startCol - 1]
        elif moveDir == MOVE_UP:
            return [startRow - 1, startCol]
        elif moveDir == MOVE_DOWN:
            return [startRow + 1, startCol]
        return [startRow, startCol]

    def display(self):
        """Draw the monster sprite at its current tile (Processing builtin)."""
        image(self.img, self.col * self.size, self.row * self.size)

    def getR(self):
        """Return the monster's current row."""
        return self.row

    def getC(self):
        """Return the monster's current column."""
        return self.col

    def setR(self, r):
        """Set the monster's row."""
        self.row = r

    def setC(self, c):
        """Set the monster's column."""
        self.col = c

    def chooseWanderDir(self, dirList):
        """Store a random direction from dirList on self.direction
        (MOVE_NONE when the list is empty)."""
        if len(dirList) > 0:
            self.direction = random.choice(dirList)
        else:
            self.direction = MOVE_NONE

    def chooseChaseDir(self, dirList, playerR, playerC):
        """Store the direction from dirList that minimises the Manhattan
        distance to the player at (playerR, playerC)."""
        bestDir = MOVE_NONE
        # Start huge so the first candidate always wins.
        bestDist = 9999999
        deltas = {
            MOVE_LEFT: (0, -1),
            MOVE_RIGHT: (0, 1),
            MOVE_UP: (-1, 0),
            MOVE_DOWN: (1, 0),
        }
        for move in dirList:
            if move not in deltas:
                continue
            dr, dc = deltas[move]
            tempDist = abs(self.row + dr - playerR) + abs(self.col + dc - playerC)
            # Strict '<' keeps the first of equally-good moves, as before.
            if tempDist < bestDist:
                bestDist = tempDist
                bestDir = move
        self.direction = bestDir

    def chooseDirection(self, playerR, playerC):
        """Pick a new travel direction appropriate to the current state."""
        if self.state == CHASE:
            # BUG FIX: the original passed self.row/self.col (the monster's
            # own position) instead of the player's, so chasing after hitting
            # a wall never homed in on the player.
            self.chooseChaseDir(self.passableMoves, playerR, playerC)
        else:
            self.chooseWanderDir(self.passableMoves)

    def chooseState(self, player):
        """Update CHASE/WANDER based on Manhattan distance to the player and,
        when chasing, immediately re-aim at the player."""
        playerR = player.getR()
        playerC = player.getC()
        distance = abs(self.row - playerR) + abs(self.col - playerC)
        if distance <= self.chaseDist:
            self.state = CHASE
            self.chooseChaseDir(self.passableMoves, playerR, playerC)
        else:
            self.state = WANDER

    def move(self, playerR, playerC):
        """Advance one tile in the current direction (if passable), otherwise
        pick a new direction; then refresh state and the cached moves."""
        row, col = self.getMoveRowCol(self.row, self.col, self.direction)
        tile = self.tileMap.getTileAt(row, col)
        if tile != -1 and tile.isPassable():
            self.row = row
            self.col = col
        else:
            # Hit a wall/edge: choose a new direction for the next tick.
            self.chooseDirection(playerR, playerC)
        self.chooseState(self.player)
        # NOTE: refreshed after chooseState, so state decisions use the moves
        # cached on the previous tick (original ordering, kept on purpose).
        self.passableMoves = self.getPassableMoves(self.tileMap, self.row, self.col)
51,111 | albertsennk/pokeball-hunt | refs/heads/master | /mapClass.py | """
Noah Albertsen
3/31/2017
Reused code from Project 2
"""
import tile
class Map(object):
    """Square tile map loaded from a whitespace-separated text file.

    Each of the first mapSize lines of the file holds mapSize type chars,
    one per tile (see tile.Tile for the legend).
    """

    def __init__(self, path, mapSize):
        self.path = path
        self.mapSize = mapSize
        self.tSize = 48  # tile edge length in pixels
        # 2D array [row][col] of tile.Tile objects.
        self.tileMap = [[0 for i in range(self.mapSize)] for j in range(self.mapSize)]
        # BUG FIX: the original leaked the file handle; use a context manager.
        with open(self.path, 'r') as f:
            for i in range(self.mapSize):
                newLine = f.readline()
                # Default split handles any run of whitespace between chars.
                lineList = newLine.split()
                for j in range(self.mapSize):
                    self.tileMap[i][j] = tile.Tile(lineList[j], i, j)

    def display(self):
        """Draw every tile in the map."""
        for i in range(self.mapSize):
            for j in range(self.mapSize):
                self.tileMap[i][j].display()

    def getTileAt(self, r, c):
        """Return the tile at (r, c), or -1 when out of range."""
        # BUG FIX: the original tested r > self.mapSize / c > self.mapSize,
        # letting r == mapSize through and raising IndexError; valid indices
        # are 0 .. mapSize-1, so use >=.
        if r < 0 or r >= self.mapSize or c < 0 or c >= self.mapSize:
            return -1
        return self.tileMap[r][c]

    def setTileAt(self, r, c, type):
        """Replace the tile at (r, c) with a new tile of the given type;
        silently ignores out-of-range coordinates."""
        # BUG FIX: same off-by-one bounds check as getTileAt; also replaced
        # the no-op bare `True` statement with an explicit early return.
        if r < 0 or r >= self.mapSize or c < 0 or c >= self.mapSize:
            return
        self.tileMap[r][c] = tile.Tile(type, r, c)

    def pixel2Row(self, x):
        # NOTE(review): divides by the Processing globals width/height rather
        # than tSize, and the x->row / y->col pairing looks swapped relative
        # to the rest of the codebase -- confirm against callers.
        return x // width

    def pixel2Col(self, y):
        return y // height
51,112 | albertsennk/pokeball-hunt | refs/heads/master | /player.py | """
Noah Albertsen
3/31/2017
Reused code from Project 2
"""
class Player(object):
    """Pixel-positioned player sprite on a square tile map.

    xpos/ypos are the pixels of the sprite's top-left corner; pSize is the
    sprite (and tile) edge length.
    """

    def __init__(self, mapSize):
        self.img = loadImage('oakSprite.png')  # Processing.py builtin
        self.pSize = 48
        # Start roughly in the middle of the map.
        self.xpos = mapSize / 2 * self.pSize
        self.ypos = mapSize / 2 * self.pSize
        self.mapSize = mapSize
        self.numLives = 3

    def display(self):
        """Draw the player sprite at its current pixel position."""
        image(self.img, self.xpos, self.ypos)

    def move(self, xAmt, yAmt):
        """Shift the player by the given pixel amounts."""
        self.xpos += xAmt
        self.ypos += yAmt

    def getXCenter(self):
        """Return the x pixel of the sprite's center."""
        return self.xpos + self.pSize / 2

    def getYCenter(self):
        """Return the y pixel of the sprite's center."""
        return self.ypos + self.pSize / 2

    def getX(self):
        """Return the x pixel of the top-left corner."""
        return self.xpos

    def getY(self):
        """Return the y pixel of the top-left corner."""
        return self.ypos

    def getSize(self):
        """Return the sprite edge length in pixels."""
        return self.pSize

    def getTile(self, tileMap):
        """Return the tile under the player's top-left corner.

        BUG FIX: the original divided pixel coordinates by mapSize instead of
        the tile size, and passed the x-derived value as the row. Rows come
        from y and columns from x (consistent with getR/getC below); indices
        are cast to int because the pixel positions may be floats.
        """
        return tileMap.getTileAt(int(self.ypos // self.pSize), int(self.xpos // self.pSize))

    def getC(self):
        """Return the column of the player's center point."""
        return (self.xpos + self.pSize / 2) // self.pSize

    def getR(self):
        """Return the row of the player's center point."""
        return (self.ypos + self.pSize / 2) // self.pSize

    def getLeft(self):
        """Return the x pixel of the left edge."""
        return self.xpos

    def getRight(self):
        """Return the x pixel of the right edge."""
        return self.xpos + self.pSize

    def getTop(self):
        """Return the y pixel of the top edge."""
        return self.ypos

    def getBottom(self):
        """Return the y pixel of the bottom edge."""
        return self.ypos + self.pSize

    def getNumLives(self):
        """Return the number of remaining lives."""
        return self.numLives

    def loseLife(self):
        """Remove one life."""
        self.numLives -= 1

    def setR(self, r):
        """Place the player's top edge on row r."""
        self.ypos = r * (self.pSize)

    def setC(self, c):
        """Place the player's left edge on column c."""
        self.xpos = c * (self.pSize)
51,133 | Arowne/automl_vision_aicompare | refs/heads/master | /AutoMLVisionClient.py | import requests
class AutoMLVisionClient():
    """Thin HTTP client for an AutoML Vision REST API.

    All calls authenticate with a bearer token passed at construction:
    AutoMLVisionClient(api_key='...').
    """

    def __init__(self, *args, **kwargs):
        self.api_key = kwargs['api_key']
        self.server_url = 'http://0.0.0.0:8000/v1/'
        self.headers = {
            'Authorization': 'Bearer ' + self.api_key
        }

    def create_project(self, project_name=None, project_description=None):
        """Create a project; return its server-side id."""
        payload = {
            'name': project_name,
            'description': project_description
        }
        response = requests.post(
            self.server_url + 'automl/vision/project', data=payload, headers=self.headers)
        # CLEANUP: the original parsed response.json() twice (once for a debug
        # print); parse once and drop the print.
        data = response.json()
        return data['project_id']

    def add_project_image(self, project_id=None, file_path=None):
        """Upload one image file to the project; return the image id."""
        # BUG FIX: the original never closed the file handle it opened; a
        # context manager keeps it open for the upload and closes it after.
        with open(file_path, 'rb') as fh:
            files = {
                'files': fh
            }
            response = requests.post(self.server_url + 'automl/vision/image/' +
                                     project_id + '/project', files=files, headers=self.headers)
        data = response.json()
        return data['image_id']

    def add_project_tag(self, project_id=None, tag_name=None):
        """Create a tag (label) in the project; return the tag id."""
        payload = {
            'name': tag_name,
        }
        response = requests.post(self.server_url + 'automl/vision/tag/' +
                                 project_id + '/project', data=payload, headers=self.headers)
        data = response.json()
        return data['tag_id']

    def add_image_tag(self, image_id=None, tag_id=None):
        """Attach an existing tag to an uploaded image; return the API reply."""
        response = requests.get(self.server_url + 'automl/vision/tag/' +
                                tag_id + '/image/' + image_id, headers=self.headers)
        data = response.json()
        return data['response']

    def train_project(self, project_id=None, training_name=None, training_time=None, provider=None):
        """Launch a training run on the given provider; return the train id."""
        payload = {
            "training_name": training_name,
            "training_time": training_time,
            "provider": provider
        }
        response = requests.post(self.server_url + 'automl/vision/train/' +
                                 project_id + '/project', data=payload, headers=self.headers)
        data = response.json()
        # (debug print of the raw payload removed)
        return data['results'][0]['training_state']['train_id']

    def predict(self, provider=None, image_path=None, train_id=None, project_id=None):
        """Run a prediction on one image with a trained model; return the
        provider's prediction list."""
        payload = {
            "provider": provider
        }
        # BUG FIX: same file-handle leak as add_project_image.
        with open(image_path, 'rb') as fh:
            files = {
                'files': fh
            }
            response = requests.post(self.server_url + 'automl/vision/prediction/project/' +
                                     project_id + '/train/' + train_id, data=payload, files=files, headers=self.headers)
        data = response.json()
        return data['results'][0]['predictions']
| {"/main.py": ["/AutoMLVisionClient.py"]} |
51,134 | Arowne/automl_vision_aicompare | refs/heads/master | /main.py | import glob
from AutoMLVisionClient import AutoMLVisionClient
def train_project():
    """End-to-end demo: create a project, create tags, upload up to 100
    labelled floor-plan images per class, launch training and run one
    prediction against the trained model."""
    client = AutoMLVisionClient(api_key='<your_api_key>')

    print('Project creation...')
    project_id = client.create_project(project_name='Floor plans classifier', project_description='Automaticaly count number of rooms')

    # One tag per room-count class.
    print('Tag creation ...')
    three_rooms_id = client.add_project_tag(project_id=project_id, tag_name='3')
    four_rooms_id = client.add_project_tag(project_id=project_id, tag_name='4')
    five_rooms_id = client.add_project_tag(project_id=project_id, tag_name='5')
    tags = [three_rooms_id, four_rooms_id, five_rooms_id]

    print('Image importation ...')
    for index in range(3):
        current_folder = "floor_plan_dataset/" + str(index) + "/*"
        counter = 0
        for img in glob.glob(current_folder):
            counter += 1
            if counter > 100:
                break
            # BUG FIX: progress was counter*(index+1), which over-counts on
            # later folders; images processed so far is index*100 + counter.
            print(str(index * 100 + counter) + '/300', end="\r")
            image_id = client.add_project_image(project_id=project_id, file_path=img)
            response = client.add_image_tag(image_id=image_id, tag_id=tags[index])
            print(response)

    print('Launch train ...')
    print(project_id)
    # You can switch provider between microsoft, google_cloud or aws
    train_id = client.train_project(project_id=project_id, training_name='Room classification google train', training_time=8000, provider='microsoft')

    # BUG FIX: the original swapped the keyword arguments, passing the train
    # id as project_id and the project id as train_id.
    prediction = client.predict(provider="microsoft", project_id=project_id, train_id=train_id, image_path="floor_plan_dataset/0/Cat9_9.jpg")
    print(prediction)
if __name__ == "__main__":
    # (removed a dead trailing `pass` statement)
    train_project()
| {"/main.py": ["/AutoMLVisionClient.py"]} |
51,135 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /FacialRecognition.py | import face_recognition as fr
import RandomString as rs
import numpy as np
from cv2 import cv2
from io import BytesIO
import requests
import os
import shutil
from VideoCapture import Device
from PIL import Image
def compare(img):
    """Return the filename of the first known face whose encoding matches
    `img` (a face_recognition encoding), or None when nothing matches."""
    folder = './images/known/'
    for filename in os.listdir(folder):
        image = cv2.imread(os.path.join(folder, filename))
        # cv2.imread returns None for unreadable/non-image files; skip them.
        if image is None:
            continue
        # BUG FIX: the original indexed face_encodings(...)[0] unconditionally
        # and raised IndexError whenever a stored image had no detectable
        # face; skip such entries instead.
        encodings = fr.face_encodings(image)
        if not encodings:
            continue
        result = fr.compare_faces([img], encodings[0], tolerance=0.5)
        if result[0]:
            return filename
def SignUp_With_FacialId(PicName):
    """Register the single face found in images/unknown/<PicName>.

    Returns a status string ('Person Saved', 'Account Already Exists',
    'More Than One person in Image' or 'NO person in image'). The temporary
    capture file is always deleted, whatever the outcome.
    """
    name = PicName
    try:
        image = fr.load_image_file('images/unknown/' + name)
        face_locations = fr.face_locations(image)
        if len(face_locations) == 1:
            img = fr.face_encodings(image)[0]
            if compare(img) != None:
                return 'Account Already Exists'
            # NOTE(review): load_image_file yields RGB but cv2.imwrite expects
            # BGR, so the stored image has swapped channels -- confirm this is
            # intended (compare() reads it back with cv2.imread/BGR).
            cv2.imwrite('images/known/' + name, image)
            return 'Person Saved'
        elif len(face_locations) > 1:
            return 'More Than One person in Image'
        else:
            return 'NO person in image'
    finally:
        # Every branch of the original repeated this cleanup; do it once.
        os.remove('images/unknown/' + name)
def SignIn_With_FacialId(PicName):
    """Match the face in images/unknown/<PicName> against registered users.

    Returns the matching known filename on success, otherwise a status string
    ('Sign Up First', 'More Than One person in Image' or 'NO person in image').
    """
    name = PicName
    try:
        image = fr.load_image_file('images/unknown/' + name)
        face_locations = fr.face_locations(image)
        if len(face_locations) == 1:
            img = fr.face_encodings(image)[0]
            image2 = compare(img)
            if image2 == None:
                return 'Sign Up First'
            return image2
        elif len(face_locations) > 1:
            return 'More Than One person in Image'
        else:
            return 'NO person in image'
    finally:
        # BUG FIX: the original left the temporary capture on disk when zero
        # or multiple faces were detected (inconsistent with
        # SignUp_With_FacialId); always clean it up.
        os.remove('images/unknown/' + name)
| {"/app.py": ["/StocksData.py", "/FacialRecognition.py", "/database.py", "/FR.py"], "/StocksData.py": ["/StocksPredictions.py"], "/pic.py": ["/FacialRecognition.py"]} |
51,136 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /app.py | from flask import Flask, request, render_template, make_response, session
from StocksData import prediction, return_graph_url
from FacialRecognition import SignIn_With_FacialId, SignUp_With_FacialId
from flask_mysqldb import MySQL
import database as db
import FR
from flask_mail import Mail, Message
app = Flask(__name__)

# --- Mail configuration --------------------------------------------------
# NOTE(review): real SMTP credentials are hardcoded and committed here; move
# them to environment variables / a config file and rotate this password.
app.config['MAIL_SERVER']='smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'musharrafmobeen@gmail.com'
app.config['MAIL_PASSWORD'] = 'blackviking125125'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
mail = Mail(app)

# --- MySQL configuration (local dev database) ----------------------------
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = ''
app.config['MYSQL_DB'] = 'stockslab'
mysql = MySQL(app)

# NOTE(review): hardcoded session secret; load from config in production.
app.secret_key = b'_5#y2L"F4Q8z\n\xec]/'

# NOTE(review): `global` at module level is a no-op -- it does not declare a
# variable, and the `eml = ...` assignments inside the view functions below
# create locals, never this global.
global eml
@app.route('/')
def index():
    """Landing page: cookie-consent gate first, then the logged-in or
    logged-out home page depending on the 'User' cookie."""
    if 'Agree' in request.cookies:
        if 'User' in request.cookies:
            username = request.cookies.get('User')
            return render_template('home.html', username = username)
        else:
            return render_template('home2.html')
    else:
        # No consent cookie yet: show the consent page.
        return render_template('index.html')
@app.route('/setCookie')
def setCookie():
    """Record cookie consent for one year and show the logged-out home."""
    res = make_response(render_template('home2.html') )
    res.set_cookie('Agree', 'welcome to website' , max_age=60*60*24*365)
    return res
@app.route('/MessageToDeveloper_LoggedIn', methods=['GET', 'POST'])
def MessageToDeveloper_LoggedIn():
    """Email a logged-in visitor's comment to the developers."""
    if request.method == 'POST':
        username = request.form['nameloggedin']
        email = request.form['mailloggedin']
        comments = request.form['review']
        # NOTE(review): request.form values are strings (KeyError if absent),
        # so this None check can never fail; a truthiness check was likely
        # intended. Using the visitor's address as `sender` may also be
        # rejected by the SMTP server (spoofing).
        if email != None and username != None:
            msg = Message(f'From {username} : Comment about the website ', sender = email, recipients=['musharrafmobeen8@gmail.com','mmq.25524@gmail.com'])
            msg.body = f'{comments}'
            mail.send(msg)
            return render_template('home.html')
        else:
            return render_template('home.html', message = "fill all forms")
    return render_template('home.html', message = "Something Went Wrong")
@app.route('/MessageToDeveloper_LoggedOff', methods=['GET', 'POST'])
def MessageToDeveloper_LoggedOff():
    """Email a logged-out visitor's comment to the developers."""
    if request.method == 'POST':
        username = request.form['nameloggedoff']
        email = request.form['mailloggedoff']
        comments = request.form['review']
        # NOTE(review): same issues as the logged-in variant -- the None check
        # never fails for form strings, and sender spoofing may be rejected.
        if email != None and username != None:
            msg = Message(f'From {username} : Comment about the website ', sender= email, recipients=['musharrafmobeen8@gmail.com','mmq.25524@gmail.com'])
            msg.body = f'{comments}'
            # NOTE(review): bare attribute access -- this line has no effect.
            msg.attachments
            mail.send(msg)
            return render_template('home2.html')
        else:
            return render_template('home2.html', message = "fill all forms")
    return render_template('home2.html', message = "Something Went Wrong")
@app.route('/SignIn')
def SignIn():
    """Render the sign-in form."""
    return render_template('SignIN.html')
@app.route('/SignUp')
def SignUp():
    """Render the sign-up form."""
    return render_template('SignUP.html')
@app.route('/LogIn', methods=['GET', 'POST'])
def LogIn():
    """Authenticate email/password against the database and, on success,
    set the 'User' cookie and session entries."""
    if request.method == 'POST':
        if 'User' in request.cookies:
            return render_template('home.html', result = "Already Logged In")
        else:
            Email = request.form['Email']
            Password = request.form['Password']
            result = db.read(mysql,Email,Password)
            # NOTE(review): if db.read returns None (no row), result[0]
            # raises TypeError before the != None check -- confirm db.read's
            # contract.
            if result[0] != None:
                res = make_response(render_template('home.html', username = result[0]))
                res.set_cookie('User', result[0] , max_age=60*60*24*365)
                if not 'User' in session:
                    session['User'] = result
                    session['Email'] = Email
                    # NOTE(review): assigns a local, not the module-level eml
                    # (no `global eml` inside this function) -- dead store.
                    eml = True
                return res
            else:
                return render_template('SignIN.html', result = "Account Not Found")
@app.route('/LogOut')
def LogOut():
    """Expire the 'User' cookie, clear the session and show the
    logged-out home page."""
    res = make_response(render_template('home2.html') )
    res.set_cookie('User', '' , max_age=0)
    session.clear()
    return res
@app.route('/updateordelete',methods=['GET','POST'])
def updateordelete():
    """Update the logged-in user's username/password or delete the account,
    depending on which form button ('Update'/'Delete') was pressed."""
    if request.method == 'POST':
        if 'Email' in session:
            Email = session['Email']
            UserName = request.form['username']
            Password = request.form['password']
            selector = request.form['btn']
            if selector == 'Delete':
                try:
                    db.delete(mysql,Email)
                    res = make_response(render_template('home2.html', message = "Account Deleted"))
                    # Expire the login cookie and drop the session.
                    res.set_cookie('User', '' , max_age=0)
                    session.clear()
                    return res
                # NOTE(review): bare except hides the real failure; narrow it
                # and log the exception.
                except:
                    return render_template('home.html', message = "Something Went Wrong 1")
            elif selector == 'Update':
                # NOTE(review): form values are strings, so this None check
                # never fails; a truthiness check was likely intended.
                if UserName != None and Password != None:
                    try:
                        db.update(mysql,UserName,Email,Password)
                        res = make_response(render_template('home.html', username = UserName ,message= "Successfullt Updated"))
                        # Replace the login cookie with the new username.
                        res.set_cookie('User', '' , max_age=0)
                        res.set_cookie('User', UserName , max_age=60*60*24*365)
                        session['User'] = UserName
                        return res
                    except:
                        return render_template('home.html', message = "Something Went Wrong 2")
                else:
                    return render_template('home.html', message = "Something Went Wrong 3")
            return render_template('home.html', message = "Something Went Wrong 3")
        return render_template('home.html', message = "Something Went Wrong 4")
@app.route('/Register', methods=['GET', 'POST'])
def Registor():
    """Create a new account from the sign-up form, send a welcome mail and
    log the user in (cookie + session) on success."""
    if request.method == 'POST':
        UserName = request.form['UserName']
        Email = request.form['Email']
        Password = request.form['Password']
        Password2 = request.form['Password2']
        try:
            # db.insert returns "Success" / "AlreadyExists" / "PasswordError".
            result = db.insert(mysql,UserName,Email,Password,Password2)
            if result == "Success":
                msg = Message('Welcome you have successfully registered', sender='musharrafmobeen8@gmail.com', recipients=[Email])
                mail.send(msg)
                res = make_response(render_template('home.html', username = UserName))
                res.set_cookie('User', UserName , max_age=60*60*24*365)
                if not 'User' in session:
                    session['User'] = UserName
                    session['Email'] = Email
                    # NOTE(review): local dead store, never reaches the
                    # module-level eml.
                    eml = True
                return res
            elif result == "AlreadyExists":
                return render_template('SignUP.html',result = "Email Already Registered")
            elif result == "PasswordError":
                return render_template('SignUP.html',result = "Password Mismatch")
        except TypeError:
            # NOTE(review): a TypeError here does not necessarily mean a
            # duplicate email -- the message may mislead the user.
            return render_template('SignUP.html',result = "Email Already Registered")
        return render_template('SignUP.html',result = "Email Already Registered")
    else:
        return render_template('SignUP.html',result = "Email Already Registered")
@app.route('/Policy')
def Policy():
    """Render the privacy-policy page."""
    return render_template('policy.php')
@app.route('/LogIn_With_Facial_ID')
def LogIn_With_Facial_ID():
    """Capture a webcam photo, match it against registered faces and, on a
    match, log the user in (cookie + session)."""
    try:
        picname = FR.getPic()
        if picname != "No Capture Device Available":
            # SignIn_With_FacialId returns the matching known filename on
            # success, otherwise one of several status strings.
            text = SignIn_With_FacialId(picname)
            if text != 'More Than One person in Image' and text != 'NO person in image' and text != 'Sign Up First' and text != None:
                User = db.readFID(mysql,text)
                if User != None:
                    if not 'User' in request.cookies:
                        res = make_response(render_template('home.html', username = User[0]))
                        res.set_cookie('User', User[0] , max_age=60*60*24*365)
                        if not 'User' in session:
                            session['User'] = User[0]
                            session['Email'] = User[1]
                            # NOTE(review): local dead store (no `global eml`).
                            eml = False
                        return res
                    return render_template('SignIN.html',result = "User Not Found")
                else:
                    return render_template('SignUP.html',result = "Register First")
        else:
            return render_template('SignIN.html',result = "No Camera Available")
        return render_template('SignIN.html',result = "User Not Found")
    # NOTE(review): swallowing ValueError makes the view return None, which
    # Flask turns into a 500 -- return an error page instead.
    except ValueError:
        pass
@app.route('/Register_With_Facial_ID', methods=['GET', 'POST'])
def Register_With_Facial_ID():
    """Capture a webcam photo and register the face together with the
    submitted username/email, then log the user in on success."""
    if request.method == 'POST':
        UserName = request.form['username']
        Email = request.form['email']
        # NOTE(review): `and` means the check only fires when BOTH fields are
        # empty; `or` was likely intended.
        if not UserName and not Email:
            return render_template('SignUP.html',result = "Fiil all fields Empty")
        else:
            try:
                picname = FR.getPic()
                if picname != "No Capture Device Available":
                    text = SignUp_With_FacialId(picname)
                    if text == 'Person Saved':
                        try:
                            msg = Message('Welcome you have successfully registered', sender='musharrafmobeen8@gmail.com', recipients=[Email])
                            mail.send(msg)
                            db.insertFID(mysql,UserName,picname,Email)
                            res = make_response(render_template('home.html', username = UserName))
                            res.set_cookie('User', UserName , max_age=60*60*24*365)
                            if not 'User' in session:
                                session['User'] = UserName
                                session['Email']= Email
                                # NOTE(review): local dead store.
                                eml = False
                            return res
                        except:
                            # NOTE(review): missing `return` -- the rendered
                            # template is discarded and execution falls through.
                            render_template('SignUP.html',result = "Something Went Wrong")
                else:
                    return render_template('SignIN.html',result = "No Camera Available")
            except TypeError as e:
                return render_template('SignUP.html', result = e)
            else:
                # try/except/else: runs only when no exception was raised.
                return render_template('SignUP.html',result = text)
            return render_template('SignUP.html',result = "Something Went Wrong")
        return render_template('SignUP.html',result = "")
    else:
        return render_template('SignUP.html',result = "something went wrong")
@app.route('/PredictStocks')
def PredictStocks():
    """Render the stock-prediction input form (ticker + date)."""
    return render_template('StocksPrediction.html')
@app.route('/Predict', methods=['GET', 'POST'])
def foo():
    """Run the OHLCV prediction for the submitted ticker and date, email the
    result (with the generated graph attached) to the logged-in user, and
    render the result page.

    NOTE(review): a GET request falls through and returns None (Flask 500);
    add an explicit GET branch.
    """
    if request.method == 'POST':
        Company_Name = request.form['Company_Name']
        Date = request.form['Date']
        # prediction() (from StocksData) returns a 1xN array of
        # [Open, High, Low, Close, Volume]; return_graph_url() exposes the
        # graph filename it generated as a side effect.
        predictions = prediction(Company_Name,Date)
        graph_name = return_graph_url()
        if 'Email' in session:
            reciever = session['Email']
            msg = Message('Result To The Stocks Prediction', sender = 'musharrafmobeen8@gmail.com', recipients=[reciever])
            msg.body = f'Opening value of stocks={predictions[0][0]}, Highest value of stocks={predictions[0][1]}, Lowest value of stocks={predictions[0][2]}, Closing value of stocks={predictions[0][3]}, Volume of stocks={predictions[0][4]}'
            with app.open_resource(f"static/graphs/{graph_name}") as fp:
                msg.attach("galexy.jpg","image/png",fp.read())
            mail.send(msg)
        return render_template('Prediction.html', Open=predictions[0][0], High=predictions[0][1], Low=predictions[0][2], Close=predictions[0][3], Volume=predictions[0][4], graph= f'graphs/{graph_name}' )
if __name__ == '__main__':
    # NOTE(review): debug mode must not be enabled in production -- it exposes
    # the interactive debugger (arbitrary code execution on errors).
    app.debug = True
    app.run()
| {"/app.py": ["/StocksData.py", "/FacialRecognition.py", "/database.py", "/FR.py"], "/StocksData.py": ["/StocksPredictions.py"], "/pic.py": ["/FacialRecognition.py"]} |
51,137 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /StocksData.py | import yfinance as yf
from flask import Flask
import StocksPredictions as SP
import pandas as pd
from flask import render_template
import webbrowser
import sys
from selenium import webdriver
from webdriver_manager.chrome import ChromeDriverManager
from bs4 import BeautifulSoup
import requests
from urllib.request import urlopen
import json
def prediction(Company_Name,Date):
    """Fetch two years of history for `Company_Name` from Yahoo Finance,
    train a model on it (StocksPredictions.train) and predict OHLCV values
    for `Date`.

    Side effect: stores the generated graph's filename in the module global
    `graph_name`, read back via return_graph_url().
    """
    a_string = Company_Name
    # Strip stray surrounding quotes from the submitted ticker symbol.
    stripped_string = a_string.strip("'")
    company_name = stripped_string
    stock_name = yf.Ticker(company_name)
    prediction_date = Date
    # Convert the date to its proleptic-Gregorian ordinal -- the single
    # numeric feature the model is trained on.
    date = pd.to_datetime(prediction_date)
    date = date.toordinal()
    print(date)  # NOTE(review): debug print, consider removing/logging
    # stock_name.info
    stock_data = stock_name.history(period="2Y")
    stock_data = stock_data.drop(columns=['Dividends', 'Stock Splits'])
    # Rebuild a plain 'YYYY-MM-DD' Date column from the datetime index.
    dates =[]
    for x in range(len(stock_data)):
        newdate = str(stock_data.index[x])
        newdate = newdate[0:10]
        dates.append(newdate)
    stock_data['Date'] = dates
    model = SP.train(stock_data)
    prediction = SP.prediction(date,model)
    global graph_name
    graph_name = SP.graph(stock_data,date, company_name)
    return prediction
def return_graph_url():
    """Return the filename of the graph produced by the last prediction() call.

    NOTE(review): raises NameError if called before prediction() has run,
    because the module global `graph_name` is only set there.
    """
    return graph_name
51,138 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /FR.py | from cv2 import cv2
import RandomString as rs
def getPic():
    """Show a live webcam preview; pressing 's' saves the current frame to
    images/unknown/<random8>.jpg and returns that file name."""
    camera = cv2.VideoCapture(0)
    name = rs.get_random_string(8) + '.jpg'
    # NOTE(review): cv2.VideoCapture instances are always truthy, so this
    # branch runs even when no device opened - camera.isOpened() is likely
    # what was intended; confirm before changing (else-branch is unreachable).
    if camera:
        while True:
            ret,image = camera.read()  # ret (read-success flag) is ignored
            cv2.imshow('image',image)
            # Save and stop once the user presses 's' in the preview window.
            if cv2.waitKey(1)& 0xFF == ord('s'):
                cv2.imwrite('images/unknown/'+name,image)
                break
        cv2.destroyAllWindows()
        camera.release()
        return name
    else:
        return "No Capture Device Available" | {"/app.py": ["/StocksData.py", "/FacialRecognition.py", "/database.py", "/FR.py"], "/StocksData.py": ["/StocksPredictions.py"], "/pic.py": ["/FacialRecognition.py"]} |
51,139 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /StocksPredictions.py | # Install The Dependencies
import numpy as np
import pandas as pd
import os
import datetime as dt
from sklearn.tree import DecisionTreeRegressor as DTR
from sklearn.tree import DecisionTreeClassifier as DSC
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression as lr
import matplotlib
import time
# Select the non-interactive Agg backend (must happen before importing
# pyplot) so figures can be rendered server-side without a display.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
# import joblib
plt.style.use('bmh')  # consistent styling for all generated graphs
def train(stocks_data):
    """Fit a DecisionTreeRegressor mapping date ordinals to OHLCV rows.

    Mutates stocks_data in place: the 'Date' column is parsed to datetime
    and then converted to ordinal ints (callers reuse the frame afterwards,
    so the in-place behavior is deliberately kept).
    """
    stocks_data['Date'] = pd.to_datetime(stocks_data['Date'])
    stocks_data['Date'] = stocks_data['Date'].map(dt.datetime.toordinal)
    # (removed a leftover bare `stocks_data['Date']` expression - it was a no-op)
    x = stocks_data.drop(columns=['Open', 'High', 'Low', 'Close', 'Volume'])
    y = stocks_data.drop(columns=['Date'])
    model = DTR()
    model.fit(x, y)
    return model
def prediction(prediction_date, model):
    """Predict the OHLCV row for a single date ordinal using a fitted model."""
    features = [[prediction_date]]  # single sample, single feature
    return model.predict(features)
def graph(stocks_data, prediction_date, company_name):
    """Plot the Close prices shifted by the forecast horizon and save the
    figure under static/graphs/ with a time-stamped name.

    Deletes any previously generated graph files first; returns the new
    file name. Mutates stocks_data (adds a 'prediction' column).
    """
    current_date = dt.datetime.now().toordinal()
    future_days = prediction_date - current_date
    # NOTE(review): a past prediction_date makes future_days negative and
    # shifts forward instead - confirm callers always pass a future date.
    stocks_data['prediction'] = stocks_data[['Close']].shift(-future_days)
    plt.figure(figsize=(16, 8))
    plt.title(company_name)
    plt.xlabel('Date')
    plt.ylabel('close Price USD ($)')
    plt.plot(stocks_data['prediction'])
    new_graph_name = "graph" + str(time.time()) + ".png"
    # Keep only the latest graph in the static folder.
    for filename in os.listdir('static/graphs/'):
        if filename.startswith('graph'):
            os.remove('static/graphs/' + filename)
    plt.savefig('static/graphs/' + new_graph_name)
    # Fix: close the figure - previously every call leaked an open
    # matplotlib figure (matplotlib warns after 20 open figures).
    plt.close()
    return new_graph_name
| {"/app.py": ["/StocksData.py", "/FacialRecognition.py", "/database.py", "/FR.py"], "/StocksData.py": ["/StocksPredictions.py"], "/pic.py": ["/FacialRecognition.py"]} |
51,140 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /database.py | from flask_mysqldb import MySQL
import MySQLdb
def read(mysql, Email, Password):
    """Return the username row matching (Email, Password), or None.

    Fix: the query was built with an f-string, allowing SQL injection via
    Email/Password; it now uses driver-side parameter binding.
    """
    cur = mysql.connection.cursor()
    try:
        # %s placeholders let the driver escape the untrusted values.
        cur.execute("Select username From accounts Where mail = %s And password = %s",
                    (Email, Password))
        mysql.connection.commit()
        result = cur.fetchone()
        return result
    finally:
        cur.close()
def insert(mysql, UserName, Email, Password, Password2):
    """Create an account if the two passwords match.

    Returns "Success" on insert, "PasswordError" on mismatch,
    "AlreadyExists" on a TypeError from the driver, or None on any other
    database error.

    Fixes: removed the unreachable `return "Password Match"` (the try block
    always returned first) and narrowed the bare `except:` so SystemExit /
    KeyboardInterrupt are no longer swallowed.
    """
    if Password != Password2:
        return "PasswordError"
    cur = mysql.connection.cursor()
    try:
        cur.execute("INSERT INTO accounts(username,password,mail) VALUES (%s, %s, %s)", (UserName, Password, Email))
        mysql.connection.commit()
        return "Success"
    except TypeError:
        return "AlreadyExists"
    except Exception:
        return None
    finally:
        cur.close()
def delete(mysql, Email):
    """Delete the account whose mail equals Email.

    Returns None normally, "AlreadyExists" on TypeError. Fix: the f-string
    query allowed SQL injection via Email; now parameterized.
    """
    cur = mysql.connection.cursor()
    try:
        cur.execute("Delete From accounts Where mail = %s", (Email,))
        mysql.connection.commit()
    except TypeError:
        return "AlreadyExists"
    except Exception:
        # Narrowed from a bare `except:`; still returns None to callers.
        return None
    finally:
        cur.close()
def update(mysql, UserName, Email, Password):
    """Update username/password for the account with the given mail.

    Returns None normally, "AlreadyExists" on TypeError. Fix: the f-string
    query allowed SQL injection via all three arguments; now parameterized.
    """
    cur = mysql.connection.cursor()
    try:
        query = """ UPDATE accounts
                        SET username = %s, password = %s
                        WHERE mail = %s """
        cur.execute(query, (UserName, Password, Email))
        mysql.connection.commit()
    except TypeError:
        return "AlreadyExists"
    except Exception:
        # Narrowed from a bare `except:`; still returns None to callers.
        return None
    finally:
        cur.close()
def readFID(mysql, PicName):
    """Return (username, email) for the face-id picture PicName, or None.

    Fix: the f-string query allowed SQL injection via PicName; now
    parameterized.
    """
    cur = mysql.connection.cursor()
    try:
        cur.execute("Select username,email From faceid Where picname = %s", (PicName,))
        mysql.connection.commit()
        result = cur.fetchone()
        return result
    finally:
        cur.close()
def insertFID(mysql, UserName, PicName, Email):
    """Register a face-id picture for a user.

    Returns "Success" on insert, "AlreadyExists" on TypeError, or None on
    any other database error. Fix: narrowed the bare `except:` so
    SystemExit / KeyboardInterrupt are no longer swallowed.
    """
    cur = mysql.connection.cursor()
    try:
        cur.execute("INSERT INTO faceid(username,picname,email) VALUES (%s, %s,%s)", (UserName, PicName,Email))
        mysql.connection.commit()
        return "Success"
    except TypeError:
        return "AlreadyExists"
    except Exception:
        return None
    finally:
        cur.close()
def deleteFID(mysql, PicName):
    """Delete the face-id record for PicName.

    Returns None normally, "AlreadyExists" on TypeError. Fix: the f-string
    query allowed SQL injection via PicName; now parameterized.
    """
    cur = mysql.connection.cursor()
    try:
        cur.execute("Delete From faceid Where picname = %s", (PicName,))
        mysql.connection.commit()
    except TypeError:
        return "AlreadyExists"
    except Exception:
        # Narrowed from a bare `except:`; still returns None to callers.
        return None
    finally:
        cur.close()
def updateFID(mysql, UserName, PicName):
    """Change the username attached to the face-id picture PicName.

    Returns None normally, "AlreadyExists" on TypeError. Fix: the f-string
    query allowed SQL injection via UserName/PicName; now parameterized.
    """
    cur = mysql.connection.cursor()
    try:
        query = """ UPDATE faceid
                        SET username = %s
                        WHERE picname = %s """
        cur.execute(query, (UserName, PicName))
        mysql.connection.commit()
    except TypeError:
        return "AlreadyExists"
    except Exception:
        # Narrowed from a bare `except:`; still returns None to callers.
        return None
    finally:
        cur.close()
| {"/app.py": ["/StocksData.py", "/FacialRecognition.py", "/database.py", "/FR.py"], "/StocksData.py": ["/StocksPredictions.py"], "/pic.py": ["/FacialRecognition.py"]} |
51,141 | musharrafmobeen/Stocks-Prediction | refs/heads/master | /pic.py | import FacialRecognition as Fr
# Script entry point: runs the facial-ID sign-up flow from FacialRecognition.
Fr.SignUp_With_FacialId()
| {"/app.py": ["/StocksData.py", "/FacialRecognition.py", "/database.py", "/FR.py"], "/StocksData.py": ["/StocksPredictions.py"], "/pic.py": ["/FacialRecognition.py"]} |
51,172 | aviadar/Nemerical-Optimization | refs/heads/main | /Code/tests/test_unconstrained_min.py | import numpy as np
import unittest
from Code.src.unconstrained_min import line_search
from Code.tests.examples import Q1_quad, Q2_quad, Q3_quad, QuadraticFunction, RosenbrockFunction
from Code.src.utils import final_report, plot_contours_paths, plot_val_hist
class TestUnconstrainedMin(unittest.TestCase):
    """Exercise line_search on the quadratic and Rosenbrock objectives for
    each direction-selection method, reporting and plotting every run.

    Refactored: the six tests were near-identical copy-paste; the shared
    run/report/plot sequence now lives in private helpers. Plot titles are
    unchanged (including the historical 'Qaudratic' spelling).
    """

    def _run(self, f, x0, obj_tol, param_tol, max_iter, method, label):
        # Run one minimization, print the final report, draw both plots.
        success, last_x, val_hist, x_hist = line_search(f, x0, obj_tol, param_tol,
                                                        max_iter, method)
        final_report(success, last_x)
        plot_val_hist(val_hist, label + ' objective function vs. iterations')
        plot_contours_paths(f, x_hist, label + ' Convergence')

    def _run_quads(self, method, prefix):
        # Shared driver for the three quadratic objectives.
        x0 = np.array([[1], [1]])
        obj_tol = 10e-12
        param_tol = 10e-8
        max_iter = 100
        for ordinal, Q in zip(('1st', '2nd', '3rd'), (Q1_quad, Q2_quad, Q3_quad)):
            self._run(QuadraticFunction(Q), x0, obj_tol, param_tol, max_iter,
                      method, prefix + ' ' + ordinal + ' Qaudratic')

    def test_quad_min_gd(self):
        self._run_quads('gd', 'GD')

    def test_quad_min_nt(self):
        self._run_quads('nt', 'NT')

    def test_quad_min_bfgs(self):
        self._run_quads('bfgs', 'BFGS')

    def test_rosenbrock_min_gd(self):
        self._run(RosenbrockFunction(), np.array([[2], [2]]), 10e-9, 10e-9,
                  10000, 'gd', 'GD rosenbrock')

    def test_rosenbrock_min_nt(self):
        self._run(RosenbrockFunction(), np.array([[2], [2]]), 10e-7, 10e-8,
                  10000, 'nt', 'NT rosenbrock')

    def test_rosenbrock_min_bfgs(self):
        self._run(RosenbrockFunction(), np.array([[2], [2]]), 10e-7, 10e-8,
                  10000, 'bfgs', 'BFGS rosenbrock')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {"/Code/tests/test_unconstrained_min.py": ["/Code/src/unconstrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/unconstrained_min.py": ["/Code/src/utils.py"], "/Code/tests/test_constrained_min.py": ["/Code/src/constrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/constrained_min.py": ["/Code/src/unconstrained_min.py"]} |
51,173 | aviadar/Nemerical-Optimization | refs/heads/main | /Code/src/unconstrained_min.py | import numpy as np
from Code.src.utils import report
def line_search(f, x0, obj_tol, param_tol, max_iter, dir_selection_method='gd', init_step_len=1.0, slope_ratio=1e-4,
                back_track_factor=0.2):
    """Minimize f from x0 using backtracking line search.

    dir_selection_method: 'gd' (steepest descent), 'nt' (Newton),
    'nt_equality' (Newton with linear equality constraints) or 'bfgs'.

    Returns (converged, last_x, val_hist, x_hist); convergence is declared
    when either the objective change or the step norm falls below its
    tolerance within max_iter iterations.
    """
    prev_x = x0.copy()
    prev_val = f.evaluate(x0)
    val_hist = [prev_val.reshape((1,))]
    # x_hist accumulates iterates as columns; BFGS reads its last two.
    x_hist = x0.copy()
    if dir_selection_method == 'bfgs':
        first_iteration = True
        Bk = np.eye(len(x0))
    for curr_iter in range(max_iter):
        # Pick the descent direction pk for the chosen method.
        if dir_selection_method == 'gd':
            pk = -f.evaluate_grad(prev_x)
        elif dir_selection_method == 'nt':
            pk = newton_dir(f, prev_x)
        elif dir_selection_method == 'nt_equality':
            pk = newton_equality_const_dir(f, prev_x)
        elif dir_selection_method == 'bfgs':
            if first_iteration:
                pk, Bk = bfgs_dir(f, x_hist, None, None, first_iteration)
                first_iteration = False
            else:
                pk, Bk = bfgs_dir(f, x_hist[:, -1].reshape(-1, 1), x_hist[:, -2].reshape(-1, 1), Bk, first_iteration)
        else:
            # NOTE(review): message omits 'nt_equality', which is handled above.
            raise Exception("dir_selection_method = [‘gd’, ‘nt’, ‘bfgs’] only!")
        # Backtracking (Armijo) step length along pk.
        step_size = get_step_size_wolfe(f, prev_x, pk, init_step_len, slope_ratio, back_track_factor)
        new_x = prev_x + step_size * pk.reshape(prev_x.shape)
        x_hist = np.append(x_hist, new_x, axis=1)
        new_val = f.evaluate(new_x)
        val_hist.append(new_val.reshape((1,)))
        # Stop when the objective or the iterate stops moving.
        if abs(prev_val - new_val) < obj_tol or np.linalg.norm(new_x-prev_x) < param_tol:
            report(curr_iter, new_x, new_val, np.linalg.norm(new_x - prev_x), abs(prev_val - new_val))
            return True, new_x, val_hist, x_hist
        report(curr_iter, new_x, new_val, np.linalg.norm(new_x-prev_x), abs(prev_val - new_val))
        prev_x = new_x
        prev_val = new_val
    return False, new_x, val_hist, x_hist
def bfgs_dir(f, xk_1, xk, Bk, first_iteration):
    """Return (direction, B_new): a quasi-Newton step using the BFGS
    Hessian approximation.

    On the first iteration B is the identity; afterwards the standard
    rank-two BFGS update is applied to Bk from the last step and the
    gradient difference.
    """
    if first_iteration:
        B_new = np.eye(len(xk_1))
    else:
        step = (xk_1 - xk).reshape(-1, 1)
        grad_diff = (f.evaluate_grad(xk_1) - f.evaluate_grad(xk)).reshape(-1, 1)
        B_new = Bk - (Bk @ step @ step.T @ Bk) / (step.T @ Bk @ step) + (grad_diff @ grad_diff.T) / (grad_diff.T @ step)
    direction = -(mat_inv(B_new) @ f.evaluate_grad(xk_1)).reshape(-1, 1)
    return direction, B_new
def newton_dir(f, x):
    """Newton direction: -H(x)^-1 grad(x)."""
    hess_inv = mat_inv(f.evaluate_hess(x))
    return -hess_inv @ f.evaluate_grad(x)
def newton_equality_const_dir(f, x):
    """Newton direction under the linear equality constraint f.A x = b.

    Solves the KKT system [[H, A^T], [A, 0]] [d; w] = [-g; 0] and returns
    the primal direction d (the Lagrange multiplier block is discarded).
    """
    n_eq = f.A.shape[0]
    kkt = np.vstack([np.hstack([f.evaluate_hess(x), f.A.T]),
                     np.hstack([f.A, np.zeros((1, n_eq))])])
    rhs = np.vstack([-f.evaluate_grad(x), np.zeros((1, n_eq))])
    solution = np.linalg.solve(kkt, rhs)
    return solution[:f.A.shape[1]]
def mat_inv(A):
    """Invert square matrix A by solving A X = I."""
    identity = np.eye(A.shape[0])
    return np.linalg.solve(A, identity)
def get_step_size_wolfe(f, xk, pk, init_step_len, slope_ratio, back_track_factor):
    """Backtracking line search: shrink the step until the sufficient-decrease
    (first Wolfe / Armijo) condition holds; raise after 50 shrink attempts."""
    step = init_step_len
    for _ in range(50):
        candidate = xk + step * pk.reshape(xk.shape)
        threshold = f.evaluate(xk) + slope_ratio * step * f.evaluate_grad(xk).T @ pk
        if f.evaluate(candidate) <= threshold:
            return step
        step *= back_track_factor
    raise Exception('1st wolfe condition was not applied')
| {"/Code/tests/test_unconstrained_min.py": ["/Code/src/unconstrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/unconstrained_min.py": ["/Code/src/utils.py"], "/Code/tests/test_constrained_min.py": ["/Code/src/constrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/constrained_min.py": ["/Code/src/unconstrained_min.py"]} |
51,174 | aviadar/Nemerical-Optimization | refs/heads/main | /Code/src/utils.py | import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.collections import PatchCollection
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib.patches import Polygon
import numpy as np
def report(iteration, x, fx, x_diff, fx_diff):
    """Print one line-search iteration: location, objective value, and the
    step/objective deltas (uses f-string `=` debug formatting)."""
    print(f'{iteration=}, {x=}, {fx=}, {x_diff=}, {fx_diff=}')
def final_report(success, last_x):
    """Print whether the run converged and the final iterate."""
    print(f'the convergence is {success} at last x={last_x}')
def plot_val_hist(val_hist, title):
    """Show a plot of objective values over iterations (uses the implicit
    current pyplot figure)."""
    plt.title(title)
    plt.xlabel('Iterations')
    plt.ylabel('objective function value')
    plt.plot(val_hist)
    plt.show()
def plot_contours_paths(f, x_hist, title):
    """Show the optimization path over f: a 3D contour surface on the left
    and a labeled 2D contour map on the right, both with the iterate path
    and the final point highlighted."""
    fig = plt.figure()
    fig.suptitle(title)
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    # Evaluate f on a fixed grid covering the example problems' paths.
    x1 = np.linspace(-3, 3, 1000)
    x2 = np.linspace(-8, 3, 1000)
    X, Y = np.meshgrid(x1, x2)
    Z = np.zeros(X.shape)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            Z[i, j] = f.evaluate(np.array([[X[i, j]], [Y[i, j]]]))
    ax.contour3D(X, Y, Z, 50)
    # Lift each iterate to (x0, x1, f(x)) for the 3D path.
    x1_hist = []
    x2_hist = []
    z_hist = []
    for x in x_hist.T:
        fx = f.evaluate(x)
        x1_hist.append(x[0])
        x2_hist.append(x[1])
        z_hist.append(fx)
    ax.plot(x1_hist, x2_hist, z_hist, '-o', label='path')
    ax.plot(x1_hist[-1], x2_hist[-1], z_hist[-1], marker='^', markerfacecolor='yellow', markersize=12, label='final x')
    ax.set_xlabel('x0')
    ax.set_ylabel('x1')
    ax.set_zlabel('f(x)')
    ax.legend()
    # Right panel: labeled 2D contour map with the same path.
    ax1 = fig.add_subplot(1, 2, 2)
    CS = ax1.contour(X, Y, Z, 30)
    ax1.clabel(CS, inline=True, fontsize=10)
    ax1.plot(x1_hist, x2_hist, '-o', label='path')
    ax1.plot(x1_hist[-1], x2_hist[-1], marker='^', markerfacecolor='yellow', markersize=12, label='final x')
    plt.xlabel('x0')
    plt.ylabel('x1')
    ax1.legend()
    plt.show()
def plot_qp(f, x_hist):
    """Show the QP convergence: the path in 3D over the feasible simplex
    x0+x1+x2 = 1, and its projection onto the (x0, x1) plane with contours
    of f restricted to the simplex."""
    fig = plt.figure()
    fig.suptitle('QP convergence')
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    # Feasible region: the triangle with vertices on the coordinate axes.
    poly3d = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    ax.add_collection3d(Poly3DCollection(poly3d, alpha=0.5))
    x0 = np.linspace(0, 1, 100)
    x1 = np.linspace(0, 1, 100)
    X, Y = np.meshgrid(x0, x1)
    Z = 1 - X - Y  # points on the constraint plane x0+x1+x2 = 1
    # C holds f evaluated on the plane; used for the 2D contours below.
    C = np.zeros(X.shape)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            C[i, j] = f.evaluate(np.array([[X[i, j]], [Y[i, j]], [Z[i, j]]]))
    ax.plot(x_hist[0, :], x_hist[1, :], x_hist[2, :], '-o', label='path')
    ax.plot(x_hist[0, -1], x_hist[1, -1], x_hist[2, -1], marker='^', markerfacecolor='yellow', markersize=12,
            label='final x')
    plt.xlabel('x0')
    plt.ylabel('x1')
    ax.set_zlabel('x2')
    ax.legend()
    # Right panel: projection onto (x0, x1) with the feasible triangle shaded.
    ax1 = fig.add_subplot(1, 2, 2)
    poly = np.array([[1, 0], [0, 1], [0, 0]])
    patch = [Polygon(poly, True)]
    collection = PatchCollection(patch, alpha=0.5, label='feasible region')
    ax1.add_collection(collection)
    CS = plt.contour(X, Y, C, 10)
    plt.clabel(CS, inline=True, fontsize=10)
    plt.plot(x_hist[0, :], x_hist[1, :], '-o', label='path')
    plt.plot(x_hist[0, -1], x_hist[1, -1], marker='^', markerfacecolor='yellow', markersize=12, label='final x')
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.legend()
    plt.show()
def plot_lp(f, x_hist):
    """Show the LP convergence: the feasible polygon, contours of f, and
    the iterate path projected onto (x0, x1)."""
    fig, ax = plt.subplots()
    # Feasible polygon for the LP example's four inequality constraints.
    poly = np.array([[1, 0], [2, 0], [2, 1], [0, 1]])
    patch = [Polygon(poly, True)]
    collection = PatchCollection(patch, alpha=0.5, label='feasible region')
    ax.add_collection(collection)
    # Grid slightly larger than the polygon for the contour backdrop.
    x0 = np.linspace(-0.1, 2.1, 1000)
    x1 = np.linspace(-0.1, 1.1, 1000)
    X, Y = np.meshgrid(x0, x1)
    C = np.zeros(X.shape)
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            C[i, j] = f.evaluate(np.array([[X[i, j]], [Y[i, j]]]))
    CS = plt.contour(X, Y, C, 30)
    plt.clabel(CS, inline=True, fontsize=10)
    ax.plot(x_hist[0, :], x_hist[1, :], '-o', label='path')
    ax.plot(x_hist[0, -1], x_hist[1, -1], marker='^', markerfacecolor='yellow', markersize=12, label='final x')
    plt.legend()
    plt.xlabel('x0')
    plt.ylabel('x1')
    plt.show()
| {"/Code/tests/test_unconstrained_min.py": ["/Code/src/unconstrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/unconstrained_min.py": ["/Code/src/utils.py"], "/Code/tests/test_constrained_min.py": ["/Code/src/constrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/constrained_min.py": ["/Code/src/unconstrained_min.py"]} |
51,175 | aviadar/Nemerical-Optimization | refs/heads/main | /Code/tests/examples.py | import numpy as np
from abc import ABC, abstractmethod
# Positive-definite matrices defining the three quadratic test objectives.
Q1_quad = np.array([[1, 0], [0, 1]])  # identity: circular contours
Q2_quad = np.array([[5, 0], [0, 1]])  # axis-aligned elliptical contours
# R^T D R with R a 30-degree rotation (cos 30 = sqrt(3)/2, sin 30 = 1/2):
# a rotated ellipse.
Q3_quad = np.array([[np.sqrt(3) / 2, -0.5], [0.5, np.sqrt(3) / 2]]).transpose() @ np.array(
    [[5, 0], [0, 1]]) @ np.array(
    [[np.sqrt(3) / 2, -0.5], [0.5, np.sqrt(3) / 2]])
class BaseLineSearchFunction(ABC):
    """Interface for objectives used by the line-search minimizers: value,
    gradient and Hessian at a point x."""
    @abstractmethod
    def __init__(self):
        pass
    @abstractmethod
    def evaluate(self, x):
        # f(x)
        pass
    @abstractmethod
    def evaluate_grad(self, x):
        # gradient of f at x
        pass
    @abstractmethod
    def evaluate_hess(self, x):
        # Hessian of f at x
        pass
class QuadraticFunction(BaseLineSearchFunction):
    """Quadratic objective f(x) = x^T Q x for a fixed matrix Q."""

    def __init__(self, Q):
        self.Q = Q

    def evaluate(self, x):
        """Value x^T Q x (a 1x1 array for a column-vector x)."""
        xt = x.transpose()
        return xt @ self.Q @ x

    def evaluate_grad(self, x):
        """Gradient 2 Q x (valid since the example Q matrices are symmetric)."""
        return 2 * self.Q @ x

    def evaluate_hess(self, x=None):
        """Constant Hessian 2 Q; x is ignored."""
        return self.Q * 2
class RosenbrockFunction(BaseLineSearchFunction):
    """Rosenbrock banana function f(x) = 100 (x1 - x0^2)^2 + (1 - x0)^2."""

    def __init__(self):
        pass

    def evaluate(self, x):
        """Objective value at the column vector x."""
        valley_term = 100 * np.power(x[1] - np.power(x[0], 2), 2)
        return valley_term + np.power(1 - x[0], 2)

    def evaluate_grad(self, x):
        """Analytic gradient, returned with shape (2, 1)."""
        d0 = 400 * np.power(x[0], 3) - 400 * x[0] * x[1] + 2 * x[0] - 2
        d1 = -200 * np.power(x[0], 2) + 200 * x[1]
        return np.array([[d0], [d1]]).reshape((2, 1))

    def evaluate_hess(self, x):
        """Analytic Hessian, returned with shape (2, 2)."""
        h00 = (1200 * np.power(x[0], 2) - 400 * x[1] + 2)
        off_diag = (-400 * x[0])
        return np.array([[h00, off_diag],
                         [off_diag, np.array([200])]]).reshape(2, 2)
## constrined part ##
class QP_f0(BaseLineSearchFunction):
    """QP objective x0^2 + x1^2 + (x2 + 1)^2, scaled by the barrier
    parameter t (mutated by the interior-point outer loop)."""

    def __init__(self, t=1.0):
        self.t = t

    @property
    def t(self):
        return self._t

    @t.setter
    def t(self, value):
        self._t = value

    ## f0 = x0^2+x1^1+(x2+1)^2 ##
    def evaluate(self, x):
        """Value t * (x0^2 + x1^2 + (x2+1)^2)."""
        shifted = x.copy()
        shifted[2] += 1
        return (shifted ** 2).sum() * self.t

    def evaluate_grad(self, x):
        """Gradient 2 t (x0, x1, x2+1)."""
        shifted = x.copy()
        shifted[2] += 1
        return 2 * shifted * self.t

    def evaluate_hess(self, x=None):
        """Constant Hessian 2 t I; x is ignored."""
        return 2 * np.eye(3) * self.t
class QP_f1(BaseLineSearchFunction):
    """Log-barrier term for the inequality x0 >= 0 (log(x0) and its
    derivatives; the aggregate objective subtracts these)."""

    def __init__(self):
        pass

    def evaluate(self, x):
        return np.log(x[0])

    def evaluate_grad(self, x):
        zero = np.array([0])
        return np.array([1 / x[0], zero, zero]).reshape(-1, 1)

    def evaluate_hess(self, x):
        H = np.zeros((3, 3))
        H[0, 0] = -1 / (x[0] ** 2)
        return H
class QP_f2(BaseLineSearchFunction):
    """Log-barrier term for the inequality x1 >= 0."""

    def __init__(self):
        pass

    def evaluate(self, x):
        return np.log(x[1])

    def evaluate_grad(self, x):
        zero = np.array([0])
        return np.array([zero, 1 / x[1], zero]).reshape(-1, 1)

    def evaluate_hess(self, x):
        H = np.zeros((3, 3))
        H[1, 1] = -1 / (x[1] ** 2)
        return H
class QP_f3(BaseLineSearchFunction):
    """Log-barrier term for the inequality x2 >= 0."""

    def __init__(self):
        pass

    def evaluate(self, x):
        return np.log(x[2])

    def evaluate_grad(self, x):
        zero = np.array([0])
        return np.array([zero, zero, 1 / x[2]]).reshape(-1, 1)

    def evaluate_hess(self, x):
        H = np.zeros((3, 3))
        H[2, 2] = -1 / (x[2] ** 2)
        return H
class ConstrainedQuadraticFunction(BaseLineSearchFunction):
    """Log-barrier objective for the example QP: f0 minus the log terms of
    the three nonnegativity constraints, with the equality constraint
    x0 + x1 + x2 = 1 exposed through A and b."""

    def __init__(self):
        self.f0 = QP_f0()
        self.f1 = QP_f1()
        self.f2 = QP_f2()
        self.f3 = QP_f3()

    @property
    def A(self):
        # Single equality-constraint row: x0 + x1 + x2 = 1.
        return np.array([1, 1, 1]).reshape(1, -1)

    @property
    def b(self):
        return np.array([1])

    @property
    def inequality_constraints(self):
        return [self.f1, self.f2, self.f3]

    def evaluate(self, x):
        total = self.f0.evaluate(x)
        for barrier in self.inequality_constraints:
            total -= barrier.evaluate(x)
        return total

    def evaluate_grad(self, x):
        total = self.f0.evaluate_grad(x)
        for barrier in self.inequality_constraints:
            total -= barrier.evaluate_grad(x)
        return total

    def evaluate_hess(self, x):
        total = self.f0.evaluate_hess(x)
        for barrier in self.inequality_constraints:
            total -= barrier.evaluate_hess(x)
        return total
class LP_f0(BaseLineSearchFunction):
    """LP objective -x0 - x1, scaled by the barrier parameter t."""

    def __init__(self, t=1.0):
        self.t = t

    @property
    def t(self):
        return self._t

    @t.setter
    def t(self, value):
        self._t = value

    def evaluate(self, x):
        """Value -t * (x0 + x1)."""
        return -self.t * x.sum()

    def evaluate_grad(self, x):
        """Constant gradient (-t, -t)."""
        return -np.ones(2) * self.t

    def evaluate_hess(self, x=None):
        """Zero Hessian (linear objective); x is ignored."""
        return np.zeros((2, 2)) * self.t
class LP_f1(BaseLineSearchFunction):
    """Log-barrier term for the inequality x0 + x1 >= 1."""

    def __init__(self):
        pass

    def evaluate(self, x):
        slack = x.sum() - 1
        return np.log(slack)

    def evaluate_grad(self, x):
        slack = x.sum() - 1
        return np.ones(2) / slack

    def evaluate_hess(self, x):
        slack = x.sum() - 1
        return -np.ones((2, 2)) / slack ** 2
class LP_f2(BaseLineSearchFunction):
    """Log-barrier term for the inequality x1 <= 1."""

    def __init__(self):
        pass

    def evaluate(self, x):
        return np.log(1 - x[1])

    def evaluate_grad(self, x):
        zero = np.array([0])
        return np.array([zero, -1 / (1 - x[1])]).reshape(2,)

    def evaluate_hess(self, x):
        H = np.zeros((2, 2))
        H[1, 1] = -1 / (1 - x[1]) ** 2
        return H
class LP_f3(BaseLineSearchFunction):
    """Log-barrier term for the inequality x0 <= 2."""

    def __init__(self):
        pass

    def evaluate(self, x):
        return np.log(2 - x[0])

    def evaluate_grad(self, x):
        zero = np.array([0])
        return np.array([-1 / (2 - x[0]), zero]).reshape(2,)

    def evaluate_hess(self, x):
        H = np.zeros((2, 2))
        H[0, 0] = -1 / (2 - x[0]) ** 2
        return H
class LP_f4(BaseLineSearchFunction):
    """Log-barrier term for the inequality x1 >= 0."""

    def __init__(self):
        pass

    def evaluate(self, x):
        return np.log(x[1])

    def evaluate_grad(self, x):
        zero = np.array([0])
        return np.array([zero, 1 / x[1]]).reshape(2,)

    def evaluate_hess(self, x):
        H = np.zeros((2, 2))
        H[1, 1] = -1 / x[1] ** 2
        return H
class ConstrainedLPFunction(BaseLineSearchFunction):
    """Log-barrier objective for the example LP: f0 minus the four
    inequality log terms; there are no equality constraints, so A and b
    are None."""

    def __init__(self):
        self.f0 = LP_f0()
        self.f1 = LP_f1()
        self.f2 = LP_f2()
        self.f3 = LP_f3()
        self.f4 = LP_f4()

    @property
    def A(self):
        # No equality constraints for the LP example.
        return None

    @property
    def b(self):
        return None

    @property
    def inequality_constraints(self):
        return [self.f1, self.f2, self.f3, self.f4]

    def evaluate(self, x):
        total = self.f0.evaluate(x)
        for barrier in self.inequality_constraints:
            total -= barrier.evaluate(x)
        return total

    def evaluate_grad(self, x):
        total = self.f0.evaluate_grad(x)
        for barrier in self.inequality_constraints:
            total -= barrier.evaluate_grad(x)
        return total

    def evaluate_hess(self, x):
        total = self.f0.evaluate_hess(x)
        for barrier in self.inequality_constraints:
            total -= barrier.evaluate_hess(x)
        return total
| {"/Code/tests/test_unconstrained_min.py": ["/Code/src/unconstrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/unconstrained_min.py": ["/Code/src/utils.py"], "/Code/tests/test_constrained_min.py": ["/Code/src/constrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/constrained_min.py": ["/Code/src/unconstrained_min.py"]} |
51,176 | aviadar/Nemerical-Optimization | refs/heads/main | /Code/tests/test_constrained_min.py | import numpy as np
import unittest
from Code.src.constrained_min import interior_pt
from Code.tests.examples import ConstrainedQuadraticFunction, ConstrainedLPFunction
from Code.src.utils import final_report, plot_qp, plot_lp
class TestConstrainedMin(unittest.TestCase):
    """Run the interior-point minimizer on the example QP (with an equality
    constraint) and LP (inequalities only) and plot the resulting paths."""
    def test_qp(self):
        # Strictly feasible start on the plane x0 + x1 + x2 = 1.
        x0 = np.array([0.1, 0.2, 0.7]).reshape(-1, 1)
        obj_tol = 10e-12
        param_tol = 10e-8
        max_inner_loops = 100
        constrained_qp = ConstrainedQuadraticFunction()
        success, last_x, val_hist, x_hist = interior_pt(func=constrained_qp, x0=x0, obj_tol=obj_tol,
                                                        param_tol=param_tol, max_inner_loops=max_inner_loops)
        final_report(success, last_x)
        plot_qp(constrained_qp.f0, x_hist)
    def test_lp(self):
        # Strictly feasible interior point of the LP polygon.
        x0 = np.array([0.5, 0.75]).reshape(-1, 1)
        obj_tol = 10e-12
        param_tol = 10e-8
        max_inner_loops = 100
        constrained_lp = ConstrainedLPFunction()
        success, last_x, val_hist, x_hist = interior_pt(func=constrained_lp, x0=x0, obj_tol=obj_tol,
                                                        param_tol=param_tol, max_inner_loops=max_inner_loops)
        final_report(success, last_x)
        plot_lp(constrained_lp.f0, x_hist)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| {"/Code/tests/test_unconstrained_min.py": ["/Code/src/unconstrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/unconstrained_min.py": ["/Code/src/utils.py"], "/Code/tests/test_constrained_min.py": ["/Code/src/constrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/constrained_min.py": ["/Code/src/unconstrained_min.py"]} |
51,177 | aviadar/Nemerical-Optimization | refs/heads/main | /Code/src/constrained_min.py | import numpy as np
from Code.src.unconstrained_min import line_search
def interior_pt(func, x0, obj_tol, param_tol, max_inner_loops,
                t=1.0, mu=10.0, epsilon=1e-6, max_outer_loops=100):
    """Log-barrier interior-point method.

    Repeatedly minimizes the barrier objective with barrier weight t
    (scaling only func.f0), multiplying t by mu each outer iteration,
    until the duality-gap bound m/t drops below epsilon.

    Returns (success, last_x, val_hist, x_hist) where val_hist holds the
    unscaled f0 values along the accumulated iterate path.
    """
    x_hist = None
    new_x = x0.copy()
    success = False
    # m = number of inequality constraints; m/t bounds the duality gap.
    m = float(len(func.inequality_constraints))
    # Problems with equality constraints need the KKT Newton direction.
    if func.A is None:
        dir_selection_method = 'nt'
    else:
        dir_selection_method = 'nt_equality'
    for iteration in range(max_outer_loops):
        func.f0.t = t
        _, last_x, _, x_hist_temp = line_search(func, new_x, obj_tol, param_tol, max_inner_loops,
                                                dir_selection_method)
        # val_hist.append(val_hist_temp)
        if x_hist is None:
            x_hist = x_hist_temp
        else:
            x_hist = np.append(x_hist, x_hist_temp, axis=1)
        if m / t < epsilon:
            success = True
            break
        new_x = last_x
        t *= mu
    # Recompute the value history with the unscaled objective (t = 1).
    func.f0.t = 1.0
    val_hist = [func.f0.evaluate(x) for x in x_hist.T]
    return success, last_x, val_hist, x_hist
| {"/Code/tests/test_unconstrained_min.py": ["/Code/src/unconstrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/unconstrained_min.py": ["/Code/src/utils.py"], "/Code/tests/test_constrained_min.py": ["/Code/src/constrained_min.py", "/Code/tests/examples.py", "/Code/src/utils.py"], "/Code/src/constrained_min.py": ["/Code/src/unconstrained_min.py"]} |
51,203 | Hyoutan-tokyo-504/clock-degree | refs/heads/main | /test.py | import unittest
import contextlib
import Clock_degree as cd
class redirect_stdin(contextlib._RedirectStream):
    """Context manager that temporarily replaces sys.stdin, mirroring
    contextlib.redirect_stdout. NOTE(review): relies on the private
    _RedirectStream helper, which may change between Python versions."""
    _stream = "stdin"
class ClockTest(unittest.TestCase):
    """Tests for Clock_degree: input parsing, validation, and angle math."""

    def setUp(self):
        # No fixtures needed.
        pass

    def tearDown(self):
        # No cleanup needed.
        pass

    def _calinput(self):
        # Helper: run clockinput() (callers redirect stdin first).
        return cd.clockinput()

    def test1(self):
        # clockinput() should split "22 40" into ['22', '40'].
        from io import StringIO
        fake_stdin = StringIO("22 40\n")
        with redirect_stdin(fake_stdin):
            parsed = self._calinput()
        self.assertEqual(parsed, ['22', '40'])

    def test2(self):
        # errorjudge: valid time -> False; out-of-range or non-numeric -> True.
        cases = [('22', '40', False), ('25', '40', True), ('25', '100', True),
                 ('aa', '40', True), ('30', 'bb', True)]
        for hour, minute, expected in cases:
            self.assertEqual(cd.errorjudge(hour, minute), expected)

    def test3(self):
        # clock_degree across normal, PM, reflex-angle and zero cases.
        cases = [(10, 40, 60), (22, 40, 60), (11, 10, 90), (0, 0, 0)]
        for hour, minute, expected in cases:
            self.assertEqual(cd.clock_degree(hour, minute), expected)
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| {"/test.py": ["/Clock_degree.py"]} |
51,204 | Hyoutan-tokyo-504/clock-degree | refs/heads/main | /Clock_degree.py | #!/usr/bin/env python
# coding: utf-8
# In[10]:
# Smaller angle between the hour and minute hands of an analog clock.
def clock_degree(hour, minute):
    """Return the smaller angle (whole degrees, 0-180) between the hands.

    The hour hand is treated as sitting exactly on the hour mark (it does
    not creep forward with the minutes).
    """
    if hour >= 12:
        hour = hour - 12
    hour_angle = 360 * (hour / 12)
    minute_angle = 360 * (minute / 60)
    gap = abs(hour_angle - minute_angle)
    # Report the minor angle when the raw gap is reflex (> 180 degrees).
    return int(360 - gap) if gap > 180 else int(gap)
# Read "HH MM" from standard input and return the two fields as strings.
def clockinput():
    """Prompt (in Japanese) for a time; return [hour, minute] as strings.

    Raises ValueError if the input does not split into exactly two fields.
    """
    print('何時何分ですか?(入力例:22 40)')
    hour, minute = input().split()
    return[hour,minute]
# Validate the hour/minute strings; True means the input is invalid.
def errorjudge(hour, minute):
    """Return True (after printing a Japanese message) for non-numeric or
    out-of-range input; False for a valid 24-hour time."""
    try:
        hour = int(hour)
        minute = int(minute)
    except ValueError:
        print('整数値を入力してください。')
        return True
    if not (0 <= hour <= 23) or not (0 <= minute <= 59):
        print('第一引数は0~23、第二引数は0~59の整数値を入力してください。')
        return True
    return False
# Drive the prompt -> validate -> compute loop until a valid time is given.
def clockmain():
    """Repeatedly prompt for a time; once valid, print the hand angle."""
    while True:
        clocklist = clockinput()
        error = errorjudge(*clocklist)
        if error == True:
            continue
        [hour, minute] = map(int,clocklist)
        print('求まる角度は'+str(clock_degree(hour, minute)) +'度です。')
        break
# In[ ]:
if __name__ == "__main__":
    # Script entry point: run the interactive clock-angle prompt.
    clockmain()
| {"/test.py": ["/Clock_degree.py"]} |
51,205 | Hyoutan-tokyo-504/clock-degree | refs/heads/main | /birthday_symbol.py | #!/usr/bin/env python
# coding: utf-8
# ## the relationship between date and zodiac signs and chinese zodiac signs
# In[17]:
# The argument is a date already split into a list, e.g. ['11', '29'].
import sys
# (western sign, inclusive start as MMDD int, inclusive end as MMDD int).
# Capricorn is absent because its range wraps the year boundary; it is the
# fallback in zodiacsigns().
zodiaclist = [['Aries',321,419],['Taurus',420,520],['Gemini',521,621],
              ['Cancer',622,722],['Leo',723,822],['Virgo',823,922],
              ['Libra',923,1023],['Scorpio',1024,1122],['Sagittarius',1123,1221],
              ['Aquarius',120,218],['Pisces',219,320]]
# (Chinese zodiac animal, year % 12 value corresponding to that animal).
chinesezodiaclist = [['子',4],['丑',5],['寅',6],['卯',7],
                     ['辰',8],['巳',9],['午',10],['未',11],
                     ['申',0],['酉',1],['戌',2],['亥',3]]
# In[18]:
def zodiacsigns(date):
    """Map [month, day] (as strings) to the western zodiac sign name.

    Capricorn's range wraps the new year, so any date matching none of the
    listed ranges falls through to it.
    """
    key = int(date[0]) * 100 + int(date[1])  # e.g. ['11','29'] -> 1129
    for name, start, end in zodiaclist:
        if start <= key <= end:
            return name
    return 'Capricorn'
def chinesezodiacsigns(year):
    """Map a year (string or int) to its Chinese zodiac animal via year % 12."""
    remainder = int(year) % 12
    for animal, rem in chinesezodiaclist:
        if remainder == rem:
            return animal
# In[19]:
if __name__ == "__main__":
    # raw_input() on Python 2, input() on Python 3.
    if sys.version_info.major == 3:
        date = input('日付を入力してください。')
    else:
        date = raw_input('日付を入力してください。')
    # Input is whitespace-separated dates, each formatted YYYY/MM/DD.
    date = date.split()
    split_date = []
    for i in range(len(date)):
        split_date.append(date[i].split('/'))
    # Print "<original date> <western sign> <chinese sign>" per date.
    for i in range(len(split_date)):
        zodiac = zodiacsigns([split_date[i][1],split_date[i][2]])
        chinesezodiac = chinesezodiacsigns(split_date[i][0])
        print(date[i]+' '+zodiac+' '+chinesezodiac)
# In[ ]:
| {"/test.py": ["/Clock_degree.py"]} |
51,207 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0044_auto_20210802_2127.py | # Generated by Django 3.2.5 on 2021-08-03 01:27
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: adds Submission.returned and refreshes hard-coded
    datetime defaults on several date fields."""

    dependencies = [
        ('educationPortal', '0043_auto_20210801_1936'),
    ]

    operations = [
        migrations.AddField(
            model_name='submission',
            name='returned',
            field=models.BooleanField(default=False),
        ),
        # NOTE(review): these defaults are frozen timestamps, which usually
        # means the model used default=datetime.now() (evaluated once at
        # makemigrations time) instead of a callable such as timezone.now -
        # confirm in models.py.
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 2, 21, 27, 0, 873349)),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 2, 21, 27, 0, 873349)),
        ),
        migrations.AlterField(
            model_name='submission',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 8, 2, 21, 27, 0, 877454)),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 2, 21, 27, 0, 877454)),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,208 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0036_auto_20210731_2146.py | # Generated by Django 3.2.5 on 2021-08-01 01:46
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0035_auto_20210731_2144'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 46, 25, 765073)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 46, 25, 764076)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 46, 25, 765073)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,209 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0003_auto_20210725_1530.py | # Generated by Django 3.2.5 on 2021-07-25 19:30
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0002_profile'),
]
operations = [
migrations.AddField(
model_name='user',
name='userType',
field=models.CharField(default='student', max_length=20),
),
migrations.DeleteModel(
name='Profile',
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,210 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/views.py | from datetime import date, datetime, time
import educationPortal
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.backends import AllowAllUsersModelBackend
from django.db import IntegrityError
from django.http import HttpResponse, HttpResponseRedirect
from django.http.response import JsonResponse
from django.shortcuts import render
from django.urls import reverse
import json
from .models import Assignment, Comment, Conversation, FileModel, MCanswer, MultipleChoiceQuestion, Quiz, QuizSubmission, Submission, User, Classroom, Announcement, Text
from django.shortcuts import redirect
import uuid
import sys
from django.db.models import Q
import time
from django.core.mail import send_mail
from django.core import serializers
def index(request):
if request.user.is_authenticated:
if request.user.userType.lower() == "teacher":
allClasses = Classroom.objects.all().filter(teacher=request.user)
return render(request, "educationPortal/index.html", {
"classes": allClasses,
"user": request.user
})
if request.user.userType.lower() == "student":
allClasses = Classroom.objects.filter(students=request.user)
print(allClasses)
return render(request, "educationPortal/index.html", {
"classes": allClasses,
"user": request.user
})
else:
return login_view(request)
def login_view(request):
if request.method == "POST":
# Attempt to sign user in
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(request, username=username, password=password)
# Check if authentication successful
if user is not None:
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "educationPortal/login.html", {
"message": "Invalid username and/or password."
})
else:
return render(request, "educationPortal/login.html")
def logout_view(request):
logout(request)
return HttpResponseRedirect(reverse("index"))
def register(request):
if request.method == "POST":
username = request.POST["username"]
email = request.POST["email"]
firstname = request.POST["firstname"]
lastname = request.POST["lastname"]
userType = request.POST["userType"]
# Ensure password matches confirmation
password = request.POST["password"]
confirmation = request.POST["confirmation"]
if password != confirmation:
return render(request, "educationPortal/register.html", {
"message": "Passwords must match."
})
# Attempt to create new user
try:
user = User.objects.create_user(username, email, password)
user.first_name = firstname
user.last_name = lastname
user.userType = userType
user.profile_pic = request.FILES.get('img')
user.save()
except IntegrityError:
return render(request, "educationPortal/register.html", {
"message": "Username already taken."
})
login(request, user)
return HttpResponseRedirect(reverse("index"))
else:
return render(request, "educationPortal/register.html")
def createNewClassroom(request, name):
if request.method == "POST":
data = json.loads(request.body)
name = data["name"]
classroom = Classroom()
classroom.name = name
classroom.teacher = request.user
classroom.theme = data["theme"]
classroom.subject = data["subject"]
allclasses = Classroom.objects.all()
code = uuid.uuid4().hex[:8].upper()
unique = True
for c in allclasses:
if c.code == code:
unique = False
# checking for uniqueness of code
while unique == False:
code = uuid.uuid4().hex[:8].upper()
for c in allclasses:
if c.code == code:
unique = False
classroom.code = code
classroom.save()
elif request.method == "DELETE":
data = json.loads(request.body)
id = data["id"]
allclasses = Classroom.objects.filter(id=id).delete()
return HttpResponse()
def JoinClassroom(request, code):
if request.method == "GET":
set = Classroom.objects.all().values_list('code', flat=True)
codes = []
for s in set:
codes.append(s)
return JsonResponse({
'codes': codes
})
if request.method == "PUT":
data = json.loads(request.body)
code = data["code"]
classroom = Classroom.objects.get(code=code)
classroom.students.add(request.user)
classroom.save()
print('hey')
return render(request, "educationPortal/ViewClassroom.html", {
"class": classroom
})
if request.method == "DELETE":
data = json.loads(request.body)
code = data["code"]
classroom = Classroom.objects.get(code=code)
classroom.students.remove(request.user)
classroom.save()
return render(request, "educationPortal/ViewClassroom.html", {
"class": classroom
})
def ViewClassroom(request, code):
if request.user.is_authenticated:
selectedClass = Classroom.objects.get(code=code)
announcements = Announcement.objects.filter(
classroom=selectedClass).order_by('-date')
return render(request, "educationPortal/ViewClassroom.html", {
"class": selectedClass,
"announcements": announcements
})
else:
return login_view(request)
def makeAnnouncement(request):
if request.method == "POST":
data = json.loads(request.body)
print()
announcement = Announcement()
announcement.body = data["body"]
announcement.creator = request.user
announcement.date = datetime.now()
announcement.classroom = Classroom.objects.get(code=data["code"])
announcement.save()
return HttpResponse()
def addComment(request):
if request.method == "POST":
data = json.loads(request.body)
print(data["text"])
print(data["id"])
comment = Comment()
comment.commenter = request.user
comment.text = data["text"]
comment.date = datetime.now()
comment.save()
announcement = Announcement.objects.get(id=data["id"])
announcement.comments.add(comment)
announcement.save()
return HttpResponse()
def conversations(request):
if request.user.is_authenticated:
convos = (Conversation.objects.filter(
Q(user1=request.user) | Q(user2=request.user))).order_by('-lastInteracted')
print(convos)
return render(request, "educationPortal/conversations.html", {
"students": User.objects.all().exclude(id=request.user.id),
"conversations": convos
})
else:
return login_view(request)
def addConversation(request):
if request.method == "POST":
data = json.loads(request.body)
username = data["username"]
to = User.objects.get(username=username)
if not Conversation.objects.filter(user1=request.user, user2=to).exists() and not Conversation.objects.filter(user1=to, user2=request.user).exists():
conversation = Conversation()
conversation.user1 = request.user
conversation.user2 = to
conversation.lastInteracted = time.time()
conversation.save()
if request.method == "DELETE":
data = json.loads(request.body)
id = data["id"]
Conversation.objects.get(id=id).delete()
return HttpResponse()
def sendText(request):
if request.method == "POST":
data = json.loads(request.body)
id = data["id"]
sender = request.user
text = data["text"]
reciever = None
conversation = Conversation.objects.get(id=id)
if sender == conversation.user1:
reciever = conversation.user2
else:
reciever = conversation.user1
newText = Text()
newText.sender = sender
newText.reciever = reciever
newText.date = datetime.now()
newText.text = text
newText.save()
conversation.texts.add(newText)
conversation.lastInteracted = time.time()
conversation.save()
return HttpResponse()
def editProfileImage(request):
if request.method == "POST":
user = request.user
# print(request.__dict__, file=sys.stderr)
user.profile_pic.delete()
user.profile_pic = request.FILES.get('img')
user.save()
next = request.POST.get('next', '/')
return HttpResponseRedirect(next)
def assignments(request, code):
classroom = Classroom.objects.get(code=code)
assignments = Assignment.objects.filter(classroom=classroom)
return render(request, "educationPortal/assignments.html", {
"class": classroom,
"date": datetime.now(),
"assignments": assignments
})
def createAssignment(request, code):
classroom = Classroom.objects.get(code=code)
if request.method == "POST":
assignment = Assignment()
assignment.title = request.POST["title"]
assignment.description = request.POST["instructions"]
assignment.classroom = classroom
assignment.duedate = request.POST["assignmentDueDate"]
assignment.save()
for f in request.FILES.getlist('files'):
file = FileModel()
file.file = f
file.save()
assignment.givenFiles.add(file)
assignment.save()
return render(request, "educationPortal/assignments.html", {
"class": classroom,
"defaultDate": datetime.now(),
"assignments": Assignment.objects.filter(classroom=classroom)
})
def viewAssignment(request, code, id):
classroom = Classroom.objects.get(code=code)
assignment = Assignment.objects.get(id=id)
submission = None
if(assignment.submissions.filter(user=request.user).exists()): # for student
submission = assignment.submissions.get(user=request.user)
allSubmissions = assignment.submissions.all() # all submissions
allUserNamesSubmitted = assignment.submissions.values(
'user__username') # all usersnames who have submitted
non_submitters = classroom.students.exclude(
username__in=allUserNamesSubmitted) # students who haven't submitted
return render(request, "educationPortal/viewAssignment.html", {
"class": classroom,
"assignment": assignment,
"submission": submission, # one student submission (for student)
"allSubmissions": allSubmissions, # for teacher
"non_submitters": non_submitters, # for teacher
"overdue": datetime.now().replace(tzinfo=None) > (assignment.duedate).replace(tzinfo=None)
})
def submitAssignment(request, code, id):
assignment = Assignment.objects.get(id=id)
if request.method == "POST":
if not assignment.submissions.filter(user=request.user).exists():
submission = Submission()
submission.description = request.POST["description"]
submission.user = request.user
submission.date = datetime.now()
submission.save()
for f in request.FILES.getlist('files'):
file = FileModel()
file.file = f
file.save()
submission.files.add(file)
submission.save()
assignment.submissions.add(submission)
assignment.save()
else:
submission = assignment.submissions.get(user=request.user)
submission.description = request.POST["description"]
submission.user = request.user
submission.date = datetime.now()
submission.files.clear()
submission.resubmitted = True
submission.save()
for f in request.FILES.getlist('files'):
file = FileModel()
file.file = f
file.save()
submission.files.add(file)
submission.save()
return viewAssignment(request, code, id)
def gradeAssignment(request, code, assignmentId, id):
submission = Submission.objects.get(id=id)
assignment = Assignment.objects.get(id=assignmentId)
submitter = submission.user
if request.method == "POST":
submission.grade = request.POST["grade"]
submission.save()
# send email to user's email address
subject = "E-learning Classroom: " + \
str(assignment.title) + " - Score Recieved: "
message = "Hello " + request.user.first_name + \
", Your score for this assignment is " + \
str(submission.grade) + " / 100"
send_mail(
subject, # subject
message, # message
'e.learningportal.0@gmail.com', # from email
[submitter.email], # to email
)
return viewAssignment(request, code, assignmentId)
def quizzes(request, code):
classroom = Classroom.objects.get(code=code)
quizzes = Quiz.objects.filter(classroom=classroom)
return render(request, "educationPortal/quizzes.html", {
"class": classroom,
"quizzes": quizzes,
"date": datetime.now()
})
def createQuiz(request):
if request.method == "POST":
data = json.loads(request.body)
classroom = Classroom.objects.get(code=data["code"])
print(data)
name = data['name']
questions = data['questions']
quiz = Quiz()
quiz.name = name
quiz.classroom = classroom
quiz.duedate = data['duedate']
quiz.save()
for q in questions:
mc = MultipleChoiceQuestion()
mc.question = q['question']
mc.option1 = q['option1']
mc.option2 = q['option2']
mc.option3 = q['option3']
mc.option4 = q['option4']
mc.correctOption = int(q['correct'])
mc.save()
quiz.questions.add(mc)
quiz.save()
return HttpResponse()
def viewQuiz(request, code, id):
classroom = Classroom.objects.get(code=code)
quiz = Quiz.objects.get(id=id)
allSubmissions = quiz.submissions.all()
allUserNamesSubmitted = quiz.submissions.values(
'user__username') # all usersnames who have submitted
non_submitters = classroom.students.exclude(
username__in=allUserNamesSubmitted) # students who haven't submitted
# array of student answer arrays
answersArray = []
for sub in allSubmissions:
answersArray.append(list(sub.answers.values_list('answer', flat=True)))
print(answersArray)
# will be used for the student to see his/her result and grade
submission = quiz.submissions.get(user=request.user) if quiz.submissions.filter(
user=request.user).exists() else None
answers = None
correctanswers = None
correctanswers = list(
quiz.questions.values_list('correctOption', flat=True))
if submission:
answers = list(submission.answers.values_list('answer', flat=True))
return render(request, "educationPortal/ViewQuiz.html", {
"class": classroom,
"quiz": quiz,
"submission": submission,
"allSubmissions": allSubmissions, # teacher stuff
"non_submitters": non_submitters, # teacher stuff
"answers": answers,
"correctanswers": correctanswers, # teacher and student stuff
"answersArray": answersArray
})
def submitQuiz(request, code, id):
if request.method == "POST":
quiz = Quiz.objects.get(id=id)
data = json.loads(request.body)
answers = data["answers"]
correct = 0
s = QuizSubmission()
s.date = datetime.now()
s.save()
# form multiple choice questions from this array. This array is an array of numbers representing the selected option per question.
questions = quiz.questions.all()
for i in range(len(questions)):
m = MCanswer()
m.answer = answers[i]
m.save()
s.answers.add(m)
if answers[i] == questions[i].correctOption:
correct = correct + 1
s.grade = correct
s.user = request.user
s.save()
quiz.submissions.add(s)
quiz.save()
return viewQuiz(request, code, id)
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,211 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0049_auto_20210804_1858.py | # Generated by Django 3.2.5 on 2021-08-04 22:58
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0048_auto_20210804_1521'),
]
operations = [
migrations.AddField(
model_name='quiz',
name='duedate',
field=models.DateTimeField(null=True),
),
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 18, 58, 49, 627254)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 18, 58, 49, 626251)),
),
migrations.AlterField(
model_name='quizsubmission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 4, 18, 58, 49, 632464)),
),
migrations.AlterField(
model_name='submission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 4, 18, 58, 49, 630246)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 18, 58, 49, 628253)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,212 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0040_auto_20210801_1501.py | # Generated by Django 3.2.5 on 2021-08-01 19:01
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0039_auto_20210801_1501'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 1, 15, 1, 29, 59721)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 1, 15, 1, 29, 59721)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 1, 15, 1, 29, 59721)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,213 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0024_auto_20210730_1301.py | # Generated by Django 3.2.5 on 2021-07-30 17:01
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0023_alter_user_profile_pic'),
]
operations = [
migrations.CreateModel(
name='Conversation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.AlterField(
model_name='user',
name='profile_pic',
field=models.ImageField(blank=True, default='blankUserIcon.svg', null=True, upload_to=''),
),
migrations.CreateModel(
name='Text',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField(default=django.utils.timezone.now)),
('text', models.CharField(default='', max_length=1000)),
('conversation', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='text', to='educationPortal.conversation')),
('reciever', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reciever', to=settings.AUTH_USER_MODEL)),
('sender', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sender', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='conversation',
name='user1',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user1', to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='conversation',
name='user2',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='user2', to=settings.AUTH_USER_MODEL),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,214 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0025_auto_20210730_1745.py | # Generated by Django 3.2.5 on 2021-07-30 21:45
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0024_auto_20210730_1301'),
]
operations = [
migrations.RemoveField(
model_name='text',
name='conversation',
),
migrations.AddField(
model_name='conversation',
name='texts',
field=models.ManyToManyField(blank=True, to='educationPortal.Text'),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,215 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0031_auto_20210731_1749.py | # Generated by Django 3.2.5 on 2021-07-31 21:49
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0030_auto_20210731_1748'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 17, 49, 29, 681615)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 17, 49, 29, 680618)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 17, 49, 29, 682612)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,216 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0032_auto_20210731_2140.py | # Generated by Django 3.2.5 on 2021-08-01 01:40
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0031_auto_20210731_1749'),
]
operations = [
migrations.CreateModel(
name='FileModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('file', models.FileField(blank=True, upload_to='')),
],
),
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 40, 58, 633827)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 40, 58, 633827)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 40, 58, 634824)),
),
migrations.CreateModel(
name='Submission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(default='', max_length=1000)),
('files', models.ManyToManyField(blank=True, to='educationPortal.FileModel')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submitter', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Assignment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('description', models.CharField(default='', max_length=20000)),
('duedate', models.DateTimeField()),
('submissions', models.ManyToManyField(blank=True, to='educationPortal.Submission')),
],
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,217 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0030_auto_20210731_1748.py | # Generated by Django 3.2.5 on 2021-07-31 21:48
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0029_auto_20210731_1551'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 17, 48, 27, 680598)),
),
migrations.AlterField(
model_name='conversation',
name='lastInteracted',
field=models.FloatField(),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,218 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0050_auto_20210804_1930.py | # Generated by Django 3.2.5 on 2021-08-04 23:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0049_auto_20210804_1858'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 19, 30, 11, 631073)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 19, 30, 11, 631073)),
),
migrations.AlterField(
model_name='quizsubmission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 4, 19, 30, 11, 638054)),
),
migrations.AlterField(
model_name='submission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 4, 19, 30, 11, 635062)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 19, 30, 11, 632070)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,219 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0051_auto_20210805_0750.py | # Generated by Django 3.2.5 on 2021-08-05 11:50
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0050_auto_20210804_1930'),
]
operations = [
migrations.RemoveField(
model_name='quizsubmission',
name='questions',
),
migrations.AddField(
model_name='quizsubmission',
name='grade',
field=models.IntegerField(default=0),
),
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 7, 50, 46, 858109)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 7, 50, 46, 858109)),
),
migrations.AlterField(
model_name='quizsubmission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 5, 7, 50, 46, 866105)),
),
migrations.AlterField(
model_name='submission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 5, 7, 50, 46, 862105)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 7, 50, 46, 858109)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,220 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0046_auto_20210803_2201.py | # Generated by Django 3.2.5 on 2021-08-04 02:01
import datetime
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0045_auto_20210802_2153'),
]
operations = [
migrations.CreateModel(
name='MultipleChoiceQuestion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('question', models.CharField(max_length=1000)),
('option1', models.CharField(max_length=1000)),
('option2', models.CharField(max_length=1000)),
('option3', models.CharField(max_length=1000)),
('option4', models.CharField(max_length=1000)),
('correctOption', models.IntegerField(default=1)),
('selectedOption', models.IntegerField(default=-1)),
],
),
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 3, 22, 1, 26, 530563)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 3, 22, 1, 26, 526528)),
),
migrations.AlterField(
model_name='submission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 3, 22, 1, 26, 534558)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 3, 22, 1, 26, 530563)),
),
migrations.CreateModel(
name='QuizSubmission',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateField(default=datetime.datetime(2021, 8, 3, 22, 1, 26, 534558))),
('questions', models.ManyToManyField(blank=True, to='educationPortal.MultipleChoiceQuestion')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Quiz',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=1000)),
('classroom', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='educationPortal.classroom')),
('questions', models.ManyToManyField(blank=True, to='educationPortal.MultipleChoiceQuestion')),
('submissions', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='educationPortal.quizsubmission')),
],
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,221 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0052_auto_20210805_1502.py | # Generated by Django 3.2.5 on 2021-08-05 19:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0051_auto_20210805_0750'),
]
operations = [
migrations.CreateModel(
name='MCanswer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('answer', models.IntegerField(default=-1)),
],
),
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 15, 2, 20, 653522)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 15, 2, 20, 653522)),
),
migrations.AlterField(
model_name='quizsubmission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 5, 15, 2, 20, 661509)),
),
migrations.AlterField(
model_name='submission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 5, 15, 2, 20, 657516)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 15, 2, 20, 653522)),
),
migrations.AddField(
model_name='quizsubmission',
name='answers',
field=models.ManyToManyField(null=True, to='educationPortal.MCanswer'),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,222 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0048_auto_20210804_1521.py | # Generated by Django 3.2.5 on 2021-08-04 19:21
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0047_auto_20210804_1439'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 15, 21, 27, 493313)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 15, 21, 27, 493313)),
),
migrations.RemoveField(
model_name='quiz',
name='submissions',
),
migrations.AddField(
model_name='quiz',
name='submissions',
field=models.ManyToManyField(blank=True, to='educationPortal.QuizSubmission'),
),
migrations.AlterField(
model_name='quizsubmission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 4, 15, 21, 27, 501303)),
),
migrations.AlterField(
model_name='submission',
name='date',
field=models.DateField(default=datetime.datetime(2021, 8, 4, 15, 21, 27, 497309)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 15, 21, 27, 493313)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,223 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0028_auto_20210731_1530.py | # Generated by Django 3.2.5 on 2021-07-31 19:30
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('educationPortal', '0027_auto_20210730_2145'),
]
operations = [
migrations.AlterField(
model_name='announcement',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 15, 30, 44, 792515)),
),
migrations.AlterField(
model_name='comment',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 15, 30, 44, 792515)),
),
migrations.AlterField(
model_name='conversation',
name='lastInteracted',
field=models.DateField(default=datetime.datetime(2021, 7, 31, 15, 30, 44, 794545)),
),
migrations.AlterField(
model_name='text',
name='date',
field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 15, 30, 44, 793544)),
),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,224 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0045_auto_20210802_2153.py | # Generated by Django 3.2.5 on 2021-08-03 01:53
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0045 for the educationPortal app.

    Renames ``Submission.returned`` to ``resubmitted`` and refreshes the
    frozen literal-datetime defaults on several date fields.

    NOTE(review): the literal ``datetime.datetime(...)`` defaults were
    baked in at ``makemigrations`` time — symptom of a non-callable
    default in the model (should be ``timezone.now``); confirm in
    models.py rather than editing this applied migration.
    """

    # Must run after 0044.
    dependencies = [
        ('educationPortal', '0044_auto_20210802_2127'),
    ]

    operations = [
        # Pure column rename; no data transformation involved.
        migrations.RenameField(
            model_name='submission',
            old_name='returned',
            new_name='resubmitted',
        ),
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 2, 21, 53, 42, 530133)),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 2, 21, 53, 42, 530133)),
        ),
        migrations.AlterField(
            model_name='submission',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 8, 2, 21, 53, 42, 533125)),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 2, 21, 53, 42, 531129)),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,225 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0047_auto_20210804_1439.py | # Generated by Django 3.2.5 on 2021-08-04 18:39
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0047 for the educationPortal app.

    Gives ``Quiz.name`` a ``'Untitled Quiz'`` default and refreshes the
    frozen literal-datetime defaults on several date fields.

    NOTE(review): literal ``datetime.datetime(...)`` defaults indicate a
    non-callable default in the model (should be ``timezone.now``);
    address in models.py, not by editing this applied migration.
    """

    # Must run after 0046.
    dependencies = [
        ('educationPortal', '0046_auto_20210803_2201'),
    ]

    operations = [
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 14, 39, 29, 269586)),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 14, 39, 29, 268588)),
        ),
        migrations.AlterField(
            model_name='quiz',
            name='name',
            field=models.CharField(default='Untitled Quiz', max_length=1000),
        ),
        migrations.AlterField(
            model_name='quizsubmission',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 8, 4, 14, 39, 29, 275289)),
        ),
        migrations.AlterField(
            model_name='submission',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 8, 4, 14, 39, 29, 274257)),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 4, 14, 39, 29, 269586)),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,226 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/urls.py | from django.urls import path
from . import views
# URL routes for the educationPortal app. Pattern order matters for
# resolution, and the exact route strings / ``name=`` values are part of
# the app's public interface — confirm nothing reverses them before
# renaming.
urlpatterns = [
    # Authentication / account pages.
    path("", views.index, name="index"),
    path("login", views.login_view, name="login"),
    path("logout", views.logout_view, name="logout"),
    path("register", views.register, name="register"),
    # Classroom management (classrooms are addressed by a string code).
    path("createNewClassroom/<str:name>/", views.createNewClassroom,
         name="createNewClassroom"),
    path("ViewClassroom/<str:code>/", views.ViewClassroom, name="ViewClassroom"),
    path("JoinClassroom/<str:code>/",
         views.JoinClassroom, name="JoinClassroom"),
    # Classroom feed interactions.
    path("makeAnnouncement/", views.makeAnnouncement, name="makeAnnouncement"),
    path("addComment/", views.addComment, name="addComment"),
    # Direct messaging.
    path("conversations/",
         views.conversations, name="conversations"),
    path("addConversation/", views.addConversation, name="addConversation"),
    path("sendText/", views.sendText, name="sendText"),
    path("editProfileImage/", views.editProfileImage, name="editProfileImage"),
    # Assignment endpoints, all scoped under a classroom code.
    path("ViewClassroom/<str:code>/assignments",
         views.assignments, name="assignments"),
    path("ViewClassroom/<str:code>/assignments/createAssignment",
         views.createAssignment, name="createAssignment"),
    path("ViewClassroom/<str:code>/assignments/viewAssignment/<int:id>",
         views.viewAssignment, name="viewAssignment"),
    path("ViewClassroom/<str:code>/assignments/viewAssignment/<int:id>/submit",
         views.submitAssignment, name="submitAssignment"),
    path("ViewClassroom/<str:code>/assignments/viewAssignment/<int:assignmentId>/<int:id>/grade",
         views.gradeAssignment, name="gradeAssignment"),
    # Quiz endpoints.
    path("ViewClassroom/<str:code>/quizzes", views.quizzes, name="quizzes"),
    # NOTE(review): unlike createAssignment above, this route carries no
    # <str:code> segment — looks inconsistent; confirm how
    # views.createQuiz resolves the classroom before changing the URL.
    path("ViewClassroom/quizzes/createQuiz",
         views.createQuiz, name="createQuiz"),
    path("ViewClassroom/<str:code>/quizzes/viewQuiz/<int:id>",
         views.viewQuiz, name="viewQuiz"),
    path("ViewClassroom/<str:code>/quizzes/viewQuiz/<int:id>/submit",
         views.submitQuiz, name="submitQuiz"),
]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,227 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0027_auto_20210730_2145.py | # Generated by Django 3.2.5 on 2021-07-31 01:45
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0027 for the educationPortal app.

    Sets several date fields' defaults to the literal ``datetime.date``
    captured when ``makemigrations`` ran (2021-07-31).

    NOTE(review): a ``datetime.date`` literal as the default of a
    ``DateTimeField`` is a further sign the model used a non-callable
    default (e.g. ``date.today()``); the callable ``timezone.now`` in
    models.py is the usual fix — do not edit this applied migration.
    """

    # Must run after 0026.
    dependencies = [
        ('educationPortal', '0026_conversation_lastinteracted'),
    ]

    operations = [
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(default=datetime.date(2021, 7, 31)),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.date(2021, 7, 31)),
        ),
        migrations.AlterField(
            model_name='conversation',
            name='lastInteracted',
            field=models.DateField(default=datetime.date(2021, 7, 31)),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(default=datetime.date(2021, 7, 31)),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,228 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0019_auto_20210729_1134.py | # Generated by Django 3.2.5 on 2021-07-29 15:34
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration 0019 for the educationPortal app.

    Adds ``body`` text plus ``classroom`` and ``creator`` foreign keys to
    the ``Announcement`` model. All three fields are nullable/defaulted so
    the migration applies cleanly to existing rows.

    NOTE(review): related_name='classroom' / 'creator' are generic — from
    the classroom/user side the reverse accessors read oddly (e.g.
    ``classroom.classroom``); presumably intentional, verify before use.
    """

    # Must run after 0018.
    dependencies = [
        ('educationPortal', '0018_alter_announcement_date'),
    ]

    operations = [
        migrations.AddField(
            model_name='announcement',
            name='body',
            field=models.CharField(default='', max_length=500),
        ),
        migrations.AddField(
            model_name='announcement',
            name='classroom',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='classroom', to='educationPortal.classroom'),
        ),
        migrations.AddField(
            model_name='announcement',
            name='creator',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='creator', to=settings.AUTH_USER_MODEL),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,229 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0053_auto_20210805_1507.py | # Generated by Django 3.2.5 on 2021-08-05 19:07
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0053 for the educationPortal app.

    Points ``QuizSubmission.answers`` at ``MCanswer`` and refreshes the
    frozen literal-datetime defaults on several date fields.

    NOTE(review): the literal ``datetime.datetime(...)`` defaults were
    captured at ``makemigrations`` time — the model should likely use the
    callable ``timezone.now``; fix in models.py, not in this applied
    migration.
    """

    # Must run after 0052.
    dependencies = [
        ('educationPortal', '0052_auto_20210805_1502'),
    ]

    operations = [
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 15, 7, 9, 194566)),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 15, 7, 9, 190569)),
        ),
        migrations.AlterField(
            model_name='quizsubmission',
            name='answers',
            field=models.ManyToManyField(blank=True, to='educationPortal.MCanswer'),
        ),
        migrations.AlterField(
            model_name='quizsubmission',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 8, 5, 15, 7, 9, 198559)),
        ),
        migrations.AlterField(
            model_name='submission',
            name='date',
            field=models.DateField(default=datetime.datetime(2021, 8, 5, 15, 7, 9, 198559)),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 8, 5, 15, 7, 9, 194566)),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,230 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0021_auto_20210729_1559.py | # Generated by Django 3.2.5 on 2021-07-29 19:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration 0021 for the educationPortal app.

    Widens ``Announcement.body`` to 20000 chars, creates the ``Comment``
    model, and links announcements to comments via a many-to-many.

    Note: ``Comment.date`` correctly uses the *callable*
    ``django.utils.timezone.now`` as its default — unlike the frozen
    ``datetime.datetime(...)`` literals in later migrations of this app.
    """

    # Must run after 0020.
    dependencies = [
        ('educationPortal', '0020_alter_announcement_body'),
    ]

    operations = [
        migrations.AlterField(
            model_name='announcement',
            name='body',
            field=models.CharField(default='', max_length=20000),
        ),
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('date', models.DateTimeField(default=django.utils.timezone.now)),
                ('text', models.CharField(default='', max_length=5000)),
                ('commenter', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commenter', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.AddField(
            model_name='announcement',
            name='comments',
            field=models.ManyToManyField(blank=True, to='educationPortal.Comment'),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,231 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0029_auto_20210731_1551.py | # Generated by Django 3.2.5 on 2021-07-31 19:51
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration 0029 for the educationPortal app.

    Drops the defaults from ``Announcement.date``, ``Conversation.
    lastInteracted`` and ``Text.date`` (fields become required with no
    default), while ``Comment.date`` still carries a frozen literal
    timestamp from ``makemigrations`` time.

    NOTE(review): the surviving ``datetime.datetime(...)`` literal on
    ``comment.date`` again points to a non-callable default in the model;
    fix there, not in this applied migration.
    """

    # Must run after 0028.
    dependencies = [
        ('educationPortal', '0028_auto_20210731_1530'),
    ]

    operations = [
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 15, 51, 17, 614324)),
        ),
        migrations.AlterField(
            model_name='conversation',
            name='lastInteracted',
            field=models.DateField(),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
51,232 | pavansai018/E-Learning-Portal | refs/heads/master | /educationPortal/migrations/0033_auto_20210731_2144.py | # Generated by Django 3.2.5 on 2021-08-01 01:44
import datetime
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration 0033 for the educationPortal app.

    Adds a nullable ``classroom`` foreign key to ``Assignment`` and
    refreshes the frozen literal-datetime defaults on date fields.

    NOTE(review): the literal ``datetime.datetime(...)`` defaults were
    captured at ``makemigrations`` time — the model should likely use the
    callable ``timezone.now``; fix in models.py, not in this applied
    migration.
    """

    # Must run after 0032.
    dependencies = [
        ('educationPortal', '0032_auto_20210731_2140'),
    ]

    operations = [
        # Nullable FK so existing Assignment rows migrate cleanly.
        migrations.AddField(
            model_name='assignment',
            name='classroom',
            field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='belongingToClassroom', to='educationPortal.classroom'),
        ),
        migrations.AlterField(
            model_name='announcement',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 44, 21, 782962)),
        ),
        migrations.AlterField(
            model_name='comment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 44, 21, 782962)),
        ),
        migrations.AlterField(
            model_name='text',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2021, 7, 31, 21, 44, 21, 783960)),
        ),
    ]
| {"/educationPortal/views.py": ["/educationPortal/models.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.