index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
65,026 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0011_auto_20190403_2011.py | # Generated by Django 2.1.7 on 2019-04-03 20:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0010_auto_20190403_2009'),
]
operations = [
migrations.AlterField(
model_name='quiz',
name='file',
field=models.FileField(blank=True, upload_to='lms-lite-2019/quizes'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,027 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0009_auto_20190328_0507.py | # Generated by Django 2.1.7 on 2019-03-28 05:07
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0008_auto_20190327_2020'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='due_date',
field=models.DateTimeField(blank=True),
),
migrations.AlterField(
model_name='assignment',
name='open_date',
field=models.DateTimeField(blank=True),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,028 | kh06089/2019-LMSLite | refs/heads/master | /courses/admin.py | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from courses.models import Course, Assignment, Grade
from .forms import CourseAdminCreationForm, CourseAdminChangeForm
class CourseAdmin(admin.ModelAdmin):
    """Admin configuration for the Course model.

    BUG FIX: ``form`` and ``add_form`` were swapped -- the *creation* form was
    used for editing existing courses and the *change* form for adding new
    ones, the opposite of the ``UserAdmin`` convention the names imply.
    """
    form = CourseAdminChangeForm
    add_form = CourseAdminCreationForm
    list_display = ('course_name', )
    list_filter = ('course_name',)
    # Fields shown when editing an existing course (enrollment is editable here).
    fieldsets = (
        (None, {'fields': ('prof', 'course_name', 'description', 'students')}),
    )
    # Fields shown on the add page (students are enrolled later).
    add_fieldsets = (
        (None, {'fields': ('prof', 'course_name', 'description')}),
    )
    search_fields = ('course_name',)
    ordering = ('course_name',)
    filter_horizontal = ()
# Register the course-related models with the Django admin site; Course gets
# the customized CourseAdmin above, the others use the default ModelAdmin.
admin.site.register(Assignment)
admin.site.register(Grade)
admin.site.register(Course, CourseAdmin)
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,029 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0013_auto_20190403_2114.py | # Generated by Django 2.1.7 on 2019-04-03 21:14
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0012_auto_20190403_2015'),
]
operations = [
migrations.AlterField(
model_name='assignment',
name='grade',
field=models.BigIntegerField(blank=True, default=0),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,030 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0014_auto_20190403_2253.py | # Generated by Django 2.1.7 on 2019-04-03 22:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0013_auto_20190403_2114'),
]
operations = [
migrations.AlterField(
model_name='quiz',
name='file',
field=models.FileField(blank=True, upload_to='quiz'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,031 | kh06089/2019-LMSLite | refs/heads/master | /accounts/migrations/0012_student_grades.py | # Generated by Django 2.1.7 on 2019-04-11 19:51
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0015_auto_20190411_1951'),
('accounts', '0011_auto_20190404_2028'),
]
operations = [
migrations.AddField(
model_name='student',
name='grades',
field=models.ManyToManyField(blank=True, to='courses.Grade'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,032 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0023_auto_20190430_1310.py | # Generated by Django 2.1.7 on 2019-04-30 17:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0022_auto_20190430_1304'),
]
operations = [
migrations.AlterField(
model_name='quiz',
name='average',
field=models.FloatField(blank=True, default=0, null=True),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,033 | kh06089/2019-LMSLite | refs/heads/master | /accounts/views.py | from django.http import HttpResponseRedirect
from django.shortcuts import render, redirect
from accounts.forms import ProfessorChangeForm
def profile_view(request):
    """Render and process the profile-edit form for the logged-in user.

    Anonymous users are bounced to the login page.

    BUG FIX: the form was bound to ``request.POST`` even on GET requests
    (an empty bound form shows spurious "required" errors), and ``save()``
    was called without ``is_valid()`` (which raises ValueError on invalid
    input).  The form is now bound only on POST and validated before saving;
    invalid input re-renders the page with the form's errors.
    """
    if not request.user.is_authenticated:
        return HttpResponseRedirect("/auth/login")
    if request.method == 'POST':
        form = ProfessorChangeForm(request.POST, request.FILES, instance=request.user)
        if form.is_valid():
            form.save()
            return redirect('index')
    else:
        form = ProfessorChangeForm(instance=request.user)
    return render(request, 'profile.html', {'form': form})
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,034 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0020_auto_20190430_0043.py | # Generated by Django 2.1.7 on 2019-04-30 00:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0019_auto_20190416_2229'),
]
operations = [
migrations.AddField(
model_name='quiz',
name='quiz_code',
field=models.CharField(blank=True, default=None, max_length=8),
),
migrations.AddField(
model_name='quiz',
name='restricted',
field=models.BooleanField(default=False),
preserve_default=False,
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,035 | kh06089/2019-LMSLite | refs/heads/master | /accounts/migrations/0013_auto_20190416_2256.py | # Generated by Django 2.1.7 on 2019-04-16 22:56
import accounts.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_student_grades'),
]
operations = [
migrations.AlterField(
model_name='user',
name='profile_photo',
field=models.ImageField(blank=True, upload_to=accounts.models.photo_upload_address),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,036 | kh06089/2019-LMSLite | refs/heads/master | /courses/views.py | import datetime
from django.core.files.storage import default_storage
from django.http import HttpResponse
from django.shortcuts import render, redirect
from tempfile import NamedTemporaryFile
from django.utils.encoding import smart_str
from LMSLite.helpers import grade_quiz, reset_quiz, create_quiz, update_quiz, print_grades
from accounts.models import Professor, Student
from courses.models import Course, Quiz, Grade, Homework, Survey, Assignment
from courses.forms import QuizFileForm, QuizEditForm, HomeworkCreationForm, GradeEditForm, SurveyFileForm, SurveyEditForm
from google.cloud import storage
def course_view(request, id):
    """Course landing page: shows up to five upcoming assignments of each
    kind and handles professor-side creation/editing of quizzes, surveys
    and homework.

    NOTE(review): the three creation forms are bound to ``request.POST`` even
    on GET, and several branches index ``request.POST`` directly -- confirm
    every caller reaches this view with the fields those branches expect.
    """
    context_dict = {}
    course = Course.objects.get(id=id)
    # Creation forms, bound to whatever (possibly nothing) was POSTed.
    quiz = QuizFileForm(request.POST, request.FILES)
    homework = HomeworkCreationForm(request.POST, request.FILES)
    survey = SurveyFileForm(request.POST, request.FILES)
    d = datetime.datetime.today()
    context_dict['course'] = course
    context_dict['quizform'] = quiz
    context_dict['hwForm'] = homework
    context_dict['surveyForm'] = survey
    context_dict['quizes'] = course.quizes.all()
    # Collect up to 5 not-yet-due assignments of each kind.
    # NOTE(review): tzinfo is stripped to compare against the naive "today".
    assignments = []
    x = 0
    for assignment in course.quizes.all():
        if assignment.due_date.replace(tzinfo=None) > d and x < 5:
            assignments.append(assignment)
            x += 1
    x = 0
    for assignment in course.homeworks.all():
        if assignment.due_date.replace(tzinfo=None) > d and x < 5:
            assignments.append(assignment)
            x += 1
    x = 0
    for assignment in course.surveys.all():
        if assignment.due_date.replace(tzinfo=None) > d and x < 5:
            assignments.append(assignment)
            x += 1
    context_dict['assignments'] = assignments
    if 'quizFileUpdate' in request.POST:
        # Professor finished editing the key of the most recently created quiz.
        # NOTE(review): "most recent" is the highest-id Quiz globally, not
        # scoped to this course or professor -- verify this is intended.
        post = request.POST.copy()
        update_quiz(Quiz.objects.order_by('id')[len(Quiz.objects.all()) - 1].file.name, post)
        return redirect('index')
    if 'surveyFileUpdate' in request.POST:
        # Same flow as quizFileUpdate, for the newest survey.
        post = request.POST.copy()
        update_quiz(Survey.objects.order_by('id')[len(Survey.objects.all()) - 1].file.name, post)
        return redirect('index')
    if 'surveySubmit' in request.POST:
        # Create the survey, then make sure its key file exists in the bucket.
        survey.save(course=course, prof=Professor.objects.get(id=request.user.id))
        edit = SurveyEditForm
        key_name = course.course_name + '/Surveys/' +request.POST['assignment_name']+'/'+request.POST['assignment_name'].replace(' ', '_') +'_key.txt'
        client = storage.Client()
        bucket = client.get_bucket('lms-lite-2019')
        try:
            blob = bucket.get_blob(key_name)
            downloaded_blob = blob.download_as_string()
        except:
            # Key missing: seed it with a sample question, point the newly
            # created survey at it, then re-fetch.
            file = default_storage.open(key_name, 'w+')
            file.write('MC\tSample Question?\tCorrect Answer\tCorrect\tIncorrect Answer\tIncorrect')
            file.close()
            survey_instance = Survey.objects.order_by('id')[len(Survey.objects.all()) - 1]
            survey_instance.file = key_name
            survey_instance.save()
            blob = bucket.get_blob(key_name)
            downloaded_blob = blob.download_as_string()
        # Copy the key into a local temp file for the edit form to load.
        quizKey = NamedTemporaryFile(delete=False)
        quizKey.write(bytes(downloaded_blob.decode('utf8'), 'UTF-8'))
        quizKey.seek(0)
        # NOTE(review): file_address is set on the form *class*, not an
        # instance -- shared across requests; confirm this is intended.
        edit.file_address = quizKey.name
        context_dict['surveyForm'] = edit
    if 'quizSubmit' in request.POST:
        # Same flow as surveySubmit, for quizzes.
        quiz.save(course=course, prof=Professor.objects.get(id=request.user.id))
        key_name = course.course_name + '/Quizzes/' +request.POST['assignment_name']+'/'+request.POST['assignment_name'].replace(' ', '_') +'_key.txt'
        edit = QuizEditForm
        client = storage.Client()
        bucket = client.get_bucket('lms-lite-2019')
        try:
            blob = bucket.get_blob(key_name)
            downloaded_blob = blob.download_as_string()
        except:
            file = default_storage.open(key_name, 'w+')
            file.write('MC\tSample Question?\tCorrect Answer\tCorrect\tIncorrect Answer\tIncorrect')
            file.close()
            quiz_instance = Quiz.objects.order_by('id')[len(Quiz.objects.all()) - 1]
            quiz_instance.file = key_name
            quiz_instance.save()
            blob = bucket.get_blob(key_name)
            downloaded_blob = blob.download_as_string()
        quizKey = NamedTemporaryFile(delete=False)
        quizKey.write(bytes(downloaded_blob.decode('utf8'), 'UTF-8'))
        quizKey.seek(0)
        edit.file_address = quizKey.name
        context_dict['quizform'] = edit
    if 'hmwkSubmit' in request.POST:
        homework.save(course=course, prof=Professor.objects.get(id=request.user.id))
        return redirect('index')
    return render(request, 'course_page.html', context_dict)
def quiz_view(request, cid, id):
    """Serve a quiz to a student and grade the submitted answers.

    GET renders the quiz built from the key file stored in GCS.  POST stores
    the student's response in the bucket, grades it against the key, records
    a Grade, and renders the post-quiz page.

    BUG FIX: the 'btn_done' branch previously did
    ``return redirect(course_view(request, cid.id))``, which passes an
    HttpResponse object to ``redirect()`` (redirect would then try to use
    ``str(response)`` as a URL).  The course view's response is now returned
    directly.
    """
    context_dict = {}
    quiz = Quiz.objects.get(id=id)
    # The URL's cid is ignored; trust the quiz's own course instead.
    cid = quiz.course_id
    student = Student.objects.get(id=request.user.id)
    context_dict['quiz'] = quiz
    context_dict['course'] = cid
    # Download the answer key from the bucket into a local temp file.
    client = storage.Client()
    bucket = client.get_bucket('lms-lite-2019')
    key_blob = bucket.get_blob(quiz.file.name)
    downloaded_blob = key_blob.download_as_string()
    quizKey = NamedTemporaryFile(delete=False)
    quizKey.write(bytes(downloaded_blob.decode('utf8'), 'UTF-8'))
    quizKey.seek(0)
    questions = create_quiz(input=quizKey.name)
    quizKey.seek(0)
    context_dict['questions'] = questions
    if 'btn_done' in request.POST:
        return course_view(request, cid.id)
    if request.method == "POST":
        # Persist the student's answers next to the quiz in the bucket...
        stdQuiz = NamedTemporaryFile(delete=False)
        response_loc = '/'.join((cid.course_name, 'Quizzes', quiz.assignment_name, 'Responses', request.user.email.split('@')[0]+'_response.txt'))
        response_file = reset_quiz(quizKey.name, response_loc, request.POST)
        std_quiz_blob = bucket.get_blob(response_loc)
        download = std_quiz_blob.download_as_string()
        stdQuiz.write(bytes(download.decode('utf8'), 'UTF-8'))
        quizKey.seek(0)
        stdQuiz.seek(0)
        # ...then grade the local copies against each other.
        score = grade_quiz(stdQuiz.name, quizKey.name)
        context_dict['grade'] = score
        grade = Grade()
        grade.assignment = quiz
        grade.file = response_file.name
        grade.grade_value = score
        grade.stdnt = student
        grade.save()
        # Quiz is done: drop it from the pending list and record the grade.
        student.quizes.remove(quiz)
        student.grades.add(grade)
        return render(request, 'post_quiz_page.html', context_dict)
    return render(request, 'quiz_page.html', context_dict)
def quiz_list_view(request, cid):
    """List the logged-in student's pending quizzes for a course, removing
    any whose restrict date has passed.

    Improvement: the Student row was fetched twice with identical queries;
    it is now fetched once and the quiz list derived from it.
    """
    student = Student.objects.get(id=request.user.id)
    quizzes = student.quizes.all()
    context_dict = {
        'quizzes': quizzes,
        'course': Course.objects.get(id=cid),
    }
    for quiz in quizzes:
        # tzinfo is stripped to compare against the naive "today", matching
        # the convention used elsewhere in this module.
        if quiz.restrict_date and quiz.restrict_date.replace(tzinfo=None) <= datetime.datetime.today():
            student.quizes.remove(quiz)
    return render(request, 'quiz_list_page.html', context_dict)
def pre_quiz_view(request, id, cid):
    """Quiz entry page: on POST, check the access code (if the quiz has one)
    and send the student on to the quiz itself."""
    quiz = Quiz.objects.get(id=id)
    student = Student.objects.get(id=request.user.id)
    context = {'quiz': quiz}
    if request.method != 'POST':
        return render(request, 'pre_quiz_page.html', context)
    if quiz.quiz_code and quiz.quiz_code != request.POST['quiz-code']:
        # Wrong access code: stay on the entry page.
        return render(request, 'pre_quiz_page.html', context)
    if quiz.quiz_code:
        # Correct code: take the quiz off the student's pending list.
        student.quizes.remove(quiz)
    return redirect('quiz_page', quiz.course_id.id, quiz.id)
def grade_view(request, cid):
    """Assignment/grade overview for a course.

    POST downloads the course grades as CSV; GET renders the assignment list
    with collected quiz and homework grades.

    NOTE(review): ``context_dict['quiz_average']`` is overwritten on every
    quiz iteration, so the template only ever sees the *last* quiz's average;
    ``quiz.average`` is assigned but never ``save()``d, so it lives only for
    this render.  Confirm both are intended.
    """
    context_dict = {}
    quiz_grades = []
    hw_grades = []
    if request.method == 'POST':
        # Export: stream the generated grades file back as a CSV attachment.
        file = default_storage.open(print_grades(cid).name)
        response = HttpResponse(file, content_type='text/csv')
        response['Content-Disposition'] = 'attachment; filename=%s' % smart_str(file.name)
        return response
    course = Course.objects.get(id=cid)
    # role == 2 appears to mean "student" here -- TODO confirm against the
    # accounts app's role constants.
    if request.user.role == 2:
        student = Student.objects.get(id=request.user.id)
        context_dict['student']=student
    quizzes = course.quizes.all()
    homeworks = course.homeworks.all()
    surveys = course.surveys.all()
    # Sum each quiz's grades over all enrolled students; k counts how many
    # grades were actually found for the current quiz.
    k = 0
    for quiz in quizzes:
        quiz_average = 0
        for student in course.students.all():
            try:
                grade = student.grades.get(assignment=quiz)
                quiz_average += grade.grade_value
                quiz_grades.append(grade)
                k += 1
            except:
                # NOTE(review): bare except swallows DoesNotExist but also
                # MultipleObjectsReturned and anything else.
                pass
        if k > 0:
            quiz_average /= k
            context_dict['quiz_average'] = quiz_average
            quiz_average = round(quiz_average, 2)
            quiz.average = quiz_average
        k = 0
    # Homework grades are collected but not averaged.
    for homework in homeworks:
        for student in course.students.all():
            try:
                grade = student.grades.get(assignment=homework)
                hw_grades.append(grade)
            except:
                pass
    context_dict['course'] = course
    context_dict['quizzes'] = quizzes
    context_dict['homeworks'] = homeworks
    context_dict['surveys'] = surveys
    context_dict['quiz_grades'] = quiz_grades
    context_dict['hw_grades'] = hw_grades
    return render(request, 'assignment_list.html', context_dict)
def submission_view(request, cid, id):
    """Show one graded submission; the grade can be edited via POST.

    BUG FIX: the edit form was bound to ``request.POST`` even on GET
    (showing spurious validation errors), and ``save()`` was called without
    ``is_valid()`` (raising ValueError on bad input).  The form is now bound
    only on POST and validated before saving; invalid input re-renders the
    page with the form errors.
    """
    context_dict = {}
    grade = Grade.objects.get(id=id)
    if request.method == 'POST':
        grade_form = GradeEditForm(request.POST, instance=grade)
    else:
        grade_form = GradeEditForm(instance=grade)
    context_dict['grade'] = grade
    context_dict['grade_form'] = grade_form
    # assignment.type == 0 is treated as "quiz" here -- the response file
    # lives in GCS and is rebuilt into a question list for display.
    if grade.assignment.type == 0:
        client = storage.Client()
        bucket = client.get_bucket('lms-lite-2019')
        blob = bucket.get_blob(grade.file.name)
        downloaded_blob = blob.download_as_string()
        response = NamedTemporaryFile(delete=False)
        response.write(bytes(downloaded_blob.decode('utf8'), 'UTF-8'))
        response.seek(0)
        questions = create_quiz(response.name)
        context_dict['questions'] = questions
    if request.method == 'POST' and grade_form.is_valid():
        grade_form.save()
        grade.stdnt.grades.add(grade)
        return redirect('/courses/'+str(grade.assignment.course_id.id) +'/grades')
    return render(request,'submission_view.html',context_dict)
def homework_view(request, id):
    """List the logged-in student's pending homework for a course."""
    course = Course.objects.get(id=id)
    pending = Student.objects.get(id=request.user.id).homeworks.all()
    context = {'homework': pending, 'course': course}
    return render(request, 'homework_list.html', context)
def homework_submit_view(request, id, cid):
    """Homework submission page: on POST, store the uploaded file in default
    storage and record a zero-value Grade awaiting the professor's review.

    Improvement: two consecutive ``request.method == 'POST'`` blocks (the
    second only removing the homework from the pending list) are merged into
    one, removing the duplicated check.
    """
    context_dict = {}
    homework = Homework.objects.get(id=id)
    student = Student.objects.get(id=request.user.id)
    # Past the restrict date: the homework can no longer be submitted.
    if homework.restrict_date and homework.restrict_date.replace(tzinfo=None) <= datetime.datetime.today():
        student.homeworks.remove(homework)
    context_dict['homework'] = homework
    if request.method == 'POST':
        sub_addr = homework.course_id.course_name + '/Homework/' + homework.assignment_name + '/Submissions/' + \
                   Student.objects.get(id=request.user.id).email.split('@')[0] + '/' + request.FILES['upload'].name
        default_storage.save(sub_addr, request.FILES['upload'])
        grade = Grade()
        grade.assignment = homework
        grade.grade_value = 0
        grade.file = sub_addr
        grade.stdnt = student
        grade.save()
        grade.stdnt.grades.add(grade)
        # Submission recorded: drop the homework from the pending list.
        student.homeworks.remove(homework)
        return redirect('index')
    return render(request,'homework_submit_page.html',context_dict)
def survey_list_view(request, cid):
    """List the student's pending surveys, removing any whose restrict date
    has passed."""
    course = Course.objects.get(id=cid)
    surveys = Student.objects.get(id=request.user.id).surveys.all()
    student = Student.objects.get(id=request.user.id)
    context = {'course': course, 'survey': surveys}
    for survey in surveys:
        cutoff = survey.restrict_date
        if cutoff and cutoff.replace(tzinfo=None) <= datetime.datetime.today():
            # Window closed: drop the survey from the pending list.
            student.surveys.remove(survey)
    return render(request, 'survey_list_view.html', context)
def pre_survey_view(request, id, cid):
    """Survey entry page: POST removes the survey from the student's pending
    list and forwards to the survey itself; GET renders the entry page.

    Improvement: the original ended with an unreachable duplicate
    ``return render(...)`` after an exhaustive if/else; the dead line is
    removed and the branch flattened.
    """
    survey = Survey.objects.get(id=id)
    student = Student.objects.get(id=request.user.id)
    context_dict = {'survey': survey}
    if request.method == 'POST':
        student.surveys.remove(survey)
        return redirect('survey_page', survey.course_id.id, survey.id)
    return render(request, 'pre_survey_page.html', context_dict)
def take_survey_view(request, id, cid):
    """Serve a survey built from its key file in GCS; POST shows the
    post-survey page."""
    survey = Survey.objects.get(id=id)
    # The URL's cid is ignored; trust the survey's own course instead.
    cid = survey.course_id
    # Fetched for its existence check (raises if the user is not a student).
    student = Student.objects.get(id=request.user.id)
    context_dict = {}
    context_dict['survey'] = survey
    context_dict['course'] = cid
    # Download the survey definition from the bucket into a local temp file.
    gcs = storage.Client()
    bucket = gcs.get_bucket('lms-lite-2019')
    blob = bucket.get_blob(survey.file.name)
    raw = blob.download_as_string()
    key_file = NamedTemporaryFile(delete=False)
    key_file.write(bytes(raw.decode('utf8'), 'UTF-8'))
    key_file.seek(0)
    context_dict['questions'] = create_quiz(input=key_file.name)
    key_file.seek(0)
    if request.method == 'POST':
        return render(request, 'post_survey_page.html', context_dict)
    return render(request, 'survey_page.html', context_dict)
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,037 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0018_auto_20190416_2143.py | # Generated by Django 2.1.7 on 2019-04-16 21:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0017_auto_20190416_2138'),
]
operations = [
migrations.AlterField(
model_name='grade',
name='assignment',
field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='courses.Assignment'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,038 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0015_auto_20190411_1951.py | # Generated by Django 2.1.7 on 2019-04-11 19:51
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('courses', '0014_auto_20190403_2253'),
]
operations = [
migrations.CreateModel(
name='Grade',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('grade_value', models.FloatField()),
],
),
migrations.RemoveField(
model_name='assignment',
name='grade',
),
migrations.AddField(
model_name='grade',
name='assignment',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='courses.Assignment'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,039 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0016_auto_20190415_1610.py | # Generated by Django 2.1.7 on 2019-04-15 16:10
import courses.models
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0012_student_grades'),
('courses', '0015_auto_20190411_1951'),
]
operations = [
migrations.AddField(
model_name='grade',
name='file',
field=models.FileField(blank=True, default=None, upload_to=courses.models.response_upload_address),
),
migrations.AddField(
model_name='grade',
name='stdnt',
field=models.ForeignKey(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='accounts.Student'),
),
migrations.AlterField(
model_name='grade',
name='assignment',
field=models.OneToOneField(blank=True, default=None, on_delete=django.db.models.deletion.CASCADE, to='courses.Assignment'),
),
migrations.AlterField(
model_name='grade',
name='grade_value',
field=models.FloatField(blank=True, default=None),
),
migrations.AlterField(
model_name='homework',
name='file',
field=models.FileField(blank=True, upload_to=courses.models.homework_upload_address),
),
migrations.AlterField(
model_name='quiz',
name='file',
field=models.FileField(blank=True, upload_to=courses.models.quiz_upload_address),
),
migrations.AlterField(
model_name='survey',
name='file',
field=models.FileField(blank=True, upload_to=courses.models.survey_upload_address),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,040 | kh06089/2019-LMSLite | refs/heads/master | /accounts/migrations/0014_student_quizes.py | # Generated by Django 2.1.7 on 2019-04-23 20:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0019_auto_20190416_2229'),
('accounts', '0013_auto_20190416_2256'),
]
operations = [
migrations.AddField(
model_name='student',
name='quizes',
field=models.ManyToManyField(blank=True, default=None, to='courses.Quiz'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,041 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0025_auto_20190430_1406.py | # Generated by Django 2.1.7 on 2019-04-30 18:06
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0024_merge_20190430_1352'),
]
operations = [
migrations.AlterField(
model_name='quiz',
name='quiz_code',
field=models.CharField(blank=True, default=None, max_length=8, null=True),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,042 | kh06089/2019-LMSLite | refs/heads/master | /courses/migrations/0017_auto_20190416_2138.py | # Generated by Django 2.1.7 on 2019-04-16 21:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0016_auto_20190415_1610'),
]
operations = [
migrations.AlterField(
model_name='grade',
name='file',
field=models.FileField(blank=True, default=None, upload_to=''),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,043 | kh06089/2019-LMSLite | refs/heads/master | /accounts/migrations/0011_auto_20190404_2028.py | # Generated by Django 2.1.7 on 2019-04-04 20:28
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0010_auto_20190404_2027'),
]
operations = [
migrations.AlterField(
model_name='user',
name='profile_photo',
field=models.ImageField(blank=True, upload_to='user-profile-photos'),
),
]
| {"/courses/forms.py": ["/LMSLite/helpers.py", "/courses/models.py"], "/LMSLite/views.py": ["/courses/models.py"], "/LMSLite/helpers.py": ["/courses/models.py"], "/courses/admin.py": ["/courses/models.py", "/courses/forms.py"], "/courses/views.py": ["/LMSLite/helpers.py", "/courses/models.py", "/courses/forms.py"], "/courses/migrations/0016_auto_20190415_1610.py": ["/courses/models.py"]} |
65,109 | dzchen314/WarandPeace-text-parser | refs/heads/master | /header_parser.py | '''
Parses Tolstoy's War and Peace into header, body, and footer for processing
using a state machine framework.
'''
from statemachine import StateMachine
import sys
# Machine States
def error(text):
    """Error state: report that an unidentifiable line was reached.

    BUG FIX: the original wrote ``'Unidentifiable line:' + line`` where
    ``line`` is undefined in this scope, so the state itself raised
    NameError whenever it ran.
    """
    sys.stderr.write('Unidentifiable line encountered\n')
def end(text):
    """Terminal state: report that processing finished successfully."""
    sys.stdout.write('Processing Successful\n')
def header(text):
    """Header state: consume lines until 10 consecutive blank lines are
    seen, then hand off to the body state.

    NOTE(review): loops forever if EOF arrives before 10 blanks -- this
    matches the original behavior.
    """
    stream = text
    run = 0
    while True:
        current = stream.readline()
        run = run + 1 if current in ('\n', '\r\n') else 0
        if run == 10:
            return body, stream
def body(text):
    """Body state: accumulate text until 10 consecutive blank lines, write
    it to warandpeace_body.txt, then hand off to the footer state."""
    stream = text
    run = 0
    collected = ''
    while True:
        current = stream.readline()
        collected += current
        run = run + 1 if current in ('\n', '\r\n') else 0
        if run == 10:
            # Persist the body text for later processing steps.
            with open('warandpeace_body.txt', 'w') as body_file:
                body_file.write(collected)
            return footer, stream
def footer(text):
    """Footer state: echo the remaining lines; EOF transitions to end."""
    stream = text
    while True:
        current = stream.readline()
        print(current)
        if not current:
            return end, stream
if __name__== "__main__":
    # Wire up the parser state machine: header -> body -> footer -> end,
    # with error and end registered as terminal states.
    m = StateMachine()
    m.add_state(header)
    m.add_state(body)
    m.add_state(footer)
    m.add_state(error, end_state=1)
    m.add_state(end, end_state=1)
    m.set_start(header)
    # Process the raw book text from the working directory.
    m.run('warandpeace.txt')
| {"/header_parser.py": ["/statemachine.py"], "/book_parser.py": ["/statemachine.py"]} |
65,110 | dzchen314/WarandPeace-text-parser | refs/heads/master | /statemachine.py | '''
Basic state machine framework for text parsing
'''
class StateMachine:
    """Minimal state-machine driver for line-oriented text parsing.

    States are handler callables that take the open file object and return a
    ``(next_state, file_object)`` tuple.  An end state is called once for its
    final side effect and then the machine stops.
    """

    def __init__(self):
        self.handlers = []      # all registered state handlers
        self.startState = None  # handler the run starts from
        self.endStates = []     # handlers that terminate the run

    def add_state(self, handler, end_state=0):
        """Register *handler* as a state; mark it terminal if *end_state* is truthy."""
        self.handlers.append(handler)
        if end_state:
            self.endStates.append(handler)

    def set_start(self, handler):
        """Set the state the machine starts in."""
        self.startState = handler

    def run(self, filepath=None):
        """Open *filepath* and drive the machine until an end state is reached.

        Raises:
            ValueError: if a handler returns a state that was never
                registered.

        BUG FIX: the original did ``print("Invalid target %s", newState)``
        (passing the format string and the value as two print arguments
        instead of %-formatting) and then looped again with the *stale*
        handler, which re-produced the same invalid target forever.
        Failing fast with ValueError is strictly safer.
        """
        handler = self.startState
        # Read the file line by line; handlers share the open file object.
        with open(filepath, 'r') as text:
            while 1:
                (newState, text) = handler(text)
                if newState in self.endStates:
                    newState(text)
                    break
                elif newState not in self.handlers:
                    raise ValueError("Invalid target %s" % newState)
                else:
                    handler = newState
| {"/header_parser.py": ["/statemachine.py"], "/book_parser.py": ["/statemachine.py"]} |
65,111 | dzchen314/WarandPeace-text-parser | refs/heads/master | /book_parser.py | '''
Book parser written to parse Tolstoy's War and Peace.
Stores book index, book year, chapter index, paragraph index, sentence index,
sentence text, word indices, and word text into a nested dictionary and
serializes dictionary into JSON format.
'''
from collections import defaultdict
from nltk import tokenize
from statemachine import StateMachine
from unidecode import unidecode
import sys
import json
# punct is a list of punctuation to be removed for words
punct = '''!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~``\'\'...'''
# global variables for indexing (for convenience)
book_n, year, chapter_n, paragraph_n = 0, 0, 0, 0
def make_dict():
    """Return an autovivifying mapping: accessing a missing key creates a
    nested mapping of the same kind, so values can be set at any depth."""
    return defaultdict(make_dict)
nested_dict = defaultdict(make_dict) # Initiate the nested dict to store words
def dictify(d):
    """Recursively convert a defaultdict tree into plain nested dicts.

    Keys whose value is itself a defaultdict are converted to int when they
    look numeric (e.g. '3' -> 3), matching the original intent; other keys
    (e.g. 'year') are kept as-is.  Returns a new dict and leaves d intact.

    BUG FIX: the original assigned d[int(k)] = ... while iterating
    d.items(), which raises "dictionary changed size during iteration"
    whenever a numeric *string* key is converted.  Building a fresh dict
    avoids mutating the dict being iterated.
    """
    result = {}
    for k, v in d.items():
        if isinstance(v, defaultdict):
            try:
                result[int(k)] = dictify(v)
            except ValueError:
                result[k] = dictify(v)
        else:
            # Leaf values keep their original key, as before.
            result[k] = v
    return result
def iter_all(d,depth=1):
    # Prints a nested dictionary in a readable way (for debugging).
    # Each extra level of nesting is shown with one more leading dash.
    for k,v in d.items():
        print("-"*depth,k)
        if type(v) is defaultdict:
            # Recurse into nested defaultdicts only; plain dicts are leaves.
            iter_all(v,depth+1)
        else:
            # Leaf value (e.g. a word string or the book's year).
            print("-"*(depth+1),v)
# Machine States
def error(text):
    """Terminal error state: report that parsing hit an unidentifiable line.

    The text argument (the shared file object) is unused; it is kept for the
    uniform handler signature.

    BUG FIX: the original wrote 'Unidentifiable line:\\n' + line, but `line`
    is undefined in this scope, so entering this state raised NameError.
    """
    sys.stderr.write('Unidentifiable line\n')
def end(text):
    # Terminal success state: announce completion.  The text argument is
    # unused; it is kept for the uniform handler signature.
    sys.stdout.write('Processing Successful\n')
def book(text):
    # Book state: reads through a book's front matter.  Detects BOOK /
    # EPILOGUE headings, bumps the book index, records the book's year in
    # nested_dict, and hands off to the chapter state after 5 consecutive
    # blank lines.  Resets the chapter index for the new book.
    global chapter_n
    global book_n
    global year
    global nested_dict
    chapter_n = 0
    fp = text
    blankline_count = 0
    while 1:
        line = fp.readline()
        if line in ['\n', '\r\n']: blankline_count += 1
        else: blankline_count = 0
        if line[:4] == 'BOOK' or line[:14] == 'FIRST EPILOGUE' or line[:15] == 'SECOND EPILOGUE':
            book_n += 1
            if line[:4] == 'BOOK' or line[:14] == 'FIRST EPILOGUE':
                # Heading looks like 'BOOK ONE: 1805' — keep the text after ': '
                year = line[line.find(':')+2:-1]
            # Second Epilogue has no year so set year to 0
            if line[:15] == 'SECOND EPILOGUE':
                year = 0
            nested_dict[book_n]['year'] = year
        if blankline_count == 5: return chapter, fp
        else: continue
def chapter(text):
    """Chapter state: bump the chapter counter, reset the paragraph counter,
    skip ahead to the first blank line, then hand off to the paragraph state."""
    global chapter_n
    global paragraph_n
    chapter_n += 1
    paragraph_n = 0
    stream = text
    while True:
        current = stream.readline()
        if current in ('\n', '\r\n'):
            return paragraph, stream
def paragraph(text):
    """Paragraph state: accumulate lines until a blank line, then hand the
    collected paragraph text (plus the stream) to the sentence state."""
    global paragraph_n
    paragraph_n += 1
    stream = text
    collected = ''
    while True:
        current = stream.readline()
        if current in ('\n', '\r\n'):
            return sentence, (collected, stream)
        collected += current
def sentence(text):
    # Sentence state: tokenize the accumulated paragraph into sentences and
    # words (punctuation filtered out), store both in nested_dict under
    # book/chapter/paragraph/sentence indices, then go to end_paragraph.
    global nested_dict
    paragraph, fp = text
    sentences = tokenize.sent_tokenize(paragraph)
    for i, sent in enumerate(sentences):
        sent = unidecode(sent)  # transliterate non-ASCII characters
        sent = sent.replace('\n',' ') # Replace \n character with space
        # Sentence indices are 1-based.
        nested_dict[book_n][chapter_n][paragraph_n][i+1]['sentence'] = sent
        sent = sent.replace('-',' ') # Replace hyphens with spaces so hyphenated words split
        words = [w for w in tokenize.word_tokenize(sent[:-1].lower()) \
            if w not in punct] # sent[:-1] remove .!?
        for j, word in enumerate(words):
            # Word indices are 1-based, like sentence indices.
            nested_dict[book_n][chapter_n][paragraph_n][i+1][j+1] = word
    return end_paragraph, fp
def end_paragraph(text):
    # State at the end of a paragraph, after sentences and words are stored.
    # Peeks ahead to decide the next state: a new book, a new chapter,
    # another paragraph, or (after a long run of blank lines) the end state.
    fp = text
    index = fp.tell()  # remember position so lookahead lines can be re-read
    blankline_count = 0
    while 1:
        line = fp.readline()
        # More than 8 consecutive blank lines => end of the text body.
        if blankline_count > 8:
            fp.seek(index)
            return end, fp
        if line[:4] == 'BOOK' or line[:14] == 'FIRST EPILOGUE' or line[:15] == 'SECOND EPILOGUE':
            # Rewind so the book state re-reads the heading line itself.
            fp.seek(index)
            return book, fp
        if line[:7] == 'CHAPTER':
            return chapter, fp
        if line in ['\n', '\r\n']:
            blankline_count += 1
            index = fp.tell() # Sometimes paragraphs are separated by more than 1 blankspace
        else:
            # Content line: rewind one line and treat it as a new paragraph.
            fp.seek(index)
            return paragraph, fp
if __name__== "__main__":
    # Wire up the parsing state machine: book -> chapter -> paragraph ->
    # sentence -> end_paragraph, with error/end as terminal states, then
    # run it over the book body and serialize the result as JSON.
    m = StateMachine()
    m.add_state(book)
    m.add_state(chapter)
    m.add_state(paragraph)
    m.add_state(sentence)
    m.add_state(end_paragraph)
    m.add_state(error, end_state=1)
    m.add_state(end, end_state=1)
    m.set_start(book)
    m.run('warandpeace_body.txt')
    final_dict = dictify(nested_dict) # Reformat defaultdict to normal dictionary
    with open('textbody_dict.json', 'w') as fp:
        json.dump(final_dict, fp, indent=4)
| {"/header_parser.py": ["/statemachine.py"], "/book_parser.py": ["/statemachine.py"]} |
65,174 | Data-Scopes/scale | refs/heads/master | /scripts/liwc.py | from collections import defaultdict
import LIWCtools.LIWCtools as liwc_tools
import pandas as pd
class LIWC(object):
    """Wrapper around an LIWC dictionary (via LIWCtools) that indexes words,
    wildcard entries ('run*'), and their categories for fast lookup.

    BUG FIX: in_dict() compared word.startswith(wildcard_word) with the
    trailing '*' still attached, so wildcard entries never matched there
    (has_categories already stripped it with wildcard_word[:-1]).
    """

    def __init__(self, dict_filename: str):
        self.LD = liwc_tools.LDict(dict_filename, encoding='utf-8')
        self.liwc_cat = {}
        # word -> {category -> True}
        self.in_cat = defaultdict(lambda: defaultdict(bool))
        # first prefix_size chars -> {wildcard entry -> True}
        self.wildcard_word_prefix = defaultdict(lambda: defaultdict(bool))
        self.prefix_size = 2
        self.set_liwc_cat()

    def set_liwc_cat(self):
        """Index every dictionary word by category; wildcard entries are also
        indexed by their first prefix_size characters for quick candidate
        lookup in in_dict/has_categories."""
        for cat in self.LD.catDict.catDict:
            cat_dict_name, cat_dict_words = self.LD.catDict.catDict[cat]
            self.liwc_cat[cat_dict_name] = cat_dict_words
            for word in cat_dict_words:
                if word.endswith('*'):
                    self.wildcard_word_prefix[word[:self.prefix_size]][word] = True
                self.in_cat[word][cat_dict_name] = True

    def in_dict(self, word):
        """Return True when word matches a dictionary entry, either exactly
        or via a wildcard prefix entry."""
        if word in self.in_cat:
            return True
        if word[:self.prefix_size] in self.wildcard_word_prefix:
            for wildcard_word in self.wildcard_word_prefix[word[:self.prefix_size]]:
                # Strip the trailing '*' before the prefix comparison.
                if word.startswith(wildcard_word[:-1]):
                    return True
        return False

    def get_word_cats(self, word):
        """Return the categories recorded for an exact dictionary entry."""
        return list(self.in_cat[word].keys())

    def has_categories(self, word):
        """Return all categories for word: exact-entry categories plus those
        of any wildcard entry whose prefix matches."""
        cats = []
        if word in self.in_cat:
            cats = self.get_word_cats(word)
        if word[:self.prefix_size] in self.wildcard_word_prefix:
            for wildcard_word in self.wildcard_word_prefix[word[:self.prefix_size]]:
                if word.startswith(wildcard_word[:-1]):
                    cats += self.get_word_cats(wildcard_word)
        return cats

    def text_dict_to_liwc_dataframe(self, text_dict):
        """Run LIWC category counting over {text_id: text} and return a
        DataFrame with one row per text_id (missing categories filled with 0)."""
        counts = {text_id: self.LD.LDictCountString(text)
                  for text_id, text in text_dict.items()}
        return pd.DataFrame(counts).transpose().fillna(0)
| {"/scripts/text_tail_analysis.py": ["/scripts/liwc.py"]} |
65,175 | Data-Scopes/scale | refs/heads/master | /scripts/pmi.py | from typing import Iterable, List, Union
from collections import Counter, OrderedDict
from itertools import combinations
import math
def count_tokens(token_sets: List[List[str]]) -> Counter:
    """Return a Counter of token frequencies accumulated over all token sets."""
    totals = Counter()
    for group in token_sets:
        totals.update(group)
    return totals
def count_token_cooc(token_sets: List[List[str]]) -> Counter:
    """Return a Counter of co-occurring token pairs: every 2-combination
    (original order preserved) within each token set counts once."""
    pair_freq = Counter()
    for group in token_sets:
        pair_freq.update(combinations(group, 2))
    return pair_freq
class PMICOOC(object):
    """Pointwise mutual information (PMI) over token co-occurrences.

    Builds term and pair probabilities from token_sets and exposes the pairs
    in descending PMI order.  filter_terms, when given, restricts scoring to
    pairs whose terms are both in that collection.

    BUG FIXES vs. the original:
    - the filter_terms default was the typing object `Union[None, Iterable]`
      itself — it is truthy and does not support `in`, so constructing
      PMICOOC without an explicit filter_terms raised TypeError;
    - highest(num) returned num + 1 entries (it broke *after* inserting the
      (num+1)-th item).
    """

    def __init__(self, token_sets: List[List[str]], filter_terms: Union[None, Iterable] = None):
        self.token_freq = count_tokens(token_sets)
        self.cooc_freq = count_token_cooc(token_sets)
        self.total_words = sum(self.token_freq.values())
        self.total_coocs = sum(self.cooc_freq.values())
        self.term_prob = {term: freq / self.total_words for term, freq in self.token_freq.items()}
        self.cooc_prob = {term_pair: freq / self.total_coocs for term_pair, freq in self.cooc_freq.items()}
        pmi = {}
        for term_pair, freq in self.cooc_freq.most_common():
            term1, term2 = term_pair
            if filter_terms and (term1 not in filter_terms or term2 not in filter_terms):
                continue
            # PMI = log( P(t1,t2) / (P(t1) * P(t2)) )
            pmi[term_pair] = math.log(self.cooc_prob[term_pair] / (self.term_prob[term1] * self.term_prob[term2]))
        self.pmi_cooc = OrderedDict(
            {term_pair: score for term_pair, score in sorted(pmi.items(), key=lambda x: x[1], reverse=True)})
        self.sorted_terms = list(self.pmi_cooc.keys())

    def __getitem__(self, item):
        # A pair key returns its PMI score; anything else is a rank index.
        return self.pmi_cooc[item] if item in self.pmi_cooc else self.sorted_terms[item]

    def items(self):
        return self.pmi_cooc.items()

    def highest(self, num: int) -> OrderedDict:
        """Return the num highest-PMI pairs, in descending PMI order."""
        top = OrderedDict()
        for ki, key in enumerate(self.pmi_cooc):
            if ki == num:
                break
            top[key] = self.pmi_cooc[key]
        return top
| {"/scripts/text_tail_analysis.py": ["/scripts/liwc.py"]} |
65,176 | Data-Scopes/scale | refs/heads/master | /scripts/helper.py | from typing import Iterator, Iterable, Tuple, Sized, Union
from elasticsearch import Elasticsearch
from collections import OrderedDict
import math
import numpy as np
import gzip
import json
import csv
def read_json(data_file: str) -> Iterator:
    """Yield one parsed JSON document per line of a JSON-lines file.

    Files whose name ends in .gz are transparently decompressed.

    FIX: use a with-block so the file handle is closed even when the
    consumer abandons the generator or a line fails to parse (the original
    only closed the handle after complete, successful iteration).
    """
    opener = gzip.open if data_file.endswith('.gz') else open
    with opener(data_file, 'rt') as fh:
        for line in fh:
            yield json.loads(line.strip())
def read_csv(data_file: str) -> Iterator:
    """Yield one {header: value} dict per row of a tab-separated file.

    The first row supplies the headers.  Files ending in .gz are
    transparently decompressed.

    FIXES: a with-block guarantees the handle is closed even when the
    generator is abandoned, and an empty file now yields nothing instead of
    raising (next() on an exhausted reader inside a generator becomes
    RuntimeError under PEP 479).
    """
    opener = gzip.open if data_file.endswith('.gz') else open
    with opener(data_file, 'rt') as fh:
        reader = csv.reader(fh, delimiter='\t')
        headers = next(reader, None)
        if headers is None:  # empty file: no header row, nothing to yield
            return
        for row in reader:
            yield {header: row[hi] for hi, header in enumerate(headers)}
def ecdf(data: Union[np.ndarray, Sized], reverse: bool = False) -> Tuple[Iterable, Iterable]:
    """Compute the empirical CDF of a one-dimensional sample.

    Returns (x, y): the sorted sample values (reversed when reverse=True)
    and the cumulative fractions 1/n .. 1.  Adapted from Eric Ma's Bayesian
    statistics tutorial at SciPy 2019
    (https://github.com/ericmjl/bayesian-stats-modelling-tutorial).
    """
    sample_size = len(data)
    # Cumulative fractions for the ECDF's y-axis.
    y = np.arange(1, sample_size + 1) / sample_size
    x = np.sort(data)
    if reverse:
        x = np.flipud(x)
    return x, y
def scroll_hits(es: Elasticsearch, query: dict, index: str, size: int = 100) -> iter:
    """Yield every hit for query from index, paging size hits at a time via
    the Elasticsearch scroll API (scroll context kept alive for 2 minutes)."""
    response = es.search(index=index, scroll='2m', size=size, body=query)
    sid = response['_scroll_id']
    scroll_size = response['hits']['total']
    print('total hits:', scroll_size)
    # Depending on the ES version, 'total' may be an int or a dict of the
    # form {'value': N, ...} — normalize to an int.
    if type(scroll_size) == dict:
        scroll_size = scroll_size['value']
    # Start scrolling; stop once a page comes back empty.
    while scroll_size > 0:
        for hit in response['hits']['hits']:
            yield hit
        response = es.scroll(scroll_id=sid, scroll='2m')
        # Update the scroll ID
        sid = response['_scroll_id']
        # Number of results in the last page; 0 terminates the loop.
        scroll_size = len(response['hits']['hits'])
| {"/scripts/text_tail_analysis.py": ["/scripts/liwc.py"]} |
65,177 | Data-Scopes/scale | refs/heads/master | /scripts/text_tail_analysis.py | from typing import Dict, Iterable, Iterator, List, Set, Union
from collections import Counter, defaultdict
from spacy.tokens import Doc, DocBin, Span, Token
from pandas import DataFrame
import math
import time
from scripts.liwc import LIWC
def get_dataframe_review_texts(df: DataFrame) -> Iterator[str]:
    """Yield the 'review_text' value of every row of df, in row order."""
    text_col = list(df.columns).index('review_text')
    for row_idx in range(len(df)):
        yield df.iloc[row_idx, text_col]
def get_doc_content_chunks(spacy_doc: Doc) -> Iterator[List[Union[Token, Span]]]:
    """Get content chunks per sentence for all sentences in spacy_doc."""
    # Index the doc's noun chunks by their start token index, and record
    # every token index covered by any noun chunk, so each sentence can be
    # walked token by token without re-scanning the chunks.
    ncs_start_index = {nc.start: nc for nc in spacy_doc.noun_chunks}
    ncs_token_index = {t.i for nc in spacy_doc.noun_chunks for t in nc}
    for sent in spacy_doc.sents:
        yield get_sent_content_chunks(sent, ncs_start_index, ncs_token_index)
def get_sent_content_chunks(sent: Span, ncs_start_index: Dict[int, Span],
                            ncs_token_index: Set[int]) -> List[Union[Token, Span]]:
    """Get content chunks for a spacy sentence: whole noun chunks plus
    content-bearing single tokens (VERB/ADJ/ADP/ADV, non-stopword), in
    sentence order.  ncs_start_index maps chunk start token index -> chunk;
    ncs_token_index is the set of all token indices inside any chunk."""
    ordered_chunks = []
    for token in sent:
        if token.i in ncs_start_index:
            # if token is start element of noun_chunk, add whole noun_chunk to list
            ordered_chunks.append(ncs_start_index[token.i])
        elif token.i in ncs_token_index:
            # if token is non-start element of noun_chunk, skip it
            continue
        elif token.pos_ in ['VERB', 'ADJ', 'ADP', 'ADV'] and not token.is_stop:
            # if token is not part of a noun chunk and not an auxiliary or stop word, add it
            ordered_chunks.append(token)
    return ordered_chunks
def get_word_tokens(doc: Doc) -> List[Token]:
    """Return only the alphabetic tokens that are neither stopwords nor
    punctuation."""
    return [tok for tok in doc
            if tok.is_alpha and not tok.is_stop and not tok.is_punct]
def get_doc_word_token_set(doc: Doc, use_lemma=False) -> Set[str]:
    """Return the set of distinct word *strings* in doc — lemmas when
    use_lemma is True, surface forms otherwise.  Only word tokens count
    (see get_word_tokens).  Annotation corrected: the elements are strings
    (token.text / token.lemma_), not Token objects."""
    return set([token.lemma_ if use_lemma else token.text for token in get_word_tokens(doc)])
def filter_pos(tokens: Iterable[Token], include_pos: List[str]):
    """Return only the tokens whose coarse POS tag appears in include_pos."""
    kept = []
    for tok in tokens:
        if tok.pos_ in include_pos:
            kept.append(tok)
    return kept
def get_lemmas(tokens: Iterable[Token]):
    """Return each token's lemma, except pronouns, which keep their surface
    text (spacy lemmatizes pronouns to a placeholder)."""
    lemmas = []
    for tok in tokens:
        lemmas.append(tok.text if tok.pos_ == 'PRON' else tok.lemma_)
    return lemmas
def get_lemma_pos(tokens: Iterable[Token], keep_pron: bool = False):
    """Return (lemma, pos) tuples for the tokens.  With keep_pron, pronouns
    contribute their surface text instead of the lemma placeholder."""
    pairs = []
    for tok in tokens:
        if keep_pron and tok.pos_ == 'PRON':
            pairs.append((tok.text, tok.pos_))
        else:
            pairs.append((tok.lemma_, tok.pos_))
    return pairs
def has_lemma_pos(token_iter: Iterable[Token], lemma: str, pos: str) -> bool:
    """Return True when any token carries both the given lemma and POS tag."""
    return any(tok.lemma_ == lemma and tok.pos_ == pos for tok in token_iter)
def sentence_iter(docs: List[Doc]):
    """Yield every sentence of every doc, in document order."""
    for doc in docs:
        yield from doc.sents
def get_lemma_pos_tf_index(docs: List[Doc], keep_pron: bool = False) -> Counter:
    """Term-frequency Counter of (lemma, pos) over every token in docs."""
    tf = Counter()
    for doc in docs:
        tf.update(get_lemma_pos(doc, keep_pron=keep_pron))
    return tf
def get_lemma_pos_df_index(docs: List[Doc], keep_pron: bool = False) -> Counter:
    """Document-frequency Counter of (lemma, pos): for each pair, the number
    of docs that contain it at least once.

    BUG FIX: the original passed get_doc_word_token_set(doc) — a set of
    *strings* — into get_lemma_pos, which reads .lemma_/.pos_ attributes
    and therefore raised AttributeError.  Instead, compute the (lemma, pos)
    pairs of the doc's word tokens and deduplicate them per document.
    """
    df_lemma_pos = Counter()
    for doc in docs:
        df_lemma_pos.update(set(get_lemma_pos(get_word_tokens(doc), keep_pron=keep_pron)))
    return df_lemma_pos
def show_tail_lemmas(tf_lemma_pos: Counter, tf_threshold: int = 1, pos: str = None, num_lemmas: int = 100):
    """Print (5 per row, up to num_lemmas) the lemmas whose term frequency
    equals tf_threshold, optionally restricted to one POS tag.

    BUG FIXES vs. the original:
    - the POS filter compared `pos == pos` (the comprehension variable
      shadowed the parameter), so the filter never excluded anything;
    - num_lemmas was ignored (100 was hard-coded);
    - fewer than 100 matching lemmas raised IndexError.
    """
    if pos:
        lemmas = [lemma for lemma, lemma_pos in tf_lemma_pos
                  if tf_lemma_pos[(lemma, lemma_pos)] == tf_threshold and lemma_pos == pos]
    else:
        lemmas = [lemma for lemma, lemma_pos in tf_lemma_pos
                  if tf_lemma_pos[(lemma, lemma_pos)] == tf_threshold]
    for i in range(0, min(num_lemmas, len(lemmas)), 5):
        print(''.join([f'{lemma: <16}' for lemma in lemmas[i:i + 5]]))
def show_pos_tail_distribution(tf_lemma_pos):
    """Print a per-POS table of token counts and fractions: all tokens,
    tokens with TF <= 5, and tokens with TF == 1 (the frequency tail).

    FIXES vs. the original: the three totals were recomputed with sum()
    inside the per-POS loop (hoisted out here), and an empty tail caused
    ZeroDivisionError (now printed as 0.0).
    """
    all_pos = defaultdict(int)
    low_pos = defaultdict(int)
    one_pos = defaultdict(int)
    for lemma, pos in tf_lemma_pos:
        freq = tf_lemma_pos[(lemma, pos)]
        all_pos[pos] += freq
        if freq <= 5:
            low_pos[pos] += freq
        if freq == 1:
            one_pos[pos] += freq
    # Hoist the (loop-invariant) totals out of the printing loop.
    all_total = sum(all_pos.values())
    low_total = sum(low_pos.values())
    one_total = sum(one_pos.values())
    print('Word form\tAll TF (frac)\tTF <= 5 (frac)\tTF = 1 (frac)')
    print('------------------------------------------------------------')
    for pos in all_pos:
        all_frac = round(all_pos[pos] / all_total, 2) if all_total else 0.0
        low_frac = round(low_pos[pos] / low_total, 2) if low_total else 0.0
        one_frac = round(one_pos[pos] / one_total, 2) if one_total else 0.0
        all_pos_string = f'\t{all_pos[pos]: > 8}{all_frac: >6.2f}'
        low_pos_string = f'\t{low_pos[pos]: >6}{low_frac: >6.2}'
        one_pos_string = f'\t{one_pos[pos]: >6}{one_frac: >6.2}'
        print(f'{pos: <10}{all_pos_string}{low_pos_string}{one_pos_string}')
def group_by_head(docs: List[Doc], tf_lemma_pos: Counter, token_pos_types: List[str],
                  head_pos_types: List[str] = ['ADJ', 'ADV', 'NOUN', 'PROPN', 'VERB'],
                  max_threshold: Union[None, int] = None, min_threshold: Union[None, int] = None):
    """Iterate over a set of spacy docs and group all terms within a frequency threshold by their head term.
    The head term is based on the Spacy dependency parse.

    Returns {(head_lemma, head_pos): Counter{(token_lemma, token_pos): count}}.
    Tokens are kept only when their POS is in token_pos_types, their
    (lemma, pos) frequency lies within the optional thresholds, and the
    head's POS is in head_pos_types."""
    head_group = defaultdict(Counter)
    for sent in sentence_iter(docs):
        for token in sent:
            # skip tokens with a POS that is not in the accepted token POS list
            if token.pos_ not in token_pos_types:
                continue
            token_lemma_pos = (token.lemma_, token.pos_)
            # skip if the token's lemma+POS is outside optional frequency thresholds
            if max_threshold and token_lemma_pos in tf_lemma_pos and tf_lemma_pos[token_lemma_pos] > max_threshold:
                continue
            if min_threshold and token_lemma_pos in tf_lemma_pos and tf_lemma_pos[token_lemma_pos] < min_threshold:
                continue
            # skip if head POS is not in the accepted head POS list
            if token.head.pos_ not in head_pos_types:
                continue
            head_lemma_pos = (token.head.lemma_, token.head.pos_)
            head_group[head_lemma_pos].update([token_lemma_pos])
    return head_group
def group_by_child(docs: List[Doc], tf_lemma_pos: Counter, token_pos_types: List[str],
                   child_pos_types: List[str] = ['ADJ', 'ADV', 'NOUN', 'PROPN', 'VERB'],
                   max_threshold: Union[None, int] = None, min_threshold: Union[None, int] = None):
    """Iterate over a set of spacy docs and group all terms within a frequency
    threshold by their *child* terms in the Spacy dependency parse
    (docstring corrected: this is the child-direction counterpart of
    group_by_head; the max_threshold annotation was also wrongly str).

    Returns {(child_lemma, child_pos): Counter{(token_lemma, token_pos): count}}."""
    child_group = defaultdict(Counter)
    for sent in sentence_iter(docs):
        for token in sent:
            # skip tokens with a POS that is not in the accepted token POS list
            if token.pos_ not in token_pos_types:
                continue
            token_lemma_pos = (token.lemma_, token.pos_)
            # skip if the token's lemma+POS is outside optional frequency thresholds
            if max_threshold and token_lemma_pos in tf_lemma_pos and tf_lemma_pos[token_lemma_pos] > max_threshold:
                continue
            if min_threshold and token_lemma_pos in tf_lemma_pos and tf_lemma_pos[token_lemma_pos] < min_threshold:
                continue
            # skip if child POS is not in the accepted child POS list
            for child in token.children:
                if child.pos_ not in child_pos_types:
                    continue
                child_lemma_pos = (child.lemma_, child.pos_)
                child_group[child_lemma_pos].update([token_lemma_pos])
    return child_group
# Token attributes serialized into DocBin files by write_docs_to_bin /
# spacy_parse_store_from_dataframe; attributes not listed here are not
# stored.  (The commented-out entries were deliberately excluded.)
attrs = [
    "IS_ALPHA", "IS_PUNCT", "IS_STOP", "IS_SPACE",
    "LENGTH", "LEMMA", "POS", "TAG", "DEP",
    "ENT_IOB", "ENT_TYPE", #"ENT_ID", "ENT_KB_ID",
    "HEAD", "SENT_END", #"SPACY", "PROB", "LANG",
    "IDX",
]
def read_doc_bin(fname: str) -> DocBin:
    """Load a serialized spacy DocBin from the file at fname."""
    with open(fname, 'rb') as fh:
        return DocBin().from_bytes(fh.read())
def read_docs_from_bin(fname: str, nlp) -> List[Doc]:
    """Deserialize the DocBin file at fname into a list of Docs, using the
    vocab of the provided nlp pipeline."""
    doc_bin = read_doc_bin(fname)
    return list(doc_bin.get_docs(nlp.vocab))
def write_docs_to_bin(docs: List[Doc], fname: str) -> None:
    """Serialize docs into a DocBin (storing the module-level attrs) and
    write the bytes to fname."""
    doc_bin = DocBin(attrs=attrs)
    for doc in docs:
        doc_bin.add(doc)
    payload = doc_bin.to_bytes()
    with open(fname, 'wb') as fh:
        fh.write(payload)
def spacy_parse_store_from_dataframe(fname, df, nlp, chunk_size=10000):
    """Parse every review text in df with nlp and store the resulting docs
    in DocBin files of chunk_size docs each, named f'{fname}_{chunk}'.
    Progress is printed every 1000 reviews.

    BUG FIX: the original computed `chunks = math.ceil(len(df))` — one
    chunk per ROW — so it wrote len(df) files, all but the first containing
    empty slices.  The hard-coded 10000 is now the chunk_size parameter
    (default unchanged, so existing callers behave the same).
    """
    chunks = math.ceil(len(df) / chunk_size)
    start_time = time.time()
    for chunk in range(chunks):
        start = chunk * chunk_size
        end = start + chunk_size
        chunk_df = df.iloc[start:end, ]
        chunk_fname = fname + f'_{chunk}'
        doc_bin = DocBin(attrs=attrs)
        for ti, text in enumerate(get_dataframe_review_texts(chunk_df)):
            doc_bin.add(nlp(text))
            if (ti + 1) % 1000 == 0:
                print(ti + 1, 'reviews parsed in chunk', chunk, '\ttime:', time.time() - start_time)
        with open(chunk_fname, 'wb') as fh:
            fh.write(doc_bin.to_bytes())
def read_spacy_docs_for_dataframe(fname, df, nlp):
    """Load the serialized docs at fname and key them by df's review_id
    column.  Assumes the DocBin was written from df's rows in order;
    add_review_id_to_spacy_docs raises IndexError on a length mismatch."""
    docs = read_docs_from_bin(fname, nlp)
    return add_review_id_to_spacy_docs(df, docs)
def add_review_id_to_spacy_docs(df, docs):
    """Pair df's review_id column with docs, position by position, and
    return the resulting {review_id: doc} dict.

    Raises IndexError when the two lengths differ.
    """
    if len(df) != len(docs):
        raise IndexError('dataframe and spacy docs list are not the same length!')
    return dict(zip(df.review_id, docs))
def select_dataframe_spacy_docs(df, docs_dict, as_dict=False):
    """Select from docs_dict the docs whose review_id appears in df.
    Returns a {review_id: doc} dict when as_dict is True, else a list of docs.

    BUG FIX: the original filtered with `review_id in review_ids` — testing
    membership in the very set being iterated, which is always True — so a
    review_id missing from docs_dict raised KeyError instead of being
    skipped.  The filter now tests membership in docs_dict.
    """
    review_ids = set(df.review_id)
    if as_dict:
        return {rid: docs_dict[rid] for rid in review_ids if rid in docs_dict}
    return [docs_dict[rid] for rid in review_ids if rid in docs_dict]
def add_data(data, tf_lemma_pos, dep_type, dep_lemma_pos, tail_lemma_pos, dep_tail_count, cat):
    """Append one dependency/tail-word observation to the columnar `data`
    dict (one value appended to each column list)."""
    dep_lemma, dep_pos = dep_lemma_pos
    tail_lemma, tail_pos = tail_lemma_pos
    row = {
        'dependency_type': dep_type,
        'dependency_word': dep_lemma,
        'dependency_pos': dep_pos,
        'dependency_freq': tf_lemma_pos[dep_lemma_pos],
        'tail_word': tail_lemma,
        'tail_pos': tail_pos,
        'tail_freq': tf_lemma_pos[tail_lemma_pos],
        'dep_tail_freq': dep_tail_count,
        'liwc_category': cat,
    }
    for column, value in row.items():
        data[column].append(value)
def get_tail_groupings(doc_list, tf_lemma_pos, token_pos_types, liwc, max_threshold=5, min_threshold=0):
    """Build a columnar table of low-frequency ('tail') words grouped by
    their dependency head and child terms, annotated with LIWC categories
    ('|'-joined, None when the word is not in the LIWC dictionary)."""
    tail_groupings = {'dependency_type': [], 'dependency_word': [], 'dependency_pos': [], 'dependency_freq': [],
                      'tail_word': [], 'tail_pos': [], 'tail_freq': [],
                      'dep_tail_freq': [], 'liwc_category': []}
    # Group tail words in both dependency directions.
    dep_groups = {
        'head': group_by_head(doc_list, tf_lemma_pos, token_pos_types,
                              max_threshold=max_threshold, min_threshold=min_threshold),
        'child': group_by_child(doc_list, tf_lemma_pos, token_pos_types,
                                max_threshold=max_threshold, min_threshold=min_threshold)
    }
    for dep_type in dep_groups:
        for dep_lemma_pos in dep_groups[dep_type]:
            # Skip dependency terms with no associated tail words.
            if len(dep_groups[dep_type][dep_lemma_pos]) < 1:
                continue
            dep_lemma, dep_pos = dep_lemma_pos
            for tail_lemma_pos in dep_groups[dep_type][dep_lemma_pos]:
                dep_tail_count = dep_groups[dep_type][dep_lemma_pos][tail_lemma_pos]
                tail_lemma, tail_pos = tail_lemma_pos
                if not liwc.in_dict(tail_lemma):
                    cat = None
                else:
                    cat = "|".join(liwc.has_categories(tail_lemma))
                add_data(tail_groupings, tf_lemma_pos, dep_type, dep_lemma_pos,
                         tail_lemma_pos, dep_tail_count, cat)
    return tail_groupings
| {"/scripts/text_tail_analysis.py": ["/scripts/liwc.py"]} |
65,199 | IshMSahni/Photo-editor | refs/heads/master | /filters.py | """
SYSC 1005 Fall 2018
Filters for Lab 7. All of these filters were presented during lectures.
"""
from Cimpl import *
from random import randint
def grayscale(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a grayscale copy of image.
    >>> image = load_image(choose_file())
    >>> gray_image = grayscale(image)
    >>> show(gray_image)
    """
    result = copy(image)
    for x, y, (r, g, b) in image:
        # Use the average channel intensity so the gray shade keeps roughly
        # the same brightness as the original colour.
        level = (r + g + b) // 3
        set_color(result, x, y, create_color(level, level, level))
    return result
def weighted_grayscale(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a grayscale copy of image using the luma weights
    0.299 R + 0.587 G + 0.114 B, which track perceived brightness.
    >>> image = load_image(choose_file())
    >>> gray_image = weighted_grayscale(image)
    >>> show(gray_image)
    """
    result = copy(image)
    for x, y, (r, g, b) in image:
        luma = 0.299 * r + 0.587 * g + 0.114 * b
        # create_color converts float arguments to int.
        set_color(result, x, y, create_color(luma, luma, luma))
    return result
#EXERCISE 2
def extreme_contrast(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a copy of image in which every RGB component is pushed to an
    extreme: components below 128 become 0, the rest become 255.

    BUG FIXES: the original wrote 256 (RGB intensities are 0..255) and its
    test `0 < r < 127` sent both 0 and 127 to the bright extreme.
    >>> image = load_image(choose_file())
    >>> contrast_image = extreme_contrast(image)
    >>> show(contrast_image)
    """
    new_image = copy(image)
    for x, y, (r, g, b) in image:
        r = 0 if r < 128 else 255
        g = 0 if g < 128 else 255
        b = 0 if b < 128 else 255
        set_color(new_image, x, y, create_color(r, g, b))
    return new_image
#Exercise 3
def sepia_tint(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a sepia-toned copy of image: convert to weighted grayscale, then
    warm dark, mid, and light tones by boosting red and damping blue.

    BUG FIXES: the original ran set_color/create_color on undefined names
    (r, g, b, x, y, new_image) before the loop, raising NameError; it also
    called weighted_grayscale twice (discarding the first result), and its
    `elif (r > 62 and r < 192)` branch was unreachable because the first
    branch already caught every r >= 62.
    >>> image = load_image(choose_file())
    >>> new_image = sepia_tint(image)
    >>> show(new_image)
    """
    new_image = weighted_grayscale(image)
    for x, y, (r, g, b) in new_image:
        if r < 63:
            # dark tones
            set_color(new_image, x, y, create_color(r * 1.1, g, b * 0.9))
        elif r < 192:
            # mid tones
            set_color(new_image, x, y, create_color(r * 1.15, g, b * 0.85))
        else:
            # light tones
            set_color(new_image, x, y, create_color(r * 1.08, g, b * 0.93))
    return new_image
#EXERCISE 4
def _adjust_component(amount):
""" (int) -> int
Divide the range 0..255 into 4 equal-size quadrants,
and return the midpoint of the quadrant in which the
specified amount lies.
>>> _adjust_component(10)
31
>>> _adjust_component(85)
95
>>> _adjust_component(142)
159
>>> _adjust_component(230)
223
"""
if (amount <64):
return 31
elif (amount >63 and amount <128):
return 95
elif (amount >127 and amount <192):
return 159
else:
return 223
#EXERCISE 5
def posterize(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a "posterized" copy of image: each RGB component is snapped to
    the midpoint of its quadrant, reducing the palette to 4 levels per
    channel.
    >>> image = load_image(choose_file())
    >>> new_image = posterize(image)
    >>> show(new_image)
    """
    result = copy(image)
    for x, y, (r, g, b) in image:
        snapped = create_color(_adjust_component(r),
                               _adjust_component(g),
                               _adjust_component(b))
        set_color(result, x, y, snapped)
    return result
def blur(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a blurred copy of image: every interior pixel becomes the
    average of itself and its 8 neighbours (a 3x3 box filter).  The
    1-pixel border is left untouched so all neighbour indices stay in
    bounds.

    BUG FIX: the original sampled (x + 1, y + 1) for BOTH the top-right and
    bottom-right neighbours; the top-right neighbour is (x + 1, y - 1), so
    one corner was double-counted and another ignored.

    original = load_image(choose_file())
    blurred = blur(original)
    show(blurred)
    """
    target = copy(image)
    for y in range(1, get_height(image) - 1):
        for x in range(1, get_width(image) - 1):
            total_r = total_g = total_b = 0
            # Sum the 3x3 neighbourhood centred on (x, y).
            for dy in (-1, 0, 1):
                for dx in (-1, 0, 1):
                    r, g, b = get_color(image, x + dx, y + dy)
                    total_r += r
                    total_g += g
                    total_b += b
            set_color(target, x, y,
                      create_color(total_r // 9, total_g // 9, total_b // 9))
    return target
def detect_edges(image, threshold):
    new_image = copy(image)
    """ (Cimpl.Image, float) -> Cimpl.Image
    Return a black-and-white copy of image produced by simple edge
    detection: a pixel becomes black when its brightness differs from the
    pixel directly below it by more than threshold, and white otherwise.
    (NOTE(review): this string follows the first statement, so it is not a
    real docstring.)
    >>> image = load_image(choose_file())
    >>> filtered = detect_edges(image, 10.0)
    >>> show(filtered)
    """
    white = create_color(255, 255, 255)
    black = create_color(0, 0, 0)
    # Visit every pixel; stop one row early because each pixel is compared
    # with the one below it.  NOTE(review): comparisons read from new_image,
    # which earlier iterations have already recolored to black/white, so
    # later rows compare against modified pixels — confirm this is intended.
    for y in range(0, get_height(new_image) - 1):
        for x in range(0, get_width(new_image)):
            r, g, b = get_color(new_image, x, y) # RGB of the current pixel
            brightness1 = (r + g + b) / 3
            r, g, b = get_color(new_image, x, y + 1) # RGB of the pixel one row below
            brightness2 = (r + g + b) / 3
            # Large vertical brightness jump => edge => black; else white.
            if ((abs (brightness1 - brightness2)) > threshold):
                set_color(new_image, x, y, black)
            else:
                set_color(new_image,x,y,white)
    return new_image
def detect_edges_better(image, threshold):
    new_image = copy(image)
    """ (Cimpl.Image, float) -> Cimpl.Image
    Return a black-and-white copy of image using edge detection.  Unlike
    detect_edges, each pixel is compared with BOTH the pixel below and the
    pixel to the right.  (NOTE(review): this string follows the first
    statement, so it is not a real docstring.)
    >>> image = load_image(choose_file())
    >>> filtered = detect_edges_better(image, 10.0)
    >>> show(filtered)
    """
    white = create_color(255, 255, 255)
    black = create_color(0, 0, 0)
    # Stop one row and one column early so the below/right lookups stay in
    # bounds.  NOTE(review): as in detect_edges, the comparisons read from
    # new_image, which earlier iterations have already recolored.
    for y in range(0, get_height(new_image) - 1):
        for x in range(0, get_width(new_image) - 1):
            r, g, b = get_color(new_image, x, y)
            brightness = (r + g + b) / 3
            r, g, b = get_color(new_image, x, y + 1)
            brightness_below = (r + g + b) / 3
            r, g, b = get_color(new_image, x + 1, y)
            brightness_right = (r + g + b) / 3
            calculatebelow = (abs(brightness - brightness_below)) # contrast with the pixel below
            calculateright = (abs(brightness - brightness_right)) # contrast with the pixel to the right
            # If either contrast exceeds the threshold, mark the pixel as an edge (black).
            if (calculatebelow > threshold or calculateright > threshold):
                set_color(new_image, x, y, black)
            else:
                set_color(new_image, x, y, white)
    return new_image
def grayscale(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a grayscale copy of image.
    >>> image = load_image(choose_file())
    >>> gray_image = grayscale(image)
    >>> show(gray_image)
    """
    # NOTE(review): duplicate definition — an identical grayscale() appears
    # earlier in this file; this later definition is the one Python keeps.
    new_image = copy(image)
    for x, y, (r, g, b) in image:
        # Use the pixel's brightness as the value of RGB components for the
        # shade of gray. These means that the pixel's original colour and the
        # corresponding gray shade will have approximately the same brightness.
        brightness = (r + g + b) // 3
        # or, brightness = (r + g + b) / 3
        # create_color will convert an argument of type float to an int
        gray = create_color(brightness, brightness, brightness)
        set_color(new_image, x, y, gray)
    return new_image
# The negative filter inverts every component of every pixel.
# The solarizing filter invert only those components that have intensities
# below a specified value.
def negative(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return an inverted copy of image; that is, an image that is a colour
    negative of the original image.
    >>> image = load_image(choose_file())
    >>> filtered = negative(image)
    >>> show(filtered)
    """
    result = copy(image)
    # Inverting replaces each component c with 255 - c.
    for x, y, (r, g, b) in image:
        set_color(result, x, y, create_color(255 - r, 255 - g, 255 - b))
    return result
def solarize(image, threshold):
    """ (Cimpl.Image, int) -> Cimpl.Image
    Return a "solarized" copy of image: RGB components with intensity
    below threshold are inverted; the rest are left unchanged.
    Parameter threshold is in the range 0 to 256, inclusive.
    >>> image = load_image(choose_file())
    >>> filtered = solarize(image)
    >>> show(filtered)
    """
    result = copy(image)
    for x, y, (red, green, blue) in image:
        new_red = 255 - red if red < threshold else red
        new_green = 255 - green if green < threshold else green
        new_blue = 255 - blue if blue < threshold else blue
        set_color(result, x, y, create_color(new_red, new_green, new_blue))
    return result
def black_and_white(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a black-and-white (two-tone) copy of image.
    >>> image = load_image(choose_file())
    >>> filtered = black_and_white(image)
    >>> show(filtered)
    """
    result = copy(image)
    black = create_color(0, 0, 0)
    white = create_color(255, 255, 255)
    # Brightness 0..127 maps to black, 128..255 to white.
    for x, y, (red, green, blue) in image:
        tone = black if (red + green + blue) // 3 < 128 else white
        set_color(result, x, y, tone)
    return result
return new_image
def black_and_white_and_gray(image):
    """ (Cimpl.Image) -> Cimpl.Image
    Return a three-tone copy of image: pixels whose average brightness
    falls in the lower third of the 0-255 range become black, the middle
    third medium-gray, and the upper third white.
    >>> image = load_image(choose_file())
    >>> filtered = black_and_white_and_gray(image)
    >>> show(filtered)
    """
    result = copy(image)
    black = create_color(0, 0, 0)
    gray = create_color(128, 128, 128)
    white = create_color(255, 255, 255)
    for x, y, (red, green, blue) in image:
        brightness = (red + green + blue) // 3
        # Guard-style cascade: 171-255 -> white, 85-170 -> gray, 0-84 -> black.
        if brightness >= 171:
            tone = white
        elif brightness >= 85:
            tone = gray
        else:
            tone = black
        set_color(result, x, y, tone)
    return result
def scatter(image):
    """ (Cimpl.image) -> Cimpl.image
    Return a new image that looks like a copy of an image in which the
    pixels have been randomly scattered: each pixel takes the colour of a
    randomly displaced (within +/-10 in each direction) pixel of the
    original image.
    >>> original = load_image(choose_file())
    >>> scattered = scatter(original)
    >>> show(scattered)
    """
    result = copy(image)
    max_x = get_width(result) - 1
    max_y = get_height(result) - 1
    for x, y, (r, g, b) in result:
        # Keep drawing random offsets in [-10, 10] until the displaced
        # coordinates land inside the picture (rejection sampling).
        while True:
            source_x = x + randint(-10, 10)
            source_y = y + randint(-10, 10)
            if 0 <= source_x <= max_x and 0 <= source_y <= max_y:
                break
        # Always sample colour from the unmodified original image.
        set_color(result, x, y, get_color(image, source_x, source_y))
    return result
| {"/photo_editor.py": ["/filters.py"]} |
65,200 | IshMSahni/Photo-editor | refs/heads/master | /photo_editor.py | # SYSC 1005 A Fall 2018 Lab 7
import sys # get_image calls exit
from Cimpl import *
from filters import *
def get_image():
    """
    Interactively select an image file and return a Cimpl Image object
    containing the image loaded from the file.
    """
    # Pop up a dialogue box so the user can pick a file.
    filename = choose_file()
    if filename == "":
        # Cancel button clicked: nothing to load, so abort the program.
        sys.exit("File Open cancelled, exiting program")
    return load_image(filename)
# A bit of code to demonstrate how to use get_image().
if __name__ == "__main__":
    # Interactive menu loop. A filter may only be applied after an image
    # has been loaded with the L command; Q ends the program.
    running = True
    imageloaded = False
    commands = (" L)oad \n B)lur E)dge detect P)osterize S)catter T)int sepia \n W)eighted Grayscale X)treme contrast Q)uit \n")
    while running:
        answer = input(commands)
        if answer in ["B", "E", "P", "S", "T", "W", "X"]:
            # Filter commands require a previously loaded image.
            if not imageloaded:
                print("Sorry there is no image loaded")
                continue
            if answer == "B":  # Blur filter
                # Run the blur 5 times to make the effect more noticeable.
                for _ in range(5):
                    img = blur(img)
            elif answer == "E":  # Edge detection (asks for a threshold)
                threshold = int(input("Enter a number for for the distance of the edge of the photo: "))
                img = detect_edges_better(img, threshold)
            elif answer == "P":  # Posterize filter
                img = posterize(img)
            elif answer == "S":  # Scatter filter
                img = scatter(img)
            elif answer == "T":  # Sepia tint filter
                img = sepia_tint(img)
            elif answer == "W":  # Weighted grayscale filter
                img = weighted_grayscale(img)
            elif answer == "X":  # Extreme contrast filter
                img = extreme_contrast(img)
            # Display the result of whichever filter just ran.
            show(img)
        elif answer == "L":  # Load an image from disk
            img = get_image()
            show(img)
            imageloaded = True
        elif answer == "Q":
            print("The program will now exit.")
            # BUG FIX: the original wrote `x == False` (a comparison with
            # no effect), so the Q command never actually ended the loop.
            running = False
        else:
            print(answer, " No such command.")
| {"/photo_editor.py": ["/filters.py"]} |
65,216 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/wavelet_features.py | import pywt
import numpy as np
from astronomaly.base.base_pipeline import PipelineStage
def flatten_swt2_coefficients(wavelet_coeffs):
    """
    A standardised way of flattening the swt2d coefficients.

    The input is structured as n_levels -> (cA, (cH, cV, cD)) where each
    coefficient set is an [npixels, npixels] array. The output stores, for
    each level in order, the flattened cA, cH, cV and cD arrays.

    Parameters
    ----------
    wavelet_coeffs : list
        Exactly as output by pywt

    Returns
    -------
    np.ndarray
        Flattened coefficients
    """
    pixel_count = np.prod(wavelet_coeffs[0][0].shape)
    flat = np.zeros(len(wavelet_coeffs) * 4 * pixel_count)
    position = 0
    for approx, details in wavelet_coeffs:
        # Approximation coefficients come first for every level...
        flat[position:position + pixel_count] = approx.reshape(pixel_count)
        position += pixel_count
        # ...followed by the three detail sets (cH, cV, cD) in order.
        for detail in details:
            flat[position:position + pixel_count] = detail.reshape(pixel_count)
            position += pixel_count
    return flat
def generate_labels(wavelet_coeffs):
    """
    Build the feature labels matching flatten_swt2_coefficients' layout.

    Because the number of features may not be known till runtime, we can
    only create the labels of these features at runtime. Labels take the
    form c<A|H|V|D><level>_<pixel index>.
    """
    pixel_count = np.prod(wavelet_coeffs[0][0].shape)
    # Same base array construction as the flattened features use.
    labels = np.zeros(len(wavelet_coeffs) * 4 * pixel_count).astype('str')
    coefficient_names = ['A', 'H', 'V', 'D']
    position = 0
    for lev in range(len(wavelet_coeffs)):
        for name in coefficient_names:
            labels[position:position + pixel_count] = [
                'c%s%d_%d' % (name, lev, i) for i in range(pixel_count)]
            position += pixel_count
    return labels
def reshape_swt2_coefficients(flat_coeffs, nlev, image_shape):
    """
    Inverse function to restore a flattened array to pywt structure.

    Parameters
    ----------
    flat_coeffs : np.ndarray
        Flattened array of coefficients
    nlev : int
        Number of levels wavelet decomposition was performed with
    image_shape : tuple
        Shape of original images

    Returns
    -------
    list
        pywt compatible coefficient structure: per level,
        [approx, [detail_H, detail_V, detail_D]]
    """
    pixel_count = np.prod(image_shape)
    levels = []
    for lev in range(nlev):
        offset = 4 * lev * pixel_count
        # Slot 0 of each level holds the approximation coefficients.
        approx = flat_coeffs[offset:offset + pixel_count].reshape(image_shape)
        # Slots 1-3 hold the three detail coefficient sets.
        details = [
            flat_coeffs[offset + det * pixel_count:
                        offset + (det + 1) * pixel_count].reshape(image_shape)
            for det in range(1, 4)]
        levels.append([approx, details])
    return levels
def wavelet_decomposition(img, level=2, wavelet_family='sym2'):
    """
    Perform a 2d stationary wavelet transform on a single image.

    Parameters
    ----------
    img : np.ndarray
        Image
    level : int, optional
        Level of depth for the wavelet transform
    wavelet_family : string or pywt.Wavelet object
        Which wavelet family to use

    Returns
    -------
    list
        Coefficients exactly as returned by pywt.swt2
        (n_levels -> (cA, (cH, cV, cD)))
    """
    return pywt.swt2(img, wavelet_family, level=level)
class WaveletFeatures(PipelineStage):
    def __init__(self, level=2, wavelet_family='sym2', **kwargs):
        """
        Performs a stationary wavelet transform.

        Parameters
        ----------
        level : int, optional
            Level of depth for the wavelet transform
        wavelet_family : string or pywt.Wavelet object
            Which wavelet family to use
        """
        super().__init__(level=level, wavelet_family=wavelet_family, **kwargs)
        self.level = level
        self.wavelet_family = wavelet_family
        # BUG FIX: _execute_function reads self.labels before ever
        # assigning it. Initialise it here, but only if the base class
        # has not already provided it (e.g. restored from a previous run
        # - TODO confirm against PipelineStage).
        if not hasattr(self, 'labels'):
            self.labels = []

    def _execute_function(self, image):
        """
        Does the work in actually extracting the wavelets.

        Parameters
        ----------
        image : np.ndarray
            Input image (2d greyscale, or 3d with colour bands last)

        Returns
        -------
        np.ndarray or list
            The flattened wavelet coefficients (all bands concatenated
            for multiband input)
        """
        # Here I'm explicitly assuming any multi-d images store the colours
        # in the last dim
        if len(image.shape) == 2:
            # Greyscale-like image
            coeffs = wavelet_decomposition(image, level=self.level,
                                           wavelet_family=self.wavelet_family)
            flattened_coeffs = flatten_swt2_coefficients(coeffs)
            # Labels are only generated once, on the first image processed.
            if len(self.labels) == 0:
                self.labels = generate_labels(coeffs)
            return flattened_coeffs
        else:
            wavs_all_bands = []
            all_labels = []
            # BUG FIX: the original looped over range(len(image.shape[2]))
            # (len() of an int raises TypeError) and decomposed the whole
            # multiband cube instead of one band at a time.
            for band in range(image.shape[2]):
                coeffs = wavelet_decomposition(
                    image[:, :, band], level=self.level,
                    wavelet_family=self.wavelet_family)
                flattened_coeffs = flatten_swt2_coefficients(coeffs)
                wavs_all_bands += list(flattened_coeffs)
                if len(self.labels) == 0:
                    labels = generate_labels(coeffs)
                    # BUG FIX: range(labels) -> range(len(labels))
                    all_labels += ['%s_band_%d' % (labels[i], band)
                                   for i in range(len(labels))]
            if len(self.labels) == 0:
                self.labels = all_labels
            return wavs_all_bands
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,217 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/shape_features.py | import numpy as np
import cv2
from astronomaly.base.base_pipeline import PipelineStage
from astronomaly.base import logging_tools
def find_contours(img, threshold):
    """
    Finds the contours of an image that meet a threshold.

    Parameters
    ----------
    img : np.ndarray
        Input image (must be greyscale)
    threshold : float
        What threshold to use

    Returns
    -------
    contours
        opencv description of contours (each contour is a list of x,y values
        and there may be several contours, given as a list of lists)
    hierarchy
        opencv description of how contours relate to each other (see opencv
        documentation)
    """
    # Binarise: pixels strictly above the threshold become 1, the rest
    # stay 0 (the array is zero-initialised).
    mask = np.zeros(img.shape, dtype=np.uint8)
    mask[img > threshold] = 1
    contours, hierarchy = cv2.findContours(mask,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    return contours, hierarchy
def fit_ellipse(contour, image, return_params=False, filled=True):
    """
    Fits an ellipse to a contour and returns a binary image representation of
    the ellipse.

    Parameters
    ----------
    contour : np.ndarray
        Array of x,y values describing the contours (as returned by opencv's
        findCountours function)
    image : np.ndarray
        The original image the contour was fit to.
    return_params : bool
        If true also returns the parameters of the fitted ellipse
    filled : bool
        If true the ellipse is drawn filled on a blank canvas, otherwise
        its outline is drawn on a copy of the image.

    Returns
    -------
    np.ndarray
        2d binary image with representation of the ellipse
    list, optional
        [x0, y0, maj_axis, min_axis, theta] if return_params is True
        (all NaN on failure)
    """
    if filled:
        thickness = -1
        y_npix = image.shape[0]
        x_npix = image.shape[1]
        # BUG FIX: np.float was removed in modern numpy (>=1.24); the
        # builtin float is the equivalent dtype.
        ellipse_arr = np.zeros([y_npix, x_npix], dtype=float)
    else:
        thickness = 1
        ellipse_arr = image.copy()
    # Sets some defaults for when the fitting fails
    default_return_params = [np.nan] * 5
    raised_error = False
    fit_succeeded = False
    try:
        ((x0, y0), (maj_axis, min_axis), theta) = cv2.fitEllipse(contour)
        fit_succeeded = True
        ellipse_params = x0, y0, maj_axis, min_axis, theta
        if np.any(np.isnan(ellipse_params)) or y0 < 0 or x0 < 0:
            raised_error = True
            logging_tools.log('fit_ellipse failed with unknown error:')
    except cv2.error as e:
        logging_tools.log('fit_ellipse failed with cv2 error:' + e.msg)
        raised_error = True
    # BUG FIX: this bounds check used to run even when cv2.fitEllipse
    # raised, hitting a NameError on the unset x0/y0. NOTE(review): both
    # coordinates are compared against len(image) (the number of rows),
    # as in the original - confirm whether x0 should use the width.
    if fit_succeeded and (x0 > len(image) or y0 > len(image)):
        raised_error = True
        logging_tools.log('fit_ellipse failed with unknown error:')
    if raised_error:
        if return_params:
            return ellipse_arr, default_return_params
        else:
            return ellipse_arr
    # cv2.ellipse requires integer parameters and takes semi-axes.
    x0 = int(np.round(x0))
    y0 = int(np.round(y0))
    maj_axis = int(np.round(maj_axis))
    min_axis = int(np.round(min_axis))
    theta = int(np.round(theta))
    cv2.ellipse(ellipse_arr, (x0, y0), (maj_axis // 2, min_axis // 2),
                theta, 0, 360, (1, 1, 1), thickness)
    if return_params:
        return ellipse_arr, ellipse_params
    else:
        return ellipse_arr
def get_ellipse_leastsq(contour, image):
    """
    Fits an ellipse to a (single) contour and returns the sum of the
    differences squared between the fitted ellipse and contour (normalised).

    Parameters
    ----------
    contour : np.ndarray
        Array of x,y values describing the contours (as returned by opencv's
        findCountours function)
    image : np.ndarray
        The original image the contour was fit to.

    Returns
    -------
    list
        [sum((ellipse-contour)^2)/number_of_pixels, x0, y0, maj_axis,
        min_axis, theta]; the residual is NaN when the ellipse fit failed.
    """
    thickness = -1
    y_npix = image.shape[0]
    x_npix = image.shape[1]
    # BUG FIX: np.float was removed in modern numpy (>=1.24); the builtin
    # float is the equivalent dtype.
    contour_arr = np.zeros([y_npix, x_npix], dtype=float)
    cv2.drawContours(contour_arr, [contour], 0, (1, 1, 1), thickness)
    ellipse_arr, params = fit_ellipse(contour, image, return_params=True)
    if np.any(np.isnan(params)):
        res = np.nan
    else:
        # Normalised sum of squared differences between the filled
        # ellipse and the filled contour.
        arr_diff = ellipse_arr - contour_arr
        res = np.sum((arr_diff)**2) / np.prod(contour_arr.shape)
    return [res] + list(params)
def draw_contour(contour, image, filled=False):
    """
    Draws a contour onto an image for diagnostic purposes.

    Parameters
    ----------
    contour : np.ndarray
        Array of x,y values describing the contours (as returned by opencv's
        findCountours function)
    image : np.ndarray
        The original image the contour was fit to.
    filled : bool, optional
        If true will fill in the contour (on a blank canvas) otherwise will
        draw an outline on a copy of the image.

    Returns
    -------
    np.ndarray
        The image with the drawn contour on top
    """
    if filled:
        # A negative thickness tells opencv to fill the contour.
        canvas = np.zeros([image.shape[0], image.shape[1]])
        line_width = -1
    else:
        canvas = image.copy()
        line_width = 1
    cv2.drawContours(canvas, [contour], 0, (1, 1, 1), line_width)
    return canvas
def extract_contour(contours, x0, y0):
    """
    Utility function to determine which contour contains the points given.
    Note by default this will only return the first contour it finds to
    contain x0, y0.

    Parameters
    ----------
    contours : np.ndarray
        Array of x,y values describing the contours (as returned by opencv's
        findCountours function)
    x0 : int
        x value to test
    y0 : int
        y value to test

    Returns
    -------
    contour : np.ndarray
        Returns the single contour that contains (x0,y0)

    Raises
    ------
    TypeError
        If no contour contains the given point.
    """
    for c in contours:
        # pointPolygonTest returns +1 when the point is strictly inside.
        if cv2.pointPolygonTest(c, (x0, y0), False) == 1:
            return c
    # Carry the diagnostic in the exception itself rather than printing it;
    # the exception type is unchanged for backward compatibility.
    raise TypeError('No contour found around points given')
def get_hu_moments(img):
    """
    Extracts the Hu moments for an image. Note this often works best with
    simple, clean shapes like filled contours.

    Parameters
    ----------
    img : np.ndarray
        Input image (must be 2d, no channel information)

    Returns
    -------
    np.ndarray
        The 7 Hu moments for the image, as a flat array
    """
    # Hu moments are derived from the raw image moments.
    return cv2.HuMoments(cv2.moments(img)).flatten()
def check_extending_ellipses(img, threshold, return_params=False):
    """
    Checks and flags images when the fitted ellipse extends beyond the image
    size. Used to check whether the image size (window size) must be
    increased.

    Parameters
    ----------
    img : np.ndarray
        Input image (must be 2d, no channel information)
    threshold : float
        Threshold value for drawing the outermost contour.
    return_params : bool
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    bool
        True when the ellipse extends beyond the image.
    tuple or int
        The original window shape when the ellipse fits (or fitting
        failed), otherwise the recommended new window size.
    """
    width = img.shape[0]
    height = img.shape[1]
    old_window = img.shape
    # Draw onto a canvas 3x the image size so an ellipse that spills over
    # the original frame is still rendered and can be detected.
    new_width = width * 3
    new_height = height * 3
    # BUG FIX: np.float was removed in modern numpy (>=1.24); the builtin
    # float is the equivalent dtype.
    blank_canvas = np.zeros((new_width, new_height), dtype=float)
    contours, hierarchy = find_contours(img, threshold)
    raised_error = False
    try:
        ((x0, y0), (maj_axis, min_axis), theta) = cv2.fitEllipse(
            np.float32(contours[0]))
        ellipse_params = x0, y0, maj_axis, min_axis, theta
        if np.any(np.isnan(ellipse_params)) or y0 < 0 or x0 < 0:
            raised_error = True
            logging_tools.log('fit_ellipse failed with unknown error:')
    except cv2.error as e:
        logging_tools.log('fit_ellipse failed with cv2 error:' + e.msg)
        raised_error = True
    if raised_error:
        # No ellipse could be fit at all, so nothing can extend beyond the
        # image; report the current window unchanged.
        contour_extends = False
        return contour_extends, old_window
    # Shift the ellipse centre into the middle tile of the 3x3 canvas.
    x0_new = int(np.round(x0)) + (int(width))
    y0_new = int(np.round(y0)) + (int(height))
    maj_axis = int(np.round(maj_axis))
    min_axis = int(np.round(min_axis))
    theta = int(np.round(theta))
    ellipse = cv2.ellipse(blank_canvas, (x0_new, y0_new),
                          (maj_axis // 2, min_axis // 2
                           ), theta, 0, 360, (1, 1, 1), 1)
    # Blank out the central tile (the original image area); any remaining
    # non-zero pixel belongs to a part of the ellipse outside the image.
    ellipse[int(width*1):int(width*2), int(height*1):int(height*2)] = 0
    if ellipse.any() != 0:
        # Recommend a window covering the ellipse (plus its offset from
        # the image centre) with a 25% margin.
        dif = np.sqrt((x0 - width/2)**2 + (y0 - height/2)**2)
        new_window = int((max(min_axis, maj_axis) + dif) * 1.25)
        contour_extends = True
        return contour_extends, new_window
    else:
        contour_extends = False
        return contour_extends, old_window
class EllipseFitFeatures(PipelineStage):
    def __init__(self, percentiles=[90, 70, 50, 0], channel=None,
                 upper_limit=100, check_for_extended_ellipses=False,
                 **kwargs):
        """
        Computes a fit to an ellipse for an input image. Translation and
        rotation invariate features. Warning: it's strongly recommended to
        apply a sigma-clipping transform before running this feature extraction
        algorithm.

        Parameters
        ----------
        channel : int
            Specify which channel to use for multiband images
        percentiles : array-like
            What percentiles to use as thresholds for the ellipses
        check_for_extended_ellipses : boolean
            Activates the check that determins whether or not the outermost
            ellipse extends beyond the image
        upper_limit : int
            Sets the upper limit to the up-scaling feature of the class. Used
            when there are not enough pixels available to fit an ellipse.
            Default is 100.
        """
        # NOTE(review): percentiles is a mutable default argument; it is
        # never mutated here so this is harmless, but worth confirming.
        super().__init__(percentiles=percentiles, channel=channel,
                         check_for_extended_ellipses=check_for_extended_ellipses,
                         upper_limit=upper_limit, **kwargs)
        self.percentiles = percentiles
        self.labels = []
        # Five feature kinds, each produced once per percentile threshold.
        feat_labs = ['Residual_%d', 'Offset_%d',
                     'Aspect_%d', 'Theta_%d', 'Maj_%d']
        self.feat_labs = feat_labs
        for f in feat_labs:
            for n in percentiles:
                self.labels.append(f % n)
        self.channel = channel
        self.check_for_extended_ellipses = check_for_extended_ellipses
        self.upper_limit = upper_limit
        if check_for_extended_ellipses:
            # Two extra outputs: a flag and a recommended new window size.
            self.labels.append('Warning_Open_Ellipse')
            self.labels.append('Recommended_Window_Size')

    def _execute_function(self, image):
        """
        Does the work in actually extracting the ellipse fitted features.

        For each percentile threshold (highest/innermost first) an ellipse
        is fit to the contour closest to the image centre, and the fits
        are then normalised relative to the highest-percentile ellipse.
        If a contour has fewer than the 5 points needed for an ellipse,
        the image is progressively up-scaled (up to upper_limit percent)
        and the fit retried.

        Parameters
        ----------
        image : np.ndarray
            Input image

        Returns
        -------
        array
            Contains the extracted ellipse fitted features (all NaN on
            failure)
        """
        # First check the array is normalised since opencv will cry otherwise
        if len(image.shape) > 2:
            if self.channel is None:
                raise ValueError('Contours cannot be determined for \
                                  multi-channel images, please set the \
                                  channel kwarg.')
            else:
                this_image = image[:, :, self.channel]
        else:
            this_image = image
        # Get rid of possible NaNs
        # this_image = np.nan_to_num(this_image)
        # (x0, y0) tracks the reference contour centre; -1 means "not yet
        # found" and the image centre is used instead.
        x0 = y0 = -1
        x_cent = this_image.shape[0] // 2
        y_cent = this_image.shape[1] // 2
        warning_open_ellipses = []
        new_window = []
        #feats = []
        # NOTE(review): `stop` is never read after this assignment.
        stop = False
        # Up-scaling factors (percent) tried in turn until all
        # percentiles yield a usable contour.
        scale = [i for i in np.arange(100, self.upper_limit + 1, 1)]
        # Start with the closest in contour (highest percentile)
        percentiles = np.sort(self.percentiles)[::-1]
        if np.all(this_image == 0):
            failed = True
            failure_message = "Invalid cutout for feature extraction"
        else:
            failed = False
            failure_message = ""
        for a in scale:
            lst = []
            feats = []
            for p in percentiles:
                lst.append(p)
                # Resize the image by the current scaling factor.
                width = int(image.shape[1] * (a / 100))
                height = int(image.shape[0] * (a / 100))
                dim = (width, height)
                resize = cv2.resize(
                    this_image, dim, interpolation=cv2.INTER_AREA)
                if failed:
                    contours = []
                else:
                    # Threshold at the p'th percentile of non-zero pixels.
                    thresh = np.percentile(resize[resize > 0], p)
                    contours, hierarchy = find_contours(resize, thresh)
                x_contours = np.zeros(len(contours))
                y_contours = np.zeros(len(contours))
                # First attempt to find the central point of the inner most contour
                if len(contours) != 0:
                    for k in range(len(contours)):
                        M = cv2.moments(contours[k])
                        try:
                            x_contours[k] = int(M["m10"] / M["m00"])
                            y_contours[k] = int(M["m01"] / M["m00"])
                        except ZeroDivisionError:
                            pass
                    if x0 == -1:
                        x_diff = x_contours - x_cent
                        y_diff = y_contours - y_cent
                    else:
                        x_diff = x_contours - x0
                        y_diff = y_contours - y0
                    # Will try to find the CLOSEST contour to the central one
                    r_diff = np.sqrt(x_diff**2 + y_diff**2)
                    ind = np.argmin(r_diff)
                    if x0 == -1:
                        x0 = x_contours[ind]
                        y0 = y_contours[ind]
                    c = contours[ind]
                    # Minimum of 5 points are needed to draw a unique ellipse
                    if len(c) < 5:
                        break
                    params = get_ellipse_leastsq(c, resize)
                    # Check whether or not the outermost ellipse extends
                    # beyond the image (percentiles are sorted descending,
                    # so the last one is the outermost contour)
                    if self.check_for_extended_ellipses and p == percentiles[-1]:
                        check, window = check_extending_ellipses(
                            resize, thresh)
                        if check:
                            new_window.append(window)
                            warning_open_ellipses.append(1)
                        else:
                            new_window.append(int(window[1]))
                            warning_open_ellipses.append(0)
                    # Params return in this order:
                    # residual, x0, y0, maj_axis, min_axis, theta
                    if np.any(np.isnan(params)):
                        failed = True
                    else:
                        # Always use an aspect ratio >= 1; values over 100
                        # are treated as degenerate fits.
                        if params[3] == 0 or params[4] == 0:
                            aspect = 1
                        else:
                            aspect = params[4] / params[3]
                        if aspect < 1:
                            aspect = 1 / aspect
                        if aspect > 100:
                            aspect = 1
                        new_params = params[:3] + [aspect] + [params[-1]]
                        feats.append(new_params)
                else:
                    failed = True
                    failure_message = "No contour found"
                if failed:
                    feats.append([np.nan] * 5)
                    logging_tools.log(failure_message)
            # Now we have the leastsq value, x0, y0, aspect_ratio,
            # theta for each sigma
            # Normalise things relative to the highest threshold value
            # If there were problems with any sigma levels,
            # set all values to NaNs
            if np.any(np.isnan(feats)):
                return [np.nan] * len(self.feat_labs) * len(self.percentiles)
            else:
                # NOTE(review): max_ind indexes feats (ordered by sorted
                # percentiles) using the position of the maximum in the
                # user-supplied ordering - confirm these agree when
                # percentiles is passed unsorted.
                max_ind = np.argmax(self.percentiles)
                residuals = []
                dist_to_centre = []
                aspect = []
                theta = []
                maj = []
                x0_max_sigma = feats[max_ind][1]
                y0_max_sigma = feats[max_ind][2]
                aspect_max_sigma = feats[max_ind][3]
                theta_max_sigma = feats[max_ind][4]
                for n in range(len(feats)):
                    prms = feats[n]
                    residuals.append(prms[0])
                    if prms[1] == 0 or prms[2] == 0:
                        r = 0
                    else:
                        x_diff = prms[1] - x0_max_sigma
                        y_diff = prms[2] - y0_max_sigma
                        r = np.sqrt((x_diff)**2 + (y_diff)**2)
                    dist_to_centre.append(r)
                    aspect.append(prms[3] / aspect_max_sigma)
                    theta_diff = np.abs(prms[4] - theta_max_sigma) % 360
                    # Because there's redundancy about which way an ellipse
                    # is aligned, we always take the acute angle
                    if theta_diff > 90:
                        theta_diff -= 90
                    theta.append(theta_diff)
                    maj.append(prms[3])
                features = np.hstack(
                    (residuals, dist_to_centre, aspect, theta, maj))
            # All percentiles processed successfully: no further
            # up-scaling needed.
            if len(lst) == len(percentiles):
                break
            if a == self.upper_limit:
                features = [np.nan] * \
                    len(self.feat_labs) * len(self.percentiles)
        if self.check_for_extended_ellipses:
            features = np.append(features, warning_open_ellipses)
            features = np.append(features, new_window)
        return features
class HuMomentsFeatures(PipelineStage):
    def __init__(self, sigma_levels=[1, 2, 3, 4, 5], channel=None,
                 central_contour=False, **kwargs):
        """
        Computes the Hu moments for the contours at specified levels in an
        image.

        Parameters
        ----------
        sigma_levels : array-like
            The levels at which to calculate the contours in numbers of
            standard deviations of the image.
        channel : int
            Specify which channel to use for multiband images
        central_contour : bool
            If true will only use the contour surrounding the centre of the
            image
        """
        super().__init__(sigma_levels=sigma_levels, channel=channel,
                         central_contour=central_contour, **kwargs)
        self.sigma_levels = sigma_levels
        self.channel = channel
        self.central_contour = central_contour
        # Labels are level<sigma>_I<moment index> for the 7 Hu moments at
        # each sigma level.
        hu_labels = ['I%d' % i for i in range(7)]
        sigma_labels = ['level%d' % n for n in sigma_levels]
        self.labels = []
        for s in sigma_labels:
            for h in hu_labels:
                self.labels.append(s + '_' + h)

    def _execute_function(self, image):
        """
        Does the work in actually extracting the Hu moments.

        Parameters
        ----------
        image : np.ndarray
            Input image

        Returns
        -------
        array
            Contains the Hu moments for each contour level (zeros for a
            level where no suitable contour was found)
        """
        # First check the array is normalised since opencv will cry otherwise
        if len(image.shape) > 2:
            if self.channel is None:
                raise ValueError('Contours cannot be determined for \
                                  multi-channel images, please set the \
                                  channel kwarg.')
            else:
                this_image = image[:, :, self.channel]
        else:
            this_image = image
        if self.central_contour:
            x0 = this_image.shape[0] // 2
            y0 = this_image.shape[1] // 2
        else:
            # -1 means "not yet chosen": default to the largest contour's
            # centroid once contours are available.
            x0 = y0 = -1
        feats = []
        for n in self.sigma_levels:
            # NOTE(review): find_contours takes (img, threshold); there is
            # no n_sigma keyword, so this call raises TypeError as written.
            # Presumably the threshold should be derived from n standard
            # deviations of the image - confirm the intended conversion.
            contours, hierarchy = find_contours(this_image, n_sigma=n)
            found = False
            for c in contours:
                # Only take the contour in the centre of the image
                if x0 == -1:
                    # We haven't set which contour we're going to look at
                    # default to the largest
                    lengths = [len(cont) for cont in contours]
                    largest_cont = contours[np.argmax(lengths)]
                    M = cv2.moments(largest_cont)
                    x0 = int(M["m10"] / M["m00"])
                    y0 = int(M["m01"] / M["m00"])
                in_contour = cv2.pointPolygonTest(c, (x0, y0), False)
                if in_contour == 1 and not found:
                    contour_img = draw_contour(c, this_image)
                    feats.append(get_hu_moments(contour_img))
                    found = True
            if not found:
                # No contour contained the reference point: pad with zeros
                # so the feature vector length stays fixed.
                feats.append([0] * 7)
        feats = np.hstack(feats)
        return feats
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,218 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/scripts/goods_example.py | # Example of astronomaly applied to a fits image
from astronomaly.data_management import image_reader
from astronomaly.preprocessing import image_preprocessing
from astronomaly.feature_extraction import power_spectrum
from astronomaly.feature_extraction import shape_features
from astronomaly.dimensionality_reduction import pca
from astronomaly.postprocessing import scaling
from astronomaly.anomaly_detection import isolation_forest, human_loop_learning
from astronomaly.visualisation import tsne_plot
import os
import pandas as pd
# Module-level configuration for the GOODS example pipeline. Note that
# this code runs at import time and has side effects (reads a catalogue,
# may download data).
# Root directory for data
data_dir = os.path.join(os.getcwd(), 'example_data', )
image_dir = os.path.join(data_dir, 'GOODS', '')
# Where output should be stored
output_dir = os.path.join(
    data_dir, 'astronomaly_output', 'GOODS', '')
# Pre-converted tractor file
# NOTE(review): this read happens before the download block below, so it
# fails if only the catalogue csv is missing - confirm it ships with the
# example data.
catalogue = pd.read_csv(
    os.path.join(image_dir, 'h_sb_sect23_v2.0_drz_cat.csv'))
# No band prefixes / RGB mapping needed for this single-image example.
band_prefixes = []
bands_rgb = {}
plot_cmap = 'bone'
window_size = 128
# Feature extraction method: 'ellipse' or 'psd' (see run_pipeline).
feature_method = 'ellipse'
# Dimensionality reduction: '' (none) or 'pca'.
dim_reduction = ''
if not os.path.exists(image_dir):
    os.makedirs(image_dir)
# Download the GOODS-S fits file if no fits data is present yet.
fls = os.listdir(image_dir)
found_fits = False
for f in fls:
    if 'fits' in f or 'FITS' in f:
        found_fits = True
        break
if not found_fits:
    # NOTE(review): the trailing space inside data_link is load-bearing -
    # it separates the URL from the "-P" flag in the os.system call below.
    data_link = "https://archive.stsci.edu/pub/hlsp/goods/v2/" + \
        "h_sb_sect23_v2.0_drz_img.fits "
    # No data to run on!
    print('No data found to run on, downloading some GOODS-S data...')
    print('If wget is slow, try downloading the data directly from this link:')
    print(data_link)
    print()
    os.system("wget " + data_link + "-P " + image_dir)
    print('GOODS-S data downloaded.')
# Transforms applied to cutouts before feature extraction, and (scaling
# only) before display in the frontend.
image_transform_function = [image_preprocessing.image_transform_sigma_clipping,
                            image_preprocessing.image_transform_scale]
display_transform_function = [image_preprocessing.image_transform_scale]
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
def run_pipeline():
    """
    An example of the full astronomaly pipeline run on image data.

    All configuration (image_dir, output_dir, window_size, feature_method,
    dim_reduction, plotting options) is read from the module-level
    variables defined above rather than from arguments.

    Returns
    -------
    pipeline_dict : dictionary
        Dictionary containing all relevant data including cutouts, features
        and anomaly scores
    """
    # Cut the large fits image into window_size cutouts around the
    # catalogue sources.
    image_dataset = image_reader.ImageDataset(
        directory=image_dir,
        window_size=window_size, output_dir=output_dir, plot_square=False,
        transform_function=image_transform_function,
        display_transform_function=display_transform_function,
        plot_cmap=plot_cmap,
        catalogue=catalogue,
        band_prefixes=band_prefixes,
        bands_rgb=bands_rgb
    )  # noqa
    # Feature extraction: power spectrum or ellipse-fit features,
    # selected by the module-level feature_method flag.
    if feature_method == 'psd':
        pipeline_psd = power_spectrum.PSD_Features(
            force_rerun=True, output_dir=output_dir)
        features_original = pipeline_psd.run_on_dataset(image_dataset)
    elif feature_method == 'ellipse':
        pipeline_ellipse = shape_features.EllipseFitFeatures(
            percentiles=[90, 80, 70, 60, 50, 0],
            output_dir=output_dir, channel=0, force_rerun=False
        )
        features_original = pipeline_ellipse.run_on_dataset(image_dataset)
    features = features_original.copy()
    # Optional dimensionality reduction before scaling.
    if dim_reduction == 'pca':
        pipeline_pca = pca.PCA_Decomposer(force_rerun=False,
                                          output_dir=output_dir,
                                          threshold=0.95)
        features = pipeline_pca.run(features_original)
    pipeline_scaler = scaling.FeatureScaler(force_rerun=False,
                                            output_dir=output_dir)
    features = pipeline_scaler.run(features)
    # Anomaly detection, followed by conversion of the raw scores to the
    # range the frontend expects.
    pipeline_iforest = isolation_forest.IforestAlgorithm(
        force_rerun=False, output_dir=output_dir)
    anomalies = pipeline_iforest.run(features)
    pipeline_score_converter = human_loop_learning.ScoreConverter(
        force_rerun=False, output_dir=output_dir)
    anomalies = pipeline_score_converter.run(anomalies)
    anomalies = anomalies.sort_values('score', ascending=False)
    # Re-attach any human labels saved by a previous labelling session;
    # silently skipped when no ml_scores.csv exists yet.
    try:
        df = pd.read_csv(
            os.path.join(output_dir, 'ml_scores.csv'),
            index_col=0,
            dtype={'human_label': 'int'})
        df.index = df.index.astype('str')
        if len(anomalies) == len(df):
            anomalies = pd.concat(
                (anomalies, df['human_label']), axis=1, join='inner')
    except FileNotFoundError:
        pass
    # Active learning stage used interactively by the frontend.
    pipeline_active_learning = human_loop_learning.NeighbourScore(
        alpha=1, output_dir=output_dir)
    # t-SNE visualisation of the (scaled) feature space.
    pipeline_tsne = tsne_plot.TSNE_Plot(
        force_rerun=False,
        output_dir=output_dir,
        perplexity=50)
    t_plot = pipeline_tsne.run(features.loc[anomalies.index])
    return {'dataset': image_dataset,
            'features': features,
            'anomaly_scores': anomalies,
            'visualisation': t_plot,
            'active_learning': pipeline_active_learning}
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,219 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/anomaly_detection/lof.py | from astronomaly.base.base_pipeline import PipelineStage
from sklearn.neighbors import LocalOutlierFactor
import pandas as pd
import pickle
from os import path
class LOF_Algorithm(PipelineStage):
    def __init__(self, contamination='auto', n_neighbors=20, **kwargs):
        """
        Runs sklearn's local outlier factor anomaly detection algorithm and
        returns the anomaly score for each instance.

        Parameters
        ----------
        contamination : string or float, optional
            Hyperparameter to pass to LOF. 'auto' is recommended
        n_neighbors : int
            Hyperparameter to pass to LOF. Fairly sensitive to the amount of
            data in the dataset.
        """
        super().__init__(
            contamination=contamination, n_neighbors=n_neighbors, **kwargs)

        self.contamination = contamination
        self.n_neighbors = n_neighbors
        # Set by _execute_function; kept on the instance so the fitted model
        # can be pickled and reused without refitting.
        self.algorithm_obj = None

    def save_algorithm_obj(self):
        """
        Stores the LOF object to the output directory to allow quick
        rerunning on new data.
        """
        if self.algorithm_obj is not None:
            # Use a context manager so the file handle is always closed,
            # even if pickling raises (previously the handle was leaked).
            pickle_file = path.join(self.output_dir,
                                    'ml_algorithm_object.pickle')
            with open(pickle_file, 'wb') as f:
                pickle.dump(self.algorithm_obj, f)

    def _execute_function(self, features):
        """
        Does the work in actually running the algorithm.

        Parameters
        ----------
        features : pd.DataFrame or similar
            The input features to run the algorithm on. Assumes the index is
            the id of each object and all columns are to be used as features.

        Returns
        -------
        pd.DataFrame
            Contains the same original index of the features input and the
            anomaly scores. More negative is more anomalous.
        """
        self.algorithm_obj = LocalOutlierFactor(
            contamination=self.contamination,
            n_neighbors=self.n_neighbors,
            novelty=False)
        # With novelty=False the scores are only defined for the fitted data
        # itself and are exposed via negative_outlier_factor_.
        self.algorithm_obj.fit_predict(features)
        scores = self.algorithm_obj.negative_outlier_factor_

        if self.save_output:
            self.save_algorithm_obj()

        return pd.DataFrame(data=scores, index=features.index,
                            columns=['score'])
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,220 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/anomaly_detection/human_loop_learning.py | from astronomaly.base.base_pipeline import PipelineStage
from astronomaly.base import logging_tools
from sklearn.ensemble import RandomForestRegressor
import numpy as np
import pandas as pd
from scipy.spatial import cKDTree
class ScoreConverter(PipelineStage):
    def __init__(self, lower_is_weirder=True, new_min=0, new_max=5,
                 convert_integer=False, column_name='score',
                 **kwargs):
        """
        Convenience function to convert anomaly scores onto a standardised
        scale, for use with the human-in-the-loop labelling frontend.

        Parameters
        ----------
        lower_is_weirder : bool
            If true, it means the anomaly scores in input_column correspond to
            a lower is more anomalous system, such as output by isolation
            forest.
        new_min : int or float
            The new minimum score (now corresponding to the most boring
            objects)
        new_max : int or float
            The new maximum score (now corresponding to the most interesting
            objects)
        convert_integer : bool
            If true will force the resulting scores to be integer.
        column_name : str
            The name of the column to convert to the new scoring method.
            Default is 'score'. If 'all' is used, will convert all columns
            the DataFrame.
        """
        super().__init__(lower_is_weirder=lower_is_weirder, new_min=new_min,
                         new_max=new_max, **kwargs)
        self.lower_is_weirder = lower_is_weirder
        self.new_min = new_min
        self.new_max = new_max
        self.convert_integer = convert_integer
        self.column_name = column_name

    def _execute_function(self, df):
        """
        Does the work in actually running the scaler.

        Parameters
        ----------
        df : pd.DataFrame or similar
            The input anomaly scores to rescale.

        Returns
        -------
        pd.DataFrame
            Contains the original index and the rescaled score column(s).
            If the requested column is missing, the input is returned
            unchanged.
        """
        print('Running anomaly score rescaler...')
        # 'all' rescales every column of df; otherwise only the named one.
        if self.column_name == 'all':
            cols = df.columns
        else:
            cols = [self.column_name]
        try:
            scores = df[cols]
        except KeyError:
            msg = 'Requested column ' + self.column_name + ' not available in \
                input dataframe. No rescaling has been performed'
            logging_tools.log(msg, 'WARNING')
            return df
        # Flip the sign so that a higher score always means more anomalous
        # before rescaling.
        if self.lower_is_weirder:
            scores = -scores
        # Linear min-max rescaling onto [new_min, new_max].
        scores = (self.new_max - self.new_min) * (scores - scores.min()) / \
            (scores.max() - scores.min()) + self.new_min
        if self.convert_integer:
            scores = round(scores)
        return scores
class NeighbourScore(PipelineStage):
    def __init__(self, min_score=0.1, max_score=5, alpha=1, **kwargs):
        """
        Computes a new anomaly score based on what the user has labelled,
        allowing anomalous but boring objects to be rejected. This function
        takes training data (in the form of human given labels) and then
        performs regression to be able to predict user scores as a function of
        feature space. In regions of feature space where the algorithm is
        uncertain (i.e. there was little training data), it simply returns
        close to the original anomaly score. In regions where there was more
        training data, the anomaly score is modulated by the predicted user
        score resulting in the user seeing less "boring" objects.

        Parameters
        ----------
        min_score : float
            The lowest user score possible (must be greater than zero)
        max_score : float
            The highest user score possible
        alpha : float
            A scaling factor of how much to "trust" the predicted user scores.
            Should be close to one but is a tuning parameter.
        """
        super().__init__(min_score=min_score, max_score=max_score, alpha=alpha,
                         **kwargs)
        self.min_score = min_score
        self.max_score = max_score
        self.alpha = alpha

    def anom_func(self, nearest_neighbour_distance, user_score, anomaly_score):
        """
        Simple function that is dominated by the (predicted) user score in
        regions where we are reasonably sure about our ability to predict that
        score, and is dominated by the anomaly score from an algorithms in
        regions we have little data.

        Parameters
        ----------
        nearest_neighbour_distance : array
            The distance of each instance to its nearest labelled neighbour.
        user_score : array
            The predicted user score for each instance
        anomaly_score : array
            The actual anomaly score from a machine learning algorithm

        Returns
        -------
        array
            The final anomaly score for each instance, penalised by the
            predicted user score as required.
        """
        # Map the predicted user score onto (0, 1) for arctanh below.
        # NOTE(review): the 0.85 factor appears chosen so that f_u stays
        # strictly below 1 (the arctanh domain) for the defaults
        # min_score=0.1, max_score=5 - confirm before changing those.
        f_u = self.min_score + 0.85 * (user_score / self.max_score)
        # Distance normalised by the mean nearest-labelled-neighbour
        # distance over the whole dataset.
        d0 = nearest_neighbour_distance / np.mean(nearest_neighbour_distance)
        # Grows exponentially with normalised distance, scaled by the
        # "trust" parameter alpha.
        dist_penalty = np.exp(d0 * self.alpha)
        # Near labelled data (dist_penalty ~ 1) the tanh term is ~ f_u, so
        # the predicted user score modulates the anomaly score; far from
        # labelled data tanh saturates at 1 and the raw anomaly score is
        # returned almost unchanged.
        return anomaly_score * np.tanh(dist_penalty - 1 + np.arctanh(f_u))

    def compute_nearest_neighbour(self, features_with_labels):
        """
        Calculates the distance of each instance to its nearest labelled
        neighbour.

        Parameters
        ----------
        features_with_labels : pd.DataFrame
            A dataframe where the first columns are the features and the last
            two columns are 'human_label' and 'score' (the anomaly score from
            the ML algorithm).

        Returns
        -------
        array
            Distance of each instance to its nearest labelled neighbour.
        """
        features = features_with_labels.drop(columns=['human_label', 'score'])
        # print(features)
        # A human_label of -1 marks an unlabelled instance.
        label_mask = features_with_labels['human_label'] != -1
        labelled = features.loc[features_with_labels.index[label_mask]].values
        features = features.values
        # KD-tree over the labelled instances only; every instance is then
        # queried against it. Labelled instances are themselves in the tree,
        # so their distance comes out as zero.
        mytree = cKDTree(labelled)
        distances = np.zeros(len(features))
        for i in range(len(features)):
            dist = mytree.query(features[i])[0]
            distances[i] = dist
        # print(labelled)
        return distances

    def train_regression(self, features_with_labels):
        """
        Uses machine learning to predict the user score for all the data. The
        labels are provided in the column 'human_label' which must be -1 if no
        label exists.

        Parameters
        ----------
        features_with_labels : pd.DataFrame
            A dataframe where the first columns are the features and the last
            two columns are 'human_label' and 'score' (the anomaly score from
            the ML algorithm).

        Returns
        -------
        array
            The predicted user score for each instance.
        """
        # Fit on the labelled subset only...
        label_mask = features_with_labels['human_label'] != -1
        inds = features_with_labels.index[label_mask]
        features = features_with_labels.drop(columns=['human_label', 'score'])
        reg = RandomForestRegressor(n_estimators=100)
        reg.fit(features.loc[inds],
                features_with_labels.loc[inds, 'human_label'])
        # ...but predict a user score for every instance.
        fitted_scores = reg.predict(features)
        return fitted_scores

    def combine_data_frames(self, features, ml_df):
        """
        Convenience function to correctly combine dataframes.
        Inner-joins on the index, so only objects present in both frames
        are kept.
        """
        return pd.concat((features, ml_df), axis=1, join='inner')

    def _execute_function(self, features_with_labels):
        """
        Does the work in actually running the NeighbourScore.

        Parameters
        ----------
        features_with_labels : pd.DataFrame
            A dataframe where the first columns are the features and the last
            two columns are 'human_label' and 'score' (the anomaly score from
            the ML algorithm).

        Returns
        -------
        pd.DataFrame
            Contains the final scores using the same index as the input.
        """
        distances = self.compute_nearest_neighbour(features_with_labels)
        regressed_score = self.train_regression(features_with_labels)
        trained_score = self.anom_func(distances,
                                       regressed_score,
                                       features_with_labels.score.values)
        dat = np.column_stack(([regressed_score, trained_score]))
        return pd.DataFrame(data=dat,
                            index=features_with_labels.index,
                            columns=['predicted_user_score', 'trained_score'])
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,221 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/visualisation/umap_plot.py | import umap
import numpy as np
import pandas as pd
from astronomaly.base.base_pipeline import PipelineStage
from astronomaly.base import logging_tools
class UMAP_Plot(PipelineStage):
    # API reference: https://umap-learn.readthedocs.io/en/latest/api.html
    def __init__(self, min_dist=0.1, n_neighbors=15, max_samples=2000,
                 shuffle=False, **kwargs):
        """
        Produces a 2d UMAP embedding of the input features for visualisation.

        Parameters
        ----------
        min_dist : float, optional
            Effective minimum distance between embedded points (default 0.1).
            Smaller values pull nearby points into tighter clumps; larger
            values spread points more evenly (relative to the spread value).
        n_neighbors : float, optional
            Size of the local neighbourhood used for manifold approximation
            (default 15). Larger values favour global structure, smaller
            values preserve local structure; sensible values lie in 2-100.
        max_samples : int, optional
            Only this many samples are embedded (default 2000): the first
            max_samples when shuffle is False, otherwise a random subset.
            Useful since embedding cost grows quickly with sample size.
        shuffle : bool, optional
            Randomises the sample before selecting max_samples, by default
            False.
        """
        super().__init__(min_dist=min_dist, n_neighbors=n_neighbors,
                         max_samples=max_samples, shuffle=shuffle, **kwargs)
        self.min_dist = min_dist
        self.n_neighbors = n_neighbors
        self.max_samples = max_samples
        self.shuffle = shuffle

    def _execute_function(self, features):
        """
        Runs the UMAP embedding.

        Parameters
        ----------
        features : pd.DataFrame or similar
            Input features. The index identifies each object and every
            column is treated as a feature.

        Returns
        -------
        pd.DataFrame
            Two columns (the embedding dimensions), indexed by the subset of
            the input index that was actually embedded.
        """
        # Nothing to do if the data is already two dimensional.
        if len(features.columns.values) == 2:
            logging_tools.log('Already dim 2 - skipping umap', level='WARNING')
            return features.copy()

        # Subsample to keep the runtime manageable (same scheme as t-SNE).
        if len(features) > self.max_samples:
            if self.shuffle:
                keep = np.random.choice(features.index, self.max_samples,
                                        replace=False)
            else:
                keep = features.index[:self.max_samples]
            features = features.loc[keep]

        reducer = umap.UMAP(n_components=2, min_dist=self.min_dist,
                            n_neighbors=self.n_neighbors)
        logging_tools.log('Beginning umap transform')
        embedding = reducer.fit_transform(features)
        logging_tools.log('umap transform complete')

        return pd.DataFrame(data=embedding, index=features.index)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,222 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/flatten_features.py | from astronomaly.base.base_pipeline import PipelineStage
import numpy as np
class Flatten_Features(PipelineStage):
    def __init__(self, **kwargs):
        """
        Trivial feature extractor that unravels the first channel of an
        image into a 1d feature vector. Handy for simple test sets such as
        MNIST, or for flattening pre-aligned images before applying PCA.
        """
        super().__init__(**kwargs)
        # The number of features (pixels) is only known at runtime, so the
        # feature labels are created lazily on the first call.
        self.labels = None

    def _set_labels(self, image):
        """
        Creates one string label per pixel, named by its position in the
        flattened vector.
        """
        n_pixels = np.prod(image.shape)
        self.labels = np.array(np.arange(n_pixels), dtype='str')

    def _execute_function(self, image):
        """
        Flattens the first channel of the input image.

        Parameters
        ----------
        image : np.ndarray
            Input image (height x width x channels)

        Returns
        -------
        Array
            The first channel, flattened to 1d
        """
        first_channel = image[:, :, 0]
        flattened = first_channel.ravel()
        if self.labels is None:
            self._set_labels(first_channel)
        return flattened
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,223 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/autoencoder.py | import numpy as np
import os
from astronomaly.base.base_pipeline import PipelineStage
try:
from keras.models import load_model
from keras.layers import Input, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
except ImportError:
print("Failed to import Keras. Deep learning will be unavailable.")
class Autoencoder:
    """
    Thin wrapper around a Keras convolutional autoencoder providing
    training, encoding and (de)serialisation of the model.
    """
    def __init__(self, model_file=''):
        """
        Class containing autoencoder training methods.

        Parameters
        ----------
        model_file : string, optional
            Allows for loading of previously trained Keras model in HDF5
            format. Note these models are very sensitive, the exact same
            preprocessing steps must be used to reproduce results.
        """
        if len(model_file) != 0:
            try:
                self.autoencoder = load_model(model_file)
                # Recover the encoder half from the full model via the layer
                # named 'encoder' (see compile_autoencoder_model).
                inputs = self.autoencoder.input
                outputs = self.autoencoder.get_layer('encoder').output
                self.encoder = Model(inputs=inputs, outputs=outputs)
            except OSError:
                # NOTE(review): self.encoder is left unset on this path (and
                # on the else branch below), so encode() will fail until
                # compile_autoencoder_model() has been called.
                print('Model file ', model_file,
                      'is invalid. Weights not loaded. New model created.')
                self.autoencoder = None
        else:
            self.autoencoder = None

    def shape_check(self, images):
        """
        Convenience function to reshape images appropriate for deep learning.

        Parameters
        ----------
        images : np.ndarray, list
            Array of list of images

        Returns
        -------
        np.ndarray
            Converted array compliant with CNN (batch, height, width,
            channels)
        """
        images = np.array(images)
        # A single 2d image becomes a batch of one single-channel image.
        if len(images.shape) == 2:
            images = images.reshape([-1, images.shape[0], images.shape[1], 1])
        # NOTE(review): a 3d array is treated as a single channels-last
        # image (height x width x channels), not as a batch of 2d images -
        # confirm callers never pass a stacked batch of grayscale images
        # here, as it would be misinterpreted.
        if len(images.shape) == 3:
            images = images.reshape([-1,
                                     images.shape[0], images.shape[1],
                                     images.shape[2]])
        return images

    def compile_autoencoder_model(self, input_image_shape):
        """
        Compiles the default autoencoder model. Note this model is designed to
        operate on 128x128 images. While it can run on different size images
        this can dramatically change the size of the final feature space.

        Parameters
        ----------
        input_image_shape : tuple
            The expected shape of the input images. Can either be length 2 or 3
            (to include number of channels).
        """
        # Promote a 2d shape to single-channel, channels-last.
        if len(input_image_shape) == 2:
            input_image_shape = (input_image_shape[0], input_image_shape[1], 1)
        # Assumes "channels last" format
        input_img = Input(shape=input_image_shape)
        # Previous, shallower architecture kept for reference:
        # x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
        # x = MaxPooling2D((2, 2), padding='same')(x)
        # x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        # x = MaxPooling2D((4, 4), padding='same')(x)
        # x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        # encoder = MaxPooling2D((4, 4), padding='same', name='encoder')(x)
        # # at this point the representation is (4, 4, 8) i.e. 128-dimensional
        # x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoder)
        # x = UpSampling2D((4, 4))(x)
        # x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
        # x = UpSampling2D((4, 4))(x)
        # x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        # x = UpSampling2D((2, 2))(x)
        # Encoder: four Conv+MaxPooling stages, each halving the spatial
        # size.
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(input_img)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D((2, 2), padding='same')(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        # The named bottleneck layer; encode() reads its output.
        encoder = MaxPooling2D((2, 2), padding='same', name='encoder')(x)
        # at this point the representation is (4, 4, 8) i.e. 128-dimensional
        # NOTE(review): the line above refers to the commented-out
        # architecture; with the current layers a 128x128 input yields an
        # 8x8x16 (1024-dimensional) bottleneck.
        # Decoder: mirror image of the encoder, upsampling back to the
        # input resolution.
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(encoder)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(16, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        x = Conv2D(32, (3, 3), activation='relu', padding='same')(x)
        x = UpSampling2D((2, 2))(x)
        # Sigmoid output with MSE loss - presumably the inputs are expected
        # to be scaled to [0, 1]; verify against the preprocessing steps.
        decoder = Conv2D(input_image_shape[-1], (3, 3), activation='sigmoid',
                         padding='same')(x)
        autoencoder = Model(input_img, decoder)
        autoencoder.compile(loss='mse', optimizer='adam')
        self.autoencoder = autoencoder
        # Separate model up to the bottleneck, used for feature extraction.
        self.encoder = Model(inputs=autoencoder.input,
                             outputs=autoencoder.get_layer('encoder').output)

    def fit(self, training_data, batch_size=32, epochs=10):
        """
        Actually train the autoencoder.

        Parameters
        ----------
        training_data : np.ndarray, list
            Either array or list of images. It's recommended that this data be
            augmented with translation or rotation (or both).
        batch_size : int, optional
            Number of samples used to update weights in each iteration. A
            larger batch size can be more accurate but requires more memory and
            is slower to train.
        epochs : int, optional
            Number of full passes through the entire training set.
        """
        # The autoencoder's target is the input itself (X -> X).
        X = self.shape_check(training_data)
        self.autoencoder.fit(X, X,
                             batch_size=batch_size,
                             epochs=epochs,
                             verbose=1,
                             shuffle=True)

    def encode(self, images):
        """
        Returns the deep encoded features for an array of images.

        Parameters
        ----------
        images : np.ndarray
            Input images (nobjects x image_shape). For a single image,
            provide [image] as an array is expected.

        Returns
        -------
        np.ndarray
            Deep features (nobjects x nfeatures)
        """
        return self.encoder.predict(self.shape_check(images))

    def save(self, filename):
        """
        Saves Keras model in HDF5 format

        Parameters
        ----------
        filename : string
            Location for saved model
        """
        self.autoencoder.save(filename)
class AutoencoderFeatures(PipelineStage):
    def __init__(self, training_dataset=None, retrain=False, **kwargs):
        """
        Runs a very simple autoencoder to produce lower dimensional features.
        This function is currently not very flexible in terms of changing
        parameters, network architecture etc.

        Parameters
        ----------
        training_dataset : Dataset, optional
            A Dataset-type object containing data to train the autoencoder on.
            Note that since Astronomaly runs in an unsupervised setting, this
            can be the same data that the final anomaly detection algorithm is
            run on. However you may wish to augment the training data, for
            example by applying translation to the cutouts.
        retrain : bool, optional
            Whether or not to train the algorithm again or load from a model
            file. This is useful because the automated checks in whether or not
            to rerun a function only operate when "run_on_dataset" is called
            whereas the training is performed in __init__.

        Raises
        ------
        ValueError
            If training data is not provided.
        """
        super().__init__(training_dataset=training_dataset, **kwargs)
        if training_dataset is None:
            raise ValueError('A training dataset object must be provided.')
        model_file = os.path.join(self.output_dir, 'autoencoder.h5')
        # An empty model_file makes Autoencoder start from scratch;
        # otherwise it tries to load the previously saved model.
        if retrain or ('force_rerun' in kwargs and kwargs['force_rerun']):
            self.autoenc = Autoencoder()
        else:
            self.autoenc = Autoencoder(model_file=model_file)
        # autoencoder is None when no saved model was loaded (or loading
        # failed), in which case we train from scratch.
        if self.autoenc.autoencoder is None:
            cutouts = []
            # Here I'm explicitly assuming the entire training set can be read
            # into memory
            print("Loading training data...")
            for i in training_dataset.index:
                cutouts.append(training_dataset.get_sample(i))
            print("%d objects loaded." % len(cutouts))
            # The network input shape is taken from the first cutout; all
            # cutouts are assumed to share it.
            img_shape = cutouts[0].shape
            print('Compiling autoencoder model...')
            self.autoenc.compile_autoencoder_model(img_shape)
            print('Done!')
            print('Training autoencoder...')
            self.autoenc.fit(cutouts, epochs=10)
            print('Done!')
            if self.save_output:
                print('Autoencoder model saved to', model_file)
                self.autoenc.save(model_file)
        else:
            print('Trained autoencoder read from file', model_file)

    def _execute_function(self, image):
        """
        Runs the trained autoencoder to get the encoded features of the input
        image.

        Parameters
        ----------
        image : np.ndarray
            Cutout to run autoencoder on

        Returns
        -------
        np.ndarray
            Encoded features
        """
        feats = self.autoenc.encode(image)
        # Flatten the (1, h, w, c) encoder output into a 1d feature vector.
        feats = np.reshape(feats, [np.prod(feats.shape[1:])])
        # Feature labels are only known once the bottleneck size is known.
        # NOTE(review): self.labels is presumably initialised by the
        # PipelineStage base class - confirm it exists before first use.
        if len(self.labels) == 0:
            self.labels = ['enc_%d' % i for i in range(len(feats))]
        return feats
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,224 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/data_management/raw_features.py | from astronomaly.base.base_dataset import Dataset
import numpy as np
import pandas as pd
class RawFeatures(Dataset):
    def __init__(self, **kwargs):
        """
        A Dataset class for simply reading in a set of data to be directly used
        as features. Files whose names contain the substring 'labels' are
        read as label files; all others are read as feature files. Supported
        extensions are .npy, .csv and .parquet.

        Parameters
        ----------
        filename : str
            If a single file (of any time) is to be read from, the path can be
            given using this kwarg.
        directory : str
            A directory can be given instead of an explicit list of files. The
            child class will load all appropriate files in this directory.
        list_of_files : list
            Instead of the above, a list of files to be loaded can be
            explicitly given.
        output_dir : str
            The directory to save the log file and all outputs to. Defaults to
            './'
        """
        super().__init__(**kwargs)

        self.features = []
        self.labels = []
        print('Loading features...')
        for f in self.files:
            ext = f.split('.')[-1]
            feats = []
            labels = []
            if ext == 'npy':
                if 'labels' in f:
                    labels = np.load(f)
                    labels = pd.DataFrame(data=labels,
                                          columns=['label'], dtype='int')
                else:
                    feats = np.load(f)
                    feats = pd.DataFrame(data=feats)
            elif ext == 'csv':
                if 'labels' in f:
                    labels = pd.read_csv(f)
                else:
                    feats = pd.read_csv(f)
            elif ext == 'parquet':
                if 'labels' in f:
                    labels = pd.read_parquet(f)
                else:
                    feats = pd.read_parquet(f)
            # Accumulate features and labels across all files.
            if len(feats) != 0:
                if len(self.features) == 0:
                    self.features = feats
                else:
                    self.features = pd.concat((self.features, feats))
            if len(labels) != 0:
                if len(self.labels) == 0:
                    self.labels = labels
                else:
                    self.labels = pd.concat((self.labels, labels))
        # Force string index because it's safer
        self.features.index = self.features.index.astype('str')
        # Guard: self.labels is still the initial empty list (which has no
        # pandas index) when no label files were provided.
        if len(self.labels) != 0:
            self.labels.index = self.labels.index.astype('str')
        print('Done!')
        self.data_type = 'raw_features'
        # Bug fix: this previously tested the per-file loop variable
        # `labels`, so the labels were only used as metadata if the *last*
        # file read happened to be a labels file (and it raised NameError
        # when self.files was empty).
        if len(self.labels) != 0:
            self.metadata = self.labels
        else:
            self.metadata = pd.DataFrame(data=[],
                                         index=list(self.features.index))

    def get_sample(self, idx):
        """
        Returns the feature vector (as a numpy array) for the object with
        index string idx.
        """
        return self.features.loc[idx].values

    def get_display_data(self, idx):
        """
        Returns one object's features as a dictionary for web display:
        'categories' holds the column names, 'data' holds [position, value]
        pairs.
        """
        cols = list(self.features.columns)
        feats = self.features.loc[idx].values
        out_dict = {'categories': cols}
        out_dict['data'] = [[i, feats[i]] for i in range(len(feats))]
        return out_dict
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,225 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/dimensionality_reduction/pca.py | from astronomaly.base.base_pipeline import PipelineStage
import numpy as np
import pandas as pd
from os import path
class PCA_Decomposer(PipelineStage):
    def __init__(self, n_components=0, threshold=0, **kwargs):
        """
        Dimensionality reduction with principle component analysis (PCA).
        Wraps the scikit-learn function.

        Parameters
        ----------
        n_components : int
            Requested number of principle components to use. If 0 (default),
            returns the maximum number of components.
        threshold : float
            An alternative to n_components. Will use sufficient components to
            ensure threshold explained variance is achieved. Scikit-learn uses
            the kwarg n_components to specify either an int or float but we are
            explicit here.
        """
        super().__init__(n_components=n_components, threshold=threshold,
                         **kwargs)
        # A float in (0, 1) switches sklearn's PCA into explained-variance
        # mode and takes precedence over an explicit component count.
        if 0 < threshold < 1:
            self.n_components = threshold
        elif n_components == 0:
            # None asks sklearn for the maximum number of components
            self.n_components = None
        else:
            self.n_components = n_components
        # Populated by _execute_function; None until the stage has run
        self.pca_obj = None

    def save_pca(self, features):
        """
        Stores the mean and components of the PCA to disk. Makes use of the
        original features information to label the columns.

        Parameters
        ----------
        features : pd.DataFrame or similar
            The original feature set the PCA was run on.
        """
        if self.pca_obj is None:
            # Nothing to save before the stage has been run
            return
        components = self.pca_obj.components_
        # First row is the mean, then one row per principle component
        stacked = np.vstack((self.pca_obj.mean_, components))
        row_labels = ['mean'] + \
            ['component%d' % i for i in range(len(components))]
        frame = pd.DataFrame(data=stacked, columns=features.columns,
                             index=row_labels)
        self.save(frame, path.join(self.output_dir, 'pca_components'))

    def _execute_function(self, features):
        """
        Actually does the PCA reduction and returns a dataframe.
        """
        from sklearn.decomposition import PCA

        self.pca_obj = PCA(self.n_components)
        self.pca_obj.fit(features)
        print('Total explained variance:',
              np.sum(self.pca_obj.explained_variance_ratio_))
        reduced = self.pca_obj.transform(features)
        if self.save_output:
            self.save_pca(features)
        # Preserve the original index so rows stay identifiable downstream
        return pd.DataFrame(data=reduced, index=features.index)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,226 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/dimensionality_reduction/truncated_svd.py | from astronomaly.base.base_pipeline import PipelineStage
import numpy as np
import pandas as pd
from os import path
class Truncated_SVD_Decomposer(PipelineStage):
    def __init__(self, n_components=0, **kwargs):
        """
        Perform a truncated SVD decomposition. This is very useful for
        extremely high dimensional data (>10000 features) although it's not
        guaranteed to return the same coefficients each run.

        Parameters
        ----------
        n_components : int
            Number of components required (not optional). If 0 (default),
            will raise an error.

        Raises
        ------
        ValueError
            If n_components is left at its default of 0.
        """
        super().__init__(n_components=n_components, **kwargs)
        if n_components == 0:
            raise ValueError("n_components must be set to a non-zero integer")
        self.n_components = n_components
        # Populated by _execute_function; None until the stage has run
        self.trunc_svd_obj = None

    def save_svd(self, features):
        """
        Stores the components of the truncated SVD to disk. Makes use
        of the original features information to label the columns.

        Parameters
        ----------
        features : pd.DataFrame or similar
            The original feature set the truncated SVD was run on.
        """
        if self.trunc_svd_obj is not None:
            comps = self.trunc_svd_obj.components_
            index = []
            for i in range(len(comps)):
                index += ['component%d' % i]
            df = pd.DataFrame(data=comps, columns=features.columns,
                              index=index)
            # Bug fix: this previously saved to 'pca_components' (copied from
            # the PCA stage), which would clash with/overwrite PCA output when
            # both stages share an output_dir.
            self.save(df, path.join(self.output_dir, 'svd_components'))

    def _execute_function(self, features):
        """
        Actually does the SVD reduction and returns a dataframe.
        """
        from sklearn.decomposition import TruncatedSVD

        self.trunc_svd_obj = TruncatedSVD(self.n_components)
        self.trunc_svd_obj.fit(features)
        print('Total explained variance:',
              np.sum(self.trunc_svd_obj.explained_variance_ratio_))
        output = self.trunc_svd_obj.transform(features)
        if self.save_output:
            self.save_svd(features)
        # Preserve the original index so rows stay identifiable downstream
        return pd.DataFrame(data=output, index=features.index)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,227 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/power_spectrum.py | import numpy as np
from scipy import ndimage
from astronomaly.base.base_pipeline import PipelineStage
def psd_2d(img, nbins):
    """
    Computes the power spectral density for an input image. Translation and
    rotation invariant features.

    Parameters
    ----------
    img : np.ndarray
        Input 2d image
    nbins : int
        Number of frequency bins to use. Frequency will range from 1 pixel to
        the largest axis of the input image, measured in pixels.

    Returns
    -------
    np.ndarray
        Power spectral density at each frequency (length nbins)
    """
    # Subtract the mean so the DC term does not dominate the spectrum
    the_fft = np.fft.fftshift(np.fft.fft2(img - img.mean()))
    psd = np.abs(the_fft) ** 2
    # Normalise so the PSD sums to 1
    psd = psd / psd.sum()

    # Now radially bin the power spectral density
    X, Y = np.meshgrid(np.arange(the_fft.shape[1]),
                       np.arange(the_fft.shape[0]))
    # Radial distance of each pixel from the (fftshifted) zero frequency
    r = np.hypot(X - the_fft.shape[1] // 2, Y - the_fft.shape[0] // 2)
    max_freq = np.min((the_fft.shape[0] // 2, the_fft.shape[1] // 2))
    # Bug fix: np.int was removed in NumPy 1.24; use the builtin int instead
    rbin = (nbins * r / max_freq).astype(int)
    # Bin 0 (the DC pixel) is excluded; bins beyond nbins (corner
    # frequencies above the Nyquist circle) are also dropped
    radial_sum = ndimage.sum(psd, labels=rbin, index=np.arange(1, nbins + 1))

    return radial_sum
class PSD_Features(PipelineStage):
    def __init__(self, nbins='auto', **kwargs):
        """
        Computes the power spectral density for an input image. Translation
        and rotation invariant features.

        Parameters
        ----------
        nbins : int, str
            Number of frequency bins to use. Frequency will range from 1 pixel
            to the largest axis of the input image, measured in pixels. If set
            to 'auto' will use the Nyquist theorem to automatically calculate
            the appropriate number of bins at runtime.
        """
        super().__init__(nbins=nbins, **kwargs)
        self.nbins = nbins

    def _set_labels(self):
        """
        Because the number of features may not be known till runtime, we can
        only create the labels of these features at runtime.
        """
        if self.nbands == 1:
            self.labels = ['psd_%d' % i for i in range(self.nbins)]
        else:
            self.labels = []
            for band in range(self.nbands):
                self.labels += \
                    ['psd_%d_band_%d' % (i, band) for i in range(self.nbins)]

    def _execute_function(self, image):
        """
        Does the work in actually extracting the PSD

        Parameters
        ----------
        image : np.ndarray
            Input image. Assumes any multi-band image stores the bands in
            the last dimension.

        Returns
        -------
        array
            Contains the extracted PSD features (bands concatenated in order
            for multi-band input)
        """
        if self.nbins == 'auto':
            # Here I'm explicitly assuming any multi-d images store the
            # colours in the last dim. Note this resolves 'auto' to a fixed
            # int on the first call, so all subsequent images use the bin
            # count derived from the first image's size.
            shp = image.shape[:2]
            self.nbins = int(min(shp) // 2)

        if len(image.shape) != 2:
            self.nbands = image.shape[2]
        else:
            self.nbands = 1

        if len(self.labels) == 0:
            # Only call this once we know the dimensions of the input data.
            self._set_labels()

        if self.nbands == 1:
            # Greyscale-like image
            psd_feats = psd_2d(image, self.nbins)
            return psd_feats
        else:
            # Fixed: removed a dead local 'labels' list that was built here
            # but never used (labels are handled by _set_labels above).
            psd_all_bands = []
            for band in range(self.nbands):
                psd_feats = psd_2d(image[:, :, band], self.nbins)
                psd_all_bands += list(psd_feats)
            return psd_all_bands
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,228 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/data_management/light_curve_reader.py | import pandas as pd
import numpy as np
from astronomaly.base.base_dataset import Dataset
import os
# Ignores the false positive pandas warning (SettingWithCopyWarning)
# for the following kind of code
# df['key'] == item, for an existing key in a df
# NOTE(review): this silences the warning process-wide for anything that
# imports this module, not just for this file — confirm that is intended.
pd.options.mode.chained_assignment = None
def split_lc(lc_data, max_gap):
    """
    Splits the light curves into smaller chunks based on their gaps. This is
    useful for long light curves that span many observing seasons so have
    large gaps that can sometimes interfere with feature extraction.

    Bug fixes relative to the previous implementation: light curves with no
    gap (or with exactly one gap) are no longer partially or entirely
    dropped, and data without a 'filters' column is now handled.

    Parameters
    ----------
    lc_data : pd.Dataframe
        Light curves. Must contain 'ID' and 'time' columns; if a 'filters'
        column is present each filter is split independently.
    max_gap : int
        Maximum gap between observations. Any time difference larger than
        this starts a new chunk.

    Returns
    -------
    pd.DataFrame
        Split light curves. Chunk k of object x is relabelled with ID 'x_k'.
        A 'time_diff' column (gap to the previous point, 0 for the first
        point) is added to the output.
    """
    split_curves = {}
    for idx in np.unique(lc_data.ID):
        ids = str(idx)  # IDs are handled as strings throughout
        lc = lc_data[lc_data['ID'] == ids]
        # Split each filter independently if a filters column is present
        if 'filters' in lc.columns:
            sub_curves = [lc[lc['filters'] == filtr]
                          for filtr in np.unique(lc.filters)]
        else:
            sub_curves = [lc]
        for j, lc1 in enumerate(sub_curves):
            lc1 = lc1.copy()
            tm = lc1.time.values
            # Gap to the previous observation (first point gets 0)
            time_diff = np.concatenate(([0], np.diff(tm)))
            lc1['time_diff'] = time_diff
            # Row positions that start a new chunk
            gap_idx = list(np.where(time_diff > max_gap)[0])
            boundaries = [0] + gap_idx + [len(lc1)]
            for k in range(len(boundaries) - 1):
                chunk = lc1.iloc[boundaries[k]:boundaries[k + 1]].copy()
                if len(chunk) == 0:
                    continue
                # Relabel so each chunk gets a unique ID
                chunk['ID'] = ids + '_' + str(k)
                key = 'lc' + ids + '_' + str(j) + '_' + str(k)
                split_curves[key] = chunk
    if len(split_curves) == 0:
        # Nothing to split (e.g. empty input): return the input unchanged
        return lc_data
    final_data = pd.concat(split_curves.values(), ignore_index=False)
    return final_data
def convert_flux_to_mag(lcs, mag_ref):
    """
    Converts flux to mags for a given light curve data. Note the input
    dataframe is modified in place (and also returned): negative fluxes are
    replaced and 'mag'/'mag_error' columns are added.

    Parameters
    ----------
    lcs: pd.DataFrame
        Light curve. Must contain 'flux' and 'flux_error' columns.
    mag_ref: float
        Reference magnitude (zero point) for the conversion.

    Returns
    -------
    pd.DataFrame
        The same dataframe with 'mag' and 'mag_error' columns added.
    """
    # Discard all the negative flux values since they are due to noise or
    # are for faint observations: replace them with their respective errors.
    # Fixed: use the same boolean mask on both sides of the assignment
    # instead of mixing .loc with positional .iloc, which could misalign
    # rows for non-default indexes.
    neg_flux = lcs['flux'] < 0
    lcs.loc[neg_flux, 'flux'] = lcs.loc[neg_flux, 'flux_error']
    lc = lcs
    # Flux and flux error
    f_obs = lc.flux.values
    f_obs_err = lc.flux_error.values
    constant = (-2.5 / np.log(10))
    # Converting: standard pogson magnitude and first-order error propagation
    flux_convs = mag_ref - 2.5 * np.log10(f_obs)
    err_convs = np.abs(constant * (f_obs_err / f_obs))
    # Adding the new mag and mag_error column
    lc['mag'] = flux_convs
    lc['mag_error'] = err_convs
    return lc
class LightCurveDataset(Dataset):
    """
    Dataset object that reads light curve data from csv-like file(s) and
    serves individual light curves (and plot-ready views of them) to the
    rest of the Astronomaly pipeline.
    """
    # NOTE(review): the list defaults below are mutable default arguments.
    # They are only read (never mutated) in this class so this is currently
    # safe, but None-defaults would be more robust.
    def __init__(self, data_dict, header_nrows=1,
                 delim_whitespace=False, max_gap=50, plot_errors=True,
                 convert_flux=False, mag_ref=22,
                 split_lightcurves=False,
                 filter_colors=['#9467bd', '#1f77b4', '#2ca02c', '#d62728',
                                '#ff7f0e', '#8c564b'],
                 filter_labels=[],
                 which_filters=[],
                 plot_column='flux',
                 **kwargs):
        """
        Reads in light curve data from file(s).

        Parameters
        ----------
        filename : str
            If a single file (of any type) is to be read from, the path can
            be given using this kwarg.
        directory : str
            A directory can be given instead of an explicit list of files.
            The child class will load all appropriate files in this
            directory.
        list_of_files : list
            Instead of the above, a list of files to be loaded can be
            explicitly given.
        output_dir : str
            The directory to save the log file and all outputs to. Defaults
            to './'
        data_dict: Dictionary
            Dictionary with index of the column names corresponding to
            the following specific keys:
            ('id','time','mag','mag_err','flux','flux_err','filters',
            'labels')
            e.g {'time':1,'mag':2}, where 1 and 2 are column index
            correpoding to 'time' and 'mag' in the input data.
            If the data does not have unique ids, the user can neglect the
            'id' key, and the ids will be the file path by default.
            The user can also provide a list of indices for the 'mag' and
            'flux' columns.
            This is the case where the brightness is recorded in more than
            one column. e.g {'time':1,'mag':[2,3]} 2 and 3 corresponds to
            columns with brightness records
        header_nrows: int
            The number of rows the header covers in the dataset, by
            default 1
        convert_flux : bool
            If true converts flux to magnitudes
        mag_ref : float/int
            The reference magnitude for conversion, by default 22. Used to
            convert flux to magnitude if required
        split_lightcurves : bool
            If true, splits up light curves that have large gaps due to
            multiple observing seasons
        max_gap: int
            Maximum gap between consecutive observations, default 50
        delim_whitespace: bool
            Should be True if the data is not separated by a comma, by
            default False
        plot_errors: bool
            If errors are available for the data, this boolean allows them
            to be plotted
        filter_colors: list
            Allows the user to define their own colours (using hex codes)
            for the different filter bands. Will revert to default
            behaviour of the JavaScript chart if the list of colors
            provided is shorter than the number of unique filters.
        filter_labels: list
            For multiband data, labels will be passed to the frontend
            allowing easy identification of different bands in the light
            curve. Assumes the filters are identified by an integer in the
            data such that the first filter (e.g. filter 0) will correspond
            to the first label provided. For example, to plot PLAsTiCC
            data, provide filter_labels=['u','g','r','i','z','y']
        which_filters: list
            Allows the user to select specific filters (thereby dropping
            others). The list of filters to be included must be numeric and
            integer. For example, to select the griz bands only, set
            which_filters = [1, 2, 3, 4]
        plot_column: string
            Indicates which column to plot. Usually data will have either a
            flux or a mag column. The code will automatically detect which
            is available but if both are available, it will use this kwarg
            to select which to use. The corresponding errors are also used
            (if requested)
        """
        super().__init__(data_dict=data_dict, header_nrows=header_nrows,
                         delim_whitespace=delim_whitespace, mag_ref=mag_ref,
                         max_gap=max_gap, plot_errors=plot_errors,
                         filter_labels=filter_labels,
                         which_filters=which_filters,
                         convert_flux=convert_flux,
                         split_lightcurves=split_lightcurves,
                         filter_colors=filter_colors,
                         plot_column=plot_column, **kwargs)
        self.data_type = 'light_curve'
        self.metadata = pd.DataFrame(data=[])
        self.data_dict = data_dict
        self.header_nrows = header_nrows
        self.delim_whitespace = delim_whitespace
        self.max_gap = max_gap
        self.plot_errors = plot_errors
        self.filter_labels = filter_labels
        self.filter_colors = filter_colors
        self.convert_flux = convert_flux
        self.plot_column = plot_column
        # ================================================================
        # Reading the light curve data
        # ================================================================
        # The case where there is one file
        data = pd.read_csv(self.files[0], skiprows=self.header_nrows,
                           delim_whitespace=self.delim_whitespace,
                           header=None)
        # Bug fix: file_paths was previously only initialised inside the
        # multi-file branch below, causing a NameError for single-file
        # input when the file-based ID loop ran.
        file_len = [len(data)]
        file_paths = [self.files[0]]
        # The case for multiple files of light curve data
        if len(self.files) > 1:
            for fl in range(1, len(self.files)):
                this_data = pd.read_csv(
                    self.files[fl],
                    skiprows=self.header_nrows,
                    delim_whitespace=self.delim_whitespace,
                    header=None)
                data = pd.concat([data, this_data])
                file_paths.append(self.files[fl])
                file_len.append(len(this_data))
        # Fallback per-row IDs taken from each source file's basename
        IDs = []
        for fl in range(0, len(file_len)):
            for f in range(file_len[fl]):
                IDs.append(file_paths[fl].split(os.path.sep)[-1])
        # =================================================================
        # Renaming the columns into standard columns for astronomaly
        # =================================================================
        time = data.iloc[:, self.data_dict['time']]
        standard_data = {'time': time}
        if 'id' in data_dict.keys():
            idx = data.iloc[:, self.data_dict['id']]
            ids = np.unique(idx)
            ids = np.array(ids, dtype='str')
            # IDs are always handled as strings downstream
            standard_data.update({'ID': np.array(idx, dtype='str')})
        else:
            # No explicit id column: fall back to the file-derived IDs
            idx = IDs
            self.index = idx
            self.metadata = pd.DataFrame({'ID': idx}, index=idx)
            standard_data.update({'ID': IDs})
        if 'labels' in data_dict.keys():
            labels = data.iloc[:, self.data_dict['labels']]
            standard_data.update({'labels': labels})
        # Possible brightness columns
        brightness_cols = ['mag', 'flux']
        # Looping through the brightness columns
        for data_col in brightness_cols:
            if data_col in self.data_dict.keys():
                # ============Multiple brightness columns=====================
                try:
                    for i in range(len(self.data_dict[data_col])):
                        # The case where there are no error columns
                        standard_data.update({data_col + str(i + 1):
                                              data.iloc[:,
                                              self.data_dict[data_col][i]]})
                        # The case where there are brightness error columns
                        if data_col + '_err' in self.data_dict.keys():
                            # Updating the standard dictionary to include the
                            # brightness_errors
                            key = data_col + '_error' + str(i + 1)
                            err_col = self.data_dict[data_col + '_err'][i]
                            val = data.iloc[:, err_col]
                            standard_data.update({key: val})
                # =================Single brightness Column===================
                # ============================================================
                except TypeError:
                    # len() failed above, so a single column index was given
                    val = data.iloc[:, self.data_dict[data_col]]
                    standard_data.update({data_col: val})
                    if data_col + '_err' in self.data_dict.keys():
                        key = data_col + '_error'
                        val = data.iloc[:, self.data_dict[data_col + '_err']]
                        standard_data.update({key: val})
        # ============The case where there are filters in the data=====
        if 'filters' in self.data_dict.keys():
            val = data.iloc[:, self.data_dict['filters']]
            standard_data.update({'filters': val})
        lc = pd.DataFrame.from_dict(standard_data)
        if len(which_filters) > 0 and 'filters' in lc.columns:
            # Drop filters if requested
            lc = lc.loc[np.in1d(lc['filters'], which_filters)]
        if 'flux' in lc.columns:
            # Convert flux to mag
            if convert_flux is True:
                lc = convert_flux_to_mag(lc, mag_ref)
        if split_lightcurves:
            # Split the light curve into chunks
            lc = split_lc(lc, self.max_gap)
        self.light_curves_data = lc
        ids = np.unique(lc.ID)
        self.index = ids
        # Add the classes to the metadata
        if 'labels' in lc.columns:
            # One label per object: take the label of its first row
            lc1 = lc.drop_duplicates(subset='ID')
            labels = [lc1[lc1['ID'] == i]['labels'].values[0] for i in ids]
            self.metadata = pd.DataFrame({'label': labels, 'ID': ids},
                                         index=ids)
        # Metadata without the class
        else:
            self.metadata = pd.DataFrame({'ID': ids}, index=ids)
        print('%d light curves loaded successfully' % len(self.index))

    def get_display_data(self, idx):
        """
        Returns a single instance of the dataset in a form that is ready to
        be displayed by the web front end.

        Parameters
        ----------
        idx : str
            Index (should be a string to avoid ambiguity)

        Returns
        -------
        dict
            json-compatible dictionary of the light curve data
        """
        # Reading in the light curve data
        light_curve_original = self.light_curves_data[
            self.light_curves_data['ID'] == idx]
        lc_cols = light_curve_original.columns.values.tolist()
        # Make a decision about what to plot based on what columns are
        # available and what column is requested
        if 'flux' in lc_cols and 'mag' in lc_cols:
            data_col = [self.plot_column]
            err_col = [self.plot_column + '_error']
        elif 'mag' in lc_cols:
            data_col = ['mag']
            err_col = ['mag_error']
        else:
            data_col = ['flux']
            err_col = ['flux_error']
        out_dict = {'plot_data_type': data_col,
                    'data': [], 'errors': [], 'filter_labels': [],
                    'filter_colors': []}
        # Only plot errors if the error column actually exists and errors
        # were requested
        if err_col[0] in lc_cols and self.plot_errors:
            plot_errors = True
        else:
            plot_errors = False
        if 'filters' in lc_cols:
            multiband = True
            unique_filters = np.unique(light_curve_original['filters'])
        else:
            multiband = False
            unique_filters = [0]
        k = 0
        for filt in unique_filters:
            if multiband:
                msk = light_curve_original['filters'] == filt
                light_curve = light_curve_original[msk]
            else:
                light_curve = light_curve_original
            mag_indx = [cl for cl in data_col if cl in lc_cols]
            err_indx = [cl for cl in err_col if cl in lc_cols]
            if plot_errors:
                # Upper/lower band limits for the frontend error shading
                light_curve['err_lower'] = light_curve[mag_indx].values - \
                    light_curve[err_indx].values
                light_curve['err_upper'] = light_curve[mag_indx].values + \
                    light_curve[err_indx].values
                lc_errs = light_curve[['time', 'err_lower', 'err_upper']]
                err = lc_errs.values.tolist()
            # inserting the time column to data and adding 'data'
            # and 'errors' to out_dict
            mag_indx.insert(0, 'time')
            dat = light_curve[mag_indx].values.tolist()
            out_dict['data'].append(dat)
            if plot_errors:
                out_dict['errors'].append(err)
            else:
                out_dict['errors'].append([])
            # Fall back to the raw filter value / default colour if not
            # enough labels/colours were supplied
            if len(self.filter_labels) >= len(unique_filters):
                out_dict['filter_labels'].append(self.filter_labels[k])
            else:
                out_dict['filter_labels'].append((str)(filt))
            if len(self.filter_colors) >= len(unique_filters):
                out_dict['filter_colors'].append(self.filter_colors[k])
            else:
                out_dict['filter_colors'].append('')
            k += 1
        return out_dict

    def get_sample(self, idx):
        """
        Returns the raw light curve rows for a single object.

        Parameters
        ----------
        idx : str
            ID of the light curve to retrieve

        Returns
        -------
        pd.DataFrame
            All rows of light_curves_data whose ID matches idx
        """
        # Choosing light curve values for a specific ID
        light_curve_sample = self.light_curves_data[
            self.light_curves_data['ID'] == idx]
        return light_curve_sample
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,229 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/visualisation/tsne_plot.py | from sklearn.manifold import TSNE
import numpy as np
import pandas as pd
from astronomaly.base.base_pipeline import PipelineStage
class TSNE_Plot(PipelineStage):
    def __init__(self, perplexity=30, max_samples=2000, shuffle=False,
                 learning_rate=10, n_iter=5000, **kwargs):
        """
        Computes a t-SNE 2d visualisation of the data

        Parameters
        ----------
        perplexity : float, optional
            The perplexity is related to the number of nearest neighbors that
            is used in other manifold learning algorithms (see t-SNE
            documentation), by default 30
        max_samples : int, optional
            Limits the computation to this many samples (by default 2000).
            Will be the first 2000 samples if shuffle=False. This is very
            useful as t-SNE scales particularly badly with sample size.
        shuffle : bool, optional
            Randomises the sample before selecting max_samples, by default
            False
        learning_rate : float, optional
            Learning rate passed to sklearn's TSNE (by default 10, the value
            previously hard-coded here).
        n_iter : int, optional
            Number of optimisation iterations passed to sklearn's TSNE (by
            default 5000, the value previously hard-coded here).
        """
        super().__init__(perplexity=perplexity, max_samples=max_samples,
                         shuffle=shuffle, learning_rate=learning_rate,
                         n_iter=n_iter, **kwargs)
        self.perplexity = perplexity
        self.max_samples = max_samples
        self.shuffle = shuffle
        self.learning_rate = learning_rate
        self.n_iter = n_iter

    def _execute_function(self, features):
        """
        Does the work in actually running the pipeline stage.

        Parameters
        ----------
        features : pd.DataFrame or similar
            The input features to run on. Assumes the index is the id
            of each object and all columns are to be used as features.

        Returns
        -------
        pd.DataFrame
            Returns a dataframe with the same index as the input features and
            two columns, one for each dimension of the t-SNE plot.
        """
        # Subsample to keep t-SNE tractable on large datasets
        if len(features) > self.max_samples:
            if not self.shuffle:
                inds = features.index[:self.max_samples]
            else:
                inds = np.random.choice(features.index, self.max_samples,
                                        replace=False)
            features = features.loc[inds]
        # NOTE(review): newer scikit-learn versions renamed TSNE's n_iter
        # kwarg to max_iter — confirm against the pinned sklearn version.
        ts = TSNE(perplexity=self.perplexity,
                  learning_rate=self.learning_rate, n_iter=self.n_iter)
        ts.fit(features)
        fitted_tsne = ts.embedding_
        return pd.DataFrame(data=fitted_tsne, index=features.index)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,230 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/scripts/CRTS_example.py | # An example for the CRTS data
from astronomaly.data_management import light_curve_reader
from astronomaly.feature_extraction import feets_features
from astronomaly.postprocessing import scaling
from astronomaly.anomaly_detection import isolation_forest, human_loop_learning
from astronomaly.visualisation import tsne_plot
import os
import pandas as pd
# Root directory for data
data_dir = os.path.join(os.getcwd(), 'example_data')
# Input csv of CRTS light curves used by run_pipeline below
lc_path = os.path.join(data_dir, 'CRTS', 'CRTS_subset_500.csv')
# Where output should be stored
output_dir = os.path.join(
    data_dir, 'astronomaly_output', 'CRTS', '')
# Create the output directory on first run
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
# NOTE(review): appears unused in the visible part of this script —
# presumably read by the Astronomaly server; confirm before removing.
display_transform_function = []
# Change this to false to automatically use previously run features
force_rerun = True
def run_pipeline():
    """
    Any script passed to the Astronomaly server must implement this function.

    run_pipeline must return a dictionary that contains the keys listed below.

    Returns
    -------
    pipeline_dict : dictionary
        Dictionary containing all relevant data. Keys must include:
        'dataset' - an astronomaly Dataset object
        'features' - pd.DataFrame containing the features
        'anomaly_scores' - pd.DataFrame with a column 'score' with the anomaly
        scores
        'visualisation' - pd.DataFrame with two columns for visualisation
        (e.g. TSNE or UMAP)
        'active_learning' - an object that inherits from BasePipeline and will
        run the human-in-the-loop learning when requested
    """
    # Dataset object that manages reading the CRTS light curves
    dataset = light_curve_reader.LightCurveDataset(
        filename=lc_path,
        data_dict={'id': 0, 'time': 4, 'mag': 2, 'mag_err': 3},
        output_dir=output_dir
    )

    # Feature extraction stage (feets time-series features)
    feature_extractor = feets_features.Feets_Features(
        exclude_features=['Period_fit', 'PercentDifferenceFluxPercentile',
                          'FluxPercentileRatioMid20',
                          'FluxPercentileRatioMid35',
                          'FluxPercentileRatioMid50',
                          'FluxPercentileRatioMid65',
                          'FluxPercentileRatioMid80'],
        compute_on_mags=True,
        # Feets prints a lot of warnings to screen, set this to true to ignore
        # You may also want to run with `python -W ignore` (with caution)
        ignore_warnings=True,
        output_dir=output_dir,
        force_rerun=force_rerun)
    feats = feature_extractor.run_on_dataset(dataset)

    # Rescale the features before anomaly detection
    scaler = scaling.FeatureScaler(force_rerun=force_rerun,
                                   output_dir=output_dir)
    feats = scaler.run(feats)

    # Isolation forest provides the raw anomaly scores
    iforest = isolation_forest.IforestAlgorithm(
        force_rerun=force_rerun, output_dir=output_dir)
    scores = iforest.run(feats)

    # Map the raw scores onto the 0-5 range used by the frontend
    score_converter = human_loop_learning.ScoreConverter(
        force_rerun=force_rerun, output_dir=output_dir)
    scores = score_converter.run(scores)

    try:
        # Restore any human labels applied in a previous session so they are
        # not lost between uses of Astronomaly
        if 'human_label' not in scores.columns:
            previous_labels = pd.read_csv(
                os.path.join(output_dir, 'ml_scores.csv'),
                index_col=0,
                dtype={'human_label': 'int'})
            previous_labels.index = previous_labels.index.astype('str')
            if len(scores) == len(previous_labels):
                scores = pd.concat(
                    (scores, previous_labels['human_label']),
                    axis=1, join='inner')
    except FileNotFoundError:
        pass

    # Active learning stage, invoked on demand by the frontend
    learner = human_loop_learning.NeighbourScore(
        alpha=1, output_dir=output_dir)

    # TSNE embedding for the visualisation panel
    tsne = tsne_plot.TSNE_Plot(
        force_rerun=False,
        output_dir=output_dir,
        perplexity=100)
    embedding = tsne.run(feats)

    # The run_pipeline function must return a dictionary with these keywords
    return {'dataset': dataset,
            'features': feats,
            'anomaly_scores': scores,
            'visualisation': embedding,
            'active_learning': learner}
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,231 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/photutils_features.py | from astronomaly.base.base_pipeline import PipelineStage
import numpy as np
from photutils import morphology
class PhotutilsFeatures(PipelineStage):
    def __init__(self, columns, **kwargs):
        """
        Uses the photutils package to extract requested properties from the
        image. The list of available photutil properties is here:
        https://photutils.readthedocs.io/en/stable/api/photutils.segmentation.SourceCatalog.html#photutils.segmentation.SourceCatalog
        Properties that are returned as arrays will automatically be flattened
        and each element will be treated as an independent feature.

        Parameters
        ----------
        columns : list
            Names of the photutils source properties to extract.
        """
        super().__init__(columns=columns, **kwargs)
        self.columns = columns
        # Labels are only known after the first extraction because
        # array-valued properties expand into several features.
        self.labels = None

    def _set_labels(self, labels):
        """
        Because the number of features may not be known till runtime, we can
        only create the labels of these features at runtime.
        """
        self.labels = np.array(labels, dtype='str')

    def _execute_function(self, image):
        """
        Does the work in extracting the requested properties using photutils.

        Parameters
        ----------
        image : np.ndarray
            Input image

        Returns
        -------
        np.ndarray
            Extracted features
        """
        # Bug fix: the original condition was `np.prod(image.shape) > 2`,
        # which is true for any image with more than two pixels, so
        # `image[0]` (the first *row* of a 2d image) was always passed on to
        # photutils. Only strip a leading axis when the array genuinely has
        # more than two dimensions.
        if len(image.shape) > 2:
            # Assumes the leading axis indexes bands and the first band is
            # representative — TODO confirm against the image reader.
            image = image[0]
        feats = []
        labels = []
        cat = morphology.data_properties(image)
        for c in self.columns:
            prop = np.array(getattr(cat, c)).flatten()
            if len(prop) == 1:
                # Scalar property: one feature, label is the property name
                feats.append(prop[0])
                labels.append(c)
            else:
                # Array property: flatten into several features, each with
                # an index appended to the property name
                feats += prop.tolist()
                for i in range(len(prop)):
                    labels.append(c + str(i))
        if self.labels is None:
            self._set_labels(labels)
        return np.array(feats)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,232 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/flux_histogram.py | import numpy as np
from astronomaly.base.base_pipeline import PipelineStage
from astronomaly.preprocessing.image_preprocessing import image_transform_scale
def calculate_flux_histogram(img, nbins, norm=True):
    """
    Histograms the flux values of the pixels of an image into a given number
    of bins.

    Parameters
    ----------
    img : np.ndarray
        Input image
    nbins : int
        Number of bins to use.
    norm : boolean
        If true, normalises the image first so that the histogram will be of
        values from zero to one.

    Returns
    -------
    np.ndarray
        Density-normalised histogram values, one per bin.
    """
    pixels = image_transform_scale(img) if norm else img
    values, _ = np.histogram(pixels, bins=nbins, density=True)
    return values
class FluxHistogramFeatures(PipelineStage):
    def __init__(self, nbins=25, norm=True, **kwargs):
        """
        Simple histogram of flux values.

        Parameters
        ----------
        nbins : int
            Number of bins to use.
        norm : bool
            If true, normalises the image first so that histogram will range
            from zero to one.
        """
        super().__init__(nbins=nbins, norm=norm, **kwargs)
        self.nbins = nbins
        self.norm = norm

    def _set_labels(self):
        """
        Because the number of features may not be known till runtime, we can
        only create the labels of these features at runtime.
        """
        if self.nbands == 1:
            self.labels = ['hist_%d' % i for i in range(self.nbins)]
        else:
            self.labels = []
            for band in range(self.nbands):
                self.labels += \
                    ['hist_%d_band_%d' % (i, band) for i in range(self.nbins)]

    def _execute_function(self, image):
        """
        Does the work in actually extracting the histogram.

        Parameters
        ----------
        image : np.ndarray
            Input image, either 2d (greyscale-like) or 3d with the band axis
            last.

        Returns
        -------
        array
            Contains the extracted flux histogram features
        """
        if len(image.shape) != 2:
            self.nbands = image.shape[2]
        else:
            self.nbands = 1
        if len(self.labels) == 0:
            # Only call this once we know the dimensions of the input data.
            self._set_labels()
        if self.nbands == 1:
            # Bug fix: `norm` was previously not forwarded here, so a
            # norm=False setting was silently ignored for greyscale images.
            return calculate_flux_histogram(image, nbins=self.nbins,
                                            norm=self.norm)
        # Multi-band image: histogram each band and concatenate the results.
        # (A dead local `labels` list was removed — label construction lives
        # in _set_labels.)
        hist_all_bands = []
        for band in range(self.nbands):
            hist_feats = calculate_flux_histogram(image[:, :, band],
                                                  nbins=self.nbins,
                                                  norm=self.norm)
            hist_all_bands += list(hist_feats)
        return hist_all_bands
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,233 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/base/base_pipeline.py | from astronomaly.base import logging_tools
from os import path
import pandas as pd
import numpy as np
from pandas.util import hash_pandas_object
import time
class PipelineStage(object):
    def __init__(self, *args, **kwargs):
        """
        Base class defining functionality for all pipeline stages. To
        contribute a new pipeline stage to Astronomaly, create a new class and
        inherit PipelineStage. Always start by calling "super().__init__()" and
        pass it all the arguments of the init function in your new class. The
        only other function that needs to be changed is `_execute_function`
        which should actually implement pipeline stage functionality. The base
        class will take care of automatic logging, deciding whether or not a
        function has already been run on this data, saving and loading of files
        and error checking of inputs and outputs.

        Parameters
        ----------
        force_rerun : bool
            If True will force the function to run over all data, even if it
            has been called before.
        save_output : bool
            If False will not save and load any files. Only use this if
            functions are very fast to rerun or if you cannot write to disk.
        output_dir : string
            Output directory where all outputs will be stored. Defaults to
            current working directory.
        file_format : string
            Format to save the output of this pipeline stage to.
            Accepted values are:
            parquet
        drop_nans : bool
            If true, will drop any NaNs from the input before passing it to the
            function
        """
        # This will be the name of the child class, not the parent.
        self.class_name = type(locals()['self']).__name__
        # Human-readable record of exactly how this stage was instantiated;
        # written to the log and used for rerun detection.
        self.function_call_signature = \
            logging_tools.format_function_call(self.class_name,
                                               *args, **kwargs)
        # Disables the automatic saving of intermediate outputs
        if 'save_output' in kwargs and kwargs['save_output'] is False:
            self.save_output = False
        else:
            self.save_output = True
        # Handles automatic file reading and writing
        if 'output_dir' in kwargs:
            self.output_dir = kwargs['output_dir']
        else:
            self.output_dir = './'
        # NaNs are dropped/zeroed by default; see run() and run_on_dataset().
        if 'drop_nans' in kwargs and kwargs['drop_nans'] is False:
            self.drop_nans = False
        else:
            self.drop_nans = True
        # This allows the automatic logging every time this class is
        # instantiated (i.e. every time this pipeline stage
        # is run). That means any class that inherits from this base class
        # will have automated logging.
        logging_tools.setup_logger(log_directory=self.output_dir,
                                   log_filename='astronomaly.log')
        # force_rerun short-circuits the "same arguments as last time?" check
        # by pretending the arguments changed.
        if 'force_rerun' in kwargs and kwargs['force_rerun']:
            self.args_same = False
            self.checksum = ''
        else:
            self.args_same, self.checksum = \
                logging_tools.check_if_inputs_same(self.class_name,
                                                   locals()['kwargs'])
        if 'file_format' in kwargs:
            self.file_format = kwargs['file_format']
        else:
            self.file_format = 'parquet'
        self.output_file = path.join(self.output_dir,
                                     self.class_name + '_output')
        if self.file_format == 'parquet':
            if '.parquet' not in self.output_file:
                self.output_file += '.parquet'
        # If this stage previously ran with identical arguments, reload its
        # old output so work can be skipped or resumed incrementally.
        if path.exists(self.output_file) and self.args_same:
            self.previous_output = self.load(self.output_file)
        else:
            self.previous_output = pd.DataFrame(data=[])
        # Feature labels (output column names); subclasses may populate this
        # at runtime once the number of features is known.
        self.labels = []

    def save(self, output, filename, file_format=''):
        """
        Saves the output of this pipeline stage.

        Parameters
        ----------
        output : pd.DataFrame
            Whatever the output is of this stage.
        filename : str
            File name of the output file.
        file_format : str, optional
            File format can be provided to override the class's file format
        """
        if len(file_format) == 0:
            file_format = self.file_format
        if self.save_output:
            # Parquet needs strings as column names
            # (which is good practice anyway)
            output.columns = output.columns.astype('str')
            if file_format == 'parquet':
                if '.parquet' not in filename:
                    filename += '.parquet'
                output.to_parquet(filename)
            elif file_format == 'csv':
                if '.csv' not in filename:
                    filename += '.csv'
                output.to_csv(filename)

    def load(self, filename, file_format=''):
        """
        Loads previous output of this pipeline stage.

        Parameters
        ----------
        filename : str
            File name of the output file.
        file_format : str, optional
            File format can be provided to override the class's file format

        Returns
        -------
        output : pd.DataFrame
            Whatever the output is of this stage.
        """
        if len(file_format) == 0:
            file_format = self.file_format
        if file_format == 'parquet':
            if '.parquet' not in filename:
                filename += '.parquet'
            output = pd.read_parquet(filename)
        elif file_format == 'csv':
            if '.csv' not in filename:
                filename += '.csv'
            output = pd.read_csv(filename)
        return output

    def hash_data(self, data):
        """
        Returns a checksum of a DataFrame (or array) to allow checking if the
        input changed between calls.

        Parameters
        ----------
        data : pd.DataFrame or similar
            The input data on which to compute the checksum

        Returns
        -------
        checksum : int
            The checksum
        """
        try:
            # Hash every row, then hash the vector of row-hashes down to a
            # single value.
            hash_per_row = hash_pandas_object(data)
            total_hash = hash_pandas_object(pd.DataFrame(
                [hash_per_row.values]))
        except TypeError:
            # Input data is not already a pandas dataframe
            # Most likely it's an image (np.array)
            # In order to hash, it has to be converted to a DataFrame so must
            # be a 2d array
            try:
                if len(data.shape) > 2:
                    data = data.ravel()
                total_hash = hash_pandas_object(pd.DataFrame(data))
            except (AttributeError, ValueError) as e:
                # I'm not sure this could ever happen but just in case
                logging_tools.log("""Data must be either a pandas dataframe or
                                  numpy array""", level='ERROR')
                raise e
        return int(total_hash.values[0])

    def run(self, data):
        """
        This is the external-facing function that should always be called
        (rather than _execute_function). This function will automatically check
        if this stage has already been run with the same arguments and on the
        same data. This can allow a much faster user experience avoiding
        rerunning functions unnecessarily.

        Parameters
        ----------
        data : pd.DataFrame
            Input data on which to run this pipeline stage on.

        Returns
        -------
        pd.DataFrame
            Output
        """
        new_checksum = self.hash_data(data)
        if self.args_same and new_checksum == self.checksum:
            # This means we've already run this function for all instances in
            # the input and with the same arguments
            msg = "Pipeline stage %s previously called " \
                  "with same arguments and same data. Loading from file. " \
                  "Use 'force_rerun=True' in init args to override this " \
                  "behavior." % self.class_name
            logging_tools.log(msg, level='WARNING')
            return self.previous_output
        else:
            msg_string = self.function_call_signature + ' - checksum: ' + \
                (str)(new_checksum)
            # print(msg_string)
            logging_tools.log(msg_string)

            print('Running', self.class_name, '...')
            t1 = time.time()
            if self.drop_nans:
                # This is ok here because everything after feature extraction
                # is always a DataFrame
                output = self._execute_function(data.dropna())
            else:
                output = self._execute_function(data)
            # save() internally respects self.save_output
            self.save(output, self.output_file)
            print('Done! Time taken:', (time.time() - t1), 's')

            return output

    def run_on_dataset(self, dataset=None):
        """
        This function should be called for pipeline stages that perform feature
        extraction so require taking a Dataset object as input.
        This is an external-facing function that should always be called
        (rather than _execute_function). This function will automatically check
        if this stage has already been run with the same arguments and on the
        same data. This can allow a much faster user experience avoiding
        rerunning functions unnecessarily.

        Parameters
        ----------
        dataset : Dataset
            The Dataset object on which to run this feature extraction
            function, by default None

        Returns
        -------
        pd.Dataframe
            Output
        """
        # *** WARNING: this has not been tested against adding new data and
        # *** ensuring the function is called for new data only
        # The data checksum is computed on the first sample only, as a cheap
        # proxy for the whole dataset.
        dat = dataset.get_sample(dataset.index[0])
        new_checksum = self.hash_data(dat)
        if not self.args_same or new_checksum != self.checksum:
            # If the arguments have changed we rerun everything
            msg_string = self.function_call_signature + ' - checksum: ' + \
                (str)(new_checksum)
            logging_tools.log(msg_string)
        else:
            # Otherwise we only run instances not already in the output
            msg = "Pipeline stage %s previously called " \
                  "with same arguments. Loading from file. Will only run " \
                  "for new samples. Use 'force_rerun=True' in init args " \
                  "to override this behavior." % self.class_name
            logging_tools.log(msg, level='WARNING')

        print('Extracting features using', self.class_name, '...')
        t1 = time.time()

        logged_nan_msg = False
        nan_msg = "NaNs detected in some input data." \
                  "NaNs will be set to zero. You can change " \
                  "behaviour by setting drop_nan=False"

        new_index = []
        output = []
        n = 0
        for i in dataset.index:
            # Skip samples already processed in a previous run (unless the
            # arguments changed, in which case everything is recomputed).
            if i not in self.previous_output.index or not self.args_same:
                if n % 100 == 0:
                    print(n, 'instances completed')
                input_instance = dataset.get_sample(i)
                if input_instance is None:
                    none_msg = "Input sample is None, skipping sample"
                    logging_tools.log(none_msg, level='WARNING')
                    continue
                if self.drop_nans:
                    # Despite the name, NaNs in per-sample data are zeroed
                    # (not dropped) so array shapes stay consistent.
                    found_nans = False
                    try:
                        if np.any(np.isnan(input_instance)):
                            input_instance = np.nan_to_num(input_instance)
                            found_nans = True
                    except TypeError:
                        # So far I've only found this happens when there are
                        # strings in a DataFrame
                        for col in input_instance.columns:
                            try:
                                if np.any(np.isnan(input_instance[col])):
                                    input_instance[col] = \
                                        np.nan_to_num(input_instance[col])
                                    found_nans = True
                            except TypeError:
                                # Probably just a column of strings
                                pass
                    if not logged_nan_msg and found_nans:
                        print(nan_msg)
                        logging_tools.log(nan_msg, level='WARNING')
                        logged_nan_msg = True

                out = self._execute_function(input_instance)
                # NaNs in the extracted features indicate a failed extraction
                if np.any(np.isnan(out)):
                    logging_tools.log("Feature extraction failed for id " + i)
                output.append(out)
                new_index.append(i)
                n += 1
        new_output = pd.DataFrame(data=output, index=new_index,
                                  columns=self.labels)
        # Merge freshly computed rows with previously saved ones where the
        # arguments were unchanged and there genuinely are new rows.
        index_same = new_output.index.equals(self.previous_output.index)
        if self.args_same and not index_same:
            output = pd.concat((self.previous_output, new_output))
        else:
            output = new_output
        if self.save_output:
            self.save(output, self.output_file)
        print('Done! Time taken: ', (time.time() - t1), 's')

        return output

    def _execute_function(self, data):
        """
        This is the main function of the PipelineStage and is what should be
        implemented when inheriting from this class.

        Parameters
        ----------
        data : Dataset object, pd.DataFrame
            Data type depends on whether this is feature extraction stage (so
            runs on a Dataset) or any other stage (e.g. anomaly detection)

        Raises
        ------
        NotImplementedError
            This function must be implemented when inheriting this class.
        """
        raise NotImplementedError
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,234 | MichelleLochner/astronomaly | refs/heads/main | /setup.py | import setuptools
import re
import os
VERSIONFILE = os.path.join("astronomaly", "_version.py")

# Read the version string from _version.py without importing the package
# (importing could pull in heavy optional dependencies at build time).
# Bug fix: the original used open(...).read() without ever closing the
# handle; a context manager guarantees the file is closed.
with open(VERSIONFILE, "rt") as version_file:
    verstrline = version_file.read()
VSRE = r"^__version__ = ['\"]([^'\"]*)['\"]"
mo = re.search(VSRE, verstrline, re.M)
if mo:
    verstr = mo.group(1)
else:
    raise RuntimeError("Unable to find version string in %s." % (VERSIONFILE,))

setuptools.setup(
    name="astronomaly",
    version=verstr,
    author="Michelle Lochner",
    author_email="dr.michelle.lochner@gmail.com",
    description="A general anomaly detection framework for Astronomical data",
    # NOTE(review): long_description_content_type is set but no
    # long_description is supplied — confirm whether the README should be
    # read in here.
    long_description_content_type="text/markdown",
    url="https://github.com/MichelleLochner/astronomaly",
    packages=setuptools.find_packages(),
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: BSD-3 License",
        "Operating System :: OS Independent",
    ],
)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,235 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/_version.py | __version__ = "1.2"
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,236 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/feature_extraction/feets_features.py | import numpy as np
import feets
from astronomaly.base.base_pipeline import PipelineStage
import warnings
from astronomaly.base import logging_tools
class Feets_Features(PipelineStage):
    def __init__(self, exclude_features,
                 compute_on_mags=False,
                 ignore_warnings=False,
                 filter_labels=['u', 'g', 'r', 'i', 'z', 'y'],
                 **kwargs):
        """
        Applies the 'feets' general time series feature extraction package.

        Bug fix: the per-filter extraction loop previously iterated over a
        hard-coded 6 filters; it now iterates over ``len(filter_labels)`` so
        custom filter lists are respected (behaviour is unchanged for the
        default six-filter list).

        Parameters
        ----------
        exclude_features : list
            List of features to be excluded when calculating the features (as
            strings)
        compute_on_mags : bool
            If true, will convert flux to magnitude
        ignore_warnings : bool
            The feets feature extraction package raises many, many warnings
            especially when run on large datasets. This flag will disable all
            warning printouts from feets. It is HIGHLY recommended
            to first check the warnings before disabling them.
        filter_labels : list
            Optional list of strings corresponding to the name of each filter.
            We explicitly assume each observations' filter (if available) has
            a numerical value, translated to a string using this list
        output_dir : str
            The directory to save the log file and all outputs to. Defaults to
            './'
        force_rerun : bool
            If True will force the function to run over all data, even if it
            has been called before.
        """
        super().__init__(exclude_features=exclude_features,
                         compute_on_mags=compute_on_mags,
                         ignore_warnings=ignore_warnings,
                         filter_labels=filter_labels, **kwargs)
        self.exclude_features = exclude_features
        # Feature labels are only known once feets has run (set lazily)
        self.labels = None
        self.compute_on_mags = compute_on_mags
        self.ignore_warnings = ignore_warnings
        self.filter_labels = filter_labels

    def _set_labels(self, feature_labels):
        """
        Because the number of features may not be known till runtime, we can
        only create the labels of these features at runtime.
        """
        # All available features
        self.labels = feature_labels

    def _execute_function(self, lc_data):
        """
        Takes light curve data for a single object and computes the features
        based on the available columns.

        Parameters
        ----------
        lc_data: pandas DataFrame
            Light curve of a single object

        Returns
        -------
        array
            An array of the calculated features or an array of nan values
            in case there is an error during the feature extraction process
        """
        with warnings.catch_warnings():
            if self.ignore_warnings:
                # Feets produces a lot of warnings that can't easily be
                # redirected, this switches them off
                warnings.simplefilter('ignore')
            if self.compute_on_mags is True and 'mag' not in lc_data.columns:
                msg = """compute_on_mags selected but no magnitude column
                found - switching to flux"""
                logging_tools.log(msg, level='WARNING')
            if self.compute_on_mags is True and 'mag' in lc_data.columns:
                standard_lc_columns = ['time', 'mag', 'mag_error']
            else:
                standard_lc_columns = ['time', 'flux', 'flux_error']
            current_lc_columns = [cl for cl in standard_lc_columns
                                  if cl in lc_data.columns]
            # Translate our column names into the names feets understands
            available_columns = ['time']
            for cl in current_lc_columns:
                if cl == 'mag' or cl == 'flux':
                    available_columns.append('magnitude')
                if cl == 'mag_error' or cl == 'flux_error':
                    available_columns.append('error')
            # Creates the feature extractor
            fs = feets.FeatureSpace(data=available_columns,
                                    exclude=self.exclude_features)
            # Number of features feets will compute (used for nan padding)
            len_labels = len(fs.features_)
            # The case where we have filters
            if 'filters' in lc_data.columns:
                ft_values = []
                ft_labels = []
                passbands = self.filter_labels
                # Iterate over the configured filters (previously a
                # hard-coded range(0, 6))
                for i in range(len(passbands)):
                    filter_lc = lc_data[lc_data['filters'] == i]
                    lc_columns = []
                    for col in current_lc_columns:
                        lc_columns.append(filter_lc[col])
                    # feets needs at least 5 points; this also covers
                    # filters entirely missing from this light curve.
                    # NOTE(review): assumes lc_data has an 'ID' column —
                    # confirm against the light curve reader
                    if len(filter_lc.ID) >= 5:
                        features, values = fs.extract(*lc_columns)
                        new_labels = [f + '_' + passbands[i]
                                      for f in features]
                        for j in range(len(features)):
                            ft_labels.append(new_labels[j])
                            ft_values.append(values[j])
                    else:
                        # Pad with nans so every object has the same length
                        for ft in fs.features_:
                            ft_labels.append(ft + '_' + passbands[i])
                            ft_values.append(np.nan)
                # Updating the labels (first successful call only)
                if self.labels is None:
                    self._set_labels(list(ft_labels))
                return ft_values
            # The case with no filters
            else:
                if len(lc_data.ID) >= 5:
                    lc_columns = []
                    for col in current_lc_columns:
                        lc_columns.append(lc_data[col])
                    ft_labels, ft_values = fs.extract(*lc_columns)
                    # Updating the labels
                    if self.labels is None:
                        self._set_labels(list(ft_labels))
                    return ft_values
                # Too few points so return an array of nan values
                else:
                    return np.array([np.nan for i in range(len_labels)])
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,237 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/scripts/raw_features_example.py | # Replicates the simulated example in the paper
import os
import numpy as np
from astronomaly.data_management import raw_features
from astronomaly.anomaly_detection import lof, human_loop_learning
from astronomaly.visualisation import tsne_plot
# Root directory for data
data_dir = os.path.join(os.getcwd(), 'example_data', )
# Input: simulated light curves and their true class labels
# (expected under example_data/Simulations/)
input_files = [os.path.join(data_dir, 'Simulations', 'y_test.npy'),
               os.path.join(data_dir, 'Simulations', 'labels_test.npy')]
# Where output should be stored
output_dir = os.path.join(
    data_dir, 'astronomaly_output', 'simulations', '')
def artificial_human_labelling(anomalies=None, metadata=None, N=200,
                               human_labels=None):
    """
    Artificially applies "human" labels to the top N ranked objects using
    their known true classes, simulating a human scoring each object.

    Bug fix: ``human_labels`` previously had a mutable dict as its default
    argument (shared between calls); the default is now None and replaced
    with the same mapping inside the function, so behaviour is unchanged.

    Parameters
    ----------
    anomalies : pd.DataFrame
        Anomaly scores indexed by object id; modified in place and returned
    metadata : pd.DataFrame
        Must contain a 'label' column holding each object's true class
    N : int
        Only the first N rows (in the current ordering) receive a label
    human_labels : dict, optional
        Maps a true class to the score a "human" would assign it

    Returns
    -------
    pd.DataFrame
        The anomalies dataframe with a 'human_label' column added
        (-1 for unlabelled rows)

    Raises
    ------
    ValueError
        If anomalies or metadata is not provided
    """
    if human_labels is None:
        human_labels = {0: 0, 1: 0, 2: 3, 3: 0, 4: 5}
    print('Artificially adding human labels...')
    if anomalies is None:
        raise ValueError('Anomaly score dataframe not provided')
    if metadata is None:
        raise ValueError('True labels not given')
    anomalies['human_label'] = [-1] * len(anomalies)
    # Align the true labels with the (already sorted) anomaly ordering
    labels = metadata.loc[anomalies.index]
    for k, score in human_labels.items():
        # Positions within the top N whose true class is k
        inds = labels.index[:N][np.where(labels.label[:N] == k)[0]]
        anomalies.loc[inds, 'human_label'] = score
    print('Done!')
    return anomalies
def run_pipeline():
    """
    Builds and runs the full Astronomaly pipeline for the simulated example:
    raw features -> LOF anomaly detection -> score conversion -> artificial
    human labels -> t-SNE visualisation.

    Returns
    -------
    dict
        The pipeline products keyed as expected by the Controller frontend.
    """
    # Make sure the output location exists before any stage tries to write
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    dataset = raw_features.RawFeatures(list_of_files=input_files,
                                       output_dir=output_dir)
    feats = dataset.features

    lof_stage = lof.LOF_Algorithm(output_dir=output_dir, n_neighbors=100,
                                  force_rerun=False)
    scores = lof_stage.run(feats)

    converter = human_loop_learning.ScoreConverter(output_dir=output_dir)
    scores = converter.run(scores)
    scores = scores.sort_values('score', ascending=False)

    scores = artificial_human_labelling(
        anomalies=scores, metadata=dataset.metadata, N=200,
        human_labels={0: 0, 1: 0, 2: 3, 3: 0, 4: 5})

    # Created (but not run here) so the frontend can trigger active learning
    learner = human_loop_learning.NeighbourScore(
        alpha=1, force_rerun=True, output_dir=output_dir)

    tsne_stage = tsne_plot.TSNE_Plot(output_dir=output_dir, perplexity=50)
    vis = tsne_stage.run(feats.loc[scores.index])

    return {'dataset': dataset,
            'features': feats,
            'anomaly_scores': scores,
            'visualisation': vis,
            'active_learning': learner}
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,238 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/utils/utils.py | import matplotlib.pyplot as plt
import astropy
import os
import pandas as pd
import numpy as np
import xlsxwriter
from PIL import Image
def convert_pybdsf_catalogue(catalogue_file, image_file,
                             remove_point_sources=False,
                             merge_islands=False,
                             read_csv_kwargs=None,
                             colnames=None):
    """
    Converts a pybdsf fits file to a pandas dataframe to be given
    directly to an ImageDataset object.

    Bug fixes: the two dict arguments previously used mutable defaults that
    were shared between calls, and ``colnames`` was mutated in place (also
    altering a caller-supplied dict); defaults are now None and the supplied
    dict is copied. The FITS image is now closed after reading its WCS.

    Parameters
    ----------
    catalogue_file : string
        Pybdsf catalogue in fits table format (a path containing 'csv' is
        read as csv instead)
    image_file : string
        The image corresponding to this catalogue (to extract pixel
        information and naming information)
    remove_point_sources : bool, optional
        If true will remove all sources with an S_Code of 'S'
    merge_islands : bool, optional
        If true, will locate all sources belonging to a particular island
        and merge them, maintaining only the brightest source
    read_csv_kwargs : dict, optional
        Will pass these directly to panda's read_csv function to allow
        reading in of a variety of file structures (e.g. different
        delimiters)
    colnames : dict, optional
        Allows you to choose the column names for "source_identifier"
        (which column to use to identify the source), "Isl_id", "Peak_flux"
        and "S_Code" (if remove_point_sources is true)

    Returns
    -------
    pd.DataFrame
        Catalogue with objid, original_image, peak_flux, pixel and sky
        coordinates; duplicate objids dropped
    """
    if read_csv_kwargs is None:
        read_csv_kwargs = {}
    # Copy so the caller's dict is never modified
    colnames = {} if colnames is None else dict(colnames)
    colnames.setdefault('Peak_flux', 'Peak_flux')
    colnames.setdefault('S_Code', 'S_Code')
    colnames.setdefault('source_identifier', 'Source_id')
    colnames.setdefault('Isl_id', 'Isl_id')
    if 'csv' in catalogue_file:
        catalogue = pd.read_csv(catalogue_file, **read_csv_kwargs)
        # Strip whitespace and leading '#' from the header names
        catalogue.columns = [c.strip().strip('#') for c in catalogue.columns]
    else:
        dat = astropy.table.Table(astropy.io.fits.getdata(catalogue_file))
        catalogue = dat.to_pandas()
    if remove_point_sources:
        catalogue = catalogue[catalogue[colnames['S_Code']] != 'S']
    if merge_islands:
        # Keep only the brightest source of each island
        inds = []
        for isl in np.unique(catalogue[colnames['Isl_id']]):
            msk = catalogue[colnames['Isl_id']] == isl
            selection = catalogue[msk][colnames['Peak_flux']]
            inds.append(catalogue[msk].index[selection.argmax()])
        catalogue = catalogue.loc[inds]
    # The image is opened only to build the WCS for sky->pixel conversion;
    # close it promptly so the file handle is not leaked
    with astropy.io.fits.open(image_file) as hdul:
        w = astropy.wcs.WCS(hdul[0].header, naxis=2)
    original_image = image_file.split(os.path.sep)[-1]
    x, y = w.wcs_world2pix(np.array(catalogue.RA), np.array(catalogue.DEC), 1)
    new_catalogue = pd.DataFrame()
    new_catalogue['objid'] = catalogue[colnames['source_identifier']]
    new_catalogue['original_image'] = [original_image] * len(new_catalogue)
    new_catalogue['peak_flux'] = catalogue[colnames['Peak_flux']]
    new_catalogue['x'] = x
    new_catalogue['y'] = y
    new_catalogue['ra'] = catalogue.RA
    new_catalogue['dec'] = catalogue.DEC
    new_catalogue.drop_duplicates(subset='objid', inplace=True)
    return new_catalogue
def create_catalogue_spreadsheet(image_dataset, scores,
                                 filename='anomaly_catalogue.xlsx',
                                 ignore_nearby_sources=True,
                                 source_radius=0.016):
    """
    Creates a catalogue of the most anomalous sources in the form of an excel
    spreadsheet that includes cutout images.

    Parameters
    ----------
    image_dataset : astronomaly.data_management.image_reader.ImageDataset
        The image dataset
    scores : pd.DataFrame
        The list of objects to convert to spreadsheet. NOTE: This must already
        be sorted in the order you want in the spreadsheet and limited to the
        number you want displayed.
    filename : str, optional
        Filename for spreadsheet, by default 'anomaly_catalogue.xlsx'
    ignore_nearby_sources : bool, optional
        If true, will search for nearby objects before adding to the
        spreadsheet and will only add if no source is found within
        source_radius degrees, by default True
    source_radius : float, optional
        Number of degrees to exclude nearby sources by in degrees, default
        0.016 degrees
    """
    workbook = xlsxwriter.Workbook(filename, {'nan_inf_to_errors': True})
    worksheet = workbook.add_worksheet()
    # Widen the first column to make the text clearer.
    worksheet.set_column('A:E', 25)
    worksheet.set_column('G:H', 25)
    # Column F holds the inserted cutout image, hence the extra width
    worksheet.set_column('F:F', 30)
    # Bold format for the header row only
    cell_format = workbook.add_format({
        'bold': True, 'font_size': 14, 'center_across': True})
    worksheet.set_row(0, 50, cell_format)
    worksheet.write('A1', 'ObjID')
    worksheet.write('B1', 'Image Name')
    worksheet.write('C1', 'RA')
    worksheet.write('D1', 'DEC')
    worksheet.write('E1', 'Peak Flux')
    worksheet.write('F1', 'Cutout')
    worksheet.write('G1', 'Type')
    worksheet.write('H1', 'Comments')
    # Reused for every data row
    cell_format = workbook.add_format({'center_across': True})
    # Row height — presumably sized to fit the 2x-scaled cutout; confirm
    hgt = 180
    cat = image_dataset.metadata
    # Indices are compared as strings throughout
    cat.index = cat.index.astype('str')
    # Spreadsheet rows are 1-indexed; row 1 is the header
    row = 2
    for i in range(len(scores)):
        idx = scores.index[i]
        proceed = True
        if ignore_nearby_sources and i > 0:
            # Euclidean (ra, dec) separation from all previously considered
            # sources; skip this one if any lies within source_radius
            ra_prev = cat.loc[scores.index[:i], 'ra']
            dec_prev = cat.loc[scores.index[:i], 'dec']
            ra_diff = ra_prev - cat.loc[idx, 'ra']
            dec_diff = dec_prev - cat.loc[idx, 'dec']
            radius = np.sqrt(ra_diff ** 2 + dec_diff ** 2)
            if np.any(radius < source_radius):
                proceed = False
        if proceed:
            if cat.loc[idx, 'peak_flux'] == -1:
                # Will trigger it to set the flux
                image_dataset.get_sample(idx)
            worksheet.set_row(row - 1, hgt, cell_format)
            worksheet.write('A%d' % row, idx)
            worksheet.write('B%d' % row, cat.loc[idx, 'original_image'])
            worksheet.write('C%d' % row, cat.loc[idx, 'ra'])
            worksheet.write('D%d' % row, cat.loc[idx, 'dec'])
            worksheet.write('E%d' % row, cat.loc[idx, 'peak_flux'])
            fig = image_dataset.get_display_data(idx)
            image_options = {'image_data': fig, 'x_scale': 2, 'y_scale': 2}
            worksheet.insert_image('F%d' % row, 'img.png', image_options)
            row += 1
    workbook.close()
def get_visualisation_sample(features, anomalies, anomaly_column='score',
                             N_anomalies=20, N_total=2000):
    """
    Convenience function to downsample a set of data for a visualisation plot
    (such as t-SNE or UMAP). You can choose how many anomalies to highlight
    against a backdrop of randomly selected samples.

    Bug fix: if N_anomalies exceeded N_total (but not the dataset size) the
    random-sample size went negative and np.random.choice raised a
    ValueError; N_anomalies is now clamped to N_total.

    Parameters
    ----------
    features : pd.DataFrame
        Input feature set
    anomalies : pd.DataFrame
        Contains the anomaly score to rank the objects by.
    anomaly_column : string, optional
        The column used to rank the anomalies by (always assumes higher is
        more anomalous), by default 'score'
    N_anomalies : int, optional
        Number of most anomalous objects to plot, by default 20
    N_total : int, optional
        Total number to plot (not recommended to be much more than 2000 for
        t-SNE), by default 2000

    Returns
    -------
    pd.DataFrame
        The selected subset of features: the top N_anomalies plus a random
        sample of the remainder
    """
    if N_total > len(features):
        N_total = len(features)
    if N_anomalies > len(features):
        N_anomalies = 0
    # Clamp so the random-sample size below can never go negative
    if N_anomalies > N_total:
        N_anomalies = N_total
    N_random = N_total - N_anomalies
    index = anomalies.sort_values(anomaly_column, ascending=False).index
    inds = index[:N_anomalies]
    other_inds = index[N_anomalies:]
    inds = list(inds) + list(np.random.choice(other_inds,
                             size=N_random, replace=False))
    return features.loc[inds]
def create_ellipse_check_catalogue(image_dataset, features,
                                   filename='ellipse_catalogue.csv'):
    """
    Writes a csv catalogue of the sources flagged as needing a larger
    window or cutout size, together with the recommended window size.

    Parameters
    ----------
    image_dataset : astronomaly.data_management.image_reader.ImageDataset
        The image dataset (its metadata is joined onto the flagged sources)
    features : pd.DataFrame
        Extracted features; must contain the 'Warning_Open_Ellipse' and
        'Recommended_Window_Size' columns
    filename : str, optional
        Output csv path, by default 'ellipse_catalogue.csv'
    """
    metadata = image_dataset.metadata
    flagged = features.copy()
    # Keep only sources whose outermost ellipse did not close
    flagged = flagged[flagged['Warning_Open_Ellipse'] == 1]
    columns_of_interest = flagged[['Warning_Open_Ellipse',
                                   'Recommended_Window_Size']]
    merged = pd.merge(columns_of_interest, metadata,
                      left_index=True, right_index=True)
    merged.to_csv(filename)
class ImageCycler:
    def __init__(self, images, xlabels=None):
        """
        Convenience object to cycle through a list of images inside a jupyter
        notebook.

        Parameters
        ----------
        images : list
            List of numpy arrays to display as images
        xlabels : list, optional
            List of custom labels for the images
        """
        self.current_ind = 0
        self.images = images
        self.xlabels = xlabels

    def onkeypress(self, event):
        """
        Matplotlib event handler for left and right arrows to cycle through
        images.

        Bug fix: the right-arrow bound was previously
        ``current_ind < len(images)``, which allowed the index to reach
        ``len(images)`` and raise an IndexError when displaying; the bound
        is now ``len(images) - 1``.

        Parameters
        ----------
        event : matplotlib.backend_bases.KeyEvent
            The key press event
        """
        plt.gcf()
        if event.key == 'right' and self.current_ind < len(self.images) - 1:
            self.current_ind += 1
        elif event.key == 'left' and self.current_ind > 0:
            self.current_ind -= 1
        plt.clf()
        event.canvas.figure.gca().imshow(
            self.images[self.current_ind], origin='lower', cmap='hot')
        if self.xlabels is not None:
            plt.xlabel(self.xlabels[self.current_ind])
        plt.title(self.current_ind)
        event.canvas.draw()

    def cycle(self):
        """
        Creates the plots and binds the event handler.
        """
        fig = plt.figure()
        fig.canvas.mpl_connect('key_press_event', self.onkeypress)
        plt.imshow(self.images[self.current_ind], origin='lower', cmap='hot')
        plt.title(self.current_ind)
def get_file_paths(image_dir, catalogue_file, file_type='.fits'):
    """
    Finds and appends the pathways of the relevant files to the catalogue.
    Required to access the files when passing a catalogue to the
    ImageThumbnailsDataset.

    Bug fix: files are now sorted by ``os.path.basename`` instead of
    splitting on '/', which produced the wrong key on Windows paths.

    Parameters
    ----------
    image_dir : str
        Directory where images are located (can be a single fits file or
        several)
    catalogue_file : pd.DataFrame
        Dataframe that contains the information pertaining to the data.
    file_type : str
        Sets the type of files used. Commonly used file types are .fits
        or .jpgs.

    Returns
    -------
    catalogue : pd.DataFrame
        Dataframe (sorted by ra, dec) with the required file pathways
        attached.
    """
    filenames = []
    for root, dirs, files in os.walk(image_dir):
        for f in files:
            if f.endswith(file_type):
                filenames.append(os.path.join(root, f))
    # Sort by file name only, portably across path separators
    filenames.sort(key=os.path.basename)
    # NOTE(review): assumes sorting the catalogue by (ra, dec) places the
    # rows in the same order as the name-sorted files and that the counts
    # match — confirm against the upstream naming convention
    catalogue = catalogue_file.sort_values(['ra', 'dec'])
    catalogue['filename'] = filenames
    return catalogue
def convert_tractor_catalogue(catalogue_file, image_file, image_name=''):
    """
    Converts a tractor fits file to a pandas dataframe to be given
    directly to an ImageDataset object.

    Bug fix: the FITS image was opened but never closed (its contents were
    never used); it is now opened in a context manager so the file handle
    is released immediately.

    Parameters
    ----------
    catalogue_file : string
        tractor catalogue in fits table format
    image_file : string
        The image corresponding to this catalogue (to extract pixel
        information and naming information)
    image_name : string, optional
        If given, used as 'original_image' instead of the image file name

    Returns
    -------
    pd.DataFrame
        Catalogue with objid, original_image, g/r/z fluxes and pixel and
        sky coordinates
    """
    catalogue = astropy.table.Table(astropy.io.fits.getdata(catalogue_file))
    old_catalogue = pd.DataFrame(
        {name: catalogue[name].tolist() for name in catalogue.colnames})
    # Opening the image validates that it exists and is readable; its data
    # is not otherwise used here
    with astropy.io.fits.open(image_file):
        pass
    if len(image_name) == 0:
        original_image = image_file.split(os.path.sep)[-1]
    else:
        original_image = image_name
    new_catalogue = pd.DataFrame()
    new_catalogue['objid'] = old_catalogue['objid']
    new_catalogue['original_image'] = [original_image] * len(new_catalogue)
    new_catalogue['flux_g'] = old_catalogue['flux_g']
    new_catalogue['flux_r'] = old_catalogue['flux_r']
    new_catalogue['flux_z'] = old_catalogue['flux_z']
    new_catalogue['x'] = old_catalogue['bx'].astype('int')
    new_catalogue['y'] = old_catalogue['by'].astype('int')
    new_catalogue['ra'] = old_catalogue['ra']
    new_catalogue['dec'] = old_catalogue['dec']
    return new_catalogue
def create_png_output(image_dataset, number_of_images, data_dir):
    """
    Converts the first `number_of_images` instances of the dataset into png
    files, saved under <data_dir>/Output/png.

    Parameters
    ----------
    image_dataset : astronomaly.data_management.image_reader.ImageDataset
        The image dataset
    number_of_images : integer
        Sets the number of images to be created by the function
    data_dir : directory
        Location of data directory. Needed to create the output folder
        for the images.
    """
    png_dir = os.path.join(data_dir, 'Output', 'png')
    os.makedirs(png_dir, exist_ok=True)
    for position in range(number_of_images):
        identifier = image_dataset.index[position]
        source_name = image_dataset.metadata.original_image[position]
        display_data = image_dataset.get_display_data(identifier)
        # Name the png after the original image, minus its .fits suffix
        target = os.path.join(
            png_dir, str(source_name.split('.fits')[0]) + '.png')
        Image.open(display_data).save(target)
def remove_corrupt_file(met, ind, idx):
    """
    Removes the entry for a corrupt or missing file from the metadata and
    index arrays.

    Bug fix: ``np.delete`` returns a *new* array rather than modifying its
    argument, so the previous implementation silently discarded both
    results and had no effect. The pruned arrays are now returned so
    callers can rebind them.

    Parameters
    ----------
    met : array-like
        The metadata entries of the dataset
    ind : array-like
        The index of the metadata
    idx : string
        The index of the corrupt/missing source file

    Returns
    -------
    tuple
        (met, ind) with every occurrence of idx removed
    """
    ind = np.delete(ind, np.where(ind == idx))
    met = np.delete(met, np.where(met == idx))
    return met, ind
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,239 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/base/base_dataset.py | import os
from astronomaly.base import logging_tools
class Dataset(object):
    def __init__(self, *args, **kwargs):
        """
        Base Dataset object that all other dataset objects should inherit
        from. Whenever a child of this class is implemented,
        super().__init__() should be called and explicitly passed all kwargs
        of the child class, to ensure correct logging and saving of files.

        Idiom fixes: repetitive ``if 'key' in kwargs`` blocks replaced with
        ``kwargs.get`` and the obfuscated ``type(locals()['self'])``
        replaced with the equivalent ``type(self)``; behaviour is unchanged.

        Parameters
        ----------
        filename : str
            If a single file (of any type) is to be read from, the path can
            be given using this kwarg.
        directory : str
            A directory can be given instead of an explicit list of files.
            The child class will load all appropriate files in this
            directory.
        list_of_files : list
            Instead of the above, a list of files to be loaded can be
            explicitly given.
        output_dir : str
            The directory to save the log file and all outputs to. Defaults
            to './'
        """
        self.data_type = None
        filename = kwargs.get('filename', '')
        directory = kwargs.get('directory', '')
        list_of_files = kwargs.get('list_of_files', [])
        if len(filename) != 0:
            self.files = [filename]
        elif len(list_of_files) != 0 and len(directory) == 0:
            # Assume the list of files are absolute paths
            self.files = list_of_files
        elif len(list_of_files) != 0 and len(directory) != 0:
            # Assume the list of files are relative paths to directory
            self.files = [os.path.join(directory, f) for f in list_of_files]
        elif len(directory) != 0:
            # Assume directory contains all the files we need
            fls = sorted(os.listdir(directory))
            self.files = [os.path.join(directory, f) for f in fls]
        else:
            self.files = []
        # Handles automatic file reading and writing
        self.output_dir = kwargs.get('output_dir', './')
        # This allows the automatic logging every time this class is
        # instantiated (i.e. every time this pipeline stage is run). That
        # means any class that inherits from this base class will have
        # automated logging.
        logging_tools.setup_logger(log_directory=self.output_dir,
                                   log_filename='astronomaly.log')
        function_call_signature = logging_tools.format_function_call(
            type(self).__name__, *args, **kwargs)
        logging_tools.log(function_call_signature)

    def clean_up(self):
        """
        Allows for any clean up tasks that might be required.
        """
        pass

    def get_sample(self, idx):
        """
        Returns a single instance of the dataset given an index.

        Parameters
        ----------
        idx : str
            Index (should be a string to avoid ambiguity)

        Raises
        ------
        NotImplementedError
            This function must be implemented when the base class is
            inherited.
        """
        raise NotImplementedError

    def get_display_data(self, idx):
        """
        Returns a single instance of the dataset in a form that is ready to
        be displayed by the web front end.

        Parameters
        ----------
        idx : str
            Index (should be a string to avoid ambiguity)

        Raises
        ------
        NotImplementedError
            This function must be implemented when the base class is
            inherited.
        """
        raise NotImplementedError
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,240 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/frontend/interface.py | import numpy as np
import os
import importlib
import sys
class Controller:
def __init__(self, pipeline_file):
"""
This is the main controller for the interface between the Python
backend and the JavaScript frontend. The Controller is passed a python
file, which must contain a "run_pipeline" function and return a
dictionary. The Controller consists of various functions which get
called by the front end asking for things like data to plot, metadata,
anomaly scores etc.
Parameters
----------
pipeline_file : str
The script to run Astronomaly (see the "scripts" folder for
examples)
"""
self.dataset = None
self.features = None
self.anomaly_scores = None
self.visualisation = None
self.module_name = None
self.active_learning = None
self.current_index = 0 # Index in the anomalies list
# A dictionary to be used by the frontend for column names when
# colouring the visualisation plot
self.column_name_dict = {
'score': 'Raw anomaly score',
'trained_score': 'Active learning score',
'predicted_user_score': 'Predicted user score'
}
self.set_pipeline_script(pipeline_file)
def run_pipeline(self):
"""
Runs (or reruns) the pipeline. Reimports the pipeline script so changes
are reflected.
"""
pipeline_script = importlib.import_module(self.module_name)
print('Running pipeline from', self.module_name + '.py')
pipeline_dict = pipeline_script.run_pipeline()
# ***** Add some try catches here
self.dataset = pipeline_dict['dataset']
self.features = pipeline_dict['features']
self.anomaly_scores = pipeline_dict['anomaly_scores']
if 'visualisation' in list(pipeline_dict.keys()):
self.visualisation = pipeline_dict['visualisation']
if 'active_learning' in list(pipeline_dict.keys()):
self.active_learning = pipeline_dict['active_learning']
    def get_data_type(self):
        """Return the ``data_type`` attribute of the underlying dataset."""
        return self.dataset.data_type
def set_pipeline_script(self, pipeline_file):
"""
Allows the changing of the input pipeline file.
Parameters
----------
pipeline_file : str
New pipeline file
"""
module_name = pipeline_file.split(os.path.sep)[-1]
pth = pipeline_file.replace(module_name, '')
module_name = module_name.split('.')[0]
self.module_name = module_name
sys.path.append(pth) # Allows importing the module from anywhere
def get_display_data(self, idx):
"""
Simply calls the underlying Dataset's function to return display data.
"""
try:
return self.dataset.get_display_data(idx)
except KeyError:
return None
def get_features(self, idx):
"""
Returns the features of instance given by index idx.
"""
try:
out_dict = dict(zip(self.features.columns.astype('str'),
self.features.loc[idx].values))
for key in list(out_dict.keys()):
try:
formatted_val = '%.3g' % out_dict[key]
out_dict[key] = formatted_val
except TypeError: # Probably a string already
pass
return out_dict
except KeyError:
return {}
def set_human_label(self, idx, label):
"""
Sets the human-assigned score to an instance. Creates the column
"human_label" if necessary in the anomaly_scores dataframe.
Parameters
----------
idx : str
Index of instance
label : int
Human-assigned label
"""
ml_df = self.anomaly_scores
if 'human_label' not in ml_df.columns:
ml_df['human_label'] = [-1] * len(ml_df)
ml_df.loc[idx, 'human_label'] = label
ml_df = ml_df.astype({'human_label': 'int'})
self.active_learning.save(
ml_df, os.path.join(self.active_learning.output_dir,
'ml_scores.csv'), file_format='csv')
def run_active_learning(self):
"""
Runs the selected active learning algorithm.
"""
has_no_labels = 'human_label' not in self.anomaly_scores.columns
labels_unset = np.sum(self.anomaly_scores['human_label'] != -1) == 0
if has_no_labels or labels_unset:
print("Active learning requested but no training labels "
"have been applied.")
return "failed"
else:
pipeline_active_learning = self.active_learning
features_with_labels = \
pipeline_active_learning.combine_data_frames(
self.features, self.anomaly_scores)
active_output = pipeline_active_learning.run(features_with_labels)
# This is safer than pd.combine which always makes new columns
for col in active_output.columns:
self.anomaly_scores[col] = \
active_output.loc[self.anomaly_scores.index, col]
return "success"
def delete_labels(self):
"""
Allows the user to delete all the labels they've applied and start
again
"""
print('Delete labels called')
if 'human_label' in self.anomaly_scores.columns:
self.anomaly_scores['human_label'] = -1
print('All user-applied labels have been reset to -1 (i.e. deleted)')
def get_active_learning_columns(self):
"""
Checks if active learning has been run and returns appropriate columns
to use in plotting
"""
out_dict = {}
for col in self.anomaly_scores.columns:
if col in self.column_name_dict.keys():
out_dict[col] = self.column_name_dict[col]
return out_dict
    def get_visualisation_data(self, color_by_column=''):
        """
        Returns the data for the visualisation plot in the correct json format.
        Parameters
        ----------
        color_by_column : str, optional
            If given, the points on the plot will be coloured by this column so
            for instance, more anomalous objects are brighter. Current options
            are: 'score' (raw ML anomaly score), 'trained_score' (score after
            active learning) and 'user_predicted_score' (the regressed values
            of the human applied labels)
        Returns
        -------
        dict
            Formatting visualisation plot data
        """
        clst = self.visualisation
        if clst is not None:
            # NOTE(review): this writes a 'color' column into
            # self.visualisation itself (shared state), not a copy.
            if color_by_column == '':
                # Column would have already been checked by frontend
                cols = [0.5] * len(clst)
                clst['color'] = cols
            else:
                clst['color'] = \
                    self.anomaly_scores.loc[clst.index,
                                            color_by_column]
            out = []
            # Sort so the highest-coloured (most anomalous) points are drawn
            # last and therefore plotted on top
            clst = clst.sort_values('color')
            for idx in clst.index:
                dat = clst.loc[idx].values
                # All values serialised as strings for the frontend JSON
                out.append({'id': (str)(idx),
                            'x': '{:f}'.format(dat[0]),
                            'y': '{:f}'.format(dat[1]),
                            'opacity': '0.5',
                            'color': '{:f}'.format(clst.loc[idx, 'color'])})
            return out
        else:
            return None
def get_original_id_from_index(self, ind):
"""
The frontend iterates through an ordered list that can change depending
on the algorithm selected. This function returns the actual index of an
instance (which might be 'obj2487' or simply '1') when given an array
index.
Parameters
----------
ind : int
The position in an array
Returns
-------
str
The actual object id
"""
this_ind = list(self.anomaly_scores.index)[ind]
return this_ind
    def get_metadata(self, idx, exclude_keywords=[], include_keywords=[]):
        """
        Returns the metadata for an instance in a format ready for display.
        Parameters
        ----------
        idx : str
            Index of the object
        exclude_keywords : list, optional
            Any keywords to exclude being displayed
        include_keywords : list, optional
            Any keywords that should be displayed
        Returns
        -------
        dict
            Display-ready metadata (all values formatted as strings where
            possible); empty dict if the index is unknown
        """
        # NOTE: the mutable default arguments are never mutated here, so they
        # are safe in this particular function.
        idx = str(idx)
        meta_df = self.dataset.metadata
        ml_df = self.anomaly_scores
        try:
            out_dict = {}
            # include_keywords, when given, restricts which metadata columns
            # are shown; otherwise all metadata columns are candidates
            if len(include_keywords) != 0:
                cols = include_keywords
            else:
                cols = meta_df.columns
            for col in cols:
                if col not in exclude_keywords:
                    out_dict[col] = meta_df.loc[idx, col]
            # Anomaly score columns are always appended (subject to excludes)
            for col in ml_df.columns:
                if col not in exclude_keywords:
                    out_dict[col] = ml_df.loc[idx, col]
            # Pretty-print numeric values to 3 significant figures
            for key in (list)(out_dict.keys()):
                try:
                    formatted_val = '%.3g' % out_dict[key]
                    out_dict[key] = formatted_val
                except TypeError:  # Probably a string already
                    pass
            return out_dict
        except KeyError:
            return {}
def get_coordinates(self, idx):
"""
If available, will return the coordinates of the requested object in
object format, ready to pass on to another website like simbad
Parameters
----------
idx : str
Index of the object
Returns
-------
dict
Coordinates
"""
met = self.dataset.metadata
if 'ra' in met and 'dec' in met:
return {'ra': str(met.loc[idx, 'ra']),
'dec': str(met.loc[idx, 'dec'])}
else:
return {}
def randomise_ml_scores(self):
"""
Returns the anomaly scores in a random order
"""
inds = np.random.permutation(self.anomaly_scores.index)
self.anomaly_scores = self.anomaly_scores.loc[inds]
def sort_ml_scores(self, column_to_sort_by='score'):
"""
Returns the anomaly scores sorted by a particular column.
"""
anomaly_scores = self.anomaly_scores
if column_to_sort_by in anomaly_scores.columns:
if column_to_sort_by == "iforest_score":
ascending = True
else:
ascending = False
anomaly_scores.sort_values(column_to_sort_by, inplace=True,
ascending=ascending)
else:
print("Requested column not in ml_scores dataframe")
def get_max_id(self):
return len(self.anomaly_scores)
    def clean_up(self):
        """Delegate resource cleanup to the underlying dataset (e.g. closing
        any open files it holds)."""
        self.dataset.clean_up()
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,241 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/preprocessing/image_preprocessing.py | import numpy as np
from skimage.transform import resize
import cv2
from astropy.stats import sigma_clipped_stats
def image_transform_log(img):
    """
    Normalise and then perform log transform on image.

    Parameters
    ----------
    img : np.ndarray
        Input image (assumed float values)

    Returns
    -------
    np.ndarray
        Transformed image
    """
    nonzero = img[img != 0]
    if nonzero.size == 0:
        # Entirely-zero image: min() of the empty nonzero selection used to
        # raise ValueError before the intended all-zero branch was reached.
        # Offset so the log stays finite (log(0.01) everywhere).
        return np.log(img + 0.01)
    mini = nonzero.min()
    maxi = img.max()
    # Small offset keeps the minimum pixel away from log(0)
    offset = (maxi - mini) / 100
    img = (img - mini) / (maxi - mini) + offset
    return np.log(img)
def image_transform_inverse_sinh(img):
    """
    Apply an inverse hyperbolic sine stretch to the image, scaled so the
    peak pixel sits at arcsinh(100)/theta.

    Parameters
    ----------
    img : np.ndarray
        Input image (assumed float values)

    Returns
    -------
    np.ndarray
        Transformed image (returned unchanged when the image peak is zero)
    """
    peak = img.max()
    if peak == 0:
        return img
    theta = 100 / peak
    return np.arcsinh(theta * img) / theta
def image_transform_root(img):
    """
    Normalise and then perform square root transform on image.

    Note: negative pixels are clamped to zero *in place*, mutating the
    input array (kept for backward compatibility).

    Parameters
    ----------
    img : np.ndarray
        Input image (assumed float values)

    Returns
    -------
    np.ndarray
        Transformed image
    """
    img[img < 0] = 0
    nonzero = img[img != 0]
    if nonzero.size == 0:
        # All-zero image: the offset (maxi - mini) / 10 is zero, so the
        # result is simply zeros. Previously this path crashed with
        # ValueError on min() of an empty selection.
        return np.sqrt(img)
    mini = nonzero.min()
    maxi = img.max()
    # Offset keeps the minimum pixel away from sqrt(0)
    offset = (maxi - mini) / 10
    img = (img - mini) / (maxi - mini) + offset
    return np.sqrt(img)
def image_transform_scale(img):
    """
    Normalise an image linearly into the range [0, 1]. Useful for deep
    learning. A constant image is returned unchanged (to avoid dividing
    by zero).

    Parameters
    ----------
    img : np.ndarray
        Input image

    Returns
    -------
    np.ndarray
        Scaled image
    """
    lo = img.min()
    hi = img.max()
    if lo == hi:
        return img
    return (img - lo) / (hi - lo)
def image_transform_resize(img, new_shape):
    """
    Resize an image to new dimensions (e.g. to feed into a deep learning
    network).
    Parameters
    ----------
    img : np.ndarray
        Input image
    new_shape : tuple
        Expected new shape for image
    Returns
    -------
    np.ndarray
        Reshaped image
    """
    # preserve_range keeps the original value scale instead of skimage's
    # default rescaling of the output to [0, 1]
    return resize(img, new_shape, preserve_range=True)
def image_transform_crop(img, new_shape=(160, 160)):
    """
    Crops an image to new dimensions (assumes you want to keep the centre).

    Parameters
    ----------
    img : np.ndarray
        Input image
    new_shape : tuple
        Expected new shape for image (rows, columns). Default changed from a
        mutable list to a tuple; accepted values are unchanged.

    Returns
    -------
    np.ndarray
        Cropped image of exactly new_shape
    """
    delt_0 = (img.shape[0] - new_shape[0]) // 2
    delt_1 = (img.shape[1] - new_shape[1]) // 2
    # Slice an exact new_shape window starting at the computed offset.
    # The previous end-relative slice (img[delt:shape - delt]) returned one
    # extra row/column whenever the size difference was odd.
    return img[delt_0:delt_0 + new_shape[0], delt_1:delt_1 + new_shape[1]]
def image_transform_gaussian_window(img, width=2.5):
    """
    Applies a Gaussian window of a given width to the image. This has the
    effect of downweighting possibly interfering objects near the edge of
    the image.

    Parameters
    ----------
    img : np.ndarray
        Input image
    width : float, optional
        The standard deviation of the Gaussian. The Gaussian is evaluated on
        a grid from -5 to 5 so a width=1 corresponds to a unit Gaussian. The
        width of the Gaussian will appear to be around 1/5 of the image,
        which would be fairly aggressive downweighting of outlying sources.

    Returns
    -------
    np.ndarray
        Windowed image
    """
    grid_x = np.linspace(-5, 5, img.shape[0])
    grid_y = np.linspace(-5, 5, img.shape[1])
    mesh_x, mesh_y = np.meshgrid(grid_x, grid_y)
    window = 1 / np.sqrt(width) / 2 * np.exp(
        -(mesh_x ** 2 + mesh_y ** 2) / 2 / width ** 2)
    if img.ndim == 2:  # Only a single channel image
        return img * window
    weighted = np.zeros_like(img)
    for channel in range(img.shape[-1]):
        weighted[:, :, channel] = img[:, :, channel] * window
    return weighted
def image_transform_sigma_clipping(img, sigma=3, central=True):
    """
    Applies sigma clipping to find a detection threshold, fits contours to
    the thresholded image and masks out everything outside the contour that
    contains the image centre.
    Parameters
    ----------
    img : np.ndarray
        Input image
    sigma : float, optional
        Sigma used by astropy's sigma_clipped_stats to estimate background
    central : bool, optional
        Currently unused in this function body
    Returns
    -------
    np.ndarray
        Image with pixels outside the selected contour set to zero
    """
    # Work on the first channel only when given a multiband image
    if len(img.shape) > 2:
        im = img[:, :, 0]
    else:
        im = img
    im = np.nan_to_num(im)  # OpenCV can't handle NaNs
    mean, median, std = sigma_clipped_stats(im, sigma=sigma)
    # Threshold one (clipped) standard deviation above the median background
    thresh = std + median
    img_bin = np.zeros(im.shape, dtype=np.uint8)
    img_bin[im <= thresh] = 0
    img_bin[im > thresh] = 1
    contours, hierarchy = cv2.findContours(img_bin,
                                           cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_SIMPLE)
    # Pick the contour containing the image centre
    x0 = img.shape[0] // 2
    y0 = img.shape[1] // 2
    for c in contours:
        if cv2.pointPolygonTest(c, (x0, y0), False) == 1:
            break
    # NOTE(review): if no contour contains the centre, the loop falls
    # through and `c` is simply the *last* contour found - confirm this
    # fallback is intended.
    contour_mask = np.zeros_like(img, dtype=np.uint8)
    if len(contours) == 0:
        # This happens if there's no data in the image so we just return zeros
        return contour_mask
    # Fill the selected contour (thickness -1 means filled)
    cv2.drawContours(contour_mask, [c], 0, (1, 1, 1), -1)
    new_img = np.zeros_like(img)
    new_img[contour_mask == 1] = img[contour_mask == 1]
    return new_img
def image_transform_greyscale(img):
    """
    Combines the rgb bands into a single image using OpenCV's colour
    conversion (BGR -> grayscale). Single-band input is returned unchanged.

    Parameters
    ----------
    img : np.ndarray
        Input image

    Returns
    -------
    np.ndarray
        Greyscale image
    """
    if len(img.shape) <= 2:
        return img
    return cv2.cvtColor(np.float32(img), cv2.COLOR_BGR2GRAY)
def image_transform_remove_negatives(img):
    """
    Sometimes negative values (due to noise) can creep in even after sigma
    clipping which can cause problems later. Use this function before
    scaling to ensure negative values are set to zero. The input array is
    left untouched.

    Parameters
    ----------
    img : np.ndarray
        Input image

    Returns
    -------
    np.ndarray
        Copy of the image with negatives clamped to zero
    """
    return np.clip(img, 0, None)
def image_transform_cv2_resize(img, scale_percent):
    """
    Resize an image to a percentage of its original size using OpenCV's
    area-based resampling.

    Parameters
    ----------
    img : np.ndarray
        Input image
    scale_percent : float
        New size as a percentage of the original (100 keeps the size)

    Returns
    -------
    np.ndarray
        Resized image
    """
    new_width = int(img.shape[1] * scale_percent / 100)
    new_height = int(img.shape[0] * scale_percent / 100)
    return cv2.resize(img, (new_width, new_height),
                      interpolation=cv2.INTER_AREA)
def image_transform_sum_channels(img):
    """
    Sums the three channels of a multiband image into a single band image.

    Parameters
    ----------
    img : np.ndarray
        Input image of shape (rows, cols, 3): channel 0 g-band (blue),
        channel 1 r-band (green), channel 2 z-band (red)

    Returns
    -------
    np.ndarray
        Single-band image containing the sum of all three channels
    """
    # Previous version called np.add(one, two, three): the third positional
    # argument of a ufunc is the *output* array, so it returned one + two
    # and silently overwrote the z-band data. Sum all three explicitly.
    return img[:, :, 0] + img[:, :, 1] + img[:, :, 2]
def image_transform_band_reorder(img):
    """
    Rearranges a channel-first image into a channel-last image with the
    band order reversed. Made specifically for the cutout.fits files from
    DECALS: input channel 0 is g (blue), 1 is r (green), 2 is z (red); the
    output stacks them as (z, r, g).

    Parameters
    ----------
    img : np.ndarray
        Input image, channels along the first axis

    Returns
    -------
    np.ndarray
        Stacked image, channels along the last axis
    """
    blue = img[0, :, :]
    green = img[1, :, :]
    red = img[2, :, :]
    return np.dstack((red, green, blue))
def image_transform_colour_correction(img, bands=('g', 'r', 'z'),
                                      scales=None, m=0.03):
    """
    Band weighting function used to match the display of astronomical images
    from the DECaLS SkyViewer and SDSS. Created specifically for DECaLS fits
    cutout files.
    Requires array shapes to contain the channel axis last in line (Default
    format for astronomical images).
    Parameters
    ----------
    img : np.ndarray
        Input image, channels last
    bands : tuple, optional
        Band letter for each input channel; must be keys of the scale table
    scales : dict, optional
        Overrides for the per-band (rgb plane, weight) table
    m : float, optional
        Additive offset applied after weighting each band
    Returns
    -------
    np.ndarray
        Weighted and reordered rgb image, clipped to [0, 1]
    """
    # Per-band (output rgb plane, multiplicative weight)
    rgbscales = {'u': (2, 1.5),
                 'g': (2, 6.0),
                 'r': (1, 3.4),
                 'i': (0, 1.0),
                 'z': (0, 2.2),
                 }
    if scales is not None:
        rgbscales.update(scales)
    # Average weighted intensity across the bands
    I = 0
    for i in range(min(np.shape(img))):
        plane, scale = rgbscales[bands[i]]
        im = img[:, :, i]
        im = np.maximum(0, im * scale + m)
        I = I + im
    I /= len(bands)
    # arcsinh stretch of the total intensity; Q controls the softening
    Q = 20
    fI = np.arcsinh(Q * I) / np.sqrt(Q)
    # Avoid division by zero in fully dark pixels below
    I += (I == 0.) * 1e-6
    H, W = I.shape
    rgb = np.zeros((H, W, 3), np.float32)
    # Each band is scaled by the stretched/total intensity ratio and placed
    # into its designated rgb plane
    for i in range(min(np.shape(img))):
        plane, scale = rgbscales[bands[i]]
        im = img[:, :, i]
        rgb[:, :, plane] = (im * scale + m) * fI / I
    image = np.clip(rgb, 0, 1)
    return image
def image_transform_axis_shift(img):
    """
    Moves the band axis (taken to be the smallest axis) to the end. This is
    used to align a fits file to the channel-last order used by default in
    astronomical images.

    Parameters
    ----------
    img : np.ndarray
        Input image

    Returns
    -------
    np.ndarray
        Image with the channel axis moved to the last position
    """
    channel_axis = int(np.argmin(np.shape(img)))
    return np.moveaxis(img, channel_axis, -1)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,242 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/base/logging_tools.py | import logging
import os
def setup_logger(log_directory='', log_filename="astronomaly.log"):
    """
    Ensures the system logger is set up correctly. If a FileHandler logger
    has already been attached to the current logger, nothing new is done.

    Parameters
    ----------
    log_directory : str, optional
        Location of log file, by default '' (the current working directory)
    log_filename : str, optional
        Log file name, by default "astronomaly.log"

    Returns
    -------
    Logger
        The root Logger object
    """
    root_logger = logging.getLogger()
    reset = False
    if len(root_logger.handlers) != 0:
        # A handler already exists: if it points at a different file than
        # requested, drop all handlers and start again with the new file
        for h in root_logger.handlers:
            try:
                flname = h.baseFilename
                if flname != os.path.join(log_directory, log_filename):
                    print('Warning: logger already attached to log file:')
                    print(flname)
                    print('Now switching to new log file:')
                    print(os.path.join(log_directory, log_filename))
                    reset = True
            except AttributeError:
                # Not a FileHandler (e.g. a console handler) - ignore
                pass
    if reset:
        root_logger.handlers = []
    if len(root_logger.handlers) == 0:
        log_formatter = logging.Formatter(
            "%(asctime)s - %(levelname)s - %(message)s")
        root_logger.setLevel(logging.INFO)
        # Only create the directory when one was actually given:
        # os.makedirs('') raises FileNotFoundError, which previously made
        # the default-argument call path (used by log()) crash.
        if len(log_directory) > 0 and not os.path.exists(log_directory):
            os.makedirs(log_directory)
        file_handler = logging.FileHandler(
            os.path.join(log_directory, log_filename))
        file_handler.setFormatter(log_formatter)
        file_handler.setLevel(logging.INFO)
        # INFO and up goes to the file; only WARNING and up to the console
        console_handler = logging.StreamHandler()
        console_handler.setFormatter(log_formatter)
        console_handler.setLevel(logging.WARNING)
        root_logger.addHandler(file_handler)
        root_logger.addHandler(console_handler)
    return root_logger
def format_function_call(func_name, *args, **kwargs):
    """
    Formats a function of a PipelineStage or Dataset object to ensure proper
    recording of the function and its arguments. args and kwargs should be
    exactly those passed to the function.

    Parameters
    ----------
    func_name : str
        Name of the stage

    Returns
    -------
    str
        Formatted function call, e.g. "Stage(1, x=2)"
    """
    pieces = [str(a) for a in args]
    pieces += ['%s=%s' % (k, v) for k, v in kwargs.items()]
    return func_name + '(' + ', '.join(pieces) + ')'
def log(msg, level='INFO'):
    """
    Actually logs a message. Ensures the logger has been set up first.

    Parameters
    ----------
    msg : str
        Log message
    level : str, optional
        DEBUG, INFO, WARNING or ERROR, by default 'INFO' (any unknown level
        is logged as INFO)
    """
    root_logger = logging.getLogger()
    if len(root_logger.handlers) == 0:
        setup_logger()
    dispatch = {'ERROR': root_logger.error,
                'WARNING': root_logger.warning,
                'DEBUG': root_logger.debug}
    dispatch.get(level, root_logger.info)(msg)
def check_if_inputs_same(class_name, local_variables):
    """
    Reads the log to check if this function has already been called with the
    same arguments (this may still result in the function being rerun if the
    input data has changed).

    Parameters
    ----------
    class_name : str
        Name of PipelineStage
    local_variables : dict
        List of all local variables.

    Returns
    -------
    args_same : bool
        True if the function was last called with the same arguments.
    checksum : int
        The checksum stored in the log file (0 if none was recorded).
    """
    hdlrs = logging.getLogger().handlers
    # Try to be somewhat generic allowing for other handlers but this will
    # only return the filename of the first FileHandler object it finds.
    # This should be ok except for weird logging edge cases.
    flname = ''
    checksum = 0
    for h in hdlrs:
        try:
            flname = h.baseFilename
            break
        except AttributeError:
            pass
    if len(flname) == 0 or not os.path.exists(flname):
        # Log file doesn't exist yet. Return the (args_same, checksum) pair
        # here too - previously a bare `False` was returned, which broke
        # callers that unpack two values.
        return False, 0
    else:
        func_args = {}
        args_same = False
        # Read all lines then close the file deterministically (it was
        # previously left open)
        with open(flname) as fl:
            log_lines = fl.readlines()
        # Scan backwards so the *most recent* call of this stage wins
        for ln in log_lines[::-1]:
            if class_name + '(' in ln:
                # To be completely general, the string manipulation has to
                # be a little complicated
                stripped_ln = ln.split('-')[-2].split(')')[0].split('(')[-1]
                the_list = stripped_ln.split('=')
                kwarg_list = []
                if len(the_list) > 1:
                    for l in the_list:
                        if ',' not in l:
                            kwarg_list.append(l)
                        else:
                            s = l.split(',')
                            if len(s) > 2:
                                kwarg_list.append(','.join(s[:-1]))
                            else:
                                kwarg_list.append(s[0])
                            kwarg_list.append(s[-1])
                if len(kwarg_list) != 0:
                    # kwarg_list alternates key, value
                    for k in range(0, len(kwarg_list), 2):
                        try:
                            key = kwarg_list[k]
                            value = kwarg_list[k + 1]
                            func_args[key.strip()] = value.strip()
                        except ValueError:
                            # This happens when there are no arguments
                            pass
                checksum_ln = ln.split('checksum:')
                if len(checksum_ln) > 1:
                    checksum = int(checksum_ln[-1])
                else:
                    checksum = 0
                # Compare the logged arguments (as strings) against the
                # current local variables; 'force_rerun' is ignored
                args_same = True
                for k in func_args.keys():
                    if k not in local_variables.keys():
                        args_same = False
                        break
                    else:
                        if k != "force_rerun" and \
                                func_args[k] != (str)(local_variables[k]):
                            args_same = False
                            break
                break
        return args_same, checksum
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,243 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/data_management/image_reader.py | from astropy.io import fits
from astropy.wcs import WCS
import numpy as np
import os
import tracemalloc
import pandas as pd
import matplotlib as mpl
import io
from skimage.transform import resize
import cv2
from astronomaly.base.base_dataset import Dataset
from astronomaly.base import logging_tools
from astronomaly.utils import utils
mpl.use('Agg')
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas # noqa: E402, E501
import matplotlib.pyplot as plt # noqa: E402
def convert_array_to_image(arr, plot_cmap='hot'):
    """
    Converts an array to a png image ready to be served on a web page.

    Parameters
    ----------
    arr : np.ndarray
        Input image
    plot_cmap : str, optional
        Matplotlib colormap used to render the array

    Returns
    -------
    png image object
        Object ready to be passed directly to the frontend
    """
    with mpl.rc_context({'backend': 'Agg'}):
        # Full-bleed axes: the png contains only the image, no decorations
        fig = plt.figure(figsize=(1, 1), dpi=4 * arr.shape[1])
        axes = plt.Axes(fig, [0., 0., 1., 1.])
        axes.set_axis_off()
        fig.add_axes(axes)
        plt.imshow(arr, cmap=plot_cmap, origin='lower')
        png_buffer = io.BytesIO()
        FigureCanvas(fig).print_png(png_buffer)
        plt.close(fig)
    return png_buffer
def apply_transform(cutout, transform_function):
    """
    Applies the transform function(s) given at initialisation to the image.

    Parameters
    ----------
    cutout : np.ndarray
        Cutout of image
    transform_function : callable, sequence of callables or None
        Transformation(s) to apply, in order

    Returns
    -------
    np.ndarray
        Transformed cutout (unchanged when transform_function is None)
    """
    if transform_function is None:
        return cutout
    try:
        len(transform_function)
    except TypeError:
        # A single callable was given rather than a sequence
        return transform_function(cutout)
    result = cutout
    for fn in transform_function:
        result = fn(result)
    return result
class AstroImage:
    """
    Lightweight wrapper around one (possibly multiband) astronomy image
    stored in one or more fits files. Files are kept open (memory-mapped)
    until clean_up() is called.
    """
    def __init__(self, filenames, file_type='fits', fits_index=None, name=''):
        """
        Lightweight wrapper for an astronomy image from a fits file
        Parameters
        ----------
        filenames : list of files
            Filename of fits file to be read. Can be length one if there's only
            one file or multiple if there are multiband images
        file_type : str, optional
            File extension (stored but only 'fits'-like files are opened here)
        fits_index : integer
            Which HDU object in the list to work with (scanned automatically
            when None)
        name : str, optional
            Display name; derived from the first filename when empty
        """
        print('Reading image data from %s...' % filenames[0])
        self.filenames = filenames
        self.file_type = file_type
        self.metadata = {}
        self.wcs = None
        self.fits_index = fits_index
        self.hdul_list = []
        try:
            # memmap avoids reading full images into memory up front
            for f in filenames:
                hdul = fits.open(f, memmap=True)
                self.hdul_list.append(hdul)
        except FileNotFoundError:
            # NOTE(review): FileNotFoundError is given tuple-style args here,
            # so the message renders as a tuple rather than one string
            raise FileNotFoundError("File", f, "not found")
        # get a test sample (side effect: populates metadata, wcs and,
        # if unset, fits_index)
        self.get_image_data(0, 10, 0, 10)
        if len(name) == 0:
            self.name = self._strip_filename()
        else:
            self.name = name
        print('Done!')
    def get_image_data(self, row_start, row_end, col_start, col_end):
        """Returns the requested pixel window of the image data from the
        open fits HDUList objects.
        Parameters
        ----------
        row_start, row_end, col_start, col_end : int
            Pixel boundaries of the window to extract
        Returns
        -------
        np.array
            Image data (channels stacked along the last axis when multiple
            files were given)
        """
        images = []
        rs = row_start
        re = row_end
        cs = col_start
        ce = col_end
        for hdul in self.hdul_list:
            if self.fits_index is None:
                # No HDU index given: scan until an HDU with data is found
                # and cache that index on self for all subsequent reads
                for i in range(len(hdul)):
                    self.fits_index = i
                    # snap1 = tracemalloc.take_snapshot()
                    dat = hdul[self.fits_index].data
                    # snap2 = tracemalloc.take_snapshot()
                    # diff = snap2.compare_to(snap1, 'lineno')
                    # print(diff[0].size_diff)
                    if dat is not None:
                        if len(dat.shape) > 2:
                            # Drop leading axes (e.g. frequency/Stokes)
                            dat = dat[0][0]
                        image = dat[rs:re, cs:ce]
                        break
            else:
                dat = hdul[self.fits_index].data
                if len(dat.shape) > 2:
                    dat = dat[0][0]
                image = dat[rs:re, cs:ce]
            # Header of the current HDU supplies metadata; the WCS is taken
            # from the first file only (self.wcs is set once)
            self.metadata = dict(hdul[self.fits_index].header)
            if self.wcs is None:
                self.wcs = WCS(hdul[self.fits_index].header, naxis=2)
            if len(image.shape) > 2 and image.shape[-1] > 3:
                # NOTE(review): more than 3 trailing channels is treated as a
                # stray axis and only the first channel is kept - confirm
                image = image[:, :, 0]
            if len(image.shape) > 2:
                image = np.squeeze(image)
            images.append(image)
        if len(images) > 1:
            # Should now be a 3d array with multiple channels
            image = np.dstack(images)
            self.metadata['NAXIS3'] = image.shape[-1]
        else:
            image = images[0]  # Was just the one image
        return image
    def get_image_shape(self):
        """
        Efficiently returns the shape of the image (read from the stored
        header values, without touching the pixel data).
        Returns
        -------
        tuple
            (NAXIS1, NAXIS2) as recorded in the fits header
        """
        return (self.metadata['NAXIS1'], self.metadata['NAXIS2'])
    def clean_up(self):
        """
        Closes all open fits files so they don't remain in memory.
        """
        print("Closing Fits files...")
        for hdul in self.hdul_list:
            hdul.close()
        logging_tools.log("Fits files closed successfully.")
        print("Files closed.")
    def _strip_filename(self):
        """
        Tiny utility function to make a nice formatted version of the image
        name from the input filename string (the basename of the first file).
        Returns
        -------
        string
            Formatted file name
        """
        s1 = self.filenames[0].split(os.path.sep)[-1]
        # extension = s1.split('.')[-1]
        return s1
    def get_coords(self, x, y):
        """
        Returns the RA and DEC coordinates for a given set of pixels.
        Parameters
        ----------
        x : int
            x pixel value
        y : y
            y pixel value
        Returns
        -------
        ra, dec
            Sky coordinates (zero-based pixel origin convention)
        """
        return self.wcs.wcs_pix2world(x, y, 0)
class ImageDataset(Dataset):
    def __init__(self, fits_index=None, window_size=128, window_shift=None,
                 display_image_size=128, band_prefixes=[], bands_rgb={},
                 transform_function=None, display_transform_function=None,
                 plot_square=False, catalogue=None,
                 plot_cmap='hot', **kwargs):
        """
        Read in a set of images either from a directory or from a list of file
        paths (absolute). Inherits from Dataset class.
        Parameters
        ----------
        filename : str
            If a single file (of any time) is to be read from, the path can be
            given using this kwarg.
        directory : str
            A directory can be given instead of an explicit list of files. The
            child class will load all appropriate files in this directory.
        list_of_files : list
            Instead of the above, a list of files to be loaded can be
            explicitly given.
        output_dir : str
            The directory to save the log file and all outputs to. Defaults to
            './'
        fits_index : integer, optional
            If these are fits files, specifies which HDU object in the list to
            work with
        window_size : int, tuple or list, optional
            The size of the cutout in pixels. If an integer is provided, the
            cutouts will be square. Otherwise a list of
            [window_size_x, window_size_y] is expected.
        window_shift : int, tuple or list, optional
            The size of the window shift in pixels. If the shift is less than
            the window size, a sliding window is used to create cutouts. This
            can be particularly useful for (for example) creating a training
            set for an autoencoder. If an integer is provided, the shift will
            be the same in both directions. Otherwise a list of
            [window_shift_x, window_shift_y] is expected.
        display_image_size : The size of the image to be displayed on the
            web page. If the image is smaller than this, it will be
            interpolated up to the higher number of pixels. If larger, it will
            be downsampled.
        band_prefixes : list
            Allows you to specify a prefix for an image which corresponds to a
            band identifier. This has to be a prefix and the rest of the image
            name must be identical in order for Astronomaly to detect these
            images should be stacked together.
        bands_rgb : Dictionary
            Maps the input bands (in separate folders) to rgb values to allow
            false colour image plotting. Note that here you can only select
            three bands to plot although you can use as many bands as you like
            in band_prefixes. The dictionary should have 'r', 'g' and 'b' as
            keys with the band prefixes as values.
        transform_function : function or list, optional
            The transformation function or list of functions that will be
            applied to each cutout. The function should take an input 2d array
            (the cutout) and return an output 2d array. If a list is provided,
            each function is applied in the order of the list.
        display_transform_function : function or list, optional
            As transform_function, but used only for display; falls back to
            transform_function when not given.
        catalogue : pandas.DataFrame or similar
            A catalogue of the positions of sources around which cutouts will
            be extracted. Note that a cutout of size "window_size" will be
            extracted around these positions and must be the same for all
            sources.
        plot_square : bool, optional
            If True this will add a white border indicating the boundaries of
            the original cutout when the image is displayed in the webapp.
        plot_cmap : str, optional
            The colormap with which to plot the image
        """
        super().__init__(fits_index=fits_index, window_size=window_size,
                         window_shift=window_shift,
                         display_image_size=display_image_size,
                         band_prefixes=band_prefixes, bands_rgb=bands_rgb,
                         transform_function=transform_function,
                         display_transform_function=display_transform_function,
                         plot_square=plot_square, catalogue=catalogue,
                         plot_cmap=plot_cmap,
                         **kwargs)
        self.known_file_types = ['fits', 'fits.fz', 'fits.gz',
                                 'FITS', 'FITS.fz', 'FITS.gz']
        self.data_type = 'image'
        images = {}
        # Ad-hoc memory profiling of the fits loading below
        tracemalloc.start()
        if len(band_prefixes) != 0:
            # Get the matching images in different bands
            bands_files = {}
            for p in band_prefixes:
                for f in self.files:
                    if p in f:
                        # Strip the band prefix so images of the same field
                        # in different bands share a key
                        start_ind = f.find(p)
                        end_ind = start_ind + len(p)
                        flname = f[end_ind:]
                        if flname not in bands_files.keys():
                            bands_files[flname] = [f]
                        else:
                            bands_files[flname] += [f]
            for k in bands_files.keys():
                extension = k.split('.')[-1]
                # print(k, extension)
                # Compressed fits keeps two extension parts (e.g. fits.fz)
                if extension == 'fz' or extension == 'gz':
                    extension = '.'.join(k.split('.')[-2:])
                if extension in self.known_file_types:
                    try:
                        astro_img = AstroImage(bands_files[k],
                                               file_type=extension,
                                               fits_index=fits_index,
                                               name=k)
                        images[k] = astro_img
                    except Exception as e:
                        # Unreadable images are logged and skipped, not fatal
                        msg = "Cannot read image " + k + "\n \
                            Exception is: " + (str)(e)
                        logging_tools.log(msg, level="ERROR")
            # Also convert the rgb dictionary into an index dictionary
            # corresponding
            if len(bands_rgb) == 0:
                self.bands_rgb = {'r': 0, 'g': 1, 'b': 2}
            else:
                self.bands_rgb = {}
                for k in bands_rgb.keys():
                    band = bands_rgb[k]
                    ind = band_prefixes.index(band)
                    self.bands_rgb[k] = ind
        else:
            # Single-band case: one AstroImage per file
            for f in self.files:
                extension = f.split('.')[-1]
                if extension == 'fz' or extension == 'gz':
                    extension = '.'.join(f.split('.')[-2:])
                if extension in self.known_file_types:
                    try:
                        astro_img = AstroImage([f],
                                               file_type=extension,
                                               fits_index=fits_index)
                        images[astro_img.name] = astro_img
                    except Exception as e:
                        msg = "Cannot read image " + f + "\n \
                            Exception is: " + (str)(e)
                        logging_tools.log(msg, level="ERROR")
        if len(list(images.keys())) == 0:
            msg = "No images found, Astronomaly cannot proceed."
            logging_tools.log(msg, level="ERROR")
            raise IOError(msg)
        # window_size may be a scalar (square cutouts) or an [x, y] pair
        try:
            self.window_size_x = window_size[0]
            self.window_size_y = window_size[1]
        except TypeError:
            self.window_size_x = window_size
            self.window_size_y = window_size
        # Allows sliding windows
        if window_shift is not None:
            try:
                self.window_shift_x = window_shift[0]
                self.window_shift_y = window_shift[1]
            except TypeError:
                self.window_shift_x = window_shift
                self.window_shift_y = window_shift
        else:
            # Default: non-overlapping cutouts (shift equals window size)
            self.window_shift_x = self.window_size_x
            self.window_shift_y = self.window_size_y
        self.images = images
        self.transform_function = transform_function
        if display_transform_function is None:
            self.display_transform_function = transform_function
        else:
            self.display_transform_function = display_transform_function
        self.plot_square = plot_square
        self.plot_cmap = plot_cmap
        self.catalogue = catalogue
        self.display_image_size = display_image_size
        self.band_prefixes = band_prefixes
        self.metadata = pd.DataFrame(data=[])
        if self.catalogue is None:
            self.create_catalogue()
        else:
            self.convert_catalogue_to_metadata()
            print('A catalogue of ', len(self.metadata),
                  'sources has been provided.')
        if 'original_image' in self.metadata.columns:
            # Drop catalogue entries that reference images we failed to load
            for img in np.unique(self.metadata.original_image):
                if img not in images.keys():
                    logging_tools.log('Image ' + img + """ found in catalogue
                        but not in provided image data. Removing from
                        catalogue.""", level='WARNING')
                    msk = self.metadata.original_image == img
                    self.metadata.drop(self.metadata.index[msk], inplace=True)
                    print('Catalogue reduced to ', len(self.metadata),
                          'sources')
        self.index = self.metadata.index.values
    def create_catalogue(self):
        """
        If a catalogue is not supplied, this will generate one by cutting up
        each image into a regular grid of cutouts.

        Cutout centres are spaced by the window shift (which defaults to the
        window size, i.e. non-overlapping tiles) and stored in self.metadata
        together with their sky coordinates. 'peak_flux' is initialised to
        the sentinel value -1 and filled in lazily by get_sample.
        """
        print('No catalogue found, one will automatically be generated by \
            splitting the image into cutouts governed by the window_size..')
        for image_name in list(self.images.keys()):
            astro_img = self.images[image_name]
            img_shape = astro_img.get_image_shape()
            # Remember, numpy array index of [row, column]
            # corresponds to [y, x]
            # Centres are kept at least half a window away from the edges so
            # every cutout fits fully inside the image.
            xvals = np.arange(self.window_size_x // 2,
                              img_shape[1] - self.window_size_x // 2,
                              self.window_shift_x)
            yvals = np.arange(self.window_size_y // 2,
                              img_shape[0] - self.window_size_y // 2,
                              self.window_shift_y)
            # All (x, y) combinations of the two coordinate vectors
            X, Y = np.meshgrid(xvals, yvals)
            x_coords = X.ravel()
            y_coords = Y.ravel()
            ra, dec = astro_img.get_coords(x_coords, y_coords)
            original_image_names = [image_name] * len(x_coords)
            new_df = pd.DataFrame(data={
                'original_image': original_image_names,
                'x': x_coords,
                'y': y_coords,
                'ra': ra,
                'dec': dec,
                'peak_flux': [-1] * len(ra)})
            self.metadata = pd.concat((self.metadata, new_df),
                                      ignore_index=True)
        # String indices are used as unambiguous object ids throughout
        self.metadata.index = self.metadata.index.astype('str')
        print('A catalogue of ', len(self.metadata), 'cutouts has been \
            created.')
        print('Done!')
    def convert_catalogue_to_metadata(self):
        """
        Converts a user-supplied catalogue into the internal metadata
        DataFrame.

        The 'x' and 'y' pixel-position columns are mandatory.
        'original_image' is required only when multiple images were loaded;
        'objid' (used as the string index), 'ra', 'dec' and 'peak_flux' are
        optional and are generated or defaulted where sensible.

        Raises
        ------
        ValueError
            If required columns are missing from the catalogue.
        """
        if 'original_image' not in self.catalogue.columns:
            if len(self.images) > 1:
                logging_tools.log("""If multiple fits images are used the
                                  original_image column must be provided in
                                  the catalogue to identify which image the
                                  source belongs to.""",
                                  level='ERROR')
                raise ValueError("Incorrect input supplied")
            else:
                # Only one image loaded so every source must belong to it
                self.catalogue['original_image'] = \
                    [list(self.images.keys())[0]] * len(self.catalogue)
        if 'objid' not in self.catalogue.columns:
            self.catalogue['objid'] = np.arange(len(self.catalogue))
        if 'peak_flux' not in self.catalogue.columns:
            # NaN marks "unknown" here (contrast with -1 in create_catalogue)
            self.catalogue['peak_flux'] = [np.NaN] * len(self.catalogue)
        cols = ['original_image', 'x', 'y']
        # Only x and y (pixel positions) are checked as truly mandatory
        for c in cols[1:]:
            if c not in self.catalogue.columns:
                logging_tools.log("""If a catalogue is provided the x and y
                    columns (corresponding to pixel values) must be present""",
                    level='ERROR')
                raise ValueError("Incorrect input supplied")
        if 'ra' in self.catalogue.columns:
            cols.append('ra')
        if 'dec' in self.catalogue.columns:
            cols.append('dec')
        if 'peak_flux' in self.catalogue.columns:
            cols.append('peak_flux')
        met = {}
        for c in cols:
            met[c] = self.catalogue[c].values
        # objid (as a string) becomes the index used to identify sources
        the_index = np.array(self.catalogue['objid'].values, dtype='str')
        self.metadata = pd.DataFrame(met, index=the_index)
        self.metadata['x'] = self.metadata['x'].astype('int')
        self.metadata['y'] = self.metadata['y'].astype('int')
    def get_sample(self, idx):
        """
        Returns the data for a single sample in the dataset as indexed by idx.

        Cutouts that would extend past the image edge are returned filled
        entirely with NaN. The peak flux of the cutout is computed and
        cached in the metadata the first time the sample is read.

        Parameters
        ----------
        idx : string
            Index of sample

        Returns
        -------
        nd.array
            Array of image cutout
        """
        x0 = self.metadata.loc[idx, 'x']
        y0 = self.metadata.loc[idx, 'y']
        original_image = self.metadata.loc[idx, 'original_image']
        this_image = self.images[original_image]
        x_wid = self.window_size_x // 2
        y_wid = self.window_size_y // 2
        y_start = y0 - y_wid
        y_end = y0 + y_wid
        x_start = x0 - x_wid
        x_end = x0 + x_wid
        # NOTE(review): y is bounded by NAXIS1 and x by NAXIS2 here, which is
        # the opposite pairing to get_display_data (which clips x against
        # NAXIS1) — confirm which orientation is intended for non-square
        # images.
        invalid_y = y_start < 0 or y_end > this_image.metadata['NAXIS1']
        invalid_x = x_start < 0 or x_end > this_image.metadata['NAXIS2']
        if invalid_y or invalid_x:
            # Out-of-bounds cutouts are replaced by an all-NaN array of the
            # expected shape (with a band axis for multi-band data)
            naxis3_present = 'NAXIS3' in this_image.metadata.keys()
            if naxis3_present and this_image.metadata['NAXIS3'] > 1:
                shp = [self.window_size_y,
                       self.window_size_x,
                       this_image.metadata['NAXIS3']]
            else:
                shp = [self.window_size_y, self.window_size_x]
            cutout = np.ones((shp)) * np.nan
        else:
            cutout = this_image.get_image_data(y_start, y_end, x_start, x_end)
        # -1 is the "not yet computed" sentinel written by create_catalogue;
        # compute and cache the peak flux on first access
        if self.metadata.loc[idx, 'peak_flux'] == -1:
            if np.any(np.isnan(cutout)):
                flx = -1
            else:
                flx = np.max(cutout)
            self.metadata.loc[idx, 'peak_flux'] = flx
        cutout = apply_transform(cutout, self.transform_function)
        return cutout
    def get_display_data(self, idx):
        """
        Returns a single instance of the dataset in a form that is ready to be
        displayed by the web front end.

        A region 1.5x the window size around the source is extracted
        (zero-padded at image edges), optionally annotated with a square
        marking the actual cutout, and resized so its longest edge equals
        display_image_size.

        Parameters
        ----------
        idx : str
            Index (should be a string to avoid ambiguity)

        Returns
        -------
        png image object
            Object ready to be passed directly to the frontend, or None if
            idx is not found in the metadata.
        """
        try:
            img_name = self.metadata.loc[idx, 'original_image']
        except KeyError:
            return None
        this_image = self.images[img_name]
        x0 = self.metadata.loc[idx, 'x']
        y0 = self.metadata.loc[idx, 'y']
        # Display a region larger than the cutout to give visual context
        factor = 1.5
        xmin = (int)(x0 - self.window_size_x * factor)
        xmax = (int)(x0 + self.window_size_x * factor)
        ymin = (int)(y0 - self.window_size_y * factor)
        ymax = (int)(y0 + self.window_size_y * factor)
        # Clip the requested region to the image bounds
        xstart = max(xmin, 0)
        xend = min(xmax, this_image.metadata['NAXIS1'])
        ystart = max(ymin, 0)
        yend = min(ymax, this_image.metadata['NAXIS2'])
        tot_size_x = int(2 * self.window_size_x * factor)
        tot_size_y = int(2 * self.window_size_y * factor)
        naxis3_present = 'NAXIS3' in this_image.metadata.keys()
        if naxis3_present and this_image.metadata['NAXIS3'] > 1:
            # Only exactly 3 bands can be rendered as RGB; any other
            # multi-band count is displayed as a 2d image
            if this_image.metadata['NAXIS3'] != 3:
                shp = [tot_size_y, tot_size_x]
            else:
                shp = [tot_size_y, tot_size_x, this_image.metadata['NAXIS3']]
        else:
            shp = [tot_size_y, tot_size_x]
        # Zero-padded canvas; pixels outside the image stay zero
        cutout = np.zeros(shp)
        img_data = this_image.get_image_data(ystart, yend, xstart, xend)
        cutout[ystart - ymin:yend - ymin,
               xstart - xmin:xend - xmin] = img_data
        cutout = np.nan_to_num(cutout)
        cutout = apply_transform(cutout, self.display_transform_function)
        if len(cutout.shape) > 2 and cutout.shape[-1] >= 3:
            # Reorder channels into r, g, b for display.
            # NOTE(review): assumes self.bands_rgb was populated elsewhere
            # (it is not set in any method visible here) — confirm it is
            # always defined when multi-band data is displayed.
            new_cutout = np.zeros([cutout.shape[0], cutout.shape[1], 3])
            new_cutout[:, :, 0] = cutout[:, :, self.bands_rgb['r']]
            new_cutout[:, :, 1] = cutout[:, :, self.bands_rgb['g']]
            new_cutout[:, :, 2] = cutout[:, :, self.bands_rgb['b']]
            cutout = new_cutout
        if self.plot_square:
            # Draw a square (at the image's maximum value) marking the
            # boundary of the actual cutout used for feature extraction
            offset_x = (tot_size_x - self.window_size_x) // 2
            offset_y = (tot_size_y - self.window_size_y) // 2
            x1 = offset_x
            x2 = tot_size_x - offset_x
            y1 = offset_y
            y2 = tot_size_y - offset_y
            mx = cutout.max()
            cutout[y1:y2, x1] = mx
            cutout[y1:y2, x2] = mx
            cutout[y1, x1:x2] = mx
            cutout[y2, x1:x2] = mx
        # Rescale (preserving aspect ratio) so the longest edge matches the
        # requested display size
        min_edge = min(cutout.shape[:2])
        max_edge = max(cutout.shape[:2])
        if max_edge != self.display_image_size:
            new_max = self.display_image_size
            new_min = int(min_edge * new_max / max_edge)
            if cutout.shape[0] <= cutout.shape[1]:
                new_shape = [new_min, new_max]
            else:
                new_shape = [new_max, new_min]
            if len(cutout.shape) > 2:
                # Keep the channel axis unchanged
                new_shape.append(cutout.shape[-1])
            cutout = resize(cutout, new_shape, anti_aliasing=False)
        return convert_array_to_image(cutout, plot_cmap=self.plot_cmap)
class ImageThumbnailsDataset(Dataset):
    def __init__(self, display_image_size=128, transform_function=None,
                 display_transform_function=None, fits_format=False,
                 catalogue=None, check_corrupt_data=False,
                 additional_metadata=None, **kwargs):
        """
        Read in a set of images that have already been cut into thumbnails.
        This would be uncommon with astronomical data but is needed to read a
        dataset like galaxy zoo. Inherits from Dataset class.

        Parameters
        ----------
        filename : str
            If a single file (of any type) is to be read from, the path can be
            given using this kwarg.
        directory : str
            A directory can be given instead of an explicit list of files. The
            child class will load all appropriate files in this directory.
        list_of_files : list
            Instead of the above, a list of files to be loaded can be
            explicitly given.
        output_dir : str
            The directory to save the log file and all outputs to. Defaults to
            the base Dataset class's default.
        display_image_size : int
            The size of the image to be displayed on the
            web page. If the image is smaller than this, it will be
            interpolated up to the higher number of pixels. If larger, it will
            be downsampled.
        transform_function : function or list, optional
            The transformation function or list of functions that will be
            applied to each cutout. The function should take an input 2d array
            (the cutout) and return an output 2d array. If a list is provided,
            each function is applied in the order of the list.
        display_transform_function : function or list, optional
            Transforms applied for display only; defaults to
            transform_function when not given.
        fits_format : boolean
            Set to True if the cutouts are in fits format (as opposed to jpeg
            or png).
        catalogue : pandas.DataFrame or similar
            A catalogue of the positions of sources around which cutouts will
            be extracted. Note that a cutout of size "window_size" will be
            extracted around these positions and must be the same for all
            sources.
        check_corrupt_data : boolean
            If True, files that cannot be read are removed from the index (or
            replaced by a blank image for display); otherwise a message is
            printed when a corrupt file is encountered.
        additional_metadata : pandas.DataFrame, optional
            Extra columns joined onto the metadata (on the object index).
        """
        # NOTE(review): display_image_size is hardcoded to 128 in this super()
        # call instead of passing the argument; it appears harmless only
        # because self.display_image_size is re-assigned below — confirm.
        super().__init__(transform_function=transform_function,
                         display_image_size=128, catalogue=catalogue,
                         fits_format=fits_format,
                         check_corrupt_data=check_corrupt_data,
                         display_transform_function=display_transform_function,
                         additional_metadata=additional_metadata,
                         **kwargs)
        self.data_type = 'image'
        self.known_file_types = ['png', 'jpg', 'jpeg', 'bmp', 'tif', 'tiff',
                                 'fits', 'fits.fz', 'fits.gz', 'FITS',
                                 'FITS.fz', 'FITS.gz'
                                 ]
        self.transform_function = transform_function
        self.check_corrupt_data = check_corrupt_data
        if display_transform_function is None:
            self.display_transform_function = self.transform_function
        else:
            self.display_transform_function = display_transform_function
        self.display_image_size = display_image_size
        self.fits_format = fits_format
        if catalogue is not None:
            # NOTE(review): DataFrame.set_index is not in-place and its result
            # is discarded here, so this line currently has no effect —
            # confirm whether catalogue = catalogue.set_index('objid') was
            # intended.
            if 'objid' in catalogue.columns:
                catalogue.set_index('objid')
            # Make indices unique by appending a per-duplicate counter
            catalogue.index = catalogue.index.astype(
                str) + '_' + catalogue.groupby(
                level=0).cumcount().astype(str)
            self.metadata = catalogue
        else:
            # No catalogue: index every recognised image file by its file
            # name with the extension stripped
            inds = []
            file_paths = []
            for f in self.files:
                extension = f.split('.')[-1]
                if extension in self.known_file_types:
                    inds.append(
                        f.split(os.path.sep)[-1][:-(len(extension) + 1)])
                    file_paths.append(f)
            self.metadata = pd.DataFrame(index=inds,
                                         data={'filename': file_paths})
        self.index = self.metadata.index.values
        if additional_metadata is not None:
            self.metadata = self.metadata.join(additional_metadata)
    def get_sample(self, idx):
        """
        Returns the data for a single sample in the dataset as indexed by idx.

        Parameters
        ----------
        idx : string
            Index of sample

        Returns
        -------
        nd.array
            Array of image cutout. On the fits error paths below no explicit
            value is returned (an implicit None).
        """
        if self.fits_format:
            try:
                filename = self.metadata.loc[idx, 'filename']
                img = fits.getdata(filename, memmap=True)
                return apply_transform(img, self.transform_function)
            except TypeError:
                # A TypeError from the fits reader is treated as corrupt data
                msg = "TypeError cannot read image: Corrupt file"
                logging_tools.log(msg, level="ERROR")
                if self.check_corrupt_data:
                    utils.remove_corrupt_file(
                        self.index, self.metadata.index, idx)
                else:
                    print('Corrupted data: Enable check_corrupt_data.')
            except OSError:
                # An OSError is treated as an empty/missing file
                msg = "OSError cannot read image: Empty file"
                logging_tools.log(msg, level="ERROR")
                if self.check_corrupt_data:
                    utils.remove_corrupt_file(
                        self.index, self.metadata.index, idx)
                else:
                    print('Missing data: Enable check_corrupt_data.')
        else:
            filename = self.metadata.loc[idx, 'filename']
            img = cv2.imread(filename)
            # OpenCV loads BGR; convert to the RGB expected downstream
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            return apply_transform(img, self.transform_function)
    def get_display_data(self, idx):
        """
        Returns a single instance of the dataset in a form that is ready to be
        displayed by the web front end.

        Parameters
        ----------
        idx : str
            Index (should be a string to avoid ambiguity)

        Returns
        -------
        png image object
            Object ready to be passed directly to the frontend
        """
        if self.fits_format:
            try:
                filename = self.metadata.loc[idx, 'filename']
                cutout = fits.getdata(filename, memmap=True)
            except TypeError:
                msg = "TypeError cannot read image: Corrupted file"
                logging_tools.log(msg, level="ERROR")
                if self.check_corrupt_data:
                    # Substitute a blank image so the frontend still gets
                    # something to render.
                    # NOTE(review): shape is (1, size, size), i.e.
                    # channel-first — confirm this is what apply_transform
                    # and convert_array_to_image expect.
                    cutout = np.zeros(
                        [1, self.display_image_size, self.display_image_size], dtype=int)
                else:
                    # NOTE(review): on this path 'cutout' is never assigned,
                    # so apply_transform below raises UnboundLocalError —
                    # confirm whether an early return was intended.
                    print('Corrupted data: Enable check_corrupt_data.')
            except OSError:
                msg = "OSError cannot read image: Empty file"
                logging_tools.log(msg, level="ERROR")
                if self.check_corrupt_data:
                    cutout = np.zeros(
                        [1, self.display_image_size, self.display_image_size], dtype=int)
                else:
                    # NOTE(review): same unassigned-'cutout' issue as above
                    print('Missing data: Enable check_corrupt_data.')
        else:
            filename = self.metadata.loc[idx, 'filename']
            cutout = cv2.imread(filename)
            # OpenCV loads BGR; convert to RGB for display
            cutout = cv2.cvtColor(cutout, cv2.COLOR_BGR2RGB)
        cutout = apply_transform(cutout, self.display_transform_function)
        # Rescale (preserving aspect ratio) so the longest edge matches the
        # requested display size
        min_edge = min(cutout.shape[:2])
        max_edge = max(cutout.shape[:2])
        if max_edge != self.display_image_size:
            new_max = self.display_image_size
            new_min = int(min_edge * new_max / max_edge)
            if cutout.shape[0] <= cutout.shape[1]:
                new_shape = [new_min, new_max]
            else:
                new_shape = [new_max, new_min]
            if len(cutout.shape) > 2:
                new_shape.append(cutout.shape[-1])
            cutout = resize(cutout, new_shape, anti_aliasing=False)
        return convert_array_to_image(cutout)
    def fits_to_png(self, scores):
        """
        Simple function that outputs png files from the input fits files,
        named by anomaly score, object id and peak flux.

        Parameters
        ----------
        scores : pd.DataFrame or similar
            Anomaly scores indexed by object id with a 'score' column.

        Returns
        -------
        None
            Images are created and saved in <output_dir>/PNG/Anomaly Score.
        """
        # NOTE(review): this method reads 'filenames' and 'peak_flux'
        # metadata columns, neither of which is created by this class (the
        # no-catalogue path builds a 'filename' column) — it appears to rely
        # on an externally supplied catalogue; confirm the expected columns.
        for i in range(len(scores)):
            idx = scores.index[i]
            filename = self.metadata.loc[idx, 'filenames']
            flux = self.metadata.loc[idx, 'peak_flux']
            # Search the data directory tree for the file on disk
            for root, directories, f_names in os.walk(self.directory):
                if filename in f_names:
                    file_path = os.path.join(root, filename)
            output_path = os.path.join(self.output_dir, 'PNG', 'Anomaly Score')
            if not os.path.exists(output_path):
                os.makedirs(output_path)
            data = fits.getdata(file_path, memmap=True)
            if len(np.shape(data)) > 2:
                # Stack the first three planes (reversed) into an RGB image
                one = data[0, :, :]
                two = data[1, :, :]
                three = data[2, :, :]
                data = np.dstack((three, two, one))
                transformed_image = apply_transform(
                    data, self.display_transform_function)
            else:
                transformed_image = apply_transform(
                    data, self.display_transform_function)
            plt.imsave(output_path+'/AS:'+'%.6s' % scores.score[i]+'_NAME:'+str(
                idx)+'_FLUX:'+'%.4s' % flux+'.png', transformed_image)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,244 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/postprocessing/scaling.py | from sklearn.preprocessing import StandardScaler
import pandas as pd
from astronomaly.base.base_pipeline import PipelineStage
class FeatureScaler(PipelineStage):
    def __init__(self, **kwargs):
        """
        Standardises features with sklearn's StandardScaler: each column is
        shifted to zero mean and scaled to unit variance. Highly recommended
        before most machine learning algorithms and for visualisations such
        as t-SNE.
        """
        super().__init__(**kwargs)
    def _execute_function(self, features):
        """
        Fit the scaler on the input and apply it.

        Parameters
        ----------
        features : pd.DataFrame or similar
            Input features. The index identifies each object and every
            column is treated as a feature.

        Returns
        -------
        pd.DataFrame
            Same index and columns as the input, with every column scaled
            to zero mean and unit variance.
        """
        scaled_values = StandardScaler().fit_transform(features)
        return pd.DataFrame(scaled_values,
                            index=features.index,
                            columns=features.columns)
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,245 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/frontend/run_server.py | from flask import Flask, render_template, request, Response
import json
from os.path import join
from astronomaly.frontend.interface import Controller
import logging
import argparse
# Main function to serve Astronomaly
parser = argparse.ArgumentParser(description='Run the Astronomaly server')
help_str = 'Location of the script Astronomaly should run. \
See the scripts folder for examples.'
parser.add_argument('script', help=help_str)
args = parser.parse_args()
script = args.script
webapp_dir = join('..', '..', 'webapp')
app = Flask(__name__,
static_folder=join(webapp_dir, 'public'),
template_folder=join(webapp_dir, 'public'))
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
controller = Controller(script)
@app.route('/')
def index():
    """
    Serves the single-page application's entry point.
    """
    return render_template('index.html')
@app.route('/getindex', methods=["POST"])
def get_index():
    """
    Translates a position in the score-ordered array into the actual object
    index (e.g. "obj287") and returns it as JSON.
    """
    if request.method != "POST":
        return ""
    position = request.get_json()
    original_id = controller.get_original_id_from_index(int(position))
    return json.dumps(original_id)
@app.route('/getdatatype', methods=["POST"])
def get_data_type():
    """
    Returns the type of data being analysed (e.g. "image", "light_curve",
    "raw_features") as JSON.
    """
    if request.method != "POST":
        return ""
    return json.dumps(controller.get_data_type())
@app.route('/getmetadata', methods=["POST"])
def get_metadata():
    """
    Returns the metadata of the requested instance as JSON.
    """
    if request.method != "POST":
        return ""
    instance_id = str(request.get_json())
    return json.dumps(controller.get_metadata(instance_id))
@app.route('/getcoordinates', methods=["POST"])
def get_coordinates():
    """
    Returns the coordinates (if available) of an object as a comma
    separated string, JSON encoded.
    """
    if request.method != "POST":
        return ""
    instance_id = str(request.get_json())
    return json.dumps(controller.get_coordinates(instance_id))
@app.route('/getlightcurve', methods=["POST"])
def get_light_curve():
    """
    Returns the display data for a light curve as JSON.
    """
    if request.method != "POST":
        return ""
    instance_id = str(request.get_json())
    return json.dumps(controller.get_display_data(instance_id))
@app.route('/getfeatures', methods=["POST"])
def get_features():
    """
    Returns the features of an instance, ready for tabular display, as JSON.
    """
    if request.method != "POST":
        return ""
    instance_id = str(request.get_json())
    return json.dumps(controller.get_features(instance_id))
@app.route('/getrawfeatures', methods=["POST"])
def get_raw_features():
    """
    Returns raw features for basic plotting as JSON.
    """
    if request.method != "POST":
        return ""
    instance_id = str(request.get_json())
    return json.dumps(controller.get_display_data(instance_id))
@app.route('/getimage', methods=["GET", "POST"])
def get_image():
    """
    Returns the requested instance rendered as a png image.
    """
    if request.method != "POST":
        return ""
    instance_id = str(request.get_json())
    output = controller.get_display_data(instance_id)
    if output is None:
        return ""
    return Response(output.getvalue(), mimetype='image/png')
@app.route('/getColumns', methods=["GET", "POST"])
def get_available_columns():
    """
    Reports which score columns exist (e.g. whether active learning has
    been run) so the frontend can offer the right colouring options.
    """
    if request.method != "POST":
        return ""
    return json.dumps(controller.get_active_learning_columns())
@app.route('/visualisation', methods=["GET", "POST"])
def get_visualisation():
    """
    Serves the data to be displayed on the visualisation tab.
    """
    if request.method == "POST":
        color_by_column = request.get_json()
        output = controller.get_visualisation_data(
            color_by_column=color_by_column)
        js = json.dumps(output)
        return js
    # Fix: the route accepts GET but previously fell through returning None,
    # which Flask rejects with a 500. Return an empty body like the other
    # routes do.
    return ""
@app.route('/retrain', methods=["GET", "POST"])
def retrain():
    """
    Triggers a round of human-in-the-loop (active) learning.
    """
    result = controller.run_active_learning()
    return json.dumps(result)
@app.route('/deletelabels', methods=["GET", "POST"])
def delete_labels():
    """
    Deletes all existing human labels so the user can start again.
    """
    controller.delete_labels()
    return json.dumps("success")
@app.route('/sort', methods=["GET", "POST"])
def sort_data():
    """
    Sorts the data by the requested column, or shuffles it when "random" is
    requested.
    """
    if request.method == "POST":
        column = (str)(request.get_json())
        if column == "random":
            controller.randomise_ml_scores()
        else:
            controller.sort_ml_scores(column)
        return json.dumps("success")
    # Fix: the route accepts GET but previously returned None (a 500 error
    # in Flask); return an empty body like the other routes do.
    return ""
@app.route('/label', methods=["GET", "POST"])
def get_label():
    """
    Records the label given to an instance by a human.
    """
    if request.method == "POST":
        out_dict = request.get_json()
        idx = out_dict['id']
        label = (float)(out_dict['label'])
        controller.set_human_label(idx, label)
        return json.dumps("success")
    # Fix: the route accepts GET but previously returned None (a 500 error
    # in Flask); return an empty body like the other routes do.
    return ""
@app.route('/getmaxid', methods=["GET", "POST"])
def get_max_id():
    """
    Lets the frontend know how long the list of objects is.
    """
    if request.method == "POST":
        max_id = controller.get_max_id()
        return json.dumps(max_id)
    # Fix: the route accepts GET but previously returned None (a 500 error
    # in Flask); return an empty body like the other routes do.
    return ""
@app.route('/getlistindex', methods=["GET", "POST"])
def get_list_index():
    """
    Returns the current position in the list of objects.
    (Docstring fixed: the original was copy-pasted from get_max_id.)
    """
    if request.method == "POST":
        idx = controller.current_index
        return json.dumps(idx)
    # Fix: the route accepts GET but previously returned None (a 500 error
    # in Flask); return an empty body like the other routes do.
    return ""
@app.route('/setlistindex', methods=["GET", "POST"])
def set_list_index():
    """
    Sets the current position in the list of objects.
    (Docstring fixed: the original was copy-pasted from get_max_id.)
    """
    if request.method == "POST":
        idx = int(request.get_json())
        controller.current_index = idx
        return json.dumps("success")
    # Fix: the route accepts GET but previously returned None (a 500 error
    # in Flask); return an empty body like the other routes do.
    return ""
@app.route('/close', methods=["GET", "POST"])
def close():
    """
    Cleans up the controller's state and shuts down the Astronomaly server.
    (Docstring fixed: the original was copy-pasted from get_max_id.)
    """
    if request.method == "POST":
        controller.clean_up()
        print("Exiting Astronomaly... Goodbye!")
        # NOTE: werkzeug.server.shutdown is deprecated and absent in newer
        # werkzeug versions, hence the None check.
        shutdown_hook = request.environ.get('werkzeug.server.shutdown')
        if shutdown_hook is not None:
            shutdown_hook()
        return Response("Bye", mimetype='text/plain')
    # Fix: the route accepts GET but previously returned None (a 500 error
    # in Flask); return an empty body like the other routes do.
    return ""
if __name__ == "__main__":
    # Run the user's pipeline once up front so all data is ready before the
    # first request arrives
    controller.run_pipeline()
    host = 'http://127.0.0.1:5000/'
    print('##### Astronomaly server now running #####')
    print('Open this link in your browser:', host)
    print()
    # Flask's development server on the default port (5000)
    app.run()
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,246 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/scripts/galaxy_zoo_example.py | # An example with a subset of Galaxy Zoo data
from astronomaly.data_management import image_reader
from astronomaly.preprocessing import image_preprocessing
from astronomaly.feature_extraction import shape_features
from astronomaly.postprocessing import scaling
from astronomaly.anomaly_detection import isolation_forest, human_loop_learning
from astronomaly.visualisation import umap_plot
import os
import pandas as pd
import zipfile
# Root directory for data
data_dir = os.path.join(os.getcwd(), 'example_data')
image_dir = os.path.join(data_dir, 'GalaxyZooSubset', '')
# Where output should be stored
output_dir = os.path.join(
    data_dir, 'astronomaly_output', 'galaxy_zoo', '')
if not os.path.exists(output_dir):
    os.makedirs(output_dir)
if not os.path.exists(image_dir):
    # Data has not been unzipped yet. A context manager is used so the zip
    # file handle is closed after extraction (the original leaked it).
    with zipfile.ZipFile(
            os.path.join(data_dir, 'GalaxyZooSubset.zip')) as zip_ref:
        zip_ref.extractall(data_dir)
# These are transform functions that will be applied to images before feature
# extraction is performed. Functions are called in order.
image_transform_function = [
    image_preprocessing.image_transform_sigma_clipping,
    image_preprocessing.image_transform_scale]
# You can apply a different set of transforms to the images that get displayed
# in the frontend. In this case, I want to see the original images before sigma
# clipping is applied.
display_transform_function = [
    image_preprocessing.image_transform_scale]
def run_pipeline():
    """
    Any script passed to the Astronomaly server must implement this function.
    run_pipeline must return a dictionary that contains the keys listed below.

    Each stage writes its output to output_dir; with force_rerun=False a
    stage reuses cached results from a previous run where possible.

    Returns
    -------
    pipeline_dict : dictionary
        Dictionary containing all relevant data. Keys must include:
        'dataset' - an astronomaly Dataset object
        'features' - pd.DataFrame containing the features
        'anomaly_scores' - pd.DataFrame with a column 'score' with the anomaly
        scores
        'visualisation' - pd.DataFrame with two columns for visualisation
        (e.g. TSNE or UMAP)
        'active_learning' - an object that inherits from BasePipeline and will
        run the human-in-the-loop learning when requested
    """
    # This creates the object that manages the data
    image_dataset = image_reader.ImageThumbnailsDataset(
        directory=image_dir, output_dir=output_dir,
        transform_function=image_transform_function,
        display_transform_function=display_transform_function
    )
    # Creates a pipeline object for feature extraction
    pipeline_ellipse = shape_features.EllipseFitFeatures(
        percentiles=[90, 80, 70, 60, 50, 0],
        output_dir=output_dir, channel=0, force_rerun=False,
        central_contour=False)
    # Actually runs the feature extraction
    features = pipeline_ellipse.run_on_dataset(image_dataset)
    # Now we rescale the features using the same procedure of first creating
    # the pipeline object, then running it on the feature set
    pipeline_scaler = scaling.FeatureScaler(force_rerun=False,
                                            output_dir=output_dir)
    features = pipeline_scaler.run(features)
    # The actual anomaly detection is called in the same way by creating an
    # Iforest pipeline object then running it
    pipeline_iforest = isolation_forest.IforestAlgorithm(
        force_rerun=False, output_dir=output_dir)
    anomalies = pipeline_iforest.run(features)
    # We convert the scores onto a range of 0-5
    pipeline_score_converter = human_loop_learning.ScoreConverter(
        force_rerun=False, output_dir=output_dir)
    anomalies = pipeline_score_converter.run(anomalies)
    try:
        # This is used by the frontend to store labels as they are applied so
        # that labels are not forgotten between sessions of using Astronomaly
        if 'human_label' not in anomalies.columns:
            df = pd.read_csv(
                os.path.join(output_dir, 'ml_scores.csv'),
                index_col=0,
                dtype={'human_label': 'int'})
            df.index = df.index.astype('str')
            # Only merge saved labels if they line up with the current data
            if len(anomalies) == len(df):
                anomalies = pd.concat(
                    (anomalies, df['human_label']), axis=1, join='inner')
    except FileNotFoundError:
        # No previous labelling session exists yet
        pass
    # This is the active learning object that will be run on demand by the
    # frontend
    pipeline_active_learning = human_loop_learning.NeighbourScore(
        alpha=1, output_dir=output_dir)
    # We use UMAP for visualisation which is run in the same way as other parts
    # of the pipeline.
    pipeline_umap = umap_plot.UMAP_Plot(
        force_rerun=False,
        output_dir=output_dir)
    vis_plot = pipeline_umap.run(features)
    # The run_pipeline function must return a dictionary with these keywords
    return {'dataset': image_dataset,
            'features': features,
            'anomaly_scores': anomalies,
            'visualisation': vis_plot,
            'active_learning': pipeline_active_learning}
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,247 | MichelleLochner/astronomaly | refs/heads/main | /astronomaly/anomaly_detection/isolation_forest.py | from astronomaly.base.base_pipeline import PipelineStage
from sklearn.ensemble import IsolationForest
import pandas as pd
import pickle
from os import path
class IforestAlgorithm(PipelineStage):
    """
    Pipeline stage wrapping sklearn's IsolationForest anomaly detector.

    Fits a forest on the input features and scores every instance; more
    negative scores are more anomalous.
    """
    def __init__(self, contamination='auto', **kwargs):
        """
        Runs sklearn's isolation forest anomaly detection algorithm and returns
        the anomaly score for each instance.
        Parameters
        ----------
        contamination : string or float, optional
            Hyperparameter to pass to IsolationForest. 'auto' is recommended
        """
        # kwargs (e.g. output_dir, save_output) are handled by PipelineStage.
        super().__init__(contamination=contamination, **kwargs)
        self.contamination = contamination
        # Populated by _execute_function; kept so the fitted model can be pickled.
        self.iforest_obj = None
    def save_iforest_obj(self):
        """
        Stores the iforest object to the output directory to allow quick
        rerunning on new data.
        """
        # NOTE(review): the file handle is never closed here; a `with` block
        # would be safer (data is likely flushed only at interpreter exit).
        if self.iforest_obj is not None:
            f = open(path.join(self.output_dir, 'iforest_object.pickle'), 'wb')
            pickle.dump(self.iforest_obj, f)
    def _execute_function(self, features):
        """
        Does the work in actually running isolation forest.
        Parameters
        ----------
        features : pd.DataFrame or similar
            The input features to run iforest on. Assumes the index is the id
            of each object and all columns are to
            be used as features.
        Returns
        -------
        pd.DataFrame
            Contains the same original index of the features input and the
            anomaly scores. More negative is more anomalous.
        """
        iforest = IsolationForest(contamination=self.contamination)
        iforest.fit(features)
        # decision_function: negative = anomalous, positive = normal.
        scores = iforest.decision_function(features)
        # NOTE(review): the fitted model is never assigned to self.iforest_obj
        # here, so save_iforest_obj() will always see None -- presumably a bug
        # upstream; confirm before relying on the saved pickle.
        if self.save_output:
            self.save_iforest_obj()
        return pd.DataFrame(data=scores, index=features.index,
                            columns=['score'])
| {"/astronomaly/feature_extraction/wavelet_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/shape_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/lof.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/anomaly_detection/human_loop_learning.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/visualisation/umap_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flatten_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/autoencoder.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/raw_features.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/dimensionality_reduction/pca.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/dimensionality_reduction/truncated_svd.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/power_spectrum.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/light_curve_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/visualisation/tsne_plot.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/photutils_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/feature_extraction/flux_histogram.py": ["/astronomaly/base/base_pipeline.py", "/astronomaly/preprocessing/image_preprocessing.py"], "/astronomaly/feature_extraction/feets_features.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/data_management/image_reader.py": ["/astronomaly/base/base_dataset.py"], "/astronomaly/postprocessing/scaling.py": ["/astronomaly/base/base_pipeline.py"], "/astronomaly/frontend/run_server.py": ["/astronomaly/frontend/interface.py"], "/astronomaly/anomaly_detection/isolation_forest.py": ["/astronomaly/base/base_pipeline.py"]} |
65,249 | I4-Projektseminar-HHU-2016/seminar-project-HaydarAk | refs/heads/master | /DB_handler.py | import pickle
import glob
import sqlite3
import psutil
import os
import errno
import time
# reads & returns the amount of space of physical memory held by the process, this method is called in
def memory_usage():
    """Return this process's resident physical memory in megabytes, rounded to 2 decimals."""
    current_process = psutil.Process(os.getpid())
    rss_bytes = current_process.memory_info()[0]
    # 2 ** 20 bytes per megabyte.
    return round(rss_bytes / float(2 ** 20), 2)
# creates directory 'sub_dir'
# 'errno.EEXIST' (sub_dir already exists) exception will be ignored
def make_sure_path_exists(sub_dir):
    """
    Create directory ``sub_dir`` (and any missing parents) if it does not exist.

    :param sub_dir: Path of the directory to create.
    :return: None.
    """
    # exist_ok=True replaces the old try/except errno.EEXIST pattern and is
    # also race-free when two processes create the same directory concurrently.
    os.makedirs(sub_dir, exist_ok=True)
# pagerank algorithm:
# doesnt calc correctly
def compute_ranks():
    """
    Compute PageRank scores for a small hard-coded test graph and print them.

    Uses 50 synchronous power-iteration passes with damping factor 0.85.

    :return: dict mapping page id -> PageRank score (also printed).
    """
    d = 0.85  # damping factor
    # Test-dict equals Example 2 from:
    # http://www.cs.princeton.edu/~chazelle/courses/BIB/pagerank.htm
    test_dict = {'10': ['20', '30', '40'],
                 '20': ['10'],
                 '30': ['10'],
                 '40': ['10','50', '60', '70', '80'],
                 '50': [],
                 '60': [],
                 '70': [],
                 '80': []
                 }
    num_pages = len(test_dict)
    num_of_loops = 50
    ranks = {page: 1.0 for page in test_dict}
    for _ in range(num_of_loops):
        # BUG FIX: the old code created n_ranks once and then did
        # `ranks = n_ranks`, after which both names referred to the SAME dict,
        # so each pass read partially-updated scores (the "doesnt calc
        # correctly" the original comment complained about).  A fresh dict per
        # pass gives the correct synchronous update.
        new_ranks = {}
        for source in test_dict:
            newrank = (1 - d) / num_pages
            for target in test_dict:
                if source in test_dict[target]:
                    # target links to source: it donates d * rank / outdegree.
                    newrank += d * ranks[target] / len(test_dict[target])
            new_ranks[source] = newrank
        ranks = new_ranks
    for e in ranks.items():
        print(e)
    return ranks
# inserts values from pagelinks dicts to db
def add_pagelinks(db_name):
    """
    Insert every pickled pagelinks dictionary into the ``links`` table of
    ``<db_name>.db`` and build an index over (source, target).

    :param db_name: Database file name without the ``.db`` extension.
    :return: None.
    """
    cnt = 0
    # Each pickle under pagelinks_dict/ maps source id -> list of link targets.
    files = glob.glob("pagelinks_dict/*.pickle")
    conn = sqlite3.connect(db_name+'.db')
    c = conn.cursor()
    max_files = len(files)
    print (" --- inserting pagelinks dictionaries to db --- ")
    for file in files:
        cnt += 1
        print ("Inserting ", cnt, " of ", max_files, " dicts")
        t = pickle.load(open(file, 'rb'))
        # One row per (source, target) pair; parameterized to avoid quoting issues.
        for k, v in t.items():
            for value in v:
                c.execute('''INSERT INTO links VALUES(?,?)''', (k,value,))
    print("inserting done")
    print("creating index")
    # Index built after bulk insert -- much faster than indexing up front.
    c.execute("CREATE INDEX index_links ON links (source, target);")
    print("commiting")
    conn.commit()
    print ("done")
    conn.close()
# inserts values from pages dicts to db
def add_pages(db_name):
    """
    Insert every pickled pages dictionary into the ``pages`` table of
    ``<db_name>.db`` and build an index over (id, title).

    :param db_name: Database file name without the ``.db`` extension.
    :return: None.
    """
    cnt = 0
    # Each pickle under page_dict/ maps page id -> page title.
    files = glob.glob("page_dict/*.pickle")
    print (" --- inserting pages dictionaries to db --- ")
    max_files = len(files)
    conn = sqlite3.connect(db_name+'.db')
    c = conn.cursor()
    for file in files:
        cnt += 1
        print("Inserting ", cnt, " of ", max_files, " dicts")
        t = pickle.load(open(file, 'rb'))
        for k, v in t.items():
            c.execute('''INSERT INTO pages VALUES(?,?)''', (k, v,))
    print("inserting done")
    print("creating index")
    # Index built after bulk insert -- much faster than indexing up front.
    c.execute("CREATE INDEX index_pages ON pages (id, title);")
    print("commiting")
    conn.commit()
    print ("done")
    conn.close()
# creates db & table
def create_db(db_name):
    """
    Create ``<db_name>.db`` with the four working tables (links, pages,
    p_link_ids, p_ranks).  If any table already exists the whole creation
    pass is silently skipped, exactly as before.
    """
    schema = (
        '''CREATE TABLE links
                 (source text, target text)''',
        '''CREATE TABLE pages
                 (id text, title text)''',
        '''CREATE TABLE p_link_ids
                 (source_id text, target_id text)''',
        '''CREATE TABLE p_ranks
                 (p_id text, p_rank text, p_rank_new text)''',
    )
    connection = sqlite3.connect(db_name+'.db')
    cursor = connection.cursor()
    try:
        for statement in schema:
            cursor.execute(statement)
        connection.commit()
    except sqlite3.OperationalError:
        # Tables already exist -> nothing to do (same behavior as the original
        # single try block: the first failure aborts the remaining statements).
        pass
    connection.close()
# Joins values from pages & links to p_link_ids, if links.target = pages.title
def join_tables(db_name):
    """
    Resolve link targets (titles) to page ids: populate ``p_link_ids`` with
    (source_id, target_id) pairs via an inner join of ``links`` and ``pages``,
    then index it and drop the now-redundant ``links`` table.

    :param db_name: Database file name without the ``.db`` extension.
    :return: None.
    """
    conn = sqlite3.connect(db_name+'.db')
    c = conn.cursor()
    print ("joining values from pages & links")
    # Links whose target title has no matching page row are dropped (inner join).
    c.execute('''INSERT into p_link_ids (source_id, target_id)
    SELECT l.source, p.id FROM links l INNER JOIN pages p
    ON l.target = p.title''')
    print("tables joined")
    print("creating index")
    c.execute("CREATE INDEX index_plinks ON p_link_ids (source_id, target_id);")
    # The raw title-based links are no longer needed; free the space.
    c.execute("DROP TABLE links;")
    print("commiting")
    conn.commit()
    conn.close()
# fills pagerank table with IDs + start value for pagerank
def fill_p_ranks_table(db_name):
    """
    Seed the ``p_ranks`` table: one row per distinct source page id, with both
    the current and the next pagerank column initialized to '1.0'.

    :param db_name: Database file name without the ``.db`` extension.
    :return: None.
    """
    conn = sqlite3.connect(db_name+'.db')
    c = conn.cursor()
    print("inserting p_ranks")
    c.execute('''INSERT into p_ranks (p_id, p_rank, p_rank_new)
    SELECT DISTINCT (p_link_ids.source_id), '1.0', '1.0' FROM p_link_ids;''')
    print("inserting done")
    # Index creation intentionally disabled (left here for reference):
    # c.execute("CREATE INDEX index_pranks ON p_ranks (p_id, p_rank);")
    print("commiting")
    conn.commit()
    print("done")
    print()
    conn.close()
# main func of file.
# coordinates method calls
def build_db(database_name):
    """
    Main entry point of this module: build the sqlite database from the
    pickled dictionaries, then prepare and preview the pagerank input dicts.

    Pipeline order matters: schema -> pages -> links -> join -> rank seeding.

    :param database_name: Database file name without the ``.db`` extension.
    :return: None.
    """
    print("Building sqlite db. ")
    create_db(database_name)
    add_pages(database_name)
    add_pagelinks(database_name)
    join_tables(database_name)
    fill_p_ranks_table(database_name)
    print("Generating pagerank dicts for calc")
    generate_pagerank_dict(database_name)
    show_dict_snippet()  # show snippet
# generates dictionaries for pagerank calc.
# dict structure:
# key: target_page
# values: list of page_ids, linking to target_page
def generate_pagerank_dict(db_name):
    """
    Stream the ``p_link_ids`` table of ``<db_name>.db`` into pickled
    dictionaries under ``p_rank_dict/``.

    Each dictionary maps a source page id -> list of target page ids.  Rows
    are read ordered by source_id, so a chunk is flushed to disk only at a
    source-id boundary, guaranteeing every id's list is complete in exactly
    one pickle file.

    :param db_name: Database file name without the ``.db`` extension.
    :return: None.
    """
    os.makedirs("p_rank_dict", exist_ok=True)
    conn = sqlite3.connect(db_name+'.db')
    cursor = conn.cursor()
    cnt = 0  # output file counter
    cursor.execute('''SELECT * FROM p_link_ids ORDER BY source_id ASC ''')
    prank_dict = {}
    tmp = 0  # running count of keys flushed so far (debug only)
    while True:
        vals = cursor.fetchmany(5000)
        # BUG FIX: test for exhaustion *before* peeking at vals[0][0]; the old
        # order could raise IndexError on the final, empty fetch.
        if len(vals) == 0:
            break
        # Flush once the dict is large enough, but only when the next batch
        # starts a new source id (rows are sorted, so existing ids are done).
        if len(prank_dict) > 50000 and vals[0][0] not in prank_dict:
            print (memory_usage())
            tmp += len(prank_dict)
            with open("p_rank_dict/dict"+str(cnt)+".pickle", 'wb') as pickle_file:
                pickle.dump(prank_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
            prank_dict.clear()
            cnt += 1
        for tuples in vals:
            if tuples[0] in prank_dict:
                # append the new value to existing list
                prank_dict[tuples[0]].append(tuples[1])
            else:
                # create a new value-list for key
                prank_dict[tuples[0]] = [tuples[1]]
    # BUG FIX: the old code broke out of the loop without persisting whatever
    # was still in memory, losing the final (possibly only) chunk.
    if prank_dict:
        with open("p_rank_dict/dict"+str(cnt)+".pickle", 'wb') as pickle_file:
            pickle.dump(prank_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
        prank_dict.clear()
    conn.close()
def get_pageranks(db_name):  # under construction
    """
    Prototype: for entries of one pagerank dict, fetch the current ranks of
    all pages linking to a target page.  Currently stops after the first
    entry (``break`` at the bottom) and returns nothing -- work in progress.

    :param db_name: Database file name without the ``.db`` extension.
    :return: None (prototype; results are discarded).
    """
    # NOTE(review): hard-codes dict1.pickle -- presumably a debugging shortcut;
    # the finished version would iterate all p_rank_dict/*.pickle files.
    t = pickle.load(open("p_rank_dict/dict1.pickle", "rb"))
    conn = sqlite3.connect(db_name+'.db')
    cursor = conn.cursor()
    query = 'select count(p_id) from p_ranks'
    cursor.execute(query)
    npages = cursor.fetchone()[0]
    tmp_dict = {}
    for element in t.items():
        tmp_dict = {}
        out_list = list(element[1])
        tmp_dict[element[0]]=out_list
        # NOTE(review): SQL built by string concatenation; safe only while the
        # ids come from our own database -- parameterize before reuse.
        query = 'select p_id, p_rank from p_ranks where p_id in (' + ','.join(map(str, out_list)) + ')'
        cursor.execute(query)
        ranks = {}
        results = cursor.fetchall()
        for r in results:
            ranks[r[0]]=r[1]
        break
# shows 5 pagerank_dict snippet
def show_dict_snippet():
    """
    Print the first six entries of one pagerank dictionary pickle as a quick
    sanity-check preview.  Prints a notice if no pickle can be read.
    """
    time.sleep(2)
    print("showing snippet of a pagerank dict")
    pickle_files = glob.glob("p_rank_dict/*.pickle")
    try:
        # NOTE: an empty p_rank_dict/ raises IndexError here, which is not an
        # IOError and therefore propagates (same behavior as before).
        snippet = pickle.load(open(pickle_files[0], 'rb'))
        print(" \n \ntarget-page \t pages linking to target-page")
        for shown, key in enumerate(snippet.keys()):
            print(key, '\t\t\t', snippet[key])
            if shown >= 5:  # six entries printed, then stop
                break
    except IOError:
        print("no file for snippet")
65,250 | I4-Projektseminar-HHU-2016/seminar-project-HaydarAk | refs/heads/master | /SQLReader.py | # -*- coding: utf-8 -*-
import gzip
import time
import psutil
import glob
import multiprocessing
from multiprocessing import Process
import pickle
import os
import errno
from os import path
# reads & returns the amount of space of physical memory held by the process, this method is called in
def memory_usage():
    """Return this process's resident physical memory in megabytes, rounded to 2 decimals."""
    own_process = psutil.Process(os.getpid())
    # memory_info()[0] is the RSS in bytes; divide by 2**20 for megabytes.
    in_megabytes = own_process.memory_info()[0] / float(2 ** 20)
    return round(in_megabytes, 2)
# creates directory 'sub_dir'
# 'errno.EEXIST' (sub_dir already exists) exception will be ignored
def make_sure_path_exists(sub_dir):
    """
    Create directory ``sub_dir`` (and any missing parents) if it does not exist.

    :param sub_dir: Path of the directory to create.
    :return: None.
    """
    # exist_ok=True replaces the old try/except errno.EEXIST pattern and is
    # also race-free when two processes create the same directory concurrently.
    os.makedirs(sub_dir, exist_ok=True)
# opens file with utf-8 encoding, reads line by line and returns line number of unreadable_lines
# used for file_tests
def test_if_full_unicode(file_name, table_name):
    """
    Diagnostic: read the gzipped dump line by line as strict UTF-8, collect
    the line numbers that fail to decode (writing the raw bytes to
    ``testfile``), then hand those line numbers to test_read().

    :param file_name: Path of the gzipped SQL dump.
    :param table_name: Table name, forwarded to test_read().
    :return: None.
    """
    line_number = 0
    unreadablle_lines = []
    l = None
    # NOTE(review): err_file is never closed; buffered bytes may be lost.
    err_file = open("testfile", 'wb')
    sql_file = gzip.open(file_name, 'rb')
    while True:
        try:
            line_number += 1
            l = sql_file.readline()
            # Strict decode: any invalid byte raises UnicodeDecodeError below.
            line = l.decode('utf-8')
            if (line_number % 1000) == 0:
                print(line_number)
            if line.startswith('-- Dump completed'):
                print("Finished")
                sql_file.close()
                break
        except UnicodeDecodeError:
            # Record the offending line number and dump its raw bytes.
            unreadablle_lines.append(line_number)
            err_file.write(l)
            continue
        except Exception:
            raise
    sql_file.close()
    print("unreadable lines =", len(unreadablle_lines))
    print(unreadablle_lines)
    test_read(file_name, table_name, unreadablle_lines)
def test_read(file_name, table_name, list_unreadable_lines):
    """
    Diagnostic: re-read the dump with errors='ignore' and re-parse only the
    previously unreadable INSERT lines, printing any ids that were already
    seen (i.e. data recovered on the lenient pass).

    :param file_name: Path of the gzipped SQL dump.
    :param table_name: Table name used to recognise INSERT lines.
    :param list_unreadable_lines: Line numbers collected by test_if_full_unicode().
    :return: None.
    """
    sql_file = gzip.open(file_name, 'rb')
    sql_prefix = "INSERT INTO `" + table_name + "` VALUES "
    sql_suffix = ";"
    lines = 0
    r_dict = {}
    while True:
        try:
            # Lenient decode: invalid bytes are dropped instead of raising.
            line = sql_file.readline().decode('utf-8', 'ignore')
            lines += 1
            if lines in list_unreadable_lines:
                if line == "" or line.startswith("--"):
                    continue
                elif not (line.startswith(sql_prefix) or line.endswith(sql_suffix)):
                    continue
                else:
                    res = {}
                    tmp_list = []
                    # Strip the INSERT prefix/suffix, keeping only the value tuples.
                    values = line[len(sql_prefix)-1:len(line)-(len(sql_suffix))]
                    # NOTE(review): test_parse is not defined anywhere in this
                    # module -- reaching this branch raises NameError. Probably
                    # meant to call the parsing logic from parse_input().
                    tmp_results = test_parse(values)
                    if len(tmp_results) >= 1:
                        for element in tmp_results:
                            # Namespace 0 rows only (main articles).
                            if element[1] == '0':
                                tmp_list.append((element[0], element[2]))
                    res.update(dict(tmp_list))
                    for element in res.keys():
                        if element in r_dict:
                            print("ID: ", element, "page_title: ", r_dict[element], " linking to: ", res[element])
            if lines > list_unreadable_lines[-1]:
                print("re-reading done")
                return
        except Exception:
            print("err in open_iso_files")
            raise
# reader function for MySQl file dumps
# reads file: every line is ignored, except for 2 cases:
# -- lines containing values: puts the values into one of the 4 queues
# -- The line that indicates end of file_dump: stops reading
# arguments:
# file_name = file name of sql-dump,
# table_name = table table of INSERT INTO lines from sql_dump
# l1_queue: queue, in which value_lines are put in, for further processing
def read_file(file_name, table_name, l1_queue, pros):
    """
    Producer: stream the gzipped MySQL dump line by line and push the raw
    value portion of every INSERT line onto ``l1_queue`` for the parser
    processes.  On end-of-dump, puts one 'DONE' sentinel per parser process.

    :param file_name: Path of the gzipped SQL dump.
    :param table_name: Table whose INSERT lines are extracted; also the output folder.
    :param l1_queue: Joinable queue consumed by parse_input() workers.
    :param pros: Number of parser processes (= number of 'DONE' sentinels).
    :return: None.
    """
    sql_prefix = "INSERT INTO `" + table_name + "` VALUES " # INSERT Lines of sql_dump beginn with
    sql_suffix = ";" # INSERT Lines of sql_dump end with
    end_line = '-- Dump completed on' # pattern indicates end of dump
    line_number = 0
    elements = 0 # manager variable for equally filling queues
    unreadable_lines = [] # list of line numbers, which could not be read
    make_sure_path_exists(table_name) # create folder for file
    try:
        sql_file = gzip.open(file_name, 'rb')
    except IOError:
        print("file not found")
        raise
    # streams over zipped file, reads line by line and performs different actions
    # see comments below for details
    while True:
        # tries reading line
        # if UnicodeDecodeError is raised, line is not encoded in utf-8
        # line_number is added to list and function continues with next iteration
        try:
            line_number += 1
            line = sql_file.readline().decode('utf-8', 'ignore')
        except UnicodeDecodeError:
            unreadable_lines.append(line_number)
            continue
        if line_number % 100 == 0 and line_number > 0:
            print("reached line:", line_number, " Queue: ", l1_queue.qsize())
        # if true, rached end of dump
        # call open_iso_files, if unreadable lines are found.
        # put 'DONE' to end of each queue, which indicates, no more values are coming
        # exits function
        if line.startswith(end_line):
            sql_file.close()
            for _ in range(pros):
                l1_queue.put('DONE')
            return
        # lines starting with -- are comment lines.
        # blank lines and comment lines contain no values --> skip lines
        elif line == "" or line.startswith("--"):
            continue
        # beginning or end of line doesnt match prefix / sufix --> line is not part of an INSERT line --> skip line
        elif not (line.startswith(sql_prefix) or line.endswith(sql_suffix)):
            continue
        # if non of above: line is INSERT line
        # strip prefix & suffix from line:
        # add value to appropiate queue
        # variable tik_tok manages queues, so that every queue is evenly filled
        else:
            # Keep one leading space before '(' -- parse_input strips it again
            # via values[1:-1].
            value = line[len(sql_prefix)-1:len(line)-(len(sql_suffix))] #
            elements += 1
            l1_queue.put(value)
# sql parser function: gets line from queue, parses sql line and returns a list, containing all values as tuples.
# args:
# l_queue: queue, from which function reads lines
# val: for file naming and process identification useses
# table_name: table name of previously read file. used for file naming
# p_queue: function puts 'DONE' string, after finishing. Needed for process handling
# mem_cap: max usable physical memory
def parse_input(l_queue, val, table_name, p_queue, mem_cap):
    """
    Worker: pull raw INSERT value strings from ``l_queue``, parse them with a
    finite state machine into tuples, keep the namespace-0 (id, title) pairs,
    and pickle the accumulated results to ``<table_name>/`` whenever memory
    exceeds ``mem_cap`` and finally on the 'DONE' sentinel.

    :param l_queue: Joinable queue filled by read_file(); 'DONE' terminates.
    :param val: Worker index, used in output file names.
    :param table_name: 'page' keeps (id, title) rows; anything else also
                       requires column 3 == '0' (pagelinks namespace filter).
    :param p_queue: Queue that receives 'DONE' when this worker finishes.
    :param mem_cap: Memory budget in MB; exceeding it flushes results to disk.
    :return: None.
    """
    file_num = 0
    results = []
    # if element in queue equals 'DONE' : pickles data to disc and exits.
    while True:
        values = l_queue.get()
        if values == 'DONE':
            file_name = table_name + "_" + str(val) + "_" + str(file_num) + '.pickle'
            full_path = path.relpath(table_name+"/"+file_name)
            try:
                with open(full_path, 'wb') as pickle_file:
                    pickle.dump(results, pickle_file, pickle.HIGHEST_PROTOCOL)
                    results.clear()
            except FileNotFoundError:
                print("cant save parsed pickle file")
                raise
            p_queue.put('DONE')
            print("process", val, ": parsing done")
            break
        # Strip the leading blank kept by read_file() and the trailing ';'/newline.
        values = values[1:-1]
        tmp_results = []
        tuples = ()
        tokens = -1   # start index of the token currently being scanned
        state = 0
        # NOTE: the original had `values.lstrip(" ")` here -- a no-op whose
        # result was discarded; removed.  `parse_counter` (written, never read)
        # was removed as well.
        # file parsing starts here
        # loop works like a finite state mashine. Loops & parses symbol by symbol
        # States: 0/9 expect '(', 1/7 expect a field start, 2 = number,
        # 3 = quoted string (4 = escape), 5 = NULL, 6 = after string,
        # 8 = between value tuples.
        for index, symbol in enumerate(values):
            if state == 0:
                if symbol == '(':
                    state = 1
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
            elif state == 1:
                if '0' <= symbol <= '9' or symbol == '-' or symbol == '.':
                    state = 2
                elif symbol == '\'':
                    state = 3
                elif symbol == 'N':
                    state = 5
                elif symbol == ')':
                    tmp_results.append(tuples)
                    tuples = ()
                    state = 8
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                tokens = index
                if state == 3:
                    tokens += 1  # string token starts after the opening quote
                continue
            elif state == 2:
                if '0' <= symbol <= '9' or symbol == '-' or symbol == '.':
                    continue
                elif symbol == ',' or symbol == ')':
                    tmp_str = values[tokens: index]
                    tokens = -1
                    tuples += (tmp_str,)
                    if symbol == ',':
                        state = 7
                    elif symbol == ')':
                        tmp_results.append(tuples)
                        tuples = ()
                        state = 8
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
            elif state == 3:
                if symbol == '\'':
                    tmp_str = values[tokens: index]
                    tokens = -1
                    if '\\' in tmp_str:
                        tmp_str = tmp_str.replace("\\", "") # Unescape backslashed characters
                    tuples += (tmp_str,)
                    state = 6
                elif symbol == '\\':
                    state = 4
                continue
            elif state == 4:
                if symbol == '\'' or symbol == '"' or symbol == '\\':
                    state = 3
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
            elif state == 5:
                if 'A' <= symbol <= 'Z':
                    continue
                elif symbol == ',' or symbol == ')':
                    if values[tokens:index] == "NULL":
                        tuples += (None,)
                    else:
                        raise ValueError("state: ", state, " character: ", symbol)
                    tokens = -1
                    if symbol == ',':
                        state = 7
                    elif symbol == ')':
                        tmp_results.append(tuples)
                        tuples = ()
                        state = 8
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
            elif state == 6:
                if symbol == ',':
                    state = 7
                elif symbol == ')':
                    tmp_results.append(tuples)
                    tuples = ()
                    state = 8
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
            elif state == 7:
                if '0' <= symbol <= '9' or symbol == '-' or symbol == '.':
                    state = 2
                elif symbol == '\'':
                    state = 3
                elif symbol == 'N':
                    state = 5
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                tokens = index
                if state == 3:
                    tokens += 1
                continue
            elif state == 8:
                # BUG FIX: was `if symbol is ','` -- identity comparison with a
                # string literal only works by accident of CPython interning.
                if symbol == ',':
                    state = 9
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
            elif state == 9:
                if symbol == '(':
                    state = 1
                else:
                    raise ValueError("state: ", state, " character: ", symbol)
                continue
        # Keep only namespace-0 rows; pagelinks additionally filters column 3.
        if table_name == 'page':
            for element in tmp_results:
                if element[1] == '0':
                    tmp_tuple = (element[0], element[2])
                    results.append(tmp_tuple)
        else:
            for element in tmp_results:
                if element[1] == '0' and element[3] == '0':
                    tmp_tuple = (element[0], element[2])
                    results.append(tmp_tuple)
        # Spill intermediate results to disk when over the memory budget.
        if memory_usage() >= mem_cap:
            file_name = table_name+"_"+str(val)+"_"+str(file_num)+'.pickle'
            full_path = path.relpath(table_name+"/"+file_name)
            try:
                with open(full_path, 'wb') as pickle_file:
                    pickle.dump(results, pickle_file, pickle.HIGHEST_PROTOCOL)
                    results.clear()
            except FileNotFoundError:
                print("can't save parsed pickle file")
                raise
            file_num += 1
        l_queue.task_done()
# converts list of tuples to dict, for list from pagelinks.sql.gz
# keys: ID of an article, who has outgoing links
# value: list of article titles, the article in key is linking to
# args:
# d_queue: a queue with file names of pickled lists from parse_input()
# mem_cap: max. ram, function is allowed to use
def links_list_to_dict(mem_cap, val, file_list):
    """
    Merge pickled lists of (source_id, target_title) tuples into dictionaries
    mapping source_id -> list of targets, flushing to
    ``pagelinks_dict/dict_<val><n>.pickle`` whenever memory usage exceeds
    ``mem_cap``; the remainder is written at the end.

    :param mem_cap: Memory budget in MB for the in-progress dictionary.
    :param val: Worker index, used in output file names.
    :param file_list: Pickle files produced by parse_input() for pagelinks.
    :return: None.
    """
    file_number = 0
    result_dict = {}
    cnt = 0
    fil_max = len(file_list)
    for file in file_list:
        cnt += 1
        tmp_list = pickle.load(open(file, "rb"))
        print("Process ", val, " || reading file: ", cnt, " of ", fil_max)
        # Flush the accumulated dict to disk when over the memory budget.
        if memory_usage() >= mem_cap:
            full_path = path.relpath("pagelinks_dict/dict_"+str(val)+str(file_number)+'.pickle')
            with open(full_path, 'wb') as pickle_file:
                pickle.dump(result_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
            result_dict.clear()
            file_number += 1
        # BUG FIX: always merge the current file's tuples.  The old if/else
        # skipped this merge entirely whenever a flush happened, silently
        # dropping every link loaded from that file.
        for element in tmp_list:
            if element[0] in result_dict:
                # append the new value to existing list
                result_dict[element[0]].append(element[1])
            else:
                # create a new value-list for key
                result_dict[element[0]] = [element[1]]
    full_path = path.relpath("pagelinks_dict/dict_"+str(val)+str(file_number)+'.pickle')
    with open(full_path, 'wb') as pickle_file:
        print("Process ", val, " || finishing pagelinks dict...")
        pickle.dump(result_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
    result_dict.clear()
# converts list of tuples to dict, for list from page.sql.gz
# keys: ID of an article
# value: title of the same article
# args:
# d_queue: a queue with file names of pickled lists from parse_input()
# mem_cap: max. ram, function is allowed to use
def page_list_to_dict(mem_cap, val, file_list):
    """
    Merge pickled lists of (page_id, title) tuples into dictionaries mapping
    id -> title, flushing to ``page_dict/dict_<val><n>.pickle`` whenever
    memory usage exceeds ``mem_cap``; the remainder is written at the end.

    :param mem_cap: Memory budget in MB for the in-progress dictionary.
    :param val: Worker index, used in output file names.
    :param file_list: Pickle files produced by parse_input() for pages.
    :return: None.
    """
    file_number = 0
    result_dict = {}
    cnt = 0
    ele = 0
    fil_max = len(file_list)
    # loop: if element in queue is 'DONE', no more files to read --> function saves current dict to file & terminates.
    # otherwise: converting list of tuples to dict.
    for file in file_list:
        # reading file & updating dict with tuples from file
        tmp_list = (pickle.load(open(file, "rb")))
        # Merge happens BEFORE the memory check, so no file is ever skipped.
        result_dict.update(dict(tmp_list))
        cnt += 1
        print("Process ", val, " || Processed files: ", cnt, " of ", fil_max )
        # check memory usage: if dict gets to big, save and clear
        if memory_usage() >= mem_cap:
            print("saving pages dict...")
            full_path = path.relpath("page_dict/dict_"+str(val)+str(file_number)+'.pickle')
            with open(full_path, 'wb') as pickle_file:
                pickle.dump(result_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
            result_dict.clear()
            file_number += 1
    # Persist whatever is still in memory after the last input file.
    full_path = path.relpath("page_dict/dict_"+str(val)+str(file_number)+'.pickle')
    with open(full_path, 'wb') as pickle_file:
        print("Process ", val, " || finishing pages dict...")
        pickle.dump(result_dict, pickle_file, pickle.HIGHEST_PROTOCOL)
    result_dict.clear()
    file_number += 1
# makes list of tuples to dicts. starts 1-n processes for dicting pickle files.
# calls page_list_to_dict() or link_list to dict() based on, which file's lists have to be converted
# splits pickle-files between processes evenly
# uses 50% of free ram, dict size is based on this value
def generate_dicts(table_name):
    """
    Convert the pickled tuple lists produced by parse_input() into dictionaries,
    using one worker process per CPU; the dictionary-building memory budget is
    50% of currently free RAM, split evenly between workers.

    :param table_name: 'page' builds page_dict/, anything else pagelinks_dict/.
    :return: True once all worker processes have finished.
    """
    import numpy
    start = time.time()
    free_mem = round(((psutil.virtual_memory()[1])/1024**2), 2)
    free_mem = round((free_mem*0.5), 2)
    cpus = psutil.cpu_count()
    processes = []
    # BUG FIX: was `table_name is 'page'` -- identity comparison with a string
    # literal; replaced with a proper equality check.
    if table_name == 'page':
        make_sure_path_exists("page_dict/")
        if cpus >= 2:
            # BUG FIX: numpy.split() raises ValueError unless the file count is
            # an exact multiple of cpus; array_split() permits uneven chunks.
            file_list = numpy.array_split(numpy.array(glob.glob('page/*.pickle')), cpus)
            for i in range(cpus):
                p = Process(target=page_list_to_dict, args=(free_mem/cpus, i, file_list[i]))
                processes.append(p)
        else:
            file_list = glob.glob('page/*.pickle')
            p = Process(target=page_list_to_dict, args=(free_mem, 0, file_list))
            processes.append(p)
    else:
        make_sure_path_exists("pagelinks_dict")
        if cpus >= 2:
            file_list = numpy.array_split(numpy.array(glob.glob('pagelinks/*.pickle')), cpus)
            for i in range(cpus):
                p = Process(target=links_list_to_dict, args=(free_mem/cpus, i, file_list[i]))
                processes.append(p)
        else:
            file_list = glob.glob('pagelinks/*.pickle')
            p = Process(target=links_list_to_dict, args=(free_mem, 0, file_list))
            processes.append(p)
    # starts processes
    for p in processes:
        p.start()
    # waits for processes to finish
    for p in processes:
        p.join()
    print("Generated dictionary for ", table_name, " in ", (time.time()-start)/60, " minutes")
    return True
# main function of file.
# handles part of processing & method calling
# returns True after reading, parsing and making dictionaries
def work_on_file(file_name, table_name):
    """
    Main entry point of this module: spawn one parse_input() worker per CPU
    (minimum 2), stream the dump through them via read_file(), wait for the
    queue to drain, then build the dictionaries.

    :param file_name: Path of the gzipped SQL dump.
    :param table_name: Table to extract ('page' or 'pagelinks').
    :return: True (result of generate_dicts) after parsing and dict building.
    """
    start = time.time()
    processes = []
    print ('\n\n')
    num_of_processes = psutil.cpu_count()
    if num_of_processes < 2:
        num_of_processes = 2
    # Free memory in MB; parsers together get 25% of it as spill budget.
    free_mem = round(((psutil.virtual_memory()[1])/1024**2), 2)
    queue_length = int(free_mem/8)
    print("Total free memory: ", round(free_mem), "MB queue length:", queue_length)
    # NOTE(review): queue_length is computed but the queue is capped at a
    # hard-coded 1200 -- confirm which limit was intended.
    line1_queue = multiprocessing.JoinableQueue(1200)
    p_queue = multiprocessing.JoinableQueue(4)
    process_mem = round(free_mem*0.25)
    print("25% of free memory is used for ", num_of_processes, " processes: ", process_mem, "MB")
    for i in range(num_of_processes):
        p = Process(target=parse_input, args=(line1_queue, i, table_name, p_queue, process_mem/num_of_processes))
        processes.append(p)
    for pro in processes:
        pro.start()
    print("Started", num_of_processes, " additional Processes \n")
    print("Reading file", file_name)
    read_file(file_name, table_name, line1_queue, num_of_processes)
    print("reading ", file_name, " done")
    print("waiting for processes to finish")
    # Poll until the work queue is empty and the workers have signalled DONE.
    while 1:
        time.sleep(2)
        print("elements left in queue: ", line1_queue.qsize())
        if line1_queue.qsize() == 0:
            if (any(p.is_alive() for p in processes)) and p_queue.qsize() < 4:
                continue
            else:
                break
    print("Parsing time for ", file_name, ": ", (time.time()-start)/60, " minutes")
    print("----------")
    return generate_dicts(table_name)
| {"/main_file.py": ["/SQLReader.py", "/DB_handler.py"]} |
65,251 | I4-Projektseminar-HHU-2016/seminar-project-HaydarAk | refs/heads/master | /main_file.py | import os.path
from SQLReader import work_on_file
from DB_handler import build_db
if __name__ == '__main__':
    # Expected Wikipedia dump files in the working directory (July 2016 dumps).
    pagelinks = 'enwiki-20160720-pagelinks.sql.gz'
    pages = 'enwiki-20160720-page.sql.gz'
    if os.path.isfile(pages) and os.path.isfile(pagelinks):
        # Order matters: pages must be parsed before pagelinks so the link
        # targets can later be resolved against page ids in build_db().
        work_on_file(pages, 'page')
        work_on_file(pagelinks, 'pagelinks')
        build_db('test_db')
    else:
        raise IOError("file not found")
# method for for generating snippets of sql.gz files#
def generate_file_snippets (in_file, out_file, len_num):
import gzip
try:
sql_file = gzip.open(in_file, 'rb')
save_file = gzip.open(out_file, 'wb')
except IOError:
print("file not found")
raise
end_line = '-- Dump completed on'
line_number = 0
while True:
# tries reading line
# if UnicodeDecodeError is raised, line is not encoded in utf-8
# line_number is added to list and function continues with next iteration
try:
line_number += 1
line = sql_file.readline()
print(line_number)
except UnicodeDecodeError:
continue
if line_number <= len_num:
save_file.write(line)
try:
if line.decode('utf-8').startswith(end_line):
sql_file.close()
save_file.write(line)
save_file.close()
break
except UnicodeDecodeError:
continue
| {"/main_file.py": ["/SQLReader.py", "/DB_handler.py"]} |
65,252 | UnsignedArduino/CircuitPython-Project-Manager | refs/heads/main | /main.py | """
The main program.
-----------
Classes list:
No classes!
-----------
Functions list:
No functions!
"""
# TODO: Make binaries like in CPY Bundle Manager
import gui
from pathlib import Path
from sys import argv
from project_tools.create_logger import create_logger
import logging
LEVEL = logging.DEBUG
# Truncate the log file on every launch.
log_path = Path.cwd() / "log.log"
log_path.write_text("")
logger = create_logger(name=__name__, level=LEVEL)
logger.debug(f"Found {len(argv)} argument(s)")
logger.debug(f"({repr(argv)})")
# Optional first CLI argument: path to a .cpypmconfig file to open on start.
path = None
if len(argv) > 1:
    logger.debug("Path to .cpypmconfig was passed in!")
    logger.debug(f"Path is {repr(argv[1])}")
    path = Path(argv[1])
    # Directories are not valid config files -- ignore them.
    if path.is_dir():
        path = None
logger.debug(f"Starting application...")
logger.info(f"Log level is {repr(LEVEL)}")
# NOTE(review): `as gui` shadows the imported `gui` module from here on;
# harmless since the module is not used again, but worth renaming.
with gui.GUI() as gui:
    gui.run(cpypmconfig_path=path)
logger.warning(f"Application stopped!")
| {"/main.py": ["/gui.py"], "/gui.py": ["/gui_tools/clickable_label.py"]} |
65,253 | UnsignedArduino/CircuitPython-Project-Manager | refs/heads/main | /gui_tools/clickable_label.py | """
A module that extends the tk.Label class to make a clickable link.
-----------
Classes list:
No classes!
-----------
Functions list:
No functions!
"""
import tkinter as tk
from tkinter import ttk
class ClickableLabel(tk.Label):
    """
    A tk.Label styled like a hyperlink (blue text, hand cursor) that runs a
    callback when left-clicked.
    """
    def __init__(self, master, callback, *args, **kwargs):
        """
        :param master: Parent widget.
        :param callback: Zero-argument callable invoked on left click.
        """
        super().__init__(master=master, fg="blue", cursor="hand2", *args, **kwargs)
        # BUG FIX: the old binding was `lambda e: callback`, which merely
        # evaluated (and discarded) the function object on each click -- the
        # callback was never actually invoked.
        self.bind("<Button-1>", lambda e: callback())
| {"/main.py": ["/gui.py"], "/gui.py": ["/gui_tools/clickable_label.py"]} |
65,254 | UnsignedArduino/CircuitPython-Project-Manager | refs/heads/main | /gui.py | """
The main GUI code.
-----------
Classes list:
- class GUI(tk.Tk).__init__(self)
-----------
Functions list:
No functions!
"""
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox as mbox
from tkinter import filedialog as fd
from gui_tools.right_click.entry import EntryWithRightClick
from gui_tools.right_click.spinbox import SpinboxWithRightClick
from gui_tools.right_click.combobox import ComboboxWithRightClick
from gui_tools.right_click.listbox import ListboxWithRightClick
from gui_tools.right_click.text import TextWithRightClick
from gui_tools.idlelib_clone import tooltip
from gui_tools.scrollable_frame import VerticalScrolledFrame
from gui_tools.clickable_label import ClickableLabel
from gui_tools import download_dialog
from threading import Thread
from pathlib import Path
import traceback
import json
from webbrowser import open as open_application
from markdown import markdown as markdown_to_html
from pathlib import Path
from project_tools import drives, os_detect, project
from typing import Union, Any, Callable
import logging
from project_tools.create_logger import create_logger
logger = create_logger(name=__name__, level=logging.DEBUG)
class GUI(tk.Tk):
"""
The GUI for the CircuitPython Project Manager.
"""
    def __init__(self):
        """Create the main window: fixed size, custom close handler, config path in cwd."""
        super().__init__()
        self.title("CircuitPython Project Manager")
        self.resizable(False, False)
        # JSON settings file, created lazily by save_key()/load_key().
        self.config_path = Path.cwd() / "config.json"
        # Set by long-running operations so try_to_close() can warn the user.
        self.disable_closing = False
        # Route the window-manager close button through our confirmation logic.
        self.protocol("WM_DELETE_WINDOW", self.try_to_close)
    def __enter__(self):
        """Support `with GUI() as gui:` usage; returns the instance unchanged."""
        return self
    def try_to_close(self) -> None:
        """
        Try to close the application - checks if we are not busy and displays dialogs appropriately.

        If an operation is in progress (``self.disable_closing``), asks the
        user to confirm before destroying the window.

        :return: None.
        """
        logger.debug("User requested closing window...")
        if self.disable_closing:
            logger.warning("Currently in the middle of doing something!")
            # default="cancel" makes the safe choice the default button.
            if mbox.askokcancel("CircuitPython Project Manager: Confirmation",
                                "Something is happening right now!\n"
                                "If you close out now, this will immediately stop what we are doing and may cause a "
                                "corrupt directory hierarchy, broken files and/or broken directories. "
                                "Are you sure you want to exit?",
                                icon="warning", default="cancel"):
                logger.debug("User continued to close window!")
                self.destroy()
        else:
            logger.debug("Destroying main window!")
            self.destroy()
    def save_key(self, key: str = None, value: Any = None) -> None:
        """
        Save a key to the config file, creating the file if needed and
        preserving all other keys.

        :param key: A string.
        :param value: Something JSON-serializable.
        :return: None.
        """
        if not self.config_path.exists():
            self.config_path.write_text("{}")
        try:
            old_json = json.loads(self.config_path.read_text())
        except json.decoder.JSONDecodeError:
            # Corrupt config: start over rather than crash.
            old_json = {}
        logger.debug(f"Setting {repr(key)} to {repr(value)}!")
        old_json[key] = value
        # sort_keys + indent keep the file diff-friendly and human-editable.
        self.config_path.write_text(json.dumps(old_json, sort_keys=True, indent=4))
    def load_key(self, key: str) -> Any:
        """
        Retrieves a key from the config file.

        :param key: A string.
        :return: Something, or None if it was not found (or the file is corrupt).
        """
        if not self.config_path.exists():
            self.config_path.write_text("{}")
        try:
            value = json.loads(self.config_path.read_text())[key]
            return value
        except (json.decoder.JSONDecodeError, KeyError):
            # Missing key and unparsable file are treated the same: no value.
            logger.warning(f"Could not find {repr(key)} in config!")
            return None
    def validate_for_number(self, new: str = "") -> bool:
        """
        Checks a string to see whether it's a number and within 3 digits.
        Intended as a Tk entry validation callback.

        :param new: The string to validate.
        :return: A bool telling whether it passed validation.
        """
        # Note: isdigit() rejects "" and signs/decimals, so only 0-999 pass.
        logger.debug(f"{repr(new)} did " + ("" if new.isdigit() and len(new) <= 3 else "not ") + "pass validation!")
        return new.isdigit() and len(new) <= 3
    def show_traceback(self) -> bool:
        """
        Whether to show the traceback or not depending on the config file.

        :return: bool - True when "show_traceback_in_error_messages" is truthy
            in the config, False otherwise (including before config exists).
        """
        try:
            return bool(self.load_key("show_traceback_in_error_messages"))
        except AttributeError:
            # Called before config_path is set up -- default to hiding tracebacks.
            return False
    def add_tooltip(self, widget: tk.Widget, text: str) -> None:
        """
        Add a hover tooltip to a widget.
        :param widget: The widget to add to.
        :param text: The text in the tooltip.
        :return: None.
        """
        # Hovertip attaches itself to the widget; no reference needs to be kept.
        tooltip.Hovertip(anchor_widget=widget, text=text)
    def copy_to_clipboard(self, string: str = "") -> None:
        """
        Copy something to the clipboard.
        :param string: What to copy to the clipboard.
        :return: None.
        """
        logger.debug(f"Copying {repr(string)} to clipboard!")
        # Replace the clipboard contents entirely rather than appending.
        self.clipboard_clear()
        self.clipboard_append(string)
        # NOTE(review): update() presumably flushes the new clipboard contents
        # to the windowing system immediately - confirm.
        self.update()
def open_file(self, path: Union[Path, str], download_url: str = None) -> None:
"""
Open a file or a web page.
:param path: A string or a path representing the web page or the path of the file/directory.
:param download_url: If a file, the link to where we can download the file if it is missing.
:return: None.
"""
logger.debug(f"Opening {repr(path)}...")
if isinstance(path, Path):
if path.exists():
open_application(str(path))
else:
mbox.showerror("CircuitPython Project Manager: ERROR!",
"Oh no! An error occurred while opening this file!\n"
f"The file {repr(path)} does not exist!")
if download_url and mbox.askokcancel("CircuitPython Bundle Manager: Confirm",
"It looks like this file is available on GitHub!\n"
"Would you like to download it?"):
if download_dialog.download(master=self, url=download_url, path=path,
show_traceback=self.show_traceback()):
open_application(str(path))
else:
open_application(path)
def open_markdown(self, path: Union[str, Path], convert_to_html: bool = True, download_url: str = None) -> None:
"""
Open a file or a web page.
:param path: A string or a path to the markdown file.
:param convert_to_html: A bool on whether to convert the markdown to HTML or not.
:param download_url: If a file, the link to where we can download the file if it is missing.
:return: None.
"""
logger.debug(f"Opening markdown file {repr(path)}...")
if isinstance(path, Path):
path = Path(path)
if path.exists():
if convert_to_html:
logger.debug(f"Converting markdown to HTML...")
html_path = Path.cwd() / (path.stem + ".html")
html_path.write_text(markdown_to_html(text=path.read_text(), extensions=["pymdownx.tilde"]))
logger.debug(f"Opening HTML in browser...")
open_application(url=html_path.as_uri())
else:
logger.debug(f"Opening {repr(path)} as markdown!")
open_application(str(path))
else:
mbox.showerror("CircuitPython Project Manager: ERROR!",
"Oh no! An error occurred while opening this file!\n"
f"The file {repr(path)} does not exist!")
if download_url and mbox.askokcancel("CircuitPython Bundle Manager: Confirm",
"It looks like this file is available on GitHub!\n"
"Would you like to download it?"):
if download_dialog.download(master=self, url=download_url, path=path,
show_traceback=self.show_traceback()):
self.open_markdown(path=path)
def create_config(self) -> None:
"""
Re-create the config keys if they do not exist.
:return: None.
"""
if not self.load_key("show_traceback_in_error_messages"):
self.save_key("show_traceback_in_error_messages", False)
if not self.load_key("unix_drive_mount_point"):
self.save_key("unix_drive_mount_point", "/media")
def add_recent_project(self, path: Path) -> None:
"""
Add a project to the recent category.
:param path: The path of the .cpypmconfig file.
:return: None.
"""
self.save_key("last_dir_opened", str(path.parent.parent))
recent_projects = self.load_key("opened_recent")
if recent_projects is None:
recent_projects = []
if str(path) in recent_projects:
recent_projects.pop(recent_projects.index(str(path)))
recent_projects = [Path(p) for p in recent_projects]
while len(recent_projects) > 10:
recent_projects.pop()
recent_projects.insert(0, str(path))
self.save_key("opened_recent", [str(p) for p in recent_projects])
self.update_recent_projects()
    def open_project(self, path: Path) -> None:
        """
        Open a project and refresh the GUI to show it.
        :param path: The path to the .cpypmconfig file.
        :return: None.
        """
        logger.debug(f"Opening project at path {repr(path)}")
        self.cpypmconfig_path = path
        self.update_main_gui()
        # Bump this project to the top of the "Open recent" menu.
        self.add_recent_project(path)
    def open_project_dialog(self) -> None:
        """
        Open a project with a dialog to select a .cpypmconfig file.
        :return: None.
        """
        logger.debug("Opening project...")
        # Start browsing from wherever the user last opened a project.
        previous_path = self.load_key("last_dir_opened")
        logger.debug(f"Previous path opened is {repr(previous_path)}")
        path = fd.askopenfilename(initialdir=str(Path.cwd()) if previous_path is None else previous_path,
                                  title="CircuitPython Project Manager: Select a .cpypmconfig file",
                                  filetypes=((".cpypmconfig files", "*.cpypmconfig"), ("All files", "*.*")))
        # An empty string means the user canceled the dialog.
        if path:
            path = Path(path)
            logger.debug(f"Returned valid path! Path is {repr(path)}")
            self.open_project(path)
        else:
            logger.debug("User canceled opening project!")
    def close_project(self) -> None:
        """
        Close the currently open project.
        :return: None.
        """
        logger.debug("Closing project...")
        # A None config path is what marks "no project open" everywhere else.
        self.cpypmconfig_path = None
        self.update_main_gui()
    def dismiss_dialog(self, dlg: tk.Toplevel) -> None:
        """
        Intercept a dialog's close button to make sure we release the window grab.
        :param dlg: The dialog to destroy.
        :return: None.
        """
        # If a long-running operation is in progress, make the user confirm first.
        if self.disable_closing:
            logger.warning("Currently in the middle of doing something!")
            if mbox.askokcancel("CircuitPython Project Manager: Confirmation",
                                "Something is happening right now!\n"
                                "If you close out now, this will immediately stop what we are doing and may cause a "
                                "corrupt directory hierarchy, broken files and/or broken directories. "
                                "Are you sure you want to exit?",
                                icon="warning", default="cancel"):
                logger.debug("User continued to close window!")
                logger.debug("Destroying dialog")
                # The dialog may already be gone by now - ignore that case.
                try:
                    dlg.grab_release()
                    dlg.destroy()
                except tk.TclError:
                    pass
        else:
            logger.debug("Destroying dialog")
            dlg.grab_release()
            dlg.destroy()
    def create_dialog(self, title: str) -> tk.Toplevel:
        """
        Create a modal dialog and return it.
        :param title: The title of the dialog.
        :return: The newly created tkinter.Toplevel dialog.
        """
        dlg = tk.Toplevel(master=self)
        # Route the close button through dismiss_dialog so the grab is released.
        dlg.protocol("WM_DELETE_WINDOW", lambda: self.dismiss_dialog(dlg))
        dlg.transient(self)
        dlg.resizable(False, False)
        dlg.title(title)
        # The dialog must be visible before it can grab input events.
        dlg.wait_visibility()
        dlg.grab_set()
        return dlg
    def open_new_project_directory(self) -> None:
        """
        Ask the user for the new project's parent directory and store the
        selection in the project-location entry.
        :return: None.
        """
        logger.debug("Opening directory...")
        # Start browsing from wherever the user last opened a project.
        previous_path = self.load_key("last_dir_opened")
        logger.debug(f"Previous path opened is {repr(previous_path)}")
        path = fd.askdirectory(initialdir=str(Path.cwd()) if previous_path is None else previous_path,
                               title="CircuitPython Project Manager: Select a directory")
        # An empty string means the user canceled the dialog.
        if path:
            path = Path(path)
            logger.debug(f"Returned valid path! Path is {repr(path)}")
            self.project_location_var.set(str(path))
            # Remember this directory as the starting point for next time.
            self.save_key("last_dir_opened", str(path))
        else:
            logger.debug("User canceled opening project!")
    def create_new_project_location(self) -> None:
        """
        Create the new project location widgets (label, entry, browse button).
        :return: None.
        """
        self.project_location_frame = ttk.Frame(master=self.new_project_frame)
        self.project_location_frame.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.project_location_label = ttk.Label(master=self.project_location_frame, text="Project location: ")
        self.project_location_label.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.project_location_var = tk.StringVar()
        # Widget widths differ per platform, presumably to compensate for
        # different default fonts - confirm before unifying.
        if os_detect.on_linux():
            self.project_location_entry = EntryWithRightClick(master=self.project_location_frame,
                                                              textvariable=self.project_location_var, width=32)
        else:
            self.project_location_entry = EntryWithRightClick(master=self.project_location_frame,
                                                              textvariable=self.project_location_var, width=51)
        self.project_location_entry.initiate_right_click_menu()
        self.project_location_entry.grid(row=0, column=1, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.project_location_entry, "Where to put the new project.")
        self.project_location_button = ttk.Button(master=self.project_location_frame, text="Browse...",
                                                  command=self.open_new_project_directory)
        self.project_location_button.grid(row=0, column=2, padx=1, pady=0, sticky=tk.NW)
        self.add_tooltip(self.project_location_button, "Launch the directory selector.")
    def create_new_project_details(self) -> None:
        """
        Create the new project detail widgets, like title and description.
        :return: None.
        """
        self.project_details_frame = ttk.Frame(master=self.new_project_frame)
        self.project_details_frame.grid(row=1, column=0, padx=1, pady=1, sticky=tk.NW)
        self.project_title_label = ttk.Label(master=self.project_details_frame, text="Project title: ")
        self.project_title_label.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.project_title_var = tk.StringVar(value="Untitled")
        # Widget widths differ per platform, presumably for font differences.
        if os_detect.on_linux():
            self.project_title_entry = EntryWithRightClick(master=self.project_details_frame, width=24, textvariable=self.project_title_var)
        else:
            self.project_title_entry = EntryWithRightClick(master=self.project_details_frame, width=40, textvariable=self.project_title_var)
        self.project_title_entry.initiate_right_click_menu()
        self.project_title_entry.grid(row=0, column=1, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.project_title_entry, "The title of the project.")
        self.project_autogen_var = tk.BooleanVar(value=True)
        self.project_autogen_checkbox = ttk.Checkbutton(master=self.project_details_frame, text="Auto-generate a .gitignore",
                                                        variable=self.project_autogen_var)
        self.project_autogen_checkbox.grid(row=0, column=2, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.project_autogen_checkbox, "Whether to auto-generate a .gitignore file for the Git VCS.")
        self.project_description_label = ttk.Label(master=self.project_details_frame, text="Project description: ")
        self.project_description_label.grid(row=1, column=0, columnspan=3, padx=1, pady=1, sticky=tk.NW)
        self.project_description_text = TextWithRightClick(master=self.project_details_frame, width=60, height=10, wrap=tk.WORD)
        self.project_description_text.initiate_right_click_menu()
        self.project_description_text.grid(row=2, column=0, columnspan=3, padx=1, pady=1, sticky=tk.NW)
        # The status label doubles as the validation loop's kill switch via .stop
        # (see update_new_project_buttons / create_new_project).
        self.project_status = ttk.Label(master=self.project_details_frame)
        self.project_status.stop = False
        self.project_status.grid(row=3, column=0, columnspan=3, padx=1, pady=1, sticky=tk.NW)
    def update_new_project_buttons(self) -> None:
        """
        Validate the new-project form, update the status label and enable or
        disable the "Make new project" button. Will reschedule itself automatically.
        :return: None.
        """
        # .stop is set once project creation starts - ends this polling loop.
        if self.project_status.stop:
            return
        try:
            if not self.project_title_var.get():
                enable = False
                self.project_status.config(text="No title found!")
            elif not self.project_location_var.get() or not Path(self.project_location_var.get()).exists():
                enable = False
                self.project_status.config(text="The parent directory of the project does not exist!")
            elif (Path(self.project_location_var.get()) / project.replace_sus_chars(self.project_title_var.get())).exists():
                enable = False
                self.project_status.config(text="A project under the same name already exists in that parent directory!")
            else:
                enable = True
                self.project_status.config(text="All good!")
            self.make_new_project_button.config(state=tk.NORMAL if enable else tk.DISABLED)
        except tk.TclError:
            # The dialog was destroyed - silently stop polling.
            pass
        else:
            # Re-run this validation again in 100 ms.
            self.after(ms=100, func=self.update_new_project_buttons)
    def create_new_project_buttons(self) -> None:
        """
        Create the new project buttons, like Ok and Cancel.
        :return: None.
        """
        self.project_buttons_frame = ttk.Frame(master=self.new_project_frame)
        self.project_buttons_frame.grid(row=2, column=0, padx=1, pady=1, sticky=tk.N)
        # Creation runs on a worker thread so the GUI stays responsive.
        self.make_new_project_button = ttk.Button(master=self.project_buttons_frame, text="Make new project",
                                                  command=self.start_create_new_project_thread)
        self.make_new_project_button.grid(row=0, column=0, padx=1, pady=1, sticky=tk.N)
        self.add_tooltip(self.make_new_project_button, "Make a new project.")
        self.cancel_new_project_button = ttk.Button(master=self.project_buttons_frame, text="Cancel",
                                                    command=lambda: self.dismiss_dialog(self.new_project_window))
        self.cancel_new_project_button.grid(row=0, column=1, padx=1, pady=1, sticky=tk.N)
        self.add_tooltip(self.cancel_new_project_button, "Close this dialog without creating a new project.")
        # Kick off the periodic form-validation loop.
        self.update_new_project_buttons()
def set_childrens_state(self, frame, enabled: bool = True) -> None:
"""
Set the state of a frame's children.
:param frame: A Tkinter widget to iterate over's it's children.
:param enabled: Weather to enable/disable the children.
:return: None.
"""
logger.debug(f"{'Enabling' if enabled else 'Disabling'} {repr(frame)}")
for child in frame.winfo_children():
try:
child.configure(state=tk.NORMAL if enabled else tk.DISABLED)
except tk.TclError:
try:
self.set_childrens_state(frame=child, enabled=enabled)
except tk.TclError:
pass
    def start_create_new_project_thread(self) -> None:
        """
        Start the create new project thread.
        :return: None.
        """
        # Daemon thread keeps project creation from blocking the Tk event loop.
        thread = Thread(target=self.create_new_project, args=(), daemon=True)
        logger.debug(f"Starting create new project thread {repr(thread)}")
        thread.start()
    def create_new_project(self) -> None:
        """
        Create a new project - this will block, so it runs on a worker thread
        (see start_create_new_project_thread).
        :return: None.
        """
        # Stop the validation polling loop and lock the UI while we work.
        self.project_status.stop = True
        self.project_status.config(text="Creating project...")
        self.disable_closing = True
        self.set_childrens_state(self.new_project_frame, False)
        try:
            self.cpypmconfig_path = project.make_new_project(parent_directory=Path(self.project_location_var.get()),
                                                             project_name=self.project_title_var.get(),
                                                             project_description=self.project_description_text.get("1.0", tk.END),
                                                             autogen_gitignore=self.project_autogen_var.get())
        except FileExistsError:
            mbox.showerror("CircuitPython Project Manager: Error!",
                           "A project already exists under the same name!\n"
                           "Please try creating a project with a different name or try creating it somewhere else!"
                           "\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
            # Re-allow closing before bailing out, or the app stays locked.
            self.disable_closing = False
            return
        self.update_main_gui()
        self.disable_closing = False
        self.dismiss_dialog(self.new_project_window)
        self.add_recent_project(self.cpypmconfig_path)
        self.update_recent_projects()
    def open_create_new_project(self) -> None:
        """
        Create a new project. This will open a new (modal) window.
        :return: None.
        """
        logger.debug("Creating new project...")
        self.new_project_window = self.create_dialog(title="CircuitPython Project Manager: Make a new project...")
        self.new_project_frame = ttk.Frame(master=self.new_project_window)
        self.new_project_frame.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.create_new_project_location()
        self.create_new_project_details()
        self.create_new_project_buttons()
        # Block here until the dialog is closed.
        self.new_project_frame.wait_window()
    def clear_recent_projects(self) -> None:
        """
        Clear the recent projects list, after asking for confirmation.
        :return: None.
        """
        logger.debug("Clearing recent projects...")
        if mbox.askokcancel("CircuitPython Project Manager: Confirm",
                            "Are you sure you want to clear all recent projects?"):
            logger.debug("Clearing all recent projects!")
            self.save_key("opened_recent", [])
            self.update_recent_projects()
        else:
            logger.debug("User canceled clearing all recent projects!")
    def update_recent_projects(self) -> None:
        """
        Rebuild the "Open recent" menu from the config file.
        :return: None.
        """
        self.opened_recent_menu.delete(0, tk.END)
        self.recent_projects = self.load_key("opened_recent")
        if self.recent_projects is None:
            self.recent_projects = []
        self.recent_projects = [Path(p) for p in self.recent_projects]
        for path in self.recent_projects:
            # path=path binds the current value at definition time - without it,
            # every entry would open the last project in the list (late binding).
            self.opened_recent_menu.add_command(label=str(path),
                                                state=tk.NORMAL if path.exists() else tk.DISABLED,
                                                command=lambda path=path: self.open_project(path))
        if len(self.recent_projects) == 0:
            self.opened_recent_menu.add_command(label="No recent projects!", state=tk.DISABLED)
        self.opened_recent_menu.add_separator()
        self.opened_recent_menu.add_command(label="Clear recent projects", command=self.clear_recent_projects,
                                            state=tk.DISABLED if len(self.recent_projects) == 0 else tk.NORMAL)
def make_key_bind(self, ctrl_cmd: bool, mac_ctrl: bool, shift: bool, alt_option: bool, letter: str,
callback: Callable) -> str:
"""
Make a key-bind and bind to self.
:param ctrl_cmd: Have Control (PC) or Command (Mac) in the key combo.
:param mac_ctrl: Have Control (Mac) in the key combo.
:param shift: Have Shift in the key combo.
:param alt_option: Have Alt (PC) or Option (Mac) in the key combo.
:param letter: The letter to use as the key bind.
:param callback: What to call when the keybind is pressed.
:return: An accelerator that you can display.
"""
combo = ""
if os_detect.on_mac():
if ctrl_cmd: combo += "Command-"
if mac_ctrl: combo += "Control-"
if shift: combo += "Shift-"
if alt_option: combo += "Option-"
else:
if ctrl_cmd: combo += "Control-"
if shift: combo += "Shift-"
if alt_option: combo += "Alt-"
keycode = f"<{combo}{letter.upper() if shift else letter.lower()}>"
logger.debug(f"Binding {repr(keycode)} to {repr(callback)}")
self.bind(keycode, callback)
combo += letter.upper()
if not os_detect.on_mac():
combo = combo.replace("-", "+")
logger.debug(f"Combo for {repr(callback)} is {repr(combo)}")
return combo
    def create_file_menu(self) -> None:
        """
        Create the file menu.
        :return: None.
        """
        self.file_menu = tk.Menu(self.menu_bar)
        self.menu_bar.add_cascade(menu=self.file_menu, label="File", underline=0)
        # Each key-bind callback re-checks the menu entry's state so the
        # shortcut is inert while the entry is disabled.
        self.file_menu.add_command(label="New...", command=self.open_create_new_project, underline=0,
                                   accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=False,
                                                                  alt_option=False, letter="n",
                                                                  callback=lambda _: None if self.file_menu.entrycget("New...", "state") == tk.DISABLED else self.open_create_new_project()))
        self.file_menu.add_command(label="Open...", command=self.open_project_dialog, underline=0,
                                   accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=False,
                                                                  alt_option=False, letter="o",
                                                                  callback=lambda _: None if self.file_menu.entrycget("Open...", "state") == tk.DISABLED else self.open_project_dialog()))
        self.opened_recent_menu = tk.Menu(self.file_menu)
        self.file_menu.add_cascade(label="Open recent", menu=self.opened_recent_menu, underline=5)
        self.update_recent_projects()
        self.file_menu.add_command(label="Close project", command=self.close_project, underline=0,
                                   accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=False,
                                                                  alt_option=False, letter="q",
                                                                  callback=lambda _: None if self.file_menu.entrycget("Close project", "state") == tk.DISABLED else self.close_project()))
        self.file_menu.add_separator()
        self.file_menu.add_command(label="Exit", command=self.try_to_close, underline=0)
        # Alt+F4 comes from the OS on non-Mac platforms, so only bind on Mac.
        if os_detect.on_mac():
            self.file_menu.entryconfigure("Exit", accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=True,
                                                                                 alt_option=False, letter="w",
                                                                                 callback=lambda _: self.try_to_close()))
        else:
            self.file_menu.entryconfigure("Exit", accelerator="Alt+F4")
    def create_edit_menu(self) -> None:
        """
        Create the edit menu.
        :return: None.
        """
        self.edit_menu = tk.Menu(self.menu_bar)
        self.menu_bar.add_cascade(menu=self.edit_menu, label="Edit", underline=0)
        self.edit_menu.add_command(label="Open .cpypmconfig",
                                   command=lambda: self.open_file(str(self.cpypmconfig_path)), underline=6)
        self.edit_menu.add_command(label="Open .cpypmconfig file location",
                                   command=lambda: self.open_file(str(self.cpypmconfig_path.parent)), underline=23)
        self.edit_menu.add_separator()
        self.edit_menu.add_command(label="Open project root file location",
                                   command=lambda: self.open_file(self.cpypmconfig["project_root"]), underline=13)
        self.edit_menu.add_command(label="Copy project root file location",
                                   command=lambda: self.copy_to_clipboard(self.cpypmconfig["project_root"]))
        self.edit_menu.add_separator()
        # The key-bind callbacks re-check the entry state so the shortcut is
        # inert while the entry is disabled.
        self.edit_menu.add_command(label="Save changes", command=self.save_modified, underline=0,
                                   accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=False,
                                                                  alt_option=False, letter="s",
                                                                  callback=lambda _: None if self.edit_menu.entrycget("Save changes", "state") == tk.DISABLED else self.save_modified()))
        self.edit_menu.add_command(label="Discard changes", command=self.discard_modified, underline=0,
                                   accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=False,
                                                                  alt_option=False, letter="d",
                                                                  callback=lambda _: None if self.edit_menu.entrycget("Discard changes", "state") == tk.DISABLED else self.discard_modified()))
    def create_sync_menu(self) -> None:
        """
        Create the sync menu.
        :return: None.
        """
        self.sync_menu = tk.Menu(self.menu_bar)
        self.menu_bar.add_cascade(menu=self.sync_menu, label="Sync", underline=0)
        # The key-bind callback re-checks the entry state so the shortcut is
        # inert while the entry is disabled.
        self.sync_menu.add_command(label="Sync files", command=self.start_sync_thread, underline=0,
                                   accelerator=self.make_key_bind(ctrl_cmd=True, mac_ctrl=False, shift=False,
                                                                  alt_option=False, letter="r",
                                                                  callback=lambda _: None if self.sync_menu.entrycget("Sync files", "state") == tk.DISABLED else self.start_sync_thread()))
    def open_readme(self) -> None:
        """
        Open the README, this may block on slow systems (runs on a worker
        thread - see start_open_readme_thread).
        :return: None.
        """
        self.open_markdown(Path.cwd() / "README.md", convert_to_html=self.convert_to_md_var.get(),
                           download_url="https://raw.githubusercontent.com/UnsignedArduino/CircuitPython-Project-Manager/main/README.md")
        # Re-enable the menu entry once the (possibly slow) open has finished.
        self.disable_open_readme = False
    def start_open_readme_thread(self) -> None:
        """
        Start the open README thread.
        :return: None.
        """
        # Disable the menu entry until open_readme() re-enables it.
        self.disable_open_readme = True
        thread = Thread(target=self.open_readme, args=(), daemon=True)
        logger.debug(f"Starting open README thread {repr(thread)}")
        thread.start()
    def create_help_menu(self) -> None:
        """
        Create the help menu.
        :return: None.
        """
        self.help_menu = tk.Menu(self.menu_bar)
        self.menu_bar.add_cascade(menu=self.help_menu, label="Help", underline=0)
        self.help_menu.add_command(label="Open configuration", command=lambda: self.open_file(str(self.config_path)), underline=5)
        self.help_menu.add_command(label="Open logs", command=lambda: self.open_file(str(Path.cwd() / "log.log")), underline=5)
        self.help_menu.add_separator()
        self.help_menu.add_command(label="Open README.md",
                                   command=self.start_open_readme_thread, underline=5,
                                   accelerator="F1")
        # F1 shortcut is inert while the entry is disabled (README already opening).
        self.bind("<F1>", func=lambda _: None if self.help_menu.entrycget("Open README.md", "state") == tk.DISABLED else self.start_open_readme_thread())
        self.convert_to_md_var = tk.BooleanVar(value=True)
        self.disable_open_readme = False
        self.help_menu.add_checkbutton(label="Convert Markdown to HTML", variable=self.convert_to_md_var, onvalue=True, offvalue=False)
        self.help_menu.add_command(label="Open project on GitHub",
                                   command=lambda: self.open_file("https://github.com/UnsignedArduino/CircuitPython-Project-Manager"),
                                   underline=5)
        self.help_menu.add_command(label="Open issue on GitHub",
                                   command=lambda: self.open_file("https://github.com/UnsignedArduino/CircuitPython-Project-Manager/issues/new"),
                                   underline=5)
    def update_menu_state(self) -> None:
        """
        Update the menu's disabled and enabled items based on whether a project
        is currently open (self.cpypmconfig_path). Runs as the menu bar's
        postcommand, i.e. right before the menu is shown.
        :return: None.
        """
        logger.debug(f"Updating menu state...")
        # New/Open/Open recent only make sense when no project is open yet.
        self.file_menu.entryconfigure("New...",
                                      state=tk.NORMAL if self.cpypmconfig_path is None else tk.DISABLED)
        self.file_menu.entryconfigure("Open...",
                                      state=tk.NORMAL if self.cpypmconfig_path is None else tk.DISABLED)
        self.file_menu.entryconfigure("Open recent",
                                      state=tk.NORMAL if self.cpypmconfig_path is None else tk.DISABLED)
        # Everything below requires an open project.
        self.file_menu.entryconfigure("Close project",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        self.edit_menu.entryconfigure("Open .cpypmconfig",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        self.edit_menu.entryconfigure("Open .cpypmconfig file location",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        self.edit_menu.entryconfigure("Open project root file location",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        self.edit_menu.entryconfigure("Copy project root file location",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        self.edit_menu.entryconfigure("Save changes",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        self.edit_menu.entryconfigure("Discard changes",
                                      state=tk.DISABLED if self.cpypmconfig_path is None else tk.NORMAL)
        # Syncing additionally requires the project to have a sync location set.
        try:
            if self.cpypmconfig_path is None or json.loads(self.cpypmconfig_path.read_text())["sync_location"] is None:
                self.sync_menu.entryconfigure("Sync files", state=tk.DISABLED)
            else:
                self.sync_menu.entryconfigure("Sync files", state=tk.NORMAL)
        except FileNotFoundError:
            # The .cpypmconfig vanished from under us - close the project.
            logger.exception("Uh oh, an exception has occurred!")
            self.close_project()
            mbox.showerror("CircuitPython Project Manager: Error!",
                           "Your project's .cpypmconfig file cannot be accessed, closing project!"
                           "\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
        self.help_menu.entryconfigure("Open README.md", state=tk.DISABLED if self.disable_open_readme else tk.NORMAL)
        self.help_menu.entryconfigure("Convert Markdown to HTML", state=tk.DISABLED if self.disable_open_readme else tk.NORMAL)
    def create_menu(self) -> None:
        """
        Create the menu bar and all its menus.
        :return: None.
        """
        # Disable legacy Tk tear-off menus globally.
        self.option_add("*tearOff", tk.FALSE)
        # update_menu_state runs right before the menu posts, so entries are
        # enabled/disabled according to the current project state.
        self.menu_bar = tk.Menu(self, postcommand=self.update_menu_state)
        self["menu"] = self.menu_bar
        self.create_file_menu()
        self.create_edit_menu()
        self.create_sync_menu()
        self.create_help_menu()
        # No project is open at start-up.
        self.cpypmconfig_path = None
        self.update_menu_state()
def destroy_all_children(self, widget):
"""
Destroy all the children of the widget.
:param widget: The parent of the children you want to destroy.
:return: None.
"""
logger.debug(f"Destroying all children of {repr(widget)}")
for child in widget.winfo_children():
try:
child.destroy()
except tk.TclError:
pass
    def make_title(self, title: str) -> None:
        """
        Make the title's label and entry box.
        :param title: The title of the project.
        :return: None.
        """
        self.title_frame = ttk.Frame(master=self.main_frame)
        self.title_frame.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.title_label = ttk.Label(master=self.title_frame, text="Project title: ")
        self.title_label.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.title_var = tk.StringVar(value=title)
        # Widget widths differ per platform, presumably for font differences.
        if os_detect.on_linux():
            self.title_entry = EntryWithRightClick(master=self.title_frame, width=24, textvariable=self.title_var)
        else:
            self.title_entry = EntryWithRightClick(master=self.title_frame, width=29, textvariable=self.title_var)
        self.title_entry.initiate_right_click_menu()
        self.title_entry.grid(row=0, column=1, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.title_entry, "The title of the opened project.")
    def make_description(self, description: str) -> None:
        """
        Make the description's labels and text box.
        :param description: The description of the project.
        :return: None.
        """
        self.description_frame = ttk.Frame(master=self.main_frame)
        self.description_frame.grid(row=1, column=0, padx=1, pady=1, sticky=tk.NW)
        self.description_label = ttk.Label(master=self.description_frame, text="Project description: ")
        self.description_label.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        # Widget sizes differ per platform, presumably for font differences.
        if os_detect.on_linux():
            self.description_text = TextWithRightClick(master=self.description_frame, width=35, height=11, wrap=tk.WORD)
        else:
            self.description_text = TextWithRightClick(master=self.description_frame, width=31, height=8, wrap=tk.WORD)
        self.description_text.initiate_right_click_menu()
        self.description_text.grid(row=1, column=0, padx=1, pady=1, sticky=tk.NW)
        self.description_text.insert("1.0", description)
        self.add_tooltip(self.description_text, "The description of the opened project.")
    def update_drives(self) -> None:
        """
        Refresh the drive selector's list of connected drives.
        :return: None.
        """
        try:
            # When "Show all drives" is unchecked, only CircuitPython drives
            # are listed (the first argument means "CircuitPython only").
            connected_drives = drives.list_connected_drives(not self.drive_selector_show_all_var.get(),
                                                            Path(self.load_key("unix_drive_mount_point")))
        except OSError:
            logger.error(f"Could not get connected drives!\n\n{traceback.format_exc()}")
            mbox.showerror("CircuitPython Project Manager: ERROR!",
                           "Oh no! An error occurred while getting a list of connected drives!"
                           "\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
            return
        logger.debug(f"Connected drives: {repr(connected_drives)}")
        self.drive_selector_combobox["values"] = connected_drives
    def make_drive_selector(self, drive: Path) -> None:
        """
        Make the drive selector (combobox, refresh button, show-all checkbox).
        :param drive: A pathlib.Path to the drive, or None for no pre-selection.
        :return: None.
        """
        self.drive_selector_frame = ttk.Frame(master=self.main_frame)
        self.drive_selector_frame.grid(row=2, column=0, columnspan=4, padx=1, pady=1, sticky=tk.NW)
        self.drive_selector_label = ttk.Label(master=self.drive_selector_frame, text="Drive: ")
        self.drive_selector_label.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.drive_selector_var = tk.StringVar()
        if drive is not None:
            self.drive_selector_var.set(str(drive))
        # Widget widths differ per platform, presumably for font differences.
        if os_detect.on_linux():
            self.drive_selector_combobox = ComboboxWithRightClick(master=self.drive_selector_frame, width=44, textvariable=self.drive_selector_var)
        else:
            self.drive_selector_combobox = ComboboxWithRightClick(master=self.drive_selector_frame, width=48, textvariable=self.drive_selector_var)
        self.drive_selector_combobox.initiate_right_click_menu()
        self.drive_selector_combobox.grid(row=0, column=1, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.drive_selector_combobox, "The CircuitPython device to sync to.")
        self.drive_selector_refresh_btn = ttk.Button(master=self.drive_selector_frame, text="↻", width=2, command=self.update_drives)
        self.drive_selector_refresh_btn.grid(row=0, column=2, padx=1, pady=0, sticky=tk.NW)
        self.add_tooltip(self.drive_selector_refresh_btn, "Refresh the list of connected drives.")
        self.drive_selector_show_all_var = tk.BooleanVar(value=False)
        self.drive_selector_show_all_checkbtn = ttk.Checkbutton(master=self.drive_selector_frame, text="Show all drives?",
                                                                variable=self.drive_selector_show_all_var, command=self.update_drives)
        self.drive_selector_show_all_checkbtn.grid(row=0, column=3, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.drive_selector_show_all_checkbtn, "Whether to show all drives in the list of connected drives instead of just CircuitPython drives.")
        # Populate the combobox with the currently connected drives.
        self.update_drives()
    def update_listbox_context(self):
        """
        Update the right-click context menu for the files to sync menu.
        :return: None.
        """
        # "Delete" only makes sense when something is actually selected.
        self.to_sync_listbox.right_click_menu.entryconfigure("Delete",
                                                             state=tk.NORMAL if len(self.to_sync_listbox.curselection()) > 0 else tk.DISABLED
                                                             )
    def make_file_sync_listbox(self, to_sync: list[str], project_root: Path) -> None:
        """
        Create the listbox that holds the files and directories to sync.
        :param to_sync: A list of str objects of stuff to sync.
        :param project_root: A pathlib.Path of the project root.
        :return: None.
        """
        self.to_sync_frame = ttk.Frame(master=self.main_frame)
        self.to_sync_frame.grid(row=0, column=1, rowspan=2, padx=1, pady=1, sticky=tk.NW)
        self.to_sync_label = ttk.Label(master=self.to_sync_frame, text="Files and directories to sync: ")
        self.to_sync_label.grid(row=0, column=0, columnspan=3, padx=1, pady=1, sticky=tk.NW)
        self.to_sync_var = tk.StringVar(value=to_sync)
        # Listbox heights differ per platform, presumably for font differences.
        if os_detect.on_linux():
            self.to_sync_listbox = ListboxWithRightClick(master=self.to_sync_frame, height=12, width=20, listvariable=self.to_sync_var)
        else:
            self.to_sync_listbox = ListboxWithRightClick(master=self.to_sync_frame, height=10, width=20, listvariable=self.to_sync_var)
        # The context-menu callback re-enables "Delete" only with a selection.
        self.to_sync_listbox.initiate_right_click_menu(disable=["Copy", "Cut", "Paste", "Delete", "Select all"],
                                                       callback=self.update_listbox_context)
        self.to_sync_listbox.right_click_menu.entryconfigure("Delete", command=self.remove_thing_to_sync)
        self.to_sync_listbox.right_click_menu.add_separator()
        self.to_sync_listbox.right_click_menu.add_command(label="Add file", command=self.add_file_to_sync)
        self.to_sync_listbox.right_click_menu.add_command(label="Add directory", command=self.add_directory_to_sync)
        self.to_sync_listbox.grid(row=1, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.to_sync_listbox, "The files and directories to sync to the CircuitPython device.")
        self.to_sync_scrollbar = ttk.Scrollbar(master=self.to_sync_frame, command=self.to_sync_listbox.yview)
        self.to_sync_scrollbar.grid(row=1, column=1, padx=0, pady=1, sticky=tk.NSEW)
        self.to_sync_listbox.config(yscrollcommand=self.to_sync_scrollbar.set)
    def update_file_sync_buttons(self) -> None:
        """
        Update the file sync buttons. Reschedules itself every 100 ms.
        :return: None.
        """
        try:
            # The remove button is only useful with a selection.
            self.to_sync_remove_btn.config(state=tk.NORMAL if len(self.to_sync_listbox.curselection()) > 0 else tk.DISABLED)
        except tk.TclError:
            # The widget was destroyed - stop polling.
            pass
        else:
            self.after(ms=100, func=self.update_file_sync_buttons)
    def add_file_to_sync(self) -> None:
        """
        Opens a file browser to select a file to sync.

        The chosen file must live inside the project root; it is stored
        relative to the root in the in-memory config (save_modified persists
        it to disk later).

        :return: None.
        """
        logger.debug("Opening file to sync...")
        path = fd.askopenfilename(initialdir=self.cpypmconfig["project_root"],
                                  title="CircuitPython Project Manager: Select a file to sync")
        if path:
            path = Path(path)
            logger.debug(f"Returned valid path! Path is {repr(path)}")
            try:
                # relative_to() raises ValueError when the file is outside the project root
                relative_path = path.relative_to(Path(self.cpypmconfig["project_root"]))
            except ValueError:
                logger.warning(f"{repr(path)} is not in the project!")
                mbox.showerror("CircuitPython Project Manager: Error",
                               "That file is not in the project!"
                               "\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
                return
            logger.debug(f"Relative path is {repr(relative_path)}")
            logger.debug(f"Files and directories to sync: {repr(self.cpypmconfig['files_to_sync'])}")
            if str(relative_path) in self.cpypmconfig["files_to_sync"]:
                logger.warning(f"{repr(relative_path)} is already in {repr(self.cpypmconfig['files_to_sync'])}")
                mbox.showwarning("CircuitPython Project Manager: Warning",
                                 "That file has already been added!")
            else:
                self.cpypmconfig["files_to_sync"].append(str(relative_path))
                self.to_sync_var.set(self.cpypmconfig["files_to_sync"])
                # scroll the listbox so the newly added entry is visible
                self.to_sync_listbox.see(len(self.cpypmconfig["files_to_sync"]) - 1)
        else:
            logger.debug("User canceled adding file to sync!")
def add_directory_to_sync(self) -> None:
"""
Opens a file browser to select a directory to sync.
:return: None.
"""
logger.debug("Opening file to sync...")
path = fd.askdirectory(initialdir=self.cpypmconfig["project_root"],
title="CircuitPython Project Manager: Select a directory to sync")
if path:
path = Path(path)
logger.debug(f"Returned valid path! Path is {repr(path)}")
try:
relative_path = path.relative_to(Path(self.cpypmconfig["project_root"]))
except ValueError:
logger.warning(f"{repr(path)} is not in the project!")
mbox.showerror("CircuitPython Project Manager: Error",
"That directory is not in the project!"
"\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
return
logger.debug(f"Relative path is {repr(relative_path)}")
logger.debug(f"Files and directories to sync: {repr(self.cpypmconfig['files_to_sync'])}")
if str(relative_path) in self.cpypmconfig["files_to_sync"]:
logger.warning(f"{repr(relative_path)} is already in {repr(self.cpypmconfig['files_to_sync'])}")
mbox.showwarning("CircuitPython Project Manager: Warning",
"That directory has already been added!")
else:
self.cpypmconfig["files_to_sync"].append(str(relative_path))
self.to_sync_var.set(self.cpypmconfig["files_to_sync"])
self.to_sync_listbox.see(len(self.cpypmconfig["files_to_sync"]) - 1)
else:
logger.debug("User canceled adding directory to sync!")
def remove_thing_to_sync(self) -> None:
"""
Removes the select item from the sync list.
:return: None.
"""
logger.debug("Asking user to confirm removal...")
item = self.to_sync_listbox.get(self.to_sync_listbox.curselection())
if mbox.askokcancel("CircuitPython Project Manager: Confirm",
f"Are you sure you want to remove {repr(item)} from being synced?"):
logger.debug(f"Removing item {repr(item)} (at index {repr(self.to_sync_listbox.curselection()[0])}")
self.cpypmconfig["files_to_sync"].pop(self.to_sync_listbox.curselection()[0])
self.to_sync_var.set(self.cpypmconfig["files_to_sync"])
else:
logger.debug(f"User canceled removal!")
    def make_file_sync_buttons(self) -> None:
        """
        Create the buttons next to the listbox that holds the files and directories to sync.

        :return: None.
        """
        self.right_frame = ttk.Frame(master=self.to_sync_frame)
        self.right_frame.grid(row=1, column=2, padx=1, pady=1, sticky=tk.NW)
        self.to_sync_add_file_btn = ttk.Button(master=self.right_frame, text="Add file", width=12,
                                               command=self.add_file_to_sync)
        self.to_sync_add_file_btn.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.to_sync_add_file_btn, "Add a new file via the file selector.")
        self.to_sync_add_directory_btn = ttk.Button(master=self.right_frame, text="Add directory", width=12,
                                                    command=self.add_directory_to_sync)
        self.to_sync_add_directory_btn.grid(row=1, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.to_sync_add_directory_btn, "Add a new directory via the directory selector.")
        self.to_sync_remove_btn = ttk.Button(master=self.right_frame, text="Remove", width=12,
                                             command=self.remove_thing_to_sync)
        self.to_sync_remove_btn.grid(row=2, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.to_sync_remove_btn, "Remove a file/directory from being synced.")
        # start the polling loop that keeps the remove button's state in sync
        # with the listbox selection
        self.update_file_sync_buttons()
    def save_modified(self) -> None:
        """
        Save the configuration file.

        Disables the UI while writing; if the .cpypmconfig file is gone the
        project is closed and an error dialog is shown.

        :return: None.
        """
        # lock the UI so nothing changes mid-save
        self.set_childrens_state(frame=self.main_frame, enabled=False)
        self.disable_closing = True
        self.edit_menu.entryconfigure("Save changes", state=tk.DISABLED)
        self.edit_menu.entryconfigure("Discard changes", state=tk.DISABLED)
        logger.debug(f"Saving .cpypmconfig to {repr(self.cpypmconfig_path)}")
        self.cpypmconfig["project_name"] = self.title_var.get()
        # NOTE(review): Text.get("1.0", tk.END) includes a trailing newline,
        # so the description gains "\n" on every save - confirm intended.
        self.cpypmconfig["description"] = self.description_text.get("1.0", tk.END)
        self.cpypmconfig["sync_location"] = self.drive_selector_combobox.get()
        try:
            self.cpypmconfig_path.write_text(json.dumps(self.cpypmconfig, indent=4))
        except FileNotFoundError:
            logger.exception("Uh oh, an exception has occurred!")
            self.close_project()
            mbox.showerror("CircuitPython Project Manager: Error!",
                           "Your project's .cpypmconfig file cannot be accessed, closing project!"
                           "\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
        else:
            # success - unlock the UI again
            self.set_childrens_state(frame=self.main_frame, enabled=True)
            self.disable_closing = False
            self.edit_menu.entryconfigure("Save changes", state=tk.NORMAL)
            self.edit_menu.entryconfigure("Discard changes", state=tk.NORMAL)
    def discard_modified(self) -> None:
        """
        Discard modified configuration file.

        Asks for confirmation, then rebuilds the GUI from the on-disk
        .cpypmconfig; closes the project if the file is missing.

        :return: None.
        """
        if not mbox.askokcancel("CircuitPython Project Manager: Confirm",
                                "Are you sure you want to discard all changes?"):
            logger.debug("User canceled discarding all changes!")
            return
        try:
            logger.debug("Discarding all changes!")
            # rebuilding the main GUI re-reads .cpypmconfig from disk
            self.update_main_gui()
        except FileNotFoundError:
            logger.exception("Uh oh, an exception has occurred!")
            self.close_project()
            mbox.showerror("CircuitPython Project Manager: Error!",
                           "Your project's .cpypmconfig file cannot be accessed, closing project!"
                           "\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
def sync(self) -> None:
"""
Sync the files - this will block.
:return: None.
"""
try:
project.sync_project(self.cpypmconfig_path)
except ValueError:
logger.exception("Uh oh, an exception has occurred!")
mbox.showerror("CircuitPython Project Manager: Error!",
"The sync location has not been set!"
"\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
except Exception as _:
mbox.showerror("CircuitPython Project Manager: Error!",
"Uh oh! An unknown exception occurred!"
"\n\n" + (traceback.format_exc() if self.show_traceback() else ""))
self.set_childrens_state(self.main_frame, True)
self.disable_closing = False
self.sync_menu.entryconfigure("Sync files", state=tk.NORMAL)
self.dismiss_dialog(self.sync_dialog)
    def start_sync_thread(self) -> None:
        """
        Start the sync files thread.

        Locks the UI, shows a progress dialog, and runs sync() on a daemon
        thread so the GUI stays responsive.

        :return: None.
        """
        self.set_childrens_state(self.main_frame, False)
        self.disable_closing = True
        self.sync_menu.entryconfigure("Sync files", state=tk.DISABLED)
        self.sync_dialog = self.create_dialog("CircuitPython Project Manager: Syncing files...")
        # NOTE(review): wm_protocol with a None callback queries the current
        # binding rather than disabling close - confirm this blocks closing.
        self.sync_dialog.protocol("WM_DELETE_WINDOW", None)
        self.sync_label = ttk.Label(master=self.sync_dialog, text="Syncing files...")
        self.sync_label.grid(row=0, column=0, padx=1, pady=1, sticky=tk.NW)
        thread = Thread(target=self.sync, args=(), daemon=True)
        logger.debug(f"Starting sync thread {repr(thread)}")
        thread.start()
    def check_sync_buttons(self) -> None:
        """
        Poll the config and only enable the sync button when a sync location
        is set and actually exists; re-schedules itself every 100 ms until
        the widget is destroyed (tk.TclError).

        :return: None.
        """
        try:
            self.sync_files_btn.config(
                state=tk.DISABLED if not self.cpypmconfig["sync_location"] or not Path(self.cpypmconfig["sync_location"]).exists() else tk.NORMAL
            )
        except tk.TclError:
            # widget destroyed - stop polling
            pass
        else:
            self.after(ms=100, func=self.check_sync_buttons)
    def make_save_and_sync_buttons(self) -> None:
        """
        Create the rest of the buttons, like the save and sync buttons.

        :return: None.
        """
        self.save_config_btn = ttk.Button(master=self.right_frame, text="Save", width=12, command=self.save_modified)
        self.save_config_btn.grid(row=4, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.save_config_btn, "Save the .cpypmconfig file to disk.")
        self.discard_config_btn = ttk.Button(master=self.right_frame, text="Discard", width=12, command=self.discard_modified)
        self.discard_config_btn.grid(row=5, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.discard_config_btn, "Discard changes and reload the .cpypmconfig file from disk")
        self.sync_files_btn = ttk.Button(master=self.right_frame, text="Sync", width=12, command=self.start_sync_thread)
        self.sync_files_btn.grid(row=6, column=0, padx=1, pady=1, sticky=tk.NW)
        self.add_tooltip(self.sync_files_btn, "Sync the files to the CircuitPython drive.")
        # start the polling loop that enables/disables the sync button
        self.check_sync_buttons()
    def update_main_gui(self) -> None:
        """
        Update the main GUI.

        Tears down all widgets in the main frame and schedules a rebuild.

        :return: None.
        """
        self.disable_closing = True
        self.update_menu_state()
        logger.debug("Updating main GUI...")
        self.destroy_all_children(widget=self.main_frame)
        # rebuild on a short delay - presumably to let widget destruction
        # settle before re-creating everything (TODO confirm)
        self.after(ms=200, func=self.create_main_gui)
    def create_main_gui(self) -> None:
        """
        Create the main GUI.

        Shows a placeholder message when no project is open; otherwise
        re-reads .cpypmconfig from disk and builds all the project widgets.

        :return: None.
        """
        logger.debug(f"self.cpypmconfig_path: {repr(self.cpypmconfig_path)}")
        if self.cpypmconfig_path is None:
            logger.info("No project is open!")
            ttk.Label(
                master=self.main_frame,
                text="No project is open! Use the file menu to create\na new project or open an existing project!"
            ).grid(row=0, column=0, sticky=tk.NW)
        else:
            logger.info("Project is open - (re)loading everything!")
            logger.debug(f"Parsing {repr(self.cpypmconfig_path)}")
            self.cpypmconfig = json.loads(self.cpypmconfig_path.read_text())
            self.make_title(self.cpypmconfig["project_name"])
            self.make_description(self.cpypmconfig["description"])
            self.make_drive_selector(self.cpypmconfig["sync_location"])
            self.make_file_sync_listbox(self.cpypmconfig["files_to_sync"], Path(self.cpypmconfig["project_root"]))
            self.make_file_sync_buttons()
            # separator between the file-sync buttons and the save/sync buttons
            ttk.Separator(master=self.right_frame, orient=tk.HORIZONTAL).grid(row=3, column=0, padx=1, pady=1, sticky=tk.NW + tk.E)
            self.make_save_and_sync_buttons()
        self.disable_closing = False
    def make_main_gui(self, cpypmconfig_path: Path = None) -> None:
        """
        Make the main GUI stuffs.

        :param cpypmconfig_path: A pathlib.Path to the .cpypmconfig file, defaults to None.
        :return: None.
        """
        self.main_frame = ttk.Frame(master=self)
        self.main_frame.grid(row=0, column=0, sticky=tk.NW)
        self.cpypmconfig_path = cpypmconfig_path
        # update_main_gui builds the widgets (or the "no project" message)
        self.update_main_gui()
    def create_gui(self, cpypmconfig_path: Path = None) -> None:
        """
        Create the GUI.

        :param cpypmconfig_path: A pathlib.Path to the .cpypmconfig file, defaults to None.
        :return: None.
        """
        logger.debug("Creating GUI...")
        if os_detect.on_linux():
            # the default Tk theme looks dated on Linux, so switch to "clam" there
            self.global_style = ttk.Style()
            self.global_style.theme_use("clam")
        self.create_config()
        self.create_menu()
        self.make_main_gui(cpypmconfig_path)
        if cpypmconfig_path is not None:
            # remember this project in the "recent projects" list
            self.add_recent_project(cpypmconfig_path)
    def run(self, cpypmconfig_path: Path = None) -> None:
        """
        Run the GUI, this will block.

        :param cpypmconfig_path: A pathlib.Path to the .cpypmconfig file, defaults to None.
        :return: None.
        """
        self.create_gui(cpypmconfig_path)
        # bring the window to the front before entering the event loop
        self.lift()
        self.minsize(width=200, height=100)
        self.mainloop()
    def __exit__(self, err_type=None, err_value=None, err_traceback=None):
        """
        Context-manager exit: report any fatal error in a dialog and log it.

        :param err_type: The exception type, or None if no error occurred.
        :param err_value: The exception value.
        :param err_traceback: The traceback object.
        """
        if err_type is not None:
            mbox.showerror("CircuitPython Project Manager: ERROR!",
                           "Oh no! A fatal error has occurred!\n"
                           f"Error type: {err_type}\n"
                           f"Error value: {err_value}\n"
                           f"Error traceback: {err_traceback}\n\n" + traceback.format_exc())
            logger.exception("Uh oh, a fatal error has occurred!", exc_info=True)
| {"/main.py": ["/gui.py"], "/gui.py": ["/gui_tools/clickable_label.py"]} |
65,255 | UnsignedArduino/CircuitPython-Project-Manager | refs/heads/main | /project_tools/project.py | """
This module handles CircuitPython projects.
-----------
Classes list:
No classes!
-----------
Functions list:
- replace_sus_chars(file_name: str) -> str
- make_new_project(parent_directory: Path, project_name: str = "Untitled", project_description: str = "",
autogen_gitignore: bool = True,
dfl_cpy_hierarchy: Path = (Path.cwd() / "default_circuitpython_hierarchy")) -> None
- sync_project(cpypm_config_path: Path) -> None
"""
from pathlib import Path
import shutil
import re
from json import loads as load_json_string, dumps as dump_json_string
from project_tools.create_logger import create_logger
import logging
logger = create_logger(name=__name__, level=logging.DEBUG)
def replace_sus_chars(file_name: str) -> str:
    """
    Replace suspicious characters in file name - found at https://stackoverflow.com/a/13593932/10291933

    Word characters, hyphens, underscores, dots, and spaces are kept; every
    other character is replaced with an underscore.

    :param file_name: A str - the file name to check.
    :return: A str - the file name cleaned.
    """
    # fixed: raw string avoids the invalid-escape-sequence warning for "\w"/"\-"
    return re.sub(r"[^\w\-_. ]", "_", file_name)
def make_new_project(parent_directory: Path, project_name: str = "Untitled", project_description: str = "",
                     autogen_gitignore: bool = True,
                     dfl_cpy_hierarchy: Path = (Path.cwd() / "default_circuitpython_hierarchy")) -> Path:
    """
    Make a new CircuitPython project.

    :param parent_directory: A pathlib.Path - where to put the project.
    :param project_name: A str - what to call the project - defaults to "Untitled"
    :param project_description: A str - a description of the project - defaults to ""
    :param autogen_gitignore: A bool - whether to auto-generate a .gitignore for the project - defaults to True.
    :param dfl_cpy_hierarchy: A pathlib.Path - where we copy the base project files from - defaults to
     Path.cwd() / "default_circuitpython_hierarchy"
    :raise FileExistsError: Raises FileExistsError if a CircuitPython project exists under the same name.
    :return: A pathlib.Path to the .cpypmconfig file.
    """
    new_path = parent_directory / replace_sus_chars(project_name)
    # fixed: check for a name collision *before* copying, so a failed create
    # no longer leaves a stray copy of the default hierarchy behind
    if new_path.exists():
        raise FileExistsError(f"{repr(new_path)} exists!")
    project_path = parent_directory / dfl_cpy_hierarchy.name
    logger.debug(f"Copying from {repr(dfl_cpy_hierarchy)} to {repr(project_path)}")
    shutil.copytree(dfl_cpy_hierarchy, project_path)
    logger.debug(f"Renaming {repr(project_path)} to {repr(new_path)}")
    project_path.rename(new_path)
    cpypm_path = new_path / ".cpypmconfig"
    logger.debug(f"Path to .cpypmconfig is {repr(cpypm_path)}")
    # fill the template config copied from the default hierarchy
    cpypm_config = load_json_string(cpypm_path.read_text())
    cpypm_config["project_name"] = project_name
    cpypm_config["description"] = project_description
    cpypm_config["project_root"] = str(new_path)
    cpypm_path.write_text(dump_json_string(cpypm_config, indent=4))
    logger.debug(f"Filled .cpypmconfig")
    if autogen_gitignore:
        logger.debug("Auto-generating .gitignore")
        gitignore_path = new_path / ".gitignore"
        logger.debug(f"Path to .gitignore is {repr(gitignore_path)}")
        # ignore macOS Finder/Spotlight droppings and the CircuitPython boot log
        gitignore = ""
        gitignore += ".fseventsd/*\n"
        gitignore += ".metadata_never_index\n"
        gitignore += ".Trashes\n"
        gitignore += "boot_out.txt\n"
        gitignore_path.write_text(gitignore)
        logger.debug(f"Wrote .gitignore")
    logger.info(f"Made new project at {repr(new_path)}")
    return cpypm_path
def sync_project(cpypm_config_path: Path) -> None:
    """
    Sync a project to the CircuitPython device.

    Files are copied byte-for-byte; directories are removed on the target
    and re-copied whole.

    :param cpypm_config_path: A pathlib.Path - the path to the .cpypmconfig file.
    :raise ValueError: Raises ValueError if the sync location of the file hasn't been set.
    :return: None.
    """
    cpypm_config = load_json_string(cpypm_config_path.read_text())
    to_sync = [Path(p) for p in cpypm_config["files_to_sync"]]
    project_root_path = Path(cpypm_config["project_root"])
    sync_location_path = cpypm_config["sync_location"]
    if sync_location_path is None:
        raise ValueError("sync_location has not been filled out!")
    else:
        sync_location_path = Path(sync_location_path).absolute().resolve()
    logger.info(f"Found {len(to_sync)} items to sync!")
    logger.debug(f"Sync location is {repr(sync_location_path)}")
    logger.debug(f"Project root path is {repr(project_root_path)}")
    for path in to_sync:
        # paths in the config are relative to the project root
        new_path = sync_location_path / path
        path = (project_root_path / path)
        logger.debug(f"Syncing {repr(path)} to {repr(new_path)}")
        if path.is_file():
            # fixed: make sure the destination's parent directories exist,
            # otherwise write_bytes fails for nested files
            new_path.parent.mkdir(parents=True, exist_ok=True)
            new_path.write_bytes(path.read_bytes())
        else:
            # directories are replaced wholesale: delete then re-copy
            if new_path.exists():
                shutil.rmtree(new_path, ignore_errors=True)
            shutil.copytree(path, new_path)
| {"/main.py": ["/gui.py"], "/gui.py": ["/gui_tools/clickable_label.py"]} |
65,261 | summunity/DjangoReact_CLI | refs/heads/main | /src/apps/interface.py |
from ..format_cmd import format_cmd_prompt
def launch_app( state, config ):
    """ launch app state: prompt for an app from the config and start it.

    :param state: current menu state (int)
    :param config: pandas DataFrame of app configurations
    :return: (state, process) tuple; process is None when nothing launched
    """
    command_str = """
    Which application do you want to launch:
    """
    from .process import launch_app as launch_process
    # only apps flagged launchable appear in the menu
    apps = config[config['launch'] == True]
    for i in range(0, len(apps)):
        app_name = apps.loc[i]['title']
        command_str += '%s: %s\n' % (i+1, app_name)
    command_str += 'b: back\n'
    command_str = format_cmd_prompt(command_str)
    user_input = input(command_str)
    try : user_input = int(user_input) - 1
    except ValueError:
        if user_input == 'b' or user_input == 'back': state = 0
        else: print( 'Invalid Input : %s' % user_input)
        return state, None
    # fixed: was "user_input > len(apps)", which let the off-by-one value
    # len(apps) and negative numbers through to apps.loc and raised KeyError
    if user_input < 0 or user_input >= len(apps):
        print( 'Invalid Input : %s' % user_input)
        return state, None
    app = launch_process( apps.loc[user_input] )
    # set the state to return to the main menu
    state = 0
    return state, app
def list_apps( state, active_threads ):
    """ prints a list of all active App threads/processes """
    command_str = """
    Active Apps:
    """
    # number the entries starting from 1, matching the other menus
    for index, worker in enumerate(active_threads):
        command_str += '%s: %s\n' % (index + 1, worker.name)
    print( format_cmd_prompt(command_str) )
    # return to the main menu
    return 0
def kill_app( state, active_threads ):
    """ prompt for an active app and terminate its process.

    :param state: current menu state (int)
    :param active_threads: list of running processes/threads
    :return: (state, active_threads) with the killed entry removed
    """
    from ..thread import kill_process
    command_str = """
    Active Apps:
    """
    for i in range(0, len(active_threads)):
        app_name = active_threads[i].name
        command_str += '%s: %s\n' % (i+1, app_name)
    command_str += 'b: back\n'
    command_str = format_cmd_prompt(command_str)
    user_input = input(command_str)
    try : user_input = int(user_input) - 1
    except ValueError:
        if user_input == 'b' or user_input == 'back': state = 0
        else: print( 'Invalid Input : %s' % user_input)
        return state, active_threads
    # fixed: was "user_input > len(active_threads)", which let the off-by-one
    # value len(...) and negative numbers through and raised IndexError in pop
    if user_input < 0 or user_input >= len(active_threads):
        print( 'Invalid Input : %s' % user_input)
        return state, active_threads
    proc = active_threads.pop(user_input)
    kill_process(proc)
    # set the state to return to the main menu
    state = 0
    return state, active_threads
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,262 | summunity/DjangoReact_CLI | refs/heads/main | /src/thread.py |
from time import sleep
def launching_func( app, commands ):
    """ Run each command string in the app's directory, one second apart.

    Executed inside a child process (see launch_thread), so the chdir does
    not affect the parent process.

    :param app: mapping with a 'path' key - the working directory
    :param commands: list of command strings to run in order
    """
    import os
    import subprocess
    os.chdir(app['path'])
    for cmd in commands:
        print( 'command', cmd)
        # NOTE(review): subprocess.call with a plain string only resolves the
        # executable this way on Windows; on POSIX it needs a list or
        # shell=True - confirm target platform.
        subprocess.call(cmd)
        sleep(1)
def launch_thread( app, commands ):
    """ Spawn a child process that runs the given commands for the app.

    :param app: mapping with 'path' and 'title' keys
    :param commands: list of command strings passed to launching_func
    :return: the started multiprocessing.Process
    """
    import multiprocessing
    worker = multiprocessing.Process(
        target=launching_func,
        args=(app, commands),
        name=app['title'])
    worker.start()
    return worker
# proc.terminate()
def kill_process( proc ):
    """ terminate a child process started by launch_thread (sends SIGTERM) """
    proc.terminate()
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,263 | summunity/DjangoReact_CLI | refs/heads/main | /cli.py | """
Diamond Cronjob
=================
Event Detection based on data stored in Diamond
:Author: Nik Sumikawa
:Date: Nov 3, 2020
"""
import logging
log = logging.getLogger(__name__)
import pandas as pd
from src.apps.interface import *
from src.git.interface import *
import os
class CLI:
    """
    Interactive command-line menu for launching apps and running git
    pull/push on the projects listed in config.json (next to this script).

    States: -1 quit, 0 main menu, 1 launch app, 2 list active apps,
    3 kill app, 4 git pull, 5 git push.
    """

    def __init__(self):
        self.state = 0                # current menu state
        self.active_threads = []      # processes/threads started by this CLI
        # config.json lives next to this script, regardless of the cwd
        path = os.path.dirname(os.path.realpath(__file__))
        self.config = pd.read_json('%s/config.json' % path)
        self.run()

    def run( self ):
        """ Main loop: dispatch on self.state until quit (state == -1). """
        while True:
            try:
                if self.state == -1 : break
                if self.state == 0 : self.initial_state()
                # deliberately not elif: after initial_state picks a state,
                # the matching handler runs in the same pass
                if self.state == 1 :
                    self.state, app = launch_app(self.state, self.config)
                    if app is not None: self.active_threads.append( app )
                if self.state == 2 : self.state = list_apps(self.state, self.active_threads)
                if self.state == 3 :
                    self.state, self.active_threads = kill_app(self.state, self.active_threads)
                if self.state == 4 :
                    self.state, app = git('pull', self.state, self.config)
                if self.state == 5 :
                    self.state, app = git('push', self.state, self.config)
            except KeyboardInterrupt:
                break

    def initial_state( self ):
        """ Show the main menu and set self.state from the user's choice. """
        command_str = """
        What do you want to do:
        1: Launch app
        2: Active apps
        3: disable app
        4: Update project (git pull)
        5: Commit project (git push)
        q: quit
        """
        command_str = self.format_cmd_prompt(command_str)
        user_input = input(command_str)
        try : user_input = int(user_input)
        except ValueError:
            if user_input == 'q' or user_input == 'quit': self.state = -1
            else: print( 'Invalid Input : %s' % user_input)
            return
        # fixed: was "> 5" only, which let 0 and negative numbers through -
        # typing "-1" would silently quit
        if user_input < 1 or user_input > 5:
            print( 'Invalid Input : %s' % user_input)
            return
        self.state = user_input

    def git_pull( self, app ):
        """ run "git pull" for the app in a background thread """
        from cmd_thread import CmdThread
        cmd = 'git pull'
        thread = CmdThread(app, [cmd])
        thread.start()
        thread.name = 'Launch-%s' % app['title']
        self.active_threads.append( thread )

    def git_push( self, app ):
        """ commit everything and push, with a user-supplied message """
        from cmd_thread import CmdThread
        command_str = "Commit Message: \n"
        user_input = input(command_str)
        # NOTE(review): the message is interpolated unquoted - multi-word
        # messages will not survive the shell split; confirm intended
        cmd = [
            'git add -A',
            'git commit -m %s' % user_input,
            'git push'
        ]
        thread = CmdThread(app, cmd)
        thread.start()
        thread.name = 'Launch-%s' % app['title']
        self.active_threads.append( thread )

    def format_cmd_prompt( self, cmd ):
        """ strip leading whitespace from every line of a prompt string """
        formatted = ''
        for line in cmd.split('\n'):
            formatted += line.lstrip() + '\n'
        return formatted
if __name__ == "__main__":
    # Start the interactive CLI; it blocks until the user quits.
    CLI()
65,264 | summunity/DjangoReact_CLI | refs/heads/main | /src/apps/process.py |
from ..thread import launch_thread
def launch_app( app ):
    """ launch application

    Starts a Django app in a child process and returns that process.
    The react branch is a stub: it only prints the would-be command and
    returns None, as does any unrecognized app type.
    """
    app_type = app['type']
    if app_type == 'django':
        command = 'python manage.py runserver 0.0.0.0:%s' % app['port']
        return launch_thread(app, [command])
    if app_type == 'react':
        command = 'npm run export PORT=%s react-scripts start' % app['port']
        # subprocess.call(cmd)
        # subprocess.check_output(cmd)
        print( command )
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,265 | summunity/DjangoReact_CLI | refs/heads/main | /src/format_cmd.py |
def format_cmd_prompt( cmd ):
    """ Return *cmd* with leading whitespace stripped from every line.

    Every line - including the last, even when empty - ends up
    newline-terminated in the result.
    """
    stripped_lines = (line.lstrip() for line in cmd.split('\n'))
    return '\n'.join(stripped_lines) + '\n'
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,266 | summunity/DjangoReact_CLI | refs/heads/main | /src/git/interface.py |
from ..format_cmd import format_cmd_prompt
from .process import *
def git( type, state, config ):
    """ git pull/push state: prompt for an app and run the git operation.

    :param type: 'pull' or 'push'
    :param state: current menu state (int)
    :param config: pandas DataFrame of app configurations
    :return: (state, process) tuple; process is None when nothing ran
    """
    # fixed: the prompt always said "pull", even when pushing
    command_str = """
    Which application do you want to %s:
    """ % type
    for i in range(0, len(config)):
        app_name = config.loc[i]['title']
        command_str += '%s: %s\n' % (i+1, app_name)
    command_str += 'b: back\n'
    command_str = format_cmd_prompt(command_str)
    user_input = input(command_str)
    try : user_input = int(user_input) - 1
    except ValueError:
        if user_input == 'b' or user_input == 'back': state = 0
        else: print( 'Invalid Input : %s' % user_input)
        return state, None
    # fixed: was "user_input > len(config)", which let the off-by-one value
    # len(config) and negative numbers through to config.loc
    if user_input < 0 or user_input >= len(config):
        print( 'Invalid Input : %s' % user_input)
        return state, None
    if type == 'pull':
        app = git_pull( config.loc[user_input] )
    elif type == 'push':
        app = git_push( config.loc[user_input] )
    else:
        print( 'Invalid type : %s' % type)
        return state, None
    # set the state to return to the main menu
    state = 0
    return state, app
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,267 | summunity/DjangoReact_CLI | refs/heads/main | /cmd_thread.py |
import threading
import time
import ctypes
class CmdThread(threading.Thread):
    """
    Thread that cd's into an app's directory and runs shell commands in order.

    raise_exception() asynchronously injects SystemExit into the thread to
    stop it (CPython-specific, via PyThreadState_SetAsyncExc).
    """

    def __init__(self, app, commands):
        """
        :param app: mapping with a 'path' key - the working directory
        :param commands: list of command strings to run in order
        """
        threading.Thread.__init__(self)
        self.app = app
        self.commands = commands

    def run(self):
        """ Target function: run every command in the app's directory. """
        import subprocess
        import os
        # NOTE(review): os.chdir changes the *whole process* cwd, not just
        # this thread - concurrent CmdThreads will race on it.
        os.chdir(self.app['path'])
        try:
            for cmd in self.commands:
                subprocess.call(cmd)
        finally:
            print('ended')
            return

    def get_id( self ):
        """ Return this thread's id, for PyThreadState_SetAsyncExc. """
        if hasattr(self, '_thread_id'):
            return self._thread_id
        # fall back to scanning the interpreter's active-thread table
        for id, thread in threading._active.items():
            if thread is self:
                return id

    def raise_exception(self):
        """ Ask the interpreter to raise SystemExit inside this thread. """
        thread_id = self.get_id()
        res = ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, ctypes.py_object(SystemExit))
        # res == 1 means success; res > 1 means the exception was set on more
        # than one thread and must be undone.  fixed: the original tested
        # "res >= 1", which cancelled the exception even on success.
        if res > 1:
            ctypes.pythonapi.PyThreadState_SetAsyncExc(thread_id, 0)
            print('Exception raise failure')
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,268 | summunity/DjangoReact_CLI | refs/heads/main | /src/git/process.py |
from ..thread import launch_thread
def git_pull( app ):
    """ Run "git pull <remote-url>" for the app in a child process.

    The remote URL embeds the username and password from the app config.
    NOTE(review): launching_func prints each command, so the password ends
    up on the console - consider a credential helper instead.
    """
    remote_url = 'https://{}:{}@{}'.format(
        app['username'],
        app['password'],
        app['git'],
    )
    return launch_thread(app, ['git pull %s' % remote_url])
def git_push( app ):
    """Stage everything, commit with a user-supplied message, and push."""
    user_input = input("Commit Message: \n")
    commands = [
        'git config --global user.name "%s"' % app['username'],
        'git add -A',
        'git commit -m "%s"' % user_input,
        'git push'
    ]
    return launch_thread(app, commands)
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,269 | summunity/DjangoReact_CLI | refs/heads/main | /multiprocTest.py |
from time import sleep
def TestFunction( test1, test2 ):
    """Demo worker: print the received props, then heartbeat once per second forever."""
    print('these are the props', test1, test2)
    while True:
        print('we are looping')
        sleep(1)
if __name__ == "__main__":
    import multiprocessing
    # NOTE(review): `args` should be a tuple of positional arguments;
    # ({'test':1, 'test2':2}) is just a parenthesized dict (no trailing comma),
    # so its two keys happen to be unpacked as test1/test2 — presumably
    # unintended, verify.
    proc = multiprocessing.Process(target=TestFunction, args=({'test':1, 'test2':2}))
    proc.name = 'proc 1'
    proc.start()
    # Let the worker heartbeat for a few seconds before killing it.
    sleep(5)
    print( proc.name)
    # Terminate the process
    proc.terminate() # sends a SIGTERM
    # Simple interactive loop; Ctrl-C exits.
    while True:
        try:
            var = input(""" What do you want to do?: """
            )
            print( var )
            if var == 'q':
                print( 'do we raise exception??')
                # NOTE(review): `thread` is undefined here (NameError if this
                # branch runs) — looks like leftover from the CmdThread
                # variant of this experiment; confirm intent.
                thread.raise_exception()
        except KeyboardInterrupt:
            break
    print( 'finished')
| {"/src/apps/interface.py": ["/src/format_cmd.py", "/src/apps/process.py", "/src/thread.py"], "/cli.py": ["/src/apps/interface.py", "/src/git/interface.py", "/cmd_thread.py"], "/src/apps/process.py": ["/src/thread.py"], "/src/git/interface.py": ["/src/format_cmd.py", "/src/git/process.py"], "/src/git/process.py": ["/src/thread.py"]} |
65,277 | bwsi-hadr/student-image-processing | refs/heads/master | /analyzeimage.py |
def analyzeimage(old_image):
    """Placeholder analysis step.

    Students analyze `old_image` here, save the analyzed image to
    static/temp.jpg, and return that path for the web page to display.
    """
    return "static/temp.jpg"
65,278 | bwsi-hadr/student-image-processing | refs/heads/master | /view.py | from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField, RadioField
from random import Random
import cv2, numpy
from werkzeug import secure_filename
from flask_wtf.file import FileField
from analyzeimage import analyzeimage
from PIL import Image
import time
import urllib.request
from flask import Flask, flash, redirect, render_template, request, session, abort, url_for, Response, send_file
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2e278443e'
app.config['UPLOAD_FOLDER'] = 'uploads/'
# # import a python file from a different folder - this will be done later
# import sys
# sys.path.insert(0, "access-images/")
# from RemoteSensingDB import RemSensDB
class ReusableForm(Form):
    ### Define text fields and other inputs for the forms on both html pages
    # Class-level bookkeeping values (shared by both pages).
    indexfortemps = 0
    error = ""
    # How to look the image up: uploaded file, DB name, or DB id.
    radio = RadioField("Search by", choices=[("File", "File"), ("Name","Name"),("ID","ID")], default="Name")
    name = TextField("Filename", description="filename")
    # NOTE(review): label says "Filename" but this field holds the DB id — confirm.
    id_number = TextField("Filename", description="id number")
    file = FileField(u'Image File')
# app.route(path) is the python function run at localhost:8484/path
# the '@' symbol is a decorator function
# when 'app.route' (a function) is run with the arguments below
# python also runs 'imagepage'
# the arguments of 'imagepage' are the outputs of the decorated function
@app.route("/analyze/<string:imname>", methods=['GET', 'POST'])
def imagepage(imname):
    """Show the stored image (left) next to its analyzed version (right).

    GET renders both panes with the same image; POST runs `analyzeimage`
    on the stored file and shows the result on the right.
    """
    # Sources of the old (left) and new (right) images; before analysis
    # both point at the same file.
    old_image = "/static/{}.jpg".format(imname)
    new_image = old_image
    form = ReusableForm(request.form)
    # This branch runs when the form is submitted as a POST request.
    if request.method == 'POST':
        print("old_image", old_image)
        # Strip the leading "/" so the path is relative to the working dir.
        filename_of_image_to_analyze = old_image[1:]
        # Analyze the image; the helper writes static/temp.jpg (no leading
        # slash) and returns that path.
        new_image = analyzeimage(filename_of_image_to_analyze)
        new_image = "/static/temp.jpg"
        print("new image", new_image)
        # FIX: the original also called render_template() here and discarded
        # the result (dead work) — removed. validate() is kept for its side
        # effect of populating form.errors for the template.
        form.validate()
    # Append the current time as a cache-buster so browsers reload the images.
    old_image_time = "{}?time={}".format(old_image, time.time())
    new_image_time = "{}?time={}".format(new_image, time.time())
    return render_template('image.html', form=form, old_image=old_image_time, new_image=new_image_time)
@app.route("/", methods=['GET','POST'])
def query():
    # Landing page: look an image up (by upload, DB name, or DB id), save it
    # to static/analyze.jpg, then redirect to the analysis page.
    #
    # create RemSensDB object (defined in RemoteSensingDB.py)
    # NOTE(review): RemSensDB is undefined in this module — its import block
    # is commented out near the top of the file, so this raises NameError at
    # request time. Confirm the sys.path/import block should be re-enabled.
    dataset = RemSensDB()
    # create form
    form = ReusableForm(request.form)
    if request.method == "POST":
        """
        process data
        if we find an error in filling out the form, it will print to the console
        """
        try:
            searchType = request.form["radio"]
            if searchType == "Name":
                # if searching by name
                filename = request.form["name"]
                if not filename:
                    raise AssertionError("Missing filename")
                # acquire this file from the database
                filestr = dataset.findByName(filename)
                if not filestr:
                    raise AssertionError("Image not found")
                # convert db buffer image to a numpy array
                npimg = numpy.frombuffer(filestr, numpy.uint8)
                # convert numpy image to an opencv image
                img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
                # save the image to static/analyze.jpg for accessing later
                cv2.imwrite("static/analyze.jpg",img)
            elif searchType == "ID":
                # if searching by ID
                id_number = request.form["id_number"]
                if not id_number:
                    raise AssertionError("Missing ID")
                # acquire this file from the database
                filestr = dataset.findByID(id_number)
                if not filestr:
                    raise AssertionError("Image not found")
                # convert db buffer image to a numpy array
                npimg = numpy.frombuffer(filestr, numpy.uint8)
                # convert numpy image to an opencv image
                img = cv2.imdecode(npimg, cv2.IMREAD_COLOR)
                # save the image to static/analyze.jpg for accessing later
                cv2.imwrite("static/analyze.jpg",img)
            elif searchType == "File":
                # if uploading an image
                # get the uploaded file
                uploadFilename = request.files['file']
                if not uploadFilename:
                    raise AssertionError("Missing File")
                # save the image to static/analyze.jpg for accessing later
                uploadFilename.save("static/analyze.jpg")
        except AssertionError as e:
            # Form errors are only surfaced on the server console.
            print("\n\n\n\n\n\n~~~~~\n{}\n~~~~~\n\n\n\n".format(e))
        # redirect to the analysis page
        return redirect(url_for('imagepage', imname="analyze"))
    return render_template('query.html', form=form)
# @app.route("/browse/<int:start>")
# def browse(start):
# return str(start)
# run the flask app at localhost:port
# go to localhost:8484 when running this code to see the project
def runFlask():
    """Start the Flask development server on port 8484 in debug mode."""
    app.run(port=8484, debug=True)
# if the program is being run, and not imported
if __name__ == "__main__":
    # Script entry point: launch the web server.
    runFlask()
65,279 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/test_util.py | import os
from pytest import raises
from . import util
from . import const
from datetime import datetime
def test_now_str():
    """now_str() must produce a DPN-style UTC timestamp string."""
    stamp = util.now_str()
    assert util.RE_TIMESTAMP.match(stamp) is not None
def test_looks_like_uuid():
    """A well-formed UUID is recognised; arbitrary text is not."""
    assert util.looks_like_uuid("e084c014-9ba1-41a3-9eb3-6daef8097bc5") is True
    assert util.looks_like_uuid("This is not a UUID") is False
def test_status_valid():
    """Every known status is accepted; an unknown one is rejected."""
    assert all(util.status_valid(s) for s in const.STATUSES)
    assert not util.status_valid('not a status')
def test_protocols_valid():
    """Every known protocol is accepted; an unknown one is rejected."""
    assert all(util.protocol_valid(p) for p in const.PROTOCOLS)
    assert not util.protocol_valid('not a protocol')
def test_bag_type_valid():
    """Every known bag type is accepted; an unknown one is rejected."""
    assert all(util.bag_type_valid(b) for b in const.BAG_TYPES)
    assert not util.bag_type_valid('not a bag type')
def test_fixity_type_valid():
    """Every known fixity type is accepted; an unknown one is rejected."""
    assert all(util.fixity_type_valid(f) for f in const.FIXITY_TYPES)
    assert not util.fixity_type_valid('not a fixity type')
def test_username():
    """A node namespace maps to the dpn.<namespace> ssh account name."""
    result = util.username('joe')
    assert result == 'dpn.joe'
def test_xfer_dir():
    """The outbound transfer dir lives under the partner's home directory."""
    result = util.xfer_dir('joe')
    assert result == '/home/dpn.joe/outbound'
def test_rsync_link():
    """The rsync link has the shape user@host:dir/filename."""
    expected = "dpn.tdr@example.com:/home/dpn.tdr/outbound/file.tar"
    link = util.rsync_link('tdr', 'example.com', '/home/dpn.tdr/outbound', 'file.tar')
    assert link == expected
def test_digest():
    """digest() hashes the fixture file correctly and rejects unknown algorithms."""
    filepath = os.path.abspath(
        os.path.join(__file__, '..', 'testdata', 'checksum.txt'))
    expected_md5 = '772bdaf5340fd975bb294806d340f6d9'
    expected_sha256 = 'c8843be4c9d672ae91542f5539e770c6eadc5465161e4ffa5389ecef460f553f'
    assert util.digest(filepath, 'md5') == expected_md5
    assert util.digest(filepath, 'sha256') == expected_sha256
    # Should raise exception if we don't implement the requested algorithm.
    with raises(ValueError):
        util.digest(filepath, 'md6')
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,280 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /xfer_test.py | # xfer_test.py
#
# A quick and dirty script to implement DPN replicating node
# functions. This is not a production script. It merely
# implements the following basic transfer features for an initial
# test run:
#
# 1. Query a remote node for pending transfer requests.
# 2. Use rsync to copy files in the transfer requests.
# 3. Calculate the sha-265 checksums of the files.
# 4. Send the checksums back to the remote node.
#
# Pre-reqs:
#
# 1. This must run on a box that has access to the remote DPN servers,
# such as devops.aptrust.org.
# 2. The dpn_rest_settings.py file must be configured correctly. The
# template for that file is settings_template.py. The actual
# settings file is not in GitHub.
#
# Usage:
#
# python xfer_test.py [remote_node]
#
# Param remote_node should be one of: tdr, sdr, chron or hathi
#
# ----------------------------------------------------------------------
from dpnclient import client, util
import dpn_rest_settings
import hashlib
import os
import subprocess
class XferTest:
    """Quick-and-dirty replicating-node driver.

    Pulls pending transfer requests from a remote node, rsyncs each bag
    into INBOUND_DIR, and posts a sha256 fixity receipt back.
    """

    def __init__(self, config):
        # FIX: honor the caller-supplied config; the original hard-coded
        # dpn_rest_settings.TEST here and silently ignored `config`.
        self.client = client.Client(dpn_rest_settings, config)

    def replicate_files(self, namespace):
        """
        Replicate bags from the specified namespace.
        """
        requests = self.client.get_transfer_requests(namespace)
        for request in requests:
            link = request['link']
            replication_id = request['replication_id']
            # download the file via rsync
            print("Downloading {0}".format(link))
            local_path = self.copy_file(link)
            # calculate the checksum
            checksum = util.digest(local_path, "sha256")
            # send the checksum as receipt
            print("Returning checksum receipt {0}".format(checksum))
            self.client.set_transfer_fixity(namespace, replication_id, checksum)

    def copy_file(self, location):
        """Rsync `location` (user@host:path) into INBOUND_DIR; return the local path.

        :raises Exception: re-raises any failure after logging it.
        """
        filename = os.path.basename(location.split(":")[1])
        dst = os.path.join(dpn_rest_settings.INBOUND_DIR, filename)
        command = ["rsync", "-Lav", "--compress",
                   "--compress-level=0", "--quiet", location, dst]
        try:
            with subprocess.Popen(command, stdout=subprocess.PIPE) as proc:
                print(str(proc.communicate()[0]))
            return dst
        except Exception as err:
            print("ERROR Transfer failed: {0}".format(err))
            raise err
raise err
if __name__ == "__main__":
    # Drive a one-shot replication run against the TEST configuration.
    xfer = XferTest(dpn_rest_settings.TEST)
    xfer.replicate_files("test")
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,281 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/test_client.py | from pytest import raises
from .client import Client
# TODO: Integration tests. There is nothing testable
# in client.py without a server to talk to.
# class ClientTestSettings:
# def __init__(self):
# self.MY_NODE = "dpn.example.com"
# self.KEYS = {"remote": "000000000000"}
# client_test_config = {
# 'url': 'http://dpn.example.com/api/',
# 'token': '1234567890',
# 'rsync_host': 'dpn.example.com',
# 'max_xfer_size': 0,
# }
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,282 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/client.py | import json
from . import const
from . import util
from .base_client import BaseClient
from requests.exceptions import RequestException
from datetime import datetime
class Client(BaseClient):
    """
    This is the higher-level DPN REST client that performs meaningful
    repository operations. It's based on the lower-level BaseClient, which
    just does raw REST operations, and it does expose the BaseClient's
    methods.

    :param settings: An instance of dpn_rest_settings, which is just a
    python config file. See settings_template.py for info about what should
    be in the settings file.

    :param active_config: A dictionary from dpn_rest_settings.py containing
    information about how to connect to a DPN rest server. The
    dpn_rest_settings.py file may have dictionaries called TEST, DEV, and
    PRODUCTION, each with keys 'url', 'token', 'rsync_host' and 'max_xfer_size'.
    """
    def __init__(self, settings, active_config):
        super(Client, self).__init__(active_config['url'], active_config['token'])
        self.rsync_host = active_config['rsync_host']
        self.max_xfer_size = active_config['max_xfer_size']
        self.settings = settings
        # Node bookkeeping; all of these are populated by _init_nodes().
        self.my_node = None
        self.all_nodes = []
        self.replicate_to = []
        self.replicate_from = []
        self.restore_to = []
        self.restore_from = []
        self.nodes_by_namespace = {}
        self._init_nodes()

    def _init_nodes(self):
        """
        Initializes some information about all known nodes, including
        which node is ours, which nodes we can replicate to and from,
        and which nodes we can restore to and from.
        """
        response = self.node_list()
        data = response.json()
        self.all_nodes = data['results']
        for node in self.all_nodes:
            if node['namespace'] == self.settings.MY_NODE:
                self.my_node = node
            # A node may appear in several of these lists at once.
            if node['replicate_from']:
                self.replicate_from.append(node)
            if node['replicate_to']:
                self.replicate_to.append(node)
            if node['restore_from']:
                self.restore_from.append(node)
            if node['restore_to']:
                self.restore_to.append(node)
            self.nodes_by_namespace[node['namespace']] = node
        return True

    def create_bag_entry(self, obj_id, bag_size, bag_type, fixity, local_id):
        """
        Creates a new registry entry on your own node. You must be admin
        to do this, and you cannot create registry entries on other nodes.

        :param obj_id: The ID of the DPN bag you want the other node to copy.
        :param bag_size: The size, in bytes, of the bag.
        :param bag_type: The type of bag/registry entry. See const.BAG_TYPES.
        :param fixity: The sha256 digest of the bag.
        :param local_id: The bag's identifier in your local repository.

        :returns: The newly created registry entry as a dict.

        :raises RequestException: Check the response property for details.
        :raises ValueError: If obj_id is not a UUID or bag_type is unknown.
        :raises TypeError: If bag_size is not an int.
        """
        if not util.looks_like_uuid(obj_id):
            raise ValueError("obj_id '{0}' should be a uuid".format(obj_id))
        if not isinstance(bag_size, int):
            raise TypeError("bag_size must be an integer")
        if not util.bag_type_valid(bag_type):
            raise ValueError("bag_type '{0}' is not valid".format(bag_type))
        timestamp = util.now_str()
        entry = {
            "original_node": self.my_node['namespace'],
            "admin_node": self.my_node['namespace'],
            "uuid": obj_id,
            "fixities": [{"algorithm":"sha256", "digest":fixity}],
            "local_id": local_id,
            "version_number": 1,
            "created_at": timestamp,
            "updated_at": timestamp,
            "size": bag_size,
            # First version of a bag points at its own uuid.
            "first_version": obj_id,
        }
        response = self.bag_create(entry)
        if response is not None:
            return response.json()
        return None

    def create_transfer_request(self, obj_id, bag_size, username, fixity):
        """
        Creates a transfer request on your own node asking some other node
        to copy your file. You must be admin on your node to create a transfer
        request, and you cannot create transfer requests on other nodes.

        :param obj_id: The ID of the DPN bag you want the other node to copy.
        :param bag_size: The size, in bytes, of the bag.
        :param username: The SSH username the replicating node uses to connect to your node.
        :param fixity: The SHA-256 digest of the bag to be copied.

        :returns: The newly created transfer request as a dict.

        :raises RequestException: Check the response property for details.
        """
        if not util.looks_like_uuid(obj_id):
            raise ValueError("obj_id '{0}' should be a uuid".format(obj_id))
        if not isinstance(bag_size, int):
            raise TypeError("bag_size must be an integer")
        if not isinstance(username, str) or username.strip() == "":
            raise ValueError("username must be a non-empty string")
        if not isinstance(fixity, str) or fixity.strip() == "":
            raise ValueError("fixity must be a non-empty string")
        link = "{0}@{1}:/dpn/bags/{2}".format(username, self.rsync_host, obj_id + ".tar")
        # NOTE(review): "to_node" is set to the SSH username here, not a node
        # namespace — confirm against the server's expected schema.
        xfer_req = {
            "uuid": obj_id,
            "link": link,
            "from_node": self.my_node['namespace'],
            "to_node": username,
            "size": bag_size,
            "fixity_algorithm": "sha256",
            "fixity_value": fixity,
        }
        response = self.transfer_create(xfer_req)
        if response is not None:
            return response.json()
        return None

    def get_transfer_requests(self, remote_node_namespace):
        """
        Retrieves transfer requests from another node (specified by namespace)
        that your node is supposed to fulfill.

        :param remote_node_namespace: The namespace of the node to connect to.

        :returns: A list of transfer requests, each of which is a dict.

        :raises RequestException: Check the response property for details.
        """
        other_node = self.nodes_by_namespace[remote_node_namespace]
        url = other_node['api_root']
        api_key = self.settings.KEYS[remote_node_namespace]
        # Talk to the remote node with its own API root and key.
        client = BaseClient(url, api_key)
        page_num = 0
        xfer_requests = []
        # Get transfer requests in batches
        while True:
            page_num += 1
            response = client.transfer_list(status='Requested',
                                            page_size=20,
                                            to_node=self.settings.MY_NODE,
                                            page=page_num)
            data = response.json()
            xfer_requests.extend(data['results'])
            # Stop once we've accumulated everything the server reports.
            if len(xfer_requests) >= data['count']:
                break
        return xfer_requests

    def reject_transfer_request(self, remote_node_namespace, replication_id):
        """
        Tells a remote node that you are rejecting its transfer request.

        :param remote_node_namespace: The namespace of the node to connect to.
        :param replication_id: The ID of the transfer request you are rejecting.

        :returns: An updated transfer request.

        :raises RequestException: Check the response property for details.
        """
        return self._update_transfer_request(
            remote_node_namespace, replication_id, const.STATUS_REJECT, None)

    def set_transfer_fixity(self, remote_node_namespace, replication_id, fixity):
        """
        Tells a remote node that you have copied the file in its transfer
        request and that you calculated the specified SHA-256 checksum on
        that file.

        :param remote_node_namespace: The namespace of the node to connect to.
        :param replication_id: The ID of the transfer request you completed.
        :param fixity: The SHA-256 checksum of the file you copied.

        :returns: An updated transfer request.

        :raises RequestException: Check the response property for details.
        """
        return self._update_transfer_request(
            remote_node_namespace, replication_id, None, fixity)

    def _update_transfer_request(self, remote_node_namespace, replication_id, status, fixity):
        # Shared helper for reject/fixity updates: PUTs only the fields
        # that were supplied (status and/or fixity_value).
        other_node = self.nodes_by_namespace[remote_node_namespace]
        url = other_node['api_root']
        api_key = self.settings.KEYS[remote_node_namespace]
        client = BaseClient(url, api_key)
        data = { "replication_id": replication_id }
        if status is not None:
            data['status'] = status
        if fixity is not None:
            data['fixity_value'] = fixity
        # NOTE(review): debug output — consider replacing with logging.
        print(data)
        response = client.transfer_update(data)
        if response is not None:
            return response.json()
        return None
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,283 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/test_base_client.py | from .base_client import BaseClient
# TODO: Integration tests!
def test_headers():
    """The default headers carry JSON content negotiation and the token auth header."""
    client = BaseClient("http://www.example.com", "API_TOKEN_1234")
    headers = client.headers()
    assert headers['Content-Type'] == 'application/json'
    assert headers['Accept'] == 'application/json'
    assert headers['Authorization'] == 'token API_TOKEN_1234'
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,284 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/util.py | import re
from . import const
import hashlib
from datetime import datetime
# Regex for something that looks like a UUID.
RE_UUID = re.compile("^[a-f0-9]{8}-[a-f0-9]{4}-[a-f0-9]{4}-[a-f0-9]{4}-?[a-f0-9]{12}\Z", re.IGNORECASE)
RE_TIMESTAMP = re.compile(r'^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.*\d*Z\Z')
def now_str():
    """
    Return the current UTC time as an ISO-8601 string with a trailing 'Z'.
    Useful for creating JSON dates.
    """
    return "{0}Z".format(datetime.utcnow().isoformat("T"))
def looks_like_uuid(string):
    """
    Return True when `string` matches the UUID pattern, else False.
    """
    return RE_UUID.match(string) is not None
def status_valid(status):
    """
    Returns True if status is a valid DPN status option (see const.STATUSES).
    """
    return status in const.STATUSES
def protocol_valid(protocol):
    """
    Returns True if protocol is a valid DPN protocol option (see const.PROTOCOLS).
    """
    return protocol in const.PROTOCOLS
def bag_type_valid(bag_type):
    """
    Returns True if bag_type is a valid DPN bag type (see const.BAG_TYPES).
    """
    return bag_type in const.BAG_TYPES
def fixity_type_valid(fixity_type):
    """
    Returns True if fixity_type is a valid DPN fixity type (see const.FIXITY_TYPES).
    """
    return fixity_type in const.FIXITY_TYPES
def username(namespace):
    """
    Return the local ssh account name ('dpn.<namespace>') for a node namespace.
    """
    return f"dpn.{namespace}"
def xfer_dir(namespace):
    """
    Return the "outbound" directory for the specified partner,
    e.g. "tdr" -> /home/dpn.tdr/outbound.

    *** TODO: USE SETTINGS INSTEAD! THIS SHOULD NOT BE HARD CODED! ***
    """
    return "/home/{0}/outbound".format(username(namespace))
def rsync_link(namespace, my_server, partner_outbound_dir, filename):
    """
    Return the rsync url the given partner should use to copy a file,
    in the form user@myserver.kom:dir/filename.tar.

    :param namespace: namespace of the copying node (tdr, sdr, chron, etc).
    :param my_server: your server's fully-qualified domain name or IP,
        as set in your dpn_rest_settings.py file.
    :param partner_outbound_dir: directory holding outbound files for
        the partner named by `namespace`.
    :param filename: name of the file to copy (usually a UUID with a
        .tar extension).
    """
    if not partner_outbound_dir.endswith('/'):
        partner_outbound_dir += '/'
    return "{0}@{1}:{2}{3}".format(
        username(namespace), my_server, partner_outbound_dir, filename)
def digest(abs_path, algorithm):
    """
    Return the md5 or sha256 hex digest of a file.

    :param abs_path: Absolute path to file.
    :param algorithm: Either 'md5' or 'sha256'
    :returns str: Hex digest of the file.
    :raises ValueError: for any other algorithm name.
    """
    if algorithm == 'md5':
        hasher = hashlib.md5()
    elif algorithm == 'sha256':
        hasher = hashlib.sha256()
    else:
        raise ValueError("algorithm must be either md5 or sha256")
    # Stream the file in 64 KiB chunks so large bags don't load into memory.
    with open(abs_path, 'rb') as stream:
        for chunk in iter(lambda: stream.read(65536), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,285 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/__init__.py | # Package dpnclient - A REST client for DPN.
from . import const
from . import util
from .base_client import BaseClient
from .client import Client
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,286 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/const.py | # Status
STATUS_ACCEPTED = 'Accepted'
STATUS_CONFIRMED = 'Confirmed'
STATUS_CANCELLED = 'Cancelled'
STATUS_FINISHED = 'Finished'
STATUS_PREPARED = 'Prepared'
STATUS_REQUESTED = 'Requested'
STATUS_REJECTED = 'Rejected'
STATUS_RECEIVED = 'Received'
STATUSES = (STATUS_ACCEPTED, STATUS_CONFIRMED, STATUS_CANCELLED,
STATUS_FINISHED, STATUS_PREPARED, STATUS_REQUESTED,
STATUS_REJECTED, STATUS_RECEIVED)
# Protocols
PROTOCOL_HTTPS = 'H'
PROTOCOL_RSYNC = 'R'
PROTOCOLS = (PROTOCOL_HTTPS, PROTOCOL_RSYNC)
# Bag Types
BAGTYPE_DATA = 'D'
BAGTYPE_RIGHTS = 'R'
BAGTYPE_BRIGHTENING = 'B'
BAG_TYPES = (BAGTYPE_DATA, BAGTYPE_RIGHTS, BAGTYPE_BRIGHTENING)
# Fixity
FIXITY_SHA256 = 'sha256'
FIXITY_TYPES = (FIXITY_SHA256)
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,287 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /settings_template.py | # Fill out the following settings and save as dpn_rest_settings.py.
# Don't check dpn_rest_settings.py into source control, since this
# is a public repo and the settings file will have your API keys.
#
# Enter the URL (with port) and API key for *your own* DPN node.
# The API key should be the key for a user on your own node who
# has admin access.
#
# Set MY_NODE to the namespace of your node ('tdr', 'sdr', 'aptrust', etc.)
MY_NODE = 'aptrust'

# This should be the IP address or fully-qualified domain name of your
# DPN node. This is used in constructing links to bags you want partners
# to replicate.
MY_SERVER = 'devops.aptrust.org'

# Where do we keep DPN bags?
# OUTBOUND_DIR - full path to dir containing DPN bags for other nodes to copy.
# INBOUND_DIR - full path to dir where we will store bags that we are
#               replicating from other nodes. We need to run checksums on
#               these and then send them off to long-term storage.
OUTBOUND_DIR = '/path/to/outbound'
INBOUND_DIR = '/path/to/inbound'

# PARTNER_OUTBOUND_DIR is the name of the directory under the partner's
# home directory where they should look for files we want them to copy.
# For example, partner xyz will have an account on MY_SERVER under
# /home/dpn.xyz. We'll put files in /home/dpn.xyz/outbound for them to
# copy.
PARTNER_OUTBOUND_DIR = "outbound"

# Configurations for OUR OWN node.
# url is the url for your own node
# token is the API key/token for admin user at your own node.
# rsync_host is the hostname from which other nodes will transfer your content
# max_xfer_size is the max size of files you are willing to transfer
# (0 means no size limit).
TEST = { 'url': '', 'token': '', 'rsync_host': '', 'max_xfer_size': 0 }
DEV = { 'url': '', 'token': '', 'rsync_host': '', 'max_xfer_size': 0 }
PRODUCTION = { 'url': '', 'token': '', 'rsync_host': '', 'max_xfer_size': 0 }

# All environments show_available() will report on.
available = [TEST, DEV, PRODUCTION]

# API keys for OTHER nodes that we want to query.
# Key is node namespace. Value is API key to connect to that node.
KEYS = {
    'aptrust': 'api key goes here',
    'hathi': 'api key goes here',
    'chron': 'api key goes here',
    'sdr': 'api key goes here',
    'tdr': 'api key goes here',
}
def show_available():
    """Print the url and transfer-size limit of each filled-in configuration."""
    for config in available:
        # FIX: the original read config['key'], but the config dicts store
        # the API key under 'token', so any filled-in config raised KeyError.
        if config['url'] != '' and config['token'] != '':
            max_xfer_size = config['max_xfer_size']
            if max_xfer_size == 0:
                max_xfer_size = "no size limit"
            print("{0} ... {1}".format(config['url'], max_xfer_size))
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,288 | dpn-admin/DPN-PYTHON-CLIENT | refs/heads/master | /dpnclient/base_client.py | from . import const
import json
import requests
from requests.exceptions import RequestException
class BaseClient:
"""
Base client for DPN REST service. This client returns requests.Response
objects that include the status code of the response, and the raw text
and json data. For all of this class's list/get/create/update methods,
you'll be interested in the following attributes of the response object:
response.status_code - Integer. HTTP status code returned by the server.
response.text - Raw response text. May be HTML on status code 500.
response.json() - The response JSON (for non-500 responses).
For more information about the requests library and its Response objects,
see the requests documentation at:
http://docs.python-requests.org/en/latest/
All methods that don't get the expected response from the server raise
a RequestException, which the caller must handle. Check the response
property of the RequestException for details (status_code, text, etc.).
"""
    def __init__(self, url, token):
        """Store the API root and auth token.

        :param url: API root URL; trailing slashes are stripped so endpoint
            paths join cleanly.
        :param token: API token sent in the Authorization header.
        """
        while url.endswith('/'):
            url = url[:-1]
        self.url = url
        self.token = token
        # NOTE(review): verification is enabled despite the comment below
        # implying a certificate problem — confirm which is intended.
        self.verify_ssl = True # TDR cert is not legit - FIX THIS!
    def headers(self):
        """
        Returns a dictionary of default headers for the request:
        JSON content negotiation plus token authorization.
        """
        return {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
            'Authorization': 'token {0}'.format(self.token),
        }
# ------------------------------------------------------------------
# Node methods
# ------------------------------------------------------------------
def node_list(self, **kwargs):
"""
Returns a list of DPN nodes.
:param replicate_to: Boolean value.
:param replicate_from: Boolean value.
:param page_size: Number of max results per page.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/node/".format(self.url)
response = requests.get(url, headers=self.headers(), params=kwargs,
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def node_get(self, namespace):
"""
Returns the DPN node with the specified namespace.
:param namespace: The namespace of the node. ('tdr', 'sdr', etc.)
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/node/{1}/".format(self.url, namespace)
response = requests.get(url, headers=self.headers(), verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
# ------------------------------------------------------------------
# Bag methods
# ------------------------------------------------------------------
def bag_list(self, **kwargs):
"""
Returns a requests.Response object whose json contains a list of
bag entries.
:param before: DPN DateTime string to FILTER results by last_modified_date earlier than this.
:param after: DPN DateTime String to FILTER result by last_modified_date later than this.
:param first_node: String to FILTER by node namespace.
:param object_type: String character to FILTER by object type.
:param ordering: ORDER return by (accepted values: last_modified_date)
:param page_size: Number of max results per page.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/bag/".format(self.url)
response = requests.get(url, headers=self.headers(), params=kwargs,
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def bag_get(self, obj_id):
"""
Returns a requests.Response object whose json contains the single
bag entry that matches the specified obj_id.
:param obj_id: A UUID string. The id of the bag entry to return.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/bag/{1}/".format(self.url, obj_id)
response = requests.get(url, headers=self.headers(), verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def bag_create(self, obj):
"""
Creates a bag entry. Only the repository admin can make this call,
which means you can issue this call only against your own node.
:param obj: The object to create.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/bag/".format(self.url)
response = requests.post(url, headers=self.headers(), data=json.dumps(obj),
verify=self.verify_ssl)
if response.status_code != 201:
raise RequestException(response.text, response=response)
return response
def bag_update(self, obj):
"""
Updates a bag entry. Only the repository admin can make this call,
which means you can issue this call only against your own node.
:param obj: The object to create.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/bag/{1}/".format(self.url, obj['dpn_object_id'])
response = requests.put(url, headers=self.headers(), data=json.dumps(obj),
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
# ------------------------------------------------------------------
# Restoration methods
# ------------------------------------------------------------------
def restore_list(self, **kwargs):
"""
Returns a paged list of Restore requests.
*** RESTORE IS NOT YET IMPLEMENTED ***
:param dpn_object_id: Filter by DPN Object ID
:param status: Filter by status code.
:param node: Filter by node namespace.
:param ordered: Order by comma-separated list: 'created' and/or 'updated'
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/restore/".format(self.url)
response = requests.get(url, headers=self.headers(), params=kwargs,
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def restore_get(self, restore_id):
"""
Returns the restore request with the specified event id.
*** RESTORE IS NOT YET IMPLEMENTED ***
:param obj_id: The restore_id of the restore request.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/restore/{1}/".format(self.url, restore_id)
response = requests.get(url, headers=self.headers(), verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def restore_create(self, obj):
"""
Creates a restore request. Only the repository admin can make this call,
which means you can issue this call only against your own node.
*** RESTORE IS NOT YET IMPLEMENTED ***
:param obj: The request to create.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/restore/".format(self.url)
response = requests.post(url, headers=self.headers(), data=json.dumps(obj),
verify=self.verify_ssl)
if response.status_code != 201:
raise RequestException(response.text, response=response)
return response
def restore_update(self, obj):
"""
Updates a restore request.
*** RESTORE IS NOT YET IMPLEMENTED ***
:param obj_id: The ID of the restore request (NOT the ID of a DPN bag).
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/restore/{1}/".format(self.url, obj['restore_id'])
response = requests.put(url, headers=self.headers(), data=json.dumps(obj),
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
# ------------------------------------------------------------------
# Replication Transfer methods
# ------------------------------------------------------------------
def transfer_list(self, **kwargs):
"""
Returns a list of transfer requests, where the server wants you
to transfer bags to your repository.
:param dpn_object_id: Filter by exact DPN Object ID value.
:param status: Filter by request status ('Requested', 'Confirmed', etc)
:param fixity: [true|false|none] to Filter by fixity status.
:param valid: [true|false|none] to Filter by validation status.
:param from_node: Filter by namespace that originated request. ("aptrust"|"chron"|"sdr"...)
:param to_node: Filter by namespace that should fulfill request. ("aptrust"|"chron"|"sdr"...)
:param created_on: Order result by record creation date. (prepend '-' to reverse order)
:param updated_on: Order result by last update. (prepend '-' to reverse order)
:param page_size: Max number of results per page.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/replicate/".format(self.url)
response = requests.get(url, headers=self.headers(), params=kwargs,
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def transfer_get(self, replication_id):
"""
Returns the transfer requests with the specified id.
:param replication_id: The replication_id of the transfer request you want to retrieve.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/replicate/{1}/".format(self.url, replication_id)
response = requests.get(url, headers=self.headers(), verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
def transfer_create(self, obj):
"""
Creates a transfer request. Only the repository admin can make this call,
which means you can issue this call only against your own node.
:param obj: The request to create.
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/replicate/".format(self.url)
response = requests.post(url, headers=self.headers(), data=json.dumps(obj),
verify=self.verify_ssl)
if response.status_code != 201:
raise RequestException(response.text, response=response)
return response
def transfer_update(self, obj):
"""
Updates a transfer request. The only fields in the transfer object
relevant to this request are the replication_id, fixity_value,
and status, which you must set to either 'A' (Accept) or 'R' (Reject).
:param obj_id: The ID of the restore request (NOT the ID of a DPN bag).
:returns: requests.Response
:raises RequestException: Check the response property for details.
"""
url = "{0}/api-v1/replicate/{1}/".format(self.url, obj['replication_id'])
print("transfer_update " + json.dumps(obj))
print("Headers: " + str(self.headers()))
print("URL: " + url)
response = requests.put(url, headers=self.headers(), data=json.dumps(obj),
verify=self.verify_ssl)
if response.status_code != 200:
raise RequestException(response.text, response=response)
return response
| {"/dpnclient/test_util.py": ["/dpnclient/__init__.py"], "/xfer_test.py": ["/dpnclient/__init__.py"], "/dpnclient/test_client.py": ["/dpnclient/client.py"], "/dpnclient/client.py": ["/dpnclient/__init__.py", "/dpnclient/base_client.py"], "/dpnclient/test_base_client.py": ["/dpnclient/base_client.py"], "/dpnclient/util.py": ["/dpnclient/__init__.py"], "/dpnclient/__init__.py": ["/dpnclient/base_client.py", "/dpnclient/client.py"], "/dpnclient/base_client.py": ["/dpnclient/__init__.py"]} |
65,312 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/sp_data_module.py | import pandas as pd
import copy
from flask import Flask
from flask import Blueprint
from flask import request
from flask import jsonify
from sparepart import jobs
from sparepart.util import util
from sparepart.dao import dao
from sparepart.data_model import model_fun
from datetime import datetime
from plotly import plot as plt
bp = Blueprint('sp_data_module', __name__)
@bp.route('/test')
def get_data():
    """Debug endpoint: run the CSV -> database import job once."""
    year_from = '2017'
    year_to = '2017'
    jobs.import_data_into_db()
    return 'success'
@bp.route('/dashboard/keychart/post', methods=['POST'])
def get_keychart_data_all():
    """
    Key-chart data for the dashboard, filtered by a year range and a
    list of plants (POST JSON: start_year, end_year, plants).
    """
    year_from = request.json['start_year']
    year_to = request.json['end_year']
    # Translate plant names ('PFA1', ...) into their 2-char asset prefixes.
    plant_codes = [util.plant_agg(name) for name in request.json['plants']]
    type_counts = dao.get_sno_type_count(year_from, year_to, plant_codes)
    payload = {
        'msg': 'success',
        'percentage': type_counts[0],
        'total_amount': dao.get_sno_count(year_from, year_to, plant_codes),
        'total_price': dao.get_total_price_count(year_from, year_to, plant_codes),
    }
    return jsonify(payload)
@bp.route('/dashboard/keychart/get')
def get_keychart_data():
    """Static 2017 key-chart data: one {plant: usage-percent} dict per row."""
    rows = dao.get_sno_type_count()
    # NOTE: the 'sucess' spelling is preserved -- the frontend may match it.
    payload = {'msg': 'sucess'}
    payload['2017'] = [
        {util.plant_split(row[1]): round((int(row[2]) / 40000) * 100)}
        for row in rows
    ]
    return jsonify(payload)
@bp.route('/dashboard/polar/get')
def get_polar_data():
    """Polar-chart data for the dashboard (computed in util.polar_data)."""
    return jsonify(util.polar_data())
@bp.route('/dashboard/bar/get')
def get_bar_data_t():
    """Bar-chart data: top-5 spare parts by usage."""
    return jsonify(dao.get_top5_sno_data())
@bp.route('/analysis/timeanalysis/get/<string:sno>')
def get_timeanalysis_data(sno):
    """
    Time-series analysis for one part number.

    Fits an ARIMA model on all but the last month of usage, predicts the
    last month, and returns both the actual series and a copy whose final
    entry is replaced by the prediction.

    :param sno: part number (URL path segment).
    :returns: JSON with 'msg', 'actual_value' and 'predict_value', or
        {'msg': 'nodata'} when the DAO has no history for this sno.
    """
    data = dao.get_timeanalysis_data(sno)
    print(data)  # debug output; rows are (sno, date, sum) -- see columns below
    if not data:
        return jsonify({'msg': 'nodata'})
    df = pd.DataFrame(data, columns=['sno','date','sum'])
    df['date'] = df['date'].astype(str)
    # Hold out the most recent month as the "actual" to compare against.
    train_data = df[0:-1]
    test_data = df[-1:]
    next_month = model_fun.arima_predict(train_data)
    next_month_real = test_data['sum'].values[0]
    print('{} {} 预估 {}, 实际用量{}'.format(sno, test_data.values[0][1], next_month, next_month_real))
    # predict_value mirrors the actual series except for the last entry,
    # which carries the model's estimate instead of the real usage.
    predict = copy.deepcopy(data)
    predict[-1] = [sno, data[-1][1], next_month]
    js = {'msg':'success','actual_value':data,'predict_value':predict}
    return jsonify(js)
@bp.route('/analysis/fbp/get')
def t():
    """Run a Prophet forecast for one part (query args: sno, freq, periods)."""
    freq = request.args.get("freq")
    horizon = int(request.args.get('periods'))
    part_no = request.args.get('sno')
    history = dao.get_fbp_data(part_no, freq)
    result = model_fun.fbp(history, horizon, freq)
    result['msg'] = 'success'
    return jsonify(result)
@bp.route('/dashboard/linechart/get')
def get_line_chart_data():
    """
    Line-chart data: unused-part amounts (in 1e4 units) and total prices
    (in 1e5 units) keyed by in/out year.
    """
    rows = dao.get_unused_sno_amount_price()
    frame = pd.DataFrame(rows, columns=['year_i', 'year_o', 'amount_sum', 'total_price'])
    # Cast before scaling: amounts arrive as strings/decimals from the DAO.
    frame['amount_sum'] = frame['amount_sum'].astype(int)
    frame['total_price'] = frame['total_price'].apply(lambda p: round(p / 100000))
    frame['amount_sum'] = frame['amount_sum'].apply(lambda a: round(a / 10000))
    return jsonify(frame.to_dict(orient='list'))
@bp.route('/dashboard/scatter/post', methods=['POST'])
def get_scatter_data():
    """
    Scatter-chart data for the dashboard page.

    POST JSON params: start_year, end_year, plants (e.g. ["PFA1", "PFA2", ...]).
    Returns a dict mapping plant name -> list of [month, amount, count] points.
    """
    year_from = request.json['start_year']
    year_to = request.json['end_year']
    plant_codes = [util.plant_agg(name) for name in request.json['plants']]
    rows = dao.get_scatter_data(year_from, year_to, plant_codes)
    payload = {}
    for row in rows:
        plant = util.plant_split(row[0])
        point = [row[1], int(row[3]), row[2]]
        payload.setdefault(plant, []).append(point)
    return jsonify(payload)
@bp.route('/dashboard/polar/test', methods=['POST'])
def get_polar_data_test():
    """
    Polar-chart data endpoint (stub).

    POST JSON params: start_year, end_year, plants (e.g. ["PFA1", ...]).
    Currently only parses its inputs and returns an empty JSON string.
    """
    year_from = request.json['start_year']
    year_to = request.json['end_year']
    plant_codes = [util.plant_agg(name) for name in request.json['plants']]
    return jsonify("")
'''
@bp.route('/dashboard/scatter/get')
def get_scatter_data():
js = util.scatter_data()
return jsonify(js)
''' | {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,313 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/data_model/model_fun.py | import os
os.environ['OMP_NUM_THREADS'] = "1"
import numpy as np
import pickle
import statsmodels.api as sm
import calendar
import warnings
warnings.filterwarnings("ignore")
# import xgboost as xgb
import pandas as pd
import fbprophet
from itertools import product
from datetime import datetime, timedelta
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.linear_model import LinearRegression
from sklearn.metrics import explained_variance_score
from statsmodels.tsa.arima_model import ARIMA
from fbprophet.diagnostics import cross_validation,performance_metrics
def linearFun(x_train, y_train, x_test, y_test):
    """Fit ordinary least squares on the training split and print test error."""
    regressor = LinearRegression()
    fitted = regressor.fit(x_train, y_train)
    print("模型参数:")
    print(fitted)
    print("模型截距:")
    print(regressor.intercept_)
    print("参数权重:")
    print(regressor.coef_)
    predictions = regressor.predict(x_test)
    print('MSE:', metrics.mean_squared_error(y_test, predictions))
    print('RMSE', np.sqrt(metrics.mean_squared_error(y_test, predictions)))
def xgBoostFun(x_train, y_train,x_test, y_test):
    """
    Train an XGBoost regressor via the low-level xgb.train API and print
    predictions for the test split.

    NOTE(review): the ``import xgboost as xgb`` at the top of this file is
    commented out, so calling this function currently raises NameError --
    re-enable the import before use.
    """
    param = {'boosting_type':'gbdt',
        'objective' : 'reg:linear', # task type
        #'objective' : 'regression', # task type
        'eval_metric' : 'auc',
        'eta' : 0.01,
        'max_depth' : 19,
        'colsample_bytree':0.8,
        'subsample': 0.9,
        'subsample_freq': 8,
        'alpha': 0.6,
        'lambda': 0,
        }
    train_data = xgb.DMatrix(x_train, label=y_train)
    test_data = xgb.DMatrix(x_test, label=y_test)
    # Large round cap with early stopping on the validation split.
    model = xgb.train(param, train_data, evals=[(train_data, 'train'), (test_data, 'valid')], num_boost_round = 10000, early_stopping_rounds=200, verbose_eval=25)
    y_pred = model.predict(test_data)
    print('XGBoost 预测结果', y_pred)
    # print('XGBoost 准确率:', explained_variance_score(y_test,y_pred))
def xgBoostReg(x_train, y_train, x_test, y_test):
    """
    Train an XGBRegressor and print prediction statistics plus the
    explained-variance score on the test split.

    NOTE(review): the ``import xgboost as xgb`` at the top of this file is
    commented out, so calling this function currently raises NameError.
    """
    model = xgb.XGBRegressor(n_estimators=150, learning_rate=0.1, gamma=0, max_depth=10)
    model.fit(x_train, y_train)
    y_pred = model.predict(x_test)
    print("XGBoost 预测结果统计")
    show_stats(y_pred)
    print('accruace',explained_variance_score(y_test,y_pred))
    # df = pd.DataFrame(y_pred, columns=['y_pred'])
def show_stats(data):
    """Print summary statistics (min/max/ptp/mean/std/var) for *data*."""
    summaries = [
        ('min', np.min), ('max', np.max), ('ptp', np.ptp),
        ('mean', np.mean), ('std', np.std), ('var', np.var),
    ]
    for label, stat in summaries:
        print(label, stat(data))
def fbp(df, p, freq):
    """
    Fit a Prophet model on *df* (columns 'ds'/'y' per fbprophet convention --
    confirm against callers) and forecast *p* periods at frequency *freq*.

    :param df: history dataframe passed straight to Prophet.fit.
    :param p: number of future periods to forecast.
    :param freq: 'Y', 'M' or 'D'; also selects the date string format.
    :returns: the forecast dataframe as a dict of lists, with 'ds'
        formatted as strings.
    """
    model = fbprophet.Prophet()
    model.fit(df)
    future = model.make_future_dataframe(periods=p, freq=freq, include_history=True)
    # future.tail()
    forecast = model.predict(future)
    # model.plot(forecast)
    # model.plot_components(forecast)
    # print(forecast)
    # NOTE(review): time_format is unbound (NameError below) if freq is
    # anything other than 'Y'/'M'/'D' -- consider a default or an error.
    if freq == 'Y':
        time_format = '%Y'
    elif freq == 'M':
        time_format = '%Y-%m'
    elif freq == 'D':
        time_format = '%Y-%m-%d'
    # Cross-validation diagnostics dumped to hard-coded developer paths.
    # NOTE(review): these Windows desktop paths will fail on other machines.
    df_cv = cross_validation(model, horizon='30 days')
    df_pe = performance_metrics(df_cv)
    df_cv.to_csv('C:/Users/47135/Desktop/df_cv.csv', encoding='UTF-8')
    df_pe.to_csv('C:/Users/47135/Desktop/df_pe.csv', encoding='UTF-8')
    forecast['ds'] = forecast['ds'].dt.strftime(time_format)
    result = forecast.to_dict(orient='list')
    # print(result)
    return result
def arima_df():
    """Load the pickled usage history from the fixed development path."""
    with open('C:/Code/darkHorseRace/sparepart/data_model/df.pkl', 'rb') as handle:
        payload = pickle.load(handle)
    return pd.DataFrame(payload)
"""
使用ARIMA时间序列预测下一个月的领用量
输入:某sno的领用量
输出:下一个月的预估
"""
def arima_predict(df, verbose=False):
    """
    Grid-search SARIMAX orders on df['sum'] and forecast the next step.

    :param df: dataframe with a 'sum' column holding the monthly usage.
    :param verbose: when True, print the summary of the best model.
    :returns: the next-step forecast as a non-negative int.
    """
    # Candidate (p, d, q) order ranges for the search.
    ps = range(0, 5)
    qs = range(0, 5)
    ds = range(0, 1)
    parameters = product(ps, ds, qs)
    parameters_list = list(parameters)
    # Find the order with the lowest AIC.
    results = []  # NOTE(review): collected but never used afterwards
    best_aic = float("inf")  # positive infinity
    for param in parameters_list:
        try:
            #model = ARIMA(df_month.Price,order=(param[0], param[1], param[2])).fit()
            # SARIMAX: ARIMA variant that can model seasonal trends.
            model = sm.tsa.statespace.SARIMAX(df['sum'],order=(param[0], param[1], param[2]),\
                enforce_stationarity=False,enforce_invertibility=False).fit()
        except ValueError:
            print('参数错误:', param)
            continue
        aic = model.aic
        if aic < best_aic:
            best_model = model
            best_aic = aic
            best_param = param  # NOTE(review): assigned but never read
        results.append([param, model.aic])
    # NOTE(review): if every fit raises ValueError, best_model is unbound
    # and the code below raises NameError -- confirm inputs always fit.
    if verbose:
        # Show the winning model.
        print('最优模型: ', best_model.summary())
    # Forecast the next month's usage (single step past the history).
    y_pred = round(best_model.get_prediction(start=len(df)+1, end=len(df)+1).predicted_mean).values[0]
    y_pred_t = round(best_model.get_prediction(start=len(df)+1, end=len(df)+2).predicted_mean).values
    print('y_pred_t: ',y_pred_t)  # debug leftover
    print('y_pred_t type:',type(y_pred_t))  # debug leftover
    # Usage cannot be negative; clamp the forecast at zero.
    if y_pred < 0:
        y_pred = 0
    # if y_pred_t[0] < 0:
    #     y_pred_t[0] = 0
    # if y_pred_t[1] < 0:
    #     y_pred_t[1] = 0
    return int(y_pred)
    # return y_pred_t
if __name__ == "__main__":
# df = arima_df()
# # arima_predict(df,verbose=True)
# sno_list = "SV000048"
# temp = df[df['sno'] == sno_list]
# #print(temp)
# train_data = temp[0:-1]
# test_data = temp[-1:]
# #print('train_data: ', train_data)
# #print('test_data: ', test_data)
# next_month = arima_predict(train_data)
# next_month_real = test_data['sum'].values[0]
# print('{} {} 预估 {}, 实际用量{}'.format(sno_list, test_data.values[0][1], next_month, next_month_real))
df = pd.read_csv('C:/Code/fbp.csv')
a = fbp(df,12) | {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,314 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/util/util.py | import pandas as pd
import numpy as np
import sys
import json
from datetime import datetime as dt
from dateutil.relativedelta import relativedelta
def polar_data():
    """
    Polar-chart data: for the top-5 parts used in all 10 plants, return
    {sno: [amount-per-plant, ...]}.

    The grouping assumes each qualifying sno has exactly 10 (year, sno,
    asset_no) rows -- the count % 10 packing below depends on it.
    """
    df = df_pre_dispose()
    # Per (year, sno): number of distinct plants and total amount.
    dfn = df.groupby([df['o_warehouse_date'].apply(lambda x:x.year),'sno'], as_index=False).agg({'asset_no':pd.Series.nunique,'amount':np.sum}).sort_values('sno',ascending=False)
    # Keep parts used in all 10 plants; take the 5 with the largest usage.
    sno_used_in_all_plant = dfn[dfn['asset_no'] == 10].sort_values('amount',ascending=False).head(5)
    # print(sno_used_in_all_plant)
    top_5_sno_used_in_all_plant = sno_used_in_all_plant['sno'].to_list()
    temp = df[df['sno'].isin(top_5_sno_used_in_all_plant)].groupby([df['o_warehouse_date'].apply(lambda x:x.year), 'sno', 'asset_no'], as_index=False).agg({'amount':np.sum})
    # print(temp)
    # rs = temp.to_dict(orient='list')
    rs = {}
    count = 1
    amount = []
    # Pack the per-plant amounts in runs of 10; every 10th row flushes the
    # accumulated list under its sno key.
    for item in zip(temp['sno'], temp['amount']):
        if count%10 == 0 :
            amount.append(item[1])
            rs[item[0]] = amount
            amount = []
            count += 1
            continue
        amount.append(item[1])
        count += 1
        # print('amount: ',amount)
        # print('count: ',count)
    return rs
def temp_data():
    """
    For the 5 plants using the most distinct parts, return their top-3
    parts by amount as [{plant: {'sno': [...], 'amount': [...]}}, ...].
    """
    df = df_pre_dispose()
    # Top 5 plants by number of distinct part numbers.
    dfn = df.groupby([df['o_warehouse_date'].apply(lambda x:x.year),'asset_no']).agg({'sno':pd.Series.nunique}).sort_values('sno',ascending=False).head(5)
    dfn.reset_index(inplace=True)
    top5_plant = dfn['asset_no'].to_list()
    df_top5_plant = df[df['asset_no'].isin(top5_plant)]
    # Per (year, plant, sno) total amounts, largest first within each plant.
    df_top5_plant_top3_sno = df_top5_plant.groupby([df['o_warehouse_date'].apply(lambda x:x.year), 'asset_no', 'sno']).agg({'amount':np.sum}).sort_values(by=['asset_no','amount'],ascending=[False,False])
    # print(df_top5_plant_top3_sno)
    df_top5_plant_top3_sno.reset_index(inplace=True)
    rs = []
    for item in top5_plant:
        # First three rows per plant are its top-3 parts by amount.
        temp_df = df_top5_plant_top3_sno[df_top5_plant_top3_sno['asset_no'] == item].iloc[0:3]
        rs.append({item : temp_df[['sno','amount']].to_dict(orient='list')})
        # print(temp_df[['sno','amount']].to_dict())
    # print(rs)
    return rs
def scatter_data():
    """
    Scatter-chart data: per plant, monthly [month, amount-sum, sno-count]
    rows ordered by amount descending.

    :returns: dict mapping plant name -> list of data rows.
    """
    df = df_pre_dispose()
    # Per (plant, month): total amount and number of usage records.
    dfn = df.groupby(['asset_no', df['o_warehouse_date'].apply(lambda x: x.month)]).agg({'amount': 'sum', 'sno': 'count'})
    dfn = dfn.reset_index()
    dfn.sort_values('amount', inplace=True, ascending=False)
    dfn.reset_index(inplace=True)
    dfn.drop('index', axis=1, inplace=True)
    result = {}
    # BUG FIX: the original plant list contained 'PFN' twice, which only
    # recomputed the same key; deduplicated here. The dead
    # `dfn.groupby('asset_no')` before the loop was also removed.
    plants = ['PFA1', 'PFA2', 'PFA3', 'PFE', 'PFN', 'PFY', 'PFS', 'PFH', 'PFC', 'PFW']
    for plant in plants:
        per_plant = dfn[dfn['asset_no'] == plant].drop('asset_no', axis=1)
        js = json.loads(per_plant.to_json(orient='split'))
        result[plant] = js['data']
    return result
def df_pre_dispose():
    """
    Load the raw usage CSV (fixed dev path), normalize the 2-char plant
    prefixes into plant names and keep only the known plants.
    """
    frame = pd.read_csv("C:/Code/darkHorseRace/sparepart/upload/temp/import_db_bak.csv", usecols=['sno','asset_no','amount','o_warehouse_date'])
    frame['amount'] = frame['amount'].astype(int)
    frame['o_warehouse_date'] = pd.to_datetime(frame['o_warehouse_date'], format='%Y/%m/%d')
    # First two chars of the asset number identify the plant.
    frame['asset_no'] = frame['asset_no'].str[0:2].str.upper().apply(plant_split)
    known_plants = ['PFA1','PFA2','PFA3','PFE','PFN','PFY','PFS','PFH','PFC','PFW']
    return frame[frame['asset_no'].isin(known_plants)]
def file_pre_dispose(file_path):
    """
    Read an uploaded usage CSV and clean its numeric columns.

    Strips thousands separators and '.00' suffixes, drops rows whose
    amount/price fields contain '-', and casts prices to float.

    :param file_path: path to the CSV file (UTF-8).
    :returns: the cleaned DataFrame, or None when the file can't be read.
    """
    try:
        df = pd.read_csv(file_path, encoding='UTF-8')
        df = df.dropna()
        temp = ['amount','price_per_unit','total_price']
        for t in temp:
            # NOTE(review): these rely on str.replace defaulting to
            # regex=True (pandas-version dependent) -- confirm.
            df[t] = df[t].astype(str).str.replace(',', '')
            df[t] = df[t].astype(str).str.replace(r'\.00','')
        temp = ['price_per_unit','total_price']
        # Discard rows with negative (or dash-containing) values.
        df.drop(index=df.loc[df['amount'].str.contains(r'\-')].index, inplace=True)
        df.drop(index=df.loc[df['price_per_unit'].str.contains(r'\-')].index, inplace=True)
        df.drop(index=df.loc[df['total_price'].str.contains(r'\-')].index, inplace=True)
        for t in temp:
            df[t] = df[t].astype(float)
        return df
    except OSError as e:
        # Unreadable file: signal failure to the caller with None.
        return None
def plant_split(x):
    """
    Translate a 2-char asset prefix (e.g. '94') into its plant name
    (e.g. 'PFA1'); unknown prefixes are returned unchanged.
    """
    mapping = {
        '94': 'PFA1', '95': 'PFA2', '97': 'PFA3', '98': 'PFS',
        '96': 'PFE', '9N': 'PFN', '9Y': 'PFY', '9H': 'PFH',
        '9C': 'PFC', '9W': 'PFW',
    }
    return mapping.get(x, x)
def plant_agg(x):
    """
    Inverse of plant_split: translate a plant name (e.g. 'PFA1') into its
    2-char asset prefix (e.g. '94'); unknown names are returned unchanged.
    """
    mapping = {
        'PFA1': '94', 'PFA2': '95', 'PFA3': '97', 'PFS': '98',
        'PFE': '96', 'PFN': '9N', 'PFY': '9Y', 'PFH': '9H',
        'PFC': '9C', 'PFW': '9W',
    }
    return mapping.get(x, x)
def delta_month(start_date, end_date):
    """Return the number of whole calendar months from start_date to end_date."""
    year_gap = end_date.year - start_date.year
    month_gap = end_date.month - start_date.month
    return year_gap * 12 + month_gap
def fill_month_sum(df):
    """
    Build zero-filled gap rows for a per-sno monthly usage dataframe.

    *df* is expected to have sno in column 0 and a 'date' column of
    'YYYY-MM' strings, sorted so each sno's rows are contiguous. For
    every missing month (before, between and after a sno's rows) a
    {'id', 'sno', 'date', 'sum': 0} row is appended to the result,
    which is also written to 'temp_g.csv' as a side effect.

    NOTE(review): when the sno changes, min_time resets to the GLOBAL
    minimum date of df, not the new sno's own minimum -- confirm intended.
    """
    #df.set_index('id',inplace=True)
    min_time = dt.strptime(df['date'].min(), '%Y-%m')
    max_time = dt.strptime(df['date'].max(), '%Y-%m')
    start_sno= df.iloc[0,0]
    temp_df = pd.DataFrame(columns=['id','sno','date','sum'])
    for i in range(0, df.shape[0]):
        if df.iloc[i, 0] != start_sno:
            # A new sno starts: first pad the previous sno up to max_time.
            # start_sno = df.iloc[i, 0]
            min_time = dt.strptime(df['date'].min(), '%Y-%m')
            if current_time != max_time:
                delta = delta_month(current_time, max_time)
                while delta > 0:
                    next_time = max_time - relativedelta(months=delta-1)
                    # a = {'id':i,'sno':start_sno,'date':next_time,'sum':0}
                    # print(a)
                    temp_df = temp_df.append({'id':i,'sno':start_sno,'date':next_time,'sum':0}, ignore_index = True)
                    delta = delta - 1
            start_sno = df.iloc[i, 0]
        current_time = dt.strptime(df.iloc[i, 1], '%Y-%m')
        # Pad any gap between the expected next month and this row's month.
        delta = delta_month(min_time, current_time)
        if delta > 0:
            while delta > 0:
                next_time = min_time + relativedelta(months=delta-1)
                # a = {'id':i,'sno':start_sno,'date':next_time,'sum':0}
                # print(a)
                temp_df = temp_df.append({'id':i,'sno':start_sno,'date':next_time,'sum':0}, ignore_index = True)
                delta = delta - 1
        # print('sno: ', start_sno)
        # print('current time: ', current_time)
        # print('month delta: ', delta)
        # else:
        #     start_sno = df.iloc[i, 0]
        #     print('start_sno: ', start_sno)
        # Next expected month for this sno.
        min_time = current_time + relativedelta(months=1)
        i = i + 1  # NOTE(review): no effect -- i is rebound by the for loop
        # print(temp_df)
    temp_df.to_csv('temp_g.csv', encoding='utf-8')
return temp_df | {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,315 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/config.py | from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
class Config(object):
    """Base Flask configuration shared by every environment."""
    # DEBUG = False
    # TSETING = False
    # Destinations (relative to the package dir) for processed upload files.
    UPLOAD_SUCCESS_PATH = "/upload/uploaded/"
    UPLOAD_FAIL_PATH = "/upload/fail/"
    # NOTE(review): credentials are hard-coded -- consider environment vars.
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://spadmin:SPADMIN@localhost:3306/spadmin"
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    SCHEDULER_API_ENABLED = True
    SCHEDULER_TIMEZONE = 'Asia/Shanghai'
    # SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=app.config['SQLALCHEMY_DATABASE_URI'])}
    # SECRET_KEY = "1qaz@WSX"
    # NOTE(review): pool size/timeout are strings here; SQLAlchemy normally
    # expects integers -- confirm this is accepted by the installed version.
    SQLALCHEMY_POOL_SIZE = "5"
    SQLALCHEMY_POOL_TIMEOUT = "15"
    # APScheduler persists its jobs in the same MySQL database.
    SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)}
    # SCHEDULER_EXECUTORS = {'default': {'type': 'threadpool', 'max_workers': 10}}
class ProductionConfig(Config):
    """Production settings: overrides the database URI."""
    SQLALCHEMY_DATABASE_URI = "mysql+pymysql://root:svw123@localhost:3306/spadmin"
class DevelopmentConfig(Config):
    """Development settings: enables Flask debug mode."""
    DEBUG = True
class TestingConfig(Config):
TSETING = True | {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,316 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/jobs.py | # from flask import current_app
import pandas as pd
import shutil
from os import path
from os import listdir
from sparepart.dao import dao
from sparepart import db
from sparepart.util import util
from datetime import datetime
from flask import current_app
def sp_job():
    """Scheduled demo job: record a prediction message for part SV000048."""
    dao.add_msg("SV000048预测数据")
def sno_month_analysis_model():
    """Placeholder for the monthly per-part analysis model (not implemented)."""
    # TODO: the training-data fetch is wired up, but the model is missing.
    df = dao.get_xgboost_data()
    pass
def import_data_into_db():
    """
    Import the first CSV found in upload/temp/ into tm_spare_part_all.

    On success the file is moved to the configured success directory and a
    log message is recorded via the DAO; on OSError it is moved to the
    fail directory. Returns False when there is nothing to import or when
    preprocessing rejects the file.
    """
    basepath = path.dirname(__file__)
    upload_file_path = basepath + '/upload/temp/'
    filename_list = listdir(upload_file_path)
    if len(filename_list) == 0:
        msg = "此次任务没有找到任何数据文件需要进行导入。"
        dao.add_msg(msg)
        return False
    filename = filename_list[0]
    file_path = upload_file_path + filename
    try:
        df = util.file_pre_dispose(file_path)
        if df is None:
            return False
        row_count = df.shape[0]
        with current_app.app_context():
            app = current_app
            db_engine = db.get_engine(app=app)
            # BUG FIX: upload_success_path was referenced below without
            # ever being defined (NameError); read it from config, the
            # same way the failure branch reads UPLOAD_FAIL_PATH.
            upload_success_path = current_app.config.get('UPLOAD_SUCCESS_PATH')
        pd.io.sql.to_sql(df, 'tm_spare_part_all', db_engine, schema="spadmin", if_exists="append", index=False)
        shutil.move(file_path, basepath + upload_success_path + filename)
        msg = filename + '数据处理完毕,成功导入' + str(row_count) + '条,并已经移入已上传目录。'
        dao.add_msg(msg)
    except OSError as error:
        # Quarantine the file so the next run doesn't pick it up again.
        with current_app.app_context():
            upload_fail_path = current_app.config.get('UPLOAD_FAIL_PATH')
        shutil.move(file_path, basepath + upload_fail_path + filename)
return None | {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,317 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/manage.py | from flask import Blueprint
from flask import current_app
from flask import jsonify
from flask import request
from sparepart import jobs
from sparepart.models import models
from sparepart.dao import dao
from os import path
from datetime import datetime
from werkzeug.utils import secure_filename
from sparepart import config
bp = Blueprint('manage', __name__)
@bp.route('/user/get')
def get_user_data():
    """List every user as a JSON array of user dicts."""
    users = models.AuthUser.query.all()
    return jsonify([user.to_json() for user in users])
@bp.route('/user/delete/<int:uid>')
def delete_user_data(uid):
    """Delete the user with the given id; report success as a boolean."""
    deleted = dao.delete_user_by_uid(uid)
    return jsonify({'msg': bool(deleted)})
@bp.route('/user/modify', methods=['POST'])
def modify_user_data():
    """Update a user's email/department/phone from POSTed form data."""
    if request.method == 'POST':
        form = request.form
        user = {
            'uid': int(form['uid']),
            'email': form['email'],
            'department': form['department'],
            'phone': form['phone'],
        }
        updated = dao.update_user(user)
        return jsonify({'msg': bool(updated)})
@bp.route('/user/insert', methods=['POST'])
def insert_user_data():
    """Create a new user from POSTed form data; report the DAO's result."""
    if request.method == 'POST':
        form = request.form
        user = models.AuthUser()
        user.set_attrs({
            'username': form['username'],
            'email': form['email'],
            'password': form['password'],
            'department': form['department'],
            'phone': form['phone'],
        })
        created = dao.add_user(user)
        return jsonify({'msg': created})
@bp.route('/system/job/add', methods=['POST'])
def add_job():
    """Register an APScheduler job from form fields.

    Form fields: jobName (job id), funcName (callable in sparepart.jobs),
    trigger ('interval' or 'date'), intervalDate ('weeks'|'days'|'hours'),
    intervalNum (count), dateValue (run date for one-shot jobs).
    """
    job_name = request.form['jobName']
    # Jobs are addressed as 'module:callable' so the scheduler can persist them.
    func_name = 'sparepart.jobs:' + request.form['funcName']
    trigger = request.form['trigger']
    if trigger == 'interval':
        interval_date = request.form['intervalDate']
        interval_num = int(request.form['intervalNum'])
        if interval_date not in ('weeks', 'days', 'hours'):
            # The original silently did nothing (and still answered success)
            # for an unknown unit; report it instead.
            return jsonify({'msg': 'unsupported interval unit'})
        # The unit name maps 1:1 onto add_job's keyword (weeks=/days=/hours=).
        current_app.apscheduler.add_job(
            func=func_name, id=job_name, trigger=trigger,
            replace_existing=True, **{interval_date: interval_num})
    elif trigger == 'date':
        # BUG FIX: the original passed hours=interval_date together with the
        # 'date' trigger; a one-shot date trigger takes run_date instead.
        current_app.apscheduler.add_job(
            func=func_name, id=job_name, trigger=trigger,
            replace_existing=True, run_date=request.form['dateValue'])
    return jsonify({'msg': 'success'})
@bp.route('/system/job/remove/<job_id>')
def remove_job(job_id):
    """Delete the scheduled job identified by *job_id*."""
    scheduler = current_app.apscheduler
    scheduler.remove_job(job_id)
    return jsonify({'msg': 'success'})
@bp.route('/system/job/pause/<job_id>')
def pause_job(job_id):
    """Suspend the scheduled job identified by *job_id*."""
    scheduler = current_app.apscheduler
    scheduler.pause_job(job_id)
    return jsonify({'msg': 'success'})
@bp.route('/system/job/resume/<job_id>')
def resume_job(job_id):
    """Resume the previously paused job identified by *job_id*."""
    scheduler = current_app.apscheduler
    scheduler.resume_job(job_id)
    return jsonify({'msg': 'success'})
@bp.route('/file/upload', methods=['POST'])
def upload_file():
    """Accept a multipart upload and stash it under upload/temp/ renamed to a
    timestamp, keeping only the original file extension."""
    if request.method == 'POST':
        upload = request.files['file']
        # secure_filename strips path tricks; only the extension is reused.
        ext = secure_filename(upload.filename).split('.')[-1]
        base_path = path.dirname(__file__)
        print(base_path)
        target_dir = path.abspath(base_path + '/upload/temp/')
        print(target_dir)
        stamp = datetime.now().strftime('%Y%m%d%H%M%S')
        upload.save(target_dir + '/' + stamp + '.' + ext)
    return jsonify({'msg': 'success'})
@bp.route('/system/msg/get')
def get_msg():
    """Return the unread system messages and their count."""
    unread = dao.get_msg_count()
    payload = {
        'msg': 'success',
        'msg_count': len(unread),
        'msg_result': [item.to_json() for item in unread],
    }
    return jsonify(payload)
@bp.route('/system/msg/update')
def update_msg():
    """Set the read flag of message *mid* from query-string parameters."""
    mid = int(request.args['mid'])
    is_read = int(request.args['is_read'])
    # NOTE(review): dao.update_msg returns a success flag, but this endpoint
    # always answers 'success'; kept as-is for client compatibility.
    dao.update_msg(mid, is_read)
    return jsonify({'msg': 'success'})
# ------------------------------
# Disabled test route for config and SQLAlchemy inspection — kept as a
# string literal so it never registers; safe to delete once unneeded.
# ------------------------------
'''
@bp.route('/test/test')
def job_test():
    # jobs.import_data_into_db()
    db_config = current_app.config.get('SQLALCHEMY_DATABASE_URI')
    upload_success_path = current_app.config.get('UPLOAD_SUCCESS_PATH')
    print('upload_success_path:', upload_success_path)
    print('db_config:', db_config)
    print('current_app_config', current_app.config.__getitem__)
    sys_config = config.Config
    print('config.py:',sys_config.__dict__)
    sys_config = config.ProductionConfig
    print('pro:', sys_config.__dict__)
    count = dao.get_msg_count()
    print(count)
    return "111"
'''
#---------------------------------------------
65,318 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/dao/dao.py | import pandas as pd
import datetime
from sparepart import db
from sparepart.models import models
from sqlalchemy import func
from sqlalchemy import desc
from sqlalchemy import extract
from sqlalchemy import distinct
from sqlalchemy import text
# def get_sno_month_analysis_data():
# SMA = models.SnoMonthAnalysis
# sma_lists = db.session.query(SMA.sno, func.sum(SMA.quantity).label('sum')).group_by(SMA.sno).order_by(desc('sum')).limit(5).all()
# # smas = models.SnoMonthAnalysis.query(extract('year', .consume_date).label('year'),func.sum(SnoMonthAnalysis.quantity).label('count')).group_by('year').limit(5).all()
# sma_rs = []
# for item in sma_lists:
# sma_rs.append({'sno':item[0],'sum':int(item[1])})
# return sma_rs
def get_scatter_data(start_year, end_year, plants):
    # -----------------------------------------------------
    # Named query backing sp_data_module's get_scatter_data.
    # :param start_year: first year of o_warehouse_date to include (inclusive)
    # :param end_year:   last year to include (inclusive)
    # :param plants: plant codes, e.g. ["PFA1", "PFA2", ...]; matched against
    #                the upper-cased first two characters of asset_no
    # :return: list of (plant, 'YYYY-MM', row count, summed amount) tuples,
    #          ordered by plant then month
    # :sql: select upper(substr(asset_no,1,2)),date_format(o_warehouse_date, '%Y-%m') as y_m,count(1),sum(amount) from tm_spare_part_all
    #       where year(o_warehouse_date) >= @start_year and year(o_warehouse_date) <= @end_year and upper(substr(asset_no,1,2)) in @plants
    #       group by upper(substr(asset_no,1,2)), date_format(o_warehouse_date, '%Y-%m')
    # -----------------------------------------------------
    tspa = models.TmSparePartAll
    temp = db.session.query(func.upper(func.substr(tspa.asset_no, 1, 2)).label('plant'), func.date_format(tspa.o_warehouse_date, '%Y-%m').label('year_month'), func.count(tspa.sno), func.sum(tspa.amount)).\
        filter(func.upper(func.substr(tspa.asset_no,1,2)).in_(plants), func.year(tspa.o_warehouse_date)>=start_year, func.year(tspa.o_warehouse_date)<=end_year).\
        group_by(func.upper(func.substr(tspa.asset_no, 1, 2)), func.date_format(tspa.o_warehouse_date, '%Y-%m')).\
        order_by('plant', 'year_month').all()
    return temp
def get_unused_sno_amount_price():
    """Totals for stock that entered early and was not issued recently.

    Groups tm_spare_part_all by (year in, year out) and sums amount and
    total_price, keeping rows whose inbound year is before 2017 and whose
    outbound year is NOT 2018/2019 — i.e. presumably long-unused stock.
    NOTE(review): the cut-off years are hard-coded in raw SQL text fragments.
    """
    tspa = models.TmSparePartAll
    # having(func.year(tspa.o_warehouse_date).notin_(['2019','2018']).label('year_o')).\
    year_o = func.year(tspa.o_warehouse_date).label('year_o')
    year_i = func.year(tspa.i_warehouse_date).label('year_i')
    temp = db.session.query(year_i, year_o, func.sum(tspa.amount), func.sum(tspa.total_price)).\
        group_by(year_i, year_o).\
        having(text("year_o not in ('2018','2019')")).\
        having(text('year_i < 2017')).\
        order_by(tspa.i_warehouse_date).all()
    return temp
def get_top5_all_plant_used_sno(start_year):
    """Top-5 part numbers (by summed amount) issued in *start_year* that were
    used by exactly 10 distinct asset_no values.

    NOTE(review): the magic count 10 presumably means "used by all plants" —
    confirm it matches the actual plant list.
    Returns (year, sno) tuples.
    """
    tsp = models.TmSparePart
    temp = db.session.query(func.year(tsp.o_warehouse_date), tsp.sno).\
        filter(func.year(tsp.o_warehouse_date) == start_year).group_by(func.year(tsp.o_warehouse_date), tsp.sno).\
        having(func.count(distinct(tsp.asset_no)) == 10).order_by(desc(func.sum(tsp.amount))).limit(5).all()
    return temp
def get_sno_type_count(start_year, end_year, plants):
    """Number of distinct part numbers issued in [start_year, end_year] for the
    given plant prefixes. Returns the first result row (a 1-tuple)."""
    tsp = models.TmSparePart
    temp = db.session.query(func.count(distinct(tsp.sno))).\
        filter(func.upper(func.substr(tsp.asset_no,1,2)).in_(plants), func.year(tsp.o_warehouse_date)>=start_year, func.year(tsp.o_warehouse_date)<=end_year).all()
    return temp[0]
def get_sno_count(start_year, end_year, plants):
    """Total issued amount in [start_year, end_year] for the given plants,
    as an int.

    NOTE(review): SUM over zero rows yields NULL/None — int(None) would raise
    a TypeError here; confirm callers only hit non-empty ranges.
    """
    tsp = models.TmSparePart
    temp = db.session.query(func.sum(tsp.amount)).filter(func.upper(func.substr(tsp.asset_no,1,2)).in_(plants), func.year(tsp.o_warehouse_date)>=start_year, func.year(tsp.o_warehouse_date)<=end_year).all()
    return int(temp[0][0])
def get_total_price_count(start_year, end_year, plants):
    """Summed total_price of parts issued in [start_year, end_year] for the
    given plant prefixes. Returns the raw SUM scalar (None when no rows)."""
    tsp = models.TmSparePart
    temp = db.session.query(func.sum(tsp.total_price)).filter(func.upper(func.substr(tsp.asset_no,1,2)).in_(plants), func.year(tsp.o_warehouse_date)>=start_year, func.year(tsp.o_warehouse_date)<=end_year).all()
    return temp[0][0]
def get_top5_sno_data():
    """Top-5 (sno, year) groups ranked by summed issued amount.

    Returns [{'sno': ..., 'sum': int}, ...]; the year column is queried for
    grouping but not included in the payload.
    """
    tsp = models.TmSparePart
    rows = db.session.query(tsp.sno, func.year(tsp.o_warehouse_date), func.sum(tsp.amount).label('sum')).group_by(tsp.sno, func.year(tsp.o_warehouse_date)).order_by(desc('sum')).limit(5).all()
    return [{'sno': row[0], 'sum': int(row[2])} for row in rows]
def get_sno_month_analysis_data():
    """Fetch every SnoMonthAnalysis row."""
    return db.session.query(models.SnoMonthAnalysis).all()
def get_xgboost_data():
    """Summed amounts per (sno, 'YYYY-MM', asset_no) — raw rows used as
    training input for the XGBoost model (presumably; confirm in callers)."""
    tsp = models.TmSparePart
    temp = db.session.query(tsp.sno, func.date_format(tsp.o_warehouse_date, '%Y-%m'), tsp.asset_no, func.sum(tsp.amount).label('sum')).group_by(tsp.sno, func.date_format(tsp.o_warehouse_date, '%Y-%m'), tsp.asset_no).all()
    return temp
def get_timeanalysis_data(sno):
    """Monthly consumption history for one spare-part number.

    :param sno: spare-part number to look up in sno_month_analysis
    :return: list of [sno, 'YYYY-MM', quantity] triples
    """
    sma = models.SnoMonthAnalysis
    rows = db.session.query(sma.sno, sma.consume_date, sma.quantity).filter(sma.sno == sno).all()
    # Collapse each consume_date (a datetime) to its year-month label.
    # Leftover debug prints from the original removed.
    return [[row_sno, consume_date.strftime('%Y-%m'), quantity]
            for row_sno, consume_date, quantity in rows]
def get_fbp_data(sno, freq):
    """Aggregate issued amounts for *sno* as a Prophet-style DataFrame.

    :param sno: spare-part number
    :param freq: 'Y', 'M' or 'D' — granularity of the date bucket
    :return: DataFrame with columns ds (date string) / y (summed amount) —
             presumably the input frame for Facebook Prophet (fbp)
    :raises ValueError: for an unsupported *freq* (the original fell through
             to a NameError on an unbound time_format)
    """
    formats = {'Y': '%Y', 'M': '%Y-%m', 'D': '%Y-%m-%d'}
    try:
        time_format = formats[freq]
    except KeyError:
        raise ValueError("freq must be one of 'Y', 'M', 'D': %r" % (freq,))
    tsp = models.TmSparePart
    rows = db.session.query(
        tsp.sno,
        func.date_format(tsp.o_warehouse_date, time_format),
        func.sum(tsp.amount),
    ).filter(tsp.sno == sno).group_by(
        tsp.sno, func.date_format(tsp.o_warehouse_date, time_format)
    ).all()
    return pd.DataFrame([[day, amount] for _, day, amount in rows],
                        columns=['ds', 'y'])
def delete_user_by_uid(uid):
    """Delete the AuthUser with primary key *uid*.

    :return: True on success, False when the user is missing or the commit
             fails (the original's bare except also swallowed the missing-user
             AttributeError/Unmapped error).
    """
    au = models.AuthUser.query.filter_by(uid=uid).first()
    if au is None:
        return False
    try:
        db.session.delete(au)
        db.session.commit()
        return True
    except Exception:
        # Roll back so the session stays usable after a failed flush.
        db.session.rollback()
        return False
def update_user(au):
    """Update email/department/phone of the user identified by au['uid'].

    :param au: dict with keys uid, email, department, phone
    :return: True on success, False when the user is missing or the commit
             fails. The original used a bare except (which also masked the
             AttributeError when no user matched) and never rolled back.
    """
    target = models.AuthUser.query.filter_by(uid=au['uid']).first()
    if target is None:
        return False
    target.email = au['email']
    target.department = au['department']
    target.phone = au['phone']
    try:
        db.session.commit()
        return True
    except Exception:
        db.session.rollback()
        return False
def add_user(au):
    """Persist a new AuthUser instance.

    :return: True on success, False on a failed commit — now consistent with
             delete_user_by_uid/update_user instead of letting the exception
             propagate to the view as a 500.
    """
    try:
        db.session.add(au)
        db.session.commit()
        return True
    except Exception:
        db.session.rollback()
        return False
def add_msg(msg):
    """Insert *msg* as an unread (is_read=0) system message owned by uid 1."""
    record = models.TmMsg(msg, 1, 0)
    db.session.add(record)
    db.session.commit()
    return True
def get_msg_count():
    """Return the unread TmMsg rows.

    Note: despite the name this returns the row list itself, not a count —
    callers take len() of it.
    """
    return models.TmMsg.query.filter_by(is_read=0).all()
def update_msg(mid, is_read):
    """Set the read flag of message *mid*.

    :return: True on success, False when the message is missing or the commit
             fails. Replaces the original bare except (which also masked the
             AttributeError for a missing row) and adds a rollback.
    """
    tm = models.TmMsg.query.filter_by(mid=mid).first()
    if tm is None:
        return False
    tm.is_read = is_read
    try:
        db.session.commit()
        return True
    except Exception:
        db.session.rollback()
        return False
| {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,319 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/data_model/spmain.py | import pandas as pd
import numpy as np
import sp_module as spm
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from model_fun import xgBoostReg
def import_csv(path):
    """Read *path* as a UTF-8 encoded CSV and return the DataFrame."""
    frame = pd.read_csv(path, encoding='utf-8')
    return frame
def clean_data(df):
    """Clean the raw consumption export and aggregate to monthly sums per sno.

    Keeps SV*/TV* part numbers only, scrubs the textual quantity column,
    groups by (sno, 'YYYY-MM'), then pads missing months via
    spm.fill_month_sum (project helper — semantics assumed, TODO confirm).
    """
    # Column names arrive in Chinese: part no / date / cost centre / qty / type.
    df = df[['物料号','日期','成本中心','数量','申购类型']]
    df = df.rename(columns={'物料号':'sno','日期':'date','成本中心':'assetno','数量':'sum','申购类型':'type'})
    # df_lx = df.loc[df['type'] == '零星']
    # df_bk = df.loc[df['type'] == '补库']
    # Keep only SV*/TV* spare-part numbers.
    df_sv = df.loc[df['sno'].str.startswith('SV')]
    df_tv = df.loc[df['sno'].str.startswith('TV')]
    df = pd.concat([df_sv, df_tv], axis=0)
    df['date'] = pd.to_datetime(df['date'], format='%Y/%m/%d')
    df['date'] = df['date'].map(lambda x:x.strftime('%Y-%m'))
    # Quantity arrives as formatted text ('1,234.00'): strip separators, then
    # drop non-numeric, empty, fractional, one known-bad part (SV200946),
    # zero rows, and implausibly large (>1000) quantities.
    df['sum'] = df['sum'].str.replace(',','')
    df['sum'] = df['sum'].str.replace('.00','')
    df.drop(index=df.loc[df['sum'].str.match(r"\D")].index, inplace=True)
    df.drop(index=df.loc[df['sum'] == ''].index, inplace=True)
    df.drop(index=df.loc[df['sum'].str.contains(r'\.')].index, inplace=True)
    df.drop(index=df.loc[df['sno'] == 'SV200946'].index, inplace=True)
    df.drop(index=df.loc[df['sum'] == '0'].index, inplace=True)
    df['sum'] = df['sum'].astype(int)
    df.drop(index=df.loc[df['sum'] > 1000].index, inplace=True)
    # Monthly totals per part number.
    df = df.groupby(['sno','date'])['sum'].sum()
    df = df.reset_index()
    # df['date'] = pd.to_datetime(df['date'], format='%Y-%M')
    # df.to_csv('g.csv', encoding='utf-8')
    temp_df = spm.fill_month_sum(df)
    # print(temp_df.head())
    # print(df.shape)
    return temp_df
def clean_data_new(df):
    """Normalise a raw ship-detail frame into (sno, date, assetno, type, sum).

    Keeps only SV*/TV* part numbers, drops malformed quantity strings and one
    known-bad part, upper-cases the cost centre, and renders dates as
    'YYYYMMDD' strings.
    """
    df = df.rename(columns={'物料号': 'sno', '日期': 'date', '成本中心': 'assetno',
                            '数量': 'sum', '申购类型': 'type'})
    # Strip stray spaces from the purchase type, then drop empty types.
    df['type'] = df['type'].str.replace(' ', '')
    df.drop(index=df.loc[df['type'] == ''].index, inplace=True)
    # Quantity sanity filters: non-numeric, empty, fractional, zero.
    df.drop(index=df.loc[df['sum'].str.match(r"\D")].index, inplace=True)
    df.drop(index=df.loc[df['sum'] == ''].index, inplace=True)
    df.drop(index=df.loc[df['sum'].str.contains(r'\.')].index, inplace=True)
    # SV200946 is excluded outright — known bad record (see clean_data too).
    df.drop(index=df.loc[df['sno'] == 'SV200946'].index, inplace=True)
    df.drop(index=df.loc[df['sum'] == '0'].index, inplace=True)
    # Keep only SV*/TV* spare parts.
    df_sv = df.loc[df['sno'].str.startswith('SV')]
    df_tv = df.loc[df['sno'].str.startswith('TV')]
    df = pd.concat([df_sv, df_tv], axis=0)
    # BUG FIX: the original used '%Y/%M/%d' and '%Y%M%d' — %M is *minutes*;
    # the month must use lowercase %m (clean_data above already did).
    # Debug shape prints removed.
    df['date'] = pd.to_datetime(df['date'], format='%Y/%m/%d')
    df['date'] = df['date'].map(lambda x: x.strftime('%Y%m%d'))
    df['assetno'] = df['assetno'].str.upper()
    df['sum'] = df['sum'].astype(int)
    return df[['sno', 'date', 'assetno', 'type', 'sum']]
def describe_data(df):
    """Print a quick exploratory summary of *df* to stdout.

    Note: df.info() prints its own report and returns None, so the 'info:'
    line itself reads 'info: None' — intentional interleaving, keep order.
    """
    print('head:', df.head())
    print('info:', df.info())
    print('describe', df.describe())
    print('skew:', df.skew(axis=0))
    print('kurtosis:', df.kurtosis(axis=0))
if __name__ == '__main__':
    # Ad-hoc driver: load one export file and clean it. The modelling
    # pipeline below (label encoding, scaling, XGBoost) is disabled inside
    # the string literal.
    df = import_csv(r'ship-detail20171.csv')
    df = clean_data(df)
    # df = clean_data_new(df)
    # describe_data(df)
    '''
    le = LabelEncoder()
    df['sno'] = le.fit_transform(df['sno'])
    # df['date'] = le.fit_transform(df['date'])
    df['assetno'] =le.fit_transform(df['assetno'])
    df['type'] = le.fit_transform(df['type'])
    #特征值
    print(df.head())
    print(df.corr())
    x = df[['sno','date','assetno','type']]
    #预测值
    y = df['sum']
    #xgboost使用
    #------------------
    #y = y/y.max(axis=0)
    #------------------
    scaler = StandardScaler()
    scaler.fit(x)
    x = scaler.transform(x)
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=1)
    #linearFun(x_train, y_train, x_test, y_test)
    xgBoostReg(x_train, y_train, x_test, y_test)
    # print(dfn.head())
    # print(y_test.head())
    # y_test['pred'] = dfn['y_pred']
    # y_test.to_csv('y.csv', encoding='utf-8')
    '''
65,320 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/login.py | from flask import Flask
from flask import Blueprint
from flask import request
from sparepart.models import models
# from flask import render_template
bp = Blueprint('login', __name__)
@bp.route('/login', methods=['POST'])
def loginValid():
    """Check posted credentials and answer with a status keyword string."""
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        print(username,password)  # NOTE(review): logs a raw password to stdout
        account = models.AuthUser.query.filter_by(username=username).first()
        # NOTE(review): passwords are compared in plaintext, and the response
        # distinguishes unknown users from wrong passwords (user enumeration).
        # Response strings kept exactly — the frontend keys off them.
        if account is None:
            return "invalidUser"
        if account.password != password:
            return "invalidPassword"
        return "validUser"
| {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,321 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/run.py | from sparepart import create_app
from flask_sqlalchemy import SQLAlchemy
# NOTE(review): this SQLAlchemy() instance is never used — the application
# factory in sparepart/__init__.py owns its own `db`; this one only shadows it.
db = SQLAlchemy()
app = create_app()
if __name__ == "__main__":
    # Dev server only. NOTE(review): port is a string; Flask coerces it, but
    # an int would be cleaner.
    app.run(host='127.0.0.1', port='5000')
65,322 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/settings/config_dev.py | from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
# Development settings for the sparepart Flask app.
DEBUG = True
# Filesystem drop zones for uploaded files.
UPLOAD_SUCCESS_PATH = "/upload/uploaded/"
UPLOAD_FAIL_PATH = "/upload/fail/"
# NOTE(review): DB credentials are committed in source (and duplicated in
# sparepart/__init__.py) — move them to environment/instance config.
SQLALCHEMY_DATABASE_URI = "mysql+pymysql://spadmin:SPADMIN@localhost:3306/spadmin"
SQLALCHEMY_TRACK_MODIFICATIONS = False
SCHEDULER_API_ENABLED = True
SCHEDULER_TIMEZONE = 'Asia/Shanghai'
# SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=app.config['SQLALCHEMY_DATABASE_URI'])}
# SECRET_KEY = "1qaz@WSX"
# NOTE(review): pool settings are strings; SQLAlchemy expects ints — confirm.
SQLALCHEMY_POOL_SIZE = "5"
SQLALCHEMY_POOL_TIMEOUT = "15"
# Persist APScheduler jobs in the same MySQL database.
SCHEDULER_JOBSTORES = {'default': SQLAlchemyJobStore(url=SQLALCHEMY_DATABASE_URI)}
| {"/sparepart/sp_data_module.py": ["/sparepart/__init__.py"], "/sparepart/jobs.py": ["/sparepart/__init__.py"], "/sparepart/manage.py": ["/sparepart/__init__.py"], "/sparepart/dao/dao.py": ["/sparepart/__init__.py"], "/sparepart/run.py": ["/sparepart/__init__.py"], "/sparepart/__init__.py": ["/sparepart/config.py"], "/sparepart/models/models.py": ["/sparepart/__init__.py"]} |
65,323 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/__init__.py | from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
import os
from flask import Flask
from flask_apscheduler import APScheduler
from apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore
from . import login
from . import manage
from . import sp_data_module
from sparepart.config import Config
def create_app(test_config=None):
    """Application factory: build and wire up the Flask app.

    NOTE(review): *test_config* is currently ignored — the config-loading
    branch below is commented out and settings are hard-coded instead.
    """
    app = Flask(__name__, instance_relative_config=False)
    # app.config.from_mapping(SECRET_KEY='dev', DATABASE=os.path.join(app.instance_path, 'flaskr.sqlite'))
    # Jinja variable delimiters are rebracketed to [[ ]] — presumably because
    # the frontend templating also uses {{ }}; TODO confirm.
    app.jinja_env.variable_start_string = '[['
    app.jinja_env.variable_end_string = ']]'
    # if test_config is None:
    #     #app.config.from_object(Config)
    #     app.config.from_pyfile('./settings/config_dev.py', silent=False)
    # else:
    #     app.config.from_object(config.TestingConfig)
    try:
        # Ensure the instance folder exists; "already exists" is ignored.
        os.mkdir(app.instance_path)
    except OSError:
        pass
    app.config['UPLOAD_SUCCESS_PATH'] = "/upload/uploaded/"
    app.config['UPLOAD_FAIL_PATH'] = "/upload/fail/"
    # NOTE(review): DB credentials hard-coded here (duplicated in
    # settings/config_dev.py) — move to environment/instance config.
    app.config['SQLALCHEMY_DATABASE_URI'] = "mysql+pymysql://spadmin:SPADMIN@localhost:3306/spadmin"
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['SCHEDULER_API_ENABLED'] = True
    app.config['SCHEDULER_TIMEZONE'] = 'Asia/Shanghai'
    # Persist APScheduler jobs in the same MySQL database.
    app.config['SCHEDULER_JOBSTORES'] = {'default': SQLAlchemyJobStore(url=app.config['SQLALCHEMY_DATABASE_URI'])}
    # db = SQLAlchemy(app)
    # from SP import models
    # db.init_app(app)
    # app.add_url_rule('/', endpoint='index')
    # Register the three blueprints, bind the shared db, then start the
    # background scheduler against this app.
    app.register_blueprint(login.bp)
    app.register_blueprint(manage.bp)
    app.register_blueprint(sp_data_module.bp)
    db.init_app(app)
    scheduler = APScheduler()
    scheduler.init_app(app)
    scheduler.start()
    return app
65,324 | Alisax31/darkHorseRace | refs/heads/master | /sparepart/models/models.py | from sparepart import db
from datetime import datetime
class EntityBase(object):
    """Mixin giving model classes a plain-dict serialisation for jsonify."""

    def to_json(self):
        """Return the instance attributes as a dict, minus ORM bookkeeping.

        BUG FIX: the original did `fields = self.__dict__` (a reference, not
        a copy) and deleted '_sa_instance_state' from it, stripping
        SQLAlchemy's instance state off the live object. Work on a copy.
        """
        fields = dict(self.__dict__)
        fields.pop("_sa_instance_state", None)
        return fields
class TmHoliday(db.Model, EntityBase):
    """A single holiday date — presumably used to skip non-working days
    in the analysis; confirm against the consumers."""
    __tablename__ = 'tm_holiday'
    hid = db.Column(db.Integer, primary_key=True)  # surrogate key
    holiday_date = db.Column(db.DateTime)          # the holiday itself
class TmMsg(db.Model, EntityBase):
    """System notification message addressed to a user."""
    __tablename__ = 'tm_msg'
    mid = db.Column(db.Integer, primary_key=True)
    message = db.Column(db.Text)                  # message body
    uid = db.Column(db.Integer)                   # recipient AuthUser.uid (no FK constraint)
    create_time = db.Column(db.DateTime, default=datetime.now)
    is_read = db.Column(db.Integer, default=0)    # 0 = unread (dao filters is_read=0)
    def __init__(self, message, uid, is_read):
        """Build a message; create_time falls back to the column default."""
        self.message = message
        self.uid = uid
        self.is_read = is_read
class AuthUser(db.Model, EntityBase):
    """Application login account.

    NOTE(review): passwords are stored and compared in plaintext (see
    login.py). The String lengths are given as strings ('50') rather than
    ints — apparently tolerated, but ints would be correct.
    """
    __tablename__ = "auth_user"
    uid = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String('50'), nullable=False)
    email = db.Column(db.String('255'))
    password = db.Column(db.String('32'), nullable=False)
    department = db.Column(db.String('45'))
    phone = db.Column(db.String('45'))
    create_time = db.Column(db.DateTime, default=datetime.now)
    update_time = db.Column(db.DateTime)
    def set_attrs(self,attrs_dict):
        """Bulk-assign known attributes from *attrs_dict*; the primary key
        'uid' and unknown keys are ignored."""
        for key,value in attrs_dict.items():
            if hasattr(self,key) and key != "uid":
                setattr(self,key,value)
class SnoMonthAnalysis(db.Model, EntityBase):
    """Pre-aggregated monthly consumption per spare-part number."""
    __tablename__ = "sno_month_analysis"
    id = db.Column(db.Integer, primary_key=True)
    sno = db.Column(db.String('50'))          # spare-part number
    consume_date = db.Column(db.DateTime)     # month bucket (dao renders it as '%Y-%m')
    quantity = db.Column(db.Integer)          # consumed amount in that month
class TmSparePart(db.Model, EntityBase):
    """Spare-part warehouse movement record (analysis subset).

    NOTE(review): `desc` is declared without a length (db.String) — MySQL
    VARCHAR needs one; presumably the table pre-exists and is never created
    from this model.
    """
    __tablename__ = "tm_spare_part"
    sid = db.Column(db.Integer, primary_key=True)
    sno = db.Column(db.String(50))              # spare-part number (SV*/TV*)
    desc = db.Column(db.String)                 # free-text description
    amount = db.Column(db.Integer)              # quantity moved
    price_per_unit = db.Column(db.Float)
    total_price = db.Column(db.Float)
    asset_no = db.Column(db.String(50))         # cost centre; dao derives the plant from its first two chars
    i_warehouse_date = db.Column(db.DateTime)   # inbound (into warehouse)
    p_type = db.Column(db.String(50))           # purchase type
    o_warehouse_date = db.Column(db.DateTime)   # outbound (issued)
class TmSparePartAll(db.Model, EntityBase):
    """Full spare-part movement table (same shape as TmSparePart but with a
    wider sno column and shorter p_type); the unfiltered source data."""
    __tablename__ = "tm_spare_part_all"
    sid = db.Column(db.Integer, primary_key=True)
    sno = db.Column(db.String(100))             # spare-part number
    desc = db.Column(db.String)                 # free-text description (no length — see TmSparePart note)
    amount = db.Column(db.Integer)              # quantity moved
    price_per_unit = db.Column(db.Float)
    total_price = db.Column(db.Float)
    asset_no = db.Column(db.String(50))         # cost centre; first two chars = plant code in dao queries
    i_warehouse_date = db.Column(db.DateTime)   # inbound (into warehouse)
    p_type = db.Column(db.String(10))           # purchase type
    o_warehouse_date = db.Column(db.DateTime)   # outbound (issued)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.