index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
34,526
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/views.py
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import Shows
from django.contrib.admin.views.decorators import staff_member_required
from .forms import ShowsModelForm
from comments.models import Comment
from comments.forms import CommentModelForm
from django.contrib.auth.decorators import login_required
from PIL import Image
# Create your views here.
def shows_list_view(request):
    """List published shows, optionally filtered by the URL category.

    The category ('all', 'live-action', 'animation') is read from the
    second-to-last path segment. Anonymous visitors see a truncated list:
    5 entries for the full catalogue, 2 for a filtered category.
    """
    context = {
        'queryset': None,
        'all': False,
        'live-action': False,
        'animation': False,
    }
    # Mark the active category from the URL, e.g. /shows/animation/ -> 'animation'.
    context[request.path.split('/')[-2]] = True

    # Previously the filter + slice logic was duplicated per category branch;
    # build the queryset once and apply the anonymous-user teaser at the end.
    queryset = Shows.objects.published()
    filtered = False
    if context['live-action']:
        queryset = queryset.filter(animation=False)
        filtered = True
    elif context['animation']:
        queryset = queryset.filter(animation=True)
        filtered = True
    if not request.user.is_authenticated:
        queryset = queryset[:2] if filtered else queryset[:5]

    context['queryset'] = queryset
    return render(request, 'shows/shows_list.html', context)
@login_required
def show_create_view(request):
    """Create a Show owned by the current user and thumbnail its image.

    On a valid submission the uploaded image is downscaled in place to at
    most 300x300 so listing pages load quickly, then a fresh empty form is
    rendered.
    """
    form = ShowsModelForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        obj = form.save(commit=False)
        obj.user = request.user
        obj.save()
        # Guard against a missing image, use the storage-provided path
        # instead of the hard-coded 'media/' prefix, close the file handle,
        # and keep the original format (a PNG re-saved as 'JPEG' at a .png
        # path would previously be corrupted).
        if obj.image:
            with Image.open(obj.image.path) as image:
                fmt = image.format or 'JPEG'
                image.thumbnail((300, 300))
                image.save(obj.image.path, fmt)
        form = ShowsModelForm()
    context = {
        'form': form
    }
    return render(request, 'shows/show_create.html', context)
@login_required
def show_update_view(request, username, slug):
    """Edit an existing show, looked up by owner username plus slug."""
    show = get_object_or_404(Shows, slug=slug, user__username__exact=username)
    form = ShowsModelForm(request.POST or None,
                          request.FILES or None, instance=show)
    if form.is_valid():
        updated = form.save(commit=False)
        updated.user = request.user
        updated.save()
    # The create template doubles as the edit template.
    return render(request, 'shows/show_create.html', {'form': form})
@login_required
def show_delete_view(request, username, slug):
    """Confirm (GET) and perform (POST) deletion of a show.

    NOTE(review): there is no ownership check — any logged-in user can
    delete any show; confirm whether that is intended.
    """
    obj = get_object_or_404(Shows, slug=slug, user__username__exact=username)
    # Check the HTTP method, not the truthiness of request.POST: a POST
    # with an empty body previously fell through to the confirmation page.
    if request.method == 'POST':
        obj.delete()
        return redirect('/shows/all/')
    context = {
        'obj': obj,
    }
    return render(request, 'shows/show_delete.html', context)
def show_detail_view(request, username, slug):
    """Render one show's page with its comments and a comment form.

    Only authenticated users may submit comments; after a successful post
    the form is replaced by a fresh empty one.
    """
    obj = get_object_or_404(Shows, slug=slug, user__username__exact=username)
    # Comments are linked loosely by (show slug, owner username), not a FK.
    comments = Comment.objects.filter(
        slug=obj.slug, commented_to__exact=username)
    form = CommentModelForm(request.POST or None)
    if request.user.is_authenticated:
        if form.is_valid():
            form_obj = form.save(commit=False)
            form_obj.slug = obj.slug
            # NOTE(review): a User object is assigned to commented_to while
            # the model declares a SlugField — presumably coerced via str();
            # confirm it matches the username used in the filter above.
            form_obj.commented_to = obj.user
            form_obj.user = request.user
            form_obj.save()
            form = CommentModelForm()
    context = {
        'obj': obj,
        "form": form,
        'comments': comments
    }
    return render(request, 'shows/show_detail.html', context)
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,527
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/migrations/0003_auto_20200525_1550.py
|
# Generated by Django 2.2.1 on 2020-05-25 11:50
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Shows.animation a required (non-null) boolean field."""
    dependencies = [
        ('shows', '0002_shows_animation'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shows',
            name='animation',
            field=models.BooleanField(),
        ),
    ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,528
|
yhtps237/ostendit
|
refs/heads/master
|
/comments/migrations/0004_auto_20200527_1156.py
|
# Generated by Django 2.2.1 on 2020-05-27 07:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change Comment.commented_to to a CharField (max 100 chars)."""
    dependencies = [
        ('comments', '0003_auto_20200527_1153'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='commented_to',
            field=models.CharField(max_length=100),
        ),
    ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,529
|
yhtps237/ostendit
|
refs/heads/master
|
/pages/views.py
|
from django.shortcuts import render
from shows.models import Shows
# Create your views here.
def home_view(request):
    """Render the landing page with a feed of published shows.

    Anonymous visitors only see the five most recent entries.
    """
    shows = Shows.objects.published()
    if not request.user.is_authenticated:
        shows = shows[:5]
    context = {
        'title': 'Hello World',
        'queryset': shows,
    }
    return render(request, 'home.html', context)
def about_view(request):
    """Render the static about page."""
    return render(request, 'about.html', {'title': 'About me'})
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,530
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/migrations/0008_auto_20200527_1448.py
|
# Generated by Django 2.2.1 on 2020-05-27 10:48
from django.db import migrations, models
class Migration(migrations.Migration):
    """Allow blank Shows.content and make Shows.published optional."""
    dependencies = [
        ('shows', '0007_auto_20200527_1037'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shows',
            name='content',
            field=models.TextField(blank=True),
        ),
        migrations.AlterField(
            model_name='shows',
            name='published',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,531
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/forms.py
|
from django import forms
from .models import Shows
class ShowsModelForm(forms.ModelForm):
    """Create/edit form for Shows with Bootstrap-styled widgets.

    Labels are blanked; presentation is driven by the 'form-control' CSS
    class and placeholders.
    """
    title = forms.CharField(label='', widget=forms.TextInput(
        attrs={'class': 'form-control'}))
    slug = forms.CharField(label='', widget=forms.TextInput(
        attrs={'placeholder': 'Your title but lowercase and replace spaces with "-". ', 'class': 'form-control'}))
    content = forms.CharField(label='', widget=forms.Textarea(
        attrs={'class': 'form-control'}))
    # published = forms.DateTimeField(
    #     label='', widget=forms.DateInput(attrs={'placeholder': 'asdf'}))
    # The placeholder documents the expected datetime input format.
    published = forms.DateTimeField(
        widget=forms.DateInput(attrs={'placeholder': 'yy-mm-dd hh-mm-ss'}))

    class Meta:
        model = Shows
        fields = [
            'title',
            'slug',
            'content',
            'published',
            'image',
            'animation',
        ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,532
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/urls.py
|
from django.urls import path
from .views import (
shows_list_view,
show_detail_view,
show_create_view,
show_update_view,
show_delete_view,
)
app_name = 'shows'  # namespace for reverse() lookups, e.g. 'shows:all'
urlpatterns = [
    # path('movies/', shows_list_view),
    # path('<slug:slug>/', show_detail_view),
    # Category listings all share shows_list_view; the view inspects the
    # URL path to decide which filter to apply.
    path('live-action/', shows_list_view, name='live-action'),
    path('animation/', shows_list_view, name='animation'),
    path('new/', show_create_view, name='new'),
    path('all/', shows_list_view, name='all'),
    # Per-show pages are keyed by owner username + show slug.
    path('<str:username>/<slug:slug>/', show_detail_view),
    path('<str:username>/<slug:slug>/edit/', show_update_view),
    path('<str:username>/<slug:slug>/delete/', show_delete_view),
    path('', shows_list_view),
]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,533
|
yhtps237/ostendit
|
refs/heads/master
|
/user/views.py
|
from django.shortcuts import render, redirect, get_object_or_404
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from shows.models import Shows
from django.db import IntegrityError
from .forms import RegistrationForm
# Create your views here.
def signup_view(request):
    """Register a new account from raw POST fields and log the user in.

    NOTE(review): RegistrationForm is instantiated for display but never
    validated; field values are read straight from request.POST, and a
    password mismatch silently re-renders the page with no error message.
    """
    form = RegistrationForm(request.POST or None)
    context = {
        'form': form,
    }
    if request.method == "GET":
        return render(request, 'user/signup.html', context)
    else:
        try:
            firstname = request.POST.get('first_name')
            lastname = request.POST.get('last_name')
            email = request.POST.get('email')
            username = request.POST.get('username')
            password1 = request.POST.get("password1")
            password2 = request.POST.get("password2")
            if password1 == password2:
                # create_user hashes the password before storing it.
                user = User.objects.create_user(
                    username=username, email=email, password=password1, first_name=firstname, last_name=lastname)
                # user = authenticate(username=username, password=password1)
                login(request, user)
                # Send the new user to their profile page.
                return redirect(f'/user/{user}')
            else:
                return render(request, 'user/signup.html', context)
        except IntegrityError:
            # Duplicate username: re-render the form.
            # context['error'] = 'This username has already been taken. Please use a new one.'
            return render(request, 'user/signup.html', context)
def login_view(request):
    """Authenticate POSTed credentials and log the user in.

    On failure, re-render the login page with an error message; on success,
    redirect to the user's profile.

    NOTE(review): AuthenticationForm's first positional argument is the
    request, not the data, so the form built here is effectively unbound;
    it is only ever used for rendering, never validated.
    """
    form = AuthenticationForm(request.POST or None)
    context = {
        'form': form
    }
    if request.method == 'GET':
        return render(request, 'user/login.html', context)
    else:
        username = request.POST.get('username')
        password = request.POST.get('password')
        # (Removed a debug print that leaked the plaintext password to stdout.)
        user = authenticate(username=username, password=password)
        if user is None:
            return render(request, 'user/login.html', {'form': AuthenticationForm(request.POST or None), 'my_error': 'Username and password did not match'})
        else:
            login(request, user)
            return redirect(f'/user/{user}')
def logout_view(request):
    """Log the user out on POST and redirect to the home page.

    Bug fix: the original returned None for non-POST requests, which makes
    Django raise "view didn't return an HttpResponse" (a 500). Non-POST
    requests now just redirect home without logging anyone out.
    """
    if request.method == 'POST':
        logout(request)
    return redirect('/')
def user_profile_view(request, username):
    """Render a user's profile with their shows.

    Anonymous visitors only see a two-show teaser.
    """
    profile_user = get_object_or_404(User, username=username)
    shows = Shows.objects.filter(user__username__exact=username)
    if not request.user.is_authenticated:
        shows = shows[:2]
    return render(request, 'user/user_profile.html', {
        'user': profile_user,
        'queryset': shows,
    })
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,534
|
yhtps237/ostendit
|
refs/heads/master
|
/comments/migrations/0005_auto_20200527_1159.py
|
# Generated by Django 2.2.1 on 2020-05-27 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change Comment.commented_to from CharField back to a SlugField."""
    dependencies = [
        ('comments', '0004_auto_20200527_1156'),
    ]
    operations = [
        migrations.AlterField(
            model_name='comment',
            name='commented_to',
            field=models.SlugField(),
        ),
    ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,535
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/migrations/0004_auto_20200525_1558.py
|
# Generated by Django 2.2.1 on 2020-05-25 11:58
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make Shows.animation optional (blank and nullable)."""
    dependencies = [
        ('shows', '0003_auto_20200525_1550'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shows',
            name='animation',
            field=models.BooleanField(blank=True, null=True),
        ),
    ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,536
|
yhtps237/ostendit
|
refs/heads/master
|
/comments/models.py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Comment(models.Model):
    """A comment on a show, addressed by (commented_to, slug).

    Comments are linked to shows loosely via the show's slug and the show
    owner's username rather than a ForeignKey.
    """
    # Author; kept (as NULL) if the user account is deleted.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.SET_NULL, null=True)
    # Username of the show's owner this comment belongs to.
    commented_to = models.SlugField()
    # Slug of the commented show; not unique — one show, many comments.
    slug = models.SlugField(unique=False)
    comment = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True)

    class Meta:
        # Newest comments first.
        ordering = ['-timestamp']
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,537
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/admin.py
|
from django.contrib import admin
from .models import Shows
# Register your models here.
# Expose Shows in the Django admin with the default ModelAdmin options.
admin.site.register(Shows)
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,538
|
yhtps237/ostendit
|
refs/heads/master
|
/shows/migrations/0007_auto_20200527_1037.py
|
# Generated by Django 2.2.1 on 2020-05-27 06:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reset Shows.slug to a plain (non-unique) SlugField."""
    dependencies = [
        ('shows', '0006_auto_20200527_0939'),
    ]
    operations = [
        migrations.AlterField(
            model_name='shows',
            name='slug',
            field=models.SlugField(),
        ),
    ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,539
|
yhtps237/ostendit
|
refs/heads/master
|
/user/urls.py
|
from django.urls import path
from .views import (
signup_view,
login_view,
logout_view,
user_profile_view,
)
app_name = 'user'  # namespace for reverse() lookups, e.g. 'user:login_view'
urlpatterns = [
    path('signup/', signup_view, name="signup_view"),
    path('login/', login_view, name="login_view"),
    path('logout/', logout_view, name="logout_view"),
    # Catch-all profile route: must stay last so it doesn't shadow the
    # fixed routes above.
    path('<str:username>/', user_profile_view, name="user_profile_view"),
]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,540
|
yhtps237/ostendit
|
refs/heads/master
|
/searches/models.py
|
from django.db import models
from django.conf import settings
# Create your models here.
class Search(models.Model):
    """A stored search query, optionally linked to the user who made it."""
    # Kept (as NULL) if the user account is deleted.
    user = models.ForeignKey(settings.AUTH_USER_MODEL,
                             on_delete=models.SET_NULL, null=True)
    query = models.CharField(max_length=100)
    timestamp = models.DateTimeField(auto_now_add=True)  # set once on creation
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,541
|
yhtps237/ostendit
|
refs/heads/master
|
/comments/forms.py
|
from django import forms
from .models import Comment
class CommentModelForm(forms.ModelForm):
    """Comment submission form; only the text is user-supplied.

    user/slug/commented_to are filled in by the consuming view before save.
    """
    comment = forms.CharField(label='', widget=forms.Textarea(
        attrs={'class': 'form-control'}))

    class Meta:
        model = Comment
        fields = [
            'comment'
        ]
|
{"/shows/views.py": ["/shows/models.py", "/shows/forms.py", "/comments/models.py", "/comments/forms.py"], "/pages/views.py": ["/shows/models.py"], "/shows/forms.py": ["/shows/models.py"], "/shows/urls.py": ["/shows/views.py"], "/user/views.py": ["/shows/models.py"], "/shows/admin.py": ["/shows/models.py"], "/user/urls.py": ["/user/views.py"], "/comments/forms.py": ["/comments/models.py"]}
|
34,549
|
triplez23/mapf-3
|
refs/heads/master
|
/utils.py
|
from matplotlib import colors
from matplotlib.animation import ArtistAnimation
import matplotlib.pyplot as plt
import numpy as np
import math
import matplotlib
matplotlib.use("Agg")
class Agent():
    """A single agent in the multi-agent pathfinding problem."""

    def __init__(self, start, target, route):
        self.start = start
        # Bug fix: the constructor previously discarded the `target` and
        # `route` arguments, always storing None.
        self.target = target
        self.route = route
def split_by_char(line):
    """Return the characters of *line* as a list."""
    return list(line)
def process_layout(layout):
    """Parse a maze layout into a character grid plus agent/target positions.

    *layout* is an iterable of text lines (e.g. an open file). 'A' cells are
    recorded in *agents*, 'T' cells in *targets* — both as (x, y) tuples —
    and blanked to ' ' in the returned grid.

    Generalized: each row is scanned at its own length (the original used
    ``len(maze[0])`` for every row, which broke on ragged layouts).
    """
    maze = [list(raw.replace('\n', '')) for raw in layout]
    agents = []
    targets = []
    for y, row in enumerate(maze):
        for x, cell in enumerate(row):
            if cell == 'A':
                agents.append((x, y))
                row[x] = ' '
            elif cell == 'T':
                targets.append((x, y))
                row[x] = ' '
    return maze, agents, targets
def reconstruct_path(maze, prev, start, end, waits):
    """Walk the predecessor map *prev* back from *end* to *start*.

    Returns a {timestep: node} dict. For every wait node found on the path,
    the node immediately before it is duplicated, modelling a one-step
    pause. *maze* is accepted for interface compatibility but unused.
    """
    path = [end]
    node = end
    while node != start:
        node = prev[node]
        path.append(node)
    path.reverse()
    for wait in waits:
        if wait in path:
            pos = path.index(wait)
            if pos > 0:
                # Duplicate the predecessor to insert a pause before the wait.
                path.insert(pos - 1, path[pos - 1])
    return dict(enumerate(path))
def get_distance(open_node, neighbour):
    """Euclidean distance between two grid nodes."""
    dy, dx = neighbour[0] - open_node[0], neighbour[1] - open_node[1]
    return math.sqrt(dx * dx + dy * dy)
def get_manhattan_distance(open_node, neighbour):
    """Manhattan (L1) distance between two grid nodes."""
    return abs(neighbour[0] - open_node[0]) + abs(neighbour[1] - open_node[1])
def transform_array_to_int(array, steps):
    """Convert a character maze into an int grid for plotting.

    Encoding: 0 empty, 1 wall 'X', 2 start 'S', 3 end 'E', 4 agent step.
    Step markers are drawn first, so fixed cell codes (walls, start, end)
    overwrite them.
    """
    int_array = np.zeros((len(array), len(array[0])))
    for step in steps:
        # steps are (x, y); numpy indexing is [row][col].
        int_array[step[1]][step[0]] = 4
    codes = {'X': 1, 'S': 2, 'E': 3}
    for i in range(len(array)):
        for j in range(len(array[0])):
            code = codes.get(array[i][j])
            if code is not None:
                int_array[i][j] = code
    # Bug fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int yields the same platform integer dtype.
    return int_array.astype(int)
def plot_paths(layout, paths):
    """Animate agents moving along *paths* over *layout* and save to disk.

    *paths* is a list of {timestep: (x, y)} dicts. Writes 'pics/start.png'
    (first frame) and 'video/anim.mp4'.
    """
    images = []
    # Colour per cell code: 0 white, 1 black (wall), 2 red (start),
    # 3 green (end), 4 blue (agent step) — see transform_array_to_int.
    cmap = colors.ListedColormap(['white', 'black', 'red', 'green', 'blue'])
    lengths = [len(path) for path in paths]
    lengths.sort()
    longest_path = lengths[-1]
    fig = plt.figure()
    for i in range(longest_path):
        # Collect every agent's position at timestep i (shorter paths have
        # simply finished and contribute nothing).
        steps = []
        for path in paths:
            if path.get(i) != None:
                steps.append(path.get(i))
        layout_arr = transform_array_to_int(layout, steps)
        # [::-1] flips rows so row 0 of the maze renders at the top.
        img = plt.pcolor(layout_arr[::-1], cmap=cmap,
                         edgecolors='k', linewidths=1)
        images.append([img])
        if i == 0:
            plt.savefig('pics/start.png')
    # Pad the animation: repeat an early frame at the start and the final
    # frame at the end. NOTE(review): images[1] raises IndexError when the
    # animation has a single frame — confirm inputs always have >= 2 steps.
    images.insert(0, images[1])
    images.append(images[-1])
    animation = ArtistAnimation(fig, images, interval=250)
    print('Animation steps:', len(images))
    animation.save('video/anim.mp4', dpi=800)
def import_current_constraints(constraints, timestep):
    """Return the constrained positions reserved for the next timestep.

    *constraints* is a list of (timestep, node) pairs; only the nodes whose
    timestep equals ``timestep + 1`` are returned.
    """
    return [c[1] for c in constraints if c[0] == timestep + 1]
def is_occupied(neighbour, current_constraint):
    """Report whether *neighbour* collides with any constrained position.

    Prints a diagnostic for every collision found (matching the original
    solver's console output) and returns True if at least one occurred.
    """
    occupied = False
    for reserved in current_constraint:
        if reserved == neighbour:
            occupied = True
            print('Deadlock at position: ', reserved, neighbour)
    return occupied
|
{"/alocate_targets.py": ["/a_star.py", "/utils.py"], "/maze_solver.py": ["/utils.py", "/a_star.py", "/alocate_targets.py"], "/a_star.py": ["/utils.py"]}
|
34,550
|
triplez23/mapf-3
|
refs/heads/master
|
/alocate_targets.py
|
from a_star import run_a_star
import numpy as np
import time
from utils import plot_paths
from itertools import permutations
class Rated_combination():
    """An agent-to-target assignment with its total travel cost.

    *pathway* is a list of (agent_index, target_index) pairs; *rating* is
    the summed path distance of that assignment.
    """

    def __init__(self, pathway, rating):
        self.pathway = pathway
        self.rating = rating
# def sort_by_distance(agents, targets):
# distance_table = [[0 for x in range(len(targets))]
# for y in range(len(agents))]
# for i in range(len(agents)):
# for j in range(len(targets)):
# distance_table[i][j]
# print(distance_table)
def rate_pathway(pathway, distance_table):
    """Sum the table distances for each (agent, target) pair in *pathway*."""
    return sum(distance_table[pair[0]][pair[1]] for pair in pathway)
def find_combination(distance_table):
    """Enumerate and rate every possible agent-to-target assignment.

    Builds one Rated_combination per permutation of targets over the
    agents (agent i paired with the i-th element of the permutation).
    """
    agents_vec = list(range(len(distance_table)))
    rated_combinations = []
    for perm in permutations(agents_vec):
        pathway = list(zip(agents_vec, perm))
        rated_combinations.append(Rated_combination(
            pathway=pathway,
            rating=rate_pathway(pathway, distance_table)))
    return rated_combinations
# return smallest overall distance travelled by agents
def sort_by_overall_dist(rated_combinations):
    """Return the combinations ordered cheapest-first by their rating."""
    return sorted(rated_combinations, key=lambda comb: comb.rating)
def alocate(maze):
    """Assign each agent a target so the summed path length is minimal.

    Runs A* for every agent/target pair to fill a distance table, rates
    every permutation of assignments, and returns the A* paths of the
    cheapest one. Brute force: O(n!) in the number of agents.
    """
    print('Agents: ', maze.agents)
    print('Targets: ', maze.targets)
    distance_table = [[0 for x in range(len(maze.targets))]
                      for y in range(len(maze.agents))]
    path_table = [[0 for x in range(len(maze.targets))]
                  for y in range(len(maze.agents))]
    for i in range(len(maze.agents)):
        for j in range(len(maze.targets)):
            # NOTE(review): this rebinds layout to the same original object,
            # it does not copy it, so A*'s 'S'/'E' writes persist — confirm
            # this reset is behaving as intended.
            maze.layout = maze.original_layout
            path = run_a_star(
                maze, maze.original_layout, maze.agents[i], maze.targets[j], put_on_a_show=True, constraints=None)
            distance_table[i][j] = len(path)
            path_table[i][j] = path
    sorted_combs = sort_by_overall_dist(find_combination(distance_table))
    best_solution = sorted_combs[0]
    paths = []
    for path in best_solution.pathway:
        paths.append(path_table[path[0]][path[1]])
    return paths
|
{"/alocate_targets.py": ["/a_star.py", "/utils.py"], "/maze_solver.py": ["/utils.py", "/a_star.py", "/alocate_targets.py"], "/a_star.py": ["/utils.py"]}
|
34,551
|
triplez23/mapf-3
|
refs/heads/master
|
/maze_solver.py
|
from termcolor import colored, cprint
import os
import time
import utils
import numpy as np
from a_star import run_a_star
from alocate_targets import alocate
class Maze():
    """Grid maze with agents and targets, plus console visualisation helpers."""

    def __init__(self, layout, original_layout, agents, targets):
        self.layout = layout                    # working grid (mutated during search)
        self.original_layout = original_layout  # reference grid used for re-planning
        self.agents = agents                    # agent start positions, (x, y)
        self.targets = targets                  # target positions, (x, y)

    def print_maze(self, clear=False):
        """Draw the maze to the terminal as coloured full-block characters."""
        if clear:
            # Clear the console on Windows ('cls') or POSIX ('clear').
            os.system('cls' if os.name == 'nt' else 'clear')
        for i in range(len(self.layout)):
            for j in range(len(self.layout[0])):
                if self.layout[i][j] == 'X':  # wall
                    cprint('\u2588\u2588', 'grey', end='')
                elif self.layout[i][j] == ' ':  # fresh node
                    cprint('\u2588\u2588', 'white', end='')
                elif self.layout[i][j] == 'S':  # start
                    cprint('\u2588\u2588', 'green', end='')
                elif self.layout[i][j] == 'E':  # end
                    cprint('\u2588\u2588', 'red', end='')
                elif self.layout[i][j] == 'O':  # opened
                    cprint('\u2588\u2588', 'yellow', end='')
                elif self.layout[i][j] == 'P':  # path
                    cprint('\u2588\u2588', 'blue', end='')
            print()

    def print_path(self, path):
        """Temporarily overlay *path* ({timestep: (x, y)}) on the grid,
        print the maze, then restore the overlaid cells to empty."""
        for value in path.values():
            self.layout[value[1]][value[0]] = 'P'
        self.print_maze()
        for value in path.values():
            self.layout[value[1]][value[0]] = ' '

    def get_neighbours(self, node):
        """Return the 4-connected neighbours of *node* plus the node itself
        (staying put is a legal move for collision avoidance)."""
        y, x = node
        neighbours = [(y + 1, x), (y, x + 1), (y - 1, x), (y, x - 1), (y, x)]
        return neighbours

    def report(self, name):
        """Print the maze, a colour legend, and search statistics."""
        self.print_maze()
        opened_counter = 0
        path_counter = 0
        for i in range(len(self.layout)):
            for j in range(len(self.layout[0])):
                if self.layout[i][j] == 'O':  # opened
                    opened_counter += 1
                elif self.layout[i][j] == 'P':  # path
                    path_counter += 1
        # Path cells were expanded too, so include them in the total.
        opened_counter += path_counter
        print(30 * '-')
        print(name)
        print(30 * '-')
        cprint('\u2588\u2588', 'green', end='')
        print(' Start')
        cprint('\u2588\u2588', 'red', end='')
        print(' End')
        cprint('\u2588\u2588', 'yellow', end='')
        print(' Opened')
        cprint('\u2588\u2588', 'blue', end='')
        print(' Path')
        cprint('\u2588\u2588', 'grey', end='')
        print(' Wall')
        print(30 * '-')
        print('Nodes expanded:', opened_counter)
        print('Path length:', path_counter)
def update_constraints(constraints, path):
    """Append every (timestep, node) pair of *path* to the constraint list.

    Mutates *constraints* in place and also returns it.
    """
    constraints.extend(path.items())
    return constraints
def run_solver(maze):
    """Plan collision-free paths for all agents by prioritised replanning.

    The longest path gets right of way and is never replanned. Every other
    path is re-run through A* with the accumulated (timestep, node)
    constraints whenever it would collide with an already-committed path.
    Finally the result is printed and rendered with utils.plot_paths.
    """
    paths = alocate(maze)
    # Longest first: ties the priority ordering to path length.
    sorted_paths = sorted(paths, key=lambda path: len(path), reverse=True)
    priority_path = sorted_paths.pop(0)
    constraints = []
    constraints = update_constraints(constraints, priority_path)
    final_paths = []
    final_paths.append(priority_path)
    for path in sorted_paths:
        # A path is "locked" if any of its (timestep, node) pairs is
        # already reserved by a higher-priority agent.
        lock = False
        for key, value in path.items():
            if (key, value) in constraints:
                lock = True
                break
        if lock:
            # Conflict: replan from this path's start to its goal under the
            # current constraints, then commit the new path's reservations.
            path = run_a_star(
                maze, maze.original_layout, path.get(
                    0), list(path.values())[-1],
                put_on_a_show=False, constraints=constraints)
            final_paths.append(path)
            constraints = update_constraints(constraints, path)
        else:
            final_paths.append(path)
    print('Final paths: ')
    for path in final_paths:
        print(path)
    utils.plot_paths(maze.original_layout, final_paths)
def Main():
    """Entry point: load a layout file, build the maze, and solve it.

    NOTE(review): the file handle is never closed; a ``with open(...)``
    block would be safer.
    """
    layout = open('data/many_agents-10.txt', 'r')
    maze, agents, targets = utils.process_layout(layout)
    # The same parsed grid serves as both working and original layout.
    maze = Maze(maze, maze, agents, targets)
    run_solver(maze)
# Runs at import time — this module doubles as the program entry point.
Main()
|
{"/alocate_targets.py": ["/a_star.py", "/utils.py"], "/maze_solver.py": ["/utils.py", "/a_star.py", "/alocate_targets.py"], "/a_star.py": ["/utils.py"]}
|
34,552
|
triplez23/mapf-3
|
refs/heads/master
|
/a_star.py
|
import utils
import heapq
def run_a_star(maze, layout, start, end, put_on_a_show, constraints):
    """A* search from *start* to *end*; nodes are (x, y) tuples.

    Uses the Manhattan distance as both step cost and heuristic. When
    *constraints* is given (list of (timestep, node) pairs), positions
    reserved for the next timestep are treated as occupied and recorded in
    *waits* so reconstruct_path can insert pauses.

    Returns a {timestep: node} dict, or None (implicitly) if the queue
    empties without reaching *end*.

    NOTE(review): `put_on_a_show` is unused here. Also `queue` holds
    (priority, node, timestep) tuples, so the `neighbour not in queue`
    membership tests compare a bare node against those tuples and appear to
    always be true — confirm whether that is intended.
    """
    queue = []
    closed = []   # fully-expanded nodes (list membership is O(n))
    prev = {}     # predecessor map used for path reconstruction
    distance = {} # best-known g-cost per node
    timestep = 0
    heapq.heappush(queue, (0, start, timestep))
    distance[start] = 0
    # Mark start/end on the drawing layout (mutates the caller's grid).
    layout[start[1]][start[0]] = 'S'
    layout[end[1]][end[0]] = 'E'
    current_constraint = []
    waits = []
    while queue:
        open_node = heapq.heappop(queue)[1]
        if open_node == end:
            path = utils.reconstruct_path(layout, prev, start, end, waits)
            return path
        if constraints:
            current_constraint = utils.import_current_constraints(
                constraints, timestep)
        neighbours = maze.get_neighbours(open_node)
        for neighbour in neighbours:
            if neighbour not in closed:
                # g-cost through open_node plus heuristic to the goal.
                distance_to_node = distance[open_node] + \
                    utils.get_manhattan_distance(open_node, neighbour)
                distance_to_end = utils.get_manhattan_distance(neighbour, end)
                if constraints:
                    occupied = utils.is_occupied(neighbour, current_constraint)
                else:
                    occupied = False
                if maze.layout[neighbour[1]][neighbour[0]] != 'X':
                    if not occupied:
                        if (neighbour not in queue) or (distance_to_node < distance[neighbour]):
                            prev[neighbour] = open_node
                            distance[neighbour] = distance_to_node
                            if neighbour not in queue:
                                heapq.heappush(
                                    queue, (distance_to_node + distance_to_end, neighbour, timestep))
                                closed.append(neighbour)
                    elif occupied:
                        # Remember blocked cells so the final path can pause.
                        if neighbour not in waits:
                            waits.append(neighbour)
        closed.append(open_node)
        timestep += 1
|
{"/alocate_targets.py": ["/a_star.py", "/utils.py"], "/maze_solver.py": ["/utils.py", "/a_star.py", "/alocate_targets.py"], "/a_star.py": ["/utils.py"]}
|
34,561
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/main.py
|
# In[] imports
# from ctypes import *
from ctypes import CDLL
#import ctypes
import os
# import numpy and sin, cos for convenience
import numpy as np
# handbuilt functions for all this
from utils import tic, toc, vis
from trim import trim
from sim import upd_sim
from mpc import linearise, dmom, calc_MC, calc_x_seq, calc_HFG, dlqr, square_mat_degen_2d
# import progressbar for convenience
import progressbar
# import parameters
from parameters import initial_state_vector_ft_rad, simulation_parameters, paras_mpc
# import exit() function for debugging
from sys import exit
# from scipy.linalg import expm, inv, pinv
from scipy.signal import cont2discrete
# In[]
#----------------------------------------------------------------------------#
#-------------------------prepare data for nlplant.c-------------------------#
#----------------------------------------------------------------------------#
# unwrap simulation parameters
time_step, time_start, time_end, stab_flag, fi_flag = simulation_parameters
# create interface with c shared library .so file in folder "C"
# (xcg35 = unstable 35% cg model, xcg25 = stable 25% cg model)
if stab_flag == 1:
    so_file = os.getcwd() + "/C/nlplant_xcg35.so"
elif stab_flag == 0:
    so_file = os.getcwd() + "/C/nlplant_xcg25.so"
nlplant = CDLL(so_file)
# initialise x
x = initial_state_vector_ft_rad
# In[]
#----------------------------------------------------------------------------#
#---------------------------------Simulate-----------------------------------#
#----------------------------------------------------------------------------#
# indices of the states returned as outputs by the linearised model.
# NOTE(review): confirm which physical states indices 6-11 correspond to
# in the full state vector.
output_vars = [6,7,8,9,10,11]
# trim aircraft at 10,000 ft altitude and 700 ft/s airspeed
h_t = 10000
v_t = 700
x, opt_res = trim(h_t, v_t, fi_flag, nlplant)
# the trimmed control inputs live in state slots 12-15
u = x[12:16]
# x = x[np.newaxis].T
# turn x, u into column vectors (matrices)
x = x[np.newaxis].T
u = u[np.newaxis].T
# remember the trim state: the LQR regulates back towards it
x0 = np.copy(x)
rng = np.linspace(time_start, time_end, int((time_end-time_start)/time_step))
# create storage for the state history and the per-step linearisations
x_storage = np.zeros([len(rng),len(x)])
A = np.zeros([len(x),len(x),len(rng)])
B = np.zeros([len(x),len(u),len(rng)])
C = np.zeros([len(output_vars),len(x),len(rng)])
D = np.zeros([len(output_vars),len(u),len(rng)])
# Earlier full-state weight matrices, kept for reference:
# Q = np.eye(A.shape[0])
# Q[0,0] = 0
# Q[1,1] = 0
# Q[2,2] = 0.1
# Q[3,3] = 0.1
# Q[4,4] = 0.1
# Q[5,5] = 0
# Q[6,6] = 0.5
# Q[7,7] = 1
# Q[8,8] = 1
# Q[9,9] = 100
# Q[10,10] = 100
# Q[11,11] = 100
# Q[12,12] = 0
# Q[13,13] = 0
# Q[14,14] = 0
# Q[15,15] = 0
# Q[16,16] = 0
# Q[17,17] = 0
# R = np.eye(B.shape[1])
# R[0,0] = 1000
# R[1,1] = 10
# R[2,2] = 100
# R[3,3] = 1
# LQR weights on the reduced 9-state model and the 4 control inputs
Q = np.eye(9)
R = np.eye(4)
bar = progressbar.ProgressBar(maxval=len(rng)).start()
tic()
for idx, val in enumerate(rng):
    #----------------------------------------#
    #------------linearise model-------------#
    #----------------------------------------#
    [A[:,:,idx], B[:,:,idx], C[:,:,idx], D[:,:,idx]] = linearise(x, u, output_vars, fi_flag, nlplant)
    # discretise the continuous linearisation at the simulation step
    Ad, Bd, Cd, Dd = cont2discrete((A[:,:,idx],B[:,:,idx],C[:,:,idx],D[:,:,idx]), time_step)[0:4]
    #----------------------------------------#
    #--------------Take Action---------------#
    #----------------------------------------#
    # reduce the model to the 9 states used by the LQR before solving
    degen_idx = [2,3,4,6,7,8,9,10,11]
    Ad = square_mat_degen_2d(Ad, degen_idx)
    Bd = np.vstack((Bd[2:5,:], Bd[6:12,:]))
    x_degen = np.array([x[i] for i in degen_idx])
    x0_degen = np.array([x0[i] for i in degen_idx])
    K = dlqr(Ad,Bd,Q,R)
    # state-feedback regulation back towards the trim state x0
    u = - (K @ (x_degen - x0_degen))
    #----------------------------------------#
    #--------------Integrator----------------#
    #----------------------------------------#
    x = upd_sim(x, u, fi_flag, time_step, nlplant)
    #----------------------------------------#
    #------------Store History---------------#
    #----------------------------------------#
    x_storage[idx,:] = x[:,0]
    bar.update(idx)
toc()
# In[]
#----------------------------------------------------------------------------#
#---------------------------------Visualise----------------------------------#
#----------------------------------------------------------------------------#
#%matplotlib qt
vis(x_storage, rng)
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,562
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/parameters.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 8 22:02:06 2020
@author: johnviljoen
"""
''' This file contains all parameters (paras) required to run the simulation,
the aircraft, environmental, simulation, initial conditions, and other parameters'''
#import numpy as np
import numpy as np
from numpy import pi
from scipy.constants import g
# In[simulation parameters]
# all times in seconds
time_step, time_start, time_end = 0.001, 0., 3.
# fi_flag = 1 -> high fidelity model (full Nguyen)
# fi_flag = 0 -> low fidelity model (Stevens Lewis reduced)
fi_flag = 1
# stability_flag only functional for high fidelity model currently!
# stability_flag = 1 -> unstable xcg 35% model
# stability_flag = 0 -> stable xcg 25% model
stab_flag = 0
# In[MPC parameters]
# hzn: prediction horizon length (steps); pred_dt: prediction time step (s)
hzn = 10
pred_dt = 0.001
# In[initial_conditions]
''' states in m/s, rad, rad/s '''
npos = 0.                   # m, north position
epos = 0.                   # m, east position
h = 3048.                   # m, altitude
phi = 0.                    # rad, roll angle
theta = 0.                  # rad, pitch angle
psi = 0.                    # rad, yaw angle
vt = 213.36                 # m/s, true airspeed
alpha = 1.0721 * pi/180     # rad, angle of attack
beta = 0.                   # rad, sideslip angle
p = 0.                      # rad/s, roll rate
q = 0.                      # rad/s, pitch rate
r = 0.                      # rad/s, yaw rate
''' control states in lbs, deg '''
T = 2886.6468               # lbs, thrust
dh = -2.0385                # deg, horizontal stabilator
da = -0.087577              # deg, aileron
dr = -0.03877               # deg, rudder
lef = 0.3986                # deg, leading edge flap
# In[limits]
# lower bounds on states (units as commented per line)
npos_min = -np.inf          # (m)
epos_min = -np.inf          # (m)
h_min = 0                   # (m)
phi_min = -np.inf           # (deg)
theta_min = -np.inf         # (deg)
psi_min = -np.inf           # (deg)
V_min = 0                   # (m/s)
alpha_min = -20.            # (deg)
beta_min = -30.             # (deg)
p_min = -30                 # (deg/s)
q_min = -10                 # (deg/s)
r_min = -5                  # (deg/s)
# lower bounds on actuators
T_min = 1000                # (lbs)
dh_min = -25                # (deg)
da_min = -21.5              # (deg)
dr_min = -30.               # (deg)
lef_min = 0.                # (deg)
# upper bounds on states
npos_max = np.inf           # (m)
epos_max = np.inf           # (m)
h_max = 10000               # (m)
phi_max = np.inf            # (deg)
theta_max = np.inf          # (deg)
psi_max = np.inf            # (deg)
V_max = 900                 # (m/s)
alpha_max = 90              # (deg)
beta_max = 30               # (deg)
p_max = 30                  # (deg/s)
q_max = 10                  # (deg/s)
r_max = 5                   # (deg/s)
# upper bounds on actuators
T_max = 19000               # (lbs)
dh_max = 25                 # (deg)
da_max = 21.5               # (deg)
dr_max = 30                 # (deg)
lef_max = 25                # (deg)
# In[wrap for input]
# initial_state_vector = np.array([npos, epos, h, phi, theta, psi, vt, alpha, beta, p, q, r, T, dh, da, dr, lef, fi_flag])
simulation_parameters = [time_step, time_start, time_end, stab_flag, fi_flag]
paras_mpc = [hzn, pred_dt]
m2f = 3.28084 # metres to feet conversion
f2m = 1/m2f # feet to metres conversion
# positions/velocity converted to imperial units; the final element (-alpha in deg)
# is presumably the internal leading-edge-flap actuator state — TODO confirm
initial_state_vector_ft_rad = np.array([npos*m2f, epos*m2f, h*m2f, phi, theta, psi, vt*m2f, alpha, beta, p, q, r, T, dh, da, dr, lef, -alpha*180/pi])
# actuator limits: row 0 = upper bounds, row 1 = lower bounds
act_lim = [[T_max, dh_max, da_max, dr_max, lef_max],
           [T_min, dh_min, da_min, dr_min, lef_min]]
# state limits: row 0 = upper bounds, row 1 = lower bounds
x_lim = [[npos_max, epos_max, h_max, phi_max, theta_max, psi_max, V_max, alpha_max, beta_max, p_max, q_max, r_max],
         [npos_min, epos_min, h_min, phi_min, theta_min, psi_min, V_min, alpha_min, beta_min, p_min, q_min, r_min]]
# In[additional info provided for brevity]
# weight = 91188 # Newtons
# Ixx = 12875 # Kg m^2
# Iyy = 75674 # Kg m^2
# Izz = 85552 # Kg m^2
# Ixz = 1331 # Kg m^2
# # the other Izy, Iyz = 0
# b = 9.144 # m wingspan
# S = 27.87 # m^2 wing area
# cbar = 3.45 # m wing mean aerodynamic chord
# He = 216.9 # engine angular momentum constant
# x_cg_ref = 0.35 * cbar # assuming mac = cbar
# x_cg = 0.8*x_cg_ref # FOR NOW THIS IS WRONG
# # unecessary:
# length = 14.8 #m
# height = 4.8 #m
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,563
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/sim.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 14:50:18 2021
@author: johnviljoen
"""
import numpy as np
from numpy import pi
from parameters import act_lim
import ctypes
def upd_thrust(T_cmd, T_state):
    """Thrust actuator model: saturate the commanded thrust to its limits,
    then return the rate-limited state derivative."""
    # clamp the demand to [T_min, T_max]
    saturated_cmd = np.clip(T_cmd, act_lim[1][0], act_lim[0][0])
    # first-order response, rate-limited to +/- 10000 lbs/s
    return np.clip(saturated_cmd - T_state, -10000, 10000)
def upd_dstab(dstab_cmd, dstab_state):
    """Stabilator actuator model: saturate the command, then return the
    rate-limited first-order state derivative (gain 20.2)."""
    # clamp the demand to the stabilator deflection limits
    saturated_cmd = np.clip(dstab_cmd, act_lim[1][1], act_lim[0][1])
    # first-order response, rate-limited to +/- 60 deg/s
    return np.clip(20.2 * (saturated_cmd - dstab_state), -60, 60)
def upd_ail(ail_cmd, ail_state):
    """Aileron actuator model: saturate the command, then return the
    rate-limited first-order state derivative (gain 20.2)."""
    # clamp the demand to the aileron deflection limits
    saturated_cmd = np.clip(ail_cmd, act_lim[1][2], act_lim[0][2])
    # first-order response, rate-limited to +/- 80 deg/s
    return np.clip(20.2 * (saturated_cmd - ail_state), -80, 80)
def upd_rud(rud_cmd, rud_state):
    """Rudder actuator model: saturate the command, then return the
    rate-limited first-order state derivative (gain 20.2)."""
    # clamp the demand to the rudder deflection limits
    saturated_cmd = np.clip(rud_cmd, act_lim[1][3], act_lim[0][3])
    # first-order response, rate-limited to +/- 120 deg/s
    return np.clip(20.2 * (saturated_cmd - rud_state), -120, 120)
def upd_lef(h, V, coeff, alpha, lef_state_1, lef_state_2, nlplant):
    """Leading-edge-flap actuator model.

    Calls the C routine `atmos` (which writes into `coeff` in place), forms the
    LEF command from angle of attack and the atmospheric term, and returns the
    derivatives of the two internal LEF states.
    """
    nlplant.atmos(ctypes.c_double(h),ctypes.c_double(V),ctypes.c_void_p(coeff.ctypes.data))
    # NOTE(review): coeff[1]/coeff[2] is presumably qbar/ps — confirm against atmos()
    atmos_out = coeff[1]/coeff[2] * 9.05
    alpha_deg = alpha*180/pi
    # error of the first internal LEF state against the alpha-driven target
    LF_err = alpha_deg - (lef_state_1 + (2 * alpha_deg))
    #lef_state_1 += LF_err*7.25*time_step
    LF_out = (lef_state_1 + (2 * alpha_deg)) * 1.38
    lef_cmd = LF_out + 1.45 - atmos_out
    # command saturation
    lef_cmd = np.clip(lef_cmd,act_lim[1][4],act_lim[0][4])
    # rate saturation
    lef_err = np.clip((1/0.136) * (lef_cmd - lef_state_2),-25,25)
    # (derivative of internal state 1, derivative of deflection state 2)
    return LF_err*7.25, lef_err
def calc_xdot(x, u, fi_flag, nlplant):
    """Compute the full 18-state derivative vector.

    x       : (18,1) state vector; rows 12-17 are the actuator states
    u       : length-4 demand (thrust, stabilator, aileron, rudder)
    fi_flag : fidelity flag passed through to the compiled plant
    nlplant : ctypes handle to the nlplant shared library
    Returns xdot as an (18,1) array.
    """
    # initialise variables
    xdot = np.zeros([18,1])
    temp = np.zeros(6)      # actuator-state derivatives, filled below
    coeff = np.zeros(3)     # scratch buffer written in place by nlplant.atmos
    #--------------Thrust Model--------------#
    temp[0] = upd_thrust(u[0], x[12])
    #--------------Dstab Model---------------#
    temp[1] = upd_dstab(u[1], x[13])
    #-------------aileron model--------------#
    temp[2] = upd_ail(u[2], x[14])
    #--------------rudder model--------------#
    temp[3] = upd_rud(u[3], x[15])
    #--------leading edge flap model---------#
    temp[5], temp[4] = upd_lef(x[2], x[6], coeff, x[7], x[17], x[16], nlplant)
    #----------run nlplant for xdot----------#
    # the C routine writes the rigid-body derivatives directly into xdot's buffer
    nlplant.Nlplant(ctypes.c_void_p(x.ctypes.data), ctypes.c_void_p(xdot.ctypes.data), ctypes.c_int(fi_flag))
    # overwrite rows 12-17 with the actuator-model derivatives computed above
    xdot[12:18,0] = temp
    return xdot
def upd_sim(x, u, fi_flag, time_step, nlplant):
    """Advance the state one explicit-Euler step of length time_step.

    NOTE: x is modified in place (`+=`) and also returned.
    """
    derivative = calc_xdot(x, u, fi_flag, nlplant)
    x += derivative * time_step
    return x
def calc_out(x, u, output_vars):
    """Return the observed rows of the state vector.

    `u` is accepted only for a uniform (x, u) call signature and is unused.
    """
    observed = x[output_vars]
    return observed
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,564
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/trim.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 14:49:02 2021
@author: johnviljoen
"""
import numpy as np
from numpy import pi
# import scipy fmin for trim function
from scipy.optimize import minimize
from sim import calc_xdot
from parameters import act_lim, x_lim
# calculate objective function for trimming
def obj_func(UX0, h_t, v_t, fi_flag, nlplant):
    """Trim cost: weighted sum of squared state derivatives.

    UX0 is the optimiser decision vector (P3 thrust, dh, da, dr, alpha);
    h_t, v_t fix the altitude (ft) and airspeed (ft/s). Returns a scalar that
    is zero for a perfectly trimmed straight-and-level flight condition.
    """
    V = v_t
    h = h_t
    P3, dh, da, dr, alpha = UX0
    npos = 0
    epos = 0
    #h
    phi = 0
    #theta = alpha in straight level flight
    psi = 0
    #V
    #alpha
    beta = 0
    p = 0
    q = 0
    r = 0
    #P3
    #dh
    #da
    #dr
    #dlef1
    #dlef2
    # standard-atmosphere model (imperial units) to find dynamic/static pressure
    rho0 = 2.377e-3
    tfac = 1 - 0.703e-5*h
    temp = 519*tfac
    if h >= 35000:
        temp = 390
    rho = rho0*tfac**4.14
    qbar = 0.5*rho*V**2
    ps = 1715*rho*temp
    # leading-edge-flap schedule as a function of alpha and qbar/ps
    dlef = 1.38*alpha*180/pi - 9.05*qbar/ps + 1.45
    # assemble the candidate state: theta = alpha for straight level flight
    x = np.array([npos, epos, h, phi, alpha, psi, V, alpha, beta, p, q, r, P3, dh, da, dr, dlef, -alpha*180/pi])
    # set thrust limits
    if x[12] > act_lim[0][0]:
        x[12] = act_lim[0][0]
    elif x[12] < act_lim[1][0]:
        x[12] = act_lim[1][0]
    # set elevator limits
    if x[13] > act_lim[0][1]:
        x[13] = act_lim[0][1]
    elif x[13] < act_lim[1][1]:
        x[13] = act_lim[1][1]
    # set aileron limits
    if x[14] > act_lim[0][2]:
        x[14] = act_lim[0][2]
    elif x[14] < act_lim[1][2]:
        x[14] = act_lim[1][2]
    # set rudder limits
    if x[15] > act_lim[0][3]:
        x[15] = act_lim[0][3]
    elif x[15] < act_lim[1][3]:
        x[15] = act_lim[1][3]
    # set alpha limits
    if x[7] > x_lim[0][7]*pi/180:
        x[7] = x_lim[0][7]*pi/180
    elif x[7] < x_lim[1][7]*pi/180:
        x[7] = x_lim[1][7]*pi/180
    u = np.array([x[12],x[13],x[14],x[15]])
    xdot = calc_xdot(x, u, fi_flag, nlplant)
    # weights on the squared derivatives of the 12 rigid-body states
    phi_w = 10
    theta_w = 10
    psi_w = 10
    weight = np.array([0, 0, 5, phi_w, theta_w, psi_w, 2, 10, 10, 10, 10, 10]).transpose()
    cost = np.matmul(weight,xdot[0:12]**2)
    return cost
def trim(h_t, v_t, fi_flag, nlplant):
    """Trim the aircraft for straight-and-level flight.

    h_t     : altitude (ft)
    v_t     : airspeed (ft/s)
    fi_flag : fidelity flag passed through to the plant
    nlplant : ctypes handle to the nlplant shared library
    Returns (x_trim, opt) — the trimmed 18-element state vector and the full
    scipy.optimize result.
    """
    # initial guesses
    thrust = 5000           # thrust, lbs
    elevator = -0.09        # elevator, degrees
    alpha = 8.49            # AOA, degrees
    rudder = -0.01          # rudder angle, degrees
    aileron = 0.01          # aileron, degrees
    # BUGFIX: obj_func unpacks UX0 as (P3, dh, da, dr, alpha); the previous
    # ordering [thrust, elevator, alpha, rudder, aileron] swapped the alpha and
    # aileron guesses, seeding the optimiser with an 8.49 deg aileron guess.
    UX0 = [thrust, elevator, aileron, rudder, alpha]
    options={
        'gtol': 1e-05,
        'norm': np.inf,
        'eps': 1.4901161193847656e-08,
        'maxiter': 10000,
        'disp': False,
        'return_all': False,
        'finite_diff_rel_step': None}
    opt = minimize(obj_func, UX0, args=((h_t, v_t, fi_flag, nlplant)), method='BFGS',tol=1e-14,options=options)
    # unpack the optimal decision vector in obj_func's ordering
    P3_t, dstab_t, da_t, dr_t, alpha_t = opt.x
    # recompute the leading-edge-flap deflection at the trimmed alpha
    rho0 = 2.377e-3
    tfac = 1 - 0.703e-5*h_t
    temp = 519*tfac
    if h_t >= 35000:
        temp = 390
    rho = rho0*tfac**4.14
    qbar = 0.5*rho*v_t**2
    ps = 1715*rho*temp
    dlef = 1.38*alpha_t*180/pi - 9.05*qbar/ps + 1.45
    # theta = alpha for straight level flight
    x_trim = np.array([0, 0, h_t, 0, alpha_t, 0, v_t, alpha_t, 0, 0, 0, 0, P3_t, dstab_t, da_t, dr_t, dlef, -alpha_t*180/pi])
    return x_trim, opt
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,565
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/utils.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 14:45:08 2021
@author: johnviljoen
"""
# import time for tic toc functions
import time
# import matplotlib for visualisation
import matplotlib.pyplot as plt
from numpy import pi
# In[]
def TicTocGenerator():
    """Yield the wall-clock time elapsed since the previous resumption.

    The first value yielded is the (tiny) time between the first next() call's
    two internal time.time() samples.
    """
    previous = time.time()
    while True:
        current = time.time()
        yield current - previous   # seconds since the last measurement
        previous = current
TicToc = TicTocGenerator()  # shared module-level instance used by tic()/toc()
# This will be the main function through which we define both tic() and toc()
def toc(tempBool=True):
    """Advance the shared TicToc generator; print the interval unless
    tempBool is False."""
    elapsed = next(TicToc)
    if tempBool:
        print( "Elapsed time: %f seconds.\n" %elapsed )
def tic():
    """Mark the start of a timing interval (advances TicToc silently)."""
    toc(tempBool=False)
# In[]
def vis(x_storage, rng):
    """Plot the simulated state histories against time.

    Figure 1: the 12 rigid-body states (angles/rates converted to degrees).
    Figure 2: the 5 actuator states.
    x_storage : (len(rng), 18) array of state history
    rng       : time vector (s)
    """
    fig, axs = plt.subplots(12, 1)
    #fig.suptitle('Vertically stacked subplots')
    axs[0].plot(rng, x_storage[:,0])
    axs[0].set_ylabel('npos (ft)')
    axs[1].plot(rng, x_storage[:,1])
    axs[1].set_ylabel('epos (ft)')
    axs[2].plot(rng, x_storage[:,2])
    axs[2].set_ylabel('h (ft)')
    axs[3].plot(rng, x_storage[:,3])
    # BUGFIX: mathtext labels must be raw strings — '$\\theta$' written as a
    # plain string contains the '\t' escape (a literal tab), corrupting the label
    axs[3].set_ylabel(r'$\phi$ (rad)')
    axs[4].plot(rng, x_storage[:,4])
    axs[4].set_ylabel(r'$\theta$ (rad)')
    axs[5].plot(rng, x_storage[:,5])
    axs[5].set_ylabel(r'$\psi$ (rad)')
    axs[6].plot(rng, x_storage[:,6])
    axs[6].set_ylabel("V_t (ft/s)")
    axs[7].plot(rng, x_storage[:,7]*180/pi)
    axs[7].set_ylabel('alpha (deg)')
    axs[8].plot(rng, x_storage[:,8]*180/pi)
    axs[8].set_ylabel('beta (deg)')
    axs[9].plot(rng, x_storage[:,9]*180/pi)
    axs[9].set_ylabel('p (deg/s)')
    axs[10].plot(rng, x_storage[:,10]*180/pi)
    axs[10].set_ylabel('q (deg/s)')
    axs[11].plot(rng, x_storage[:,11]*180/pi)
    axs[11].set_ylabel('r (deg/s)')
    axs[11].set_xlabel('time (s)')
    fig2, axs2 = plt.subplots(5,1)
    axs2[0].plot(rng, x_storage[:,12])
    axs2[0].set_ylabel('P3')
    axs2[1].plot(rng, x_storage[:,13])
    axs2[1].set_ylabel('dh')
    axs2[2].plot(rng, x_storage[:,14])
    axs2[2].set_ylabel('da')
    axs2[3].plot(rng, x_storage[:,15])
    axs2[3].set_ylabel('dr')
    axs2[4].plot(rng, x_storage[:,16])
    axs2[4].set_ylabel('lef')
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,566
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/redundant/gym_testing.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 10 13:54:33 2021
@author: johnviljoen
"""
from parameters import x_lim, act_lim
import numpy as np
import gym
from gym import spaces
class F16_env(gym.Env):
    """Custom Environment that follows gym interface"""
    metadata = {'render.modes': ['human']}
    def __init__(self, arg1, arg2):
        # NOTE(review): arg1 and arg2 are currently unused — confirm intended signature
        super(F16_env, self).__init__()
        # Define action and observation space
        # They must be gym.spaces objects
        # Example when using discrete actions:
        # actions are the first four actuator demands (T, dh, da, dr)
        self.action_space = spaces.Box(low=np.array(act_lim[1])[0:4], high=np.array(act_lim[0])[0:4])
        # observations span the 12 rigid-body state limits from parameters.py
        self.observation_space = spaces.Box(low=np.array(x_lim[1]), high=np.array(x_lim[0]))
        #self.action_space = spaces.Box(low=act_lim[1], high=act_lim[0], shape=(np.array([len(act_lim[0]),1,1])), dtype=np.float64)
        # Example for using image as input:
        #self.observation_space = spaces.Box(low=x_lim[1], high=x_lim[0] shape=(len(x_lim[0]), 1, 1), dtype=np.float64)
    # def step(self, action):
    #     # Execute one time step within the environment
    #     ...
    # def reset(self):
    #     # Reset the state of the environment to an initial state
    #     ...
    # def render(self, mode='human', close=False):
    #     # Render the environment to the screen
    #     ...
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,567
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/redundant/d_mpc_testing.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 9 19:40:12 2021
@author: johnviljoen
"""
# In[] imports
# from ctypes import *
from ctypes import CDLL
#import ctypes
import os
# import numpy and sin, cos for convenience
import numpy as np
# handbuilt functions for all this
from utils import tic, toc, vis
from trim import trim
from sim import upd_sim, calc_xdot
from mpc import linearise, dmom, calc_HFG, calc_MC
# import progressbar for convenience
import progressbar
# import parameters
from parameters import initial_state_vector_ft_rad, simulation_parameters, paras_mpc
# import exit() function for debugging
from sys import exit
# In[]
#----------------------------------------------------------------------------#
#-------------------------prepare data for nlplant.c-------------------------#
#----------------------------------------------------------------------------#
# unwrap simulation parameters
time_step, time_start, time_end, stab_flag, fi_flag = simulation_parameters
# create interface with c shared library .so file in folder "C"
# NOTE(review): if stab_flag is neither 0 nor 1, so_file is never bound -> NameError
if stab_flag == 1:
    so_file = os.getcwd() + "/C/nlplant_xcg35.so"
elif stab_flag == 0:
    so_file = os.getcwd() + "/C/nlplant_xcg25.so"
nlplant = CDLL(so_file)
# initialise x
x = initial_state_vector_ft_rad
# indices of the observed states (V, alpha, beta, p, q, r)
output_vars = [6,7,8,9,10,11]
# trim aircraft at 10000 ft, 700 ft/s, then linearise about the trim point
h_t = 10000
v_t = 700
x, opt_res = trim(h_t, v_t, fi_flag, nlplant)
u = x[12:16]
A,B,C,D = linearise(x, u, output_vars, fi_flag, nlplant)
# In[]
# Import do_mpc package:
import do_mpc
model_type = 'discrete' # either 'discrete' or 'continuous'
model = do_mpc.model.Model(model_type)
do_mpc.controller.MPC
import casadi
casadi.casadi.Function
# doesnt seem compatible with my simulation unfortunately
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,568
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/mpc.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jul 7 14:55:50 2021
@author: johnviljoen
"""
import numpy as np
from sim import calc_xdot, calc_out
from scipy.linalg import solve_discrete_lyapunov
from sys import exit
import scipy
# In[ discrete linear quadratic regulator ]
# from https://github.com/python-control/python-control/issues/359:
def dlqr(A, B, Q, R):
    """Solve the infinite-horizon discrete-time LQR problem.

    System:  x[k+1] = A x[k] + B u[k]
    Cost:    sum x[k].T Q x[k] + u[k].T R u[k]
    Control law: u[k] = -K (x[k] - x_ref[k]) — select the states you want
    considered and feed the difference between current and desired state.

    Inputs A, B, Q, R must be numpy arrays (plain floats are not allowed).
    Returns the state-feedback gain matrix K.
    """
    # solve the discrete algebraic Riccati equation for the cost-to-go matrix
    riccati_sol = np.array(scipy.linalg.solve_discrete_are(A, B, Q, R))
    # K = (B' P B + R)^-1 (B' P A)
    gain = scipy.linalg.inv(B.T @ riccati_sol @ B + R) @ (B.T @ riccati_sol @ A)
    return np.array(gain)
def square_mat_degen_2d(mat, degen_idx):
    """Extract the square submatrix of `mat` on rows and columns `degen_idx`.

    mat       : 2-D square array
    degen_idx : sequence of indices to keep
    Returns a (len(degen_idx), len(degen_idx)) float array.
    """
    idx = np.asarray(degen_idx)
    # np.ix_ builds the outer row/column index grids in one vectorised step,
    # replacing the previous per-column Python loop; astype(float) matches the
    # original implementation, which always returned a float array.
    return np.asarray(mat)[np.ix_(idx, idx)].astype(float)
# In[]
def linearise(x, u, output_vars, fi_flag, nlplant):
    """Numerically linearise the plant about (x, u) by forward differences.

    Returns the continuous-time state-space matrices (A, B, C, D) with
    A: dxdot/dx, B: dxdot/du, C: dy/dx, D: dy/du, where y = x[output_vars].
    """
    eps = 1e-05
    A = np.zeros([len(x),len(x)])
    B = np.zeros([len(x),len(u)])
    C = np.zeros([len(output_vars),len(x)])
    D = np.zeros([len(output_vars),len(u)])
    # hoist the unperturbed baselines out of the loops — previously they were
    # recomputed on every iteration (2 extra plant evaluations per column)
    xdot0 = calc_xdot(x, u, fi_flag, nlplant)[:,0]
    y0 = calc_out(x, u, output_vars)[:,0]
    # Perturb each of the state variables and compute linearization
    for i in range(len(x)):
        dx = np.zeros([len(x),1])
        dx[i] = eps
        A[:, i] = (calc_xdot(x + dx, u, fi_flag, nlplant)[:,0] - xdot0) / eps
        C[:, i] = (calc_out(x + dx, u, output_vars)[:,0] - y0) / eps
    # Perturb each of the input variables and compute linearization
    for i in range(len(u)):
        du = np.zeros([len(u),1])
        du[i] = eps
        B[:, i] = (calc_xdot(x, u + du, fi_flag, nlplant)[:,0] - xdot0) / eps
        D[:, i] = (calc_out(x, u + du, output_vars)[:,0] - y0) / eps
    return A, B, C, D
# In[]
def calc_MC(A, B, hzn):
    """Build the stacked MPC prediction matrices over horizon `hzn`.

    For x[k+1] = A x[k] + B u[k], the stacked prediction is
        X = MM x0 + CC U
    where MM stacks A^1..A^hzn and CC is block lower-triangular with
    block (i, j) = A^(i-j) B for i >= j, zero otherwise.
    Returns (MM, CC).
    """
    nstates = A.shape[0]
    ninputs = B.shape[1]
    MM = np.zeros([nstates * hzn, nstates])
    CC = np.zeros([nstates * hzn, ninputs * hzn])
    zero_block = np.zeros([nstates, ninputs])
    for row in range(hzn):
        # free-response block: A^(row+1)
        MM[nstates * row:nstates * (row + 1), :] = np.linalg.matrix_power(A, row + 1)
        for col in range(hzn):
            if row >= col:
                block = np.linalg.matrix_power(A, row - col) @ B
            else:
                block = zero_block
            CC[nstates * row:nstates * (row + 1), ninputs * col:ninputs * (col + 1)] = block
    return MM, CC
# In[]
def calc_x_seq(A_d, B_d, x0, u_seq, hzn):
    """Predict the stacked state sequence X = MM x0 + CC u_seq over `hzn` steps."""
    MM, CC = calc_MC(A_d, B_d, hzn)
    free_response = MM @ x0
    forced_response = CC @ u_seq
    return free_response + forced_response
# In[]
def calc_HFG(A_d, B_d, C_d, K, R, hzn):
    """Build the condensed quadratic-cost matrices H, F, G for dual-mode MPC.

    The predicted cost is 0.5 U'HU + x0'F'U + x0'Gx0 with a terminal weight
    Q_bar from the discrete Lyapunov equation under the mode-2 feedback K.
    NOTE(review): assumes (A_d + B_d K) is stable so the Lyapunov equation has
    a solution — confirm K's sign convention against the caller.
    """
    # stage state weight from the output map: Q = C'C
    Q = np.matmul(C_d.T, C_d)
    # scalar input weight expanded to a diagonal R matrix
    R_mat = np.eye(B_d.shape[1]) * R
    # terminal cost: solve (A+BK)' Q_bar (A+BK) - Q_bar + (Q + K'RK) = 0
    Q_bar = solve_discrete_lyapunov((A_d + np.matmul(B_d, K)).T, Q + np.matmul(np.matmul(K.T,R_mat), K))
    # block-diagonal stage weights, with the last block replaced by the terminal weight
    Q_mat = dmom(Q, hzn)
    Q_mat[-Q.shape[0]:, -Q.shape[1]:] = Q_bar
    MM, CC = calc_MC(A_d, B_d, hzn)
    H = np.matmul(np.matmul(CC.T,Q_mat),CC) + dmom(R_mat,hzn)
    F = np.matmul(np.matmul(CC.T,Q_mat),MM)
    G = np.matmul(np.matmul(MM.T,Q_mat),MM)
    return H, F, G
# In[]
def dmom(mat, num_mats):
    """Diagonal matrix of matrices (dmom): block-diagonal replication.

    Returns a (nrows*num_mats, ncols*num_mats) float array with `mat` repeated
    `num_mats` times down the block diagonal and zeros elsewhere.
    """
    # np.kron(I, mat) places mat on the block diagonal in one vectorised call,
    # replacing the previous O(num_mats^2) Python double loop
    return np.kron(np.eye(num_mats), mat)
# In[]
# def calc_HFG(A, B, C, hzn, Q, R):
# MM, CC = calc_MC(hzn, A, B, 1)
# Q = np.matmul(C.T,C)
# Q_full = dmom(Q, hzn)
# # Q_full = np.eye(hzn)
# R_full = np.eye(hzn) * 0.01
# H = np.matmul(np.matmul(CC.T, Q_full),CC) + R_full
# F = np.matmul(np.matmul(CC.T, Q_full), MM)
# G = np.matmul(np.matmul(MM.T, Q_full), MM)
# return H, F, G
# In[]
# dual mode predicted HFG
def calc_dm_HFG(A, B, C, K, hzn, Q, R):
    """Dual-mode predicted H, F, G matrices (work in progress).

    NOTE(review): this function looks unfinished — K, Q and R are currently
    unused (no terminal Lyapunov weight is applied, unlike calc_HFG) and the
    input weight is hard-coded to 0.01 * I. The incoming Q is overwritten by C'C.
    """
    # BUGFIX: calc_MC's signature is (A, B, hzn); the previous call
    # calc_MC(hzn, A, B, 1) passed four arguments and raised a TypeError.
    MM, CC = calc_MC(A, B, hzn)
    # stage state weight from the output map
    Q = np.matmul(C.T, C)
    Q_full = dmom(Q, hzn)
    # Q_full = np.eye(hzn)
    # hard-coded input weight (see NOTE above)
    R_full = np.eye(hzn) * 0.01
    H = np.matmul(np.matmul(CC.T, Q_full), CC) + R_full
    F = np.matmul(np.matmul(CC.T, Q_full), MM)
    G = np.matmul(np.matmul(MM.T, Q_full), MM)
    return H, F, G
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,569
|
johnviljoen/f16_mpc_py
|
refs/heads/master
|
/redundant/test.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 31 14:41:39 2021
@author: johnviljoen
"""
import numpy as np
from numpy import pi
import ctypes
from ctypes import CDLL
import os
from parameters import act_lim, x_lim
from parameters import initial_state_vector_ft_rad as x0
from parameters import simulation_parameters as paras_sim
from utils import tic, toc, vis
import progressbar
import gym
from gym import spaces
from scipy.optimize import minimize
class F16(gym.Env):
    """Gym-style wrapper around the F16 nonlinear plant (C shared library).

    Holds the full 18-element state (12 rigid-body states + 6 actuator states),
    advances it with explicit Euler, and provides trim and numerical
    linearisation utilities mirroring sim.py/trim.py/mpc.py.
    """
    def __init__(self, x0, u0, paras_sim):
        super().__init__()
        # system state
        self.x = np.copy(x0[np.newaxis].T)
        self.x0 = np.copy(x0[np.newaxis].T)
        # input demand
        self.u = np.copy(u0[np.newaxis].T)
        self.u0 = np.copy(u0[np.newaxis].T)
        # output state indices
        self.y_vars = [6,7,8,9,10,11]
        # measured state indices
        self.z_vars = [6,7,8,9]
        # fidelity flag
        self.fi_flag = paras_sim[4]
        # time step
        self.dt = paras_sim[0]
        self.xdot = np.zeros([x0.shape[0]])
        # create interface with c shared library .so file in folder "C"
        if paras_sim[3] == 1:
            so_file = os.getcwd() + "/C/nlplant_xcg35.so"
        elif paras_sim[3] == 0:
            so_file = os.getcwd() + "/C/nlplant_xcg25.so"
        nlplant = CDLL(so_file)
        self.nlplant = nlplant
        self.action_space = spaces.Box(low=np.array(act_lim[1])[0:4], high=np.array(act_lim[0])[0:4], dtype=np.float32)
        self.observation_space = spaces.Box(low=np.array(x_lim[1] + act_lim[1]), high=np.array(x_lim[0] + act_lim[0]), shape=(17,), dtype=np.float32)
    def calc_xdot(self, x, u):
        """Return the (18,1) state-derivative vector for state x and demand u."""
        def upd_thrust(T_cmd, T_state):
            # command saturation
            T_cmd = np.clip(T_cmd,act_lim[1][0],act_lim[0][0])
            # rate saturation
            return np.clip(T_cmd - T_state, -10000, 10000)
        def upd_dstab(dstab_cmd, dstab_state):
            # command saturation
            dstab_cmd = np.clip(dstab_cmd,act_lim[1][1],act_lim[0][1])
            # rate saturation
            return np.clip(20.2*(dstab_cmd - dstab_state), -60, 60)
        def upd_ail(ail_cmd, ail_state):
            # command saturation
            ail_cmd = np.clip(ail_cmd,act_lim[1][2],act_lim[0][2])
            # rate saturation
            return np.clip(20.2*(ail_cmd - ail_state), -80, 80)
        def upd_rud(rud_cmd, rud_state):
            # command saturation
            rud_cmd = np.clip(rud_cmd,act_lim[1][3],act_lim[0][3])
            # rate saturation
            return np.clip(20.2*(rud_cmd - rud_state), -120, 120)
        def upd_lef(h, V, coeff, alpha, lef_state_1, lef_state_2, nlplant):
            nlplant.atmos(ctypes.c_double(h),ctypes.c_double(V),ctypes.c_void_p(coeff.ctypes.data))
            atmos_out = coeff[1]/coeff[2] * 9.05
            alpha_deg = alpha*180/pi
            LF_err = alpha_deg - (lef_state_1 + (2 * alpha_deg))
            #lef_state_1 += LF_err*7.25*time_step
            LF_out = (lef_state_1 + (2 * alpha_deg)) * 1.38
            lef_cmd = LF_out + 1.45 - atmos_out
            # command saturation
            lef_cmd = np.clip(lef_cmd,act_lim[1][4],act_lim[0][4])
            # rate saturation
            lef_err = np.clip((1/0.136) * (lef_cmd - lef_state_2),-25,25)
            return LF_err*7.25, lef_err
        # initialise variables
        xdot = np.zeros([18,1])
        temp = np.zeros(6)
        coeff = np.zeros(3)
        #--------------Thrust Model--------------#
        temp[0] = upd_thrust(u[0], x[12])
        #--------------Dstab Model---------------#
        temp[1] = upd_dstab(u[1], x[13])
        #-------------aileron model--------------#
        temp[2] = upd_ail(u[2], x[14])
        #--------------rudder model--------------#
        temp[3] = upd_rud(u[3], x[15])
        #--------leading edge flap model---------#
        temp[5], temp[4] = upd_lef(x[2], x[6], coeff, x[7], x[17], x[16], self.nlplant)
        #----------run nlplant for xdot----------#
        self.nlplant.Nlplant(ctypes.c_void_p(x.ctypes.data), ctypes.c_void_p(xdot.ctypes.data), ctypes.c_int(self.fi_flag))
        #----------assign actuator xdots---------#
        xdot[12:18,0] = temp
        return xdot
    def step(self, action):
        """Advance the internal state one Euler step.

        NOTE(review): `action` is currently ignored — the stored self.u demand
        is used instead; confirm intended gym semantics.
        """
        self.x += self.calc_xdot(self.x, self.u)*self.dt
        return self.x
    def reset(self):
        """Restore the initial state and input demand."""
        self.x = np.copy(self.x0)
        self.u = np.copy(self.u0)
    def get_obs(self, x, u):
        """Return the observed subset of the state (indices self.y_vars)."""
        return x[self.y_vars]
    def trim(self, h_t, v_t):
        """Trim for straight-and-level flight at h_t (ft) and v_t (ft/s).

        Returns (x_trim, opt) — the trimmed 18-element state vector and the
        full scipy.optimize result.
        """
        def obj_func(UX0, h_t, v_t, fi_flag, nlplant):
            # fi_flag/nlplant are carried in the signature for minimize's args;
            # the plant is reached through self.calc_xdot below
            V = v_t
            h = h_t
            P3, dh, da, dr, alpha = UX0
            npos = 0
            epos = 0
            #h
            phi = 0
            #theta = alpha in straight level flight
            psi = 0
            #V
            #alpha
            beta = 0
            p = 0
            q = 0
            r = 0
            #P3
            #dh
            #da
            #dr
            #dlef1
            #dlef2
            rho0 = 2.377e-3
            tfac = 1 - 0.703e-5*h
            temp = 519*tfac
            if h >= 35000:
                temp = 390
            rho = rho0*tfac**4.14
            qbar = 0.5*rho*V**2
            ps = 1715*rho*temp
            dlef = 1.38*alpha*180/pi - 9.05*qbar/ps + 1.45
            x = np.array([npos, epos, h, phi, alpha, psi, V, alpha, beta, p, q, r, P3, dh, da, dr, dlef, -alpha*180/pi])
            # set thrust limits
            if x[12] > act_lim[0][0]:
                x[12] = act_lim[0][0]
            elif x[12] < act_lim[1][0]:
                x[12] = act_lim[1][0]
            # set elevator limits
            if x[13] > act_lim[0][1]:
                x[13] = act_lim[0][1]
            elif x[13] < act_lim[1][1]:
                x[13] = act_lim[1][1]
            # set aileron limits
            if x[14] > act_lim[0][2]:
                x[14] = act_lim[0][2]
            elif x[14] < act_lim[1][2]:
                x[14] = act_lim[1][2]
            # set rudder limits
            if x[15] > act_lim[0][3]:
                x[15] = act_lim[0][3]
            elif x[15] < act_lim[1][3]:
                x[15] = act_lim[1][3]
            # set alpha limits
            if x[7] > x_lim[0][7]*pi/180:
                x[7] = x_lim[0][7]*pi/180
            elif x[7] < x_lim[1][7]*pi/180:
                x[7] = x_lim[1][7]*pi/180
            u = np.array([x[12],x[13],x[14],x[15]])
            xdot = self.calc_xdot(x, u)
            phi_w = 10
            theta_w = 10
            psi_w = 10
            weight = np.array([0, 0, 5, phi_w, theta_w, psi_w, 2, 10, 10, 10, 10, 10]).transpose()
            cost = np.matmul(weight,xdot[0:12]**2)
            return cost
        # initial guesses
        thrust = 5000          # thrust, lbs
        elevator = -0.09       # elevator, degrees
        alpha = 8.49           # AOA, degrees
        rudder = -0.01         # rudder angle, degrees
        aileron = 0.01         # aileron, degrees
        # BUGFIX: obj_func unpacks UX0 as (P3, dh, da, dr, alpha); the previous
        # ordering [thrust, elevator, alpha, rudder, aileron] swapped the alpha
        # and aileron guesses
        UX0 = [thrust, elevator, aileron, rudder, alpha]
        opt = minimize(obj_func, UX0, args=((h_t, v_t, self.fi_flag, self.nlplant)), method='Nelder-Mead',tol=1e-10,options={'maxiter':5e+04})
        P3_t, dstab_t, da_t, dr_t, alpha_t = opt.x
        # recompute the leading-edge-flap deflection at the trimmed alpha
        rho0 = 2.377e-3
        tfac = 1 - 0.703e-5*h_t
        temp = 519*tfac
        if h_t >= 35000:
            temp = 390
        rho = rho0*tfac**4.14
        qbar = 0.5*rho*v_t**2
        ps = 1715*rho*temp
        dlef = 1.38*alpha_t*180/pi - 9.05*qbar/ps + 1.45
        x_trim = np.array([0, 0, h_t, 0, alpha_t, 0, v_t, alpha_t, 0, 0, 0, 0, P3_t, dstab_t, da_t, dr_t, dlef, -alpha_t*180/pi])
        return x_trim, opt
    def linearise(self, x, u):
        """Numerically linearise the plant about (x, u) by forward differences.

        Returns (A, B, C, D) with outputs y = x[self.y_vars].
        """
        eps = 1e-06
        A = np.zeros([len(x),len(x)])
        B = np.zeros([len(x),len(u)])
        C = np.zeros([len(self.y_vars),len(x)])
        D = np.zeros([len(self.y_vars),len(u)])
        # Perturb each of the state variables and compute linearization
        for i in range(len(x)):
            dx = np.zeros([len(x),1])
            dx[i] = eps
            A[:, i] = (self.calc_xdot(x + dx, u)[:,0] - self.calc_xdot(x, u)[:,0]) / eps
            C[:, i] = (self.get_obs(x + dx, u)[:,0] - self.get_obs(x, u)[:,0]) / eps
        # Perturb each of the input variables and compute linearization
        for i in range(len(u)):
            du = np.zeros([len(u),1])
            du[i] = eps
            B[:, i] = (self.calc_xdot(x, u + du)[:,0] - self.calc_xdot(x, u)[:,0]) / eps
            D[:, i] = (self.get_obs(x, u + du)[:,0] - self.get_obs(x, u)[:,0]) / eps
        return A, B, C, D
# make starting array immutable to cause error if used innapropriately
x0.flags.writeable = False
# instantiate the object (initial input demand = actuator slice of x0)
f16 = F16(x0, x0[12:16], paras_sim)
# trim the aircraft at 10000ft, 700 ft/s
f16.x = f16.trim(10000,700)[0][np.newaxis].T
f16.u = f16.x[12:16]
# time vector from paras_sim = [dt, t_start, t_end, ...]
rng = np.linspace(paras_sim[1], paras_sim[2], int((paras_sim[2]-paras_sim[1])/paras_sim[0]))
# create storage
x_storage = np.zeros([len(rng),len(f16.x)])
# per-timestep linearisation history (third axis indexes time)
A = np.zeros([len(f16.x),len(f16.x),len(rng)])
B = np.zeros([len(f16.x),len(f16.u),len(rng)])
C = np.zeros([len(f16.y_vars),len(f16.x),len(rng)])
D = np.zeros([len(f16.y_vars),len(f16.u),len(rng)])
bar = progressbar.ProgressBar(maxval=len(rng)).start()
tic()
for idx, val in enumerate(rng):
    #----------------------------------------#
    #------------linearise model-------------#
    #----------------------------------------#
    [A[:,:,idx], B[:,:,idx], C[:,:,idx], D[:,:,idx]] = f16.linearise(f16.x, f16.u)
    #----------------------------------------#
    #--------------Take Action---------------#
    #----------------------------------------#
    # MPC prediction using squiggly C and M matrices
    #CC, MM = calc_MC(paras_mpc[0], A[:,:,idx], B[:,:,idx], time_step)
    #----------------------------------------#
    #--------------Integrator----------------#
    #----------------------------------------#
    x = f16.step(f16.u)
    #----------------------------------------#
    #------------Store History---------------#
    #----------------------------------------#
    x_storage[idx,:] = x[:,0]
    bar.update(idx)
toc()
# In[]
#----------------------------------------------------------------------------#
#---------------------------------Visualise----------------------------------#
#----------------------------------------------------------------------------#
#%matplotlib qt
vis(x_storage, rng)
|
{"/main.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/sim.py": ["/parameters.py"], "/trim.py": ["/sim.py", "/parameters.py"], "/redundant/gym_testing.py": ["/parameters.py"], "/redundant/d_mpc_testing.py": ["/utils.py", "/trim.py", "/sim.py", "/mpc.py", "/parameters.py"], "/mpc.py": ["/sim.py"], "/redundant/test.py": ["/parameters.py", "/utils.py"]}
|
34,575
|
sanchit2843/object_detection_helper
|
refs/heads/main
|
/utils/util.py
|
def split_line_yolo(line):
    """Parse one YOLO-style annotation line.

    Expected format: "<image_path> x1,y1,x2,y2,cls x1,y1,x2,y2,cls ...".
    Returns (path, boxes) where boxes is a list of [x1, y1, x2, y2, cls] ints.
    """
    tokens = line.split(" ")
    boxes = [[int(field) for field in token.split(",")] for token in tokens[1:]]
    return tokens[0], boxes
|
{"/plot_yolo_txt.py": ["/utils/util.py"], "/class_distribution_yolo_txt.py": ["/utils/util.py"]}
|
34,576
|
sanchit2843/object_detection_helper
|
refs/heads/main
|
/plot_yolo_txt.py
|
import os
import cv2
import argparse
from utils.util import split_line_yolo
def plot_image(image, boxes):
    """Draw each bounding box and its class id onto `image`.

    Each box is [x1, y1, x2, y2, class_id] in pixel coordinates; the class id
    is printed at the box centre. Returns the annotated image.
    """
    for box in boxes:
        image = cv2.rectangle(
            image, (box[0], box[1]), (box[2], box[3]), (255, 0, 0), thickness=2
        )
        # BUGFIX: cv2.putText requires the positional fontScale argument; the
        # original call omitted it and raised a TypeError at runtime.
        image = cv2.putText(
            image,
            str(box[4]),
            ((box[0] + box[2]) // 2, (box[1] + box[3]) // 2),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            color=(0, 0, 255),
            thickness=2,
        )
    return image
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # add more formats based on what is supported by opencv
    parser.add_argument(
        "--yolo_txt_path",
        type=str,
        required=True,
        help="path to yolo txt",
    )
    parser.add_argument(
        "--output_path",
        type=str,
        default="./",
        help="path to save images",
    )
    args = parser.parse_args()
    for i in open(args.yolo_txt_path, "r"):
        path, box = split_line_yolo(i)
        image = cv2.imread(path)
        img = plot_image(image, box)
        # BUGFIX: the output filename was previously the entire annotation line
        # `i` (path + box text + trailing newline), producing an invalid path;
        # save under the image's own basename instead.
        cv2.imwrite(
            os.path.join(
                args.output_path,
                os.path.basename(path),
            ),
            img,
        )
|
{"/plot_yolo_txt.py": ["/utils/util.py"], "/class_distribution_yolo_txt.py": ["/utils/util.py"]}
|
34,577
|
sanchit2843/object_detection_helper
|
refs/heads/main
|
/class_distribution_yolo_txt.py
|
import os
import argparse
from utils.util import split_line_yolo
from collections import Counter
# give class wise box count
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--yolo_txt_path",
        type=str,
        required=True,
        help="path to yolo txt",
    )
    args = parser.parse_args()
    # Map numeric class ids to human-readable labels.
    class_id_decoder = {0: "car", 1: "truck", 2: "bus", 3: "heavy truck"}
    labels = []
    for line in open(args.yolo_txt_path, "r"):
        _, boxes = split_line_yolo(line)
        labels.extend(class_id_decoder[b[4]] for b in boxes)
    # Print per-class box counts across the whole annotation file.
    print(Counter(labels))
|
{"/plot_yolo_txt.py": ["/utils/util.py"], "/class_distribution_yolo_txt.py": ["/utils/util.py"]}
|
34,616
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/config.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# config.py
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class BaseConfig(object):
    """Base configuration shared by all environments."""
    # NOTE(review): hard-coded secret; override via environment for real use.
    SECRET_KEY = 'my_precious'
    DEBUG = False
    BCRYPT_LOG_ROUNDS = 13  # strong hashing cost for real deployments
    WTF_CSRF_ENABLED = True
    DEBUG_TB_ENABLED = False  # Flask-DebugToolbar off by default
    DEBUG_TB_INTERCEPT_REDIRECTS = False
class DevelopmentConfig(BaseConfig):
    """Development configuration: local sqlite DB, cheap hashing, no CSRF."""
    DEBUG = True
    BCRYPT_LOG_ROUNDS = 1  # fast hashing for quick dev logins
    WTF_CSRF_ENABLED = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'dev.sqlite')
    DEBUG_TB_ENABLED = True
class TestingConfig(BaseConfig):
    """Testing configuration: throwaway sqlite URI, CSRF disabled."""
    DEBUG = True
    TESTING = True
    BCRYPT_LOG_ROUNDS = 1
    WTF_CSRF_ENABLED = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///'  # anonymous/temporary database
    DEBUG_TB_ENABLED = False
class ProductionConfig(BaseConfig):
    """Production configuration.

    The secret key and database URI are read from the environment when set,
    so nothing sensitive has to live in source control; the original
    hard-coded values remain as fallbacks for backward compatibility.
    """
    SECRET_KEY = os.environ.get('SECRET_KEY', 'my_precious')
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.environ.get(
        'DATABASE_URL', 'postgresql://localhost/example')
    DEBUG_TB_ENABLED = False
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,617
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/user/views.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# project/user/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, url_for, \
redirect, flash, request
from flask.ext.login import login_user, logout_user, login_required, current_user
from project import bcrypt, db
from project.models import User, PushDevice
from project.user.forms import *
from project.push.tasks import sendpush
import binascii
import os
import json
################
#### config ####
################
user_blueprint = Blueprint('user', __name__,)
################
#### routes ####
################
'''
@user_blueprint.route('/register', methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form)
if form.validate_on_submit():
user = User(
email=form.email.data,
password=form.password.data
)
db.session.add(user)
db.session.commit()
login_user(user)
flash('Thank you for registering.', 'success')
return redirect(url_for("user.members"))
return render_template('user/register.html', form=form)
'''
@user_blueprint.route('/login', methods=['GET', 'POST'])
def login():
    """First login step.

    If the account has a registered push device, generate a one-time
    password, push it to the device and hand off to the 2FA form;
    otherwise perform a plain password login.
    """
    form = LoginForm(request.form)
    if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            # NOTE(review): this message reveals whether an email is registered.
            flash("User not found", "danger")
            return render_template('user/login.html', form=form)
        devices = PushDevice.query.filter_by(user_id=user.id).all()
        if len(devices) > 0:
            # 4 random bytes -> 8 hex chars; only the bcrypt hash is stored.
            otp = binascii.b2a_hex(os.urandom(4)).decode()
            user.otp = bcrypt.generate_password_hash(otp)
            # NOTE(review): debug print leaks the OTP to the server console.
            print(otp)
            device = devices[0]
            push_status_txt = sendpush(device.push_id, otp)
            push_json = json.loads(push_status_txt)
            if "status" in push_json:
                if push_json['status'] == "OK":
                    flash("One Time Password Sent To Device", "success")
                else :
                    flash("Could Not Communicate With Device", "danger")
            # Persist the OTP hash before handing off to the 2FA form.
            db.session.commit()
            return redirect(url_for('user.two_factor_login'))
        if user and bcrypt.check_password_hash(
                user.password, request.form['password']):
            login_user(user)
            flash('You are logged in. Welcome!', 'success')
            return redirect(url_for('user.members'))
        else:
            flash('Invalid email and/or password.', 'danger')
            return render_template('user/login.html', form=form)
    return render_template('user/login.html', title='Please Login', form=form)
@user_blueprint.route('/2FA', methods=['GET', 'POST'])
def two_factor_login():
    """Second login step: verify email, password and the one-time code."""
    form = TwoFactorLoginForm(request.form)
    if form.validate_on_submit():
        account = User.query.filter_by(email=form.email.data).first()
        password_ok = account and bcrypt.check_password_hash(
            account.password, form.password.data)
        if not password_ok:
            flash('Invalid email and/or password.', 'danger')
        elif not bcrypt.check_password_hash(account.otp, form.otp.data):
            flash('Invalid one time password.', 'danger')
        else:
            login_user(account)
            flash('You are logged in. Welcome!', 'success')
            # Invalidate the one-time password after a successful use.
            account.otp = None
            db.session.commit()
            return redirect(url_for('user.members'))
    return render_template('user/two_factor_login.html', form=form)
@user_blueprint.route('/add_device', methods=['GET', 'POST'])
@login_required
def add_device():
    """Attach an existing push device (looked up by uid) to the current user."""
    form = AddDeviceForm(request.form)
    if form.validate_on_submit():
        target = PushDevice.query.filter_by(device_uid=form.device_uid.data).first()
        if target is not None:
            target.user = current_user
            db.session.commit()
            flash('Device registered to your account', "success")
            return redirect(url_for('user.members'))
        flash('Device not found (please check id)', "danger")
    return render_template('user/add_device.html', form=form)
@user_blueprint.route('/logout')
@login_required
def logout():
    """End the current session and return to the home page."""
    logout_user()
    flash('You were logged out. Bye!', 'success')
    home_url = url_for('main.home')
    return redirect(home_url)
@user_blueprint.route('/members')
@login_required
def members():
    """Members page; prompts for 2FA device enrolment when none is registered."""
    registered = PushDevice.query.filter_by(user_id=current_user.id).all()
    if not registered:
        flash('Please <a href="/add_device" class="alert-link">add</a> a two factor auth device', 'info')
    return render_template('user/members.html')
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,618
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/main/views.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# project/main/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, request, flash
from project.main.forms import SendToDeviceForm
from project.models import PushDevice
from project.push.tasks import sendpush
import binascii
import os
import json
################
#### config ####
################
main_blueprint = Blueprint('main', __name__,)
################
#### routes ####
################
@main_blueprint.route('/', methods=['GET', 'POST'])
def home():
    """Demo page: push a one-time password to a device identified by uid."""
    form = SendToDeviceForm(request.form)
    if form.validate_on_submit():
        target = PushDevice.query.filter_by(device_uid=form.device_uid.data).first()
        if target is None:
            flash('Device not found (please check id)', "danger")
        else:
            # 4 random bytes -> 8 hex chars of one-time password.
            code = binascii.b2a_hex(os.urandom(4)).decode()
            reply_text = sendpush(target.push_id, code)
            reply = json.loads(reply_text)
            if "status" in reply:
                if reply['status'] == "OK":
                    flash("One Time Password Sent To Device", "success")
                else:
                    flash("Could Not Communicate With Device ( " + reply_text + " )", "danger")
    return render_template('main/home.html', form=form)
@main_blueprint.route("/about/")
def about():
    """Render the static about page."""
    page = render_template("main/about.html")
    return page
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,619
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/__init__.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# project/__init__.py
#################
#### imports ####
#################
import os
from flask import Flask, render_template
from flask.ext.login import LoginManager
from flask.ext.bcrypt import Bcrypt
from flask_bootstrap import Bootstrap
from flask.ext.sqlalchemy import SQLAlchemy
################
#### config ####
################
app = Flask(__name__)
config_name = os.environ.get('APP_SETTINGS', 'project.config.DevelopmentConfig')
app.config.from_object(config_name)
####################
#### extensions ####
####################
login_manager = LoginManager()
login_manager.init_app(app)
bcrypt = Bcrypt(app)
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
###################
### blueprints ####
###################
from project.user.views import user_blueprint
from project.main.views import main_blueprint
from project.push.views import push_blueprint
app.register_blueprint(user_blueprint)
app.register_blueprint(main_blueprint)
app.register_blueprint(push_blueprint)
###################
### flask-login ####
###################
from project.models import User
login_manager.login_view = "user.login"
login_manager.login_message_category = 'danger'
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: resolve the session-stored id to a User row."""
    uid = int(user_id)
    return User.query.filter(User.id == uid).first()
########################
#### error handlers ####
########################
@app.errorhandler(403)
def forbidden_page(error):
    """Render the custom 403 (forbidden) page."""
    body = render_template("errors/403.html")
    return body, 403
@app.errorhandler(404)
def page_not_found(error):
    """Render the custom 404 (not found) page."""
    body = render_template("errors/404.html")
    return body, 404
@app.errorhandler(500)
def server_error_page(error):
    """Render the custom 500 (internal error) page."""
    body = render_template("errors/500.html")
    return body, 500
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,620
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/push/views.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# project/main/views.py
#################
#### imports ####
#################
from flask import render_template, Blueprint, request
from project.models import db, PushDevice
import binascii
import os
import json
################
#### config ####
################
push_blueprint = Blueprint('push', __name__,)
################
#### routes ####
################
@push_blueprint.route('/push/register', methods=['POST'])
def register():
    """Register a push device: store its push_id and hand back a short uid."""
    response = {}
    if 'push_id' not in request.form:
        response['error'] = 'could not register push device'
    else:
        device = PushDevice()
        device.push_id = request.form.get('push_id')
        # 4 random bytes -> 8 hex chars; NOTE(review): collisions possible at scale.
        device.device_uid = binascii.b2a_hex(os.urandom(4)).decode()
        db.session.add(device)
        db.session.commit()
        response['device_uid'] = device.device_uid
    return json.dumps(response)
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,621
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/user/forms.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
# project/user/forms.py
from flask_wtf import Form
from wtforms import PasswordField, StringField, SubmitField
from wtforms.validators import DataRequired, Email, Length, EqualTo
class LoginForm(Form):
    """First-step login form: email and password."""
    email = StringField('Email Address', [DataRequired(), Email()])
    password = PasswordField('Password', [DataRequired()])
class TwoFactorLoginForm(Form):
    """Second-step login form: email, password and the pushed one-time code."""
    email = StringField('Email Address', [DataRequired(), Email()])
    password = PasswordField('Password', [DataRequired()])
    otp = PasswordField('One Time Code', [DataRequired()])
class RegisterForm(Form):
    """Account registration form with password confirmation.

    NOTE: the /register route that uses this form is commented out in
    project/user/views.py.
    """
    email = StringField(
        'Email Address',
        validators=[DataRequired(), Email(message=None), Length(min=6, max=40)])
    password = PasswordField(
        'Password',
        validators=[DataRequired(), Length(min=6, max=25)]
    )
    confirm = PasswordField(
        'Confirm password',
        validators=[
            DataRequired(),
            EqualTo('password', message='Passwords must match.')
        ]
    )
class AddDeviceForm(Form):
    """Form for attaching a push device (by its short uid) to an account."""
    device_uid = StringField('Device ID', validators=[DataRequired()])
    submit = SubmitField('Add')
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,622
|
ownpush/otp_demo_server
|
refs/heads/master
|
/project/push/tasks.py
|
"""
The MIT License (MIT)
Copyright (c) 2016 Fastboot Mobile LLC.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from nacl import signing, encoding
from nacl.public import PrivateKey, PublicKey, Box
import nacl.utils
import json
import base64
import requests
def encrypt_data(private_hex, public_hex, message):
    """NaCl Box-encrypt *message* (str) using hex-encoded key material.

    private_hex: our Curve25519 private key, hex encoded.
    public_hex:  the recipient's public key, hex encoded.
    Returns the hex-encoded EncryptedMessage (exposes .ciphertext and .nonce).
    """
    sk = PrivateKey(private_hex, nacl.encoding.HexEncoder)
    pk = PublicKey(public_hex, nacl.encoding.HexEncoder)
    box = Box(sk, pk)
    # Fresh random nonce per message — required for Box security.
    nonce = nacl.utils.random(Box.NONCE_SIZE)
    encoded = box.encrypt(message.encode(), nonce, encoder=nacl.encoding.HexEncoder)
    return encoded
def generate_token(sig_key_hex, enc_key_hex, api_key, message, to):
    """Build a signed, JWT-like OwnPush token carrying the encrypted message.

    sig_key_hex: Ed25519 signing key (hex) for the trailing signature.
    enc_key_hex: Curve25519 private key (hex) used to Box-encrypt *message*.
    api_key:     public app key; prefixed onto *to* to form the full address.
    to:          target device key/id; becomes "api_key.to" after prefixing.

    Returns "<b64 header>.<b64 body>.<b64 signature>".
    """
    signing_key = signing.SigningKey(sig_key_hex, encoder=nacl.encoding.HexEncoder)
    to = api_key + "." + to
    header_dict = {}
    header_dict['API_KEY'] = signing_key.verify_key.encode(encoder=nacl.encoding.HexEncoder).decode()
    header_dict['alg'] = "FM-1"
    header_dict['typ'] = "JWT"
    header_dict['srv_v'] = "v0.0"
    header_dict['to'] = to
    # After prefixing, part [1] is the recipient's public key portion.
    device_to_parts = to.split(".")
    header_txt = json.dumps(header_dict)
    encoded = encrypt_data(enc_key_hex, device_to_parts[1], message)
    txt = encoded.ciphertext
    nonce = encoded.nonce
    body_dict = {}
    body_dict['data'] = txt.decode()
    body_dict['nonce'] = nonce.decode()
    body_txt = json.dumps(body_dict)
    header_b64 = base64.b64encode(header_txt.encode()).decode()
    body_b64 = base64.b64encode(body_txt.encode()).decode()
    data = header_b64 + "." + body_b64
    # Sign "header.body"; the base64 signature becomes the third segment.
    sig = signing_key.sign(data.encode('utf-8'), nacl.encoding.Base64Encoder)
    data = data + "." + sig.signature.decode()
    return data
def sendpush(to, message):
    """Encrypt, sign and POST *message* to the OwnPush demo send endpoint.

    NOTE(review): the three key strings below look like placeholders — they
    must be replaced with real key material for the push to succeed.
    NOTE(review): verify=False disables TLS certificate checking; enable
    verification (or pin the demo certificate) before any real use.
    """
    token = generate_token("PRIVATE_API_KEY",
                           "PRIVATE_APP_KEY",
                           "PUBLIC_APP_KEY",
                           message,
                           to)
    data = {"token": token}
    r = requests.post('https://demo.ownpush.com/send', data=data, verify=False)
    return r.text
|
{"/project/user/views.py": ["/project/__init__.py", "/project/user/forms.py", "/project/push/tasks.py"], "/project/main/views.py": ["/project/push/tasks.py"], "/project/__init__.py": ["/project/user/views.py", "/project/main/views.py", "/project/push/views.py"]}
|
34,654
|
ryan00234/331
|
refs/heads/master
|
/temp.py
|
# -*- coding: utf-8 -*-
import os
pkg_name = 'bf.cloud.bfclouddemowithui'
def cpu(device_id=None):
    """Sample the demo app's CPU usage via ``adb shell dumpsys cpuinfo``.

    device_id: optional adb serial (e.g. '192.168.17.157:5555'); when given,
        the command is issued with ``adb -s <device_id>``. Added with a
        default because 331.py calls ``temp.cpu(device_id)`` while the
        original signature took no arguments and raised a TypeError.

    Returns (user, kernel) CPU percentages as floats from the first matching
    dumpsys line, or None when the package does not appear in the output.
    """
    adb = 'adb' if device_id is None else 'adb -s ' + device_id
    # os.popen('adb wait-for-device')
    # Consistency: grep the module-level pkg_name instead of a duplicated literal.
    cmd_cpu = os.popen(adb + ' shell dumpsys cpuinfo |grep ' + pkg_name)
    for line in cmd_cpu.readlines():
        cpuinfo = line.split(' ')
        # Fields look like "12.3%"; strip the trailing '%' before float().
        user = float(cpuinfo[4][:-1])
        kernel = float(cpuinfo[7][:-1])
        return (user, kernel)
def mem(device_id=None):
    """Read the app's TOTAL memory figure from ``adb shell dumpsys meminfo``.

    device_id: optional adb serial; when given, uses ``adb -s <device_id>``.
        Added with a default because 331.py calls ``temp.mem(device_id)``
        while the original signature took no arguments and raised a TypeError.

    Returns the parsed int from the first TOTAL line, or None when grep
    finds no match.
    """
    adb = 'adb' if device_id is None else 'adb -s ' + device_id
    # Consistency: use the module-level pkg_name instead of a duplicated literal.
    cmd_mem = os.popen(adb + ' shell dumpsys meminfo ' + pkg_name + ' | grep TOTAL')
    for line in cmd_mem.readlines():
        meminfo = line.split(' ')
        meminfo.sort()  # idiomatic form of the original list.sort(meminfo)
        return int(meminfo[-4])
|
{"/331.py": ["/temp.py"]}
|
34,655
|
ryan00234/331
|
refs/heads/master
|
/331.py
|
# -*- coding: utf-8 -*-
from flask import Flask, render_template, jsonify, request
from flask_bootstrap import Bootstrap
from flask_moment import Moment
import temp
device_id = '192.168.17.157:5555'
app = Flask(__name__)
# app.config.from_object('config')
bootstrap = Bootstrap(app)
moment = Moment(app)
@app.errorhandler(404)
def page_not_found(e):
    """Render the custom 404 page."""
    body = render_template('404.html')
    return body, 404
@app.errorhandler(500)
def internal_server_error(e):
    """Render the custom 500 page."""
    body = render_template('500.html')
    return body, 500
@app.route('/debug')
def debug():
    """Render the UI debugging page."""
    page = render_template('debug_ui_172.html')
    return page
@app.route('/debug2')
def debug2():
    """Render the connection debugging page."""
    page = render_template('debug_connection_172.html')
    return page
@app.route('/SQL')
def sql():
    """Render the SQL page."""
    page = render_template('SQL.html')
    return page
@app.route('/test')
def test():
    """Connect adb to the currently configured device and show the stats page."""
    # device = request.args.get('device', 0, type=str)
    temp.os.popen('adb connect ' + device_id)
    page = render_template('temp.html')
    return page
@app.route('/device')
def device_set():
    """Switch the adb target device and report its dalvik heap properties.

    The device id comes from the ?device= query parameter; the last 8
    characters of each getprop line (the value) are returned as JSON.
    """
    import shlex  # local import: only used here for shell-quoting
    global device_id
    device = request.args.get('device', '')
    # SECURITY: *device* is attacker-controlled query-string input and was
    # previously concatenated into a shell command unquoted (command
    # injection via os.popen). Quote it before building any command line.
    safe_device = shlex.quote(device)
    temp.os.popen('adb connect ' + safe_device)
    device_id = device
    max1 = temp.os.popen('adb -s ' + safe_device + ' shell getprop|grep heapgrowthlimit').readline()
    max2 = temp.os.popen('adb -s ' + safe_device + ' shell getprop|grep dalvik.vm.heapstartsize').readline()
    max3 = temp.os.popen('adb -s ' + safe_device + ' shell getprop|grep dalvik.vm.heapsize').readline()
    return jsonify(max1=max1[-8:], max2=max2[-8:], max3=max3[-8:])
@app.route('/cpu_info')
def cpu_info():
    """Return the sampled CPU usage of the target app as JSON."""
    return jsonify(cpu=temp.cpu(device_id))
@app.route('/mem_info')
def mem_info():
    """Return the sampled memory usage of the target app as JSON."""
    return jsonify(mem=temp.mem(device_id))
@app.route('/Android')
def android():
    """Render the Android page."""
    page = render_template('Android.html')
    return page
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page."""
    page = render_template('index.html')
    return page
if __name__ == '__main__':
    # Development server only; bind to localhost.
    app.run(host='127.0.0.1', port=5000)
|
{"/331.py": ["/temp.py"]}
|
34,660
|
ediecs/Sis500
|
refs/heads/master
|
/core/test.py
|
# -*- coding: utf-8 -*-
import re
from core.functions import *
from core.data import *
import re, pickle, os
def regexTest():
    """Manual check: strip the injected '500pr_pgnumber###' page markers
    from a question returned by run() (from core.functions)."""
    result = run("qual a melhor epoca", "algodao")
    print(result['0']['pergunta'][0])
    # Matches the synthetic page marker plus 3 word chars (the page number).
    rgxPage = re.compile('500pr_pgnumber\w{3}')
    end = re.sub(rgxPage,'', result['0']['pergunta'][0])
    print(end)
def perguntaTest():
    """Manual check: lstrip leading numbering/punctuation from questions.

    NOTE(review): here result['0']['pergunta'] is treated as a string
    (lstrip), while regexTest indexes it like a list — confirm which shape
    run() actually returns.
    """
    result1 = run("qual a melhor epoca de plantio", "algodao")
    result2 = run("qual a melhor epoca para realizar a lavagem", "algodao")
    result3 = run("quais são os limites de radiacao solar", "abacaxi")
    r1 = result1['0']['pergunta']
    r2 = result2['0']['pergunta']
    r3 = result3['0']['pergunta']
    print(r1)
    print(r1.lstrip('0123456789.- '))
    print(r2)
    print(r2.lstrip('0123456789.- '))
    print(r3)
    print(r3.lstrip('0123456789.- '))
def splitTest():
    """Compare plain str.split('?') with a lookahead-based re.split on a
    multi-question sample string."""
    lookahead = r'(?=\?\n)'
    sample = """A tolerância à seca desse acesso de espécie silvestre pode
ser introgredida no amendoim cultivado? Qual a importância
desse estudo para o Nordeste?
Sim. Para o melhoramento do amendoim cultivado, um trabalho
dessa natureza torna-se importante devido ao aproveitamento """
    pieces = sample.split("?")
    print("[" + pieces[0] + "]")
    print("[" + pieces[1] + "]")
    print("[" + pieces[2] + "]")
    print(re.split(lookahead, sample))
def todosTest():
    """Load the pickled 'todos' processed-text list; print its first entry
    and that entry's length."""
    target = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", "txt", "500pr_procTxt_" + "todos" + ".txt"))
    with open(target, 'rb') as handle:
        lista = pickle.load(handle)
    print(lista[0])
    print(len(lista[0]))
def runTest():
    """Merge the top-3 results of two run() queries and print the overall
    top 3, ranked by their fuzzy-match ratio."""
    first = run("plantio do algodao 2222222", "abacaxi")
    second = run("plantio do algodao asadsd", "algodao")
    candidates = [first[str(k)] for k in range(3)]
    candidates += [second[str(k)] for k in range(3)]
    best = sorted(candidates, key=lambda entry: entry["ratio"], reverse=True)[:3]
    merged = {str(k): best[k] for k in range(3)}
    print(merged)
#runTest()
# Ad-hoc driver: query across all books and print the raw result dict.
print(run("quanto ao plantio do algodao", "todos"))
|
{"/core/test.py": ["/core/functions.py", "/core/data.py"], "/core/functions.py": ["/core/data.py"], "/core/setup.py": ["/core/functions.py", "/core/data.py"]}
|
34,661
|
ediecs/Sis500
|
refs/heads/master
|
/core/data.py
|
#0 = book short name, 1 = getAnswers start page index, 2 = end page index, 3 = question zero, 4 = txt write mode
#5 = proper (display) book name
listaLivros = \
[
["abacaxi", 16, 194, "A falta de chuva prejudica o abacaxizeiro", "w", "Abacaxi"],
["algodao", 17, 266, "Qual a origem mais provável do algodoeiro", "w", "Algodão"],
["amendoim", 17, 240, "Quais os fatores climáticos mais importantes para o crescimento da planta e o desenvolvimento do amendoim", "wb", "Amendoim"],
["arroz", 17, 246, "Quais são os elementos climáticos que mais influenciam a produtividade do arroz de terras altas", "w", "Arroz"],
["banana", 13, 199, "Onde se originou a bananeira e quais espécies participaram da sua evolução", "w", "Banana"],
["bufalos", 13, 161, "O que são búfalos domésticos e qual sua origem", "w", "Búfalos"],
["caju", 19, 249, "Qual é a origem do cajueiro", "wb", "Caju"],
["caprinos_ovinos_corte", 17, 242, "Saber criar caprinos e ovinos de corte é suficiente para ganhar dinheiro", "w", "Caprinos e Ovinos de Corte"],
["citros", 16, 212, "Como está classificado o gênero citros", "wb", "Citros"],
["feijao", 17, 247, "O que é um feijão", "wb", "Feijão"],
["fruticultura_irrigada", 20, 274, "Como o clima afeta a produção de plantas", "wb", "Fruticultura Irrigada"],
["gado_corte",14, 253, "Como deve ser o manejo do rebanho de cria na época de nascimentos", "w", "Gado de Corte"],
["gado_corte_pantanal",16, 256, "Quando começou a pecuária de corte no Pantanal", "w", "Gado de Corte do Pantanal"],
["gado_leite", 14, 297, "Quando iniciar os cuidados com os bezerros", "wb", "Gado de Leite"],
["geotecnologia_geoinformacao",17, 249, "O que é um satélite artificial", "wb", "Geotecnologia e Geoinformação"],
["gergelim", 19, 210, "Qual é o local de origem do gergelim", "wb", "Gergelim"],
["hortas", 18, 237, "O que são hortaliças", "wb", "Hortas"],
["integracao_lavoura_pecuaria_floresta", 23, 393, "O que é integração lavoura-pecuária-floresta (ILPF)", "wb", "Integração Lavoura-Pecuária-Floresta"],
["maca", 12, 225, "Qual é o local de origem da macieira", "wb", "Maçã"],
["mamao", 17, 171, "Quais as características da família Caricaceae", "w", "Mamão"],
["mamona", 17, 249, "Como escolher uma área adequada para cultivar mamona", "wb", "Mamona"],
["mandioca", 17, 177, "A que ordem, família, gênero e espécie pertence a mandioca", "w", "Mandioca"],
["manga", 17, 185, "Qual a classificação botânica da mangueira", "w", "Manga"],
["maracuja", 17, 341, "Qual a origem da palavra maracujá", "wb", "Maracuja"],
["milho", 18, 327, "Como o clima influencia a cultura do milho", "wb", "Milho"],
["ovinos", 15, 159, "Como uma associação de pequenos criadores de ovinos, dos quais alguns também criam caprinos, pode obter recursos financeiros e apoio técnico", "w", "Ovinos"],
["pequenas_frutas", 13, 183, "Por que as pequenas frutas recebem essa denominação", "w", "Pequenas Frutas"],
["pera",19, 231, "Qual é o centro de origem da pereira", "wb", "Pêra"],
["pesca_piscicultura_pantanal", 17, 189, "O que são recursos pesqueiros", "w", "Pesca e Piscicultura do Pantanal"],
["poscolheita_hortalicas", 15, 252, "Produzir hortaliças é um bom negócio", "wb", "Póscolheita de Hortaliças"],
["producao_organica_hortalicas", 18, 299, "O que é agroecologia", "w", "Produção Orgânica de Hortaliças"],
["sistema_plantio_direto", 17, 249, "O que são sistemas conservacionistas de manejo do solo", "w", "Sistema de Plantio Direto"],
["sorgo", 17, 324, "Como saber a época mais indicada para o plantio de sorgo granífero", "w", "Sorgo"],
["suinos", 17, 244, "Qual a diferença entre granja de suínos e sistema de produção de suínos", "w", "Suínos"],
["trigo", 17, 309, "Qual é a origem do trigo", "w", "Trigo"],
["uva", 17, 203, "Quais são os métodos usados no melhoramento genético da videira", "w", "Uva"]
]
def getListaLivros():
    """Return the module-level book metadata table (the shared list, not a copy)."""
    return listaLivros
|
{"/core/test.py": ["/core/functions.py", "/core/data.py"], "/core/functions.py": ["/core/data.py"], "/core/setup.py": ["/core/functions.py", "/core/data.py"]}
|
34,662
|
ediecs/Sis500
|
refs/heads/master
|
/core/functions.py
|
#IMPORTAÇÕES:
#PDF MINER - pacotes necessários para realizar extração do texto dos PDFs
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
from io import StringIO
#OUTRAS IMPORTAÇÕES
import re #Importa o módulo interno para uso de expressões regulares
from fuzzywuzzy import fuzz #Importa a biblioteca "fuzzyWuzzy"(distãncia de levenshtein)
import pickle #importa pickle(serialização e deserialização de arquivos)
import os #biblioteca para comunicação com o sistema(salvamento e abertura de arquivos)
from core.data import * #importa as funções e dados contidos neste projeto
basepath = os.path.dirname(__file__) #Cria o diretório base com relação ao diretório atual deste arquivo
#FUNÇÕES:
def getText(pdfname,pageZero,pageEnd): #Extracts the PDF's text using pdfminer's default parameters
    """Extract pages [pageZero, pageEnd] of pdf/500pr/500pr_<pdfname>.pdf into
    txt/500pr_txt_<pdfname>.txt.

    Before each kept page a sentinel line " 500pr_pgnumberNNN" is written so
    getAnswers() can later recover the page number of each chunk.
    """
    global basepath
    rsrcmgr = PDFResourceManager()
    retstr = StringIO()
    codec = 'utf-8'
    laparams = LAParams()
    filepath = os.path.abspath(os.path.join(basepath, "..", "pdf", "500pr","500pr_"+pdfname+".pdf"))
    device = TextConverter(rsrcmgr, retstr, codec=codec, laparams=laparams)
    fp = open(filepath, 'rb') #Open the PDF file at the derived path
    interpreter = PDFPageInterpreter(rsrcmgr, device)
    password = ""
    maxpages = 0
    caching = True
    pagenos=set()
    #Loop over the pages, interpreting the PDF page by page and appending the converted text to retstr
    for pagenumber, page in enumerate(PDFPage.get_pages(fp, pagenos, maxpages=maxpages, password=password,caching=caching, check_extractable=True)):
        if (pagenumber < pageZero -1) or (pagenumber >= pageEnd): #Skip the front/back pages that hold no question/answer content
            pass
        else:
            #Write a recognizable page-number marker before the page's text.
            # NOTE(review): the single "0" pad gives 3 digits only for pages 10-999;
            # pages 1-9 produce 2 digits, which the \w{3} regex in getAnswers would
            # miss — harmless while pageZero > 9, but confirm.
            if pagenumber >= 100:
                retstr.write("\n 500pr_pgnumber" + str(pagenumber + 1) +"\n")
            else:
                retstr.write("\n 500pr_pgnumber0" + str(pagenumber + 1) +"\n")
            interpreter.process_page(page)
    text = retstr.getvalue() #Full text accumulated by the loop above
    fp.close()
    device.close()
    retstr.close()
    filepath = os.path.abspath(os.path.join(basepath, "..", "txt" , "500pr_txt_" + pdfname + ".txt"))
    #Retry with an explicit utf-8 encoding if the platform default cannot encode the text
    try:
        with open(filepath, "w") as textFile:
            textFile.write(text)
    except UnicodeEncodeError:
        with open(filepath, "w",encoding='utf-8') as textFile:
            textFile.write(text)
    print(pdfname+ " - ok")
def getAnswers(filename,perguntaZero): #Splits the mined PDF text into questions and answers
    """Parse txt/500pr_txt_<filename>.txt into parallel lists of questions,
    answers, page numbers and book names, then pickle the four lists to
    txt/500pr_procTxt_<filename>.txt as [perguntas, respostas, paginas, nomeLivro].

    perguntaZero -- the book's first question, supplied manually because the
    splitting logic cannot recover it (see note below).
    """
    global basepath
    filepath = os.path.abspath(os.path.join(basepath, "..", "txt", "500pr_txt_" + filename + ".txt"))
    respostas = []
    perguntas = []
    paginas = []
    nomeLivro = []
    nomeLivroTratado = ""
    #Read the whole text file, retrying with explicit utf-8 on decode errors
    try:
        file = open(filepath,"r")
        file = file.read()
    except UnicodeDecodeError:
        file = open(filepath, "r",encoding='utf-8')
        file = file.read()
    rgx = re.compile('(?<=\.)[^.]*$') # text after the last "." of a chunk = the trailing question
    rgxPage = re.compile('(?<=500pr_pgnumber)\w{3}') # 3 chars after the page marker written by getText
    rgxSplit = '(?=\?\n)' # split points: lookahead for "?" followed by a newline
    i = 0
    while True:
        try:
            splitString = re.split(rgxSplit, file)[i]
        except IndexError:
            break
        respostas.append(splitString) #Both lists start from the same "?"-delimited chunk
        perguntas.append(" ".join(splitString.split())) #Collapse the surplus whitespace in the question copy
        paginas.append(splitString)
        respostas[i] = re.sub(rgx,'', respostas[i]) #Chunk minus the trailing question = the answer
        perguntas[i] = rgx.findall(perguntas[i]) #Regex match keeps only the question itself
        paginas[i] = rgxPage.findall(splitString)
        respostas[i].replace('\\n', '\n') # NOTE(review): str.replace returns a new string; this result is discarded
        i += 1
    perguntas[0] = perguntaZero #The splitter cannot recover the very first question, so it is set manually
    respostas.pop(0) #For the same reason the first "answer" is junk; drop it
    #Forward-fill missing page numbers from the last chunk that had one.
    # NOTE(review): raises NameError if the very first chunk has no page marker — confirm inputs guarantee one.
    for i in range(0,len(paginas)):
        if not bool(paginas[i]):
            paginas[i] = lastTrue
        else:
            lastTrue = paginas[i]
    x = 0
    nomeLivroTratado = corrigeNome(filename)
    #One display-name entry per question (each book holds 500 questions)
    while x != 500:
        nomeLivro.append(nomeLivroTratado)
        x += 1
    respostas = limpaTexto(respostas)
    respostas = limpaRespostas(respostas)
    perguntas = limpaTexto(perguntas)
    perguntas = limpaPergunta(perguntas)
    conjunto = [perguntas,respostas,paginas,nomeLivro]
    filepath = os.path.abspath(os.path.join(basepath, "..", "txt", "500pr_procTxt_" + filename + ".txt"))
    with open(filepath, 'wb') as fp:
        pickle.dump(conjunto, fp)
def run(perguntaUser, livro):
    """Return the 3 stored questions most similar to perguntaUser for one book.

    perguntaUser -- the user's question text
    livro        -- book slug used to locate the pickled Q&A file, or the
                    special value "todos" to search every book via runAll()

    Returns a dict {"0": {...}, "1": {...}, "2": {...}} ordered best-first;
    each entry holds pergunta, resposta, pagina, ratio and nomeLivro.
    """
    if livro == "todos":
        return runAll(perguntaUser)
    filepath = os.path.abspath(os.path.join(basepath, "..", "txt", "500pr_procTxt_" + livro + ".txt"))
    with open(filepath, 'rb') as file:
        lista = pickle.load(file)
    perguntas = lista[0]
    respostas = lista[1]
    paginas = lista[2]
    nomeLivro = lista[3]
    # Score every stored question against the user's question.
    listaRatio = [[index, checkRatio(perguntaUser, pergunta)]
                  for index, pergunta in enumerate(perguntas)]
    sortedList = sorted(listaRatio, key=lambda l: l[1], reverse=True)[:3]
    # Bug fix: entries "1" and "2" previously reused sortedList[0][0] for
    # nomeLivro (copy-paste); every entry now uses its own match index.
    top3 = {}
    for rank, (idx, ratio) in enumerate(sortedList):
        top3[str(rank)] = {"pergunta": perguntas[idx], "resposta": respostas[idx],
                           "pagina": paginas[idx], "ratio": ratio,
                           "nomeLivro": nomeLivro[idx]}
    #0 = similar question, 1 = its answer, 2 = page found, 3 = match ratio, 4 = book name
    return top3
def runAll(perguntaUser):
    """Search every book for perguntaUser and return the overall top-3 matches.

    Collects the per-book top-3 entries produced by run(), then keeps the 3
    best across all books, returned as {"0": entry, "1": entry, "2": entry}.
    """
    candidates = []
    for dadosLivro in getListaLivros():
        top3 = run(perguntaUser, dadosLivro[0])
        for posicao in range(3):
            candidates.append(top3[str(posicao)])
    melhores = sorted(candidates, key=lambda entry: entry["ratio"], reverse=True)[:3]
    return {str(rank): entry for rank, entry in enumerate(melhores)}
def checkRatio(str1, str2):
    """Return the best fuzzywuzzy similarity score between two strings.

    Takes the maximum of four fuzz scorers; very short strings (<= 5 chars on
    either side) are capped at a fixed score of 25 whenever any scorer is
    positive, so trivial inputs cannot dominate the ranking.
    """
    scores = (fuzz.ratio(str1, str2),
              fuzz.partial_ratio(str1, str2),
              fuzz.token_sort_ratio(str1, str2),
              fuzz.token_set_ratio(str1, str2))
    best = max(scores)
    if best <= 0:
        return 0
    if len(str1) <= 5 or len(str2) <= 5:
        return 25
    return best
def limpaTexto(listaentrada):
    """Strip embedded "500pr_pgnumberNNN" page markers from every entry.

    Entries may be plain strings or single-element lists (regex findall
    results); empty lists and non-string entries are silently dropped,
    matching the original behavior.
    """
    marker = re.compile('500pr_pgnumber\w{3}')
    saida = []
    for entrada in listaentrada:
        if isinstance(entrada, list):
            if entrada:
                saida.append(marker.sub('', entrada[0]))
        elif isinstance(entrada, str):
            saida.append(marker.sub('', entrada))
    return saida
def limpaPergunta(listaentrada):
    """Strip leading numbering ("12. - ") from each question entry.

    Entries may be plain strings or single-element lists; empty lists are
    skipped, matching the original behavior.
    """
    prefixo = '0123456789.- '
    saida = []
    for entrada in listaentrada:
        if isinstance(entrada, list):
            if entrada:
                saida.append(entrada[0].lstrip(prefixo))
        elif isinstance(entrada, str):
            saida.append(entrada.lstrip(prefixo))
    return saida
def limpaRespostas(listaentrada):
    """Strip leading '?' characters (leftover question delimiters) from answers.

    Entries may be plain strings or single-element lists; empty lists are
    skipped, matching the original behavior.
    """
    saida = []
    for entrada in listaentrada:
        if isinstance(entrada, list):
            if entrada:
                saida.append(entrada[0].lstrip('?'))
        elif isinstance(entrada, str):
            saida.append(entrada.lstrip('?'))
    return saida
def corrigeNome(nomeLivro):
    """Map a book's file slug (row index 0) to its display name (row index 5).

    Bug fix: the original raised NameError when the slug was not found in the
    book list; now the slug itself is returned unchanged as a safe fallback.
    """
    for dadosLivro in getListaLivros():
        if nomeLivro == dadosLivro[0]:
            return dadosLivro[5]
    return nomeLivro
|
{"/core/test.py": ["/core/functions.py", "/core/data.py"], "/core/functions.py": ["/core/data.py"], "/core/setup.py": ["/core/functions.py", "/core/data.py"]}
|
34,663
|
ediecs/Sis500
|
refs/heads/master
|
/core/setup.py
|
from core.functions import *
from core.data import *
#Imports the book list defined in the "data" module
def importaLista():
    """Return the book metadata list from core.data (one row per book)."""
    return getListaLivros()
#Iterates over every PDF in the list and converts it to text
def pdf2text():
    """Convert every PDF in the book list to its raw text file.

    Each list row starts with (slug, first_page, last_page, ...); getText
    extracts that page span into txt/500pr_txt_<slug>.txt.

    Bug fix: the original iterated range(0, len(lista)-1) and silently skipped
    the last book; text2answers already uses the full range.
    """
    lista = importaLista()
    for linha in lista:
        getText(linha[0], linha[1], linha[2])
#READS EVERY TXT, PROCESSES THE QUESTIONS AND ANSWERS, AND WRITES ONE PROCTXT PER BOOK
def text2answers():
    """Process every extracted text file into a pickled question/answer file.

    For each book row, getAnswers() receives the slug (index 0) and the
    manually-supplied first question (index 3); the slug is printed as a
    progress indicator.
    """
    lista = importaLista()
    for i in range(0,len(lista)):
        getAnswers(lista[i][0], lista[i][3])
        print(lista[i][0])
#pdf2text()
#text2answers()
|
{"/core/test.py": ["/core/functions.py", "/core/data.py"], "/core/functions.py": ["/core/data.py"], "/core/setup.py": ["/core/functions.py", "/core/data.py"]}
|
34,668
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/tasks/all_subreddits.py
|
import luigi
import luigi.contrib.postgres
from app.helpers.prepare_ingestion import PrepareIngestion
from app.utils.helper import derive_subreddits_list_save_path, derive_current_timestamp, derive_home_dir
class GetAllSubreddits(luigi.Task):
    """
    Gets the latest list of available subreddits from r/ListOfSubreddits
    and writes them, one name per line, to the run's subreddit-list file.
    """
    # Run timestamp; also used to derive the output file path.
    start = luigi.Parameter(default=derive_current_timestamp())
    top_n_subreddits = luigi.IntParameter(default=3)
    top_n_posts = luigi.IntParameter(default=3)
    top_n_comments = luigi.IntParameter(default=3)
    home_dir = luigi.Parameter(default=derive_home_dir())
    def requires(self):
        # Root task of the pipeline: no upstream dependencies.
        return None
    def output(self):
        subreddits_list_save_file_path = derive_subreddits_list_save_path(self.start, self.home_dir)
        return luigi.LocalTarget(subreddits_list_save_file_path)
    def complete(self):
        # Always report incomplete so the subreddit list is re-fetched on every
        # pipeline invocation.
        return False
    def run(self):
        # Preparing Ingestion, obtaining all available latest subreddits from r/ListOfSubreddits
        prepare = PrepareIngestion()
        subreddits_list = prepare.fetch_all_subreddits_list()
        with self.output().open('w') as f:
            f.write('\n'.join(subreddits_list))
        self.status = "Completed"
        # HACK: shadow complete() on the instance so luigi treats the task as
        # done only after run() succeeds. NOTE(review): confirm this is intended
        # rather than implementing complete() via output().exists().
        self.complete = lambda: True
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,669
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/helpers/subreddit_ingestion.py
|
from app.utils.constants import CONFIG_PATH
import praw
from dataclasses import dataclass
import configparser
@dataclass
class Post:
    """Mutable accumulator for one reddit post while ingesting a subreddit."""
    post_id: str             # reddit submission id
    url: str                 # post url/permalink
    total_comments: int = 0  # number of comments counted for this post
    c: int = 0               # NOTE(review): never referenced in this file — dead field?
    post_score: float = 0.0  # mean comment score, filled in by derive_top_data()
class SubredditIngestion:
    """
    A helper class for the secondary task IngestSubreddit, that performs all task specific operations:
    connecting to the Reddit API and deriving the top-scoring posts/comments of one subreddit.
    """
    def __init__(self, subreddit_name, start, top_n_subreddits, top_n_posts, top_n_comments):
        # Task parameters (run timestamp and "top n" limits) passed through from IngestSubreddit.
        self.subreddit_name = subreddit_name
        self.start = start
        self.top_n_subreddits = top_n_subreddits
        self.top_n_posts = top_n_posts
        self.top_n_comments = top_n_comments
        # Accumulators filled in by derive_top_data().
        self.total_posts = 0
        self.subreddit_score = 0.0
        self.top_n_posts_list = []
        self.all_posts_list = []
        # TODO Change the constants below
        self.reddit = self.get_api_wrapper()
    def get_api_wrapper(self, config_path=CONFIG_PATH):
        """
        Fetches Reddit API credentials from config.ini file
        :param config_path: Path to the stored config file
        :return: The Reddit PRAW API wrapper instance for the passed API credentials
        """
        config = configparser.ConfigParser()
        config.read(config_path)
        client_id = config['REDDIT']['client_id']
        client_secret = config['REDDIT']['client_secret']
        username = config['REDDIT']['username']
        password = config['REDDIT']['password']
        user_agent = config['REDDIT']['user_agent']
        return praw.Reddit(client_id=client_id,
                           client_secret=client_secret,
                           username=username,
                           password=password,
                           user_agent=user_agent)
    def derive_top_data(self):
        """
        Derives only the top posts and comments along with their scores for ingestion.
        A post's score is the mean of its comments' upvotes; the subreddit's score
        is the mean of its posts' scores.
        :return subreddit_contents: Subreddit score and top contents (posts & comments) for each subreddit
        """
        for post in self.reddit.subreddit(self.subreddit_name).top(limit=None):
            # Increment the total number of posts counter for final subreddit score calculation
            self.total_posts += 1
            # instantiate post object from post id and post url
            post_object = Post(post.id, post.url)
            all_comments_list = []
            submission = self.reddit.submission(id=post_object.post_id)
            submission.comments.replace_more(limit=None)
            # NOTE(review): despite the variable name, comments.list() yields the
            # flattened tree (ALL comments), not only top-level ones — confirm intent.
            for top_level_comment in submission.comments.list():
                # Increment the number of comments counter for each post
                post_object.total_comments += 1
                # Increment the total number of points for each post by adding comment upvotes to total upvotes
                post_object.post_score += top_level_comment.score
                comment_upvotes = top_level_comment.score if top_level_comment.score else 0
                all_comments_list.append(({"comment_body": top_level_comment.body, "comment_upvotes": comment_upvotes}))
            # sorting the comment list based on decreasing comment scores (Obtain only top 5 comments for ingestion)
            # Handle insufficient number of comments case:
            if len(all_comments_list) == 0:
                top_n_comments_list = []
            elif len(all_comments_list) > self.top_n_comments:
                top_n_comments_list = \
                    sorted(all_comments_list, key=lambda i: i['comment_upvotes'], reverse=True)[0:self.top_n_comments]
            else:
                top_n_comments_list = \
                    sorted(all_comments_list, key=lambda i: i['comment_upvotes'], reverse=True)
            # Calculate post score for each post from comment points
            try:
                post_object.post_score = post_object.post_score/post_object.total_comments
            # Handle cases when there are no comments for the post
            except ZeroDivisionError:
                post_object.post_score = 0
            # Populate list containing all posts for a subreddit
            self.all_posts_list.append({"post_id": post_object.post_id,
                                        "post_url": post_object.url,
                                        "post_score": post_object.post_score,
                                        "top_n_comments": top_n_comments_list})
            # Add post score to derive subreddit score
            self.subreddit_score += post_object.post_score
        # Calculate overall subreddit score:
        try:
            self.subreddit_score = self.subreddit_score/self.total_posts
        except ZeroDivisionError:
            self.subreddit_score = 0
        # Obtain only top n posts and save the data
        # Handle insufficient number of posts case:
        if len(self.all_posts_list) == 0:
            self.top_n_posts_list = []
        elif len(self.all_posts_list) > self.top_n_posts:
            self.top_n_posts_list = sorted(self.all_posts_list, key=lambda i: i['post_score'], reverse=True)[0:self.top_n_posts]
        else:
            self.top_n_posts_list = sorted(self.all_posts_list, key=lambda i: i['post_score'], reverse=True)
        subreddit_contents = {
            "subreddit": self.subreddit_name,
            "subreddit_score": self.subreddit_score,
            "top_contents": self.top_n_posts_list
        }
        return subreddit_contents
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,670
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/tests/test_helpers/test_rank_subreddits.py
|
from parameterized import parameterized
import unittest
from app.helpers.rank_subreddits import SubredditsRanking
from tests.test_config.constants import SUBREDDIT_CONTENTS_SAVE_DIR_1, START_DATE_1, SUBREDDIT_CONTENTS_SAVE_DIR_2, \
START_DATE_2, RANKING_DATA_1, RANKING_DATA_2
from collections import OrderedDict
def get_ranking_index_test_inputs():
    """Return (fixtures_dir, start_date, expected sorted_ranking_index) cases.

    Each expected value is the OrderedDict of subreddit -> score pairs that
    get_ranking_index() should produce, ordered best score first.
    """
    # parameterize input data and expected result as test case inputs
    return [(SUBREDDIT_CONTENTS_SAVE_DIR_1,
             START_DATE_1,
             OrderedDict([('RatedChess', 1.25), ('IndianMusicOnline', 0.14893617021276595)])),
            (SUBREDDIT_CONTENTS_SAVE_DIR_2,
             START_DATE_2,
             OrderedDict([('IndianMusicOnline', 3.14893617021276595), ('RatedChess', 1.25)]))]
def get_ranking_data_test_inputs():
    """Return (fixtures_dir, start_date, expected ranking data list) cases."""
    # parameterize input data and expected result as test case inputs
    return \
        [
            (
                SUBREDDIT_CONTENTS_SAVE_DIR_1,
                START_DATE_1,
                RANKING_DATA_1
            ),
            (
                SUBREDDIT_CONTENTS_SAVE_DIR_2,
                START_DATE_2,
                RANKING_DATA_2
            )
        ]
class TestSubredditsRanking(unittest.TestCase):
    """Unit tests for SubredditsRanking against canned subreddit content files."""
    def arrange_fixtures(self, start_date, save_dir):
        # Build the object under test for a given run date and fixtures directory.
        return SubredditsRanking(start_date, save_dir)
    # NOTE(review): expand() receives the function itself, not its result —
    # parameterized supports callables, but confirm the installed version does.
    @parameterized.expand(get_ranking_index_test_inputs)
    def test_get_ranking_index(self, save_dir, start_date, expected_result):
        # Arrange
        ranking = self.arrange_fixtures(start_date, save_dir)
        # Act
        ranking.get_ranking_index()
        # Assert
        self.assertEqual(ranking.sorted_ranking_index, expected_result)
    @parameterized.expand(get_ranking_data_test_inputs)
    def test_get_ranking_data(self, save_dir, start_date, expected_result):
        # Arrange
        ranking = self.arrange_fixtures(start_date, save_dir)
        ranking.get_ranking_index()
        # Act
        ranking_data_list = ranking.get_ranking_data()
        # Assert
        self.assertEqual(ranking_data_list, expected_result)
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,671
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/tasks/ingestion.py
|
import luigi
import os
import luigi.contrib.postgres
from app.utils.helper import derive_current_timestamp, create_dir, derive_home_dir
from app.tasks.all_subreddits import GetAllSubreddits
from app.tasks.ingest_subreddit import IngestSubreddit
class Ingestion(luigi.Task):
    """
    Ingest the reddit data for all top subreddits and top posts and comments within that subreddit.
    Reads the subreddit list produced by GetAllSubreddits and dynamically yields
    one IngestSubreddit task per subreddit.
    """
    start = luigi.Parameter(default=derive_current_timestamp())
    top_n_subreddits = luigi.IntParameter(default=3)
    top_n_posts = luigi.IntParameter(default=3)
    top_n_comments = luigi.IntParameter(default=3)
    home_dir = luigi.Parameter(default=derive_home_dir())
    # Derived directory paths; populated as a side effect of output().
    data_lake_dir = luigi.Parameter(default=None)
    save_dir_path = luigi.Parameter(default=None)
    data_dir_path = luigi.Parameter(default=None)
    def output(self):
        # Create directory for the current run.
        # NOTE(review): output() mutates the path attributes and run() relies on
        # them — confirm luigi always calls output() before run() here.
        self.data_lake_dir = os.path.join(self.home_dir, "datalake")
        self.save_dir_path = os.path.join(self.data_lake_dir, str(self.start))
        self.data_dir_path = os.path.join(self.save_dir_path, 'data')
        output_path = os.path.join(self.save_dir_path, "Ingestion_status.txt")
        return luigi.LocalTarget(output_path)
    def run(self):
        # Running the ingestion pipeline to store reddit data for all subreddits and posts
        outputs = []
        create_dir(self.data_lake_dir)
        create_dir(self.save_dir_path)
        create_dir(self.data_dir_path)
        for input in self.input():
            with input.open('r') as list_file:
                subreddits = list_file.readlines()
                # remove whitespace characters like `\n` at the end of each line
                subreddits = [x.strip() for x in subreddits]
                for subreddit_name in subreddits:
                    subreddit_ingestions = IngestSubreddit(subreddit_name=subreddit_name,
                                                           start=self.start,
                                                           top_n_subreddits=self.top_n_subreddits,
                                                           top_n_posts=self.top_n_posts,
                                                           top_n_comments=self.top_n_comments,
                                                           data_dir_path=self.data_dir_path
                                                           )
                    outputs.append(subreddit_ingestions.output().path)
                    # Dynamic dependency: luigi schedules each per-subreddit ingestion.
                    yield subreddit_ingestions
        with self.output().open('w') as f:
            f.write("Ingestion complete")
    def requires(self):
        yield GetAllSubreddits(start=self.start,
                               top_n_subreddits=self.top_n_subreddits,
                               top_n_posts=self.top_n_posts,
                               top_n_comments=self.top_n_comments)
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,672
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/utils/helper.py
|
import os
import datetime
from app.utils.constants import SUBREDDIT_CONTENTS_SAVE_DIR, HOME_DIR, CONFIG_PATH
import configparser
def cleanup_exisiting_files(file_path):
    """Delete *file_path* from disk, tolerating (and reporting) its absence."""
    try:
        os.remove(file_path)
    except FileNotFoundError:
        # Nothing to clean up — just report it instead of raising.
        print(f"No file in path {file_path} to delete")
def derive_current_timestamp():
    """Return a compact run-identifier string: day+month+year+hour+minute+second.

    NOTE(review): components are not zero-padded, so different instants can
    produce identical strings and results do not sort chronologically —
    confirm callers rely only on per-run uniqueness.
    """
    current_datetime = datetime.datetime.now()
    timestamp = f"{current_datetime.day}{current_datetime.month}{current_datetime.year}" \
                f"{current_datetime.hour}{current_datetime.minute}{current_datetime.second}"
    return timestamp
def derive_subreddits_rank_save_path(start_date):
    """Return the per-run 'data' directory under SUBREDDIT_CONTENTS_SAVE_DIR."""
    return os.path.join(SUBREDDIT_CONTENTS_SAVE_DIR, f"{start_date}", 'data')
def derive_subreddits_list_save_path(start_date, home_dir):
    """Return the path of the run's subreddit-list file under the datalake dir."""
    return os.path.join(home_dir, 'datalake', start_date,
                        f"ListOfSubreddits_{start_date}.txt")
def derive_db_config_value(param):
    """Return the value of *param* from the [POSTGRES] section of the config file."""
    # Obtain connection details from config file
    config = configparser.ConfigParser()
    config.read(CONFIG_PATH)
    return config['POSTGRES'][param]
def create_dir(path):
    """Create *path* (including any missing parents) if it does not exist.

    os.makedirs with exist_ok=True removes the check-then-create race of the
    original os.path.exists()/os.mkdir() pair and also tolerates missing
    parent directories.
    """
    os.makedirs(path, exist_ok=True)
def derive_home_dir():
    """Return the configured HOME_DIR constant."""
    return HOME_DIR
def derive_data_lake_dir():
    """Return the datalake directory path under HOME_DIR.

    Bug fix: the original built the path but never returned it, so every
    caller received None.
    """
    return os.path.join(HOME_DIR, "datalake")
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,673
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/tasks/ingest_subreddit.py
|
import luigi
import os
import luigi.contrib.postgres
import json
from app.utils.helper import derive_current_timestamp
from app.helpers.subreddit_ingestion import SubredditIngestion
class IngestSubreddit(luigi.Task):
    """
    Task to individually ingest the Subreddit data and store as separate output targets
    (one <subreddit>.json file per subreddit under data_dir_path).
    """
    subreddit_name = luigi.Parameter()
    start = luigi.Parameter(default=derive_current_timestamp())
    top_n_subreddits = luigi.IntParameter(default=3)
    top_n_posts = luigi.IntParameter(default=3)
    top_n_comments = luigi.IntParameter(default=3)
    # Directory supplied by the parent Ingestion task; output path is derived from it.
    data_dir_path = luigi.Parameter()
    def run(self):
        # Instantiate the subreddit ingestion object
        subreddit_ingestion = SubredditIngestion(self.subreddit_name,
                                                 self.start,
                                                 self.top_n_subreddits,
                                                 self.top_n_posts,
                                                 self.top_n_comments)
        results = subreddit_ingestion.derive_top_data()
        # Save subreddit data into a subreddit specific file
        with open(self.output().path, "w") as output_file:
            json.dump(results, output_file)
    def output(self):
        # derive the save paths for each subreddit
        subreddit_save_path = os.path.join(self.data_dir_path, f"{self.subreddit_name}.json")
        return luigi.LocalTarget(subreddit_save_path)
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,674
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/helpers/rank_subreddits.py
|
import os
import glob
import json
from collections import OrderedDict
from operator import itemgetter
class SubredditsRanking:
    """
    A helper class for the primary task RankSubreddits, that performs all task specific operations:
    loading per-subreddit json files from dir_path and ranking subreddits by score.
    """
    def __init__(self, start_date, dir_path):
        self.start_date = start_date
        self.dir_path = dir_path
        # subreddit_name -> subreddit_score, insertion order = file discovery order
        self.unsorted_ranking_index = {}
        # Same mapping sorted by score, best first; filled by get_ranking_index().
        self.sorted_ranking_index = OrderedDict()
    def get_ranking_index(self):
        """
        Generates the subreddit ranking index, an OrderedDict() in the form {"subreddit_name1": "subreddit_score1,..}
        """
        all_files = os.path.join(self.dir_path, f"*")
        saved_files_list = glob.glob(all_files)
        for subreddit_file in saved_files_list:
            with open(subreddit_file, "r") as input_file:
                subreddit_data = json.load(input_file)
                # Populate unordered ranking index dict with type {subreddit_name: subreddit_score}
                update_item = {subreddit_data['subreddit']: subreddit_data['subreddit_score']}
                self.unsorted_ranking_index.update(update_item)
        # Create sorted ranking index by sorting unordered ranking index by dict values (subreddit_score)
        # in decreasing order to obtain best to worst rankings
        self.sorted_ranking_index = OrderedDict(sorted(self.unsorted_ranking_index.items(),
                                                       key=itemgetter(1),
                                                       reverse=True))
    def get_ranking_data(self):
        """
        Generates the final subreddit rankings list. Requires get_ranking_index()
        to have been called first; ranks are assigned 1..n in sorted order.
        :return ranking_data_list: A list of ranked subreddits along with their scores and storage paths
        """
        ranking_data_list = []
        current_rank = 0
        for subreddit, score in self.sorted_ranking_index.items():
            current_rank += 1
            subreddit_save_path = self.get_saved_path(subreddit)
            ranking_data_list.append(tuple([self.start_date, current_rank, subreddit, score, subreddit_save_path]))
        return ranking_data_list
    def get_saved_path(self, subreddit):
        """
        Derive the storage path of each Subreddit specific json file
        :param subreddit: The subreddit name
        :return: Contents storage path of the subreddit name
        """
        return os.path.join(self.dir_path, f"{subreddit}.json")
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,675
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/tasks_pipeline.py
|
# default run:
# PYTHONPATH='.' luigi --module tasks_pipeline --local-scheduler PipelineWrappertask
# custom run:
# PYTHONPATH='.' luigi --module tasks_pipeline --local-scheduler PipelineWrappertask(<-configurable params passed->))
from luigi.contrib.simulate import RunAnywayTarget
import luigi.contrib.postgres
import configparser
import logging
from app.utils.constants import CONFIG_PATH
from app.utils.helper import derive_current_timestamp, derive_home_dir
from app.tasks.all_subreddits import GetAllSubreddits
from app.tasks.ingestion import Ingestion
from app.tasks.rank_subreddits import RankSubreddits
from app.tasks.store_rankings import StoreRankings
# Reuse luigi's own logger name so task messages appear in the scheduler log.
logger = logging.getLogger('luigi-interface')
# Project configuration, loaded once at import time.
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
class PipelineWrapperTask(luigi.WrapperTask):
    """
    Wrapper task that triggers the whole pipeline in a specific order.

    Custom values may be passed for the task start timestamp, the number of
    top subreddits, posts and comments, and the home directory.
    """
    start = luigi.Parameter(default=derive_current_timestamp())
    top_n_subreddits = luigi.IntParameter(default=3)
    top_n_posts = luigi.IntParameter(default=3)
    top_n_comments = luigi.IntParameter(default=3)
    home_dir = luigi.Parameter(default=derive_home_dir())

    def requires(self):
        # Every stage shares the same tuning parameters; build them once.
        shared = {
            'start': self.start,
            'top_n_subreddits': self.top_n_subreddits,
            'top_n_posts': self.top_n_posts,
            'top_n_comments': self.top_n_comments,
        }
        yield GetAllSubreddits(home_dir=self.home_dir, **shared)
        yield Ingestion(home_dir=self.home_dir, **shared)
        yield RankSubreddits(**shared)
        yield StoreRankings(**shared)

    def run(self):
        # Mark the RunAnywayTarget so the wrapper re-runs on every invocation.
        self.output().done()

    def output(self):
        return RunAnywayTarget(self)
# Entry point: allows `python tasks_pipeline.py` to hand control to luigi.
if __name__ == '__main__':
luigi.run()
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,676
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/tasks/rank_subreddits.py
|
import luigi
import luigi.contrib.postgres
import os
import csv
from app.utils.constants import SUBREDDIT_CONTENTS_SAVE_DIR
from app.helpers.rank_subreddits import SubredditsRanking
from app.utils.helper import derive_subreddits_rank_save_path, derive_current_timestamp
from app.tasks.ingestion import Ingestion
class RankSubreddits(luigi.Task):
"""
Get the ranking of all subreddits based on the calculated subreddit score
"""
# Run timestamp; also names the per-run output directory.
start = luigi.Parameter(default=derive_current_timestamp())
top_n_subreddits = luigi.IntParameter(default=3)
top_n_posts = luigi.IntParameter(default=3)
top_n_comments = luigi.IntParameter(default=3)
def requires(self):
# Ranking can only run after all subreddit content has been ingested.
yield Ingestion(start=self.start,
top_n_subreddits=self.top_n_subreddits,
top_n_posts=self.top_n_posts,
top_n_comments=self.top_n_comments)
def output(self):
# One CSV per run, keyed by the start timestamp.
output_path = os.path.join(SUBREDDIT_CONTENTS_SAVE_DIR, f"{str(self.start)}", "SubredditsRanking.csv")
return luigi.LocalTarget(output_path)
def run(self):
# Instantiating SubredditsRanking() object
dir_path = derive_subreddits_rank_save_path(self.start)
ranking = SubredditsRanking(self.start, dir_path)
# NOTE(review): get_ranking_index() appears to compute state that
# get_ranking_data() consumes — keep this call order; confirm in helper.
ranking.get_ranking_index()
# Save subreddit data into a subreddit specific file
ranking_data_list = ranking.get_ranking_data()
# Save sorted_ranking_index data into a subreddits rankings file
with open(self.output().path, "w") as out:
csv_out = csv.writer(out)
# csv_out.writerow(['timestamp', 'rank', 'subreddit', 'subreddit_score', 'storage_location'])
for row in ranking_data_list:
csv_out.writerow(row)
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,677
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/tests/test_helpers/test_prepare_ingestion.py
|
from parameterized import parameterized
import unittest
from app.helpers.prepare_ingestion import PrepareIngestion
def extract_subreddit_names_test_inputs():
    """Fixture pairs of (raw wiki text, expected subreddit name list)."""
    cases = [
        (" \r\n/r/IndianMusicOnline \r\n/r/RatedChess",
         ['IndianMusicOnline', 'RatedChess']),
        (" \r\n/r/IndianMusicOnline",
         ['IndianMusicOnline']),
    ]
    return cases
class TestPrepareIngestion(unittest.TestCase):
    """Parameterized tests for PrepareIngestion.extract_subreddit_names."""

    def arrange_fixtures(self):
        """Build a fresh system-under-test instance."""
        sut = PrepareIngestion()
        return sut

    @parameterized.expand(extract_subreddit_names_test_inputs)
    def test_extract_subreddit_names(self, input_string, expected_result):
        # Arrange & Act
        actual = self.arrange_fixtures().extract_subreddit_names(input_string)
        # Assert
        self.assertEqual(actual, expected_result)
# Allow running this test module directly with `python`.
if __name__ == '__main__':
unittest.main()
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,678
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/utils/constants.py
|
import os
# Project root, derived from the CWD.
# NOTE(review): assumes the process starts inside the repo (or app/utils);
# running from elsewhere yields a wrong HOME_DIR — confirm deployment layout.
HOME_DIR = os.getcwd().split('/app/utils')[0]
# Reddit wiki page that enumerates all subreddits, served as JSON.
ALL_SUBREDDITS_URL = "https://www.reddit.com/r/ListOfSubreddits/wiki/listofsubreddits.json"
# Root directory for all ingested subreddit content.
SUBREDDIT_CONTENTS_SAVE_DIR = os.path.join(HOME_DIR, "datalake")
# Status/marker files written by the pipeline tasks.
INGESTION_TASKS_STATUS_PATH = os.path.join(HOME_DIR, "ingestion_status.txt")
PIPELINE_STATUS_PATH = os.path.join(HOME_DIR, "pipeline_status.txt")
SUBREDDITS_RANKING_PATH = os.path.join(HOME_DIR, "subreddits_ranking.json")
# Project configuration file.
CONFIG_PATH = os.path.join(HOME_DIR, 'app', 'utils', 'config.ini')
# Cached subreddit listings: full (production) and minimal (testing) variants.
ALL_SUBREDDITS_JSON = os.path.join(HOME_DIR, 'listofsubreddits.json')
MINIMAL_SUBREDDITS_JSON = os.path.join(HOME_DIR, 'minimal_listofsubreddits.json')
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,679
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/helpers/prepare_ingestion.py
|
import json
import urllib
from app.utils.constants import ALL_SUBREDDITS_URL, ALL_SUBREDDITS_JSON, MINIMAL_SUBREDDITS_JSON
class PrepareIngestion:
    """
    A helper class for the primary task GetAllSubreddits, that performs all
    task specific operations.
    """

    def __init__(self):
        self.url = ALL_SUBREDDITS_URL  # r/ListOfSubreddits wiki endpoint
        self.contents = None  # parsed JSON, populated by fetch_all_subreddits_list()

    def fetch_all_subreddits_list(self):
        """
        Fetch the wiki page listing all subreddits and extract the names.

        Falls back to the locally cached JSON file when Reddit rejects the
        request (e.g. HTTP 429 Too Many Requests).

        :return: A list containing all subreddit names
        """
        # BUG FIX: the module only does `import urllib`, which does NOT make
        # the request/error submodules available; import them explicitly so
        # this method works regardless of what else was imported first.
        import urllib.request
        import urllib.error
        try:
            req = urllib.request.Request(self.url)
            response = urllib.request.urlopen(req)
            data = response.read()
            self.contents = json.loads(data)
        # Handle HTTP errors such as 429: Too Many Requests
        except urllib.error.HTTPError:
            # Obtain list of subreddits from already downloaded json file
            # NOTE: Use file present in ALL_SUBREDDITS_JSON (production) or
            # MINIMAL_SUBREDDITS_JSON (for testing)
            with open(ALL_SUBREDDITS_JSON, 'r') as f:
                self.contents = json.load(f)
        unprocessed_string = self.contents['data']['content_md']
        return self.extract_subreddit_names(unprocessed_string)

    @staticmethod
    def extract_subreddit_names(input_string):
        """
        Extract all subreddit names from the raw markdown of r/ListOfSubreddits.

        :param input_string: Raw 'content_md' markdown text
        :return all_subreddits_list: A list containing all subreddit names
        """
        all_subreddits_list = []
        # split string to generate words
        words = input_string.split(' ')
        # select only the subreddit handles, i.e. tokens starting with "/r/"
        # (possibly preceded by a CRLF from the wiki formatting)
        all_subreddit_handles = [word for word in words
                                 if word.startswith("/r/") or word.startswith("\r\n/r/")]
        for subreddit in all_subreddit_handles:
            # Drop a trailing sentence period before extracting the name.
            if '.' in subreddit:
                subreddit = subreddit.rstrip(".")
            all_subreddits_list.append(subreddit.split("/r/")[1])
        return all_subreddits_list
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,680
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/tests/test_tasks/test_tasks.py
|
import luigi
import luigi.interface
import unittest
import os
import shutil
from app.tasks.all_subreddits import GetAllSubreddits
from tests.test_config.constants import SUBREDDIT_CONTENTS_SAVE_DIR
class TestAllTasks(unittest.TestCase):
    """Integration tests for the luigi pipeline tasks."""

    def setUp(self):
        # Start from a clean datalake so task outputs are rebuilt each run.
        if os.path.exists(SUBREDDIT_CONTENTS_SAVE_DIR):
            shutil.rmtree(SUBREDDIT_CONTENTS_SAVE_DIR, ignore_errors=True)

    def tearDown(self):
        # BUG FIX: was `teardown` (lowercase), which unittest never calls,
        # so the datalake was never cleaned up after a test.
        shutil.rmtree(SUBREDDIT_CONTENTS_SAVE_DIR, ignore_errors=True)

    # BUG FIX: this method was defined twice with the same name; the second
    # definition shadowed the first, silently running only one test.
    def test_get_all_subreddits_task(self):
        # Act
        luigi.build([GetAllSubreddits()], local_scheduler=True, no_lock=True, workers=1)
        # Assert
        self.assertEqual(GetAllSubreddits().status, "Completed")
# Allow running this test module directly with `python`.
if __name__ == '__main__':
unittest.main()
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,681
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/tasks/store_rankings.py
|
import luigi
import os
import luigi.contrib.postgres
import configparser
import logging
from app.utils.constants import SUBREDDIT_CONTENTS_SAVE_DIR, CONFIG_PATH
from app.tasks.rank_subreddits import RankSubreddits
from app.helpers.ranking_storage import RankingStorage
from app.utils.helper import derive_current_timestamp
# Reuse luigi's own logger name so task messages appear in the scheduler log.
logger = logging.getLogger('luigi-interface')
# Project configuration, loaded once at import time.
config = configparser.ConfigParser()
config.read(CONFIG_PATH)
class StoreRankings(luigi.Task):
    """
    Store the rankings data onto postgres for historical tracking.
    """
    # Run timestamp; also names the per-run output directory.
    start = luigi.Parameter(default=derive_current_timestamp())
    top_n_subreddits = luigi.IntParameter(default=3)
    top_n_posts = luigi.IntParameter(default=3)
    top_n_comments = luigi.IntParameter(default=3)

    def requires(self):
        yield RankSubreddits(start=self.start,
                             top_n_subreddits=self.top_n_subreddits,
                             top_n_posts=self.top_n_posts,
                             top_n_comments=self.top_n_comments)

    def output(self):
        # Marker file signalling that all ranking rows were inserted.
        output_path = os.path.join(SUBREDDIT_CONTENTS_SAVE_DIR, f"{str(self.start)}", "db_insert_status.txt")
        return luigi.LocalTarget(output_path)

    def run(self):
        """Read every upstream ranking CSV and insert its rows into postgres."""
        store_rankings = RankingStorage()
        # Renamed from `input`, which shadowed the builtin of the same name.
        for target in self.input():
            with target.open('r') as csv_file:
                for line in csv_file:
                    # NOTE(review): naive split assumes no commas inside
                    # fields; switch to csv.reader if fields can be quoted.
                    row_elements = line.strip('\n').split(',')
                    store_rankings.insert_into_db(row_elements)
        with self.output().open('w') as f:
            f.write("Finished writing to database")
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,682
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/app/helpers/ranking_storage.py
|
from app.utils.constants import SUBREDDIT_CONTENTS_SAVE_DIR
import os
import psycopg2
import csv
from app.utils.helper import derive_db_config_value
class RankingStorage:
    """
    A helper class for the primary task StoreRankings, that performs all
    task specific operations against the postgres rankings table.
    """

    def __init__(self):
        # Connection settings are read from the project config file.
        self.host = derive_db_config_value('host')
        self.user = derive_db_config_value('user')
        self.dbname = derive_db_config_value('dbname')
        self.password = derive_db_config_value('password')
        self.conn = self.get_db_conn()

    def get_db_conn(self):
        """
        Derive the connection string and supply PostgresDB connection
        :return conn: Connection to the specified database and table
        """
        connection_string = f"host={self.host} dbname={self.dbname} user={self.user} password={self.password}"
        return psycopg2.connect(connection_string)

    def insert_into_db(self, row):
        """
        Insert individual rankings row data into the database rankings table
        :param row: The ranking associated with each subreddit rank to be inserted into the rankings table
        """
        # FIX: the cursor was never closed; psycopg2 cursors support the
        # context-manager protocol, which closes them deterministically
        # even when execute() raises.
        with self.conn.cursor() as cursor:
            cursor.execute("INSERT INTO subreddit_rankings VALUES (%s, %s, %s, %s, %s)",
                           (row[0], row[1], row[2], row[3], row[4]))
        self.conn.commit()
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,683
|
ms-shankar/trending-subreddits
|
refs/heads/master
|
/tests/test_config/constants.py
|
import os
# Test fixtures root; assumes tests run from the repo root.
HOME_DIR = os.getcwd()
# Two snapshot runs of pre-ingested test data, keyed by start timestamp.
SUBREDDIT_CONTENTS_SAVE_DIR_1 = os.path.join(HOME_DIR, "tests", "test_data", "35202015316", "data")
START_DATE_1 = "35202015316"
SUBREDDIT_CONTENTS_SAVE_DIR_2 = os.path.join(HOME_DIR, "tests", "test_data", "35202015317", "data")
START_DATE_2 = "35202015317"
# Per-subreddit json paths for each snapshot, in expected rank order.
RANK1_PATH_1 = os.path.join(f"{SUBREDDIT_CONTENTS_SAVE_DIR_1}", "RatedChess.json")
RANK2_PATH_1 = os.path.join(f"{SUBREDDIT_CONTENTS_SAVE_DIR_1}", "IndianMusicOnline.json")
RANK1_PATH_2 = os.path.join(f"{SUBREDDIT_CONTENTS_SAVE_DIR_2}", "IndianMusicOnline.json")
RANK2_PATH_2 = os.path.join(f"{SUBREDDIT_CONTENTS_SAVE_DIR_2}", "RatedChess.json")
# Expected ranking tuples: (timestamp, rank, subreddit, score, storage path).
RANKING_DATA_1 = [('35202015316', 1, 'RatedChess', 1.25, RANK1_PATH_1),
('35202015316', 2, 'IndianMusicOnline', 0.14893617021276595, RANK2_PATH_1)]
RANKING_DATA_2 = [('35202015317', 1, 'IndianMusicOnline', 3.148936170212766, RANK1_PATH_2),
('35202015317', 2, 'RatedChess', 1.25, RANK2_PATH_2)]
INPATH = os.path.join(HOME_DIR, "tests", "test_data", "infile.txt")
# Datalake location used by the task-level tests.
BASE_DIR = os.getcwd().split('/tests/test_config')[0]
SUBREDDIT_CONTENTS_SAVE_DIR = os.path.join(BASE_DIR, "datalake")
|
{"/app/tasks/all_subreddits.py": ["/app/helpers/prepare_ingestion.py", "/app/utils/helper.py"], "/app/helpers/subreddit_ingestion.py": ["/app/utils/constants.py"], "/tests/test_helpers/test_rank_subreddits.py": ["/app/helpers/rank_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/ingestion.py": ["/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingest_subreddit.py"], "/app/utils/helper.py": ["/app/utils/constants.py"], "/app/tasks/ingest_subreddit.py": ["/app/utils/helper.py", "/app/helpers/subreddit_ingestion.py"], "/tasks_pipeline.py": ["/app/utils/constants.py", "/app/utils/helper.py", "/app/tasks/all_subreddits.py", "/app/tasks/ingestion.py", "/app/tasks/rank_subreddits.py", "/app/tasks/store_rankings.py"], "/app/tasks/rank_subreddits.py": ["/app/utils/constants.py", "/app/helpers/rank_subreddits.py", "/app/utils/helper.py", "/app/tasks/ingestion.py"], "/tests/test_helpers/test_prepare_ingestion.py": ["/app/helpers/prepare_ingestion.py"], "/app/helpers/prepare_ingestion.py": ["/app/utils/constants.py"], "/tests/test_tasks/test_tasks.py": ["/app/tasks/all_subreddits.py", "/tests/test_config/constants.py"], "/app/tasks/store_rankings.py": ["/app/utils/constants.py", "/app/tasks/rank_subreddits.py", "/app/helpers/ranking_storage.py", "/app/utils/helper.py"], "/app/helpers/ranking_storage.py": ["/app/utils/constants.py", "/app/utils/helper.py"]}
|
34,688
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_credentials.py
|
# credentials.py
# Twitter App access keys for @user
# SECURITY NOTE(review): these API keys and secrets are hard-coded and
# committed to version control. They should be revoked and loaded from
# environment variables or an untracked config file instead.
# Consume:
CONSUMER_KEY = '5DTFYr4f0OPxT5CLgBFnlph6o'
CONSUMER_SECRET = 'x1LrDVkXhijc6sF8e11feKpJh5AVt85peTyBzWRFSbRDo8icFJ'
# Access:
ACCESS_TOKEN = '1423189604-U62DkH9fHtUbHAHxu69OiK5m7ljV06Kc4tKOUKQ'
ACCESS_SECRET = 'prjj1qFpKXiyOCu3U0xwjForprXcNUUyzIoBvGh0R0e88'
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,689
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_Raw2Parsed.py
|
# Convert raw data into parsed data.
import ast
import json

dic = {}
data = []
# Raw-data file to parse: each line is a Python-literal list holding
# repeating groups of six fields (name, text, time, fav, retw, source).
with open('./All_Data/Twitters_Rawdata/2020-07-20_API.txt') as fp:
    for line in fp:
        obj = ast.literal_eval(line)
        for x in range(len(obj)):
            if x % 6 == 0:
                dic = {}
                dic['Name'] = obj[x]
            elif x % 6 == 1:
                dic['Text'] = obj[x]
            elif x % 6 == 2:
                dic['Time'] = obj[x]
            elif x % 6 == 3:
                dic['Fav'] = obj[x]
            elif x % 6 == 4:
                dic['Retw'] = obj[x]
            else:
                dic['Source'] = obj[x]
                # Record is complete once its sixth field arrives.
                data.append(dic)

json_result = json.dumps(data)  # serialize to JSON text
# print(json_result)
# FIX: write via a context manager so the handle is closed even if the
# write fails (was open()/write()/close()).
with open('./All_Data/Twitters_ParsedData/2020-07-20_API.json', 'w') as f:
    f.write(json_result)
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,690
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_Crawler.py
|
from MetaClass import Clean
from MetaClass import Crawler
import tweepy
from Twitters_credentials import *
# import Twitters_w2l
import time
from datetime import datetime
import json
# Date stamp used to name the raw-data output file.
today=datetime.now().strftime('%Y-%m-%d')
# Module-level accumulator: flat list of tweet fields, six per tweet.
RawData=[]
def word2list(filename):
    """Read *filename* and return one whitespace-stripped entry per line."""
    with open(filename, 'r') as f:
        return [line.strip() for line in f]
def get_Twitters_data():
    """
    Crawl up to 200 recent tweets for every account listed in
    Twitters_top100.txt and dump the raw fields to
    ./All_Data/Twitters_Rawdata/<today>_API.txt.

    Fields are appended to the module-level RawData list as flat groups of
    six values per tweet: name, text, time, favourites, retweets, source.
    """
    # Authentication and access using keys:
    auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
    # Return API with authentication:
    api = tweepy.API(auth)
    # create an extractor object:
    extractor = api
    # top 100 twitter account names
    top100_list = word2list('Twitters_top100.txt')
    list_try = []  # accounts that were crawled successfully
    # create a tweet list as follows:
    for tweetersname in top100_list:
        try:
            tweets = extractor.user_timeline(screen_name=tweetersname, count=200)
            print(tweetersname)  # show which author is being crawled
            list_try.append(tweetersname)
            for tweet in tweets[:200]:
                RawData.append(tweetersname)                                    # author name
                RawData.append(tweet.text)                                      # tweet text
                RawData.append(tweet.created_at.strftime('%Y-%m-%d %H:%M:%S'))  # tweet time
                RawData.append(tweet.favorite_count)                            # like count
                RawData.append(tweet.retweet_count)                             # retweet count
                RawData.append(tweet.source)                                    # tweet client
        except Exception:
            # FIX: was a bare `except: pass`. Keep the best-effort behaviour
            # (skip accounts that fail) but stop swallowing
            # KeyboardInterrupt/SystemExit as well.
            pass
    with open(f'./All_Data/Twitters_Rawdata/{today}_API.txt', 'w') as f:
        f.write(str(RawData))
    # with open('./All_Data/Twitters_Rawdata/list_try.txt','w') as f:
    #     f.write(str(list_try))
# Run the crawl when executed directly.
if __name__ == '__main__':
get_Twitters_data()
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,691
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_gen_top_twitters_keys.py
|
#推文內容丟入W2V找出前四名
import pandas as pd
import json
import glob
from tqdm import tqdm
from gensim.models import Word2Vec
import pickle
import numpy as np
from Module_Clean import Clean
#清洗資料
def clean_text(x):
    """Run the standard cleaning passes over one tweet and return its text."""
    cleaner = Clean(x)
    cleaner.Capitalize()
    cleaner.DeletePunctuation()
    cleaner.DeleteRedundant_Twitters()
    return cleaner.Text
def gen_keywords(start, end):
    """
    For every day in [start, end], train a Word2Vec model on that day's
    cleaned tweets and rank words by how often they appear among the
    most-similar neighbours of each vocabulary word.

    Results are pickled to 'top_twitters_keys' as {date: [(word, score), ...]}.

    :param start: first date (any pandas-parseable date string)
    :param end: last date, inclusive
    """
    total_date = pd.date_range(start, end, freq='d')
    ans = {}
    for dates in tqdm(total_date):
        ans_list = []
        all_similar = []
        date = dates.strftime('%Y%m%d')
        try:
            df = pd.read_json(f'./All_Data/2_weeks_twitters/{date}.json')
            df['clean_text'] = df.Text.apply(clean_text)
            string = [' '.join(df.clean_text.values).split()]
            # min_count: minimum occurrences for a word to enter the vocabulary
            model = Word2Vec(string, min_count=2)
            for all_word in model.wv.vocab.keys():
                similar = model.wv.most_similar(all_word)
                for i in similar:
                    all_similar.append(i[0])
            value, count = np.unique(all_similar, return_counts=True)
            count_sort_ind = np.argsort(-count)
            value = value[count_sort_ind]
            count = count[count_sort_ind]
            for i in range(len(count)):
                ans_list.append((value[i], count[i] / len(model.wv.vocab.keys())))
            ans[date] = ans_list
        except Exception:
            # FIX: was a bare `except:`. Days with missing/empty data files
            # are expected and skipped, but Ctrl-C must still interrupt.
            print(dates)
    # FIX: write the pickle via a context manager (handle always closed).
    with open('top_twitters_keys', 'wb') as f:
        pickle.dump(ans, f)
# Generate keyword rankings for the full historical window when run directly.
if __name__ == '__main__':
gen_keywords('2018-01-01','2020-07-08')
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,692
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/News_Crawler_WSJ.py
|
# ws.py
from bs4 import BeautifulSoup as bs
import json
from datetime import date
import requests
from fake_useragent import UserAgent
def getUrl(text):
    """
    Extract (title, link) from the string form of an anchor tag.

    Splits on double quotes: a piece starting with 'https' is the link,
    a piece starting with '>' holds the title (with trailing markup cut).
    """
    title, link = '', ''
    for piece in str(text).split('"'):
        if len(piece) >= 5:
            if piece[:5] == 'https':
                link = piece
            if piece[0] == '>':
                title = piece[1:-5]
    return title, link
# def cleanHtml(a):
# str_1 = str(a)
# split_1 = str_1.split('"')
# Html = split_1[3]
# split_2 = split_1[4].split('>')
# split_3 = split_2[1].split('<')
# Title = split_3[0]
# # print(Html,Title)
# return(Html,Title)
# Pick a random User-Agent so WSJ does not block the scraper outright.
ua = UserAgent()
headers = {
'User-Agent':ua.random
}
print(headers)  # check which user agents work and which do not
today = str(date.today())
today_for_crawl = str(date.today()).replace('-', '')
target = '20200715'  # date to crawl (YYYYMMDD)
date_for_pubdate = '2020-07-15'
url='https://www.wsj.com/news/archive/'+target
print(target)
page = requests.get(url,headers = headers)
print(page)
# print(page.text)
soup = bs(page.content, 'html.parser')
found = soup.findAll('a', {'class':''})
# print(found)
# Keep only anchors whose URL contains an 'articles' path segment.
found = [a for a in soup.findAll('a', {'class':''}) if 'articles' in str(a).split('/')]
print(len(found))
# print(found)
news_list = []
for i, a in enumerate(found):
# if i%2==1:
b = getUrl(a)
# Skip image anchors and the 'About ...' navigation links.
if (not '<img' in b) and b[0][0:5]!='About':
news_list.append(b)
# print(news_list)
# print('~~~~~~~~~~~~~~~~~~')
# print(len(news_list[1]))
# Normalize each (title, link) pair into the shared news record schema;
# fields unavailable from the archive page are left as None.
data = []
for news in news_list:
dic = {}
dic['title'] = news[0]
dic['link'] = news[1]
dic['pubdate'] = date_for_pubdate
dic['source'] = 'Wall Street Journal'
dic['author'] = None
dic['description'] = None
dic['content'] = None
dic['urlToImage'] = None
data.append(dic)
with open(f'./All_Data/News_ParsedData/2020-07-15_WSJ.json','w') as f:
json.dump(data,f)
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,693
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/util.py
|
# tag.py
import random
import json
import datetime as dt
from download import strategy_list
# Filesystem layout for the persisted user data.
SchemaLocation = 'All_Data/'
Path = {
    'schema': SchemaLocation,                                  # data root
    'feed': SchemaLocation + 'Reference/Info.json',            # per-user activity feed
    'id': SchemaLocation + 'Reference/Info_ID.json',           # username -> credentials
}
class Sign():
    """User-facing status messages for the sign-in / sign-up flow.

    SUCCESS is the empty string, so callers can compare against it cheaply
    (and it is falsy when rendered in the template).
    """
    SUCCESS = ''
    USERNAME_TAKEN = 'This username is already taken :('
    ABSENT_USERNAME = 'Username does not exist >o<'
    WRONG_PASSWORD = 'You have the wrong password :('
    WRONG_RETYPE = 'Your passwords don\'t match :('
    EMPTY_INPUT = 'You left your input boxes empty ><'
    PICK_UTIL = 'Please pick a destination before you enjoy the ride :)'
def tag(num):
    """Return a random alphanumeric string of length *num* (used as a user id).

    The alphabet is 0-9, A-Z, a-z — identical to the old chr() arithmetic
    (0-9 digits, 10-35 -> chr(n+55) uppercase, 36-61 -> chr(n+61) lowercase),
    but built with str.join instead of quadratic string +=.
    """
    alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
    return ''.join(random.choice(alphabet) for _ in range(num))
def json_safeLoading(filename):
    """Load JSON from *filename*; on failure reset the file to {} and return {}.

    The previous bare `except:` swallowed everything (including
    KeyboardInterrupt); only I/O errors and malformed JSON
    (json.JSONDecodeError is a ValueError subclass) are handled now.
    """
    try:
        with open(filename, 'r') as f:
            return json.load(f)
    except (OSError, ValueError):
        # Missing/unreadable file or invalid JSON: recreate as an empty object.
        with open(filename, 'w+') as f:
            json.dump({}, f)
        return {}
def json_safeDumping(content, filename):
    """Serialise *content* to *filename* as pretty-printed (indent=4) JSON."""
    with open(filename, 'w+') as fh:
        json.dump(content, fh, indent=4)
class InfoJson():
    """A JSON file mirrored in memory as self.info, with pull/push sync."""
    def __init__(self, filename):
        self.info = json_safeLoading(filename)  # parsed contents ({} if unreadable)
        self.filename = filename
    def pull(self):
        """Re-read self.info from disk, discarding in-memory changes."""
        # BUG FIX: was json_safeLoading(filename) — NameError, since `filename`
        # only exists as an __init__ parameter; use the stored attribute.
        self.info = json_safeLoading(self.filename)
    def push(self):
        """Write self.info back to disk."""
        json_safeDumping(self.info, self.filename)
class UserList(InfoJson):
    """Registry mapping username -> {'password', 'id'}, persisted via InfoJson."""
    def __init__(self, filename):
        super().__init__(filename)
    def create(self, username, password, tag):
        """Register *username* with its password and opaque id tag, then persist."""
        record = {'password': password, 'id': tag}
        self.info[username] = record
        self.push()
    def delete(self):
        pass  # TODO: account deletion not implemented
    def changePassword(self):
        pass  # TODO: password change not implemented
class UserFeed(InfoJson):
    """Per-user activity feed (logins, clicks, notes, raw log), keyed by user tag."""
    def __init__(self, filename):
        super().__init__(filename)
    def create(self, tag, time):
        """Initialise an empty feed for a new user and record the first login."""
        self.info[tag] = {
            'login':[],
            'click':[],
            'note':[],
            'log':[],
            'portfolio': strategy_list
        }
        self.updateLogin(tag, time)
    def notes(self, tag):
        return self.info[tag]['note']
    def clicks(self, tag):
        return self.info[tag]['click']
    def logs(self, tag):
        return self.info[tag]['log']
    def addHistory(self, tag, log):
        """Append one raw action record to the user's log (not persisted here)."""
        self.logs(tag).append(log)
    def updateLogin(self, tag, time):
        self.addHistory(tag, {'action': 'login', 'content': {'time': time}})
        self.info[tag]['login'].append(time)
        self.push()
    def updateClick(self, tag, clickContent):
        """Record a click; an existing entry for the same story moves to the end."""
        self.addHistory(tag, {'action': 'click', 'content': clickContent})
        clicks = self.clicks(tag)
        # BUG FIX: popping inside `for i, x in enumerate(lst)` skipped the element
        # following each removal; locate the duplicate first, then remove it once.
        for i, click in enumerate(clicks):
            if click['title']==clickContent['title'] and click['url']==clickContent['url']:
                clicks.pop(i)
                break
        clicks.append(clickContent)
        self.push()
    def updateNote(self, tag, noteContent):
        """Record a note; an existing note for the same story is replaced."""
        self.addHistory(tag, {'action': 'note', 'content': noteContent})
        notes = self.notes(tag)
        for i, note in enumerate(notes):
            if note['title']==noteContent['title'] and note['url']==noteContent['url']:
                notes.pop(i)
                break
        notes.append(noteContent)
        self.push()
class UserInfo():
    # Session state for the (single, global) logged-in user: credentials,
    # current form values, and delegation to the persisted UserList/UserFeed.
    # NOTE(review): all of these are CLASS-level attributes shared by every
    # instance; currentForm is a mutable dict mutated in place in several
    # methods, and its 'time' value is computed once at import time.
    utilities = [{
        'image': '/static/img/togo/{}.png'.format(a),
        'name': a,
        'id': a,
        'input': '{}_input'.format(a),
        'html': '{}.html'.format(a),
    } for a in ['NewsAssistant', 'Stock',]]
    currentForm = {
        'date':'2020-05-05',
        'pf':'pph_2',
        'kw':'',
        "click":{},
        'time':dt.datetime.now().strftime('%Y%m%d %H:%M:%S'),
        'note': '',
    }
    userlist = UserList(Path['id'])
    userfeed = UserFeed(Path['feed'])
    flag = {'signup':False, 'login':False}
    pflists = [{'value': 'New', 'companies': ['Company A', '']}]
    def __init__(self):
        # NOTE(review): this aliases the class-level dict rather than copying it,
        # so later in-place edits to currentForm also change defaultForm.
        self.defaultForm = self.currentForm
    def loggedIn(self):
        return self.flag['login']
    def signingUp(self):
        return self.flag['signup']
    def notes(self):
        # Live (mutable) note list from the persisted feed.
        return self.userfeed.notes(self.tag)
    def clicks(self):
        return self.userfeed.clicks(self.tag)
    def logs(self):
        return self.userfeed.logs(self.tag)
    def copy(self, target):
        # Shallow copy of a list (elements are shared).
        dest = []
        for t in target:
            dest.append(t)
        return dest
    def copyClicks(self):
        return self.copy(self.clicks())
    def copyNotes(self):
        return self.copy(self.notes())
    def copyLogs(self):
        return self.copy(self.logs())
    def updateTime(self):
        # Stamp the form with the current wall-clock time.
        self.currentForm['time'] = dt.datetime.now().strftime('%Y%m%d %H:%M:%S')
    def addHistory(self, content):
        self.userfeed.addHistory(self.tag, content)
    def blankInputs(self):
        # Template context for the login page before any input was given.
        return {
            'msg': '',
            'username': '',
            'password': '',
            'retype': '',
            'show_retype': '',
            'utilities': self.utilities,
        }
    def getInputs(self):
        # Template context echoing the user's last submitted values.
        return {
            'msg': self.msg,
            'username': self.username,
            'password': self.password,
            'retype': self.retype,
            'show_retype': self.show_retype,
            'utilities': self.utilities,
        }
    def fillInfo(self, username, password, retype, show_retype=0, msg=''):
        # Stash the raw form inputs on the instance for later validation.
        self.username = username
        self.password = password
        self.retype = retype
        self.show_retype = show_retype
        self.msg = msg
    def updateForm(self, req=None):
        # Rebuild currentForm from a Flask request, or reset to the default.
        # NOTE(review): the rebuilt dict drops the 'click' and 'note' keys that
        # the class-level default contains — confirm downstream templates cope.
        if not req:
            self.returnDefault()
            return
        self.currentForm = {
            'date': req.values['datepicker'],
            'pf': req.values['portfolio'],
            'kw': req.form['ikeyword'],
            'time': dt.datetime.now().strftime('%Y%m%d %H:%M:%S'),
        }
    def returnDefault(self):
        self.currentForm = self.defaultForm
        return self.currentForm
    def checkRetype(self):
        # Validate password + retype; sets and returns self.msg.
        if self.retype=='' or self.password=='':
            self.msg = Sign.EMPTY_INPUT
        elif not self.retype==self.password:
            self.msg = Sign.WRONG_RETYPE
        else:
            self.msg = Sign.SUCCESS
        return self.msg
    def checkName(self, signin=False):
        # Case-insensitive username check; meaning flips with signin:
        # for sign-in an existing name is SUCCESS, for sign-up it is TAKEN.
        if self.username=='':
            self.msg = Sign.EMPTY_INPUT
        elif self.username.upper() in [u.upper() for u in self.userlist.info.keys()]:
            if signin:
                self.msg = Sign.SUCCESS
            else:
                self.msg = Sign.USERNAME_TAKEN
        else:
            self.msg = Sign.SUCCESS
        return self.msg
    def checkSignup(self):
        if self.checkName()==Sign.SUCCESS:
            if self.signingUp():
                self.checkRetype()
            else:
                self.msg = Sign.SUCCESS
        return self.msg
    def signup(self):
        # Two-step sign-up: the first successful call only flips the flags and
        # reveals the retype box; a second call actually creates the account.
        self.show_retype = 1
        if self.checkSignup()==Sign.SUCCESS:
            if self.signingUp():
                self.tag = tag(40)
                self.userlist.create(self.username, self.password, self.tag)
                self.updateTime()
                self.userfeed.create(self.tag, self.currentForm['time'])
                self.flag['login'] = True
            self.flag['signup'] = True
        return self.msg
    def checkSignin(self):
        if self.username=='' or self.password=='':
            self.msg = Sign.EMPTY_INPUT
        elif self.checkName(signin=True)==Sign.SUCCESS:
            # NOTE(review): plain-text password comparison — no hashing.
            if not self.password==self.userlist.info[self.username]['password']:
                self.msg = Sign.WRONG_PASSWORD
        else:
            self.msg = Sign.ABSENT_USERNAME
        return self.msg
    def signin(self):
        self.show_retype = 0
        if self.checkSignin()==Sign.SUCCESS:
            self.updateTime()
            self.tag = self.userlist.info[self.username]['id']
            self.userfeed.updateLogin(self.tag, self.currentForm['time'])
            self.flag['login'] = True
        return self.msg
    def addNote(self, currForm, req):
        # Persist a note attached to a story; req is the AJAX JSON payload.
        self.currentForm = {
            "date": currForm['date'],
            "pf": currForm['pf'],
            "kw": currForm['kw'],
            "url": req['url'],
            "title" : req['title'],
            "tab": req['tab'],
            'time': dt.datetime.now().strftime('%Y%m%d %H:%M:%S'),
            'note': req['note'],
        }
        self.userfeed.updateNote(self.tag, self.currentForm)
    def addClick(self, currForm, req):
        # Persist a story click with the form context it happened under.
        self.currentForm = {
            "date": currForm['date'],
            "pf": currForm['pf'],
            "kw": currForm['kw'],
            "url": req['url'],
            "title" : req['title'],
            "tab": req['tab'],
            'note': '',
            'time': dt.datetime.now().strftime('%Y%m%d %H:%M:%S'),
        }
        self.userfeed.updateClick(self.tag, self.currentForm)
    def changeNote(self, news, noteContent):
        # Overwrite the note text of the entry matching (title, url).
        for i, note in enumerate(self.notes()):
            if note['title']==news['title'] and note['url']==news['url']:
                self.notes()[i]['note'] = noteContent
        self.userfeed.push()
    def deleteNote(self, news):
        self.addHistory({'action': 'delete note', 'content': news})
        for i, note in enumerate(self.notes()):
            if note['title']==news['title'] and note['url']==news['url']:
                self.notes().pop(i)
                break
        self.userfeed.push()
    def deleteStory(self, news, move=False):
        # move=True means the click is being relocated, not deleted, so no log entry.
        if not move:
            self.addHistory({'action': 'delete click', 'content': news})
        for i, click in enumerate(self.clicks()):
            if click['title']==news['title'] and click['url']==news['url']:
                self.clicks().pop(i)
        self.userfeed.push()
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,694
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_Cleaned to 2_week_twitter.py
|
#Cleaned data to two_week_twitter
import json
import pandas as pd
import datetime
# For every day in the range, merge the trailing two weeks of cleaned tweets
# into one file named after the window's last day.
time_range = pd.date_range('20180101','20200713')  # span of output files
for time in time_range:
    two_week_range = pd.date_range(pd.to_datetime(time)-pd.to_timedelta(2,'w'),time,freq='d')
    df = []
    for days in two_week_range:
        days = days.strftime('%Y%m%d')
        try:
            with open(f'./All_Data/Twitters_CleanedData/{days}.json','r') as f:
                df.extend(json.load(f))
        except (FileNotFoundError, json.JSONDecodeError):
            # No cleaned tweets for that day (or file corrupt) — skip it.
            # (Was a bare except; narrowed so real bugs are no longer hidden.)
            pass
    # After the loop `days` holds the window's last day, which equals `time`,
    # so the output file is keyed by the window end date.
    with open(f'./All_Data/2_weeks_twitters/{days}.json','w') as file:
        json.dump(df,file)
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,695
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_gen_top_twitters.py
|
#從gen_top_twitters_keys中選出的關鍵字,再去選出推文
import pickle
import pandas as pd
from tqdm import tqdm
import json
from Module_Clean import Clean
def clean_text(x):  # normalise a tweet before keyword matching
    """Upper-case the text, strip punctuation and Twitter stop-words; return it."""
    cleaner = Clean(x)
    cleaner.Capitalize()
    cleaner.DeletePunctuation()
    cleaner.DeleteRedundant_Twitters()
    return cleaner.Text
# NOTE(review): this module-level load duplicates the one inside
# gen_toptwitters (which re-reads the same file); `key` here appears unused —
# presumably leftover from interactive testing.
with open('top_twitters_keys','rb')as f:
    key = pickle.load(f)
def gen_toptwitters(start, end):
    """For each day in [start, end], pick the day's top-4 keywords and write the
    matching tweets (keyword prepended as element 0) to four JSON files.
    """
    with open('top_twitters_keys','rb')as f:
        key = pickle.load(f)
    total_date = [x.strftime('%Y%m%d') for x in pd.date_range(start,end,freq='d')]
    for date in tqdm(total_date):
        # Take the day's four highest-scoring keywords.
        keys = [i for i,j in key[date][:4]]
        df = pd.read_json(f'./All_Data/2_weeks_twitters/{date}.json')
        df['clean_text'] = df.Text.apply(clean_text)
        # One hit-count column per keyword; fewer than 4 keywords is tolerated.
        for i in range(4):
            try:
                df[f'count{i+1}'] = df.clean_text.apply(lambda x:x.split().count(keys[i]))
            except:pass
        df=df.sort_values(['Time','Name'],ascending=False)
        # Insert the keyword as the list's first element; the UI reads it there.
        # NOTE(review): if a query/keys[N] lookup fails, top_twitters_N is either
        # undefined (NameError at the dump below, first iteration) or silently
        # carries the PREVIOUS date's data — the bare except/pass hides both.
        try:
            top_twitters_1 = json.loads(df.query('count1 > 0').to_json(orient = 'records'))
            top_twitters_1.insert(0,keys[0])
        except:pass
        try:
            top_twitters_2 = json.loads(df.query('count2 > 0').to_json(orient = 'records'))
            top_twitters_2.insert(0,keys[1])
        except:pass
        try:
            top_twitters_3 = json.loads(df.query('count3 > 0').to_json(orient = 'records'))
            top_twitters_3.insert(0,keys[2])
        except:pass
        try:
            top_twitters_4 = json.loads(df.query('count4 > 0').to_json(orient = 'records'))
            top_twitters_4.insert(0,keys[3])
        except:pass
        with open(f'./All_Data/top_twitters/{date}_1.json','w')as f:
            json.dump(top_twitters_1,f)
        with open(f'./All_Data/top_twitters/{date}_2.json','w')as f:
            json.dump(top_twitters_2,f)
        with open(f'./All_Data/top_twitters/{date}_3.json','w')as f:
            json.dump(top_twitters_3,f)
        with open(f'./All_Data/top_twitters/{date}_4.json','w')as f:
            json.dump(top_twitters_4,f)
if __name__ == '__main__':
    # Regenerate the full history of top-tweet files.
    gen_toptwitters('2018-01-01','2020-07-08')
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,696
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/News_try_and_delete_redundant.py
|
#抓贅字用,修正清洗資料用
#也可看時間序列變化
import pickle
import pandas as pd
# Pickled daily keyword scores — presumably a dict keyed by 'YYYYMMDD' mapping
# to (keyword, score) pairs; confirm against the keys-generation script.
with open('top_news_keys','rb')as f:
    data = pickle.load(f)
def show_key_words_with_score():
    """Print, for every day from 2018-01-01 to 2020-07-08, the date and its
    top-3 (keyword, score) pairs; flag dates missing from the pickle.
    """
    total_date = pd.date_range('20180101','20200708',freq='d')
    total_date = total_date.strftime('%Y%m%d')
    for date in total_date:
        try:
            print(date)
            print(data[date][:3])
        except KeyError:
            # Date absent from the pickled scores. (Was a bare except,
            # which would also have hidden unrelated errors.)
            print('This day is wrong:',date)
    return()
def show_only_key_words():
    """Print every keyword (without scores) among each day's top 3."""
    for day in data:
        for word, _score in data[day][:3]:
            print(word)
    return()
if __name__ == '__main__':
    # Toggle between the two inspection views by (un)commenting.
    # show_only_key_words()
    show_key_words_with_score()
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,697
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_gen_top_tweet_author.py
|
#輸出三個私募基金推特
import json
import pandas as pd
# For each day, extract one author's tweets from the day's two-week window
# file and write them to a per-author/per-day JSON file.
# (A dead `data = pd.DataFrame()` initialiser, immediately overwritten inside
# the loop, was removed.)
time_range = pd.date_range('20180101','20200708')
time_range = time_range.strftime('%Y%m%d')
for date in time_range:
    with open(f'./All_Data/2_weeks_twitters/{date}.json') as file:
        data = pd.read_json(file)
    df = data.query('Name == "realDonaldTrump"') # single author; e.g. FundyLongShort
    ans = json.loads(df.to_json(orient='records'))
    with open(f'./All_Data/top_author_twitters/realDonaldTrump+{date}.json','w') as f:
        json.dump(ans,f)
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,698
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/index.py
|
# index.py
from flask import Flask, redirect, url_for, render_template, request,jsonify,make_response
import json
import pandas as pd
import datetime as dt
from util import *
from getInputs import *
# NOTE(review): a single module-level UserInfo is shared by ALL HTTP clients —
# any two browsers hitting the app share one login/session state.
user = UserInfo()
app = Flask(__name__)
# When True, GET /NewsAssistant silently logs in a hard-coded dev account.
developing = True
@app.route('/draw')
def drawingBoard():
    """Serve the standalone drawing-board page."""
    return render_template('DrawingBoard.html')
@app.route("/")
def login():
return redirect(url_for('main'))
@app.route('/Main', methods=['POST', 'GET'])
def main():
    """Login / sign-up landing page; on success redirect to the chosen utility."""
    def signup():
        # True iff the POST came from the sign-up button.
        # (Previously returned None implicitly when absent; now an explicit bool.)
        return 'signup' in request.values.keys()
    # (Dead names LOGIN_FLAG/SIGNUP_FLAG were removed from the global statement —
    # they were never defined or assigned anywhere.)
    global user
    if request.method=='GET':
        return render_template('Main.html', inputs=user.blankInputs())
    elif request.method=='POST':
        user.fillInfo(username=request.values['usr'], password=request.values['pwd'], retype=request.values['retype'])
        if signup():
            user.signup()
        else:
            user.signin()
        if user.loggedIn():
            # Redirect to whichever utility button was pressed ('1' == pressed).
            for u in user.utilities:
                if request.values[u['input']]=='1':
                    return redirect(url_for(u['name']))
            user.msg = Sign.PICK_UTIL  # logged in but no destination chosen
        return render_template('Main.html', inputs=user.getInputs())
@app.route("/NewsAssistant", methods=["POST", "GET"])
def NewsAssistant():
if request.method == "POST":
if not user.loggedIn():
return redirect(url_for('main'))
user.updateForm(req=request)
return render_template("NewsAssistant.html", inputs=utilInputs(user.currentForm, util='NewsAssistant', user_portfolios=user.pflists))
elif request.method=="GET":
if not user.loggedIn():
if developing:
user.fillInfo(username='BazingaWonka', password='buzz', retype='')
user.signin()
else:
return redirect(url_for('main'))
return render_template("NewsAssistant.html", inputs=utilInputs(user.currentForm, util='NewsAssistant', user_portfolios=user.pflists))
@app.route('/NewsAssistant/HistoryLog', methods=['POST', 'GET'])
def HistoryLog():
    """Render the raw per-user action log (login required; GET only)."""
    if request.method=='GET':
        if not user.loggedIn():
            return redirect(url_for('main'))
        log_entries = userLog(user)
        return render_template('HistoryLog.html', logfile=log_entries)
@app.route("/NewsAssistant/History", methods=['POST', 'GET'])
def History():
if request.method=='GET':
if not user.loggedIn():
return redirect(url_for('main'))
return render_template('NewsAssistant_History.html', history=userHistory(user))
@app.route("/log/news-assistant-change-note", methods=["POST"])
def newsAssistant_changeNote():
req = request.get_json()
user.changeNote(news=req['news'], noteContent=req['note'])
res = make_response(jsonify({"message": "OK"}), 200)
return res
@app.route("/log/news-assistant-delete-note", methods=["POST"])
def newsAssistant_deleteNote():
req = request.get_json()
user.deleteNote(news=req['news'])
res = make_response(jsonify({"message": "OK"}), 200)
return res
@app.route("/log/news-assistant-delete-story", methods=["POST"])
def newsAssistant_deleteStory():
req = request.get_json()
user.deleteStory(news=req['news'], move=(int(req['move'])==1))
res = make_response(jsonify({"message": "OK"}), 200)
return res
@app.route("/log/news-assistant-click", methods=["POST"])
def newsAssistant_click():
user.addClick(currForm=user.currentForm, req=request.get_json())
res = make_response(jsonify({"message": "OK"}), 200)
return res
@app.route("/log/news-assistant-note", methods=["POST"])
def newsAssistant_note():
user.addNote(currForm=user.currentForm, req=request.get_json())
res = make_response(jsonify({"message": "OK"}), 200)
return res
@app.route("/log/news-assistant-download", methods=['Post'])
def newsAssistant_Download():
res = make_response(jsonify({"message": "OK"}), 200)
utilInputs(user.currentForm, util='download')
return res
@app.route("/portfolio-upload", methods=['post'])
def uploadPortfolio():
res = make_response(jsonify({"message": "New portfolio uploaded!"}), 200)
for attr in dir(request):
print('{}: {}'.format(attr, getattr(request, attr)))
file = request.files['upload']
data = json.load(file)
# print(data)
return res
if __name__ == "__main__":
app.run()
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,699
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitter_deleted.py
|
import pandas as pd
import json
import ast
import os
from datetime import datetime, timedelta
# The file's first line is a Python-literal list of author handles;
# ast.literal_eval parses it safely (no eval).
with open('./All_Data/Twitters_Rawdata/list_try.txt') as f: #open twitter author list
    name_list = f.readline()
name_list = ast.literal_eval(name_list)
def find_delete_tweet(date):
    # Diff one day's parsed tweets against the previous day's per author and
    # infer which tweets were deleted in between, based on positional offsets
    # of matching (Text, Time, Name) entries.
    # NOTE(review): O(n*m) pairwise comparison per author, and the offset
    # heuristic assumes both files list tweets in the same stable order —
    # confirm against the crawler's output ordering.
    print('Date:', date)
    # after_data
    with open ('./All_Data/Twitters_ParsedData/{}_API.json'.format(date),'r') as f: #open after day parsed data
        after_data = json.load(f)
    date_minus_one = (datetime.strptime(date, '%Y-%m-%d') - timedelta(days=1)).strftime('%Y-%m-%d')
    with open ('./All_Data/Twitters_ParsedData/{}_API.json'.format(date_minus_one),'r') as f: #open before day parsed data
        previous_data = json.load(f)
    for name in name_list:
        after_date = []
        for i in range(0,len(after_data)):
            if after_data[i]['Name'] == name:
                after_date.append(after_data[i])
        #previous_data
        previous_date = []
        for i in range(0,len(previous_data)):
            if previous_data[i]['Name'] == name:
                previous_date.append(previous_data[i])
        diff=[]
        find_if_first_deleted=[]
        missing_tweet = []
        # Match identical tweets across the two days; a change in the index
        # offset (i-j) between consecutive matches implies deletions between them.
        for i in range(0,len(after_date)):
            for j in range(0,len(previous_date)):
                if after_date[i]['Text'] == previous_date[j]['Text'] and after_date[i]['Time'] == previous_date[j]['Time'] and after_date[i]['Name'] == previous_date[j]['Name']:
                    if after_date[i]['Name']=='bespokeinvest':continue # bespokeinvest has ~400 tweets; skipped for now
                    result = i-j
                    find_if_first_deleted.append(j)
                    if len(diff)==0:
                        diff.append(result)
                    else:
                        if result != diff[-1]:
                            for count in range(0,diff[-1]-result):
                                missing_tweet.append(previous_date[j-count-1])
                            diff.append(result)
                    # print(i,j)
        if len(find_if_first_deleted) != 0:
            if find_if_first_deleted[0] != 0: # if the previous day's first matched index is not 0, the tweets before it were deleted
                for i in range(0,find_if_first_deleted[0]):
                    missing_tweet.append(previous_date[i])
        # print(find_if_first_deleted[0])
        # print(result)
        # print(diff)
        if len(missing_tweet)==0:
            print('{}:\nThere\'s no deleted tweet'.format(name))
        else:
            print('{}:\n'.format(name),missing_tweet)
            with open('./All_Data/Twitter_deleted/{}_{}.json'.format(name,date),'w') as f:
                json.dump(missing_tweet,f)
            print(name,date) # print whose data was saved and for which date
    return missing_tweet
######deal with data from 0705 to 0720#########
# time_list=[]
# start_date = datetime.strptime('2020-07-05','%Y-%m-%d')
# while start_date <= datetime.strptime('2020-07-20','%Y-%m-%d'):
# result = start_date.strftime('%Y-%m-%d')
# time_list.append(result)
# start_date+=timedelta(days=1)
# for time in time_list:
# find_delete_tweet(time)
if __name__ == '__main__':
    # BUG FIX: the only statement in this suite was commented out, leaving an
    # empty block — an IndentationError that made the whole module unimportable.
    # `pass` keeps the guard valid; re-enable the call with a real date, e.g.:
    # find_delete_tweet('2020-07-20')
    pass
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,700
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/getInputs.py
|
from GetUIData import *
from util import *
import datetime
from download import *
def utilInputs(form=None, util=None, user_portfolios=None):
    """Assemble the template context for the requested utility page.

    form            current form values ('date', 'pf', 'kw', ...)
    util            'NewsAssistant', 'download', 'Stock' or None
    user_portfolios extra user-defined portfolios appended to the options

    Returns a dict of template variables ({} for unknown utilities).
    """
    if util=='NewsAssistant':
        # Portfolio <select>: exactly one option carries the 'selected' marker.
        selected = {'pph_1':'','pph_2':'','pph_3':'','pph_4':'','pph_5':''}
        options = [
            {'value': 'pph_1', 'label': 'Daily 5% above'},
            {'value': 'pph_2', 'label': 'Daily 5% below'},
            {'value': 'pph_3', 'label': 'Weekly 10% above'},
            {'value': 'pph_4', 'label': 'Weekly 10% below'},
            {'value': 'pph_5', 'label': 'Monthly 20% above'},
        ]
        if user_portfolios!=None:
            for p in user_portfolios:
                options.append({'value': p['value'], 'label': p['value']})
                selected[p['value']] = ''
        try:
            selected[form['pf']]='selected'
        except (KeyError, TypeError):
            # Unknown portfolio id or form is None: fall back to the first option.
            selected['pph_1']='selected'
        top_news = News.get_top_news(form['date'], range(1, 4), form['kw'])
        portfolio_list, portfolio_news = News.get_portfolio_news(form['date'],form['pf'],form['kw'])
        print(form['pf'])
        if portfolio_list:
            ret = Chart.get_chart_data(form['date'],form['pf'])
        else:
            ret=''
        top_tws = Twitter.get_top_twitter(form['date'], range(1, 5))
        celebs, hot_tws = Twitter.get_hot_twitter(form['date'])
        week = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
        week_days = [{'full': w, 'short': w[:3]} for w in week]
        # Hour/minute lists are tripled — presumably so the UI's wheel pickers
        # can scroll past the ends; confirm against the template.
        hour = ['{:02d}'.format(i) for i in range(1, 13)]
        hour = hour+hour+hour
        minute = ['{:02d}'.format(i) for i in range(60)]
        minute = minute+minute+minute
        return {
            'date': form['date'],
            'selected': selected,
            'options': options,
            'portfolio': portfolio_list,
            'portfolio_news': portfolio_news,
            'keyword': form['kw'],
            'top_news': top_news,
            'ret': ret,
            'top_tws': top_tws,
            'hot_tws': hot_tws,
            'celebs': celebs,
            'week_days': week_days,
            'hour': hour,
            'minute': minute,
        }
    elif util == 'download':
        download_data = package(form['date'],form['pf'],form['kw'])
        return {
            'download_data':download_data
        }
    elif util=='Stock':
        return {}
    else:
        # (A second, unreachable duplicate `elif util == 'download'` branch
        # was removed — dead code.)
        return {}
def userHistory(user):
    # Group the user's clicks and notes by calendar day, newest first.
    # Returns {'click': [...], 'note': [...]} where each entry is
    # {'date': 'Mon DD, YYYY', 'crude': 'YYYYMMDD', 'clicks'/'notes': [...]}.
    # NOTE(review): `datetime(...)` below only works if a star import
    # (GetUIData/util/download) shadows the `datetime` module with the class —
    # this file itself does `import datetime`; confirm which name wins.
    clicks = user.copyClicks()
    clicks.reverse()
    notes = user.copyNotes()
    notes.reverse()
    click_days, note_days = [], []
    def getCrude(news):
        # Raw YYYYMMDD prefix of the timestamp, used as a stable sort/group key.
        return news['time'][:8]
    def getTime(news):
        # Human-readable day label, e.g. 'Jul 15, 2020'.
        time = news['time']
        return datetime(int(time[:4]), int(time[4:6]), int(time[6:8])).strftime('%b %d, %Y')
    if len(clicks)>0:
        # Seed with the first (most recent) click, then extend/append per day.
        click_days = [{'date':getTime(clicks[0]), 'crude': getCrude(clicks[0]), 'clicks':[clicks.pop(0)]}]
        for i, click in enumerate(clicks):
            time = getTime(click)
            if time==click_days[-1]['date']:
                click_days[-1]['clicks'].append(click)
            else:
                click_days.append({'date':time, 'crude':getCrude(click), 'clicks':[click]})
    if len(notes)>0:
        note_days = [{'date':getTime(notes[0]), 'crude': getCrude(notes[0]), 'notes':[notes.pop(0)]}]
        for i, note in enumerate(notes):
            time = getTime(note)
            if time==note_days[-1]['date']:
                note_days[-1]['notes'].append(note)
            else:
                note_days.append({'date':time, 'crude': getCrude(note), 'notes':[note]})
    return {
        'click': click_days,
        'note': note_days,
    }
def userLog(user):
    # Group the raw action log by calendar day, newest first; same shape as
    # userHistory but for 'log' entries (whose timestamp lives under 'content').
    # NOTE(review): same `datetime(...)` star-import dependency as userHistory.
    logs = user.copyLogs()
    logs.reverse()
    log_days = []
    def getCrude(news):
        return news['content']['time'][:8]
    def getTime(news):
        time = news['content']['time']
        return datetime(int(time[:4]), int(time[4:6]), int(time[6:8])).strftime('%b %d, %Y')
    if len(logs)>0:
        log_days = [{'date':getTime(logs[0]), 'crude': getCrude(logs[0]), 'logs':[logs.pop(0)]}]
        for i, log in enumerate(logs):
            time = getTime(log)
            if time==log_days[-1]['date']:
                log_days[-1]['logs'].append(log)
            else:
                log_days.append({'date':time, 'crude':getCrude(log), 'logs':[log]})
    return log_days
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,701
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/try_api.py
|
import pandas as pd
import json
import ast
import glob
import os
from datetime import datetime
import random
from Module_Clean import Clean
fullName = pd.read_json("./All_Data/Reference/InfoCodeToFullName.json").set_index('InfoCode')
synonym = pd.read_json("./All_Data/Reference/Synonym.json").set_index('InfoCode')
def get_data(Name,Strategy,date=datetime.today().strftime('%Y%m%d')): #生infocode, 輸出infocode list
with open ('./All_Data/Reference/InfoCodeToFullName.json') as f:
x = pd.read_json(f)
Info_list = x['InfoCode'].sample(n=10,random_state=Strategy)
Info_list = Info_list.to_json(orient='values')
# with open('{}_{}_{}.txt'.format(Name,Strategy,date),'w') as f:
# f.write(Info_list)
if os.path.exists('{}_{}.json'.format(str(Name),Strategy)) == False:
with open('{}_{}.json'.format(Name,Strategy),'w') as f:
hist_port=[]
json.dump(hist_port,f)
with open('{}_{}.json'.format(Name,Strategy),'r') as f:
hist_port = json.load(f)
incase_dup=[] #確保日期不會重複
if len(hist_port)==0:
dic = {}
dic[date]=eval(Info_list)
hist_port.append(dic)
for i in range(0,len(hist_port)):
incase_dup.append(list(hist_port[i].keys())[0])
if date not in incase_dup:
dic = {}
dic[date]=eval(Info_list)
hist_port.append(dic)
with open('./All_Data/api_port/{}_{}.json'.format(Name,Strategy),'w') as f:
json.dump(hist_port,f)
# f.write(str(dic))
print(hist_port)
return hist_port
def Info2PortfolioNews_element(Name,Strategy): # expand an InfoCode list into the info the UI needs
    # For each dated entry in the user's portfolio history, resolve the
    # InfoCodes to full names and write one {Date, InfoCode[], FullName[]}
    # record per date to a dated JSON file; returns the list of all records.
    # NOTE(review): `result.append(dic)` runs once per row of new_list but
    # always appends the SAME dict object, so each output file holds N
    # references to one record — confirm whether a single record was intended.
    with open ('./All_Data/Reference/InfoCodeToFullName.json') as f:
        x = pd.read_json(f)
    with open ('./All_Data/api_port/{}_{}.json'.format(Name,Strategy),'r') as f:
        y = json.load(f)
    show_re = []
    for j in range(0,len(y)):
        Info_list = list(y[j].values())[0]
        dic={'Date': int(list(y[j].keys())[0])}
        # print(dic)
        temp=[]
        for i in x['InfoCode']:
            if i in Info_list:
                data = x.loc[x['InfoCode']== i].values.tolist()
                # data[0].insert(0,20200716)
                # print(data[0][1])
                temp.append(data)
        # print(temp)
        new_list = [x[0] for x in temp]
        result=[]
        for i in range(0,len(new_list)):
            if i == 0:
                dic['InfoCode']=[]
                dic['FullName']=[]
            for j in range(0,len(new_list[i])):
                if j==0:
                    # dic['InfoCode']=new_list[i][j]
                    dic['InfoCode'].append(new_list[i][j])
                else:
                    # dic['FullName']=new_list[i][j]
                    dic['FullName'].append(new_list[i][j])
            result.append(dic)
        show_re.append(dic)
        # print(result)
        with open('./All_Data/api_port/{}_{}_{}.json'.format(Name,Strategy,dic['Date']),'w') as f:
            json.dump(result,f)
    print(show_re)
    return show_re
# get_data('travis0825',6)
# Info2PortfolioNews_element('travis0825',6)
# print(fullName)
def get_syn_intersection(df,syn):
    '''
    Check whether each news title mentions a portfolio company, and store the
    matched company names as a list per row.
    Inputs are DataFrames:
    df:  news (must have a 'title_cleaned' column)
    syn: company synonyms, indexed by InfoCode with a 'Synonym' list column
    Adds 'title_company' (matched names) and 'count' (1 if any match, else 0).
    '''
    # "Special words": join each adjacent word pair with '_' so multi-token
    # synonyms like "AAPL US" can be looked up as "AAPL_US".
    def get_special_word(news):
        news_title = news.split()
        special_word = Clean(news);special_word.Separate(2);special_word = special_word.Text
        special_word = list(map(lambda x:x.replace(' ','_'),special_word))
        news_title.extend(special_word)
        return news_title
    df['title_add_special'] = df['title_cleaned'].apply(lambda x:get_special_word(x))
    df = df.reset_index(drop=True)
    def get_company(news_title):
        intersection=[]
        title = set(news_title)
        for co in syn.index:
            syn_word = set(syn.loc[co].Synonym)
            # If the title shares any token with the company's synonyms,
            # record that company's full name.
            if len(title & syn_word)>0:
                intersection.extend([fullName.loc[co].Name])
        return intersection
    df['title_company'] = df['title_add_special'].apply(lambda x:get_company(x))
    df['count'] = df['title_company'].apply(lambda x:1 if len(x)>0 else 0)
    del df['title_add_special']
    return df
df = pd.read_json('./All_Data/api_port/travis0825_6_20200825.json').set_index('Date')
for date in df.index:
portfolio = df.loc[date].InfoCode
port_list=[]
for i in portfolio:
if i in synonym.index.to_list():
port_list.append(i)
portfolio = port_list
# print(portfolio)
portfolio_list = fullName.loc[portfolio].Name.to_list()
# print(portfolio_list)
synonym_list = synonym.loc[portfolio]
# print(synonym_list)
news = pd.read_json('/Users/tianyouwu/Desktop/Intern/All_Data/2_weeks_news/20200505.json').reset_index(drop=True)
portfolio_news = get_syn_intersection(news,synonym_list)
portfolio_news = portfolio_news.query('count == 1')
portfolio_news.sort_values(['pubdate','source'],ascending=False)
ans = json.loads(portfolio_news.to_json(orient = 'records'))
ans.insert(0,portfolio_list)
print(ans)
with open ('./All_Data/api_port/news_6_20200825.json','w') as f:
json.dump(ans,f)
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,702
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Twitters_Parsed2Cleaned.py
|
#Parsed data to Cleaned data
import pandas as pd
import json
import datetime
import glob
#合併parsed data
# Merge all parsed twitter files, keep (Name, Time, Text), normalise the date,
# de-duplicate, then split back out into one cleaned file per day.
data = pd.DataFrame()
all_txt_files = glob.glob('./All_Data/Twitters_ParsedData/*.json')
frames = [pd.read_json(file) for file in all_txt_files]
if frames:
    # pd.concat replaces the deprecated per-file DataFrame.append loop
    # (same result: frames stacked in glob order).
    data = pd.concat(frames)
# Keep only tweet text, author and time; .copy() avoids pandas'
# SettingWithCopy warning on the assignment below.
c = data[['Name','Time','Text']].sort_values('Time').copy()
# Normalise timestamps to yyyymmdd.
c['Time'] = pd.to_datetime(c['Time']).apply(lambda x:x.strftime('%Y%m%d'))
# Drop tweets with identical text (keeps the earliest occurrence).
c.drop_duplicates('Text',inplace=True)
# Emit one JSON file per day in the range.
time_range = pd.date_range('20170101','20200713')
time_range = time_range.strftime('%Y%m%d')
for time in time_range:
    df = c.query('Time == @time')
    ans = json.loads(df.to_json(orient='records'))
    with open(f'./All_Data/Twitters_CleanedData/{time}.json','w') as f:
        json.dump(ans,f)
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,703
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/Module_Clean.py
|
from MetaClass import Clean
import re
import json
import pandas as pd
#co_list = pd.read_json('./Schema/Reference/InfoCodeToFullName.json').InfoCode.values.tolist()
def not_redundant_int(x):
    """Return False when *x* parses as an int, True otherwise.

    Numbers should be dropped during text mining, but a numeric company
    infocode would be kept by the caller — False marks a token for cleaning.
    The previous bare `except:` is narrowed to the two exceptions int() raises.
    """
    try:
        int(x)
    except (TypeError, ValueError):
        return True
    return False
#return False 代表是要被清掉的詞
class Clean(Clean):
    """Chainable text-cleaning pipeline extending the project base class.

    Every step mutates ``self.Text`` in place and returns ``self`` so calls
    can be chained, e.g. ``Clean(...).Capitalize().DeletePunctuation()``.
    """
    def Capitalize(self):
        """Upper-case the whole text."""
        self.Text = self.Text.upper()
        return self
    def Separate(self, gram=1):
        """Turn ``self.Text`` into a list of space-joined *gram*-word n-grams."""
        parts = self.Text.split(" ")
        self.Text = []
        for i in range(len(parts) + 1 - gram):
            text = ""
            for j in range(gram):
                # join the words of one n-gram with single spaces
                # (original also tested `j < gram`, which is always true here)
                if j != 0:
                    text = text + " " + str(parts[i + j])
                else:
                    text = text + str(parts[i + j])
            self.Text.append(text)
        return self
    def DeletePunctuation(self
            , punctuation=[r'HTTP[S]?://\S+', "'S", "S'", ',', r'\.', '-', '"', "'", ":", ";", '!', '‘', r'\$', '&', '/', r'\(', r'\)', r'\?', '…', '’', '“', r'\=']
            ):
        """Strip URLs, possessives, punctuation and non-ASCII characters.

        *punctuation* is a list of regex patterns removed in order (raw
        strings now, so the escapes reach the regex engine unchanged).
        """
        # ASCII-stripping is idempotent, so doing it once up front (instead
        # of once per pattern as the original did) is behavior-identical.
        mid = self.Text.encode('ascii', errors='ignore').decode('ascii')
        for puncs in punctuation:
            mid = re.sub(puncs, '', mid)
        resultwords = re.split(r" \W+ ", mid)
        self.Text = " ".join(resultwords)
        return (self)
    def DeleteRedundant_Twitters(self):
        """Remove tweet stop-words, single characters and plain integers.

        Fixes vs. the original: a missing comma had fused 'HANDS' and
        'SINCE' into one token ('HANDSSINCE') so neither was filtered, and
        stray '' literals silently concatenated with their neighbours.  The
        list is now a set for O(1) membership tests.
        """
        words={'A', 'THE', 'AN', 'TO', 'AND', 'OR', 'NOT','HE','SHE','HIS','HER','THEM','THEY','BACK',
            'WANT','RIGHT','LEFT','WITHOUT','WITH','OF','AS','IN','MORE','FOR','ARE','IS','NEW','WILL','BE','AFTER',
            'WANTS', 'KNOW', 'HISTORY', 'NAMES', 'TOO', 'RUN', 'NEEDS', 'WEEK', 'ANOTHER', 'GETTING', 'ON','BUT','COULD',
            'OUT','AT','THAN','HAVE','BY','WHAT','CAN','NOW','OVER','IT','ABOUT','MAY','HAS','THEIR','QUARTER','DUE','UP','ITS',
            'YOU','YOUR','ENEN','WHY','HOW','THAT','THERE','THESE','NO','BEFORE','DO','DID','DONE','DOING','DONT','WAS','WERE',
            'LOOK','DON’T','ALL','INTO','ONTO','AROUND','TOWARDS','FROM','REVIEW','EUROPE','NORTH','GOVERNMENT','EXPERT',
            'LEAD', 'NEED', 'GOES', 'BEHIND', 'GROUP', 'NEAR', 'WORKING', 'METOO', 'IF', 'GETS', 'GO', 'COMES', 'WHEN',
            'PUT', 'USE', 'GOING', 'TALKS', 'WE', 'LIKELY', 'I', 'MONTH', 'OUR', 'PLAY', 'OWN', 'MY', 'MAKES', 'AD',
            'AWAY', 'OFF', 'MUCH', 'LIVE', 'TV', 'NEARLY', 'DURING', 'BRING', 'PLAN', 'YIELD', 'WIN', 'FINALLY', 'TRY', 'AMONG',
            'TAKING', 'WHERE', 'MADE', 'BUILD', 'TIES', 'HERE', 'THINK', 'YET', 'BOYS', 'RULES', 'NEXT', 'LESS', 'PART',
            'LEAVES', 'ASKS', 'NEWS', 'JUST', 'LOOKS', 'BEYOND', 'LATEST', 'KEY', 'MOVE', 'THIS', 'FINDS', 'THOSE', 'LITTLE',
            'LIKE', 'BEEN', 'TODAY', 'NOTHING', 'ALMOST', 'HAD', 'COMING', 'EDGES', 'FIRST', 'READ', 'AGAIN', 'DAY',
            'WEAK', 'BETTER', 'LET', 'BETWEEN', 'GROWING', 'TAKE', 'LEARN', 'MONTHS', 'BEING', 'YEAR', 'MINUTES',
            'RUNNING', 'RECORD', 'QUESTION', 'VS', 'WOULD', 'TOP', 'WAY', 'MANY', 'PEOPLE', 'EASY', 'SOME',
            'ACROSS', 'DRIVE','GET','TALK','MAKE','US','CHINA','BIG','YORK','WORLD','MILLION',
            'WHITE','MARKET','MARKETS','TIME','AMERICA','UK','MAN','WOMAN','MEN','WOMEN','CAN’T','TWO','AMID','KEEP','END','HELP',
            'YEARS','LIFE','HIT','3RD','VERY',
            'YES','ASK','OTHERS','SOMETHING','ANYONE','EVERYONE','60M...','SO','BOTH','WANTED','YOURS','GUY','SAME','LOVES','DOES',
            'TRUE','EPIC','FOOT','REASONS','WASNT','DOG','11%','WEEKS','HANDS',
            'SINCE','SAID','WHICH','MYSELF','YOURSELF','HISSELF','HERSELF','THEMSELVES','NOPE','ALSO',
            'ANY','ME','SAY','ONE','SEE','RT','WHO','SHOULD','LIST','REAL','MIGHT','FEW','IM','NOR','REALLY','MOST','OTHER','ONLY','OKAY','ALONG',
            'ONCE','SEEMS','ACTUALLY','REVIEWS','FATHER','VIA','STILL','MINE','ISNT','AINT','SAYS','EVER','CANNOT','THOUGH','LAST','SURE','THING',
            'DOOR','TRYING','NICE','ALWAYS','USUALLY','SOMETIMES','SELDOM','NEVER','REMEMBER','EVERY','GOT','ENOUGH','HIM',
            'AM','WOULDNT','OFTEN','TOTAL','AGE','SOON','BECAUSE','WO','DAYS','THERES','COLUMN','ABLE','YEP','THATS','GONE','EXAMPLE',
            'THER','REASON','CHART','WONT','KNOWS','TAKES','TOOK','DIFFERENT','DIFFERENCE','CAUSE','LISTEN','SUCH','HEAR','SIMILAR','HEY','HI','CONSTANT','EVEN','CASES',
            'SMART','DEGINITELY','READING','MATH','NAME','STREET','YOURE','ASKED','USING','WHOSE','ABSOLUTE','ABSOLUTELY','CAME','WHILE','FIGURE','GIRL','TALKING',
            'SPORTING','NIGHT','PERHAPS','USED','GIVE','THINKS','ONES','HEART','MOSTLY','ACTING','THANK','THANKS','THOUGHT','PLEASE','SAW','ABOVE','WHATS','MAYBE',
            'FUNNY','LEAST','LINK','DAILY','WORK','OH','CHILD','DOZEN','EACH','HELPS','FAVOTITE','STORY','IVE','MORNING','WEVE','HOUR',
            'SORRY','EST','ELSE','@DAVIDTAGGART','BUTTON','@JOHNPGAVIN','WENT','THROUGH','ENTRY','@BGURLEY','BIBLE','TOLD','TELL','MEANWHILE','ANYTHING','ANYWHERE',
            'PROBABLY','QUITE','SOURCES','STUDIES','LOVE','CANT','WOW','PAPER','CHOICE','GONNA','TYPE','SISTER','GUYS','FILES','STATION','EXERCISE',
            'WEEKEND','LOOKING','FULLY','HEARD','BUSY','HAHA','LOTS','RAN','HOURS','TWEETS','FIND','INSTEAD','AH','ATWELL','WEBSITE','SUMMARY','THUS','SEEM',
            'ADD','GAME','LEAVE','LISTED','USES','IDEA','YEAH','AHEAD','APPEARS','WAIT','SPEECH','TH','FINT','HOLDERS','WTF','BA','#2','HES','SIT',
            'FAR','FINE','DC','ID','8K','PRETTY','SHOW','SHOWS','READY','DIDNT','HAVING','SLAP','THINGS','OMG','YOY','IMEDIATELY','THEYRE','Q4','@EJENK','HAVENT','TWITTER',
            'CAREER','BURIED','RUNS','DEC','ACTUAL','CALL','UNTIL','RIP','PEERS','PICTURE','YY','HOWEVER'
            }
        resultwords = [word for word in re.split(r"\s+", self.Text) if word.upper() not in words and len(word) > 1 and not_redundant_int(word)]
        self.Text = " ".join(resultwords)
        return (self)
    def DeleteRedundant_News(self):
        """Remove news-headline stop-words, single characters and integers.

        Same fixes as DeleteRedundant_Twitters: stray '' literals removed
        and the word list converted to a set.
        """
        words={'A', 'THE', 'AN', 'TO', 'AND', 'OR', 'NOT','HE','SHE','HIS','HER','THEM','THEY','BACK',
            'WANT','RIGHT','LEFT','WITHOUT','WITH','OF','AS','IN','MORE','FOR','ARE','IS','NEW','WILL','BE','AFTER',
            'WANTS', 'KNOW', 'HISTORY', 'NAMES', 'TOO', 'RUN', 'NEEDS', 'WEEK', 'ANOTHER', 'GETTING', 'ON','BUT','COULD',
            'OUT','AT','THAN','HAVE','BY','WHAT','CAN','CANT','NOW','OVER','IT','ABOUT','MAY','HAS','THEIR','QUARTER','DUE','UP','ITS',
            'YOU','YOUR','ENEN','WHY','HOW','THAT','THERE','THESE','NO','BEFORE','DO','DID','DONE','DOING','DONT','WAS','WERE',
            'LOOK','DON’T','ALL','INTO','ONTO','AROUND','TOWARDS','FROM','REVIEW','EUROPE','NORTH','GOVERNMENT','EXPERT',
            'LEAD', 'NEED', 'GOES', 'BEHIND', 'GROUP', 'NEAR', 'WORKING', 'METOO', 'IF', 'GETS', 'GO', 'COMES', 'WHEN',
            'PUT', 'USE', 'GOING', 'TALKS', 'WE', 'LIKELY', 'I', 'MONTH', 'OUR', 'PLAY', 'OWN', 'MY', 'MAKES', 'AD',
            'AWAY', 'OFF', 'MUCH', 'LIVE', 'TV', 'NEARLY', 'DURING', 'BRING', 'PLAN', 'YIELD', 'WIN', 'FINALLY', 'TRY', 'AMONG',
            'TAKING', 'WHERE', 'MADE', 'BUILD', 'TIES', 'HERE', 'THINK', 'YET', 'BOYS', 'RULES', 'NEXT', 'LESS', 'PART',
            'LEAVES', 'ASKS', 'NEWS', 'JUST', 'LOOKS', 'BEYOND', 'LATEST', 'KEY', 'MOVE', 'THIS', 'FINDS', 'THOSE', 'LITTLE',
            'LIKE', 'BEEN', 'TODAY', 'NOTHING', 'ALMOST', 'HAD', 'COMING', 'EDGES', 'FIRST', 'READ', 'AGAIN', 'DAY',
            'WEAK', 'BETTER', 'LET', 'BETWEEN', 'GROWING', 'TAKE', 'LEARN', 'MONTHS', 'BEING', 'YEAR', 'MINUTES',
            'RUNNING', 'RECORD', 'QUESTION', 'VS', 'WOULD', 'TOP', 'WAY', 'MANY', 'PEOPLE', 'EASY', 'SOME',
            'ACROSS', 'DRIVE','GET','TALK','MAKE','US','CHINA','BIG','YORK','WORLD','MILLION',
            'WHITE','MARKET','MARKETS','TIME','AMERICA','UK','MAN','WOMAN','MEN','WOMEN','CAN’T','TWO','AMID','KEEP','END','HELP',
            'YEARS','LIFE','HIT','YES','ASK','WHICH','WHO','HOME','SAYS','SAY','STOCK','STOCKS','GOOD','PUSH','ONE','SUPER','INVESTORS',
            'INVESTOR','POWER','CITY','CALL','CALLS','BILLION','WATCH','LOVE','ISNT','ARENT','WERENT','ANYTHING','EVERYTHING',
            'GIVE','THINKS','HES','JAN','FIVE','COURT'
            }
        resultwords = [word for word in re.split(r"\s+", self.Text) if word.upper() not in words and len(word) > 1 and not_redundant_int(word)]
        self.Text = " ".join(resultwords)
        return (self)
    def Close(self):
        """Print the current text (debugging aid); still chainable."""
        print(self.Text)
        return self
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,704
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/GetUIData.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 9 16:07:17 2020
@author: ZuroChang
"""
import json
import pandas as pd
import datetime as dt
# Company InfoCode -> full-name lookup table, indexed by InfoCode.
fullName = pd.read_json("./All_Data/Reference/InfoCodeToFullName.json").set_index('InfoCode')
# UI strategy keys -> portfolio-news file-name prefixes.
method_list = {
    'pph_1':'news_PortfolioList_AbovePositive5',
    'pph_2':'news_PortfolioList_BelowNegative5',
    'pph_3':'news_PortfolioList_WeekAbovePositive10',
    'pph_4':'news_PortfolioList_WeekBelowNegative10',
    'pph_5':'news_PortfolioList_MonthAbovePositive20',
    'New': 'New'
}
class News:
    """Read-side helpers over the pre-computed news JSON files.

    NOTE(review): methods are declared without `self` and are invoked as
    plain functions (News.get_top_news(...)) — presumably intentional.
    """
    def get_top_news(day, ran, kw):
        # Collect {key, list} entries for each top-news rank in `ran`,
        # optionally keeping only titles containing keyword `kw` as a word.
        def get_top_news_ind(which_day, num, keyword):
            # One file per day+rank; layout is [key, record, record, ...].
            which_day = pd.to_datetime(which_day).strftime('%Y%m%d')
            with open(f'./All_Data/top_news/{which_day}_{num}.json')as f:
                file = json.load(f)
            key = file[0]
            news = file[1:]
            news = pd.DataFrame.from_records(news)
            news = news[['title','link','pubdate','source']]
            news = json.loads(news.to_json(orient='records'))
            if keyword != '':
                # whole-word, case-insensitive match on the title
                keyword = keyword.upper()
                choose = []
                for i in news:
                    title = i['title'].upper().split()
                    if keyword in title:
                        choose.append(i)
                news = choose
            return key,news
        news_lists = []
        for i in ran:
            k, n = get_top_news_ind(day, i, kw)
            news_lists.append({
                'key': k,
                'list': n,
            })
        return news_lists
    def get_portfolio_news(which_day,method,keyword):
        # Load the pre-built portfolio news for one strategy/date; returns
        # (portfolio, news) — both empty strings on any failure (best effort).
        which_day = pd.to_datetime(which_day).strftime('%Y%m%d')
        method = method_list[method]
        print('method: {}'.format(method))
        try:
            with open(f'./All_Data/portfolio_news/{method}_{which_day}.json')as f:
                file = json.load(f)
            if len(file)>1:
                portfolio = file[0]
                news = file[1:]
                news = pd.DataFrame.from_records(news)
                news['title_company'] = news['title_company'].apply(lambda x:x[0])
                news = news.sort_values(['title_company','pubdate','source'],ascending=[True,False,True])
                news = news[['title','link','pubdate','source','title_company']]
                news = json.loads(news.to_json(orient='records'))
            # when this portfolio has no news
            else :
                portfolio = file[0]
                news = ''
            if keyword != '':
                keyword = keyword.upper()
                choose = []
                for i in news:
                    title = i['title'].upper().split()
                    if keyword in title:
                        choose.append(i)
                news = choose
            return portfolio,news
        except:
            # deliberate best-effort: missing file / bad schema -> empty result
            portfolio = ''
            news = ''
            return portfolio,news
class Twitter:
    """Read-side helpers over the pre-computed Twitter JSON files.

    NOTE(review): methods are declared without `self` and are invoked as
    plain functions — presumably intentional.
    """
    def get_top_twitter(day, ran):
        # Collect {key, list} entries for each top-twitter rank in `ran`.
        def get_top_twitter_ind(which_day,num):
            # One file per day+rank; layout is [key, tweet, tweet, ...].
            which_day = pd.to_datetime(which_day).strftime('%Y%m%d')
            with open(f'./All_Data/top_twitters/{which_day}_{num}.json')as f:
                file = json.load(f)
            key = file[0]
            twitter = file[1:]
            return key, twitter
        lists = []
        for i in ran:
            k, l = get_top_twitter_ind(day, i)
            lists.append({
                'key': k,
                'list': l,
            })
        return lists
    def get_hot_twitter(day):
        # Load the day's tweets for a fixed set of followed accounts;
        # returns (account_names, per-account tweet lists) in lockstep order.
        day = pd.to_datetime(day).strftime('%Y%m%d')
        accounts = [
            'FundyLongShort',
            'SmallCapLS',
            'ShortSightedCap',
        ]
        files = []
        for account in accounts:
            with open('./All_Data/top_author_twitters/{}+{}.json'.format(account, day), 'r') as f:
                files.append(json.load(f))
        return accounts,files
class Chart:
    """Builds the portfolio-performance chart payload for the UI."""
    def get_chart_data(which_day, method):
        """Return chart rows (company/day/week/month/year) for one strategy
        on one date, ranked on the strategy's own horizon, top 20 at most.

        NOTE(review): declared without `self` — called as a plain function.
        """
        day_key = pd.to_datetime(which_day).strftime('%Y%m%d')
        strategy = method_list[method].replace('news_PortfolioList_', '')
        frame = pd.read_json(f'./All_Data/UIData/PortfolioPerformance_{strategy}_{day_key}.json')
        frame['company'] = frame['InfoCode'].apply(lambda code: fullName.loc[int(code)][0])
        # one-day return is annualized with a 360-day convention
        frame['Single'] = frame['Single'] * 360
        frame = frame.rename(columns={
            'Single': 'day',
            'Nearest7DaysAnnualSingle': 'week',
            'Nearest30DaysAnnualSingle': 'month',
            'Nearest365DaysAnnualSingle': 'year',
        })
        frame = frame[['company', 'day', 'week', 'month', 'year']]
        # each strategy ranks on its own horizon and direction
        order_by = {
            'AbovePositive5': ('day', False),
            'BelowNegative5': ('day', True),
            'WeekAbovePositive10': ('week', False),
            'WeekBelowNegative10': ('week', True),
            'MonthAbovePositive20': ('month', False),
        }
        if strategy in order_by:
            column, ascending = order_by[strategy]
            frame = frame.sort_values(column, ascending=ascending)
            if len(frame) > 20:
                frame = frame.iloc[:20, :]
        # unknown strategies (e.g. 'New') pass through unsorted, as before
        return json.loads(frame.to_json(orient='records'))
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,705
|
traviswu0910/Intern_Project
|
refs/heads/master
|
/download.py
|
import pandas as pd
import json
from datetime import datetime
from os import path, mkdir
# Root folder for all cached/pre-computed data files.
fPath = './All_Data'
# Download-strategy keys -> portfolio file-name prefixes
# (note: no 'New' entry here, unlike GetUIData.method_list).
strategy_list = {
    'pph_1':'news_PortfolioList_AbovePositive5',
    'pph_2':'news_PortfolioList_BelowNegative5',
    'pph_3':'news_PortfolioList_WeekAbovePositive10',
    'pph_4':'news_PortfolioList_WeekBelowNegative10',
    'pph_5':'news_PortfolioList_MonthAbovePositive20'
}
def package(date, strategy_name, keyword):
    """Bundle every pre-computed artefact for one date/strategy/keyword into
    a single downloadable JSON file under All_Data/Download_Data.

    Each nested helper reads one artefact family, strips internal fields,
    and swallows every failure (`except:pass`) so a missing file simply
    yields an empty section — deliberate best-effort behavior.

    Returns the assembled dict (also written to disk).
    """
    print(strategy_name)
    p = path.join(fPath,'Download_Data')
    if not path.isdir(p):
        mkdir(p)
    date = date.replace('-','')
    result={}
    # Top news per rank 1..3, keyword-filtered on whole-word title match.
    def top_news_package(date,keyword):
        top_news_package_dic={}
        del_key_list=['content','description','feedburner:origlink','guid','metadata:id',
                      'metadata:sponsored','metadata:type','urlToImage','title_cleaned',
                      'count1','count2','count3']
        for num in range(1,4):
            try:
                with open(fPath+'/top_news/{}_{}.json'.format(date,num),'r') as f:
                    top_news_file = json.load(f)
                # print(top_news_file[1])
                for i in range(len(top_news_file)):
                    if i == 0:
                        # element 0 is the group key; records follow
                        top_news_package_dic[top_news_file[i]]=[]
                    else:
                        if keyword == '':
                            for key in del_key_list:
                                if key in top_news_file[i]:
                                    del top_news_file[i][key]
                            top_news_package_dic[top_news_file[0]].append(top_news_file[i])
                        else:
                            keyword = keyword.upper()
                            if keyword in top_news_file[i]['title'].upper().split():
                                # print('hihi')
                                # print(top_news_file[num]['title_cleaned'])
                                # print(num,i)
                                for key in del_key_list:
                                    if key in top_news_file[i]:
                                        del top_news_file[i][key]
                                top_news_package_dic[top_news_file[0]].append(top_news_file[i])
            except:pass
        # print(top_news_package_dic)
        return top_news_package_dic #return dict {'A':['author':...,]}
    result['Top_News'] = top_news_package(date,keyword)
    # Top tweets per rank 1..4 (no keyword filtering here).
    def top_twitters_package(date):
        top_twitters_package_dic={}
        del_key_list = ['clean_text','count1','count2','count3','count4']
        for num in range(1,5):
            try:
                with open(fPath+'/top_twitters/{}_{}.json'.format(date,num),'r') as f:
                    top_twitters_file = json.load(f)
                # print(top_twitters_file)
                for i in range(len(top_twitters_file)):
                    if i == 0:
                        top_twitters_package_dic[top_twitters_file[i]]=[]
                    else:
                        for key in del_key_list:
                            if key in top_twitters_file[i]:
                                del top_twitters_file[i][key]
                        top_twitters_package_dic[top_twitters_file[0]].append(top_twitters_file[i])
            except:pass
        # print(top_twitters_package_dic)
        return top_twitters_package_dic # return {'A':['Name':...,]}
    result['Top_Twitters'] = top_twitters_package(date)
    # Portfolio performance rows keyed by InfoCode.
    def portfolio_performance_package(date,strategy_name):
        strategy_name= strategy_list[strategy_name].split('_')[2]
        # print(strategy_name)
        portfolio_performance_package_dic={}
        del_key_list = ['AnnualConti','AnnualSingle','Conti','Nearest30DaysSingle','Nearest365DaysSingle',
                        'Nearest7DaysSingle','Period','Price','created','id']
        # print('PortfolioPerformance_{}_{}'.format(strategy_name,date))
        try:
            with open(fPath+'/UIData/PortfolioPerformance_{}_{}.json'.format(strategy_name,date),'r') as f:
                portfolio_performance_file = json.load(f)
            # print(portfolio_performance_file[0]['InfoCode'])
            for i in range(len(portfolio_performance_file)):
                # print(i)
                for key in del_key_list:
                    if key in portfolio_performance_file[i]:
                        # # print(key)
                        # # print('hihi')
                        del portfolio_performance_file[i][key]
                # print(portfolio_performance_file[i]['InfoCode'])
                portfolio_performance_package_dic[portfolio_performance_file[i]['InfoCode']]=portfolio_performance_file[i]
            # print('123')
        except:pass
        # print(portfolio_performance_package_dic)
        return portfolio_performance_package_dic # return dict {'A':['InfoCode':...,]}
    result['Portfolio_Performance'] = portfolio_performance_package(date,strategy_name)
    # Portfolio news, keyword-filtered on whole-word title match.
    def portfolio_news_package(date,strategy_name,keyword):
        strategy_name= strategy_list[strategy_name]
        portfolio_news_package_dic={}
        del_key_list=['author','content','description','feedburner:origlink','guid','metadata:id',
                      'metadata:sponsored','metadata:type','urlToImage','title_cleaned','count']
        try:
            with open(fPath+'/portfolio_news/{}_{}.json'.format(strategy_name,date),'r') as f:
                portfolio_news_file = json.load(f)
            # print(portfolio_news_file)
            for i in range(len(portfolio_news_file)):
                if i == 0:
                    portfolio_news_package_dic['portfolio_news']=[]
                else:
                    portfolio_news_file[i]['Company_Name'] = portfolio_news_file[i].pop('title_company')
                    if keyword == '':
                        for key in del_key_list:
                            if key in portfolio_news_file[i]:
                                del portfolio_news_file[i][key]
                        portfolio_news_package_dic['portfolio_news'].append(portfolio_news_file[i])
                    else:
                        keyword = keyword.upper()
                        if keyword in portfolio_news_file[i]['title'].upper().split():
                            for key in del_key_list:
                                if key in portfolio_news_file[i]:
                                    del portfolio_news_file[i][key]
                            portfolio_news_package_dic['portfolio_news'].append(portfolio_news_file[i])
        except:pass
        # print(portfolio_news_package_dic)
        return portfolio_news_package_dic # return {'A':[{'link':...,}]}
    result['Portfolio_News'] = portfolio_news_package(date,strategy_name,keyword)
    # Company names listed in the portfolio on the requested date.
    def portfolio_list_package(date):
        portfolio_list_package_dic={}
        try:
            with open(fPath+'/UIData/PortfolioList_AbovePositive5.json','r') as f:
                portfolio_list_file = json.load(f)
            # print(portfolio_list_file)
            for i in range(len(portfolio_list_file)):
                if str(portfolio_list_file[i]['Date']) == date:
                    portfolio_list_package_dic['Company_Name'] = portfolio_list_file[i]['FullName']
        except:pass
        # print(portfolio_list_package_dic)
        return portfolio_list_package_dic # return {'A':['Compname','']}
    result['Portfolio_Information'] = portfolio_list_package(date)
    # Tweets from a fixed list of followed authors.
    def top_author_twitter_package(date):
        top_author_twitters_dic = {}
        author_list = ['FundyLongShort','SmallCapLS','ShortSightedCap']
        try:
            for author in author_list:
                with open(fPath+'/top_author_twitters/{}+{}.json'.format(author,date),'r') as f:
                    top_author_twitters_file = json.load(f)
                # print(top_author_twitters_file)
                top_author_twitters_dic[author] = []
                for i in range(len(top_author_twitters_file)):
                    top_author_twitters_dic[author].append(top_author_twitters_file[i])
        except:pass
        # print(top_author_twitters_dic)
        return top_author_twitters_dic # return {'A':[{'Name':...,}]}
    result['Top_Twitters_Author'] = top_author_twitter_package(date)
    strategy_name= strategy_list[strategy_name]
    with open(fPath+'/Download_Data/{}_{}_{}.json'.format(date, strategy_name, keyword), 'w') as f:
        json.dump(result,f)
    return result
# print(package('20200505','pph_2','')['Portfolio_Perfomance'].keys())
# print(package('20200505','pph_2','')['Portfolio_Perfomance'].keys())
|
{"/Twitters_Crawler.py": ["/Twitters_credentials.py"], "/Twitters_gen_top_twitters_keys.py": ["/Module_Clean.py"], "/util.py": ["/download.py"], "/Twitters_gen_top_twitters.py": ["/Module_Clean.py"], "/index.py": ["/util.py", "/getInputs.py"], "/getInputs.py": ["/GetUIData.py", "/util.py", "/download.py"], "/try_api.py": ["/Module_Clean.py"]}
|
34,706
|
Lorderot/recommendation-system
|
refs/heads/master
|
/models.py
|
from server import db
from sqlalchemy import inspect
class BaseModel(db.Model):
    """ Base data model for all objects """
    __abstract__ = True

    def __init__(self, *args, **kwargs):
        # Forward keyword arguments too, so models can be constructed the
        # usual SQLAlchemy way: Model(column=value, ...); the original
        # silently dropped kwargs.
        super().__init__(*args, **kwargs)

    def __repr__(self):
        """Define a base way to print models"""
        return '%s%s' % (self.__class__.__name__,
                         self.json())

    def json(self):
        """Return a column-name -> value dict for this row.

        Bug fix: SQLAlchemy's mapper exposes ``column_attrs`` (lower-case);
        the original ``Column_attrs`` raised AttributeError at runtime.
        """
        return {c.key: getattr(self, c.key)
                for c in inspect(self).mapper.column_attrs}
class Apartment(BaseModel):
    """ Model for apartments table """
    __tablename__ = 'tb_apartments'
    # Primary key.
    id = db.Column("tb_apartment_id", db.Integer, primary_key=True, autoincrement=True, nullable=False, unique=True)
    # Listing / location attributes (all optional).
    city = db.Column('city', db.String, nullable=True)
    city_region = db.Column('city_region', db.String, nullable=True)
    country = db.Column('country', db.String, nullable=True)
    picture_url = db.Column('picture_url', db.String, nullable=True)
    # NOTE(review): stored as String although it is compared numerically in
    # get_real_estate — confirm the column's actual type/content.
    size_square_feet = db.Column('size_square_feet', db.String, nullable=True)
    price = db.Column('price', db.Float, nullable=True)
    latitude = db.Column('latitude', db.Float, nullable=True)
    longitude = db.Column('longitude', db.Float, nullable=True)
    address = db.Column('address', db.String, nullable=True)
    leasing_available = db.Column('leasing_available', db.Boolean, nullable=True)
    # Neighbourhood features: distance to the nearest amenity of each kind
    # plus the count of such amenities nearby (units not shown here — TODO
    # confirm whether distances are km).
    dist_to_closest_cinema = db.Column('dist_to_closest_cinema', db.Float, nullable=True)
    num_of_cinemas = db.Column('num_of_cinemas', db.Integer, nullable=True)
    dist_to_closest_cafe = db.Column('dist_to_closest_cafe', db.Float, nullable=True)
    num_of_cafes = db.Column('num_of_cafes', db.Integer, nullable=True)
    dist_to_closest_pub = db.Column('dist_to_closest_pub', db.Float, nullable=True)
    num_of_pubs = db.Column('num_of_pubs', db.Integer, nullable=True)
    dist_to_closest_restaurant = db.Column('dist_to_closest_restaurant', db.Float, nullable=True)
    num_of_restaurants = db.Column('num_of_restaurants', db.Integer, nullable=True)
    dist_to_closest_cafe_rest = db.Column('dist_to_closest_cafe_rest', db.Float, nullable=True)
    num_of_cafes_rests = db.Column('num_of_cafes_rests', db.Integer, nullable=True)
    dist_to_closest_park = db.Column('dist_to_closest_park', db.Float, nullable=True)
    num_of_parks = db.Column('num_of_parks', db.Integer, nullable=True)
    dist_to_closest_railway_station = db.Column('dist_to_closest_railway_station', db.Float, nullable=True)
    num_of_railway_stations = db.Column('num_of_railway_stations', db.Integer, nullable=True)
    dist_to_closest_highway = db.Column('dist_to_closest_highway', db.Float, nullable=True)
    num_of_highways = db.Column('num_of_highways', db.Integer, nullable=True)
    # Whether the listing lies outside the dense city area.
    is_country_side = db.Column('is_country_side', db.Boolean, nullable=True)
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,707
|
Lorderot/recommendation-system
|
refs/heads/master
|
/config.py
|
import os
class BaseConfig(object):
    """Shared defaults for all Flask configurations."""
    DEBUG = False
    TESTING = False
class ProductionConfig(BaseConfig):
    """Production settings; DEBUG/TESTING stay off (inherited)."""
    # export PROD_DATABASE_URL=postgresql://DB_USER:PASSWORD@HOST/DATABASE
    # NOTE: raises KeyError at import time if the variable is unset (fail fast).
    SQLALCHEMY_DATABASE_URI = os.environ['PROD_DATABASE_URL']
class DevelopmentConfig(BaseConfig):
    """Development settings: debugger and testing helpers enabled."""
    # export DEV_DATABASE_URL=postgresql://DB_USER:PASSWORD@HOST/DATABASE
    TESTING = True
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = os.environ['DEV_DATABASE_URL']
class TestingConfig(BaseConfig):
    """Test-suite settings: testing mode on, debugger off."""
    # export TEST_DATABASE_URL=postgresql://DB_USER:PASSWORD@HOST/DATABASE
    TESTING = True
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.environ['TEST_DATABASE_URL']
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,708
|
Lorderot/recommendation-system
|
refs/heads/master
|
/get_real_estate.py
|
import numpy as np
import pandas as pd
import postgresql
from flask import jsonify
from geopy import distance, Point
from geopy.geocoders import Nominatim
from shapely.geometry import Point as shPoint
# Fallback center of interest per supported city — used when a user has no
# valid check-ins, work or study location inside the city polygon.
CITY_CENTERS = {
    'SAN DIEGO': {
        'Latitude': 32.715736,
        'Longitude': -117.161087,
    },
    'SAN FRANCISCO': {
        'Latitude': 37.773972,
        'Longitude': -122.431297,
    }
}
# Average driving speed per city (presumably km/h — used to turn km into
# minutes in center_profits; TODO confirm units).
CITY_AVG_SPEED = {
    'SAN DIEGO': 46.67098,
    'SAN FRANCISCO': 28.96819
}
# Required living area per resident.
SQR_METERS_PER_PERSON = 150.
# Minimum number of valid check-ins before work/study get weighted instead
# of simply appended.
CHECKINS_BOUND = 20
# Weighting of work/study points relative to the check-in count.
WORK_AND_STUDY_PCT = 0.3
WORK_OR_STUDY_PCT = 0.4
# True: query via the py-postgresql driver; False: pandas.read_sql_query.
USE_DB_DRIVER = True
def address_to_coords(address_raw):
    """Geocode a free-form address into {'Latitude': .., 'Longitude': ..}.

    Returns an empty dict for missing/NaN input or on any geocoding failure
    (network error, unknown address, ...) — deliberately best-effort.
    """
    if pd.isnull(address_raw):
        return {}
    try:
        location = Nominatim().geocode(address_raw)
        return dict(Latitude=location.latitude, Longitude=location.longitude)
    except:
        # intentionally broad: geocoding must never crash the request
        return {}
def check_validity(polygon_dict, city, coords_list):
    """Keep only the coordinates that fall inside *city*'s polygon.

    A city with no known polygon validates nothing (empty list).
    """
    if city not in polygon_dict:
        return []
    polygon = polygon_dict[city]
    inside = []
    for loc in coords_list:
        if polygon.contains(shPoint(loc['Longitude'], loc['Latitude'])):
            inside.append(loc)
    return inside
def midpoint(POLYGONS_DICT, json_data):
    """Pick the user's "center of interest" inside their city.

    Validates check-ins, work and study coordinates against the city
    polygon, optionally weights work/study by duplicating those points, and
    averages everything into a center.  Falls back to the hard-coded city
    center (or an empty dict for unknown cities) when nothing is valid.

    Returns (center_dict, valid_dict): center_dict holds Center_lat /
    Center_long; valid_dict the validated Work / Study / Check-ins.
    """
    city = json_data['City']
    valid_checkins = check_validity(POLYGONS_DICT, city, json_data['Coordinates'])
    geo_wk = [v for k, v in json_data.items() if (k in ['Work']) & bool(v)]
    valid_wk = check_validity(POLYGONS_DICT, city, geo_wk)
    geo_st = [v for k, v in json_data.items() if (k in ['Study']) & bool(v)]
    valid_st = check_validity(POLYGONS_DICT, city, geo_st)
    valid_dict = {
        'Work': valid_wk[0] if valid_wk else {},
        # Bug fix: the original guarded this on valid_wk (copy-paste),
        # raising IndexError when work was valid but study was not, and
        # dropping a valid study location when work was missing.
        'Study': valid_st[0] if valid_st else {},
        'Check-ins': valid_checkins
    }
    len_checkins = len(valid_checkins)
    if len_checkins >= CHECKINS_BOUND:
        # Enough check-ins: bias the centroid towards work/study by
        # duplicating those points proportionally to the check-in count.
        if bool(valid_wk) & bool(valid_st):
            ratio = int(len_checkins * WORK_AND_STUDY_PCT)
            valid_checkins.extend(valid_wk * ratio + valid_st * ratio)
        elif valid_wk:
            ratio = int(len_checkins * WORK_OR_STUDY_PCT)
            valid_checkins.extend(valid_wk * ratio)
        elif valid_st:
            ratio = int(len_checkins * WORK_OR_STUDY_PCT)
            valid_checkins.extend(valid_st * ratio)
        else:
            pass
    else:
        # Too few check-ins: include work/study as ordinary points.
        valid_checkins.extend(valid_wk + valid_st)
    if valid_checkins:
        center_dict = {
            'Center_' + ('lat' if k == 'Latitude' else 'long'): np.mean(
                [loc[k] for loc in valid_checkins]) for k in ['Latitude', 'Longitude']
        }
    else:
        # Nothing usable at all: fall back to the static city center
        # (empty dict for unsupported cities).
        center_dict = {
            'Center_' + ('lat' if k == 'Latitude' else 'long'): v for k, v in CITY_CENTERS.get(city, {}).items()
        }
    return center_dict, valid_dict
def center_profits(center_dict, valid_dict, city):
    """Attach commute metrics from the chosen center to work and study.

    Adds center_dict['Profits'] with <Place>_distance (km) and <Place>_time
    (minutes, at the city's average speed) for Work and Study; NaN when the
    corresponding validated location is missing.
    """
    speed = CITY_AVG_SPEED.get(city, CITY_AVG_SPEED['SAN DIEGO'])
    profits = {}
    for place in ('Work', 'Study'):
        location = valid_dict[place]
        if location:
            # Point construction stays inside the branch so a missing
            # Center_lat/Center_long only matters when actually needed.
            km = distance.distance(
                Point(location['Latitude'], location['Longitude']),
                Point(center_dict['Center_lat'], center_dict['Center_long'])).km
            profits[place + '_distance'] = km
            profits[place + '_time'] = km / speed * 60.
        else:
            profits[place + '_distance'] = np.nan
            profits[place + '_time'] = np.nan
    center_dict['Profits'] = profits
    return center_dict
def get_real_estate(real_est_df, db_engine, polygons_dict, json_data, use_pandas=False, output_len=500):
    """Main recommendation pipeline: pick a center of interest, then fetch
    up to *output_len* nearby apartments either via a DB stored procedure
    (default) or by filtering the pre-loaded pandas frame (use_pandas=True).

    Returns a Flask JSON response with Center_lat/Center_long, Profits
    (commute metrics) and an Apartments list (empty on any DB failure).
    """
    json_data['City'] = json_data['City'].upper()
    json_data['Study'] = address_to_coords(json_data['Study'])
    json_data['Work'] = address_to_coords(json_data['Work'])
    center_dict, valid_dict = midpoint(polygons_dict, json_data)
    if not use_pandas:
        # SQL path: delegate filtering/ranking to get_nearest_apartments().
        request = (r"SELECT * FROM get_nearest_apartments(" +
        "{lat}, {long}, '{city_to_search}', {park_count_ge}, {square_feet_ge}, {output_len}, {fake_countryside})")
        request_fmt = request.format(lat=center_dict['Center_lat'],
                                     long=center_dict['Center_long'],
                                     city_to_search=json_data['City'],
                                     park_count_ge=int(json_data['PetsToWalkPresence']),
                                     square_feet_ge=json_data['AmountOfPeopleLiving'] * SQR_METERS_PER_PERSON,
                                     output_len=output_len,
                                     fake_countryside=(not json_data['InCity']))
        try:
            if not USE_DB_DRIVER:
                best_re = pd.read_sql_query(request_fmt, db_engine)
            else:
                # py-postgresql uses the pq:// scheme
                db = postgresql.open(db_engine.replace('postgresql', 'pq'))
                get_some = db.query(request_fmt)
                best_re = pd.DataFrame(get_some, columns=get_some[0].column_names)
                db.close()
        except:
            # best effort: DB failure degrades to an empty apartment list
            print('DB error. Can not pull N best apartments')
            center_dict = center_profits(center_dict, valid_dict, json_data['City'])
            center_dict['Apartments'] = []
            return jsonify(center_dict)
        if not json_data['InCity']:
            # Countryside request: re-center on the nearest countryside hit
            # and query again without the fake_countryside flag.
            center_dict['Center_lat'], center_dict['Center_long'] = best_re[['latitude', 'longitude']].iloc[0].values
            request_fmt = request.format(lat=center_dict['Center_lat'],
                                         long=center_dict['Center_long'],
                                         city_to_search=json_data['City'],
                                         park_count_ge=int(json_data['PetsToWalkPresence']),
                                         square_feet_ge=json_data['AmountOfPeopleLiving'] * SQR_METERS_PER_PERSON,
                                         output_len=output_len,
                                         fake_countryside=False)
            try:
                if not USE_DB_DRIVER:
                    best_re = pd.read_sql_query(request_fmt, db_engine)
                else:
                    db = postgresql.open(db_engine.replace('postgresql', 'pq'))
                    get_some = db.query(request_fmt)
                    best_re = pd.DataFrame(get_some, columns=get_some[0].column_names)
                    db.close()
            except:
                print('DB error. Can not pull N best apartments')
                center_dict = center_profits(center_dict, valid_dict, json_data['City'])
                center_dict['Apartments'] = []
                return jsonify(center_dict)
    else:
        # pandas path: filter the pre-loaded apartments frame in memory.
        if not real_est_df.empty:
            sub_df = real_est_df[real_est_df['city'].str.upper() == json_data['City']]
            if not json_data['InCity']:
                temp_df = sub_df[sub_df['is_country_side']]
                temp_df['distance_to_center'] = temp_df[['latitude', 'longitude']].apply(
                    lambda x: distance.distance(Point(x['latitude'], x['longitude']),
                                                Point(center_dict['Center_lat'], center_dict['Center_long'])).km
                    if x.notnull().all() else np.NaN, axis=1)
                # re-center on the closest countryside listing
                center_dict['Center_lat'], center_dict['Center_long'] = (temp_df
                    .sort_values(by=['distance_to_center'], ascending=1)[['latitude', 'longitude']].iloc[0].values)
            else:
                if json_data['PetsToWalkPresence']:
                    sub_df = sub_df[sub_df['num_of_parks'] > 0]
            sub_df = sub_df[sub_df['size_square_feet'] >= json_data['AmountOfPeopleLiving'] * SQR_METERS_PER_PERSON]
            sub_df['distance_to_center'] = sub_df[['latitude', 'longitude']].apply(
                lambda x: distance.distance(Point(x['latitude'], x['longitude']),
                                            Point(center_dict['Center_lat'], center_dict['Center_long'])).km
                if x.notnull().all() else np.NaN, axis=1)
            best_re = sub_df.sort_values(by=['distance_to_center'], ascending=1).iloc[:output_len]
        else:
            center_dict = center_profits(center_dict, valid_dict, json_data['City'])
            center_dict['Apartments'] = []
            return jsonify(center_dict)
    center_dict = center_profits(center_dict, valid_dict, json_data['City'])
    # Rename DB columns to the UI's field names; amenity counts become
    # boolean "nearby" flags bundled under Profits per apartment.
    target_to_rename = {
        'latitude': 'Lat',
        'longitude': 'Long',
        'address': 'Address',
        'picture_url': 'Image_url',
        'size_square_feet': 'Area',
        'price': 'Price',
        'leasing_available': 'Leasing_available',
        'distance_to_center': 'Distance_to_center',
        'profits': 'Profits'
    }
    target_cols = [col for col in target_to_rename.keys() if col != 'profits']
    feature_to_rename = {
        'num_of_cafes_rests': 'Cafe_nearby',
        'num_of_cinemas': 'Cinema_nearby',
        'num_of_highways': 'Highway_nearby',
        'num_of_parks': 'Park_nearby'
    }
    feature_cols = list(feature_to_rename.keys())
    best_re['profits'] = best_re[feature_cols].rename(columns=feature_to_rename).apply(
        lambda x: x.astype(bool).to_dict(), axis=1)
    best_re = best_re[target_cols + ['profits']].rename(columns=target_to_rename)
    apartmets_dict = best_re.to_dict(orient='records')
    center_dict['Apartments'] = apartmets_dict
    return jsonify(center_dict)
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,709
|
Lorderot/recommendation-system
|
refs/heads/master
|
/server.py
|
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
import os
import pandas as pd
import geopandas as gpd
from get_real_estate import get_real_estate
import pull_city_polygons as pcpolygon
app = Flask(__name__)
# export APP_SETTINGS=config.ProductionConfig
# export APP_SETTINGS=config.DevelopmentConfig
# export APP_SETTINGS=config.TestingConfig
app.config.from_object(os.environ['APP_SETTINGS'])
# suppress warning
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_POOL_SIZE'] = 10
db = SQLAlchemy(app)
# to help Alembic detect changes in models. Depends on db object
import models
# When True, apartments are filtered in pandas instead of via SQL.
RUN_UNDER_PANDAS = False
DB_ENGINE = os.environ['DEV_DATABASE_URL']
# NOTE(review): Windows-style path separator — breaks on POSIX hosts.
POLYGONS_DATA_DIR = r'polygons\Polygons.shp'
DATA = pd.DataFrame()
if RUN_UNDER_PANDAS:
    try:
        # Load the whole apartments table up front for in-memory filtering.
        DATA = (pd.read_sql_query('SELECT * FROM tb_apartments', DB_ENGINE).set_index('tb_apartment_id'))
    except:
        pass
try:
    # Prefer the cached polygons file; rebuild from OSM when missing/broken.
    POLYGONS_DATA = gpd.read_file(POLYGONS_DATA_DIR)
except:
    POLYGONS_DATA = pcpolygon.update_city_polygons(POLYGONS_DATA_DIR)
POLYGONS_DICT = pcpolygon.convert_gpd_to_dict(POLYGONS_DATA)
@app.route('/api/destination/prod', methods=['GET', 'POST'])
def prod():
    """POST: run the recommendation pipeline on the JSON payload.
    GET: plain liveness banner."""
    if request.method != 'POST':
        return 'Real estate filtrator [PROD]'
    return get_real_estate(DATA, DB_ENGINE, POLYGONS_DICT, request.get_json(),
                           use_pandas=RUN_UNDER_PANDAS)
if __name__ == '__main__':
    # Bug fix: the original called db.init_app(app) *after* app.run(), so it
    # only executed once the (blocking) dev server shut down and never took
    # effect while serving.  Bind the db before starting the server instead.
    db.init_app(app)
    app.run(host='0.0.0.0')
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,710
|
Lorderot/recommendation-system
|
refs/heads/master
|
/pull_city_polygons.py
|
import pandas as pd
import geopandas as gpd
import osmnx as ox
# Cache OSM responses on disk so repeated polygon pulls are cheap.
ox.config(use_cache=True)
# Cities whose boundary polygons are maintained (upper-case everywhere).
USA_CITIES = [
    'SAN DIEGO', 'SAN FRANCISCO'
]
def update_city_polygons(polygons_data_dir):
    """Download the boundary polygon of each city in USA_CITIES from OSM,
    save the combined GeoDataFrame as a shapefile and return it.

    :param polygons_data_dir: path of the shapefile to (over)write
    :return: GeoDataFrame with one row per successfully fetched city
    """
    polygons_gdf = gpd.GeoDataFrame()
    for city in USA_CITIES:
        city_to_search = '{city}, US'.format(city=city)
        try:
            temp_gdf = ox.gdf_from_place(city_to_search)
            temp_gdf['city'] = city
        except Exception as exc:
            # Narrowed from a bare except (which also swallowed
            # KeyboardInterrupt); keep the best-effort behaviour but log it.
            print('Could not fetch polygon for {}: {}'.format(city, exc))
            temp_gdf = gpd.GeoDataFrame()
        polygons_gdf = pd.concat([polygons_gdf, temp_gdf])
    polygons_gdf.to_file(polygons_data_dir)
    return polygons_gdf
def convert_gpd_to_dict(polygons_gdf):
    """Map each city name in USA_CITIES to its boundary geometry.

    :param polygons_gdf: GeoDataFrame with 'city' and 'geometry' columns
    :return: dict {city name: geometry of the first matching row}
    """
    geometries = {}
    for city_name in USA_CITIES:
        matching_rows = polygons_gdf['city'].str.upper() == city_name
        geometries[city_name] = polygons_gdf.loc[matching_rows, 'geometry'].iloc[0]
    return geometries
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,711
|
Lorderot/recommendation-system
|
refs/heads/master
|
/manage.py
|
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from server import app, db
# Wire Alembic-based migrations (flask_migrate) and the CLI runner
# (flask_script) to the Flask app and SQLAlchemy handle from server.py.
# NOTE(review): flask_script is unmaintained and incompatible with recent
# Flask releases — consider the built-in `flask db` CLI; verify before use.
migrate = Migrate(app, db)
manager = Manager(app)
manager.add_command('db', MigrateCommand)
# python manage.py db migrate //detects changes
# python manage.py db upgrade //updates DB
if __name__ == '__main__':
    manager.run()
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,712
|
Lorderot/recommendation-system
|
refs/heads/master
|
/jsons/test_request.py
|
"""Fire a sample JSON request at the local server and time the round trip."""
import json
import requests
# Bug fix: time.clock was deprecated in Python 3.3 and removed in 3.8;
# perf_counter is the documented replacement for wall-clock timing.
from time import perf_counter
JSON_TO_SEND = r'input.json'
with open(JSON_TO_SEND) as json_file:
    json_data = json.load(json_file)
request_header = {
    'Content-Type': 'application/json',
    'Accept': 'application/json'
}
st = perf_counter()
resp = requests.post('http://localhost:5000/api/destination/prod', data=json.dumps(json_data), headers=request_header)
with open('output.json', 'w') as json_file:
    json.dump(resp.json(), json_file, indent=4)
print(perf_counter() - st, resp.json())
|
{"/models.py": ["/server.py"], "/server.py": ["/get_real_estate.py", "/pull_city_polygons.py", "/models.py"], "/manage.py": ["/server.py"]}
|
34,754
|
Sherlock-Hou/Huffing_n_PUFfin
|
refs/heads/master
|
/PUFAttackSimulation.py
|
from ArbiterPUF import ArbiterPUF
from ArbiterPUFClone import ArbiterPUFClone, PUFClassifier
from numpy import shape
from CRP import CRP
import json
from pandas import DataFrame
from LogisticRegression import LogisticRegressionModel, LogisticRegressionCostFunction, RPROP
import random
from multiprocessing import Pool
from time import time
from Simplified_Arbiter_PUF import SimplifiedArbiterPUF
from CMAEvolutionStrategy import CMAEvolutionStrategy
from ArbiterPUFFitnessMetric import ArbiterPUFFitnessMetric, XORArbiterPUFFitnessMetric
from NaturalEvoultionStrategy import NaturalEvolutionStrategy, MyNaturalEvolutionStrategy
from XORArbiterPUF import XORArbiterPUF
def generate_random_physical_characteristics_for_arbiter_puf(number_of_challenges):
    """Build one random delay quadruple (p, q, r, s) per challenge stage."""
    stages = []
    for _stage in range(number_of_challenges):
        stages.append([random.random() for _delay in range(4)])
    return stages
def generate_random_puf_challenge(puf_challenge_bit_length):
    """Return a random challenge: one value drawn from {-1, 1} per bit."""
    bits = []
    for _bit in range(puf_challenge_bit_length):
        bits.append(random.choice((-1, 1)))
    return bits
def create_puf_clone_training_set(puf_to_generate_crps_from, training_set_size):
    """Collect `training_set_size` random challenge/response pairs from a PUF."""
    crps = []
    for _example in range(training_set_size):
        challenge = generate_random_puf_challenge(puf_to_generate_crps_from.challenge_bits)
        response = puf_to_generate_crps_from.get_response(challenge)
        crps.append(CRP(challenge, response))
    return crps
def does_clone_response_match_original(original_response, clone_response):
    """True when the clone reproduced the original PUF's response exactly."""
    responses_agree = original_response == clone_response
    return responses_agree
def save_training_set_to_json(training_set, output_file):
    """Serialise a list of CRP-like objects (via their __dict__) to a JSON file.

    :param training_set: objects whose __dict__ is JSON-serialisable
    :param output_file: path of the JSON file to write
    """
    # Fix: the original rebound the `output_file` parameter to the open file
    # handle, shadowing the path argument inside the `with` block.
    with open(output_file, 'w') as handle:
        json.dump([training_example.__dict__ for training_example in training_set], handle, indent=4)
def get_test_results_of_puf_clone_against_original(clone_puf, original_puf, tests, pool):
    """Count how many test challenges the clone answers exactly like the original.

    The comparisons are fanned out over the supplied multiprocessing pool.
    """
    response_pairs = [(original_puf.get_response(test), clone_puf.get_response(test))
                      for test in tests]
    matches = pool.starmap(does_clone_response_match_original, response_pairs)
    return sum(matches)
def print_ml_accuracy(number_of_tests, tests_passed):
    """Report the clone's pass rate on the test set as a percentage."""
    accuracy_percentage = (tests_passed / number_of_tests) * 100
    print(accuracy_percentage, '% accuracy on tests')
def generate_arbiter_clone_with_my_nes(bit_length, training_set):
    """Clone an arbiter PUF by evolving its delay vector with MyNaturalEvolutionStrategy."""
    clone = SimplifiedArbiterPUF(get_random_vector(bit_length))
    strategy = MyNaturalEvolutionStrategy(clone.challenge_bits,
                                          ArbiterPUFFitnessMetric(training_set))
    clone.delay_vector = strategy.train(len(training_set))
    return clone
def generate_xor_arbiter_clone_with_my_nes(bit_length, number_of_xors, training_set):
    """Clone an XOR arbiter PUF by evolving every internal delay vector at once."""
    clone = generate_xor_arbiter_puf(bit_length, number_of_xors)
    print("Attack on", clone.__str__())
    strategy = MyNaturalEvolutionStrategy((len(clone.arbiter_pufs), bit_length),
                                          XORArbiterPUFFitnessMetric(training_set))
    evolved_vectors = strategy.train(len(training_set))
    clone.arbiter_pufs = [SimplifiedArbiterPUF(vector) for vector in evolved_vectors]
    return clone
def generate_arbiter_clone_with_open_ai_nes(bit_length, training_set):
    """Clone an arbiter PUF using the OpenAI-style NaturalEvolutionStrategy."""
    clone = SimplifiedArbiterPUF(get_random_vector(bit_length))
    strategy = NaturalEvolutionStrategy(clone.challenge_bits,
                                        ArbiterPUFFitnessMetric(training_set))
    clone.delay_vector = strategy.train(len(training_set))
    return clone
def generate_arbiter_clone_with_cmaes(bit_length, training_set):
    """Clone an arbiter PUF by optimising its delay vector with CMA-ES."""
    clone = SimplifiedArbiterPUF(get_random_vector(bit_length))
    strategy = CMAEvolutionStrategy(bit_length, ArbiterPUFFitnessMetric(training_set),
                                    clone.challenge_bits)
    clone.delay_vector = strategy.train(len(training_set))
    return clone
def generate_arbiter_clone_with_lr_rprop(bit_length, training_set):
    """Clone an arbiter PUF via logistic regression trained with RPROP."""
    model = LogisticRegressionModel(get_random_vector(bit_length))
    clone = ArbiterPUFClone(model, PUFClassifier())
    cost_function = LogisticRegressionCostFunction(clone.machine_learning_model)
    clone.train_machine_learning_model_with_multiprocessing(RPROP(),
                                                            training_set,
                                                            cost_function)
    return clone
def get_random_vector(length):
    """Return `length` independent uniform samples from [0, 1)."""
    vector = []
    for _component in range(length):
        vector.append(random.random())
    return vector
def generate_arbiter_puf(bit_length):
    """Create a simplified arbiter PUF with random physical characteristics."""
    random_delays = get_random_vector(bit_length)
    return SimplifiedArbiterPUF(random_delays)
def generate_xor_arbiter_puf(bit_length, number_of_xors):
    """Create an XOR arbiter PUF built from number_of_xors + 1 internal PUFs."""
    internal_pufs = [generate_arbiter_puf(bit_length) for _puf in range(number_of_xors + 1)]
    return XORArbiterPUF(internal_pufs)
def puf_attack_sim():
    """End-to-end attack demo: build a PUF, train a clone on random CRPs,
    then measure the clone's accuracy on fresh random challenges."""
    # Original PUF to be cloned, has a randomly generated vector for input (physical characteristics) and a given challenge bit length (number of stages)
    puf_challenge_bit_length = 8
    number_of_xors = 1
    # original_puf = generate_arbiter_puf(puf_challenge_bit_length)
    original_puf = generate_xor_arbiter_puf(puf_challenge_bit_length, number_of_xors)
    # create a training set of CRPs for the clone to train on
    training_set_length = 4000
    puf_clone_training_set = create_puf_clone_training_set(original_puf, training_set_length)
    # save_training_set_to_json(puf_clone_training_set, 'ArbiterPUF_Training_Set.json')
    print("original puf: bit_length", puf_challenge_bit_length,
          "number of xors", number_of_xors, "training set length", len(puf_clone_training_set))
    # create clone PUF
    start_time = time()
    # puf_clone = generate_arbiter_clone_with_my_nes(puf_challenge_bit_length, puf_clone_training_set)
    puf_clone = generate_arbiter_clone_with_lr_rprop(puf_challenge_bit_length, puf_clone_training_set)
    # puf_clone = generate_xor_arbiter_clone_with_my_nes(puf_challenge_bit_length, number_of_xors, puf_clone_training_set)
    training_time = time() - start_time
    print("Time to train is", training_time)
    # testing the clone to ensure it has the same output as the original puf
    number_of_tests = 100000
    # the pool parallelises both test generation and response comparison
    pool = Pool()
    tests_for_puf = pool.map(generate_random_puf_challenge,
                             [original_puf.challenge_bits for length in range(number_of_tests)])
    print_ml_accuracy(number_of_tests,
                      get_test_results_of_puf_clone_against_original(puf_clone, original_puf, tests_for_puf, pool))
    pool.close()
    pool.join()
# Script entry point: run a single attack simulation.
if __name__ == '__main__':
    puf_attack_sim()
|
{"/PUFAttackSimulation.py": ["/CMAEvolutionStrategy.py", "/ArbiterPUFFitnessMetric.py"]}
|
34,755
|
Sherlock-Hou/Huffing_n_PUFfin
|
refs/heads/master
|
/CMAEvolutionStrategy.py
|
from numpy import identity, sqrt, power, exp, floor, log, divide, sum, multiply, square, subtract
from numpy.random import multivariate_normal
from numpy.ma import sum, dot, transpose
from random import random
from numpy.linalg import inv
class CMAEvolutionStrategy:
    """Covariance Matrix Adaptation Evolution Strategy (CMA-ES) optimiser.

    Evolves a multivariate-normal search distribution (mean, global step size
    and covariance matrix) towards candidate vectors scored by
    ``fitness_metric``.
    NOTE(review): several constants and updates below deviate from the
    reference formulas in Hansen's CMA-ES tutorial (see the per-line notes);
    confirm against the tutorial before relying on convergence behaviour.
    """
    def __init__(self, problem_dimension, fitness_metric, learning_rate=1,
                 population_size=4, default_step_size=0.3):
        """Pre-compute the CMA-ES strategy parameters.

        :param problem_dimension: length of the candidate vectors being evolved
        :param fitness_metric: object exposing get_fitness(vector) -> number
        :param learning_rate: scale applied to the mean update
        :param population_size: base population size (grown with log(dimension))
        :param default_step_size: initial global step size sigma (should be > 0)
        """
        self.fitness_metric = fitness_metric
        self.problem_dimension = problem_dimension
        self.learning_rate = learning_rate
        self.identity_matrix = identity(self.problem_dimension)  # todo get value
        # lambda = base + floor(3 ln n), the standard population sizing rule
        self.population_size = int(population_size + floor(3 * log(self.problem_dimension)))
        self.number_of_parents = self.population_size / 2
        # log-decreasing recombination weights for the mu best samples
        self.weights = [log(self.number_of_parents + 1 / 2) - log(sample_index + 1) for sample_index in
                        range(int(self.number_of_parents))]
        self.number_of_parents = int(self.number_of_parents)
        self.weights = divide(self.weights, sum(self.weights))
        self.number_of_parents = int(floor(self.number_of_parents))
        # mu_eff = 1 / sum(w_i^2): the variance-effective selection mass
        self.variance_effective_selection_mass = power(sum([power(weight, 2) for weight in self.weights]), -1)
        self.variance_effectiveness_of_sum_of_weights = (self.variance_effective_selection_mass + 2) \
                                                        / (
                self.problem_dimension + self.variance_effective_selection_mass + 5)
        self.time_constant_for_covariance_matrix = ((4 + self.variance_effectiveness_of_sum_of_weights
                                                     / self.problem_dimension)
                                                    / (self.problem_dimension + 4
                                                       + 2 * self.variance_effectiveness_of_sum_of_weights / 2))
        # c_1 ~ 2 / n^2: rank-one update learning rate
        self.learning_rate_for_rank_one_update_of_covariance_matrix = 2 / square(problem_dimension)
        self.learning_rate_for_parent_rank_of_covariance_matrix = min(
            1 - self.learning_rate_for_rank_one_update_of_covariance_matrix,
            2 * self.variance_effectiveness_of_sum_of_weights - 1 / self.variance_effectiveness_of_sum_of_weights
            / (square(self.problem_dimension + 2) + self.variance_effectiveness_of_sum_of_weights))
        self.time_constant_for_step_size_control = ((self.variance_effectiveness_of_sum_of_weights + 5)
                                                    / (self.problem_dimension
                                                       + self.variance_effectiveness_of_sum_of_weights + 5))
        self.step_size_dampening = 1 + 2 * max(0, sqrt((self.variance_effectiveness_of_sum_of_weights - 1)
                                                       / (self.population_size + 1)) - 1) \
                                   + self.time_constant_for_covariance_matrix
        # Can also be 1 to save any bother
        # NOTE(review): the reference E||N(0,I)|| uses gamma functions or the
        # approximation sqrt(n)(1 - 1/(4n) + 1/(21 n^2)); this expression looks
        # like the gamma-based formula with the Gamma calls dropped — verify.
        self.expected_value_from_identity_normal = (sqrt(2) *
                                                    ((self.problem_dimension + 1) / 2) / (self.problem_dimension / 2))
        # search distribution state: mean m, step size sigma, covariance C
        self.current_distribution_mean_of_normal = [random() for value in range(self.problem_dimension)]
        self.step_size = default_step_size  # should always be > 0
        self.covariance_matrix = self.identity_matrix
        # evolution paths p_sigma (isotropic) and p_c (anisotropic)
        self.isotropic_evolution_path = [0 for value in range(self.problem_dimension)]
        self.anisotropic_evolution_path = [0 for value in range(self.problem_dimension)]
        self.discount_factor_for_isotropic = 1 - self.time_constant_for_step_size_control
        self.discount_factor_for_anisotropic = (1
                                                - ((4 + self.variance_effective_selection_mass / self.population_size)
                                                   / (self.population_size + 4 + (
                        2 * self.variance_effective_selection_mass)
                                                      / self.population_size)))  # todo DO!
        self.complements_of_discount_variance_for_isotropic = sqrt(1 - square(self.discount_factor_for_isotropic))
        self.complements_of_discount_variance_for_anisotropic = sqrt(1 - square(self.discount_factor_for_anisotropic))
        self.learning_rate_of_variance_effective_selection_mass = self.variance_effective_selection_mass / square(
            problem_dimension)
        # NOTE(review): name and formula unclear; appears related to the
        # step-size dampening term — confirm intent before reuse.
        self.division_thingy = 1 + 2 * max(
            [0, sqrt(((self.variance_effective_selection_mass - 1) / self.population_size + 1) + 1)]) \
                               + self.discount_factor_for_isotropic
    def train(self, fitness_requirement):
        """Run generations until the mean's fitness exceeds fitness_requirement.

        :return: the final distribution mean (the evolved candidate vector)
        """
        generation = 0
        while self.fitness_metric.get_fitness(self.current_distribution_mean_of_normal) <= fitness_requirement:
            print("Generation", generation)
            self.update_for_next_generation()
            generation += 1
        return self.current_distribution_mean_of_normal
    def update_for_next_generation(self):
        """One CMA-ES generation: sample, select, update paths, C, sigma, mean."""
        sample_candidates = self.get_new_sample_candidates()
        sample_fitnesses = [self.fitness_metric.get_fitness(sample) for sample in sample_candidates]
        sorted_samples = self.get_current_population_sorted(sample_candidates, sample_fitnesses)
        next_generation_mean = self.get_updated_distribution_mean(sorted_samples)
        self.isotropic_evolution_path = self.get_updated_isotropic_evolution_path(next_generation_mean)
        self.anisotropic_evolution_path = self.get_updated_anisotropic_evolution_path(next_generation_mean)
        self.covariance_matrix = self.get_updated_covariance_matrix(sorted_samples)
        self.step_size = self.get_updated_step_size()
        self.current_distribution_mean_of_normal = next_generation_mean
        print("Step size", self.step_size)
        print("Current mean", self.current_distribution_mean_of_normal)
        print()
    def get_new_sample_candidates(self):
        """Draw a full population of candidates from the current distribution."""
        return [self.get_sample_from_multivariate_normal_distribution() for candidate_sample in
                range(self.population_size)]
    def get_sample_from_multivariate_normal_distribution(self):
        """Sample one candidate: mean + sigma * N(0, C)."""
        sample_candidate = (self.current_distribution_mean_of_normal
                            + (self.step_size * multivariate_normal([0 for value in range(self.problem_dimension)],
                                                                    self.covariance_matrix)))
        # print("Candidate:", sample_candidate)
        return sample_candidate
        # return multivariate_normal(self.current_distribution_mean_of_normal,
        #                            (self.covariance_matrix * square(self.step_size)))
    def get_step_of_distribution_mean(self, sorted_sample_population):
        """Weighted recombination of the selected samples' sigma-scaled offsets."""
        return sum([weight * self.get_adjusted_sample(sorted_sample)
                    for weight, sorted_sample in zip(self.weights, sorted_sample_population)])
        # return dot(self.weights,
        #            [self.get_adjusted_sample(sorted_sample)
        #             for sorted_sample in sorted_sample_population[:int(self.number_of_parents)]])
    def get_adjusted_sample(self, sorted_sample):
        """(x_i - m) / sigma: the sample offset in step-size units."""
        return (sorted_sample - self.current_distribution_mean_of_normal) / self.step_size
    def get_current_population_sorted(self, sample_population, fitness):
        """Sort samples by fitness ascending and keep the first mu of them.

        NOTE(review): ascending order keeps the *lowest*-fitness samples;
        reference CMA-ES selects the best — confirm the fitness sign convention.
        """
        sorted_population = [sample for (fitness, sample) in
                             sorted(zip(fitness, sample_population), key=lambda pair: pair[0])]
        return sorted_population[:self.number_of_parents]
    def get_updated_distribution_mean(self, sorted_sample_population):
        """m' = m + learning_rate * weighted mean step."""
        return self.current_distribution_mean_of_normal \
               + self.learning_rate \
               * self.get_step_of_distribution_mean(sorted_sample_population)
    # def get_updated_distribution_mean(self, next_distribution_mean_of_normal ,step_of_distribution_mean):
    #     return next_distribution_mean_of_normal + (self.learning_rate * step_of_distribution_mean)
    def get_updated_isotropic_evolution_path(self, next_distribution_mean_of_normal):
        """Discounted update of p_sigma from the mean displacement."""
        return multiply(self.discount_factor_for_isotropic, self.isotropic_evolution_path) \
               + self.complements_of_discount_variance_for_isotropic \
               * sqrt(self.variance_effective_selection_mass) \
               * self.get_square_root_inverse_of_covariance_matrix() \
               * self.get_displacement_of_distribution_mean_of_normal(next_distribution_mean_of_normal)
    def distribute_identity_matrix_normal_under_neutral_selection(self, next_distribution_mean_of_normal):
        """C^(-1/2)-whitened, mu_eff-scaled mean displacement (helper)."""
        return sqrt(self.variance_effective_selection_mass) \
               * self.get_displacement_of_distribution_mean_of_normal(next_distribution_mean_of_normal) \
               * self.get_square_root_inverse_of_covariance_matrix()
    def get_square_root_inverse_of_covariance_matrix(self):
        """NOTE(review): element-wise sqrt of C^-1, not a true matrix square
        root (which needs an eigendecomposition) — verify correctness."""
        inverse_of_covariance_matrix = self.get_inverse_of_covariance_matrix()
        return sqrt(inverse_of_covariance_matrix)
    def get_displacement_of_distribution_mean_of_normal(self, next_distribution_mean_of_normal):
        """(m' - m) / sigma."""
        displacement_of_mean = divide((next_distribution_mean_of_normal - self.current_distribution_mean_of_normal),
                                      self.step_size)
        return displacement_of_mean
    def get_updated_step_size(self):
        """Step-size adaptation from the isotropic path length.

        NOTE(review): uses len(p_sigma) (the vector's element count) where the
        reference uses the Euclidean norm ||p_sigma|| — verify.
        """
        return self.step_size * exp((self.time_constant_for_step_size_control / self.step_size_dampening) * (
                len(self.isotropic_evolution_path) / self.expected_value_from_identity_normal) - 1)
    # todo CURRENTLY WORKING HERE
    def get_updated_anisotropic_evolution_path(self, next_distribution_mean_of_normal):
        """Discounted update of p_c, gated by the Heaviside indicator."""
        return multiply(self.discount_factor_for_anisotropic, self.anisotropic_evolution_path) \
               + self.get_indicator_result() * self.complements_of_discount_variance_for_anisotropic \
               * sqrt(self.variance_effective_selection_mass) \
               * self.get_displacement_of_distribution_mean_of_normal(next_distribution_mean_of_normal)
    def get_indicator_result(self):
        """Heaviside gate h_sigma (1 keeps the p_c update, 0 stalls it).

        NOTE(review): same len-vs-norm concern as get_updated_step_size.
        """
        return 1 if (len(self.isotropic_evolution_path) / sqrt(1 - square(1 - self.time_constant_for_step_size_control))
                     < (1.4 + (2 / (self.problem_dimension + 1))) * self.expected_value_from_identity_normal) else 0
    def get_updated_covariance_matrix(self, sample_population):
        """C' = discount * C + c_1 * rank-one term + c_mu * rank-mu term."""
        covariance_discount_factor = self.get_covariance_matrix_discount_factor()
        rank_one_matrix = self.get_rank_one_matrix()
        rank_minimum_matrix = self.get_rank_minimum_matrix(sample_population)
        return multiply(covariance_discount_factor, self.covariance_matrix) \
               + (multiply(self.learning_rate_for_rank_one_update_of_covariance_matrix, rank_one_matrix)) \
               + (multiply(self.learning_rate_for_parent_rank_of_covariance_matrix, rank_minimum_matrix))
    def get_covariance_matrix_discount_factor(self):
        """Scalar weight applied to the previous covariance matrix."""
        return (1
                + self.learning_rate_for_rank_one_update_of_covariance_matrix
                * self.get_preventer_of_axes_increase_decider()
                - self.learning_rate_for_rank_one_update_of_covariance_matrix
                - self.learning_rate_for_parent_rank_of_covariance_matrix * sum(self.weights)
                )
    def get_preventer_of_axes_increase_decider(self):
        """Correction term that compensates for a stalled p_c update (h_sigma = 0)."""
        return (1 - power(self.get_indicator_result(),
                          2)) * self.learning_rate_for_rank_one_update_of_covariance_matrix * self.learning_rate \
               * (2 - self.learning_rate)
    def get_rank_one_matrix(self):
        """NOTE(review): elementwise multiply of p_c with itself, not the outer
        product p_c p_c^T used by the reference rank-one update — verify."""
        return multiply(self.anisotropic_evolution_path, transpose(self.anisotropic_evolution_path))
    def get_rank_minimum_matrix(self, sorted_sample_population):
        """Weighted rank-mu accumulation of the selected samples' offsets."""
        return sum([multiply(
            (self.get_steped_difference(sorted_sample) * transpose(self.get_steped_difference(sorted_sample))), weight)
            for weight, sorted_sample in
            zip(self.get_adjusted_weights(sorted_sample_population), sorted_sample_population)])
    def get_adjusted_weights(self, sorted_sample_population):
        """Per-sample weights, corrected for negative-weight samples."""
        return [weight * self.decide_how_weight_is_adjusted(weight, sorted_sample)
                for weight, sorted_sample in zip(self.weights, sorted_sample_population)]
    def decide_how_weight_is_adjusted(self, weight, sorted_sample):
        """Scale factor: 1 for non-negative weights, else a normalising correction."""
        return 1 if weight >= 0 else self.problem_dimension / square(len(self.get_inverse_of_covariance_matrix()
                                                                         * (
                                                                                 sorted_sample - self.current_distribution_mean_of_normal
                                                                                 / self.step_size)))
    def get_inverse_of_covariance_matrix(self):
        """C^-1 via numpy.linalg.inv."""
        return inv(self.covariance_matrix)
    def get_steped_difference(self, sorted_sample):
        """(x_i - m) / sigma as a numpy element-wise operation."""
        return divide(subtract(sorted_sample, self.current_distribution_mean_of_normal),
                      self.step_size)
|
{"/PUFAttackSimulation.py": ["/CMAEvolutionStrategy.py", "/ArbiterPUFFitnessMetric.py"]}
|
34,756
|
Sherlock-Hou/Huffing_n_PUFfin
|
refs/heads/master
|
/ArbiterPUFFitnessMetric.py
|
from numpy import count_nonzero
from Simplified_Arbiter_PUF import SimplifiedArbiterPUF
from XORArbiterPUF import XORArbiterPUF
class XORArbiterPUFFitnessMetric:
    """Fitness = number of training CRPs an XOR-arbiter candidate reproduces."""
    def __init__(self, training_set):
        self.training_set = training_set
    def get_fitness(self, candidate_vectors):
        """Score a set of delay vectors (one per internal PUF) against the training set."""
        candidate_puf = XORArbiterPUF([SimplifiedArbiterPUF(vector) for vector in candidate_vectors])
        mismatches = 0
        for example in self.training_set:
            mismatches += count_nonzero(example.response - candidate_puf.get_response(example.challenge))
        return len(self.training_set) - mismatches
class ArbiterPUFFitnessMetric:
    """Fitness = number of training CRPs a single arbiter-PUF candidate reproduces."""
    def __init__(self, training_set):
        self.training_set = training_set
    def get_fitness(self, candidate_vector):
        """Score one delay vector against every stored challenge/response pair."""
        candidate_puf = SimplifiedArbiterPUF(candidate_vector)
        mismatches = 0
        for example in self.training_set:
            mismatches += count_nonzero(example.response - candidate_puf.get_response(example.challenge))
        return len(self.training_set) - mismatches
|
{"/PUFAttackSimulation.py": ["/CMAEvolutionStrategy.py", "/ArbiterPUFFitnessMetric.py"]}
|
34,823
|
catherineverdiergo/XebExercice
|
refs/heads/master
|
/mowerstestplayer.py
|
# -*- coding:utf-8 -*-
import re
from mower import Mower
UP_RIGHT_CORNER_PATTERN = re.compile('([-+]?\\d+) ([-+]?\\d+)')
MOWER_STATUS_PATTERN = re.compile('([-+]?\\d+) ([-+]?\\d+) ([NESW])')
def read_integer(matcher, grp_number, line_number):
    """
    Parse an integer from a regex matcher able to parse strings formatted as "%d %d".
    :param matcher: regex matcher applied on the line string regarding the UP_RIGHT_CORNER_PATTERN pattern.
    :param grp_number: index of the integer to read (1 or 2)
    :param line_number: line number in the input file (used in exception to report error)
    :return: an integer if no exception occurs
    """
    try:
        value = int(matcher.group(grp_number))
    except ValueError:
        raise Exception('Error line {}: not a valid integer'.format(line_number))
    # The positivity check sits outside the try so its Exception is never
    # confused with a parse failure (same observable behaviour as before).
    if value < 0:
        raise Exception('Error line {}: not a positive integer'.format(line_number))
    return value
def read_grid_up_right_corner(line, line_number):
    """
    Parse the first line of the test file (which provides the grid lawn upper right corner coordinates).
    :param line: the first line of the test file as a string
    :param line_number: line number in the input file (used in exception to report error)
    :return: a tuple holding grid upper right corner coordinates if no exception occurs
    """
    matcher = re.match(UP_RIGHT_CORNER_PATTERN, line)
    if not (matcher and len(matcher.groups()) == 2):
        raise Exception('Error line {}, format expected: "%d %d"'.format(line_number))
    return (read_integer(matcher, 1, line_number),
            read_integer(matcher, 2, line_number))
def read_mower_status(line, line_number, up_right):
    """
    Parse a "mower status" line in the input test file (format: "%d %d [NESW]") and returns a mower initial status.
    :param line: a line holding a mower status in the test file as a string
    :param line_number: line number in the input file (used in exception to report error)
    :param up_right: (int, int) tuple ==> grid lawn upper right corner coordinates
    :return: a tuple as ((int, int), [NESW]) giving a mower status (position, orientation) if no exception occurs
    """
    matcher = re.match(MOWER_STATUS_PATTERN, line)
    # guard clauses replace the original nested if/else pyramid
    if not (matcher and len(matcher.groups()) == 3):
        raise Exception('Error line {}, format expected:'.format(line_number) + '"%d %d {one char in N,E,S,W}"')
    x = read_integer(matcher, 1, line_number)
    y = read_integer(matcher, 2, line_number)
    if x > up_right[0] or y > up_right[1]:
        raise Exception('Error line {}, coordinates should be less or equal than line 1 coordinates'
                        .format(line_number))
    orientation = matcher.group(3)
    if not Mower.is_valid_orientation(orientation):
        raise Exception('Error line {}, format expected:'.format(line_number) +
                        '"%d %d {one char in N,E,S,W}"')
    return (x, y), orientation
def read_program(line, line_number):
    """
    Parse a mower "program" line in the test file (should be a sequence of valid mower actions (A, G or D) as a string.
    :param line: line holding the mower program as a string
    :param line_number: line number in the input file (used in exception to report error)
    :return: the program as a string if no exception occurs
    """
    if not all(Mower.is_valid_moving_code(action) for action in line):
        raise Exception('Error line {}: programs should be a sequence matching "[AGD]*"'.format(line_number))
    return line
def read_line(f):
    """
    Read a line from a text file and remove trailing end-of-line characters
    (at most one newline, then at most one carriage return).
    :param f: text file handler
    :return: a cleaned line as string
    """
    line = f.readline()
    if line.endswith('\n'):
        line = line[:-1]
    if line.endswith('\r'):
        line = line[:-1]
    return line
class MowersTestPlayer(object):
    """
    Class to read and apply a test file with a format defined in the exercise statements:
    First line ==> gives the grid lawn upper right corner coordinates. ex: 5 5
    Other lines ==> sequence of couple of lines giving (mower status + a mower program to apply)
    """
    def __init__(self, file_name):
        self._filename = file_name  # test file name
        self._mowers = []  # list of tuples (Mower, program) parsed from the test file
        self._final_status = []  # list of final mower status when test has been applied
    @property
    def mowers(self):
        return self._mowers
    @property
    def all_status(self):
        return self._final_status
    def open(self):
        """
        Open and parse input test file (file_name).
        :return: A list of tuples (Mower, program) if no exception occurs
        """
        with open(self._filename, 'r') as f:
            line = read_line(f)
            Mower.set_up_right_corner(read_grid_up_right_corner(line, 1))
            line_number = 1
            status = None
            self._mowers = []
            line = read_line(f)
            while line != '':
                line_number += 1
                if line_number % 2 == 0:
                    # even lines hold a mower's initial status ...
                    status = read_mower_status(line, line_number, Mower.GRID_UP_RIGHT_CORNER)
                else:
                    # ... odd lines hold the program applied to that status
                    program = read_program(line, line_number)
                    self._mowers.append((Mower(status[0], status[1]), program))
                line = read_line(f)
            # fix: dropped the redundant f.close() — the with statement
            # already closes the file when the block exits.
    def apply(self, with_history=False):
        """
        Apply the program for each mower identified in the test file.
        :param with_history: when True, also record every intermediate status
        :return: the list of final status of the mowers as strings when with_history is False
        else return the list of all steps executed by mower and all initial status
        """
        self._final_status = []
        if not with_history:
            for tmover in self._mowers:
                tmover[0].move_multiple_steps(tmover[1])
                self._final_status.append(tmover[0].get_str_status())
            return self.all_status
        else:
            initial_status = []
            for tmover in self._mowers:
                initial_status.append(tmover[0].status)
                mower_history = []
                for step in tmover[1]:
                    tmover[0].move_one_step(step)
                    mower_history.append((tmover[0].status, step))
                self._final_status.append(mower_history)
            return self.all_status, initial_status
|
{"/mowerstestplayer.py": ["/mower.py"], "/mowersviz.py": ["/mowerstestplayer.py", "/mower.py"], "/testmowers.py": ["/mowerstestplayer.py"]}
|
34,824
|
catherineverdiergo/XebExercice
|
refs/heads/master
|
/mowersviz.py
|
# -*- coding:utf-8 -*-
from matplotlib import pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib.colors import ListedColormap
from mowerstestplayer import MowersTestPlayer
from mower import Mower
from matplotlib.animation import FuncAnimation
from matplotlib.patches import FancyArrow
TITLE_LINE1 = '\nTest file: {}\n'
TITLE_LINE2 = '\nMower {}\n\nStatus: position=({} ,{}), orientation={}\n Step: {}'
TITLE_LINE3 = '\n Next action: {}'
class MowersViz(object):
    """
    This class allows to visualize the steps of the program applied to a given mower in a test file.
    It uses numpy and matplotlib ==> it should be executed in an anaconda3 python environment.
    Inspired from this article: https://eli.thegreenplace.net/2016/drawing-animated-gifs-with-matplotlib/
    """
    LARGE_GREENS_CMAP = cm.get_cmap('Greens', 512)
    # Create a new green colormap from the 'Greens' matplotlib colormap
    FULL_GREEN_CMAP = ListedColormap(LARGE_GREENS_CMAP(np.linspace(0.4, 0.6, 256)))
    def __init__(self, scenario_file):
        """
        Constructor. Initialize the test visualizer.
        :param scenario_file: path of test file
        """
        self._scenario_file = scenario_file
        # Create the test player and apply test
        mplayer = MowersTestPlayer(self._scenario_file)
        mplayer.open()
        self._scenario, self._initmowers = mplayer.apply(with_history=True)
        # other instance variables initialization
        self._fig, self._ax, self._img_grid, self._grid_lawn = self.create_graphic_ctx()
        self._circle, self._arrow = None, None
        self._mower_index = 0
        self.draw_mower(0, 0)
    def create_graphic_ctx(self):
        """
        Create a graphic context for the visualization.
        :return: a figure, an axe, a numpy array, an image grid
        """
        fig, ax = plt.subplots(figsize=(7, 7))
        fig.set_tight_layout(True)
        plt.xlim(-0.5, Mower.GRID_UP_RIGHT_CORNER[0] + 0.5)
        # Bug fix: the y limit previously reused the x coordinate (index 0),
        # which mis-sized the vertical axis for non-square lawns.
        plt.ylim(-0.5, Mower.GRID_UP_RIGHT_CORNER[1] + 0.5)
        plt.xticks(np.arange(0, Mower.GRID_UP_RIGHT_CORNER[0] + 1, 1.0))
        plt.yticks(np.arange(0, Mower.GRID_UP_RIGHT_CORNER[1] + 1, 1.0))
        # Bug fix: bold styling was applied twice to the x tick labels and
        # never to the y tick labels; style both axes identically.
        for tick in ax.yaxis.get_major_ticks():
            tick.label1.set_fontsize(14)
            tick.label1.set_fontweight('bold')
        for tick in ax.xaxis.get_major_ticks():
            tick.label1.set_fontsize(14)
            tick.label1.set_fontweight('bold')
        fig.suptitle(TITLE_LINE1.format(self._scenario_file), fontsize='xx-large')
        grid_lawn = np.ones((Mower.GRID_UP_RIGHT_CORNER[1] + 1, Mower.GRID_UP_RIGHT_CORNER[0] + 1))
        grid_lawn[0, 0] = 0
        img_grid = plt.imshow(grid_lawn, cmap=MowersViz.FULL_GREEN_CMAP)
        # plt.show()
        return fig, ax, img_grid, grid_lawn
    def clear_graphic_ctx(self):
        """
        Clear the graphic context (remove mower glyphs, reset the lawn cells).
        :return: None
        """
        if self._circle is not None:
            self._circle.remove()
            self._circle = None
        if self._arrow is not None:
            self._arrow.remove()
            self._arrow = None
        self._grid_lawn[:, :] = 1
        self._img_grid.set_data(self._grid_lawn)
    def draw_mower(self, mower_index, step_number):
        """
        Create a representation of a mower on the grid_lawn.
        Our representation is a filled circle with an arrow for the mower orientation.
        :param mower_index: mower rank in the test file (0 is first)
        :param step_number: program step to represent (0 ==> initial status. Other steps are from 1 to
        the length of the program associated to the mower: [1, len(program])
        :return: None
        """
        if step_number == 0:
            circle_x = self._initmowers[mower_index][0][0]
            circle_y = self._initmowers[mower_index][0][1]
            orientation = self._initmowers[mower_index][1]
            self._grid_lawn.fill(1)
        else:
            circle_x = self._scenario[mower_index][step_number - 1][0][0][0]
            circle_y = self._scenario[mower_index][step_number - 1][0][0][1]
            orientation = self._scenario[mower_index][step_number - 1][0][1]
        # darken the mowed cell, then draw the mower circle + heading arrow
        self._grid_lawn[circle_y, circle_x] = 0
        self._img_grid.set_data(self._grid_lawn)
        self._circle = plt.Circle((circle_x, circle_y), 0.4, color='blue', alpha=0.3)
        self._ax.add_patch(self._circle)
        if orientation == 'N':
            self._arrow = FancyArrow(circle_x, circle_y, 0, 0.37, color='w', width=0.03, joinstyle='miter')
        elif orientation == 'E':
            self._arrow = FancyArrow(circle_x, circle_y, 0.37, 0, color='w', width=0.03, joinstyle='miter')
        elif orientation == 'S':
            self._arrow = FancyArrow(circle_x, circle_y, 0, -0.37, color='w', width=0.03, joinstyle='miter')
        elif orientation == 'W':
            self._arrow = FancyArrow(circle_x, circle_y, -0.37, 0, color='w', width=0.03, joinstyle='miter')
        self._ax.add_patch(self._arrow)
        title = TITLE_LINE1.format(self._scenario_file)
        title += TITLE_LINE2.format(self._mower_index + 1, circle_x, circle_y, orientation, step_number)
        if step_number < len(self._scenario[self._mower_index]):
            title += TITLE_LINE3.format(self._scenario[self._mower_index][step_number][1])
        self._fig.suptitle(title, fontsize='xx-large')
    def get_mower_index_and_step(self, refresh_step):
        """
        Retrieve the mower index and the step in the mower program for the current refresh step of the animation.
        NOTE(review): returns None when refresh_step exceeds the total step
        count; anim() sizes the frame range so that should not happen — verify.
        :param refresh_step: step in the animation
        :return: the mower index in the scenario and the step in the mower program
        """
        idx = 0
        steps = 0
        for mower in self._scenario:
            if refresh_step < steps + len(mower) + 1:
                return idx, refresh_step - steps
            else:
                steps += len(mower) + 1
                idx += 1
    def update(self, i):
        """
        Refresh matplotlib objects to display the next step of the scenario.
        :param i: step considered
        :return: None
        """
        if self._circle is not None:
            self._circle.remove()
            self._circle = None
        if self._arrow is not None:
            self._arrow.remove()
            self._arrow = None
        mower_index, step = self.get_mower_index_and_step(i)
        if mower_index != self._mower_index:
            # switching mowers: wipe the previous mower's trail
            self.clear_graphic_ctx()
            self._mower_index = mower_index
        self.draw_mower(self._mower_index, step)
    def anim(self, anim_gif=None):
        """
        Animate the test scenario or generate an animated gif of the test scenario.
        :param anim_gif: file to generate (optional)
        :return: None
        """
        scenario_steps = 0
        for mower in self._scenario:
            # one frame per program step plus one for the initial status
            scenario_steps += len(mower) + 1
        anim = FuncAnimation(self._fig, self.update,
                             frames=np.arange(0, scenario_steps),
                             interval=2000)
        if anim_gif:
            anim.save(anim_gif, dpi=80, writer='imagemagick')
        else:
            plt.show()
# Script entry point: animate the bundled sample scenario. Uncomment the
# anim_gif call to export the animation as an animated gif instead.
if __name__ == '__main__':
    mViz = MowersViz('testmowers1.data')
    # mViz.anim(anim_gif='testmowers1.gif')
    mViz.anim()
    # plt.show()
|
{"/mowerstestplayer.py": ["/mower.py"], "/mowersviz.py": ["/mowerstestplayer.py", "/mower.py"], "/testmowers.py": ["/mowerstestplayer.py"]}
|
34,825
|
catherineverdiergo/XebExercice
|
refs/heads/master
|
/mower.py
|
# -*- coding:utf-8 -*-
"""
Xebia exercice: Robotic mower moving on a grid lawn modelization.
"""
class Mower(object):
    """A robotic mower moving on a rectangular lawn grid.

    The grid's upper right corner is shared by all mowers and must be set
    once via :meth:`set_up_right_corner` before any mower moves forward.
    """

    # Upper right corner of the lawn grid (set via set_up_right_corner).
    GRID_UP_RIGHT_CORNER = None
    # Valid orientations, ordered clockwise so that swinging right/left is a
    # simple +1/-1 index shift (modulo the list length).
    ORIENTATIONS = ['N', 'E', 'S', 'W']
    # For each orientation: (coordinate index to change, delta to apply) when
    # moving forward. E.g. 'W': (0, -1) means decrement x.
    MOVE_FORWARD_OPERATIONS = {'N': (1, 1), 'E': (0, 1), 'S': (1, -1), 'W': (0, -1)}
    # Valid moving codes: A(dvance), D (right), G (left).
    MOVING_CODES = ['A', 'D', 'G']
    # Index shift applied on ORIENTATIONS when swinging right ('D') / left ('G').
    SWING_OPERATIONS = {'D': 1, 'G': -1}

    @classmethod
    def is_valid_position(cls, position):
        """
        Return True when *position* is a tuple of two non-negative integers.

        :param position: candidate position
        :return: bool
        """
        if not isinstance(position, tuple) or len(position) != 2:
            return False
        x, y = position
        return isinstance(x, int) and isinstance(y, int) and x >= 0 and y >= 0

    @classmethod
    def is_valid_orientation(cls, orientation):
        """
        Return True when *orientation* is one of the ORIENTATIONS letters.

        :param orientation: candidate orientation
        :return: bool
        """
        return orientation in cls.ORIENTATIONS

    @classmethod
    def is_valid_moving_code(cls, moving_code):
        """
        Return True when *moving_code* is one of the MOVING_CODES letters.

        :param moving_code: candidate moving code
        :return: bool
        """
        return moving_code in cls.MOVING_CODES

    @classmethod
    def set_up_right_corner(cls, up_right_corner):
        """
        Initialize the (class-wide) upper right corner of the lawn grid.

        :param up_right_corner: tuple of two non-negative integers
        :raises Exception: when the parameter is not a valid position
        :return: None
        """
        if not cls.is_valid_position(up_right_corner):
            raise Exception('up_right_corner parameter should be a tuple2 with positive coordinates')
        Mower.GRID_UP_RIGHT_CORNER = up_right_corner

    def __init__(self, position, orientation):
        """
        Create a mower with a validated initial position and orientation.

        :param position: initial position (tuple of two non-negative ints)
        :param orientation: initial orientation (one of ORIENTATIONS)
        :raises Exception: on invalid position or orientation
        """
        if not Mower.is_valid_position(position):
            raise Exception('position parameter should be a tuple2 with positive coordinates')
        self._position = position
        if not Mower.is_valid_orientation(orientation):
            raise Exception('orientation parameter should be among {}'.format(Mower.ORIENTATIONS))
        self._orientation = orientation

    @property
    def position(self):
        """The mower's current (x, y) position."""
        return self._position

    @property
    def orientation(self):
        """The mower's current orientation letter."""
        return self._orientation

    @property
    def status(self):
        """Tuple of (position, orientation)."""
        return self.position, self.orientation

    def get_str_status(self):
        """Return the status as the exercise's output format: 'x y O'."""
        return '{} {} {}'.format(self.position[0], self.position[1], self.orientation)

    def move_forward(self):
        """
        Move one cell forward along the current orientation (code 'A').

        A move that would leave the grid is silently ignored, so the mower
        stays put at the boundary.

        :raises Exception: when the grid corner has not been initialized
        :return: None
        """
        if not Mower.is_valid_position(Mower.GRID_UP_RIGHT_CORNER):
            raise Exception('Mower.GRID_UP_RIGHT_CORNER should be defined (use Mower.set_up_right_corner method)')
        axis, delta = Mower.MOVE_FORWARD_OPERATIONS[self.orientation]
        candidate = self._position[axis] + delta
        # Only accept the move when it stays inside [0, corner] on that axis.
        if 0 <= candidate <= Mower.GRID_UP_RIGHT_CORNER[axis]:
            new_coords = list(self._position)
            new_coords[axis] = candidate
            self._position = tuple(new_coords)

    def swing(self, moving_code):
        """
        Rotate the mower in place (moving code 'G' or 'D').

        The new orientation is the current ORIENTATIONS index shifted by the
        swing delta, wrapped modulo the number of orientations.

        :param moving_code: 'G' or 'D'
        :return: None
        """
        shift = Mower.SWING_OPERATIONS[moving_code]
        current_idx = Mower.ORIENTATIONS.index(self._orientation)
        self._orientation = Mower.ORIENTATIONS[(current_idx + shift) % len(Mower.ORIENTATIONS)]

    def move_one_step(self, moving_code):
        """
        Apply a single moving code ('A', 'G' or 'D') to the mower.

        Invalid codes are silently ignored.

        :param moving_code: a moving code
        :return: None
        """
        if not Mower.is_valid_moving_code(moving_code):
            return
        if moving_code == 'A':
            self.move_forward()
        else:
            self.swing(moving_code)

    def move_multiple_steps(self, moving_program):
        """
        Apply a whole program (a string of moving codes) to the mower.

        :param moving_program: iterable of moving codes
        :return: None
        """
        for code in moving_program:
            self.move_one_step(code)
|
{"/mowerstestplayer.py": ["/mower.py"], "/mowersviz.py": ["/mowerstestplayer.py", "/mower.py"], "/testmowers.py": ["/mowerstestplayer.py"]}
|
34,826
|
catherineverdiergo/XebExercice
|
refs/heads/master
|
/testmowers.py
|
import unittest
from mowerstestplayer import MowersTestPlayer
class MowerTestCase(unittest.TestCase):
    """End-to-end checks: play recorded mower scenario files, compare outputs."""

    def _play(self, data_file):
        # Parse the given scenario file and return the final mower statuses.
        test_player = MowersTestPlayer(data_file)
        test_player.open()
        return test_player.apply()

    def test1(self):
        outcomes = self._play('testmowers1.data')
        self.assertEqual("1 3 N", outcomes[0])
        self.assertEqual("5 1 E", outcomes[1])

    def test2(self):
        outcomes = self._play('testmowers2.data')
        self.assertEqual("0 0 E", outcomes[0])
        self.assertEqual("0 0 E", outcomes[1])

    def test3(self):
        outcomes = self._play('testmowers3.data')
        self.assertEqual("3 2 E", outcomes[0])
if __name__ == '__main__':
    # Allow running this test module directly: python testmowers.py
    unittest.main()
|
{"/mowerstestplayer.py": ["/mower.py"], "/mowersviz.py": ["/mowerstestplayer.py", "/mower.py"], "/testmowers.py": ["/mowerstestplayer.py"]}
|
34,873
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/people/migrations/0004_auto_20170206_2215.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-02-06 22:15
from __future__ import unicode_literals
from django.db import migrations
import sorl.thumbnail.fields
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10.1): admin-facing Meta tweaks on
    # OrganizationAdmin and PersonLink, plus a help_text/sorl field update on
    # Organization.logo. Do not hand-edit the operations.

    dependencies = [
        ('people', '0003_organizationadmin'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='organizationadmin',
            options={'ordering': ('organization', 'email'), 'verbose_name': 'Organization Admin', 'verbose_name_plural': 'Organization Admins - These email addresses will be able to log in and manage job postings for this organization'},
        ),
        migrations.AlterModelOptions(
            name='personlink',
            options={'ordering': ('person', 'name'), 'verbose_name': 'Person Link', 'verbose_name_plural': 'Person Links - The first item will be linked as "Visit Website" in author bios'},
        ),
        migrations.AlterField(
            model_name='organization',
            name='logo',
            field=sorl.thumbnail.fields.ImageField(blank=True, help_text='Resized to fit 300x200 box in template', null=True, upload_to='img/uploads/org_logos'),
        ),
    ]
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
34,874
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/jobs/migrations/0001_initial.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-28 01:00
from __future__ import unicode_literals
import caching.base
from django.db import migrations, models
import django.db.models.deletion
import source.jobs.models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.9.1): initial schema for the jobs app. Creates
    # the Job model with its listing date window, contact fields and an
    # optional link to people.Organization. Do not hand-edit the operations.

    initial = True

    dependencies = [
        ('people', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Job',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created', models.DateTimeField(auto_now_add=True)),
                ('modified', models.DateTimeField(auto_now=True)),
                ('is_live', models.BooleanField(default=True, help_text='Job will display if this is checked and dates are within proper range', verbose_name='Display on site')),
                ('name', models.CharField(max_length=128, verbose_name='Job name')),
                ('slug', models.SlugField(unique=True)),
                ('description', models.TextField(blank=True)),
                ('listing_start_date', models.DateField(default=source.jobs.models.get_today)),
                ('listing_end_date', models.DateField(default=source.jobs.models.get_today_plus_30)),
                ('tweeted_at', models.DateTimeField(blank=True, null=True)),
                ('url', models.URLField(blank=True, null=True)),
                ('contact_name', models.CharField(blank=True, max_length=128, verbose_name='Contact name')),
                ('email', models.EmailField(blank=True, max_length=254, verbose_name='Contact email')),
                ('location', models.CharField(blank=True, max_length=128, verbose_name='Job location')),
                ('organization', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='people.Organization')),
            ],
            options={
                'ordering': ('organization', 'slug'),
            },
            bases=(caching.base.CachingMixin, models.Model),
        ),
    ]
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
34,875
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/utils/caching.py
|
import hashlib
import json
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.cache import cache
from django.core.urlresolvers import resolve, reverse
from django.http import HttpRequest, HttpResponse, HttpResponseForbidden, Http404, JsonResponse
from django.utils.cache import get_cache_key
from django.utils.decorators import method_decorator
from django.utils.encoding import iri_to_uri
from django.utils.translation import get_language
from django.views.generic import View
from .json import LazyEncoder
from threading import local
_local = local()
def get_url_prefix():
    """Return the locale URL prefixer bound to the current thread, or None."""
    try:
        return _local.prefix
    except AttributeError:
        return None
def reverse_with_locale(viewname, urlconf=None, args=None, kwargs=None, prefix=None):
    """Wraps Django's reverse to prepend the correct locale.

    Returns an absolute URL (settings.BASE_URL + locale-prefixed path) so it
    can be fed to get_url_cache_key, since @cache_page keys include the
    protocol/domain.
    """
    # Thread-local prefixer installed elsewhere (may be None in plain requests).
    prefixer = get_url_prefix()
    if prefixer:
        prefix = prefix or '/'
    # NOTE(review): passing `prefix` positionally relies on the pre-Django-2.0
    # reverse() signature (django.core.urlresolvers) -- confirm on upgrade.
    url = reverse(viewname, urlconf, args, kwargs, prefix)
    if prefixer:
        url = prefixer.fix(url)
    # Django's @cache_page cache keys include protocol/domain
    protocol_domain = getattr(settings, 'BASE_URL', 'http://127.0.0.1:8000')
    # Ensure any unicode characters in the URL are escaped.
    reversed_url = '{}{}'.format(protocol_domain, iri_to_uri(url))
    return reversed_url
def expire_page_cache(path, key_prefix=None):
    """
    Invalidate the @cache_page entry for *path*, if one exists.

    The path is resolved and re-reversed with its locale prefix so the
    computed key matches what the cache middleware stored.

    :param path: URL path whose cached page should be expired
    :param key_prefix: optional cache key prefix override
    :return: True when a cached entry was found and expired, else False
    """
    # pass the path through funfactory resolver in order to get locale
    match = resolve(path)
    localized_path = reverse_with_locale(
        match.func,
        args=match.args,
        kwargs=match.kwargs
    )
    cache_key = get_url_cache_key(localized_path, key_prefix=key_prefix)
    if not cache_key:
        return False
    if not cache.get(cache_key):
        return False
    # Overwrite with an immediately-expiring empty entry.
    cache.set(cache_key, None, 0)
    return True
def get_url_cache_key(url, key_prefix=None):
    '''
    Build the cache key Django's @cache_page middleware uses for a GET of *url*.

    modified version of http://djangosnippets.org/snippets/2595/

    :param url: absolute URL (including protocol/domain) of the cached page
    :param key_prefix: cache key prefix; defaults to CACHES['default']['KEY_PREFIX']
    :return: the cache key string
    '''
    if key_prefix is None:
        try:
            key_prefix = settings.CACHES['default']['KEY_PREFIX']
        except (AttributeError, KeyError):
            # No CACHES setting / no default alias / no KEY_PREFIX configured.
            # Narrowed from a bare `except:` so unrelated errors are not hidden.
            key_prefix = ''
    # cache_page hashes the Vary headers; for a page with none that is an md5
    # of nothing, which we reproduce here with an empty digest context.
    ctx = hashlib.md5()
    path = hashlib.md5(iri_to_uri(url).encode('utf-8'))
    cache_key = 'views.decorators.cache.cache_page.%s.%s.%s.%s' % (
        key_prefix, 'GET', path.hexdigest(), ctx.hexdigest()
    )
    return cache_key
class ClearCache(View):
    """
    Login-protected endpoint that expires the page cache for ?path=<url path>.

    Responds with JSON ({'success': 'True'}) for AJAX requests and a plain
    confirmation string otherwise; unresolvable paths raise Http404.
    """

    def render_json_to_response(self, context):
        # Let JsonResponse serialize the context itself (with LazyEncoder for
        # lazy translation strings). The previous json.dumps()-then-wrap
        # version double-encoded the payload into a quoted JSON string.
        return JsonResponse(context, encoder=LazyEncoder, safe=False)

    @method_decorator(login_required)
    def get(self, request, *args, **kwargs):
        path = request.GET.get('path', None)
        try:
            resolve(path)
            expire_page_cache(path)
        except Exception:
            # Unknown/unresolvable path (or path=None): surface as a 404
            # rather than a server error. Narrowed from a bare `except:` so
            # SystemExit/KeyboardInterrupt are no longer swallowed.
            raise Http404
        if self.request.is_ajax():
            result = {'success': 'True'}
            return self.render_json_to_response(result)
        else:
            return HttpResponse('Cache cleared for "%s"!' % path)
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
34,876
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/utils/json.py
|
import json
from django.http import JsonResponse
from django.utils.functional import Promise
from django.utils.encoding import force_text
from django.core.serializers.json import DjangoJSONEncoder
class LazyEncoder(DjangoJSONEncoder):
    """JSON encoder that also handles Django lazy objects (Promise instances,
    e.g. ugettext_lazy proxies) by forcing them to text before encoding."""
    def default(self, obj):
        if isinstance(obj, Promise):
            return force_text(obj)
        # Fall back to DjangoJSONEncoder for dates, decimals, UUIDs, etc.
        return super(LazyEncoder, self).default(obj)
def render_json_to_response(context):
    '''
    Utility method for rendering a view's data to JSON response.
    '''
    # Hand the raw context to JsonResponse instead of pre-dumping it:
    # JsonResponse serializes its argument itself, so passing an already
    # json.dumps()-ed string produced a double-encoded (quoted) payload.
    return JsonResponse(
        context,
        encoder=LazyEncoder,
        safe=False,
        json_dumps_params={'sort_keys': False, 'indent': 4},
    )
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
34,877
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/code/migrations/0003_auto_20161213_1755.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-12-13 17:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.10.1): adds Code.demo_site and Code.is_featured,
    # and updates the verbose name of Code.url. Do not hand-edit the operations.

    dependencies = [
        ('code', '0002_auto_20160128_0100'),
    ]

    operations = [
        migrations.AddField(
            model_name='code',
            name='demo_site',
            field=models.URLField(blank=True, verbose_name='Demo site'),
        ),
        migrations.AddField(
            model_name='code',
            name='is_featured',
            field=models.BooleanField(default=False, help_text='A selection of featured projects appears on the Code landing page', verbose_name='Featured repo'),
        ),
        migrations.AlterField(
            model_name='code',
            name='url',
            field=models.URLField(verbose_name='Repository URL'),
        ),
    ]
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
34,878
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/people/models.py
|
import requests
from datetime import datetime
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from django.db import models
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
from django.utils.html import format_html
from caching.base import CachingManager, CachingMixin
from sorl.thumbnail import ImageField
from source.base.utils import disable_for_loaddata
from source.utils.auth import get_or_create_user
from source.utils.caching import expire_page_cache
class LivePersonManager(CachingManager):
    """Manager that narrows the default queryset to live (published) people."""
    def get_queryset(self):
        base_qs = super(LivePersonManager, self).get_queryset()
        return base_qs.filter(is_live=True)
class Person(CachingMixin, models.Model):
    """A community member profile (journalist/developer) shown on the site."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    is_live = models.BooleanField('Display on site', default=True)
    show_in_lists = models.BooleanField('Show on People list page', default=True)
    first_name = models.CharField(max_length=128)
    last_name = models.CharField(max_length=128)
    slug = models.SlugField(unique=True)
    photo = ImageField(upload_to='img/uploads/person_photos', blank=True, null=True)
    email = models.EmailField('Email address', blank=True)
    twitter_username = models.CharField(max_length=32, blank=True)
    twitter_bio = models.TextField(blank=True)
    twitter_profile_image_url = models.URLField(blank=True)
    github_username = models.CharField(max_length=32, blank=True)
    github_repos_num = models.PositiveIntegerField(blank=True, null=True)
    github_gists_num = models.PositiveIntegerField(blank=True, null=True)
    description = models.TextField('Bio', blank=True)
    organizations = models.ManyToManyField('Organization', blank=True)
    # Default manager plus a convenience manager pre-filtered to is_live=True.
    objects = models.Manager()
    live_objects = LivePersonManager()

    class Meta:
        ordering = ('last_name', 'first_name',)
        verbose_name_plural = 'People'

    def __str__(self):
        return '%s %s' % (self.first_name, self.last_name)

    def save(self, *args, **kwargs):
        # Normalize whitespace on names before storing.
        self.first_name = self.first_name.strip()
        self.last_name = self.last_name.strip()
        # clean up our username fields, just in case
        if self.twitter_username:
            self.twitter_username = self.twitter_username.strip()
            # Accept "@name" or a full profile URL and keep only the bare name.
            if self.twitter_username.startswith('@'):
                self.twitter_username = self.twitter_username.strip('@')
            if '/' in self.twitter_username:
                self.twitter_username = self.twitter_username.split('/')[-1]
        if self.github_username:
            self.github_username = self.github_username.strip()
            if '/' in self.github_username:
                self.github_username = self.github_username.split('/')[-1]
        super(Person, self).save(*args, **kwargs)

    def name(self):
        # Full display name; strip covers a missing first or last name.
        return u'{0} {1}'.format(self.first_name, self.last_name).strip()

    @models.permalink
    def get_absolute_url(self):
        return ('person_detail', (), {
            'slug': self.slug })

    @property
    def sort_letter(self):
        # First letter of the last name, used for alphabetical list headers.
        return self.last_name[:1]

    def get_live_article_set(self):
        # Published, listed articles already past their publication date.
        return self.article_set.filter(is_live=True, show_in_lists=True, pubdate__lte=datetime.now())

    def get_live_article_authored_set(self):
        # Same as above, but via the article_authors relation.
        return self.article_authors.filter(is_live=True, show_in_lists=True, pubdate__lte=datetime.now())

    def get_live_organization_set(self):
        return self.organizations.filter(is_live=True)

    def get_live_code_set(self):
        return self.code_set.filter(is_live=True)

    def get_website(self):
        # URL of the first related PersonLink, or None when there are no links.
        try:
            return self.personlink_set.all()[0].url
        except:
            return None

    def get_bio(self):
        # Prefer the locally entered bio, fall back to the imported Twitter bio.
        return self.description or self.twitter_bio or ''

    # The admin_*_tag methods below render small HTML snippets for the Django
    # admin changelist; each exposes a short_description used as column header.
    def admin_image_tag(self):
        if self.photo:
            return format_html(
                '<img src="{}{}" style="height: 30px;" />',
                settings.MEDIA_URL,
                self.photo,
            )
        return None
    admin_image_tag.short_description = 'Photo'

    def admin_email_tag(self):
        if self.email:
            return format_html(
                '<a href="mailto:{}">{}</a>',
                self.email,
                self.email,
            )
        return None
    admin_email_tag.short_description = 'Email'

    def admin_twitter_tag(self):
        if self.twitter_username:
            return format_html(
                '<a href="https://twitter.com/{}">@{}</a>',
                self.twitter_username,
                self.twitter_username,
            )
        return None
    admin_twitter_tag.short_description = 'Twitter'

    def admin_github_tag(self):
        if self.github_username:
            return format_html(
                '<a href="https://github.com/{}">{}</a>',
                self.github_username,
                self.github_username,
            )
        return None
    admin_github_tag.short_description = 'Github'
class PersonLink(CachingMixin, models.Model):
    """A labelled URL attached to a Person; per the verbose_name_plural below,
    the first one is rendered as the "Visit Website" link in author bios."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    person = models.ForeignKey(Person, on_delete=models.CASCADE)
    name = models.CharField(max_length=128)
    url = models.URLField()
    objects = models.Manager()

    class Meta:
        ordering = ('person', 'name',)
        verbose_name = 'Person Link'
        verbose_name_plural = 'Person Links - The first item will be linked as "Visit Website" in author bios'

    def __str__(self):
        return '%s: %s' % (self.person.name, self.name)
class LiveOrganizationManager(CachingManager):
    """Manager that narrows the default queryset to live organizations."""
    def get_queryset(self):
        base_qs = super(LiveOrganizationManager, self).get_queryset()
        return base_qs.filter(is_live=True)
class Organization(CachingMixin, models.Model):
    """A news/tech organization with location, social handles and a logo."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    is_live = models.BooleanField('Display on site', default=True)
    show_in_lists = models.BooleanField('Show on Organization list page', default=True)
    name = models.CharField(max_length=255)
    slug = models.SlugField(unique=True)
    email = models.EmailField('Email address', blank=True)
    twitter_username = models.CharField(max_length=32, blank=True)
    github_username = models.CharField(max_length=32, blank=True)
    github_repos_num = models.PositiveIntegerField(blank=True, null=True)
    github_gists_num = models.PositiveIntegerField(blank=True, null=True)
    homepage = models.URLField(blank=True)
    description = models.TextField(blank=True)
    # Location
    address = models.CharField(max_length=255, blank=True)
    city = models.CharField(max_length=64, blank=True)
    state = models.CharField(max_length=32, blank=True)
    country = models.CharField(max_length=32, blank=True, help_text="Only necessary if outside the U.S.")
    logo = ImageField(upload_to='img/uploads/org_logos', help_text="Resized to fit 300x200 box in template", blank=True, null=True)
    # Default manager plus a convenience manager pre-filtered to is_live=True.
    objects = models.Manager()
    live_objects = LiveOrganizationManager()

    class Meta:
        ordering = ('name',)

    def __str__(self):
        return '%s' % self.name

    def save(self, *args, **kwargs):
        # clean up our username fields, just in case
        # (accepts "@name" or full profile URLs, keeps only the bare name;
        # safe on blank strings since startswith/'in' handle '' fine)
        if self.twitter_username.startswith('@'):
            self.twitter_username = self.twitter_username.strip('@')
        if '/' in self.twitter_username:
            self.twitter_username = self.twitter_username.split('/')[-1]
        if '/' in self.github_username:
            self.github_username = self.github_username.split('/')[-1]
        super(Organization, self).save(*args, **kwargs)

    @models.permalink
    def get_absolute_url(self):
        return ('organization_detail', (), {
            'slug': self.slug })

    @property
    def location_string_for_static_map(self):
        # "addr,city,state,country" with spaces URL-encoded as '+', for use
        # in static-map service query strings.
        _locs = []
        for _loc in [self.address, self.city, self.state, self.country]:
            if _loc: _locs.append(_loc)
        return ",".join(_locs).replace(' ','+')

    @property
    def location(self):
        # Human-readable "city, state, country" (blank parts omitted).
        _locs = []
        for _loc in [self.city, self.state, self.country]:
            if _loc: _locs.append(_loc)
        return ", ".join(_locs)

    @property
    def sort_letter(self):
        # First letter of the name ignoring a leading "The ", for list headers.
        return self.name.replace('The ', '')[:1]

    def get_live_article_set(self):
        # Published, listed articles already past their publication date.
        return self.article_set.filter(is_live=True, show_in_lists=True, pubdate__lte=datetime.now())

    def get_live_person_set(self):
        return self.person_set.filter(is_live=True)

    def get_live_code_set(self):
        return self.code_set.filter(is_live=True)

    def get_live_job_set(self):
        # Live jobs whose listing window includes today.
        return self.job_set.filter(is_live=True, listing_start_date__lte=datetime.today(), listing_end_date__gte=datetime.today())

    def has_open_jobs(self):
        return self.get_live_job_set().exists()

    def admin_count(self):
        # Number of OrganizationAdmin records attached to this organization.
        return self.organizationadmin_set.count()

    # The admin_*_tag methods below render small HTML snippets for the Django
    # admin changelist; each exposes a short_description used as column header.
    def admin_image_tag(self):
        if self.logo:
            return format_html(
                '<img src="{}{}" style="height: 15px;" />',
                settings.MEDIA_URL,
                self.logo,
            )
        return None
    admin_image_tag.short_description = 'Logo'

    def admin_email_tag(self):
        if self.email:
            return format_html(
                '<a href="mailto:{}">{}</a>',
                self.email,
                self.email,
            )
        return None
    admin_email_tag.short_description = 'Email'

    def admin_twitter_tag(self):
        if self.twitter_username:
            return format_html(
                '<a href="https://twitter.com/{}">@{}</a>',
                self.twitter_username,
                self.twitter_username,
            )
        return None
    admin_twitter_tag.short_description = 'Twitter'

    def admin_github_tag(self):
        if self.github_username:
            return format_html(
                '<a href="https://github.com/{}">{}</a>',
                self.github_username,
                self.github_username,
            )
        return None
    admin_github_tag.short_description = 'Github'
class OrganizationAdmin(CachingMixin, models.Model):
    """An email address allowed to manage an organization's job postings;
    a matching auth User is kept in sync via the signal handlers below."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
    email = models.EmailField('Email address', unique=True)
    objects = models.Manager()

    class Meta:
        ordering = ('organization', 'email',)
        verbose_name = 'Organization Admin'
        verbose_name_plural = 'Organization Admins - These email addresses will be able to log in and manage job postings for this organization'

    def __str__(self):
        return '%s: %s' % (self.organization.name, self.email)

    def clean(self):
        # Strip whitespace from every text-ish field (EmailField subclasses
        # CharField, so `email` is covered too).
        for field in self._meta.fields:
            if isinstance(field, (models.CharField, models.TextField)):
                setattr(self, field.name, getattr(self, field.name).strip())
class OrganizationLink(CachingMixin, models.Model):
    """A labelled URL attached to an Organization."""
    created = models.DateTimeField(auto_now_add=True)
    modified = models.DateTimeField(auto_now=True)
    organization = models.ForeignKey(Organization, on_delete=models.CASCADE)
    name = models.CharField(max_length=128)
    url = models.URLField()
    objects = models.Manager()

    class Meta:
        ordering = ('organization', 'name',)
        verbose_name = 'Organization Link'

    def __str__(self):
        return '%s: %s' % (self.organization.name, self.name)
@receiver(post_save, sender=Person)
@disable_for_loaddata
def clear_caches_for_person(sender, instance, **kwargs):
    """
    post_save handler: expire every cached page that can display this Person
    (detail, list and community pages, plus related article, organization and
    code pages) so edits are visible without waiting for the cache timeout.
    Skipped during fixture loading via @disable_for_loaddata.
    """
    # clear cache for person detail page
    expire_page_cache(instance.get_absolute_url())
    # clear cache for person list page
    expire_page_cache(reverse('person_list'))
    # clear cache for community page
    expire_page_cache(reverse('community_list'))
    # clear caches for related articles
    for article in instance.get_live_article_set():
        expire_page_cache(article.get_absolute_url())
        expire_page_cache(reverse('article_list'))
        #if article.section.slug:
        #    expire_page_cache(reverse(
        #        'article_list_by_section',
        #        kwargs = { 'section': article.section.slug }
        #    ))
        #if article.category:
        #    expire_page_cache(reverse(
        #        'article_list_by_category',
        #        kwargs = { 'category': article.category.slug }
        #    ))
    # same expiry for articles where this person is credited as an author
    for article in instance.get_live_article_authored_set():
        expire_page_cache(article.get_absolute_url())
        expire_page_cache(reverse('article_list'))
        #if article.section.slug:
        #    expire_page_cache(reverse(
        #        'article_list_by_section',
        #        kwargs = { 'section': article.section.slug }
        #    ))
        #if article.category:
        #    expire_page_cache(reverse(
        #        'article_list_by_category',
        #        kwargs = { 'category': article.category.slug }
        #    ))
    # clear caches for related organizations
    for organization in instance.get_live_organization_set():
        expire_page_cache(organization.get_absolute_url())
    # clear caches for related code index entries
    for code in instance.get_live_code_set():
        expire_page_cache(code.get_absolute_url())
@receiver(post_save, sender=Organization)
@disable_for_loaddata
def clear_caches_for_organization(sender, instance, **kwargs):
    """Expire every cached page a saved Organization can appear on.

    Runs on post_save (skipped during loaddata) so the detail page,
    listing pages, and related content pages are served fresh.
    """
    # The organization's own detail page.
    expire_page_cache(instance.get_absolute_url())
    # Listing pages that include this organization.
    for route in ('organization_list', 'community_list'):
        expire_page_cache(reverse(route))
    # Articles about this organization (the global article list is
    # expired alongside each one; section/category lists are left alone).
    for article in instance.get_live_article_set():
        expire_page_cache(article.get_absolute_url())
        expire_page_cache(reverse('article_list'))
    # People and code index entries tied to this organization.
    for related in (instance.get_live_person_set(), instance.get_live_code_set()):
        for obj in related:
            expire_page_cache(obj.get_absolute_url())
@receiver(post_save, sender=OrganizationAdmin)
@disable_for_loaddata
def update_org_admin_user(sender, instance, **kwargs):
    """Ensure a User account exists for every saved OrganizationAdmin."""
    # make sure there's a User record associated with each OrganizationAdmin
    # get_or_create_user presumably keys the User on the admin's email
    # (the delete handler looks Users up by username__iexact=email) --
    # TODO confirm in utils.auth.
    get_or_create_user(instance.email)
@receiver(post_delete, sender=OrganizationAdmin)
def delete_org_admin_user(sender, instance, **kwargs):
    """Remove the User record paired with a deleted OrganizationAdmin.

    Keeps User rows from being orphaned when their OrganizationAdmin is
    deleted. Best-effort: a missing User is not an error.

    Fix: the original bare ``except:`` silently swallowed *every*
    exception, including database errors and KeyboardInterrupt. Narrowed
    to the two lookup outcomes the code actually expects.
    """
    try:
        admin = User.objects.get(username__iexact=instance.email)
    except User.DoesNotExist:
        # Nothing to clean up -- the paired User was never created
        # or was already removed.
        pass
    except User.MultipleObjectsReturned:
        # Ambiguous case-variant matches; leave them untouched
        # (mirrors the original behavior, which deleted nothing here).
        pass
    else:
        admin.delete()
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
34,879
|
OpenNews/opennews-source
|
refs/heads/master
|
/source/code/migrations/0005_code_grouping.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2017-01-19 22:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an optional free-text 'grouping' field (max 64 chars) to the
    # Code model; blank=True keeps it optional in forms/admin.
    dependencies = [
        ('code', '0004_auto_20170117_1904'),
    ]
    operations = [
        migrations.AddField(
            model_name='code',
            name='grouping',
            field=models.CharField(blank=True, max_length=64),
        ),
    ]
|
{"/source/utils/caching.py": ["/source/utils/json.py"], "/source/people/models.py": ["/source/base/utils.py", "/source/utils/auth.py", "/source/utils/caching.py"], "/source/code/urls.py": ["/source/code/views.py"], "/source/people/management/commands/migrate_org_admins.py": ["/source/people/models.py"], "/config/settings/production.py": ["/config/settings/common.py"], "/source/articles/urls.py": ["/source/articles/views.py"], "/config/settings/local.py": ["/config/settings/common.py"], "/source/people/urls/community.py": ["/source/people/views.py"], "/source/base/views.py": ["/source/articles/views.py", "/source/people/models.py", "/source/utils/json.py"], "/source/jobs/urls.py": ["/source/jobs/views.py"], "/source/people/management/commands/export_people_data.py": ["/source/people/models.py"], "/source/people/views.py": ["/source/people/models.py", "/source/utils/json.py"], "/config/urls.py": ["/source/base/views.py"], "/source/articles/views.py": ["/source/articles/forms.py"], "/source/jobs/views.py": ["/source/base/helpers.py", "/source/people/models.py", "/source/utils/caching.py", "/source/utils/json.py"], "/source/guides/views.py": ["/source/guides/forms.py"], "/source/code/views.py": ["/source/code/forms.py"], "/source/base/urls.py": ["/source/base/views.py", "/source/articles/views.py", "/source/utils/caching.py"], "/source/people/admin.py": ["/source/people/models.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.