repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
Powercoders-International/ft-web-dev | refs/heads/main | from shop.models import Article
from shop.serializers import ArticleSerializer
from rest_framework.viewsets import ModelViewSet
class ArticleViewSet(ModelViewSet):
    """Full CRUD REST endpoint for Article (list/retrieve/create/update/destroy)."""
    queryset = Article.objects.all()
    serializer_class = ArticleSerializer
| Python | 8 | 29.5 | 48 | /05-django/solutions/exercise-4-restframework/shop/views.py | 0.819672 | 0.819672 |
Powercoders-International/ft-web-dev | refs/heads/main | from shop.models import Article
from rest_framework.serializers import HyperlinkedModelSerializer
class ArticleSerializer(HyperlinkedModelSerializer):
    """Serializes Article instances for the REST API."""
    class Meta:
        model = Article
        # Only these fields are exposed; clients cannot set 'id'.
        fields = ['id', 'name', 'category']
        read_only_fields = ['id']
| Python | 10 | 26.200001 | 65 | /05-django/solutions/exercise-5-filters/shop/serializers.py | 0.705882 | 0.705882 |
Powercoders-International/ft-web-dev | refs/heads/main | from django_filters import FilterSet
from shop.models import Article
class ArticleFilter(FilterSet):
    """django-filter FilterSet enabling ?category=<id> filtering of articles."""
    class Meta:
        model = Article
        fields = ['category']
| Python | 9 | 18.333334 | 36 | /05-django/solutions/exercise-5-filters/shop/filters.py | 0.695402 | 0.695402 |
Powercoders-International/ft-web-dev | refs/heads/main | from django.urls import path
from shop.views import view_article
from shop.views import view_articles
# Shop app routes: collection endpoint and single-article detail endpoint.
urlpatterns = [
    path('articles/', view_articles),
    path('articles/<int:id>/', view_article),
]
| Python | 9 | 21.888889 | 45 | /05-django/solutions/exercise-2-static/shop/urls.py | 0.718447 | 0.718447 |
Powercoders-International/ft-web-dev | refs/heads/main | from json import loads
from django.http import JsonResponse
from django.http import HttpResponseNotAllowed
from django.http import HttpResponseNotFound
from shop.models import Article
def view_articles(request):
    """ Handles GET and POST requests for a collection of articles.
    curl --include \
        http://localhost:8000/shop/articles/
    curl --include \
        --request POST \
        --header "Content-Type: application/json" \
        --data '{"name":"test"}' \
        http://localhost:8000/shop/articles/
    """
    if request.method == 'GET':
        # Fix: the original built this list of dicts and then immediately
        # overwrote it with the raw queryset, which JsonResponse cannot
        # serialize (TypeError at request time).
        articles = [
            {'id': article.id, 'name': article.name}
            for article in Article.objects.all()
        ]
        return JsonResponse({'articles': articles})
    if request.method == 'POST':
        data = loads(request.body)
        # .get() tolerates a missing "name" key (stored as NULL/blank).
        name = data.get('name')
        article = Article.objects.create(name=name)
        return JsonResponse({
            'id': article.id,
            'name': article.name
        })
    # Any other verb gets a 405 with the allowed methods advertised.
    return HttpResponseNotAllowed(['GET', 'POST'])
def view_article(request, id):
    """ Handles GET, PATCH and DELETE requests for a single article.
    curl --include \
        http://localhost:8000/shop/articles/1/
    curl --include \
        --request PATCH \
        --header "Content-Type: application/json" \
        --data '{"name":"foo"}' \
        http://localhost:8000/shop/articles/1/
    curl --include \
        --request DELETE \
        http://localhost:8000/shop/articles/1/
    """
    article = Article.objects.filter(id=id).first()
    if not article:
        return HttpResponseNotFound()
    if request.method == 'GET':
        return JsonResponse({
            'id': article.id,
            'name': article.name
        })
    if request.method == 'PATCH':
        data = loads(request.body)
        # Fix: only overwrite fields the client actually sent. Previously a
        # PATCH body without "name" silently set the name to None.
        if 'name' in data:
            article.name = data['name']
            article.save()
        return JsonResponse({
            'id': article.id,
            'name': article.name
        })
    if request.method == 'DELETE':
        article.delete()
        return JsonResponse({'id': id})
    return HttpResponseNotAllowed(['GET', 'PATCH', 'DELETE'])
| Python | 85 | 25.952942 | 68 | /05-django/solutions/exercise-3-models/shop/views.py | 0.571803 | 0.561763 |
Powercoders-International/ft-web-dev | refs/heads/main | from django.contrib.admin import ModelAdmin, register
from shop.models import Article
@register(Article)
class ArticelAdmin(ModelAdmin):
    """Default Django admin page for Article.

    NOTE(review): class name is a typo for "ArticleAdmin"; it is referenced
    nowhere else, but left unchanged here to avoid interface churn.
    """
    pass
| Python | 7 | 20.142857 | 53 | /05-django/solutions/exercise-3-models/shop/admin.py | 0.804054 | 0.804054 |
da-mob/capgfirstjenkin | refs/heads/master | def add(x, y):
"""Add function"""
return x+y
def subtract(x, y):
    """Subtract function"""
    difference = x - y
    return difference
def multiply(x, y):
    """Multiply function"""
    product = x * y
    return product
| Python | 9 | 18.666666 | 27 | /Calci.py | 0.570621 | 0.570621 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | import datetime
from django.contrib.auth.models import User
from django.contrib.auth.models import AbstractUser
from django.db import models
from django.utils.translation import gettext_lazy as _
# from ProjektPortfolioLab.donation import settings
from django.conf import settings
# Rebinds the imported User name to the AUTH_USER_MODEL setting (a lazy
# "app.Model" string reference) for the ForeignKey declarations below.
User = settings.AUTH_USER_MODEL
class Category(models.Model):
    """A donation category (selectable per institution and per donation)."""
    name = models.CharField(max_length=64)
    def __str__(self):
        return self.name
# Choice values for Institution.type; the 1-character key is what is stored.
INSTITUTIONS = (
    ('1', "Fundacja"),  # foundation
    ('2', "Organizacja pozarządowa"),  # non-governmental organization
    ('3', "Zbiórka lokalna"),  # local collection drive
)
class Institution(models.Model):
    """An organization that can receive donations.

    NOTE(review): "istitution_name" is a typo for "institution_name";
    renaming it would require a schema migration, so it is documented
    rather than changed here.
    """
    istitution_name = models.CharField(max_length=128)
    description = models.TextField()
    # 'type' shadows the builtin, but as a model field name this is harmless.
    type = models.CharField(max_length=2, choices=INSTITUTIONS, default='1')
    categories = models.ManyToManyField(Category)
    def __str__(self):
        return self.istitution_name
class Donation(models.Model):
    """One pledged donation: contents, recipient institution, pickup details."""
    # quantity of bags/items (the donation form calls this field "bags")
    quantity = models.IntegerField()
    categories = models.ManyToManyField(Category)
    institution = models.ForeignKey(Institution, on_delete=models.CASCADE)
    address = models.TextField()
    phone_number = models.CharField(max_length=12)
    city = models.CharField(max_length=64)
    zip_code = models.TextField()
    pick_up_date = models.DateField()
    # default is the datetime.time *class*; Django calls callables, so the
    # effective default is midnight (00:00).
    pick_up_time = models.TimeField(default=datetime.time)
    pick_up_comment = models.TextField()
    user = models.ForeignKey(User, on_delete=models.CASCADE)
#
# class CustomUser(AbstractUser):
# email = models.EmailField(_('email address'), unique=True)
| Python | 51 | 28.803921 | 76 | /charitydonation/models.py | 0.719264 | 0.710059 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | from django.contrib import admin
from .models import Category, Institution, Donation
admin.site.register(Category)
admin.site.register(Institution)
admin.site.register(Donation) | Python | 6 | 28.833334 | 51 | /charitydonation/admin.py | 0.837079 | 0.837079 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | """donation URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from charitydonation.views import LandingPage, AddDonation, UserView, PasswordChangeView, PasswordChangeDoneView, DonationReady
from accounts.views import RegisterView, LoginView, LogoutView
# Project-level routes; the name= values are used by redirect() and templates.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('', LandingPage.as_view(), name='landing-page'),
    path('add_donation/', AddDonation.as_view(), name='add-donation'),
    path('login/', LoginView.as_view(), name='login'),
    path('register/', RegisterView.as_view(), name='register'),
    path('logout/', LogoutView.as_view(), name='logout'),
    path('user_view/', UserView.as_view(), name='user-view'),
    path('password_change/', PasswordChangeView.as_view(), name='user-change'),
    path('password_change/done/', PasswordChangeDoneView.as_view(), name='user-change-done'),
    path('add_donation/form-confirmation/', DonationReady.as_view(), name='form-ready'),
]
| Python | 35 | 44 | 127 | /donation/urls.py | 0.707937 | 0.702857 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from .models import Donation
from django import forms
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.contrib.auth import get_user_model
# class CreateUserForm(UserCreationForm):
# class Meta:
# model = get_user_model()
# fields = ('email', 'username', 'password1', 'password2')
class AddDonationForm(forms.ModelForm):
    """Form backing the add-donation view.

    Fix: the original subclassed forms.Form, for which the inner Meta class
    is silently ignored — the form had no fields at all and always validated
    with empty cleaned_data. ModelForm is what this Meta declaration requires.
    """
    class Meta:
        model = Donation
        fields = ('quantity', 'categories', 'institution', 'address', 'phone_number',
                  'city', 'zip_code', 'pick_up_date', 'pick_up_time', 'pick_up_comment', 'user')
| Python | 20 | 37.450001 | 96 | /charitydonation/forms.py | 0.704811 | 0.704811 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | # Generated by Django 3.1 on 2021-09-13 16:42
import datetime
from django.db import migrations, models
# Adds Donation.pick_up_time and relaxes pick_up_date to a plain DateField.
class Migration(migrations.Migration):
    dependencies = [
        ('charitydonation', '0002_auto_20210909_1554'),
    ]
    operations = [
        migrations.AddField(
            model_name='donation',
            name='pick_up_time',
            # default is the datetime.time class; Django calls it, so
            # existing rows get midnight (00:00).
            field=models.TimeField(default=datetime.time),
        ),
        migrations.AlterField(
            model_name='donation',
            name='pick_up_date',
            field=models.DateField(),
        ),
    ]
| Python | 24 | 22.75 | 58 | /charitydonation/migrations/0003_auto_20210913_1642.py | 0.578947 | 0.526316 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | from django.shortcuts import render
from django.views import View, generic
from django.contrib.auth import views
# from .forms import RegisterForm
from django.shortcuts import render
from django.views import View, generic
# from charitydonation.models import Donation, Institution
from .forms import CreateUserForm, LoginForm, CustomUserCreationForm
from django.contrib.auth import login, logout, authenticate, views
from django.shortcuts import redirect
from django.urls import reverse_lazy
class LoginView(View):
    """Render the e-mail/password login form and log the user in on POST."""

    def get(self, request):
        return render(request, 'login.html', {'form': LoginForm()})

    def post(self, request, *args, **kwargs):
        form = LoginForm(request.POST)
        if not form.is_valid():
            return render(request, 'login.html', {'form': form})
        credentials = form.cleaned_data
        user = authenticate(
            email=credentials['email'], password=credentials['password'])
        if user is None:
            # Bad credentials: redisplay the form.
            return render(request, 'login.html', {'form': form})
        login(request, user)
        return redirect('landing-page')
class RegisterView(View):
    """Display the sign-up form and create the account on POST."""

    def get(self, request):
        return render(request, 'register.html', {'form': CustomUserCreationForm()})

    def post(self, request):
        form = CustomUserCreationForm(request.POST)
        if not form.is_valid():
            return render(request, 'register.html', {'form': form})
        form.save()
        return redirect('landing-page')
class LogoutView(View):
    """Log the current user out and send them to the landing page."""
    def get(self, request):
        logout(request)
        return redirect('landing-page')
| Python | 59 | 34.576271 | 105 | /accounts/views.py | 0.627918 | 0.627442 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | from django.shortcuts import render
from django.views import View, generic
from .models import Donation, Institution, Category
from .forms import AddDonationForm
from django.contrib.auth import login, logout, authenticate, views
from django.shortcuts import redirect
from django.urls import reverse_lazy
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.views.generic.edit import CreateView
from django.db.models import Avg, Count
from django.core.paginator import Paginator
from django.contrib.auth.views import PasswordChangeView, PasswordChangeDoneView
from django.http import HttpResponse
from django.db.models import Q, Sum
class LandingPage(View):
    """Home page: overall donation totals plus institutions grouped by type."""

    def get(self, request):
        # Fix: Sum() over an empty queryset yields None, not 0.
        count_b = Donation.objects.aggregate(Sum('quantity'))['quantity__sum'] or 0
        # Fix: .distinct('field') is PostgreSQL-only; values().distinct()
        # counts distinct institutions portably on any backend.
        count_institutions = (
            Donation.objects.values('institution').distinct().count()
        )
        context = {
            'count_b': count_b,
            'count_institutions': count_institutions,
            'all_institution_fund': Institution.objects.filter(type='1'),
            'all_institution_org': Institution.objects.filter(type='2'),
            'all_institution_lok': Institution.objects.filter(type='3'),
        }
        return render(request, 'index.html', context)
class AddDonation(LoginRequiredMixin, View):
    """Donation form view; anonymous users are redirected to login_url."""

    login_url = '/'

    def get(self, request):
        return render(request, 'form.html', {
            'categories_all': Category.objects.all(),
            'institutions_all': Institution.objects.all(),
            'form': AddDonationForm(),
        })

    def post(self, request):
        form = AddDonationForm(request.POST)
        if not form.is_valid():
            return render(request, 'form.html', {'form': form})
        data = form.cleaned_data
        # Fix: a ManyToManyField cannot be passed to objects.create() —
        # Django raises TypeError on direct M2M assignment. Create the row
        # first, then attach the categories with .set(). (create() also
        # saves, so the original redundant donat.save() is dropped.)
        donat = Donation.objects.create(
            quantity=data['bags'],
            institution=data['organization'],
            address=data['address'],
            phone_number=data['phone'],
            city=data['city'],
            zip_code=data['postcode'],
            pick_up_date=data['data'],
            pick_up_time=data['time'],
            pick_up_comment=data['more_info'],
            user=request.user,
        )
        donat.categories.set(data['categories'])
        return render(request, 'form-confirmation.html', {'form': form})
# return HttpResponse("Źle")
# class LoginView(views.LoginView):
# form_class = LoginForm
# template_name = 'login.html'
#
#
# class RegisterView(generic.CreateView):
# form_class = CreateUserForm
# template_name = 'register.html'
# success_url = reverse_lazy('login')
class UserView(LoginRequiredMixin, View):
    """Shows the logged-in user's own donations."""
    login_url = '/'
    def get(self, request):
        donation_user = Donation.objects.filter(user=request.user)
        return render(request, 'user-view.html', {'donation_user': donation_user})
class PasswordChangeView(PasswordChangeView):
    """Password-change form; only the template and redirect are customized.

    NOTE(review): this class shadows the imported
    django.contrib.auth.views.PasswordChangeView it extends — it works,
    but a distinct name would be clearer.
    """
    template_name = 'change-password.html'
    success_url = 'done/'
class PasswordChangeDoneView(PasswordChangeDoneView):
    """Confirmation page shown after a successful password change."""
    template_name = 'change-password-done.html'
class DonationReady(View):
    """Static confirmation page shown after a donation is submitted."""
    def get(self, request):
        return render(request, 'form-confirmation.html')
| Python | 113 | 38.486725 | 107 | /charitydonation/views.py | 0.641416 | 0.640744 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | # Generated by Django 3.1 on 2021-09-09 15:54
import datetime
from django.db import migrations, models
# Drops Donation.pick_up_time, widens pick_up_date to DateTimeField, and
# normalizes the Institution.type choices.
class Migration(migrations.Migration):
    dependencies = [
        ('charitydonation', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='donation',
            name='pick_up_time',
        ),
        migrations.AlterField(
            model_name='donation',
            name='pick_up_date',
            # NOTE(review): verbose_name is the datetime.datetime class
            # object rather than a label string — almost certainly
            # unintended, but migrations are historical records; left as-is.
            field=models.DateTimeField(verbose_name=datetime.datetime),
        ),
        migrations.AlterField(
            model_name='institution',
            name='type',
            field=models.CharField(choices=[('1', 'Fundacja'), ('2', 'Organizacja pozarządowa'), ('3', 'Zbiórka lokalna')], default='1', max_length=2),
        ),
    ]
| Python | 28 | 27.357143 | 151 | /charitydonation/migrations/0002_auto_20210909_1554.py | 0.574307 | 0.54534 |
krzysztof-dudzic/ProjektPortfolioLab | refs/heads/main | from django.db import models
from django.contrib.auth.models import (
BaseUserManager, AbstractBaseUser, UserManager
)
#
# class UserManager(BaseUserManager):
# def create_user(self, email, password=None):
# """
# Creates and saves a User with the given email and password.
# """
# if not email:
# raise ValueError('Users must have an email address')
#
# if not password:
# raise ValueError("Users must have a password!!! ")
# user = self.model(
# email=self.normalize_email(email),
# )
#
# user.set_password(password)
# user.staff = is_staff
# user.admin = is_admin
# user.active = is_active
# # user.save(using=self._db)
# return user
#
# def create_staffuser(self, email, password):
# """
# Creates and saves a staff user with the given email and password.
# """
# user = self.create_user(
# email,
# password=password,
# )
# user.staff = True
# # user.save(using=self._db)
# return user
#
# def create_superuser(self, email, password):
# """
# Creates and saves a superuser with the given email and password.
# """
# user = self.create_user(
# email,
# password=password,
# )
# user.staff = True
# user.admin = True
# # user.save(using=self._db)
# return user
#
# class User(AbstractBaseUser):
# email = models.EmailField(
# verbose_name='email address',
# max_length=255,
# unique=True,
# )
# # full_name = models.CharField(max_length=255, blank=True, null=True)
# is_active = models.BooleanField(default=True)
# staff = models.BooleanField(default=False) # a admin user; non super-user
# admin = models.BooleanField(default=False) # a superuser
# timestamp = models.DateTimeField(auto_now_add=True)
# # notice the absence of a "Password field", that is built in.
#
# USERNAME_FIELD = 'email'
# REQUIRED_FIELDS = [] # Email & Password are required by default.
# objects = UserManager()
#
# def get_full_name(self):
# # The user is identified by their email address
# return self.email
#
# def get_short_name(self):
# # The user is identified by their email address
# return self.email
#
# def __str__(self):
# return self.email
#
# def has_perm(self, perm, obj=None):
# "Does the user have a specific permission?"
# # Simplest possible answer: Yes, always
# return True
#
# def has_module_perms(self, app_label):
# "Does the user have permissions to view the app `app_label`?"
# # Simplest possible answer: Yes, always
# return True
#
# @property
# def is_staff(self):
# "Is the user a member of staff?"
# return self.staff
#
# @property
# def is_active(self):
# "Is the user a admin member?"
# return self.active
#
# @property
# def is_admin(self):
# "Is the user a admin member?"
# return self.admin
#
#
#
#
#
# class GuestEmail(models.Model):
# email = models.EmailField()
# active = models.BooleanField(default=True)
# update = models.DateTimeField(auto_now=True)
# timestamp = models.DateTimeField(auto_now_add=True)
#
# def __str__(self):
# return self.email
from django.db import models
from django.contrib.auth.models import AbstractUser
from django.utils.translation import gettext_lazy as _
class UserManager(BaseUserManager):
    """Manager for the e-mail-keyed CustomUser model."""

    def create_user(self, email, password=None, **extra_fields):
        """Create and persist a regular user; email and password are mandatory."""
        if not email:
            raise ValueError('Users must have an email address')
        if not password:
            raise ValueError("Users must have a password!!! ")
        for flag, value in (('is_staff', False),
                            ('is_superuser', False),
                            ('is_active', True)):
            extra_fields.setdefault(flag, value)
        user = self.model(email=self.normalize_email(email), **extra_fields)
        user.set_password(password)
        user.save()
        return user

    def create_superuser(self, email, password, **extra_fields):
        """Create a superuser; is_staff and is_superuser must remain True."""
        for flag in ('is_staff', 'is_superuser', 'is_active'):
            extra_fields.setdefault(flag, True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError(_('Superuser must have is_staff=True.'))
        if extra_fields.get('is_superuser') is not True:
            raise ValueError(_('Superuser must have is_superuser=True.'))
        return self.create_user(email, password, **extra_fields)
class CustomUser(AbstractUser):
    """User model that logs in with an e-mail address instead of a username."""
    # Remove AbstractUser's username column entirely.
    username = None
    email = models.EmailField(_('email address'), max_length=255, unique=True)
    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = []  # email & password are always required by Django
    objects = UserManager()
    def __str__(self):
        return self.email
| Python | 163 | 29.969326 | 79 | /accounts/models.py | 0.598851 | 0.597068 |
jjcaudill/apartment_hunter | refs/heads/master | from requests import post as post_request
from getopt import getopt
from sys import maxsize, argv
from os import environ
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
# TODO: Property values are hardcoded, possibly we can accept them
APARTMENT_LIST_REQUEST = {
'operationName': 'propertyFloorplansSummary',
'query': 'query propertyFloorplansSummary($propertyId: ID!, $amliPropertyId: ID!, $moveInDate: String) {\n propertyFloorplansSummary(propertyId: $propertyId, amliPropertyId: $amliPropertyId, moveInDate: $moveInDate) {\nfloorplanName\nbathroomMin\nbedroomMin\npriceMin\npriceMax\nsqftMin\navailableUnitCount\nfloorplanId}\n}\n',
'variables': {
'amliPropertyId': 89220,
# We will insert moveInDate. ex: 'moveInDate': '2020-02-26'
'propertyId': 'XK-AoxAAACIA_JnR'
}
}
FLOORPLAN_ID_REQUEST = {
'operationName': 'floorplan',
'query': 'query floorplan($id: ID, $amliId: ID) {floorplan(id: $id, amliId: $amliId) {cms}}',
'variables': {
# We will insert ID. ex: 'amliId': '1752'
}
}
# TODO: Be able to change the moveInDate and floorplanId
FLOORPLAN_DETAILS_REQUEST = {
'operationName': 'units',
'query': 'query units($propertyId: ID!, $floorplanId: ID, $amliFloorplanId: ID, $moveInDate: String, $pets: String) {\n units(propertyId: $propertyId, floorplanId: $floorplanId, amliFloorplanId: $amliFloorplanId, moveInDate: $moveInDate, pets: $pets) {\nfloor\npets\nunitNumber\nrpAvailableDate\nrent\nsqftMin\n}\n}\n',
'variables': {
# We will insert amliFloorplanId. ex: 'amliFloorplanId': '1752'
# We will insert floorplanId. ex: 'floorplanId': 'XMwgnSwAADgA00ur'
# We will insert moveInDate. ex: 'moveInDate': '2020-02-29'
'pets': 'Dogs',
'propertyId': 89220
}
}
GRAPHQL_ENDPOINT = 'https://www.amli.com/graphql'
RECIPIENT_EMAILS = ['jjc2011@gmail.com', 'maerth@gmail.com']
# TODO: Way to interact with database to see history and how things have changed
# TODO: Add option to insert email and specify the apartment structure
def usage():
    """Print the command-line help text and return exit status 1."""
    help_text = (
        'Script to find availibility of AMLI apartments:\n'
        'Parameters:\n'
        '\t--move_in_date or -m: Specify a move in date. Required!\n'
        '\t--floorplans or -f: Specify a comma delimited list of floorplans\n'
        '\t--price_max or -p: Specify maximum price you are willing to pay\n'
        '\t--sqft_min or -s: Specify minimum square footage required\n'
        '\t--bedrooms_min: Specify minimum number of bedrooms required\n'
        '\t--bathrooms_min: Specify minimum number of bathrooms required\n'
    )
    print(help_text)
    return 1
def generate_html_font(text, size):
    """Wrap *text* in a <font> tag with the given size and verdana face."""
    return f'<font size="{size}" face="verdana">{text}</font>'
def generate_html(apartment_map):
    """Render {Floorplan: [Apartment, ...]} into the e-mail HTML body.

    Returns (html, total_apartment_count). Floorplans with no apartments
    are skipped entirely.
    """
    total_units = 0
    total_plans = 0
    body = ''
    for plan, units in apartment_map.items():
        if not units:
            continue
        total_plans += 1
        heading = generate_html_font(
            'Floorplan {}: {} sqft'.format(plan.name, plan.square_footage), 4)
        image = '<img src="{}" alt="Floorplan {}">'.format(plan.img_url, plan.name)
        body += '<li>{}{}<ul>'.format(heading, image)
        for unit in units:
            total_units += 1
            details = 'Unit {}: Floor {}, Price ${}, Pets {}, Date Available {}'.format(
                unit.unit, unit.floor, unit.rent, unit.pets, unit.date_available)
            body += '<li>{}</li>'.format(generate_html_font(details, 2))
        body += '</ul></li>'
    summary = 'Found {} apartments for {} different floorplans!{}'.format(
        total_units, total_plans, body)
    results = '<body><ul>{}</body></ul>'.format(generate_html_font(summary, 5))
    return results, total_units
# TODO: insert into database and use that to diff.
def email_results(apartment_map):
    """E-mail the rendered apartment summary to every configured recipient."""
    print('Sending email!')
    sender = environ.get('SENDGRID_USERNAME')
    api_key = environ.get('SENDGRID_API_KEY')
    html_content, available_apartments = generate_html(apartment_map)
    subject = 'Found {} available apartments!'.format(available_apartments)
    for recipient in RECIPIENT_EMAILS:
        message = Mail(
            from_email=sender,
            to_emails=recipient,
            subject=subject,
            html_content=html_content)
        try:
            SendGridAPIClient(api_key).send(message)
        except Exception as e:
            # Best-effort: report the failure and keep mailing the rest.
            print(str(e))
def fetch_all_floorplans(move_in_date):
    """POST the floorplan-summary query; return the list of floorplan dicts.

    Each entry looks like:
        {"floorplanName": "A3", "bathroomMin": 1, "bedroomMin": 1,
         "priceMax": 1896, "sqftMin": 742, "availableUnitCount": 1,
         "floorplanId": "1752"}
    """
    # NOTE(review): this mutates the shared module-level template in place.
    payload = APARTMENT_LIST_REQUEST
    payload['variables']['moveInDate'] = move_in_date
    response = post_request(GRAPHQL_ENDPOINT, json=payload,
                            headers={'Content-Type': 'application/json'})
    if response.status_code != 200:
        raise Exception('Failed to grab floorplans')
    return response.json()['data']['propertyFloorplansSummary']
def fetch_floorplan_details(id):
    """POST the floorplan-CMS query for one floorplan id; return the cms dict.

    The returned dict carries (among other things) the floorplan image URL at
    ['data']['main_image']['url'] and the second, string-form id at ['id'].
    """
    payload = FLOORPLAN_ID_REQUEST
    payload['variables']['amliId'] = id
    response = post_request(GRAPHQL_ENDPOINT, json=payload,
                            headers={'Content-Type': 'application/json'})
    if response.status_code != 200:
        raise Exception('Failed to grab floorplan details')
    return response.json()['data']['floorplan']['cms']
def fetch_apartments(floorplan, move_in_date):
    """POST the units query for one Floorplan; return the list of unit dicts.

    Each entry looks like:
        {"floor": 1, "pets": "Cats", "unitNumber": "150",
         "rpAvailableDate": "2020-02-29", "rent": 1896}
    """
    payload = FLOORPLAN_DETAILS_REQUEST
    payload['variables']['amliFloorplanId'] = floorplan.number_id
    payload['variables']['floorplanId'] = floorplan.weird_id
    payload['variables']['moveInDate'] = move_in_date
    response = post_request(GRAPHQL_ENDPOINT, json=payload,
                            headers={'Content-Type': 'application/json'})
    if response.status_code != 200:
        raise Exception('Failed to grab apartments')
    return response.json()['data']['units']
class Floorplan:
    """Holds data specific to floorplan"""
    def __init__(self, data):
        # data: one floorplan dict as returned by fetch_all_floorplans()
        self.bathrooms = data['bathroomMin']
        self.bedrooms = data['bedroomMin']
        self.max_rent = data['priceMax']
        self.name = data['floorplanName']
        self.number_id = data['floorplanId']
        self.square_footage = data['sqftMin']
    def fetch_details(self):
        """For some reason they have two ids and both are needed on fetching"""
        # Network call: resolves the CMS record for the image URL and the
        # second (string-form) id used by fetch_apartments().
        cms = fetch_floorplan_details(self.number_id)
        self.img_url = cms['data']['main_image']['url']
        self.weird_id = cms['id']
class Apartment:
    """Holds data specific to apartment"""
    def __init__(self, data, floorplan):
        # data: one unit dict from fetch_apartments(); floorplan: owning Floorplan
        self.date_available = data['rpAvailableDate']
        self.floor = data['floor']
        self.floorplan = floorplan
        self.pets = data['pets']
        self.rent = data['rent']
        self.unit = data['unitNumber']
def main():
    """CLI entry point: filter floorplans, fetch matching units, e-mail results.

    Returns 0 on success, 1 on usage error (missing move-in date).
    """
    # Fix: the long-option list registered 'moveInDate=' while the loop below
    # checks for '--move_in_date' (as documented by usage()), so the long
    # form was never usable — getopt rejected it. Register the documented name.
    opts, args = getopt(argv[1:], 'hs:p:f:m:', ['help', 'bathrooms_min=', 'bedrooms_min=', 'sqft_min=', 'price_max=', 'floorplans=', 'move_in_date='])
    specified_floorplans = []
    sqft_min = bedrooms_min = bathrooms_min = 0
    price_max = maxsize
    move_in_date = ''
    for opt, val in opts:
        if opt in ('-h', '--help'):
            return usage()
        elif opt == '--bathrooms_min':
            bathrooms_min = int(val)
        elif opt == '--bedrooms_min':
            bedrooms_min = int(val)
        elif opt in ('-s', '--sqft_min'):
            sqft_min = int(val)
        elif opt in ('-p', '--price_max'):
            price_max = int(val)
        elif opt in ('-f', '--floorplans'):
            specified_floorplans = val.split(',')
        elif opt in ('-m', '--move_in_date'):
            move_in_date = val
    if not move_in_date:
        return usage()
    floorplans = []
    apartment_map = {}  # Floorplan to list of Apartments
    print('Grabbing floorplans!')
    floorplan_data = fetch_all_floorplans(move_in_date)
    print('Fetched floorplans!')
    # Convert data into Floorplans and add if matches filters
    for data in floorplan_data:
        if data['availableUnitCount'] == 0:
            continue
        floorplan = Floorplan(data)
        if floorplan.bathrooms < bathrooms_min:
            continue
        if floorplan.bedrooms < bedrooms_min:
            continue
        if floorplan.square_footage < sqft_min:
            continue
        if floorplan.max_rent > price_max:
            continue
        if specified_floorplans and floorplan.name not in specified_floorplans:
            continue
        floorplan.fetch_details()
        floorplans.append(floorplan)
    print('Parsed floorplans!')
    # Ok, now we have a list of all desired floorplans meeting our requirements. Time to get the apartments!
    for floorplan in floorplans:
        data_for_apartments = fetch_apartments(floorplan, move_in_date)
        apartments = []
        for data in data_for_apartments:
            apartment = Apartment(data, floorplan)
            if apartment.rent > price_max:
                continue
            apartments.append(apartment)
        if apartments:
            apartment_map[floorplan] = apartments
    print('Parsed apartments!')
    # Now that we have the apartment data, lets email it to ourselves.
    email_results(apartment_map)
    return 0
if __name__ == '__main__':
    # Script entry point (main's return value is not used as an exit code).
    main()
| Python | 272 | 34.547794 | 330 | /scripts/python/amli_fetch.py | 0.654463 | 0.641225 |
Sprunth/TFO2ReelLogger | refs/heads/master | import os.path
import sqlite3
import Scraper
import sys
def create_db():
    """Create reellog.db and its single `reellog` table.

    The UNIQUE constraint de-duplicates identical catch entries; calling
    this when the table already exists raises sqlite3.OperationalError.
    """
    connection = sqlite3.connect('reellog.db')
    cursor = connection.cursor()
    cursor.execute('''CREATE TABLE reellog
              (lure text, body text, location text, species text, level integer, weight real, class text,
              unique(lure, body, location, species, level, weight, class))''')
    connection.commit()
    connection.close()
def sample_db_entry():
    """Insert one hard-coded example row (handy for manual testing)."""
    scrape_data = "'Culprit Worm', 'Amazon River', 'Baia de Santa Rosa', 'Matrincha', '6', '0.062', 'Wimpy III'"
    connection = sqlite3.connect('reellog.db')
    cursor = connection.cursor()
    cursor.execute("INSERT INTO reellog VALUES (%s)" % scrape_data)
    connection.commit()
    connection.close()
def parse_and_store(html_file_path):
    """Scrape the given reel-log HTML file and insert new rows into reellog.db.

    Rows violating the table's UNIQUE constraint are skipped and echoed with
    '='; inserted rows are echoed with '+'. A summary count is printed last.

    NOTE(review): rows arrive from Scraper.scrape() pre-quoted and are spliced
    into the SQL via string formatting — unsafe if a field ever contains a
    quote. Parameterized queries would require changing Scraper.scrape()'s
    output format as well, so this is flagged rather than changed here.
    """
    connection = sqlite3.connect('reellog.db')
    cursor = connection.cursor()
    cursor.execute("SELECT COUNT(*) from reellog")
    (old_entry_count, ) = cursor.fetchone()
    for entry in Scraper.scrape(html_file_path):
        statement = "INSERT INTO reellog VALUES (%s)" % entry
        try:
            cursor.execute(statement)
            print('+ %s' % entry)
        except sqlite3.IntegrityError:
            # Duplicate of an existing row; skip it.
            print('= %s' % entry)
    connection.commit()
    cursor.execute("SELECT COUNT(*) from reellog")
    (new_entry_count,) = cursor.fetchone()
    connection.close()
    print("%i new entries added" % (int(new_entry_count) - int(old_entry_count)))
if __name__ == '__main__':
    # Usage: python db.py <reel-log.html>
    if len(sys.argv) != 2:
        print("Need one argument: path to html_file", file=sys.stderr)
        sys.exit(1)
    # Lazily create the database on first run.
    if not os.path.isfile('reellog.db'):
        print('No reellog.db found, creating')
        create_db()
    parse_and_store(sys.argv[1])
    # sample_db_entry()
    print('Done')
| Python | 71 | 23.02817 | 112 | /db.py | 0.600234 | 0.592614 |
Sprunth/TFO2ReelLogger | refs/heads/master | from bs4 import BeautifulSoup
from pprint import pprint
from functools import reduce
import sys
def scrape(html_file_path):
    """Parse a reel-log HTML table into pre-quoted SQL value strings.

    Returns a list of strings, each of the form
    "'lure', 'body', 'location', 'species', 'level', 'weight', 'class'",
    ready to be spliced into an INSERT statement by db.parse_and_store().
    The parsing is positional and brittle: it assumes the exact cell layout
    of the source page (row 0 is the header; columns are lure, body of
    water, location, fish, size).
    """
    soup = BeautifulSoup(open(html_file_path), 'html.parser')
    rows = soup.find_all('tr')
    commands = list()
    for row in rows[1:]:
        cols = row.find_all('td')
        # Column 0: lure name is the first descendant node of the cell.
        lure_string = list(cols[0].descendants)[0]
        lure = lure_string.text
        body_of_water = cols[1].string
        location = cols[2].string
        fish_string = cols[3]
        fish_type = fish_string.font.string
        fish_level = fish_string.find('font').text
        # Column 4: a sequence of <font> fragments; find the one holding
        # the weight (contains "lb"), everything after it is the class.
        size_strings = list(map(lambda x: x.string, cols[4].find_all('font')))
        weight_idx = -1
        for idx in range(len(size_strings)):
            if 'lb' in size_strings[idx]:
                weight_idx = idx
                break
        weight = size_strings[weight_idx].split()[0]
        fish_class = reduce(lambda x, y: "%s %s" % (x, y), size_strings[weight_idx+1:])
        # Normalize the letter-spaced class labels used by the site.
        if 'L e g e n d a r y' in fish_class:
            fish_class = 'Legendary'
        elif 'B R U I S E R' in fish_class:
            fish_class = 'Bruiser'
        # size not stored for now
        # size = reduce(lambda x, y: "%s %s" % (x, y), size_strings[:-3])
        command = "'%s', '%s', '%s', '%s', '%s', '%s', '%s'" % (lure, body_of_water, location, fish_type, fish_level,
                                                                weight, fish_class)
        commands.append(command)
    return commands
if __name__ == '__main__':
    # Usage: python Scraper.py <reel-log.html> — prints the scraped rows.
    if len(sys.argv) != 2:
        print("Need one argument: path to html_file", file=sys.stderr)
        sys.exit(1)
    scrape_data = scrape(sys.argv[1])
    pprint(scrape_data)
| Python | 59 | 28.067797 | 117 | /Scraper.py | 0.538192 | 0.529446 |
FabianeTelles/REP_PYTHON | refs/heads/master | n = input('Digite algo: ')
print(n .isnumeric())
| Python | 2 | 23.5 | 26 | /teste9.py | 0.607843 | 0.607843 |
FabianeTelles/REP_PYTHON | refs/heads/master | Nome = input('Digite seu nome:')
print('Seja bem vindo,',format(Nome))
| Python | 2 | 34.5 | 37 | /ex005.py | 0.418803 | 0.418803 |
FabianeTelles/REP_PYTHON | refs/heads/master | n1 = input('Digite um valor: ')
print(type (n1))
| Python | 2 | 23.5 | 31 | /teste1.py | 0.607843 | 0.568627 |
FabianeTelles/REP_PYTHON | refs/heads/master | Numero1 = int (input ('Primeiro numero = ' ))
Numero2 = int (input ('Segundo numero = '))
print (Numero1 + Numero2)
| Python | 3 | 37.666668 | 45 | /Desafio 3.py | 0.490323 | 0.464516 |
FabianeTelles/REP_PYTHON | refs/heads/master | n = input('Digite algo:')
print ('Esse número é', (type (n)))
print ('Ele é númerico ? ', n.isnumeric ())
print (' Ele é alfabético? ', n.isalpha ())
print (' Ele é um decimal? ', n.isdecimal ())
print (' Ele é minúsculo? ', n .islower ())
print ('Ele é maiúsculo?', n.isupper ())
print ('Ele é um dígito?', n.isdigit ())
print ('Verificação concluída') | Python | 9 | 38.333332 | 45 | /teste11.py | 0.612188 | 0.612188 |
canSAS-org/NXcanSAS_examples | refs/heads/master | #!/usr/bin/env python
'''
Create HDF5 example files in the NXcanSAS format
These examples are based on the examples described at the canSAS2012 meeting.
:see: http://cansas-org.github.io/canSAS2012/examples.html
'''
import datetime
import h5py
import inspect
import numpy
import os
import sys
import punx.h5structure
def basic_setup():
    '''
    Create the HDF5 file and basic structure, return the nxdata object

    The output file is named after the *calling* example function, found via
    the caller's stack frame, and the caller's name and first docstring line
    are recorded in the file as "run" and "title".

    Layout written: SASroot (NXroot) / sasentry (NXentry) / sasdata (NXdata),
    with NeXus "default"/"signal"/"axes" attributes pointing at the canonical
    plottable data, plus the parallel canSAS "SAS_class" attributes.
    '''
    # NOTE: inspect.stack()[1] is the caller's frame — this function must be
    # called directly from the example function being recorded.
    caller_name = inspect.stack()[1][3]
    filename = caller_name + ".h5"

    root = h5py.File(filename, "w")
    # optional file-history metadata on the NXroot group
    root.attrs['file_name'] = filename
    root.attrs['HDF5_Version'] = h5py.version.hdf5_version
    root.attrs['h5py_version'] = h5py.version.version
    root.attrs['file_time'] = str(datetime.datetime.now())
    root.attrs['producer'] = __file__  # os.path.abspath(__file__)
    root.attrs['default'] = "sasentry"

    entry = root.create_group("sasentry")
    entry.attrs["NX_class"] = "NXentry"
    entry.attrs["SAS_class"] = "SASentry"
    entry.attrs['default'] = "sasdata"
    entry.create_dataset("definition", data="NXcanSAS")

    data = entry.create_group("sasdata")
    data.attrs["NX_class"] = "NXdata"
    data.attrs["SAS_class"] = "SASdata"
    data.attrs["signal"] = "I"
    data.attrs["axes"] = "Q"

    # provenance: record the example function's name as "run" ...
    entry.create_dataset('run', data=caller_name)
    # ... and the first line of its docstring as "title"
    members = dict(inspect.getmembers(inspect.getmodule(basic_setup)))
    summary = inspect.getdoc(members[caller_name]).strip().splitlines()[0]
    entry.create_dataset('title', data=summary)
    return data
def fake_data(*dimensions):
    '''
    create a dataset array from random numbers with the supplied dimension(s)

    Examples: ``fake_data(5)`` (1D), ``fake_data(5, 10)`` (2D),
    ``fake_data(3, 10, 50)`` (three 2D images).

    Values are uniform random in [0, 1) — purely placeholder data, there is
    no science to be interpreted from them.  Accepting ``*dimensions`` lets
    callers request any array shape.
    '''
    shape = tuple(dimensions)
    return numpy.random.rand(*shape)
def example_01_1D_I_Q():
    '''
    I(|Q|): The most common SAS data, a one-dimensional set of data.

    Writes one 1-D intensity dataset with its matching Q axis.
    '''
    nxdata = basic_setup()
    nxdata.attrs["Q_indices"] = 0
    npts = 10
    for name, units in (("I", "1/m"), ("Q", "1/nm")):
        dset = nxdata.create_dataset(name, data=fake_data(npts))
        dset.attrs["units"] = units
    nxdata.file.close()
def example_02_2D_image():
    '''
    I(|Q|): Analysis of 2-D images is common

    The "Q_indices" attribute ties the "Q" field to both dimensions of the
    plottable "I" data: it lists the indices of "I" that must be used when
    looking up the corresponding axis value in "Q".
    '''
    nxdata = basic_setup()
    nxdata.attrs["axes"] = ["Q", "Q"]
    nxdata.attrs["Q_indices"] = [0, 1]
    rows, cols = 10, 50
    for name, units in (("I", "1/m"), ("Q", "1/nm")):
        dset = nxdata.create_dataset(name, data=fake_data(rows, cols))
        dset.attrs["units"] = units
    nxdata.file.close()
def example_03_2D_image_and_uncertainties():
    '''
    I(|Q|) +/- sigma(|Q|): Uncertainties (a.k.a "errors") may be identified.

    A dataset of the same shape as "I" holds the uncertainties; its name is
    given in the "uncertainties" attribute on "I" (canSAS prefers "Idev").
    The same technique can be applied to any dataset.

    structural model::

        SASroot
          SASentry
            SASdata
              I: float[10, 50]
                @uncertainties=Idev
              Q: float[10, 50]
              Idev: float[10, 50]
    '''
    nxdata = basic_setup()
    # bug fix: ".strip()" was a no-op, leaving "axes" as the single string
    # "Q Q"; NeXus expects a list of per-dimension axis names (cf. example_02)
    nxdata.attrs["axes"] = "Q Q".split()
    nxdata.attrs["Q_indices"] = [0, 1]
    h = 10
    v = 50
    ds = nxdata.create_dataset("I", data=fake_data(h, v))
    ds.attrs["units"] = "1/m"
    ds.attrs["uncertainties"] = "Idev"  # names the sibling error dataset
    ds = nxdata.create_dataset("Q", data=fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    ds = nxdata.create_dataset("Idev", data=fake_data(h, v))
    ds.attrs["units"] = "1/m"
    nxdata.file.close()
def example_04_2D_vector():
    '''
    I(Q): Q may be represented as a vector

    The canSAS interpretation of the "axes" attribute differs from NeXus.
    In canSAS, "Q" used as a vector is recognized as a value of the "axes"
    attribute; NeXus requires *each* value in the "axes" attribute list to
    exist as a dataset in the same group.

    structural model::

        SASroot
          SASentry
            SASdata
              I: float[10, 50]
              Qx: float[10, 50]
              Qy: float[10, 50]
              Qz: float[10, 50]
    '''
    nxdata = basic_setup()
    # bug fix: ".strip()" stored the single string "Qx Qy" instead of the
    # list of axis names NeXus expects (cf. example_02, which uses .split())
    nxdata.attrs["axes"] = "Qx Qy".split()
    nxdata.attrs["Qx_indices"] = 0
    nxdata.attrs["Qy_indices"] = 1
    h = 10
    v = 50
    ds = nxdata.create_dataset("I", data=fake_data(h, v))
    ds.attrs["units"] = "1/m"
    ds = nxdata.create_dataset("Qx", data=fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    ds = nxdata.create_dataset("Qy", data=fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    # Qz is identically zero in this example
    ds = nxdata.create_dataset("Qz", data=0*fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    nxdata.file.close()
def example_05_2D_SAS_WAS():
    '''
    I(|Q|): common multi-method technique: small and wide angle scattering

    WAS is not in the scope of the NXcanSAS definition; still, this shows
    how WAS data might be stored alongside the SAS data, in its own NXdata
    group within the same entry.

    structural model::

        SASroot
          SASentry
            SASdata
              @name="sasdata"
              I: float[10, 50]
              Q: float[10, 50]
            SASdata
              @name="wasdata"
              I: float[25, 25]
              Q: float[25, 25]
    '''
    nxdata = basic_setup()
    # bug fix (here and for wasdata below): ".strip()" was a no-op leaving
    # "axes" a single string; .split() stores the per-dimension name list
    nxdata.attrs["axes"] = "Q Q".split()
    nxdata.attrs["Q_indices"] = [0, 1]
    # SAS data
    h = 10
    v = 50
    ds = nxdata.create_dataset("I", data=fake_data(h, v))
    ds.attrs["units"] = "1/m"
    ds = nxdata.create_dataset("Q", data=fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    nxentry = nxdata.parent
    # WAS data
    was_group = nxentry.create_group("wasdata")
    was_group.attrs["NX_class"] = "NXdata"
    was_group.attrs["signal"] = "I"
    was_group.attrs["axes"] = "Q Q".split()
    was_group.attrs["Q_indices"] = [0, 1]
    h = 25
    v = 25
    ds = was_group.create_dataset("I", data=fake_data(h, v))
    ds.attrs["units"] = "1/m"
    ds = was_group.create_dataset("Q", data=fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    nxdata.file.close()
def example_06_2D_Masked():
    '''
    I(|Q|) and mask: Data masks are possible in analysis of SAS

    NeXus defines a 32-bit integer "pixel_mask" field describing possible
    reasons for masking a pixel, as a bit map (see the NXdetector base class
    or NXmx application definition).  The random data here only uses 0
    (no mask) or 1 (dead pixel).

    structural model::

        SASroot
          SASentry
            SASdata
              I: float[10, 50]
              Q: float[10, 50]
              Mask: int[10, 50]
    '''
    nxdata = basic_setup()
    # bug fix: ".strip()" was a no-op leaving "axes" a single string;
    # .split() stores the per-dimension axis-name list (cf. example_02)
    nxdata.attrs["axes"] = "Q Q".split()
    nxdata.attrs["Q_indices"] = [0, 1]
    h = 10
    v = 50
    ds = nxdata.create_dataset("I", data=fake_data(h, v))
    ds.attrs["units"] = "1/m"
    ds = nxdata.create_dataset("Q", data=fake_data(h, v))
    ds.attrs["units"] = "1/nm"
    # rand+0.5 truncated to int32 yields roughly half 0s and half 1s
    mask_data = numpy.int32(numpy.random.rand(h, v) + 0.5)
    ds = nxdata.create_dataset("Mask", data=mask_data)
    nxdata.file.close()
def example_07_2D_as_1D():
    '''
    I(|Q|): Mapping of 2D data into 1D is common

    Used for radially averaged I(Q), or data combined from several
    measurements; here the 10x50 image is stored as one flat vector.
    '''
    nxdata = basic_setup()
    nxdata.attrs["Q_indices"] = 0
    flat = 10 * 50
    for name, units in (("I", "1/m"), ("Q", "1/nm")):
        dset = nxdata.create_dataset(name, data=fake_data(flat))
        dset.attrs["units"] = units
    nxdata.file.close()
def example_08_SANS_SAXS():
    '''
    I(|Q|): Complementary SANS & SAXS techniques.

    Both datasets pertain to the same sample, so both live in one entry as
    separate NXdata groups tagged with a "probe_type" attribute.  NeXus
    allows only one value in "default", so "sans" is chosen arbitrarily.
    The h5py re-link/delete below renames "sasdata" to "sans" for clarity.
    '''
    sans = basic_setup()
    sans.attrs["probe_type"] = "neutron"
    sans.attrs["Q_indices"] = 0
    for name, units in (("I", "1/m"), ("Q", "1/nm")):
        dset = sans.create_dataset(name, data=fake_data(10))
        dset.attrs["units"] = units

    entry = sans.parent
    entry.attrs["default"] = "sans"
    entry["sans"] = sans         # link the group under its new name ...
    del entry["sasdata"]         # ... and drop the old one

    saxs = entry.create_group("saxs")
    saxs.attrs["NX_class"] = "NXdata"
    saxs.attrs["SAS_class"] = "SASdata"
    saxs.attrs["probe_type"] = "xray"
    for name, units in (("I", "1/m"), ("Q", "1/nm")):
        dset = saxs.create_dataset(name, data=fake_data(25))
        dset.attrs["units"] = units
    saxs.file.close()
def example_09_1D_time():
    '''
    I(t,|Q|): A time-series of 1D I(Q) data

    Demonstrates "AXISNAME_indices": "Time" maps onto axis 0 of "I",
    "Q" onto axis 1.
    '''
    nxdata = basic_setup()
    nxdata.attrs["axes"] = ["Time", "Q"]
    nxdata.attrs["Time_indices"] = 0
    nxdata.attrs["Q_indices"] = 1
    nq, nt = 10, 5
    for name, data, units in (
            ("I", fake_data(nt, nq), "1/m"),
            ("Q", fake_data(nq), "1/nm"),
            ("Time", fake_data(nt), "s"),
    ):
        nxdata.create_dataset(name, data=data).attrs["units"] = units
    nxdata.file.close()
def example_10_1D_time_Q():
    '''
    I(t,|Q(t)|): A time-series of 1D I(Q) data where Q is a function of time

    "Time" maps onto axis 0 of "I"; "Q" depends on both axes
    (Q_indices=[0,1]) because each time step has its own Q values.
    '''
    nxdata = basic_setup()
    nxdata.attrs["axes"] = ["Time", "Q"]
    nxdata.attrs["Time_indices"] = 0
    nxdata.attrs["Q_indices"] = [0, 1]
    nq, nt = 10, 5
    for name, data, units in (
            ("I", fake_data(nt, nq), "1/m"),
            ("Q", fake_data(nt, nq), "1/nm"),
            ("Time", fake_data(nt), "s"),
    ):
        nxdata.create_dataset(name, data=data).attrs["units"] = units
    nxdata.file.close()
def example_11_1D_time_Q_and_uncertainties():
    '''
    I(t,|Q|) +/- sigma(t,|Q|): A time-series of 1D I(Q) data with uncertainties where Q is a function of time

    Combines the time-dependent Q of example_10 with the "uncertainties"
    attribute of example_03.
    '''
    nxdata = basic_setup()
    nxdata.attrs["axes"] = ["Time", "Q"]
    nxdata.attrs["Time_indices"] = 0
    nxdata.attrs["Q_indices"] = [0, 1]
    nq, nt = 10, 5
    intensity = nxdata.create_dataset("I", data=fake_data(nt, nq))
    intensity.attrs["units"] = "1/m"
    intensity.attrs["uncertainties"] = "Idev"  # names the sibling error dataset
    for name, data, units in (
            ("Q", fake_data(nt, nq), "1/nm"),
            ("Time", fake_data(nt), "s"),
            ("Idev", fake_data(nt, nq), "1/m"),
    ):
        nxdata.create_dataset(name, data=data).attrs["units"] = units
    nxdata.file.close()
def example_12_2D_vector_time():
    '''
    I(t,Q): A time-series of 2D I(Q) data, where Q is a vector

    see: *example_04_2D_vector* — the Qx/Qy/Qz maps are shared by every
    time step; only "I" carries the leading Time axis.
    '''
    nxdata = basic_setup()
    nxdata.attrs["axes"] = ["Time", "Qx", "Qy"]
    nxdata.attrs["Time_indices"] = 0
    nxdata.attrs["Qx_indices"] = 1
    nxdata.attrs["Qy_indices"] = 2
    rows, cols, nt = 10, 50, 5
    for name, data, units in (
            ("I", fake_data(nt, rows, cols), "1/m"),
            ("Qx", fake_data(rows, cols), "1/nm"),
            ("Qy", fake_data(rows, cols), "1/nm"),
            ("Qz", fake_data(rows, cols), "1/nm"),
            ("Time", fake_data(nt), "s"),
    ):
        nxdata.create_dataset(name, data=data).attrs["units"] = units
    nxdata.file.close()
def example_13_varied_parameters_Q_time():
    '''
    I(T,t,P,Q(t)): several varied parameters

    Temperature, time, and pressure are all varied; of the Q maps, only the
    time dependence is kept (Qx/Qy/Qz carry a leading nTime axis).  The "."
    entries in "axes" mark the two trailing detector dimensions of "I" that
    have no named axis.
    '''
    nxdata = basic_setup()
    nxdata.attrs["axes"] = ["Temperature", "Time", "Pressure", ".", "."]
    nxdata.attrs["Temperature_indices"] = 0
    nxdata.attrs["Time_indices"] = 1
    nxdata.attrs["Pressure_indices"] = 2
    rows, cols = 10, 50
    nTime, nTemperature, nPressure = 5, 7, 3
    for name, data, units in (
            ("I", fake_data(nTemperature, nTime, nPressure, rows, cols), "1/m"),
            ("Qx", fake_data(nTime, rows, cols), "1/nm"),
            ("Qy", fake_data(nTime, rows, cols), "1/nm"),
            ("Qz", fake_data(nTime, rows, cols), "1/nm"),
            ("Temperature", fake_data(nTemperature), "K"),
            ("Time", fake_data(nTime), "s"),
            ("Pressure", fake_data(nPressure), "MPa"),
    ):
        nxdata.create_dataset(name, data=data).attrs["units"] = units
    nxdata.file.close()
def example_14_varied_parameters_all_time():
    '''
    I(t,T,P,Q(t,T,P)): several varied parameters

    All Q (vector) are different for each combination of time, temperature, and pressure.

    structural model::

        SASroot
          SASentry
            SASdata
              @Time_indices=0
              @Temperature_indices=1
              @Pressure_indices=2
              @I_axes=Time,Temperature,Pressure,.
              I: float[nTime,nTemperature,nPressure,10*50]
              Qx: float[nTime,nTemperature,nPressure,10*50]
              Qy: float[nTime,nTemperature,nPressure,10*50]
              Qz: float[nTime,nTemperature,nPressure,10*50]
              Time: float[nTime]
              Temperature: float[nTemperature]
              Pressure: float[nPressure]
    '''
    nxdata = basic_setup()
    # bug fix: the arrays are laid out (Time, Temperature, Pressure, ...) but
    # the attributes previously claimed Temperature on axis 0 and Time on
    # axis 1; align the attributes with the array layout (and the docstring)
    nxdata.attrs["axes"] = "Time Temperature Pressure .".split()
    nxdata.attrs["Time_indices"] = 0
    nxdata.attrs["Temperature_indices"] = 1
    nxdata.attrs["Pressure_indices"] = 2
    h = 10
    v = 50
    nTime = 5
    nTemperature = 7
    nPressure = 3
    ds = nxdata.create_dataset("I", data=fake_data(nTime, nTemperature, nPressure, h*v))
    ds.attrs["units"] = "1/m"
    ds = nxdata.create_dataset("Qx", data=fake_data(nTime, nTemperature, nPressure, h*v))
    ds.attrs["units"] = "1/nm"
    ds = nxdata.create_dataset("Qy", data=fake_data(nTime, nTemperature, nPressure, h*v))
    ds.attrs["units"] = "1/nm"
    ds = nxdata.create_dataset("Qz", data=fake_data(nTime, nTemperature, nPressure, h*v))
    ds.attrs["units"] = "1/nm"
    ds = nxdata.create_dataset("Temperature", data=fake_data(nTemperature))
    ds.attrs["units"] = "K"
    ds = nxdata.create_dataset("Time", data=fake_data(nTime))
    ds.attrs["units"] = "s"
    ds = nxdata.create_dataset("Pressure", data=fake_data(nPressure))
    ds.attrs["units"] = "MPa"
    nxdata.file.close()
if __name__ == "__main__":
# get a list of the example functions, then document and run each
g_dict = dict(globals()) # keep separate from next line
examples = sorted([f for f in g_dict if f.startswith("example_")])
for funcname in examples:
func = g_dict[funcname]
funcdoc = func.__doc__.strip().splitlines()[0]
print(funcname + ': ' + funcdoc)
func()
h5_file = funcname + '.h5'
structure_file = os.path.join('structure', h5_file + '.txt')
mc = punx.h5structure.h5structure(h5_file)
mc.array_items_shown = 0
structure = mc.report()
fp = open(structure_file, 'w')
fp.write('\n'.join(structure or ''))
fp.write('\n')
fp.close()
| Python | 737 | 28.841248 | 109 | /canSAS2012_examples/create_examples.py | 0.582913 | 0.564361 |
KeiGiang/Arrows-Only | refs/heads/master | import sys, os
import fileinput
funcString = "function ("
openBracket = '('
closingBracket = ') {'
closingBracket2 = '){'
arrowSyntax = ') => {'
def main():
if (len(sys.argv) < 2 or len(sys.argv) > 3):
print 'ERROR:'
print 'Please supply either a directory to a folder containing JavaScript files, or a JavaScript file and an optional output file name in the following formats:'
print '----------------------------------------'
print 'To convert all files in a directory:'
print 'python func-to-arrow.py "directory-to-folder"'
print 'To convert a single file with optional output file:'
print 'python func-to-arrow.py "JavaScript-file.js" "output-file.js"'
elif (len(sys.argv) == 2):
input1 = sys.argv[1]
jsFileExt = '.js'
# newFileName = sys.argv[1].split('.')[0] + '-new.' + sys.argv[1].split('.')[1]
if (jsFileExt in input1):
parseFile(input1, False, False)
else:
parseDir(input1)
# for f in os.listdir(input1):
# if (jsFileExt in f):
# parseFile(f, False, input1)
elif (len(sys.argv) == 3):
fileIn = sys.argv[1]
fileOut = sys.argv[2]
if ((jsFileExt in sys.argv[1]) and (jsFileExt in sys.argv[2])):
parseFile(open(fileIn), fileOut, False)
else:
print 'Please check your file types'
exit()
def parseDir(folder):
    # Recursively convert every non-minified .js file under folder,
    # skipping node_modules trees.
    for entry in os.listdir(folder):
        full = os.path.join(folder, entry)
        if (('.js' in entry) and ('.min.' not in entry)):
            parseFile(entry, False, folder)
        elif (os.path.isdir(full) and (entry != 'node_modules')):
            parseDir(full)
    return
def parseFile(fileIn, fileOut, directory):
if (fileOut):
newFileName = fileOut
else:
newFileName = str(fileIn).split('.')[0] + '-new.' + fileIn.split('.')[1]
if (directory):
fileIn = os.path.join(directory, fileIn)
newFile = open(os.path.join(directory, newFileName), 'a+')
else:
newFile = open(newFileName, 'a+')
isSame = True
for line in open(fileIn):
toWrite = arrowStyle(line)
newFile.write(toWrite)
if (line != toWrite):
isSame = False
newFile.close();
if isSame:
os.remove(os.path.join(directory, newFileName))
print 'No changes were made to ' + fileIn
else:
print 'Changes were made to ' + fileIn
oldFile = os.path.join(directory, newFileName.replace('-new', '-old'))
os.rename(fileIn, oldFile)
# print fileIn + ' has been renamed to ' + oldFile
# print newFileName + ' has been renamed to ' + fileIn
os.rename(os.path.join(directory, newFileName), fileIn)
def arrowStyle(line):
    # Rewrite ES5 "function (...) {" syntax on this line as "(...) => {";
    # lines without the function token pass through unchanged.
    if funcString not in line:
        return line
    converted = line.replace(funcString, openBracket)
    converted = converted.replace(closingBracket, arrowSyntax)
    return converted.replace(closingBracket2, arrowSyntax)
# Script entry point (runs unconditionally on import as well).
main()
| Python | 98 | 30.316326 | 169 | /func-to-arrow.py | 0.578364 | 0.57087 |
KeiGiang/Arrows-Only | refs/heads/master | import os, sys
def main():
    """Delete '-old.' backup files when invoked with one directory argument."""
    if len(sys.argv) == 2 and os.path.isdir(sys.argv[1]):
        parseDir(sys.argv[1])
def parseDir(folder):
    """Recursively remove files whose names contain '-old.', skipping node_modules."""
    for entry in os.listdir(folder):
        full = os.path.join(folder, entry)
        if '-old.' in entry:
            os.remove(full)
        elif os.path.isdir(full) and entry != 'node_modules':
            parseDir(full)
# Script entry point (runs unconditionally on import as well).
main()
| Python | 15 | 25.333334 | 80 | /remove-old.py | 0.544304 | 0.536709 |
RamneekSingh24/Discord-Bot-Codedrills | refs/heads/main | import requests
from bs4 import BeautifulSoup
import discord
import os
from tabulate import tabulate
import handlers
import pandas as pd
from helpers import get_url, get_problems, trim,load_problems
from handlers import start_contest, update_leaderboard,add_cf_user,users,get_recommendations_topics, set_handle, recommendations_handle
from keep_alive import keep_alive
import weasyprint as wsp
import PIL as pil
# global running
# running = contest_running
client = discord.Client()
@client.event
async def on_ready():
    # Gateway handshake finished: log the bot's identity once at startup.
    print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
    """Dispatch '!'-prefixed chat commands (setrc/topics/add/all/start/lb/prob/end)."""
    # NOTE(review): this `global` refers to main's own contest_running and is
    # unused below -- the live flag is read/written via handlers.contest_running
    global contest_running
    # ignore the bot's own messages to avoid feedback loops
    if message.author == client.user:
        return
    msg = message.content
    #params = msg.lower().split(' ')
    params = msg.split(' ')
    # only '!'-prefixed messages are commands
    if params[0][0] != '!':
        return
    if params[0] == '!setrc':
        # choose which Codeforces handle recommendations are generated from
        handle = params[1]
        rc = set_handle(handle)
        if rc < 0:
            await message.channel.send('Invalid codeforces handle')
        else:
            await message.channel.send('Done! Getting recommandations from: '+handle+".")
    if params[0] == '!topics':
        # list the recommendation topics available for the current handle
        msg = get_recommendations_topics(recommendations_handle)
        await message.channel.send(msg)
    if params[0] == '!add':
        # register a Codeforces user for contests (rc: 1 ok, -1 dup, -2 missing)
        username = params[1]
        rc = add_cf_user(username)
        if rc == -1:
            await message.channel.send('User already registered!')
        elif rc == -2:
            await message.channel.send('Not a valid user on CodeForces!')
        else:
            await message.channel.send(f"Sucessfully added {username}")
    elif params[0] == '!all':
        await message.channel.send(users)
    elif params[0] == '!start':
        # start a new contest from the named topic (words joined with '_')
        if handlers.contest_running:
            await message.channel.send("A contest is already Active !")
            return
        task = "_".join(word for word in params[1:])
        #img_filepath = 'table.png'
        #print(task)
        msg = start_contest(task)
        if msg == "error":
            await message.channel.send("Please Try Again!")
        else:
            e = discord.Embed(
                title=f"Problem Set {handlers.ID}\n",
                description=msg,
                color=0xFF5733)
            await message.channel.send(embed=e)
    elif params[0] == '!lb':
        # show the leaderboard for a given (or the current) contest id
        id = params[1] if len(params) > 1 else handlers.ID
        df_lead = update_leaderboard(id)
        # per-user total = sum of all per-problem columns (everything after 'User')
        df_lead['Total'] = df_lead[list(df_lead.columns)[1:]].sum(axis=1)
        df_lead.sort_values(by='Total',ascending=False, inplace=True)
        await message.channel.send("```"+tabulate(df_lead, headers='keys', tablefmt='psql', showindex=False)+"```")
        # f = discord.File('table.png', filename="image.png")
        # e = discord.Embed(title='Leaderboard', color=0xFF5733)
        # e.set_image(url="attachment://image.png")
        # await message.channel.send(file=f, embed=e)
    elif params[0] == "!prob":
        # re-post the problem list for a given (or the current) contest id
        id = params[1] if len(params) > 1 else handlers.ID
        msg = load_problems(id)
        e = discord.Embed(
            title=f"Problem Set {handlers.ID}\n",
            description=msg,
            color=0xFF5733)
        await message.channel.send(embed=e)
    elif params[0] == "!end":
        # abandon the active contest, if any
        if handlers.contest_running == 0:
            await message.channel.send("No contest is running !")
        else:
            handlers.contest_running = 0
            await message.channel.send("Contest Abandoned !")
# Start the keep-alive web server (for the hosting platform), then run the
# bot with the token from the environment; client.run blocks forever.
keep_alive()
client.run(os.getenv('TOKEN'))
| Python | 113 | 27.778761 | 135 | /main.py | 0.657749 | 0.645141 |
RamneekSingh24/Discord-Bot-Codedrills | refs/heads/main | import requests
from bs4 import BeautifulSoup
import discord
import os
import pandas as pd
import weasyprint as wsp
import PIL as pil
def get_url(task, handle):
    """Return the shareable codedrills URL for recommendation list *task*.

    Scrapes the recommender profile page for *handle* and follows the
    "share" link inside the section whose HTML id equals *task*.

    Bug fix: the profile URL previously hard-coded the user
    'jatinmunjal2k', silently ignoring the *handle* argument (the same URL
    is built from the handle in handlers.get_recommendations_topics).
    """
    URL = 'https://recommender.codedrills.io/profile?handles=cf%2F' + handle
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    result = soup.find(id=task)
    url = result.find(title='An url for sharing and keeping track of solved problems for this recommendation list')
    return "https://recommender.codedrills.io" + url['href']
def get_problems(task, ID, handle):
    """Scrape the problem list for *task*, persist it, and return a markdown list.

    Side effect: writes contests/problems-contest<ID>.csv with columns
    name,link.  Returns one "[i](url) name" line per problem for chat display.
    """
    # print(ID)
    items = [[],[]]          # parallel lists: [0] names, [1] links
    buffer = ""
    URL = get_url(task,handle)
    page = requests.get(URL)
    soup = BeautifulSoup(page.content, 'html.parser')
    # each table row on the share page holds one problem anchor
    elems = soup.find_all('tr')
    idx = 1
    for e in elems:
        a_tag = e.find('a')
        buffer = buffer +"["+str(idx)+"](" + a_tag['href'] + ") " + a_tag.text + "\n"
        items[0].append(a_tag.text)
        items[1].append(a_tag['href'])
        idx += 1
    # persist so later !prob / leaderboard commands can reload the set
    df = pd.DataFrame(list(zip(items[0],items[1])), columns = ['name', 'link'])
    df.to_csv('contests/problems-contest'+str(ID)+'.csv' , index = False)
    #print(df.head(3))
    return buffer
def load_problems(id):
    """Reload the stored problem CSV for contest *id* as markdown link lines."""
    df = pd.read_csv('contests/problems-contest' + str(id) + '.csv')
    lines = [row['name'] + " [Link](" + row['link'] + ")\n" for _, row in df.iterrows()]
    return "".join(lines)
def trim(source_filepath, target_filepath=None, background=None):
    """Crop the uniform border off an image file and save the result.

    :param source_filepath: image to crop
    :param target_filepath: output path; defaults to overwriting the source
    :param background: border color; defaults to the top-left pixel
    """
    # bug fix: `import PIL as pil` does not expose pil.ImageChops (and
    # pil.Image only worked because another import loaded it); import the
    # submodules explicitly
    from PIL import Image, ImageChops
    if not target_filepath:
        target_filepath = source_filepath
    img = Image.open(source_filepath)
    if background is None:
        background = img.getpixel((0, 0))
    # diff against a solid border-colored canvas; getbbox() is the extent
    # of everything that differs from the background
    border = Image.new(img.mode, img.size, background)
    bbox = ImageChops.difference(img, border).getbbox()
    img = img.crop(bbox) if bbox else img
    img.save(target_filepath)
| Python | 65 | 27.615385 | 113 | /helpers.py | 0.652875 | 0.646427 |
RamneekSingh24/Discord-Bot-Codedrills | refs/heads/main | import requests
from bs4 import BeautifulSoup
import discord
import os
from tabulate import tabulate
import pandas as pd
from helpers import get_url, get_problems, trim,load_problems
from keep_alive import keep_alive
import weasyprint as wsp
import PIL as pil
# NOTE(review): a module-level `global` statement is a no-op; kept as-is.
global ID, contest_running, users, recommendations_handle
ID = 0                   # monotonically increasing contest number
contest_running = 0      # 1 while a contest is active, else 0
users = []               # registered Codeforces handles
recommendations_handle = 'jatinmunjal2k'  # CF handle recommendations are based on
def get_recommendations_topics(handle='jatinmunjal2k'):
    """Scrape the codedrills recommender page and list the topic tabs for *handle*."""
    page = requests.get('https://recommender.codedrills.io/profile?handles=cf%2F' + handle)
    soup = BeautifulSoup(page.content, 'html.parser')
    pills = soup.find("ul", class_="nav nav-pills")
    parts = ["Available Topics:\n"]
    for item in pills.find_all('li'):
        parts.append(item.text.strip() + ", ")
    # drop the trailing ", " separator
    return "".join(parts)[:-2]
def set_handle(handle):
    """Point recommendations at *handle*; return 0 on success, -1 if not on Codeforces."""
    global recommendations_handle
    response = requests.head('https://codeforces.com/profile/' + handle)
    if response.status_code == 200:
        recommendations_handle = handle
        return 0
    return -1
def start_contest(task):
    """Create problem set *task* under a fresh contest id and activate it.

    Returns the formatted problem list on success, or the sentinel string
    "error" if scraping/initialization fails.
    """
    global ID, contest_running
    ID += 1
    try:
        problems_str = get_problems(task, ID, recommendations_handle)
        init_leaderboard(ID)
    except Exception:
        # fix: was a bare `except:` which also swallowed KeyboardInterrupt/
        # SystemExit; roll back the id so a failed scrape doesn't burn a number
        ID -= 1
        return "error"
    contest_running = 1
    return problems_str
def add_cf_user(cf_handle):
    """Register *cf_handle*; return 1 on success, -1 if duplicate, -2 if not on CF."""
    global users
    if cf_handle in users:
        return -1
    # HEAD request just to verify the profile page exists
    r = requests.head('https://codeforces.com/profile/'+cf_handle)
    if r.status_code != 200:
        return -2
    users.append(cf_handle)
    if contest_running == 1:
        # late joiner: append an all-zero row to the active contest's leaderboard
        df = pd.read_csv('contests/leaderboard'+str(ID)+'.csv')
        entry = [cf_handle] + [0]*(df.shape[1]-1)
        df.loc[len(df)] = entry
        df.to_csv('contests/leaderboard'+str(ID)+'.csv',index = False)
    return 1
# def print_leaderboard(id, img_filepath):
# df_leaderboard = pd.read_csv('contests/leaderboard'+str(id)+'.csv')
# css = wsp.CSS(string='''
# @page { size: 2048px 2048px; padding: 0px; margin: 0px; }
# table, td, tr, th { border: 1px solid black; }
# td, th { padding: 4px 8px; }
# ''')
# html = wsp.HTML(string=df_leaderboard.to_html(index=False))
# html.write_png(img_filepath, stylesheets=[css])
# trim(img_filepath)
def init_leaderboard(id):
    """Write a fresh all-zero leaderboard CSV for contest *id* (one row per user)."""
    problems = pd.read_csv('contests/problems-contest' + str(id) + '.csv')['name']
    columns = ['User'] + list(range(1, len(problems) + 1))
    rows = [[user] + [0] * len(problems) for user in users]
    pd.DataFrame(rows, columns=columns).to_csv(
        'contests/leaderboard' + str(id) + '.csv', index=False)
    # print_leaderboard(id, img_filepath)
def update_leaderboard(id):
    """Refresh contest *id*'s leaderboard from each user's CF submission page.

    Scrapes codeforces.com/submissions/<user>, collects accepted problem
    ids, marks matching problem columns with 1, rewrites the CSV, and
    returns the updated DataFrame.
    """
    global users
    df_prob = pd.read_csv('contests/problems-contest'+str(id)+'.csv')
    df_lead = pd.read_csv('contests/leaderboard'+str(id)+'.csv')
    for idxu, ru in df_lead.iterrows():
        user = ru['User']
        URL = 'https://codeforces.com/submissions/' + user
        page = requests.get(URL)
        soup = BeautifulSoup(page.content, 'html.parser')
        submissions = soup.find_all('tr')
        ac = []   # "/<contest>/<problem>" fragments of accepted submissions
        for submission in submissions:
            data = submission.find_all('td')
            try:
                # cell 3 holds the problem link, cell 5 the verdict text
                url = data[3].find('a')['href'].split('/')
                verdict = data[5].text
                #print(url, repr(verdict))
                if 'Accepted' in verdict:
                    ac.append('/'+url[2]+'/'+url[-1])
            except:
                # header rows / malformed rows lack these cells -- skip them
                continue
        j = 0
        for idx, row in df_prob.iterrows():
            j += 1
            link = row['link']
            # column labels come back from read_csv as strings, hence str(j)
            for pid in ac:
                if pid in link:
                    df_lead.at[idxu,str(j)] = 1
    df_lead.to_csv('contests/leaderboard'+str(id)+'.csv',index = False)
    # print_leaderboard(id, 'table.png')
    return df_lead
| Python | 126 | 27.769842 | 109 | /handlers.py | 0.64333 | 0.630099 |
Dorencon/Classification-and-detection | refs/heads/master | from openvino.inference_engine import IECore
import cv2
import numpy as np
class InferenceEngineClassifier:
    """Thin wrapper around an OpenVINO image-classification network.

    Loads an IR model once, then exposes classify() for raw inference and
    get_top() to turn the probability vector into labelled top-N results.
    """

    def __init__(self, configPath=None, weightsPath=None, device=None,
                 extension=None, classesPath=None):
        """Read the IR network, load it onto *device*, and read class labels.

        :param configPath: path to the .xml model topology
        :param weightsPath: path to the .bin weights file
        :param device: inference device name, e.g. "CPU"
        :param extension: optional CPU extension library (CPU device only)
        :param classesPath: text file with one class name per line
        """
        ie = IECore()
        if extension and device == "CPU":
            ie.add_extension(extension, device)
        self.net = ie.read_network(configPath, weightsPath)
        self.exec_net = ie.load_network(self.net, device_name=device)
        with open(classesPath, 'r') as f:
            self.classes = [line.strip() for line in f]

    def _prepare_image(self, image, h, w):
        """Resize *image* to (w, h) and reorder HWC -> CHW for the network."""
        image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))
        return image

    def classify(self, image):
        """Run the network on one image; return the raw output tensor."""
        input_blob = next(iter(self.net.inputs))
        out_blob = next(iter(self.net.outputs))
        n, c, h, w = self.net.inputs[input_blob].shape
        image = self._prepare_image(image, h, w)
        output = self.exec_net.infer(inputs={input_blob: image})
        return output[out_blob]

    def get_top(self, prob, topN=1):
        """Return the top *topN* [class_name, probability_string] pairs.

        Bug fix: the previous version hard-coded 1000 classes
        (``top[1000 - topN:1000]``), returning wrong or empty results for
        any model whose output vector is not exactly 1000 long.
        """
        prob = np.squeeze(prob)
        order = np.argsort(prob)[::-1][:topN]  # indices, highest prob first
        return [[self.classes[i], '{:.15f}'.format(prob[i])] for i in order]
Dorencon/Classification-and-detection | refs/heads/master | import ie_classifier as ic
import argparse
import logging as log
import sys
import cv2
def build_argparser():
    """Build the command-line parser for the classification sample.

    Fix: the help strings previously used backslash line continuations
    *inside* the string literals, which embedded long runs of spaces into
    the --help output; flags, dests, defaults, and required-ness unchanged.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', required=True, type=str,
                        help='Path to an .xml file with a trained model.')
    parser.add_argument('-w', '--weights', required=True, type=str,
                        help='Path to an .bin file with a trained weights.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help='Path to image file')
    parser.add_argument('-c', '--classes', default=None, type=str,
                        help='File containing classnames')
    parser.add_argument('-d', '--device', default='CPU', type=str,
                        help='Device name')
    parser.add_argument('-e', '--cpu_extension', default=None, type=str,
                        help='For custom')
    return parser
def main():
    """Entry point: parse arguments, classify one image and log the top-5
    predictions."""
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
        level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Start IE classification sample")
    # Build the classifier from the IR model files given on the command line.
    ie_classifier = ic.InferenceEngineClassifier(configPath=args.model,
        weightsPath=args.weights, device=args.device, extension=args.cpu_extension,
        classesPath=args.classes)
    img = cv2.imread(args.input)
    prob = ie_classifier.classify(img)
    # Top-5 most probable classes, best first.
    predictions = ie_classifier.get_top(prob, 5)
    log.info("Predictions: " + str(predictions))
    return
if __name__ == '__main__':
    sys.exit(main())
Dorencon/Classification-and-detection | refs/heads/master | from openvino.inference_engine import IECore
import cv2
import numpy as np
class InferenceEngineDetector:
    """Wrapper around the OpenVINO Inference Engine for SSD-style object
    detection models in IR format (.xml topology + .bin weights)."""
    def __init__(self, configPath = None, weightsPath = None,
                 device = None, extension = None, classesPath = None):
        """Load the network, optionally registering a CPU extension, and
        read the class-name list (one label per line)."""
        IEc = IECore()
        # Custom-layer extensions are only applicable to the CPU plugin.
        if (extension and device == 'CPU'):
            IEc.add_extension(extension, device)
        self.net = IEc.read_network(configPath, weightsPath)
        self.exec_net = IEc.load_network(self.net, device_name = device)
        with open(classesPath, 'r') as f:
            self.classes = [i.strip() for i in f]
    def _prepare_image(self, image, h, w):
        """Resize to the network input size and convert HWC -> CHW."""
        image = cv2.resize(image, (w, h))
        image = image.transpose((2, 0, 1))
        return image
    def detect(self, image):
        """Run one forward pass; return the raw detection output tensor."""
        input_blob = next(iter(self.net.inputs))
        output_blob = next(iter(self.net.outputs))
        n, c, h, w = self.net.inputs[input_blob].shape
        image = self._prepare_image(image, h, w)
        output = self.exec_net.infer(inputs={input_blob: image})
        output = output[output_blob]
        return output
    def draw_detection(self, detections, image, confidence = 0.5, draw_text = True):
        """Draw boxes (and optionally '<class> <score>%' labels) on `image`.

        Each detection row is used as [_, class_id, score, x_min, y_min,
        x_max, y_max], with box coordinates normalised to [0, 1]; only
        rows with score above `confidence` are drawn.
        """
        detections = np.squeeze(detections)
        h, w, c = image.shape
        for classdet in detections:
            if (classdet[2] > confidence):
                # Scale normalised corners to pixel coordinates.
                image = cv2.rectangle(image, (int(classdet[3] * w), int(classdet[4] * h)),
                                      (int(classdet[5] * w), int(classdet[6] * h)),
                                      (0, 255, 0), 1)
                if (draw_text):
                    # Label just above the top-left corner of the box.
                    image = cv2.putText(image,
                                        self.classes[int(classdet[1])]
                                        + ' ' + str('{:.2f}'.format(classdet[2] * 100)) + '%',
                                        (int(classdet[3] * w - 5), int(classdet[4] * h - 5)),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.45,
                                        (0, 0, 255), 1)
        return image
Dorencon/Classification-and-detection | refs/heads/master | import ie_detector as id
import logging as log
import cv2
import argparse
import sys
def build_argparser():
    """Build the command-line parser for the detection sample.

    Fix: the original help strings used backslash continuations inside the
    string literals, embedding runs of indentation whitespace in the text.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', required=True, type=str,
                        help='Path to an .xml file with a trained model.')
    parser.add_argument('-w', '--weights', required=True, type=str,
                        help='Path to an .bin file with a trained weights.')
    parser.add_argument('-i', '--input', required=True, type=str,
                        help='Path to image file.')
    parser.add_argument('-d', '--device', default='CPU', type=str,
                        help='Device name')
    parser.add_argument('-l', '--cpu_extension', type=str, default=None,
                        help='MKLDNN (CPU)-targeted custom layers. Absolute path '
                             'to a shared library with the kernels implementation')
    parser.add_argument('-c', '--classes', type=str, default=None,
                        help='File containing classnames')
    return parser
def main():
    """Entry point: parse arguments, run detection on one image and show
    the image with the detections drawn on it."""
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    log.info("Start IE detection sample")
    # Build the detector from the IR model files given on the command line.
    ie_detector = id.InferenceEngineDetector(configPath=args.model,
                                             weightsPath=args.weights,
                                             device=args.device,
                                             extension=args.cpu_extension,
                                             classesPath=args.classes)
    img = cv2.imread(args.input)
    detections = ie_detector.detect(img)
    # Draw boxes for detections above the default 0.5 confidence threshold.
    image_detected = ie_detector.draw_detection(detections, img)
    cv2.imshow('Image with detections', image_detected)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return
if (__name__ == '__main__'):
    sys.exit(main())
riti121/cafe | refs/heads/master | """
import tensorflow as tf
modeltest=tf.keras.models.load_model("facial_1 (1)")
print("--model loaded successfully--")
"""
import cv2
import sys
import os
class FaceCropper(object):
    """Detect faces in an image with an OpenCV Haar cascade, optionally draw
    and save the detections, and write one 32x32 crop per face
    ("image<N>.jpg")."""
    cascades_path = 'haarcascade_frontalface_default.xml'

    def __init__(self):
        # Pretrained frontal-face Haar cascade (file expected in the CWD).
        self.face_cascade = cv2.CascadeClassifier(self.cascades_path)

    def generate(self, image_path, show_result):
        """Detect faces in the image at ``image_path``.

        Returns the filename of the last face crop saved while drawing
        (empty string when ``show_result`` is falsy), or 0 on failure
        (unreadable image / no faces detected).
        """
        name = ""
        img = cv2.imread(image_path)
        if img is None:
            print("Can't open image file")
            return 0
        #img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(img, 1.1, 3, minSize=(100, 100))
        # Fix: detectMultiScale returns an EMPTY sequence (never None) when
        # nothing is found, so the original `faces is None` test never fired
        # and execution fell through to the cropping loop with no faces.
        if faces is None or len(faces) == 0:
            print('Failed to detect face')
            return 0
        if show_result:
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)
                roi_color = img[y:y + h, x:x + w]
                print("[INFO] Object found. Saving locally.")
                name = str(w) + str(h) + '_faces.jpg'
                cv2.imwrite(str(w) + str(h) + '_faces.jpg', roi_color)
            #cv2.imshow('cropped image',roi_color)
            #cv2.imshow('marked image',img)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        facecnt = len(faces)
        print("Detected faces: %d" % facecnt)
        i = 0
        height, width = img.shape[:2]
        for (x, y, w, h) in faces:
            # Square crop centred on the detection, then downscale to 32x32.
            r = max(w, h) / 2
            centerx = x + w / 2
            centery = y + h / 2
            nx = int(centerx - r)
            ny = int(centery - r)
            nr = int(r * 2)
            faceimg = img[ny:ny + nr, nx:nx + nr]
            lastimg = cv2.resize(faceimg, (32, 32))
            i += 1
            cv2.imwrite("image%d.jpg" % i, lastimg)
        return name
#fc=FaceCropper().generate("IMG_20200226_000431.png",True)
| Python | 61 | 30.147541 | 83 | /facialrecognition.py | 0.516044 | 0.485008 |
riti121/cafe | refs/heads/master | import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from collections import Counter
import bot
import time
import tensorflow as tf
import facialrecognition as fr
import cv2
class Home(Screen):
    """Landing screen; its layout/behaviour is defined in the kv file."""
    pass
class Questions(Screen):
    """Screen running the 20-question, two-option personality quiz.

    Answers are scored into a four-letter MBTI-style code which is stored
    on Report.personality for the result screens.
    """
    # One question per line; the two answer options separated by 'SPLIT'.
    ques_path='Personality Test(base)\Questions.txt'
    # Four-letter code -> human-readable personality name.
    personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}
    personality=''
    questions=[]
    question_1 = ObjectProperty(None)
    question_2 = ObjectProperty(None)
    # 1-based index of the question currently on screen.
    counter=1
    # Chosen option ('a'/'b') per question; NOTE: class attribute, so it is
    # shared (and mutated in place) across instances.
    answers=[0]*20
    # Questions are loaded once, at class-definition (import) time.
    with open(ques_path) as quest_file:
        questions=[r.split('SPLIT') for r in quest_file.readlines()]
    def personality_exam(self,answers):
        """Reduce the 20 answers to a four-letter code on Report.personality.

        Questions cycle through four axes, so every fourth answer scores the
        same axis; the majority option selects that axis' letter.
        """
        e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']
        e.extend([answers[r] for r in range(0,20,4)])
        s.extend([answers[r] for r in range(1,20,4)])
        t.extend([answers[r] for r in range(2,20,4)])
        j.extend([answers[r] for r in range(3,20,4)])
        personality=''
        for option in e,s,t,j:
            # option[:2] are the axis letters; the rest are 'a'/'b' votes.
            temp=Counter(option)
            personality+=option[0] if temp['a']>temp['b'] else option[1]
        Report.personality=personality
    def on_enter(self, *args):
        # Show the first question when the screen becomes active.
        self.question_1.text=self.questions[0][0]
        self.question_2.text=self.questions[0][1]
    def ask_question1(self):
        """Record option 'a' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='a'
            self.personality_exam(self.answers)
            self.counter=1
            # Jump to the report screen via the module-level manager `sm`.
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='a'
            self.counter+=1
    def ask_question2(self):
        """Record option 'b' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='b'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='b'
            self.counter+=1
class Report(Screen):
    """Result screen: shows the personality name, its code and an image."""
    # Four-letter code written by Questions.personality_exam.
    personality=''
    def on_enter(self, *args):
        self.per.text=Questions.personalities[self.personality]+'\n'+'('+self.personality+')'
        # Per-personality assets live in a folder named after the code.
        self.image.source= Report.personality+'\INTRODUCTION\Image.png'
class Description(Screen):
    """Screen with the long-form description of the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        intro_path = Report.personality + '\INTRODUCTION\Introduction.txt'
        with open(intro_path) as handle:
            self.detail.text = handle.read()
class CareerOptions(Screen):
    """Screen listing suggested careers for the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        career_path = Report.personality + '\career.txt'
        with open(career_path) as handle:
            self.detail.text = handle.read()
class Strengths(Screen):
    """Screen listing the strengths of the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        strengths_path = Report.personality + '\STRENGTHS\Strengths.txt'
        with open(strengths_path) as handle:
            self.detail.text = handle.read()
class CameraClick(Screen):
    """Camera screen: captures a photo, runs face cropping plus the emotion
    model, and hands the detected mood over to the chat screen."""
    # Model output index -> emotion label.
    emo = ['Angry', 'Fear', 'Happy',
           'Sad', 'Surprise', 'Neutral']
    # Keras emotion model, loaded once at import time.
    model = tf.keras.models.load_model("facial_1 (1)")
    buddy=''
    mood=''
    def prepare(self, filepath):
        """Load an image as grayscale and shape it for the model: (1, 48, 48, 1)."""
        IMG_SIZE = 48
        img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    def capture(self):
        """Snapshot the camera widget, classify the face and store the mood
        on ChatWindow.mood; also forwards the typed name to the bot."""
        camera = self.ids['camera']
        timestr = time.strftime("%Y%m%d_%H%M%S")
        name="IMG_{}.png".format(timestr)
        camera.export_to_png(name)
        print("Captured")
        # Crop the face out of the snapshot (returns the crop's filename).
        fc=fr.FaceCropper().generate(name,True)
        try:
            prediction = self.model.predict([self.prepare(fc)])
            prediction=list(map(float,prediction[0]))
        # Fix: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            prediction="prepare function could not run(0 faces detected)"
            self.mood='Neutral'
        print(prediction)
        try:
            self.mood=self.emo[prediction.index(max(prediction))] # self.emo[list(prediction[0]).index(1)]
        except Exception:
            print("Exception handled..!! Picture could not be cleared properly. Please check lighting")
            self.mood='Neutral'
        bot.setname(self.textforcamera.text)
        print(bot.getname())
        ChatWindow.mood=self.mood
class ChatWindow(Screen):
    """Chat screen: greets the user with the detected mood and appends the
    user's messages to the history label."""
    # Mood string copied in by CameraClick.capture before entering here.
    mood=''
    # Prepare the chatbot corpus once, at import time.
    bot.pre_processing()
    #bot.chatcode()
    def on_enter(self, *args):
        # Greet the user using the mood detected from the captured photo.
        self.chat_history.text="Hey "+bot.getname()+", what brings you here today!!\n Current Mood: "+self.mood+" !! "
    def send_message(self):
        """Append the typed message to the chat history and clear the input."""
        message=self.text.text
        self.text.text=''
        #self.history.update_chat_history(f'[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}')
        self.chat_history.text += '\n' +"User: "+message
        # Set layout height to whatever height of chat history text is + 15 pixels
        # (adds a bit of space at the bottom)
        # Set chat history label to whatever height of chat history text is
        # Set width of chat history text to 98% of the label width (adds small margins)
        #self.layout.height = self.chat_history.texture_size[1] + 15
        self.chat_history.text_size = (self.chat_history.width * 0.98, None)
class WindowManager(ScreenManager):
    """Root screen manager; screen transitions are wired up in the kv file."""
    pass
# Load the kv layout and register every screen with the manager; the app
# starts on the home screen.
kv=Builder.load_file('design.kv')
sm = WindowManager()
screens=[Home(name="home"), Questions(name="quest"), Report(name="rep"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point; the UI root is the module-level manager."""
    def build(self):
        return sm
if __name__=='__main__':
    CafeApp().run()
# Print the largest value strictly smaller than the maximum (i.e. the
# second-largest distinct value); prints nothing when all values are equal.
values = sorted(map(int, input().split()), reverse=True)
largest = values[0]
for value in values[1:]:
    if value != largest:
        print(value)
        break
| Python | 8 | 20.625 | 48 | /test.py | 0.525714 | 0.508571 |
riti121/cafe | refs/heads/master | import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from collections import Counter
from gensim.parsing.preprocessing import strip_non_alphanum, preprocess_string
import bot
import time
import tensorflow as tf
import facialrecognition as fr
import cv2
class Home(Screen):
    """Landing screen; its layout/behaviour is defined in the kv file."""
    pass
class Questions(Screen):
    """Screen running the 20-question, two-option personality quiz.

    Answers are scored into a four-letter MBTI-style code which is stored
    on Report.personality for the result screens.
    """
    # One question per line; the two answer options separated by 'SPLIT'.
    ques_path='Personality Test(base)\Questions.txt'
    # Four-letter code -> human-readable personality name.
    personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}
    personality=''
    questions=[]
    question_1 = ObjectProperty(None)
    question_2 = ObjectProperty(None)
    # 1-based index of the question currently on screen.
    counter=1
    # Chosen option ('a'/'b') per question; NOTE: class attribute, so it is
    # shared (and mutated in place) across instances.
    answers=[0]*20
    # Questions are loaded once, at class-definition (import) time.
    with open(ques_path) as quest_file:
        questions=[r.split('SPLIT') for r in quest_file.readlines()]
    def personality_exam(self,answers):
        """Reduce the 20 answers to a four-letter code on Report.personality.

        Questions cycle through four axes, so every fourth answer scores the
        same axis; the majority option selects that axis' letter.
        """
        e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']
        e.extend([answers[r] for r in range(0,20,4)])
        s.extend([answers[r] for r in range(1,20,4)])
        t.extend([answers[r] for r in range(2,20,4)])
        j.extend([answers[r] for r in range(3,20,4)])
        personality=''
        for option in e,s,t,j:
            # option[:2] are the axis letters; the rest are 'a'/'b' votes.
            temp=Counter(option)
            personality+=option[0] if temp['a']>temp['b'] else option[1]
        Report.personality=personality
    def on_enter(self, *args):
        # Show the first question when the screen becomes active.
        self.question_1.text=self.questions[0][0]
        self.question_2.text=self.questions[0][1]
    def ask_question1(self):
        """Record option 'a' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='a'
            self.personality_exam(self.answers)
            self.counter=1
            # Jump to the report screen via the module-level manager `sm`.
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='a'
            self.counter+=1
    def ask_question2(self):
        """Record option 'b' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='b'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='b'
            self.counter+=1
class Report(Screen):
    """Result screen: shows the personality name, its code and an image."""
    # Four-letter code written by Questions.personality_exam.
    personality=''
    def on_enter(self, *args):
        self.per.text=Questions.personalities[self.personality]+'\n'+'('+self.personality+')'
        # Per-personality assets live in a folder named after the code.
        self.image.source= Report.personality+'\INTRODUCTION\Image.png'
class Description(Screen):
    """Screen with the long-form description of the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        intro_path = Report.personality + '\INTRODUCTION\Introduction.txt'
        with open(intro_path) as handle:
            self.detail.text = handle.read()
class CareerOptions(Screen):
    """Screen listing suggested careers for the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        career_path = Report.personality + '\career.txt'
        with open(career_path) as handle:
            self.detail.text = handle.read()
class Strengths(Screen):
    """Screen listing the strengths of the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        strengths_path = Report.personality + '\STRENGTHS\Strengths.txt'
        with open(strengths_path) as handle:
            self.detail.text = handle.read()
class CameraClick(Screen):
    """Camera screen: captures a photo, runs face cropping plus the emotion
    model, and hands the detected mood over to the chat screen."""
    # Model output index -> emotion label.
    emo = ['Angry', 'Fear', 'Happy',
           'Sad', 'Surprise', 'Neutral']
    # Keras emotion model, loaded once at import time.
    model = tf.keras.models.load_model("facial_1 (1)")
    buddy=''
    mood=''
    def prepare(self, filepath):
        """Load an image as grayscale and shape it for the model: (1, 48, 48, 1)."""
        IMG_SIZE = 48
        img_array = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
        new_array = cv2.resize(img_array, (IMG_SIZE, IMG_SIZE))
        return new_array.reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    def capture(self):
        """Snapshot the camera widget, classify the face, store the mood on
        ChatWindow.mood, forward the typed name to the bot and clear the
        name input."""
        camera = self.ids['camera']
        timestr = time.strftime("%Y%m%d_%H%M%S")
        name="IMG_{}.png".format(timestr)
        camera.export_to_png(name)
        print("Captured")
        # Crop the face out of the snapshot (returns the crop's filename).
        fc=fr.FaceCropper().generate(name,True)
        try:
            prediction = self.model.predict([self.prepare(fc)])
            prediction=list(map(float,prediction[0]))
        # Fix: was a bare `except:` that also swallowed SystemExit and
        # KeyboardInterrupt.
        except Exception:
            prediction="prepare function could not run(0 faces detected)"
            self.mood='Neutral'
        print(prediction)
        try:
            self.mood=self.emo[prediction.index(max(prediction))] # self.emo[list(prediction[0]).index(1)]
        except Exception:
            print("Exception handled..!! Picture could not be captured properly. Please check lighting")
            self.mood='Neutral'
        bot.setname(self.textforcamera.text)
        print(bot.getname())
        ChatWindow.mood=self.mood
        self.textforcamera.text = ''
class ChatWindow(Screen):
    """Mood-aware scripted chat screen.

    ``mood`` is set by CameraClick.capture before the user enters this
    screen; ``counter`` tracks how deep the scripted conversation is and
    ``one`` whether a known topic keyword has already been recognised.
    """
    one=True
    prev=""
    mood=''
    # Prepare the chatbot corpus/model once, at import time.
    bot.pre_processing()
    counter=1
    #bot.chatcode()
    def on_enter(self, *args):
        """Greet the user with an opener matched to the detected mood."""
        print(self.mood)
        greeting_msg="Hey "+bot.getname()+", my name is Cafe Buddy consider me a friend of yours!!\n"
        #self.chat_history.text="Hey "+bot.getname()+", what brings you here today!!\n Current Mood: "+self.mood+" !! "
        #emo = ['Angry', 'Fear', 'Happy','Sad', 'Surprise', 'Neutral']
        if self.mood=='Happy':
            buddy_msg="you seem quite happy. Is there still anything that disturbs you?\n"
            self.chat_history.text=greeting_msg+buddy_msg
        if self.mood=='Angry':
            buddy_msg="you seem quite disturbed. Is there anything that disturbs you?\n"
            self.chat_history.text=greeting_msg+buddy_msg
        if self.mood=='Fear' or self.mood=='Surprise' or self.mood=='Neutral':
            buddy_msg="Is everything okay? You are looking stressed?\n"
            self.chat_history.text=greeting_msg+buddy_msg
        if self.mood=='Sad':
            buddy_msg="hey, what is it that worries you so much? Why are you looking so sad?\n"
            self.chat_history.text=greeting_msg+buddy_msg
    def send_message(self):
        """Advance the scripted conversation one step for the typed message."""
        message=self.text.text
        self.text.text=''
        #self.history.update_chat_history(f'[color=dd2020]{chat_app.connect_page.username.text}[/color] > {message}')
        # NOTE(review): plain `=` here replaces the whole history (including
        # the greeting) instead of appending — confirm `+=` was not intended.
        self.chat_history.text = '\n' +"User: "+message
        if self.mood=='Happy':
            if self.counter==1:
                # bot.predict appears to score the message's positivity —
                # TODO confirm against the bot module.
                if (bot.predict(message) >= 0.55):
                    buddy_msg='That is good. In case you ever feel otherways. Please feel free to have a session with me\n'
                else:
                    self.mood='Neutral'
                    buddy_msg = 'Please express yourself freely, i am hearing.\n'
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
        else:
            print(self.counter)
            if self.counter==1:
                # NOTE(review): preprocess_string stems tokens ('studies' ->
                # 'studi'), so unstemmed entries such as 'education',
                # 'college' and 'family' here can likely never match —
                # verify this keyword list.
                keyword=[word for word in preprocess_string(message.lower()) if word in ('friend','work','education','school','college','family','studi','exam','fight')]
                print(keyword)
                if len(keyword)>0:
                    buddy_msg = 'Will you please tell me in a bit more detail about it?'
                    self.one=True
                else:
                    buddy_msg='I understand. Seems like something\'s bothering you. '\
                        'Could you further describe it, in short?'
                    self.one=False
                self.counter+=1
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
            elif self.counter==2:
                if self.one==True:
                    keyword=[]
                    print(bot.predict(message))
                    keyword.extend([preprocess_string(message.lower())][0])
                    print(keyword)
                    # Canned advice per recognised topic, gated on the
                    # bot.predict score for the message.
                    # NOTE(review): if none of these branches assigns
                    # buddy_msg (e.g. one==False, or a score > 0.6 in the
                    # 'work' branch), the append below raises NameError —
                    # confirm intended flow.
                    if 'friend' in keyword and bot.predict(message)[0][0] <= 0.6:
                        buddy_msg = "Many people tend to expect too much of others, their family, "\
                            "their friends or even just acquaintances. It's a usual mistake"\
                            ", people don't think exactly the way you do.\nDon't let the "\
                            "opinions of others make you forget what you deserve. You are "\
                            "not in this world to live up to the expectations of others, "\
                            "nor should you feel that others are here to live up to yours."\
                            "\nThe first step you should take if you want to learn how to "\
                            "stop expecting too much from people is to simply realize and "\
                            "accept the fact that nobody is perfect and that everyone "\
                            "makes mistakes every now and then."
                    elif 'work' in keyword or 'studi' in keyword or 'exam' in keyword:
                        if bot.predict(message)[0][0] <= 0.6:
                            buddy_msg = bot.getname() + ", don't take too much stress. I can list some really cool "\
                                "ways to handle it.\nYou should develop healthy responses which "\
                                "include doing regular exercise and taking good quality sleep. "\
                                "You should have clear boundaries between your work or academic "\
                                "life and home life so you make sure that you don't mix them.\n"\
                                "Tecniques such as meditation and deep breathing exercises can be "\
                                "really helping in relieving stress.\n Always take time to "\
                                "recharge so as to avoid the negative effects of chronic stress "\
                                "and burnout. We need time to replenish and return to our pre-"\
                                "stress level of functioning."
                    elif 'famili' in keyword and bot.predict(message)[0][0]<=0.6:
                        buddy_msg=bot.getname() + ", don't take too much stress. All you need to do is adjust "\
                            "your priorities. Don't take on unnecessary duties and "\
                            "responsibilities.\nTake advice from people whose opinion you "\
                            "trust, and get specific advice when issues arise.\nYou should "\
                            "use stress management techniques and always hope for the best. "\
                            "These situations arise in everyone's life and what matters the "\
                            "most is taking the right decision at such moments."
                    else:
                        if self.prev == "":
                            buddy_msg="It's ohk can you tell me something about your day... Did anything happen today that made you feel worried?\n"
                            self.prev="same"
                            self.one=False
                        else:
                            buddy_msg='It looks like you might be feeling comfortable talking '\
                                'about yourself. Could you share your feelings?\n'
                            self.one=False
                self.counter+=1
                self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
            elif self.counter==3:
                if not self.one:
                    print("Welcome to level 3")
                    keyword=[word for word in preprocess_string(message.lower()) if word in ('friend','work','education','school','college','family','studi','exam','fight')]
                    if len(keyword)>0:
                        buddy_msg = 'Will you please tell me in a bit more detail about it?'
                        self.one=True
                        # Replay the advice step now that a topic is known.
                        self.counter=2
                    else:
                        buddy_msg= 'I see. Among the thoughts occuring in your mind, which one upsets you the most and why?\n'
                    self.chat_history.text += '\n'+"Cafe Buddy: "+buddy_msg
        # Constrain the history label's text width (98% of label width).
        self.chat_history.text_size = (self.chat_history.width * 0.98, None)
class WindowManager(ScreenManager):
    """Root screen manager; screen transitions are wired up in the kv file."""
    pass
# Load the kv layout and register every screen with the manager; the app
# starts on the home screen.
kv=Builder.load_file('design.kv')
sm = WindowManager()
screens=[Home(name="home"), Questions(name="quest"), Report(name="rep"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point; the UI root is the module-level manager."""
    def build(self):
        return sm
if __name__=='__main__':
    CafeApp().run()
riti121/cafe | refs/heads/master | import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
from collections import Counter
import time
class Home(Screen):
    """Landing screen; its layout/behaviour is defined in the kv file."""
    pass
class Questions(Screen):
    """Screen running the 20-question, two-option personality quiz.

    Answers are scored into a four-letter MBTI-style code which is stored
    on Report.personality for the result screens.
    """
    # One question per line; the two answer options separated by 'SPLIT'.
    ques_path='Personality Test(base)\Questions.txt'
    # Four-letter code -> human-readable personality name.
    personalities={'isfj':'Defender','esfj':'Cousellor','istj':'Logistician','estp':'Entrepreneur','esfp':'Entertainer','istp':'Virtuoso','isfp':'Adventurer','entj':'Commander','entp':'Debator','intj':'Architect','intp':'Logician','enfj':'Protagonist','enfp':'Campaigner','infj':'Advocate','infp':'Mediator','estj':'Executive'}
    personality=''
    questions=[]
    question_1 = ObjectProperty(None)
    question_2 = ObjectProperty(None)
    # 1-based index of the question currently on screen.
    counter=1
    # Chosen option ('a'/'b') per question; NOTE: class attribute, so it is
    # shared (and mutated in place) across instances.
    answers=[0]*20
    # Questions are loaded once, at class-definition (import) time.
    with open(ques_path) as quest_file:
        questions=[r.split('SPLIT') for r in quest_file.readlines()]
    def personality_exam(self,answers):
        """Reduce the 20 answers to a four-letter code on Report.personality.

        Questions cycle through four axes, so every fourth answer scores the
        same axis; the majority option selects that axis' letter.
        """
        e,s,j,t=['e','i'],['s','n'],['j','p'],['t','f']
        e.extend([answers[r] for r in range(0,20,4)])
        s.extend([answers[r] for r in range(1,20,4)])
        t.extend([answers[r] for r in range(2,20,4)])
        j.extend([answers[r] for r in range(3,20,4)])
        personality=''
        for option in e,s,t,j:
            # option[:2] are the axis letters; the rest are 'a'/'b' votes.
            temp=Counter(option)
            personality+=option[0] if temp['a']>temp['b'] else option[1]
        Report.personality=personality
    def on_enter(self, *args):
        # Show the first question when the screen becomes active.
        self.question_1.text=self.questions[0][0]
        self.question_2.text=self.questions[0][1]
    def ask_question1(self):
        """Record option 'a' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='a'
            self.personality_exam(self.answers)
            self.counter=1
            # Jump to the report screen via the module-level manager `sm`.
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='a'
            self.counter+=1
    def ask_question2(self):
        """Record option 'b' for the current question, then advance or finish."""
        if(self.counter==20):
            self.answers[self.counter-1]='b'
            self.personality_exam(self.answers)
            self.counter=1
            sm.current = 'rep'
        else:
            self.question_1.text=self.questions[self.counter][0]
            self.question_2.text=self.questions[self.counter][1]
            self.answers[self.counter-1]='b'
            self.counter+=1
class Report(Screen):
    """Result screen: shows the personality name, its code and an image."""
    # Four-letter code written by Questions.personality_exam.
    personality=''
    def on_enter(self, *args):
        self.per.text=Questions.personalities[self.personality]+'\n'+'('+self.personality+')'
        # Per-personality assets live in a folder named after the code.
        self.image.source= Report.personality+'\INTRODUCTION\Image.png'
class Description(Screen):
    """Screen with the long-form description of the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        intro_path = Report.personality + '\INTRODUCTION\Introduction.txt'
        with open(intro_path) as handle:
            self.detail.text = handle.read()
class CareerOptions(Screen):
    """Screen listing suggested careers for the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        career_path = Report.personality + '\career.txt'
        with open(career_path) as handle:
            self.detail.text = handle.read()
class Strengths(Screen):
    """Screen listing the strengths of the resulting personality."""
    def on_enter(self, *args):
        self.persona.text = Questions.personalities[Report.personality]
        strengths_path = Report.personality + '\STRENGTHS\Strengths.txt'
        with open(strengths_path) as handle:
            self.detail.text = handle.read()
class CameraClick(Screen):
    """Placeholder camera screen (not implemented in this prototype)."""
    pass
class ChatWindow(Screen):
    """Placeholder chat screen (not implemented in this prototype)."""
    pass
class WindowManager(ScreenManager):
    """Root screen manager; screen transitions are wired up in the kv file."""
    pass
# Load the kv layout and register every screen with the manager; the app
# starts on the home screen.
kv=Builder.load_file('design_edit.kv')
sm = WindowManager()
screens=[Home(name="home"), Questions(name="quest"), Report(name="rep"), Description(name='description'), CareerOptions(name='career'), Strengths(name='strengths'), ChatWindow(name='chat'),CameraClick(name='camera')]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point; the UI root is the module-level manager."""
    def build(self):
        return sm
if __name__=='__main__':
    CafeApp().run()
riti121/cafe | refs/heads/master | import kivy
kivy.require('1.11.1')
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import ObjectProperty
import time
class Home(Screen):
    """Prototype screen experimenting with a label-text 'animation'."""
    def animation_begins(self):
        # NOTE(review): joining the split words with no separator strips all
        # whitespace out of the label text — confirm this is intended.
        textvalue=self.labelvalue.text.split()
        var=" "
        for i in textvalue:
            var+=i
        self.labelvalue.text=var
        # NOTE(review): time.sleep() blocks the Kivy main loop for 3 s;
        # Clock.schedule_once would be the non-blocking alternative.
        time.sleep(3)
class WindowManager(ScreenManager):
    """Root screen manager; screen transitions are wired up in the kv file."""
    pass
# Load the kv layout and register the single screen with the manager.
kv=Builder.load_file('designing.kv')
sm = WindowManager()
screens=[Home(name="home")]
for screen in screens:
    sm.add_widget(screen)
sm.current = "home"
class CafeApp(App):
    """Kivy application entry point; the UI root is the module-level manager."""
    def build(self):
        return sm
if __name__=='__main__':
    CafeApp().run()
chaositect/artificial_neural_network_examples | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 26 15:20:48 2021
@author: Grant Isaacs
"""
#IMPORT LIBRARIES------------------------------------------------------------->
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.preprocessing import LabelEncoder
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import confusion_matrix, accuracy_score
#PREPROCESS DATA-------------------------------------------------------------->
#load data
# NOTE(review): "_" is a placeholder path — point this at the training CSV
# before running; columns 3..-2 are used as features, the last as the label.
dataset = pd.read_csv("_")
X = dataset.iloc[:, 3:-1].values
y = dataset.iloc[:, -1].values

#check for missing values (element-wise comparison against None, summed)
print(sum(np.equal(X, None)))

#encode categorical variables
lencoder = LabelEncoder()
X[: , 2] = lencoder.fit_transform(X[:, 2])
# One-hot encode column 1; the encoded columns are moved to the front.
ctransform = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [1])], remainder='passthrough')
X = np.array(ctransform.fit_transform(X))

#split dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state=361981)

#scale the features (fit on the training set only to avoid leakage)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

#STRUCTURE THE ANN------------------------------------------------------------>
#initialize the neural network
neural_net = tf.keras.models.Sequential()

#create the input layer and first hidden layer to form a shallow learning model.
"""Layer quantity is determined by experimentation and expertise."""
neural_net.add(tf.keras.layers.Dense(units=6, activation='relu'))

#create the second hidden layer to form a deep learning model.
neural_net.add(tf.keras.layers.Dense(units=6, activation='relu'))

#add the output layer
"""Output units equals the output dimensions minus 1.
This model generates a probability between 0 and 1 (Sigmoid)"""
neural_net.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))

#TRAIN THE ANN---------------------------------------------------------------->
#compile the neural network
"""In this example the adam optimizer is used for stochastic gradient desecent.
The output is binary so binary cross entropy is selected for the loss function."""
neural_net.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

#train the neural network
"""Batch and Epoch arguments were chose based on previous training data.
Modify accordingly."""
neural_net.fit(X_train, y_train, batch_size=32, epochs=100)

#GENERATE PREDICTIONS--------------------------------------------------------->
#predict test set (threshold the sigmoid output at 0.5)
y_pred = neural_net.predict(X_test)
y_pred = (y_pred > 0.5)
y_test = y_test.reshape(len(y_test), 1)
y_pred = y_pred.reshape(len(y_pred), 1)
print(np.concatenate((y_pred, y_test), 1))

#build confusion matrix
cmatrix = confusion_matrix(y_test, y_pred)
print(cmatrix)
print(accuracy_score(y_test, y_pred))

#individual prediction
"""Apply the transform method to scale the variables to the same distribution as the training data."""
pred = neural_net.predict(scaler.transform([[1.0, 0.0, 0.0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]]))
print("Predicted Probabilty the Customer will leave: {}".format(pred))
pred = neural_net.predict(scaler.transform([[1.0, 0.0, 0.0, 600, 1, 40, 3, 60000, 2, 1, 1, 50000]])) > 0.5
print("Binary statement will the customer leave: {}".format(pred))
Farshad-Hasanpour/dataset-loading-class | refs/heads/master | import gzip
import json
import csv
class Dataset:
    """Loader for JSON-lines and CSV dataset files, optionally gzip-compressed."""

    def __init__(self, filename):
        # Path to the dataset file (plain or .gz).
        self.filename = filename

    def read_json(self, useful_keys=(), required_keys=(), is_gzip=False, encoding='utf8'):
        """
        :param useful_keys: (tuple): Keys to return for each dataset record. Pass empty to return all keys.
        :param required_keys: (tuple): Required keys for each record. If one of these keys does not exist, this function ignores the dataset record.
        :param is_gzip: (boolean): Whether the file is a compressed file or not.
        :param encoding: (string): The default is 'utf8'.
        :return: (list of dictionary): For each JSON record, return a dictionary inside a list.
        """
        dataset = []
        open_function = gzip.GzipFile if is_gzip else open
        with open_function(self.filename, 'rb') as file:
            for line in file:
                # Fix: json.loads() lost its `encoding` parameter in
                # Python 3.9 (TypeError); decode the raw bytes explicitly.
                data = json.loads(line.decode(encoding))
                # Skip the record when any required key is missing.
                # NOTE: falsy values (0, "", None) are also treated as
                # missing, matching the original behaviour.
                if any(not data.get(key) for key in required_keys):
                    continue
                keys = data.keys() if len(useful_keys) == 0 else useful_keys
                dataset.append({key: data.get(key) for key in keys})
        return dataset

    def read_csv(self, useful_keys=(), required_keys=(), delimiter=',', is_gzip=False, encoding='utf8'):
        """
        :param useful_keys: (tuple or string): Keys to return for each dataset record. Pass empty to return all keys.
        :param required_keys: (tuple): Required keys for each record. If one of these keys does not exist, this function ignores the dataset record.
        :param delimiter: (string): CSV delimiter
        :param is_gzip: (boolean): Whether the file is a compressed file or not.
        :param encoding: (string): The default is 'utf8'.
        :return: (list of list | list): For each CSV row, return a list inside another list and a list of headers.
        """
        dataset = []
        open_function = gzip.open if is_gzip else open
        with open_function(self.filename, mode='rt', encoding=encoding) as file:
            # Strip NUL bytes, which would make csv.reader raise.
            content = csv.reader((line.replace('\0', '') for line in file), delimiter=delimiter)
            # First row holds the column names.
            headers = next(content)
            # Translate key names into column indexes.
            if len(useful_keys) == 0:
                useful = [headers.index(key) for key in headers]
            else:
                useful = [headers.index(key) for key in useful_keys]
            required = [headers.index(key) for key in required_keys]
            for row in content:
                if not row:
                    continue
                # Skip the row when any required column is empty.
                if any(row[i] == '' for i in required):
                    continue
                dataset.append([row[index] for index in useful])
        return dataset, headers
| Python | 93 | 33.290321 | 142 | /Dataset.py | 0.673041 | 0.670846 |
hamzaalkharouf/House-Price-prediction | refs/heads/master | import pickle
import argparse
import numpy as np
#take model
#Calculate price from scikit
def path(list_data):
parser = argparse.ArgumentParser()
parser.add_argument("-path","--path",type = str)
args = parser.parse_args()
# './model.pickle'
loaded_model = pickle.load(open(args.path, 'rb'))
x = np.array(list_data).reshape(1,6)
result = loaded_model.predict(x)
if x.shape[0] == 1:
result = result[0]
return result
| Python | 18 | 24.555555 | 53 | /scikit_learn.py | 0.65 | 0.63913 |
hamzaalkharouf/House-Price-prediction | refs/heads/master | from flask import Flask,request
import scikit_learn
import Write_Csv
app = Flask(__name__)
#append data(from url) to list
def Data_append(x1,x2,x3,x4,x5,x6):
list_data=[]
list_data.append(x1)
list_data.append(x2)
list_data.append(x3)
list_data.append(x4)
list_data.append(x5)
list_data.append(x6)
return list_data
#route /
#take data from url then send them to scikit_learn of Calculate price from scikit
#return information
@app.route('/')
def hello_world():
transaction_date=float(request.args.get('transaction_date'))
house_age=float(request.args.get('house_age'))
distance_to_the__nearest_MRT_station=float(request.args.get('distance_to_the__nearest_MRT_station'))
number_of_convenience_stores=float(request.args.get('number_of_convenience_stores'))
latitude=float(request.args.get('latitude'))
longitude=float(request.args.get('longitude'))
list_data=[]
list_data=Data_append(transaction_date,house_age,distance_to_the__nearest_MRT_station,number_of_convenience_stores,latitude,longitude)
price=scikit_learn.path(list_data)
list_data.append(price)
Write_Csv.Write_Csv(list_data)
return '''<h3>
transaction date : {}<br>
house age= {}<br>
distance to the nearest MRT station= {}<br>
number of convenience stores= {}<br>
latitude= {}<br>
longitude= {}<br>
price ={}
</h3>'''.format(transaction_date,house_age,distance_to_the__nearest_MRT_station,number_of_convenience_stores,latitude,longitude,price)
#to run servier => py app.py -path ./model.pickle
if __name__ == '__main__':
app.run(port=5060,debug=False,use_reloader=False)
# http://127.0.0.1:5060/?transaction_date=2017.917&house_age=10&distance_to_the__nearest_MRT_station=306.59470&number_of_convenience_stores=15&latitude=24.98034&longitude=121.53951
| Python | 45 | 41.444443 | 180 | /app.py | 0.678534 | 0.646073 |
hamzaalkharouf/House-Price-prediction | refs/heads/master | import pandas as pd
import os
THIS_FOLDER = os.path.dirname(os.path.abspath(__file__))
my_file_estate = os.path.join(THIS_FOLDER,'csv\\Real estate.csv')
def Write_Csv(list_data):
df = pd.read_csv(my_file_estate)
file = open(my_file_estate,"a")
number=df['No'].values[-1]
number+=1
file.write(str(number)+",")
for i in list_data:
if i != list_data[6]:
file.write(str(i)+",")
else :file.write(str(i)+"\n")
file.close()
df.reset_index(drop = True,inplace=True)
| Python | 17 | 29.529411 | 65 | /Write_Csv.py | 0.60501 | 0.599229 |
rakshitshah-28/APSITSkills-Project | refs/heads/master | # Write a Python program to check whether a
# specified value is contained in a group of values.
# 3 -> [1, 5, 8, 3] : True -1 -> [1, 5, 8, 3] : False
import random
def check_in_group():
while True:
test_case = [1, 5, 8, 3]
print('\nEnter \'-1\' to QUIT.')
value = input('Enter - ')
try:
value = int(value)
except:
print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
continue
if value == -1:
print('\tTHANK YOU.\n\tRETURNING TO MAIN MENU.\n')
break
if value in test_case:
print('True')
break
else:
print('False')
continue
# in case needed.
def check_random():
while True:
test_case = list()
length = input('\nEnter Length of the test_case - ')
try:
length = int(length)
except:
print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
continue
for _ in range(length):
test_case.append(random.choice(range(10)))
break
# print(test_case)
while True:
print('\nEnter \'-1\' to QUIT.')
value = input('Enter - ')
try:
value = int(value)
except:
print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
continue
if value == -1:
print('\tTHANK YOU.\n\tRETURNING TO MAIN MENU.\n')
break
if value in test_case:
print('True')
break
else:
print('False')
continue
| Python | 59 | 25.59322 | 62 | /Part_5.py | 0.479924 | 0.467177 |
rakshitshah-28/APSITSkills-Project | refs/heads/master | # Menu-Driven program
import string
import random
# just for reference purposes.
from Part_1 import all_prime
from Part_2 import even_odd
from Part_3 import prime_composite
from Part_4 import vowel_consonant
from Part_5 import check_in_group
while True:
print('\nChoose your Option - ')
print('0. Exit')
print('1. Print Prime Numbers between 1 to 1000.')
print('2. To Find whether Number is ODD or EVEN.')
print('3. To Find whether Number is PRIME or COMPOSITE.')
print('4. To Find whether Alphabet is VOWEL or NOT.')
print('5. To Check specified Value n Group of Values')
option = input('Enter - ')
try:
option = int(option)
except:
print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
continue
if (option < 0 or option > 5):
print('\tINVALID CHOICE.\n\tTRY AGAIN.\n')
continue
if option == 0:
print('\n\tTHANK YOU FOR JOINING US!')
exit(-1)
elif option == 1:
all_prime()
elif option == 2:
even_odd()
elif option == 3:
prime_composite()
elif option == 4:
vowel_consonant()
elif option == 5:
check_in_group()
| Python | 44 | 25.75 | 61 | /CheckingWithProgram.py | 0.609176 | 0.587935 |
RafaelDiaz7/some-programs-and-exercises | refs/heads/main | # Realizar un programa que utilizando una estructura repetitiva (bucle) presente por pantalla
# los números del 1 al 10 separados por un guión.
# El resultado debe ser similar a esto:
# 1 - 2 - 3 - 4 - 5 - 6 - 7 - 8 - 9 - 10
for i in range(1,11):
print(i, end="-")
| Python | 9 | 29.333334 | 94 | /bucle_simple/bucle_simple.py | 0.655678 | 0.593407 |
RafaelDiaz7/some-programs-and-exercises | refs/heads/main | import string
def open_file():
fname = input('Type the filename: ')
fhandler = open(fname, encoding='utf-8')
return fhandler
def count_characters(filehandler, text):
# print('------Printin filehandler: ', filehandler)
char_count = dict()
for line in filehandler:
line = line.strip().lower()
line = line.translate(line.maketrans('', '', string.punctuation))
# print('--After doing strip each char, Character: ', line)
text = text + line
print('')
print('____________________________________________')
print('The text after concatenate lines: ', text)
print('|')
print('|___Type of the text variable: ', type(text))
print('____________________________________________')
# tratar de no hacer un for anidado aca, eso es lo que hay que mejorar de este codio
for character in text:
# print('')
# print('Char in text:', character)
if character.isalpha():
if character in char_count:
char_count[str(character)] += 1
else:
char_count[str(character)] = 1
# char_count[character] = char_count.get(character)
else:
continue
return char_count
def order_by_decreasing(counter):
inverse_counter_lst = list()
for element in counter:
inverse_counter_lst.append((counter[element], element))
inverse_counter_lst.sort(reverse=True)
for number, element in inverse_counter_lst:
print(f'{element} -> {number}')
first_text = ""
# order_by_decreasing(count_characters(open_file(), first_text))
print(count_characters(open_file(), first_text))
| Python | 54 | 30.074074 | 88 | /cantidad_letras/cantidad_letras.py | 0.570322 | 0.568534 |
alexeal90/X-Serv-13.6-Calculadora | refs/heads/master | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Alejandro Valeriano Fernandez - GITT
Ejercicio 13.6
Calculadora
"""
import sys
def suma(operando1, operando2):
try:
return int(operando1) + int(operando2)
except NameError:
print ("Invalid arguments")
def rest(operando1, operando2):
try:
return int(operando1) - int(operando2)
except NameError:
print ("Invalid arguments")
def mult(operando1, operando2):
try:
return int(operando1) * int(operando2)
except NameError:
print ("Invalid arguments")
def div(operando1, operando2):
try:
return float(operando1) / float(operando2)
except NameError:
print ("Invalid arguments")
if __name__ == "__main__":
if len(sys.argv) != 4:
print
sys.exit("Usage: $ python calculadora.py funcion operando1 operando2")
if sys.argv[1] == 'add':
print sys.argv[2] + ' mas ' + sys.argv[3] + ' = ' + str(suma (sys.argv[2], sys.argv[3]))
if sys.argv[1] == 'substract':
print sys.argv[2] + ' menos ' + sys.argv[3] + ' = ' + str(rest (sys.argv[2], sys.argv[3]))
if sys.argv[1] == 'multiply':
print sys.argv[2] + ' por ' + sys.argv[3] + ' = ' + str(mult (sys.argv[2], sys.argv[3]))
if sys.argv[1] == 'divide':
try:
print sys.argv[2] + ' entre ' + sys.argv[3] + ' = ' + str(div (sys.argv[2], sys.argv[3]))
except:
print 'error al dividir'
else:
print 'Las posibles operaciones son "add", "substract", "multiply" y "divide"'
| Python | 61 | 21.803278 | 92 | /calculadora.py | 0.626168 | 0.595255 |
bdflemin/scripts | refs/heads/master | from re import findall
from subprocess import check_output
from pwd import getpwall
d = {}
filesearch = open("/etc/login.defs","r")
for line in filesearch:
if findall("^UID_(MIN|MAX)",line):
a = line.strip().split()
d[str(a[0])] = str(a[1])
filesearch.close()
for p in getpwall():
if int(p[2]) >= int(d['UID_MIN']) and int(p[2]) <= int(d['UID_MAX']):
print p[0] | Python | 17 | 21 | 70 | /find_users.py | 0.635389 | 0.621984 |
bdflemin/scripts | refs/heads/master | #!/usr/bin/python
import pyrax, time, sys, multiprocessing, argparse, os, __builtin__
parser = argparse.ArgumentParser(description='Remove Cloud Files Fast')
parser.add_argument('--container', nargs='?', dest='cont', required=True, help="The Cloud Contain To Remove Objects From")
parser.add_argument('--username', nargs='?', dest='username', help="Your Cloud Username")
parser.add_argument('--password', nargs='?', dest='password', help="Your Cloud API Key")
parser.add_argument('--file', nargs='?', dest='file', help="Your Cloud API Key File")
parser.add_argument('--region', nargs='?', dest='region', help="Set the Cloud File region")
args = parser.parse_args()
def authenticate(username='', passwd='', path=''):
if username or passwd:
pyrax.set_credentials(username,passwd)
elif path:
pyrax.set_credential_file(os.path.expanduser(path))
else:
print "Authentication Failed... please use username/password or file to authenticate"
sys.exit()
def worker(num):
try:
global obj
print "Deleting:", obj[num].name
obj[num].delete()
#time.sleep(1 + random.random()*5)
#print num
except:
print "Unexpected error in worker:", sys.exc_info()
raise
def pooling(length):
try:
pool = multiprocessing.Pool(processes=20)
for num in xrange(length):
pool.apply_async(worker, [num])
#pool.apply(worker,[num])
pool.apply_async(time.sleep, 5)
pool.close()
pool.join()
except:
print "Unexpected error in pooling:", sys.exc_info()[0]
raise
if __name__ == "__main__":
authenticate(username=args.username,passwd=args.password,path=args.file)
cf = pyrax.connect_to_cloudfiles(region=args.region)
limit = 10000
marker = ""
obj = cf.get_container(args.cont).get_objects(limit=limit, marker=marker)
while obj:
try:
marker = obj.pop()
length = len(obj)
pooling(length)
obj = cf.get_container(args.cont).get_objects(limit=limit, marker=marker.name)
except:
print "Unexpected error:", sys.exc_info()[0]
raise | Python | 59 | 32.271187 | 122 | /cfdelete.py | 0.698777 | 0.692661 |
bdflemin/scripts | refs/heads/master | import sqlite3 as lite
import sys
import argparse
from time import strftime
from subprocess import check_output
def collect():
# Grab information from mtr
output = check_output(["mtr","-nr","-c5","50.56.142.146"])
date = strftime('%Y%m%d %H%M')
# split the data into an array and clean up array a bit
a = output.split("\n")
del a[0]
del a[-1]
# Connect to the sqlite3 server to place the information into it
con = lite.connect('/root/icmp/data.db')
cur = con.cursor()
# loop through the data and store information into sqlite
for i in a:
array = i.replace("%","").split()
del array[0]
cur.execute("insert into netreport values ('%s','%s',%0.1f,%i,%0.1f,%0.1f,%0.1f,%0.1f,%0.1f);" %
(str(date), str(array[0]), float(array[1]), int(array[2]), float(array[3]), float(array[4]), float(array[5]), float(array[6]), float(array[7]),))
con.commit()
if con:
con.close()
if __name__ == '__main__':
collect() | Python | 33 | 28.030304 | 149 | /mtr_stats.py | 0.628004 | 0.591432 |
bdflemin/scripts | refs/heads/master | import os, re
def filePop(top):
sizeList = []
#exclude = "^/proc.*|^/sys.*|^/boot.*|^/tmp.*|^/mnt.*"
exclude = "^/proc.*|^/sys.*|^/boot.*|/tmp.*|/home.*|/var.*|/data.*"
# Skip any files that are located in /proc, /sys or /boot
for root,dirs,files in os.walk(top):
if re.findall(exclude,root):
continue
for f in files:
fullpath = os.path.join(root,f)
if (os.path.isfile(fullpath) or os.path.isdir(fullpath)) and not os.path.islink(fullpath):
sizeList.append((os.path.getsize(fullpath),fullpath))
return sizeList
def fileSort(fileList,top=15):
sList = sorted(fileList, key=lambda a: a[0], reverse=True)
for i in xrange(0,15):
size = ((sList[i][0] / 1024) / 1024)
directory = sList[i][1]
print '%s MB --> %s' % (size,directory)
fileSort(filePop("/")) | Python | 23 | 37 | 102 | /findLargestFile.py | 0.570447 | 0.552119 |
bdflemin/scripts | refs/heads/master | #!/usr/bin/python
import pyinotify
class MyEventHandler(pyinotify.ProcessEvent):
def process_IN_CREATE(self, event):
print "CREATE event:", event.pathname
def process_IN_DELETE(self, event):
def main():
wm = pyinotify.WatchManager()
wm.add_watch('/home/brya5376/test'), pyinotify.ALL_EVENTS, rec=True)
if __name__ == '__main__':
main()
# http://www.saltycrane.com/blog/2010/04/monitoring-filesystem-python-and-pyinotify/ | Python | 16 | 26 | 84 | /pyinotify.py | 0.726218 | 0.703016 |
Maveric4/SudokuSolver | refs/heads/master | import numpy as np
from random import randint
from copy import deepcopy
import cv2
import utils
import grid
# Global variables
sudoku_grid = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 0, 9]]
counter = 0
solutions = []
recur_cnt = 0
def is_possible(y, x, n):
global sudoku_grid
for i in range(0, 9):
if sudoku_grid[y][i] == n:
return False
for j in range(0, 9):
if sudoku_grid[j][x] == n:
return False
x0 = (x//3)*3
y0 = (y//3)*3
for k in range(0, 3):
for l in range(0, 3):
if sudoku_grid[y0+k][x0+l] == n:
return False
return True
def solve_recursion():
global sudoku_grid, counter, solutions, recur_cnt
recur_cnt += 1
if recur_cnt > 10**5:
return
for y in range(9):
for x in range(9):
if sudoku_grid[y][x] == 0:
for n in range(1, 10):
if is_possible(y, x, n):
sudoku_grid[y][x] = n
solve_recursion()
sudoku_grid[y][x] = 0
return
counter += 1
solutions.append(deepcopy(sudoku_grid))
def main():
global sudoku_grid, counter, solutions
model = utils.load_mnist_model()
img = cv2.imread("./SudokuOnline/puzzle1.jpg")
sudoku_grid = grid.recognize_grid(model, img)
solve_recursion()
print("Number or recurrent function invocations: {}".format(recur_cnt))
print("There are {} possible solutions".format(counter))
if len(solutions) > 0:
print("Random solution:")
solved_grid = solutions[randint(0, counter - 1)]
print(np.matrix(solved_grid))
img_solved = grid.draw_solved_grid(model, img, solved_grid)
cv2.imwrite("./results/result1.jpg", img_solved)
cv2.imshow("Solved sudoku", img_solved)
cv2.waitKey(0)
if __name__ == "__main__":
main()
| Python | 80 | 26.8125 | 75 | /sudoku_solver.py | 0.503145 | 0.448338 |
Maveric4/SudokuSolver | refs/heads/master | import tensorflow as tf
import datetime
import os
import numpy as np
from tensorflow.python.keras.callbacks import TensorBoard
mnist = tf.keras.datasets.mnist
(training_images, training_labels), (test_images, test_labels) = mnist.load_data()
# print(training_images.shape)
# print(test_images.shape)
training_images = training_images / 255.0
test_images = test_images / 255.0
training_images = training_images.reshape(training_images.shape[0], 28, 28, 1)
test_images = test_images.reshape(test_images.shape[0], 28, 28, 1)
test_images, validation_images = np.split(test_images, [int(test_images.shape[0]*0.4)])
test_labels, validation_labels = np.split(test_labels, [int(test_labels.shape[0]*0.4)])
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
## Designing callbacks
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
print("\nReached {} epoch".format(epoch + 1))
if logs.get('accuracy') > 0.997:
print("Reached 99.99% accuracy so cancelling training!")
self.model.stop_training = True
log_dir = os.path.join(
"logs",
"fit",
datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
tensorboard_callback = TensorBoard(log_dir=log_dir, histogram_freq=1)
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model.fit(training_images,
training_labels,
validation_data=(validation_images, validation_labels),
epochs=20,
callbacks=[myCallback(), tensorboard_callback],
verbose=2)
# model.summary()
metrics = model.evaluate(test_images, test_labels)
print("[Loss, Accuracy]")
print(metrics)
model.save("./models/train_mnist1_model3.h5")
| Python | 58 | 34.637932 | 93 | /MNISTmodel/train_mnist1.py | 0.691973 | 0.659574 |
Maveric4/SudokuSolver | refs/heads/master | import numpy as np
from random import randint
from copy import deepcopy
import cv2
import utils
import grid
import paho.mqtt.client as mqtt
import io
from PIL import Image
# Global variables
BROKER_ADRESS = "192.168.9.201"
sudoku_grid = [[5, 3, 0, 0, 7, 0, 0, 0, 0],
[6, 0, 0, 1, 9, 5, 0, 0, 0],
[0, 9, 8, 0, 0, 0, 0, 6, 0],
[8, 0, 0, 0, 6, 0, 0, 0, 3],
[4, 0, 0, 8, 0, 3, 0, 0, 1],
[7, 0, 0, 0, 2, 0, 0, 0, 6],
[0, 6, 0, 0, 0, 0, 2, 8, 0],
[0, 0, 0, 4, 1, 9, 0, 0, 5],
[0, 0, 0, 0, 8, 0, 0, 0, 9]]
counter = 0
solutions = []
recur_cnt = 0
IMG_NAME = 'puzzle1.jpg'
def on_connect(client, userdata, flags, rc):
print("Connected to broker with result code " + str(rc))
client.subscribe("sudoku/#")
def on_message(client, userdata, msg):
global counter
counter = 0
if msg.topic == "sudoku/photo":
try:
stream = io.BytesIO(msg.payload)
open_cv_image = np.array(Image.open(stream).convert('RGB'))
# Convert RGB to BGR
open_cv_image = open_cv_image[:, :, ::-1].copy()
cv2.imwrite('./mqtt_com/' + IMG_NAME, open_cv_image)
except Exception as e:
print("Exception: ")
print(e)
solve_sudoku()
send_solution(client)
if msg.payload.decode() == "End":
print("Okey! I'm disconnecting :)")
client.disconnect()
def send_message(client, topic, msg):
client.publish(topic, msg)
def is_possible(y, x, n):
global sudoku_grid
for i in range(0, 9):
if sudoku_grid[y][i] == n:
return False
for j in range(0, 9):
if sudoku_grid[j][x] == n:
return False
x0 = (x//3)*3
y0 = (y//3)*3
for k in range(0, 3):
for l in range(0, 3):
if sudoku_grid[y0+k][x0+l] == n:
return False
return True
def solve_recursion():
global sudoku_grid, counter, solutions, recur_cnt
recur_cnt += 1
if recur_cnt > 10**5:
return
for y in range(9):
for x in range(9):
if sudoku_grid[y][x] == 0:
for n in range(1, 10):
if is_possible(y, x, n):
sudoku_grid[y][x] = n
solve_recursion()
sudoku_grid[y][x] = 0
return
counter += 1
solutions.append(deepcopy(sudoku_grid))
def solve_sudoku():
global sudoku_grid, counter, solutions
model = utils.load_mnist_model()
img = cv2.imread("./mqtt_com/" + IMG_NAME)
sudoku_grid = grid.recognize_grid(model, img)
solve_recursion()
print("Number or recurrent function invocations: {}".format(recur_cnt))
print("There are {} possible solutions".format(counter))
if len(solutions) > 0:
print("Random solution:")
solved_grid = solutions[randint(0, counter - 1)]
print(np.matrix(solved_grid))
img_solved = grid.draw_solved_grid(model, img, solved_grid)
cv2.imwrite("./results/" + IMG_NAME, img_solved)
# cv2.imshow("Solved sudoku", img_solved)
# cv2.waitKey(0)
def send_solution(client):
global solutions, counter
with open("./results/" + IMG_NAME, "rb") as f:
fileContent = f.read()
byteArrayPhoto = bytearray(fileContent)
client.publish("sudoku/solution/photo", byteArrayPhoto)
# client.publish("sudoku/solution/grid", str(solutions[randint(0, counter - 1)]))
def main():
client = mqtt.Client()
client.connect(BROKER_ADRESS, 1883, 60)
client.on_connect = on_connect
client.on_message = on_message
client.loop_forever()
if __name__ == "__main__":
main()
| Python | 130 | 27.692308 | 85 | /solve_sudoku_from_app.py | 0.547306 | 0.509247 |
Maveric4/SudokuSolver | refs/heads/master | # import tensorflow as tf
import cv2
import sys
sys.path.append("..")
import utils
import numpy as np
model_path = "./models/train_mnist1_model3.h5"
img_path = "../img/seven.png"
# img_path = "../img/one.png"
# img_path = "../img/six.png"
mnist_model = utils.load_model(model_path)
## Way 1
print("Way 1")
digit_img = utils.standarize_digit_img_to_model_input(img_path, 28)
bin_digit_img = utils.binarize_img(digit_img)
img = utils.prepare_to_predict(bin_digit_img)
cv2.imshow("Digit", digit_img)
cv2.imshow("Binary digit", bin_digit_img)
cv2.waitKey(50)
prob_predictions = mnist_model.predict(img)
prediction = [(np.where(item == np.amax(item)))[0][0] for item in prob_predictions]
print("Prediction: {}".format(prediction[0]))
## Way 2
print("Way 2")
prediction = utils.predict_digit(mnist_model, img_path)
print("Prediction: {}".format(prediction))
| Python | 33 | 25.030304 | 83 | /MNISTmodel/test_mnist.py | 0.705471 | 0.684517 |
Maveric4/SudokuSolver | refs/heads/master | import cv2
from copy import deepcopy
import numpy as np
import utils
RESCALE = 3
def find_cell_param(joints):
# Set up the detector with default parameters.
params = cv2.SimpleBlobDetector_Params()
# filter by area
params.filterByArea = True
params.minArea = 1
params.maxArea = 50
detector = cv2.SimpleBlobDetector_create(params)
# Detect blobs
keypoints = detector.detect(~joints)
sorted_keypoints = sorted(keypoints, key=lambda x: (x.pt[0], x.pt[1]))
min_keypoint = sorted_keypoints[0]
max_keypoint = sorted_keypoints[-1]
# for it, keypoint in enumerate(keypoints):
# img_contours = deepcopy(img)
# Draw detected blobs as red circles.
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS ensures the size of the circle corresponds to the size of blob
# im_with_keypoints = cv2.drawKeypoints(img_contours, [min_keypoint, max_keypoint], np.array([]), (0, 0, 255),
# cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
# cv2.imshow("Keypoints", im_with_keypoints)
# cv2.waitKey(0)
return (max_keypoint.pt[0] - min_keypoint.pt[0]) / 7, (max_keypoint.pt[1] - min_keypoint.pt[1]) / 7, min_keypoint.pt, max_keypoint.pt
def get_joints(img):
img = cv2.resize(img, (int(img.shape[1]/RESCALE), int(img.shape[0]/RESCALE)))
# retval = cv2.getPerspectiveTransform(img) TO DO https://blog.ayoungprogrammer.com/2013/03/tutorial-creating-multiple-choice.html/
img_gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
bin_img = cv2.adaptiveThreshold(cv2.bitwise_not(img_gray), 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY, 15, -2)
# cv2.imshow("Bin: ", bin_img)
# cv2.waitKey(0)
scale = 20
horizontal_size = bin_img.shape[0] // scale
horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
img_eroded_horizontal = cv2.erode(bin_img, horizontal_structure, anchor=(-1, -1))
img_dilated_horizontal = cv2.erode(img_eroded_horizontal, horizontal_structure, anchor=(-1, -1))
vertical_size = bin_img.shape[1] // scale
vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
img_eroded_vertical = cv2.erode(bin_img, vertical_structure, anchor=(-1, -1))
img_dilated_vertical = cv2.erode(img_eroded_vertical, vertical_structure, anchor=(-1, -1))
# mask = img_dilated_vertical + img_dilated_horizontal
joints = cv2.bitwise_and(img_dilated_horizontal, img_dilated_vertical)
# cv2.imshow("joints: ", joints)
# cv2.waitKey(0)
return bin_img, joints
def recognize_grid(model, img):
bin_img, joints = get_joints(img)
cell_height, cell_width, min_pt, max_pt = find_cell_param(joints)
grid = []
for x in range(-1, 8):
row = []
for y in range(-1, 8):
roi = bin_img[int(min_pt[1]+cell_width*x):int(min_pt[1]+cell_width*(x+1)),
int(min_pt[0]+cell_height*y):int(min_pt[0]+cell_height*(y+1))]
alpha = 0.1
roi = roi[int(roi.shape[1]*alpha):int(roi.shape[1]*(1-alpha)), int(roi.shape[0]*alpha):int(roi.shape[0]*(1-alpha))]
row.append(utils.predict_digit(model, roi))
# cv2.imshow("ROI: ", roi)
# cv2.waitKey(0)
grid.append(row)
return grid
def draw_solved_grid(model, img, solved_sudoku):
solved_img = deepcopy(cv2.resize(img, (int(img.shape[1] / RESCALE), int(img.shape[0] / RESCALE))))
bin_img, joints = get_joints(img)
cell_height, cell_width, min_pt, max_pt = find_cell_param(joints)
for x in range(-1, 8):
for y in range(-1, 8):
roi = bin_img[int(min_pt[1]+cell_width*x):int(min_pt[1]+cell_width*(x+1)),
int(min_pt[0]+cell_height*y):int(min_pt[0]+cell_height*(y+1))]
alpha = 0.1
roi = roi[int(roi.shape[1]*alpha):int(roi.shape[1]*(1-alpha)), int(roi.shape[0]*alpha):int(roi.shape[0]*(1-alpha))]
if utils.predict_digit(model, roi) == 0:
pt = (int((min_pt[0] + cell_height * y + min_pt[0] + cell_height * (y + 1))/2) - 5, int((min_pt[1] + cell_width * x + min_pt[1] + cell_width * (x + 1))/2)+8)
cv2.putText(solved_img, str(solved_sudoku[x+1][y+1]), pt, cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
return solved_img
def main():
model = utils.load_mnist_model()
img = cv2.imread("./SudokuOnline/puzzle1.jpg")
sudoku_grid = recognize_grid(model, img)
print(np.matrix(sudoku_grid))
img = cv2.resize(img, (int(img.shape[1]/RESCALE), int(img.shape[0]/RESCALE)))
cv2.imshow("Img: ", img)
# cv2.imshow("Gray: ", img_gray)
# cv2.imshow("Bin: ", bin_img)
# cv2.imshow("Dilated horizontal: ", img_dilated_horizontal)
# cv2.imshow("Dilated vertical: ", img_dilated_vertical)
# cv2.imshow("Joints: ", joints)
# cv2.imshow("Mask: ", mask)
cv2.waitKey(0)
if __name__ == "__main__":
main()
| Python | 110 | 43.836365 | 173 | /grid.py | 0.625912 | 0.593471 |
Maveric4/SudokuSolver | refs/heads/master | import cv2
import numpy as np
import tensorflow
def standarize_digit_img_to_model_input(img, size):
if isinstance(img, str):
img = cv2.imread(img)
img_resized = cv2.resize(img, (size, size))
return img_resized
def binarize_img(img):
gray_img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
blur = cv2.GaussianBlur(gray_img, (5, 5), 0)
ret, th = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
return cv2.bitwise_not(th)
def prepare_to_predict(img):
return img.reshape(1, 28, 28, 1) / 255.0
def predict_digit(model, img):
digit_img = standarize_digit_img_to_model_input(img, 28)
if len(img.shape) == 3:
bin_digit_img = binarize_img(digit_img)
else:
bin_digit_img = digit_img
img = prepare_to_predict(bin_digit_img)
prob_predictions = model.predict(img)
if np.any(prob_predictions > 0.7):
prediction = [(np.where(item == np.amax(item)))[0][0] for item in prob_predictions]
return prediction[0]
else:
return 0
def load_model(model_path):
return tensorflow.keras.models.load_model(model_path)
def load_mnist_model():
model_path = "./MNISTmodel/models/train_mnist1_model3.h5"
return tensorflow.keras.models.load_model(model_path)
| Python | 45 | 27.133333 | 91 | /utils.py | 0.661405 | 0.629834 |
bernarducs/mei | refs/heads/master | from selenium import webdriver
def config(path_folder: str, headless: bool):
fp = webdriver.FirefoxProfile()
fp.set_preference("browser.download.folderList", 2)
fp.set_preference("browser.download.manager.showWhenStarting", False)
fp.set_preference("browser.download.dir", path_folder)
fp.set_preference("browser.helperApps.neverAsk.saveToDisk", "application/csv")
fp.set_preference("dom.disable_beforeunload", True)
fp.set_preference("browser.download.manager.closeWhenDone", True)
options = webdriver.FirefoxOptions()
if headless:
options.add_argument('-headless')
driver = webdriver.Firefox(fp, options=options)
return driver
| Python | 19 | 35.105263 | 82 | /init.py | 0.733236 | 0.731778 |
bernarducs/mei | refs/heads/master | import os
import time
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.expected_conditions import presence_of_element_located
from selenium.webdriver.support.wait import WebDriverWait
from helpers import print_timestamp
class Mei:
def __init__(self, driver, files_path, uf):
self.driver = driver
self.files_path = os.path.join(os.getcwd(), files_path)
# print(self.files_path)
self.uf = uf
def _retorna_xpath(self, driver, timeout, freq, xpath):
wbw = WebDriverWait(driver=driver,
timeout=timeout,
poll_frequency=freq)
wbw.until(presence_of_element_located((By.XPATH, xpath)),
"Elemento não encontrado.")
xpath = driver.find_element_by_xpath(xpath)
return xpath
def retorna_tabela(self, xpath_btn_consulta, xpath_tab_completa):
time.sleep(2)
print('Extraindo tabela.', print_timestamp())
tentativas = [1, 2, 3]
for i in tentativas:
print(f"Tentativa {i} de 3...")
self.driver.find_element_by_xpath(xpath_btn_consulta).click()
try:
self._retorna_xpath(self.driver, 150, 5, xpath_tab_completa)
print('Tabela carregada.', print_timestamp())
return True
except TimeoutException:
print('Tabela não foi carregada.')
return False
def del_arquivos_inuteis(self):
files_path = self.files_path
for file in os.listdir(files_path):
if file[:13] == 'relatorio_mei':
os.remove(os.path.join(files_path, file))
def renomeia_arquivo(self):
files_path = self.files_path
uf = self.uf
file = r'relatorio_mei.csv'
if file in os.listdir(files_path):
old_file = os.path.join(files_path, file)
new_file = self.nome_arquivo(uf)
new_file = os.path.join(files_path, new_file)
try:
os.rename(old_file, new_file)
print(f"Arquivo renomeado para {new_file} " + print_timestamp())
except FileExistsError:
print("Arquivo já existe.")
def verifica_arquivo(self):
files_path = self.files_path
if not os.path.exists(files_path):
os.mkdir(files_path)
print(f"Arquivos baixados ficarão na pasta {files_path}.")
uf = self.uf
name = self.nome_arquivo(uf)
if name in os.listdir(files_path):
return name
else:
return False
def nome_arquivo(self, uf):
data = print_timestamp(now=False)
return f"{uf}_cnae_e_municipios_{data}.csv"
def exporta_csv(self):
driver = self.driver
xpath_btn_exportar = '//*[@id="form:botaoExportarCsv"]'
driver.find_element_by_xpath(xpath_btn_exportar).click()
time.sleep(10)
print('Download concluído.', print_timestamp())
def abre_browser(self):
url = 'http://www22.receita.fazenda.gov.br/inscricaomei/private/pages/relatorios/opcoesRelatorio.jsf#'
xpath = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/p'
while True:
driver = self.driver
try:
driver.get(url)
print('Browser iniciado. ' + print_timestamp())
print('Extraindo ' + self.uf + '...')
self._retorna_xpath(driver, 15, 5, xpath)
break
except TimeoutException as e:
driver.quit()
print(e)
def carrega_pagina_relatorio(self, xpath_page):
driver = self.driver
page = driver.find_element_by_xpath(xpath_page)
page.click()
def uf_listbox(self, xpath_listbox):
time.sleep(5)
driver = self.driver
uf = self.uf
el = driver.find_element_by_xpath(xpath_listbox)
for option in el.find_elements_by_tag_name('option'):
if option.text == uf:
option.click()
break
class MeiCnaeMunicipio(Mei):
    """Scraper for the 'MEI per CNAE and municipality' report.

    The class attributes are XPaths for the report-page widgets: the menu
    entry, the UF listbox, the two municipality listboxes and the
    insert/consult buttons.
    """
    xpath_page = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/ul/li[6]/a'
    xpath_listbox = '//*[@id="form:uf"]'
    xpath_municipios = '//*[@id="form:listaMunicipiosUF"]'
    xpath_relatorio = '//*[@id="form:listaMunicipiosRelatorio"]'
    xpath_btn_inserir = '//*[@id="form:btnInserir"]'
    xpath_btn_consulta = '//*[@id="form:botaoConsultar"]'
    xpath_tab_completa = '//*[@id="form:j_id62"]'
    def __init__(self, driver, files_path, uf):
        super().__init__(driver, files_path, uf)
    def verifica_listbox_municipios(self):
        """Move every municipality of the UF into the report listbox.

        Tries up to three times; returns True on success, False when the
        first listbox never populates.
        """
        driver = self.driver
        for tries in [1, 2, 3]:
            print(f"Carregando municípios. Tentativa {tries}/3.", print_timestamp())
            time.sleep(5)
            # check that the first listbox (the UF's municipalities) is populated;
            # DF is special-cased: its only entry is 'BRASILIA'
            cities = driver.find_element_by_xpath(self.xpath_municipios)
            n_cities = len(cities.text.split('\n'))
            if n_cities > 1 or cities.text == 'BRASILIA':
                # select all entries (first option + SHIFT+END) and push them across
                cities.find_elements_by_tag_name('option')[0].click()
                cities.send_keys(Keys.SHIFT, Keys.END)
                driver.find_element_by_xpath(self.xpath_btn_inserir).click()
                time.sleep(5)
                # check that the second (report) listbox is now populated
                rel = driver.find_element_by_xpath(self.xpath_relatorio)
                n_rel = len(rel.text.split('\n'))
                if n_rel > 1 or rel.text == 'BRASILIA':
                    print("Municipíos carregados.")
                    break
            # first listbox still empty on the last attempt: give up
            if n_cities <= 1 and tries == 3:
                print("Não foi possível carregar os municípios.")
                return False
        return True
class MeiCnaeSexoUF(Mei):
    """Scraper for the 'MEI per CNAE and sex' report (one file per UF).

    Differs from the base class only in the page XPaths and the output
    filename pattern. Construction is inherited unchanged from ``Mei``;
    the previous ``__init__`` only forwarded to ``super()`` verbatim and
    was removed as redundant.
    """
    xpath_page = '/html/body/table/tbody/tr[2]/td/form/div/div/div[1]/ul/li[7]/a'
    xpath_listbox = '//*[@id="form:uf"]'
    xpath_municipios = '//*[@id="form:municipioUF"]'
    xpath_btn_consulta = '//*[@id="form:botaoConsultar"]'
    xpath_tab_completa = '//*[@id="form:botaoExportarCsv"]'

    def nome_arquivo(self, uf):
        """Return '<UF>_cnae_e_sexo_<YYYYMMDD>.csv' for today's date."""
        data = print_timestamp(now=False)
        return f"{uf}_cnae_e_sexo_{data}.csv"
| Python | 169 | 37.343197 | 110 | /mei.py | 0.576235 | 0.569753 |
bernarducs/mei | refs/heads/master | import os
import fire
from selenium.common.exceptions import NoSuchElementException, \
WebDriverException, NoSuchWindowException
from init import config
from mei import MeiCnaeMunicipio, MeiCnaeSexoUF
from helpers import retorna_ufs
def ufs_por_municipio_cnae(pasta="arquivos", invisivel=True):
    """Run the per-municipality extraction for every UF in the list file."""
    for estado in retorna_ufs():
        uf_por_municipio_cnae(uf=estado, pasta=pasta, invisivel=invisivel)
def uf_por_municipio_cnae(uf="PERNAMBUCO", pasta="arquivos", invisivel=True):
    """Download the 'CNAE per municipality' CSV for a single UF.

    Skips the download when today's file already exists. The webdriver is
    now shut down on every path — previously the skip path ("file exists")
    created a browser and never quit it, leaking the process.
    """
    path_file = os.path.join(os.getcwd(), pasta)
    driver = config(path_file, headless=invisivel)
    mei = MeiCnaeMunicipio(driver, path_file, uf)
    file = mei.verifica_arquivo()
    if file:
        # Nothing to do, but release the browser we just started.
        driver.quit()
        print(f"O arquivo {file} já existe.")
        return
    mei.del_arquivos_inuteis()
    try:
        mei.abre_browser()
        mei.carrega_pagina_relatorio(mei.xpath_page)
        mei.uf_listbox(mei.xpath_listbox)
        checkbox = mei.verifica_listbox_municipios()
        if checkbox:
            table = mei.retorna_tabela(mei.xpath_btn_consulta,
                                       mei.xpath_tab_completa)
            if table:
                mei.exporta_csv()
                mei.renomeia_arquivo()
            else:
                print("Não foi possível exportar o arquivo.")
        else:
            print("Não foi possível exportar o arquivo.")
        driver.quit()
    except (NoSuchElementException, WebDriverException,
            NoSuchWindowException) as e:
        print(e)
        driver.quit()
        print("Não foi possível exportar o arquivo.")
def ufs_por_sexo_cnae(pasta="arquivos", invisivel=True):
    """Run the per-sex extraction for every UF in the list file."""
    for estado in retorna_ufs():
        uf_por_sexo_cnae(uf=estado, pasta=pasta, invisivel=invisivel)
def uf_por_sexo_cnae(uf="PERNAMBUCO", pasta="arquivos", invisivel=True):
    """Download the 'CNAE per sex' CSV for a single UF.

    Skips the download when today's file already exists. The webdriver is
    now shut down on every path — previously the skip path ("file exists")
    created a browser and never quit it, leaking the process.
    """
    path_file = os.path.join(os.getcwd(), pasta)
    driver = config(path_file, headless=invisivel)
    mei = MeiCnaeSexoUF(driver, path_file, uf)
    file = mei.verifica_arquivo()
    if file:
        driver.quit()
        print(f"O arquivo {file} já existe.")
        return
    mei.del_arquivos_inuteis()
    try:
        mei.abre_browser()
        mei.carrega_pagina_relatorio(mei.xpath_page)
        mei.uf_listbox(mei.xpath_listbox)
        table = mei.retorna_tabela(mei.xpath_btn_consulta,
                                   mei.xpath_tab_completa)
        if table:
            mei.exporta_csv()
            mei.renomeia_arquivo()
        else:
            print("Não foi possível exportar o arquivo.")
        driver.quit()
    except (NoSuchElementException, WebDriverException,
            NoSuchWindowException) as e:
        print(e)
        driver.quit()
        print("Não foi possível exportar o arquivo.")
if __name__ == '__main__':
fire.Fire()
| Python | 85 | 33.411766 | 77 | /bot.py | 0.586667 | 0.586667 |
bernarducs/mei | refs/heads/master | import time
def print_timestamp(now=True):
    """Return the current moment as text (despite the name, prints nothing).

    With ``now=True`` -> 'D/M/YYYY H:M:S' without zero padding;
    with ``now=False`` -> zero-padded 'YYYYMMDD', suitable for filenames.
    """
    t = time.localtime(time.time())
    if now:
        return '{}/{}/{} {}:{}:{}'.format(
            t.tm_mday, t.tm_mon, t.tm_year,
            t.tm_hour, t.tm_min, t.tm_sec)
    return '{:04d}{:02d}{:02d}'.format(t.tm_year, t.tm_mon, t.tm_mday)
def retorna_ufs():
    """Read 'lista de uf.txt' (latin-1) and return the UF names, one per line.

    Uses rstrip('\\n') instead of slicing off the last character, so a file
    without a trailing newline no longer loses the final letter of the last
    UF; empty lines are dropped.
    """
    with open('lista de uf.txt', 'r', encoding='latin-1') as f:
        lines = [line.rstrip('\n') for line in f]
    return [uf for uf in lines if uf]
| Python | 18 | 33.222221 | 103 | /helpers.py | 0.584416 | 0.571429 |
dleonard203/cs50_01 | refs/heads/master | from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import csv
database_url = "postgres://ioovleludzhxle:a966a7d36f9e61edd437415d538afd38b89ab723d71177647d3766c32e0b2106@ec2-54-221-243-211.compute-1.amazonaws.com:5432/d3a4ekarkutr2s"
engine = create_engine(database_url)
db = scoped_session(sessionmaker(bind=engine))
def import_books():
    """Create the 'books' table and bulk-load every row of books.csv.

    The CSV's first row is a header and is skipped. The file handle is now
    closed via a context manager (it previously leaked).
    """
    with open('books.csv', 'r') as csv_file:
        rows = csv.reader(csv_file, delimiter=',')
        db.execute("""CREATE TABLE books (
                id SERIAL PRIMARY KEY,
                isbn varchar NOT NULL,
                title varchar NOT NULL,
                author varchar NOT NULL,
                year INTEGER NOT NULL)""")
        next(rows, None)  # skip the header row
        for row in rows:
            db.execute("INSERT INTO books (isbn, title, author, year) VALUES (:isbn, :title, :author, :year)",
                       {"isbn": row[0], "title": row[1], "author": row[2], "year": int(row[3])})
    db.commit()


if __name__ == '__main__':
    import_books()
dleonard203/cs50_01 | refs/heads/master | import os
from flask import Flask, session, render_template, request, url_for, redirect, jsonify
from flask_session import Session
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import requests
import datetime
import json
#res=requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "e3j4VgqHagE14fcn1XjkXg","isbns": "9781632168146" })
#database_url = "postgres://ioovleludzhxle:a966a7d36f9e61edd437415d538afd38b89ab723d71177647d3766c32e0b2106@ec2-54-221-243-211.compute-1.amazonaws.com:5432/d3a4ekarkutr2s"
app = Flask(__name__)
# Check for environment variable
# if not os.getenv("DATABASE_URL"):
# raise RuntimeError("DATABASE_URL is not set")
# Configure session to use filesystem
app.config["SESSION_PERMANENT"] = False
app.config["SESSION_TYPE"] = "filesystem"
Session(app)
# Set up database
engine = create_engine(os.getenv("DATABASE_URL"))
#engine = create_engine(database_url)
db = scoped_session(sessionmaker(bind=engine))
def assert_login():
    """Decorator factory: restrict a view to logged-in sessions.

    Anonymous visitors get a message stashed in the session and are
    redirected to the landing page.
    """
    def wrap(f):
        def wrapped_f(*args, **kwargs):
            if is_logged_in():
                return f(*args, **kwargs)
            session['messages'] = "Please login to view that page"
            return redirect(url_for('index'))
        # Flask needs distinct endpoint names, so keep the wrapped view's name.
        wrapped_f.__name__ = f.__name__
        return wrapped_f
    return wrap
@app.route("/", methods = ["GET", "POST"])
def index(msg=''):
    """Landing page: shows one-shot session messages and handles login POSTs.

    *msg* is only supplied when another view calls this function directly
    to surface an error on the landing page.
    """
    if 'messages' in session:
        # Flash-style message left by assert_login(); show it once, then drop it.
        error_message = session['messages']
        session.pop('messages')
        return render_template('landing.html', msg=error_message)
    if msg != '':
        return render_template("landing.html", msg=msg)
    if request.method == "POST":
        return try_login(request.form)
    else:
        return render_template("landing.html")
@app.route("/register", methods=["GET", "POST"])
def register():
    """Render the signup form; on POST, attempt to create the account."""
    if request.method != "POST":
        return render_template("register.html")
    return account_creation_handler(request.form)
@app.route("/success")
def success(username=None):
    """Render the account-created page for *username*.

    *username* now defaults to None: the bare /success route passes no
    arguments, so a direct visit previously raised a TypeError. Views that
    call success(username) directly are unaffected.
    """
    return render_template("success.html", username=username)
@app.route("/logout")
@assert_login()
def logout():
    """End the session and show a goodbye page addressed to the user."""
    name = session['username']
    session.pop('username')
    return render_template('goodbye.html', name=name)
@app.route('/welcome', methods=["GET"])
@assert_login()
def welcome():
    """Post-login landing page; greets the current session's user."""
    return render_template('welcome.html', username=session['username'])
def search(req):
    """Return all books whose isbn/title/author contain the form's filters.

    Matching is substring-based and case-insensitive; empty form fields
    match everything.
    """
    title = req.form['title'].upper()
    isbn = req.form['isbn'].upper()
    author = req.form['author'].upper()
    return [
        book for book in all_books()
        if isbn in book[1].upper()
        and title in book[2].upper()
        and author in book[3].upper()
    ]
@app.route('/results', methods=["POST", "GET"])
@assert_login()
def results():
    """Show search matches, or an apology when nothing matches."""
    matches = search(request)
    if not matches:
        return render_template('results.html', msg = 'Sorry, no books meeting that criteria are available')
    return render_template('results.html', books = matches)
@app.route('/book/<string:isbn>', methods=["POST", "GET"])
@assert_login()
def display_results(isbn):
    """Book detail page: local reviews plus Goodreads aggregate stats.

    GET renders the page; POST submits a review (at most one per user per
    book).
    """
    book = get_book_by_isbn(isbn)
    reviews = get_reviews_by_isbn(isbn)
    goodreads = goodreads_res(isbn)
    if goodreads.status_code == 200:
        content = json.loads(goodreads.content)
        rating = content['books'][0]['average_rating']
        review_count = content['books'][0]['reviews_count']
    else:
        # Goodreads unreachable: degrade gracefully instead of failing.
        rating = 'N/A'
        review_count = 'N/A'
    if request.method == "GET":
        return render_template('book_details.html', book = book, reviews=reviews, rating=rating, review_count=review_count)
    else:
        username = session['username']
        if user_reviewed(username, isbn):
            msg = 'Sorry, you have already reviewed this book'
        else:
            update_reviews(username, isbn, request.form['content'], request.form['rating'])
            msg = 'Thanks for your review, ' + username
        # Re-query so the just-submitted review is shown immediately.
        reviews = get_reviews_by_isbn(isbn)
        return render_template('book_details.html', book = book, reviews=reviews, msg=msg, rating=rating, review_count=review_count)
@app.route('/api/<string:isbn>')
def goodreads_api(isbn):
    """JSON API: merge our local book record with Goodreads review stats.

    Returns the 404 page when the ISBN is unknown locally or Goodreads
    cannot be reached — previously the non-200 branch fell through and
    returned None, which made Flask raise a 500.
    """
    res = goodreads_res(isbn)
    if res.status_code != 200:
        return not_found(isbn)
    api_content = json.loads(res.content)
    my_book = get_book_by_isbn(isbn)
    if not my_book:
        return not_found(isbn)
    return_dict = {'title': my_book[2], 'author': my_book[3], 'year': my_book[4], 'isbn': isbn, 'review_count': api_content['books'][0]['reviews_count'], 'average_score': api_content['books'][0]['average_rating']}
    return jsonify(return_dict)
@app.route('/not_found')
def not_found(isbn=''):
    """Render the 404 page for an unknown ISBN.

    *isbn* now defaults to '' so the bare /not_found route (which passes
    no argument) no longer raises a TypeError when visited directly.
    """
    return render_template('not_found.html', isbn=isbn), 404
def goodreads_res(isbn):
    """Return the raw Goodreads review_counts API response for *isbn*.

    NOTE(review): the API key is hard-coded here; move it to an
    environment variable.
    """
    return requests.get("https://www.goodreads.com/book/review_counts.json", params={"key": "e3j4VgqHagE14fcn1XjkXg","isbns": isbn})
def get_book_by_isbn(isbn):
    """Return the matching books row as a list, or None when absent.

    Previously an unknown ISBN made ``list(None)`` raise a TypeError
    before any caller could check the result.
    """
    book = db.execute('SELECT * FROM books where isbn = :isbn', {'isbn': isbn}).first()
    return list(book) if book is not None else None
def all_books():
    """Return every row of the books table."""
    return db.execute('SELECT * FROM books').fetchall()
def update_reviews(username, isbn, review, rating):
    """Insert one review row, stamped with today's date, and commit."""
    db.execute("INSERT INTO reviews (isbn, username, date, content, rating) VALUES (:isbn, :username, :date, :content, :rating)",
    {'isbn': isbn, 'username': username, 'date': pretty_date(), 'content': review, 'rating': rating})
    db.commit()
def get_reviews_by_isbn(isbn):
    """Return every stored review row for *isbn* as a list."""
    rows = db.execute('SELECT * FROM reviews WHERE isbn = :isbn', {'isbn': isbn}).fetchall()
    return list(rows)
def user_reviewed(username, isbn):
    """True when *username* has already reviewed the book *isbn*."""
    row = db.execute("SELECT * FROM reviews where username = :username and isbn = :isbn",
                     {"username": username, "isbn": isbn}).first()
    return row is not None
def pretty_date():
    """Today's date formatted MM/DD/YYYY (zero padded)."""
    return datetime.datetime.now().strftime('%m/%d/%Y')
def is_logged_in():
    """Whether the current session carries a logged-in user."""
    return 'username' in session
def try_login(form):
    """Validate credentials and start a session, or re-render the landing
    page with a specific error message.

    NOTE(review): passwords are stored and compared in plaintext — they
    should be hashed (e.g. werkzeug.security.generate_password_hash).
    """
    username = form.get("username")
    password = form.get("password")
    db_entry = db.execute("SELECT username, password from users where username = :username", {"username": username}).first()
    if db_entry is None:
        return index(msg = 'No user \'' + username + '\' found')
    elif db_entry[1] != password:
        return index(msg = 'Incorrect password')
    else:
        session['username'] = username
        return welcome()
def account_creation_handler(form):
    """Create an account from the signup form, or re-render the form with
    an error when the username is already taken.

    NOTE(review): the password is persisted as submitted (plaintext) —
    hash it before insertion.
    """
    username = form.get("username")
    password = form.get("password")
    email = form.get("email")
    if username_taken(username):
        return render_template("register.html", err_msg = "Sorry, but the username " + username + " is already in use. Please pick another one.")
    else:
        create_account(username, password, email)
        return success(username)
def username_taken(username):
    """True when a users row with *username* already exists."""
    count = db.execute("select count(*) from users where username = :username",
                       {"username": username}).first()[0]
    return count != 0
def create_account(username, password, email):
    """Insert a new users row and commit.

    NOTE(review): stores the plaintext password — hash it (e.g. with
    werkzeug.security.generate_password_hash) before persisting.
    """
    sql = "INSERT INTO users (username, password, email) VALUES (:username, :password, :email)"
    db.execute(sql, {"username": username, "password": password, "email": email})
    db.commit()
DamienPond001/Udemy_API | refs/heads/master | from flask_restful import Resource, reqparse
from werkzeug.security import safe_str_cmp
from flask_jwt_extended import (
create_access_token,
create_refresh_token,
jwt_refresh_token_required,
get_jwt_identity
)
from models.user import UserModel
class UserRegister(Resource):
    """POST endpoint that creates a new user account.

    The request parser whitelists exactly 'username' and 'password'; any
    other JSON fields in the request body are discarded.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )

    def post(self):
        """Create the user unless the username is already taken (400)."""
        data = UserRegister.parser.parse_args()

        if UserModel.find_by_username(data['username']):
            return {"message": "A user with that username already exists"}, 400

        user = UserModel(**data)
        user.save_to_db()

        return {"message": "User created successfully."}, 201
class User(Resource):
    """Read/delete endpoints for a single user, addressed by id."""

    @classmethod
    def get(cls, user_id):
        """Return the user's JSON representation, or 404 when absent."""
        user = UserModel.find_by_id(user_id)
        if user is None:
            return {'message' :'user not found'}, 404
        return user.json()

    @classmethod
    def delete(cls, user_id):
        """Delete the user; 404 when there is nothing to delete."""
        user = UserModel.find_by_id(user_id)
        if user is None:
            return {'message' : 'User not found'}, 404
        user.delete_from_db()
        return {'message' : 'User deleted'}
class UserLogin(Resource):
    """POST endpoint that issues JWT access and refresh tokens.

    Replaces the old flask-jwt 'authenticate'/'identity' callback pair.
    NOTE(review): safe_str_cmp compares the stored password directly,
    which implies plaintext storage — confirm and consider hashing.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('username',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )
    parser.add_argument('password',
                        type=str,
                        required=True,
                        help="This field cannot be blank."
                        )

    def post(self):
        data = self.parser.parse_args()

        user = UserModel.find_by_username(data['username'])

        # This is what 'authenticate()' used to do.
        if user is not None and safe_str_cmp(user.password, data['password']):
            # What the 'identity()' function used to do.
            access_token = create_access_token(identity = user.id, fresh = True)
            refresh_token = create_refresh_token(user.id)
            return {
                'access_token' : access_token,
                'refresh_token' : refresh_token
            }, 200
        else:
            return {'message' : 'Invalid credentials'}, 401
class TokenRefresh(Resource):
    """POST endpoint: exchange a valid refresh token for a new access token."""
    @jwt_refresh_token_required
    def post(self):
        current_user = get_jwt_identity()
        # fresh=False: tokens minted here cannot pass endpoints that demand a fresh login.
        new_token = create_access_token(identity = current_user, fresh = False)
        return {'access_token' : new_token}, 200
DamienPond001/Udemy_API | refs/heads/master | # Import pandas
import pandas as pd
# Read 'monthly_max_temp.csv' into a DataFrame: weather1
weather1 = pd.read_csv('monthly_max_temp.csv', index_col='Month')
# Print the head of weather1
print(weather1.head())
# Sort the index of weather1 in alphabetical order: weather2
weather2 = weather1.sort_index()
# Print the head of weather2
print(weather2.head())
# Sort the index of weather1 in reverse alphabetical order: weather3
weather3 = weather1.sort_index(ascending=False)
# Print the head of weather3
print(weather3.head())
# Sort weather1 numerically using the values of 'Max TemperatureF': weather4
weather4 = weather1.sort_values('Max TemperatureF')
# Print the head of weather4
print(weather4.head())
# Import pandas
import pandas as pd
# Reindex weather1 using the list year: weather2
weather2 = weather1.reindex(year)
# Print weather2
print(weather2)
# Reindex weather1 using the list year with forward-fill: weather3
weather3 = weather1.reindex(year).ffill()
# Print weather3
print(weather3)
Mean TemperatureF
Month
Jan 32.133333
Feb NaN
Mar NaN
Apr 61.956044
May NaN
Jun NaN
Jul 68.934783
Aug NaN
Sep NaN
Oct 43.434783
Nov NaN
Dec NaN
Mean TemperatureF
Month
Jan 32.133333
Feb 32.133333
Mar 32.133333
Apr 61.956044
May 61.956044
Jun 61.956044
Jul 68.934783
Aug 68.934783
Sep 68.934783
Oct 43.434783
Nov 43.434783
Dec 43.434783
# Import pandas
import pandas as pd
# Reindex names_1981 with index of names_1881: common_names
common_names = names_1981.reindex(names_1881.index)
# Print shape of common_names
print(common_names.shape)
# Drop rows with null counts: common_names
common_names = common_names.dropna()
# Print shape of new common_names
print(common_names.shape) | Python | 85 | 24.694118 | 76 | /Datacamp/manipulating_indices.py | 0.601008 | 0.519469 |
DamienPond001/Udemy_API | refs/heads/master | # Group titanic by 'pclass'
by_class = titanic.groupby('pclass')
# Aggregate 'survived' column of by_class by count
count_by_class = by_class['survived'].count()
# Print count_by_class
print(count_by_class)
# Group titanic by 'embarked' and 'pclass'
by_mult = titanic.groupby(['embarked', 'pclass'])
# Aggregate 'survived' column of by_mult by count
count_mult = by_mult['survived'].count()
# Print count_mult
print(count_mult)
# Read life_fname into a DataFrame: life
life = pd.read_csv(life_fname, index_col='Country')
# Read regions_fname into a DataFrame: regions
regions = pd.read_csv(regions_fname, index_col='Country')
# Group life by regions['region']: life_by_region. This is doable because of the same indexes
life_by_region = life.groupby(regions.region)
# Print the mean over the '2010' column of life_by_region
print(life_by_region['2010'].mean())
# Group titanic by 'pclass': by_class
by_class = titanic.groupby('pclass')
# Select 'age' and 'fare'
by_class_sub = by_class[['age','fare']]
# Aggregate by_class_sub by 'max' and 'median': aggregated
aggregated = by_class_sub.agg(['max', 'median'])
age fare
max median max median
pclass
1 80.0 39.0 512.3292 60.0000
2 70.0 29.0 73.5000 15.0458
3 74.0 24.0 69.5500 8.0500
# Print the maximum age in each class
print(aggregated.loc[:, ('age','max')])
pclass
1 80.0
2 70.0
3 74.0
Name: (age, max), dtype: float64
# Print the median fare in each class
print(aggregated.loc[:, ('fare', 'median')])
pclass
1    60.0000
2    15.0458
3     8.0500
Name: (fare, median), dtype: float64
# Read the CSV file into a DataFrame and sort the index: gapminder
gapminder = pd.read_csv('gapminder.csv', index_col=['Year','region','Country']).sort_index()
# Group gapminder by 'Year' and 'region': by_year_region
by_year_region = gapminder.groupby(level = ['Year', 'region'])
# Define the function to compute spread: spread
def spread(series):
return series.max() - series.min()
# Create the dictionary: aggregator
aggregator = {'population':'sum', 'child_mortality':'mean', 'gdp':spread}
# Aggregate by_year_region using the dictionary: aggregated
aggregated = by_year_region.agg(aggregator)
# Print the last 6 entries of aggregated
print(aggregated.tail(6))
# Read file: sales
sales = pd.read_csv('sales.csv', index_col='Date', parse_dates=True)
# Create a groupby object: by_day
by_day = sales.groupby(sales.index.strftime('%a'))
# Create sum: units_sum
units_sum = by_day['Units'].sum()
# Print units_sum
print(units_sum) | Python | 96 | 26.59375 | 93 | /Datacamp/grouby.py | 0.664275 | 0.628399 |
DamienPond001/Udemy_API | refs/heads/master | # Add the first circle glyph to the figure p
p.circle('fertility', 'female_literacy', source=latin_america, size=10, color='red', legend='Latin America')
# Add the second circle glyph to the figure p
p.circle('fertility', 'female_literacy', source=africa, size=10, color='blue', legend='Africa')
# Assign the legend to the bottom left: p.legend.location
p.legend.location = 'bottom_left'
# Fill the legend background with the color 'lightgray': p.legend.background_fill_color
p.legend.background_fill_color='lightgray'
# Specify the name of the output_file and show the result
output_file('fert_lit_groups.html')
show(p)
# Import HoverTool from bokeh.models
from bokeh.models import HoverTool
# Create a HoverTool object: hover
hover = HoverTool(tooltips=[('Country','@Country')])
# Add the HoverTool object to figure p
p.add_tools(hover)
# Specify the name of the output_file and show the result
output_file('hover.html')
show(p) | Python | 29 | 31.413794 | 108 | /Datacamp/bokeh_tooltips.py | 0.751864 | 0.747604 |
DamienPond001/Udemy_API | refs/heads/master | # Import necessary module
from sqlalchemy import create_engine
import pandas as pd
# Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')
# Save the table names to a list: table_names
table_names = engine.table_names()
# Print the table names to the shell
print(table_names)
#Executing a query
con = engine.connect()
# Perform query: rs
rs = con.execute("SELECT * from Album")
# Save results of the query to DataFrame: df
df = pd.DataFrame(rs.fetchall())
df.columns = rs.keys()
# Close connection
con.close()
#auto close connection
with engine.connect() as con:
rs = con.execute("SELECT LastName, Title FROM Employee")
df = pd.DataFrame(rs.fetchmany(3))
df.columns = rs.keys()
#ALTERNATIVELY
# Import packages
from sqlalchemy import create_engine
import pandas as pd
# Create engine: engine
engine = create_engine('sqlite:///Chinook.sqlite')
# Execute query and store records in DataFrame: df
df = pd.read_sql_query('SELECT * FROM Album', engine) | Python | 45 | 21.244444 | 60 | /Datacamp/Working_with_DB.py | 0.725 | 0.724 |
DamienPond001/Udemy_API | refs/heads/master | import pandas as pd
df = pd.read_csv('....')
df.head()
df.tail()
df.columns
df.shape
#Display summary stats of numeric columns
df.describe()
#Display frequencies of categorical columns
df['Borough'].value_counts(dropna=False)
#display means and counts of columns
df[['col1', 'col2']].count()
df[['col1', 'col2']].mean()
df['2015'].quantile([0.05, 0.95])
# Import matplotlib.pyplot
import matplotlib.pyplot as plt
# Plot the histogram
df['Existing Zoning Sqft'].plot(kind='hist', rot=70, logx=True, logy=True)
# Display the histogram
plt.show()
# Create the boxplot
df.boxplot(column='initial_cost', by='Borough', rot=90)
# Display the plot
plt.show() | Python | 36 | 17.416666 | 74 | /Datacamp/data_explore.py | 0.708459 | 0.681269 |
DamienPond001/Udemy_API | refs/heads/master | #To read in
df = pd.read_csv('data.csv', parse_dates=True, index_col='Date')
# Prepare a format string: time_format
time_format = '%Y-%m-%d %H:%M'
# Convert date_list into a datetime object: my_datetimes
my_datetimes = pd.to_datetime(date_list, format=time_format)
# Construct a pandas Series using temperature_list and my_datetimes: time_series
time_series = pd.Series(temperature_list, index=my_datetimes)
# Extract the hour from 9pm to 10pm on '2010-10-11': ts1
ts1 = ts0.loc['2010-10-11 21:00:00':'2010-10-11 22:00:00']
# Extract '2010-07-04' from ts0: ts2
ts2 = ts0.loc['2010-07-04']
# Extract data from '2010-12-15' to '2010-12-31': ts3
ts3 = ts0.loc['2010-12-15':'2010-12-31']
#Sometimes we may wnat to reindex a df using the timeseries index of another df.
#python fills in non-matching indices with Nan values
# Reindex without fill method: ts3
ts3 = ts2.reindex(ts1.index)
# Reindex with fill method, using forward fill: ts4
ts4 = ts2.reindex(ts1.index, method='ffill') | Python | 28 | 34.357143 | 81 | /Datacamp/datetime_indices.py | 0.719919 | 0.613751 |
DamienPond001/Udemy_API | refs/heads/master | # Import create_engine
from sqlalchemy import create_engine
# Create an engine that connects to the census.sqlite file: engine
engine = create_engine('sqlite:///census.sqlite')# Create an engine to the census database
engine = create_engine('mysql+pymysql://'+'student:datacamp'+'@courses.csrrinzqubik.us-east-1.rds.amazonaws.com:3306/'+'census')
# Print table names
print(engine.table_names())
#Reflection is the process of reading the database and building the metadata
#based on that information. It's the opposite of creating a Table by hand and
#is very useful for working with existing databases. To perform reflection, you need to import
#the Table object from the SQLAlchemy package. Then, you use this Table object to read
#your table from the engine and autoload the columns. Using the Table object in this manner
#is a lot like passing arguments to a function. For example, to autoload the columns with the engine,
#you have to specify the keyword arguments autoload=True and autoload_with=engine to Table().
# Import Table
from sqlalchemy import Table, MetaData
metadata = MetaData()
# Reflect census table from the engine: census
census = Table('census', metadata, autoload=True, autoload_with=engine)
# Print the column names
print(census.columns.keys())
# Print full table metadata
print(repr(metadata.tables['census']))
# Print census table metadata
print(repr(census)) | Python | 34 | 40.205883 | 128 | /Datacamp/sqlalchemy.py | 0.776429 | 0.772857 |
DamienPond001/Udemy_API | refs/heads/master | # Create a figure with the "box_select" tool: p
p = figure(x_axis_label='Year', y_axis_label='Time', tools='box_select')
# Add circle glyphs to the figure p with the selected and non-selected properties
p.circle('Year', 'Time', source=source,selection_color='red', nonselection_alpha=0.1)
# Specify the name of the output file and show the result
output_file('selection_glyph.html')
show(p)
# import the HoverTool
from bokeh.models import HoverTool
# Add circle glyphs to figure p
p.circle(x, y, size=10,
fill_color='grey', alpha=0.1, line_color=None,
hover_fill_color='firebrick', hover_alpha=0.5,
hover_line_color='white')
# Create a HoverTool: hover
hover = HoverTool(tooltips=None, mode='vline')
# Add the hover tool to the figure p
p.add_tools(hover)
# Specify the name of the output file and show the result
output_file('hover_glyph.html')
show(p)
#Import CategoricalColorMapper from bokeh.models
from bokeh.models import CategoricalColorMapper
# Convert df to a ColumnDataSource: source
source = ColumnDataSource(df)
# Make a CategoricalColorMapper object: color_mapper
color_mapper = CategoricalColorMapper(factors=['Europe', 'Asia', 'US'],
palette=['red', 'green', 'blue'])
# Add a circle glyph to the figure p
p.circle('weight', 'mpg', source=source,
color=dict(field='origin', transform=color_mapper),
legend='origin')
# Specify the name of the output file and show the result
output_file('colormap.html')
show(p)
| Python | 51 | 28.843138 | 85 | /Datacamp/bokeh_interaction.py | 0.704993 | 0.699737 |
DamienPond001/Udemy_API | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 15:04:52 2018
@author: Damien
"""
from flask import Flask
app = Flask(__name__) # __name__ (a special Python variable) tells Flask which module owns the app
# Routing: map URL paths to handler functions.
@app.route('/') # '/' is the site root (e.g. http://www.google.com/); '/maps' would be a separate endpoint
def home(): # every view must return a response for the browser
    """Home page view: plain-text greeting."""
    return "Hello, world!"

app.run(port=5000) # listen on port 5000 for incoming requests
# Run with: python app.py
# Then browse to 127.0.0.1:5000 (127.0.0.1 is the loopback IP reserved for this machine)
DamienPond001/Udemy_API | refs/heads/master | '''This was created after installing virtualenv. This allows use to create a virtual environment that mimics
a fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.
Run: conda create -n venv python=3.5.0 anaconda
to create a virtual env called venv with python 3.5.0
conda activate venv
conda deactivate'''
from flask import Flask, request
from flask_restful import Resource, Api, reqparse
from flask_jwt import JWT, jwt_required
from security import authenticate, identity
app = Flask(__name__)
app.secret_key = "secret_key" #this should be long and complicated in a production sense
api = Api(app)
jwt = JWT(app, authenticate, identity)
'''
JWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to authenticate and identity
If authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT
The JWT calls the identity function which gets the correct id and returns the user
'''
items = []
class Item(Resource):
    """CRUD endpoints for a single item, addressed by name.

    The class-level request parser is shared by post/put (avoids
    duplication) and whitelists 'price'; any other JSON fields in the
    request body are discarded before the handlers see them.
    """
    parser = reqparse.RequestParser()
    parser.add_argument('price',
                        type=float,
                        required=True,
                        help="This field cannot be left blank")

    @jwt_required()
    def get(self, name):
        """Return the named item, or 404 when it does not exist."""
        item = next(filter(lambda x: x['name'] == name, items), None)
        return {"item" : item}, 200 if item is not None else 404

    def post(self, name):
        """Create the named item; 400 (bad request) when it already exists."""
        if next(filter(lambda x: x['name'] == name, items), None) is not None:
            return {"message" : "an item with name '{}' already exists.".format(name)}, 400
        data = Item.parser.parse_args()
        item = {'name' : name, 'price' : data['price']}
        items.append(item)
        return item, 201  # 201 Created

    def delete(self, name):
        """Remove the named item (idempotent: deleting a missing item is fine)."""
        global items
        items = list(filter(lambda x : x['name'] != name, items))
        return {"message" : "Item deleted"}

    def put(self, name):
        """Create or update the named item with the parsed 'price'.

        Fixes the original 'slef' typo in the first parameter (behaviour
        unchanged: the first positional argument is the instance either
        way) and drops a stray debug print of the item store.
        """
        data = Item.parser.parse_args()  # fields other than 'price' are stripped
        item = next(filter(lambda x: x['name'] == name, items), None)
        if item is None:
            item = {"name" : name, "price" : data['price']}
            items.append(item)
        else:
            # 'item' references the entry inside 'items', so this updates in place.
            item.update(data)
        return item
class ItemList(Resource):
    """Collection endpoint over the in-memory item store."""

    def get(self):
        """Return every stored item."""
        return {"items" : items}
api.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name
api.add_resource(ItemList, '/items')
app.run(port=5000, debug=True) #debug gives better error messages | Python | 86 | 42.372093 | 154 | /API/Section5/code/app.py | 0.648163 | 0.637973 |
DamienPond001/Udemy_API | refs/heads/master | #Used for mereging when there is an ordering (eg dates)
# Perform the first ordered merge: tx_weather
tx_weather = pd.merge_ordered(austin, houston)
# Print tx_weather
print(tx_weather)
# Perform the second ordered merge: tx_weather_suff
tx_weather_suff = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'])
# Print tx_weather_suff
print(tx_weather_suff)
# Perform the third ordered merge: tx_weather_ffill
tx_weather_ffill = pd.merge_ordered(austin, houston, on='date', suffixes=['_aus','_hus'], fill_method='ffill')
# Print tx_weather_ffill
print(tx_weather_ffill)
#Similar to pd.merge_ordered(), the pd.merge_asof() function will also merge
#values in order using the on column, but for each row in the left DataFrame,
#only rows from the right DataFrame whose 'on' column values are less than the
#left value will be kept.
#This function can be used to align disparate datetime frequencies without having to first resample.
oil.head()
Date Price
0 1970-01-01 3.35
1 1970-02-01 3.35
2 1970-03-01 3.35
3 1970-04-01 3.35
4 1970-05-01 3.35
auto.head()
mpg cyl displ hp weight accel yr origin \
0 18.0 8 307.0 130 3504 12.0 1970-01-01 US
1 15.0 8 350.0 165 3693 11.5 1970-01-01 US
2 18.0 8 318.0 150 3436 11.0 1970-01-01 US
3 16.0 8 304.0 150 3433 12.0 1970-01-01 US
4 17.0 8 302.0 140 3449 10.5 1970-01-01 US
name
0 chevrolet chevelle malibu
1 buick skylark 320
2 plymouth satellite
3 amc rebel sst
4 ford torino
# Merge auto and oil: merged
merged = pd.merge_asof(auto, oil, left_on='yr', right_on='Date')
# Print the tail of merged
print(merged.tail())
mpg cyl displ hp weight accel yr origin name \
387 27.0 4 140.0 86 2790 15.6 1982-01-01 US ford mustang gl
388 44.0 4 97.0 52 2130 24.6 1982-01-01 Europe vw pickup
389 32.0 4 135.0 84 2295 11.6 1982-01-01 US dodge rampage
390 28.0 4 120.0 79 2625 18.6 1982-01-01 US ford ranger
391 31.0 4 119.0 82 2720 19.4 1982-01-01 US chevy s-10
Date Price
387 1982-01-01 33.85
388 1982-01-01 33.85
389 1982-01-01 33.85
390 1982-01-01 33.85
391 1982-01-01 33.85
# Resample merged: yearly
yearly = merged.resample('A', on='Date')[['mpg','Price']].mean()
# Print yearly
print(yearly)
mpg Price
Date
1970-12-31 17.689655 3.35
1971-12-31 21.111111 3.56
1972-12-31 18.714286 3.56
1973-12-31 17.100000 3.56
1974-12-31 22.769231 10.11
1975-12-31 20.266667 11.16
1976-12-31 21.573529 11.16
1977-12-31 23.375000 13.90
1978-12-31 24.061111 14.85
1979-12-31 25.093103 14.85
1980-12-31 33.803704 32.50
1981-12-31 30.185714 38.00
1982-12-31 32.000000 33.85
# print yearly.corr()
print(yearly.corr())
mpg Price
Date
1970-12-31 17.689655 3.35
1971-12-31 21.111111 3.56
1972-12-31 18.714286 3.56
1973-12-31 17.100000 3.56
1974-12-31 22.769231 10.11
1975-12-31 20.266667 11.16
1976-12-31 21.573529 11.16
1977-12-31 23.375000 13.90
1978-12-31 24.061111 14.85
1979-12-31 25.093103 14.85
1980-12-31 33.803704 32.50
1981-12-31 30.185714 38.00
1982-12-31 32.000000 33.85 | Python | 109 | 32.321102 | 110 | /Datacamp/merge_ordered.py | 0.587717 | 0.331314 |
DamienPond001/Udemy_API | refs/heads/master | # Import package
import tweepy
# Store OAuth authentication credentials in relevant variables
access_token = "1092294848-aHN7DcRP9B4VMTQIhwqOYiB14YkW92fFO8k8EPy"
access_token_secret = "X4dHmhPfaksHcQ7SCbmZa2oYBBVSD2g8uIHXsp5CTaksx"
consumer_key = "nZ6EA0FxZ293SxGNg8g8aP0HM"
consumer_secret = "fJGEodwe3KiKUnsYJC3VRndj7jevVvXbK2D5EiJ2nehafRgA6i"
# Pass OAuth details to tweepy's OAuth handler
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
####################################################
#Need to define a Stream Listener class
class MyStreamListener(tweepy.StreamListener):
    """Tweepy stream listener that saves the first 100 tweets to tweets.txt."""

    def __init__(self, api=None):
        super(MyStreamListener, self).__init__()
        self.num_tweets = 0                  # tweets written so far
        self.file = open("tweets.txt", "w")  # closed once the limit is reached

    def on_status(self, status):
        """Append one tweet's JSON as a line; stop the stream after 100 tweets.

        Returning False from on_status tells tweepy to disconnect the stream.
        """
        tweet = status._json
        self.file.write(json.dumps(tweet) + '\n')
        self.num_tweets += 1
        if self.num_tweets < 100:
            return True
        # BUG FIX: the original called self.file.close() after the if/else
        # returns, so it was unreachable and the file was never closed.
        # Close it here, just before telling tweepy to disconnect.
        self.file.close()
        return False

    def on_error(self, status):
        # Report the error status code sent back by Twitter.
        print(status)
#####################################################
# Initialize Stream listener
l = MyStreamListener()
# Create your Stream object with authentication
stream = tweepy.Stream(auth, l)
# Filter Twitter Streams to capture data by the keywords:
stream.filter(track = ['clinton', 'trump', 'sanders', 'cruz'])
#Once the twitter data is sitting locally:
# Import package
import json
# String of path to file: tweets_data_path
tweets_data_path = "tweets.txt"
# Initialize empty list to store tweets: tweets_data
tweets_data = []
# Open connection to file
tweets_file = open(tweets_data_path, "r")
# Read in tweets and store in list: tweets_data
for line in tweets_file:
tweet = json.loads(line)
tweets_data.append(tweet)
# Close connection to file
tweets_file.close()
# Import package
import pandas as pd
# Build DataFrame of tweet texts and languages
df = pd.DataFrame(tweets_data, columns=['text', 'lang'])
# Print head of DataFrame
print(df.head())
| Python | 78 | 24.358974 | 62 | /Datacamp/twitter_example.py | 0.650152 | 0.643579 |
# DamienPond001/Udemy_API | refs/heads/master  (dataset row marker from the dump)
def ecdf(data):
    """Compute the empirical CDF of a one-dimensional set of measurements.

    Returns (x, y): x is the data sorted ascending, and y[i] is the
    fraction of observations <= x[i] (so y runs from 1/n up to 1).
    """
    sorted_vals = np.sort(data)
    n_obs = len(sorted_vals)
    fractions = np.arange(1, n_obs + 1) / n_obs
    return sorted_vals, fractions
# Compute ECDF for versicolor data: x_vers, y_vers
x_vers, y_vers = ecdf(versicolor_petal_length)
# Generate plot
_ = plt.plot(x_vers, y_vers, marker='.', linestyle='none')
# Label the axes
plt.xlabel('versicolor_petal_length')
plt.ylabel('ECDF')
# Display the plot
plt.show() | Python | 25 | 20.76 | 67 | /Datacamp/EDA_ECDF.py | 0.639042 | 0.635359 |
DamienPond001/Udemy_API | refs/heads/master | #Dataframes are made up of Series objects. Each Series is labelled 1D numpy array
import pandas as pd
#df is some DataFrame
df.head()
df.tail()
df.iloc[1, :]
df.loc['row_index', :]
#to return column info
df.info()
#to convert DataFrame to numpy array:
df.values
#note though that many numpy methods work on pandas dfs
########
#creating Dataframes from scratch
########
d = {"col1" :[1,3,4,5], "col2" : [4,5,6,7]}
df = pd.DataFrame(d)
col1 = [1, 3, 5, 6]
col2 = [6, 7, 8, 9]
cols = [col1, col2]
indices = ["col1", "col2"]
d = zip(indices, cols)
d = dict(list(d))
df = pd.DataFramed
df.columns = ["newcol1", "newcol2"]
#Broadcasting
df['col3'] = "M"
d = {"col1" : [1, 3, 4, 5], "col2" : "M"}
df = pd.DataFrame(d) #Broadcasts col2 | Python | 42 | 16.666666 | 81 | /Datacamp/pandas.py | 0.626181 | 0.577598 |
DamienPond001/Udemy_API | refs/heads/master | #Row concatenation
row_concat = pd.concat([uber1, uber2, uber3])
#where each is a df
#Note though that the original row indices will be maintained
#Use the ignore_index = True to reset the indices in sequential order
#Use the axis =1 to do column concatenation
#If we have many files to concatenate:
# Import necessary modules
import glob
import pandas as pd
# Write the pattern: pattern
pattern = '*.csv'
# * = all strings
# ? = single character
# Save all file matches: csv_files
csv_files = glob.glob(pattern)
#this gives a list of files that match the pattern
# Create an empty list: frames
frames = []
# Iterate over csv_files
for csv in csv_files:
# Read csv into a DataFrame: df
df = pd.read_csv(csv)
# Append df to frames
frames.append(df)
# Concatenate frames into a single DataFrame: uber
uber = pd.concat(frames) | Python | 36 | 22.777779 | 69 | /Datacamp/Joining_data.py | 0.720468 | 0.715789 |
DamienPond001/Udemy_API | refs/heads/master | '''This was created after installing virtualenv. This allows use to create a virtual environment that mimics
a fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.
Run: conda create -n venv python=3.5.0 anaconda
to create a virtual env called venv with python 3.5.0
conda activate venv
conda deactivate'''
from flask import Flask, request
from flask_restful import Resource, Api
app = Flask(__name__)
api = Api(app)
items = []
class Item(Resource):
    """REST resource for a single named item in the in-memory catalogue."""

    def get(self, name):
        """Return the item called *name* with 200, or a null item with 404."""
        match = next((entry for entry in items if entry['name'] == name), None)
        return {"item" : match}, 200 if match is not None else 404

    def post(self, name):
        """Create the item called *name* from the JSON body; 400 on duplicate."""
        # The request must carry a JSON body (Content-Type header set).
        if any(entry['name'] == name for entry in items):
            return {"message" : "an item with name '{}' already exists.".format(name)}, 400  # bad request
        body = request.get_json()
        created = {'name' : name, 'price' : body['price']}
        items.append(created)
        return created, 201  # 201 = created
class ItemList(Resource):
    """Read-only REST resource listing every item in the catalogue."""

    def get(self):
        """Return all items wrapped in a JSON-friendly dict."""
        return {"items" : items}
api.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name
api.add_resource(ItemList, '/items')
app.run(port=5000, debug=True) #debug gives better error messages | Python | 40 | 37.25 | 136 | /API/Section4/code/app.py | 0.680183 | 0.65533 |
DamienPond001/Udemy_API | refs/heads/master | # Create the pivot table: medals_won_by_country
medals_won_by_country = medals.pivot_table(index = 'Edition', columns='NOC', values= "Athlete", aggfunc='count')
# Slice medals_won_by_country: cold_war_usa_urs_medals
cold_war_usa_urs_medals = medals_won_by_country.loc[1952:1988, ['USA','URS']]
NOC USA URS
Edition
1952 130.0 117.0
1956 118.0 169.0
1960 112.0 169.0
1964 150.0 174.0
1968 149.0 188.0
1972 155.0 211.0
1976 155.0 285.0
1980 NaN 442.0
1984 333.0 NaN
1988 193.0 294.0
# If .max() returns the maximum value of Series or 1D array, .idxmax() returns the index of the maximizing element.
# Create most_medals
most_medals = cold_war_usa_urs_medals.idxmax(axis='columns')
Edition
1952 USA
1956 URS
1960 URS
1964 URS
1968 URS
1972 URS
1976 URS
1980 URS
1984 USA
1988 URS
dtype: object
# Print most_medals.value_counts()
print(most_medals.value_counts())
In [5]: cold_war_usa_urs_medals.idxmax()
Out[5]:
NOC
USA 1984
URS 1980
dtype: int64 | Python | 43 | 23.744186 | 116 | /Datacamp/idxmax_idxmin.py | 0.654751 | 0.492004 |
DamienPond001/Udemy_API | refs/heads/master | # Import figure from bokeh.plotting
from bokeh.plotting import figure
# Import output_file and show from bokeh.io
from bokeh.io import output_file, show
# Create the figure: p
p = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')
# Add a circle glyph to the figure p
p.circle(fertility,female_literacy)
# Call the output_file() function and specify the name of the file
output_file('fert_lit.html')
# Display the plot
show(p)
# Create the figure: p
p = figure(x_axis_label='fertility', y_axis_label='female_literacy (% population)')
# Add a circle glyph to the figure p
p.circle(fertility_latinamerica, female_literacy_latinamerica, size=10, alpha=0.8, color='blue')
# Add an x glyph to the figure p
p.x(fertility_africa, female_literacy_africa)
# Specify the name of the file
output_file('fert_lit_separate.html')
# Display the plot
show(p)
#lines
# Import figure from bokeh.plotting
from bokeh.plotting import figure
# Create a figure with x_axis_type="datetime": p
p = figure(x_axis_type='datetime', x_axis_label='Date', y_axis_label='US Dollars')
# Plot date along the x axis and price along the y axis
p.line(date, price)
p.circle(date, price, fill_color='white', size=4)
# Specify the name of the output file and show the result
output_file('line.html')
show(p)
#patches
# Create a list of az_lons, co_lons, nm_lons and ut_lons: x
x = [az_lons, co_lons, nm_lons, ut_lons]
# Create a list of az_lats, co_lats, nm_lats and ut_lats: y
y = [az_lats, co_lats, nm_lats, ut_lats]
# Add patches to figure p with line_color=white for x and y
p.patches(x,y, line_color='white')
# Specify the name of the output file and show the result
output_file('four_corners.html')
show(p) | Python | 66 | 25.469696 | 104 | /Datacamp/bokeh.py | 0.727377 | 0.724513 |
DamienPond001/Udemy_API | refs/heads/master | #stack does something similar to pivot using the indices
# Unstack users by 'weekday': byweekday
users =
visitors signups
city weekday
Austin Mon 326 3
Sun 139 7
Dallas Mon 456 5
Sun 237 12
byweekday = users.unstack(level = 'weekday')
# Print the byweekday DataFrame
print(byweekday)
visitors signups
weekday Mon Sun Mon Sun
city
Austin 326 139 3 7
Dallas 456 237 5 12
# Stack byweekday by 'weekday' and print it
print(byweekday.stack(level = 'weekday'))
visitors signups
city weekday
Austin Mon 326 3
Sun 139 7
Dallas Mon 456 5
Sun 237 12
# Stack 'city' back into the index of bycity: newusers
newusers = bycity.stack(level = "city")
# Swap the levels of the index of newusers: newusers
newusers = newusers.swaplevel(0,1)
# Print newusers and verify that the index is not sorted
print(newusers)
# Sort the index of newusers: newusers
newusers = newusers.sort_index() | Python | 41 | 29.926828 | 56 | /Datacamp/stack_unstack.py | 0.528808 | 0.486977 |
DamienPond001/Udemy_API | refs/heads/master | '''This was created after installing virtualenv. This allows use to create a virtual environment that mimics
a fresh Python install. This ensures that any updates to packages don't affect previous applications built on previous package versions.
Run: conda create -n venv python=3.5.0 anaconda
to create a virtual env called venv with python 3.5.0
conda activate venv
conda deactivate'''
from flask import Flask
from flask_restful import Api
from flask_jwt import JWT
from security import authenticate, identity
from user import UserRegister
from item import Item, ItemList
app = Flask(__name__)
app.secret_key = "secret_key" #this should be long and complicated in a production sense
api = Api(app)
jwt = JWT(app, authenticate, identity)
'''
JWT creates an endpoint /auth. When we call /auth we send a username and password, which is passed on to authenticate and identity
If authenticate returns a user, and that is the identity and the /auth endpoint returns a JWT
The JWT calls the identity function which gets the correct id and returns the user
'''
api.add_resource(Item, '/item/<string:name>') #http://127.0.0.1:5000/item/item_name
api.add_resource(ItemList, '/items')
api.add_resource(UserRegister, '/register')
if __name__ == '__main__': #This ensures that this is not run if app.py is imported, but only when called
app.run(port=5000, debug=True) #debug gives better error messages | Python | 35 | 39.42857 | 136 | /API/Section6/code/UseDB/app.py | 0.761669 | 0.747525 |
DamienPond001/Udemy_API | refs/heads/master | # Extract selected columns from weather as new DataFrame: temps_f
temps_f = weather[['Min TemperatureF', 'Mean TemperatureF', 'Max TemperatureF']]
# Convert temps_f to celsius: temps_c
temps_c = (temps_f - 32) * 5/9 #broadcasting
# Rename 'F' in column names with 'C': temps_c.columns
temps_c.columns = ['Min TemperatureC', 'Mean TemperatureC', 'Max TemperatureC']
# Print first 5 rows of temps_c
print(temps_c.head())
import pandas as pd
# Read 'GDP.csv' into a DataFrame: gdp
gdp = pd.read_csv('GDP.csv', index_col='DATE', parse_dates=True)
# Slice all the gdp data from 2008 onward: post2008
post2008 = gdp.loc['2008':, :]
# Print the last 8 rows of post2008
print(post2008.tail(8))
VALUE
DATE
2014-07-01 17569.4
2014-10-01 17692.2
2015-01-01 17783.6
2015-04-01 17998.3
2015-07-01 18141.9
2015-10-01 18222.8
2016-01-01 18281.6
2016-04-01 18436.5
# Resample post2008 by year, keeping last(): yearly
yearly = post2008.resample('A').last()
# Print yearly
print(yearly)
VALUE
DATE
2014-07-01 17569.4
2014-10-01 17692.2
2015-01-01 17783.6
2015-04-01 17998.3
2015-07-01 18141.9
2015-10-01 18222.8
2016-01-01 18281.6
2016-04-01 18436.5
# Compute percentage growth of yearly: yearly['growth']
yearly['growth'] = yearly.pct_change()*100
# Print yearly again
print(yearly)
VALUE growth
DATE
2008-12-31 14549.9 NaN
2009-12-31 14566.5 0.114090
2010-12-31 15230.2 4.556345
2011-12-31 15785.3 3.644732
2012-12-31 16297.3 3.243524
2013-12-31 16999.9 4.311144
2014-12-31 17692.2 4.072377
2015-12-31 18222.8 2.999062
2016-12-31 18436.5 1.172707
# Import pandas
import pandas as pd
# Read 'sp500.csv' into a DataFrame: sp500
sp500 = pd.read_csv('sp500.csv', index_col='Date', parse_dates=True)
# Read 'exchange.csv' into a DataFrame: exchange
exchange = pd.read_csv('exchange.csv', index_col='Date', parse_dates=True)
# Subset 'Open' & 'Close' columns from sp500: dollars
dollars = sp500[['Open', 'Close']]
# Print the head of dollars
print(dollars.head())
# Convert dollars to pounds: pounds
pounds = dollars.multiply(exchange['GBP/USD'], axis='rows')
#NOTE: similar add(), subtract(), divide() methods. These offer more flexibility than using standard +, -, / operators
# Print the head of pounds
print(pounds.head()) | Python | 88 | 27.420454 | 118 | /Datacamp/dataframe_arithmetic.py | 0.6436 | 0.4572 |
DamienPond001/Udemy_API | refs/heads/master | from models.user import UserModel
from werkzeug.security import safe_str_cmp
def authenticate(username, password):
    """flask-JWT hook: return the user for *username* if *password* matches."""
    user = UserModel.find_by_username(username)
    if user is None:
        return None
    # safe_str_cmp sidesteps subtle issues with naive string comparison
    return user if safe_str_cmp(user.password, password) else None
#identity function is unique to flask JWT
#payload is the contents on the JWT Token
def identity(payload):
    """flask-JWT hook: map the decoded token's 'identity' claim to a user."""
    return UserModel.find_by_id(payload['identity'])
| Python | 14 | 34 | 124 | /API/Section7/code/security.py | 0.721248 | 0.721248 |
DamienPond001/Udemy_API | refs/heads/master |
# Append names_1981 after names_1881 with ignore_index=True: combined_names
combined_names = names_1881.append(names_1981, ignore_index=True)
#ignore_index resets the index, else the indices from the original dfs are placed on top of one another
# Concatenate weather_max and weather_mean horizontally: weather
weather = pd.concat([weather_max, weather_mean], axis=1)
#axis=1 means concat horizontally (this does something similar to a full outer join)
Max TemperatureF Mean TemperatureF
Apr 89.0 53.100000
Aug NaN 70.000000
Dec NaN 34.935484
Feb NaN 28.714286
Jan 68.0 32.354839
Jul 91.0 72.870968
Jun NaN 70.133333
Mar NaN 35.000000
May NaN 62.612903
Nov NaN 39.800000
Oct 84.0 55.451613
Sep NaN 63.766667
for medal in medal_types:
# Create the file name: file_name
file_name = "%s_top5.csv" % medal
# Create list of column names: columns
columns = ['Country', medal]
# Read file_name into a DataFrame: df
medal_df = pd.read_csv(file_name, header=0, index_col='Country', names=columns) #names sets the column names
# Append medal_df to medals
medals.append(medal_df)
# Concatenate medals horizontally: medals
medals = pd.concat(medals, axis='columns') #same as axis=1
# Print medals
print(medals)
#using multi level indexes:
for medal in medal_types:
file_name = "%s_top5.csv" % medal
# Read file_name into a DataFrame: medal_df
medal_df = pd.read_csv(file_name, index_col='Country')
# Append medal_df to medals
medals.append(medal_df)
# Concatenate medals: medals
medals = pd.concat(medals, axis='rows', keys=['bronze', 'silver', 'gold'])
# Print medals in entirety
print(medals)
Total
Country
bronze United States 1052.0
Soviet Union 584.0
United Kingdom 505.0
France 475.0
Germany 454.0
silver United States 1195.0
Soviet Union 627.0
United Kingdom 591.0
France 461.0
Italy 394.0
gold United States 2088.0
Soviet Union 838.0
United Kingdom 498.0
Italy 460.0
Germany 407.0
# Sort the entries of medals: medals_sorted
medals_sorted = medals.sort_index(level=0)
# Print the number of Bronze medals won by Germany
print(medals_sorted.loc[('bronze','Germany')])
# Print data about silver medals
print(medals_sorted.loc['silver'])
# Create alias for pd.IndexSlice: idx
#A slicer pd.IndexSlice is required when slicing on the inner level of a MultiIndex
idx = pd.IndexSlice
# Print all the data on medals won by the United Kingdom
print(medals_sorted.loc[idx[:,'United Kingdom'], :])
# Make the list of tuples: month_list
month_list = [('january', jan), ('february', feb), ('march', mar)]
# Create an empty dictionary: month_dict
month_dict = {}
for month_name, month_data in month_list:
# Group month_data: month_dict[month_name]
month_dict[month_name] = month_data.groupby('Company').sum()
# Concatenate data in month_dict: sales
sales = pd.concat(month_dict)
# Print sales
print(sales)
Units
Company
february Acme Coporation 34
Hooli 30
Initech 30
Mediacore 45
Streeplex 37
january Acme Coporation 76
Hooli 70
Initech 37
Mediacore 15
Streeplex 50
march Acme Coporation 5
Hooli 37
Initech 68
Mediacore 68
Streeplex 40
# Print all sales by Mediacore
idx = pd.IndexSlice
print(sales.loc[idx[:, 'Mediacore'], :]) | Python | 129 | 31.790697 | 112 | /Datacamp/append_concatdf.py | 0.558629 | 0.50591 |
DamienPond001/Udemy_API | refs/heads/master | # Convert the sex column to type 'category'
tips.sex = tips.sex.astype('category') #converting to categorical vars helps with memory and further analysis
# Convert the smoker column to type 'category'
tips.smoker = tips.smoker.astype('category')
# Print the info of tips
print(tips.info())
#sometimes we may need to tell python how to deal with values it can't convert
tips['total_bill'] = pd.to_numeric(tips['total_bill'], errors='coerce')
| Python | 11 | 39.454544 | 110 | /Datacamp/data-types.py | 0.748315 | 0.748315 |
DamienPond001/Udemy_API | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 24 16:43:47 2018
@author: Damien
"""
from flask import Flask, jsonify, request, render_template
#NOTE on JSON: JSON are essentially dictionaries but in string format. Thus we need to convert of Python dicts to text
app = Flask(__name__) #unique __name__ - special python variable
stores = [
{
'name': 'My Store',
'items': [
{
'name':'My Item',
'price':15.99
}
]
}
]
@app.route('/')
def home():
    """Serve the landing page (Flask looks in the templates folder)."""
    return render_template('index.html')
#POST - recieves data
#GET - send data back
##End points we are going to define
#POST /store data: {name:}
@app.route('/store', methods=['POST'])  # routes default to GET unless methods is given
def create_store():
    """Create a store from the JSON body {'name': ...} and echo it back."""
    body = request.get_json()
    store = {
        'name': body['name'],
        'items': []
    }
    stores.append(store)
    return jsonify(store)
#GET /store/<string:name>
@app.route('/store/<string:name>')  # <string:name> is a flask URL converter
def get_store(name):
    """Return the store called *name* as JSON, or a not-found message."""
    match = next((s for s in stores if s['name'] == name), None)
    if match is not None:
        return jsonify(match)
    return jsonify({'message' : 'No such store'})
#GET /store
@app.route('/store')
def get_stores():
    """Return every store under a top-level 'stores' key."""
    return jsonify({'stores' : stores})
#POST /store/<string:name>/item {name:, price:}
@app.route('/store/<string:name>/item', methods=['POST'])  # routes default to GET
def create_item(name):
    """Add an item {'name':..., 'price':...} to the store called *name*."""
    body = request.get_json()
    target = next((s for s in stores if s['name'] == name), None)
    if target is None:
        return jsonify({"message" : " No such store"})
    item = {
        'name' : body['name'],
        'price' : body['price']
    }
    target['items'].append(item)
    return jsonify(item)
#GET /store/<string:name>/item
@app.route('/store/<string:name>/item')  # <string:name> is a flask URL converter
def get_item_in_store(name):
    """Return the items belonging to the store called *name*."""
    owner = next((s for s in stores if s['name'] == name), None)
    if owner is not None:
        return jsonify({'items' : owner['items']})
    return jsonify({'message' : 'No such store'})
app.run(port=5000) #app runs on a port/area of the computer where it sends and receives requests
#run from conda "python app.py"
#copy 127.0.0.1:5000 into browser (127.0.0.1 is the IP reserved for your computer) | Python | 85 | 27.670588 | 118 | /API/storeapp.py | 0.592775 | 0.577586 |
DamienPond001/Udemy_API | refs/heads/master | from user import User
from werkzeug.security import safe_str_cmp
#some database
users = [
User(1, "bob", "asdf"),
User(2, "Damien", "bitches")
]
#below allows us to find the user by username or ID without having to iterate over the above list
username_mapping = {u.username : u for u in users} #list comprehension where the function is a key:value pair
userid_mapping = {u.id : u for u in users}
def authenticate(username, password):
    """Return the in-memory user matching *username*/*password*, else None."""
    user = username_mapping.get(username, None)
    if user is None:
        return None
    # safe_str_cmp avoids subtle problems with plain == on strings
    return user if safe_str_cmp(user.password, password) else None
#identity function is unique to flask JWT
#payload is the contents on the JWT Token
def identity(payload):
    """flask-JWT hook: look up the user by the token's 'identity' claim."""
    return userid_mapping.get(payload['identity'], None)
| Python | 24 | 36.958332 | 124 | /API/Section6/code/NoDB/security.py | 0.700214 | 0.698073 |
DamienPond001/Udemy_API | refs/heads/master | #Link the ranges with panning
# Link the x_range of p2 to p1: p2.x_range
p2.x_range = p1.x_range
# Link the y_range of p2 to p1: p2.y_range
p2.y_range = p1.y_range
# Link the x_range of p3 to p1: p3.x_range
p3.x_range = p1.x_range
# Link the y_range of p4 to p1: p4.y_range
p4.y_range = p1.y_range
# Specify the name of the output_file and show the result
output_file('linked_range.html')
show(layout)
#Link selection
# Create ColumnDataSource: source
source = ColumnDataSource(data)
# Create the first figure: p1
p1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female literacy (% population)',
tools='box_select,lasso_select')
# Add a circle glyph to p1
p1.circle('fertility', 'female literacy', source=source)
# Create the second figure: p2
p2 = figure(x_axis_label='fertility (children per woman)', y_axis_label='population (millions)',
tools='box_select,lasso_select')
# Add a circle glyph to p2
p2.circle('fertility', 'population', source=source)
# Create row layout of figures p1 and p2: layout
layout = row(p1, p2)
# Specify the name of the output_file and show the result
output_file('linked_brush.html')
show(layout) | Python | 44 | 25.522728 | 105 | /Datacamp/bokeh_linked_plots.py | 0.709262 | 0.682676 |
DamienPond001/Udemy_API | refs/heads/master | #Basics of reading in:
filename = 'file.txt'
file = open(filename, mode = 'r') #'r' is top read, 'w' is to write
text = file.read()
file.close()
with open('huck_finn.txt', 'r') as file: #with is referred to as the context manager
print(file.read())
#Using NumPy - for numeric arrays
#This allows use of sci-kit learn
import numpy as np
#Can use:
data = np.loadtxt(filename, delimiter = "'", skiprows = 1, usecols=[0, 2], dtype=str)
#Alternatively, use Pandas (this is preferable)
import pandas as pd
data = pd.read_csv(filename, sep = '\t', comment='#', na_values='Nothing') #comment drops everything after '#', na_values are user specified nulls
#header=0 and names=new_names will label the rows
#parse_date does something
#index_col specifies which col should be the index
data.head() #prints first 5 rows .head(10) displays 10 rows
data_array = data.values #converts to numpy array
#Other types of import files:
#Pickled file: files containing python data structures that don't translate to an obvious readable form (i.e. dicts, lists, tuples)
# Import pickle package
import pickle
# Open pickle file and load data: d
with open('data.pkl', 'rb') as file:
d = pickle.load(file)
#Excel
file = "excel.xlsx"
data = pd.ExcelFile(file)
print(data.sheet_names)
df1 = data.parse('name_of_sheet')
df2 = data.parse(1) #index of sheet
df1 = data.parse(0, skiprows=[1], names=['Country', 'AAM due to War (2002)'])
#SAS
# Import sas7bdat package
from sas7bdat import SAS7BDAT
# Save file to a DataFrame: df_sas
with SAS7BDAT('sales.sas7bdat') as file:
df_sas = file.to_data_frame()
#Stata
# Import pandas
import pandas as pd
# Load Stata file into a pandas DataFrame: df
df = pd.read_stata('disarea.dta')
#HDF5 (Hierarchical Data Format version 5)
import h5py
# Assign filename: file
file = 'LIGO_data.hdf5'
# Load file: data
data = h5py.File(file, 'r')
# Print the datatype of the loaded file
print(type(data))
# Print the keys of the file. HDF5 files have a heirarchical structure that can be drilled down using the keys
for key in data.keys():
print(key)
group = data['strain']
# Check out keys of group
for key in group.keys():
print(key)
# Set variable equal to time series data: strain
strain = data['strain']['Strain'].value
#MATLAB
# Import package
import scipy.io
# Load MATLAB file: mat
mat = scipy.io.loadmat('albeck_gene_expression.mat') #loads a dict with the variables : values of thingfs that were saved in the MATLAB workspace
| Python | 98 | 25.010204 | 148 | /Datacamp/Reading_Data.py | 0.698039 | 0.686275 |
DamienPond001/Udemy_API | refs/heads/master | # Import plotting modules
import matplotlib.pyplot as plt
import seaborn as sns
# Plot a linear regression between 'weight' and 'hp'
sns.lmplot(x='weight', y='hp', data=auto)
# Display the plot
plt.show()
#RESIDUALS
# Import plotting modules
import matplotlib.pyplot as plt
import seaborn as sns
# Generate a green residual plot of the regression between 'hp' and 'mpg'
sns.residplot(x='hp', y='mpg', data=auto, color='green')
# Display the plot
plt.show()
#HIGHER ORDER
# Generate a scatter plot of 'weight' and 'mpg' using red circles
plt.scatter(auto['weight'], auto['mpg'], label='data', color='red', marker='o')
# Plot in blue a linear regression of order 1 between 'weight' and 'mpg'
sns.regplot(x='weight', y='mpg', data=auto, label='order 1', color='blue', order=1, scatter=None)
# Plot in green a linear regression of order 2 between 'weight' and 'mpg'
sns.regplot(x='weight', y='mpg', data=auto, label='order 2', color='green', order=2, scatter=None)
# Add a legend and display the plot
plt.legend(loc='upper right')
plt.show()
# Plot a linear regression between 'weight' and 'hp', with a hue (specifies categories) of 'origin' and palette of 'Set1'
sns.lmplot('weight', 'hp', data=auto, hue='origin', palette='Set1')
# Display the plot
plt.show()
# Plot linear regressions between 'weight' and 'hp' grouped row-wise by 'origin'
sns.lmplot('weight', 'hp', data=auto, row='origin')
# Display the plot
plt.show() | Python | 47 | 29.553192 | 121 | /Datacamp/seaborn.py | 0.712892 | 0.707317 |
DamienPond001/Udemy_API | refs/heads/master |
# Build a query to count the distinct states values: stmt
stmt = select([func.count(census.columns.state.distinct())])
# Execute the query and store the scalar result: distinct_state_count
distinct_state_count = connection.execute(stmt).scalar()
# Print the distinct_state_count
print(distinct_state_count)
# Import func
from sqlalchemy import func
# Build a query to select the state and count of ages by state: stmt
stmt = select([census.columns.state, func.count(census.columns.age)])
# Group stmt by state
stmt = stmt.group_by(census.columns.state)
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
# Print the keys/column names of the results returned
print(results[0].keys())
# Import func
from sqlalchemy import func
# Build an expression to calculate the sum of pop2008 labeled as population
pop2008_sum = func.sum(census.columns.pop2008).label('population')
# Build a query to select the state and sum of pop2008: stmt
stmt = select([census.columns.state, pop2008_sum])
# Group stmt by state
stmt = stmt.group_by(census.columns.state)
# Execute the statement and store all the records: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
# Print the keys/column names of the results returned
print(results[0].keys()) | Python | 48 | 27.354166 | 75 | /Datacamp/sqlalchemy_grouping_labeling.py | 0.766348 | 0.750184 |
DamienPond001/Udemy_API | refs/heads/master | #melting restores pivoted dfs
visitors = pd.melt(visitors_by_city_weekday, id_vars=['weekday'], value_name='visitors')
#id_vars specify columns to maintain
#value_names specify name of column containing the values
# Set the new index: users_idx
users_idx = users.set_index(['city', 'weekday'])
# Print the users_idx DataFrame
print(users_idx)
visitors signups
city weekday
Austin Sun 139 7
Dallas Sun 237 12
Austin Mon 326 3
Dallas Mon 456 5
# Obtain the key-value pairs: kv_pairs
kv_pairs = pd.melt(users_idx, col_level=0)
# Print the key-value pairs
print(kv_pairs)
variable value
0 visitors 139
1 visitors 237
2 visitors 326
3 visitors 456
4 signups 7
5 signups 12
6 signups 3
7 signups 5 | Python | 31 | 27.67742 | 88 | /Datacamp/melting.py | 0.595721 | 0.547297 |
DamienPond001/Udemy_API | refs/heads/master | #pivot tables aggregate data with duplicate indices
weekday city visitors signups
0 Sun Austin 139 7
1 Sun Dallas 237 12
2 Mon Austin 326 3
3 Mon Dallas 456 5
# Create the DataFrame with the appropriate pivot table: by_city_day
by_city_day = users.pivot_table(index = 'weekday', columns = "city")
# Print by_city_day
print(by_city_day)
signups visitors
city Austin Dallas Austin Dallas
weekday
Mon 3 5 326 456
Sun 7 12 139 237
# Use a pivot table to display the count of each column: count_by_weekday1
count_by_weekday1 = users.pivot_table(index='weekday', aggfunc='count')
# Print count_by_weekday
print(count_by_weekday1)
city signups visitors
weekday
Mon 2 2 2
Sun 2 2 2
# Replace 'aggfunc='count'' with 'aggfunc=len': count_by_weekday2
count_by_weekday2 = users.pivot_table(index='weekday', aggfunc=len)
# Create the DataFrame with the appropriate pivot table: signups_and_visitors
signups_and_visitors = users.pivot_table(index = "weekday", aggfunc=sum)
# Print signups_and_visitors
print(signups_and_visitors)
signups visitors
weekday
Mon 8 782
Sun 19 376
# Add in the margins: signups_and_visitors_total
signups_and_visitors_total = users.pivot_table(index = "weekday", aggfunc=sum, margins=True)
# Print signups_and_visitors_total
print(signups_and_visitors_total)
signups visitors
weekday
Mon 8 782
Sun 19 376
All 27 1158 | Python | 52 | 34.096153 | 92 | /Datacamp/pivit_tables.py | 0.565789 | 0.525768 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.