seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
6554163613 | import wx
import os
import cv2
import core
import device
from utils import load_graph_model
def selectModel():
    """Return the absolute path of the bundled pose-estimation model file.

    Only one model ships today; ``number`` is a placeholder for a future
    model-selection menu (see the 'add option for model' note below).
    """
    path = os.path.dirname(os.path.abspath(__file__))
    number = 0
    if number == 0:
        # os.path.join instead of the hard-coded "\" separator so the
        # path also resolves on non-Windows systems.
        modelPath = os.path.join(path, "Mobnet075F-model-stride16.json")
    return modelPath
def initializeModel(modelPath):
    """Load the TensorFlow graph model from ``modelPath`` and return it."""
    graph = load_graph_model(modelPath)
    return graph
def loadCamera(id, width, height):
    """Open camera ``id`` through DirectShow and set the capture resolution.

    Returns the cv2.VideoCapture; the caller must release it (closeCamera).
    """
    cam_obj = cv2.VideoCapture(id, cv2.CAP_DSHOW)
    if not cam_obj.isOpened():
        # VideoCapture.open() requires the device index (and optionally the
        # backend); the original bare open() call raised a TypeError.
        cam_obj.open(id, cv2.CAP_DSHOW)
    cam_obj.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cam_obj.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    return cam_obj
def closeCamera(cam_obj):
    """Release the capture device held by ``cam_obj``."""
    cam_obj.release()
def selectCamera(last_index):
    """Prompt for a camera index in [0, last_index] and return the choice.

    When only one device exists (last_index == 0) it is chosen silently.
    """
    if last_index == 0:
        return 0
    hint = "請選擇攝影機 (選項 0 到 " + str(last_index) + " ): "
    while True:
        try:
            choice = int(input(hint))
        except Exception:
            print("輸入非數字,請重試!")
            continue
        if choice > last_index or choice < 0:
            print("不存在的編號,請重試!")
            continue
        return choice
def selectResolution():
    """Ask the user for a capture resolution; return it as (width, height)."""
    number = 1
    print("0:1080p")
    print("1:720p")
    print("2:480p")
    hint = "請選擇攝影機解析度(選項 0 到 2 ):"
    try:
        number = int(input(hint))
    except Exception:
        print("輸入非數字,請重試!")
        # Fixed: the original called the undefined name select_resolution()
        # here, raising a NameError whenever the input was not numeric.
        return selectResolution()
    if number > 2 or number < 0:
        print("不存在的編號,請重試!")
        return selectResolution()
    elif number == 0:
        return 1920, 1080
    elif number == 1:
        return 1280, 720
    else:
        return 640, 480
def selectBackground():
    """Ask whether a custom background image should be used.

    Returns (True, path) when the user supplies an existing file,
    otherwise (False, None).
    """
    hint = "是否自訂背景?(0:是 1:否)"
    while True:
        try:
            answer = int(input(hint))
        except Exception:
            print("輸入非數字,請重試!")
            continue
        if answer > 1 or answer < 0:
            print("不存在的編號,請重試!")
            continue
        if answer == 1:
            return False, None
        path = input("請輸入背景圖片完整路徑(可直接拖曳檔案至本視窗):")
        if os.path.isfile(path):
            return True, path
        print("找不到檔案,請確認路徑後重試!")
def calc_targetResolution(stride, length):
    """Round ``length`` down to a multiple of ``stride`` and add one.

    Pose models expect input sides of the form n * stride + 1.
    """
    return int(length) // stride * stride + 1
# --- application entry point -------------------------------------------------
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"  # silence TensorFlow C++ logging
# Enumerate attached capture devices and print an indexed menu.
deviceList = device.getDeviceList()
index = 0
for name in deviceList:
    print(str(index) + ': ' + name)
    index += 1
lastIndex = index - 1
if lastIndex < 0:
    # NOTE(review): this only warns — execution continues and the camera
    # selection below runs with no device attached; consider exiting here.
    print("No device is connected")
stride = 16 #add option for model in future
modelPath = selectModel()
graph = initializeModel(modelPath)
camId = selectCamera(lastIndex)
imageW, imageH = selectResolution()
# Model input sides must be of the form n * stride + 1.
targetW = calc_targetResolution(stride,imageW)
targetH = calc_targetResolution(stride,imageH)
useBackground, bgPath = selectBackground()
cap = loadCamera(camId, imageW, imageH)
ret, frame = cap.read()
# Main loop: run the segmentation pipeline per frame until ESC (27) is hit.
while(ret):
    final=core.make_final(graph, frame, imageW, imageH, targetW, targetH, stride, bgPath, useBackground)
    cv2.imshow('Output', final)
    key = cv2.waitKey(1)
    if key == 27:
        break
    ret, frame = cap.read()
closeCamera(cap) | terry30207/background_remove | ui.py | ui.py | py | 3,551 | python | en | code | 0 | github-code | 13 |
36514547110 | class BabyShop:
    def __init__(self, name, brand, price, safety_standard, good_availability, warranty, age_suitability, supplier,
                 country):
        """Store the catalogue attributes of one baby-shop product."""
        self.name = name
        self.brand = brand
        self.price = price
        self.safety_standard = safety_standard
        self.good_availability = good_availability
        self.warranty = warranty
        self.age_suitability = age_suitability
        self.supplier = supplier
        self.country = country
    def __del__(self):
        # NOTE(review): __del__ runs at a GC-determined time (possibly never
        # at interpreter shutdown) — the message below is best-effort only.
        print("Object " + self.name + " was deleted")
def __str__(self):
return str(self.__dict__)
| DanyloShyshla/verbose-garbanzo | Models/baby_shop.py | baby_shop.py | py | 616 | python | en | code | 0 | github-code | 13 |
14106688400 | import os
import simplejson
import numpy as np
import matplotlib.pyplot as plt
from common import *
class Point_Object:
    """A circular attractor (sink) or repeller with a finite force field."""

    def __init__(self, position, is_sink, collision_radius, field_radius):
        self.position = position
        self.collision_radius = collision_radius
        self.field_radius = field_radius
        # Sinks pull the agent toward them; non-sinks push it away.
        if is_sink: # attract
            self.multiplier = 1
        else: # repel
            self.multiplier = -1

    def distance(self, position):
        '''compute the distance from center to position'''
        v = self.position - position
        return np.sqrt(np.dot(v, v))

    def is_in_collision(self, position):
        '''check if position is in collision with this instance'''
        if self.collision_radius == 0.0:
            return False
        if self.distance(position) < self.collision_radius:
            return True
        return False

    def is_intersecting(self, position_1, position_2):
        '''check if the trajectory will intersect the collision region'''
        v = np.subtract(self.position, position_1)
        u = np.subtract(position_2, position_1)
        # Parameter of the closest point on the segment: s = (v.u)/(u.u).
        # Fixed: the original divided by |u| (not |u|^2), which mis-scales
        # the parameter for non-unit segments and breaks the [0, 1] clamp.
        s = np.dot(v, u) / np.dot(u, u)
        if s < 0:
            c = position_1
        elif s > 1:
            c = position_2
        else:
            c = position_1 + s * u
        n = np.subtract(self.position, c)
        d = np.sqrt(np.dot(n, n))
        if d < self.collision_radius:
            return True
        return False

    def compute_force(self, position):
        '''compute the field force of this instance on given position'''
        d = self.distance(position)
        # NOTE(review): positions closer than field_radius receive zero
        # force, i.e. the field acts only *outside* the radius — confirm the
        # comparison is not inverted (the usual convention is the opposite).
        if d < self.field_radius:
            return np.array([0.0, 0.0])
        g = np.exp(-d * d)
        dir = self.position - position
        dir = dir / np.sqrt(np.dot(dir, dir))
        return self.multiplier * g * dir

    def render(self, figure, ax):
        # Small black dot for the center, translucent halo for the field.
        ax.scatter(self.position[0], self.position[1], color="black", s=5)
        ax.scatter(self.position[0], self.position[1], color="orange", s=1000, alpha=0.1)
class Line_Obstacle:
    # An axis-aligned wall segment with a hard collision band and a wider
    # repulsive force field. Segments are assumed to have unit length.
    def __init__(self, properties):
        self.is_horizontal = properties["horizontal"]
        self.start = np.array([properties["start"]["x"], properties["start"]["y"]])
        self.end = np.array([properties[ "end"]["x"], properties[ "end"]["y"]])
        self.collision_radius = 0.1 # collision radius of obstacles
        self.field_radius = 0.5 # field radius of obstacles
    def distance(self, position):
        '''compute the minimum distance from position to any point on line obstacle'''
        v = np.subtract(position, self.start)
        u = np.subtract(self.end, self.start)
        # NOTE(review): for non-unit segments this projection parameter
        # would need to be divided by dot(u, u) — confirm all obstacles
        # really are unit length.
        s = np.dot(v, u) # no need to divide by length of line segment due to unit length segment
        if s < 0:
            c = self.start
        elif s > 1:
            c = self.end
        else:
            # closest point lies strictly between the endpoints
            c = self.start + s * u
        n = np.subtract(position, c)
        return np.sqrt(np.dot(n, n))
    def is_in_collision(self, position):
        '''check if position is in collision with this obstacle'''
        if self.distance(position) < self.collision_radius:
            return True
        return False
    def is_intersecting(self, position_1, position_2):
        '''
        check if the trajectory from position 1 to 2 intersect
        this line obstacle.
        Implementation ref:
        https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/
        '''
        # helper functions
        def orientation(p, q, r):
            # sign of the cross product of (p->q) with (q->r)
            val = (float(q[1] - p[1]) * (r[0] - q[0])) - (float(q[0] - p[0]) * (r[1] - q[1]))
            if (val > 0): # Clockwise orientation
                return 1
            elif (val < 0): # Counterclockwise orientation
                return 2
            else: # Collinear orientation
                return 0
        def onSegment(p, q, r):
            # collinear case: q lies within the bounding box of segment pr
            if ((q[0] <= max(p[0], r[0])) and (q[0] >= min(p[0], r[0])) and
                    (q[1] <= max(p[1], r[1])) and (q[1] >= min(p[1], r[1]))):
                return True
            return False
        # pad the wall by its collision radius along its own axis
        if self.is_horizontal:
            extra = np.array([self.collision_radius, 0])
        else:
            extra = np.array([0, self.collision_radius])
        p1 = self.start - extra
        q1 = self.end + extra
        p2 = position_1
        q2 = position_2
        o1 = orientation(p1, q1, p2)
        o2 = orientation(p1, q1, q2)
        o3 = orientation(p2, q2, p1)
        o4 = orientation(p2, q2, q1)
        # General case
        if ((o1 != o2) and (o3 != o4)):
            return True
        # Special Cases
        # p1 , q1 and p2 are collinear and p2 lies on segment p1q1
        if ((o1 == 0) and onSegment(p1, p2, q1)):
            return True
        # p1 , q1 and q2 are collinear and q2 lies on segment p1q1
        if ((o2 == 0) and onSegment(p1, q2, q1)):
            return True
        # p2 , q2 and p1 are collinear and p1 lies on segment p2q2
        if ((o3 == 0) and onSegment(p2, p1, q2)):
            return True
        # p2 , q2 and q1 are collinear and q1 lies on segment p2q2
        if ((o4 == 0) and onSegment(p2, q1, q2)):
            return True
        # If none of the cases
        return False
    def compute_force(self, position):
        '''compute the field force of this obstacle on given position'''
        d = self.distance(position)
        # NOTE(review): zero force is returned *inside* the field radius, so
        # the field only acts beyond it — confirm the comparison is intended
        # (same pattern as Point_Object.compute_force).
        if d < self.field_radius:
            return np.array([0.0, 0.0])
        g = np.exp(-d * d)
        # walls push perpendicular to their own axis
        if self.is_horizontal:
            return np.array([0.0, g])
        else:
            return np.array([g, 0.0])
    def render(self, figure, ax):
        '''draw the obstacle on figure'''
        ax.plot([self.start[0], self.end[0]], [self.start[1], self.end[1]], color="black", linewidth=2)
        ax.plot([self.start[0], self.end[0]], [self.start[1], self.end[1]], color="orange", linewidth=10, alpha=0.1)
class Map:
    # Square grid world: start/goal positions plus line obstacles, all
    # loaded from a JSON description.
    def __init__(self, map_json):
        '''load start and goal location as well as a list of obstacles'''
        self.start = None
        self.goal = None
        self.obstacles = []
        with open(map_json, 'r') as f:
            map_dict = simplejson.load(f)
            self.grid_size = map_dict["grid_size"]
            self.start = np.array([map_dict["start"]["x"], map_dict["start"]["y"]])
            self.goal = np.array([map_dict[ "goal"]["x"], map_dict[ "goal"]["y"]])
            for obstacles in map_dict["line_obstacles"]:
                self.obstacles.append(Line_Obstacle(obstacles))
        print("[INFO]: Map initialized.")
    def is_in_bound(self, position):
        '''check whether position is still in the map area'''
        if position[0] < 0 or position[0] > self.grid_size:
            return False
        if position[1] < 0 or position[1] > self.grid_size:
            return False
        return True
    def is_in_collision(self, position):
        '''check whether given position is in collision with obstables'''
        for obstacle in self.obstacles:
            if obstacle.is_in_collision(position):
                return True
        return False
    def is_reachable(self, start_position, end_position):
        '''
        check whether end position can be reached from start position
        in a straight line fashion
        '''
        for obstacle in self.obstacles:
            if obstacle.is_intersecting(start_position, end_position):
                return False
        return True
    def compute_goal_force(self, position):
        '''return force vector from given position to goal position'''
        # unit vector toward the goal, scaled by GOAL_FORCE_AMPLITUDE
        # (imported via the star import from `common`)
        dir = np.subtract(self.goal, position)
        return GOAL_FORCE_AMPLITUDE * np.divide(dir, np.sqrt(np.dot(dir, dir)))
    def compute_obstacles_force(self, position):
        '''
        find all obstacles that has an effect on given position and
        compute the net gradient
        '''
        force = np.array([0.0, 0.0])
        for obstacle in self.obstacles:
            force += obstacle.compute_force(position)
        return force
def compute_net_force(self, position, alpha=1.0, beta=1.0):
'''
combine goal force and obstacles' force
net_force = alpha * goal_force + beta * obstacles_force
'''
force = self.compute_goal_force(position)
force += self.compute_obstacles_force(position)
return force
def render(self):
'''draw the map'''
figure, ax = plt.subplots(figsize=(self.grid_size, self.grid_size))
ax.scatter(0.5, 0.5, s=5, color="green", marker="o")
ax.scatter(self.grid_size - 0.5, self.grid_size - 0.5, s=5, color="red", marker="o")
for obstacle in self.obstacles:
obstacle.render(figure, ax)
return figure, ax | Victor-YG/ARRT | src/environment.py | environment.py | py | 8,785 | python | en | code | 1 | github-code | 13 |
72722352017 | """empty message
Revision ID: abc83a63df8d
Revises: c5d5f52381c5
Create Date: 2021-02-17 14:32:47.552539
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'abc83a63df8d'
down_revision = 'c5d5f52381c5'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: drop the 'edges' association table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('edges')
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: recreate 'edges' (source-to-source links)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('edges',
    sa.Column('from_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.Column('to_id', sa.INTEGER(), autoincrement=False, nullable=False),
    sa.ForeignKeyConstraint(['from_id'], ['sources.id'], name='edges_from_id_fkey'),
    sa.ForeignKeyConstraint(['to_id'], ['sources.id'], name='edges_to_id_fkey'),
    sa.PrimaryKeyConstraint('from_id', 'to_id', name='edges_pkey')
    )
    # ### end Alembic commands ###
| knolist/knolist | migrations/versions/abc83a63df8d_.py | abc83a63df8d_.py | py | 982 | python | en | code | 1 | github-code | 13 |
28978923684 | from django.utils import timezone
from django.db import models
from django.contrib.auth.models import User
from django.core.validators import MinValueValidator, MaxValueValidator
from decimal import Decimal
from django.db.models.signals import pre_delete
from django.dispatch import receiver
import os
class Product(models.Model):
    """A book-style product listing owned by a user."""
    name = models.CharField(max_length=255, null=True, blank=True)
    image = models.ImageField(upload_to='products/', null=True, blank=True)
    author = models.CharField(max_length=255, null=True, blank=True, default='Default Author')
    year = models.IntegerField(null=True, blank=True, default=2023)
    publisher = models.CharField(max_length=255, null=True, blank=True, default='Default Publisher')
    genre = models.CharField(max_length=255, null=True, blank=True, default='Default Genre')
    description = models.TextField(null=True, blank=True)
    # Rating is validated into the inclusive range [0.0, 5.0].
    rating = models.DecimalField(
        max_digits=2,
        decimal_places=1,
        validators=[
            MinValueValidator(Decimal('0.0'), message="Rating must be 0.0 or greater."),
            MaxValueValidator(Decimal('5.0'), message="Rating must be 5.0 or less."),
        ],
        null=True,
        blank=True,
        default=3.0  # default value for the rating
    )
    amount = models.IntegerField(null=True, blank=True)  # stock amount (no default despite the original comment)
    date_added = models.DateTimeField(default=timezone.now)
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)  # owner; nullable
def to_dict(self):
product_dict = {
'model': 'Product',
'id': self.user.pk,
'fields':{
'name': self.name,
'image': str(self.image), # Convert ImageField to string for simplicity
'author': self.author,
'year': self.year,
'publisher': self.publisher,
'genre': self.genre,
'description': self.description,
'rating': float(self.rating),
'amount': self.amount,
'date_added': self.date_added.strftime("%Y-%m-%d %H:%M:%S"), # Convert DateTimeField to string
'user': self.user.pk if self.user else 0 # Get username if user exists
}
}
return product_dict
@receiver(pre_delete, sender=Product)
def delete_product_image(sender, instance, **kwargs):
    """Remove the image file from disk when its Product row is deleted."""
    image = instance.image
    # Only attempt removal when an image is set and the file still exists.
    if image and os.path.isfile(image.path):
        os.remove(image.path)
| Gyan-Bano/tugas-pbp-gyan | main/models.py | models.py | py | 2,580 | python | en | code | 0 | github-code | 13 |
3345873243 | import ptypes
from ptypes import *
import functools,operator,itertools,types
import logging
ptypes.setbyteorder(ptypes.config.byteorder.bigendian)
### X.224 Variable
class X224Variable(ptype.definition):
    # Registry of TPDU fixed-part structures, keyed by the high nibble of
    # the TPDU type octet (the `type` attribute on each definition below).
    cache = {}
@X224Variable.define
class CR_TPDU(pstruct.type):
    '''Connection Request'''
    type = 0xe
    class _class_option(pbinary.struct):
        # preferred transport class (high nibble) / option bits (low nibble)
        _fields_ = [
            (4, 'class'),
            (4, 'option'),
        ]
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint16_t, 'SRC-REF'),
        (_class_option, 'CLASS-OPTION'),
    ]
@X224Variable.define
class CC_TPDU(pstruct.type):
    '''Connection Confirm'''
    type = 0xd
    # same layout as CR_TPDU, including its class/option octet
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint16_t, 'SRC-REF'),
        (CR_TPDU._class_option, 'CLASS-OPTION'),
    ]
@X224Variable.define
class DR_TPDU(pstruct.type):
    '''Disconnect Request'''
    type = 0x8
    class _reason(pint.enum, pint.uint8_t):
        # XXX: this enumeration is incomplete (ITU-T Rec. X.224 page 63)
        _values_ = [
            ('Normal disconnect initiated by session entity', 128+0),
            ('Connection negotiation failed', 128+2),
            ('Protocol error', 128+5),
            ('Reference overflow', 128+7),
            ('Header or parameter length invalid', 128+10),
        ]
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint16_t, 'SRC-REF'),
        (CR_TPDU._class_option, 'class-option'),
        (_reason, 'REASON'),
    ]
@X224Variable.define
class DC_TPDU(pstruct.type):
    '''Disconnect Confirm'''
    type = 0xc
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint16_t, 'SRC-REF'),
    ]
class EOT_NR(pbinary.struct):
    # End-of-TSDU flag followed by a 7-bit sequence-number field.
    class unused(pbinary.enum):
        length, _values_ = 7, []
    def __nr(self):
        # NOTE(review): when EOT is set, NR becomes the (empty) `unused`
        # enum instead of 7 raw bits — confirm this mapping is intended.
        res = self['EOT']
        return EOT_NR.unused if res else 7
    _fields_ = [
        (1, 'EOT'),
        (__nr, 'NR'),
    ]
@X224Variable.define
class DT_TPDU(EOT_NR):
    '''Data (Class 0 and 1)'''
    # fixed part is just the EOT/NR octet inherited from EOT_NR
    type = 0xf
@X224Variable.define
class ED_TPDU(pstruct.type):
    '''Expedited Data'''
    type = 0x1
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (EOT_NR, 'ED-TPDU-NR'),
    ]
@X224Variable.define
class AK_TPDU(pstruct.type):
    '''Data Acknowledgement'''
    type = 0x6
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint8_t, 'YR-TPDU-NR'),
    ]
@X224Variable.define
class EA_TPDU(pstruct.type):
    '''Expedited Data Acknowledgement'''
    type = 0x2
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint8_t, 'YR-EDTU-NR'),
    ]
@X224Variable.define
class RJ_TPDU(pstruct.type):
    '''Reject'''
    type = 0x5
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (pint.uint8_t, 'YR-TU-NR'),
    ]
@X224Variable.define
class ER_TPDU(pstruct.type):
    '''Error'''
    type = 0x7
    class _cause(pint.enum, pint.uint8_t):
        # reject cause codes
        _values_ = [
            ('Reason not specified', 0),
            ('Invalid parameter code', 1),
            ('Invalid TPDU type', 2),
            ('Invalid parameter value', 3),
        ]
    _fields_ = [
        (pint.uint16_t, 'DST-REF'),
        (_cause, 'CAUSE'),
    ]
### X.224 Parameters
class X224Param(ptype.definition):
    # Registry of parameter value types, keyed by the parameter code octet.
    cache = {}
class X224Parameter(pstruct.type):
    # TLV-style parameter: code, flags, 16-bit length, then `length` bytes.
    def __value(self):
        res, cb = self['code'].li, self['length'].li
        # NOTE(review): the lookup result is discarded and a raw block is
        # returned regardless — only lookup()'s possible failure side effect
        # remains; confirm the typed value was not meant to be used here.
        res = X224Param.lookup(res)
        return dyn.block(cb)
    _fields_ = [
        (pint.uint8_t, 'code'),
        (pint.uint8_t, 'flags'),
        (pint.uint16_t, 'length'),
        (__value, 'value'),
    ]
class X224ParameterArray(parray.block):
    # contiguous run of X224Parameter records filling a fixed-size blob
    _object_ = X224Parameter
### Unused X.224 parameters
# @X224Param.define
# class RDPNegotiationRequest(pint.uint32_t, pint.enum):
# type = 0x01
# _values_ = [
# ('PROTOCOL_RDP', 0x00000000),
# ('PROTOCOL_SSL', 0x00000001),
# ('PROTOCOL_HYBRID', 0x00000002),
# ]
# @X224Param.define
# class RDPNegotiationResponse(pint.uint32_t, pint.enum):
# type = 0x02
# _values_ = [
# ('PROTOCOL_RDP', 0x00000000),
# ('PROTOCOL_SSL', 0x00000001),
# ('PROTOCOL_HYBRID', 0x00000002),
# ]
# @X224Param.define
# class RDPNegotiationFailure(pint.uint32_t, pint.enum):
# type = 0x02
# _values_ = [
# ('SSL_REQUIRED_BY_SERVER', 0x00000001),
# ('SSL_NOT_ALLOWED_BY_SERVER', 0x00000002),
# ('SSL_CERT_NOT_ON_SERVER', 0x00000003),
# ('INCONSISTENT_FLAGS', 0x00000004),
# ('HYBRID_REQUIRED_BY_SERVER', 0x00000005),
# ('SSL_WITH_USER_AUTH_REQUIRED_BY_SERVER', 0x00000006),
# ]
### X.224 TPDU
class TPDU(pstruct.type):
    # X.224 transport PDU: length indicator (LI), type octet, the fixed
    # part selected by the type's high nibble, then optional parameters.
    class _type(pbinary.struct):
        class code(pbinary.enum):
            # high nibble identifies the TPDU kind
            length, _values_ = 4, [
                ('Connection request', 0xe),
                ('Connection confirm', 0xd),
                ('Disconnect request', 0x8),
                ('Disconnect confirm', 0xc),
                ('Data', 0xf),
                ('Expedited data', 0x1),
                ('Data acknowledgement', 0x6),
                ('Expedited data acknowledgement', 0x2),
                ('Reject', 0x5),
                ('Error', 0x7),
            ]
        _fields_ = [
            (code, 'high'),
            (4, 'low'),
        ]
    def __variable(self):
        # Fixed part size = LI minus the type octet; degrade to an empty
        # block when the length indicator is too small.
        res, fixed = self['length'].li.int(), self['type'].li.size()
        if res < fixed:
            logging.info("{:s} : length is too short ({:d} < {:d})".format(self.shortname(), res, fixed))
            return dyn.block(0)
        variable = res - fixed
        res = self['type'].li
        hi, lo = res['high'], res['low']
        # unknown type codes fall back to an opaque block of that size
        return X224Variable.withdefault(hi, ptype.block, length=variable)
    def __parameters(self):
        # whatever remains of LI after type + fixed part is the parameter blob
        res = self['length'].li.int()
        parameters = sum(self[fld].li.size() for fld in ['type','variable'])
        if res >= parameters:
            return dyn.block(res - parameters)
        logging.info("{:s} : length is too short ({:d} < {:d})".format(self.shortname(), res, parameters))
        return dyn.block(0)
    _fields_ = [
        (pint.uint8_t, 'length'),
        (_type, 'type'),
        (__variable, 'variable'),
        (__parameters, 'parameters'),
    ]
    def alloc(self, **fields):
        # auto-fill the length indicator unless the caller supplied one
        if 'length' in fields.keys():
            return super(TPDU, self).alloc(**fields)
        res = super(TPDU, self).alloc(**fields)
        return res.set(length=sum(res[fld].size() for fld in ['type','variable','parameters']))
    def summary(self):
        # one-line rendering: length, decoded type, fixed part, param size
        res = []
        res.append("length={:#x}".format(self['length'].int()))
        t = self['type']
        res.append("type={:s}({:#x}, {:#06b})".format(t.__field__('high').str(), t['high'], t['low']))
        res.append("variable={:s}".format(self['variable'].summary()))
        if self['parameters'].size() > 0:
            res.append("parameters=...{:d} bytes...".format(self['parameters'].size()))
        return ' '.join(res)
### X.224 TPKT
class TPKT(pstruct.type):
    # RFC 1006-style TPKT framing around a single X.224 TPDU.
    def __reserved(self):
        # version 3 carries a 1-byte reserved field, otherwise assume 2 bytes
        res = self['version'].li
        return pint.uint8_t if res.int() == 3 else pint.uint16_t
    def __tpdu(self):
        # bound the TPDU by the packet length minus the header size
        res = sum(self[fld].li.size() for fld in ['version','reserved','length'])
        return dyn.clone(TPDU, blocksize=lambda self, cb=self['length'].int() - res: cb if cb > 0 else 0)
    def __userdata(self):
        # anything after the TPDU but still inside `length` is user data
        res = sum(self[fld].li.size() for fld in ['version','reserved','length','pdu'])
        return dyn.block(self['length'].li.int() - res) if self['length'].int() >= res else dyn.block(0)
    _fields_ = [
        (pint.uint8_t, 'version'),
        (__reserved, 'reserved'),
        (pint.uint16_t, 'length'),
        (TPDU, 'pdu'),
        (__userdata, 'data'),
    ]
    def alloc(self, **fields):
        # auto-fill `length` over the whole packet unless supplied
        if 'length' in fields.keys():
            return super(TPKT, self).alloc(**fields)
        res = super(TPKT, self).alloc(**fields)
        return res.set(length=sum(res[fld].size() for fld in ['version','reserved','length','pdu','data']))
if __name__ == '__main__':
    # ad-hoc smoke tests against captured RDP handshake bytes
    import ptypes, protocol.x224 as x224
    ### connection request
    data = "030000221de00000000000" + str().join(map("{:02x}".format, bytearray(b'Cookie: mstshash=hash\r\n')))
    ptypes.setsource(ptypes.prov.bytes(bytes(bytearray.fromhex(data))))
    a = x224.TPKT()
    a = a.l
    print(a['data'])
    ### client mcs erect domain request
    # NOTE(review): only the last assignment to `data` is parsed; the two
    # preceding literals are dead.
    data = "0300000c02f0800401000100"
    data = "0300000802f08028"
    data = "0300000c02f08038000603eb"
    # NOTE(review): str.decode('hex') is Python 2 only — this line fails on
    # Python 3, unlike the bytearray.fromhex() used above.
    ptypes.setsource(ptypes.prov.string(data.decode('hex')))
    a = x224.TPKT()
    a = a.l
| arizvisa/syringe | template/protocol/x224.py | x224.py | py | 8,636 | python | en | code | 35 | github-code | 13 |
24769702949 | import os
import Opioid2D
from Opioid2D.public.Node import Node
from pug import Filename, Dropdown
from pug.component import *
from pig.components.behavior.Animate_Grid import Animate_Grid
from pig.components.controls.Key_Direction_Controls import \
Key_Direction_Controls
class Key_Animate_Direction( Animate_Grid, Key_Direction_Controls):
    """Control object velocity with up, down, left, right keys. Uses a set of
    animation or single images stored in a file that contains multiple frames.
    """
    #component_info
    _set = 'pig'
    _type = 'controls'
    _class_list = [Node]
    # attributes: ['name','desc'] or ['name', agui, {'doc':'desc', extra info}]
    # separate these to make derived components easier to write
    _field_list = Animate_Grid._grid_list + \
            [['fps','Frames per second'],
             ['up_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['upright_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['right_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['downright_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['down_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['downleft_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['left_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
             ['upleft_frames',
                    'List of frame numbers and/or tuples in the ' +\
                    'range form ([start], stop, [step])'],
            ] + \
            Key_Direction_Controls._field_list
    #defaults
    rotate = False
    up_frames = upright_frames = right_frames = downright_frames = down_frames\
            = downleft_frames = left_frames = upleft_frames = (0,1)
    dir = None
    # NOTE(review): framedict is a class attribute, so it is shared by every
    # instance of this component — confirm that is intended.
    framedict = {}
    image_sprite = None
    @component_method
    def on_added_to_scene(self):
        # Defer frame loading by one frame (zero-delay action) so the scene
        # is fully set up before the grid images are sliced.
        Key_Direction_Controls.on_added_to_scene(self)
        (Opioid2D.Delay(0) + Opioid2D.CallFunc(self.do_load_frames)).do()
    def do_load_frames(self):
        # Load every direction's animation, then force an initial facing
        # without leaving the owner moving.
        self.load_frames()
        self.change_velocity(0,1)
        self.owner.velocity = (0,0)
    def load_frames(self):
        # Build self.framedict: direction attribute name -> frame images.
        dirs = ['up_frames','upright_frames','right_frames','downright_frames',
                'down_frames','downleft_frames','left_frames','upleft_frames']
        for dir in dirs:
            try:
                self._frame_sequence = getattr(self, dir)
            except:
                # NOTE(review): bare except silently skips the direction on
                # any error — consider narrowing the exception type.
                continue
            self.framedict[dir] = self.get_frame_images()
    def change_velocity(self, x_change, y_change):
        # Apply the velocity change, then swap the running animation to match
        # the new movement direction (screen coordinates: +y is down).
        Key_Direction_Controls.change_velocity(self, x_change, y_change)
        vx, vy = self.owner.velocity
        dir = None
        if vx > 0:
            if vy > 0:
                dir = "downright_frames"
            elif vy < 0:
                dir = "upright_frames"
            else:
                dir = "right_frames"
        elif vx < 0:
            if vy > 0:
                dir = "downleft_frames"
            elif vy < 0:
                dir = "upleft_frames"
            else:
                dir = "left_frames"
        elif vy > 0:
            dir = "down_frames"
        elif vy < 0:
            dir = "up_frames"
        # no change of direction (or fully stopped): keep current animation
        if self.dir == dir or dir == None:
            return
        self.dir = dir
        self.frames = self.framedict[dir]
        try:
            self.anim_action.abort()
        except:
            # no animation running yet
            pass
        action = Opioid2D.Animate(self.frames, fps=self.fps,
                                  mode=self.modes["Repeat"])
        if self.rotate:
            # Animate a child sprite counter-rotated against the owner so
            # the animation stays upright while the owner node rotates.
            if self.image_sprite is None:
                self.image_sprite = Opioid2D.Sprite()
                self.image_sprite.attach_to(self.owner)
                self.owner.image = self.frames[0]
                self.owner.alpha = 0
            image_sprite = self.image_sprite
            image_sprite.rotation = -self.owner.rotation
        else:
            image_sprite = self.owner
        self.anim_action = action.do(image_sprite)
    def do_set_animation(self):
        # default the Animate_Grid sequence to the 'up' frames
        self._frame_sequence = self.up_frames
        Animate_Grid.do_set_animation(self)
register_component( Key_Animate_Direction)
| sunsp1der/pug | pig/components/controls/Key_Animate_Direction.py | Key_Animate_Direction.py | py | 4,893 | python | en | code | 0 | github-code | 13 |
75055471696 | from flask import abort, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from .. import db
from ..models import Notes
from .forms import NotesForm
from . import user
@user.route('/')
@user.route('/index')
@login_required
def index():
    """Render the user landing page with every saved note."""
    all_notes = Notes.query.all()
    return render_template('/user/index.html', notes=all_notes)
@user.route('/forms', methods = ['GET', 'POST'])
@login_required
def forms():
    """Show the note form; on a valid POST, save the note for current_user."""
    form = NotesForm()
    if form.validate_on_submit():
        note = Notes(title=form.title.data,
                     description=form.description.data,
                     user_id=current_user.id)
        db.session.add(note)
        db.session.commit()
        flash('Note Saved')
        # redirect-after-post so a refresh does not resubmit the form
        return redirect(url_for('user.forms'))
return(render_template('/user/forms.html', form=form, title='Add Note')) | Man-Jain/Flask-Keep | app/user/views.py | views.py | py | 791 | python | en | code | 0 | github-code | 13 |
6198811922 | class Request:
    def __init__(self, token, type, flags, datacenter, complete_func, quick_ack_func):
        """Create a pending request with zeroed send/retry bookkeeping."""
        # wire-level identifiers, filled in once the request is sent
        self.message_id = 0
        self.message_seq_no = 0
        self.connection_token = 0
        self.retry_count = 0
        self.failed_by_salt = False
        self.completed = False
        self.cancelled = False
        self.is_init_request = False
        self.serialized_length = 0
        # timing fields used for retransmission scheduling
        self.start_time = 0
        self.min_start_time = 0
        self.last_resend_time = 0
        self.server_failure_count = 0
        self.raw_request = None
        self.rpc_request = None
        # message ids of earlier messages that this request answers
        self.responds_to_message_ids = []
        self.request_token = token
        self.connection_type = type
        self.request_flags = flags
        self.datacenter_id = datacenter
        # callbacks: completion (result, error) and server quick-ack
        self.on_complete_func = complete_func
        self.on_quick_ack_func = quick_ack_func
    def add_respond_message_id(self, id):
        """Record that this request responds to message ``id``."""
        self.responds_to_message_ids.append(id)
def responds_to_message_id(self, id):
return self.message_id == id or id in self.responds_to_message_ids
def clear(self, clear_time):
self.message_id = 0
self.message_seq_no = 0
self.connection_token = 0
if clear_time:
self.start_time = 0
self.min_start_time = 0
def on_complete(self, result, error):
if self.on_complete_func is not None and (result is not None or error is not None):
self.on_complete_func(result, error)
def on_quick_ack(self):
if self.on_quick_ack_func is not None:
self.on_quick_ack()
| vijfhoek/telecli | mtproto/request.py | request.py | py | 1,594 | python | en | code | 0 | github-code | 13 |
7093242307 | from django.http.response import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import authenticate, login as lgn, logout as lgout
from .forms import SignUpForm
from .models import Film
@csrf_exempt
def signup(request):
    """Create a user account from POSTed SignUpForm data."""
    if request.method != 'POST':
        return HttpResponse('Only post method allowed!')
    form = SignUpForm(request.POST)
    if not form.is_valid():
        return HttpResponse(f"{form.errors}")
    form.save()
    return HttpResponse('User created successfully!')
@csrf_exempt
def login(request):
    """Authenticate the posted credentials and open a session."""
    if request.method != 'POST':
        return HttpResponse('request method not allowed !')
    credentials = {
        "username": request.POST.get("username"),
        "password": request.POST.get("password"),
    }
    user = authenticate(request, **credentials)
    if not user:
        return HttpResponse('Login Failed - Your password or username is wrong')
    lgn(request, user)
    return HttpResponse('Login Successfully')
@csrf_exempt
def logout(request):
    """Close the current session, when one exists."""
    if request.method != 'POST':
        return HttpResponse('Request method not allowed !')
    if not request.user.is_authenticated:
        return HttpResponse('You should login first !')
    lgout(request)
    return HttpResponse('You were loged out seccessfully !')
@csrf_exempt
def upload_film(request):
    """Let an authenticated admin create a Film record from POSTed fields."""
    if request.method != 'POST':
        return HttpResponse('Request method not allowed !')
    if not request.user.is_authenticated:
        return HttpResponse('You should login first !')
    if not request.user.is_superuser:
        return HttpResponse('You should be admin !')
    film = Film()
    # copy every expected field straight from the POST payload
    for field in ('name', 'summary', 'genre', 'director', 'actors',
                  'country', 'yearOfPublication', 'photo'):
        setattr(film, field, request.POST.get(field))
    film.save()
    return HttpResponse('Film uploaded successfully !')
@csrf_exempt
def show_all_film(request):
    """Return (id, name, photo) for every film as a JSON array."""
    if request.method == 'GET':
        films = list(Film.objects.all().values_list('id', 'name', 'photo'))
        # Fixed: a Django view must return an HttpResponse; the original
        # returned the raw list, which fails at dispatch time. Local import
        # because the module only imports HttpResponse at the top.
        from django.http import JsonResponse
        return JsonResponse(films, safe=False)
    return HttpResponse('Request method not allowed !')
| aliiimaher/DownloadMovie-BackEnd | DownloadMovie_BackEnd/backend/views.py | views.py | py | 2,545 | python | en | code | 5 | github-code | 13 |
33528179049 | import os
import time
import random
# import git
from redis import Redis
import tempfile
import pickle
import zlib
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from celery import Celery
from flask import send_file, jsonify
from matplotlib.ticker import MaxNLocator
from collections import Counter
from itertools import chain
from project.machine_learning.src.model_trainer import model_trainer
from project.machine_learning.src import util
from project.machine_learning.src.csv_file_modifier.modifier import csv_modifier
from project.machine_learning.src.preprocessor import preprocess as pre
from project.machine_learning.src.duplicate_remover import comment_database as cdb
from werkzeug.utils import secure_filename
from project.machine_learning.src import extractor
matplotlib.use('Agg')  # headless backend: figures are saved, never shown
# Load the pre-trained classifier plus its vectorizer/binarizer once at
# import time so every task reuses the same in-memory model.
model = model_trainer()
model.open_model('model_gbdt.pkl')
model.open_vocabulary('vectorizer.pkl')
model.open_binarizer('binarizer.pkl')
# r = Redis("machine_learning_app_redis_1", 6379)
r = Redis.from_url(os.environ['REDIS_URL'])
def process(comment):
    """Run one raw comment through the text preprocessor pipeline.

    Returns process_comment()'s sequence; callers index [0] for the text.
    """
    process = pre(dictionary_file='word.pkl')
    return process.process_comment(comment)
def store_df(data, name) -> bool:
    """Pickle + zlib-compress ``data`` and store it in Redis under ``name``."""
    # NOTE(review): flushdb() wipes *every* key in the current Redis db
    # before storing — confirm discarding earlier results is intended.
    r.flushdb()
    r.set(name, zlib.compress( pickle.dumps(data)))
    print('sucessfully stored')
    return True
def plot_graph(counter, savedir):
    """Render a bar chart of label frequencies and save it under ``savedir``.

    Returns the generated file name (not the full path).
    """
    modifier = csv_modifier()
    print('counter is ', counter)
    labels, amount = util.label_counter(counter)
    print("labels are", labels)
    print("label amounts are", amount)
    # Fixed: draw on an explicit figure instead of the implicit global
    # pyplot axes — with global state, successive calls kept stacking bars
    # onto the same figure and leaked figures in a long-lived worker.
    figure, axes = plt.subplots(figsize=(11.0, 3.50))
    axes.bar(labels, amount, align='center')
    filename = modifier.find_next_filename('lb', savedir, 'jpg')
    figure.savefig(os.path.join(savedir, filename), bbox_inches='tight', pad_inches=.1)
    plt.close(figure)
    return filename
def allowed_file(filename):
    """True when ``filename`` has an extension listed in ALLOWED_EXTENSIONS."""
    # NOTE(review): ALLOWED_EXTENSIONS is not defined anywhere in this
    # module — it must be provided elsewhere at runtime; verify.
    return '.' in filename and \
           filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
def background_file_labeler(file, column: str):
    """Label the cached dataframe stored in Redis under *column* and cache the result.

    Preprocesses the text column, runs the module-level model over it, stores
    the labeled dataframe back in Redis under a fresh random key, and returns
    that key together with a human-readable label-count summary.
    """
    # NOTE(review): result / processed_files / download_folder are never used
    # in this function -- presumably leftovers; confirm before removing.
    result = ""
    processed_files = []
    download_folder = "project/server/"
    filename = file
    # The dataframe is fetched from the Redis cache, not read from *file*.
    data = pickle.loads(zlib.decompress(r.get(column)))
    print("processing file: " + filename, 'in column', column)
    process = pre(file, column, dictionary_file='word.pkl')
    data['new_line'] = data[column].apply(lambda x: process.process_comment(x)[0])
    print("predicting", filename)
    prediction, binarizer = model.predict(data[['new_line', 'language']])
    prediction = binarizer.inverse_transform(prediction)
    # print(prediction)
    data['prediction'] = prediction
    tmp = data['prediction'].values
    print('tmp is', tmp)
    values = []
    for item in tmp:
        for val in item:
            # val == val filters out NaN (NaN != NaN).
            if val == val:
                values.append(val)
    dataname = random_string()
    store_df(data, dataname)
    value_count = Counter(values)
    # image = plot_graph(value_count, download_folder)
    res = ""
    for key, val in value_count.items():
        res = res + key + ': ' + str(val) + ' '
    return {"data": dataname, "count": res}
def file_labeler():
    """Flask-style handler: validate the uploaded file and kick off labeling."""
    if 'file' not in request.files:
        print("no file part")
        flash('No file part')
        return redirect(request.url)
    file = request.files.get('file')
    # NOTE(review): background_file_labeler requires (file, column) -- this
    # one-argument call raises TypeError. The intended column name is not
    # evident from this file; confirm against the upload form / callers.
    task = background_file_labeler(file)
    return True
def label(comment: str):
    """Predict human-value labels for a single comment string.

    Returns a comma-separated label string, or a friendly sentence when the
    only prediction is 'none'.
    """
    comment = process(comment)[0]
    print("predictin comment: ", comment)
    item_to_predict = pd.DataFrame()
    item_to_predict['new_line'] = [ comment ]
    # NOTE(review): the language feature is hard-coded to 'python' here --
    # confirm that is intended for free-form comments.
    item_to_predict['language'] = ['python']
    results, binarizer = model.predict(item_to_predict)
    results = to_only_none(results)
    results = binarizer.inverse_transform(results)
    tmp = ""
    # Join the first prediction tuple into "a, b, c".
    for result in results[0]:
        if tmp == "":
            tmp = result
        else:
            tmp = tmp + ", " + result
    results = tmp
    if results == 'none':
        results = 'There is no value mention.'
    return results
def to_only_none(input):
    """Collapse label rows to the pure 'none' one-hot where appropriate.

    Any row whose index-4 ('none') flag is set, or that has no set flags at
    all, is replaced by a vector with only index 4 set; every other row is
    passed through unchanged. Returns a numpy array.
    """
    normalized = []
    for row in input:
        if row[4] == 1 or not np.any(row):
            none_row = [0] * len(row)
            none_row[4] = 1
            normalized.append(none_row)
        else:
            normalized.append(row)
    return np.array(normalized)
def repo(repo_url, branch):
    """Clone *repo_url*@*branch*, extract its comments, and label all of them.

    Returns {"data": <redis key of the labeled dataframe>, "count": <summary>}.
    """
    print("attempting to get from repo")
    repo = repo_url
    column = 'line'
    with tempfile.TemporaryDirectory() as tmpdirname:
        files = extractor.get_comment_from_repo_using_all_languages(repo , branch, tmpdirname)
        data = pd.DataFrame()
        # Accumulate per-file CSVs, de-duplicating within each file.
        for file in files:
            print(file)
            new_data = pd.read_csv(file)
            new_data = new_data.drop_duplicates(subset=['line'])
            new_data = new_data.drop_duplicates(subset=['location'])
            data = pd.concat([new_data, data])
            print("processing file: " + file)
            # NOTE(review): only the processor built from the *last* file is
            # used below -- confirm the preprocessor really is file-independent.
            processor = pre(file, column, dictionary_file='word.pkl')
    print('preprocessing...')
    data['new_line'] = data[column].apply(lambda x: processor.process_comment(x)[0])
    print("predicting...")
    prediction, binarizer = model.predict(data[['new_line', 'language']])
    prediction = binarizer.inverse_transform(prediction)
    data['prediction'] = prediction
    dataname = random_string()
    store_df(data, dataname)
    tmp = data['prediction'].values
    print(tmp)
    values = []
    for item in tmp:
        for val in item:
            # val == val filters out NaN (NaN != NaN).
            if val == val:
                values.append(val)
    value_count = Counter(values)
    res = ""
    for key, val in value_count.items():
        res = res + key + ': ' + str(val) + ' '
    # print(res)
    return {"data": dataname, "count": res}
def remove_files(files: list[str]) -> None:
    """Delete every path in *files* from disk (raises OSError if one is missing)."""
    for path in files:
        os.remove(path)
def random_string():
    """Return a random 4-7 character cache key drawn from a fixed alphabet."""
    alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'z', 'x', 'w',
                '1', '2', '3', '4', '5', '6', '7', '8', '9', '0']
    length = random.randint(4, 7)
    return "".join(random.choice(alphabet) for _ in range(length))
if __name__ == '__main__':
    # Dev entry point; `app` is presumably the Flask application created
    # earlier in this module (not visible in this chunk) -- confirm.
    app.run(debug=True)
| luyangliuable/human-values-code-machine-learning-app | project/machine_learning/app.py | app.py | py | 6,122 | python | en | code | 0 | github-code | 13 |
21666901039 | import time
import unittest
import time
from datetime import datetime
import sqlalchemy as sa
from sqlalchemy import create_engine
from sqlalchemy.orm import Session, DeclarativeBase
from sqlalchemy_mixins import TimestampsMixin
class Base(DeclarativeBase):
    """Declarative base shared by all models in this test module."""
    __abstract__ = True
class BaseModel(Base, TimestampsMixin):
    """Model to use as base."""
    __abstract__ = True
    # Surrogate primary key plus a simple mutable column used to trigger updates.
    id = sa.Column(sa.Integer, primary_key=True)
    name = sa.Column(sa.String)
class User(BaseModel):
    """User model exemple."""
    # Concrete table so the abstract mixin chain can be exercised.
    __tablename__ = 'user'
class TestTimestamps(unittest.TestCase):
"""Test case for Timestamp mixin."""
@classmethod
def setUpClass(cls):
cls.engine = create_engine('sqlite:///:memory:', echo=False)
def setUp(self):
self.session = Session(self.engine)
Base.metadata.create_all(self.engine)
user_1 = User(name='User')
self.session.add(user_1)
self.session.commit()
def tearDown(self):
Base.metadata.drop_all(self.engine)
def test_timestamp_must_be_abstract(self):
"""Test whether TimestampsMixin is abstract."""
self.assertTrue(hasattr(TimestampsMixin, '__abstract__'),
'TimestampsMixin must have attribute __abstract__')
self.assertTrue(TimestampsMixin.__abstract__,
'__abstract__ must be True')
def test_timestamp_has_datetime_columns(self):
"""Test whether TimestampsMixin has attrs created_at and updated_at."""
user = self.session.query(User).first()
self.assertTrue(hasattr(User, 'created_at'),
'Timestamp doesn\'t have created_at attribute.')
self.assertEqual(datetime, type(user.created_at),
'created_at column should be datetime')
self.assertTrue(hasattr(User, 'updated_at'),
'Timestamp doesn\'t have updated_at attribute.')
self.assertEqual(datetime, type(user.updated_at),
'updated_at column should be datetime')
def test_updated_at_column_must_change_value(self):
"""Test whether updated_at value is most recently after update."""
user = self.session.query(User).first()
dt_1 = user.updated_at
time.sleep(1)
user.name = 'New name'
self.session.commit()
dt_2 = user.updated_at
self.assertLess(dt_1, dt_2, 'dt_1 should be older than dt_2')
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| absent1706/sqlalchemy-mixins | sqlalchemy_mixins/tests/test_timestamp.py | test_timestamp.py | py | 2,503 | python | en | code | 697 | github-code | 13 |
8772577022 | from dataclasses import dataclass
import pickle
class Serializable:
    """Mixin adding pickle-based serialization to/from bytes.

    WARNING: pickle.loads executes arbitrary code when fed attacker-controlled
    bytes -- only call from_bytes() on data from trusted peers.
    """
    @classmethod
    def from_bytes(cls, bytearr: bytes):
        """Unpickle *bytearr* and verify it is an instance of *cls*."""
        obj = pickle.loads(bytearr)
        if not isinstance(obj, cls):
            raise TypeError(f"Unpickled object is not instance of {cls}")
        return obj
    def __bytes__(self):
        # Inverse of from_bytes: bytes(obj) round-trips through pickle.
        return pickle.dumps(self)
@dataclass
class ChatMessage(Serializable):
    """A single chat message; wire format is 'timestamp:sender:text'."""
    sender: str
    text: str
    timestamp: float
    @classmethod
    def from_string(cls, string: str):
        """Parse 'timestamp:sender:text'; the text may itself contain colons.

        Fix: split with maxsplit=2 so colons inside the message body survive --
        the previous ''.join(parts) silently deleted every colon in the text,
        breaking the round-trip with __str__.

        :raises ValueError: if fewer than three colon-separated fields exist.
        """
        print("Chat " + string)
        timestamp, sender, text = string.split(":", 2)
        return ChatMessage(timestamp=float(timestamp), sender=sender, text=text)
    def __str__(self):
        return f"{self.timestamp}:{self.sender}:{self.text}"
@dataclass
class LogMessage(Serializable):
    """A structured log record (sender, type tag, payload, timestamp)."""
    sender: str
    type: str
    data: str
    timestamp: float
@dataclass
class IntroductionMessage(Serializable):
    """Self-introduction payload carrying basic user-profile fields."""
    nickname: str
    name: str
    gender: str
    age: int
| SiegfriedWagner/python-chat | chat/shared/message.py | message.py | py | 1,067 | python | en | code | 0 | github-code | 13 |
def longestPalindrome(s):
    """Return the longest palindromic substring of *s*.

    Uses centre expansion: O(n^2) time, O(1) extra space. The previous brute
    force materialised every substring (O(n^3)) and then picked max()
    *lexicographically*, which returned the wrong answer whenever a
    later-sorting shorter palindrome existed, raised ValueError when no
    multi-character palindrome was present, and returned non-palindromes for
    length-2 inputs like "ab".

    :param s: input string (may be empty).
    :return: the longest palindrome; ties resolve to the leftmost one.
    """
    if len(s) < 2:
        return s

    def _expand(lo, hi):
        # Grow outwards while the window stays a palindrome.
        while lo >= 0 and hi < len(s) and s[lo] == s[hi]:
            lo -= 1
            hi += 1
        return lo + 1, hi  # half-open [start, end) bounds

    best_start, best_end = 0, 1
    for centre in range(len(s)):
        # Odd-length (centre, centre) and even-length (centre, centre+1) windows.
        for lo, hi in (_expand(centre, centre), _expand(centre, centre + 1)):
            if hi - lo > best_end - best_start:
                best_start, best_end = lo, hi
    return s[best_start:best_end]

s= "abcabcbb"
print(longestPalindrome(s))
13514315436 | from cement.utils.misc import minimal_logger
from ebcli.core import io
from ebcli.lib import elasticbeanstalk, heuristics, utils
from ebcli.objects.exceptions import NotFoundError
from ebcli.objects.platform import PlatformVersion
from ebcli.objects.solutionstack import SolutionStack
from ebcli.operations import commonops, platform_version_ops
from ebcli.resources.strings import alerts, prompts
CUSTOM_PLATFORM_OPTION = 'Custom Platform'
LOG = minimal_logger(__name__)
def get_default_solution_stack():
    """Return the 'default_platform' setting from the branch config (or global default)."""
    return commonops.get_config_setting_from_branch_or_default('default_platform')
def get_all_solution_stacks():
    """Return every solution stack currently available from Elastic Beanstalk."""
    return elasticbeanstalk.get_available_solution_stacks()
def find_solution_stack_from_string(solution_string, find_newer=False):
    """
    Method returns a SolutionStack object representing the given `solution_string`.

    If the `solution_string` matches ARNs and complete names of solution stacks, the exact
    match is returned. In the event when there are multiple matches, the latest version is
    returned.
    :param solution_string: A string in one of the following (case-insensitive) forms:
        - PlatformArn:
            - EB-managed: 'arn:aws:elasticbeanstalk:us-west-2::platform/Multi-container
            Docker running on 64bit Amazon Linux/2.8.0'
            - Custom: arn:aws:elasticbeanstalk:us-west-2:123412341234:platform/
            custom_platform/1.0.0
        - complete name: '64bit Amazon Linux 2017.03 v2.7.5 running Multi-container Docker
        17.03.2-ce (Generic)'
        - shorthand: 'Multi-container Docker 17.03.2-ce (Generic)'
        - language name: 'Multi-container Docker'
        - pythonified shorthand: 'multi-container-docker-17.03.2-ce-(generic)'
    :param find_newer: If solution_string is a complete name or a PlatformArn that uniquely
    matches a solution stack or platform, find the newest version of the
    solution stack.

    :return: A SolutionStack object representing the latest version of the `solution_string`.
    In case of a custom platform, the return value is a PlatformVersion object.
    """
    # Compare input with PlatformARNs
    match = None
    if PlatformVersion.is_eb_managed_platform_arn(solution_string):
        if find_newer:
            match = platform_version_ops.get_latest_eb_managed_platform(solution_string)
        else:
            match = platform_arn_to_solution_stack(solution_string)
    elif PlatformVersion.is_custom_platform_arn(solution_string):
        if find_newer:
            match = platform_version_ops.get_latest_custom_platform_version(solution_string)
        else:
            match = platform_version_ops.find_custom_platform_version_from_string(solution_string)
    # Compare input with complete SolutionStack name and retrieve latest SolutionStack
    # in the series if `find_newer` is set to True
    if not match:
        available_solution_stacks = elasticbeanstalk.get_available_solution_stacks()
        match = SolutionStack.match_with_complete_solution_string(available_solution_stacks, solution_string)
        if match and find_newer:
            language_name = SolutionStack(solution_string).language_name
            match = SolutionStack.match_with_solution_string_language_name(
                available_solution_stacks,
                language_name
            )
    # Compare input with other forms, from most specific to least specific.
    for solution_string_matcher in [
        SolutionStack.match_with_solution_string_shorthand,
        SolutionStack.match_with_solution_string_language_name,
        SolutionStack.match_with_pythonified_solution_string,
    ]:
        if not match:
            match = solution_string_matcher(available_solution_stacks, solution_string)
    # Compare input with custom platform names
    if not match:
        match = platform_version_ops.find_custom_platform_version_from_string(solution_string)
    if not match:
        raise NotFoundError(alerts['platform.invalidstring'].format(solution_string))
    return match
def platform_arn_to_solution_stack(platform_arn):
    """
    Method determines the EB-managed solution stack represented by a PlatformArn

    :param platform_arn: PlatformArn of a solution stack

    :return: SolutionStack representing the PlatformArn if it an EB-managed platform, otherwise None
    """
    # Non-EB-managed ARNs fall through and implicitly return None.
    if not PlatformVersion.is_eb_managed_platform_arn(platform_arn):
        return
    platform_description = elasticbeanstalk.describe_platform_version(platform_arn)
    return SolutionStack(platform_description['SolutionStackName'])
def prompt_for_solution_stack_version(matching_language_versions):
    """
    Method prompts customer to pick a solution stack version, given a set of platform
    versions of a language

    :param matching_language_versions: A list of platform versions of a language to allow
    the customer to choose from.

        e.g. Given Platform, Ruby, the following options will be presented
            1. Ruby 2.4 (Passenger standalone)
            2. Ruby 2.4 (Puma)
            3. ...

    :return: A string representing te platform version the customer would like to use.
    """
    io.echo()
    io.echo(prompts['sstack.version'])
    # Show only the human-friendly shorthand for each candidate version.
    language_versions_to_display = [version['PlatformShorthand'] for version in matching_language_versions]
    return utils.prompt_for_item_in_list(language_versions_to_display)
def resolve_language_version(chosen_language_name, solution_stacks):
    """
    Method determines the list of platform versions matching a platform name and
    returns a SolutionStack object representing the platform version the customer
    would like to use.

    :param chosen_language_name: Name of language the customer would like to use
    :param solution_stacks: A list of SolutionStack objects to assemble the list
    of related platform versions from.

    :return: A SolutionStack object representing customer's choice of language
    version.
    """
    matching_language_versions = SolutionStack.group_solution_stacks_by_platform_shorthand(
        solution_stacks,
        language_name=chosen_language_name
    )
    # Only prompt when there is an actual choice to make.
    if len(matching_language_versions) > 1:
        version = prompt_for_solution_stack_version(matching_language_versions)
    else:
        version = matching_language_versions[0]['PlatformShorthand']
    # Implicitly returns None if nothing matches the selected shorthand.
    for language_version in matching_language_versions:
        if language_version['PlatformShorthand'] == version:
            return language_version['SolutionStack']
| aws/aws-elastic-beanstalk-cli | ebcli/operations/solution_stack_ops.py | solution_stack_ops.py | py | 6,608 | python | en | code | 150 | github-code | 13 |
38221717952 | # Game
import pygame, sys, time, math, numpy, random
from pygame.locals import *
pygame.init()
pygame.mixer.init()
pygame.display.set_caption('Dungeon Crawler')
screen_width = 1920
screen_height = 1080
screenRect = pygame.Rect(0, 0, screen_width, screen_height)
screen = pygame.display.set_mode((screen_width, screen_height))
font = pygame.font.Font(None, 20)
bigFont = pygame.font.Font(None, 60)
fps = 60
target_fps = 240
clock = pygame.time.Clock()
tile_size = 60
def draw_text(text, font, color, surface, x, y):
    """Render *text* with *font*/*color* and blit it onto *surface* at (x, y)."""
    rendered = font.render(text, 1, color)
    bounds = rendered.get_rect()
    bounds.topleft = (x, y)
    surface.blit(rendered, bounds)
# def BuildWorld(currentLevel, size=None):
# startRoomCount = 5
# roomIncrease = 2
# if size is None:
# worldSize = startRoomCount + currentLevel + roomIncrease
# world = [
# [0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0],
# [0, 0, 1, 0, 0],
# [0, 0, 0, 0, 0],
# [0, 0, 0, 0, 0]]
# adjacentTiles = []
# row_count = 0
# for row in world:
# col_count = 0
# for room in row:
# if room == 1:
# adjacentTiles.append(world.index((row - 1)[world.index(room)]))
# adjacentTiles.append(world.index(room - 1))
# adjacentTiles.append(world.index(room + 1))
# adjacentTiles.append(world.index((row + 1)[world.index(room)]))
# col_count += 1
# row_count += 1
class Panel(pygame.sprite.Sprite):
    """Static 360x1080 HUD side-panel image blitted at (x, y)."""
    def __init__(self, x, y):
        # Fix: the Sprite base initialiser was never called, which breaks
        # sprite Group membership; every other sprite class in this file
        # calls it, so do the same here for consistency.
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/panelDesign.png').convert()
        self.image = pygame.transform.scale(image, (360, 1080))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
class World():
    """Tile map built from a 2-D grid of ints; also spawns pickups/enemies.

    Grid codes: 1 stone brick, 2 dirt (solid tiles); 3 star, 4 fragment,
    5 enemy, 6 explosive barrel, 7 crate, 8 weapon crate (sprites added to
    the matching module-level groups).
    """
    def __init__(self, data):
        self.tile_list = []
        # Load Images
        dirt_bg_img = pygame.image.load('img/dirt_bg.png').convert()
        self.dirt_bg = pygame.transform.scale(dirt_bg_img, (1620, 1080))
        stone_bricks_img = pygame.image.load('img/stone_bricks.png').convert()
        dirt_img = pygame.image.load('img/dirt.png').convert()
        row_count = 0
        for row in data:
            col_count = 0
            for tile in row:
                # Note: branch 1 rebinds `tile` to an (img, rect) tuple, so the
                # later `== 2` etc. comparisons are False for that cell -- the
                # integer codes are mutually exclusive, so behavior is correct.
                if tile == 1:
                    img = pygame.transform.scale(stone_bricks_img, (tile_size, tile_size))
                    img_rect = img.get_rect()
                    img_rect.x = col_count * tile_size
                    img_rect.y = row_count * tile_size
                    tile = (img, img_rect)
                    self.tile_list.append(tile)
                if tile == 2:
                    img = pygame.transform.scale(dirt_img, (tile_size, tile_size))
                    img_rect = img.get_rect()
                    img_rect.x = col_count * tile_size
                    img_rect.y = row_count * tile_size
                    tile = (img, img_rect)
                    self.tile_list.append(tile)
                if tile == 3:
                    star = Star(col_count * tile_size, row_count * tile_size)
                    star_group.add(star)
                if tile == 4:
                    fragment = Fragment(col_count * tile_size, row_count * tile_size)
                    fragment_group.add(fragment)
                if tile == 5:
                    enemy = Enemy(col_count * tile_size, row_count * tile_size)
                    enemy_group.add(enemy)
                if tile == 6:
                    explosiveBarrel = ExplosiveBarrel(col_count * tile_size, row_count * tile_size)
                    explosiveBarrel_group.add(explosiveBarrel)
                if tile == 7:
                    crate = Crate(col_count * tile_size, row_count * tile_size)
                    crate_group.add(crate)
                if tile == 8:
                    weaponCrate = WeaponCrate(col_count * tile_size, row_count * tile_size)
                    weaponCrate_group.add(weaponCrate)
                col_count += 1
            row_count += 1
    def draw(self):
        """Blit the background and every solid tile to the screen."""
        screen.blit(self.dirt_bg, (0, 0))
        for tile in self.tile_list:
            screen.blit(tile[0], tile[1])
            # pygame.draw.rect(screen, (255, 255, 255), tile[1], 2)
class Player(pygame.sprite.Sprite):
    """The player character: velocity-based movement, mouse-aimed rotation.

    NOTE(review): does not call pygame.sprite.Sprite.__init__ -- confirm it is
    never added to a sprite Group.
    """
    def __init__(self, x, y, name, maxHealth, damage):
        image = pygame.image.load('img/player.png').convert_alpha()
        self.original_image = pygame.transform.scale(image, (64, 64))
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.isAlive = True
        self.update_time = pygame.time.get_ticks()
        self.VelocityX = 0
        self.VelocityY = 0
        self.x = x
        self.y = y
        self.rect.x = self.x
        self.rect.y = self.y
        # Rotation pivot, captured once at spawn (never recomputed).
        self.position = self.x + (self.rect.width / 2), self.y + (self.rect.height / 2)
        self.velocityDiminish = 0.5
        self.width = self.image.get_width()
        self.height = self.image.get_height()
        self.objectCollide = False
        # Stats
        self.name = name
        self.maxHealth = maxHealth
        self.health = maxHealth
        self.damage = damage
        # NOTE(review): start_scroll aliases the same list object as scroll.
        self.scroll = []
        self.start_scroll = self.scroll
        self.speed = 1
        self.maxSpeed = 8
    def keyPress(self, keys):
        """Apply WASD acceleration; Q/E/C/X abilities are placeholders."""
        if keys[pygame.K_a]:
            self.VelocityX -= self.speed
            # print("Left")
        if keys[pygame.K_d]:
            self.VelocityX += self.speed
            # print("Right")
        if keys[pygame.K_w]:
            self.VelocityY -= self.speed
            # print("Up")
        if keys[pygame.K_s]:
            self.VelocityY += self.speed
            # print("Down")
        if keys[pygame.K_q]:
            pass
            print("Ability 1")
        if keys[pygame.K_e]:
            pass
            print("Signature")
        if keys[pygame.K_c]:
            pass
            print("Ability 2")
        if keys[pygame.K_x]:
            pass
            print("Ultimate")
    def Update(self):
        """Per-frame: clamp/apply/decay velocity, aim at mouse, collide, draw."""
        if self.health <= 0:
            self.isAlive = False
        # Character Movement
        if self.VelocityX > self.maxSpeed:
            self.VelocityX = self.maxSpeed
        if self.VelocityX < -self.maxSpeed:
            self.VelocityX = -self.maxSpeed
        if self.VelocityY > self.maxSpeed:
            self.VelocityY = self.maxSpeed
        if self.VelocityY < -self.maxSpeed:
            self.VelocityY = -self.maxSpeed
        self.x += self.VelocityX
        self.y += self.VelocityY
        # Friction: pull each velocity component back toward zero.
        if self.VelocityX > 0:
            self.VelocityX -= self.velocityDiminish
        if self.VelocityX < 0:
            self.VelocityX += self.velocityDiminish
        if self.VelocityY > 0:
            self.VelocityY -= self.velocityDiminish
        if self.VelocityY < 0:
            self.VelocityY += self.velocityDiminish
        # Angle
        mx, my = pygame.mouse.get_pos()
        rel_x, rel_y = mx - self.x, my - self.y
        angle = (180 / math.pi) * -math.atan2(rel_y, rel_x)
        self.image = pygame.transform.rotate(self.original_image, int(angle) + 270)
        self.rect = self.image.get_rect(center=self.position)
        # Collision
        for tile in world.tile_list:
            if tile[1].colliderect(self.rect):
                self.VelocityX = 0
                self.VelocityY = 0
                self.objectCollide = True
        self.rect.x = self.x
        self.rect.y = self.y
        screen.blit(self.image, (self.x, self.y))
class Enemy(pygame.sprite.Sprite):
    """Stationary turret enemy: rotates toward the player and fires on a timer."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/enemy.png').convert_alpha()
        self.original_image = pygame.transform.scale(image, (64, 64))
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.rect.x = self.x
        self.rect.y = self.y
        self.width = self.image.get_width()
        self.height = self.image.get_height()
        # Rotation pivot, captured once at spawn.
        self.position = self.x + (self.rect.width / 2), self.y + (self.rect.height / 2)
        self.animationStage = 0
        # Stats
        self.health = 3
        self.speed = 0
        self.damage = 1
        # Starts negative so the first shot is delayed by two timer windows.
        self.shootTimer = -60
    def Update(self, px, py):
        """Face the player at (px, py) and advance by `speed` along the aim line."""
        rel_x, rel_y = px - self.x, py - self.y
        self.angle = (180 / math.pi) * -math.atan2(rel_y, rel_x)
        self.image = pygame.transform.rotate(self.original_image, int(self.angle))
        self.rect = self.image.get_rect(center=self.position)
        # NOTE(review): self.angle is in degrees but math.cos/sin expect
        # radians -- currently inert because speed is 0; fix before enabling
        # enemy movement.
        self.x += math.cos(-self.angle) * self.speed
        self.y += math.sin(-self.angle) * self.speed
        self.rect.x = self.x
        self.rect.y = self.y
    def Shoot(self, enemyBullets, px, py):
        """Fire one projectile at the player roughly every 60 ticks."""
        self.shootTimer += 1
        if self.shootTimer >= 60 and len(enemyBullets) < 1000: # Bullet Cap
            enemyBullets.append(EnemyProjectile(round(self.x+self.width//2), round(self.y + self.height//2), 6, (255, 100, 100), 90, px + 32, py + 32))
            self.shootTimer = 0
            pygame.mixer.Sound.play(enemyShootSound)
class Fragment(pygame.sprite.Sprite):
    """Collectible fragment pickup (grid code 4)."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/fragment.png').convert_alpha()
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
    def Update(self):
        # Keep the collision rect in sync with the sprite's position.
        self.rect.x = self.x
        self.rect.y = self.y
class Star(pygame.sprite.Sprite):
    """Collectible star pickup (grid code 3)."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/star.png').convert_alpha()
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
    def Update(self):
        # Keep the collision rect in sync with the sprite's position.
        self.rect.x = self.x
        self.rect.y = self.y
class ExplosiveBarrel(pygame.sprite.Sprite):
    """Destructible explosive barrel (grid code 6); removed when shot."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/barrel_explosive.png').convert_alpha()
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
    def Update(self):
        # Keep the collision rect in sync with the sprite's position.
        self.rect.x = self.x
        self.rect.y = self.y
class Weapons():
    """Namespace grouping the weapon sprite classes (pistol, cum gun, shotgun)."""
    class Pistol(pygame.sprite.Sprite):
        """Basic weapon: 30-tick fire delay, single projectile."""
        def __init__(self, x, y):
            pygame.sprite.Sprite.__init__(self)
            image = pygame.image.load('img/weapon_pistol.png').convert_alpha()
            self.original_image = pygame.transform.scale(image, (64, 64))
            self.image = pygame.transform.scale(image, (64, 64))
            self.rect = self.image.get_rect()
            self.x = x
            self.y = y
            # Rotation pivot, captured once at spawn.
            self.position = self.x + (self.rect.width / 2), self.y + (self.rect.height / 2)
            self.name = 'Pistol'
            # Stats
            self.damage = 1
            self.shootTimer = 30
            self.shootSound = pygame.mixer.Sound('sound/pistolShootSound.wav')
        def Update(self, parent):
            """Rotate toward the mouse and follow the *parent* (the player)."""
            # Angle
            mx, my = pygame.mouse.get_pos()
            rel_x, rel_y = mx - self.x, my - self.y
            angle = (180 / math.pi) * -math.atan2(rel_y, rel_x)
            self.image = pygame.transform.rotate(self.original_image, int(angle))
            self.rect = self.image.get_rect(center=self.position)
            # Positioning
            self.x = parent.x
            self.y = parent.y
            self.rect.x = self.x
            self.rect.y = self.y
    class Cumgun(pygame.sprite.Sprite):
        """Rapid-fire weapon: no fire delay (shootTimer 0)."""
        def __init__(self, x, y):
            pygame.sprite.Sprite.__init__(self)
            image = pygame.image.load('img/weapon_cum_gun.png').convert_alpha()
            self.original_image = pygame.transform.scale(image, (64, 64))
            self.image = pygame.transform.scale(image, (64, 64))
            self.rect = self.image.get_rect()
            self.x = x
            self.y = y
            # Rotation pivot, captured once at spawn.
            self.position = self.x + (self.rect.width / 2), self.y + (self.rect.height / 2)
            self.name = 'Cumgun'
            # Stats
            self.damage = 1
            self.shootTimer = 0
            self.shootSound = pygame.mixer.Sound('sound/cumgunShootSound.wav')
        def Update(self, parent):
            """Rotate toward the mouse and follow the *parent* (the player)."""
            # Angle
            mx, my = pygame.mouse.get_pos()
            rel_x, rel_y = mx - self.x, my - self.y
            angle = (180 / math.pi) * -math.atan2(rel_y, rel_x)
            self.image = pygame.transform.rotate(self.original_image, int(angle))
            self.rect = self.image.get_rect(center=self.position)
            # Positioning
            self.x = parent.x
            self.y = parent.y
            self.rect.x = self.x
            self.rect.y = self.y
    class Shotgun(pygame.sprite.Sprite): # Currently not implemented yet
        """Triple-shot weapon stub: reuses the pistol art/sound, 90-tick delay."""
        def __init__(self, x, y):
            pygame.sprite.Sprite.__init__(self)
            image = pygame.image.load('img/weapon_pistol.png').convert_alpha()
            self.original_image = pygame.transform.scale(image, (64, 64))
            self.image = pygame.transform.scale(image, (64, 64))
            self.rect = self.image.get_rect()
            self.x = x
            self.y = y
            # Rotation pivot, captured once at spawn.
            self.position = self.x + (self.rect.width / 2), self.y + (self.rect.height / 2)
            self.name = 'Shotgun'
            # Stats
            self.damage = 1
            self.shootTimer = 90
            self.shootSound = pygame.mixer.Sound('sound/pistolShootSound.wav')
        def Update(self, parent):
            """Rotate toward the mouse and follow the *parent* (the player)."""
            # Angle
            mx, my = pygame.mouse.get_pos()
            rel_x, rel_y = mx - self.x, my - self.y
            angle = (180 / math.pi) * -math.atan2(rel_y, rel_x)
            self.image = pygame.transform.rotate(self.original_image, int(angle))
            self.rect = self.image.get_rect(center=self.position)
            # Positioning
            self.x = parent.x
            self.y = parent.y
            self.rect.x = self.x
            self.rect.y = self.y
class Crate(pygame.sprite.Sprite):
    """Destructible crate (grid code 7); `broken` flags it for removal."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/crate.png').convert_alpha()
        self.original_image = pygame.transform.scale(image, (64, 64))
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.broken = False
    def Update(self):
        # Keep the collision rect in sync with the sprite's position.
        self.rect.x = self.x
        self.rect.y = self.y
class WeaponCrate(pygame.sprite.Sprite):
    """Destructible weapon crate (grid code 8); `broken` flags it for removal."""
    def __init__(self, x, y):
        pygame.sprite.Sprite.__init__(self)
        image = pygame.image.load('img/weapon_crate.png').convert_alpha()
        self.original_image = pygame.transform.scale(image, (64, 64))
        self.image = pygame.transform.scale(image, (64, 64))
        self.rect = self.image.get_rect()
        self.x = x
        self.y = y
        self.broken = False
    def Update(self):
        # Keep the collision rect in sync with the sprite's position.
        self.rect.x = self.x
        self.rect.y = self.y
class Projectile(object):
    """Player bullet: aimed at the mouse position at creation time."""
    def __init__(self, x, y, radius, color, direction):
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color
        self.direction = direction
        self.vel = 16
        self.rect = pygame.Rect(self.x, self.y, self.radius, self.radius)
        # Lock the flight angle (radians) toward the mouse when fired.
        mx, my = pygame.mouse.get_pos()
        rel_x, rel_y = mx - self.x, my - self.y
        self.angle = math.atan2(rel_y, rel_x)
    def Draw(self,win):
        pygame.draw.circle(win, self.color, (self.x,self.y), self.radius)
    def Update(self):
        # Advance along the fixed angle and keep the hitbox in sync.
        self.x += math.cos(self.angle) * self.vel
        self.y += math.sin(self.angle) * self.vel
        self.rect.x = self.x
        self.rect.y = self.y
class EnemyProjectile(object):
    """Enemy bullet: aimed at the player position (px, py) at creation time."""
    def __init__(self, x, y, radius, color, direction, px, py):
        self.x = x
        self.y = y
        self.radius = radius
        self.color = color
        self.direction = direction
        self.vel = 16
        self.rect = pygame.Rect(self.x, self.y, self.radius, self.radius)
        # Lock the flight angle (radians) toward the player when fired.
        rel_x, rel_y = px - self.x, py - self.y
        self.angle = math.atan2(rel_y, rel_x)
    def Draw(self,win):
        pygame.draw.circle(win, self.color, (self.x,self.y), self.radius)
    def Update(self):
        # Advance along the fixed angle and keep the hitbox in sync.
        self.x += math.cos(self.angle) * self.vel
        self.y += math.sin(self.angle) * self.vel
        self.rect.x = self.x
        self.rect.y = self.y
world_data = [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 0, 0, 8, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
# currentLevel = 1
# BuildWorld(currentLevel)
star_group = pygame.sprite.Group()
fragment_group = pygame.sprite.Group()
enemy_group = pygame.sprite.Group()
explosiveBarrel_group = pygame.sprite.Group()
crate_group = pygame.sprite.Group()
weaponCrate_group = pygame.sprite.Group()
world = World(world_data)
# Sounds
hitHurtSound = pygame.mixer.Sound('sound/hitHurt.wav')
playerHurtSound = pygame.mixer.Sound('sound/playerHurt.wav')
explosionSound = pygame.mixer.Sound('sound/explosion.wav')
enemyShootSound = pygame.mixer.Sound('sound/enemyShootSound.wav')
def Game():
    """Main game loop: spawns entities from the sprite groups, then runs the
    per-frame update/draw/input/collision cycle until the window is closed."""
    fragmentCount = 0
    starCount = 0
    panel = Panel(1560, 0)
    player = Player(300, 300, 'Player', 1000, 1)
    # Weapons
    pistol = Weapons.Pistol(player.x, player.y)
    shotgun = Weapons.Shotgun(player.x, player.y)
    cumgun = Weapons.Cumgun(player.x, player.y)
    # Copy each module-level group into a plain list so entries can be popped.
    enemies = []
    for o in enemy_group:
        enemies.append(o)
    fragments = []
    for f in fragment_group:
        fragments.append(f)
    stars = []
    for s in star_group:
        stars.append(s)
    explosiveBarrels = []
    for b in explosiveBarrel_group:
        explosiveBarrels.append(b)
    crates = []
    for c in crate_group:
        crates.append(c)
    weaponCrates = []
    for c in weaponCrate_group:
        weaponCrates.append(c)
    weapons = [pistol, cumgun]
    if len(weapons) == 0:
        activeWeapon = None
        inactiveWeapon = None
    elif len(weapons) == 1:
        activeWeapon = weapons[0]
        inactiveWeapon = None
    else:
        activeWeapon = weapons[0]
        inactiveWeapon = weapons[1]
    bullets = []
    enemyBullets = []
    keyCooldown = 0
    switchCooldown = 0
    shootTimer = 0
    printWeapons = ''
    debug = False
    run = True
    while run:
        clock.tick(fps)
        # Game Updates
        screen.fill((0, 0, 0))
        world.draw()
        for s in stars:
            if s.rect.colliderect(player.rect):
                starCount += 1
                stars.pop(stars.index(s))
            s.Update()
            screen.blit(s.image, (s.x, s.y))
        for f in fragments:
            if f.rect.colliderect(player.rect):
                fragmentCount += random.randint(1, 3)
                fragments.pop(fragments.index(f))
            f.Update()
            screen.blit(f.image, (f.x, f.y))
        for b in explosiveBarrels:
            if b.rect.colliderect(player.rect):
                player.rect.clamp_ip(b.rect)
            b.Update()
            screen.blit(b.image, (b.x, b.y))
        for c in crates:
            for bullet in bullets:
                if bullet.rect.colliderect(c):
                    c.broken = True
            if c.broken == True:
                crates.pop(crates.index(c))
            screen.blit(c.image, (c.x, c.y))
        for c in weaponCrates:
            for bullet in bullets:
                if bullet.rect.colliderect(c):
                    c.broken = True
            if c.broken == True:
                # NOTE(review): pops from `crates`, not `weaponCrates` --
                # a broken weapon crate raises ValueError here; confirm and fix.
                crates.pop(crates.index(c))
            screen.blit(c.image, (c.x, c.y))
        for o in enemies:
            o.Update(player.x, player.y)
            o.Shoot(enemyBullets, player.x, player.y)
            screen.blit(o.image, (o.x, o.y))
        keys = pygame.key.get_pressed()
        if player.isAlive == True:
            player.keyPress(keys)
        player.Update()
        player.rect.clamp_ip(screenRect)
        for w in weapons:
            w.Update(player)
        screen.blit(activeWeapon.image, (activeWeapon.x, activeWeapon.y))
        for bullet in bullets:
            bullet.Draw(screen)
        for bullet in enemyBullets:
            bullet.Draw(screen)
        # Side Panel
        screen.blit(panel.image, (panel.x, panel.y)) # Must be drawn last
        draw_text(f'{fragmentCount}', bigFont, (255, 255, 255), screen, 1630, 420)
        draw_text(f'{starCount}', bigFont, (255, 255, 255), screen, 1800, 420)
        draw_text(f'{player.speed}', bigFont, (255, 255, 255), screen, 1750, 495)
        draw_text(f'{player.damage}', bigFont, (255, 255, 255), screen, 1750, 540)
        keyCooldown += 1
        switchCooldown += 1
        # Input
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
        # I toggles the debug overlay (20-tick cooldown against key repeat).
        if keys[K_i] and keyCooldown > 20:
            keyCooldown = 0
            if debug == False:
                debug = True
            else:
                debug = False
        if keys[pygame.K_SPACE]:
            if shootTimer >= activeWeapon.shootTimer and len(bullets) < 1000: # Bullet Cap
                pygame.mixer.Sound.play(activeWeapon.shootSound)
                if activeWeapon == shotgun:
                    bullets.append(Projectile(round(player.x+player.width//2), round(player.y + player.height//2), 6, (255, 255, 255), 90))
                    bullets.append(Projectile(round(player.x+player.width//2), round(player.y + player.height//2), 6, (255, 255, 255), 90))
                    bullets.append(Projectile(round(player.x+player.width//2), round(player.y + player.height//2), 6, (255, 255, 255), 90))
                else:
                    bullets.append(Projectile(round(player.x+player.width//2), round(player.y + player.height//2), 6, (255, 255, 255), 90))
                shootTimer = 0
        # 1 / 2 both swap the active and holstered weapon.
        if keys[pygame.K_1] and switchCooldown > 20:
            switchCooldown = 0
            activeWeaponPlaceholder = activeWeapon
            activeWeapon = inactiveWeapon
            inactiveWeapon = activeWeaponPlaceholder
        if keys[pygame.K_2] and switchCooldown > 20:
            switchCooldown = 0
            activeWeaponPlaceholder = activeWeapon
            activeWeapon = inactiveWeapon
            inactiveWeapon = activeWeaponPlaceholder
        mx, my = pygame.mouse.get_pos()
        # Screen wrap-around for the player on both axes.
        if player.x > 1560:
            player.x = -60
            player.rect.x = -60
        if player.x < -60:
            player.x = 1560
            player.rect.x = 1560
        if player.y > 1140:
            player.y = -60
            player.rect.y = -60
        if player.y < -60:
            player.y = 1140
            player.rect.y = 1140
        # Projectiles
        for bullet in bullets:
            if 0 < bullet.x < 1920 and 0 < bullet.y < 1080:
                for o in enemies:
                    if bullet.rect.colliderect(o.rect):
                        bullets.pop(bullets.index(bullet))
                        o.health -= player.damage
                        pygame.mixer.Sound.play(hitHurtSound)
                        if o.health <= 0:
                            enemies.pop(enemies.index(o))
                for b in explosiveBarrels:
                    if bullet.rect.colliderect(b.rect):
                        bullets.pop(bullets.index(bullet))
                        pygame.mixer.Sound.play(explosionSound)
                        explosiveBarrels.pop(explosiveBarrels.index(b))
                bullet.Update()
            else:
                # Off-screen bullets are culled.
                bullets.pop(bullets.index(bullet))
        shootTimer += 1
        for bullet in enemyBullets:
            if 0 < bullet.x < 1920 and 0 < bullet.y < 1080:
                for o in enemies:
                    if bullet.rect.colliderect(player.rect):
                        enemyBullets.pop(enemyBullets.index(bullet))
                        player.health -= o.damage
                        pygame.mixer.Sound.play(playerHurtSound)
                        if o.health <= 0:
                            # NOTE(review): `player` is a Player, not a list --
                            # this line raises AttributeError if ever reached;
                            # probably meant enemies.pop(enemies.index(o)).
                            player.pop(player.index(o))
                for b in explosiveBarrels:
                    if bullet.rect.colliderect(b.rect):
                        enemyBullets.pop(enemyBullets.index(bullet))
                        pygame.mixer.Sound.play(explosionSound)
                        explosiveBarrels.pop(explosiveBarrels.index(b))
                bullet.Update()
            else:
                enemyBullets.pop(enemyBullets.index(bullet))
        # Panel Images
        screen.blit(activeWeapon.original_image, (1740, 690))
        screen.blit(inactiveWeapon.original_image, (1740, 770))
        # Identifiers
        if debug == True:
            draw_text(f'Player: {player.x, player.y}', font, (255, 100, 255), screen, player.x, player.y - 60)
            draw_text(f'Health: {player.health}', font, (255, 100, 255), screen, player.x, player.y - 40)
            for w in reversed(weapons):
                if w.name not in printWeapons:
                    printWeapons = w.name + ', ' + printWeapons
            draw_text(f'Weapons: {printWeapons}', font, (255, 100, 255), screen, player.x, player.y - 20)
            for o in enemies:
                draw_text(f'Enemy: {o.x, o.y}', font, (255, 0, 0), screen, o.x, o.y - 60)
                draw_text(f'Health: {o.health}', font, (255, 0, 0), screen, o.x, o.y - 40)
                draw_text(f'Speed: {o.speed}', font, (255, 0, 0), screen, o.x + 80, o.y - 40)
                draw_text(f'Damage: {o.damage}', font, (255, 0, 0), screen, o.x, o.y - 20)
                draw_text(f'ShootTimer: {o.shootTimer}', font, (255, 0, 0), screen, o.x + 80, o.y - 20)
            for s in stars:
                draw_text(f'Star: {s.x, s.y}', font, (255, 255, 0), screen, s.x, s.y - 20)
            for f in fragments:
                draw_text(f'Fragment: {f.x, f.y}', font, (0, 255, 255), screen, f.x, f.y - 20)
            for bullet in bullets:
                draw_text(f'Bullet: {round(bullet.x, 2), round(bullet.y, 2)}', font, (255, 255, 255), screen, bullet.x, bullet.y - 20)
            for bullet in enemyBullets:
                draw_text(f'Enemy Bullet: {round(bullet.x, 2), round(bullet.y, 2)}', font, (255, 100, 100), screen, bullet.x, bullet.y - 20)
            draw_text(f'FPS: {round(clock.get_fps(), 2)}', font, (255, 255, 255), screen, 10, 10)
            draw_text(f'MouseCoords = {mx}, {my}', font, (255, 255, 255), screen, 1400, 10)
            draw_text(f'playerCoords = {player.x}, {player.y}', font, (255, 255, 255), screen, 1400, 30)
            draw_text(f'playerRect = {player.rect.x}, {player.rect.y}', font, (255, 255, 255), screen, 1400, 50)
            draw_text(f'playerCollide = {player.objectCollide}', font, (255, 255, 255), screen, 1400, 70)
        pygame.display.update()
Game() | twitchBrittle/pydungeon | Game.py | Game.py | py | 29,346 | python | en | code | 0 | github-code | 13 |
14335682632 | import cv2
import numpy as np
import glob
import new_matriculas
import os
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
# Feature matrix: one 100-dim (10x10 flattened) vector per training glyph.
C = np.zeros((9251, 100), dtype=np.float32)
# Label vector: the class index repeated for every sample of that class.
E = []
for i in range(0,37):
    for j in range(0,250):
        E.append(i)
        E.append(i)
# NOTE(review): E ends up with 37*250*2 = 18500 labels while C has 9251 rows —
# the lengths do not match; confirm the intended samples-per-class count.
E = np.array(E, np.float32)
E = E.reshape((E.size, 1))
def filter_training(almacen, image):
    """Crop the character region out of *image*.

    almacen: list of contours (as returned by cv2.findContours).
    image:   grayscale source image.
    Returns the sub-image bounded by the median-area contour box.
    """
    boxes = []
    for box in almacen:
        (x, y, w, h) = cv2.boundingRect(box)
        boxes.append((w * h, x, y, w, h))
    boxes.sort()
    # The original branched on len == 3 / len == 2 / everything else, but all
    # three branches reduce to taking the median-area box (index len // 2);
    # a stray debug print in the else branch was removed.
    _, x, y, w, h = boxes[len(boxes) // 2]
    return image[y:y + h, x:x + w]
def sacar_vc(image_in, name):
    """Segment the character in *image_in*, save the crop under salida_ocr/,
    and return (100-dim feature vector, 10x10 resized crop)."""
    thresh = cv2.threshold(image_in, 15, 255, 0)[1]
    contours = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[1]
    print(name)
    crop = filter_training(contours, image_in)
    cv2.imwrite('salida_ocr/' + name, crop)
    crop = redimensionar(crop)
    # Flatten the 10x10 crop into the 100-element feature vector.
    vc = np.reshape(crop, (1, 100)).flatten()
    return vc, crop
def leer_caracteres():
    """Build the training feature matrix C from training_ocr/*.jpg and
    persist C and the label vector E to disk."""
    tag = 0
    for file in glob.glob('training_ocr/*.jpg'):
        image_in = cv2.imread(file, 0)
        # Portable basename: the original split on '\\' and therefore kept
        # the whole path on POSIX systems.
        name = os.path.basename(file)
        vc, im_out = sacar_vc(image_in, name)
        C[tag] = vc
        tag += 1
    np.savetxt('c_entrenado.data', C)
    np.savetxt('e_entrenado.data', E)
def redimensionar(image):
    """Binarize *image*, paste it into a white square canvas the size of its
    larger dimension, and resize the result to 10x10."""
    w, h = image.shape
    max_d = max(w, h)
    # White square canvas (same as zeros + fill(255) in the original).
    new_img = np.full((max_d, max_d), 255, np.uint8)
    image = cv2.threshold(image, 126, 255, 0)[1]
    # Single slice assignment replaces the original per-pixel double loop.
    new_img[:w, :h] = image
    return cv2.resize(new_img, (10, 10))
# Load the previously saved training features and labels.
samples = np.loadtxt('c_entrenado.data', np.float32)
responses = np.loadtxt('e_entrenado.data', np.float32)
responses = responses.reshape((responses.size, 1))
path = os.getcwd()
# Train a K-Nearest-Neighbours classifier on the full training set.
modelo = cv2.ml.KNearest_create()
modelo.train(samples, cv2.ml.ROW_SAMPLE, responses)
for file in glob.glob('testing_ocr/*.jpg'):
    # NOTE(review): splitting on '\\' assumes Windows paths; on POSIX the whole
    # path is kept. os.path.basename would be portable.
    name = file.split('\\')
    name = name[len(name)-1]
    name_dir = name.split('.')
    name_dir = name_dir[0]
    new_path = path+"\\salidas_entrenamiento\\"+name_dir
    print(new_path)
    if not os.path.exists(new_path):
        os.makedirs(new_path)
    imgTest = cv2.imread(file, 0)
    # Segment the plate and its per-character crops (project module).
    matricula, recortes = new_matriculas.matricula(imgTest)
    t = 0
    os.chdir(new_path)
    for m in recortes:
        t+=1
        img = redimensionar(m)
        mat = np.reshape(img, (1, 100))
        mat = np.asarray(mat, dtype=np.float32)
        val, res = modelo.predict(mat)
        ret, resultado, vecinos, dist = modelo.findNearest(mat, k=3)  # results for the k nearest neighbours
        correctosK = np.count_nonzero(resultado == E)
        print(str(resultado), str(vecinos), str(dist), str(t))
        img = cv2.resize(m, (300, 300))
        cv2.imwrite(str(t)+'-'+str(val)+".jpg", img)
    os.chdir(path)
| KrakenPredator/PracticaObligatoria2 | entrenamiento.py | entrenamiento.py | py | 3,356 | python | en | code | 0 | github-code | 13 |
27764162638 | from django.shortcuts import get_object_or_404, render
from .models import Category
def category(request, slug):
    """Render the latest posts for the category identified by *slug* (404 if
    no such category exists)."""
    cat = get_object_or_404(Category, slug=slug)
    title = "Latest Posts for {}".format(cat.title)
    context = {
        'category': cat,
        'page_title': title,
        'page_heading': title,
        'posts': cat.post_set.order_by('-pub_date').all(),
    }
    return render(request, 'posts/index.html', context)
| fishisawesome/volrac_blog | categories/views.py | views.py | py | 471 | python | en | code | 0 | github-code | 13 |
72605040339 | N = input()
# Grid rows: each row is a list of '0'/'1' strings read from stdin.
Tmap = []
for _ in range(int(N)):
    Tmap.append(input().split(' '))
# Colours ('w'/'b') of the uniform quadrants, in quad-tree visit order.
answer = []
def divide(x, y, n, board=None, out=None):
    """Quad-tree decomposition (BOJ 2630 style).

    Recursively splits the n x n square at top-left (x, y): a uniform square
    appends 'w' (all zeros) or 'b' (all ones) to *out*, otherwise the square
    is split into four quadrants.

    board/out default to the module-level Tmap/answer for backward
    compatibility with the original global-based call sites.
    """
    if board is None:
        board = Tmap
    if out is None:
        out = answer
    total = 0
    for i in range(y, y + n):
        for j in range(x, x + n):
            total += int(board[i][j])
    if total == 0 or total == n * n:
        out.append('w' if total == 0 else 'b')
        return
    half = n // 2
    divide(x, y, half, board, out)
    divide(x + half, y, half, board, out)
    divide(x, y + half, half, board, out)
    divide(x + half, y + half, half, board, out)
# Decompose the whole board, then report the white/black quadrant counts.
divide(0, 0, len(Tmap))
print(answer.count('w'))
print(answer.count('b'))
| gitdog01/AlgoPratice | levels/level_19/2630/main.py | main.py | py | 530 | python | en | code | 0 | github-code | 13 |
32079616556 | from db import db, dbcursor
def create_drivers():
    """Prompt for a driver's name and e-mail and insert the record into the
    cars.drivers table using a parameterized query."""
    cursor = db.cursor()
    cursor.execute("USE cars")
    name = input("Enter driver's Names: ")
    email = input("Enter driver's email address: ")
    cursor.execute(
        "INSERT INTO drivers (Name, Email) VALUES (%s,%s)",
        (name, email),
    )
    db.commit()
    print('User successfully created!!')
    cursor.close()
# Run once as a script.
create_drivers()
db.close() | ekmenjo/mysql-python | drivers.py | drivers.py | py | 496 | python | en | code | 0 | github-code | 13 |
21495561383 | from django.contrib import admin
from django.urls import path, include
from customer import views
# URL routes for the customer app; route names are referenced by templates
# and redirects.
urlpatterns = [
    path("register", views.RegisterView.as_view(), name="register"),
    path("login", views.LoginView.as_view(), name="login"),
    path("home", views.HomeView.as_view(), name="home"),
    path("products/<int:id>", views.ProductDetailView.as_view(), name="product-detail"),
    path("products/<int:id>/cart/add", views.AddCartView.as_view(), name="add-to-cart"),
    path("cart/all", views.MyCartView.as_view(), name="my-cart"),
    # NOTE(review): this route contains a literal space ("my cart") — confirm
    # callers really request the URL-encoded form, or rename the segment.
    path("cart/my cart/<int:cid>/<int:pid>", views.CheckOutView.as_view(), name="check-out"),
    path("cart/remove/<int:id>", views.cartitem_remove, name="remove"),
    path("logout", views.logout_view, name="logout")
]
| Sweethasgar/E-Commerce | customer/urls.py | urls.py | py | 766 | python | en | code | 0 | github-code | 13 |
21335525406 | # -*- coding: utf-8 -*-
import numpy as np
def bin_search(x, z, dx, dz, beta=0.5, precision=0.001):
"""
:array x: (n x 1) matrix
:array z: (n x 1) matrix
:array dx: (n x 1) matrix
:array dz: (n x 1) matrix
:float beta: N_2(beta)
:float precision: threshold
"""
n = x.shape[0]
th_low = 0
th_high = 1
if (dx < 0).sum() > 0 or (dz < 0).sum() > 0:
th_high = min(th_high,
np.min(-x[dx < 0] / dx[dx < 0]),
np.min(-z[dz < 0] / dz[dz < 0]))
x_low = x + th_low * dx
z_low = z + th_low * dz
x_high = x + th_high * dx
z_high = z + th_high * dz
mu_high = np.dot(x_high.T, z_high).item() / n
# from path of centers definition
if beta * mu_high >= np.linalg.norm(x_high * z_high - mu_high):
return th_high
while th_high - th_low > precision:
th_mid = (th_high + th_low) / 2
x_mid = x + th_mid * dx
z_mid = z + th_mid * dz
mu_mid = np.dot(x_mid.T, z_mid).item() / n
if beta * mu_mid >= np.linalg.norm(x_mid * z_mid - mu_mid):
th_low = th_mid
else:
th_high = th_mid
return th_low
if __name__ == '__main__':
    # Small smoke test: a 2-d example from a primal-dual path-following step.
    x = np.array([[1 / 2], [1]])
    dx = np.array([[-1 / 6], [-4 / 3]])
    z = np.array([[1], [1]])
    dz = np.array([[0.1], [-0.1]])
    th = bin_search(x, z, dx, dz)
    print(th)
| Greenwind1/misc_py | optimization/primal_dual_path_fm/bin_search.py | bin_search.py | py | 1,398 | python | en | code | 0 | github-code | 13 |
4860243717 | from pathops import (
Path,
PathPen,
OpenPathError,
OpBuilder,
PathOp,
PathVerb,
FillType,
bits2float,
float2bits,
ArcSize,
Direction,
simplify,
NumberOfPointsError,
)
from matplotlib.path import Path as MPath
def mpl2skia(mpl_path, transform=None):
    """Convert a matplotlib Path into a skia-pathops Path.

    mpl_path:  matplotlib.path.Path instance.
    transform: optional matplotlib transform applied before conversion.
    """
    if transform is not None:
        mpl_path = transform.transform_path(mpl_path)
    # Walk codes and vertices in lockstep; multi-vertex codes (CURVE3/CURVE4)
    # occupy several consecutive slots in BOTH arrays, so the duplicated
    # entries are consumed with explicit next() calls below.
    # NOTE(review): Path.codes can be None for pure poly-line paths — confirm
    # callers always pass coded paths.
    ci = iter(mpl_path.codes)
    vi = iter(mpl_path.vertices)
    path = Path()
    pen = path.getPen()
    for c in ci:
        if c == MPath.MOVETO:
            pen.moveTo(next(vi))
        elif c == MPath.LINETO:
            pen.lineTo(next(vi))
        elif c == MPath.CURVE3:
            # Quadratic: control + end point; skip one duplicated code.
            pen.qCurveTo(next(vi), next(vi))
            next(ci)
        elif c == MPath.CURVE4:
            # Cubic: two controls + end point; skip two duplicated codes.
            pen.curveTo(next(vi), next(vi), next(vi))
            next(ci)
            next(ci)
        elif c == MPath.CLOSEPOLY:
            pen.closePath()
            # CLOSEPOLY still carries an (ignored) vertex slot.
            next(vi)
    return path
def skia2mpl(skia_path):
    """Convert a skia-pathops Path back into a matplotlib Path."""
    code_for = {
        "moveTo": MPath.MOVETO,
        "lineTo": MPath.LINETO,
        "qCurveTo": MPath.CURVE3,
        "curveTo": MPath.CURVE4,
    }
    codes = []
    verts = []
    for seg, points in skia_path.segments:
        if seg == "closePath":
            # CLOSEPOLY needs one placeholder vertex.
            codes.append(MPath.CLOSEPOLY)
            verts.append((0, 0))
        elif seg in code_for:
            # One code per point; CURVE3/CURVE4 repeat theirs per vertex.
            codes.extend([code_for[seg]] * len(points))
            verts.extend(points)
        # Unknown segment kinds are silently skipped, as before.
    return MPath(verts, codes=codes)
# Path([[0, 0], [1, 0], [1, 1], [0, 1]],
# [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]),
def _combine(path1, path2, op, fix_winding=False):
    """Shared driver for the binary path booleans below: path1 is added with
    UNION, then *op* is applied to path2 and the builder resolved.

    Extracted because union/intersection/difference/xor were four copies of
    the same four lines differing only in the op and the fix_winding flag.
    """
    builder = OpBuilder(fix_winding=fix_winding, keep_starting_points=False)
    builder.add(path1, PathOp.UNION)
    builder.add(path2, op)
    return builder.resolve()


def union(path1, path2):
    """Return the union of the two paths (winding fixed, as before)."""
    return _combine(path1, path2, PathOp.UNION, fix_winding=True)


def intersection(path1, path2):
    """Return the intersection of the two paths."""
    return _combine(path1, path2, PathOp.INTERSECTION)


def difference(path1, path2):
    """Return path1 minus path2."""
    return _combine(path1, path2, PathOp.DIFFERENCE)


def xor(path1, path2):
    """Return the symmetric difference of the two paths."""
    return _combine(path1, path2, PathOp.XOR)
| leejjoon/mpl-speech-bubble | mpl_speech_bubble/mpl_pathops.py | mpl_pathops.py | py | 2,725 | python | en | code | 0 | github-code | 13 |
26473990218 | from optimization.src.Solution import Solution
from optimization.src.Strategy import Strategy
from optimization.src.TSPOptimizerClosestCityStrategy import TSPOptimizerClosestCityStrategy
class RatioHeuristicStrategy(Strategy):
    """Greedy knapsack-style heuristic: start from the required cities, rank
    the optional cities by value density (value / stay_time) and add them in
    that order while the trip-time budget allows it."""

    def __init__(self, origin_city, possible_trip_cities, required_cities, max_trip_time,
                 max_execution_time):
        Strategy.__init__(self, origin_city, possible_trip_cities, required_cities, max_trip_time,
                          max_execution_time)

    def solve(self):
        """Build and return a Solution; raises ValueError when the required
        cities alone already violate the trip constraints."""
        # Seed the solution with every required city.
        solution = Solution(self.origin_city, list(self.required_cities), self.required_cities,
                            self.max_trip_time)
        solution.update_fitness()
        if not solution.is_valid():
            # Bare `raise Exception()` replaced with a typed, descriptive error.
            raise ValueError("required cities alone violate the trip constraints")
        # Rank optional cities by value per unit of stay time, best first.
        ratios = []
        for city in self.possible_trip_cities:
            if city in self.required_cities:
                continue
            ratios.append((city, city.value / city.stay_time))
        ratios.sort(reverse=True, key=lambda item: item[1])
        # Greedily add cities; undo and stop at the first infeasible addition.
        for city, _ in ratios:
            backup = list(solution.cities)
            solution.cities.append(city)
            solution.update_fitness()
            tsp_optimizer = TSPOptimizerClosestCityStrategy(self.origin_city, solution.cities)
            solution.cities = tsp_optimizer.optimize()
            if not solution.is_valid_total_trip_time():
                solution.cities = backup
                solution.update_fitness()
                break
        self.improve_solution(solution)
        return solution
| marianoo-andres/EasyTripServer | optimization/src/RatioHeuristicStrategy.py | RatioHeuristicStrategy.py | py | 1,671 | python | en | code | 0 | github-code | 13 |
18634372573 | import math
import pandas as pd
import numpy as np
from read_data import *
import matplotlib.pyplot as plt
def scatterplot(regiondata,inputdata,LOB='LOB1'):
    """Scatter plot of loss for one region against one predictor input.

    regiondata: pandas DataFrame from getLOBdata for one region (has AIRSID).
    inputdata:  pandas DataFrame from predictor() — column 0 is AIRSID,
                column 1 is the predictor value.
    LOB:        which line-of-business column of regiondata to plot.
    """
    inputname = list(inputdata)[1]
    # BUG FIX: the original assigned the whole predictor column positionally
    # (once per matching AIRSID, ignoring the id), so rows were aligned by
    # DataFrame index, not by AIRSID. Map the values onto the region rows by
    # AIRSID instead; rows with no match become NaN and are skipped by plot.
    regiondata[inputname] = regiondata['AIRSID'].map(
        inputdata.set_index('AIRSID')[inputname])
    plt.plot(regiondata[inputname], regiondata[LOB], 'ko')
    plt.xlabel(inputname)
    plt.ylabel(LOB)
# Predictor datasets to plot against the Region 1 losses.
inputfiles=['GEM_HistoricalFreq','Global_NightLights','Global475yrPGA','GlobalAverageSoil','GlobalGDP_Stats','GlobalMIPC_Stats','GlobalPopCounts','GlobalSeismicBudget','USGS_HistoricalFreq']
for inputfile in inputfiles:
    regiondata = getLOBdata('../AIR_data/Correct_loss_data/Region_1_DR.csv')
    regiondata = regiondata.sort_values('AIRSID')
    inputdata = predictor(inputfile+'.csv')
    # One figure per predictor: three stacked panels, one per LOB.
    fig = plt.figure()
    ax1 = plt.subplot2grid((3,1),(0,0))
    ax2 = plt.subplot2grid((3,1),(1,0))
    ax3 = plt.subplot2grid((3,1),(2,0))
    axes = [ax1,ax2,ax3]
    LOBS = ['LOB1','LOB2','LOB3']
    for ax,LOB in zip(axes,LOBS):
        plt.sca(ax)
        scatterplot(regiondata,inputdata,LOB)
    plt.savefig('../plots/Region1_%s.png'%inputfile)
| lm2612/mpe-cdt-teamA | scatterplotdata.py | scatterplotdata.py | py | 1,579 | python | en | code | 0 | github-code | 13 |
27802968572 | #20c10 하면 20만 시간복잡도는 충분하다
from itertools import combinations,permutations
# BOJ 14889 (Start and Link): split N players into two equal teams and
# minimise the absolute difference of the teams' synergy scores.
N = int(input())
arr = [list(map(int, input().split())) for _ in range(N)]
min_val = 1e9
# NOTE(review): each split is enumerated twice (a team and its complement
# give the same difference), so half of the combinations would suffice.
for comb in list(combinations(range(N), N//2)):
    sum_1 = 0
    sum_2 = 0
    # A team's score is the sum of synergies over all ordered member pairs.
    for i, j in permutations(comb, 2):
        sum_1 += arr[i][j]
    for i, j in permutations(set(range(N)) - set(comb), 2):
        sum_2 += arr[i][j]
    min_val = min(min_val, abs(sum_1-sum_2))
print(min_val)
| tkdgns8234/DataStructure-Algorithm | Algorithm/백준/판교가는길/완전탐색&백트래킹/스타트와_링크.py | 스타트와_링크.py | py | 480 | python | en | code | 0 | github-code | 13 |
72301403538 | import diffcp
from py_utils.random_program import random_cone_prog
from py_utils.loaders import save_cone_program, save_derivative_and_adjoint, load_derivative_and_adjoint
import numpy as np
np.set_printoptions(precision=5, suppress=True)
# We generate a random cone program with a cone
# defined as a product of a 3-d fixed cone, 3-d positive orthant cone,
# and a 5-d second order cone.
K = {
    'f': 3, # ZERO
    'l': 3, # POS
    'q': [5] # SOC
}
# Total cone dimension (rows of A) and number of variables.
m = 3 + 3 + 5
n = 5
# Fixed seed so the saved test program is reproducible.
np.random.seed(11)
program = random_cone_prog(m, n, K)
A, b, c = program["A"], program["b"], program["c"]
# We solve the cone program and get the derivative and its adjoint
x, y, s, derivative, adjoint_derivative = diffcp.solve_and_derivative(
    A, b, c, K, solve_method="SCS", verbose=False)
# Persist the program and its solution for reuse by the test suite.
save_cone_program("test_programs/scs_test_program.txt", program=dict(A=A, b=b, c=c, x_star=x, y_star=y, s_star=s), dense=False)
print("x =", x)
print("y =", y)
print("s =", s)
# Forward derivative evaluated at the problem data itself.
dx, dy, ds = derivative(A, b, c)
# We evaluate the gradient of the objective with respect to A, b and c.
dA, db, dc = adjoint_derivative(c, np.zeros(
    m), np.zeros(m), atol=1e-10, btol=1e-10)
forward_sensitivities = np.ones(A.shape), np.ones(b.shape), np.ones(c.shape)
backward_sensitivities = np.ones(c.shape), np.zeros(m), np.zeros(m)
save_derivative_and_adjoint("test_programs/scs_test_derivatives.txt", derivative, adjoint_derivative, forward_sensitivities, backward_sensitivities)
# The gradient of the objective with respect to b should be
# equal to minus the dual variable y (see, e.g., page 268 of Convex Optimization by
# Boyd & Vandenberghe).
print("db =", db)
print("-y =", -y)
d = load_derivative_and_adjoint("test_programs/scs_test_derivatives.txt")
| csquires/ConeProgramDiff-benchmarking | diffcp_examples/ecos_example.py | ecos_example.py | py | 1,725 | python | en | code | 0 | github-code | 13 |
class FitnessTrace:
    """
    Record the best fitness at evenly spaced points of an optimization run so
    the progress can be plotted afterwards.
    """

    def __init__(self, trace_len, max_evaluations):
        """
        Create the object instance.

        :param trace_len: Desired number of trace points (0 disables tracing).
        :param max_evaluations: Total number of optimization iterations.
        :return: Object instance.
        """
        # Cannot record more points than there are iterations.
        self.max_len = min(trace_len, max_evaluations)
        # x-axis (iteration numbers) and y-axis (fitness values) of the plot.
        # Appending to lists is cheap relative to the optimization itself.
        self.iteration = []
        self.fitness = []
        # Iteration at which the next sample will be taken.
        self.next_iteration = 0
        if self.max_len > 0:
            # Number of iterations between two recorded samples.
            self.interval = max_evaluations // self.max_len

    def trace(self, iteration, fitness):
        """
        Record the fitness if the sampling interval has elapsed.

        :param iteration: Number of optimization iterations performed so far.
        :param fitness: Best fitness found so far.
        :return: Nothing.
        """
        # Tracing disabled entirely.
        if self.max_len <= 0:
            return
        # Not yet time for the next sample.
        if iteration < self.next_iteration:
            return
        self.fitness.append(fitness)
        self.iteration.append(iteration)
        self.next_iteration = iteration + self.interval
########################################################################
| Hvass-Labs/swarmops | swarmops/FitnessTrace.py | FitnessTrace.py | py | 2,418 | python | en | code | 70 | github-code | 13 |
29507107782 | k = int(input())
# One "character code" pair per line, e.g. "A 01"; map code -> character.
encoding_map = {}
for i in range(k):
    character, encoding = input().split()
    encoding_map[encoding] = character
sequence = input()
message = str()
n = len(sequence)
ptr = 0
# Greedy decode: extend the current candidate code one symbol at a time
# until it matches a known encoding, then restart after the match.
while ptr < n:
    current_encoding = str()
    for j in range(ptr, n):
        current_encoding += sequence[j]
        if current_encoding in encoding_map:
            message += encoding_map[current_encoding]
            ptr = j
            break
    ptr += 1
print(message) | galacticglum/contest-solutions | CCC/S2_2010.py | S2_2010.py | py | 485 | python | en | code | 0 | github-code | 13 |
17333690224 | import logging
from aac.io.parser import parse
from aac.lang.active_context_lifecycle_manager import get_active_context
from aac.lang.constants import DEFINITION_NAME_SCHEMA, PRIMITIVE_TYPE_STRING, ROOT_KEY_VALIDATION
from aac.lang.definitions.collections import get_definition_by_name, get_definitions_by_root_key
from aac.plugins.contributions.contribution_types import DefinitionValidationContribution
from aac.plugins.validators.root_keys import _get_plugin_definitions, _get_plugin_validations, validate_root_keys
from tests.active_context_test_case import ActiveContextTestCase
from tests.helpers.assertion import assert_definitions_equal, assert_validator_result_failure, assert_validator_result_success
from tests.helpers.parsed_definitions import create_field_entry, create_schema_definition
class TestRootKeysValidator(ActiveContextTestCase):
    """Unit tests for the root-keys definition validator plugin."""

    def setUp(self) -> None:
        super().setUp()
        logging.disable()  # Hide the error messages generated by these tests from the console.

    def test_module_register_validators(self):
        # The plugin must expose exactly one validation definition, and the
        # registered contribution must carry that definition's name/content.
        actual_validator_plugins = _get_plugin_validations()
        validation_definitions = get_definitions_by_root_key(ROOT_KEY_VALIDATION, _get_plugin_definitions())
        self.assertEqual(1, len(validation_definitions))

        validation_definition = validation_definitions[0]
        # The validation function itself is not compared; a placeholder lambda
        # is used to build the expected contribution.
        expected_definition_validation = DefinitionValidationContribution(
            name=validation_definition.name, definition=validation_definition, validation_function=(lambda x: x)
        )
        self.assertEqual(expected_definition_validation.name, actual_validator_plugins[0].name)
        assert_definitions_equal(expected_definition_validation.definition, actual_validator_plugins[0].definition)

    def test_validate_root_keys_valid_key(self):
        # A definition using the standard "schema" root key passes validation.
        test_primitive_reference_field = create_field_entry("ValidPrimitiveField", PRIMITIVE_TYPE_STRING)
        test_definition = create_schema_definition("TestData", fields=[test_primitive_reference_field])

        test_active_context = get_active_context()
        test_active_context.add_definition_to_context(test_definition)

        target_schema_definition = get_definition_by_name(DEFINITION_NAME_SCHEMA, test_active_context.definitions)
        actual_result = validate_root_keys(test_definition, target_schema_definition, test_active_context)

        assert_validator_result_success(actual_result)

    def test_validate_root_keys_invalid_key(self):
        # Swap the definition's root key for an unregistered one and expect a
        # validation failure mentioning the bad key.
        fake_root_key = "not_a_root_key"
        test_definition = create_schema_definition("Test")
        original_root_key = test_definition.get_root_key()
        test_definition.structure[fake_root_key] = test_definition.structure[original_root_key]
        del test_definition.structure[original_root_key]

        # We need to re-parse to make sure lexemes are up-to-date
        test_definition, *_ = parse(test_definition.to_yaml())

        test_active_context = get_active_context()
        test_active_context.add_definition_to_context(test_definition)

        target_schema_definition = test_active_context.get_definition_by_name(original_root_key)
        actual_result = validate_root_keys(test_definition, target_schema_definition, test_active_context)

        assert_validator_result_failure(actual_result, "root", "key", fake_root_key)

    def test_validate_root_keys_valid_extended_root_key(self):
        # A root key contributed via a schema extension is also accepted.
        fake_extended_root_key = "extended_root_key"
        test_definition = create_schema_definition(name="Test", root=fake_extended_root_key)
        test_definition.structure[fake_extended_root_key] = test_definition.structure[test_definition.get_root_key()]
        del test_definition.structure[test_definition.get_root_key()]

        test_active_context = get_active_context()
        test_active_context.add_definitions_to_context([test_definition])

        target_schema_definition = get_definition_by_name(DEFINITION_NAME_SCHEMA, test_active_context.definitions)
        actual_result = validate_root_keys(test_definition, target_schema_definition, test_active_context)

        assert_validator_result_success(actual_result)
| jondavid-black/AaC | python/tests/plugins/validators/test__validate_root_keys.py | test__validate_root_keys.py | py | 4,124 | python | en | code | 14 | github-code | 13 |
31112021052 | #Abrir uma imagem colorida, transformar para tom de cinza e aplique a técnica Crescimento de Regiões (Region Growing). Para isto, pegue uma imagem qualquer real, com tanto que a mesma possua um objeto se destaque do fundo. Inicialize a semente com um clique neste objeto, conforme o Tópico 21 e encontre uma regra de adesão que seja capaz de segmentar este objeto. Aplique o Crescimento de Regiões de forma iterativa, em que o algoritmo irá estabilizar apenas quando a região parar de crescer.
import cv2
import numpy as np
#declara logo a semente, uma vez que iremos usar dentro de todas as funções que iremos criar
s = (0,0)
#vamos iniciar a função Crescimento de Regiões ele irá receber duas coisas fundamentais para o crescimento: a imagem que é nela que vai crescer a semente e a semente em si que será introduzida com valor vazio
def CR(imcolor, s=None):
    """Region growing (Crescimento de Regioes) on a grayscale image.

    imcolor: 2-D uint8 grayscale image.
    s:       (row, col) seed coordinates.
    Returns a mask of the same shape/dtype with 255 over the grown region.

    Adhesion rule: an 8-connected neighbour joins the region when its gray
    value is strictly between 130 and 230 (same rule as the original).
    """
    rows, cols = imcolor.shape[:2]
    xc, yc = s
    region = np.zeros_like(imcolor)
    region[xc, yc] = 255
    # 8-connected neighbourhood offsets — replaces the eight duplicated ifs.
    offsets = ((-1, -1), (-1, 0), (-1, 1), (0, -1),
               (0, 1), (1, -1), (1, 0), (1, 1))
    grown = 0
    previous = 1
    # Repeat full sweeps until a sweep accepts exactly as many neighbour
    # candidates as the previous one, i.e. the region stopped growing
    # (same stabilisation criterion as the original).
    while previous != grown:
        previous = grown
        grown = 0
        for l in range(rows):
            for c in range(cols):
                if region[l, c] != 255:
                    continue
                for dl, dc in offsets:
                    nl, nc = l + dl, c + dc
                    # Bounds check fixes the original's IndexError at the
                    # right/bottom edges and the negative-index wrap-around
                    # at the top/left edges.
                    if 0 <= nl < rows and 0 <= nc < cols and 130 < imcolor[nl, nc] < 230:
                        region[nl, nc] = 255
                        grown += 1
    return region
#o clique. Vamos verificar se demos um clique esquerdo com o mouse
def clique(event, x, y, flags, param):
    """Mouse callback: capture the seed point from a left-button click."""
    if event == cv2.EVENT_LBUTTONDOWN:
        # Publish the clicked coordinates through the module-level seed.
        global s
        # NOTE(review): OpenCV reports (x, y) = (column, row) while CR()
        # indexes the seed as (row, column) — confirm the orientation.
        s = (x, y)
#vamos agora à parte das exibições na tela
if __name__ == '__main__':
    # Load the source image.
    imcolor = cv2.imread('23.jpg')
    # Convert to grayscale for the region-growing step.
    imcinza = cv2.cvtColor(imcolor, cv2.COLOR_BGR2GRAY)
    # Show the original image and wait for the user to click the seed point.
    cv2.namedWindow('Original', 1)
    cv2.imshow('Original', imcinza)
    cv2.setMouseCallback('Original', clique)
    cv2.waitKey(0)
    # Grow the region from the clicked seed.
    imseg = CR(imcinza, s)
    # Display the segmented result.
    cv2.imshow('final', imseg)
    cv2.waitKey(0)
| VivianeSouza923/ComputerVisionPy_Lapisco | 23/QUESTÃO23.py | QUESTÃO23.py | py | 4,185 | python | pt | code | 0 | github-code | 13 |
2000480594 | import json
import mysql.connector
from typing import Optional
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.params import Body
from mysql.connector.utils import NUMERIC_TYPES
from pydantic import BaseModel
app = FastAPI()
# Front-end origins allowed to call this API (CORS allow-list).
origins = [
    "http://localhost:3000",
    "http://localhost",
    "http://localhost:8000",
]
app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
class Bus(BaseModel):
    """Request/response schema for a bus record (mirrors the `bus` table)."""
    num_bus: int
    immatriculation: str
    marque: str
    genre: str
    model: str
    places_assises: int
    placesdebouts: int
@app.post("/createacc")
async def account(request: Request):
mydb = mysql.connector.connect(
host="localhost", user="root", database="parc")
cursor = mydb.cursor()
body = json.loads(await request.body())
cursor.execute(f"SELECT * FROM utilisateur WHERE email='{body['email']}'")
res = cursor.fetchall()
if (len(res) == 0):
cursor.execute(
f"INSERT INTO utilisateur (matriculeUtilisateur,Nom,motDePasse,Prenom,Email) VALUES('','{body['Nom']}','{body['Psw']}','{body['Prenom']}','{body['email']}');")
mydb.commit()
data = "Success"
else:
data = "Fail"
return data
@app.post("/bus/add")
def add_bus(new_bus: Bus):
mydb = mysql.connector.connect(host="localhost", user="root", database="parc")
mycursor = mydb.cursor()
mycursor.execute(
f"INSERT INTO bus ( immatriculation, marque,genre,model,places_assises,placesdebouts) VALUES ('{new_bus.immatriculation}','{new_bus.marque}','{new_bus.genre}','{new_bus.model}','{new_bus.places_assises}','{new_bus.placesdebouts}')")
mydb.commit()
return "bus Added"
@app.delete("/bus/delete/num}")
def delete_bus (num: int):
mydb = mysql.connector.connect(host="localhost", user="root", database="parc")
mycursor = mydb.cursor()
mycursor.execute(f"delete from bus where num_bus = {num}")
mydb.commit()
return "Bus_deleted"
@app.post("/signin")
async def sign(request: Request):
mydb = mysql.connector.connect(host="localhost", user="root", database="parc")
cursor = mydb.cursor()
body = json.loads(await request.body())
cursor.execute(
f"SELECT * FROM utilisateur WHERE Email='{body['mail']}' AND motDePasse ='{body['pass']}'")
result = cursor.fetchall()
row_headers = [x[0] for x in cursor.description]
data = []
for res in result:
data.append(dict(zip(row_headers, res)))
if (len(result) == 0):
return "Failed"
else:
print(data)
return data
@app.get("/getOneBus/{num}")
def getOne(num: int):
mydb = mysql.connector.connect(
host="localhost", user="root", database="parc")
cursor = mydb.cursor()
cursor.execute(
f"SELECT num_bus,immatriculation,marque,genre,model,places_assises,placesdebouts FROM bus Where num_bus={num}")
result = cursor.fetchall()
return result
@app.get("/Bus/")
def fetchbus():
mydb = mysql.connector.connect(
host="localhost", user="root", database="parc")
cursor = mydb.cursor()
cursor.execute(
f"SELECT num_bus,immatriculation,marque,genre,model,places_assises,placesdebouts FROM bus;")
result = cursor.fetchall()
return result
@app.post("/bus/update/{nb}")
def update_user (nb: int, new_bus: Bus):
mydb = mysql.connector.connect(host="localhost", user="root", database="parc")
mycursor = mydb.cursor()
mycursor.execute(f"update bus set immatriculation='{new_bus.immatriculation}', marque='{new_bus.marque}', genre='{new_bus.genre}',model='{new_bus.model}', places_assises='{new_bus.places_assises}', placesdebouts='{new_bus.placesdebouts}' where num_bus = '{nb}'")
mydb.commit()
return "User updated" | amalmekni/reactproject | src/back/main.py | main.py | py | 3,984 | python | en | code | 0 | github-code | 13 |
655657946 | # Use this python3 script to create sym links to files at paths in filesCropSubTOM.csv
# ####.rec ordered by subTOM index (ordered 0001-#### in ascending order of Dynamo index)
# Michael Wozny 2020
import numpy as np
import os, shutil, csv
# path to filesCropSubTOM.csv
srcFile = 'filesCropSubTOM.csv'
srcDir = os.getcwd()
# read in output.txt as dataList array, keep header separate
fileObj = open(srcFile)
dataList= []
header = fileObj.readline().splitlines()
# remove \t from header and fileObj
for k in header:
header = k.split('\t')
for line in fileObj:
data = line.split('\t')
dataList.append(data)
# dataList as array, strip \n
dataList = np.asarray(dataList)
dataList = np.char.strip(dataList)
# create sym link named by indexSubTOM to volumePath
for k in range(len(dataList)):
volumePath = dataList[k,0]
indexSubTOM = dataList[k,6]
tomoSymLnk = str(indexSubTOM).zfill(4) + '.rec'
# skip any indices == 0, these were not used for cropping
if int(indexSubTOM) == 0:
continue
tomoSymLnk = srcDir + '/' + tomoSymLnk
os.symlink(volumePath,tomoSymLnk) | mwozn/DYNAMO_dipoles_to_MOTL | symlink_tomos_by_subTOM_idx.py | symlink_tomos_by_subTOM_idx.py | py | 1,109 | python | en | code | 0 | github-code | 13 |
3720770376 | # -*- coding: utf-8 -*-
import time
def reconnector(func):
    """Decorator: retry *func* up to 10 extra times (5 s apart) while it
    returns a falsy result; raise ValueError if every attempt fails.

    Adds functools.wraps so the wrapped function keeps its name/docstring
    (the original decorator discarded that metadata).
    """
    from functools import wraps

    @wraps(func)
    def decorated_func(*args, **kwargs):
        result = func(*args, **kwargs)
        if not result:
            for _ in range(10):
                time.sleep(5)
                result = func(*args, **kwargs)
                if result:
                    break
            if not result:
                print("Ошибка подключения")
                raise ValueError("Не удалось получить данные")
        return result

    return decorated_func
| BloodyPhoenix/Pokemon_Go_scrapper | reconnector.py | reconnector.py | py | 537 | python | ru | code | 0 | github-code | 13 |
5764811 | import json
from datetime import date, datetime, timedelta
import google.oauth2.credentials
from apiclient.discovery import build
from dateutil.parser import parse
from django.shortcuts import get_object_or_404
from accounts.models import CustomUser
def build_service(user_id):
    """Build a Google Calendar API client for the given user.

    Returns the service object, or False when the user has no stored
    Google credentials.
    """
    user = get_object_or_404(CustomUser, pk=user_id)
    stored = user.google_credentials
    if not stored:
        return False
    info = json.loads(stored)
    creds = google.oauth2.credentials.Credentials.from_authorized_user_info(info)
    return build("calendar", "v3", credentials=creds)
def get_events(user_id):
    """Return up to 10 upcoming Google Calendar events for *user_id*,
    simplified for template rendering, or None when there are none.
    """
    service = build_service(user_id)
    # NOTE(review): build_service can return False when the user has no stored
    # credentials, which would fail below — confirm callers pre-check.
    now = datetime.utcnow().isoformat() + "Z"  # 'Z' indicates UTC time
    events_result = (
        service.events()
        .list(
            calendarId="primary",
            timeMin=now,
            maxResults=10,
            singleEvents=True,
            orderBy="startTime",
        )
        .execute()
    )
    events = events_result.get("items", [])
    if events:
        events_simplified = []
        for event in events:
            event_simple = {}
            # All-day events carry "date"; timed events carry "dateTime".
            start = event["start"].get("dateTime", event["start"].get("date"))
            start = parse(start)
            event_simple["date"] = start.strftime("%Y-%m-%d")
            event_simple["weekday"] = start.strftime("%A")
            event_simple["month"] = start.strftime("%B")
            event_simple["time"] = start.strftime("%I:%M %p")
            # Midnight is treated as an all-day event: hide the time.
            if event_simple["time"] == "12:00 AM":
                event_simple["time"] = ""
            event_simple["summary"] = event["summary"]
            # Flag events within the next three days for highlighting.
            today = date.today()
            soon = today + timedelta(days=3)
            pydate = date.fromisoformat(event_simple["date"])
            if pydate <= soon:
                event_simple["soon"] = "soon"
            else:
                event_simple["soon"] = ""
            events_simplified.append(event_simple)
        # Drop a recurring maintenance reminder from the displayed list.
        events_simplified = [
            i
            for i in events_simplified
            if not (i["summary"] == "Change water fountain filter")
        ]
        return events_simplified
    else:
        return None
| jamescrg/minhome | apps/home/google.py | google.py | py | 2,282 | python | en | code | 0 | github-code | 13 |
27551138936 | #!/usr/bin/python3
"""Module is an introduction to networking with urllib in Python."""
import sys
import urllib.parse
import urllib.request as request
def url_fetch():
    """Send a POST request with an email parameter and return the response body.

    Expects sys.argv[1] = URL and sys.argv[2] = email value; returns None when
    arguments are missing.
    """
    if len(sys.argv) < 3:
        return
    url = sys.argv[1]
    # BUG FIX: the parameter dict was passed directly as the Request `data`
    # argument, which urlopen rejects (POST data must be bytes). URL-encode
    # the form field and encode it to bytes.
    data = urllib.parse.urlencode({'email': sys.argv[2]}).encode('utf-8')
    my_request = request.Request(url, data)
    with request.urlopen(my_request) as html:
        return html.read().decode('utf-8')
if __name__ == '__main__':
    # Print the response body only when the fetch returned something.
    result = url_fetch()
    if result:
        print(result)
| adobki/alx-higher_level_programming | 0x11-python-network_1/2-post_email.py | 2-post_email.py | py | 554 | python | en | code | 0 | github-code | 13 |
2376693150 | #!/usr/bin/python3
# tutorialspoint.com/python3/os_pipe.htm
# cython > how to use Cyton to compile Python 3 into C
import os, sys, time
def main():
    """Fork a child that writes a message into a pipe; the parent reads it.

    Both processes terminate with sys.exit(0).
    NOTE(review): main() is never invoked at module level, so running this
    file as-is does nothing — confirm whether an
    `if __name__ == '__main__': main()` guard was intended.
    """
    print("The child will write text to a pipe and \n the parent will read the text written by child.")
    # File descriptors r, w for reading and writing
    r, w = os.pipe()
    pid = os.fork()
    if pid:  # Parent
        # Closes file descriptor w
        os.close(w)
        r = os.fdopen(r, 'r')
        print("Parent reading")
        # BUG FIX: the local was named `str`, shadowing the builtin.
        text = r.read()
        print("text =", text)
        sys.exit(0)
    else:
        os.close(r)
        w = os.fdopen(w, 'w')
        print("Child writing")
        w.write("El que tinc ganes d'escirure")
        w.close()
        print("Child closing")
        sys.exit(0)
| Compilador-Text2Text/1erPrototip | experiments/os_pipe.py | os_pipe.py | py | 766 | python | en | code | 0 | github-code | 13 |
23695986766 | # Imports
import pygame
from pygame.locals import *
from sys import exit
# Init
pygame.init()
# Screen Settings
screenWidth = 1280
screenHeight = 720
screen = pygame.display.set_mode((screenWidth, screenHeight)) # the display surface
pygame.display.set_caption('Jogo Teste')
# Game Loop: poll events, quit cleanly when the window is closed, then draw.
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            exit()
    # NOTE(review): the screen is never cleared each frame, so the shapes are
    # simply redrawn over the previous frame — confirm this is intended.
    pygame.draw.rect(screen, (255, 0, 0), (200, 300, 40, 50))
    pygame.draw.circle(screen, (255, 0, 0), (300, 260), 40)
    pygame.display.update()
pygame.display.update() | iuritorres/estudos | Python/POO/games/pygame outro/main.py | main.py | py | 564 | python | en | code | 1 | github-code | 13 |
73054982417 | import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.linear_model import LogisticRegression, LinearRegression
import warnings
warnings.filterwarnings('ignore')
class MetaClean:
    '''
    This class will contain all the functions required for one stop data cleaning.
    NOTE:
    All the columns have to be converted to the desired datatype before using any function from MAHA. The function does not change any dataype of any column.
    '''
    ''' Constructor '''
    def __init__(self):
        # Stateless helper class; nothing to initialise.
        None
    ''' Model Finding'''
    def model_calc(self, df):
        '''
        Calculates which model to be used for all the columns in the dataframe.
        Builds one model per column: LinearRegression for int/float columns,
        LogisticRegression for category/object columns.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe on whose columns the models have to be built.
        '''
        # BUG FIX: this list was previously named `modes` while the function
        # returned the undefined name `models`, raising NameError on every call.
        models = []
        temp = list(df.columns)
        print(temp)  # debug output kept from the original implementation
        for i in temp:
            y = df[i]
            x = df.drop(i, axis = 1)
            if df[i].dtype == 'int64' or df[i].dtype == 'float64':
                # NOTE(review): `normalize=` was removed in scikit-learn 1.2;
                # confirm the pinned sklearn version still supports it.
                lr = LinearRegression(normalize = True)
                lr.fit(x, y)
                models.append(lr)
            elif df[i].dtype == 'category' or df[i].dtype == 'object':
                lg = LogisticRegression()
                lg.fit(x, y)
                models.append(lg)
        return models
    ''' Splitting DataFrame '''
    def splitDataFrame(self, df):
        '''
        This function splits the dataframe into clean and unclean parts wrt NA values.
        Rows containing any NA/Null value go to `unclean`; fully populated rows
        go to `clean`.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe which is to be split into clean and unclean dataframes.
        '''
        unclean = df[df.isnull().any(axis = 1) == True]
        clean = df[df.isnull().any(axis = 1) == False]
        return clean, unclean
    ''' Finding Index Column '''
    def indexColDetector(self, df):
        '''
        This function detects the index colum and drops it.
        A numeric column summing to sum(range(rows)) or sum(range(rows + 1))
        is assumed to be a 0- or 1-based index column and is dropped in place.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe from which index column are to be detected and dropped.
        '''
        t1 = sum(range(df.shape[0] + 1))
        t2 = sum(range(df.shape[0]))
        for i in list(df.columns):
            if df[i].dtype == 'int64' or df[i].dtype == 'float64':
                vals = list(df[i])
                s = sum(vals)
                if t1 == s or t2 == s: df.drop(i, axis = 1, inplace = True)
        return df
    ''' Dropping Unnecesary Columns '''
    def dropColumns(self, df, obj_drop = 0.7, drop_cols = 0.6):
        '''
        This function determines which columns are to be dropped.
        Drops columns with a single unique value, object columns whose unique
        ratio exceeds `obj_drop`, and columns whose Null ratio exceeds
        `drop_cols`. Works on a copy; the input dataframe is untouched.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe from which columns are to be dropped.
        :type obj_drop: float
        :param obj_drop: The ratio of unique object values to number of rows above which a column has to be dropped.
        :type drop_cols: float
        :param drop_cols: The ratio of Null values to number of rows above which a column has to be dropped.
        '''
        dummy = df.copy()
        dummy = self.indexColDetector(dummy)
        print(dummy.columns)  # debug output kept from the original implementation
        for i in list(dummy.columns):
            num = dummy[i].nunique()
            # NOTE(review): dtype/row-count are read from `df`, not `dummy`;
            # equivalent here since `dummy` is a column-subset copy of `df`.
            if num == 1 or (num >= obj_drop * df.shape[0] and df[i].dtype == 'object'):
                dummy = dummy.drop(i, axis = 1)
        for i in list(dummy.columns):
            c = dummy[i].isnull().sum()
            if c/df.shape[0] > drop_cols:
                dummy.drop(i, axis = 1, inplace = True)
        return dummy
    ''' Label Encoding Appropriate Columns '''
    def label(self, df):
        '''
        This function automatically label encodes appropriate columns.
        Object columns are label-encoded on their non-NA entries and converted
        to the `category` dtype, in place.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe whose object columns are to be encoded.'''
        le = LabelEncoder()
        for col in list(df.columns):
            if df[col].dtypes == 'object':
                # Encode only non-NA entries so missing values stay missing.
                index = ~df[col].isna()
                df.loc[index, col] = le.fit_transform(df.loc[index, col])
                df[col]=df[col].astype('category')
        return df
    ''' Replace Mean and Mode of Columns'''
    def replaceMeanMode(self, df):
        '''
        This function replaces the NA values with Mean or Mode, depending on the type of the variable passed on.
        int/float columns are filled with their mean; object/category columns
        with their mode. Operates in place.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe where NA/Null values are to be replaced.
        '''
        for i in list(df.columns):
            check = df[i].dtypes
            if check == 'int64' or check == 'float64':
                a = df[i].astype('float').mean(axis = 0)
                df[i].fillna(a, inplace = True)
            elif check == 'object' or check == 'category':
                df[i].fillna(df[i].mode()[0], inplace = True)
        return df
    ''' Finding Mean and Mode of Columns'''
    def findMeanMode(self, df):
        '''
        This function finds the mean and mode of the variables/columns of the dataframe
        Returns one value per column, in column order: mean for int/float
        columns, mode for object/category columns.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe where mean/mode of columns are to be found.
        '''
        meanMode = []
        for i in list(df.columns):
            check = df[i].dtypes
            if check == 'int64' or check == 'float64':
                a = df[i].astype('float').mean(axis = 0)
                meanMode.append(a)
            elif check == 'object' or check == 'category':
                meanMode.append(df[i].mode()[0])
        return meanMode
    ''' The Main Function '''
    def MAHA(self, df, obj_drop = 0.7, drop_cols = 0.6, scale = False):
        '''
        This function is the main function which calls all the functions to provide one line cleaning of a dataset.
        Pipeline: drop useless columns -> label-encode -> fit per-column models
        on clean rows -> predict missing values column by column -> optionally
        standard-scale numeric columns.
        :type df: pandas.core.frame.DataFrame
        :param df: The dataframe which is to be cleaned.
        :type obj_drop: float
        :param obj_drop: The ratio of unique object values to number of rows above which a column has to be dropped.
        :type drop_cols: float
        :param drop_cols: The ratio of Null values to number of rows above which a column has to be dropped.
        :type scale: boolean
        :param scale: If the dataset has to be scaled or not.
        '''
        df = self.dropColumns(df, obj_drop, drop_cols)
        cols = list(df.columns)
        df = self.label(df)
        meanMode = self.findMeanMode(df)
        cl, ucl = self.splitDataFrame(df)
        models = self.model_calc(cl)
        for i, n in enumerate(cols):
            dummy = ucl.copy()
            # Fill every column except the current one with its mean/mode so
            # the model for column `n` can predict from complete features.
            c = cols[0:i]
            p = meanMode[0:i]
            for j, k in zip(p, c):
                dummy[k].fillna(j, inplace = True)
            c = cols[i+1:]
            p = meanMode[i+1:]
            for j, k in zip(p, c):
                dummy[k].fillna(j, inplace = True)
            _, xy = self.splitDataFrame(dummy)
            # NOTE(review): comparing against the string 'NaN' never matches
            # real NaN values, so `z` is effectively xy's full index — confirm
            # whether `xy[xy[n].isnull()].index` was intended.
            z = xy[xy == 'NaN'].index
            if not xy.empty:
                x = xy.drop(n, axis = 1)
                y = xy[n]
                pred = models[i].predict(x)
                k = 0
                for f in range(df.shape[0]):
                    for g in range(z.shape[0]):
                        if f == z[g]:
                            df.iloc[f, i] = pred[k]
                            k = k + 1
        if scale:
            for i in list(df.columns):
                if df[i].dtypes == 'int64' or df[i].dtypes == 'float64':
                    sc = StandardScaler()
                    temp = df[[i]]
                    temp = sc.fit_transform(temp)
                    df[i] = temp
        return df
# Import-only module: nothing runs when executed directly.
if __name__ == '__main__':
    None
| FlintyTub49/MAHA | MAHA/MAHA.py | MAHA.py | py | 9,004 | python | en | code | 1 | github-code | 13 |
11322912226 | import pytest
from rdopkg.cli import rdopkg, rdopkg_runner
from rdopkg import exception
import actions
def test_actions_availability():
    """All registered action functions should resolve (exit code 0)."""
    assert rdopkg('actions') == 0, "Some action functions are NOT AVAILABLE"
def test_actions_continue_short():
    """-c without saved state must fail (exit code 1)."""
    assert rdopkg('-c') == 1, "-c succeeded with no state"
def test_actions_continue_long():
    """--continue without saved state must fail (exit code 1)."""
    assert rdopkg('--continue') == 1, "--continue succeeded with no state"
def test_duplicate_action():
    """Re-adding the same actions module without override must raise."""
    aman = rdopkg_runner().action_manager
    with pytest.raises(exception.DuplicateAction):
        aman.add_actions_modules(actions, override=False)
def test_action_override():
    """With override=True the custom 'clone' action replaces the stock one."""
    runner = rdopkg_runner()
    aman = runner.action_manager
    aman.add_actions_modules(actions, override=True)
    clone_matches = [a for a in aman.actions if a.name == 'clone']
    assert clone_matches, "clone action not found ?!"
    # make sure clone action is overridden with the custom one
    assert clone_matches[0].module == 'foomod'
| softwarefactory-project/rdopkg | tests/test_actions.py | test_actions.py | py | 1,091 | python | en | code | 28 | github-code | 13 |
6767066248 |
import numpy as np
import random
#we are going to have 1 input the map which will be lots of 0s and 1s
#if we visit a 1 then we have an island, unless a 1 connected to that 1
#adjacently has already been visited, ignore diagonals
# 10010
#e.g area=[[1,0,0],[1,1,0],[0,0,1]] => 11001
# 00110
# 11100
#given a map how many islands and what is the largest one?
#first i am going to loop over the rows and ignore all 0s and all visited
#if i find a 1, i am going to set visited=True
#after this, i need to find all 1s (if any) connected to this
#so take a step in all 4 quadrants, ignore 0s
#if a 1 is found set visited =True
#do this repeatedly until no more 1s are found, at which point count 1
#continue with the loop over the rows
def check_island(area, visited, i, j):
    """Flood-fill the island of 1s containing cell (i, j).

    Marks every reachable 1-cell in `visited` (4-neighbourhood, no
    diagonals). Returns 1 if (i, j) is (or becomes) part of an island,
    0 for out-of-bounds or water cells.
    """
    # Out-of-bounds cells are water.
    if i < 0 or i >= len(area) or j < 0 or j >= len(area[0]):
        return 0
    if visited[i][j] == 1:
        return 1
    if area[i][j] == 0:
        return 0
    # Fresh land cell: mark it, then spread in all four directions.
    # (The dead `inew = jnew = 0` initialisation and the redundant
    # re-check of visited/area were removed — both were always satisfied
    # after the guards above.)
    visited[i][j] = 1
    for step in ([-1, 0], [0, 1], [1, 0], [0, -1]):
        check_island(area, visited, i + step[0], j + step[1])
    return visited[i][j]
def island_problem(visited, area):
    """Count connected islands of 1s in `area`; print and return the count.

    `visited` must be a zero-initialised grid of the same shape; it is
    mutated in place by the flood fill.
    """
    count = 0
    # loop over the rows
    for i in range(0, len(area)):
        # loop over the columns
        for j in range(0, len(area[0])):
            before = visited[i][j]
            visited[i][j] = check_island(area, visited, i, j)
            # A 0 -> 1 transition means this cell started a brand-new
            # island; cells already flooded stay 1 -> 1 and are not counted.
            # (BUG FIX: removed the stray `print(i + j)` debug line that
            # flooded stdout on every cell.)
            if before != visited[i][j]:
                count += 1
    print(count)
    return count
n=20 ;m=20
# "seen" grid for the flood fill; note this is a numpy matrix while `area`
# below is a plain list-of-lists -- both support the [i][j] access used.
visited=np.zeros((n,m))
# Build an n x m all-zero map, printing each row as it is built.
area=[]
for i in range(0,n):
    D=[]
    for j in range(0,m):
        D.append(0)
    print(D)
    area.append(D)
print('\n')
print(visited)
# Example inputs kept from development:
#visited=[[0,0,0,0],[0,0,0,0],[0,0,0,0],[0,0,0,0]]
#area=[[1,0,0,1],[1,1,0,0],[0,0,1,1],[1,0,1,0]]
island_problem(visited,area)
| JordanBarton/carrot47 | island_problem.py | island_problem.py | py | 2,199 | python | en | code | 0 | github-code | 13 |
32403506005 | import nltk
# Disgust-related vocabulary: `dwords` holds word-boundary regex patterns for
# matching in text, `twords` the same words as plain tokens for direct lookup.
dwords = [r'\bgross\b', r'\bdisgusting\b', r'\brevolting\b', r'\brepulsive\b', r'\bicky\b', r'\byucky\b', r'\bnasty\b', r'\bvile\b', r'\brepugnant\b', r'\brepellent\b', r'\bnauseating\b', r'\bheinous\b']
twords = ['gross', 'disgusting', 'revolting', 'repulsive', 'icky', 'yucky', 'nasty', 'vile', 'repugnant', 'repellent', 'nauseating', 'heinous']
def createCorpus(fildir):
    """Build an NLTK plaintext corpus over all .txt files under `fildir`.

    Sentences are split on ., ! and ? via a regexp tokenizer.
    """
    my_sent_tokenizer = nltk.RegexpTokenizer('[^.!?]+')
    # Create the new corpus reader object.
    # BUG FIX: the file pattern used '\.' inside a non-raw string, which is an
    # invalid escape sequence (SyntaxWarning, and an error in future Python).
    corpus = nltk.corpus.PlaintextCorpusReader(
        fildir, r'.*\.txt', sent_tokenizer=my_sent_tokenizer)
    return corpus
def linelist(filename, rcnt = False):
    """Parse "text : count" lines from `filename`.

    Returns the list of texts, or (texts, counts) when `rcnt` is True.
    Counts are returned as strings, exactly as read.
    Raises IndexError on a line without a ':' separator (unchanged behavior).
    """
    lines = []
    counts = []
    # BUG FIX: the file handle was opened inline and never closed; use a
    # context manager so it is released deterministically.
    with open(filename) as handle:
        for line in handle:
            parts = line.strip().split(':')
            lines.append(parts[0].strip())
            counts.append(parts[1].strip())
    if rcnt == True:
        return lines, counts
    return lines
| lkpinette/pennant | aod/utils.py | utils.py | py | 899 | python | en | code | 0 | github-code | 13 |
70452915217 | # Made by Nanta XE
# Team: Xiuz Code
# OPEN SOURCE
import os
import json
import time
import subprocess
import re
import hashlib
import random
########
# Bootstrap third-party deps: try to import each module, and if the import
# fails, pip-install it and retry until it succeeds.
for xiuz in ['requests', 'bs4']:
    while 1:
        try:
            exec(f'import {xiuz}')  # dynamic import by module name
            break
        except:
            # Module missing: install it, then loop back and retry the import.
            # NOTE(review): assumes `python3` is on PATH -- confirm for the
            # target environment (Termux/Linux).
            subprocess.check_output(f'python3 -m pip install {xiuz}'.split())
#########
def randomhash():
    """Return the SHA-1 hex digest of a random float (used as a file name stem)."""
    digest = hashlib.sha1(str(random.random()).encode())
    return digest.hexdigest()
class Twitter:
    """Interactive Twitter video downloader backed by the twsaver.com scraper.

    Flow: prompt for a tweet URL, fetch a CSRF-like token from the site,
    POST the URL to its action endpoint, let the user pick a quality, then
    stream the chosen file into ./download/ with a progress bar.
    User-facing strings are intentionally in Indonesian.
    """
    def __init__(self):
        # Scraper base URL and ASCII banner shown on startup.
        self.url = 'https://twsaver.com/'
        self.banner = '•——————————————————————————•\n| Twitter Video Downloader |\n•——————————————————————————•\n| Author: Nanta XE         |\n| Team: XiuzCode           |\n•——————————————————————————•\n'
        # One session so cookies persist between the token fetch and the POST.
        self.s = requests.Session()
        self.fn = ''   # output file name (set in dl_file)
        self.ext = ''  # output extension including the dot (set in download)
    def download(self):
        """Prompt for a tweet link, resolve download options, pick and fetch one."""
        print (self.banner)
        # Mobile UA keeps the scraper site serving its lightweight page.
        self.s.headers.update({'User-Agent': 'Mozilla/5.0 (Linux; Android 4.2.1; en-us; Nexus 5 Build/JOP40D) AppleWebKit/535.19 (KHTML, like Gecko; googleweblight) Chrome/38.0.1025.166 Mobile Safari/535.19'})
        link = str(input('! Hanya untuk video twitter, bukan foto.\n~> Link video: '))
        if 'twitter.com' not in link: exit('! Bukan link twitter')
        # Scrape the hidden form token required by the action endpoint.
        q = self.s.get(self.url)
        q = bs4.BeautifulSoup(q.text, 'html.parser')
        token = q.find('input', {'type': 'hidden', 'name': 'token', 'id': 'token'})['value']
        post_url = self.url + 'system/action.php'
        data = {}
        data.update({'url': link, 'token': token})
        try:
            q = self.s.post(post_url, data = data).json()
        except json.decoder.JSONDecodeError:
            # Non-JSON response means the scraper could not resolve the link.
            exit('! Error saat mengambil data')
        z = q.get('links')
        if z:
            # Show a 1-based menu of quality/size options.
            for x,y in enumerate(z):
                idx = x + 1
                quality = y['quality']
                size = y['size']
                print (f'{idx}. {quality} - {size}')
            c = input('[pilih]> ')
            try: c = int(c) - 1
            except: exit('! Gunakan angka.')
            self.ext = '.' + z[c]['type']
            self.dl_file(z[c]['url'])
        else:
            exit('! Link download tidak ditemukan.')
    def dl_file(self, url):
        """Stream `url` into download/<randomhash><ext> with a text progress bar."""
        file_data = requests.get(url, stream = True)
        cl = file_data.headers.get('content-length')
        self.fn = randomhash() + self.ext
        df = f'download/{self.fn}'
        with open(df, 'wb') as down:
            print (f'+ Mengunduh {self.fn}')
            # Without a content-length we cannot show progress: write at once.
            if cl is None: down.write(file_data.content)
            else:
                ac = 0  # bytes received so far
                cl = int(cl)
                for xa in file_data.iter_content(chunk_size=4096):
                    ac += len(xa)
                    down.write(xa)
                    # Render a 10-slot progress bar in place (carriage return).
                    bar = 10
                    load = int(bar * ac / cl)
                    strips = '=' * load + ' ' * (bar - load)
                    percent = f'{int(ac / cl * 100)}%'
                    print (f'\r[{strips}] {percent}', end = "")
                down.close()
                print ('')
if '__main__' == __name__:
    os.system('clear')
    # BUG FIX: replaced the bare `try: os.mkdir(...) except: pass`, which
    # silently swallowed every error (e.g. permission problems), with
    # makedirs(exist_ok=True) which only tolerates an existing directory.
    os.makedirs('download', exist_ok=True)
    app = Twitter()
    try:
        app.download()
    except KeyboardInterrupt:
        # Quiet exit on Ctrl-C.
        exit()
| Zusyaku/Termux-And-Kali-Linux-V3 | twetdown.py | twetdown.py | py | 2,898 | python | en | code | 10 | github-code | 13 |
25565894533 | from collections import deque
# Letters read from stdin: vowels are consumed from the front of their deque,
# consonants from the back of theirs.
vowels = deque(input().split())
consonants = deque(input().split())
# Each value tracks the letters of that flower not yet seen in the input.
flowers = {
    "rose": "rose",
    "tulip": "tulip",
    "lotus": "lotus",
    "daffodil": "daffodil",
}
is_found_word = False
# Per round: take one vowel (left end) and one consonant (right end) and strip
# both letters from every flower's remaining-letters string.
while vowels and consonants:
    letters = [vowels.popleft(), consonants.pop()]
    for flower in flowers:
        for letter in letters:
            flowers[flower] = flowers[flower].replace(letter, "")
        # An emptied string means every letter of this flower has been matched.
        if not flowers[flower]:
            print(f"Word found: {flower}")
            is_found_word = True
            break
    if is_found_word:
        break
# while/else: runs only when the loop drained without `break` (no word found).
else:
    print("Cannot find any word!")
if vowels:
    print(f"Vowels left: {' '.join(vowels)}")
if consonants:
    print(f"Consonants left: {' '.join(consonants)}")
| mustanska/SoftUni | Python_Advanced/Exams/flowers_finder.py | flowers_finder.py | py | 769 | python | en | code | 0 | github-code | 13 |
40217547223 | """Surface velocity of any spinning object with radius rho and local spherical coordinates Phi and Theta."""
import numpy as np
from one_ray_solver.velocities import velocity_abc
class SurfaceVelocityRigidSphere(velocity_abc.VelocityABC):
    """Surface velocities u1 and u3 of a perfect rigid sphere."""
    def __init__(self, s, position):
        # s: spin parameter; position must be an iterable of
        # (rho, theta, phi) local spherical coordinates.
        super().__init__(s, position)

    def _calculate_velocity(self):
        """Return ((-u1, -u3), gamma); ((nan, nan), nan) when superluminal."""
        rho, theta, phi = self.position
        u1 = 5 * self.s / (2 * rho) * np.sin(phi) * np.sin(theta)
        u3 = 5 * self.s / (2 * rho) * np.cos(phi) * np.sin(theta)
        if 1 - u1 ** 2 - u3 ** 2 < 0:
            print('Velocities too high; returning nan.')
            # BUG FIX: this path previously returned a flat 3-tuple while the
            # success path returns ((u1, u3), gamma) -- keep the shapes
            # consistent so callers can always unpack two values.
            return (np.nan, np.nan), np.nan
        # Second element is the Lorentz factor 1/sqrt(1 - u1^2 - u3^2).
        return (-u1, -u3), 1 / np.sqrt(1 - u1 ** 2 - u3 ** 2)
class SurfaceVelocityMaclaurinEllipsoid(velocity_abc.VelocityABC):
    """Surface velocities u1 and u3 of a Maclaurin ellipsoid."""
    def __init__(self, s, position):
        # s: spin parameter; position must be an iterable of
        # (a, theta, phi), a being the semi-major axis.
        super().__init__(s, position)

    def _calculate_velocity(self):
        """Return ((-u1, -u3), gamma); ((nan, nan), nan) when superluminal."""
        a, theta, phi = self.position
        u1 = 5 * self.s / (2 * a) * np.sin(phi) * np.sin(theta)
        u3 = 5 * self.s / (2 * a) * np.cos(phi) * np.sin(theta)
        if 1 - u1 ** 2 - u3 ** 2 < 0:
            print('Velocities too high; returning nan.')
            # BUG FIX: this path previously returned a flat 3-tuple while the
            # success path returns ((u1, u3), gamma) -- keep the shapes
            # consistent so callers can always unpack two values.
            return (np.nan, np.nan), np.nan
        # Second element is the Lorentz factor 1/sqrt(1 - u1^2 - u3^2).
        return (-u1, -u3), 1 / np.sqrt(1 - u1 ** 2 - u3 ** 2)
return (-u1, -u3), 1 / np.sqrt(1 - u1 ** 2 - u3 ** 2) | uhrwecker/Spin | one_ray_solver/velocities/surface_vel.py | surface_vel.py | py | 1,530 | python | en | code | 0 | github-code | 13 |
14880574417 | import socket
import os
import signal
from time import sleep
# https://docs.python.org/3/howto/sockets.html
# https://docs.python.org/3/library/socket.html#module-socket
def handle_signal(signum, frame):
    """SIGCHLD handler: reap every terminated child, then return None."""
    while True:
        try:
            child_pid, child_status = os.waitpid(-1, os.WNOHANG)
        except OSError:
            # No children left to wait for (ECHILD).
            return None
        if child_pid == 0:
            # Children exist but none have exited yet.
            return None
        print('Child PID: {} terminated with status {}'.format(child_pid, child_status))
def process_request(s):
    """Wrap the request string in << >> after a 10-second delay.

    NOTE(review): the sleep is a stand-in for real (slow) work, used to
    demonstrate that forked children handle clients concurrently.
    """
    sleep(10)
    return "<<{}>>".format(s)
def handle_request(conn, addr):
    """Serve one client on `conn`: echo process_request() of each line back.

    Loops until the peer closes the connection (empty recv); the `with`
    block closes `conn` on exit.
    """
    with conn:
        print('Connected by', addr)
        while True:
            request_link = conn.recv(1024)
            if not request_link:
                break
            result = process_request(request_link.decode('utf-8').strip())
            resp = str(result).encode('utf-8') + b'\n'
            # BUG FIX: the reply was sent on the module-global `client_socket`,
            # which only happened to alias `conn` in the forked child; use the
            # `conn` parameter, and sendall() to avoid short writes.
            conn.sendall(resp)
if __name__ == '__main__':
    # Listening socket; SO_REUSEADDR lets quick restarts rebind port 33000.
    listen_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listen_socket.bind(('', 33000))
    listen_socket.listen(1)
    # Reap exited children asynchronously so they do not become zombies.
    signal.signal(signal.SIGCHLD, handle_signal)
    # Classic forking server: one child process per accepted connection.
    while True:
        client_socket, client_address = listen_socket.accept()
        pid = os.fork()
        if pid == 0:
            listen_socket.close() # close child copy listen
            handle_request(client_socket, client_address)
            client_socket.close()
            exit(0)
        else:
            # Parent closes its copy of the connected socket and keeps accepting.
            client_socket.close()
| ekomissarov/edu | some-py-examples/fork-example/frk-with-socket.py | frk-with-socket.py | py | 1,569 | python | en | code | 0 | github-code | 13 |
9426795871 | from multiprocessing.spawn import prepare
import os
import hashlib
import subprocess
import re
import tqdm
from typing import List, Tuple, Dict
from . import config, template, utils, dstruct, queries
class CodeQLException(Exception):
    """Raised when a CodeQL CLI invocation fails or a table cannot be split."""
    pass
class CodeQLTable:
    """In-memory result table of a CodeQL query: column names plus rows."""
    def __init__(self, name, colnames, content):
        # name: the query name this table came from.
        self.name = name
        self.colnames = colnames
        self.content = content
        # Cached dimensions for convenience.
        self.num_col = len(self.colnames)
        self.num_row = len(self.content)
self.num_row = len(self.content)
# TODO: query to avoid hardcoding
ignore_kfunctions = [
"_printk", "arch_static_branch_jump", "arch_static_branch",
"____wrong_branch_error", "__dynamic_pr_debug"
] # boring utils func
ignore_kfunctions_pattern = [
r"__builtin.*",
r"__le64.*",
r"__le32.*",
r"__le16.*",
r"__compiletime.*",
r"__dynamic_.*",
r"kasan_.*",
r"kcsan_.*",
]
# added
specific_callback_functions = {}
def prepare_ql_string(s: str):
    """Wrap `s` in double quotes for use as a CodeQL string literal."""
    return '"{}"'.format(s)
def prepare_ql_string_list(l: list):
    """Render a list of strings as a CodeQL list literal, e.g. ["a","b"].

    BUG FIX: the previous trailing-comma trimming (`result[:-1] + ']'`)
    chopped the opening bracket off for an empty list and returned ']';
    joining the quoted items yields a well-formed '[]' instead.
    """
    return '[' + ','.join(prepare_ql_string(s) for s in l) + ']'
def useful_function(funcname: str) -> bool:
    """True unless `funcname` is a known-boring kernel utility function."""
    if funcname in ignore_kfunctions:
        return False
    # Reject any name matching one of the ignore patterns.
    return not any(re.search(pattern, funcname) is not None
                   for pattern in ignore_kfunctions_pattern)
def split_ql_table(table, num_tag):
    """Partition a tagged table's rows into `num_tag` buckets keyed by the tag column."""
    if table.colnames[0] != "tag":
        raise CodeQLException("not a splittable CodeQLTable")
    buckets = [[] for _ in range(num_tag)]
    for row in table.content:
        # First cell is the integer tag; the rest is the payload.
        buckets[int(row[0])].append(row[1:])
    return buckets
def run_command(cmd, timeout):
    """Run `cmd` in a shell; return (stdout, stderr) or raise CodeQLException."""
    proc = subprocess.run(
        cmd,
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        encoding="utf-8",
        timeout=timeout,
    )
    if proc.returncode != 0:
        raise CodeQLException("{} {}".format(cmd, proc.stderr))
    return (proc.stdout, proc.stderr)
def parse_output(out, query_name):
    """Parse the ASCII table printed by `codeql query run` into a CodeQLTable."""
    rows = out.strip().split("\n")
    # A single empty line means the query returned nothing.
    if len(rows) == 1 and len(rows[0]) == 0:
        return CodeQLTable(query_name, [], [[]])
    header = [cell.strip() for cell in rows[0].split("|")[1:-1]]
    # rows[1] is the header/body separator; data starts at rows[2].
    body = [[cell.strip() for cell in row.split("|")[1:-1]]
            for row in rows[2:]]
    return CodeQLTable(query_name, header, body)
def run_codeql(
        query,
        database,
        timeout=config.default_timeout,
        query_name="test",
):
    """Write `query` to a temp .ql file, run it with the CodeQL CLI against
    `database`, and return the parsed result as a CodeQLTable.

    Relies on the `codeql_cli_path` and `codeql_repo_path` environment
    variables; the temp file name is the md5 of `query_name`.
    """
    filename = hashlib.md5(query_name.encode()).hexdigest() + ".ql"
    path = os.path.join(config.tmp_path, filename)
    open(path, "w").write(query)
    codeql_cli_path = os.getenv("codeql_cli_path")
    codeql_repo_path = os.getenv("codeql_repo_path")
    # -j controls CLI parallelism; --search-path points at the QL libraries.
    command = "{} query run {} --database {} --search-path {} -j{}".format(
        codeql_cli_path, path, database, codeql_repo_path,
        config.multithread)
    if config.debug:
        utils.Logger.log("{} -> {}".format(query_name, path))
        utils.Logger.log(query)
    out, err = run_command(command, timeout)
    if config.debug:
        utils.Logger.log(out)
        utils.Logger.log(err)
    return parse_output(out, query_name)
#
# Queries Wrapper P1 Relevant
#
def ql_get_all_driver_types(database: str) -> List[str]:
    """Return every driver type name found in `database`."""
    table = run_codeql(
        query=queries.QueriesDict["getDriverType"],
        database=database,
        timeout=config.allyes_timeout,
        query_name="GetAllDriverTypes"
    )
    return [row[0] for row in table.content]
def ql_get_driver_unreg_functions(database: str) -> List[dstruct.Unregfunc]:
    """Return driver-level unregister functions as Unregfunc metas."""
    table = run_codeql(
        query=queries.QueriesDict["getDriverUnreg"],
        database=database,
        timeout=config.allyes_timeout,
        query_name="getDriverUnreg"
    )
    return [dstruct.Unregfunc(row[0], row[1], row[2]) for row in table.content]
def ql_get_upper_unreg_functions(database: str) -> List[dstruct.FuncMeta]:
    """Return upper-layer unregister functions.

    NOTE(review): annotated as List[FuncMeta] but actually builds Unregfunc
    instances (kept as-is) -- confirm the intended return type.
    """
    table = run_codeql(
        query=queries.QueriesDict["getUpperUnreg"],
        database=database,
        timeout=config.allyes_timeout,
        query_name="getUpperUnreg"
    )
    return [dstruct.Unregfunc(row[0], row[1], row[2]) for row in table.content]
def ql_get_upper_unreg_name_from_unreg(unregfunc: dstruct.FuncMeta, database: str) -> List[str]:
    """Return the names of upper-layer callers for one unregister function."""
    table = run_codeql(
        query=queries.QueriesDict["UnregUpperName"].format(
            funcname=unregfunc.name, funcfile=unregfunc.file),
        query_name="UnregUpperName{}".format(unregfunc.getID()),
        database=database)
    return [row[0] for row in table.content]
def ql_translate_names_to_funcmetas_batch(names: List[str], database: str) -> List[dstruct.Unregfunc]:
    """Resolve function names to (name, file) Unregfunc metas, in batches."""
    resolved = []
    for start in tqdm.tqdm(range(0, len(names), config.BATCH_SIZE)):
        # Clamp the batch end to the list length.
        end = min(start + config.BATCH_SIZE, len(names))
        constraints = prepare_ql_string_list(names[start:end])
        table = run_codeql(
            query=queries.QueriesDict["TranslateName2Meta"].format(constraints = constraints),
            query_name="UnregUpper_{}".format(constraints),
            database=database,
            timeout=config.allyes_timeout)
        for name, file in table.content:
            resolved.append(dstruct.Unregfunc(name, file, "upper"))
    return resolved
def ql_find_callsites_batch(functions: List[dstruct.FuncMeta], database: str) -> List[List[str]]:
    """Find call sites for each function in one batched, tagged query.

    Returns one row-group per input function (same order), produced by
    splitting the tagged result table.
    """
    query = template.CodeQLTemplate(queries.QueriesDict["GetCallSites"])
    for func in functions:
        query.begin_group()
        query.add_item(prepare_ql_string(func.name),
                       prepare_ql_string(func.file))
        query.end_group()
    table = run_codeql(query.get_query(),
                       query_name="GetCallSite_{}".format(database),
                       database=database,
                       timeout=config.allyes_timeout)
    return split_ql_table(table, query.next_tag)
#
# Queries Wrapper P1.1 Relevant
#
def ql_find_dereffunc_struct(
        constaraint: str, database: str) -> Dict[str, List[dstruct.Dereffunc]]:
    """Collect, per ops-structure ID, the functions its fields point to.

    Runs four query flavours — global-variable assignments (direct and
    recursive) and dynamic in-function assignments (pointer-field and
    variable-field) — then merges them into {struct_id: [Dereffunc, ...]}
    with duplicates removed. `constaraint` (sic, parameter name kept for
    callers) is interpolated into each query.
    """
    # better use CodeQL class to implement the logic
    # can refer to the old get_all_proto_ops() and get_general_deref_ops()
    common_part = queries.QueriesDict["DerefCommon"]
    # constraint the variable location
    select_code_gvar = queries.QueriesDict["DerefGVar1"] % (constaraint)
    gvar_result = run_codeql(common_part + select_code_gvar,
                             query_name="derefgvar_{}".format(database),
                             database=database,
                             timeout=config.default_timeout)
    # Pack global-variable results at (structure, variable) granularity.
    gvar_result_dict = {}
    for r in gvar_result.content:
        struct_name, struct_file, gvar_name, gvar_file, field_name, function_name, function_file = r
        struct_meta = dstruct.StructMeta(struct_name, struct_file)
        struct_id = struct_meta.getID()
        if struct_id not in gvar_result_dict.keys():
            gvar_result_dict[struct_id] = {}
        if gvar_name not in gvar_result_dict[struct_id].keys():
            gvar_result_dict[struct_id][gvar_name] = []
        gvar_result_dict[struct_id][gvar_name].append(
            dstruct.Dereffunc(function_name, function_file, field_name,
                              struct_id))
    # Recursive global-variable flavour: no per-variable grouping.
    select_code_gvar2 = queries.QueriesDict["DerefGVar2"] % (constaraint)
    gvar_result2_dict = {}
    gvar_result2 = run_codeql(common_part + select_code_gvar2,
                              query_name="derefgvar2_{}".format(database),
                              database=database,
                              timeout=config.default_timeout)
    for r in gvar_result2.content:
        struct_name, struct_file, field_name, function_name, function_file = r
        struct_meta = dstruct.StructMeta(struct_name, struct_file)
        struct_id = struct_meta.getID()
        if struct_id not in gvar_result2_dict.keys():
            gvar_result2_dict[struct_id] = []
        gvar_result2_dict[struct_id].append(
            dstruct.Dereffunc(function_name, function_file, field_name,
                              struct_id))
    # Dynamic assignment flavour 1: pointer-field assignments inside init funcs.
    select_code_infunc = queries.QueriesDict["DerefDyn1"] % (constaraint)
    infunc_result = run_codeql(common_part + select_code_infunc,
                               query_name="derefinfunc_{}".format(database),
                               database=database,
                               timeout=config.default_timeout)
    infunc_result_dict = {}
    for r in infunc_result.content:
        struct_name, struct_file, initfunc_name, field_name, function_name, function_file = r
        struct_meta = dstruct.StructMeta(struct_name, struct_file)
        struct_id = struct_meta.getID()
        # do a simple filter, we believe one initfunc will not just init one function
        if struct_id not in infunc_result_dict.keys():
            infunc_result_dict[struct_id] = {}
        if initfunc_name not in infunc_result_dict[struct_id].keys():
            infunc_result_dict[struct_id][initfunc_name] = []
        infunc_result_dict[struct_id][initfunc_name].append(
            dstruct.Dereffunc(function_name, function_file, field_name,
                              struct_id))
    # Dynamic assignment flavour 2: variable-field assignments inside init funcs.
    select_code_infunc2 = queries.QueriesDict["DerefDyn2"] % (constaraint)
    infunc_result2 = run_codeql(common_part + select_code_infunc2,
                                query_name="derefinfunc2_{}".format(database),
                                database=database,
                                timeout=config.default_timeout)
    infunc_result_dict2 = {}
    for r in infunc_result2.content:
        struct_name, struct_file, initfunc_name, field_name, function_name, function_file = r
        struct_meta = dstruct.StructMeta(struct_name, struct_file)
        struct_id = struct_meta.getID()
        # do a simple filter, we believe one initfunc will not just init one function
        if struct_id not in infunc_result_dict2.keys():
            infunc_result_dict2[struct_id] = {}
        if initfunc_name not in infunc_result_dict2[struct_id].keys():
            infunc_result_dict2[struct_id][initfunc_name] = []
        infunc_result_dict2[struct_id][initfunc_name].append(
            dstruct.Dereffunc(function_name, function_file, field_name,
                              struct_id))
    # okay, going to merge the result (dedup via the contain log).
    packed_result = {}
    packed_result_contain_log = []
    for struct_id, var_dict in gvar_result_dict.items():
        for _, funclist in var_dict.items():
            if len(funclist) < 2:  # ops structure contains more than one funciton
                # just too strict
                utils.Logger.warn(
                    "only find one function in gvar within {}".format(struct_id))
            if struct_id not in packed_result.keys():
                packed_result[struct_id] = []
            for func in funclist:
                if func.getID() not in packed_result_contain_log:
                    packed_result[struct_id].append(func)
                    packed_result_contain_log.append(func.getID())
    for struct_id, functions in gvar_result2_dict.items():
        if struct_id in packed_result.keys():
            utils.Logger.warn(
                "weird, the global var deref is overlapped with recursive global deref at structure {}"
                .format(struct_id))
        if struct_id not in packed_result.keys():
            packed_result[struct_id] = []
        for func in functions:
            if func.getID() not in packed_result_contain_log:
                packed_result[struct_id].append(func)
                packed_result_contain_log.append(func.getID())
    for struct_id, func_dict in infunc_result_dict.items():
        if struct_id in packed_result.keys():
            # BUG FIX: this warning previously formatted the stale
            # `struct_name` left over from an earlier parse loop; report the
            # struct actually being merged.
            utils.Logger.warn(
                "weird, the global var deref is overlapped with infunc deref at structure {}"
                .format(struct_id))
        for _, funclist in func_dict.items():
            if len(funclist) < 2:  # ops structure contains more than one funciton (init)
                # just too strict
                utils.Logger.warn(
                    "only find one function in dynamic struct (pfa) assign within {}".format(struct_id))
            if struct_id not in packed_result.keys():
                packed_result[struct_id] = []
            for func in funclist:
                if func.getID() not in packed_result_contain_log:
                    packed_result[struct_id].append(func)
                    packed_result_contain_log.append(func.getID())
    for struct_id, func_dict in infunc_result_dict2.items():
        if struct_id in packed_result.keys():
            # BUG FIX: same stale `struct_name` issue as above.
            utils.Logger.warn(
                "weird, the global var deref is overlapped with infunc2 deref at structure {}"
                .format(struct_id))
        for _, funclist in func_dict.items():
            if len(funclist) < 2:  # ops structure contains more than one funciton (init)
                utils.Logger.warn(
                    "only find one function in dynamic struct (vfa) assign within {}".format(struct_id))
            if struct_id not in packed_result.keys():
                packed_result[struct_id] = []
            for func in funclist:
                if func.getID() not in packed_result_contain_log:
                    packed_result[struct_id].append(func)
                    packed_result_contain_log.append(func.getID())
    return packed_result
def ql_check_struct_call_exists(struct_ids: List[str], database: str) -> List[dstruct.StructMeta]:
    """Return de-duplicated StructMetas whose fields have call sites."""
    query = template.CodeQLTemplate(queries.QueriesDict["GetStFieldCallSound"])
    for sid in struct_ids:
        sname, sfile = dstruct.parse_structid(sid)
        query.begin_group()
        query.add_item(prepare_ql_string(sname),
                       prepare_ql_string(sfile))
        query.end_group()
    table = run_codeql(query.get_query(),
                       query_name="GetStFieldCallSonud_{}".format(database),
                       database=database)
    groups = split_ql_table(table, query.next_tag)
    metas = []
    seen = []
    for group in groups:
        for sname, sfile in group:
            meta = dstruct.StructMeta(sname, sfile)
            # De-duplicate by structure ID.
            if meta.getID() not in seen:
                metas.append(meta)
                seen.append(meta.getID())
    return metas
def ql_check_struct_defined_and_used_somewhere_else(
        struct_field_dict: Dict[str, List[str]],
        unregfuncs: List[dstruct.Unregfunc],
        database: str) -> Dict[str, List[str]]:
    """Filter ``struct_field_dict`` down to struct fields whose calls do
    not happen inside the current database.

    :param struct_field_dict: struct id -> list of field names
    :param unregfuncs: unregistration functions whose declaration files
        are looked up first
    :param database: CodeQL database to query
    :return: struct id -> field names surviving both filter stages
    """
    structids = list(struct_field_dict.keys())
    # stage 0: find the files in which the unreg functions are declared
    result = run_codeql(
        query=queries.QueriesDict["GetFuncDeclara"].format(
            prepare_ql_string_list([func.name for func in unregfuncs])),
        query_name="GetFuncDeclara_{}".format(database),
        database=database)
    declara_files = []
    for _, file in result.content:
        declara_files.append(file)
    declara_files = list(set(declara_files))
    # filter out structs that defined in those files
    filtered_structids = []
    for struct_id in structids:
        _, struct_file = dstruct.parse_structid(struct_id)
        # NOTE(review): this compares a file path against the struct-id
        # list being built, so the condition is effectively always true
        # and every struct id passes through (declara_files collected
        # above is never consulted); presumably the membership test was
        # meant to involve declara_files -- confirm intent before changing
        if struct_file not in filtered_structids:
            filtered_structids.append(struct_id)
    # cool, we can filter out some structid keys
    filer_result_1 = {}
    for struct_id, fieldlist in struct_field_dict.items():
        if struct_id in filtered_structids:
            filer_result_1[struct_id] = fieldlist
    # next we have to make sure the field call never happen in current database
    query = template.CodeQLTemplate(queries.QueriesDict["GetStFieldCall"])
    for struct_id, fieldlist in filer_result_1.items():
        struct_name, struct_file = dstruct.parse_structid(struct_id)
        query.begin_group()
        query.add_item(prepare_ql_string(struct_name),
                       prepare_ql_string(struct_file),
                       prepare_ql_string_list(fieldlist))
        query.end_group()
    result = run_codeql(query.get_query(),
                        query_name="GetStFieldCallrev_{}".format(database),
                        database=database)
    result = split_ql_table(result, query.next_tag)
    # collect struct/field pairs that ARE called here -- these get discarded
    discard_struct_field_pack = {}
    for r in result:
        for struct_name, struct_file, field_name in r:
            struct_meta = dstruct.StructMeta(struct_name, struct_file)
            struct_id = struct_meta.getID()
            if struct_id not in discard_struct_field_pack.keys():
                discard_struct_field_pack[struct_id] = []
            if field_name not in discard_struct_field_pack[struct_id]:
                discard_struct_field_pack[struct_id].append(field_name)
    # stage 2: keep only fields not present in the discard set
    filer_result_2 = {}
    for struct_id, fieldlist in filer_result_1.items():
        for field_name in fieldlist:
            if struct_id in discard_struct_field_pack.keys() and \
                    field_name in discard_struct_field_pack[struct_id]:
                continue
            if struct_id not in filer_result_2.keys():
                filer_result_2[struct_id] = []
            filer_result_2[struct_id].append(field_name)
    return filer_result_2
def ql_find_netlink_derefs(constaraint: str, database: str) -> List[dstruct.Dereffunc]:
    """Find dereference functions registered through netlink ops structs.

    Idea is simple: find initializers for struct genl_ops and
    small_genl_ops and pull the function assigned to the doit field.

    :param constaraint: extra constraint text spliced into the QL template
    :param database: CodeQL database to query
    :return: de-duplicated ``Dereffunc`` objects
    """
    query = queries.QueriesDict["GetNetlinkOps"] % (constaraint)
    result = run_codeql(query,
                        query_name="netlink_derefs_{}".format(database),
                        database=database,
                        timeout=config.default_timeout)
    packed_result = []
    logs = []  # Dereffunc ids already emitted, for de-duplication
    for r in result.content:
        struct_name, struct_file, field_name, function_name, function_file = r
        # fix: getID was previously referenced without calling it, so the
        # bound method object (not the struct id string) was passed to
        # Dereffunc; every other use in this module calls getID()
        deref_func = dstruct.Dereffunc(
            function_name, function_file, field_name,
            dstruct.StructMeta(struct_name, struct_file).getID())
        if deref_func.getID() not in logs:
            logs.append(deref_func.getID())
            packed_result.append(deref_func)
    return packed_result
def ql_find_pre_gather_derefs(constaraint: str, database: str) -> List[dstruct.Dereffunc]:
    """Find dereference functions for pre-gathered global variable ops.

    :param constaraint: extra constraint text spliced into the QL template
    :param database: CodeQL database to query
    :return: de-duplicated ``Dereffunc`` objects, packed at
        structure + variable granularity
    """
    query = queries.QueriesDict["GetPreGVarOps"] % (constaraint)
    result = run_codeql(query,
                        query_name="derefpregvar_{}".format(database),
                        database=database,
                        timeout=config.default_timeout)
    # well, we need to pack the result in structure + variable granularity
    packed_result = []
    logs = []  # Dereffunc ids already emitted, for de-duplication
    for r in result.content:
        struct_name, struct_file, field_name, function_name, function_file = r
        # fix: getID was previously referenced without calling it, so the
        # bound method object (not the struct id string) was passed to
        # Dereffunc; every other use in this module calls getID()
        deref_func = dstruct.Dereffunc(
            function_name, function_file, field_name,
            dstruct.StructMeta(struct_name, struct_file).getID())
        if deref_func.getID() not in logs:
            logs.append(deref_func.getID())
            packed_result.append(deref_func)
    return packed_result
#
# Queries Wrapper P2 Relevant
#
# deallocation kernel functions: [FuncMeta, <third query column>] entries,
# filled by ql_special_callinfo_process_analyze below
dealloc_kfunctions = []
def ql_special_callinfo_process_analyze(database: str) -> None:
    """Populate the module-level ``dealloc_kfunctions`` list with
    deallocation functions found in the database."""
    utils.Logger.log("processing deallocation functions")
    global dealloc_kfunctions
    dealloc_query = (queries.QueriesDict["HeapCommon"]
                     + queries.QueriesDict["GetDealloc"])
    result = run_codeql(query=dealloc_query,
                        query_name="GetDealloc_{}".format(database),
                        database=database)
    for row in result.content:
        # store the function meta together with the third result column
        dealloc_kfunctions.append([dstruct.FuncMeta(row[0], row[1]), row[2]])
def ql_get_defined_h_funcs(database: str) -> list:
    """Return the raw rows for functions defined in header files."""
    return run_codeql(query=queries.QueriesDict["GetHDef"],
                      query_name="GetHDef_{}".format(database),
                      database=database).content
def ql_get_defined_c_funcs(database: str) -> list:
    """Return the raw rows for functions defined in C source files."""
    return run_codeql(query=queries.QueriesDict["GetCDef"],
                      query_name="GetCDef_{}".format(database),
                      database=database).content
def ql_get_defined_funcs(database: str) -> list:
    """Return the raw rows for all defined functions in the database."""
    return run_codeql(query=queries.QueriesDict["GetDef"],
                      query_name="GetDef_{}".format(database),
                      database=database).content
def ql_get_basic_blocks(funcmetas: list, database: str):
    """Fetch basic-block rows for every (name, file) pair in funcmetas,
    split per input group."""
    query = template.CodeQLTemplate(queries.QueriesDict["GetBasicBlock"])
    for name, path in funcmetas:
        query.begin_group()
        query.add_item(prepare_ql_string(name),
                       prepare_ql_string(path))
        query.end_group()
    raw = run_codeql(query.get_query(),
                     query_name="GetBasicBlock_{}".format(database),
                     database=database)
    return split_ql_table(raw, query.next_tag)
def ql_get_callinfos_cfgs(
        cfgs: Dict[str, dstruct.ControlFlowGraph],
        database: str
) -> Dict[str, Dict[str, Dict[str, dstruct.CallInfo]]]:
    """Batch-collect call information for every function in ``cfgs``.

    :param cfgs: control flow graphs keyed by function id
    :param database: CodeQL database to query
    :return: funcid -> bbid -> call location -> ``CallInfo``
    """
    query = template.CodeQLTemplate(queries.QueriesDict["GetCallInfoBatch"])
    funcids = list(cfgs.keys())
    # prepare arguments (function enough)
    # fix: this loop previously unpacked parse_funcid as
    # "funcfile, funcname = ..." even though parse_funcid returns
    # (name, file) everywhere else in this module; the values passed to
    # add_item are unchanged, only the misleading names are corrected
    for funcid in funcids:
        funcname, funcfile = dstruct.parse_funcid(funcid)
        query.begin_group()
        query.add_item(prepare_ql_string(funcname),
                       prepare_ql_string(funcfile))
        query.end_group()
    result = run_codeql(query=query.get_query(),
                        query_name="GetCallInfoBatch_{}".format(database),
                        database=database)
    result = split_ql_table(result, query.next_tag)
    # now result is indexed by funcid, inside each funcid there will be
    # multiple information rows
    packed_result = {}
    for index, rr in enumerate(result):
        funcid = funcids[index]
        funcname, funcfile = dstruct.parse_funcid(funcid)
        funcmeta = dstruct.FuncMeta(funcname, funcfile)
        packed_result[funcid] = {}
        for r in rr:
            calltype, callloc, calleepacked, bbidentifier, constantargval, constantargidx, functionargpacked, functionargidx = r
            # the found function may be boring, forget about it
            if not useful_function(dstruct.parse_funcid(calleepacked)[0]):
                continue
            bbstart, bbend = bbidentifier.split('+')
            bbid = dstruct.hash_signature(bbstart, bbend)
            if bbid not in packed_result[funcid].keys():
                packed_result[funcid][bbid] = {}
            if callloc not in packed_result[funcid][bbid].keys():
                # first time we see this call site: create the CallInfo
                # ("lucky" is the query-side sentinel for "no value")
                info = dstruct.CallInfo(callloc, calltype, funcmeta, bbid)
                info.callee.append(calleepacked)
                if constantargval != "lucky":
                    info.constantarg.append((constantargidx, constantargval))
                if functionargpacked != "lucky":
                    info.functionarg.append(
                        (functionargpacked, functionargidx))
                packed_result[funcid][bbid][callloc] = info
            else:
                # possible more informations here (merge into existing)
                info = packed_result[funcid][bbid][callloc]
                assert (calltype == info.type)
                if calleepacked not in info.callee:
                    info.callee.append(calleepacked)
                if constantargval != "lucky" and (
                        constantargidx,
                        constantargval) not in info.constantarg:
                    info.constantarg.append((constantargidx, constantargval))
                if functionargpacked != "lucky" and (
                        functionargpacked,
                        functionargidx) not in info.functionarg:
                    info.functionarg.append(
                        (functionargpacked, functionargidx))
    return packed_result
def ql_get_callinfos_infunction(
        funcmeta: dstruct.FuncMeta,
        database: str) -> Dict[str, Dict[str, dstruct.CallInfo]]:
    """Collect call information for a single function.

    :param funcmeta: the function to inspect
    :param database: CodeQL database to query
    :return: bbid -> call location -> ``CallInfo``
    """
    query = queries.QueriesDict["GetCallInfo"] % \
        (prepare_ql_string(funcmeta.name), prepare_ql_string(funcmeta.file))
    result = run_codeql(
        query=query,
        query_name="GetCallInfo_{}".format(database),
        database=database)
    packed_result = {}
    for r in result.content:
        calltype, callloc, calleepacked, bbidentifier, constantargval, constantargidx, functionargpacked, functionargidx = r
        # the found function may be boring, forget about it
        if not useful_function(dstruct.parse_funcid(calleepacked)[0]):
            continue
        # bbidentifier has the form "<start>+<end>"
        bbstart, bbend = bbidentifier.split('+')
        bbid = dstruct.hash_signature(bbstart, bbend)
        if bbid not in packed_result.keys():
            packed_result[bbid] = {}
        # check if this call is already inside the list
        if callloc not in packed_result[bbid].keys():
            # first time we see this call site: create the CallInfo
            # ("lucky" is the query-side sentinel for "no value")
            info = dstruct.CallInfo(callloc, calltype, funcmeta, bbid)
            info.callee.append(calleepacked)
            # FUTURE WORK: the async delayed mechanism, for now just ignore
            # moreover, check if this loc is specific
            # if callloc in specific_callback_functions.keys():
            #     info.callee.append(
            #         specific_callback_functions[callloc].getID())
            if constantargval != "lucky":
                info.constantarg.append((constantargidx, constantargval))
            if functionargpacked != "lucky":
                info.functionarg.append((functionargpacked, functionargidx))
            packed_result[bbid][callloc] = info
        else:
            # possible more informations here (merge into existing entry)
            info = packed_result[bbid][callloc]
            assert (calltype == info.type)
            if calleepacked not in info.callee:
                info.callee.append(calleepacked)
            if constantargval != "lucky" and (
                    constantargidx, constantargval) not in info.constantarg:
                info.constantarg.append((constantargidx, constantargval))
            if functionargpacked != "lucky" and (
                    functionargpacked, functionargidx) not in info.functionarg:
                info.functionarg.append((functionargpacked, functionargidx))
    return packed_result
def ql_get_unreg_functions_from_notifier(
        unregfunc: dstruct.FuncMeta,
        database: str) -> List[dstruct.CallInfo]:
    """Return CallInfo objects for the calls reached from the given
    notifier-based unregistration function."""
    query = queries.QueriesDict["GetUnregFromNotifier"] % (
        prepare_ql_string(unregfunc.name),
        prepare_ql_string(unregfunc.file)
    )
    result = run_codeql(query=query,
                        query_name="GetUnregFromNotifier_{}".format(database),
                        database=database)
    infos = []
    for row in result.content:
        # row layout: call location | callee name | callee file | bb ident
        bbstart, bbend = row[3].split('+')
        info = dstruct.CallInfo(
            row[0],
            "direct",
            unregfunc,
            bbid=dstruct.hash_signature(bbstart, bbend),
        )
        info.callee.append(dstruct.FuncMeta(row[1], row[2]).getID())
        infos.append(info)
    return infos
def ql_get_dealloc_funcnode(funcids: list, database = "") -> tuple:
    """Intersect ``funcids`` with the known deallocation functions.

    :param funcids: function ids to test against ``dealloc_kfunctions``
    :param database: unused; kept for signature compatibility with the
        other ql_* wrappers
    :return: (matching function ids, their associated extra columns)

    fix: the return annotation previously said ``-> list`` although the
    function returns a 2-tuple.
    """
    # recover funcid to meta
    deallocfuncids = [info[0].getID() for info in dealloc_kfunctions]
    l1 = list(set(funcids).intersection(set(deallocfuncids)))
    l2 = [dealloc_kfunctions[deallocfuncids.index(name)][1] for name in l1]
    return l1, l2
def ql_get_pointsTo_call(
        location: str, argidx: int, identifier: str, database: str) -> list:
    """Run stage-1 points-to analysis for the call at ``location`` /
    ``argidx`` and return the raw rows."""
    full_query = (queries.QueriesDict["HeapCommon"]
                  + queries.QueriesDict["PointsToAnlysisS1"] % (
                      prepare_ql_string(location),
                      argidx,
                      identifier))
    return run_codeql(query=full_query,
                      query_name="PointsToAnlysisS1_{}".format(database),
                      database=database).content
def ql_get_dereference_sites_with_pointsto(
        locations: List[str], identifier: str, database: str) -> list:
    """Run stage-2 points-to analysis over the given locations and return
    the raw dereference-site rows.

    :param locations: source locations to splice into the query
    :param identifier: identifier text spliced into the query template
    :param database: CodeQL database to query
    """
    # idiom fix: join the prepared locations as "loc1, loc2, ..." instead
    # of the previous quadratic concatenate-then-strip loop (identical
    # output, including the empty-locations case)
    locations_parsed = ', '.join(prepare_ql_string(location)
                                 for location in locations)
    query = queries.QueriesDict["HeapCommon"] + queries.QueriesDict["PointsToAnlysisS2"] % (
        identifier,
        locations_parsed
    )
    result = run_codeql(query=query,
                        query_name="PointsToAnlysisS2_{}".format(database),
                        database=database)
    return result.content
def ql_get_pointsTo_allowinacc(
        location: str, argidx: int, identifier: str, database: str) -> list:
    """Run points-to analysis (inaccuracy allowed) for the call at
    ``location`` / ``argidx`` and return the raw rows.

    fix: the query previously prepended QueriesDict["PointsToAnalysis"]
    (with its raw, unformatted ``%`` placeholders) to itself instead of
    the "HeapCommon" prelude used by the S1/S2 points-to wrappers above.
    """
    query = queries.QueriesDict["HeapCommon"] + queries.QueriesDict["PointsToAnalysis"] % (
        prepare_ql_string(location),
        argidx,
        identifier
    )
    result = run_codeql(query=query,
                        query_name="PointsToAnlysis_{}".format(database),
                        database=database)
    return result.content
def ql_get_sanitize_conditions(functionsMeta: list, database: str) -> list:
    """Fetch sanitize-condition rows for every function id in
    ``functionsMeta``, split per input group."""
    query = template.CodeQLTemplate(
        queries.QueriesDict["GetSanitizeConditions"])
    for funcid in functionsMeta:
        name, path = dstruct.parse_funcid(funcid)
        query.begin_group()
        query.add_item(prepare_ql_string(name),
                       prepare_ql_string(path))
        query.end_group()
    raw = run_codeql(query.get_query(),
                     query_name="GetSanitizeConditions_{}".format(database),
                     database=database)
    return split_ql_table(raw, query.next_tag)
#
# Queries Wrapper P3 Relevant
#
lock_kfunctions = []
unlock_kfunctions = []
# Additional lock types may be added here in the future
bits_r_kfunctions = []
bits_w_kfunctions = []
# identifiers
lockunlock_callinfo_identifiers = []
# call location -> (target name, target file, identifier,
# "locking"/"unlocking", enclosing name, enclosing file);
# filled by do_lockunlock_callinfo_process
lockunlock_callinfo_identifiers_dict = {}
# raw GetBitsOps rows; filled by do_bitw_callinfo_process
bits_r_identifiers = []
bits_w_identifiers = []
def ql_get_snullwrite_bbs(bbs, database: str) -> list:
    """Query the basic blocks in ``bbs`` for NULL-write sites, split per
    input group."""
    query = template.CodeQLTemplate(queries.QueriesDict["GetSNullWrite"])
    for location in bbs:
        query.begin_group()
        query.add_item(prepare_ql_string(location))
        query.end_group()
    raw = run_codeql(query.get_query(),
                     query_name="GetSNullWrite_{}".format(database),
                     database=database)
    return split_ql_table(raw, query.next_tag)
def ql_get_bbconditions(innerfunc: str, bbidentifiers, database: str) -> list:
    """Query and parse the guarding condition of each basic block.

    :param innerfunc: function in which the basic blocks live
    :param bbidentifiers: iterable of (start, end) basic-block identifiers
    :param database: CodeQL database to query
    :return: list parallel to ``bbidentifiers``; each entry is the result
        of ``dstruct.parseGenCondition`` or None when the condition is
        unknown / the query returned nothing
    """
    query = template.CodeQLTemplate(queries.QueriesDict["GetBBConditions"])
    for identifiers in bbidentifiers:
        query.begin_group()
        query.add_item(prepare_ql_string(identifiers[0]),
                       prepare_ql_string(identifiers[1]))
        query.end_group(prepare_ql_string(innerfunc))
    result = run_codeql(query.get_query(),
                        query_name="GetBBConditions_{}".format(database),
                        database=database)
    result = split_ql_table(result, query.next_tag)
    # well, we need to parse the returned conditions; each row is
    # (tag, "---"-joined condition words)
    parsed_result = []
    for index, r in enumerate(result):
        if not len(r):
            utils.Logger.error(
                "GetBBConditions with argument func: {}, bb {}-{} returns empty result???"
                .format(innerfunc, bbidentifiers[index][0], bbidentifiers[index][1]))
            parsed_result.append(None)
            continue
        conditionInfo = r[0]
        if len(r) > 1:
            # prefer the first row whose tag is not "unknown"
            # fix: the old loop compared the whole row (a list) against
            # "unknown" -- always true, so r[0] was always picked -- and
            # also shadowed the enclosing loop variable `index`
            for candidate in r:
                if candidate[0] != "unknown":
                    conditionInfo = candidate
                    break
        expect_tag = conditionInfo[0]
        if expect_tag == "unknown":
            parsed_result.append(None)
        else:
            # tags other than "true"/"false" are not expected here
            if expect_tag == "true":
                expect = True
            elif expect_tag == "false":
                expect = False
            conditionwords = conditionInfo[1].split('---')
            parsed_result.append(
                dstruct.parseGenCondition(expect, conditionwords))
    assert (len(bbidentifiers) == len(parsed_result))
    return parsed_result
def ql_special_callinfo_process_detect(database: str) -> None:
    """Precompute lock/unlock and bit read/write call information.

    Handling these separately every time is time consuming, so collect
    them here once to speed up the later procedure.
    """
    do_lockunlock_callinfo_process(database)
    do_bitw_callinfo_process(database)
    # in the future: add specific indirect callinfo here, which means
    # queue_work, queue_delayed_work, and mod_timer
def is_callinfo_lockunlock(info: dstruct.CallInfo) -> dstruct.LockunLockInfo:
    """Return a ``LockunLockInfo`` if ``info``'s call site is a known
    lock/unlock call, otherwise None.

    Lookup is by call location in ``lockunlock_callinfo_identifiers_dict``.
    """
    callloc = info.location
    if callloc not in lockunlock_callinfo_identifiers_dict:
        return None
    _, _, identifier, lockornot, _, _ = lockunlock_callinfo_identifiers_dict[callloc]
    lockornotbool = True if lockornot == "locking" else False
    lockinfo = dstruct.LockunLockInfo(info.location, info.type,
                                      info.caller, info.bbid,
                                      lockornotbool, "default",
                                      identifier)
    # fix: the callee list was previously assigned to `caller`, which
    # clobbered the caller already passed to the constructor; the sibling
    # is_callinfo_bitrw copies `callee = info.callee`
    lockinfo.callee = info.callee
    lockinfo.constantarg = info.constantarg
    lockinfo.functionarg = info.functionarg
    return lockinfo
def is_callinfo_lock_wrapper(info: dstruct.CallInfo,
                             graph: dstruct.CallGraph) -> str:
    """Return the lock identifier if ``info``'s single direct callee is a
    lock wrapper, i.e. it contains exactly one lock call and no unlock
    call; return "" otherwise."""
    # multi-target indirect calls cannot be wrappers
    if len(info.callee) > 1:
        return ""
    # non-direct call targets cannot be wrappers
    if info.type != "direct":
        return ""
    target_name, target_file = dstruct.parse_funcid(info.callee[0])
    target_id = dstruct.FuncMeta(target_name, target_file).getID()
    if target_id not in graph.graph.nodes:
        return ""
    target_node = graph.graph.nodes[target_id]
    if "cfg" not in target_node or not target_node["cfg"]:
        return ""
    found_count = 0
    found_ident = ""
    for callinfo in target_node["cfg"].getAll_callinfos():
        lockinfo = is_callinfo_lockunlock(callinfo)
        if not lockinfo:
            continue
        if not lockinfo.lock:
            # any unlock inside disqualifies the callee as a lock wrapper
            return ""
        found_count += 1
        if found_count > 1:
            # more than one lock call: not a simple wrapper
            return ""
        found_ident = lockinfo.identifier
    return found_ident
def is_callinfo_unlock_wrapper(info: dstruct.CallInfo,
                               graph: dstruct.CallGraph) -> str:
    """Return the lock identifier if ``info``'s single direct callee is an
    unlock wrapper, i.e. it contains exactly one unlock call and no lock
    call; return "" otherwise."""
    # multi-target indirect calls cannot be wrappers
    if len(info.callee) > 1:
        return ""
    # non-direct call targets cannot be wrappers
    if info.type != "direct":
        return ""
    target_name, target_file = dstruct.parse_funcid(info.callee[0])
    target_id = dstruct.FuncMeta(target_name, target_file).getID()
    if target_id not in graph.graph.nodes:
        return ""
    target_node = graph.graph.nodes[target_id]
    if "cfg" not in target_node or not target_node["cfg"]:
        return ""
    found_count = 0
    found_ident = ""
    for callinfo in target_node["cfg"].getAll_callinfos():
        lockinfo = is_callinfo_lockunlock(callinfo)
        if not lockinfo:
            continue
        if lockinfo.lock:
            # any lock inside disqualifies the callee as an unlock wrapper
            return ""
        found_count += 1
        if found_count > 1:
            # more than one unlock call: not a simple wrapper
            return ""
        found_ident = lockinfo.identifier
    return found_ident
def do_lockunlock_callinfo_process(database: str) -> None:
    """Populate ``lockunlock_callinfo_identifiers_dict``, mapping every
    known lock/unlock call location to
    (target funcname, target funcfile, identifier,
     "locking"/"unlocking", enclosing funcname, enclosing funcfile).

    Covered cases:
      Type-A: identifier-based locks/unlocks
        1. simple global locks (rwlock_t, spinlock_t, mutex),
           global variable location as identifier
        2. global locks embedded in structs, variable location as identifier
        3. dynamic locks: lock initialization places (all macro), whose
           locations will be used for later pointsTo analysis
      Type-B: simple unified lock/unlock pairs
        (lock_sock/release_sock, rtnl_lock/rtnl_unlock)

    Refactored: the four identical unpack-and-store loops are now one
    private helper.
    """
    global lockunlock_callinfo_identifiers_dict
    utils.Logger.log("processing locking/unlocking functions")

    def _record(result, warn_on_conflict=False):
        # row layout: call location | target funcname | target funcfile |
        # identifier | locking-or-unlocking | enclosing name | enclosing file
        for row in result.content:
            (callinfo_loc, target_funcname, target_funcfile,
             identifier, lockornot, enclosename, enclosefile) = row
            if warn_on_conflict and \
                    callinfo_loc in lockunlock_callinfo_identifiers_dict and \
                    identifier != lockunlock_callinfo_identifiers_dict[callinfo_loc][2]:
                utils.Logger.warn("call location overlap, one identifier is {} and another is {}".format(
                    identifier, lockunlock_callinfo_identifiers_dict[callinfo_loc][2]))
            # if callinfo_loc already exists, nevermind, just occupy it
            lockunlock_callinfo_identifiers_dict[callinfo_loc] = (
                target_funcname, target_funcfile, identifier, lockornot,
                enclosename, enclosefile)

    # Type-A.1: simple global locks
    _record(run_codeql(query=queries.QueriesDict["GetGlobalLockSimple"],
                       query_name="GetGlobalLockSimpleIdent_{}".format(database),
                       database=database))
    # Type-A.2: global locks in structs
    _record(run_codeql(query=queries.QueriesDict["GetGStructLock"],
                       query_name="GetGStructLockIdent_{}".format(database),
                       database=database))
    # Type-A.3: dynamic locks (initialization locations as identifiers)
    _record(run_codeql(query=queries.QueriesDict["GetDLockLocations"],
                       query_name="GetDLockLocations_{}".format(database),
                       database=database),
            warn_on_conflict=True)
    # Type-B: simple unified lock/unlock pairs
    # --- lock_sock and release_sock ---
    # --- rtnl_lock and rtnl_unlock ---
    _record(run_codeql(query=queries.QueriesDict["GetALock"],
                       query_name="GetALock_{}".format(database),
                       database=database))
def is_callinfo_bitrw(info: dstruct.CallInfo):
    """Return a ``BitsRWInfo`` if ``info``'s call location matches a known
    bit write or bit read site, otherwise None.

    Identifier rows are laid out as:
      0: call location, 1: valueMacro, 2: value, 3: present,
      4: call function, 5: call file, 6: call target
    """
    location = info.location
    # traverse by call location: write sites first, then read sites
    # (same precedence as before)
    for is_write, rows in ((True, bits_w_identifiers),
                           (False, bits_r_identifiers)):
        for row in rows:
            if row[0] != location:
                continue
            # found one
            packed = dstruct.BitsRWInfo(info.location, info.type,
                                        info.caller, info.bbid, is_write,
                                        row[1], row[2], row[3])
            packed.callee = info.callee
            packed.constantarg = info.constantarg
            packed.functionarg = info.functionarg
            return packed
    return None
def do_bitw_callinfo_process(database: str) -> None:
    """Populate ``bits_w_identifiers`` / ``bits_r_identifiers`` with the
    call sites of bit write and bit read helpers."""
    utils.Logger.log("processing bits read/write functions")
    global bits_r_identifiers, bits_w_identifiers
    # bit-writing helpers (test_and_change_bit intentionally commented out)
    bitsw_funcnames = [
        "set_bit",
        "clear_bit",
        "change_bit",
        "test_and_clear_bit",
        # "test_and_change_bit"
    ]
    # build a QL list literal of the prepared names
    # NOTE(review): this leaves a trailing ", " before the closing bracket
    # (e.g. ["set_bit", "clear_bit", ]); presumably accepted by the QL
    # parser since this code is in use -- confirm before reformatting
    functionsStr = "["
    for func in bitsw_funcnames:
        functionsStr += prepare_ql_string(func)
        functionsStr += ", "
    functionsStr += "]"
    query = queries.QueriesDict["GetBitsOps"] % (functionsStr)
    result1 = run_codeql(query=query,
                         query_name="GetBitsOps1_{}".format(database),
                         database=database)
    bits_w_identifiers = result1.content
    # bit-reading helpers (test_and_clear_bit appears in both lists)
    bitsw_funcnames = ["test_bit", "test_and_clear_bit", "test_and_change_bit"]
    functionsStr = "["
    for func in bitsw_funcnames:
        functionsStr += prepare_ql_string(func)
        functionsStr += ", "
    functionsStr += "]"
    query = queries.QueriesDict["GetBitsOps"] % (functionsStr)
    result2 = run_codeql(query=query,
                         query_name="GetBitsOps2_{}".format(database),
                         database=database)
    bits_r_identifiers = result2.content
8342236103 | from django.shortcuts import render,redirect
from django.http import HttpResponse
# Create your views here.
from django.contrib.auth import login, authenticate
from django.contrib.auth.forms import UserCreationForm
import nltk
from nltk.sentiment.vader import SentimentIntensityAnalyzer
nltk.download('vader_lexicon')
import json
from django.views.generic.base import TemplateView
from django.views.generic import View
from django.http import JsonResponse
from chatterbot import ChatBot
from chatterbot.ext.django_chatterbot import settings
from chatterbot.trainers import ChatterBotCorpusTrainer
from .tasks import send_data
from django.core.mail import send_mail
from django.conf import settings
chatbot = ChatBot('Robert')
trainer = ChatterBotCorpusTrainer(chatbot)
chatbot.set_trainer(ChatterBotCorpusTrainer)
chatbot.train("chatterbot.corpus.english")
class ChatterBotApiView(View):
"""
Provide an API endpoint to interact with ChatterBot.
"""
#chatterbot = ChatBot(**settings.CHATTERBOT)
def post(self, request, *args, **kwargs):
"""
Return a response to the statement in the posted data.
* The JSON data should contain a 'text' attribute.
"""
input_data = json.loads(request.body.decode('utf-8'))
if 'text' not in input_data:
return JsonResponse({
'text': [
'The attribute "text" is required.'
]
}, status=400)
#response = self.chatterbot.get_response(input_data)
response = chatbot.get_response(input_data)
response_data = response.serialize()
send_data.delay(response_data)
if response =='bye':
return JsonResponse(response_data, status=200)
return JsonResponse(response_data, status=200)
def get(self, request, *args, **kwargs):
"""
Return data corresponding to the current conversation.
"""
return JsonResponse({
'name': self.chatterbot.name
})
def signup(request):
if request.method == 'POST':
form = UserCreationForm(request.POST)
if form.is_valid():
form.save()
username = form.cleaned_data.get('username')
raw_password = form.cleaned_data.get('password1')
user = authenticate(username=username, password=raw_password)
login(request, user)
return redirect('home')
else:
form = UserCreationForm()
return render(request,'signup.html',{'form': form})
def myview(request):
return render(request,'science_bot.html')
def index(request):
return render(request,'index.html')
def email(request):
subject = 'Thank you for registering to our site'
message = ' it means a world to us '
email_from = settings.EMAIL_HOST_USER
recipient_list = ['dnnsmoyo@gmail.com',]
send_mail( subject, message, email_from, recipient_list )
return redirect('/') | Dnnsmoyo/botlearnai | api/views.py | views.py | py | 2,997 | python | en | code | 0 | github-code | 13 |
70722677778 | def F(b):
return (b)*5/9+32
def C(d):
return ((d)-32)/5*9
#b為輸入的攝氏溫度
#d為輸入的華氏溫度
def main():
o=(input("Enter a action:"))
if o.isdigit():
a=int(o)
if a==1:
b=input("Enter Celsius temperature:")
if b.isdigit():
c=F(int(b))
print("Fahrenheit tempreature:"+str(c))
else:
print("Error, please enter numeric input")
return main()
if a==2:
d=input("Enter Fahrenheit temperature:")
if d.isdigit():
e=C(int(d))
print("Celsius temperature:"+str(e))
else:
print("Error, please enter numeric input")
return main()
if a==3:
print("Exit\nHave a nice day!")
else:
return main()
else:
return main()
main()
| alisonsyue100/py4e | W1-7.py | W1-7.py | py | 962 | python | en | code | 0 | github-code | 13 |
4593941785 | class Solution:
# @param A, B: Two string.
# @return: the length of the longest common substring.
def longestCommonSubstring(self, A, B):
m = len(A)
n = len(B)
if m == 0 or n == 0:
return 0
ans = 0
for i in range(m):
for j in range(n):
l = 0
while i + l < m and j + l < n and A[i + l] == B[j + l]:
l += 1
if l > ans:
ans = l
return ans
def longestCommonSubstring2(self, A, B):
m = len(A)
n = len(B)
if m == 0 or n == 0:
return 0
dp = [[0] * (n + 1) for i in range(m + 1)]
maxLen = 0
for i in range(1, m + 1):
for j in range(1, n + 1):
if A[i - 1] == B[j - 1]:
dp[i][j] = dp[i - 1][j - 1] + 1
maxLen = max(dp[i][j], maxLen)
else:
dp[i][j] = 0
return maxLen
def longestCommonSubstring1(self, A, B):
# write your code here
# Brute force
lenA, lenB = len(A), len(B)
for subStrLen in range(lenA, 0, -1): # Please mind the boundary
for startPos in range(lenA - subStrLen + 1):
subStr = A[startPos: startPos + subStrLen]
if B.find(subStr) != -1:
return subStrLen
return 0 | ultimate010/codes_and_notes | 79_longest-common-substring/longest-common-substring.py | longest-common-substring.py | py | 1,489 | python | en | code | 0 | github-code | 13 |
39828788430 | #150 DAYS PYTHON CODING
#DAY2 PYTHON CODE
#TO FIND A FACTORIAL OF A GIVEN INPUT NUMBER
#4!=4*3*2*1=24
def fact(number):
if number==0:
return 1
return number*fact(number-1)
print("enter the value of the number")
number=int(input())
print("factorial of number is :", fact(number)) | Bodlavikram/BASIC-PYTHON-CODE-FOR-PARTICE | day2codefactorial.py | day2codefactorial.py | py | 341 | python | en | code | 0 | github-code | 13 |
17058508794 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class QrcodeEntity(object):
def __init__(self):
self._desk_id = None
self._qrcode_id = None
self._relation_id = None
self._shop_id = None
@property
def desk_id(self):
return self._desk_id
@desk_id.setter
def desk_id(self, value):
self._desk_id = value
@property
def qrcode_id(self):
return self._qrcode_id
@qrcode_id.setter
def qrcode_id(self, value):
self._qrcode_id = value
@property
def relation_id(self):
return self._relation_id
@relation_id.setter
def relation_id(self, value):
self._relation_id = value
@property
def shop_id(self):
return self._shop_id
@shop_id.setter
def shop_id(self, value):
self._shop_id = value
def to_alipay_dict(self):
params = dict()
if self.desk_id:
if hasattr(self.desk_id, 'to_alipay_dict'):
params['desk_id'] = self.desk_id.to_alipay_dict()
else:
params['desk_id'] = self.desk_id
if self.qrcode_id:
if hasattr(self.qrcode_id, 'to_alipay_dict'):
params['qrcode_id'] = self.qrcode_id.to_alipay_dict()
else:
params['qrcode_id'] = self.qrcode_id
if self.relation_id:
if hasattr(self.relation_id, 'to_alipay_dict'):
params['relation_id'] = self.relation_id.to_alipay_dict()
else:
params['relation_id'] = self.relation_id
if self.shop_id:
if hasattr(self.shop_id, 'to_alipay_dict'):
params['shop_id'] = self.shop_id.to_alipay_dict()
else:
params['shop_id'] = self.shop_id
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = QrcodeEntity()
if 'desk_id' in d:
o.desk_id = d['desk_id']
if 'qrcode_id' in d:
o.qrcode_id = d['qrcode_id']
if 'relation_id' in d:
o.relation_id = d['relation_id']
if 'shop_id' in d:
o.shop_id = d['shop_id']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/QrcodeEntity.py | QrcodeEntity.py | py | 2,288 | python | en | code | 241 | github-code | 13 |
27736447652 | import torch
import torchaudio
from torch.utils.data import Dataset, random_split
from sklearn.model_selection import train_test_split
from .preprocess import preprocess_audio
from torchvision import transforms
# Define your dataset class
class CustomDataset(Dataset):
def __init__(self,
data,
targets,
Wave2Vec:bool = False):
self.data = data
self.targets = targets
self.Wave2Vec = Wave2Vec
self.new_sample_rate = 16000
desired_duration = 4 #was 2 seconds
desired_samples = int(desired_duration * self.new_sample_rate)
self.transform = transforms.Compose([transforms.Lambda(lambda x: x[:, :desired_samples] if x.size(1) >= desired_samples else x)])
def __getitem__(self, index):
if self.Wave2Vec==True:
x, sample_rate = torchaudio.load(self.data[index])
resampler = torchaudio.transforms.Resample(orig_freq=sample_rate,
new_freq=self.new_sample_rate)
x = resampler(x)
x = self.transform(x)
y = self.targets[index]
return x.squeeze(0), y
x = preprocess_audio(self.data[index])
y = self.targets[index]
return x, y
def __len__(self):
return len(self.data)
def collate_fn(batch):
"""
This is a function for collating a batch of variable-length sequences into a single tensor, which is
useful for training a neural network with PyTorch.
The input to this function is a batch of samples, each containing a source and target sequence.
The function extracts the source and target sequences from each sample, and then pads them to ensure
that all sequences in the batch have the same length. This is necessary because PyTorch requires all
inputs to a neural network to have the same shape.
The function uses the PyTorch pad_sequence function to pad the sequences. pad_sequence is called with
the batch_first=True argument to ensure that the batch dimension is the first dimension of the output
tensor. The padding_value argument is set to 0 to pad with zeros.
The function returns the padded source and target sequences as a tuple.
"""
sources = [item[0] for item in batch]
targets = [item[1] for item in batch]
sources = torch.nn.utils.rnn.pad_sequence(sources, batch_first=True)
# targets = torch.nn.utils.rnn.pad_sequence(targets, batch_first=True)
return sources, torch.tensor(targets) | yriyazi/Hubert-Emotion_Detection | dataloaders/datasets.py | datasets.py | py | 2,667 | python | en | code | 2 | github-code | 13 |
25667998966 | from datetime import datetime
from os import path
import time
USE = True
def backup(pxf):
''' Backup Data extension'''
lb = path.join(pxf.Settings.etc_folder, 'last_backup')
if not path.isfile(lb):
with open(lb, 'wb') as f:
f.write(str(time.time()))
with open(lb, 'rb') as f:
if float(f.read().strip()) + pxf.Settings.backup_at >= time.time():
pxf._db.backup(path.join(pxf.Settings.backup_folder, '%s_%s_%s_backup.db.gz' % (
str(datetime.month), str(datetime.day), str(datetime.year))))
def setup(pxf):
pxf.tasks.append(backup) | Skarlett/proxbox | src/tasks/standard.py | standard.py | py | 573 | python | en | code | 0 | github-code | 13 |
40004671384 | import sys
import os
import argparse
import json
import readline
from chunkypipes.util.commands import BaseCommand
ARGV_PIPELINE_NAME = 0
ARGV_FIRST_ARGUMENT = 0
EXIT_CMD_SUCCESS = 0
EXIT_CMD_SYNTAX_ERROR = 2
readline.set_completer_delims(' \t\n;')
readline.parse_and_bind('tab: complete')
class Command(BaseCommand):
@staticmethod
def usage():
return 'chunky configure <pipeline-name> [-h] [--location LOCATION] [--blank]'
def configure(self, config_dict, current_config, blank=False):
for key in config_dict:
if type(config_dict[key]) == dict:
self.configure(config_dict[key], current_config.get(key, {}), blank)
else:
if not blank:
prompt = config_dict[key].strip().strip(':')
config_dict[key] = (raw_input(prompt + ' [{}]: '.format(current_config.get(key, ''))) or
current_config.get(key, ''))
else:
config_dict[key] = ''
def help_text(self):
return 'Create a configuration file for a pipeline.'
def run(self, command_args):
parser = argparse.ArgumentParser(prog='chunky configure', usage=self.usage(), description=self.help_text())
parser.add_argument('--location', help=('Path to which to save this config file. ' +
'Defaults to install directory.'))
parser.add_argument('--blank', action='store_true',
help='Skip configuration and create a blank configuration file.')
if not command_args or command_args[ARGV_FIRST_ARGUMENT].lower() in ['-h', '--help', 'help']:
parser.print_help()
sys.exit(EXIT_CMD_SUCCESS)
pipeline_name = command_args[ARGV_PIPELINE_NAME]
pipeline_class = self.get_pipeline_class(pipeline_name)
if pipeline_class is not None:
# Parse configure options
config_args_parser = argparse.ArgumentParser(prog='chunky configure {}'.format(pipeline_name))
config_args_parser.add_argument('--location',
default=os.path.join(self.home_configs,
'{}.json'.format(pipeline_name)),
help=('Path to which to save this config file. ' +
'Defaults to install directory.'))
config_args_parser.add_argument('--blank', action='store_true',
help='Skip configuration and create a blank configuration file.')
configure_args = vars(config_args_parser.parse_args(command_args[1:]))
save_location = configure_args['location']
is_blank = configure_args['blank']
# If this config already exists, prompt user before overwrite
if os.path.isfile(save_location):
overwrite = raw_input('Config for {} already exists at {}, overwrite? [y/n] '.format(
pipeline_name,
save_location
))
# If user responds no, exit immediately
if overwrite.lower() in {'no', 'n'}:
sys.stderr.write('\nUser aborted configuration.\n')
sys.exit(EXIT_CMD_SUCCESS)
# Get configuration from pipeline, recursively prompt user to fill in info
config_dict = pipeline_class.configure()
try:
current_config = json.loads(open(os.path.join(self.home_configs,
'{}.json'.format(pipeline_name))).read())
except:
current_config = {}
try:
self.configure(config_dict, current_config, is_blank)
if is_blank:
sys.stderr.write('Blank configuration generated.\n')
except (KeyboardInterrupt, EOFError):
sys.stderr.write('\nUser aborted configuration.\n')
sys.exit(EXIT_CMD_SUCCESS)
# Write config out to file
try:
with open(save_location, 'w') as config_output:
config_output.write(json.dumps(config_dict, indent=4) + '\n')
sys.stderr.write('Configuration file successfully written.\n')
except IOError:
sys.stderr.write('Could not open file for writing.\n')
sys.exit(1)
else:
# If pipeline class doesn't exist, exit immediately
sys.stdout.write('Pipeline {name} does not exist in {home}\n'.format(
name=pipeline_name,
home=self.home_pipelines + '/'
))
sys.exit(EXIT_CMD_SYNTAX_ERROR)
| djf604/chunky-pipes | chunkypipes/util/commands/configure.py | configure.py | py | 4,872 | python | en | code | 5 | github-code | 13 |
40980396253 | #Name: Juan Gonzalez
#ID: 1808943
# Solve the 2x2 linear system
#   num1*x + num2*y = num3
#   num4*x + num5*y = num6
# by brute-forcing every integer pair (x, y) in [-10, 10]; only integer
# solutions inside that range are ever reported.
num1 = int(input())
num2 = int(input())
num3 = int(input())
num4 = int(input())
num5 = int(input())
num6 = int(input())
# works flips to True once a satisfying pair is found
works = False
x = 0
y = 0
for i in range(-10,11):
    for j in range(-10, 11):
        if ((((num1 * i) + (num2 * j)) == num3) and (((num4 * i) + (num5 * j)) == num6)):
            x = i
            y = j
            works = True
            break
    # propagate the inner break out of the outer loop as well
    if works == True:
        break
if works == True:
    print(x, y)
else:
    print("No solution") | jagonz-coding/hello-world | Homework2/6.22.py | 6.22.py | py | 533 | python | en | code | 0 | github-code | 13 |
32183778921 | import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
from konlpy.tag import Komoran, Hannanum, Kkma, Okt
from tqdm import tqdm
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Embedding, Dense, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
data = pd.read_csv('/nl/filmrate/data/filmrate_data_ansi.csv', encoding='cp949')
text = data.iloc[:,[0,3,5]]
random.seed(1)
text['text'] = text['text'].str.replace("[^ㄱ-ㅎㅏ-ㅣ가-힣 ]","")
text['rate'] = text['rate'].replace('보고싶어요', np.nan, regex=True ) # 공백은 Null 값으로 변경
text['rate'] = text['rate'].replace('보는중', np.nan, regex=True ) # 공백은 Null 값으로 변경
text = text.dropna(how = 'any') # Null 값이 존재하는 행 제거
train_data = text[:2000]
test_data = text[2000:]
print("Train: {} | Val: {}".format(len(train_data), len(test_data)))
komoran = Komoran()
hannanum = Hannanum()
kkma = Kkma()
okt = Okt()
tokenizer = Tokenizer()
stopwords = ['의','가','이','은','들','는','좀','잘','걍','과','도','를','으로','자','에','와','한','하다']
X_train = []
for sentence in tqdm(train_data['text']):
tokenized_sentence = okt.morphs(sentence, stem=True) # 토큰화
stopwords_removed_sentence = [word for word in tokenized_sentence if not word in stopwords] # 불용어 제거
X_train.append(stopwords_removed_sentence)
X_test = []
for sentence in tqdm(test_data['text']):
tokenized_sentence = okt.morphs(sentence, stem=True) # 토큰화
stopwords_removed_sentence = [word for word in tokenized_sentence if not word in stopwords] # 불용어 제거
X_test.append(stopwords_removed_sentence)
tokenizer.fit_on_texts(X_train)
threshold = 3
total_cnt = len(tokenizer.word_index) # 단어의 수
rare_cnt = 0 # 등장 빈도수가 threshold보다 작은 단어의 개수를 카운트
total_freq = 0 # 훈련 데이터의 전체 단어 빈도수 총 합
rare_freq = 0 # 등장 빈도수가 threshold보다 작은 단어의 등장 빈도수의 총 합
# 단어와 빈도수의 쌍(pair)을 key와 value로 받는다.
for key, value in tokenizer.word_counts.items():
total_freq = total_freq + value
# 단어의 등장 빈도수가 threshold보다 작으면
if(value < threshold):
rare_cnt = rare_cnt + 1
rare_freq = rare_freq + value
print('단어 집합(vocabulary)의 크기 :',total_cnt)
print('등장 빈도가 %s번 이하인 희귀 단어의 수: %s'%(threshold - 1, rare_cnt))
print("단어 집합에서 희귀 단어의 비율:", (rare_cnt / total_cnt)*100)
print("전체 등장 빈도에서 희귀 단어 등장 빈도 비율:", (rare_freq / total_freq)*100)
vocab_size = total_cnt - rare_cnt + 1
print('단어 집합의 크기 :',vocab_size)
tokenizer = Tokenizer(vocab_size) # 빈도수 2 이하인 단어는 제거
tokenizer.fit_on_texts(X_train)
X_train = tokenizer.texts_to_sequences(X_train)
X_test = tokenizer.texts_to_sequences(X_test)
y_train = np.array(pd.to_numeric(train_data['rate'])*2)
y_test = np.array(pd.to_numeric(test_data['rate'])*2)
y_train.astype(np.int);
y_test.astype(np.int);
y_train.shape
# print('리뷰의 최대 길이 :',max(len(l) for l in X_train))
# print('리뷰의 평균 길이 :',sum(map(len, X_train))/len(X_train))
# plt.hist([len(s) for s in X_train], bins=50)
# plt.xlabel('length of samples')
# plt.ylabel('number of samples')
# plt.show()
def below_threshold_len(max_len, nested_list):
    """Count the sequences in *nested_list* whose length is <= *max_len*.

    The original version only printed the percentage, and even that print
    was commented out, leaving the function with no observable result.
    It now returns the count; callers that ignored the implicit None are
    unaffected.

    Args:
        max_len: inclusive length threshold.
        nested_list: iterable of sized sequences (e.g. tokenized reviews).

    Returns:
        int: number of sequences with len(s) <= max_len.
    """
    cnt = sum(1 for s in nested_list if len(s) <= max_len)
    return cnt
max_len = 100
below_threshold_len(max_len, X_train)
# 전체 데이터의 길이는 30으로 맞춘다.
X_train = pad_sequences(X_train, maxlen = max_len)
X_test = pad_sequences(X_test, maxlen = max_len)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print("X_val:", X_train.shape) # X_val: (10000, 784)
print("Y_val:", y_train.shape) # Y_val: (10000, 28, 28, 10)
y_train[1]
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=4)
mc = ModelCheckpoint('best_model.h5', monitor='val_acc', mode='max', verbose=1, save_best_only=True)
model = Sequential()
model.add(Embedding(vocab_size, 100))
model.add(LSTM(128))
model.add(Dense(64, activation='softmax'))
model.add(Dense(11, activation='softmax'))
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
history = model.fit(X_train, y_train, epochs=100, callbacks=[es, mc], batch_size=64, validation_split=0.2)
def sentiment_predict(new_sentence):
    """Run the trained LSTM rating model on one raw Korean sentence.

    NOTE(review): the prediction is computed but neither returned nor
    printed, so calling this function has no observable effect -- the
    score should probably be returned or displayed.
    """
    new_sentence = okt.morphs(new_sentence, stem=True) # tokenize into morphemes (with stemming)
    new_sentence = [word for word in new_sentence if not word in stopwords] # drop stopwords
    encoded = tokenizer.texts_to_sequences([new_sentence]) # integer-encode with the fitted tokenizer
    pad_new = pad_sequences(encoded, maxlen = max_len) # pad to the training input length
    score = model.predict(pad_new)# predict (result currently unused)
sentiment_predict('와 개쩐다 정말 세계관 최강자들의 영화다')
sentiment_predict('이 영화 핵노잼 ㅠㅠ')
sentiment_predict('감독 뭐하는 놈이냐?') | kkkkang1009/AxperIance | ai/nl/filmrate/filmrate_modeling.py | filmrate_modeling.py | py | 5,442 | python | ko | code | 0 | github-code | 13 |
6270352567 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 19:43:18 2019
@author: E442282
"""
import numpy as np
import cv2
import os
import sys
from matplotlib import pyplot as plt
def getColorSpaces(image):
    """Return (channel-swapped, grayscale) versions of *image*.

    NOTE(review): the conversions use COLOR_RGB2BGR / COLOR_RGB2GRAY even
    though images in this script come from cv2.imread (BGR order), so
    'rgb' is simply the image with first/third channels swapped -- confirm
    the intended channel order.
    """
    rgb = cv2.cvtColor(image,cv2.COLOR_RGB2BGR)
    gray = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
    return rgb,gray
def getImageDimnesion(image):
    """Return the (height, width) pair of an image array.

    The name keeps the original 'Dimnesion' spelling so existing callers
    are unaffected.
    """
    rows, cols = image.shape[0], image.shape[1]
    return rows, cols
def showImage(image,title,cmap):
    """Draw *image* on the current matplotlib axes with *title*,
    axes hidden, using colormap *cmap* (caller is expected to plt.show())."""
    plt.imshow(image,cmap=cmap)
    plt.axis('off')
    plt.title(title)
def splitRGBChannels(image):
    """Split a 3-channel image into its planes.

    NOTE(review): cv2.split returns planes in storage order; for images
    read with cv2.imread that order is B, G, R -- so the names below are
    swapped relative to what they actually hold.  Confirm intended order.
    """
    red, green, blue= cv2.split(image)
    return red, green, blue
def getBinaryImage(gray,thr=127):
    """Binarize *gray* with a fixed threshold: pixels above *thr* become
    255, all others 0 (cv2.THRESH_BINARY)."""
    ret,thresh= cv2.threshold(gray,thr,255,cv2.THRESH_BINARY)
    return thresh
def getHistogramAdjusted(bgr):
    """Contrast-enhance a BGR image with CLAHE applied in LAB space."""
    lab = cv2.cvtColor(bgr, cv2.COLOR_BGR2LAB)
    lab_planes = cv2.split(lab)
    # Equalize only the L (lightness) plane so colors are preserved.
    clahe = cv2.createCLAHE(clipLimit=2.0,tileGridSize=(8,8))
    lab_planes[0] = clahe.apply(lab_planes[0])
    lab = cv2.merge(lab_planes)
    adjusted = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
    return adjusted
def applySobel(gray):
    """Return the L1 gradient magnitude |dI/dx| + |dI/dy| of *gray*
    computed with 3x3 Sobel filters in 64-bit float precision."""
    sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=3)
    sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=3)
    abs_sobelx = np.absolute(sobelx)
    abs_sobely = np.absolute(sobely)
    return abs_sobelx+abs_sobely
images_path=r'C:\SAI\IIIT\2019_Monsoon\DIP\Project\input'
images=os.listdir(images_path)
output_path=r'C:\SAI\IIIT\2019_Monsoon\DIP\Project\output_hsv_mask'
#for im in images[:]:
#
# img = cv2.imread(os.path.join(images_path,im))
#
# adjusted=getHistogramAdjusted(img)
# bilateral = cv2.bilateralFilter(img, 7, sigmaSpace = 75, sigmaColor =75)
# hsv_mask=getHSVMask(bilateral)
#
# _,gray=getColorSpaces(bilateral)
#
# _,gray2=getColorSpaces(hsv_mask)
#
# imgs_comb = np.hstack([img,hsv_mask])
# plt.figure(figsize=(12, 12))
# plt.axis('off')
# plt.title(im)
# plt.imshow(imgs_comb,cmap='gray')
for im in images[:]:
img = cv2.imread(os.path.join(images_path,im))
image=getHistogramAdjusted(img)
hMin = 0
sMin = 0
vMin = 210
hMax = 179
sMax = 180
vMax = 255
# Set minimum and max HSV values to display
lower = np.array([hMin, sMin, vMin])
upper = np.array([hMax, sMax, vMax])
# Create HSV Image and threshold into a range.
hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
mask1 = cv2.inRange(hsv, lower, upper)
output = cv2.bitwise_and(image,image, mask= mask1)
_,gray=getColorSpaces(output)
# ret_thresh,im_bw = cv2.threshold(gray,127,255,cv2.THRESH_BINARY)
kernel = np.ones((5,5),np.uint8)
erosion = cv2.erode(im_bw,kernel,iterations =1)
init_mask=erosion.copy()
mask = np.zeros(image.shape[:2],np.uint8)
mask[init_mask == 255] = 1
mask[init_mask == 0] = 2 #Guess everything else is background
bgdModel = np.zeros((1,65),np.float64)
fgdModel = np.zeros((1,65),np.float64)
mask, bgdModel, fgdModel = cv2.grabCut(image,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
mask[mask == 1] = 255
# _,gray=getColorSpaces(output)
# _,contours,h = cv2.findContours(gray,1,2)
#
# for cnt in contours:
# approx = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
## print( len(approx))
# if len(approx)==5:
## print( "pentagon")
# cv2.drawContours(output,[cnt],0,(0,0,255),-1)
# elif len(approx)==3:
## print ("triangle")
# cv2.drawContours(output,[cnt],0,(0,0,255),-1)
# elif len(approx)==4:
## print( "rectangle/square")
# cv2.drawContours(output,[cnt],0,(0,0,255),-1)
# elif len(approx) == 9:
## print( "half-circle")
# cv2.drawContours(output,[cnt],0,(0,0,255),-1)
# elif len(approx) > 15:
## print( "circle")
# cv2.drawContours(output,[cnt],0,(0,0,255),-1)
# imgs_comb = np.hstack([image,output])
plt.figure(figsize=(8, 8))
plt.axis('off')
plt.title(im)
plt.imshow(mask,cmap='gray')
#images_path=r'C:\SAI\IIIT\2019_Monsoon\DIP\Project\input_hsv_mask'
#images=os.listdir(images_path)
#
#output_path=r'C:\SAI\IIIT\2019_Monsoon\DIP\Project\output_hsv_mask'
#
#for im in images[:]:
# print(im)
# img = cv2.imread(os.path.join(images_path,im))
#
# adjusted=getHistogramAdjusted(img)
# bilateral = cv2.bilateralFilter(adjusted, 7, sigmaSpace = 75, sigmaColor =75)
#
# rgb,gray=getColorSpaces(bilateral)
# canny=CannyThreshold(100,gray,img)
# plt.axis('off')
# plt.title('Bilateral')
# plt.imshow(bilateral,cmap='gray')
# plt.show()
#
# plt.axis('off')
# plt.title('Binary after Bilateral')
# plt.imshow(binary,cmap='gray')
# plt.show()
#
# plt.axis('off')
# plt.title('HSV Mask-Trial and Error')
# plt.imshow(hsv_mask,cmap='gray')
# plt.show()
#
# kernel = np.ones((5,5),np.uint8)
# erosion = cv2.erode(hsv_mask,kernel,iterations = 1)
#
# opening = cv2.morphologyEx(hsv_mask, cv2.MORPH_OPEN, kernel)
#
# plt.axis('off')
# plt.title('Erosion')
# plt.imshow(erosion,cmap='gray')
# #plt.imshow(opening,cmap='gray')
# plt.show()
# imgs_comb = np.hstack([img,canny])
# plt.axis('off')
# plt.title('Canny')
# plt.imshow(imgs_comb,cmap='gray')
# plt.show()
#
#
# #https://stackoverflow.com/questions/53887425/opencv-grabcut-doesnt-update-mask-when-on-gc-init-with-mask-mode
#
# init_mask=hsv_mask.copy()
# mask = np.zeros(img.shape[:2],np.uint8)
# mask[init_mask == 255] = 1
# mask[init_mask == 0] = 2 #Guess everything else is background
#
# bgdModel = np.zeros((1,65),np.float64)
# fgdModel = np.zeros((1,65),np.float64)
#
# mask, bgdModel, fgdModel = cv2.grabCut(img,mask,None,bgdModel,fgdModel,5,cv2.GC_INIT_WITH_MASK)
#
# mask = np.where((mask==2)|(mask==0),0,1).astype('uint8')
# mask[mask == 1] = 255
#
## plt.axis('off')
## plt.title('Foreground-Stripes')
## plt.imshow(mask,cmap='gray')
## plt.show()
##
## cv2.imwrite(os.path.join(output_path,im), mask)
# _,contours,h = cv2.findContours(mask,1,2)
#
# for cnt in contours:
# approx = cv2.approxPolyDP(cnt,0.01*cv2.arcLength(cnt,True),True)
# print( len(approx))
# if len(approx)==5:
# print( "pentagon")
# cv2.drawContours(img,[cnt],0,255,-1)
# elif len(approx)==3:
# print ("triangle")
# cv2.drawContours(img,[cnt],0,(0,255,0),-1)
# elif len(approx)==4:
# print( "rectangle/square")
# cv2.drawContours(img,[cnt],0,(0,0,255),-1)
# elif len(approx) == 9:
# print( "half-circle")
# cv2.drawContours(img,[cnt],0,(255,255,0),-1)
# elif len(approx) > 15:
# print( "circle")
# cv2.drawContours(img,[cnt],0,(0,255,255),-1)
#
# cv2.imwrite(os.path.join(output_path,im), img)
# plt.axis('off')
# plt.title('Foreground-Stripes')
# plt.imshow(img,cmap='gray')
# plt.show()
#plt.figure(figsize=(12, 12))
#gray = cv2.imread('C:/SAI/IIIT/2019_Monsoon/DIP/Project/zebra.jpg',0)
#
#f = np.fft.fft2(gray)
#fshift = np.fft.fftshift(f)
#magnitude_spectrum = 20*np.log(np.abs(fshift))
#
#imgs_comb = np.hstack([gray,magnitude_spectrum])
#
##plt.subplot(2,3,2)
#plt.axis('off')
#plt.title('magnitude_spectrum')
#plt.imshow(imgs_comb,cmap='gray')
#
#plt.show()
| ddurgaprasad/DIP | Project/test2.py | test2.py | py | 7,703 | python | en | code | 0 | github-code | 13 |
72489872977 | from logic_gate import LogicGate
class BinaryGate(LogicGate):
    """Abstract two-input logic gate.

    Each input pin is either connected to another gate through a
    Connector (whose ``get_from_gate()`` supplies the value) or, when
    unconnected, its value is read interactively from the user.
    """

    def __init__(self, label):
        super().__init__(label)
        # None means "not connected"; set_next_pin() fills A then B.
        self.pin_a = None
        self.pin_b = None

    def get_pin_a(self):
        """Return pin A's value: upstream gate output, or user input if unconnected."""
        if self.pin_a is None:
            return int(
                input(f"Enter pin A input for gate {self.get_label()}: ")
            )
        return self.pin_a.get_from_gate().get_output()

    def get_pin_b(self):
        """Return pin B's value: upstream gate output, or user input if unconnected."""
        if self.pin_b is None:
            return int(
                input(f"Enter pin B input for gate {self.get_label()}: ")
            )
        return self.pin_b.get_from_gate().get_output()

    def set_next_pin(self, source):
        """Connect *source* to the first free pin; raise if both are taken."""
        if self.pin_a is None:
            self.pin_a = source
        elif self.pin_b is None:
            # Fixed: the original used `self.pin_b == source` (a comparison),
            # so pin B was never actually connected.
            self.pin_b = source
        else:
            raise RuntimeError("Error: NO EMPTY PINS")
| ldnicolasmay/RunestonePythonDS3 | src/Chapter01/binary_gate.py | binary_gate.py | py | 998 | python | en | code | 0 | github-code | 13 |
5453499097 | import pygame
from settings import WINDOW_WIDTH, WINDOW_HEIGHT, WHITE
from entities.player import Player
class Game:
    """Top-level game object: owns the window, the entity list and the loop flag."""

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
        pygame.display.set_caption('Game')
        self.running = True
        self.player = Player()
        # The player is always the first entity to be drawn.
        self.entities = [self.player]

    def is_running(self) -> bool:
        """True while the main loop should keep iterating."""
        return self.running

    def handle_events(self):
        """Drain the pygame event queue; a window-close request stops the loop."""
        if any(event.type == pygame.QUIT for event in pygame.event.get()):
            self.running = False

    def update(self):
        """Advance the game state one tick (nothing to do yet)."""
        pass

    def draw(self):
        """Clear the frame, draw every entity, then present the back buffer."""
        self.screen.fill(WHITE)
        for sprite in self.entities:
            sprite.draw(self.screen)
        pygame.display.flip()
| soupss/roguelike | roguelike/game.py | game.py | py | 859 | python | en | code | 0 | github-code | 13 |
14528595885 | #import cc.arduino.*
#import org.firmata.*
#Firmata firmata
from funciones import Arduino, guardarArchivo
#variables para almacenar el valor de los sensores medidos en arduino
sensor1=0 #sensor1=alargamiento
sensor2=0 #sensor2=fuerza
VV=45 #VV= valor de PWM para la valvula proporcional
VIR=3470 #VIR=Valor Inicial de Recorrido
VIF=2200 #VIF=Valor Inicial de Fuerza
#inicializar una instancia de Arduino para comunicarse
arduino=Arduino(VIR=3470,VIF=2200)
#Variables para trazar el gráfico
x1,x2,y1,y2 = 0,0,0,0 #coordenadas para trazar lineas en el gráfico
ax,ay,divisionesx,divisionesy = 0,0,10,10 #diviones del gráfico
nombreArchivo,ensayofiles = "","" #variables para almacenar los datos en un archivo
pin4=4 #Bomba
pin5=5 #Valvula de paso
pin6=6 #Valvular Proporcional
Pmax,Rmax = 0,0
#coordenadas del gráfico
Xmin=450
Ymin=200
Xmax=Xmin+500
Ymax=Ymin+500
#Variables de Control de la Maquina
area=0.0 #area= area de la probeta en mm2
LI=0.0 #LI=Longitud inicial de la probeta
LP=0.0 #LP=longitud de la probeta al final de la precarga
bomba=False
valvula=False
ritmo=False
ensayo=False
#datos de salida medidos y válvula proporcional
recorrido=0.0
deformacion=0.0
fuerza=0.0
esfuerzo=0.0
proporcional=0.0
VF=0
millisa=0.0 #millisa=tiempo anterior
fuerzaant=0.0 #fuerzaant=fuerza anterior
dt=0.0 #dt=delta de tiempo entre una medicion y otra
df=0.0 #df=variación de fuerza
relevo=0
relevo1=0
comparacion=10
debug1=0
valores=0
#variables para los loops de control de fase
CP=False #CP=Comienzo de Precarga
FP=False #FP=Fin de Precarga
FA=False #FA=Fuerza Alargamiento
CE=False #CE=comienzo de Ensayo
FE=False #FE=Fin de Ensayo
NS=False #NS=nuevo valor sensado
PV=False #PV=Parada Virtual, detiene la valvula proporcional para que deje de aumentar
#la presion, y por lo tanto es necesario volver a empezar
#Variables para almacenar los datos de recorrido y presión
valuesa=[]
valuesf=[]
valuesd=[]
valuese=[]
#bytes recibidos por el puerto serial
serialData=0
def setup():
size(1024,768,JAVA2D) #dibuja la ventana
background(230) #colorea el fondo
createGUI() # crea la interfaz con los botonos y campos de texto
customGUI() #crea el gráfico y otras partes de la interfaz
FA=False
cambiarGrafico(FA)#defmax.setText("")
#esfmax.("")
#println(Arduino.list())
#arduino = new Arduino(this, Arduino.list()[1], 57600)
#printArray(Serial.list())
delay(2000)
#myport.bufferUntil(START_SYSEX)
arduino.adsConfig(arduino.DIFERENCIALMODE,arduino.GAIN_1)
#recorrido=float(sensor1)
#valuesr.append(map(recorrido, 0, 1024, 0, 300))
#recorridot.setText(str(recorrido))
#presion=float(sensor2)
#valuesp.append(map(presion, 0, 1024, 0, 300))#Revisar map function and float lenght
#presiont.setText(str(presion))
def draw():
if((valvula)and(not FP)and(NS)and(not PV)):#fuerza menor a 100kg --
if(not CP):
LI=float(longitud.getText())
area=float(areatext.getText())
CP=True
millisa=millis()
FA=False
cambiarGrafico(FA)
if(fuerza>200):
FP=True
VV+=1
Mensaje("PRECARGA FINALIZADA","Puede continuar con el ensayo")
"""if(((millis()-millisa)/1000)>5){
VV--
mill#isa=millis()
analogWrite(pin6,VV)
proporcionalb.setValue(VV)
println(VV,"unidades")
}"""
NS=False
if(relevo>10):
if(FP):
print(sensor1," ",recorrido,"mm",VV)
print(sensor2," ",fuerza,"kg")
print()
else:
print(sensor1," ",sensor1*0.1875,"mV ",recorrido,"mm")
print(sensor2," ",sensor2*0.1875,"mV ",fuerza,"kg")
print()
if(not CE): relevo=0
if( (ensayo) and (FP) and (NS) and (not PV)):
if(not CE):
Rmax=int(defmax.getText())
Pmax=int(esfmax.getText())
area=float(areatext.getText())
LI=float(longitud.getText())
VF=20#proporcionalb.getValueF()
CE=True
relevo=0
customGUI()
#Reconocimiento de rotura y de la velocidad de aplicacion de la fuerza
dt=(millis()-millisa)/1000
deformacion=recorrido/LI
esfuerzo=(fuerza/area)/100
valuesa.append(recorrido)
valuesf.append(fuerza)
valuesd.append(deformacion)
valuese.append(esfuerzo)
if((CE)and(relevo>5)):
if dt>0 and relevo1 > comparacion:
df=(fuerza-fuerzaant)/dt
if(fuerza<150):
arduino.analogWrite(pin6,255)
guardarArchivo(valuesf,valuesa,valuese,valuesd)
ensayo=~ensayo
if(df<VF and VV>0 and ensayo==True):
VV-=1
arduino.analogWrite(pin6,VV)
comparacion+=1
#if(ndf>VF and VV<50){
# VV++
# analogWrite(pin6,VV)
#}
millisa=millis()
fuerzaant=fuerza
proporcionalb.setValue(VV)
relevo1=0
valores=valuesf.size()
if(FA):
x1=map(valuesa.get(valores-2),0,Rmax,Xmin,Xmax)
y1=map(valuesf.get(valores-2),0,Pmax,Ymax,Ymin)
x2=map(valuesa.get(valores-1),0,Rmax,Xmin,Xmax)
y2=map(valuesf.get(valores-1),0,Pmax,Ymax,Ymin)
line(x1, y1, x2, y2)
recorridot.setText(str(recorrido))
esfuerzot.setText(str(fuerza))
else:
x1=map(valuesd.get(valores-2),0,Rmax,Xmin,Xmax)
y1=map(valuese.get(valores-2),0,Pmax,Ymax,Ymin)
x2=map(valuesd.get(valores-1),0,Rmax,Xmin,Xmax)
y2=map(valuese.get(valores-1),0,Pmax,Ymax,Ymin)
line(x1, y1, x2, y2)
recorridot.setText(str(deformacion))
esfuerzot.setText(str(esfuerzo))
relevo=0
relevo1+=1
if(PV):
proporcionalb.setValue(50)
arduino.analogWrite(pin6,255)
NS=False
relevo+=1
delay(100) #demorar 100 ms entre mediciones
def customGUI():
Rmax=int(defmax.getText())
Pmax=int(esfmax.getText())
divisionesx=int(unidades.getText())
divisionesy=divisionesx
rectMode(CORNERS)
fill(230)
rect(Xmin-40, Ymin-20, Xmax+20, Ymax+40)
""" Esto va directamente en los botones
if(bomba) bombab.setLocalColorScheme(GCScheme.GREEN_SCHEME)
else bombab.setLocalColorScheme(GCScheme.RED_SCHEME)
if(valvula) valvulab.setLocalColorScheme(GCScheme.GREEN_SCHEME)
else valvulab.setLocalColorScheme(GCScheme.RED_SCHEME)
if(ensayo) ensayob.setLocalColorScheme(GCScheme.GREEN_SCHEME)
else ensayob.setLocalColorScheme(GCScheme.RED_SCHEME)
"""
for i in range(divisionesx):
#print("tamaño: ",valuesr.size())
ax=int(map(i,0,divisionesx,Xmin,Xmax))
stroke(150)
line(ax, Ymax, ax, Ymin)
textSize(12)
fill(0, 102, 153)
textAlign(CENTER,TOP)
text(i*Rmax/divisionesx, ax, Ymax+5)
for i in range(divisionesy):
#print("tamaño: ",valuesr.size())
ay=int(map(i,0,divisionesy,Ymax,Ymin))
stroke(150)
line(Xmin, ay, Xmax, ay)
textSize(12)
fill(0, 102, 153)
textAlign(RIGHT,CENTER)
text(i*Pmax/divisionesy, Xmin-5, ay)
if(CE):
if(FA):
for i in range(len(valuesa)):
#print("tamaño: ",valuesf.size())
x1=map(valuesa[i],0,Rmax,Xmin,Xmax)
y1=map(valuesf[i],0,Pmax,Ymax,Ymin)
x2=map(valuesa[i+1],0,Rmax,Xmin,Xmax)
y2=map(valuesf[i+1],0,Pmax,Ymax,Ymin)
line(x1, y1, x2, y2)
fill(0, 102, 153)
textAlign(CENTER,TOP)
text("Alargamiento (mm)", (Xmin+Xmax)/2, Ymax+30)
textAlign(CENTER,CENTER)
pushMatrix()
translate(Xmin-45,(Ymin+Ymax)/2)
rotate(-HALF_PI)
text("Fuerza (kg)", 0, 0)
popMatrix()
else:
for i in range(len(valuesd)):
#print("tamaño: ",valuesd.size())
x1=map(valuesd[i],0,Rmax,Xmin,Xmax)
y1=map(valuese[i],0,Pmax,Ymax,Ymin)
x2=map(valuesd[i+1],0,Rmax,Xmin,Xmax)
y2=map(valuese[i+1],0,Pmax,Ymax,Ymin)
line(x1, y1, x2, y2)
textSize(18)
fill(0, 102, 153)
textAlign(CENTER,TOP)
text("Deformación Específica (mm/mm)", (Xmin+Xmax)/2, Ymax+30)
textAlign(CENTER,CENTER)
pushMatrix()
translate(Xmin-45,(Ymin+Ymax)/2)
rotate(-HALF_PI)
text("Esfuerzo (kg/cm2)", 0, 0)
popMatrix()
def serialEvent(serial):
    """Serial callback: feed each incoming byte to the Firmata parser.

    Raises the module-level NS ("new sample") flag once a complete
    DIFERENCIALMODE sysex message has been received, so draw() processes
    exactly one fresh reading per message.
    """
    # Both names are module-level state.  Without this declaration the
    # original code made NS function-local, which (a) never set the global
    # flag and (b) raised UnboundLocalError when `not NS` was evaluated.
    global serialData, NS
    serialData = serial.read()
    arduino.processInput(serialData)
    if (serialData == arduino.END_SYSEX) and (arduino.storedInputData[0] == arduino.DIFERENCIALMODE) and not NS:
        NS = True
def cambiarGrafico(FAR:bool):
    """Toggle the plot between force/elongation and stress/strain axes.

    FAR is the current mode (True = force vs. elongation is showing);
    passing it switches the GUI to the other mode: labels and axis maxima
    are converted and the module-level FA flag is updated, then the grid
    is redrawn.
    """
    # FA is module-level state; the original assigned it without this
    # declaration, so only a local was set and the flag never changed.
    global FA
    if(not FAR):
        # Switch to FORCE (kg) vs ELONGATION (mm).
        label5.setText("ALARGAMIENTO (mm)")
        label6.setText("FUERZA (Kg)")
        label4.setText("FUERZA MAX. \n(kg)")
        label3.setText("ALARGAMIENTO MAX. (mm)")
        # Convert the axis maxima from stress/strain to force/elongation.
        esfmax.setText(str(int(esfmax.getText())*float(areatext.getText())))
        defmax.setText(str(int(defmax.getText())*float(longitud.getText())))
        FAB.setText("FUERZA / ALARGAMIENTO")
        FA = True
    else:
        # Switch to STRESS (kg/cm2) vs SPECIFIC STRAIN (mm/mm).
        label5.setText("DEF. ESP.\n(mm / mm)")
        label6.setText("ESFUERZO\n(Kg / cm2)")
        label4.setText("ESFUERZO MAX.\n(kg / cm2)")
        label3.setText("DEF. ESP. MAX.\n(mm / mm)")
        esfmax.setText(str(int(esfmax.getText())/float(areatext.getText())))
        defmax.setText(str(int(defmax.getText())/float(longitud.getText())))
        FAB.setText("ESFUERZO / DEF. ESP.")
        FA = False
    customGUI()
| jfquinones/tensile-test-machine | maquinadetraccion.py | maquinadetraccion.py | py | 9,202 | python | es | code | 0 | github-code | 13 |
26544026949 | from __future__ import print_function
from __future__ import unicode_literals
import atexit
import getopt
import os
import sys
from .blame import Blame
from .changes import Changes
from .config import GitConfig
from .metrics import MetricsLogic
from . import (basedir, clone, extensions, filtering, format, help, interval,
localization, optval, terminal, version)
from .output import outputable
from .output.blameoutput import BlameOutput
from .output.changesoutput import ChangesOutput
from .output.extensionsoutput import ExtensionsOutput
from .output.filteringoutput import FilteringOutput
from .output.metricsoutput import MetricsOutput
from .output.responsibilitiesoutput import ResponsibilitiesOutput
from .output.timelineoutput import TimelineOutput
localization.init()
class Runner(object):
    """Holds all run options and drives a complete gitinspector analysis."""
    def __init__(self):
        # Output/analysis toggles; overridden from the command line or
        # from git config before process() is called.
        self.hard = False
        self.include_metrics = False
        self.list_file_types = False
        self.localize_output = False
        self.responsibilities = False
        self.grading = False
        self.timeline = False
        self.useweeks = False

    def process(self, repos):
        """Analyze every repository in *repos* and emit the combined report."""
        localization.check_compatibility(version.__version__)
        if not self.localize_output:
            localization.disable()
        terminal.skip_escapes(not sys.stdout.isatty())
        terminal.set_stdout_encoding()
        previous_directory = os.getcwd()
        # Bare instances created with __new__ act as empty accumulators;
        # per-repository results are merged into them with +=.
        summed_blames = Blame.__new__(Blame)
        summed_changes = Changes.__new__(Changes)
        summed_metrics = MetricsLogic.__new__(MetricsLogic)
        for repo in repos:
            os.chdir(repo.location)
            # The repo object is only propagated when analyzing multiple
            # repositories (it tags output rows with the repo name).
            repo = repo if len(repos) > 1 else None
            changes = Changes(repo, self.hard)
            summed_blames += Blame(repo, self.hard, self.useweeks, changes)
            summed_changes += changes
            if self.include_metrics:
                summed_metrics += MetricsLogic()
            if sys.stdout.isatty() and format.is_interactive_format():
                terminal.clear_row()
        else:
            # for/else: runs once after the loop finishes normally.
            os.chdir(previous_directory)
        format.output_header(repos)
        outputable.output(ChangesOutput(summed_changes))
        # The detailed sections only make sense when commits were found.
        if summed_changes.get_commits():
            outputable.output(BlameOutput(summed_changes, summed_blames))
            if self.timeline:
                outputable.output(TimelineOutput(summed_changes, self.useweeks))
            if self.include_metrics:
                outputable.output(MetricsOutput(summed_metrics))
            if self.responsibilities:
                outputable.output(ResponsibilitiesOutput(summed_changes, summed_blames))
            outputable.output(FilteringOutput())
            if self.list_file_types:
                outputable.output(ExtensionsOutput())
        format.output_footer()
        os.chdir(previous_directory)
def __check_python_version__():
    """Exit with an explanatory message on interpreters older than 2.6."""
    if sys.version_info < (2, 6):
        python_version = str(sys.version_info[0]) + "." + str(sys.version_info[1])
        sys.exit(_("gitinspector requires at least Python 2.6 to run (version {0} was found).").format(python_version))
def __get_validated_git_repos__(repos_relative):
    """Clone (if remote) and validate each requested repository path.

    Returns a list of repo objects whose ``location`` is the resolved git
    base directory and whose ``name`` is filled in.
    """
    if not repos_relative:
        # Default to the current directory.  Use a list so the loop below
        # iterates one path; the original assigned the bare string "."
        # which only worked by accident because it is a single character.
        repos_relative = ["."]
    repos = []
    # Try to clone the repos or return the same directory and bail out.
    for repo in repos_relative:
        cloned_repo = clone.create(repo)
        if cloned_repo.name is None:
            cloned_repo.location = basedir.get_basedir_git(cloned_repo.location)
            cloned_repo.name = os.path.basename(cloned_repo.location)
        repos.append(cloned_repo)
    return repos
def main():
terminal.check_terminal_encoding()
terminal.set_stdin_encoding()
argv = terminal.convert_command_line_to_utf8()
run = Runner()
repos = []
try:
opts, args = optval.gnu_getopt(argv[1:], "f:F:hHlLmrTwx:", ["exclude=", "file-types=", "format=",
"hard:true", "help", "list-file-types:true", "localize-output:true",
"metrics:true", "responsibilities:true", "since=", "grading:true",
"timeline:true", "until=", "version", "weeks:true"])
repos = __get_validated_git_repos__(set(args))
#We need the repos above to be set before we read the git config.
GitConfig(run, repos[-1].location).read()
clear_x_on_next_pass = True
for o, a in opts:
if o in("-h", "--help"):
help.output()
sys.exit(0)
elif o in("-f", "--file-types"):
extensions.define(a)
elif o in("-F", "--format"):
if not format.select(a):
raise format.InvalidFormatError(_("specified output format not supported."))
elif o == "-H":
run.hard = True
elif o == "--hard":
run.hard = optval.get_boolean_argument(a)
elif o == "-l":
run.list_file_types = True
elif o == "--list-file-types":
run.list_file_types = optval.get_boolean_argument(a)
elif o == "-L":
run.localize_output = True
elif o == "--localize-output":
run.localize_output = optval.get_boolean_argument(a)
elif o == "-m":
run.include_metrics = True
elif o == "--metrics":
run.include_metrics = optval.get_boolean_argument(a)
elif o == "-r":
run.responsibilities = True
elif o == "--responsibilities":
run.responsibilities = optval.get_boolean_argument(a)
elif o == "--since":
interval.set_since(a)
elif o == "--version":
version.output()
sys.exit(0)
elif o == "--grading":
grading = optval.get_boolean_argument(a)
run.include_metrics = grading
run.list_file_types = grading
run.responsibilities = grading
run.grading = grading
run.hard = grading
run.timeline = grading
run.useweeks = grading
elif o == "-T":
run.timeline = True
elif o == "--timeline":
run.timeline = optval.get_boolean_argument(a)
elif o == "--until":
interval.set_until(a)
elif o == "-w":
run.useweeks = True
elif o == "--weeks":
run.useweeks = optval.get_boolean_argument(a)
elif o in("-x", "--exclude"):
if clear_x_on_next_pass:
clear_x_on_next_pass = False
filtering.clear()
filtering.add(a)
__check_python_version__()
run.process(repos)
except (filtering.InvalidRegExpError, format.InvalidFormatError, optval.InvalidOptionArgument, getopt.error) as exception:
print(sys.argv[0], "\b:", exception.msg, file=sys.stderr)
print(_("Try `{0} --help' for more information.").format(sys.argv[0]), file=sys.stderr)
sys.exit(2)
@atexit.register
def cleanup():
    # On interpreter exit, delete any temporary repository clones.
    clone.delete()
if __name__ == "__main__":
main()
| ejwa/gitinspector | gitinspector/gitinspector.py | gitinspector.py | py | 6,178 | python | en | code | 2,282 | github-code | 13 |
32786379086 | import telebot
from telebot import types # кнопки
from string import Template
# SECURITY NOTE(review): the bot token is hard-coded in source; it should be
# moved to an environment variable or config file and this token revoked.
bot = telebot.TeleBot("1191699863:AAGisx_Riems732iRr-2eDbdiNbaA0sMZ54")
# chat_id -> User record being filled in by the registration conversation
user_dict = {}
class User:
    """Registration record for one driver, filled in step by step.

    Only ``city`` is known at construction time; the remaining fields
    (full name, phone, driving-licence and car details) start as None and
    are assigned later by the successive process_*_step handlers.
    """
    def __init__(self, city):
        self.city = city
        fields = ['fullname', 'phone', 'driverSeria',
                  'driverNumber', 'driverDate', 'car',
                  'carModel', 'carColor', 'carNumber', 'carDate']
        for field in fields:
            # setattr creates user.fullname, user.phone, ... ; the original
            # "self.key = None" only created a single attribute literally
            # named "key", leaving the real fields undefined.
            setattr(self, field, None)
# Handler for /help and /start
@bot.message_handler(commands=['help', 'start'])
def send_welcome(message):
    """Greet the user and show the main menu keyboard (/about, /reg, /reg2)."""
    markup = types.ReplyKeyboardMarkup(resize_keyboard=True, row_width=2)
    itembtn1 = types.KeyboardButton('/about')
    itembtn2 = types.KeyboardButton('/reg')
    itembtn3 = types.KeyboardButton('/reg2')
    markup.add(itembtn1, itembtn2, itembtn3)
    bot.send_message(message.chat.id, "Здравствуйте "
                     + message.from_user.first_name
                     + ", я бот, чтобы вы хотели узнать?", reply_markup=markup)
# /about
@bot.message_handler(commands=['about'])
def send_about(message):
    """Reply with a short static blurb about the company."""
    bot.send_message(message.chat.id, "Мы надежная компания"
                     + " company. 10 лет на рынке.")
# /reg
@bot.message_handler(commands=["reg"])
def user_reg(message):
    """Start the registration wizard: ask for the city via a one-shot keyboard."""
    markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
    itembtn1 = types.KeyboardButton('Киев')
    itembtn2 = types.KeyboardButton('Одесса')
    itembtn3 = types.KeyboardButton('Харьков')
    itembtn4 = types.KeyboardButton('Днепр')
    itembtn5 = types.KeyboardButton('Запорожье')
    itembtn6 = types.KeyboardButton('Львов')
    markup.add(itembtn1, itembtn2, itembtn3, itembtn4, itembtn5, itembtn6)
    msg = bot.send_message(message.chat.id, 'Ваш город?', reply_markup=markup)
    # The next message from this chat is routed to process_city_step.
    bot.register_next_step_handler(msg, process_city_step)
def process_city_step(message):
    """Create the per-chat User record from the chosen city, then ask for the name."""
    try:
        chat_id = message.chat.id
        # One User object per chat id; overwrites any previous registration.
        user_dict[chat_id] = User(message.text)
        # Remove the city keyboard before asking a free-text question.
        markup = types.ReplyKeyboardRemove(selective=False)
        msg = bot.send_message(chat_id, 'Фамилия Имя Отчество', reply_markup=markup)
        bot.register_next_step_handler(msg, process_fullname_step)
    except Exception as e:
        # Any failure just tells the user something went wrong; the
        # exception itself is swallowed.
        bot.reply_to(message, 'ooops!!')
# --- Registration wizard steps --------------------------------------------
# Each handler stores one answer on this chat's User object and registers
# the next handler in the chain.  Any unexpected failure replies 'ooops!!'
# and silently drops the exception (the chain stops there).
def process_fullname_step(message):
    """Store the full name, then ask for the phone number."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.fullname = message.text
        msg = bot.send_message(chat_id, 'Ваш номер телефона')
        bot.register_next_step_handler(msg, process_phone_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_phone_step(message):
    """Validate and store the phone number, then ask for the licence series."""
    try:
        # Validity check only: raises ValueError for non-numeric input,
        # which re-prompts via the except branch below.
        int(message.text)
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.phone = message.text
        msg = bot.send_message(chat_id, 'Серия водительского удостоверения')
        bot.register_next_step_handler(msg, process_driverSeria_step)
    except Exception as e:
        msg = bot.reply_to(message, 'Вы ввели что то другое. Пожалуйста введите номер телефона.')
        bot.register_next_step_handler(msg, process_phone_step)
def process_driverSeria_step(message):
    """Store the driving-licence series, then ask for its number."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.driverSeria = message.text
        msg = bot.send_message(chat_id, 'Номер водительского удостоверения')
        bot.register_next_step_handler(msg, process_driverNumber_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_driverNumber_step(message):
    """Store the driving-licence number, then ask for its issue date."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.driverNumber = message.text
        msg = bot.send_message(chat_id, 'Дата выдачи водительского удостоверения\nВ формате: День.Месяц.Год')
        bot.register_next_step_handler(msg, process_driverDate_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_driverDate_step(message):
    """Store the licence issue date (free text, not validated), then ask for the car make."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.driverDate = message.text
        msg = bot.send_message(chat_id, 'Марка автомобиля')
        bot.register_next_step_handler(msg, process_car_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_car_step(message):
    """Store the car make, then ask for the model."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.car = message.text
        msg = bot.send_message(chat_id, 'Модель автомобиля')
        bot.register_next_step_handler(msg, process_carModel_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_carModel_step(message):
    """Store the car model, then ask for the colour via a fixed keyboard."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.carModel = message.text
        markup = types.ReplyKeyboardMarkup(one_time_keyboard=True, resize_keyboard=True)
        itembtn1 = types.KeyboardButton('Бежевый')
        itembtn2 = types.KeyboardButton('Белый')
        itembtn3 = types.KeyboardButton('Голубой')
        itembtn4 = types.KeyboardButton('Желтый')
        itembtn5 = types.KeyboardButton('Зеленый')
        itembtn6 = types.KeyboardButton('Коричневый')
        itembtn7 = types.KeyboardButton('Красный')
        itembtn8 = types.KeyboardButton('Оранжевый')
        itembtn9 = types.KeyboardButton('Розовый')
        itembtn10 = types.KeyboardButton('Серый')
        itembtn11 = types.KeyboardButton('Синий')
        itembtn12 = types.KeyboardButton('Фиолетовый')
        itembtn13 = types.KeyboardButton('Черный')
        markup.add(itembtn1, itembtn2, itembtn3, itembtn4, itembtn5, itembtn6, itembtn7, itembtn8, itembtn9, itembtn10, itembtn11, itembtn12, itembtn13)
        msg = bot.send_message(chat_id, 'Цвет автомобиля', reply_markup=markup)
        bot.register_next_step_handler(msg, process_carColor_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_carColor_step(message):
    """Store the car colour, then ask for the registration plate."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.carColor = message.text
        msg = bot.send_message(chat_id, 'Гос. номер автомобиля')
        bot.register_next_step_handler(msg, process_carNumber_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_carNumber_step(message):
    """Store the plate number, then ask for the production year."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.carNumber = message.text
        msg = bot.send_message(chat_id, 'Год выпуска')
        bot.register_next_step_handler(msg, process_carDate_step)
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
def process_carDate_step(message):
    """Final step: store the year, then echo the summary and forward it to the group."""
    try:
        chat_id = message.chat.id
        user = user_dict[chat_id]
        user.carDate = message.text
        # Echo the completed application back to the applicant.
        bot.send_message(chat_id, getRegData(user, 'Ваша заявка', message.from_user.first_name), parse_mode="Markdown")
        # Forward the application to the operators' group chat.
        # NOTE(review): `config` is not imported in this file — confirm
        # where config.chat_id comes from; as written this line raises
        # NameError and is swallowed by the except below.
        bot.send_message(config.chat_id, getRegData(user, 'Заявка от бота', bot.get_me().username), parse_mode="Markdown")
    except Exception as e:
        bot.reply_to(message, 'ooops!!')
# формирует вид заявки регистрации
# нельзя делать перенос строки Template
# в send_message должно стоять parse_mode="Markdown"
def getRegData(user, title, name):
    """Render a registration application as a single Markdown string.

    The template must stay on one line (line breaks would split the
    Markdown markers) and the result must be sent with
    ``parse_mode="Markdown"``.
    """
    template = Template('$title *$name* \n Город: *$userCity* \n ФИО: *$fullname* \n Телефон: *$phone* \n Серия водительского удостоверения: *$driverSeria* \n Номер водительского удостоверения: *$driverNumber* \n Дата выдачи водительского удостоверения: *$driverDate* \n Марка автомобиля: *$car* \n Модель автомобиля: *$carModel* \n Цвет автомобиля: *$carColor* \n Гос. номер автомобиля: *$carNumber* \n Год выпуска: *$carDate*')
    fields = dict(
        title=title,
        name=name,
        userCity=user.city,
        fullname=user.fullname,
        phone=user.phone,
        driverSeria=user.driverSeria,
        driverNumber=user.driverNumber,
        driverDate=user.driverDate,
        car=user.car,
        carModel=user.carModel,
        carColor=user.carColor,
        carNumber=user.carNumber,
        carDate=user.carDate,
    )
    return template.substitute(fields)
# произвольный текст
@bot.message_handler(content_types=["text"])
def send_help(message):
    """Fallback for any free text: list the available commands."""
    bot.send_message(message.chat.id, 'О нас - /about\nРегистрация - /reg\nПомощь - /help')
# произвольное фото
@bot.message_handler(content_types=["photo"])
def send_help_text(message):
    """Photos are not handled: ask the user to send text instead."""
    bot.send_message(message.chat.id, 'Напишите текст')
# Enable saving next step handlers to file "./.handlers-saves/step.save".
# Delay=2 means that after any change in next step handlers (e.g. calling
# register_next_step_handler()) saving will happen after a 2 second delay,
# so a restart does not lose in-progress registrations.
bot.enable_save_next_step_handlers(delay=2)
# Load next_step_handlers from save file (default "./.handlers-saves/step.save")
# WARNING It will work only if enable_save_next_step_handlers was called!
bot.load_next_step_handlers()
if __name__ == '__main__':
    # Long-poll Telegram forever; none_stop keeps polling through errors.
    bot.polling(none_stop=True)
| MyrzatayEldar/FirstRepository | primer.py | primer.py | py | 9,892 | python | en | code | 1 | github-code | 13 |
20602298563 | import unittest
from ecdsa import SigningKey, curves
from ecdsa.util import sha256, sigdecode_der, sigencode_der
from eth_kms_signer.utils import to_v_r_s
class TestSignUtils(unittest.TestCase):
    """Checks that to_v_r_s() recovers the same (r, s) pair as plain DER decoding."""

    def test_signing(self):
        # Fresh secp256k1 key pair; the compressed public key is what
        # to_v_r_s expects.
        priv_key = SigningKey.generate(curve=curves.SECP256k1)
        pub_key = priv_key.verifying_key.to_string(encoding="compressed").hex()
        message = b"This is a test message"
        message_digest = sha256(b"This is a test message").digest()
        sig_der = priv_key.sign(message, hashfunc=sha256, sigencode=sigencode_der)
        # Reference decoding straight from the DER signature.
        expected_rs = sigdecode_der(sig_der, curves.SECP256k1.order)
        (v, r, s) = to_v_r_s(message_digest, bytes.fromhex(pub_key), sig_der)
        # v (recovery id) is not checked here — only r and s.
        assert (r, s) == expected_rs
| viswanathkgp12/eth_kms_signer | tests/test_sign_utils.py | test_sign_utils.py | py | 751 | python | en | code | 5 | github-code | 13 |
73730647377 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""webhook for hexo blog"""
__author__ = 'Lavenkin'
import os
class FormatTxt(object):
    """Read a text file line by line and rewrite the stripped lines as one
    comma-separated line.

    Every method returns ``self`` so calls can be chained:
    ``FormatTxt().getFilePath(src).fileOpen().move(dst)``.
    """

    def getFilePath(self, path):
        """Remember the input file path; returns self for chaining."""
        self._filePath = path
        return self

    def fileOpen(self):
        """Load the input file into a list of stripped lines.

        On failure the error is printed and ``self`` is still returned, so
        a chained ``.move()`` call no longer crashes with AttributeError on
        ``None`` (the original returned None from the except path).
        """
        self._content = []
        try:
            with open(self._filePath, encoding='utf-8') as f:
                for line in f:
                    self._content.append(line.strip())
        except Exception as e:
            print(e)
        return self

    def move(self, newfile):
        """Write the collected lines to *newfile*, comma separated.

        Mode 'w' already truncates an existing file, so the original's
        explicit os.remove() was redundant.  Write with the same UTF-8
        encoding used for reading — the original used the platform default,
        which can fail on Windows for non-ASCII content.
        """
        with open(newfile, 'w', encoding='utf-8') as f:
            f.write(','.join(self._content))
        return self
if __name__ == '__main__':
    # Hard-coded server paths: convert id.txt into a comma-joined format.txt.
    formatTxt = FormatTxt()
    formatTxt.getFilePath('/var/www/py/id.txt').fileOpen().move('/var/www/py/format.txt')
| larkin-keith/export-data-ttigame | FormatTxt.py | FormatTxt.py | py | 765 | python | en | code | 0 | github-code | 13 |
17943255175 | from login import *
import codecs
import configparser
import os
from genologics.lims import *
from genologics import config
from genologics import entities
# NOTE(review): all three step functions below share the name `get`; behave
# binds each by its decorator string, so the Python-level shadowing is
# harmless here but makes direct imports of these functions ambiguous.
@then('check all samples')
def get(context):
    """Fetch all samples and return (submitters, artifacts) of the first two."""
    lims = login(context)
    samples = lims.get_samples()
    assert len(samples) > 0
    # sample = samples[0]
    submitter, artifact = [], []
    for sample in samples[:2]:
        submitter.append(sample.submitter)
        artifact.append(sample.artifact)
    return submitter, artifact
@then('get the sample by id with {LIMSid}')
def get(context, LIMSid):
    """Look up samples for each project LIMS id in the step's table.

    Returns data for the first row only — the `return` inside the loop
    exits on the first iteration.
    """
    lims = login(context)
    for row in context.table:
        project = Project(lims, id = row['LIMSid'])
        samples = lims.get_samples(projectlimsid=project.id)
        assert len(samples) > 0
        sample = samples[0]
        assert sample is not None
        return len(samples), project,sample.id, sample.name, sample.date_received, sample.uri
    # for key, value in sample.udf.items():
    #     print (' ', key, '=', value)
    # for note in sample.notes:
    #     print (sample.submitter.first_name, sample.artifact.uri,sample.project.id,sample.project.uri)
    # for file in sample.files:
    #     print ('File', file.content_location)
@then("get the sample by name with {name}")
def get(context, name):
    """Look up samples by name; returns the matches for the first table row only."""
    lims = login(context)
    for row in context.table:
        samples = lims.get_samples(name=row['name'])
        assert len(samples) > 0
        return samples
| viaboxxsystems/genologics-behave | features/steps/samples.py | samples.py | py | 1,336 | python | en | code | 0 | github-code | 13 |
35856368582 | import cv2
import numpy as np
import torchvision.transforms as transforms
class SimCLRTrainDataTransform(object):
    """
    Transforms for SimCLR training: produces two independently augmented
    views (xi, xj) of the same input image.

    Transform::

        RandomResizedCrop(size=self.input_height)
        RandomHorizontalFlip()
        RandomApply([color_jitter], p=0.5)
        RandomGrayscale(p=0.2)
        GaussianBlur(kernel_size=int(0.1 * self.input_height))
        transforms.ToTensor()

    Example::

        from pl_bolts.models.self_supervised.simclr.transforms import SimCLRTrainDataTransform

        transform = SimCLRTrainDataTransform(input_height=32)
        x = sample()
        (xi, xj) = transform(x)
    """
    def __init__(self, input_height, s=1):
        # s scales the colour-jitter strength, as in the SimCLR paper.
        self.s = s
        self.input_height = input_height
        color_jitter = transforms.ColorJitter(0.8 * self.s, 0.8 * self.s, 0.8 * self.s, 0.2 * self.s)
        data_transforms = transforms.Compose([transforms.RandomResizedCrop(size=self.input_height),
                                              transforms.RandomHorizontalFlip(),
                                              transforms.RandomApply([color_jitter], p=0.5),
                                              transforms.RandomGrayscale(p=0.2),
                                              GaussianBlur(kernel_size=int(0.1 * self.input_height)),
                                              transforms.ToTensor()])
        self.train_transform = data_transforms

    def __call__(self, sample):
        # Apply the same randomized pipeline twice to get two distinct views.
        transform = self.train_transform
        xi = transform(sample)
        xj = transform(sample)
        return xi, xj
class SimCLREvalDataTransform(object):
    """
    Transforms for SimCLR evaluation: deterministic resize + center crop,
    applied twice to mirror the (xi, xj) interface of the train transform.

    Transform::

        Resize(input_height + 10, interpolation=3)
        transforms.CenterCrop(input_height),
        transforms.ToTensor()

    Example::

        from pl_bolts.models.self_supervised.simclr.transforms import SimCLREvalDataTransform

        transform = SimCLREvalDataTransform(input_height=32)
        x = sample()
        (xi, xj) = transform(x)
    """
    def __init__(self, input_height, s=1):
        # s is kept for interface parity with the train transform; unused here.
        self.s = s
        self.input_height = input_height
        self.test_transform = transforms.Compose([
            transforms.Resize(input_height + 10, interpolation=3),
            transforms.CenterCrop(input_height),
            transforms.ToTensor(),
        ])

    def __call__(self, sample):
        transform = self.test_transform
        xi = transform(sample)
        xj = transform(sample)
        return xi, xj
class GaussianBlur(object):
    # Implements Gaussian blur as described in the SimCLR paper.
    # NOTE(review): parameters `min`/`max` shadow the builtins of the same
    # name inside __init__ — harmless here but worth renaming eventually.
    def __init__(self, kernel_size, min=0.1, max=2.0):
        self.min = min
        self.max = max
        # kernel size is set to be 10% of the image height/width
        # NOTE(review): cv2.GaussianBlur requires an odd kernel size;
        # int(0.1 * input_height) can be even — confirm for your input sizes.
        self.kernel_size = kernel_size

    def __call__(self, sample):
        sample = np.array(sample)
        # blur the image with a 50% chance
        prob = np.random.random_sample()
        if prob < 0.5:
            # sigma drawn uniformly from [min, max), as in the paper.
            sigma = (self.max - self.min) * np.random.random_sample() + self.min
            sample = cv2.GaussianBlur(sample, (self.kernel_size, self.kernel_size), sigma)
        return sample
| lebrice/pytorch-lightning-bolts | pl_bolts/models/self_supervised/simclr/simclr_transforms.py | simclr_transforms.py | py | 3,183 | python | en | code | null | github-code | 13 |
22624834142 | import os
import threading
from datetime import datetime
from otree.api import *
from otree.database import db
import common.SessionConfigFunctions as scf
doc = """
Landing app used to queue up users.
"""
lock = threading.Lock()
COUNT = [0]
def inc_and_get():
    """Atomically increment the global click counter and return its new value."""
    with lock:
        COUNT[0] += 1
        return COUNT[0]
LIMIT_ENV = os.getenv('LANDING_LIMIT')
URL = os.getenv('EXPERIMENT_URL')
class C(BaseConstants):
    NAME_IN_URL = 'landing'
    PLAYERS_PER_GROUP = None
    NUM_ROUNDS = 1
    # Maximum number of participants admitted to the experiment; taken from
    # the LANDING_LIMIT environment variable, 0 when unset.
    if LIMIT_ENV:
        LIMIT = int(LIMIT_ENV)
    else:
        LIMIT = 0
def creating_session(subsession):
    # Reset the process-wide click counter at the start of each session.
    COUNT[0] = 0
class Subsession(BaseSubsession):
    pass
class Group(BaseGroup):
    pass
class Player(BasePlayer):
    # 1-based rank at which this player clicked the button (unset until then).
    count = models.IntegerField(blank=True)
    # Whether this player has clicked the button yet.
    clicked = models.BooleanField(initial=False)
# Live methods
def button_page_live(player, d):
    """Live handler for ButtonPage websocket messages.

    'click': record this player's click rank and broadcast the updated
    progress bar to everyone (key 0 = all players).
    'is_in': tell only this player whether they made the cut.
    """
    func = d['func']
    if func == 'click':
        cnt = inc_and_get()
        player.count = cnt
        player.clicked = True
        # Persist immediately so a concurrent 'is_in' query sees the rank.
        db.commit()
        bar_info = get_bar_info(cnt)
        filled = cnt >= C.LIMIT
        return {0: dict(func='bar', bar_info=bar_info, filled=filled)}
    elif func == 'is_in':
        return {player.id_in_group: dict(func='is_in', is_in=player.count <= C.LIMIT)}
def get_bar_info(cnt):
    """Build the progress-bar context: CSS width percent, capped count, limit."""
    # percentage should never exceed 100%
    # cnt should never exceed n
    cnt_capped = min(cnt, C.LIMIT)
    if cnt == 0:
        # Minimum 10% width so the bar is visible even before any clicks.
        pct_as_decimal = .1
    else:
        pct_as_decimal = max(cnt_capped / C.LIMIT, .1)
    # NOTE(review): the format spec "{: .0%}" yields a leading space
    # (e.g. " 50%") — presumably harmless in the template; confirm.
    pct = f"{pct_as_decimal: .0%}"
    bar_info = dict(pct=pct, count=cnt_capped, N=C.LIMIT)
    return bar_info
def get_url(player):
    """Build the experiment URL for this player.

    Uses the EXPERIMENT_URL env var (falling back to the session-config
    default) as a format string, filling in the participant label or,
    when no label exists, the participant code.
    """
    base_url = URL
    if not base_url:
        base_url = scf.get_default_url(player)
    label = player.participant.label
    if not label:
        label = player.participant.code
    url = base_url.format(label)
    return url
# PAGES
class LandingPage(Page):
    """Waiting page shown until the session's configured start time."""
    timer_text = ''
    @staticmethod
    def get_timeout_seconds(player: Player):
        start_time = scf.get_start_time(player)
        now = datetime.now()
        # datetime.timestamp() returns the number of seconds since the epoch,
        # so the difference is the remaining wait in seconds.
        diff = (start_time.timestamp() - now.timestamp())
        return diff
    @staticmethod
    def vars_for_template(player: Player):
        start_time = scf.get_start_time(player)
        start_time_txt = datetime.strftime(start_time, '%I:%M %p')
        show_next = scf.show_next_button(player)
        return {'start_time': start_time_txt,
                'show_next': show_next}
class ButtonPage(Page):
    """Page with the click button; live updates go through button_page_live."""
    @staticmethod
    def vars_for_template(player: Player):
        # Render the bar with the current global click count.
        cnt = COUNT[0]
        ret = get_bar_info(cnt)
        return ret
    @staticmethod
    def js_vars(player: Player):
        return {'clicked': player.clicked}
    live_method = button_page_live
class ExperimentRedirect(Page):
    """Redirects players whose click rank made the cut to the experiment URL."""

    @staticmethod
    def js_vars(player: Player):
        return {'url': get_url(player)}

    # Consistency fix: every other page hook in this app is a @staticmethod;
    # is_displayed was missing the decorator.
    @staticmethod
    def is_displayed(player: Player):
        # Shown only when the player clicked (count is set) and ranked within
        # the first C.LIMIT clickers; everyone else sees ExperimentFull.
        return player.field_maybe_none('count') is not None and player.count <= C.LIMIT
class ExperimentFull(Page):
    # Terminal page for players who clicked too late (experiment is full).
    pass
# Order of pages shown to each participant.
page_sequence = [LandingPage, ButtonPage, ExperimentRedirect, ExperimentFull]
| rossspoon/market-prefs | landing/__init__.py | __init__.py | py | 3,257 | python | en | code | 0 | github-code | 13 |
17113610754 | """
author_model.py
===============
"""
from typing import Dict
from sqlalchemy import (ARRAY, Boolean, Column, ForeignKey, Integer,
String)
from sqlalchemy.orm import relationship
from agr_literature_service.api.database.base import Base
from agr_literature_service.api.database.versioning import enable_versioning
from agr_literature_service.api.models.audited_model import AuditedModel
enable_versioning()
class AuthorModel(Base, AuditedModel):
    """ORM model for one author entry attached to a reference."""
    __tablename__ = "author"
    __versioned__: Dict = {}

    # Surrogate primary key.
    author_id = Column(
        Integer,
        primary_key=True,
        autoincrement=True
    )

    # Owning reference; rows are removed when the reference is deleted.
    reference_id = Column(
        Integer,
        ForeignKey("reference.reference_id", ondelete="CASCADE"),
        index=True
    )

    reference = relationship(
        "ReferenceModel",
        back_populates="author"
    )

    # ORCID identifier of the author, when known.
    orcid = Column(
        String(),
        index=True,
        nullable=True
    )

    # Whether this author is listed first on the reference.
    first_author = Column(
        Boolean,
        nullable=True,
        unique=False
    )

    # Position of the author in the reference's author list.
    order = Column(
        Integer,
        nullable=True
    )

    corresponding_author = Column(
        Boolean(),
        nullable=True
    )

    # Full display name; first/last/initial fields below hold the parts.
    name = Column(
        String(),
        unique=False,
        nullable=True
    )

    affiliations = Column(
        ARRAY(String),
        unique=False,
        nullable=True
    )

    first_name = Column(
        String(),
        unique=False,
        nullable=True
    )

    last_name = Column(
        String(),
        unique=False,
        nullable=True
    )

    first_initial = Column(
        String(),
        unique=False,
        nullable=True
    )

    def __str__(self):
        """
        Overwrite the default output.
        """
        return "{} 1st({})".format(self.name, self.first_author)
| alliance-genome/agr_literature_service | agr_literature_service/api/models/author_model.py | author_model.py | py | 1,823 | python | en | code | 1 | github-code | 13 |
4078943871 | #!/usr/bin/python3
"""
All of the routes for place resource
"""
from flask import jsonify, abort, request, Blueprint
from models import storage
from models.place import Place
from models.review import Review
places = Blueprint("places", __name__)
@places.route("/<string:place_id>", methods=['GET'])
def get_place_with_id(place_id):
    """Return the JSON representation of the Place with the given id.

    Responds 404 when no such place exists.  Uses storage.get() for a
    direct lookup instead of scanning every Place, consistent with the
    PUT handler below.
    """
    matching_place = storage.get("Place", place_id)
    if matching_place:
        return jsonify(matching_place.to_dict())
    abort(404)
@places.route("/<string:place_id>", methods=['DELETE'])
def delete_place_with_id(place_id):
"""Deletes a place with a particular ID"""
places = storage.all("Place").values()
for place in places:
if place.id == place_id:
storage.delete(place)
storage.save()
storage.close()
return jsonify({})
abort(404)
@places.route("/<string:place_id>", methods=['PUT'])
def update_place_with_id(place_id):
"""Updates a place with a particular ID"""
if not request.is_json:
abort(400, "Not a JSON")
dict_updates = request.get_json()
matching_place = storage.get("Place", place_id)
forbidden_keys = ["id", "created_at", "updated_at", "user_id", "city_id"]
if matching_place:
for key, val in dict_updates.items():
if key not in forbidden_keys:
setattr(matching_place, key, val)
storage.save()
storage.close()
return jsonify(matching_place.to_dict())
abort(404)
@places.route("/<string:place_id>/reviews", methods=['GET'])
def all_review(place_id):
"""Route to get all of the review"""
reviews = storage.all("Review").values()
all_review = []
matching_place = storage.get("Place", place_id)
if matching_place:
for review in reviews:
if review.place_id == place_id:
dict_form = review.to_dict()
all_review.append(dict_form)
return jsonify(all_review)
abort(404)
@places.route("/<string:place_id>/reviews", methods=['POST'])
def create_review(place_id):
"""Create a new review"""
if not storage.get("Place", place_id):
abort(404)
if not request.is_json:
abort(400, "Not a JSON")
new_review = request.get_json()
if new_review.get("user_id") is None:
abort(400, "Missing user_id")
user_id = new_review.get("user_id")
if not storage.get("User", user_id):
abort(404)
if new_review.get("text") is None:
abort(400, "Missing text")
matching_place = storage.get("Place", place_id)
matching_user = storage.get("User", user_id)
if matching_place and matching_user:
new_review["place_id"] = place_id
review_obj = Review(**new_review)
storage.new(review_obj)
storage.save()
storage.close()
return jsonify(review_obj.to_dict()), 201
| srinitude/AirBnB_clone_v3 | api/v1/views/places.py | places.py | py | 2,973 | python | en | code | 0 | github-code | 13 |
25541784167 | # Guessing Game Two www.practicepython.org/exercise/2015/11/01/25-guessing-game-two.html
# In a previous exercise, we’ve written a program that “knows” a number and asks a user to guess it.
# This time, we’re going to do exactly the opposite.
# You, the user, will have in your head a number between 0 and 100.
# The program will guess a number, and you, the user, will say whether it is too high, too low, or your number.
# At the end of this exchange, your program should print out how many guesses it took to get your number.
# As the writer of this program, you will have to choose how your program will strategically guess.
# A naive strategy can be to simply start the guessing at 1, and keep going (2, 3, 4, etc.) until you hit the number.
# But that’s not an optimal guessing strategy. An alternate strategy might be to guess 50
# (right in the middle of the range), and then increase / decrease by 1 as needed.
# After you’ve written the program, try to find the optimal strategy!
# (We’ll talk about what is the optimal one next week with the solution.)
"""FYI: This code is probably wrongly coded, doesn't use binary search and is not that interactive."""
from random import randint
from time import sleep
def random_number() -> int:
    """Pick the program's next guess uniformly from 0..50 inclusive."""
    guess = randint(0, 50)
    return guess
def guesses(r):
    """Interactively guess the user's number, starting from r.

    After each guess the user answers 'too low', 'too high' or 'exact';
    the guess moves by a random step of 1-4 in the indicated direction.
    Prints the number of guesses taken when 'exact' is reached.
    """
    g = []
    while True:
        # Track every guess made so far; len(g) is the attempt count.
        g.append(r)
        print(f'Is your number {r}? (type too low, too high or exact for answer)')
        usr_answer = input().lower()
        if usr_answer == 'too low':
            r += randint(1, 4)
            continue
        elif usr_answer == 'too high':
            r -= randint(1, 4)
            continue
        elif usr_answer == 'exact':
            print('Nice! I got it. \n' \
                  f'It only took me {len(g)} guesses!')
            break
if __name__ == '__main__':
    print('Guess a number in your head between 0 and 50!')
    # Give the user a moment to think of a number before the first guess.
    sleep(2)
    num = random_number()
    guesses(num)
| lupp1/practicepy_solutions | guessing_game_two.py | guessing_game_two.py | py | 1,974 | python | en | code | 0 | github-code | 13 |
25194141911 | from requests_html import HTMLSession
session = HTMLSession()
# Project Gutenberg plain-text edition of "Alice's Adventures in Wonderland".
url = 'http://gutenberg.org/files/11/11-0.txt'
r = session.get(url)
contents = r.html.text
# Save locally; UTF-8 keeps any non-ASCII characters intact on Windows.
with open('E:/GitPro/alice.txt','w',encoding = 'utf-8') as f:
    f.write(contents)
| moxuanranm/newlearn | spider.py | spider.py | py | 262 | python | en | code | 0 | github-code | 13 |
14970982527 | # 卷积神经网络
import time
import torch
import torch.utils.data as Data
import torchvision
import sys
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def loadData(batch_size):
    """Download (if needed) FashionMNIST and return (train, test) DataLoaders."""
    # Download the datasets and convert images to Tensor format.
    mnist_train = torchvision.datasets.FashionMNIST(root='D:/PycharmProjects/pytorch_data/Datasets/FashionMNIST',
                                                    train=True, download=True, transform=torchvision.transforms.ToTensor())
    mnist_test = torchvision.datasets.FashionMNIST(root='D:/PycharmProjects/pytorch_data/Datasets/FashionMNIST',
                                                   train=False, download=True, transform=torchvision.transforms.ToTensor())
    # Batched loading; Windows cannot use worker subprocesses here.
    if sys.platform.startswith("win"):
        works_num = 0
    else:
        works_num = 4
    train_batch = Data.DataLoader(mnist_train, batch_size=batch_size, shuffle=True, num_workers=works_num)
    test_batch = Data.DataLoader(mnist_test, batch_size=batch_size, shuffle=False, num_workers=works_num)
    return train_batch, test_batch
'''
卷积层保留输入形状,使图像的像素在高和宽两个方向上的相关性均可能被有效识别
卷积层通过滑动窗口将同一卷积核与不同位置的输入重复计算,从而避免参数尺寸过大
'''
class LeNet(torch.nn.Module):
    """Classic LeNet-5 style CNN for 28x28 single-channel Fashion-MNIST.

    Shapes for an (N, 1, 28, 28) input:
      conv(1->6, k=5)  -> (N, 6, 24, 24)   24 = 28 - 5 + 1
      maxpool(2, 2)    -> (N, 6, 12, 12)
      conv(6->16, k=5) -> (N, 16, 8, 8)    8 = 12 - 5 + 1
      maxpool(2, 2)    -> (N, 16, 4, 4)
      fc head          -> (N, 10)          10 output classes

    The second conv widens to 16 channels because its spatial input is
    smaller, keeping the two conv layers' parameter sizes comparable.
    """

    def __init__(self):
        super(LeNet, self).__init__()
        # Convolutional feature extractor: two conv + sigmoid + maxpool stages.
        conv_layers = [
            torch.nn.Conv2d(1, 6, 5),
            torch.nn.Sigmoid(),
            torch.nn.MaxPool2d(2, 2),
            torch.nn.Conv2d(6, 16, 5),
            torch.nn.Sigmoid(),
            torch.nn.MaxPool2d(2, 2),
        ]
        self.conv = torch.nn.Sequential(*conv_layers)
        # Classifier head on the flattened 16*4*4 feature map.
        fc_layers = [
            torch.nn.Linear(16 * 4 * 4, 120),
            torch.nn.Sigmoid(),
            torch.nn.Linear(120, 84),
            torch.nn.Sigmoid(),
            torch.nn.Linear(84, 10),
        ]
        self.fc = torch.nn.Sequential(*fc_layers)

    def forward(self, image):
        """Map a batch of images (N, 1, 28, 28) to class logits (N, 10)."""
        features = self.conv(image)
        # Flatten everything but the batch dimension before the FC head.
        return self.fc(features.view(image.shape[0], -1))
def evaluate_accuracy(data_batch, model, device = None):
    """Compute classification accuracy of `model` over all batches in `data_batch`."""
    if device is None and isinstance(model, torch.nn.Module):
        # Evaluate on whatever device the model's parameters live on.
        device = list(model.parameters())[0].device
    acc_sum, n = 0, 0
    with torch.no_grad():
        for X, y in data_batch:
            if isinstance(model, torch.nn.Module):
                # Evaluation mode: disables dropout.
                model.eval()
                acc_sum += (model(X.to(device)).argmax(dim=1) == y.to(device)).float().sum().cpu().item()
                # Switch back to training mode.
                model.train()
            else:
                if ('is_training' in model.__code__.co_varnames):
                    # If the custom callable takes an is_training parameter,
                    # call it with is_training=False.
                    acc_sum += (model(X, is_training=False).argmax(dim=1) == y).float().sum().item()
                else:
                    acc_sum += (model(X).argmax(dim=1) == y).float().sum().item()
            n += y.shape[0]
    return acc_sum / n
def train(model, train_batch, test_batch, batch_size, optimizer, device, num_epochs):
    """Train `model` for `num_epochs`, printing loss/accuracy per epoch."""
    model = model.to(device)
    print("运行在:" , device)
    # Loss function: cross entropy over the 10 classes.
    loss = torch.nn.CrossEntropyLoss()
    for epoch in range(num_epochs):
        train_loss_sum, train_acc_sum, n, batch_count = 0.0, 0.0, 0, 0
        start = time.time()
        for X, y in train_batch:
            X = X.to(device)
            y = y.to(device)
            # Forward pass.
            y_pre = model(X)
            l = loss(y_pre, y)
            # Zero accumulated gradients before backprop.
            optimizer.zero_grad()
            l.backward()
            optimizer.step()
            train_loss_sum += l.cpu().item()
            train_acc_sum += (y_pre.argmax(dim=1) == y).sum().cpu().item()
            n += y.shape[0]
            batch_count += 1
        test_acc = evaluate_accuracy(test_batch, model)
        print("第%d轮的损失为%.4f,训练acc为%.3f,测试acc %.3f,耗时%.1f sec" %
              (epoch + 1, train_loss_sum / batch_count, train_acc_sum / n, test_acc, time.time() - start))
def predict(model, test_batch, device=None):
    """Evaluate `model` on the first batch of `test_batch`.

    Prints the predicted labels and returns the number of correctly
    classified samples in that batch.

    Bug fix: the original used ``iter(test_batch).next()``, which is the
    Python 2 iterator protocol and raises AttributeError on Python 3;
    the builtin ``next(iter(...))`` is the correct spelling.
    """
    if device is None and isinstance(model, torch.nn.Module):
        # Run on whatever device the model's parameters live on.
        device = list(model.parameters())[0].device
    predX, predy = next(iter(test_batch))
    acc_sum = 0
    with torch.no_grad():
        if isinstance(model, torch.nn.Module):
            acc_sum += (model(predX.to(device)).argmax(dim=1) == predy.to(device)).float().sum().cpu().item()
        else:
            if 'is_training' in model.__code__.co_varnames:
                # Custom callables may take an is_training flag: disable it.
                acc_sum += (model(predX, is_training=False).argmax(dim=1) == predy).float().sum().item()
            else:
                acc_sum += (model(predX).argmax(dim=1) == predy).float().sum().item()
    print("预测值:", model(predX).argmax(dim=1))
    return acc_sum
if __name__ == "__main__":
    num_epochs = 1
    batch_size = 256
    # Load the data.
    train_batch, test_batch = loadData(batch_size)
    model = LeNet()
    print(model)
    # Learning rate.
    lr = 0.001
    # Optimizer: Adam.
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    train(model, train_batch, test_batch, batch_size, optimizer, device, num_epochs)
    pre_acc = predict(model, test_batch)
    print("预测精准度为", pre_acc)
| Money8888/pytorch_learn | CNN/LeNet.py | LeNet.py | py | 6,845 | python | en | code | 1 | github-code | 13 |
14517529413 | import random
# Simple betting loop: the user picks a number; the program draws a number
# between 1 and 10; the loop repeats until the user wins once.
on=-1
while on!=1:
    x=int (input('entrez votre chiffre pour misez '))
    wow=[]
    # NOTE(review): ten random numbers are generated but only the first is
    # used (wow.pop(0)); the other nine are discarded each round — presumably
    # leftover from an earlier design, confirm before simplifying.
    for i in range (0,10):
        wow.append (random.randint (1,10))
    x2=wow.pop(0)
    print (x2)
    if x!=x2:
        print ('vous avez perdu')
    else:
        print ('vous avez gagné')
        # Winning ends the loop.
        on=1
# Keep the console window open until the user presses Enter.
input()
| Tadeu-Luc/Python | Pari inutile 0 à 10.py | Pari inutile 0 à 10.py | py | 356 | python | fr | code | 0 | github-code | 13 |
3612909545 | from collections import Counter
import requests
# Download a video-game sales CSV and print the 3 publishers (column 4) with
# the highest global sales (column 9) among Action/Shooter/Platform titles.
url = "https://sites.google.com/site/dr2fundamentospython/arquivos/Video_Games_Sales_as_at_22_Dec_2016.csv"
csv = requests.get(url).text
linhas = csv.splitlines()
lista_marcas = []
lista_vendas = []
tipo_jogos = []
# Collect rows of the three genres of interest (substring match on the line).
for i in range(1, len(linhas) - 1):
    if 'Action' in linhas[i]:
        tipo_jogos.append(linhas[i])
    elif 'Shooter' in linhas[i]:
        tipo_jogos.append(linhas[i])
    elif 'Platform' in linhas[i]:
        tipo_jogos.append(linhas[i])
# NOTE(review): this range skips the first and last collected rows —
# presumably unintentional (copied from the header-skipping loop above);
# confirm against the expected totals.
for j in range(1, len(tipo_jogos) - 1):
    coluna = tipo_jogos[j].split(',')
    lista_marcas.append(coluna[4])
    lista_vendas.append(coluna[9])
for i in range(len(lista_vendas)):
    lista_vendas[i] = float(lista_vendas[i])
# zip keeps the LAST value seen per publisher as the dict's starting value.
dic = dict(zip(lista_marcas, lista_vendas))
# NOTE(review): this second pass adds EVERY occurrence on top of the
# non-zero starting values from the zip above, so one sale per publisher is
# counted twice — verify whether the totals are intended to be exact sums.
for g in tipo_jogos:
    coluna = g.split(',')
    palavra = coluna[4]
    valor = float(coluna[9])
    if palavra in dic:
        valor_antigo = dic[palavra]
        valor_novo = valor_antigo + valor
        dic[palavra] = valor_novo
contador = Counter(dic).most_common(3)
for x in range(len(contador)):
    print(x + 1,' - ',contador[x][0],':', '%.2f em vendas' % contador[x][1])
| thamyresr/fundamentos-python | Exercicio 11 B.py | Exercicio 11 B.py | py | 1,170 | python | pt | code | 0 | github-code | 13 |
44624108854 | #!/usr/bin/env python
import os
import jinja2
import yaml
from optparse import OptionParser
def render(tpl_path, context):
    """Render the Jinja2 template at `tpl_path` with `context` variables.

    The template's directory becomes the loader root (current directory
    when the path has no directory part), so {% include %} resolves
    relative to the template.
    """
    path, filename = os.path.split(tpl_path)
    return jinja2.Environment(
        loader=jinja2.FileSystemLoader(path or './')
    ).get_template(filename).render(context)
# CLI: render a Jinja2 template with variables from a YAML file (-f) and/or
# individual -s "VAR=VALUE" overrides (applied after the file).
usage = "usage: %prog [options] TEMPLATE"
parser = OptionParser(usage=usage)
parser.add_option("-f", "--file",
                  dest="file",
                  help="Load variables from yaml file",
                  metavar="FILE")
parser.add_option("-s", "--set",
                  action="append",
                  dest="environment",
                  help="Set variables for template",
                  metavar="\"VAR1=VALUE1\"")
(options, args) = parser.parse_args()
if len(args) != 1:
    parser.error("Filename is missing")
tpl_file = args[0]
environment = {}
if options.file:
    with open(options.file, 'r') as stream:
        try:
            # SECURITY: FullLoader can construct arbitrary Python objects
            # from some tags; prefer yaml.safe_load for untrusted input.
            environment = yaml.load(stream, Loader=yaml.FullLoader)
        except yaml.YAMLError as exc:
            print(exc)
if options.environment:
    for e in options.environment:
        # Split on the first '='; values may themselves contain '='.
        fields = e.split('=')
        environment[fields[0]] = "=".join(fields[1:])
result = render(tpl_file, environment)
print(result)
| snovikov/j2parser | j2parse.py | j2parse.py | py | 1,273 | python | en | code | 0 | github-code | 13 |
72690437458 | import numpy as np
from typing import Optional, Callable, List
from torchvision.datasets import CIFAR10
class CIFAR10Subset(CIFAR10):
    """CIFAR10 restricted to a subset of classes, optionally augmented with
    'dreamed' (synthetic) samples for continual-learning experiments.
    """
    def __init__(self,
                 root: str,
                 all_classes: List[int],
                 classes_to_learn: Optional[List[int]] = None,
                 dreamed_data=None,
                 train: bool = True,
                 transform: Optional[Callable] = None,
                 download: bool = False):
        super().__init__(root=root, train=train, transform=transform, download=download)
        # Keep only the selected class names and rebuild the name->index map.
        self.classes = [self.classes[i] for i in all_classes]
        self.class_to_idx = {cls: i for i, cls in enumerate(self.classes)}
        if classes_to_learn:
            # Real samples of the classes currently being learned, plus the
            # dreamed samples appended at the end.
            # NOTE(review): dreamed_data is accessed as a TensorDataset
            # (tensors[0]=NCHW images, tensors[1]=labels) — confirm caller.
            data_idx = [i for i, cls_idx in enumerate(self.targets) if cls_idx in classes_to_learn]
            self.targets = [self.targets[i] for i in data_idx] + dreamed_data.tensors[1].tolist()
            # Convert NCHW float tensors back to the HWC uint8 layout CIFAR10 uses.
            dreamed_imgs = dreamed_data.tensors[0].cpu().numpy().transpose([0, 2, 3, 1]).astype('uint8')
            self.data = np.concatenate((self.data[data_idx], dreamed_imgs))
        else:
            # No dreamed data: just filter down to the requested classes.
            data_idx = [i for i, cls_idx in enumerate(self.targets) if cls_idx in all_classes]
            self.targets = [self.targets[i] for i in data_idx]
            self.data = self.data[data_idx]
| Rolkarolka/Dreaming-CL | models/CIFAR10Subset.py | CIFAR10Subset.py | py | 1,304 | python | en | code | 1 | github-code | 13 |
11505251086 | """Unit tests for Caravel"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime
import unittest
from mock import Mock, patch
from caravel import db
from caravel.models import DruidCluster
from .base_tests import CaravelTestCase
# Canned Druid segment-metadata response used to mock PyDruid in the tests.
SEGMENT_METADATA = [{
  "id": "some_id",
  "intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
  "columns": {
    "__time": {
        "type": "LONG", "hasMultipleValues": False,
        "size": 407240380, "cardinality": None, "errorMessage": None},
    "dim1": {
        "type": "STRING", "hasMultipleValues": False,
        "size": 100000, "cardinality": 1944, "errorMessage": None},
    "dim2": {
        "type": "STRING", "hasMultipleValues": True,
        "size": 100000, "cardinality": 1504, "errorMessage": None},
    "metric1": {
        "type": "FLOAT", "hasMultipleValues": False,
        "size": 100000, "cardinality": None, "errorMessage": None}
  },
  "aggregators": {
    "metric1": {
        "type": "longSum",
        "name": "metric1",
        "fieldName": "metric1"}
  },
  "size": 300000,
  "numRows": 5000000
}]

# Canned Druid groupby result set (sum of `num` per country name).
GB_RESULT_SET = [
  {
    "version": "v1",
    "timestamp": "2012-01-01T00:00:00.000Z",
    "event": {
      "name": 'Canada',
      "sum__num": 12345678,
    }
  },
  {
    "version": "v1",
    "timestamp": "2012-01-01T00:00:00.000Z",
    "event": {
      "name": 'USA',
      "sum__num": 12345678 / 2,
    }
  },
]
class DruidTests(CaravelTestCase):
    """Testing interactions with Druid"""
    def __init__(self, *args, **kwargs):
        super(DruidTests, self).__init__(*args, **kwargs)
    # End-to-end check: refresh datasources from a mocked PyDruid client, then
    # hit the explore endpoints and verify both the rendered page and the
    # queried data come back as expected.
    @patch('caravel.models.PyDruid')
    def test_client(self, PyDruid):
        self.login(username='admin')
        # Configure the mocked Druid client's discovery responses.
        instance = PyDruid.return_value
        instance.time_boundary.return_value = [
            {'result': {'maxTime': '2016-01-01'}}]
        instance.segment_metadata.return_value = SEGMENT_METADATA
        # Drop any leftover cluster from a previous run so the test is
        # repeatable against the same test database.
        cluster = (
            db.session
            .query(DruidCluster)
            .filter_by(cluster_name='test_cluster')
            .first()
        )
        if cluster:
            db.session.delete(cluster)
            db.session.commit()
        cluster = DruidCluster(
            cluster_name='test_cluster',
            coordinator_host='localhost',
            coordinator_port=7979,
            broker_host='localhost',
            broker_port=7980,
            metadata_last_refreshed=datetime.now())
        db.session.add(cluster)
        # Stub out network-touching cluster methods before refreshing.
        cluster.get_datasources = Mock(return_value=['test_datasource'])
        cluster.get_druid_version = Mock(return_value='0.9.1')
        cluster.refresh_datasources()
        datasource_id = cluster.datasources[0].id
        db.session.commit()
        # The explore page should reference the refreshed datasource.
        resp = self.client.get('/caravel/explore/druid/{}/'.format(
            datasource_id))
        assert "[test_cluster].[test_datasource]" in resp.data.decode('utf-8')
        # Turn GB_RESULT_SET into flat rows (event fields + timestamp) and
        # serve it as the mocked query result via a pandas DataFrame.
        nres = [
            list(v['event'].items()) + [('timestamp', v['timestamp'])]
            for v in GB_RESULT_SET]
        nres = [dict(v) for v in nres]
        import pandas as pd
        df = pd.DataFrame(nres)
        instance.export_pandas.return_value = df
        instance.query_dict = {}
        instance.query_builder.last_query.query_dict = {}
        # Run a table viz query against the datasource and check a known
        # dimension value shows up in the JSON response.
        resp = self.client.get(
            '/caravel/explore/druid/{}/?viz_type=table&granularity=one+day&'
            'druid_time_origin=&since=7+days+ago&until=now&row_limit=5000&'
            'include_search=false&metrics=count&groupby=name&flt_col_0=dim1&'
            'flt_op_0=in&flt_eq_0=&slice_id=&slice_name=&collapsed_fieldsets=&'
            'action=&datasource_name=test_datasource&datasource_id={}&'
            'datasource_type=druid&previous_viz_type=table&json=true&'
            'force=true'.format(datasource_id, datasource_id))
        assert "Canada" in resp.data.decode('utf-8')
# Allow running this test module directly (outside a test runner).
if __name__ == '__main__':
    unittest.main()
| francisliyy/caravel-aidp | tests/druid_tests.py | druid_tests.py | py | 4,015 | python | en | code | 0 | github-code | 13 |
70441165457 | """
Problem Statement:-
Write a simple python program and declare a tuple initially having all vowels of the English alphabet in it and unpack its contents and store them in some variables as v1,v2,v3,v4,v5 and then assign the elements as v5,v4,v3,v2,v1 sequence.
# The required output is as:
The initial tuple is: ('a', 'e', 'i', 'o', 'u')
The updated tuple is: ('u', 'o', 'i', 'e', 'a')
"""
# Exercise: unpack a tuple of vowels into five variables, then rebuild the
# tuple with the elements in reverse order.
t = ('a', 'e', 'i', 'o', 'u')
print("The initial tuple is:", t)
# Tuple unpacking assigns one vowel to each variable in order.
v1, v2, v3, v4, v5 = t
# Reassemble in reverse (v5..v1) sequence.
t = tuple(reversed((v1, v2, v3, v4, v5)))
print("The updated tuple is:", t)
21891682904 | import unittest
from mock import Mock
from datetime import datetime
from mock import Mock, patch
from grok_test_case import GrokTestCase
from grokpy.connection import Connection
from grokpy.exceptions import GrokError
from grokpy.model import Model
from grokpy.stream import Stream
from grokpy.client import Client
class ModelTestCase(GrokTestCase):
  """Unit tests for grokpy's Model wrapper, with the HTTP layer mocked out.

  Each test patches Model._runCommand and only verifies that the public
  wrapper methods forward the expected command name and arguments, and that
  unexpected keyword arguments raise.
  """
  def setUp(self):
    # Create a mock client
    self.client = Mock(spec=Client)
    self.client.c = Mock(spec=Connection)
    # Create our minimal streamDef
    self.modelDef = {
      'dataUrl':'http://example.com',
      'url': 'http://example.com'
    }
    # Instantiate the stream
    self.m = Model(self.client, self.modelDef)
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelSetAnomalyAutoDetectThreshold(self, runCommandMock):
    self.m.setAnomalyAutoDetectThreshold('mockThreshold')
    runCommandMock.assert_called_once_with('setAutoDetectThreshold',
      autoDetectThreshold='mockThreshold')
    runCommandMock.reset_mock()
    # Bad/missing parameters must raise.
    self.assertRaises(Exception, self.m.setAnomalyAutoDetectThreshold,
                      badParam='test')
    self.assertRaises(Exception, self.m.setAnomalyAutoDetectThreshold)
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelGetAnomalyAutoDetectThreshold(self, runCommandMock):
    self.m.getAnomalyAutoDetectThreshold()
    runCommandMock.assert_called_once_with('getAutoDetectThreshold')
    runCommandMock.reset_mock()
    self.assertRaises(Exception, self.m.getAnomalyAutoDetectThreshold,
                      badParam='test')
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelSetAnomalyAutoDetectWaitRecords(self, runCommandMock):
    self.m.setAnomalyAutoDetectWaitRecords('mockWaitRecords')
    runCommandMock.assert_called_once_with('setAutoDetectWaitRecords',
      autoDetectWaitRecords='mockWaitRecords')
    runCommandMock.reset_mock()
    self.assertRaises(Exception, self.m.setAnomalyAutoDetectWaitRecords,
                      badParam='test')
    self.assertRaises(Exception, self.m.setAnomalyAutoDetectWaitRecords)
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelGetAnomalyAutoDetectWaitRecords(self, runCommandMock):
    self.m.getAnomalyAutoDetectWaitRecords()
    runCommandMock.assert_called_once_with('getAutoDetectWaitRecords')
    runCommandMock.reset_mock()
    self.assertRaises(Exception, self.m.getAnomalyAutoDetectWaitRecords,
                      badParam='test')
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelGetLabels(self, runCommandMock):
    self.m.getLabels()
    runCommandMock.assert_called_once_with('getLabels',
                                           startRecordID=None,
                                           endRecordID=None)
    runCommandMock.reset_mock()
    self.m.getLabels(startRecordID=10)
    runCommandMock.assert_called_once_with('getLabels',
                                           startRecordID=10,
                                           endRecordID=None)
    runCommandMock.reset_mock()
    self.m.getLabels(endRecordID=10)
    runCommandMock.assert_called_once_with('getLabels',
      startRecordID=None, endRecordID=10)
    runCommandMock.reset_mock()
    # Fixed: this previously asserted on self.m.addLabel (a copy-paste from
    # the addLabel test); getLabels is the method under test here.
    self.assertRaises(Exception, self.m.getLabels, badParam='test')
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelAddLabels(self, runCommandMock):
    self.m.addLabel(startRecordID=10, endRecordID=15, labelName='test')
    runCommandMock.assert_called_once_with(
        'addLabel',
        startRecordID= 10,
        endRecordID= 15,
        labelName= 'test'
    )
    runCommandMock.reset_mock()
    # Missing labelName, or an unknown parameter, must raise.
    self.assertRaises(Exception, self.m.addLabel, startRecordID=10)
    self.assertRaises(Exception, self.m.addLabel, badParam='test')
  @patch.object(Model, '_runCommand', spec=Model._runCommand)
  def testModelRemoveLabels(self, runCommandMock):
    self.m.removeLabels(startRecordID=10)
    runCommandMock.assert_called_once_with(
        'removeLabels',
        startRecordID= 10,
        endRecordID=None,
        labelFilter=None
    )
    runCommandMock.reset_mock()
    self.m.removeLabels(startRecordID=10, endRecordID=15)
    runCommandMock.assert_called_once_with(
        'removeLabels',
        startRecordID= 10,
        endRecordID= 15,
        labelFilter= None
    )
    runCommandMock.reset_mock()
    self.m.removeLabels(startRecordID=10, endRecordID=15, labelFilter='test')
    runCommandMock.assert_called_once_with(
        'removeLabels',
        startRecordID= 10,
        endRecordID= 15,
        labelFilter= 'test'
    )
    runCommandMock.reset_mock()
    self.assertRaises(Exception, self.m.removeLabels, badParam='test')
if __name__ == '__main__':
  # Flip debug to 1 to run one hand-picked test instead of the full module.
  debug = 0
  if debug:
    suite = unittest.TestSuite()
    suite.addTest(ModelTestCase('testInstantiation'))
    unittest.TextTestRunner().run(suite)
  else:
    unittest.main()
5169521091 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
import time
import requests
# Scrape the dxarid.uzex.uz auction-lot filter page with Selenium/Firefox and
# print (lot number | item name | price) for every complete table row.
# NOTE(review): find_elements_by_* helpers are the deprecated Selenium 3 API;
# Selenium 4 removed them in favour of find_elements(By...).
driver = webdriver.Firefox()
# driver.get("https://dxarid.uzex.uz/")
# https://dxarid.uzex.uz/ru/trade/lot/5356270/
url = "https://dxarid.uzex.uz/ru/ajax/filter?LotID=&PriceMin=&PriceMax=&RegionID=&DistrictID=&INN=&CategoryID=&EndDate=03.02.2022&PageSize=1000&Src=AllMarkets&PageIndex=1&Type=trade&Tnved=&StartDate=03.12.2021"
driver.get(url)
# Fixed sleep to let the AJAX-rendered table appear (no explicit wait used).
time.sleep(3)
tables = driver.find_elements_by_xpath("//table[@class='table_main thead_fixer table_printable']")
#print(tables)
for table in tables:
    print("table-->",table)
    for row in table.find_elements_by_css_selector('tr'):
        cells = row.find_elements_by_tag_name('td')
        len1 = len(cells)
        # Only data rows have exactly 9 cells; header/summary rows are skipped.
        if len1 == 9:
            id = cells[0].text
            lot = cells[1].text
            enddate = cells[2].text
            reg = cells[3].text
            ray = cells[4].text
            nom = cells[5].text
            cost = cells[6].text
            info = cells[7].text
            print(lot,"|",nom,"|",cost)
            # https://dxarid.uzex.uz/ru/trade/lot/5356270/
            #url1 = 'https://dxarid.uzex.uz/ru/trade/lot/'+lot
            #print(url1)
            #r = requests.get(url1)
            #print(dir(r))
            #print(r.status_code)
            #print(r.content)
            #print(r.headers)
#driver.close()
#exit(0)
driver.close() | Miracle-byte/zero_corruption | crawler/dxarid.py | dxarid.py | py | 1,501 | python | en | code | 1 | github-code | 13 |
5281142633 | """This module processes the arguments given by the .ini file"""
from decimal import Decimal
from distutils.util import strtobool
from pathlib import Path
import ast
import configparser
import os
import shutil
class ArgProcessor():
    """Class that handles the .ini arguments"""
    def __init__(self, ini_path):
        """
        Reads the arguments from the input .ini file and checks their validity
        Args:
            ini_path: The path to the input .ini file
        """
        # Read the .ini file
        config = configparser.ConfigParser()
        config.read(ini_path)
        # Read the argument
        # Dataset/model locations.
        self.input_train_path = Path(config['INPUT']['InputTrainPath'])
        self.input_val_path = Path(config['INPUT']['InputValPath'])
        self.input_test_path = Path(config['INPUT']['InputTestPath'])
        self.input_model_path = Path(config['INPUT']['InputModelPath'])
        self.output_base_path = Path(config['OUTPUT']['OutputBasePath'])
        # Transformer hyperparameters.
        self.model_name = config['MODEL']['ModelName'].lower()
        self.head_size = int(config['MODEL']['HeadSize'])
        self.embed_size = int(config['MODEL']['EmbedSize'])
        self.num_heads = int(config['MODEL']['NumHeads'])
        self.ff_dim = int(config['MODEL']['FFDim'])
        # NOTE: the .ini key really is spelled "NuMTrfBlocks" (odd casing).
        self.num_trf_blocks = int(config['MODEL']['NuMTrfBlocks'])
        # MLPUnits is a Python-literal list, e.g. "[128, 64]".
        self.mlp_units = ast.literal_eval(config['MODEL']['MLPUnits'])
        self.dropout = float(config['MODEL']['Dropout'])
        self.mlp_dropout = float(config['MODEL']['MLPDropout'])
        self.gpu = config['RESOURCE']['GPU']
        # NOTE(review): distutils.util.strtobool is deprecated and removed in
        # Python 3.12; it already lowercases its input, so the extra .lower()
        # below is redundant but harmless.
        self.do_training = strtobool(config['TRAINING']['DoTraining'])
        self.batch_size = int(config['TRAINING']['BatchSize'])
        self.num_epochs = int(config['TRAINING']['NumEpochs'])
        self.do_testing = strtobool(config['TESTING']['DoTesting'].lower())
        # Side effects: create the output directory and archive the .ini used.
        self.output_base_path.mkdir(parents=True, exist_ok=True)
        shutil.copy(ini_path, self.output_base_path)
| david-tedjopurnomo/TrafFormer | trafformer/arg_processor.py | arg_processor.py | py | 2,197 | python | en | code | 0 | github-code | 13 |
4368753709 | #!/usr/bin/env python
import argparse, json, os, requests
# Utilities
def write_json(filename, data):
    """Serialize *data* to *filename* as pretty-printed JSON plus a trailing newline."""
    serialized = json.dumps(data, indent=2, separators=(",", ": "))
    with open(filename, "w") as f:
        f.write(serialized + "\n")
# General processing
def process(issues):
    """Reduce raw GitHub issue dicts to summary.json.

    Pull requests and ignorable issues are skipped; each remaining issue
    contributes its URL plus the fields derived from its labels and body.
    """
    summary = [
        {
            "id": issue["html_url"],
            **process_labels(issue["labels"]),
            **process_body(issue),
        }
        for issue in issues
        if not is_ignorable_issue(issue)
    ]
    write_json("summary.json", summary)
def is_ignorable_issue(issue):
    """Return True for pull requests and for issues carrying a label that
    marks them as not being a real standards position."""
    if "pull_request" in issue:
        return True
    ignorable = ("duplicate", "invalid", "meta", "proposal withdrawn")
    return any(label["name"] in ignorable for label in issue["labels"])
def process_labels(labels):
    """Derive position/venues/concerns/topics from a GitHub issue's labels.

    Venues are deduplicated while preserving first-seen order. Exactly one
    position-style label ("blocked" or "position: ...") is allowed; a second
    one trips the assert.
    """
    venue_exact = {"venue: AOM": "AOM", "venue: Ecma TC39": "TC39"}
    venue_prefixes = [
        ("venue: IETF", "IETF"),
        ("venue: WHATWG", "WHATWG"),
        ("venue: W3C", "W3C"),
    ]
    position = None
    venues = []
    concerns = []
    topics = []
    for label in labels:
        name = label["name"]
        if name == "blocked" or name.startswith("position: "):
            assert position is None
            position = "blocked" if name == "blocked" else name[len("position: "):]
        elif name in venue_exact:
            venues.append(venue_exact[name])
        else:
            for prefix, venue in venue_prefixes:
                if name.startswith(prefix):
                    venues.append(venue)
                    break
            else:
                if name.startswith("venue: "):
                    # Any venue label not recognized above is lumped together.
                    venues.append("Other")
                elif name.startswith("concerns: "):
                    concerns.append(name[len("concerns: "):])
                elif name.startswith("topic: "):
                    topics.append(name[len("topic: "):])
    return {
        "position": position,
        "venues": list(dict.fromkeys(venues)),
        "concerns": concerns,
        "topics": topics,
    }
def process_body(issue):
    """Extract spec-metadata fields from an issue's body text.

    Issues filed before #162 used a bulleted "* Field: value" template.
    Newer issues come from GitHub issue forms, whose Markdown renders each
    field as a "### Heading" line followed by the response, with blank
    lines in between and "_No response_" for empty fields.

    Returns a dict with None for every field that was not provided.

    Bug fix: the original reset ``expect_response`` on the first line after
    a heading even when that line was blank, so responses separated from
    their heading by a blank line (as issue forms emit) were dropped. Blank
    lines are now skipped until the actual response line is seen.
    """
    lines = issue["body"].splitlines()
    body = {
        "title": None,
        "url": None,
        "github": None,
        "issues": None,
        "explainer": None,
        "tag": None,
        "mozilla": None,
        "bugzilla": None,
        "radar": None,
    }
    legacy_mapping = {
        "Spec Title": "title",
        "Title": "title",
        "Spec URL": "url",
        "URL": "url",
        "GitHub repository": "github",
        "Issue Tracker (if not the repository's issue tracker)": "issues",
        "Explainer (if not README.md in the repository)": "explainer",
        "TAG Design Review": "tag",
        "Mozilla standards-positions issue": "mozilla",
        "WebKit Bugzilla": "bugzilla",
        "Radar": "radar",
    }
    yaml_mapping = {
        "Title of the spec": "title",
        "URL to the spec": "url",
        "URL to the spec's repository": "github",
        "Issue Tracker URL": "issues",
        "Explainer URL": "explainer",
        "TAG Design Review URL": "tag",
        "Mozilla standards-positions issue URL": "mozilla",
        "WebKit Bugzilla URL": "bugzilla",
        "Radar URL": "radar",
    }
    # Legacy mapping applies until the YAML (issue forms) change in #162.
    if issue["number"] < 162:
        for line in lines:
            for prefix, key in legacy_mapping.items():
                text_prefix = f"* {prefix}: "
                if line.startswith(text_prefix):
                    # Each field may appear at most once.
                    assert body[key] is None
                    value = line[len(text_prefix):].strip()
                    if value:
                        body[key] = value
    else:
        expect_response = None
        for line in lines:
            if line == "### Description":
                # Free-form description follows; no more metadata fields.
                break
            heading_key = next(
                (key for title, key in yaml_mapping.items()
                 if line == f"### {title}"),
                None,
            )
            if heading_key is not None:
                expect_response = heading_key
                continue
            if expect_response:
                value = line.strip()
                if not value:
                    # Issue forms emit a blank line between the heading and
                    # the response; keep waiting for the response line.
                    continue
                if value != "_No response_":
                    body[expect_response] = value
                expect_response = None
    return body
# Setup
def main():
    """Command-line entry point.

    --update fetches all issues from the GitHub API into summary-data.json;
    --process reduces previously fetched data into summary.json.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-u",
        "--update",
        action="store_true",
        help="get the latest issue data from GitHub",
    )
    parser.add_argument("-p", "--process", action="store_true", help="process the data")
    args = parser.parse_args()
    if args.update:
        # GitHub allows us to read issues in increments of 100, called pages. As we don't have more
        # than 3 pages we're not optimizing this for now.
        data = []
        page = 1
        while True:
            try:
                response = requests.get(
                    f"https://api.github.com/repos/WebKit/standards-positions/issues?direction=asc&state=all&per_page=100&page={page}",
                    timeout=5,
                )
                response.raise_for_status()
            except Exception:
                # Typo fixed: message previously read "Updated failed, ...".
                print("Update failed, network failure or request timed out.")
                exit(1)
            temp_data = response.json()
            if not temp_data:
                # An empty page means we've paged past the last issue.
                break
            data.extend(temp_data)
            page += 1
        write_json("summary-data.json", data)
        print("Done, thanks for updating!")
        exit(0)
    if args.process:
        if not os.path.exists("summary-data.json"):
            print("Sorry, you have to update first.")
            exit(1)
        with open("summary-data.json", "rb") as f:
            data = json.load(f)
        process(data)
# Script entry point: parse CLI flags and run update/process as requested.
if __name__ == "__main__":
    main()
| WebKit/standards-positions | summary.py | summary.py | py | 6,036 | python | en | code | 215 | github-code | 13 |
# Parse knapsack1.txt: the first line is "<capacity> <item count>"; every
# following line is "<value> <weight>" for one item.
loaded_items = []
with open('knapsack1.txt') as f:
    for line_no, line in enumerate(f):
        fields = line.split()
        if line_no == 0:
            knapsack_size = int(fields[0])
        else:
            loaded_items.append((int(fields[0]), int(fields[1])))
def knapsack(items, capacity):
    """Solve the 0/1 knapsack problem bottom-up.

    Args:
        items: Sequence of (value, weight) pairs.
        capacity: Maximum total weight (non-negative int).

    Returns:
        The full DP table: row i holds, for every weight budget 0..capacity,
        the best achievable value using only the first i items. The optimum
        is table[len(items)][capacity].
    """
    scores = [[0] * (capacity + 1)]
    for value, weight in items:
        prev_row = scores[-1]
        row = []
        for budget in range(capacity + 1):
            # Best without this item, vs. best with it (if it fits).
            best = prev_row[budget]
            if weight <= budget:
                best = max(best, prev_row[budget - weight] + value)
            row.append(best)
        scores.append(row)
    return scores
# The optimal total value lives in the last DP row (all items considered)
# at the full capacity column.
results = knapsack(loaded_items, knapsack_size)
print(results[len(loaded_items)][knapsack_size])
| elliotjberman/algorithms | pt2_week3/knapsack.py | knapsack.py | py | 902 | python | en | code | 0 | github-code | 13 |
72915299858 | import enum
import pathlib
import itertools
import functools
import dataclasses
from typing import (cast, TYPE_CHECKING, Any, Callable, Iterable, List, Optional,
Sequence, Set, Type, Union, Tuple)
from qutebrowser.qt import machinery
from qutebrowser.qt.core import (pyqtSignal, pyqtSlot, QUrl, QObject, QSizeF, Qt,
QEvent, QPoint, QRect)
from qutebrowser.qt.gui import QKeyEvent, QIcon, QPixmap
from qutebrowser.qt.widgets import QApplication, QWidget
from qutebrowser.qt.printsupport import QPrintDialog, QPrinter
from qutebrowser.qt.network import QNetworkAccessManager
if TYPE_CHECKING:
from qutebrowser.qt.webkit import QWebHistory, QWebHistoryItem
from qutebrowser.qt.webkitwidgets import QWebPage, QWebView
from qutebrowser.qt.webenginecore import (
QWebEngineHistory, QWebEngineHistoryItem, QWebEnginePage)
from qutebrowser.qt.webenginewidgets import QWebEngineView
from qutebrowser.keyinput import modeman
from qutebrowser.config import config, websettings
from qutebrowser.utils import (utils, objreg, usertypes, log, qtutils,
urlutils, message, jinja, version)
from qutebrowser.misc import miscwidgets, objects, sessions
from qutebrowser.browser import eventfilter, inspector
from qutebrowser.qt import sip
if TYPE_CHECKING:
from qutebrowser.browser import webelem
from qutebrowser.browser.inspector import AbstractWebInspector
# Infinite counter starting at 0; supplies unique IDs for tabs (consumed
# elsewhere — presumably by AbstractTab instances; confirm at call sites).
tab_id_gen = itertools.count(0)
# Union of the two backend view widgets; used to type the `_widget`
# attributes of the helper classes below.
_WidgetType = Union["QWebView", "QWebEngineView"]
def create(win_id: int,
           private: bool,
           parent: QWidget = None) -> 'AbstractTab':
    """Get a QtWebKit/QtWebEngine tab object.
    Args:
        win_id: The window ID where the tab will be shown.
        private: Whether the tab is a private/off the record tab.
        parent: The Qt parent to set.
    Return:
        A WebEngineTab or WebKitTab, depending on the active backend.
    """
    # Importing modules here so we don't depend on QtWebEngine without the
    # argument and to avoid circular imports.
    mode_manager = modeman.instance(win_id)
    if objects.backend == usertypes.Backend.QtWebEngine:
        from qutebrowser.browser.webengine import webenginetab
        tab_class: Type[AbstractTab] = webenginetab.WebEngineTab
    elif objects.backend == usertypes.Backend.QtWebKit:
        from qutebrowser.browser.webkit import webkittab
        tab_class = webkittab.WebKitTab
    else:
        # Any other backend value is a programming error.
        raise utils.Unreachable(objects.backend)
    return tab_class(win_id=win_id, mode_manager=mode_manager, private=private,
                     parent=parent)
class WebTabError(Exception):
    """Base class for various errors raised by web tab operations."""
class UnsupportedOperationError(WebTabError):
    """Raised when an operation is not supported with the given backend."""
class TerminationStatus(enum.Enum):
    """How a QtWebEngine renderer process terminated.
    Also see QWebEnginePage::RenderProcessTerminationStatus
    """
    #: Unknown render process status value gotten from Qt.
    unknown = -1
    #: The render process terminated normally.
    normal = 0
    #: The render process terminated with a non-zero exit status.
    abnormal = 1
    #: The render process crashed, for example because of a segmentation fault.
    crashed = 2
    #: The render process was killed, for example by SIGKILL or task manager kill.
    killed = 3
@dataclasses.dataclass
class TabData:
    """A simple namespace with a fixed set of attributes.
    Attributes:
        keep_icon: Whether the (e.g. cloned) icon should not be cleared on page
                   load.
        inspector: The QWebInspector used for this webview.
        viewing_source: Set if we're currently showing a source view.
                        Only used when sources are shown via pygments.
        open_target: Where to open the next link.
                     Only used for QtWebKit.
        override_target: Override for open_target for fake clicks (like hints).
                         Only used for QtWebKit.
        pinned: Flag to pin the tab.
        fullscreen: Whether the tab has a video shown fullscreen currently.
        netrc_used: Whether netrc authentication was performed.
        input_mode: current input mode for the tab.
        last_navigation: The most recent navigation request seen for this tab
                         (presumably recorded by the tab implementation —
                         confirm at the assignment sites).
        splitter: InspectorSplitter used to show inspector inside the tab.
    """
    keep_icon: bool = False
    viewing_source: bool = False
    inspector: Optional['AbstractWebInspector'] = None
    open_target: usertypes.ClickTarget = usertypes.ClickTarget.normal
    override_target: Optional[usertypes.ClickTarget] = None
    pinned: bool = False
    fullscreen: bool = False
    netrc_used: bool = False
    input_mode: usertypes.KeyMode = usertypes.KeyMode.normal
    last_navigation: Optional[usertypes.NavigationRequest] = None
    splitter: Optional[miscwidgets.InspectorSplitter] = None
    def should_show_icon(self) -> bool:
        """Whether a favicon should be shown, per tabs.favicons.show.

        True when the setting is 'always', or when it is 'pinned' and this
        tab is pinned.
        """
        return (config.val.tabs.favicons.show == 'always' or
                config.val.tabs.favicons.show == 'pinned' and self.pinned)
class AbstractAction:
    """Attribute ``action`` of AbstractTab for Qt WebActions."""
    # Backend-specific WebAction enum; declared here without a value, so it is
    # presumably assigned by the backend subclass (used via getattr in
    # run_string) — confirm in the subclasses.
    action_base: Type[Union['QWebPage.WebAction', 'QWebEnginePage.WebAction']]
    def __init__(self, tab: 'AbstractTab') -> None:
        # Starts as None; the concrete backend fills in the real widget later.
        self._widget = cast(_WidgetType, None)
        self._tab = tab
    def exit_fullscreen(self) -> None:
        """Exit the fullscreen mode."""
        raise NotImplementedError
    def save_page(self) -> None:
        """Save the current page."""
        raise NotImplementedError
    def run_string(self, name: str) -> None:
        """Run a webaction based on its name.

        Raises WebTabError if `name` is not an attribute of action_base.
        """
        try:
            member = getattr(self.action_base, name)
        except AttributeError:
            raise WebTabError(f"{name} is not a valid web action!")
        self._widget.triggerPageAction(member)
    def show_source(self, pygments: bool = False) -> None:
        """Show the source of the current page in a new tab."""
        raise NotImplementedError
    def _show_html_source(self, html: str) -> None:
        """Show the given HTML as source page."""
        tb = objreg.get('tabbed-browser', scope='window', window=self._tab.win_id)
        new_tab = tb.tabopen(background=False, related=True)
        new_tab.set_html(html, self._tab.url())
        new_tab.data.viewing_source = True
    def _show_source_fallback(self, source: str) -> None:
        """Show source with pygments unavailable."""
        html = jinja.render(
            'pre.html',
            title='Source',
            content=source,
            preamble="Note: The optional Pygments dependency wasn't found - "
            "showing unhighlighted source.",
        )
        self._show_html_source(html)
    def _show_source_pygments(self) -> None:
        """Show a syntax-highlighted source view, falling back to plain text."""
        def show_source_cb(source: str) -> None:
            """Show source as soon as it's ready."""
            try:
                import pygments
                import pygments.lexers
                import pygments.formatters
            except ImportError:
                # Pygments is an optional dependency
                self._show_source_fallback(source)
                return
            try:
                lexer = pygments.lexers.HtmlLexer()
                formatter = pygments.formatters.HtmlFormatter(
                    full=True, linenos='table')
            except AttributeError:
                # Remaining namespace package from Pygments
                self._show_source_fallback(source)
                return
            html = pygments.highlight(source, lexer, formatter)
            self._show_html_source(html)
        # dump_async delivers the page source to the callback when available.
        self._tab.dump_async(show_source_cb)
class AbstractPrinting(QObject):
    """Attribute ``printing`` of AbstractTab for printing the page."""
    #: Emitted when printing finished; argument is whether it succeeded.
    printing_finished = pyqtSignal(bool)
    pdf_printing_finished = pyqtSignal(str, bool)  # filename, ok
    def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
        super().__init__(parent)
        # Starts as None; the concrete backend fills in the real widget later.
        self._widget = cast(_WidgetType, None)
        self._tab = tab
        # Dialog from show_dialog(); kept alive until printing finishes.
        self._dialog: Optional[QPrintDialog] = None
        self.printing_finished.connect(self._on_printing_finished)
        self.pdf_printing_finished.connect(self._on_pdf_printing_finished)
    @pyqtSlot(bool)
    def _on_printing_finished(self, ok: bool) -> None:
        # Only reporting error here, as the user has feedback from the dialog
        # (and probably their printer) already.
        if not ok:
            message.error("Printing failed!")
        if self._dialog is not None:
            self._dialog.deleteLater()
            self._dialog = None
    @pyqtSlot(str, bool)
    def _on_pdf_printing_finished(self, path: str, ok: bool) -> None:
        # Report the outcome of a print-to-PDF request to the user.
        if ok:
            message.info(f"Printed to {path}")
        else:
            message.error(f"Printing to {path} failed!")
    def check_pdf_support(self) -> None:
        """Check whether writing to PDFs is supported.
        If it's not supported (by the current Qt version), a WebTabError is
        raised.
        """
        raise NotImplementedError
    def check_preview_support(self) -> None:
        """Check whether showing a print preview is supported.
        If it's not supported (by the current Qt version), a WebTabError is
        raised.
        """
        raise NotImplementedError
    def to_pdf(self, path: pathlib.Path) -> None:
        """Print the tab to a PDF with the given filename."""
        raise NotImplementedError
    def to_printer(self, printer: QPrinter) -> None:
        """Print the tab.
        Args:
            printer: The QPrinter to print to.
        """
        raise NotImplementedError
    def _do_print(self) -> None:
        """Print to the printer configured in the open dialog."""
        assert self._dialog is not None
        printer = self._dialog.printer()
        assert printer is not None
        self.to_printer(printer)
    def show_dialog(self) -> None:
        """Print with a QPrintDialog."""
        self._dialog = QPrintDialog(self._tab)
        self._dialog.open(self._do_print)
        # Gets cleaned up in on_printing_finished
@dataclasses.dataclass
class SearchMatch:

    """The currently highlighted search match.

    Attributes:
        current: The currently active search match on the page.
                 0 if no search is active or the feature isn't available.
        total: The total number of search matches on the page.
               0 if no search is active or the feature isn't available.
    """

    current: int = 0
    total: int = 0

    def reset(self) -> None:
        """Reset match counter information.

        Stale information could lead to next_result or prev_result misbehaving.
        """
        self.current = 0
        self.total = 0

    def is_null(self) -> bool:
        """Whether the SearchMatch is set to zero."""
        return self.current == self.total == 0

    def at_limit(self, going_up: bool) -> bool:
        """Whether the SearchMatch is currently at the first/last result."""
        if self.total == 0:
            return False
        boundary = 1 if going_up else self.total
        return self.current == boundary

    def __str__(self) -> str:
        return f"{self.current}/{self.total}"
class SearchNavigationResult(enum.Enum):
    """The outcome of calling prev_/next_result."""
    #: Moved to another match.
    found = enum.auto()
    #: No match to move to.
    not_found = enum.auto()
    #: Passed the bottom of the page and wrapped around.
    wrapped_bottom = enum.auto()
    #: Hit the bottom but wrapping was not allowed.
    wrap_prevented_bottom = enum.auto()
    #: Passed the top of the page and wrapped around.
    wrapped_top = enum.auto()
    #: Hit the top but wrapping was not allowed.
    wrap_prevented_top = enum.auto()
class AbstractSearch(QObject):
    """Attribute ``search`` of AbstractTab for doing searches.
    Attributes:
        text: The last thing this view was searched for.
        search_displayed: Whether we're currently displaying search results in
                          this view.
        match: The currently active search match.
        _flags: The flags of the last search (needs to be set by subclasses).
        _widget: The underlying WebView widget.
    Signals:
        finished: A search has finished. True if the text was found, false otherwise.
        match_changed: The currently active search match has changed.
                       Emits SearchMatch(0, 0) if no search is active.
                       Will not be emitted if search matches are not available.
        cleared: An existing search was cleared.
    """
    finished = pyqtSignal(bool)
    match_changed = pyqtSignal(SearchMatch)
    cleared = pyqtSignal()
    # Callback type aliases for search() and prev_/next_result().
    _Callback = Callable[[bool], None]
    _NavCallback = Callable[[SearchNavigationResult], None]
    def __init__(self, tab: 'AbstractTab', parent: QWidget = None):
        super().__init__(parent)
        self._tab = tab
        # Starts as None; the concrete backend fills in the real widget later.
        self._widget = cast(_WidgetType, None)
        self.text: Optional[str] = None
        self.search_displayed = False
        self.match = SearchMatch()
    def _is_case_sensitive(self, ignore_case: usertypes.IgnoreCase) -> bool:
        """Check if case-sensitivity should be used.
        This assumes self.text is already set properly.
        Arguments:
            ignore_case: The ignore_case value from the config.
        """
        assert self.text is not None
        # smart: case-sensitive only if the needle contains an uppercase char.
        mapping = {
            usertypes.IgnoreCase.smart: not self.text.islower(),
            usertypes.IgnoreCase.never: True,
            usertypes.IgnoreCase.always: False,
        }
        return mapping[ignore_case]
    def search(self, text: str, *,
               ignore_case: usertypes.IgnoreCase = usertypes.IgnoreCase.never,
               reverse: bool = False,
               result_cb: _Callback = None) -> None:
        """Find the given text on the page.
        Args:
            text: The text to search for.
            ignore_case: Search case-insensitively.
            reverse: Reverse search direction.
            result_cb: Called with a bool indicating whether a match was found.
        """
        raise NotImplementedError
    def clear(self) -> None:
        """Clear the current search."""
        raise NotImplementedError
    def prev_result(self, *, wrap: bool = False, callback: _NavCallback = None) -> None:
        """Go to the previous result of the current search.
        Args:
            wrap: Allow wrapping at the top or bottom of the page.
            callback: Called with a SearchNavigationResult.
        """
        raise NotImplementedError
    def next_result(self, *, wrap: bool = False, callback: _NavCallback = None) -> None:
        """Go to the next result of the current search.
        Args:
            wrap: Allow wrapping at the top or bottom of the page.
            callback: Called with a SearchNavigationResult.
        """
        raise NotImplementedError
class AbstractZoom(QObject):
    """Attribute ``zoom`` of AbstractTab for controlling zoom."""
    def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
        super().__init__(parent)
        self._tab = tab
        # Starts as None; the concrete backend fills in the real widget later.
        self._widget = cast(_WidgetType, None)
        # Whether zoom was changed from the default.
        self._default_zoom_changed = False
        self._init_neighborlist()
        config.instance.changed.connect(self._on_config_changed)
        self._zoom_factor = float(config.val.zoom.default) / 100
    @pyqtSlot(str)
    def _on_config_changed(self, option: str) -> None:
        # Keep the zoom-level list in sync with config; only follow the new
        # default if the user hasn't zoomed this tab away from it.
        if option in ['zoom.levels', 'zoom.default']:
            if not self._default_zoom_changed:
                factor = float(config.val.zoom.default) / 100
                self.set_factor(factor)
            self._init_neighborlist()
    def _init_neighborlist(self) -> None:
        """Initialize self._neighborlist.
        It is a NeighborList with the zoom levels."""
        levels = config.val.zoom.levels
        self._neighborlist: usertypes.NeighborList[float] = usertypes.NeighborList(
            levels, mode=usertypes.NeighborList.Modes.edge)
        self._neighborlist.fuzzyval = config.val.zoom.default
    def apply_offset(self, offset: int) -> float:
        """Increase/Decrease the zoom level by the given offset.
        Args:
            offset: The offset in the zoom level list.
        Return:
            The new zoom level.
        """
        level = self._neighborlist.getitem(offset)
        self.set_factor(float(level) / 100, fuzzyval=False)
        return level
    def _set_factor_internal(self, factor: float) -> None:
        """Apply the zoom factor to the underlying widget (backend-specific)."""
        raise NotImplementedError
    def set_factor(self, factor: float, *, fuzzyval: bool = True) -> None:
        """Zoom to a given zoom factor.
        Args:
            factor: The zoom factor as float.
            fuzzyval: Whether to set the NeighborLists fuzzyval.
        """
        if fuzzyval:
            self._neighborlist.fuzzyval = int(factor * 100)
        if factor < 0:
            raise ValueError("Can't zoom to factor {}!".format(factor))
        default_zoom_factor = float(config.val.zoom.default) / 100
        self._default_zoom_changed = factor != default_zoom_factor
        self._zoom_factor = factor
        self._set_factor_internal(factor)
    def factor(self) -> float:
        """Return the current zoom factor (1.0 == 100%)."""
        return self._zoom_factor
    def apply_default(self) -> None:
        """Apply the configured default zoom to the widget."""
        self._set_factor_internal(float(config.val.zoom.default) / 100)
    def reapply(self) -> None:
        """Re-apply the stored zoom factor to the widget."""
        self._set_factor_internal(self._zoom_factor)
class SelectionState(enum.Enum):
    """Possible states of selection in caret mode.
    NOTE: Names need to line up with SelectionState in caret.js!
    """
    #: No selection active.
    none = enum.auto()
    #: Regular (character-wise) selection.
    normal = enum.auto()
    #: Line-wise selection (see AbstractCaret.toggle_selection's line arg).
    line = enum.auto()
class AbstractCaret(QObject):
    """Attribute ``caret`` of AbstractTab for caret browsing.

    All movement/selection methods are backend-specific and implemented by
    subclasses; ``count`` repeats the movement that many times.
    """
    #: Signal emitted when the selection was toggled.
    selection_toggled = pyqtSignal(SelectionState)
    #: Emitted when a ``follow_selection`` action is done.
    follow_selected_done = pyqtSignal()
    def __init__(self,
                 tab: 'AbstractTab',
                 mode_manager: modeman.ModeManager,
                 parent: QWidget = None) -> None:
        super().__init__(parent)
        # Starts as None; the concrete backend fills in the real widget later.
        self._widget = cast(_WidgetType, None)
        self._mode_manager = mode_manager
        # React to key mode changes (e.g. entering/leaving caret mode).
        mode_manager.entered.connect(self._on_mode_entered)
        mode_manager.left.connect(self._on_mode_left)
        self._tab = tab
    def _on_mode_entered(self, mode: usertypes.KeyMode) -> None:
        """Called when a key mode was entered."""
        raise NotImplementedError
    def _on_mode_left(self, mode: usertypes.KeyMode) -> None:
        """Called when a key mode was left."""
        raise NotImplementedError
    def move_to_next_line(self, count: int = 1) -> None:
        """Move the caret to the next line."""
        raise NotImplementedError
    def move_to_prev_line(self, count: int = 1) -> None:
        """Move the caret to the previous line."""
        raise NotImplementedError
    def move_to_next_char(self, count: int = 1) -> None:
        """Move the caret to the next character."""
        raise NotImplementedError
    def move_to_prev_char(self, count: int = 1) -> None:
        """Move the caret to the previous character."""
        raise NotImplementedError
    def move_to_end_of_word(self, count: int = 1) -> None:
        """Move the caret to the end of the current word."""
        raise NotImplementedError
    def move_to_next_word(self, count: int = 1) -> None:
        """Move the caret to the next word."""
        raise NotImplementedError
    def move_to_prev_word(self, count: int = 1) -> None:
        """Move the caret to the previous word."""
        raise NotImplementedError
    def move_to_start_of_line(self) -> None:
        """Move the caret to the start of the current line."""
        raise NotImplementedError
    def move_to_end_of_line(self) -> None:
        """Move the caret to the end of the current line."""
        raise NotImplementedError
    def move_to_start_of_next_block(self, count: int = 1) -> None:
        """Move the caret to the start of the next block."""
        raise NotImplementedError
    def move_to_start_of_prev_block(self, count: int = 1) -> None:
        """Move the caret to the start of the previous block."""
        raise NotImplementedError
    def move_to_end_of_next_block(self, count: int = 1) -> None:
        """Move the caret to the end of the next block."""
        raise NotImplementedError
    def move_to_end_of_prev_block(self, count: int = 1) -> None:
        """Move the caret to the end of the previous block."""
        raise NotImplementedError
    def move_to_start_of_document(self) -> None:
        """Move the caret to the start of the document."""
        raise NotImplementedError
    def move_to_end_of_document(self) -> None:
        """Move the caret to the end of the document."""
        raise NotImplementedError
    def toggle_selection(self, line: bool = False) -> None:
        """Toggle the selection (line-wise if *line* is True)."""
        raise NotImplementedError
    def drop_selection(self) -> None:
        """Drop the current selection."""
        raise NotImplementedError
    def selection(self, callback: Callable[[str], None]) -> None:
        """Get the selected text; the result is passed to *callback*."""
        raise NotImplementedError
    def reverse_selection(self) -> None:
        """Reverse the selection (swap its start and end)."""
        raise NotImplementedError
    def _follow_enter(self, tab: bool) -> None:
        """Follow a link by faking an enter press."""
        if tab:
            # Ctrl+Enter, so the link opens in a new tab.
            self._tab.fake_key_press(Qt.Key.Key_Enter, modifier=Qt.KeyboardModifier.ControlModifier)
        else:
            self._tab.fake_key_press(Qt.Key.Key_Enter)
    def follow_selected(self, *, tab: bool = False) -> None:
        """Follow the currently selected element (in a new tab if *tab*)."""
        raise NotImplementedError
class AbstractScroller(QObject):
    """Attribute ``scroller`` of AbstractTab to manage scroll position."""
    #: Signal emitted when the scroll position changed (int, int)
    perc_changed = pyqtSignal(int, int)
    #: Signal emitted before the user requested a jump.
    #: Used to set the special ' mark so the user can return.
    before_jump_requested = pyqtSignal()
    def __init__(self, tab: 'AbstractTab', parent: QWidget = None):
        super().__init__(parent)
        self._tab = tab
        # Filled in via _init_widget() once the backend widget exists.
        self._widget = cast(_WidgetType, None)
        if 'log-scroll-pos' in objects.debug_flags:
            # Opt-in debug logging of every scroll position change.
            self.perc_changed.connect(self._log_scroll_pos_change)
    @pyqtSlot()
    def _log_scroll_pos_change(self) -> None:
        """Log the new scroll position (debug flag only)."""
        log.webview.vdebug(  # type: ignore[attr-defined]
            "Scroll position changed to {}".format(self.pos_px()))
    def _init_widget(self, widget: _WidgetType) -> None:
        """Attach the backend widget this scroller operates on."""
        self._widget = widget
    def pos_px(self) -> QPoint:
        """Return the scroll position in pixels (backend-specific)."""
        raise NotImplementedError
    def pos_perc(self) -> Tuple[int, int]:
        """Return the scroll position as (x%, y%) (backend-specific)."""
        raise NotImplementedError
    def to_perc(self, x: float = None, y: float = None) -> None:
        raise NotImplementedError
    def to_point(self, point: QPoint) -> None:
        raise NotImplementedError
    def to_anchor(self, name: str) -> None:
        raise NotImplementedError
    def delta(self, x: int = 0, y: int = 0) -> None:
        raise NotImplementedError
    def delta_page(self, x: float = 0, y: float = 0) -> None:
        raise NotImplementedError
    # Directional scrolling; ``count`` is the number of steps/pages.
    def up(self, count: int = 1) -> None:
        raise NotImplementedError
    def down(self, count: int = 1) -> None:
        raise NotImplementedError
    def left(self, count: int = 1) -> None:
        raise NotImplementedError
    def right(self, count: int = 1) -> None:
        raise NotImplementedError
    def top(self) -> None:
        raise NotImplementedError
    def bottom(self) -> None:
        raise NotImplementedError
    def page_up(self, count: int = 1) -> None:
        raise NotImplementedError
    def page_down(self, count: int = 1) -> None:
        raise NotImplementedError
    def at_top(self) -> bool:
        raise NotImplementedError
    def at_bottom(self) -> bool:
        raise NotImplementedError
class AbstractHistoryPrivate:
    """Private API related to the history."""
    # Backend history object, set by AbstractTab._set_widget().
    _history: Union["QWebHistory", "QWebEngineHistory"]
    def serialize(self) -> bytes:
        """Serialize into an opaque format understood by self.deserialize."""
        raise NotImplementedError
    def deserialize(self, data: bytes) -> None:
        """Deserialize from a format produced by self.serialize."""
        raise NotImplementedError
    def load_items(self, items: Sequence[sessions.TabHistoryItem]) -> None:
        """Deserialize from a list of TabHistoryItems."""
        raise NotImplementedError
class AbstractHistory:
    """The history attribute of a AbstractTab."""
    def __init__(self, tab: 'AbstractTab') -> None:
        self._tab = tab
        # Backend history object, injected by AbstractTab._set_widget().
        self._history = cast(Union['QWebHistory', 'QWebEngineHistory'], None)
        self.private_api = AbstractHistoryPrivate()
    def __len__(self) -> int:
        raise NotImplementedError
    def __iter__(self) -> Iterable[Union['QWebHistoryItem', 'QWebEngineHistoryItem']]:
        raise NotImplementedError
    def _check_count(self, count: int) -> None:
        """Check whether the count is positive."""
        if count < 0:
            raise WebTabError("count needs to be positive!")
    def current_idx(self) -> int:
        raise NotImplementedError
    def current_item(self) -> Union['QWebHistoryItem', 'QWebEngineHistoryItem']:
        raise NotImplementedError
    def back(self, count: int = 1) -> None:
        """Go back in the tab's history."""
        self._check_count(count)
        idx = self.current_idx() - count
        if idx >= 0:
            self._go_to_item(self._item_at(idx))
        else:
            # Go as far back as possible, then tell the caller we hit the edge.
            self._go_to_item(self._item_at(0))
            raise WebTabError("At beginning of history.")
    def forward(self, count: int = 1) -> None:
        """Go forward in the tab's history."""
        self._check_count(count)
        idx = self.current_idx() + count
        if idx < len(self):
            self._go_to_item(self._item_at(idx))
        else:
            # Go as far forward as possible, then tell the caller we hit the edge.
            self._go_to_item(self._item_at(len(self) - 1))
            raise WebTabError("At end of history.")
    def can_go_back(self) -> bool:
        raise NotImplementedError
    def can_go_forward(self) -> bool:
        raise NotImplementedError
    def _item_at(self, i: int) -> Any:
        """Return the history item at index ``i`` (backend-specific)."""
        raise NotImplementedError
    def _go_to_item(self, item: Any) -> None:
        """Navigate to the given history item (backend-specific)."""
        raise NotImplementedError
    def back_items(self) -> List[Any]:
        raise NotImplementedError
    def forward_items(self) -> List[Any]:
        raise NotImplementedError
class AbstractElements:
    """Finding and handling of elements on the page."""
    # Callback type aliases for the async find_* APIs below.
    _MultiCallback = Callable[[Sequence['webelem.AbstractWebElement']], None]
    _SingleCallback = Callable[[Optional['webelem.AbstractWebElement']], None]
    _ErrorCallback = Callable[[Exception], None]
    def __init__(self, tab: 'AbstractTab') -> None:
        # Filled in via AbstractTab._set_widget().
        self._widget = cast(_WidgetType, None)
        self._tab = tab
    def find_css(self, selector: str,
                 callback: _MultiCallback,
                 error_cb: _ErrorCallback, *,
                 only_visible: bool = False) -> None:
        """Find all HTML elements matching a given selector async.
        If there's an error, the callback is called with a webelem.Error
        instance.
        Args:
            callback: The callback to be called when the search finished.
            error_cb: The callback to be called when an error occurred.
            selector: The CSS selector to search for.
            only_visible: Only show elements which are visible on screen.
        """
        raise NotImplementedError
    def find_id(self, elem_id: str, callback: _SingleCallback) -> None:
        """Find the HTML element with the given ID async.
        Args:
            callback: The callback to be called when the search finished.
                      Called with a WebEngineElement or None.
            elem_id: The ID to search for.
        """
        raise NotImplementedError
    def find_focused(self, callback: _SingleCallback) -> None:
        """Find the focused element on the page async.
        Args:
            callback: The callback to be called when the search finished.
                      Called with a WebEngineElement or None.
        """
        raise NotImplementedError
    def find_at_pos(self, pos: QPoint, callback: _SingleCallback) -> None:
        """Find the element at the given position async.
        This is also called "hit test" elsewhere.
        Args:
            pos: The QPoint to get the element for.
            callback: The callback to be called when the search finished.
                      Called with a WebEngineElement or None.
        """
        raise NotImplementedError
class AbstractAudio(QObject):
    """Handling of audio/muting for this tab."""
    #: Signal emitted when the muted state changed (new state as bool).
    muted_changed = pyqtSignal(bool)
    #: Signal emitted when the recently-audible state changed (bool).
    recently_audible_changed = pyqtSignal(bool)
    def __init__(self, tab: 'AbstractTab', parent: QWidget = None) -> None:
        super().__init__(parent)
        # Filled in via AbstractTab._set_widget().
        self._widget = cast(_WidgetType, None)
        self._tab = tab
    def set_muted(self, muted: bool, override: bool = False) -> None:
        """Set this tab as muted or not.
        Arguments:
            muted: Whether the tab is currently muted.
            override: If set to True, muting/unmuting was done manually and
                      overrides future automatic mute/unmute changes based on
                      the URL.
        """
        raise NotImplementedError
    def is_muted(self) -> bool:
        raise NotImplementedError
    def is_recently_audible(self) -> bool:
        """Whether this tab has had audio playing recently."""
        raise NotImplementedError
class AbstractTabPrivate:
    """Tab-related methods which are only needed in the core.
    Those methods are not part of the API which is exposed to extensions, and
    should ideally be removed at some point in the future.
    """
    def __init__(self, mode_manager: modeman.ModeManager,
                 tab: 'AbstractTab') -> None:
        # Filled in via AbstractTab._set_widget().
        self._widget = cast(_WidgetType, None)
        self._tab = tab
        self._mode_manager = mode_manager
    def event_target(self) -> Optional[QWidget]:
        """Return the widget events should be sent to."""
        raise NotImplementedError
    def handle_auto_insert_mode(self, ok: bool) -> None:
        """Handle `input.insert_mode.auto_load` after loading finished."""
        if not ok or not config.cache['input.insert_mode.auto_load']:
            return
        cur_mode = self._mode_manager.mode
        if cur_mode == usertypes.KeyMode.insert:
            # Already in insert mode, nothing to do.
            return
        def _auto_insert_mode_cb(
                elem: Optional['webelem.AbstractWebElement']
        ) -> None:
            """Called from JS after finding the focused element."""
            if elem is None:
                log.webview.debug("No focused element!")
                return
            if elem.is_editable():
                modeman.enter(self._tab.win_id, usertypes.KeyMode.insert,
                              'load finished', only_if_normal=True)
        self._tab.elements.find_focused(_auto_insert_mode_cb)
    def clear_ssl_errors(self) -> None:
        raise NotImplementedError
    def networkaccessmanager(self) -> Optional[QNetworkAccessManager]:
        """Get the QNetworkAccessManager for this tab.
        This is only implemented for QtWebKit.
        For QtWebEngine, always returns None.
        """
        raise NotImplementedError
    def shutdown(self) -> None:
        raise NotImplementedError
    def run_js_sync(self, code: str) -> Any:
        """Run javascript sync.
        Result will be returned when running JS is complete.
        This is only implemented for QtWebKit.
        For QtWebEngine, always raises UnsupportedOperationError.
        """
        raise NotImplementedError
    def _recreate_inspector(self) -> None:
        """Recreate the inspector when detached to a window.
        This is needed to circumvent a QtWebEngine bug (which wasn't
        investigated further) which sometimes results in the window not
        appearing anymore.
        """
        self._tab.data.inspector = None
        self.toggle_inspector(inspector.Position.window)
    def toggle_inspector(self, position: Optional[inspector.Position]) -> None:
        """Show/hide (and if needed, create) the web inspector for this tab."""
        tabdata = self._tab.data
        if tabdata.inspector is None:
            # Lazily create the inspector on first use.
            assert tabdata.splitter is not None
            tabdata.inspector = self._init_inspector(
                splitter=tabdata.splitter,
                win_id=self._tab.win_id)
            self._tab.shutting_down.connect(tabdata.inspector.shutdown)
            tabdata.inspector.recreate.connect(self._recreate_inspector)
            tabdata.inspector.inspect(self._widget.page())
        tabdata.inspector.set_position(position)
    def _init_inspector(self, splitter: 'miscwidgets.InspectorSplitter',
                        win_id: int,
                        parent: QWidget = None) -> 'AbstractWebInspector':
        """Get a WebKitInspector/WebEngineInspector.
        Args:
            splitter: InspectorSplitter where the inspector can be placed.
            win_id: The window ID this inspector is associated with.
            parent: The Qt parent to set.
        """
        raise NotImplementedError
class AbstractTab(QWidget):
    """An adapter for QWebView/QWebEngineView representing a single tab."""
    #: Signal emitted when a website requests to close this tab.
    window_close_requested = pyqtSignal()
    #: Signal emitted when a link is hovered (the hover text)
    link_hovered = pyqtSignal(str)
    #: Signal emitted when a page started loading
    load_started = pyqtSignal()
    #: Signal emitted when a page is loading (progress percentage)
    load_progress = pyqtSignal(int)
    #: Signal emitted when a page finished loading (success as bool)
    load_finished = pyqtSignal(bool)
    #: Signal emitted when a page's favicon changed (icon as QIcon)
    icon_changed = pyqtSignal(QIcon)
    #: Signal emitted when a page's title changed (new title as str)
    title_changed = pyqtSignal(str)
    #: Signal emitted when this tab was pinned/unpinned (new pinned state as bool)
    pinned_changed = pyqtSignal(bool)
    #: Signal emitted when a new tab should be opened (url as QUrl)
    new_tab_requested = pyqtSignal(QUrl)
    #: Signal emitted when a page's URL changed (url as QUrl)
    url_changed = pyqtSignal(QUrl)
    #: Signal emitted when a tab's content size changed
    #: (new size as QSizeF)
    contents_size_changed = pyqtSignal(QSizeF)
    #: Signal emitted when a page requested full-screen (bool)
    fullscreen_requested = pyqtSignal(bool)
    #: Signal emitted before load starts (URL as QUrl)
    before_load_started = pyqtSignal(QUrl)
    # Signal emitted when a page's load status changed
    # (argument: usertypes.LoadStatus)
    load_status_changed = pyqtSignal(usertypes.LoadStatus)
    # Signal emitted before shutting down
    shutting_down = pyqtSignal()
    # Signal emitted when a history item should be added
    history_item_triggered = pyqtSignal(QUrl, QUrl, str)
    # Signal emitted when the underlying renderer process terminated.
    # arg 0: A TerminationStatus member.
    # arg 1: The exit code.
    renderer_process_terminated = pyqtSignal(TerminationStatus, int)
    # Hosts for which a certificate error happened. Shared between all tabs.
    #
    # Note that we remember hosts here, without scheme/port:
    # QtWebEngine/Chromium also only remembers hostnames, and certificates are
    # for a given hostname anyways.
    _insecure_hosts: Set[str] = set()
    # Sub-APIs initialized by subclasses
    history: AbstractHistory
    scroller: AbstractScroller
    caret: AbstractCaret
    zoom: AbstractZoom
    search: AbstractSearch
    printing: AbstractPrinting
    action: AbstractAction
    elements: AbstractElements
    audio: AbstractAudio
    private_api: AbstractTabPrivate
    settings: websettings.AbstractSettings
    def __init__(self, *, win_id: int,
                 mode_manager: 'modeman.ModeManager',
                 private: bool,
                 parent: QWidget = None) -> None:
        """Register the new tab in the per-window tab registry."""
        utils.unused(mode_manager)  # needed for mypy
        self.is_private = private
        self.win_id = win_id
        self.tab_id = next(tab_id_gen)
        super().__init__(parent)
        self.registry = objreg.ObjectRegistry()
        tab_registry = objreg.get('tab-registry', scope='window',
                                  window=win_id)
        tab_registry[self.tab_id] = self
        objreg.register('tab', self, registry=self.registry)
        self.data = TabData()
        self._layout = miscwidgets.WrapperLayout(self)
        # Backend widget is injected later via _set_widget().
        self._widget = cast(_WidgetType, None)
        self._progress = 0
        self._load_status = usertypes.LoadStatus.none
        self._tab_event_filter = eventfilter.TabEventFilter(
            self, parent=self)
        self.backend: Optional[usertypes.Backend] = None
        # If true, this tab has been requested to be removed (or is removed).
        self.pending_removal = False
        self.shutting_down.connect(functools.partial(
            setattr, self, 'pending_removal', True))
        self.before_load_started.connect(self._on_before_load_started)
    def _set_widget(self, widget: Union["QWebView", "QWebEngineView"]) -> None:
        """Wire the backend widget into all sub-APIs of this tab."""
        # pylint: disable=protected-access
        self._widget = widget
        # FIXME:v4 ignore needed for QtWebKit
        self.data.splitter = miscwidgets.InspectorSplitter(
            win_id=self.win_id,
            main_webview=widget,  # type: ignore[arg-type,unused-ignore]
        )
        self._layout.wrap(self, self.data.splitter)
        self.history._history = widget.history()
        self.history.private_api._history = widget.history()
        self.scroller._init_widget(widget)
        self.caret._widget = widget
        self.zoom._widget = widget
        self.search._widget = widget
        self.printing._widget = widget
        self.action._widget = widget
        self.elements._widget = widget
        self.audio._widget = widget
        self.private_api._widget = widget
        self.settings._settings = widget.settings()
        self._install_event_filter()
        self.zoom.apply_default()
    def _install_event_filter(self) -> None:
        """Install the backend-specific event filter on the widget."""
        raise NotImplementedError
    def _set_load_status(self, val: usertypes.LoadStatus) -> None:
        """Setter for load_status."""
        if not isinstance(val, usertypes.LoadStatus):
            raise TypeError("Type {} is no LoadStatus member!".format(val))
        log.webview.debug("load status for {}: {}".format(repr(self), val))
        self._load_status = val
        self.load_status_changed.emit(val)
    def send_event(self, evt: QEvent) -> None:
        """Send the given event to the underlying widget.
        The event will be sent via QApplication.postEvent.
        Note that a posted event must not be re-used in any way!
        """
        # This only gives us some mild protection against re-using events, but
        # it's certainly better than a segfault.
        if getattr(evt, 'posted', False):
            raise utils.Unreachable("Can't re-use an event which was already "
                                    "posted!")
        recipient = self.private_api.event_target()
        if recipient is None:
            # https://github.com/qutebrowser/qutebrowser/issues/3888
            log.webview.warning("Unable to find event target!")
            return
        evt.posted = True  # type: ignore[attr-defined]
        QApplication.postEvent(recipient, evt)
    def navigation_blocked(self) -> bool:
        """Test if navigation is allowed on the current tab."""
        return self.data.pinned and config.val.tabs.pinned.frozen
    @pyqtSlot(QUrl)
    def _on_before_load_started(self, url: QUrl) -> None:
        """Adjust the title if we are going to visit a URL soon."""
        qtutils.ensure_valid(url)
        url_string = url.toDisplayString()
        log.webview.debug("Going to start loading: {}".format(url_string))
        self.title_changed.emit(url_string)
    @pyqtSlot(QUrl)
    def _on_url_changed(self, url: QUrl) -> None:
        """Update title when URL has changed and no title is available."""
        if url.isValid() and not self.title():
            self.title_changed.emit(url.toDisplayString())
        self.url_changed.emit(url)
    @pyqtSlot()
    def _on_load_started(self) -> None:
        """Reset per-load state when a new page load begins."""
        self._progress = 0
        self.data.viewing_source = False
        self._set_load_status(usertypes.LoadStatus.loading)
        self.load_started.emit()
    @pyqtSlot(usertypes.NavigationRequest)
    def _on_navigation_request(
            self,
            navigation: usertypes.NavigationRequest
    ) -> None:
        """Handle common acceptNavigationRequest code."""
        url = utils.elide(navigation.url.toDisplayString(), 100)
        log.webview.debug(
            f"navigation request: url {url} (current {self.url().toDisplayString()}), "
            f"type {navigation.navigation_type.name}, "
            f"is_main_frame {navigation.is_main_frame}"
        )
        if navigation.is_main_frame:
            self.data.last_navigation = navigation
        if not navigation.url.isValid():
            if navigation.navigation_type == navigation.Type.link_clicked:
                msg = urlutils.get_errstring(navigation.url,
                                             "Invalid link clicked")
                message.error(msg)
                self.data.open_target = usertypes.ClickTarget.normal
            log.webview.debug("Ignoring invalid URL {} in "
                              "acceptNavigationRequest: {}".format(
                                  navigation.url.toDisplayString(),
                                  navigation.url.errorString()))
            navigation.accepted = False
        # WORKAROUND for QtWebEngine >= 6.2 not allowing form requests from
        # qute:// to outside domains.
        needs_load_workarounds = (
            objects.backend == usertypes.Backend.QtWebEngine and
            version.qtwebengine_versions().webengine >= utils.VersionNumber(6, 2)
        )
        if (
            needs_load_workarounds and
            self.url() == QUrl("qute://start/") and
            navigation.navigation_type == navigation.Type.form_submitted and
            navigation.url.matches(
                QUrl(config.val.url.searchengines['DEFAULT']),
                urlutils.FormatOption.REMOVE_QUERY)
        ):
            log.webview.debug(
                "Working around qute://start loading issue for "
                f"{navigation.url.toDisplayString()}")
            navigation.accepted = False
            self.load_url(navigation.url)
        if (
            needs_load_workarounds and
            self.url() == QUrl("qute://bookmarks/") and
            navigation.navigation_type == navigation.Type.back_forward
        ):
            log.webview.debug(
                "Working around qute://bookmarks loading issue for "
                f"{navigation.url.toDisplayString()}")
            navigation.accepted = False
            self.load_url(navigation.url)
    @pyqtSlot(bool)
    def _on_load_finished(self, ok: bool) -> None:
        """Common post-load handling: save session, re-apply zoom, set title."""
        assert self._widget is not None
        if self.is_deleted():
            # https://github.com/qutebrowser/qutebrowser/issues/3498
            return
        if sessions.session_manager is not None:
            sessions.session_manager.save_autosave()
        self.load_finished.emit(ok)
        if not self.title():
            self.title_changed.emit(self.url().toDisplayString())
        self.zoom.reapply()
    def _update_load_status(self, ok: bool) -> None:
        """Update the load status after a page finished loading.
        Needs to be called by subclasses to trigger a load status update, e.g.
        as a response to a loadFinished signal.
        """
        url = self.url()
        is_https = url.scheme() == 'https'
        if not ok:
            loadstatus = usertypes.LoadStatus.error
        elif is_https and url.host() in self._insecure_hosts:
            # HTTPS, but this host had a certificate error before.
            loadstatus = usertypes.LoadStatus.warn
        elif is_https:
            loadstatus = usertypes.LoadStatus.success_https
        else:
            loadstatus = usertypes.LoadStatus.success
        self._set_load_status(loadstatus)
    @pyqtSlot()
    def _on_history_trigger(self) -> None:
        """Emit history_item_triggered based on backend-specific signal."""
        raise NotImplementedError
    @pyqtSlot(int)
    def _on_load_progress(self, perc: int) -> None:
        """Track and forward the load progress percentage."""
        self._progress = perc
        self.load_progress.emit(perc)
    def url(self, *, requested: bool = False) -> QUrl:
        """Return the current URL (or the requested one) — backend-specific."""
        raise NotImplementedError
    def progress(self) -> int:
        """Return the current load progress in percent."""
        return self._progress
    def load_status(self) -> usertypes.LoadStatus:
        """Return the current load status of this tab."""
        return self._load_status
    def _load_url_prepare(self, url: QUrl) -> None:
        """Common preparation before a URL load (validates, emits signal)."""
        qtutils.ensure_valid(url)
        self.before_load_started.emit(url)
    def load_url(self, url: QUrl) -> None:
        raise NotImplementedError
    def reload(self, *, force: bool = False) -> None:
        raise NotImplementedError
    def stop(self) -> None:
        raise NotImplementedError
    def fake_key_press(self,
                       key: Qt.Key,
                       modifier: Qt.KeyboardModifier = Qt.KeyboardModifier.NoModifier) -> None:
        """Send a fake key event to this tab."""
        press_evt = QKeyEvent(QEvent.Type.KeyPress, key, modifier, 0, 0, 0)
        release_evt = QKeyEvent(QEvent.Type.KeyRelease, key, modifier,
                                0, 0, 0)
        self.send_event(press_evt)
        self.send_event(release_evt)
    def dump_async(self,
                   callback: Callable[[str], None], *,
                   plain: bool = False) -> None:
        """Dump the current page's html asynchronously.
        The given callback will be called with the result when dumping is
        complete.
        """
        raise NotImplementedError
    def run_js_async(
            self,
            code: str,
            callback: Callable[[Any], None] = None, *,
            world: Union[usertypes.JsWorld, int] = None
    ) -> None:
        """Run javascript async.
        The given callback will be called with the result when running JS is
        complete.
        Args:
            code: The javascript code to run.
            callback: The callback to call with the result, or None.
            world: A world ID (int or usertypes.JsWorld member) to run the JS
                   in the main world or in another isolated world.
        """
        raise NotImplementedError
    def title(self) -> str:
        raise NotImplementedError
    def icon(self) -> QIcon:
        raise NotImplementedError
    def set_html(self, html: str, base_url: QUrl = QUrl()) -> None:
        raise NotImplementedError
    def set_pinned(self, pinned: bool) -> None:
        """Set the pinned state of this tab and notify listeners."""
        self.data.pinned = pinned
        self.pinned_changed.emit(pinned)
    def renderer_process_pid(self) -> Optional[int]:
        """Get the PID of the underlying renderer process.
        Returns None if the PID can't be determined or if getting the PID isn't
        supported.
        """
        raise NotImplementedError
    def grab_pixmap(self, rect: QRect = None) -> Optional[QPixmap]:
        """Grab a QPixmap of the displayed page.
        Returns None if we got a null pixmap from Qt.
        """
        if rect is None:
            pic = self._widget.grab()
        else:
            qtutils.ensure_valid(rect)
            # FIXME:v4 ignore needed for QtWebKit
            pic = self._widget.grab(rect)  # type: ignore[arg-type,unused-ignore]
        if pic.isNull():
            return None
        if machinery.IS_QT6:
            # FIXME:v4 cast needed for QtWebKit
            pic = cast(QPixmap, pic)
        return pic
    def __repr__(self) -> str:
        """Return a debug representation with tab id and (elided) URL."""
        try:
            qurl = self.url()
            url = qurl.toDisplayString(urlutils.FormatOption.ENCODE_UNICODE)
        except (AttributeError, RuntimeError) as exc:
            url = '<{}>'.format(exc.__class__.__name__)
        else:
            url = utils.elide(url, 100)
        return utils.get_repr(self, tab_id=self.tab_id, url=url)
    def is_deleted(self) -> bool:
        """Check if the tab has been deleted."""
        assert self._widget is not None
        # FIXME:v4 cast needed for QtWebKit
        if machinery.IS_QT6:
            widget = cast(QWidget, self._widget)
        else:
            widget = self._widget
        return sip.isdeleted(widget)
| qutebrowser/qutebrowser | qutebrowser/browser/browsertab.py | browsertab.py | py | 47,756 | python | en | code | 9,084 | github-code | 13 |
69981743059 | from xbmcswift2 import Plugin, xbmcgui
from resources.lib import mainaddon
plugin = Plugin()
url1 = "https://audioboom.com/channels/4829847.rss"
@plugin.route('/')
def main_menu():
    """Build the addon's root menu: one entry per episode-listing route."""
    art = "https://github.com/leopheard/TheScathingAtheist/blob/eaf30a25511fd135ac79ce974e0f8a407236bafd/resources/media/1.jpg?raw=true"
    entries = (
        (30001, 'episodes1'),
        (30000, 'episodes'),
    )
    return [
        {
            'label': plugin.get_string(string_id),
            'path': plugin.url_for(route_name),
            'thumbnail': art,
        }
        for string_id, route_name in entries
    ]
@plugin.route('/episodes1/')
def episodes1():
    """List playable podcast items parsed from the channel's RSS feed."""
    feed_soup = mainaddon.get_soup1(url1)
    podcast_entries = mainaddon.get_playable_podcast1(feed_soup)
    return mainaddon.compile_playable_podcast1(podcast_entries)
@plugin.route('/episodes/')
def episodes():
    """List playable podcast items via the alternate feed-parsing helpers."""
    feed_soup = mainaddon.get_soup1(url1)
    podcast_entries = mainaddon.get_playable_podcast(feed_soup)
    return mainaddon.compile_playable_podcast(podcast_entries)
if __name__ == '__main__':
    # Hand control to xbmcswift2, which dispatches to the route handlers above.
    plugin.run()
| leopheard/TheScathingAtheist | addon.py | addon.py | py | 1,271 | python | en | code | 0 | github-code | 13 |
29765124599 | # -*- coding: utf-8 -*-
"""
Created on Thu May 12 11:44:28 2022
-This class is used to do model training
- We can also use transfer learning but there are
two things to consider(input_size, output_size)
@author: aceso
"""
#%% module
import pandas as pd
import os
from sklearn.preprocessing import OneHotEncoder
import numpy as np
import datetime
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import TensorBoard
from sentiment_analysis_function import ExploratoryDataAnalysis, ModelBuilding # import file that we just created
from sentiment_analysis_function import ModelEvaluation
#%% Constant
# Remote CSV of labelled IMDB reviews used as the training corpus.
URL = "https://raw.githubusercontent.com/Ankit152/IMDB-sentiment-analysis/master/IMDB-Dataset.csv"
# Where the fitted tokenizer vocabulary is persisted (JSON).
TOKENIZER_PATH = os.path.join(os.getcwd(), 'tokenizer_data2.json')
PATH_LOGS = os.path.join(os.getcwd(), 'log')
# Timestamped TensorBoard run directory so repeated runs don't collide.
log_dir = os.path.join(PATH_LOGS, datetime.datetime.now().strftime('%Y%m%d-%H%M%S'))
MODEL_SAVE_PATH = os.path.join(os.getcwd(), 'model.h5')
#%% EDA
# Step 1) Import data
df = pd.read_csv(URL)
review = df["review"]
sentiment = df["sentiment"]
# Step 2) Data cleaning
eda = ExploratoryDataAnalysis()  # helper class from sentiment_analysis_function
review = eda.remove_tags(review)  # strip HTML tags from review text
review = eda.lower_split(review)  # lowercase + split into tokens
# Step 3) Feature Selection
# Step 4) Data vectorization
# Tokenization (fits/saves the tokenizer to TOKENIZER_PATH)
review = eda.sentiment_tokenizer(review, TOKENIZER_PATH)
print(review[2])  # sanity check: one tokenized review
# Pad Sequence so every review has the same length
review = eda.sentiment_pad_sequences(review)
# Step 5) Data preprocessing <-- not in function bcos only use once
# One hot encoding of the positive/negative labels
hot_encoder = OneHotEncoder(sparse=False)
nb_categories = len(sentiment.unique())
encoded_sentiment = hot_encoder.fit_transform(np.expand_dims(sentiment, axis=-1))
# Train test split(X = review, y = sentiment); fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(review,
                                                    encoded_sentiment,
                                                    test_size=0.3,
                                                    random_state=123)
# expand training data into 3D array (samples, timesteps, features)
X_train = np.expand_dims(X_train, axis=-1)
X_test = np.expand_dims(X_test, axis=-1)
# inverse-transform a couple of labels to check positive vs negative encoding
# positive
print(y_train[0])  # [0,1]
print(hot_encoder.inverse_transform(np.expand_dims(y_train[0], axis=0)))
# negative
print(y_train[1])  # [1,0]
print(hot_encoder.inverse_transform(np.expand_dims(y_train[1], axis=0)))
#%% Model Building
mb = ModelBuilding()
num_words = 10000  # vocabulary size fed to the embedding/LSTM model
nb_categories = len(sentiment.unique())  # NOTE(review): recomputed; same value as above
model = mb.lstm_layer(num_words, nb_categories)
model.compile(optimizer="adam",
              loss="categorical_crossentropy",
              metrics="acc")
# Log training curves/histograms for TensorBoard.
tensorboard = TensorBoard(log_dir, histogram_freq=1)
model.fit(X_train, y_train, epochs=3,
          validation_data=(X_test, y_test),
          callbacks=tensorboard)
#%% Model Evaluation & Analysis
# Predict one test sample at a time; 2 columns = one-hot class probabilities.
predicted_advanced = np.empty([len(X_test), 2])
for i, test in enumerate(X_test):
    predicted_advanced[i,:] = model.predict(np.expand_dims(test, axis=0))
# Model analysis: collapse one-hot vectors back to class indices.
y_pred = np.argmax(predicted_advanced, axis=1)
y_true = np.argmax(y_test, axis=1)
evals = ModelEvaluation()
result = evals.evaluation(y_true, y_pred)
#%% Model Saving
model.save(MODEL_SAVE_PATH)
| AceSongip/Sentiment_Analysis | sentiment_analysis_training.py | sentiment_analysis_training.py | py | 3,260 | python | en | code | 0 | github-code | 13 |
35794183853 | from __future__ import division
from common import *
def tfqmr(B, A, x, b, tolerance, maxiter, progress, relativeconv=False, callback=None):
    """Solve A*x = b with the transpose-free QMR (TFQMR) method.

    Args:
        B: Preconditioner (applied as B*vector).
        A: System operator (applied as A*vector).
        x: Initial guess; updated in place and also returned.
        b: Right-hand side.
        tolerance: Convergence threshold on the residual estimate.
        maxiter: Maximum number of outer iterations.
        progress: Progress tracker; incremented once per iteration
                  (presumably an object with __iadd__ — a plain int would
                  have no visible effect; TODO confirm against `common`).
        relativeconv: If True, scale the tolerance by the initial residual norm.
        callback: Optional; called as callback(k=..., x=..., r=...) each iteration.

    Returns:
        (x, residuals, alphas, betas) — solution, residual-estimate history,
        and the alpha/beta recurrence coefficients.
    """
    #####
    # Adapted from PyKrylov (https://github.com/dpo/pykrylov; LGPL license)
    #####
    r0 = b - A*x
    rho = inner(r0,r0)
    alphas = []
    betas = []
    residuals = [sqrt(rho)]
    if relativeconv:
        # Convert absolute tolerance into one relative to the initial residual.
        tolerance *= residuals[0]
    if residuals[-1] < tolerance:
        # Initial guess already converged.
        return x, residuals, [], []
    y = r0.copy() # Initial residual vector
    w = r0.copy()
    d = 0*b
    theta = 0.0
    eta = 0.0
    k = 0
    z = B*y
    u = A*z
    v = u.copy()
    while k < maxiter:
        k += 1
        progress += 1
        sigma = inner(r0,v)
        alpha = rho/sigma
        # First pass
        w -= alpha * u
        d *= theta * theta * eta / alpha
        d += z
        residNorm = residuals[-1]
        theta = norm(w)/residNorm
        c = 1.0/sqrt(1 + theta*theta)
        residNorm *= theta * c
        eta = c * c * alpha
        x += eta * d
        m = 2.0 * k - 1.0
        # residNorm * sqrt(m+1) is the QMR upper bound on the true residual.
        if residNorm * sqrt(m+1) < tolerance:
            break
        # Second pass
        m += 1
        y -= alpha * v
        z = B*y
        u = A*z
        w -= alpha * u
        d *= theta * theta * eta / alpha
        d += z
        theta = norm(w)/residNorm
        c = 1.0/sqrt(1 + theta*theta)
        residNorm *= theta * c
        eta = c * c * alpha
        x += eta * d
        residual = residNorm * sqrt(m+1)
        # Call user provided callback with solution
        if callable(callback):
            callback(k=k, x=x, r=residual)
        residuals.append(residual)
        if residual < tolerance or k >= maxiter:
            break
        # Final updates
        rho_next = inner(r0,w)
        beta = rho_next/rho
        rho = rho_next
        alphas.append(alpha)
        betas.append(beta)
        # Update y
        y *= beta
        y += w
        # Partial update of v with current u
        v *= beta
        v += u
        v *= beta
        # Update u
        z = B*y
        u = A*z
        # Complete update of v
        v += u
    return x, residuals, alphas, betas
| kamccormack/EQporoelasticity | local_lib/block/iterative/tfqmr.py | tfqmr.py | py | 2,169 | python | en | code | 6 | github-code | 13 |
3542555182 | from songthread.services import SongthreadService
from django.test import TestCase
from music.models import Track
from math import ceil
class SongthreadServiceTestCase(TestCase):
    """Tests for SongthreadService's Spotify track lookup.

    NOTE(review): both tests appear to hit the live Spotify lookup service
    over the network, so they can fail offline or if track metadata changes.
    """
    def test_populate_track_using_spotify_lookup_returns_correct_results(self):
        # NOTE(review): `track = Track` binds the *class*, not an instance, so
        # spotify_uri is set as a class attribute — works, but leaks state
        # between tests. Probably intended: `track = Track()`.
        track = Track
        track.spotify_uri = 'spotify:track:6NmXV4o6bmp704aPGyTVVG'
        songthread_service = SongthreadService()
        songthread_service.populate_track_using_spotify_lookup(track)
        self.assertEqual(u'B\xf8n Fra Helvete - Live', track.name)
        self.assertEqual(u'Mann Mot Mann (2002)', track.album)
        self.assertEqual(u'Kaizers Orchestra', track.artists)
        self.assertEqual(318, ceil(track.length))
        self.assertEqual(u'2', track.track_number)
    def test_make_spotify_api_call_returns_valid_json(self):
        # Same class-attribute pattern as above (see NOTE in the first test).
        track = Track
        track.spotify_uri = 'spotify:track:6NmXV4o6bmp704aPGyTVVG'
        songthread_service = SongthreadService()
        track_json = songthread_service.make_spotify_api_call(track)
        # Only checks the expected keys exist, not their values.
        self.assertTrue('name' in track_json)
        self.assertTrue('album' in track_json)
        self.assertTrue('artists' in track_json)
        self.assertTrue('length' in track_json)
        self.assertTrue('track-number' in track_json)
| abbas123456/solocover | songthread/tests.py | tests.py | py | 1,276 | python | en | code | 0 | github-code | 13 |
73533696976 | from lib.embedding.vectorizer import FaceVectorizer
from lib.detection.detector import Detector
from PIL import Image
from torchvision import transforms
import numpy as np
import pandas as pd
from PIL import Image, ImageDraw, ImageFont
import torch
from utils.utils import *
#def compare_imgs(name, img):
# CUDA for PyTorch
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
# Let cuDNN pick the fastest convolution algorithms for fixed input sizes.
torch.backends.cudnn.benchmark = True
# Known-identity embeddings: one or more rows per person, indexed by name.
anchor_embeddings = pd.read_csv("../database/embeddings/embeddings.csv",index_col=0)
# Face crop size expected by the embedding network (presumably FaceNet 160x160).
embeddings_input_size = (160, 160)
def show_detections(img, detections):
    """Display a copy of *img* with a rectangle drawn around each detection."""
    annotated = img.copy()
    pen = ImageDraw.Draw(annotated)
    for box in detections:
        left, top, right, bottom = box[0], box[1], box[2], box[3]
        pen.rectangle([left, top, right, bottom], width=4)
    annotated.show()
def show_recognitions(img, recognitions):
    """Display *img* with a labelled box per recognition.

    Recognized faces are drawn in green with the person's name; unmatched
    faces (empty name) are drawn in red.
    """
    annotated = img.copy()
    pen = ImageDraw.Draw(annotated)
    label_font = ImageFont.truetype("../fonts/arial.ttf", int(img.size[0]*0.04))
    for name, box in recognitions.values():
        color = '#FF0000' if name == "" else '#00FF00'
        left, top, right, bottom = box[0], box[1], box[2], box[3]
        pen.rectangle([left, top, right, bottom], width=4, outline=color)
        pen.text([left + 10, top], name.capitalize(), font=label_font, fill=color)
    annotated.show()
def recognize_detections(img, detections, embeddings):
    """Match each detected face embedding against the anchor database.

    Parameters:
        img: unused here; kept for interface compatibility with callers.
        detections: sequence of bounding boxes, one per detected face.
        embeddings: sequence of face vectors aligned with `detections`.

    Returns:
        dict mapping detection index -> [identified_name, bbox]; the name is
        "" when no anchor lies within the distance threshold.
    """
    # PERF: the per-person mean anchor vectors are loop-invariant, but the
    # original recomputed them for every detection (O(detections * persons)
    # DataFrame reductions). Compute them once up front.
    anchors = {
        person: np.mean(anchor_embeddings.loc[person], axis=0).to_numpy()
        for person in anchor_embeddings.index.unique()
    }
    match_threshold = 0.45  # max L2 distance accepted as an identification
    recognitions = {}
    for idx, bbox in enumerate(detections):
        face_vector = embeddings[idx]
        person_identified = ""
        min_distance = 10  # sentinel larger than any accepted distance
        for person_name, anchor_vector in anchors.items():
            dist = np.linalg.norm(face_vector - anchor_vector)
            if (dist < match_threshold) and (dist < min_distance):
                person_identified = person_name
                min_distance = dist
        recognitions[idx] = [person_identified, bbox]
    return recognitions
if __name__ == '__main__':
    # Demo entry point: detect, embed and recognize the faces in a sample
    # image, then display the annotated result.
    # NOTE(review): absolute, user-specific path — parameterize before reuse.
    img_path = '/home/javier/Ramblings/FaceRecognition/sample_imgs/lore_boat.jpg'
    img = Image.open(img_path)
    det = Detector("yolo")
    faceVec = FaceVectorizer("facenet")
    detections = det.detect_people(img)
    embeddings = faceVec.find_embeddings(img, detections)
    recognitions = recognize_detections(img, detections, embeddings)
    show_recognitions(img, recognitions)
| jsmithdlc/FaceRecognition | src/recognize.py | recognize.py | py | 2,611 | python | en | code | 0 | github-code | 13 |
14263943030 | from django.conf.urls import patterns,include, url
urlpatterns = patterns('accounts.views',
    # NOTE(review): the `patterns()` helper and string view references are
    # deprecated since Django 1.8 and removed in 1.10; migrate to a plain
    # list of url()/path() entries before upgrading Django.
    #url(r'^$','index',name='accounts_index'),
    # Signup, signin and signout
    url(r'^signup/$','signup',name='signup'),
    url(r'^signin/$','signin',name='signin'),
    url(r'^signout/$','signout',name='signout'),
    # Account activation via a 40-character confirmation code.
    url(r'^confirm/(?P<code>\w{40})/$','confirm',name='confirm'),
)
32496609786 | """-----------------------------------------
一、采集我的人脸数据集
获取本人的人脸数据集10000张,使用的是dlib来
识别人脸,虽然速度比OpenCV识别慢,但是识别效
果更好。
人脸大小:64*64
-----------------------------------------"""
import cv2
import dlib
import os
import random
import tkinter as tk
from tkinter import messagebox
def img_change(img, light=1, bias=0):
    """Adjust contrast (*light*, multiplicative) and brightness (*bias*,
    additive) of an image array in place and return it.

    Matches the original per-pixel behaviour exactly: only the first three
    channels are touched, results are clamped to [0, 255] and truncated to
    integers (the original applied int() per pixel).
    """
    import numpy as np  # local import: module does not import numpy at top level
    # PERF: vectorized replacement for the original triple Python loop,
    # which did O(width*height*3) interpreter-level work per frame.
    adjusted = img[:, :, :3].astype(np.float64) * light + bias
    np.clip(adjusted, 0, 255, out=adjusted)
    # Assigning float64 back into the integer image truncates toward zero,
    # just like the original int() conversion (values are already clipped).
    img[:, :, :3] = adjusted
    return img
"""特征提取器:dlib自带的frontal_face_detector"""
detector = dlib.get_frontal_face_detector()
def jzrl():
    # "Load face" button handler: capture up to 100 face crops from the
    # default webcam for the name typed in the Entry widget and save them as
    # 64x64 JPEGs under ./face/<name>/. Reads the module-level `e` (tkinter
    # Entry) and `detector` (dlib) globals.
    name = e.get()
    print(name)
    print(name.encode('UTF-8').isalpha())
    # Only ASCII-alphabetic names are accepted (the name becomes a directory).
    if name.encode('UTF-8').isalpha():
        faces_my_path = './face/'
        size = 64  # saved face crops are 64x64 pixels
        if not os.path.exists(faces_my_path+name):
            os.makedirs(faces_my_path+name)
            cap = cv2.VideoCapture(0)
            # NOTE(review): cap.release() / cv2.destroyAllWindows() are never
            # called, so the camera stays claimed until the process exits.
            num = 1
            print(str(num))
            #print(cv2.COLOR_BGR2GRAY)
            while True:
                # Stops after 100 saved crops (the trailing comment suggests
                # 10000 was once intended).
                if (num <= 100):  # 10000
                    print('Being processed picture %s' % num)
                    success, img = cap.read()
                    gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    """使用特征提取器进行人脸检测"""
                    dets = detector(gray_img, 1)
                    """--------------------------------------------------------------------
                    使用enumerate 函数遍历序列中的元素以及它们的下标,i为人脸序号,d为i对应的元素;
                    left:人脸左边距离图片左边界的距离 ;right:人脸右边距离图片左边界的距离
                    top:人脸上边距离图片上边界的距离 ;bottom:人脸下边距离图片上边界的距离
                    ----------------------------------------------------------------------"""
                    for i, d in enumerate(dets):
                        # Clamp the detection box to the image (negative
                        # coordinates can occur near the frame edge).
                        # Note the row slice is top:bottom, column slice
                        # left:right despite the x/y variable names.
                        x1 = d.top() if d.top() > 0 else 0
                        y1 = d.bottom() if d.bottom() > 0 else 0
                        x2 = d.left() if d.left() > 0 else 0
                        y2 = d.right() if d.right() > 0 else 0
                        face = img[x1:y1, x2:y2]
                        """调整图片的对比度与亮度, 对比度与亮度值都取随机数,这样能增加样本的多样性"""
                        face = img_change(face, random.uniform(0.5, 1.5), random.randint(-50, 50))
                        face = cv2.resize(face, (size, size))
                        cv2.imshow('image', face)
                        cv2.imwrite(faces_my_path + name+ '/' + str(num) + '.jpg', face)
                        num += 1
                    # ESC (key code 27) aborts the capture loop early.
                    key = cv2.waitKey(30)
                    if key == 27:
                        break
                else:
                    print('Finished!')
                    tk.messagebox.showinfo(title='完成', message='人脸加载完成')
                    break
        else:
            # A directory for this name already exists.
            tk.messagebox.showinfo(title='请重试', message='人脸已存在')
            print('已存在人脸数据')
    else:
        tk.messagebox.showinfo(title='请重试', message='请使用英文字符')
    # Clear the entry field regardless of outcome.
    e.delete(0, "end")
# Minimal tkinter UI: a name entry plus "load face" / "exit" buttons.
window = tk.Tk()
window.title('加载人脸')
window.geometry('240x200')
l = tk.Label(window, text='输入姓名', font=('Arial', 12), width=15, height=2)
l.pack()
# `e` is read by jzrl() above as a module-level global.
e = tk.Entry(window, show=None)
e.pack()
b1 = tk.Button(window,text='加载人脸', width=15, height=2, command=jzrl)
b1.pack()
b2 = tk.Button(window,text='退出', width=15, height=2, command=window.quit)
b2.pack()
# Blocks until the window is closed.
window.mainloop()
#### | sytsunboy2008/face-recognition | load.py | load.py | py | 4,079 | python | en | code | 0 | github-code | 13 |
19265583660 | '''
obj serilization
reg expression
pip
'''
import pickle; #obj serilization
'''
dumps -> obj to binary serial
loads -> bin to obj deserial
dump -> obj to bin but save as file
load -> load bin data from file and convert back to obj
'''
'''
L1= list(range(100));
print(L1);
L1_b=pickle.dumps(L1);
print(L1_b);
L2=pickle.loads(L1_b);
print(L2);
'''
class Test():
    """Small sample object used to demonstrate pickle serialization."""

    def __init__(self):
        self.i = 100
        self.f = 111.23
        self.b = True
        self.s = "printndk"

    def display(self):
        """Print the four attributes comma-separated."""
        # BUG FIX: the original format string was "{],{},{},{}" — the stray
        # "]" made str.format raise ValueError at runtime.
        print("{},{},{},{}".format(self.i, self.f, self.b, self.s))
'''
t1=Test();
t1_b= pickle.dumps(t1);
t2=pickle.loads(t1_b);
t1=Test();
file=open("MyTest.dat","wb");
pickle.dump(t1,file);
'''
t1 = Test()
# BUG FIX: pickle.load() takes the open file object, not an instance; the
# original `pickle.load(t1)` raised TypeError and discarded the result.
# A context manager closes the file even if deserialization fails.
# NOTE(review): MyTest.dat is only written by the commented-out block above;
# re-enable it (or ship the file) before running this script.
with open("MyTest.dat", "rb") as file:
    t2 = pickle.load(file)
71366116177 | import warnings
from benchopt import BaseSolver, safe_import_context
with safe_import_context() as import_ctx:
from sklearn.exceptions import ConvergenceWarning
from sklearn.svm import LinearSVC
class Solver(BaseSolver):
    """Benchopt solver wrapping scikit-learn's liblinear hinge-loss SVM."""
    name = 'sklearn'
    install_cmd = 'pip'
    requirements = ['scikit-learn']
    # Benchopt parameter grid; only the liblinear backend is benchmarked.
    parameters = {
        'solver': ['liblinear'],
    }
    parameter_template = "solver={solver}"
    def set_objective(self, X, y, C):
        # Store the benchmark problem: design matrix X, labels y, penalty C.
        self.X, self.y, self.C = X, y, C
        n, d = X.shape  # d is unused; kept for clarity of the unpacking
        # run() deliberately caps max_iter below convergence, so silence the
        # resulting ConvergenceWarning spam.
        warnings.filterwarnings('ignore', category=ConvergenceWarning)
        # C is divided by n to match the sum-of-losses objective used by the
        # benchmark (sklearn's LinearSVC averages implicitly via C scaling);
        # tol is tiny so stopping is governed by max_iter alone.
        self.clf = LinearSVC(C=self.C/n, penalty='l2', dual=True,
                             fit_intercept=False, tol=1e-12,
                             loss='hinge')
    def run(self, n_iter):
        # Benchopt calls this with increasing iteration budgets.
        self.clf.max_iter = n_iter
        self.clf.fit(self.X, self.y)
    def get_result(self):
        # Flatten (1, d) coef_ into the beta vector benchopt expects.
        return dict(beta=self.clf.coef_.flatten())
| softmin/ReHLine-benchmark | benchmark_SVM/solvers/sklearn.py | sklearn.py | py | 941 | python | en | code | 2 | github-code | 13 |
21145568360 | from contextlib import suppress
from os import remove
from secrets import token_hex
import math
import time
import boto3
from pyrogram import Client, filters
from pyrogram.errors import RPCError, MessageNotModified
from pyrogram.filters import media, poll, private, user
from pyrogram.types import Message, InlineKeyboardButton, InlineKeyboardMarkup
# Cloudflare R2 (S3-compatible) endpoint used by CloudStorage below.
# SECURITY(review): live R2 credentials, a Telegram API hash and the bot
# token are hard-coded here and committed to source control — rotate them
# and load from environment variables / a secrets manager instead.
ENDPOINT = "https://ba9816a848610aed92b1359ca60ff37a.r2.cloudflarestorage.com/"
ACCESS_KEY = "24c2515db578726cd36ea0772946474b"
SECRET_KEY = "b79167444ad27359f7e587bff2bf72bea9003add5a6411d0ce98276645133e36"
# Public base URL prepended to uploaded object keys.
APP_URL = "https://dumpstore.online"
app2 = Client("bot", 2992000, "235b12e862d71234ea222082052822fd",
              bot_token="5822153402:AAGcbKRO1aKU2zsvRNDHRw3YKHsP6uJb6X0")
def genetare_key():
    """Return a random 8-character lowercase-hex key (4 random bytes).

    The misspelled name ("genetare") is kept because callers reference it.
    """
    key = token_hex(4)
    return key
async def progress_for_pyrogram(
    current,
    total,
    ud_type,
    message,
    start
):
    """Pyrogram progress callback: edit `message` with a bar, speed and ETA.

    current/total: bytes transferred / expected; ud_type: caption prefix;
    message: the status message to edit; start: time.time() at transfer start.
    """
    now = time.time()
    diff = now - start
    # Throttle edits to roughly every 10 seconds, plus a final update.
    # NOTE(review): on the very first calls diff is ~0, which passes this
    # check and makes `current / diff` a ZeroDivisionError risk — confirm.
    if round(diff % 10.00) == 0 or current == total:
        percentage = current * 100 / total
        speed = current / diff
        elapsed_time = round(diff) * 1000
        time_to_completion = round((total - current) / speed) * 1000
        estimated_total_time = elapsed_time + time_to_completion
        elapsed_time = TimeFormatter(milliseconds=elapsed_time)
        estimated_total_time = TimeFormatter(milliseconds=estimated_total_time)
        # 20-segment bar: one segment per 5% of progress.
        progress = "[{0}{1}] \n**Process**: {2}%\n".format(
            ''.join(["â—†" for i in range(math.floor(percentage / 5))]),
            ''.join(["â—‡" for i in range(20 - math.floor(percentage / 5))]),
            round(percentage, 2))
        tmp = progress + "{0} of {1}\n**Speed:** {2}/s\n**ETA:** {3}\n".format(
            humanbytes(current),
            humanbytes(total),
            humanbytes(speed),
            estimated_total_time if estimated_total_time != '' else "0 s"
        )
        try:
            await message.edit(
                text="{}\n {}".format(
                    ud_type,
                    tmp
                )
            )
        except:
            # NOTE(review): bare except silently swallows everything, even
            # task cancellation — narrow to MessageNotModified / RPCError.
            pass
def humanbytes(size):
    """Format a byte count with binary (1024-based) unit prefixes.

    Returns "" for a falsy size; otherwise e.g. 2048 -> "2.0 KiB".
    """
    if not size:
        return ""
    prefixes = {0: ' ', 1: 'Ki', 2: 'Mi', 3: 'Gi', 4: 'Ti'}
    step = 1 << 10
    value = size
    rank = 0
    while value > step:
        value /= step
        rank += 1
    return "{} {}B".format(round(value, 2), prefixes[rank])
def TimeFormatter(milliseconds: int) -> str:
    """Render a millisecond duration as e.g. "1d, 2h, 3m, 4s, 5ms".

    Zero-valued units are omitted entirely; an all-zero input yields "".
    """
    seconds, milliseconds = divmod(int(milliseconds), 1000)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    days, hours = divmod(hours, 24)
    parts = []
    for amount, suffix in ((days, "d"), (hours, "h"), (minutes, "m"),
                           (seconds, "s"), (milliseconds, "ms")):
        if amount:
            parts.append("{}{}".format(amount, suffix))
    return ", ".join(parts)
class CloudStorage:
    """Thin wrapper around an S3-compatible Cloudflare R2 bucket."""
    def __init__(self, file: str = None):
        # boto3 resource pointed at the R2 endpoint via module constants.
        self.s3 = boto3.resource(
            "s3",
            endpoint_url=ENDPOINT,
            aws_access_key_id=ACCESS_KEY,
            aws_secret_access_key=SECRET_KEY,
        )
        self.file = file  # local path of the file to upload
        self.bucket = self.s3.Bucket("f2lstorage")
    def upload(self):
        # Object key layout: "<8-hex-chars>/<original filename>"; the random
        # prefix keeps same-named files from colliding.
        token = f"{genetare_key()}/{self.file.split('/')[-1]}"
        self.bucket.upload_file(self.file, token)
        # Public download URL served from APP_URL.
        return f"{APP_URL}/{token}"
@app2.on_message(filters.command("start") & private)
async def start(_, m: Message):
    """Reply to /start (private chats only) with usage instructions."""
    await m.reply_text(
        "Send Me a File and I will send You a link for it.\n\nIts that simple. 😄\nPowered by : @MAK_Dump"
    )
    return
@app2.on_message(~poll & private & media)
async def download(_, m: Message):
    # Any private media message (except polls): download it, archive it,
    # upload it to R2 and reply with the public link.
    msg1 = await m.reply_text("Downloading...")
    try:
        file = await m.download(progress=progress_for_pyrogram, progress_args=("Downloading...", msg1, time.time()))
    except (RPCError, MessageNotModified):
        # NOTE(review): if the download itself fails, `file` is left unbound
        # and CloudStorage(file) below raises NameError — confirm handling.
        pass
    await msg1.edit("Uploading to cloudflare R2 storage...")
    # Archive a copy of the original message in the dump channel.
    await m.forward(-1001883346027)
    cloud = CloudStorage(file).upload()
    # Remove the local temp file; ignore it if already gone.
    with suppress(OSError):
        remove(file)
    await msg1.edit(f"Your file is ready:\n\n `{cloud}`", reply_markup=InlineKeyboardMarkup(
        [[InlineKeyboardButton("🔗 Download link", url=cloud)]]
    ))
print("Started!")
app2.run()
print("Bye!")
| saintcurfew/filetolink | main.py | main.py | py | 4,318 | python | en | code | 0 | github-code | 13 |
16376292326 | from BSTNode import Node
class BST:
    """Binary search tree of unique, mutually comparable values."""

    def __init__(self) -> None:
        self.root = None
        # BUG FIX: `size` used to be a class attribute mutated via
        # `BST.size += 1`, so every tree shared a single counter and the
        # count survived across instances. Track it per instance instead.
        self.size = 0

    def insert(self, value):
        """
        Returns 'True' if node was successfully inserted.
        Returns 'False' if node was not successfully inserted (duplicate).
        """
        newNode = Node(value)
        if not self.root:
            self.root = newNode
            self.size += 1
            return True
        temp = self.root
        while True:
            if newNode.value == temp.value:
                return False  # duplicates are rejected
            if newNode.value < temp.value:
                if not temp.left:
                    temp.left = newNode
                    self.size += 1
                    return True
                temp = temp.left
            else:
                if not temp.right:
                    temp.right = newNode
                    self.size += 1
                    return True
                temp = temp.right

    def isContains(self, value):
        """
        Returns 'True' if BST contains the value.
        Returns 'False' if BST doesn't contain the value.
        """
        temp = self.root
        while temp:
            if value == temp.value:
                return True
            temp = temp.left if value < temp.value else temp.right
        return False

    def minimumNode(self):
        # Left-most node holds the minimum. Returns -1 on an empty tree
        # (kept for backward compatibility with existing callers).
        if not self.root:
            return -1
        temp = self.root
        while temp.left:
            temp = temp.left
        return temp

    def minimumValue(self):
        # NOTE: raises AttributeError on an empty tree (minimumNode() -> -1).
        return self.minimumNode().value

    def maximumNode(self):
        # Right-most node holds the maximum; -1 on an empty tree.
        if not self.root:
            return -1
        temp = self.root
        while temp.right:
            temp = temp.right
        return temp

    def maximumValue(self):
        # NOTE: raises AttributeError on an empty tree (maximumNode() -> -1).
        return self.maximumNode().value
| hilmiguner/Python-Projects | Data Structures/BinarySearchTree/BST.py | BST.py | py | 2,054 | python | en | code | 1 | github-code | 13 |
20174806453 | import json
from datetime import datetime, timedelta
from channels.layers import get_channel_layer
from celery import shared_task
from common.serializers import UUIDEncoder
from common.timer import Timer
from databases.classes import DatabaseConnector
from databases.models import Database
from dataframes import DataframeFromDB
from nn_models import TrialLinearModel, BasicLinearModel, PremiumLinearModel
from projects.models import Project, ProjectConfigFile
from projects.models import ProjectConfiguration
from projects.serializers import ProjectSerializer
from projects.tasks import call_socket, update_project_configuration, create_stats
channel_layer = get_channel_layer()
# from datetime import datetime, timedelta
@shared_task(name="Regression Model Retraining")
def retrain_regression_model(request, project_id, token, account_type):
project = Project.objects.get(id=project_id)
project_config = ProjectConfiguration.objects.get(project=project)
project_config_file = ProjectConfigFile.objects.get(
project_configuration=project_config)
if project_config.created_from_database:
database = Database.objects.get(id=project_config.database.id)
model_url = project_config_file.file_url
table_name = project.project_name
db_connector = DatabaseConnector(
database_host=database.database_host,
database_name=database.database_name,
database_port=database.database_port,
database_type=database.database_type,
database_user=database.database_user,
database_password=database.database_password,
)
db_connector.connect()
# query = f'select * from {table_name} except all select * from revolve_{table_name}_records'
query = f'select * from {table_name}'
results = db_connector.execute_query(query)
db_connector.disconnect()
dataframe = DataframeFromDB(
data=results,
all_columns=project_config_file.all_columns,
deleted_columns=project_config_file.deleted_columns,
project_configuration_id=project_config.id,
label=project_config_file.label,
path=model_url,
is_retrain=True
)
df_features, df_labels = dataframe.get_transformed_data()
timer = Timer()
timer.start()
if account_type == 1:
model = BasicLinearModel(df_features, df_labels, model_url)
elif account_type == 2:
model = PremiumLinearModel(df_features, df_labels, model_url)
else:
model = TrialLinearModel(df_features, df_labels, model_url)
model.train_and_save()
error, accuracy = model.get_metrics()
elapsed_time = timer.stop()
# modify project config
project_configuration = update_project_configuration(
project_config.id, error, accuracy, database)
# pass info to websocket
call_socket(message_type='updated_project',
message_data=json.dumps(ProjectSerializer(project_configuration.project).data,
cls=UUIDEncoder), token=token)
# create stats
create_stats(project_configuration=project_configuration, df_features=df_features,
elapsed_time=elapsed_time, error=error, accuracy=accuracy, token=token)
time = datetime.utcnow() + timedelta(days=int(request['days']))
retrain_basic_regression_model.apply_async(
args=[request, project_id, token], eta=time)
| AlvaroJSnish/revolve | revolve/retrains/tasks.py | tasks.py | py | 3,590 | python | en | code | 0 | github-code | 13 |
10079562030 | from math import sqrt
import random
import Person
from Restaurants import Restaurant
# me=Person.Person()
# me.first_name="Ceyda"
# me.second_name="Günes"
#
# me.print_my_name()
#
# user2=Person.Person()
# user2.first_name="Erdem"
# user2.second_name="Cimenoglu"
# user2.print_my_name()
#
# user3=Person.Person()
# user3.first_name="Enes"
# user3.second_name="Akay"
# user3.official =False
#
# user2.greet()
# user3.greet()
all_restaurants = []
user_1 = Person.Person(first_name="Ceyda", second_name="Günes", x_coord=10., y_coord=12., range=100)
# def define_personen():
#
# global all_personen
#
# user_1 = Person(first_name="Ceyda", second_name="Günes", x_coord=10., y_coord=12., range=2000)
# #user2 = Person.Person(first_name="Erdem", second_name="Cimenoglu", x_coord=12., y_coord=13., range=3000)
# #user3 = Person.Person(first_name="Enes", second_name="Akay", x_coord=15., y_coord=20., range=1500)
def define_restaurants():
    """Append the hard-coded sample restaurants to the module-level list."""
    global all_restaurants
    sample_data = (
        ("Pizzaria", (5, 12), 100),
        ("Pizzaria2", (35, 113), 80),
        ("Pizzaria3", (22, 58), 90),
        ("Pizzaria4", (17, 72), 120),
        ("Pizzaria5", (15, 65), 30),
    )
    for name, coords, seats in sample_data:
        all_restaurants.append(
            Restaurant(name=name, coordinates=coords, openinghours=None, freeSeats=seats))
def print_all_restaurant_information(restaurant_list):
    """Print every restaurant in the list, one per line."""
    for restaurant in restaurant_list:
        print(restaurant)
def main():
    # NOTE(review): dead code — this definition is immediately shadowed by
    # the second `def main()` further down (Python keeps only the last
    # binding). Merge the two or delete this one.
    #load all data
    define_restaurants()
    print_all_restaurant_information(all_restaurants)
def distance(point_1, point_2):
    """Euclidean distance between two (x, y) points."""
    dx = point_1[0] - point_2[0]
    dy = point_1[1] - point_2[1]
    return sqrt(dx * dx + dy * dy)
def get_restaurants_in_range(all_restaurants, personal_coordinates, range):
    """Return the restaurants whose coordinates lie within `range` of the
    given position (inclusive).

    The parameter name `range` shadows the builtin; it is kept because
    callers pass it by keyword (range=...).
    """
    return [candidate for candidate in all_restaurants
            if distance(personal_coordinates, candidate.coordinates) <= range]
def main():
    """Load sample data, list it, and suggest a random restaurant in range."""
    #load all data
    define_restaurants()
    print_all_restaurant_information(all_restaurants)
    restaurants_in_range = get_restaurants_in_range(
        all_restaurants=all_restaurants, personal_coordinates=(100, 100), range=90)
    # BUG FIX: the original used `int(random.uniform(0, len(...) - 1))`,
    # which is biased (the last index is picked with probability ~0) and
    # raises IndexError when nothing is in range. Guard the empty case and
    # use random.choice for a uniform pick.
    if not restaurants_in_range:
        print("no restaurant within range")
        return
    chosen = random.choice(restaurants_in_range)
    print("your restautrant is:", str(chosen))
if __name__ == """__main__""":
main()
#############
#############
# def bubble(badlist):
# length = len(badlist-1)
# unsorted = False
#
# while not unsorted:
# unsorted = True
# for element in range(0 , length):
# if badlist[element] > badlist[element - 1]:
# unsorted = False
# hold = badlist[element + 1]
# badlist[element + 1] = badlist[element]
# badlist[element] = hold
#
# return badlist
#
# RankedList = bubble(distance)
#
# def ShowResturant(List):
# length = len(List-1)
#
# for sayac in range(0, length):
# for sayac2 in range(0,length):
# if RankedList(sayac) == distance[sayac2]:
# ShowresturantList[sayac] = Resturant[sayac2]
# return ShowResturantList
#
# ShowResturantList = ShowResturant(RankedList) | marektdu/losgehts | losgeht.py | losgeht.py | py | 3,511 | python | en | code | 0 | github-code | 13 |
70213070098 | import algosdk.encoding
from algosdk.constants import PAYMENT_TXN, APPCALL_TXN, ASSETTRANSFER_TXN
from flask import Flask, request, jsonify, render_template, Response, session, url_for, redirect
import ipfshttpclient
import os, tempfile, mimetypes
from algosdk import mnemonic, account
from algosdk.future.transaction import OnComplete, ApplicationCallTxn, PaymentTxn, AssetTransferTxn, calculate_group_id, AssetConfigTxn, LogicSig, LogicSigTransaction
import algorand
import royalties
api = Flask(__name__)
api.secret_key = os.urandom(24)
@api.route('/')
def index():
    """Landing page: redirect already-authenticated users to their assets."""
    #session['publickey'] = 'EV5PYVBEVS4PPWAWO3JJQIHD3L36BA5DS5RH34FGTVLUQW6Y3JVWTWM64Y'
    if session.get("publickey"):
        return redirect("/myassets")
    return render_template('index.html')
@api.route('/login', methods=['POST'])
def login():
    """Authenticate by proving the mnemonic passphrase derives the claimed
    Algorand address; on success store the address in the session."""
    try:
        public_key = request.json['publickey']
        passphrase = request.json['passphrase']
        # Derive the address from the mnemonic and require it to match the
        # address the client claimed.
        private_key = algorand.get_private_key_from_mnemonic(passphrase)
        public_key_from_passphrase = account.address_from_private_key(private_key)
        if (public_key_from_passphrase != public_key):
            return Response(
                "Unauthoried",
                status=401,
            )
        session['publickey'] = public_key
        return redirect(url_for('myassets'), code=302)
    except Exception as e:
        # NOTE(review): echoing the exception text to the client can leak
        # internal details; consider a generic message + server-side log.
        return Response(
            "Unauthoried, {0}".format(e),
            status=401,
        )
@api.route('/logout', methods=['POST'])
def logout():
    """Drop the session's public key and return to the landing page."""
    if not session.get("publickey"):
        return redirect("/")
    session.pop('publickey', None)
    return redirect('/')
# Session-gated template pages: anonymous visitors are sent back to the
# landing page, authenticated users get the corresponding view rendered.
@api.route('/myassets')
def myassets():
    if not session.get("publickey"):
        return redirect("/")
    return render_template('myassets.html')
@api.route('/createasset')
def createasset():
    if not session.get("publickey"):
        return redirect("/")
    return render_template('createasset.html')
@api.route('/marketplace')
def marketplace():
    if not session.get("publickey"):
        return redirect("/")
    return render_template('marketplace.html')
@api.route('/mytransactions')
def mytransactions():
    if not session.get("publickey"):
        return redirect("/")
    return render_template('mytransactions.html')
@api.route('/generateAccount', methods=['GET'])
def generate_account():
    """Create a fresh Algorand account and return its address + mnemonic."""
    # generate_account() returns (private_key, address).
    # NOTE(review): the mnemonic is returned over the wire with no session
    # check or rate limiting — confirm this endpoint is meant to be public.
    acct = account.generate_account()
    return jsonify(
        publickey=acct[1],
        passphrase=mnemonic.from_private_key(acct[0])
    )
@api.route('/getaccountinfo', methods=['GET'])
def get_account_info():
    """Return the indexer's account record for the logged-in address."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        indexer_client = algorand.get_indexer_client()
        result = indexer_client.account_info(address=session.get("publickey"))
        return jsonify(
            accountinfo=result,
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/getassets', methods=['GET'])
def get_assets():
    """Return every marketplace asset derived from all indexed applications."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        indexer_client = algorand.get_indexer_client()
        result = indexer_client.search_applications()
        # The app's helper decodes asset metadata out of each application's
        # global state.
        return jsonify(
            assets=algorand.get_assets_from_applications(result['applications']),
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/getmyassets', methods=['GET'])
def get_my_assets():
    """Return the NFTs the logged-in account actually holds.

    Strategy: collect the creators of every asset the account holds with a
    positive balance, fetch each creator's applications, decode the assets
    they describe, and keep only the ones whose ids the account holds.
    """
    try:
        if not session.get("publickey"):
            return redirect("/")
        algod_client = algorand.get_algod_client()
        account_info = algod_client.account_info(address=session.get("publickey"))
        # Dicts used as ordered sets: creator addresses and held asset ids.
        foreign_accouns = {}
        my_assets_ids = {}
        for asset in account_info['assets']:
            if asset['amount'] > 0:
                foreign_accouns[asset['creator']] = True
                my_assets_ids[asset['asset-id']] = True
        # Gather asset metadata from each creator's applications.
        foreign_assets = []
        for key in foreign_accouns:
            foreign_account_info = algod_client.account_info(address=key)
            assets = algorand.get_assets_from_applications(foreign_account_info['created-apps'])
            foreign_assets = foreign_assets + assets
        # Keep only assets this account holds.
        my_assets = []
        for asset in foreign_assets:
            if int(asset['asset_id']) in my_assets_ids:
                my_assets.append(asset)
        return jsonify(
            assets=my_assets
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
# Three parallel session-gated lookup endpoints; each proxies a single
# algod/indexer query and returns any failure text with HTTP 400.
@api.route('/getassetinfo', methods=['GET'])
def get_asset_info():
    """Return algod's record for the asset id given in the query string."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        asset_id = int(request.args['asset_id'])
        result = algorand.get_algod_client().asset_info(asset_id=asset_id)
        return jsonify(
            assetinfo=result,
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/gettransactions', methods=['GET'])
def gettransactions():
    """Return the logged-in account's payment transactions from the indexer."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        result = algorand.get_indexer_client().search_transactions_by_address(address=session.get("publickey"), txn_type="pay")
        return jsonify(
            transactions=result,
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/getappinfo', methods=['GET'])
def get_app_info():
    """Return algod's record for the application id given in the query string."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        application_id = int(request.args['app_id'])
        result = algorand.get_algod_client().application_info(application_id=application_id)
        return jsonify(
            appinfo=result,
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/uploadToIPFS', methods=['POST'])
def upload_to_ipfs():
    """Store the posted file on the local IPFS node and return its hash/URL."""
    if not session.get("publickey"):
        return redirect("/")
    file = request.files['file']
    extension = mimetypes.guess_extension(file.content_type, strict=True)
    # delete=False because client.add() needs a path it can reopen; the
    # finally block removes the temp file afterwards.
    tmp = tempfile.NamedTemporaryFile(suffix=extension, delete=False)
    try:
        client = ipfshttpclient.connect()
        data = file.stream.read()
        tmp.write(data)
        # NOTE(review): tmp is not flushed before client.add() reads the
        # path — buffered bytes may be missing for larger files; add
        # tmp.flush() (or close first) and confirm.
        res = client.add(tmp.name)
        # NOTE(review): the returned gateway URL is hard-coded to localhost.
        return jsonify(
            hash=res['Hash'],
            name=res['Name'],
            url='http://127.0.0.1:8080/ipfs/{0}'.format(res['Hash'])
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
    finally:
        tmp.close()
        os.unlink(tmp.name)
@api.route('/createNFT', methods=['POST'])
def create_nft():
    """Mint an NFT from posted metadata and deploy its marketplace app."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        nft_metadata = request.json['nftMetadata']
        passphrase = request.json['passphrase']
        # Keys are re-derived from the submitted mnemonic for signing.
        private_key = algorand.get_private_key_from_mnemonic(passphrase)
        public_key = account.address_from_private_key(private_key)
        # 1) create the ASA, 2) deploy the application that manages its sale.
        assetid, txid = algorand.create_non_fungible_token(public_key, private_key, nft_metadata)
        appid = algorand.create_application(passphrase, assetid)
        return redirect(url_for('myassets'), code=302)
    except Exception as e:
        return Response(
            "Error executing API: {}".format(e),
            status=400,
        )
@api.route('/listNFTs', methods=['GET'])
def list_nfts():
    """Return the indexer's asset search result for the given asset id."""
    # NOTE(review): unlike every other route in this module, there is no
    # session check here, and `public_key` is read but never used — confirm
    # whether both are intentional.
    try:
        public_key = request.args['publicKey']
        asset_id = request.args['assetId']
        result = algorand.get_indexer_client().search_assets(asset_id=asset_id)
        return jsonify(
            nftList=result,
        )
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/placeNFTForSale', methods=['POST'])
def place_nft_for_sale():
    """List an NFT for sale: call the marketplace app with price/royalty
    arguments, fund the escrow, and (for the creator) hand the ASA's
    clawback to the escrow — all as one atomic transaction group."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        passphrase = request.json['passphrase']
        app_id = request.json['app_id']
        asset_id = request.json['asset_id']
        price = request.json['price']
        royalty = request.json['royalty']
        signin_key = algorand.get_private_key_from_mnemonic(passphrase)
        public_key = account.address_from_private_key(signin_key)
        algod_client = algorand.get_algod_client()
        # Default royalty arguments built from the caller's submission...
        royalty_args = royalties.get_new_royalty_structure_arguments(public_key, royalty['state'],
                                                                     royalty['royalty_groups'], royalty['chain_groups'])
        result = algorand.get_algod_client().application_info(application_id=app_id)
        asset = algorand.get_assets_from_applications([result])[0]
        # ...overridden for resellers (keep the creator's stored royalty
        # structure) and again for chained royalties.
        if asset['creator_str'] != public_key:
            royalty_args = royalties.get_new_royalty_structure_arguments(asset['owner_str'], asset['royalty']['state'],
                                                                         asset['royalty']['royalty_groups'],
                                                                         asset['royalty']['chain_groups'])
        if royalty['state'] == royalties.CHAIN and 'royalty' in asset:
            royalty_args = royalties.get_new_royalty_structure_arguments(public_key, asset['royalty']['state'],
                                                                         asset['royalty']['royalty_groups'],
                                                                         asset['royalty']['chain_groups'])
        app_args = [
            "place_nft_for_sale_with_royalties",
            price,
        ]
        app_args = app_args + royalty_args
        params = algod_client.suggested_params()
        # Txn 1: application call recording the listing + royalty structure.
        txn1 = ApplicationCallTxn(sender=public_key,
                                  sp=params,
                                  index=app_id,
                                  app_args=app_args,
                                  on_complete=OnComplete.NoOpOC)
        escrow_address = algorand.get_escrow_address(asset_id, app_id)
        # Txn 2: fund the escrow with 1 Algo (1_000_000 microAlgos) so it can
        # cover minimum balance and fees.
        txn3 = PaymentTxn(sender=public_key,
                          sp=params,
                          receiver=escrow_address,
                          amt=1000000)
        txns = []
        txns.append(txn1)
        txns.append(txn3)
        # Creator-only: reconfigure the ASA so the escrow holds clawback,
        # letting it transfer the NFT to a future buyer.
        if asset['creator_str'] == public_key:
            txn2 = AssetConfigTxn(
                sender=public_key,
                sp=params,
                index=asset_id,
                default_frozen=False,
                manager=public_key,
                reserve=public_key,
                freeze=public_key,
                clawback=escrow_address,
                strict_empty_address_check=False)
            txns.append(txn2)
        # Group, sign and submit atomically; block until confirmed.
        group_id = calculate_group_id(txns)
        stxns = []
        for txn in txns:
            txn.group = group_id
            stxn = txn.sign(signin_key)
            stxns.append(stxn)
        txid = algod_client.send_transactions(stxns)
        algorand.wait_for_confirmation(algod_client, txid, 4)
        return redirect(url_for('myassets'), code=302)
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
@api.route('/buyNFT', methods=['POST'])
def buy_nft():
    """Purchase a listed NFT: opt the buyer into the ASA, then submit an
    atomic group of royalty payments, the app call, and the escrow-signed
    clawback transfer of the NFT from the seller to the buyer."""
    try:
        if not session.get("publickey"):
            return redirect("/")
        passphrase = request.json['passphrase']
        app_id = request.json['app_id']
        asset_id = request.json['asset_id']
        signin_key = algorand.get_private_key_from_mnemonic(passphrase)
        public_key = account.address_from_private_key(signin_key)
        result = algorand.get_algod_client().application_info(application_id=app_id)
        asset = algorand.get_assets_from_applications([result])[0]
        algod_client = algorand.get_algod_client()
        app_args = [
            "buy_nft",
            public_key
        ]
        params = algod_client.suggested_params()
        # Step 1 (standalone): a 0-amount self-transfer opts the buyer's
        # account into the ASA so it can receive the NFT.
        txn = AssetTransferTxn(sender=public_key,
                               sp=params,
                               receiver=public_key,
                               amt=0,
                               index=asset_id)
        stxn = txn.sign(signin_key)
        txid = algod_client.send_transaction(stxn)
        algorand.wait_for_confirmation(algod_client, txid, 4)
        # Step 2: build the purchase group.
        escrow_address = algorand.get_escrow_address(asset_id, app_id)
        logic_sign_in_key = algorand.get_escrow_signature(asset_id, app_id)
        txn1 = ApplicationCallTxn(sender=public_key,
                                  sp=params,
                                  index=app_id,
                                  app_args=app_args,
                                  on_complete=OnComplete.NoOpOC)
        # Payment transactions splitting the price between seller and
        # royalty recipients.
        txns = royalties.get_royalty_transactions(asset['royalty'], public_key, asset['owner_str'], asset['price'], params)
        # Clawback transfer: the escrow pulls the NFT out of the seller's
        # account (revocation_target) and delivers it to the buyer.
        txn3 = AssetTransferTxn(sender=escrow_address,
                                sp=params,
                                receiver=public_key,
                                amt=1,
                                index=asset_id,
                                revocation_target=asset['owner_str'])
        txns.append(txn1)
        txns.append(txn3)
        group_id = calculate_group_id(txns)
        stxns = []
        # Payments and the app call are signed by the buyer; the asset
        # transfer is signed by the escrow's logic signature.
        for txn in txns:
            txn.group = group_id
            if txn.type == PAYMENT_TXN:
                stxn = txn.sign(signin_key)
                stxns.append(stxn)
                continue
            if txn.type == APPCALL_TXN:
                stxn = txn.sign(signin_key)
                stxns.append(stxn)
                continue
            if txn.type == ASSETTRANSFER_TXN:
                stxn = LogicSigTransaction(txn, logic_sign_in_key)
                stxns.append(stxn)
                continue
        txid = algod_client.send_transactions(stxns)
        algorand.wait_for_confirmation(algod_client, txid, 4)
        return redirect(url_for('myassets'), code=302)
    except Exception as e:
        return Response(
            str(e),
            status=400,
        )
if __name__ == '__main__':
    api.run()  # Flask development server; use a WSGI server in production.
6683912977 | #################################################
# File Name:bamtofragments.py
# Author: Pengwei.Xing
# Mail: xingwei421@qq.com,pengwei.xing@igp.uu.se,xpw1992@gmail.com
# Created Time: Tue Nov 29 15:46:09 2022
#################################################
import pysam
import sys
import argparse
def fargv():
    """Parse and return the command-line options for the converter."""
    parser = argparse.ArgumentParser(usage="python ")
    parser.add_argument('-i', "--input", help="the name-sorted bam file ", required=True)
    parser.add_argument('-of', "--frag_output", help="the fragments file ", required=True)
    parser.add_argument('-ob', "--bam_output", help="the outputed bam file ", required=True)
    parser.add_argument('-m', "--mode", help="the outputed bam file ", required=True, choices=['SE', 'PE'])
    return parser.parse_args()
class convertbamtofrags:
    """Convert aligned BAM reads into fragment records.

    Writes one tab-separated line per fragment (chrom, start, end, cell) to
    the fragments file and a synthetic single-segment alignment to the
    output BAM. Cell barcodes are reconstructed from the read name: field 0
    plus fields 5+ of the underscore-split query name.
    """
    def __init__(self,f_bam_input='',f_frag_output='',f_bam_output=''):
        # Already-opened pysam.AlignmentFile handles / text file object.
        self.bam_input = f_bam_input
        self.fragoutput = f_frag_output
        self.f_bam_output = f_bam_output
    def tofragments_se(self):
        # Single-end mode: each read becomes one fragment.
        def assign(header,query_name,mychr,left,right,cell_name):
            # Build a synthetic fully-matched alignment covering [left, right)
            # with the cell barcode stored in the RG tag.
            temp_bam = pysam.AlignedSegment(header)
            temp_bam.query_name = query_name
            temp_bam.flag = 0
            temp_bam.reference_name = mychr
            temp_bam.reference_start = left
            temp_bam.mapping_quality = 255
            length = right-left
            temp_bam.cigar = ((0,length),)
            temp_bam.next_reference_name = "="
            temp_bam.next_reference_start = 0
            temp_bam.template_length = 0
            temp_bam.tags = (("RG", cell_name),)
            return temp_bam
        for a in self.bam_input:
            query_name = a.query_name
            # Cell barcode = name field 0 + fields 5..end.
            cell_name = "_".join([query_name.split('_')[0]]+query_name.split('_')[5:])
            read_start = a.reference_start
            read_end = read_start + a.query_alignment_end
            mychr = a.reference_name
            self.fragoutput.write("%s\t%s\t%s\t%s\n" % (mychr,read_start,read_end,cell_name))
            temp_bam = assign(self.bam_input.header,query_name,mychr,read_start,read_end,cell_name)
            self.f_bam_output.write(temp_bam)
    def tofragments_pe(self):
        # Paired-end mode: mates are expected adjacent (name-sorted input);
        # a fragment spans from the minimum to the maximum coordinate of the
        # pair, kept only if shorter than 1000 bp.
        def assign(header,query_name,mychr,left,right,cell_name):
            # Same synthetic-alignment builder as in tofragments_se.
            temp_bam = pysam.AlignedSegment(header)
            temp_bam.query_name = query_name
            temp_bam.flag = 0
            temp_bam.reference_name = mychr
            temp_bam.reference_start = left
            temp_bam.mapping_quality = 255
            length = right-left
            temp_bam.cigar = ((0,length),)
            temp_bam.next_reference_name = "="
            temp_bam.next_reference_start = 0
            temp_bam.template_length = 0
            temp_bam.tags = (("RG", cell_name),)
            return temp_bam
        # index == -1 means "waiting for the first mate of a pair".
        index = -1
        temp = []        # accumulated start/end coordinates of current pair
        temp_name = ''   # query name of the pending read
        temp_chr = ''    # chromosome of the pending read
        temp_cell = ''   # cell barcode of the pending read
        for a in self.bam_input:
            if index == -1:
                # First mate: remember its coordinates and identity.
                query_name = a.query_name
                cell_name = "_".join([query_name.split('_')[0]]+query_name.split('_')[5:])
                read_start = a.reference_start
                read_end = read_start + a.query_alignment_end
                temp.extend([read_start,read_end])
                index = index * -1
                temp_name = query_name
                mychr = a.reference_name
                temp_chr = a.reference_name
                temp_cell = cell_name
            else:
                if a.query_name == temp_name:
                    # Second mate of the same template: emit the fragment.
                    query_name = a.query_name
                    mychr = a.reference_name
                    cell_name = "_".join([query_name.split('_')[0]]+query_name.split('_')[5:])
                    read_start = a.reference_start
                    read_end = read_start + a.query_alignment_end
                    temp.extend([read_start,read_end])
                    index = index * -1
                    left = min(temp)
                    right = max(temp)
                    if (right - left) < 1000:
                        self.fragoutput.write("%s\t%s\t%s\t%s\n" % (mychr,left,right,cell_name))
                        temp_bam = assign(self.bam_input.header,query_name,mychr,left,right,cell_name)
                        self.f_bam_output.write(temp_bam)
                        temp = []
                    # NOTE(review): when the pair spans >= 1000 bp nothing is
                    # written and `temp` is NOT cleared, so those coordinates
                    # leak into the next pair — confirm intent.
                else:
                    # Name mismatch: flush the orphaned pending read, then
                    # start a new pair from the current read.
                    # NOTE(review): `left`, `right` and `query_name` here are
                    # stale values from a *previous* pair (NameError if no
                    # pair has matched yet), and `cell_name` is likewise the
                    # previous read's barcode — this branch looks buggy.
                    self.fragoutput.write("%s\t%s\t%s\t%s\n" % (temp_chr,temp[0],temp[1],temp_cell))
                    temp_bam = assign(self.bam_input.header,query_name,temp_chr,left,right,cell_name)
                    self.f_bam_output.write(temp_bam)
                    temp = []
                    temp_name = a.query_name
                    mychr = a.reference_name
                    temp_chr = a.reference_name
                    temp_cell = cell_name
                    read_start = a.reference_start
                    read_end = read_start + a.query_alignment_end
                    temp.extend([read_start,read_end])
                    index = 1
def main(kwargs):
    """Open input/output files and run the conversion for the chosen mode."""
    f_bam_input = pysam.AlignmentFile(kwargs.input,mode='rb',check_header=True,threads=3)
    # Output BAM reuses the input header via template=.
    f_bam_output = pysam.AlignmentFile(kwargs.bam_output,mode='wb',template=f_bam_input,threads=3)
    f_frag_output = open(kwargs.frag_output,'w')
    bamobject = convertbamtofrags(f_bam_input=f_bam_input,f_frag_output=f_frag_output,f_bam_output=f_bam_output)
    if kwargs.mode == "PE":
        bamobject.tofragments_pe()
    elif kwargs.mode == "SE":
        bamobject.tofragments_se()
    # NOTE(review): none of the three files is closed here, so buffered
    # output may be truncated — add close() calls or context managers.
if __name__ == "__main__":
kwargs = fargv()
main(kwargs)
| pengweixing/scFFPE | Snakemake/bamtofragments.py | bamtofragments.py | py | 5,691 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.