seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
25169171416 | """This module defines all the config parameters."""
# Column indices of the computed feature matrix.
FEATURE_FORMAT = {
    "TIMESTAMP": 0,
    "TRACK_ID": 1,
    "OBJECT_TYPE": 2,
    "X": 3,
    "Y": 4,
    "CITY_NAME": 5,
    "MIN_DISTANCE_FRONT": 6,
    "MIN_DISTANCE_BACK": 7,
    "NUM_NEIGHBORS": 8,
    "OFFSET_FROM_CENTERLINE": 9,
    "DISTANCE_ALONG_CENTERLINE": 10,
}

# Column indices of the raw Argoverse forecasting csv rows.
RAW_DATA_FORMAT = {
    "TIMESTAMP": 0,
    "TRACK_ID": 1,
    "OBJECT_TYPE": 2,
    "X": 3,
    "Y": 4,
    "CITY_NAME": 5,
}

# Positions of the per-sequence helper arrays passed around by the LSTM
# baseline code.
LSTM_HELPER_DICT_IDX = {
    "CENTROIDS": 0,
    "CITY_NAMES": 1,
    "CANDIDATE_CENTERLINES": 2,
    "CANDIDATE_NT_DISTANCES": 3,
    "TRANSLATION": 4,
    "ROTATION": 5,
    "CANDIDATE_DELTA_REFERENCES": 6,
    "DELTA_REFERENCE": 7,
    "SEQ_PATHS": 8,
}

# Input feature names per baseline variant (keys into FEATURE_FORMAT).
BASELINE_INPUT_FEATURES = {
    "social":
    ["X", "Y", "MIN_DISTANCE_FRONT", "MIN_DISTANCE_BACK", "NUM_NEIGHBORS"],
    "map": ["OFFSET_FROM_CENTERLINE", "DISTANCE_ALONG_CENTERLINE"],
    "map_social": [
        "OFFSET_FROM_CENTERLINE",
        "DISTANCE_ALONG_CENTERLINE",
        "MIN_DISTANCE_FRONT",
        "MIN_DISTANCE_BACK",
        "NUM_NEIGHBORS",
    ],
    "none": ["X", "Y"],
}

# Output (predicted) feature names per baseline variant.
BASELINE_OUTPUT_FEATURES = {
    "social": ["X", "Y"],
    "map": ["OFFSET_FROM_CENTERLINE", "DISTANCE_ALONG_CENTERLINE"],
    "map_social": ["OFFSET_FROM_CENTERLINE", "DISTANCE_ALONG_CENTERLINE"],
    "none": ["X", "Y"],
}

# Feature computation
_FEATURES_SMALL_SIZE = 100

# Map Feature computations
_MANHATTAN_THRESHOLD = 5.0  # meters
_DFS_THRESHOLD_FRONT_SCALE = 45.0  # meters
_DFS_THRESHOLD_BACK_SCALE = 40.0  # meters
_MAX_SEARCH_RADIUS_CENTERLINES = 50.0  # meters
_MAX_CENTERLINE_CANDIDATES_TEST = 10

# Social Feature computation
PADDING_TYPE = "REPEAT"  # Padding type for partial sequences
STATIONARY_THRESHOLD = (
    13)  # index of the sorted velocity to look at, to call it as stationary
VELOCITY_THRESHOLD = 1.0  # Velocity threshold for stationary
EXIST_THRESHOLD = (
    15
)  # Number of timesteps the track should exist to be considered in social context
DEFAULT_MIN_DIST_FRONT_AND_BACK = 100.0  # Default front/back distance
NEARBY_DISTANCE_THRESHOLD = 50.0  # Distance threshold to call a track as neighbor
FRONT_OR_BACK_OFFSET_THRESHOLD = 5.0  # Offset threshold from direction of travel
| jagjeet-singh/argoverse-forecasting | utils/baseline_config.py | baseline_config.py | py | 2,226 | python | en | code | 228 | github-code | 36 |
946081942 | pkgname = "lua5.1-libluv"
pkgver = "1.45.0.0"
pkgrel = 0
# Upstream tags releases as e.g. "1.45.0-0"; rebuild that form from pkgver.
_distver = "-".join(pkgver.rsplit(".", 1))
build_style = "cmake"
configure_args = [
    "-DLUA_BUILD_TYPE=System",
    "-DWITH_SHARED_LIBUV=ON",
    "-DBUILD_MODULE=OFF",
    "-DBUILD_SHARED_LIBS=ON",
    "-DWITH_LUA_ENGINE=Lua",
]
hostmakedepends = ["cmake", "ninja", "pkgconf"]
makedepends = ["libuv-devel", "lua5.1-devel"]
pkgdesc = "Bare libuv bindings for Lua"
maintainer = "yopito <pierre.bourgin@free.fr>"
license = "Apache-2.0"
url = "https://github.com/luvit/luv"
source = f"https://github.com/luvit/luv/releases/download/{_distver}/luv-{_distver}.tar.gz"
sha256 = "fa6c46fb09f88320afa7f88017efd7b0d2b3a0158c5ba5b6851340b0332a2b81"
# no tests provided by upstream
options = ["!check"]


# ``subpackage`` is injected by the cports template runtime.
@subpackage("lua5.1-libluv-devel")
def _devel(self):
    # Standard -devel split (headers, pkg-config files, etc.).
    return self.default_devel()
| chimera-linux/cports | contrib/lua5.1-libluv/template.py | template.py | py | 838 | python | en | code | 119 | github-code | 36 |
7373532913 | from tornado import ioloop, httpclient as hc, gen, escape
from . import _compat as _
from .graphite import GraphiteRecord
from .utils import convert_to_format, parse_interval, parse_rule, HISTORICAL, interval_to_graphite, gen_log
import math
from collections import deque, defaultdict
from itertools import islice
LOGGER = gen_log
# Aggregation methods supported for graphite records.
METHODS = "average", "last_value"
# Numeric ranks for alert levels; lower sorts first (most severe).
LEVELS = {
    'critical': 0,
    'warning': 10,
    'normal': 20,
}
class sliceable_deque(deque):
    """A ``deque`` that additionally supports slice indexing.

    Integer lookups are delegated to ``deque``; a slice returns a new
    ``sliceable_deque`` built lazily from the requested window via
    ``itertools.islice``.
    """

    def __getitem__(self, index):
        if isinstance(index, slice):
            window = islice(self, index.start, index.stop, index.step)
            return type(self)(window)
        return deque.__getitem__(self, index)
class AlertFabric(type):
    """ Register alert's classes and produce an alert by source. """

    # Maps a ``source`` string (e.g. 'graphite', 'url') to its alert class.
    alerts = {}

    def __new__(mcs, name, bases, params):
        # Classes that declare a non-empty ``source`` register themselves
        # at class-creation time.
        source = params.get('source')
        cls = super(AlertFabric, mcs).__new__(mcs, name, bases, params)
        if source:
            mcs.alerts[source] = cls
            LOGGER.info('Register Alert: %s' % source)
        return cls

    def get(cls, reactor, source='graphite', **options):
        # Instantiate the alert class registered for *source*.
        acls = cls.alerts[source]
        return acls(reactor, **options)
class BaseAlert(_.with_metaclass(AlertFabric)):
    """ Abstract basic alert class.

    Subclasses set ``source`` and implement ``load()``; the reactor drives
    ``load`` periodically and receives level changes through ``notify``.
    """

    source = None

    def __init__(self, reactor, **options):
        self.reactor = reactor
        self.options = options
        self.client = hc.AsyncHTTPClient()

        try:
            self.configure(**options)
        except Exception as e:
            raise ValueError("Invalid alert configuration: %s" % e)

        self.waiting = False
        # Per-target alert level; 'waiting'/'loading' (and None) are
        # pseudo-targets used for internal status notifications.
        self.state = {None: "normal", "waiting": "normal", "loading": "normal"}
        # Recent values per target, bounded ring used by HISTORICAL rules.
        self.history = defaultdict(lambda: sliceable_deque([], self.history_size))

        LOGGER.info("Alert '%s': has inited" % self)

    def __hash__(self):
        return hash(self.name) ^ hash(self.source)

    def __eq__(self, other):
        return hash(self) == hash(other)

    def __str__(self):
        return "%s (%s)" % (self.name, self.interval)

    def configure(self, name=None, rules=None, query=None, **options):
        """Validate options and set up rules, intervals and the periodic
        load callback. Raises AssertionError on invalid configuration."""
        assert name, "Alert's name is invalid"
        self.name = name
        assert rules, "%s: Alert's rules is invalid" % name
        self.rules = [parse_rule(rule) for rule in rules]
        # Most severe level first (critical < warning < normal).
        self.rules = list(sorted(self.rules, key=lambda r: LEVELS.get(r.get('level'), 99)))
        assert query, "%s: Alert's query is invalid" % self.name
        self.query = query

        self.interval = interval_to_graphite(
            options.get('interval', self.reactor.options['interval']))
        interval = parse_interval(self.interval)

        self._format = options.get('format', self.reactor.options['format'])
        self.request_timeout = options.get(
            'request_timeout', self.reactor.options['request_timeout'])

        # history_size is configured as a time span; convert it into a
        # number of samples at the polling interval.
        self.history_size = options.get('history_size', self.reactor.options['history_size'])
        self.history_size = parse_interval(self.history_size)
        self.history_size = int(math.ceil(self.history_size / interval))

        if self.reactor.options.get('debug'):
            # Poll every 5 seconds in debug mode regardless of interval.
            self.callback = ioloop.PeriodicCallback(self.load, 5000)
        else:
            self.callback = ioloop.PeriodicCallback(self.load, interval)

    def convert(self, value):
        # Render a raw value in the configured display format.
        return convert_to_format(value, self._format)

    def reset(self):
        """ Reset state to normal for all targets.

        It will repeat notification if a metric is still failed.
        """
        for target in self.state:
            self.state[target] = "normal"

    def start(self):
        # Begin periodic polling and do one immediate load.
        self.callback.start()
        self.load()
        return self

    def stop(self):
        self.callback.stop()
        return self

    def check(self, records):
        """Evaluate (value, target) pairs against the rules, most severe
        first; notify on the first matching rule, or 'normal' if none."""
        for value, target in records:
            LOGGER.info("%s [%s]: %s", self.name, target, value)
            for rule in self.rules:
                rvalue = self.get_value_for_rule(rule, target)
                if rvalue is None:
                    continue
                if rule['op'](value, rvalue):
                    self.notify(rule['level'], value, target, rule=rule)
                    break
            else:
                # No rule fired; ``rule`` here is the last rule examined.
                self.notify('normal', value, target, rule=rule)

            self.history[target].append(value)

    def get_value_for_rule(self, rule, target):
        """Resolve the comparison threshold for *rule*, substituting the
        historical average when the rule references HISTORICAL. Returns
        None while not enough history has accumulated yet."""
        rvalue = rule['value']
        if rvalue == HISTORICAL:
            history = self.history[target]
            if len(history) < self.history_size:
                return None
            rvalue = sum(history) / len(history)

        rvalue = rule['mod'](rvalue)
        return rvalue

    def notify(self, level, value, target=None, ntype=None, rule=None):
        """ Notify main reactor about event. """
        # Did we see the event before?
        if target in self.state and level == self.state[target]:
            return False

        # Do we see the event first time?
        if target not in self.state and level == 'normal' \
                and not self.reactor.options['send_initial']:
            return False

        self.state[target] = level
        return self.reactor.notify(level, self, value, target=target, ntype=ntype, rule=rule)

    def load(self):
        # Subclasses fetch their metric source and call self.check().
        raise NotImplementedError()
class GraphiteAlert(BaseAlert):
    """Alert that polls a Graphite render endpoint (rawData format)."""

    source = 'graphite'

    def configure(self, **options):
        super(GraphiteAlert, self).configure(**options)

        self.method = options.get('method', self.reactor.options['method'])
        assert self.method in METHODS, "Method is invalid"

        self.auth_username = self.reactor.options.get('auth_username')
        self.auth_password = self.reactor.options.get('auth_password')

        query = escape.url_escape(self.query)
        # Raw-data render URL covering the last polling interval.
        self.url = "%(base)s/render/?target=%(query)s&rawData=true&from=-%(interval)s" % {
            'base': self.reactor.options['graphite_url'], 'query': query,
            'interval': self.interval}

    @gen.coroutine
    def load(self):
        """Fetch the configured query and run the records through check().

        Warns (and skips) when the previous fetch is still in flight.
        """
        LOGGER.debug('%s: start checking: %s' % (self.name, self.query))
        if self.waiting:
            self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
        else:
            self.waiting = True
            try:
                response = yield self.client.fetch(self.url, auth_username=self.auth_username,
                                                   auth_password=self.auth_password,
                                                   request_timeout=self.request_timeout)
                records = (GraphiteRecord(line.decode('utf-8')) for line in response.buffer)
                # Aggregate each record with the configured method
                # ('average' or 'last_value') before checking rules.
                self.check([(getattr(record, self.method), record.target) for record in records])
                self.notify('normal', 'Metrics are loaded', target='loading', ntype='common')
            except Exception as e:
                # self.notify('critical', 'Loading error: %s' % e, target=getattr(e, '_target', 'loading'), ntype=self.source) # 'common')
                self.notify('critical', '%s' % e, target=getattr(e, '_target', 'loading'))
            self.waiting = False

    def get_graph_url(self, target, graphite_url=None):
        """Return a human-viewable render URL for *target*."""
        query = escape.url_escape(target)
        return "%(base)s/render/?target=%(query)s&from=-%(interval)s" % {
            'base': graphite_url or self.reactor.options['graphite_url'], 'query': query,
            'interval': self.interval}
class URLAlert(BaseAlert):
    """Alert that fetches a URL and checks its HTTP status code."""

    source = 'url'

    @gen.coroutine
    def load(self):
        LOGGER.debug('%s: start checking: %s' % (self.name, self.query))
        if self.waiting:
            self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
        else:
            self.waiting = True
            try:
                response = yield self.client.fetch(self.query,
                                                   method=self.options.get('method', 'GET'),
                                                   request_timeout=self.request_timeout)
                # The HTTP status code is the checked "value" for URL alerts.
                self.check([(response.code, self.query)])
                self.notify('normal', 'Metrics are loaded', target='loading')
            except Exception as e:
                self.notify('critical', str(e), target='loading')
            self.waiting = False
| lixiaocheng18/testops | graphite/lib/beacon/alerts.py | alerts.py | py | 8,351 | python | en | code | 0 | github-code | 36 |
3292692782 | from random import random
def randint(a, b):
    """Our implementation of random.randint.

    The Python random.randint is not consistent between python versions
    and produces a series that is different in 3.x than 2.x. So that we
    can support deterministic testing (i.e., setting the random.seed and
    expecting the same sequence), we will implement a simple, but stable
    version of randint().

    Returns a pseudo-random integer k with a <= k <= b.
    """
    # Offset by ``a`` so results fall in [a, b]; the previous version
    # returned values in [0, b - a], which was only correct when a == 0
    # (the only way it was called in this module).
    return a + int((b - a + 1) * random())
def unique_component_name(instance, name):
    """Return *name* if it is unused on the model *instance*; otherwise
    append random digits until an unused name is found."""
    # test if this name already exists in model. If not, we're good.
    # Else, we add random numbers until it doesn't
    if instance.component(name) is None:
        return name
    candidate = name + '_%d' % (randint(0, 9),)
    while instance.component(candidate) is not None:
        candidate += str(randint(0, 9))
    return candidate
| igorsowa9/vpp | venv/lib/python3.6/site-packages/pyomo/util/modeling.py | modeling.py | py | 843 | python | en | code | 3 | github-code | 36 |
35029813116 | from pyglossary.plugins.formats_common import *
from struct import unpack
from zlib import decompress
from datetime import datetime

# PyGlossary plugin metadata for the Apple Dictionary binary reader.
enable = True
lname = "appledict_bin"
format = "AppleDictBin"
description = "AppleDict Binary"
extensions = (".dictionary", ".data",)
extensionCreate = ""
singleFile = True
kind = "binary"
wiki = ""
website = (
    "https://support.apple.com/en-gu/guide/dictionary/welcome/mac",
    "Dictionary User Guide for Mac",
)
# User-tunable reader options (see Reader._html / Reader._html_full).
optionsProp = {
    "html": BoolOption(comment="Entries are HTML"),
    "html_full": BoolOption(
        comment="Turn every entry's definition into an HTML document",
    ),
}
class Reader(object):
    """Read an Apple Dictionary ``Body.data`` file and yield glossary
    entries.

    The file is a sequence of sections, each a 4-byte little size prefix
    followed by a zlib-compressed payload containing ``<d:entry>`` XML
    chunks. A first pass (readEntryIds) builds an id -> title map used to
    resolve ``x-dictionary:r:`` cross-references.
    """

    depends = {
        "lxml": "lxml",
    }

    # Option flags (overridden from optionsProp by the framework).
    _html: bool = True
    _html_full: bool = False

    def __init__(self, glos):
        self._glos = glos
        self._filename = ""
        self._file = None
        self._encoding = "utf-8"
        self._buf = ""
        self._defiFormat = "m"
        # Matches opening <a ...> tags so their hrefs can be rewritten.
        self._re_link = re.compile('<a [^<>]*>')
        # Entry id -> d:title, filled by readEntryIds().
        self._titleById = {}
        self._wordCount = 0
        try:
            from lxml import etree
        except ModuleNotFoundError as e:
            e.msg += f", run `{pip} install lxml` to install"
            raise e

    def sub_link(self, m: "Match"):
        """Rewrite one ``<a ...>`` opening tag (regex match *m*) so
        dictionary-internal hrefs become ``bword://`` glossary links."""
        from lxml.html import fromstring, tostring
        a_raw = m.group(0)
        a = fromstring(a_raw)

        href = a.attrib.get("href", "")
        if href.startswith("x-dictionary:d:"):
            # Direct link by headword.
            word = href[len("x-dictionary:d:"):]
            a.attrib["href"] = href = f"bword://{word}"
        elif href.startswith("x-dictionary:r:"):
            # Reference by entry id; resolve through the title map.
            # https://github.com/ilius/pyglossary/issues/343
            id_i = len("x-dictionary:r:")
            id_j = href.find(":", id_i)
            _id = href[id_i:id_j]
            title = self._titleById.get(_id)
            if title:
                a.attrib["href"] = href = f"bword://{title}"
            else:
                # Fall back to the tag's own title attribute, if any.
                title = a.attrib.get("title")
                if title:
                    a.attrib["href"] = href = f"bword://{title}"
        elif href.startswith("http://") or href.startswith("https://"):
            # External links pass through unchanged.
            pass
        else:
            a.attrib["href"] = href = f"bword://{href}"

        a_new = tostring(a).decode("utf-8")
        a_new = a_new[:-4]  # remove '</a>'
        return a_new

    def fixLinksInDefi(self, defi: str) -> str:
        """Rewrite every <a> opening tag in *defi* via sub_link()."""
        defi = self._re_link.sub(self.sub_link, defi)
        return defi

    def open(self, filename):
        """Locate Body.data under *filename* (file or .dictionary
        directory), derive the glossary name, and pre-scan entry ids."""
        self._defiFormat = "h" if self._html else "m"

        parts = split(filename)
        dbname = parts[-1]
        if isdir(filename):
            if parts[-1] == "Contents":
                filename = join(filename, "Body.data")
                if len(parts) > 2:
                    dbname = parts[-2]
            elif isfile(join(filename, "Contents/Body.data")):
                filename = join(filename, "Contents/Body.data")
            elif isfile(join(filename, "Contents/Resources/Body.data")):
                filename = join(filename, "Contents/Resources/Body.data")
            else:
                raise IOError(
                    "could not find Body.data file, "
                    "please select Body.data file instead of directory"
                )
        elif dbname == "Body.data" and len(parts) > 1:
            # Walk up past Contents/Resources to find the bundle name.
            dbname = parts[-2]
            if len(parts) > 2:
                if dbname == "Contents":
                    dbname = parts[-3]
                elif dbname == "Resources" and len(parts) > 3:
                    dbname = parts[-4]
        if not isfile(filename):
            # Include the offending path in the error (it was missing before).
            raise IOError(f"no such file: {filename!r}")
        if dbname.endswith(".dictionary"):
            dbname = dbname[:-len(".dictionary")]
        self._glos.setInfo("name", dbname)

        self._filename = filename
        self._file = open(filename, "rb")
        # The 4 bytes at offset 0x40 hold the body length; data sections
        # start at 0x60.
        self._file.seek(0x40)
        self._limit = 0x40 + unpack("i", self._file.read(4))[0]
        self._file.seek(0x60)

        t0 = datetime.now()
        self.readEntryIds()
        dt = datetime.now() - t0
        log.info(
            f"Reading entry IDs took {int(dt.total_seconds() * 1000)} ms, "
            f"number of entries: {self._wordCount}"
        )

    def __len__(self):
        return self._wordCount

    def close(self):
        if self._file is not None:
            self._file.close()
            self._file = None

    def getChunkSize(self, pos):
        """Parse the size prefix at *pos* in the current buffer.

        Returns (chunkSize, prefixLength); (0, 0) when no size prefix
        precedes the next ``<d:entry``.
        """
        plus = self._buf[pos:pos + 12].find(b"<d:entry")
        if plus < 1:
            return 0, 0
        bs = self._buf[pos:pos + plus]
        if plus < 4:
            # Left-pad a short prefix to the 4 bytes unpack() expects.
            bs = b"\x00" * (4 - plus) + bs
        try:
            chunkSize, = unpack("i", bs)
        except Exception as e:
            log.error(f"{self._buf[pos:pos+100]}")
            raise e
        return chunkSize, plus

    def _getDefi(self, entryElem: "Element") -> str:
        """Serialize one <d:entry> element into the definition string."""
        from lxml import etree

        if not self._html:
            # FIXME: this produces duplicate text for Idioms.dictionary, see #301
            return "".join([
                etree.tostring(
                    child,
                    encoding="utf-8",
                ).decode("utf-8")
                for child in entryElem.iterdescendants()
            ])

        defi = etree.tostring(
            entryElem,
            encoding="utf-8",
        ).decode("utf-8")
        defi = self.fixLinksInDefi(defi)
        if self._html_full:
            defi = (
                f'<!DOCTYPE html><html><head>'
                f'<link rel="stylesheet" href="style.css">'
                f'</head><body>{defi}</body></html>'
            )
        return defi

    def _readEntryData(self, pos: int) -> "Tuple[bytes, int]":
        """Return the raw bytes of the entry at *pos* and the new pos."""
        chunkSize, plus = self.getChunkSize(pos)
        pos += plus
        if chunkSize == 0:
            # No size prefix: scan for the closing tag instead.
            endI = self._buf[pos:].find(b"</d:entry>")
            if endI == -1:
                chunkSize = len(self._buf) - pos
            else:
                chunkSize = endI + 10
        entryBytes = self._buf[pos:pos + chunkSize]
        pos += chunkSize
        return entryBytes, pos

    def _readEntry(self, pos: int) -> "Tuple[BaseEntry, int]":
        """
        returns (entry, pos)

        Parses the XML chunk at *pos* into a glossary entry; returns
        (None, pos) for empty or title-less chunks.
        """
        from lxml import etree
        entryBytes, pos = self._readEntryData(pos)

        entryFull = entryBytes.decode(self._encoding, errors="replace")
        entryFull = entryFull.strip()
        if not entryFull:
            return None, pos
        try:
            entryRoot = etree.fromstring(entryFull)
        except etree.XMLSyntaxError as e:
            log.error(
                f"pos={pos}, len(buf)={len(self._buf)}, "
                f"entryFull={entryFull!r}"
            )
            raise e
        entryElems = entryRoot.xpath("/d:entry", namespaces=entryRoot.nsmap)
        if not entryElems:
            return None, pos
        word = entryElems[0].xpath("./@d:title", namespaces=entryRoot.nsmap)[0]
        defi = self._getDefi(entryElems[0])

        if self._limit <= 0:
            raise ValueError(f"self._limit = {self._limit}")
        return self._glos.newEntry(
            word, defi,
            defiFormat=self._defiFormat,
            byteProgress=(self._absPos, self._limit),
        ), pos

    def readEntryIds(self):
        """Pre-scan all sections and build the id -> title map used to
        resolve cross-references; also sets the word count and rewinds
        the file back to the first data section (0x60)."""
        _file = self._file
        limit = self._limit
        titleById = {}
        while True:
            absPos = _file.tell()
            if absPos >= limit:
                break
            bufSizeB = _file.read(4)  # type: bytes
            bufSize, = unpack("i", bufSizeB)  # type: int
            # Each section's compressed payload skips an 8-byte header.
            self._buf = decompress(_file.read(bufSize)[8:])

            pos = 0
            while pos < len(self._buf):
                b_entry, pos = self._readEntryData(pos)
                b_entry = b_entry.strip()
                if not b_entry:
                    continue
                id_i = b_entry.find(b'id="')
                if id_i < 0:
                    log.error(f"id not found: {b_entry}, pos={pos}, buf={self._buf}")
                    continue
                id_j = b_entry.find(b'"', id_i + 4)
                if id_j < 0:
                    log.error(f"id closing not found: {b_entry.decode(self._encoding)}")
                    continue
                _id = b_entry[id_i + 4: id_j].decode(self._encoding)
                title_i = b_entry.find(b'd:title="')
                if title_i < 0:
                    log.error(f"title not found: {b_entry.decode(self._encoding)}")
                    continue
                title_j = b_entry.find(b'"', title_i + 9)
                if title_j < 0:
                    log.error(f"title closing not found: {b_entry.decode(self._encoding)}")
                    continue
                titleById[_id] = b_entry[title_i + 9: title_j].decode(self._encoding)

        self._titleById = titleById
        _file.seek(0x60)
        self._wordCount = len(titleById)

    def __iter__(self):
        """Yield a style.css data entry (if present) followed by all
        dictionary entries, section by section."""
        from os.path import dirname
        if self._file is None:
            raise RuntimeError("iterating over a reader while it's not open")

        glos = self._glos

        # Ship the bundled stylesheet alongside the entries if present.
        cssFilename = join(dirname(self._filename), "DefaultStyle.css")
        if isfile(cssFilename):
            with open(cssFilename, mode="rb") as cssFile:
                cssBytes = cssFile.read()
            yield glos.newDataEntry("style.css", cssBytes)

        _file = self._file
        limit = self._limit
        while True:
            self._absPos = _file.tell()
            if self._absPos >= limit:
                break
            # 4-byte section size, then zlib payload (8-byte header skipped).
            bufSizeB = _file.read(4)  # type: bytes
            bufSize, = unpack("i", bufSizeB)  # type: int
            self._buf = decompress(_file.read(bufSize)[8:])

            pos = 0
            while pos < len(self._buf):
                entry, pos = self._readEntry(pos)
                if entry is not None:
                    yield entry
| xiuxi/pyglossary | pyglossary/plugins/appledict_bin.py | appledict_bin.py | py | 8,315 | python | en | code | null | github-code | 36 |
18444612580 | from django.contrib.auth import get_user_model
from .models import Chat
def create_chat(user_id1, user_id2, room_name):
    """Create (or fetch) the chat room *room_name* and ensure both users
    are participants.

    Returns the Chat instance.
    """
    # Get participants
    participants = get_user_model().objects.filter(id__in=[user_id1, user_id2])
    # Get Chat instance
    chat, _ = Chat.objects.get_or_create(name=room_name)
    # Add participants to chat. add(*queryset) handles however many users
    # actually matched, instead of indexing participants[0]/[1], which
    # raised IndexError when one of the ids did not exist.
    chat.participants.add(*participants)
    chat.save()
    return chat
74612445225 | import socket
import threading
#创建一个socket
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# 绑定IP端口
server.bind(('192.168.31.144', 8080))
#绑定监听
server.listen(5)
print("服务器启动成功!")
'''
print("等待连接....")
clientSocket, clientAddress = server.accept()
print("新连接")
print("IP is %s" % clientAddress[0])
print("port is %d\n" % clientAddress[1])
while True:
#接收数据
msg = clientSocket.recv(1024)
print("服务端接收:", msg.decode("utf-8")) # 把接收到的数据进行解码
'''
# Per-thread storage so each handler thread keeps its own socket/address.
local = threading.local()


def func(clientSocket, clientAddress):
    """Handle one client connection as a thread target.

    Runs a simple chat loop: receive from the client, then send a line
    typed by the server operator. "EOF" ends this conversation; "quit"
    (from either side) closes the whole server and exits the process.
    """
    local.socket = clientSocket
    local.address = clientAddress
    print("新连接")
    print("IP is %s" % local.address[0])
    print("port is %d\n" % local.address[1])
    while True:
        # Receive data from the client.
        msg = local.socket.recv(1024)
        print("client %s:%s" % (local.address[0], msg.decode("utf-8")))
        if msg == b"EOF":
            break
        if msg == b"quit":
            local.socket.close()
            server.close()
            print("程序结束\n")
            exit()
        # Send a reply typed by the server operator.
        # NOTE(review): input() from multiple handler threads will
        # interleave on one stdin — confirm single-client usage.
        sendData = input("server>>")
        local.socket.send(sendData.encode("utf-8"))
        if sendData == "EOF":
            break
        if sendData == "quit":
            local.socket.close()
            server.close()
            print("程序结束\n")
            exit()
# Use threads to serve multiple connections: accept forever, one handler
# thread per client.
print("等待连接....")
while True:
    clientSocket, clientAddress = server.accept()
    threading.Thread(target=func, args=(clientSocket, clientAddress)).start()
| hanyb-sudo/hanyb | 网络编程(socket通信)/TCP编程/2、客户端与服务端的数据交互/server.py | server.py | py | 1,657 | python | en | code | 0 | github-code | 36 |
35829929249 | from django.contrib import admin
from .models import Page, Carousel
class PageAdmin(admin.ModelAdmin):
    """Admin options for Page entries."""

    # Auto-fill the slug from the title while typing.
    prepopulated_fields = {"slug": ("title",)}
    list_display = (
        'pk',
        'title',
        'slug',
        'status',
        'updated_at',
    )
    list_filter = ('status', )
    # Fields editable directly from the change-list page.
    list_editable = (
        'title',
        'status',
    )
class CarouselAdmin(admin.ModelAdmin):
    """Admin options for Carousel entries."""

    list_display = [
        'pk',
        'title',
        'cover_image',
        'status',
    ]
    list_filter = ['status', ]
    # Was ``list_editable = list_filter`` — that aliased the *same* list
    # object, so editing one option would silently change the other.
    # Spell the value out explicitly instead.
    list_editable = ['status', ]
# Wire the model admins into the default admin site.
admin.site.register(Page, PageAdmin)
admin.site.register(Carousel, CarouselAdmin)
19167657838 | '''
Programmer: Jessica Robertson
Date Written: 12-1-2022
Problem Link: adventofcode.com/2022/day1
Sources Used:
Test file for day 1 of 2022
'''
import day1_2022 as day1
import numpy as np  # NOTE(review): numpy appears unused in this test file

# Per-elf calorie totals from the worked example in the puzzle text,
# and the expected answers for parts 1 and 2.
TEST_ARRAY = [6000, 4000, 11000, 24000, 10000]
TEST_TOP_CAL = 24000
TEST_TOP_THREE_CAL = 45000
def test_find_top_num():
    """The best-stocked elf in the sample data carries 24000 calories."""
    _, top_calories = day1.find_most_cal(TEST_ARRAY)
    assert top_calories == TEST_TOP_CAL
def test_find_top_three():
    """The three best-stocked elves carry 45000 calories combined."""
    combined = day1.find_top_three(TEST_ARRAY)
    assert combined == TEST_TOP_THREE_CAL
| jrobertson627/adventofcode | year_2022/day1/test_day1_2022.py | test_day1_2022.py | py | 536 | python | en | code | 0 | github-code | 36 |
15561174682 | import os
import platform
from build_swift.build_swift import cache_utils
from build_swift.build_swift.shell import which
from build_swift.build_swift.wrappers import xcrun
from . import shell
__all__ = [
'host_toolchain',
]
class Toolchain(object):
    """Represents native host toolchain

    Tool paths (cc, cxx, cmake, ...) are exposed as lazily-resolved
    properties installed by ``_register`` below.
    """

    def find_tool(self, *names):
        # Subclasses resolve the first available tool among *names*.
        raise NotImplementedError('Subclasses must implement this method')
# Declare properties for each tools.
# These properties are loaded lazily and assignable.
def _register(name, *tool):
    # Install a lazily-computed, cached property *name* on Toolchain that
    # resolves to the first of *tool* located by find_tool().
    def _getter(self):
        return self.find_tool(*tool)
    _getter.__name__ = name
    setattr(Toolchain, name, cache_utils.reify(_getter))


if platform.system() == 'Windows':
    _register("cc", "clang-cl")
    _register("cxx", "clang-cl")
else:
    _register("cc", "clang")
    _register("cxx", "clang++")
_register("ninja", "ninja", "ninja-build")
_register("cmake", "cmake")
_register("distcc", "distcc")
_register("distcc_pump", "distcc-pump", "pump")
_register("llvm_profdata", "llvm-profdata")
_register("llvm_cov", "llvm-cov")
_register("lipo", "lipo")
_register("libtool", "libtool")
_register("ld", "ld")
if 'ANDROID_DATA' in os.environ:
    # Android environments ship LLVM's binutils replacements.
    _register("ranlib", "llvm-ranlib")
    _register("ar", "llvm-ar")
else:
    _register("ranlib", "ranlib")
    _register("ar", "ar")
_register("sccache", "sccache")
_register("swiftc", "swiftc")
class Darwin(Toolchain):
    """Toolchain for Darwin hosts; tools are resolved through xcrun."""

    def __init__(self, sdk, toolchain):
        super(Darwin, self).__init__()
        self.xcrun_sdk = sdk
        self.xcrun_toolchain = toolchain

    def find_tool(self, *names):
        """Return the first of *names* that xcrun can locate, else None."""
        for name in names:
            # NOTE: xcrun searches from developer tools directory *and* from
            #       PATH. Relatively slow, but we don't need `which` for
            #       Darwin.
            found = xcrun.find(name,
                               sdk=self.xcrun_sdk,
                               toolchain=self.xcrun_toolchain)
            if found is not None:
                return found
        return None
class GenericUnix(Toolchain):
    """Toolchain for generic Unix-like hosts where tools may carry a
    version suffix (e.g. ``clang-3.8``, ``llvm-cov-3.6``)."""

    def __init__(self, suffixes):
        super(GenericUnix, self).__init__()
        # On these platforms, search 'clang', 'clang++' unconditionally.
        # To determine the llvm_suffix.
        ret = self.find_clang(['clang', 'clang++'], suffixes)
        if ret is None:
            self.cc = None
            self.cxx = None
            # We don't have clang, then we don't have any llvm tools.
            self.llvm_suffixes = []
        else:
            found, suffix = ret
            self.cc, self.cxx = found
            if suffix == '':
                # Some platform may have `clang`, `clang++`, `llvm-cov-3.6`
                # but not `llvm-cov`. In that case, we assume `clang` is
                # corresponding to the best version of llvm tools found.
                self.llvm_suffixes = suffixes
            else:
                # Otherwise, we must have llvm tools with the same suffix as
                # `clang` or `clang++`
                self.llvm_suffixes = [suffix]

    def find_clang(self, tools, suffixes):
        """Return ``(paths, suffix)`` for the first suffix with which all
        *tools* are found on PATH; None when no suffix matches."""
        for suffix in suffixes:
            ret = [which(t + suffix) for t in tools]
            if all(t is not None for t in ret):
                return (ret, suffix)
        return None

    def find_llvm_tool(self, tool):
        """Try *tool* with each candidate LLVM suffix, locking in the
        first suffix that yields a hit."""
        for suffix in self.llvm_suffixes:
            found = which(tool + suffix)
            if found is not None:
                # If we found the tool with the suffix, lock suffixes to it.
                # (Previously this assigned a never-read ``llvm_suffix``
                # attribute, so the lock silently had no effect.)
                self.llvm_suffixes = [suffix]
                return found
        return None

    def find_tool(self, *names):
        # llvm-* tools go through the suffix search; everything else is a
        # plain PATH lookup.
        for name in names:
            if name.startswith('llvm-'):
                found = self.find_llvm_tool(name)
            else:
                found = which(name)
            if found is not None:
                return found
        return None
class MacOSX(Darwin):
    """Darwin toolchain pinned to the 'macosx' SDK."""

    def __init__(self, toolchain='default'):
        super(MacOSX, self).__init__(sdk='macosx', toolchain=toolchain)
class Linux(GenericUnix):
    """Linux host toolchain; probes clang with common version suffixes."""

    def __init__(self):
        super(Linux, self).__init__(['', '-3.8', '-3.7', '-3.6', '-3.5'])
class FreeBSD(GenericUnix):
    """FreeBSD host toolchain; suffix list depends on the OS release."""

    def __init__(self):
        # For testing toolchain initializer on non-FreeBSD systems
        sys = platform.system()
        if sys != 'FreeBSD':
            suffixes = ['']
        # See: https://github.com/apple/swift/pull/169
        # Building Swift from source requires a recent version of the Clang
        # compiler with C++14 support.
        elif self._release_date and self._release_date >= 1100000:
            suffixes = ['']
        else:
            suffixes = ['38', '37', '36', '35']
        super(FreeBSD, self).__init__(suffixes)

    @cache_utils.reify
    def _release_date(self):
        """Return the release date for FreeBSD operating system on this host.
        If the release date cannot be ascertained, return None.
        """
        # For details on `sysctl`, see:
        # http://www.freebsd.org/cgi/man.cgi?sysctl(8)
        out = shell.capture(['sysctl', '-n', 'kern.osreldate'],
                            dry_run=False, echo=False, optional=True)
        if out is None:
            return None
        return int(out)
class OpenBSD(GenericUnix):
    """OpenBSD host toolchain; uses unsuffixed tool names."""

    def __init__(self):
        super(OpenBSD, self).__init__([''])
class Cygwin(Linux):
    # Currently, Cygwin is considered as the same as Linux.
    pass
class Windows(Toolchain):
    """Windows host toolchain; tools are located via plain PATH lookup."""

    def find_tool(self, *names):
        # Return the first of *names* found on PATH, or None.
        for name in names:
            found = which(name)
            if found is not None:
                return found
        return None
class Haiku(GenericUnix):
    """Haiku host toolchain; uses unsuffixed tool names."""

    def __init__(self):
        # ``super(Haiku, self)`` on its own only built a super proxy and
        # discarded it, so GenericUnix.__init__ never ran and no tools
        # were ever located. Invoke it properly with the default suffix.
        super(Haiku, self).__init__([''])
def host_toolchain(**kwargs):
    """Construct the Toolchain subclass matching the current host OS."""
    system = platform.system()
    if system == 'Darwin':
        return MacOSX(kwargs.pop('xcrun_toolchain', 'default'))
    if system.startswith('CYGWIN'):
        return Cygwin()
    constructors = {
        'Linux': Linux,
        'FreeBSD': FreeBSD,
        'OpenBSD': OpenBSD,
        'Windows': Windows,
        'Haiku': Haiku,
    }
    ctor = constructors.get(system)
    if ctor is None:
        raise NotImplementedError('The platform "%s" does not have a defined '
                                  'toolchain.' % system)
    return ctor()
| apple/swift | utils/swift_build_support/swift_build_support/toolchain.py | toolchain.py | py | 6,353 | python | en | code | 64,554 | github-code | 36 |
75174467624 | # Task 1.1.1
# 1. Given two whole numbers - the lengths of the legs of a right-angled
#    triangle - output its area.
a = int(input())
b = int(input())
area = a * b / 2
print('The area of right angled triangle is equal: ', area)

# 2. Input a natural number n and output its last digit.
n = int(input())
last_digit = n % 10
print('last digit of n is:', last_digit)

# 3. Input a two-digit natural number and output the sum of its digits.
n = int(input())
first_digit = n // 10
last_digit = n % 10
# Renamed from ``sum`` to avoid shadowing the built-in sum().
digit_sum = first_digit + last_digit
print(digit_sum)

# 4. You are given the first and second number in an arithmetic progression
#    and natural number n. Find the n-th element of the progression.
a0 = int(input())
a1 = int(input())
n = int(input())
diff = a1 - a0
nth = a0 + (n - 1) * diff
print(nth)
12029350008 | #!/usr/bin/env python3
import socket
import threading
import pickle
PORT = 6969
SERVER = socket.gethostbyname(socket.gethostname())
ADDR = (SERVER, PORT)
#print(SERVER)
HEADER = 64
FORMAT = 'utf-8'
DISCONNECT_MESSAGE = "gbye"
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(ADDR)
def send(msg):
    """Send *msg* to the server with a fixed-size length header, then
    print the server's pickled reply.

    The header is the pickled message length padded with spaces out to
    HEADER bytes; pickle.loads on the peer tolerates the trailing padding.
    """
    message = msg.encode(FORMAT)
    msg_length = len(message)
    # send_length = str(msg_length).encode(FORMAT)
    send_length = pickle.dumps(msg_length)
    send_length += b' ' * (HEADER - len(send_length))
    client.send(send_length)
    client.send(message)
    # Blocks until the server answers with a pickled message.
    # NOTE(review): unpickling data from the network is only safe against
    # a trusted server.
    server_msg = pickle.loads(client.recv(2048))
    print(server_msg)
try:
    # Read lines from stdin and ship each one to the server until Ctrl-C.
    while True:
        send(input())
except KeyboardInterrupt:
    # Tell the server we are leaving before exiting.
    send(DISCONNECT_MESSAGE)
    exit()
| NateDreier/Learn_Python | challenges/tcp_client.py | tcp_client.py | py | 746 | python | en | code | 0 | github-code | 36 |
26299686956 | import mysql.connector
from datetime import datetime, timedelta
import os
import sys
from pathlib import Path

# Railway-provided MySQL connection settings, read from the environment.
sql_pass = os.environ["MYSQLPASSWORD"]
railway_host = os.environ["MYSQLHOST"]
railway_user = os.environ["MYSQLUSER"]
railway_database = os.environ["MYSQLDATABASE"]
railway_port = int(os.environ["MYSQLPORT"])
def get_connection():
    """Open and return a new MySQL connection using the Railway settings."""
    return mysql.connector.connect(
        host=railway_host,
        user=railway_user,
        password=sql_pass,
        database=railway_database,
        port=railway_port,
    )
# Fetch this user's unfinished tasks.
def get_tasks(user_id):
    """Return all tasks for *user_id* whose status is not '完了' (done),
    as a list of dicts."""
    conn = get_connection()
    cur = conn.cursor(dictionary=True)
    cur.execute(
        "SELECT * FROM tasks WHERE user_id = %s AND status !='完了' ",
        (user_id,)
    )
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows
# Fetch every task row for the user, regardless of status.
def get_all_tasks(user_id):
    """Return all tasks for *user_id* as a list of dicts."""
    conn = get_connection()
    cur = conn.cursor(dictionary=True)
    cur.execute(
        "SELECT * FROM tasks WHERE user_id = %s ",
        (user_id,)
    )
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows
# Insert a brand-new task row.
def add_task(user_id, startdatetime_str, task, status, priority, enddatetime_str):
    """Insert one task row for *user_id* with the given attributes."""
    conn = get_connection()
    cur = conn.cursor()
    cur.execute(
        "INSERT INTO tasks (user_id, starttime, task,status,priority,endtime) VALUES (%s, %s, %s,%s,%s,%s)",
        (user_id, startdatetime_str, task, status, priority, enddatetime_str)
    )
    conn.commit()
    cur.close()
    conn.close()
# Change a task's status.
def update_status(task_id, user_id, status):
    """Set *status* on the task with id *task_id* owned by *user_id*."""
    conn = get_connection()
    cur = conn.cursor()
    try:
        cur.execute(
            "UPDATE tasks SET status = %s WHERE id = %s AND user_id = %s",
            (status, task_id, user_id)
        )
        conn.commit()
    finally:
        cur.close()
        conn.close()
# Tasks that are unfinished and whose end time passed 24+ hours ago.
def get_expired_tasks(current_time):
    """Return tasks not yet '完了' whose endtime is at least one day
    before *current_time*, as a list of dicts."""
    conn = get_connection()
    cur = conn.cursor(dictionary=True)
    cutoff = current_time - timedelta(days=1)
    cur.execute(
        "SELECT * FROM tasks WHERE endtime <= %s AND status != '完了' ",
        (cutoff,)
    )
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows
| uninin3141/task_manage_bot | app/dataset/db.py | db.py | py | 2,618 | python | en | code | 0 | github-code | 36 |
71593353703 | # Nick Wise
# 10/27/2020
# NOTES: Program currently has only been tested with 1 file.
# TODO: add validation check for file input, user entered word.
# This program will:
# ask a user to enter a file/files.
# ask user for word to look up in file.
# find word and convert it to uppercase and print total occurrences
# for each occurrence. Print line number and local context.
# function to read file and something else
import os
def main():
    """Drive the concordance program: read files, strip stop words, then
    look up a user-supplied word and display count, lines and context."""
    concordance = {}
    files = get_files()
    for file in files:
        contents = read_files(file)
        # removes extension to be placed as proper key in dictionary
        title = os.path.splitext(file)[0]
        # makes dictionary with title and content of file
        concordance[title] = contents
        # converting list to lowercase to analyse in remove stop words
        lower_list = [word.lower() for word in contents]
        # reads and removes stop words from file.
        updated_contents = remove_stop_words(lower_list)
        # gets user input for word to search
        word = get_user_word()
        # counts occurrences (counted against the stop-word-filtered, lowercased words)
        occurrences = count_occurrence(word, updated_contents)
        bolded_user_word = uppercase_occurrences(word, contents)
        # print(bolded_user_word) # this is for testing
        line_of_word = find_line(word, bolded_user_word)
        # Display information back to user
        display_info(word, occurrences, line_of_word, bolded_user_word)
def get_files():
    """Collect file paths from the user until a blank entry, then return them.

    Fix: the old ``more_files`` flag was never set to False (dead loop
    condition) — the loop only ever exits via return, so make that explicit
    with ``while True`` and return before appending the blank sentinel.
    """
    files = []
    while True:
        file = input("Please enter file path.\nNote: press the enter key when you are done inputting your files.\n")
        # if user pressed enter key, stop collecting and return what we have
        if file == '':
            return files
        files.append(file)
# function to read files and place them in list.
def read_files(file):
    """Return every line of *file* as a list (trailing newlines kept)."""
    with open(file, 'r') as handle:
        return handle.readlines()
# removes stop words from files
def remove_stop_words(contents):
    """Split *contents* into words and drop English stop words.

    Reads the stop-word list from "english_stop.txt" in the current working
    directory — the file must exist there or open() raises.
    """
    stop_words = "english_stop.txt"
    with open(stop_words, 'r') as w:
        lines = w.readlines()
    # strips stop word file of new line character
    lines = [i.strip() for i in lines]
    # strips file content of new line character
    contents = [i.strip() for i in contents]
    # makes list of words from contents of file
    split_words = [i for item in contents for i in item.split()]
    # makes a new list comparing the list of stop words to list of words in file content
    content_without_stop_words = [i for i in split_words if i not in lines]
    return content_without_stop_words
# Function to get a word from user
def get_user_word():
    """Ask the user which word to search for and return it."""
    return input("Please enter word to look for in your files.\n")
# Function to count occurrences
def count_occurrence(word, contents):
    """Return how many times *word* appears in *contents* (0 when absent).

    Fix: the previous membership check fell through and returned None for a
    missing word; list.count already returns 0 in that case.
    """
    return contents.count(word)
def uppercase_occurrences(word, contents):
    """Return *contents* with occurrences of *word* rewritten in uppercase.

    The first line containing the word padded by spaces — checked in the
    order exact, Title case, lower case — decides which spelling is replaced
    throughout the whole list.

    Fix: when the word never appears, the old code fell off the end and
    returned None, which made find_line() crash when it iterated the result;
    now the contents are returned unchanged.
    """
    upper_word = word.upper()
    # (space-padded needle to look for, spelling to replace) in priority order
    forms = [
        (f" {word} ", word),
        (f" {word.title()} ", word.title()),
        (f" {word.lower()} ", word.lower()),
    ]
    for line in contents:
        for needle, spelling in forms:
            if needle in line:
                # replace every matching spelling across all lines
                return [item.replace(spelling, upper_word) for item in contents]
    # word not found in any form: hand back the lines untouched
    return contents
# Function for finding and indexing the users word.
def find_line(user_word, bolded_user_word):
    """Return the indices of every line containing *user_word* in uppercase.

    Note: indices are resolved with list.index on the wrapped rows, so
    duplicate lines resolve to the first equal row's index — same contract
    as before.
    """
    target = user_word.upper()
    rows = [[line] for line in bolded_user_word]
    return [rows.index(row) for row in rows for cell in row if target in cell]
# function to display user word, occurrences, the line number the word appears on and the line itself.
def display_info(word, occurrences, line_of_word, bolded_user_word):
    """Print the searched word, its total count, and each line it appears on."""
    print(f"{word}: Total Count:{occurrences}\n")
    for index in line_of_word:
        lines = list(bolded_user_word)
        print(f"\tLine {index + 1}: {lines[index]}")
main()
| YcleptInsan/DictionaryConcordance- | Wise-DictionaryConcordance/Wise-concordance.py | Wise-concordance.py | py | 5,628 | python | en | code | 0 | github-code | 36 |
72485988903 | import logging
import sshpubkeys
from django.core.exceptions import ValidationError
LOGGER = logging.getLogger(__name__)
def ssh_public_key_validator(public_key):
    '''
    Validate an SSH public key string.

    Parses *public_key* with sshpubkeys; raises django's ValidationError
    (chained to the parser error) when the key is malformed.
    '''
    try:
        key = sshpubkeys.SSHKey(public_key)
        key.parse()
    except (sshpubkeys.InvalidKeyError, sshpubkeys.exceptions.MalformedDataError, UnicodeEncodeError) as exc:
        # log the full traceback before converting to a form/model error
        LOGGER.exception(exc)
        raise ValidationError('Malformed SSH Public Key') from exc
| bpereto/borg-hive | src/borghive/lib/validators.py | validators.py | py | 495 | python | en | code | 35 | github-code | 36 |
35553862778 | from Twitter import *
import re
class StockAlertInfo():
    """Snapshot of one stock/option alert parsed from a tweet.

    Built from a dict with keys: stock, buy_price, option, alert_type,
    lotto, time — plus strike_price and contract_type when option is truthy.
    """

    def __init__(self, alert_info : dict):
        self.__alert_info : dict = alert_info
        self.stock = self.__alert_info["stock"]
        self.buy_price = self.__alert_info["buy_price"]
        self.option = self.__alert_info["option"]
        self.alert_type = self.__alert_info["alert_type"]
        self.lotto = self.__alert_info["lotto"]
        self.alert_time = self.__alert_info["time"]
        # wall-clock time this object was constructed (not the tweet time)
        self.time = datetime.now()
        if(self.option):
            self.strike_price = self.__alert_info["strike_price"]
            self.contract_type = self.__alert_info["contract_type"]

    def change_alert(self, alert_type):
        """Return a new StockAlertInfo identical to this one but with *alert_type*.

        Fix: copy the backing dict first — the old code mutated the shared
        dict in place, silently changing the state this (and any sibling)
        alert was built from.
        """
        alert_info = dict(self.__alert_info)
        alert_info["alert_type"] = alert_type
        return StockAlertInfo(alert_info)
class DailyRecap():
    """Holder for one day's closed alerts paired with their sell prices."""

    def __init__(self, daily_alerts : list[StockAlertInfo, float]):
        # each element is a [StockAlertInfo, sell_price] pair
        self.__daily_alerts : list[StockAlertInfo, float] = daily_alerts

    def get_stocks(self):
        """Yield (stock_alert, sell_price) tuples, one per closed alert."""
        for pair in self.__daily_alerts:
            alert, sold_at = pair
            yield alert, sold_at
class SGTwitterTDParser():
    """Parses trading-alert tweets into StockAlertInfo objects.

    Understands three tweet shapes: "entry ..." opens an alert,
    "scale"/"exit ..." re-tags an existing alert, and "DAILY RECAP"
    closes out open alerts into a DailyRecap.
    """
    def __init__(self):
        # every alert ever parsed
        self.__stock_alerts = []
        # tweets already processed, for de-duplication by id
        self.__old_tweets = []
        # DailyRecap objects built from recap tweets
        self.daily_recaps = []
        # alerts opened but not yet matched by a recap
        self.__daily_alerts = []
    def parse_messages(self, twitter_messages : list[TwitterMessage]) -> list[StockAlertInfo]:
        """Parse a batch of tweets; return the alerts found (duplicates return None and are skipped)."""
        stock_alerts = []
        for message in twitter_messages:
            stock_alert = self.parse_message(message)
            if stock_alert:
                stock_alerts.append(stock_alert)
        return stock_alerts
    def parse_message(self, twitter_message : TwitterMessage) -> StockAlertInfo:
        """Parse one tweet; return a StockAlertInfo or None (duplicate / unrecognised)."""
        # skip tweets we've already seen
        for old_tweets in self.__old_tweets:
            if twitter_message.id == old_tweets.id:
                return None
        self.__old_tweets.append(twitter_message)
        tweet_text = twitter_message.text
        split_text = tweet_text.split(" ")
        if "DAILY RECAP" in tweet_text:
            # alerts that do NOT appear in the recap stay open
            daily_alerts = []
            # NOTE(review): split_text_copy and adjusted_split_text alias the
            # same list object as split_text, so pop(idx) below mutates all
            # three — confirm that is intended.
            split_text_copy = split_text
            daily_alerts_filtered = []
            for daily_alert in self.__daily_alerts:
                daily_alert : StockAlertInfo
                adjusted_split_text : list = split_text_copy
                for idx, split in enumerate(split_text):
                    if daily_alert.stock in split:
                        # strip everything but digits and '.' to get the buy price token
                        price = re.sub("[^0123456789\.]","",split_text[idx+1])
                        if price == "":
                            pass
                        else:
                            price = float(price)
                            # match on buy price; the token after it must not be another '@' entry
                            if "@" not in split_text[idx+2] and daily_alert.buy_price == price:
                                if "(" in split_text[idx+3]:
                                    potential_sell = split_text[idx+3].split("(")[0]
                                    if potential_sell != "":
                                        sell_price = potential_sell
                                    else:
                                        sell_price = split_text[idx+2]
                                else:
                                    sell_price = split_text[idx+3]
                                daily_alerts_filtered.append([daily_alert, float(re.sub("[^0123456789\.]","",sell_price))])
                                adjusted_split_text.pop(idx)
                                break
                else:
                    # for-else: no recap entry matched, keep this alert open
                    daily_alerts.append(daily_alert)
                split_text_copy = adjusted_split_text
            daily_recap = DailyRecap(daily_alerts_filtered)
            self.daily_recaps.append(daily_recap)
            self.__daily_alerts = daily_alerts
        alert_type = split_text[0]
        stock_alert = None
        option = True
        if alert_type == "entry":
            # e.g. "entry $TSLA 700c @ 1.25 ..." — symbol after '$', strike+c/p next
            symbol = split_text[1][1:]
            strike_price = split_text[2][:-1]
            contract_type = ""
            if split_text[2][-1] == "c":
                contract_type = "CALL"
            else:
                contract_type = "PUT"
            buy_price = None
            # prefer the price following an explicit '@' token
            for idx, split in enumerate(split_text):
                if split == "@":
                    buy_price = float(split_text[idx+1])
                    break
            if buy_price == None:
                buy_price = float(split_text[3])
            lotto = False
            for split in split_text:
                if split == "lotto" or split == "LOTTO":
                    lotto = True
            alert_info = {
                "stock" : symbol,
                "buy_price" : buy_price,
                "option" : option,
                "alert_type" : alert_type,
                "lotto" : lotto,
                "strike_price" : strike_price,
                "contract_type" : contract_type,
                "time" : twitter_message.created
            }
            stock_alert = StockAlertInfo(alert_info)
            self.__daily_alerts.append(stock_alert)
            self.__stock_alerts.append(stock_alert)
        elif alert_type == "scale" or alert_type == "exit":
            symbol = ""
            for text in split_text:
                if "$" in text:
                    symbol = text[1:]
            # NOTE(review): reverse() mutates the stored alert list in place
            # on every scale/exit tweet, and the loop has no break — the last
            # match (oldest after the reverse) wins; confirm a break was intended.
            reversed_alerts = self.__stock_alerts
            reversed_alerts.reverse()
            for alert in reversed_alerts:
                alert : StockAlertInfo
                if symbol == alert.stock:
                    stock_alert = alert.change_alert(alert_type)
        return stock_alert
| gatordevin/TradingBot | v4/Parsers.py | Parsers.py | py | 5,539 | python | en | code | 1 | github-code | 36 |
34125354328 | import matplotlib.pyplot as pt
import math
# Time steps used for the integrator comparison.
dt = [1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 1e-2]
dtl = [math.log(x) for x in dt]
# Drift values per integrator at each dt (Verlet, velocity Verlet, Euler).
# NOTE(review): the 1e9 entries presumably mark diverged runs — confirm.
dr = [1e-6, 2e-6, 4e-6, 2e-5, 9e-5, 6e-4, 1e-3]
dr2 = [2e-2, 7e-2, 1e-1, 5e-1, 1e0, 1e9, 1e9]
dr3 = [3e-1, 7e-1, 2e0, 8e0, 1.3e1, 1e9, 1e9]
drl = [math.log(x) for x in dr]
drl2 = [math.log(x) for x in dr2]
drl3 = [math.log(x) for x in dr3]
# log-log plot of drift vs time step for the three integrators
pt.plot(dtl, drl, label = 'Verlet')
pt.plot(dtl, drl2, label = 'velocity Verlet')
pt.plot(dtl, drl3, label = 'Euler')
pt.xlim((-10, -4))
pt.ylim((-14, 4))
pt.xlabel(r'ln dt')
pt.ylabel(r'ln drift')
pt.legend()
pt.show()
27625090373 | """empty message
Revision ID: 187613429bc6
Revises: f493fd2f04fa
Create Date: 2023-03-11 20:54:05.004095
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '187613429bc6'
down_revision = 'f493fd2f04fa'
branch_labels = None
depends_on = None
def upgrade():
    """Create the cat, customer, dog, goods, student, user and address tables
    (address carries a foreign key to customer)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('cat',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('a_name', sa.String(length=16), nullable=True),
    sa.Column('d_eat', sa.String(length=32), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('customer',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('c_name', sa.String(length=16), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('dog',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('a_name', sa.String(length=16), nullable=True),
    sa.Column('d_legs', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('goods',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('g_name', sa.String(length=64), nullable=True),
    sa.Column('g_price', sa.Float(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('student',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('user',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('u_name', sa.String(length=16), nullable=True),
    sa.Column('u_des', sa.String(length=128), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('u_name')
    )
    op.create_table('address',
    sa.Column('id', sa.Integer(), autoincrement=True, nullable=False),
    sa.Column('a_position', sa.String(length=128), nullable=True),
    sa.Column('a_customer_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['a_customer_id'], ['customer.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables created by upgrade(); address first because it
    holds the foreign key to customer."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('address')
    op.drop_table('user')
    op.drop_table('student')
    op.drop_table('goods')
    op.drop_table('dog')
    op.drop_table('customer')
    op.drop_table('cat')
    # ### end Alembic commands ###
| operatorhs/python-flask | flask-stu/migrations/versions/187613429bc6_.py | 187613429bc6_.py | py | 2,518 | python | en | code | 0 | github-code | 36 |
7813713746 | """add tree_parameters column to groups
Create Date: 2022-05-02 21:53:26.704275
"""
import enumtables # noqa: F401
import sqlalchemy as sa
from alembic import op
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = "20220502_215324"
down_revision = "20220502_171903"
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable JSONB column groups.tree_parameters (default '{}')."""
    op.add_column(
        "groups",
        sa.Column(
            "tree_parameters",
            postgresql.JSONB(astext_type=sa.Text()),
            # empty-object default applied server-side for existing rows
            server_default=sa.text("'{}'::jsonb"),
            nullable=True,
        ),
        schema="aspen",
    )
def downgrade():
    """Downgrading this migration is deliberately unsupported."""
    raise NotImplementedError("don't downgrade")
| chanzuckerberg/czgenepi | src/backend/database_migrations/versions/20220502_215324_add_tree_parameters_column_to_groups.py | 20220502_215324_add_tree_parameters_column_to_groups.py | py | 696 | python | en | code | 11 | github-code | 36 |
72184794663 | """
This sorting algorithm has time complexity of O(n*log n) but in corner cases (sorted or almost sorted list)
can be slower up to O(n^2)
1. Divide and conquer algorithm
2. Can be implemented as In place algorithm, (doesn't create additional sub-lists (still requires some memory for
function call stack))
3. Can be implemented as a Stable or Unstable algorithm
Base idea is in recurrent splitting of a collection into parts of elements that are less than, equal to, or greater than
some chosen border element (pivot)
"""
def quicksort(unsorted_list: list) -> list:
    """Return a sorted copy of *unsorted_list* (recursive, stable quicksort).

    The first element is the pivot; the smaller and larger partitions are
    sorted recursively and the equal elements kept in encounter order.
    """
    if not unsorted_list:
        return unsorted_list
    pivot, *rest = unsorted_list
    smaller = [item for item in rest if item < pivot]
    equal = [pivot] + [item for item in rest if item == pivot]
    larger = [item for item in rest if item > pivot]
    return quicksort(smaller) + equal + quicksort(larger)
def quicksort_in_place(given_list):
    """
    Sort *given_list* in place (and return it) using a recurrent algorithm
    and Hoare partition scheme, see https://en.wikipedia.org/wiki/Quicksort
    :param given_list: an unsorted list
    :return: the same list, now sorted
    """
    def _quicksort(given_list, left, right):
        # must run partition on sections with 2 elements or more
        if left < right:
            splitter = partition(given_list, left, right)
            # Hoare scheme: the splitter index itself stays in the left half
            _quicksort(given_list, left, splitter)
            _quicksort(given_list, splitter + 1, right)

    def partition(given_list, left, right):
        pivot = given_list[left]
        while True:
            # advance both cursors past elements already on the correct side
            while given_list[left] < pivot:
                left += 1
            while given_list[right] > pivot:
                right -= 1
            if left >= right:
                return right
            # swap the out-of-place pair and keep scanning inward
            given_list[left], given_list[right] = given_list[right], given_list[left]
            left += 1
            right -= 1
    _quicksort(given_list, 0, len(given_list)-1)
    return given_list
# Demo: run both implementations and print result + correctness flag.
# Note: quicksort returns a new list, while quicksort_in_place sorts the
# example list itself (so example1/example2 are sorted after those calls).
example1 = [4, 5, 1, 2, 3]
exp_result1 = [1, 2, 3, 4, 5]
res1 = quicksort(example1)
res1_2 = quicksort_in_place(example1)
print(res1, res1 == exp_result1)
print(res1_2, res1_2 == exp_result1)
example2 = [2, 5, 6, 1, 4, 6, 2, 4, 7, 8, 2]
exp_result2 = [1, 2, 2, 2, 4, 4, 5, 6, 6, 7, 8]
res2 = quicksort(example2)
res2_2 = quicksort_in_place(example2)
print(res2, res2 == exp_result2)
print(res2_2, res2_2 == exp_result2)
| Delacrua/LearningPython | Algorithms/Sortings/QuickSortHoare.py | QuickSortHoare.py | py | 2,794 | python | en | code | 0 | github-code | 36 |
4390242213 | from django.contrib import admin
from django.urls import path
from . import views
# URL routes for the login app; each route is named after its view function.
urlpatterns = [
    path("",views.index,name="loginmain"),
    path("login/",views.login,name="login"),
    path("logout/",views.logout,name="logout"),
    path("change/",views.change,name="change"),
    path("sendcode/",views.send_code,name="sendcode"),
    path("verify/",views.verify,name="verify"),
    path('reset/',views.reset,name="reset"),
]
| Hardik01101/BlogSite-1 | login/urls.py | urls.py | py | 447 | python | en | code | null | github-code | 36 |
70582641703 |
from sxm_manager.settings import common
soft_prefix = '2xfmS1:'
ioc_prefix = '2xfm:'
cam_prefix = 'MMPAD3x2:cam1:'
xfd_prefix = 'dxpXMAP2xfm3:'
# Syntax: {pvname: function_name}
# When pvname changes function_name will be called with pvname's value
callbacks = {
soft_prefix+'scan_axes_select.VAL': 'select_scan_axes',
soft_prefix+'scan_type_select.VAL': 'select_scan_type',
soft_prefix+'stepfly.VAL': 'toggle_stepfly_state',
soft_prefix+'dwell.VAL': 'update_dwell',
ioc_prefix+'userTran1.P': 'update_dwell',
ioc_prefix+'FlySetup:DwellTime.VAL': 'update_dwell',
soft_prefix+'begin_scan_1d.VAL': 'do_1d_scan',
soft_prefix+'begin_scan_2d.VAL': 'do_2d_scan',
soft_prefix+'pinhole_define.VAL': 'pinhole_define',
soft_prefix+'pinhole_move.VAL': 'pinhole_move',
soft_prefix+'toggle_lock.VAL': 'toggle_lock_state',
soft_prefix+'heartbeat.VAL': 'toggle_heartbeat',
soft_prefix+'stage_stack_move.VAL': 'stage_stack_move',
soft_prefix+'stage_stack_define.VAL': 'stage_stack_define',
soft_prefix+'alignment.VAL': 'alignment_mode',
soft_prefix+'to_batch.VAL': 'push_to_batch',
soft_prefix+'generate_config.VAL': 'generate_config',
soft_prefix+'reprocess_all.VAL': 'maps_reprocess_all',
soft_prefix+'process_now.VAL': 'maps_process_now',
soft_prefix+'setup_user_dirs.VAL': 'generate_user_dirs',
soft_prefix+'update_user.VAL': 'update_user',
ioc_prefix+'scan2.EXSC': 'scan_ends_process_now',
ioc_prefix+'Fscan1.EXSC': 'scan_ends_process_now',
soft_prefix+'take_standards.VAL': 'take_standards',
soft_prefix+'zp1_state.VAL': 'zp_stack_state_change',
soft_prefix+'zp2_state.VAL': 'zp_stack_state_change',
soft_prefix+'zp3_state.VAL': 'zp_stack_state_change',
soft_prefix+'zp4_state.VAL': 'zp_stack_state_change',
soft_prefix+'zp_config_move.VAL': 'zp_stack_in_out',
}
scan_records = ['scanH','scan1','scan2','scan3','FscanH','Fscan1','Fscan2','Fscan3']
stage_stacks = {
'filter_wheel': {
'angle': ioc_prefix+'m15',
'lock_state': soft_prefix+'filter_wheel_locked.VAL',
},
'pinhole': {
'x': ioc_prefix+'m9',
'y': ioc_prefix+'m10',
'lock_state': soft_prefix+'pinhole_locked.VAL',
},
'zp_stack': {
'x': ioc_prefix+'m30',
'y': ioc_prefix+'m12',
'z': ioc_prefix+'m29',
'lock_state': soft_prefix+'zp_stack_locked.VAL',
},
'zp1': {
'x': ioc_prefix+'m56',
'y': ioc_prefix+'m57',
'z': ioc_prefix+'m58',
'lock_state': soft_prefix+'zp1_locked.VAL',
},
'zp2': {
'x': ioc_prefix+'m59',
'y': ioc_prefix+'m60',
'z': ioc_prefix+'m61',
'lock_state': soft_prefix+'zp2_locked.VAL',
},
'zp3': {
'x': ioc_prefix+'m62',
'y': ioc_prefix+'m63',
'z': ioc_prefix+'m64',
'lock_state': soft_prefix+'zp3_locked.VAL',
},
'zp4': {
'x': ioc_prefix+'m65',
'y': ioc_prefix+'m66',
'z': ioc_prefix+'m67',
'lock_state': soft_prefix+'zp4_locked.VAL',
},
'osa': {
'x': ioc_prefix+'m18',
'y': ioc_prefix+'m19',
'lock_state': soft_prefix+'osa_locked.VAL',
},
'sample': {
'x': ioc_prefix+'m200',
'y': ioc_prefix+'m13',
'z': ioc_prefix+'m23',
'top': ioc_prefix+'m4',
'middle': ioc_prefix+'m5',
'theta': ioc_prefix+'m6',
'lock_state': soft_prefix+'sample_locked.VAL',
},
'tx_det': {
'x': ioc_prefix+'m31',
'y': ioc_prefix+'m32',
'lock_state': soft_prefix+'tx_det_locked.VAL',
}
}
scan_axes = {
0: ['sample.x', 'sample.y'],
1: ['sample.y', 'sample.x'],
2: ['sample.x', 'zp_stack.z'],
3: ['sample.y', 'zp_stack.z'],
4: ['sample.x', 'zp1.z'],
5: ['sample.y', 'zp1.z'],
6: ['sample.x', 'zp2.z'],
7: ['sample.y', 'zp2.z'],
8: ['sample.x', 'zp3.z'],
9: ['sample.y', 'zp3.z'],
10: ['sample.x', 'zp4.z'],
11: ['sample.y', 'zp4.z'],
12: ['tx_det.x', 'tx_det.y'],
13: ['tx_det.y', 'tx_det.x'],
14: ['osa.x', 'osa.y'],
}
lock_state = {
1: 'filter_wheel',
2: 'pinhole',
3: 'zp_stack',
4: 'zp1',
5: 'zp2',
6: 'zp3',
7: 'zp4',
8: 'osa',
9: 'sample',
10: 'tx_det'
}
move_define_axes = {
1: ['zp1', '1'],
2: ['zp1', '2'],
3: ['zp1', '3'],
4: ['zp1', '4'],
5: ['zp1', 'out'],
6: ['zp2', '1'],
7: ['zp2', '2'],
8: ['zp2', '3'],
9: ['zp2', '4'],
10: ['zp2', 'out'],
11: ['zp3', '1'],
12: ['zp3', '2'],
13: ['zp3', '3'],
14: ['zp3', '4'],
15: ['zp3', 'out'],
16: ['zp4', '1'],
17: ['zp4', '2'],
18: ['zp4', 'out'],
19: ['zp_stack', '1'],
20: ['zp_stack', '2'],
21: ['zp_stack', '3'],
22: ['zp_stack', '4'],
23: ['zp_stack', 'out'],
24: ['osa', 'in'],
25: ['osa', 'out'],
26: ['sample', '0'],
27: ['sample', '1'],
28: ['sample', '2'],
29: ['sample', '3'],
30: ['sample', '4'],
31: ['tx_det', '1'], # CCD
32: ['tx_det', '2'], # CFG
33: ['tx_det', '3'], # Ptycho
}
active_zp = {
# Syntax axis: move_define_axis value in, out
1: [1, 6, 11],
2: [2, 7, 12],
3: [3, 8, 13],
4: [4, 9, 14],
5: [16],
6: [17],
'all_out': [5, 10, 15, 18],
}
# Scan estimate overhead - used to accurately estimate scan time
time_estimate_overhead = {
# 'mode': [constant per pt, multiplier per pt, constant per line, multiplier per line]
'step': [9.4406, 1.784, 0.0, 1.1],
'fly': [0.1, 1.05, 0.0, 1.1]
}
| djvine/sxm_manager | sxm_manager/settings/xfm.py | xfm.py | py | 6,215 | python | en | code | 0 | github-code | 36 |
22634572785 | import numpy
import subprocess as sp
# Location of the ffmpeg executable (Windows build).
FFMPEG_BIN = "ffmpeg.exe"
# Decode the MP4 into raw RGB24 frames streamed to stdout.
command = [ FFMPEG_BIN,
        '-i', '003_camera_p3.mp4',
        '-f', 'image2pipe',
        '-pix_fmt', 'rgb24',
        '-vcodec', 'rawvideo', '-']
pipe = sp.Popen(command, stdout = sp.PIPE, bufsize=10**8)
# read 420*360*3 bytes (= 1 frame, assuming a 420x360 RGB video — TODO confirm)
raw_image = pipe.stdout.read(420*360*3)
# transform the bytes read into a numpy array.
# Fix: numpy.fromstring is deprecated (and removed for binary input in newer
# NumPy); frombuffer is the zero-copy replacement. The resulting array is
# read-only, which is fine here since we only reshape it.
image = numpy.frombuffer(raw_image, dtype='uint8')
image = image.reshape((360,420,3))
# throw away the data in the pipe's buffer.
pipe.stdout.flush()
| APPLabUofA/Pi_Experiments | GoPro_Visual_Grid/Video_Analysis/Intro_opencv/Read_MP4_Convert_RAW_Save.py | Read_MP4_Convert_RAW_Save.py | py | 565 | python | en | code | 4 | github-code | 36 |
74791819944 | import click
import requests
from tabulate import tabulate
class github:
    """Thin wrapper around the GitHub REST API, driven by a shared config
    object (ghf) that carries the token, debug flag and output options
    (fields, profile, export_csv, csv_separator, print_header_row)."""
    def __init__(self,ghf):
        # ghf: configuration/state object shared with the CLI layer
        self.ghf=ghf
    # def repos(self,org):
    #     if self.ghf.debug: click.echo("org:"+org+" token:"+self.ghf.token)
    #     url='https://api.github.com/orgs/'+org+'/repos'
    #     headers=self.get_auth_header()
    #     data=''
    #     r = requests.get(url, headers=headers, data=data)
    #     fields = ['full_name']
    #     if len(self.ghf.fields) > 0:
    #         fields = self.ghf.fields
    #     if self.ghf.profile:
    #         self.profile_keys(r.json())
    #     else:
    #         self.print_object_array_with_fields(r.json(),fields)
    def repos(self,org):
        """List an organisation's repositories (default column: full_name)."""
        url='https://api.github.com/orgs/'+org+'/repos'
        fields = ['full_name']
        headers=self.get_auth_header()
        self.make_github_get_request(fields,url,headers)
    def make_github_get_request(self,default_fields,url,headers,data=''):
        """GET *url* and print the JSON response as a table.

        ghf.fields overrides *default_fields*; ghf.profile switches to a
        key-frequency summary instead of row data.
        """
        if self.ghf.debug: click.echo("url:"+url+" token:"+self.ghf.token+" fields:"+",".join(default_fields))
        r = requests.get(url, headers=headers, data=data)
        fields = default_fields
        if len(self.ghf.fields) > 0:
            fields = self.ghf.fields
        table = []
        if self.ghf.profile:
            (fields,table) = self.get_profile_table(r.json())
        else:
            table = self.get_table_from_object_array_with_fields(fields,r.json())
        self.print_table(fields,table)
    def print_table(self, field_names, table):
        """Render rows as CSV (ghf.export_csv) or a tabulate table."""
        if self.ghf.export_csv:
            separator = self.ghf.csv_separator
            if self.ghf.print_header_row:
                click.echo(separator.join(field_names))
            for entry in table:
                click.echo(separator.join(entry))
        else:
            if self.ghf.print_header_row:
                click.echo(tabulate(table, field_names, tablefmt="simple"))
            else:
                click.echo(tabulate(table, tablefmt="simple"))
    def test_post(self,url,headers,data):
        """Debug helper: POST and echo the response (201 = created)."""
        r = requests.post(url, headers=headers, data=data)
        if r.status_code == 201:
            click.echo(r.json())
        elif r.status_code == 401:
            click.echo("Error:"+r.json()['message'])
        else:
            click.echo('status:'+str(r.status_code))
            click.echo(r.text)
    def test_get(self,url,headers,data):
        """Debug helper: GET and echo the response.

        NOTE(review): mirrors test_post including the 201 check — a GET
        normally returns 200, so the success branch may never fire; confirm.
        """
        r = requests.get(url, headers=headers, data=data)
        if r.status_code == 201:
            click.echo(r.json())
        elif r.status_code == 401:
            click.echo("Error:"+r.json()['message'])
        else:
            click.echo('status:'+str(r.status_code))
            click.echo(r.text)
    def get_auth_header(self):
        """Return the token Authorization header built from ghf.token."""
        headers={'Authorization' : 'token '+self.ghf.token}
        return headers
    def get_profile_table(self,json):
        """Summarise a JSON array: count top-level string items (level1) and
        keys of dict items (level2); return (field_names, sorted rows)."""
        outter_key_hash = {}
        inner_key_hash = {}
        if type(json) == type([]):
            for item in json:
                if type(item) == type(u''):
                    outter_key_hash[item] = 1 if item not in outter_key_hash else outter_key_hash[item] + 1
                if type(item) == type({}):
                    for inner_item in item:
                        if type(inner_item) == type(u''):
                            inner_key_hash[inner_item] = 1 if inner_item not in inner_key_hash else inner_key_hash[inner_item] + 1
        # elif type(json) == type({}):
        #     None
        table = []
        for key, value in outter_key_hash.items():
            table.append(['level1',key,str(value)])
        for key, value in inner_key_hash.items():
            table.append(['level2',key,str(value)])
        field_names = ['level', 'name','count']
        table = sorted(table, key=lambda key: key[1])
        return (field_names,table)
        # click.echo(tabulate(table, field_names, tablefmt="simple"))
    def get_table_from_object_array_with_fields(self,fields,json):
        """Project a JSON array of objects onto *fields*; missing keys
        become empty strings."""
        table = []
        if type(json) == type([]):
            for item in json:
                if type(item) == type({}):
                    row = []
                    for field in fields:
                        if field in item:
                            row.append(item[field])
                        else:
                            row.append('')
                    table.append(row)
        headers = fields
        return table
        # click.echo(tabulate(table, headers, tablefmt="simple"))
# >>> js = ['name1', 'name2', {'iname1':11,'iname2':12}]
# >>> for item in js:
# ... print type(item)
# ...
# <type 'str'>
# <type 'str'>
# <type 'dict'> | DemandCube/github-flow | src/githubflow/github.py | github.py | py | 4,784 | python | en | code | 5 | github-code | 36 |
26431478290 | import numpy as np
import pandas as pd
# NOTE(review): bare expressions (.head(), .shape, label, ...) only display
# output in a notebook; they are no-ops when run as a plain script.
mashroom = pd.read_csv('mushroom edibility classification dataset.csv')
mashroom.head()
mashroom.shape
mashroom.isnull().sum()
mashroom_corr = mashroom.corr()
import seaborn as sns
sns.heatmap(mashroom_corr, cmap= 'YlGnBu')
#removing redundant columns that has no distinguishing features
mashroom.drop('veil-type',axis=1,inplace=True) #all the values are 0
mashroom.drop('veil-color',axis=1,inplace=True) #all the values are 2
mashroom.drop('ring-number',axis=1,inplace=True) #all the values are 1
mashroom.drop('Unnamed: 0',axis=1,inplace=True)
mashroom_corr = mashroom.corr()
sns.heatmap(mashroom_corr, cmap= 'YlGnBu')
# handling NaN values: mean-impute the two columns with missing entries
from sklearn.impute import SimpleImputer
impute = SimpleImputer(missing_values = np.nan, strategy = 'mean')
impute.fit(mashroom[['cap-shape']])
mashroom['cap-shape'] = impute.transform(mashroom[['cap-shape']])
impute.fit(mashroom[['cap-color']])
mashroom['cap-color'] = impute.transform(mashroom[['cap-color']])
# mashroom.iloc[302]
mashroom.isnull().sum()
#encode class and bruises as integer labels
from sklearn.preprocessing import LabelEncoder
enc = LabelEncoder()
mashroom['class'] = enc.fit_transform(mashroom['class'])
mashroom['bruises'] = enc.fit_transform(mashroom['bruises'])
mashroom.info()
# stratified 75/25 train/test split on the 'class' target
from sklearn.model_selection import train_test_split
mashroom_target = mashroom['class']
mashroom_data = mashroom.drop('class',axis=1)
X_train, X_test, y_train, y_test = train_test_split(mashroom_data, mashroom_target, test_size = 0.25, stratify = mashroom_target, random_state = 0)
#scale features to [0, 1]; the scaler is fit on the training split only
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
print("per-feature minimum after scaling:\n {}".format(
    X_train.min(axis=0)))
print("per-feature maximum after scaling:\n {}".format(
    X_train.max(axis=0)))
print('label :') # The class is the label
label = pd.DataFrame(mashroom['class'])
label
print('features :')
mashroom_data | Rapheo/Basic-of-ML | Lab_5(data Pre-Processing)/data_preprocessing.py | data_preprocessing.py | py | 2,009 | python | en | code | 0 | github-code | 36 |
28798312451 | #this program accepts a list of numbers
#then takes the list and sqaures each value
#I pledge my honor that I have abided by the Stevens Honor System
def main():
    """Prompt for a count, then read that many integers and print each one squared.

    Fix: removed the unused local ``x`` (assigned 0 and never read).
    """
    print("This program accepts a list of numbers and squares them")
    numbers = int(input("Enter the amount of numbers you have: "))
    for _ in range(numbers):
        square = int(input("Enter a number: "))
        # same output format as before; the squaring is inlined
        print("The value squared is ", square * square)
    print("Have a great day!")
main()
| Eric-Wonbin-Sang/CS110Manager | 2020F_hw5_submissions/vinkdennis/squareslistofnumbers.py | squareslistofnumbers.py | py | 526 | python | en | code | 0 | github-code | 36 |
25903248374 | # -*- coding: utf-8 -*-
# @Time : 2018/3/27 8:18
# @Author : glacier
# @Email : 2284711614@qq.com
# @File : get_plan_to_md.py
# @Software: PyCharm
import os,time
import pymysql
import datetime
if __name__ == '__main__':
    # Date selection (the strftime call is disabled; the date is hard-coded)
    # today = time.strftime('%Y-%m-%d',time.localtime(time.time()))
    today = '2018-03-23'
    # NOTE(review): database credentials are hard-coded here — consider
    # moving them to configuration / environment variables.
    db = pymysql.connect(
        "123.206.84.216",
        "user001",
        "123456",
        "glacier",
        charset='utf8'
    )
    # Create a cursor object with cursor()
    cursor = db.cursor()
    sql = "SELECT user_id,plan_content,plan_type FROM plan_list " \
          "WHERE create_time like '%"+ today +"%'"
    try:
        # Execute the SQL statement
        cursor.execute(sql)
        table = cursor.fetchall()
        # Commit to the database
        db.commit()
        # Append each plan as a markdown checklist item; plan_type selects
        # the checkbox state (0 = open, 1 = in progress, 2 = done).
        for tt in table:
            with open('C:\\Users\\Administrator\\Desktop\\今日计划.md','a+',encoding='UTF-8') as f:
                if tt[2] == 0:
                    f.write('- [ ] ' + tt[1] + '\n')
                elif tt[2] == 1:
                    f.write('- [ ] ' + tt[1] + ' - 进行中 \n')
                elif tt[2] == 2:
                    f.write('- [x] ' + tt[1] + '\n')
            f.close()
    except:
        # NOTE(review): bare except swallows every error type — narrow it.
        print("出错啦!")
        # Roll back on error
        db.rollback()
    # Close the database connection
    db.close()
    # with open('C:\\Users\\Administrator\\Desktop\\今日计划.md') as f:
    #     pass
| GlacierBo/python_learn | python_base/get_plan_to_md.py | get_plan_to_md.py | py | 1,501 | python | en | code | 0 | github-code | 36 |
30838793023 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 11 18:11:32 2021
@author: mathisagathe
"""
from pymongo import MongoClient
# NOTE(review): credentials are hard-coded; Cursor.count() was removed in
# PyMongo 4 — count_documents() is the modern equivalent.
client = MongoClient("10.35.7.4", username = "mathis", password = "MathisM21", authsource = "mathisdb")
db=client.mathisdb
collection = db["TripAdvisor"]
# Total number of restaurants in France on TripAdvisor
r1 = {"country":"France"}
nbrestoFR = collection.find((r1)).count()
print("Le nombre total de restaurants en France sur TA est de : ",nbrestoFR)
# Number of restaurants in France serving vegetarian and gluten-free meals
r2 = {"$and":
    [
        {"country":"France"},
        {"vegetarian_friendly":"Y"},
        {"gluten_free":"Y"}
    ]
    }
nbr2 = collection.find((r2)).count()
print("Le nombre total de restaurants en France servant des repas végétariens et sans gluten est de : ",nbr2)
# Top 5 European cities with the most restaurants
# NOTE(review): the limit is 6 although the comment says top 5 — confirm intent.
r3 = collection.aggregate([
    {"$group":{"_id":"$city","nb":{"$sum":1}}},
    {"$sort":{"nb":-1}},
    {"$limit":6}
])
for i in r3:
    print(i)
| romanelollier/School_Project_BigData | requetes.py | requetes.py | py | 1,040 | python | fr | code | 0 | github-code | 36 |
10719930631 | import math
# i call this solution "the bruce lee". ie: i had to think like water.
# ___. __ _____ .__ .___
# \_ |__ ____ __ _ _______ _/ |_ ___________ _____ ___.__. _/ ____\______|__| ____ ____ __| _/
# | __ \_/ __ \ \ \/ \/ /\__ \\ __\/ __ \_ __ \ / < | | \ __\\_ __ \ |/ __ \ / \ / __ |
# | \_\ \ ___/ \ / / __ \| | \ ___/| | \/ | Y Y \___ | | | | | \/ \ ___/| | \/ /_/ |
# |___ /\___ > \/\_/ (____ /__| \___ >__| /\ |__|_| / ____| |__| |__| |__|\___ >___| /\____ |
# \/ \/ \/ \/ )/ \/\/ \/ \/ \/
# https://www.youtube.com/watch?v=V4A37PDduls
def collect_rain(walls):
    """
    Water collected between walls, computed layer by layer.

    Repeatedly locates the current left/right maxima, credits one horizontal
    layer of water spanning them, lowers both maxima to the next-highest
    wall, and loops until every wall has been consumed (reduced to 0).
    The input list is never mutated (a copy is taken up front).

    NOTE(review): the credited width is (i_right_max - i_left_max), which
    matches this file's own `expect` fixtures but differs from the classic
    "trapping rain water" definition — confirm this convention is intended.
    """
    if not walls or len(walls) < 2 or max(walls) == 0:
        return 0
    walls = walls.copy()  # work on a copy; caller's list stays intact
    # reduce, if necessary, to a maximum that occurs at least 2 times.
    total_water_collected = 0
    while max(walls) > 0:
        max_result = twoMaxesOrMaxIndex(walls)
        if isinstance(max_result, dict):
            # a distinct L max and R max
            # the higher max must be reduced because no water will be collected in the area of the difference between the 2
            if max_result['right_max'] > max_result['left_max']:
                walls[max_result['i_right_max']] = max_result['left_max']
            elif max_result['right_max'] < max_result['left_max']:
                walls[max_result['i_left_max']] = max_result['left_max'] if False else max_result['right_max']
            # regardless of whatever else we discover about walls,
            # we know that there are no higher walls between these 2 maxes.
            # we can confidently set to zero all walls in between them.
            for iz in range(max_result['i_left_max']+1, max_result['i_right_max']):
                walls[iz] = 0
            # a pool will collect between these 2 maxes. the width of which is:
            pool_width = max_result['i_right_max'] - max_result['i_left_max']
            # the total depth would be the value of these maxes, but we should calc only to the height of the next max
            # which we would know if we max() the walls with these 2 maxes removed.
            latest_max = walls[max_result['i_right_max']]
            walls[max_result['i_left_max']] = 0
            walls[max_result['i_right_max']] = 0
            new_max = max(walls)
            pool_layer_depth = latest_max - new_max
            water_layer_volume = pool_width * pool_layer_depth
            total_water_collected += water_layer_volume
            # reduce the 2 maxes to this new max
            walls[max_result['i_left_max']] = new_max
            walls[max_result['i_right_max']] = new_max
            # ready to find the next left & right maxes
        else:
            # one max
            # useless for collecting water
            # reduce this wall's height to the next max
            walls[max_result] = 0
            next_max = max(walls)
            walls[max_result] = next_max
            # ready to find the next left & right maxes
    return total_water_collected
def twoMaxesOrMaxIndex(walls):
    """
    Scan inward from both ends of `walls` simultaneously.

    Returns a dict holding the running maxima found while scanning from the
    left and from the right (with their indices), or a single int index when
    the un-scanned center element of an odd-length list beats both maxima.
    """
    i_lo, i_hi = 0, len(walls) - 1
    best_lo, best_lo_at = 0, 0
    best_hi, best_hi_at = 0, len(walls) - 1
    while i_lo < i_hi:
        if walls[i_lo] > best_lo:
            best_lo, best_lo_at = walls[i_lo], i_lo
        if walls[i_hi] > best_hi:
            best_hi, best_hi_at = walls[i_hi], i_hi
        i_lo += 1
        i_hi -= 1
    if i_lo == i_hi:
        # odd walls.length: the center element was never examined above
        center = walls[i_lo]
        if center > max(best_lo, best_hi):
            return i_lo
        elif center > best_hi:
            best_hi, best_hi_at = center, i_lo
        elif center > best_lo:
            best_lo, best_lo_at = center, i_lo
    return {
        "left_max": best_lo,
        "i_left_max": best_lo_at,
        "right_max": best_hi,
        "i_right_max": best_hi_at
    }
# be like a flood, my friend
# .___ .__ .__ .__
# __| _/____ _____|__| ____ ____ |__| ______ ___ __ ___________ ___.__. | |__ __ __ _____ _____ ____
# / __ |/ __ \ / ___/ |/ ___\ / \ | |/ ___/ \ \/ // __ \_ __ < | | | | \| | \/ \\__ \ / \
# / /_/ \ ___/ \___ \| / /_/ > | \ | |\___ \ \ /\ ___/| | \/\___ | | Y \ | / Y Y \/ __ \| | \
# \____ |\___ >____ >__\___ /|___| / |__/____ > \_/ \___ >__| / ____| |___| /____/|__|_| (____ /___| /
# \/ \/ \/ /_____/ \/ \/ \/ \/ \/ \/ \/ \/
# https://www.tiktok.com/@rhapilucky/video/7256329396527631658
def collect_flood(walls):
    """
    Two-pointer ("flood") variant: walk `left` and `right` inward while
    tracking the running maximum seen from each side, crediting water as
    the lower side advances.  The input list is copied, never mutated.

    NOTE(review): inside both inner while-loops, the `next_max_left` /
    `next_max_right` values being compared were captured at the top of the
    OUTER loop and are stale by then; the `if next_max_* != max_*` blocks
    can rewind `max_*` right after `max_* = max(...)` raised it.  Verify
    against the `expect` fixtures before trusting this implementation.
    """
    if not walls or len(walls) < 2 or max(walls) == 0:
        return 0
    walls = walls.copy()
    left = 0
    right = len(walls) - 1
    previous_max_left = 0
    max_left = walls[left]
    previous_max_right = 0
    max_right = walls[right]
    total_water_captured = 0
    while left < right:
        # DRY this up
        next_max_left = max(walls[left], max_left)
        if next_max_left != max_left:
            previous_max_left = max_left
            max_left = next_max_left
        # DRY this up
        next_max_right = max(walls[right], max_right)
        if next_max_right != max_right:
            previous_max_right = max_right
            max_right = next_max_right
        # advance the lower (left) side, crediting one column per step
        while walls[left] <= walls[right] and walls[left] <= max_left:
            if max_left > 0:
                total_water_captured += max_left - previous_max_left
            left += 1
            # DRY this up
            max_left = max(max_left, walls[left])
            if next_max_left != max_left:
                previous_max_left = max_left
                max_left = next_max_left
            if left == right:
                break
        # advance the lower (right) side, crediting one column per step
        while walls[left] > walls[right]:
            if max_right > 0:
                total_water_captured += max_right - previous_max_right
            right -= 1
            max_right = max(max_right, walls[right])
            # DRY this up
            if next_max_right != max_right:
                previous_max_right = max_right
                max_right = next_max_right
            if left == right:
                break
    return total_water_captured
def expect(walls, actual, expected):
    """Report (never raise) when a wallGorithm disagrees with the oracle."""
    if actual == expected:
        return
    print(f"wallGorithm fails for wall config: {walls}, expected {expected}, calculated {actual}")
if __name__ == '__main__':
    # simple zeros & 1s:
    expect([], collect_rain([]), 0)
    expect([0], collect_rain([0]), 0)
    expect([1], collect_rain([1]), 0)
    expect([9991], collect_rain([9991]), 0)
    # simple 2s:
    expect([0, 1], collect_rain([0, 1]), 0)
    expect([1, 0], collect_rain([1, 0]), 0)
    expect([1, 1], collect_rain([1, 1]), 1)
    expect([0, 0], collect_rain([0, 0]), 0)
    # the rest, shared by both implementations:
    wall_expectations = [
        ([0, 0, 0], 0),
        ([1, 0, 0], 0),
        ([0, 1, 0], 0),
        ([0, 0, 1], 0),
        ([1, 1, 0], 1),
        ([1, 0, 1], 2),
        ([0, 1, 1], 1),
        ([1, 1, 1], 2),
        ([100, 1, 1], 2),
        ([1, 100, 1], 2),
        ([1, 1, 100], 2),
        ([3, 1, 100], 6),
        ([3, 10, 0], 3),
        ([4, 10, 2], 6),
        ([3, 10, 0, 8], 19),
        ([3, 0, 10, 0], 6),
        ([0, 0, 10, 8], 8),
        ([0, 10, 0, 8], 16),
        ([0, 0, 12, 0, 0], 0),
        ([0, 11, 12, 0, 0], 11),
        ([0, 12, 12, 0, 0], 12),
    ]
    # run every fixture through collect_rain first, then collect_flood,
    # preserving the original report order
    for wallgorithm in (collect_rain, collect_flood):
        for walls, expected in wall_expectations:
            expect(walls.copy(), wallgorithm(walls), expected)
| jazzhammer/bruce-lee-water-collection | main.py | main.py | py | 7,949 | python | en | code | 0 | github-code | 36 |
74069869225 | ## Deprecated - see XNATUpload comment. ##
from nipype.interfaces.base import (
traits, BaseInterfaceInputSpec, TraitedSpec,
BaseInterface, InputMultiPath, File)
import qixnat
class XNATUploadInputSpec(BaseInterfaceInputSpec):
    """Input traits for :class:`XNATUpload`.

    *scan*, *reconstruction* and *assessor* select the target container;
    `_run_interface` uses the first of them that is set, in that order.
    """
    project = traits.Str(mandatory=True, desc='The XNAT project id')
    subject = traits.Str(mandatory=True, desc='The XNAT subject name')
    session = traits.Str(desc='The XNAT session name')
    scan = traits.Either(traits.Int, traits.Str, desc='The XNAT scan name')
    reconstruction = traits.Str(desc='The XNAT reconstruction name')
    assessor = traits.Str(desc='The XNAT assessor name')
    resource = traits.Str(mandatory=True, desc='The XNAT resource name')
    inout = traits.Str(desc='The XNAT reconstruction or assessor resource'
                       ' in/out qualifier')
    force = traits.Bool(desc='Flag indicating whether to replace an existing'
                             ' XNAT file')
    skip_existing = traits.Bool(desc='Flag indicating whether to skip upload'
                                     ' to an existing target XNAT file')
    in_files = InputMultiPath(File(exists=True), mandatory=True,
                              desc='The files to upload')
    modality = traits.Str(desc="The XNAT scan modality, e.g. 'MR'")
class XNATUploadOutputSpec(TraitedSpec):
    """Output traits for :class:`XNATUpload`."""
    xnat_files = traits.List(traits.Str, desc='The XNAT file object labels')
class XNATUpload(BaseInterface):
    """
    The ``XNATUpload`` Nipype interface wraps the
    :meth:`qixnat.facade.XNAT.upload` method.
    """

    input_spec = XNATUploadInputSpec
    output_spec = XNATUploadOutputSpec

    def _run_interface(self, runtime):
        """Resolve the target XNAT resource and upload the input files."""
        inputs = self.inputs
        # Options that locate (or create) the target resource object.
        find_opts = {}
        for opt in ('resource', 'inout', 'modality'):
            value = getattr(inputs, opt)
            if value:
                find_opts[opt] = value
        # Exactly one container is targeted: scan, reconstruction or
        # assessor, checked in that order of precedence.
        if inputs.scan:
            find_opts['scan'] = inputs.scan
        elif inputs.reconstruction:
            find_opts['reconstruction'] = inputs.reconstruction
        elif inputs.assessor:
            find_opts['assessor'] = inputs.assessor
        # Options that control the upload itself.
        upload_opts = {}
        if inputs.force:
            upload_opts['force'] = True
        if inputs.skip_existing:
            upload_opts['skip_existing'] = True
        # Upload the files.
        with qixnat.connect() as xnat:
            # The target XNAT scan resource object.
            rsc = xnat.find_or_create(inputs.project, inputs.subject,
                                      inputs.session, **find_opts)
            self._xnat_files = xnat.upload(rsc, *inputs.in_files,
                                           **upload_opts)
        return runtime

    def _list_outputs(self):
        """Return the outputs dict, including labels of any uploaded files."""
        outputs = self._outputs().get()
        if hasattr(self, '_xnat_files'):
            outputs['xnat_files'] = self._xnat_files
        return outputs
| ohsu-qin/qipipe | qipipe/interfaces/xnat_upload.py | xnat_upload.py | py | 3,087 | python | en | code | 0 | github-code | 36 |
28912430156 | #The code looks to distinguish between cats and dogs using the AlexNet model set up
#This model does not work at high percentage of correctness over 20 epochs due to underfitting as AlexNet is set up to work over larger datasets
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import model_selection
from sklearn.metrics import accuracy_score
from collections import Counter
import keras
from keras.models import Sequential
from keras.layers import Dense, Conv2D
from keras.layers import Activation, MaxPooling2D, Dropout, Flatten, Reshape
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import cross_val_score
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
def categorical_to_numpy(labels_in):
    """One-hot encode string labels: 'dog' -> [1, 0], anything else -> [0, 1]."""
    encoded = [
        np.array([1, 0]) if label == 'dog' else np.array([0, 1])
        for label in labels_in
    ]
    return np.array(encoded)
def load_data():
  # Download the pickled CIFAR subset (dogs vs. road, per the inline
  # comment below) from Google Drive and return (data, labels).
  # NOTE(review): the file is re-downloaded on every call — confirm that
  # is acceptable for the notebook workflow.
  # Run this cell to download our data into a file called 'cifar_data'
  import gdown
  gdown.download('https://drive.google.com/uc?id=1-BjeqccJdLiBA6PnNinmXSQ6w5BluLem','cifar_data','True'); # dogs v road;
  # now load the data from our cloud computer
  import pickle
  data_dict = pickle.load(open( "cifar_data", "rb" ));
  data   = data_dict['data']
  labels = data_dict['labels']
  return data, labels
def plot_one_image(data, labels, img_idx):
  # Show the flattened image at `img_idx` (reshaped to 32x32x3) with its
  # label printed to stdout.
  from google.colab.patches import cv2_imshow
  import cv2
  import matplotlib.pyplot as plt
  my_img   = data[img_idx, :].squeeze().reshape([32,32,3]).copy()
  my_label = labels[img_idx]
  print('label: %s'%my_label)
  plt.imshow(my_img)
  plt.show()
def CNNClassifier(num_epochs=2, layers=1, dropout=0.15):
  # Factory returning a scikit-learn-compatible wrapper around a small
  # Keras CNN (repeated conv blocks + a dense head, softmax over 2 classes).
  def create_model():
    model = Sequential()
    # Incoming samples are flat vectors; restore the 32x32 RGB image shape.
    model.add(Reshape((32, 32, 3)))
    # `layers` repetitions of the first conv/conv/pool/dropout block.
    for i in range(layers):
      model.add(Conv2D(32, (3, 3), padding='same'))
      model.add(Activation('relu'))
      model.add(Conv2D(32, (3, 3)))
      model.add(Activation('relu'))
      model.add(MaxPooling2D(pool_size=(2, 2)))
      model.add(Dropout(dropout))
    model.add(Conv2D(64, (3, 3), padding='same'))
    model.add(Activation('relu'))
    model.add(Conv2D(64, (3, 3)))
    model.add(Activation('relu'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(dropout))
    # Dense classification head.
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dropout(dropout))
    model.add(Dense(2))
    model.add(Activation('softmax'))

    # initiate RMSprop optimizer
    # NOTE(review): lowercase `rmsprop` is the legacy Keras 2.x API —
    # confirm the pinned Keras version before upgrading.
    opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

    # Let's train the model using RMSprop
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
  return KerasClassifier(build_fn=create_model, epochs=num_epochs, batch_size=10, verbose=2)
def plot_acc(history, ax = None, xlabel = 'Epoch #'):
  # Plot training vs. validation accuracy per epoch from a Keras History
  # object, marking the chance line (0.5) and the best validation epoch.
  history = history.history
  history.update({'epoch':list(range(len(history['val_accuracy'])))})
  history = pd.DataFrame.from_dict(history)

  # epoch with the highest validation accuracy
  best_epoch = history.sort_values(by = 'val_accuracy', ascending = False).iloc[0]['epoch']

  if not ax:
    f, ax = plt.subplots(1,1)
  sns.lineplot(x = 'epoch', y = 'val_accuracy', data = history, label = 'Validation', ax = ax)
  sns.lineplot(x = 'epoch', y = 'accuracy', data = history, label = 'Training', ax = ax)
  ax.axhline(0.5, linestyle = '--',color='red', label = 'Chance')
  ax.axvline(x = best_epoch, linestyle = '--', color = 'green', label = 'Best Epoch')
  ax.legend(loc = 1)
  ax.set_ylim([0.4, 1])

  ax.set_xlabel(xlabel)
  ax.set_ylabel('Accuracy (Fraction)')

  plt.show()
def model_to_string(model):
    """Render model.summary() into one string, stripping the numeric
    auto-naming suffixes Keras appends to layer names (_N, _NN, _NNN)."""
    import re
    lines = []
    model.summary(print_fn=lines.append)
    sms = "\n".join(lines)
    for suffix in (r'_\d\d\d', r'_\d\d', r'_\d'):
        sms = re.sub(suffix, '', sms)
    return sms
import tensorflow as tf
import os
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from google.colab.patches import cv2_imshow
import cv2
import matplotlib.pyplot as plt
# Fix: to_categorical is called in model.fit(...) below but was never
# imported anywhere in the file, so training crashed with a NameError.
from keras.utils import to_categorical

# Keep a reference to a model built by an earlier notebook cell, if any.
try:
  road_model = model
  road_saved = True
except NameError:
  road_saved = False

IMG_SHAPE  = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels

# Download and unpack the cats-vs-dogs dataset.
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filterted.zip', origin=_URL, extract=True)
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')

train_cats_dir = os.path.join(train_dir, 'cats')  # directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')  # directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats')  # directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs')  # directory with our validation dog pictures

train_image_generator = ImageDataGenerator() # Generator for our training data
validation_image_generator = ImageDataGenerator() # Generator for our validation data

# Materialize one big batch per split: (inputs, labels) tuples.
train_data = train_image_generator.flow_from_directory(batch_size=2000,
                                                       directory=train_dir,
                                                       shuffle=True,
                                                       target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
                                                       class_mode='binary').next()
val_data = validation_image_generator.flow_from_directory(batch_size=1000,
                                                          directory=validation_dir,
                                                          shuffle=False,
                                                          target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
                                                          class_mode='binary').next()

cd_train_inputs, cd_train_labels = train_data
cd_test_inputs, cd_test_labels = val_data

# AlexNet-style architecture (five conv stages + two dense layers).
model = Sequential()
model.add(Conv2D(96, 11, strides = 3))
model.add(Activation('relu'))
model.add(Conv2D(256, 5))
model.add(Activation('relu'))
model.add(MaxPooling2D(2))
model.add(Activation('relu'))
model.add(Conv2D(384, 3, padding = 'same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(2))
model.add(Activation('relu'))
model.add(Conv2D(384, 3, padding = 'same'))
model.add(Activation('relu'))
model.add(Conv2D(256, 3, padding = 'same'))
model.add(Activation('relu'))
model.add(MaxPooling2D(2))
model.add(Activation('relu'))
# Dense classification head with heavy dropout, softmax over 2 classes.
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(4096))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(2))
model.add(Activation('softmax'))

# initiate RMSprop optimizer
# NOTE(review): lowercase `rmsprop` is the legacy Keras 2.x API —
# confirm the pinned Keras version before upgrading.
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)

# Let's train the model using RMSprop
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])

# Train the CNN and plot accuracy.
# to_categorical converts the binary labels to one-hot vectors, matching
# the 2-unit softmax output and categorical_crossentropy loss.
history = model.fit(cd_train_inputs, to_categorical(cd_train_labels), \
                    validation_data=(cd_test_inputs, to_categorical(cd_test_labels)), \
                    epochs=70)
plot_acc(history)
print (model.summary())
| arulverma/Inspirit-AI-programs | Cat vs Dog AlexNet.py | Cat vs Dog AlexNet.py | py | 7,976 | python | en | code | 0 | github-code | 36 |
28613035846 | #!/usr/bin/env python
"""PyQt4 port of the layouts/basiclayout example from Qt v4.x"""
from PySide import QtCore, QtGui
class Dialog(QtGui.QDialog):
    """Demo dialog assembling the basic Qt layouts — a menu bar plus
    horizontal, grid and form group boxes — inside one QVBoxLayout."""
    # sizes used by the grid / horizontal sections below
    NumGridRows = 3
    NumButtons = 4

    def __init__(self):
        super(Dialog, self).__init__()

        # Build each section first, then stack them vertically.
        self.createMenu()
        self.createHorizontalGroupBox()
        self.createGridGroupBox()
        self.createFormGroupBox()

        bigEditor = QtGui.QTextEdit()
        bigEditor.setPlainText("This widget takes up all the remaining space "
                "in the top-level layout.")

        buttonBox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)

        buttonBox.accepted.connect(self.accept)
        buttonBox.rejected.connect(self.reject)

        mainLayout = QtGui.QVBoxLayout()
        mainLayout.setMenuBar(self.menuBar)
        mainLayout.addWidget(self.horizontalGroupBox)
        mainLayout.addWidget(self.gridGroupBox)
        mainLayout.addWidget(self.formGroupBox)
        mainLayout.addWidget(bigEditor)
        mainLayout.addWidget(buttonBox)
        self.setLayout(mainLayout)

        self.setWindowTitle("Basic Layouts")

    def createMenu(self):
        # File menu with a single Exit action that closes the dialog.
        self.menuBar = QtGui.QMenuBar()

        self.fileMenu = QtGui.QMenu("&File", self)
        self.exitAction = self.fileMenu.addAction("E&xit")
        self.menuBar.addMenu(self.fileMenu)

        self.exitAction.triggered.connect(self.accept)

    def createHorizontalGroupBox(self):
        # A row of NumButtons push buttons.
        self.horizontalGroupBox = QtGui.QGroupBox("Horizontal layout")
        layout = QtGui.QHBoxLayout()

        for i in range(Dialog.NumButtons):
            button = QtGui.QPushButton("Button %d" % (i + 1))
            layout.addWidget(button)

        self.horizontalGroupBox.setLayout(layout)

    def createGridGroupBox(self):
        # NumGridRows label/line-edit rows plus a text editor spanning the
        # right-hand column.
        self.gridGroupBox = QtGui.QGroupBox("Grid layout")
        layout = QtGui.QGridLayout()

        for i in range(Dialog.NumGridRows):
            label = QtGui.QLabel("Line %d:" % (i + 1))
            lineEdit = QtGui.QLineEdit()
            layout.addWidget(label, i + 1, 0)
            layout.addWidget(lineEdit, i + 1, 1)

        self.smallEditor = QtGui.QTextEdit()
        self.smallEditor.setPlainText("This widget takes up about two thirds "
                "of the grid layout.")

        layout.addWidget(self.smallEditor, 0, 2, 4, 1)

        # give the editor column twice the stretch of the line-edit column
        layout.setColumnStretch(1, 10)
        layout.setColumnStretch(2, 20)
        self.gridGroupBox.setLayout(layout)

    def createFormGroupBox(self):
        # Three label/field rows demonstrating QFormLayout.
        self.formGroupBox = QtGui.QGroupBox("Form layout")
        layout = QtGui.QFormLayout()
        layout.addRow(QtGui.QLabel("Line 1:"), QtGui.QLineEdit())
        layout.addRow(QtGui.QLabel("Line 2, long text:"), QtGui.QComboBox())
        layout.addRow(QtGui.QLabel("Line 3:"), QtGui.QSpinBox())
        self.formGroupBox.setLayout(layout)
if __name__ == '__main__':
    import sys

    # Create the Qt application and run the dialog's event loop.
    app = QtGui.QApplication(sys.argv)
    sys.exit(Dialog().exec_())
| pyside/Examples | examples/layouts/basiclayouts.py | basiclayouts.py | py | 3,000 | python | en | code | 357 | github-code | 36 |
# NOTE: `command` is expected to be defined by the surrounding exercise
# harness (a user-entered line such as "divide 10 2" or "showfile x.txt").
parameters = command.split()
try:
    if parameters[0] == "divide":
        print("The value of your division is: {0}".format(float(parameters[1]) / float(parameters[2])))
    elif parameters[0] == "showfile":
        # Fix: use a context manager so the file is closed even when
        # read() raises; the original leaked the handle on errors.
        with open(parameters[1]) as file:
            print(file.read())
except Exception:
    # Fix: catch Exception instead of a bare `except:` so that
    # KeyboardInterrupt / SystemExit still propagate.
    print("There was an error")
35952937567 | # coding=utf-8
from utils.dataPreprocess import *
if __name__ == "__main__":
    data_path = "./data/"

    def _pickle_to(obj, path):
        # Serialize one object to `path`.
        with open(path, 'wb') as f:
            pickle.dump(obj, file=f)

    # Vocabulary with frequencies over all three splits.
    vocab = build_vocab_list([data_path + 'train.csv', data_path + 'test.csv', data_path + 'val.csv'])
    _pickle_to(vocab, data_path + 'vocab_freq.pkl')

    # Word <-> index mappings and the fastText embedding matrix.
    w2i, i2w, i2v = build_word_vec(vocab, w2v_path='./embedding/InferSent/dataset/fastText/crawl-300d-2M.vec')
    _pickle_to((w2i, i2w), data_path + 'vocab.pkl')
    _pickle_to(i2v, data_path + 'vocab_embed.pkl')

    # Tokenized corpora, one pickle per split, in the original order.
    for split in ('train', 'test', 'val'):
        corpus = corpus_tokenize(data_path + split + '.csv', split)
        _pickle_to(corpus, data_path + split + '.pkl')
2711278776 |
from math import sqrt
C=50
H=30
def calculate(D, c=50, h=30):
    """Return str(int(sqrt(2*c*D/h))) for D given as an int or numeric string.

    c and h default to the original hard-coded module constants (C=50,
    H=30) but can now be overridden, which keeps the call sites working
    unchanged while making the formula reusable and testable.
    """
    D = int(D)
    return str(int(sqrt((2 * c * D) / h)))
# Read comma-separated integers, apply the formula to each, and print the
# results joined by commas.
raw_values = input("Please eneter a few integars separated by comma: ")
numbers = raw_values.split(",")
print(",".join(map(calculate, numbers)))
def is_group_word(word):
    """Return True when no character of `word` appears in two separate runs.

    A "group word" keeps equal letters contiguous, e.g. "happy" qualifies
    but "aabbbccb" does not (the second run of 'b' repeats 'b').
    """
    previous = ''
    seen = set()
    for ch in word:
        if ch == previous:
            continue  # still inside the current run
        if ch in seen:
            return False  # character re-appears after its run ended
        seen.add(ch)
        previous = ch
    return True


def main():
    """Read N then N words from stdin; print how many are group words."""
    test_case = int(input())
    count = sum(1 for _ in range(test_case) if is_group_word(input()))
    print(count)


# Fix: guard the I/O so importing this module no longer blocks on stdin;
# the dead `input_list` accumulator was also removed.
if __name__ == "__main__":
    main()
12487874050 | """
Programming Fundamentals Mid Exam - 30 June 2019 Group 2
Check your code: https://judge.softuni.bg/Contests/Practice/Index/1683#1
SUPyF2 P.-Mid-Exam/30 June 2019/2. - Tasks Planner
Problem:
Create a program that helps you organize your daily tasks.
First, you are going to receive the hours each task takes оn a single line, separated by space, in the following format:
"{task1} {task2} {task3}… {taskn}"
Each task takes from 1 to 5 hours. If its time is set to 0 – it is completed. If its time is set to a negative number –
the task is dropped.
Then you will start receiving commands until you read the "End" message. There are six possible commands:
• "Complete {index}"
o Find the task on this index in your collection and complete it, if the index exists.
• "Change {index} {time}"
o Replace the time needed of the task on the given index with the time given, if the index exists.
• "Drop {index}"
o Drop the task on the given index, setting its hour to -1, if the index exists.
• "Count Completed"
o Print the number of completed tasks.
• "Count Incomplete"
o Print the number of incomplete tasks (this doesn’t include the dropped tasks).
• "Count Dropped"
o Print the number of dropped tasks (this doesn’t include the incomplete tasks).
In the end, print the incomplete tasks on a single line, separated by a single space in the following format:
"{task1} {task2} {task3}… {taskn}"
Input
• On the 1st line you are going to receive the time of each task, separated by a single space.
• On the next lines, until the "End" command is received, you will be receiving commands.
Output:
• Print the tasks in the format described above.
Examples:
Input:
1 -1 2 3 4 5
Complete 4
Change 0 4
Drop 3
Count Dropped
End
Output:
2
4 2 5
Comments:
First, we receive the command "Complete 4" and we to complete the task on index 4. After this command,
the task collection looks like this:
1 -1 2 3 0 5
Afterwards, we receive the "Change 0 4" command and we need to change the time of the task on index 0.
The collection looks like this now:
4 -1 2 3 0 5
After, we receive the "Drop 3" command, which means we need to drop the task on index 3. The collection looks like this:
4 -1 2 -1 0 5
Then, we receive the "Count Dropped" command. The result is 2 as we have only 2 dropped tasks.
In the end, we print all of the incomplete tasks. This is the result collection:
4 2 5
Input:
1 2 3 4 5 4 0 3 2 1
Complete 0
Complete 1
Complete 2
Drop 3
Change 4 1
Count Completed
End
Output:
4
1 4 3 2 1
"""
# Task hours: >0 incomplete, 0 completed, -1 dropped.
tasks = [int(task) for task in input().split()]

while True:
    tokens = input().split()
    action = tokens[0]
    if action == "End":
        break
    if action in ("Complete", "Change", "Drop"):
        # All three index-based commands silently ignore invalid indices.
        index = int(tokens[1])
        if not (0 <= index < len(tasks)):
            continue
        if action == "Complete":
            tasks[index] = 0
        elif action == "Change":
            tasks[index] = int(tokens[2])
        else:  # Drop
            tasks[index] = -1
    elif action == "Count":
        target = tokens[1]
        if target == "Completed":
            print(sum(1 for task in tasks if task == 0))
        elif target == "Incomplete":
            print(sum(1 for task in tasks if task > 0))
        elif target == "Dropped":
            print(sum(1 for task in tasks if task == -1))

# Finally, print the remaining incomplete tasks space-separated.
print(*[task for task in tasks if task > 0])
| SimeonTsvetanov/Coding-Lessons | SoftUni Lessons/Python Development/Python Fundamentals September 2019/Problems And Files/41 PAST EXAMS/Mid Exams/02. 30 June 2019 Mid Exam Group 2/02.Tasks Planner.py | 02.Tasks Planner.py | py | 3,555 | python | en | code | 9 | github-code | 36 |
40538429997 | #!/usr/bin/python3
"""Module def pascal_triangle(n): that returns a
list of lists of integers representing the
Pascal’s triangle of n:"""
def pascal_triangle(n=5):
    """Return Pascal's triangle of `n` rows as a list of lists of ints.

    Returns an empty list for n <= 0.
    """
    triangle = []
    for row_len in range(1, n + 1):
        row = [1] * row_len
        # interior cells are the sum of the two cells above
        for k in range(1, row_len - 1):
            row[k] = triangle[-1][k - 1] + triangle[-1][k]
        triangle.append(row)
    return triangle
| g091/alx-higher_level_programming | 0x0B-python-input_output/12-pascal_triangle.py | 12-pascal_triangle.py | py | 489 | python | en | code | 1 | github-code | 36 |
43776692283 | import csv
import re
from functools import lru_cache
from pathlib import Path
from rows.fields import slug
# Bundled IBGE municipality table (columns include state, city and
# city_ibge_code), shipped alongside this module.
CITY_DATA_FILENAME = Path(__file__).parent / "data" / "municipios.csv"
# Matches city fields where the full state name was glued onto the city.
REGEXP_RS = re.compile("^RIO GRANDE DO SUL (.*)$")
# Slugged full state name -> two-letter acronym (all 27 federative units).
STATE_NAMES = {
    "acre": "AC",
    "alagoas": "AL",
    "amapa": "AP",
    "amazonas": "AM",
    "bahia": "BA",
    "ceara": "CE",
    "distrito_federal": "DF",
    "espirito_santo": "ES",
    "goias": "GO",
    "maranhao": "MA",
    "mato_grosso": "MT",
    "mato_grosso_do_sul": "MS",
    "minas_gerais": "MG",
    "para": "PA",
    "pernambuco": "PE",
    "parana": "PR",
    "paraiba": "PB",
    "piaui": "PI",
    "rio_de_janeiro": "RJ",
    "rio_grande_do_norte": "RN",
    "rio_grande_do_sul": "RS",
    "rondonia": "RO",
    "roraima": "RR",
    "santa_catarina": "SC",
    "sao_paulo": "SP",
    "sergipe": "SE",
    "tocantins": "TO",
}
# Connective words dropped from city names before comparison.
BLOCK_WORDS = ("da", "das", "de", "do", "dos", "e")
# Per-word spelling normalization applied while building city keys.
# Fix: the original literal repeated the "thome" -> "tome" entry; the
# duplicate dict key has been removed (identical value, so no behavior
# change — Python kept only one entry anyway).
WORD_MAP = {
    "thome": "tome",
}
# Known divergent city spellings, keyed by (state, slugged name), mapped
# to the canonical spelling used in data/municipios.csv.
CITY_SPELL_MAP = {
    ("CE", "itapage"): "itapaje",
    ("MA", "governador_edson_lobao"): "governador_edison_lobao",
    ("MG", "brasopolis"): "brazopolis",
    ("MG", "dona_eusebia"): "dona_euzebia",
    ("MT", "poxoreo"): "poxoreu",
    ("PA", "santa_isabel_do_para"): "santa_izabel_do_para",
    ("PB", "serido"): "junco_do_serido",
    ("PE", "iguaraci"): "iguaracy",
    ("RJ", "parati"): "paraty",
    ("RJ", "trajano_de_morais"): "trajano_de_moraes",
    ("RN", "assu"): "acu",  # Açu
    ("SC", "passos_de_torres"): "passo_de_torres",
    ("SC", "picarras"): "balneario_picarras",
    ("SC", "presidente_castelo_branco"): "presidente_castello_branco",
    ("SE", "gracho_cardoso"): "graccho_cardoso",
    ("SP", "florinia"): "florinea",
    ("SP", "moji_mirim"): "mogi_mirim",
    ("SP", "sao_luis_do_paraitinga"): "sao_luiz_do_paraitinga",
    ("TO", "fortaleza_do_tabocao"): "tabocao",
    ("TO", "sao_valerio_da_natividade"): "sao_valerio",
}
@lru_cache(maxsize=1)
def read_state_codes():
    # Map the 2-digit prefix of each IBGE city code to its state acronym,
    # read once from the bundled CSV (cached for the process lifetime).
    with CITY_DATA_FILENAME.open() as fobj:
        return {row["city_ibge_code"][:2]: row["state"] for row in csv.DictReader(fobj)}
@lru_cache(maxsize=5570 * 2)
def city_key(state, city):
    """Build the normalized '<STATE> <city words>' slug used as lookup key.

    Normalization: upper-cased state, slugged city (with the sant_ana
    special case), CITY_SPELL_MAP correction, connective BLOCK_WORDS
    dropped and WORD_MAP substitutions applied per word.
    """
    state = state.upper().strip()
    city = slug(city).replace("sant_ana", "santana")
    city = CITY_SPELL_MAP.get((state, city), city)
    words = [
        WORD_MAP.get(word, word)
        for word in city.split("_")
        if word not in BLOCK_WORDS
    ]
    return slug(state + " " + " ".join(words))
@lru_cache(maxsize=5570 * 2)
def split_state_city(text):
    """Split 'UF City Name' or 'Full State Name City Name' into (UF, city)."""
    words = text.split()
    if len(words[0]) == 2:  # already a two-letter state acronym
        return words[0], " ".join(words[1:])
    # Try progressively longer word prefixes as a full state name.
    for split_at in range(1, len(words) + 1):
        prefix = slug(" ".join(words[:split_at]))
        if prefix in STATE_NAMES:
            return STATE_NAMES[prefix], " ".join(words[split_at:])
    raise ValueError(f"Cannot recognize state/city: {text}")
@lru_cache(maxsize=1)
def city_map():
    # Lookup table: city_key(state, city) -> full CSV row, built once from
    # the bundled municipality table and cached.
    with CITY_DATA_FILENAME.open() as fobj:
        reader = csv.DictReader(fobj)
        return {city_key(row["state"], row["city"]): row for row in reader}
@lru_cache(maxsize=5570 * 2)
def get_city(state, city):
    """Resolve a possibly misspelled (state, city) pair to the canonical
    (state, city, city_ibge_code) triple.

    Raises ValueError when the pair cannot be matched or when a '/'-embedded
    state conflicts with the `state` argument.
    """
    # TODO: Fix 'Cannot parse city/state:' for:
    # 'AUGUSTO SEVERO/RN'
    # 'DO SUL/MT'
    # 'EMBU/SP'
    # 'MUNICIPIO PROVISORIO/DF'
    # 'SAO LUIZ DO ANUAA/RR'
    result_rs = REGEXP_RS.findall(city)
    if result_rs:
        # Full state name glued onto the city field.
        state, city = 'RS', result_rs[0]
    elif "/" in city:
        city2, state2 = city.split("/")
        if state and state != state2:
            raise ValueError(f"Conflict in state for: {city}/{state}")
        city, state = city2, state2
    # Fix: the original overwrote `city` with the lookup result, so the
    # "not found" message always reported `None` instead of the city name.
    row = city_map().get(city_key(state, city))
    if row is None:
        raise ValueError(f"City/state {repr(city)}/{repr(state)} not found")
    return row["state"], row["city"], row["city_ibge_code"]
STATE_CODES = read_state_codes()
| turicas/autuacoes-ambientais-ibama | autuacoes/cities.py | cities.py | py | 3,944 | python | en | code | 8 | github-code | 36 |
16172969453 | """
Write a method named getExponent(n,p) that returns the largest integer exponent x such that px evenly divides n.
if p<=1 the method should return null/None (throw an ArgumentOutOfRange exception in C#).
"""
def get_exponent(n, p=None):
if p > 1:
l, x = [], 0
while abs(n) // (p ** x) >= 1:
if n % (p ** x) == 0:
l.append(x)
x += 1
else:
x += 1
else:
return max(l)
a = -250
b = 5
print(get_exponent(a, b))
"""
Best Practices
def get_exponent(n, p):
if p > 1:
x = 0
while not n % p:
x += 1
n //= p
return x
and
def get_exponent(n, p, i = 0):
if p <= 1: return None
return get_exponent(n / p, p, i + 1) if n / p == n // p else i
"""
| genievy/codewars | tasks_from_codewars/6kyu/Largest integer exponent.py | Largest integer exponent.py | py | 834 | python | en | code | 0 | github-code | 36 |
42324051999 | # schedule_post.py
# Author: Daniel Edades
# Last Modified: 11/21/2017
# Description: Formats a database row intended to represent a post scheduled
# for a future time, then inserts that row into a database table for later
# retrieval and posting at that actual time.
import sqlite3
def schedule_post(table_name, time, content):
conn = sqlite3.connect('scheduled_posts.db')
c = conn.cursor()
command_string = "INSERT INTO " + "\'" + table_name + "\'"
command_string += "(\'post_time\', \'contents\', \'posted\')"
command_string += " VALUES (?, ?, 0)"
c.execute(command_string, (time, content))
conn.commit() | edadesd/sunrisebot | schedule_post.py | schedule_post.py | py | 661 | python | en | code | 0 | github-code | 36 |
70387110823 | "Converter with PySimpleGUI"
import PySimpleGUI as sg
layout = [[sg.Input(key="-INPUT-", size=(40, 40)),
sg.Spin(["kilometer to meter", "meter to decimeter", "dosimeter to centimeter"], background_color="black", text_color="white", key="-SPIN-"),
sg.Button("convert", key="-CONVERT-", button_color="black")],
[sg.Text("output", key="-OUTPUT-", background_color="white", text_color="black")]]
window = sg.Window("converter", layout, background_color="white", size=(500, 100))
while True:
event, values = window.read()
if event == sg.WIN_CLOSED:
break
if event == "-CONVERT-":
input_number = values["-INPUT-"]
if input_number.isnumeric():
if values["-SPIN-"] == "kilometer to meter":
output = round(float(input_number) * 1000, 2)
output_str = f"{input_number} km is {output}m"
if values["-SPIN-"] == "meter to decimeter":
output = round(float(input_number) * 10, 2)
output_str = f"{input_number} m is {output}dm"
if values["-SPIN-"] == "dosimeter to centimeter":
output = round(float(input_number) * 10, 2)
output_str = f"{input_number} dm is {output}cm"
window["-OUTPUT-"].update(output_str)
else:
window["-OUTPUT-"].update("please enter a number!!!!!!!!!!!!!!!! not a text!")
window.close()
| HadisehMirzaei/converter-PySimpleGUI | main.py | main.py | py | 1,436 | python | en | code | 0 | github-code | 36 |
23268546540 | import pytz
from datetime import datetime
from datetime import timedelta
import asyncio
class DateError(Exception):
    # Raised for malformed date strings ("YYYY-MM-DD" expected).
    pass
class TimeError(Exception):
    # Raised for malformed time strings ("HH:MM:SS" expected).
    pass
class DateTimeError(TimeError, DateError):
    # Raised for malformed combined strings / bad field names; inherits
    # both bases so callers can catch either DateError or TimeError.
    pass
class DateTime:
    @classmethod
    async def at(cls, date, time):
        # Alternate constructor: build a DateTime from separate
        # "YYYY-MM-DD" and "HH:MM:SS" strings.
        self = DateTime()
        await (self.init())
        await (self.setDate(date))
        await (self.setTime(time))
        return self
@classmethod
async def utc(cls, hours = 0):
self = DateTime()
await (self.init())
t = str(datetime.utcnow().replace(tzinfo=pytz.utc))
t = t[0:t.find('.')]
await (self.setAll(t))
await (self.offset(hours = hours))
return self
    @classmethod
    async def fromString(cls, s):
        # Alternate constructor: parse a combined "YYYY-MM-DD HH:MM:SS"
        # string.
        self = DateTime()
        await (self.init())
        await (self.setAll(s))
        return self
    async def init(self):
        # Async initializer, called by the classmethod constructors instead
        # of __init__ so that setup can be awaited.
        # __dn: ordered field names, most-significant first.
        self.__dn = [
            "year",
            "month",
            "day",
            "hour",
            "minute",
            "second"
        ]
        # __dt: field name -> stored integer value (None until set).
        self.__dt = {}
        for n in self.__dn:
            self.__dt[n] = None
async def _max(self, n):
i = self.__dn.index(n)
switcher = {
0: 9999,
1: 12,
2: -1,
3: 23,
4: 59,
5: 59
}
rm = switcher.get(i)
if (rm < 0):
dic = {
31: [1, 3, 5, 7, 8, 10, 12],
30: [4, 6, 9, 11],
28: [2],
29: []
}
if (self.__dt["year"] % 4 == 0):
dic[29].append(disc[28].pop(0))
for days, months in dic.items():
for m in months:
if (m == self.__dt["month"]):
rm = days
return rm
async def _min(self, n):
i = self.__dn.index(n)
switcher = {
0: 0,
1: 1,
2: 1,
3: 0,
4: 0,
5: 0
}
return switcher.get(i)
async def offset(self, **args):
for t, v in args.items():
t = t[:-1]
if t in self.__dn:
adder = int(v)
self.__dt[t] = int(self.__dt[t]) + adder
mi = (await self._min(t))
while True:
ma = (await self._max(t))
if (self.__dt[t] <= ma):
break
self.__dt[t] = (mi + (self.__dt[t] - ma)) - 1
dic = {}
dic[str(self.__dn[self.__dn.index(t) - 1]) + 's'] = 1
await (self.offset(**dic))
else:
raise DateTimeError("Invalid string for offset (" + str(t) + ")")
async def get(self, n):
return self.__dt[n]
async def set(self, **args):
for t, v in args.items():
if t in self.__dn:
self.__dt[t] = int(v)
else:
raise DateTimeError("Invalid string for set")
async def setAll(self, dt):
dt = str(dt).split(' ')
if (len(dt) == 2):
await (self.setDate(dt[0]))
await (self.setTime(dt[1]))
else:
raise DateTimeError("Invalid DateTime format!")
async def setDate(self, date):
date = str(date).replace(' ', '')
dp = date.split('-')
if (len(dp) == 3):
await (self.set(
year = int(dp[0]),
month = int(dp[1]),
day = int(dp[2])
))
else:
raise DateError("Invalid date passed!")
async def setTime(self, time):
time = str(time).replace(' ', '')
tp = time.split(':')
if (len(tp) == 3):
tp[0] = tp[0][0:2]
tp[1] = tp[1][0:2]
tp[2] = tp[2][0:2]
await (self.set(
hour = int(tp[0]),
minute = int(tp[1]),
second = int(tp[2])
))
else:
raise TimeError("Invalid time passed! " + str(tp))
async def _asString(self, data, l = 2):
data = str(self.__dt[str(data)])
while (len(data) < l):
data = '0' + data
return data
async def dateAsString(self):
yyyy = (await self._asString("year", 4))
mm = (await self._asString("month"))
dd = (await self._asString("day"))
return (yyyy + '-' + mm + '-' + dd)
async def timeAsString(self):
hh = (await self._asString("hour"))
mm = (await self._asString("minute"))
ss = (await self._asString("second"))
return (hh + ':' + mm + ':' + ss)
async def asString(self):
return ((await self.dateAsString()) + ' ' + (await self.timeAsString()))
async def _compare(self, other):
for i in range(0, len(self.__dn)):
c = self.__dn[i]
si = self.__dt[c]
oi = (await other.get(c))
if (si == oi):
continue
elif (si > oi):
return 0
else:
return 1
return -1
async def asRecentAs(self, other):
return ((await self._compare(other)) == -1)
async def lessRecentThan(self, other):
return ((await self._compare(other)) == 1)
async def moreRecentThan(self, other):
return ((await self._compare(other)) == 0)
| Liyara/Tracker | date_time_handler.py | date_time_handler.py | py | 4,172 | python | en | code | 0 | github-code | 36 |
37105984612 | import csv
import smtplib
def mail(email):
    """Send a registration-confirmation mail to *email* via Gmail SMTP.

    NOTE(review): the message has no Subject/From headers -- most mail
    clients will render it oddly; confirm whether headers are wanted.
    """
    message = "Congratulation are registered!!"
    # SECURITY: credentials are hard-coded in source; move them to
    # environment variables or a secrets store and rotate this password.
    server = smtplib.SMTP("smtp.gmail.com", 587)
    try:
        server.starttls()
        server.login("svraj157@gmail.com", "Usa@1234")
        server.sendmail("svraj157@gmail.com", email, message)
    finally:
        # Always close the SMTP connection (the old version leaked it).
        server.quit()
def listdata():
    """Return all registered students as a list of CSV rows.

    Reads the ``data.csv`` file in the working directory.
    """
    # "with" closes the handle even if reading raises (the old version
    # leaked it on error); newline="" is what the csv module expects.
    with open("data.csv", "r", newline="") as file:
        return list(csv.reader(file))
| daksh5/Python-Flask | support.py | support.py | py | 500 | python | en | code | 0 | github-code | 36 |
21928235273 | #반복문 - for문, while문
'''
반복적인 작업의 코드로 작성하기 위해 사용
시퀀스 자료형
순서가 있는 자료형
종류 : 리스트, 문자열, range 객체, 튜플, 딕셔너리
for 변수 in 시퀀스 자료 :
명령문
range 명령어
range(숫자) / 0~(숫자-1)까지의 범위 데이터를 만들어줌
range(시작, 끝+1, 단계) / 단계는 생략하면 +1
# while문 - 반복할 횟수가 정해지지 않은 경우 사용!
초기식
while 조건식 : / False가 되면 while 루프를 빠져나온다!
반복할 명령
증감식
ex>
i = 0 / 초기식
while i < 10 :
print(i, "번째")
i += 1
# 무한 루프
while True :
반복할 명령
if 조건식 :
break / break를 만나면 while 루프를 빠져나온다!
* continue는 continue 아래 코드를 실행하지 않고 다시 루프의 처음으로 돌아가 루프 실행
'''
'''
for a in [1,2,3,4] :
print(a)
# for문 - 리스트 사용
champions = ["티모", "이즈리얼", "리신"]
for champion in champions :
print("선택할 챔피언은", champion, "입니다.")
# for문 - 문자열 사용
message = "자신감을 가지자!"
for word in message :
print(word)
# for문 - range 사용
for i in range(1,10):
print(i)
'''
# while loop: repeats as long as the condition stays true
i = 0 # initializer
while i < 10 : # loop condition
    print(i, "번째")
    i += 2 # increment step
# infinite loop: exits only via `break` when the user types "exit"
while True :
    x = input("종료하려면 exit를 입력하세요 >>> ")
    if x == 'exit' :
        break
30527541130 | import random
import string
from fastapi import HTTPException
from passlib.context import CryptContext
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import Session
from starlette import status
from . import models, schemas
def get_user(db: Session, user_id: int):
    """Return the User with the given primary key, or None if absent."""
    matches = db.query(models.User).filter(models.User.id == user_id)
    return matches.first()
def get_user_by_username(db: Session, username: str):
    """Return the User with the given username, or None if absent."""
    matches = db.query(models.User).filter(models.User.username == username)
    return matches.first()
def get_users(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of users (offset *skip*, at most *limit* rows)."""
    page = db.query(models.User).offset(skip).limit(limit)
    return page.all()
def delete_user(db: Session, user: schemas.UserDelete):
    """Best-effort delete of the user matching *user.username*.

    Returns True when the delete and commit succeed, False on any error.
    """
    query = db.query(models.User).filter(models.User.username == user.username)
    try:
        query.delete()
        db.commit()
    except Exception:
        return False
    return True
def create_user(db: Session, user: schemas.UserCreate, is_super=False):
    """Create a user with a bcrypt-hashed password.

    Raises an HTTP 400 HTTPException when the username already exists.
    """
    pwd_context = CryptContext(schemes=["bcrypt"], deprecated="auto")
    hashed_password = pwd_context.hash(user.password)
    db_user = models.User(username=user.username, hashed_password=hashed_password, is_super=is_super)
    db.add(db_user)
    try:
        db.commit()
        db.refresh(db_user)
        return db_user
    except IntegrityError:
        # Roll back so the session remains usable after the failed
        # INSERT (previously it was left in a dirty, unusable state).
        db.rollback()
        raise HTTPException(
            status_code=status.HTTP_400_BAD_REQUEST,
            detail="User already created",
            headers={"WWW-Authenticate": "Bearer"},
        )
def get_client(db: Session, key: str, uuid: str = None):
    """Look up a client by key+uuid, by key alone, or by uuid alone."""
    query = db.query(models.Clients)
    if key and uuid:
        query = query.filter((models.Clients.key == key) &
                             (models.Clients.uuid == uuid))
    elif key:
        query = query.filter(models.Clients.key == key)
    else:
        query = query.filter(models.Clients.uuid == uuid)
    return query.first()
def get_clients(db: Session, skip: int = 0, limit: int = 100):
    """Return a page of clients (offset *skip*, at most *limit* rows)."""
    page = db.query(models.Clients).offset(skip).limit(limit)
    return page.all()
def delete_client(db: Session, client_id: int):
    """Best-effort delete of the client with id *client_id*.

    Returns True on success, False if the delete or commit fails.
    """
    query = db.query(models.Clients).filter(models.Clients.id == client_id)
    try:
        query.delete()
        db.commit()
    except Exception:
        return False
    return True
def create_client(db: Session, user_id: int):
    """Create a client row for *user_id* with a random 16-char API key."""
    import secrets  # local import: the right tool for security tokens
    # Use a CSPRNG instead of the predictable `random` module, and
    # ascii_uppercase instead of ascii_letters.upper(), which put every
    # letter A-Z into the choice pool twice.
    key = ''.join(secrets.choice(string.ascii_uppercase) for _ in range(16))
    db_item = models.Clients(creator_id=user_id, key=key)
    db.add(db_item)
    db.commit()
    db.refresh(db_item)
    return db_item
| eugenfaust/projectsAPI | sql_app/crud.py | crud.py | py | 2,586 | python | en | code | 0 | github-code | 36 |
42542824319 | from django.conf.urls import url
from cart import views
app_name = 'cart'
urlpatterns = [
url(r'^$', views.my_cart, name='my_cart'),
url(r'^add_cart/$', views.add_cart, name='add_cart'),
] | hikaru32/pro_tt | cart/urls.py | urls.py | py | 199 | python | en | code | 0 | github-code | 36 |
29069087850 | from config import db
from flask import abort, session
from models import Recipe, Ingredient,recipes_schema,recipe_schema, RecipeSchema
#####
def create_recipe(recipe):
    """Create a recipe from a dict payload.

    Aborts with 406 when a recipe of the same name exists and with 404
    when any referenced ingredient is missing; returns the serialized
    recipe and HTTP 201 on success.
    """
    name = recipe.get("name")
    ingredients = recipe.get("ingredients")
    # Reject duplicates before touching the ingredient list.
    if Recipe.query.filter_by(name=name).first() is not None:
        abort(406, f"Recipe {name} already exists")
    ingredients_list = []
    for ingredient_data in ingredients:
        ingredient_name = ingredient_data.get("name")
        # Every ingredient must already exist in the database.
        existing = Ingredient.query.filter_by(name=ingredient_name).first()
        if existing is None:
            abort(404, f"Ingredient {ingredient_name} not found, you should created first !")
        ingredients_list.append(existing)
    new_recipe = Recipe(
        name=name,
        cookTime=recipe.get("cookTime"),
        serving=recipe.get("serving"),
        preparation=recipe.get("preparation"),
        ingredients = ingredients_list
    )
    db.session.add(new_recipe)
    db.session.commit()
    return recipe_schema.dump(new_recipe), 201
#as user :list all recipes
def list_all_recipes():
    """Return every recipe; requires a logged-in session (401 otherwise)."""
    if 'name' not in session:
        abort(401, f"User Unauthorized")
    return recipes_schema.dump(Recipe.query.all())
#list recipes with a particular ingredient
def filter_recipe(ingredient_name):
if 'name' in session:
ingredient = Ingredient.query.filter_by(name=ingredient_name).first()
if ingredient :
recipes = ingredient.recipe
recipe_schema = RecipeSchema(many=True)
return recipe_schema .dump(recipes)
else:
abort (404, f"Ingredient {ingredient_name} not found")
else:
abort(401, f"User Unauthorized") | nor5/welshProject | views/recipes.py | recipes.py | py | 2,064 | python | en | code | 0 | github-code | 36 |
2723281649 | #!/usr/bin/env python3
import html
import random
trivia= {
"category": "Entertainment: Film",
"type": "multiple",
"question": "Which of the following is NOT a quote from the 1942 film Casablanca? ",
"correct_answer": ""Frankly, my dear, I don't give a damn."",
"incorrect_answers": [
""Here's lookin' at you, kid."",
"“Of all the gin joints, in all the towns, in all the world, she walks into mine…”",
""Round up the usual suspects.""
]
}
def main():
    """Print the trivia question and four shuffled options, then grade
    the user's 1-based answer read from stdin."""
    question = trivia["question"]
    correct = trivia["correct_answer"]
    wrongs = trivia["incorrect_answers"]
    options = [correct, wrongs[0], wrongs[1], wrongs[2]]
    print(question)
    random.shuffle(options)
    for number, option in enumerate(options, start=1):
        print(number, html.unescape(option))
    chosen = int(input("Enter your answer: ")) - 1
    if chosen == options.index(correct):
        print("Correct, ya filthy animal.")
    else:
        print("Incorrect!")
if __name__ == "__main__":
main() | chadkellum/mycode | challenge57.py | challenge57.py | py | 1,284 | python | en | code | 0 | github-code | 36 |
34484792129 | import numpy as np
from DataUtils import DataUtils
import argparse
import os
import torch
from torchvision import datasets, models, transforms
if __name__ == "__main__":
# setting the hyper parameters
parser = argparse.ArgumentParser(description="Analysis Diatoms Research CNR-ISASI")
parser.add_argument('--batch_size', default=256, type=int,
help="Size of the batch")
parser.add_argument('--data_dir', default='../data/Dataset_4',
help="Directory of data. If no data, use \'--download\' flag to download it")
parser.add_argument('--save_dir', default='results',
help="Directory to save the results!")
parser.add_argument('--dataset', default='results',
help="Directory where is the data!")
parser.add_argument('-t', '--testing', action='store_true',
help="Test the trained model on testing dataset")
parser.add_argument('-w', '--weights', default=None,
help="The path of the saved weights. Should be specified when testing")
parser.add_argument('--images_per_class', default=10, help="how many images will be used per class")
parser.add_argument('--classes_training', default=50, help="how many classes there are in the training")
parser.add_argument('--perplexy', default=30, help="TSNE perplexy")
parser.add_argument('--n_iter', default=300, help="TSNE iterations")
args = parser.parse_args()
print(args)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
image_size = 224
data_transforms = transforms.Compose([transforms.Resize(256),
transforms.CenterCrop(224),
transforms.Grayscale(1),
transforms.ToTensor()])
#transforms.Normalize(_mean, _std)])
data = DataUtils(transformations=data_transforms, device = device, args = args)
dataloaders = data.load_data()
#print(data.train_size, data.valid_size)
X = np.zeros((data.train_size, image_size*image_size))
y = np.zeros((data.train_size))
for i, sample in enumerate(dataloaders['train']):
(inputs, labels),(_,_) = sample
for j in range(len(inputs)):
img = inputs[j]
X[j,:] = img.view(-1, image_size*image_size)
y[j] = labels[j]
#X = X.numpy()
#y = y.numpy()
print(X.shape)
import pandas as pd
feat_cols = ['pixel'+str(i) for i in range(X.shape[1])]
df = pd.DataFrame(X,columns=feat_cols)
df['label'] = y
df['label'] = df['label'].apply(lambda i: str(i))
X, y = None, None
print('Size of the dataframe: {}'.format(df.shape))
import matplotlib.pyplot as plt
rndperm = np.random.permutation(df.shape[0])
# Plot the graph
#plt.gray()
#fig = plt.figure( figsize=(16,7) )
#for i in range(0,15):
# ax = fig.add_subplot(3,5,i+1, title='class: ' + str(df.loc[rndperm[i],'label']) )
# ax.matshow(df.loc[rndperm[i],feat_cols].values.reshape((224,224)).astype(float))
#plt.show()
from sklearn.decomposition import PCA
#pca = PCA(n_components=30)
#pca_result = pca.fit_transform(df[feat_cols].values)
#df['pca-one'] = pca_result[:,0]
#df['pca-two'] = pca_result[:,1]
#df['pca-three'] = pca_result[:,2]
#print('Explained variation per principal component: {}'.format(pca.explained_variance_ratio_))
from ggplot import *
#chart = ggplot( df.loc[rndperm[:3000],:], aes(x='pca-one', y='pca-two', color='label') ) + geom_point(size=75,alpha=0.8) + ggtitle("First and Second Principal Components colored by digit")
#print(chart)
import time
from sklearn.manifold import TSNE
n_sne = 6000
time_start = time.time()
tsne = TSNE(n_components=2, verbose=1, perplexity=int(args.perplexy), n_iter=int(args.n_iter))
tsne_results = tsne.fit_transform(df.loc[rndperm[:n_sne],feat_cols].values)
print('t-SNE done! Time elapsed: {} seconds'.format(time.time()-time_start))
df_tsne = df.loc[rndperm[:n_sne],:].copy()
df_tsne['x-tsne'] = tsne_results[:,0]
df_tsne['y-tsne'] = tsne_results[:,1]
chart = ggplot( df_tsne, aes(x='x-tsne', y='y-tsne', color='label') ) \
+ geom_point(size=70,alpha=0.1) \
+ ggtitle("tSNE dimensions colored by digit")
print(chart) | andouglasjr/ProjectDiatoms | analysis.py | analysis.py | py | 4,569 | python | en | code | 0 | github-code | 36 |
42233326363 | # -*- coding: utf-8 -*-
################################################################
# #
# Seth Cram #
# ECE351-53 #
# Project 9 #
# Due: 3/29/2022 #
# Any other necessary information needed to navigate the file #
#
#
################################################################
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
import control as con
#default vals
R = 1000
L = 27e-3
C = 100e-9
steps = 1000
#in rad/s (DONT USE 1**3 AS ARG (fails))
w = np.arange(1e3, 1e6+steps, steps)
#TASK 1.1
#prelab eqts
mag = (w/(R*C)) / np.sqrt(w**4 + ((1/(R*C))**2 - 2/(L*C))*(w**2) + (1/(L*C))**2)
phase = np.pi/2 - np.arctan((w/(R*C)) / (-1*(w**2)+(1/(L*C))))
#conv to db and degs
dbMag = 20*np.log10(mag)
degPhase = np.rad2deg(phase)
#need to shift by 180 degs later half to make look better
# Wrap the phase for plotting: shift every sample above 90 deg down by
# 180 deg (vectorized NumPy mask; replaces the manual index loop).
degPhase[degPhase > 90] -= 180
#TASK 1.2
num = [1/(R*C), 0]
denom = [1, 1/(R*C), 1/(L*C)]
syst = sig.lti(num,denom)
(bodeW, bodeMag, bodePhase) = sig.bode(syst, w)
#PLOTS
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 22}
#plot1:
plt.rc('font', **font)
plt.figure(figsize = (30,30))
plt.subplot(2, 1, 1)
plt.semilogx(w, dbMag)
plt.grid() #add a grid to graph
plt.title('Bode plot for Task 1.1')
plt.ylabel('|H(jw)| (dB)')
#plot2:
plt.subplot(2, 1, 2)
plt.semilogx(w, degPhase)
plt.grid() #add a grid to graph
plt.ylabel('angle of H(jw) (degs)')
plt.xlabel('w (rads/s)')
plt.show() #display figure
#plot1:
plt.rc('font', **font)
plt.figure(figsize = (30,30))
plt.subplot(2, 1, 1)
plt.semilogx(bodeW, bodeMag)
plt.grid() #add a grid to graph
plt.title('Bode plot for Task 1.2')
plt.ylabel('|H(jw)| (dB)')
#plot2:
plt.subplot(2, 1, 2)
plt.semilogx(bodeW, bodePhase)
plt.grid() #add a grid to graph
plt.ylabel('angle of H(jw) (degs)')
plt.xlabel('w (rads/s)')
plt.show() #display figure
#TASK 1.3
sys3 = con.TransferFunction(num, denom)
plt.figure(figsize = (15,15))
#used _ = .... to suppress the output (function auto-plots)
_ = con.bode(sys3, w, dB = True, Hz = True, deg = True, plot = True)
plt.title('Bode plot for Task 1.3')
"""#(conMag, conPhase, conW) = con.bode(sys3, w, dB = True, Hz = True, deg = True, plot = True)
#plot1:
plt.rc('font', **font)
plt.figure(figsize = (30,30))
plt.subplot(2, 1, 1)
plt.semilogx(conW, conMag)
plt.grid() #add a grid to graph
plt.title('Bode plot for Task 1.3')
plt.ylabel('|H(jw)| (dB)')
#plot2:
plt.subplot(2, 1, 2)
plt.semilogx(conW, conPhase)
plt.grid() #add a grid to graph
plt.ylabel('angle of H(jw) (degs)')
plt.xlabel('w (Hz)')
plt.show() #display figure
"""
#TASK 2.1
#chosen as x(t)'s max w_0
fs = 2*np.pi*50000
steps = 1/fs
t = np.arange(0, 1e-2 + steps , steps)
x = np.cos(2*np.pi*100*t) + np.cos(2*np.pi*3024*t) + np.sin(2*np.pi*50000*t)
#plot:
plt.figure(figsize = (15,15))
plt.plot(t, x)
plt.grid() #add a grid to graph
plt.title('x(t) vs t')
plt.ylabel('x(t)')
plt.xlabel('t (s)')
plt.show() #display figure
#TASK 2.2: conv H(s) to z-dom
(zNum, zDenom) = sig.bilinear(num, denom, 500000)
#TASK 2.3: pass x(t) thru z-dom filter
y = sig.lfilter(zNum, zDenom, x)
#plot:
plt.figure(figsize = (15,15))
plt.plot(t, y)
plt.grid() #add a grid to graph
plt.title('y(t) vs t')
plt.ylabel('y(t)')
plt.xlabel('t (s)')
plt.show() #display figure
| SethCram/Signals-and-Systems-Code | proj10_main.py | proj10_main.py | py | 3,559 | python | en | code | 0 | github-code | 36 |
23634749756 | from sqlalchemy.orm import sessionmaker
from fichero_sql_tablas import Estudiante, create_engine
engine = create_engine('sqlite:///estudiantes1.db', echo=True)
# Create a session factory bound to the SQLite engine.
Session = sessionmaker(bind=engine)
# Open a session; all instances below are created through it.
session = Session()
# Create the student records in the database.
usuario = Estudiante('juan', 'Juan', 'Perez', 'Lopez', 'Complu')
session.add(usuario)
usuario = Estudiante('ana', 'Ana', 'Perez', 'Lopez', 'Complu')
session.add(usuario)
usuario = Estudiante('Laura', 'Laura', 'Perez', 'Lopez', 'Complu')
session.add(usuario)
# Flush the pending inserts to the database.
session.commit()
| andreagro17/pythonCourseTest | fichero_sql_datos.py | fichero_sql_datos.py | py | 639 | python | es | code | 0 | github-code | 36 |
15267569253 | from django.shortcuts import render,redirect
import book_guide
from book_guide.models import Book_guide
from book_guide.forms import GuideForm
# Create your views here.
def guide(request):
    """Render the guide listing page with every row from book_guide."""
    all_guides = Book_guide.objects.raw('select * from book_guide')
    return render(request, "guide/book_guide.html", {'guides': all_guides})
def add_guide(request):
    """Handle the add-guide form.

    POST: validate and save the submitted GuideForm, then redirect.
    GET: render a blank form.
    """
    if request.method == "POST":
        forms = GuideForm(request.POST, request.FILES)
        # Validate before saving: ModelForm.save() on unvalidated data
        # raises ValueError, so the old unconditional save() crashed on
        # any invalid submission.  (Debug print of request.FILES removed.)
        if forms.is_valid():
            forms.save()
        return redirect("/partneracc_addproperty")
    else:
        guides = GuideForm()
        return render(request, 'list_property/add_guide.html', {'guides': guides})
def guide_info(request):
    """Search guides by name substring (POST) or show the empty page (GET)."""
    if request.method != "POST":
        return render(request, "list_property/update_guide.html", {})
    guide_name1 = request.POST['searched']
    matches = Book_guide.objects.filter(guide_name__contains=guide_name1)
    context = {'guide_name1': guide_name1, 'guides': matches}
    return render(request, "list_property/update_guide.html", context)
def edit(request, edit):
    """Load the guide named *edit* and show it on the update page."""
    selected = Book_guide.objects.get(guide_name=edit)
    return render(request, "list_property/update_guide.html", {'guides': selected})
def update1(request, guide_name):
    """Apply a posted GuideForm to the existing guide *guide_name*.

    On a valid form the change is saved and the user is redirected to
    /home; otherwise (or if saving fails) the update page is re-shown.
    """
    guides = Book_guide.objects.get(guide_name=guide_name)
    # Bind the posted data to the existing model instance.
    form = GuideForm(request.POST, instance=guides)
    if form.is_valid():
        try:
            form.save()
            return redirect("/home")
        except Exception:
            # Was a bare `except:` (which also swallows SystemExit /
            # KeyboardInterrupt) around a debug print; keep the
            # best-effort fall-through but catch only real errors.
            pass
    return render(request, "list_property/update_guide.html", {'guides': guides})
def search_guide(request):
    """Search guides by address substring (POST) or show the empty page (GET)."""
    if request.method != "POST":
        return render(request, 'find_guide/searched_guide.html', {})
    searched = request.POST['searched']
    venues = Book_guide.objects.filter(guide_address__icontains=searched)
    context = {'searched': searched, 'venues': venues}
    return render(request, 'find_guide/searched_guide.html', context)
def guide_form(request, p_id):
    """Render the booking form for the guide with primary id *p_id*."""
    selected = Book_guide.objects.get(guide_id=p_id)
    return render(request, "find_guide/guide_form2.html", {'guides': selected})
| Marinagansi/3rd-sem-project-django | book_guide/views.py | views.py | py | 2,162 | python | en | code | 0 | github-code | 36 |
10322811313 | import os
import tqdm
import argparse
import pandas as pd
max_row = 0
def trans(value, dict, unknown=0):
    """Translate an id through the mapping *dict*.

    Missing and NaN entries fall back to *unknown*; the result is
    returned as a decimal string.
    """
    mapped = dict.get(int(value), unknown)
    return str(int(unknown if pd.isna(mapped) else mapped))
def transform_paths(row, map_dict):
    """Rewrite every node id inside row['path'] through *map_dict*.

    The row is mutated in place and also returned (DataFrame.apply style).
    """
    translated = [trans(node_id, map_dict) for node_id in row['path'].split()]
    row['path'] = ' '.join(translated)
    return row
def transform_paths_content(row, token_map, path_map):
    """Translate one 'start,path,end' context triple through the id maps."""
    parts = row.split(',')
    return '{},{},{}'.format(
        trans(parts[0], token_map),
        trans(parts[1], path_map),
        trans(parts[2], token_map),
    )
def fill_nan(vocab_map):
    """Assign fresh sequential ids to rows whose 'id_y' is NaN.

    New ids continue upward from the current column maximum; the
    module-level ``max_row`` counter is updated as a side effect.
    """
    global max_row
    max_row = vocab_map['id_y'].max()

    def assign_id(row):
        global max_row
        if not pd.isna(row['id_y']):
            return row
        max_row = int(max_row + 1)
        row['id_y'] = max_row
        return row

    return vocab_map.apply(assign_id, axis=1)
def vocab_merge(vocab_a, vocab_b, on, method):
    """Merge two vocab frames on *on* using join strategy *method*.

    Outer joins can leave NaN ids on the right side, so those rows are
    back-filled with fresh ids via fill_nan().
    """
    merged = vocab_a.merge(vocab_b, on=on, how=method)
    return fill_nan(merged) if method == 'outer' else merged
def save_vocab(vocab, path, columns=None):
    """Write *vocab* (minus its first column) to *path* as CSV.

    With *columns* = [name_col, id_col] the remaining two columns are
    renamed, reordered to (id, name) and the id column downcast to
    int32; if the cast fails the frame is printed and written as-is.
    """
    trimmed = vocab.iloc[:, 1:]
    if columns is not None:
        trimmed.columns = columns
    try:
        trimmed = trimmed[[columns[1], columns[0]]].astype({'id': 'int32'})
    except ValueError:
        print(trimmed)
    trimmed.to_csv(path, index=False)
def map2dict(vocab_map):
    """Build {old_id -> new_id} from the first and third columns of
    *vocab_map*, skipping rows whose old id is NaN."""
    mapping = {}
    for _, row in vocab_map.iterrows():
        old_id = row[0]
        if not pd.isna(old_id):
            mapping[int(old_id)] = row[2]
    return mapping
def parse_args(argv=None):
    """Parse command-line options for vocab sharing.

    :param argv: optional explicit argument list; defaults to sys.argv
        (new backward-compatible parameter, added for testability).
    """
    parser = argparse.ArgumentParser("CodePath Vocab Generation!!")
    # dataset location on disk
    parser.add_argument('--data_path', type=str, default='/.../APathCS/github/path_data/', help="data location")
    # dataset name
    parser.add_argument('--data_name', type=str, default='example', help="dataset name")
    # programming language of the code
    parser.add_argument('--lang_type', type=str, default='java', help="different code type")
    # training split directory
    parser.add_argument('--train_path', type=str, default='train_i', help="train path dataset")
    # test split directory
    parser.add_argument('--test_path', type=str, default='test', help="test path dataset")
    # output directory
    parser.add_argument('--out_path', type=str, default=' ', help="path output")
    # BUGFIX: `type=bool` is an argparse trap -- bool("False") is True,
    # so ANY provided value used to enable merging.  Parse the string
    # explicitly instead.
    parser.add_argument("--merge_vocab", type=lambda s: s.lower() in ("true", "1", "yes"),
                        default=False, help="need merge vocab")
    return parser.parse_args(argv)
def main():
    """Translate the test split's vocab ids into the train split's id
    space and write merged (or test-only) path contexts and vocab CSVs."""
    # parse CLI configuration
    args = parse_args()
    # e.g. /.../APathCS/github/path_data/XXX/java
    lang_path = os.path.join(args.data_path, args.data_name, args.lang_type)
    # training data directory
    train_path = os.path.join(lang_path, args.train_path)
    # test data directory
    test_path = os.path.join(lang_path, args.test_path)
    # output directory
    out_path = os.path.join(lang_path, args.out_path)
    if not os.path.exists(out_path):
        # create the output directory on first run
        os.makedirs(out_path)
    # training-split vocabularies
    token_vocab_train = pd.read_csv(os.path.join(train_path, 'tokens.csv'))
    node_vocab_train = pd.read_csv(os.path.join(train_path, 'node_types.csv'))
    path_vocab_train = pd.read_csv(os.path.join(train_path, 'paths.csv'))
    # test-split vocabularies
    token_vocab_test = pd.read_csv(os.path.join(test_path, 'tokens.csv'))
    node_vocab_test = pd.read_csv(os.path.join(test_path, 'node_types.csv'))
    path_vocab_test = pd.read_csv(os.path.join(test_path, 'paths.csv'))
    need_merge = args.merge_vocab
    # outer join when merging vocabularies, otherwise keep only test-side rows
    method = 'outer' if need_merge else 'left'
    node_vocab_map = vocab_merge(node_vocab_test, node_vocab_train, on=['node_type'], method=method)
    token_vocab_map = vocab_merge(token_vocab_test,token_vocab_train, on=['token'], method='outer')
    node_dict = map2dict(node_vocab_map)
    token_dict = map2dict(token_vocab_map)
    # rewrite node ids inside every test path before matching paths
    path_vocab_test = path_vocab_test.apply(lambda row: transform_paths(row, node_dict), axis=1)
    path_vocab_map = vocab_merge(path_vocab_test, path_vocab_train, on=['path'], method='outer')
    path_dict = map2dict(path_vocab_map)
    path_context_test = []
    # translate every test path_contexts file line by line
    for root, dirs, files in os.walk(test_path):
        for f_name in tqdm.tqdm(files):
            if 'path_contexts' in f_name:
                f_path = os.path.join(root, f_name)
                with open(f_path) as f:
                    f_list = f.readlines()
                for row in f_list:
                    path_list = row.split()
                    # NOTE(review): `id` shadows the builtin of the same name
                    id = path_list[0]
                    paths = path_list[1:]
                    new_paths = []
                    for path_item in paths:
                        new_path = transform_paths_content(path_item, token_dict, path_dict)
                        new_paths.append(new_path)
                    new_row = ' '.join([str(id)] + new_paths) + '\n'
                    path_context_test.append(new_row)
    if need_merge:
        # concatenate the (already id-consistent) train contexts after
        # the translated test contexts and write everything to out_path
        path_context_train = []
        for root, dirs, files in os.walk(train_path):
            for f_name in tqdm.tqdm(files):
                if 'path_contexts' in f_name:
                    f_path = os.path.join(root, f_name)
                    with open(f_path) as f:
                        f_list = f.readlines()
                    path_context_train = path_context_train + f_list
        path_context_train = path_context_test + path_context_train
        f = open(os.path.join(out_path, 'path_contexts.csv'), 'w')
        f.write(''.join(path_context_train))
        f.close()
        save_vocab(node_vocab_map, os.path.join(out_path, 'node_types.csv'),
                   columns=['node_type', 'id'])
        save_vocab(token_vocab_map, os.path.join(out_path, 'tokens.csv'),
                   columns=['token', 'id'])
        save_vocab(path_vocab_map, os.path.join(out_path, 'paths.csv'),
                   columns=['path', 'id'])
    else:
        # no merge: write translated test contexts to out_path and the
        # shared path/token vocabularies back into the train directory
        f = open(os.path.join(out_path, 'path_contexts.csv'), 'w')
        f.write(''.join(path_context_test))
        f.close()
        save_vocab(path_vocab_map, os.path.join(train_path, 'paths.csv'),
                   columns=['path', 'id'])
        save_vocab(token_vocab_map, os.path.join(train_path,'tokens.csv'),
                   columns=['token', 'id'])
main()
| miaoshenga/APathCS | scripts/share_vocab.py | share_vocab.py | py | 6,662 | python | en | code | 1 | github-code | 36 |
15746047617 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 4 11:43:31 2018
@author: MGGG
"""
#####For creating a spanning tree
import networkx as nx
import random
from equi_partition_tools import equi_split, almost_equi_split, check_delta_equi_split
from projection_tools import remove_edges_map
from walk_tools import propose_step, propose_Broder_step
from Broder_Wilson_algorithms import random_spanning_tree_wilson, random_spanning_tree
#################
'''
'''
def random_equi_partitions(graph, num_partitions, num_blocks, algorithm = "Wilson"):
    """Sample *num_partitions* exact equi-partitions of *graph*.

    Each attempt draws a uniform spanning tree (Wilson's or Broder's
    algorithm) and keeps it only when equi_split finds a cut into
    *num_blocks* equal blocks; the waiting time (trees tried since the
    last success) is printed for each partition found.
    """
    found_partitions = []
    attempts_since_last = 0
    while len(found_partitions) < num_partitions:
        attempts_since_last += 1
        if algorithm == "Broder":
            tree = random_spanning_tree(graph)
        if algorithm == "Wilson":
            tree = random_spanning_tree_wilson(graph)
        # equi_split returns None when no exact split exists for this tree
        edge_list = equi_split(tree, num_blocks)
        if edge_list is not None:
            found_partitions.append(remove_edges_map(graph, tree, edge_list))
            print(len(found_partitions), "waiting time:", attempts_since_last)
            attempts_since_last = 0
    return found_partitions
def random_equi_partition_fast(graph, log2_num_blocks):
    """Divide-and-conquer equi-partition into 2**log2_num_blocks blocks.

    The graph is bisected by a tree split, then every block is bisected
    again, until the target block count is reached.
    """
    blocks = random_equi_partitions(graph, 1, 2)[0]
    while len(blocks) < 2 ** log2_num_blocks:
        refined = []
        for piece in blocks:
            refined.extend(random_equi_partitions(piece, 1, 2)[0])
        blocks = refined
    return blocks
def random_equi_partitions_fast(graph, num_partitions, log2_num_blocks):
    """Collect *num_partitions* equi-partitions via the fast bisection routine."""
    return [random_equi_partition_fast(graph, log2_num_blocks)
            for _ in range(num_partitions)]
############ Almost equi-partitions:
def random_almost_equi_partitions(graph, num_partitions, num_blocks, delta):
    """Sample *num_partitions* delta-almost-equi-partitions of *graph*.

    Wilson trees are drawn until one admits an almost-equi split into
    *num_blocks* blocks within tolerance *delta*; the waiting time per
    success is printed.
    """
    found_partitions = []
    tries = 0
    while len(found_partitions) < num_partitions:
        tries += 1
        tree = random_spanning_tree_wilson(graph)
        # almost_equi_split returns None when no delta-split exists
        cut_edges = almost_equi_split(tree, num_blocks, delta)
        if cut_edges is not None:
            found_partitions.append(remove_edges_map(graph, tree, cut_edges))
            print(len(found_partitions), "waiting time:", tries)
            tries = 0
    return found_partitions
##
def random_almost_equi_partition_fast(graph, log2_num_blocks, delta):
    """Divide-and-conquer delta-almost-equi-partition into
    2**log2_num_blocks blocks.

    BUGFIX: the first bisection previously called
    random_equi_partitions (an EXACT split, silently ignoring *delta*),
    unlike every subsequent level and unlike the sibling
    random_almost_equi_partition_fast_with_walk; *delta* is now
    honoured at every level.
    """
    blocks = random_almost_equi_partitions(graph, 1, 2, delta)[0]
    while len(blocks) < 2 ** log2_num_blocks:
        subgraph_splits = []
        for subgraph in blocks:
            subgraph_splits += random_almost_equi_partitions(subgraph, 1, 2, delta)[0]
        blocks = subgraph_splits
    return blocks
def random_almost_equi_partitions_fast(graph, num_partitions, log2_num_blocks, delta):
    """Collect *num_partitions* delta-almost-equi-partitions via the
    fast bisection routine."""
    return [random_almost_equi_partition_fast(graph, log2_num_blocks, delta)
            for _ in range(num_partitions)]
############ Almost equi-partitions using sampling, then MH on trees
'''To be filled in -- this will draw a random spanning tree, and check if it can be
almost equi split (delta can be set to be zero...)
[Aside: You can clean up the code by putting delta =0 to be equi-partitions...]
then it will run a the tree walk, updated the labels dynamically, until it gets to a tree
that can be equi split...
'''
def random_almost_equi_partitions_with_walk(graph, num_partitions, num_blocks, delta, step = "Basis", jump_size = 50):
    """Sample delta-almost-equi-partitions by walking tree space.

    Starting from one Wilson tree, repeatedly take *jump_size* steps of
    the chosen tree walk ("Basis" or "Broder") and test whether the
    current tree admits a delta-almost-equi split; loops until
    *num_partitions* partitions have been collected.  The tree state is
    carried across iterations, so successive partitions are correlated.
    """
    found_partitions = []
    counter = 0
    tree = random_spanning_tree_wilson(graph)
    while len(found_partitions) < num_partitions:
        counter += 1
        # advance the Markov chain over spanning trees by jump_size steps
        if step == "Basis":
            for i in range(jump_size):
                tree, edge_to_remove, edge_to_add = propose_step(graph, tree)
        if step == "Broder":
            for i in range(jump_size):
                tree, edge_to_remove, edge_to_add = propose_Broder_step(graph, tree)
        edge_list = almost_equi_split(tree, num_blocks, delta)
        #If the almost equi split was not a delta split, then it returns none...
        if edge_list != None:
            blocks = remove_edges_map(graph, tree, edge_list)
            found_partitions.append(blocks)
            print(len(found_partitions), "waiting time:", counter)
            counter = 0
    return found_partitions
##
def random_almost_equi_partition_fast_with_walk(graph, log2_num_blocks, delta, step, jump_size = 50):
    """Divide-and-conquer with the tree-walk sampler: repeatedly bisect
    every block until 2**log2_num_blocks delta-almost-equal blocks remain."""
    blocks = random_almost_equi_partitions_with_walk(graph, 1, 2, delta, step, jump_size)[0]
    while len(blocks) < 2 ** log2_num_blocks:
        refined = []
        for piece in blocks:
            refined.extend(random_almost_equi_partitions_with_walk(piece, 1, 2, delta, step, jump_size)[0])
        blocks = refined
    return blocks
def random_almost_equi_partitions_fast_with_walk(graph, num_partitions, log2_num_blocks, delta, step = "Basis", jump_size = 50):
'''This builds up almost-equi partitions, it called random_almost_equi_partitoins_fast
which does a divide and consquer to build up partitions...
'''
found_partitions = []
while len(found_partitions) < num_partitions:
found_partitions.append(random_almost_equi_partition_fast_with_walk(graph, log2_num_blocks, delta, step, jump_size = 50))
return found_partitions
| gerrymandr/ent_walk | tree_sampling_tools.py | tree_sampling_tools.py | py | 6,931 | python | en | code | 1 | github-code | 36 |
31298546243 | from collections import deque
def solution(board):
n = len(board)
# dir & dx,dy : 0 1 2 3 동 남 서 북
visited = [[[False for _ in range(4)] for _ in range(len(board))]
for _ in range(len(board))]
dx = [0, 1, 0, -1]
dy = [1, 0, -1, 0]
def canGo(x, y, d):
x2 = x + dx[d]
y2 = y + dy[d]
if 0 <= x2 < n and 0 <= y2 < n:
if board[x][y] == 0 and board[x2][y2] == 0:
return True
return False
def canChangeDir(x, y, d, nextD):
x2 = x + dx[d]
y2 = y + dy[d]
nextX2 = x + dx[nextD]
nextY2 = y + dy[nextD]
if not (0 <= x2 < n and 0 <= y2 < n):
return False
if not (0 <= nextX2 < n and 0 <= nextY2 < n):
return False
if board[nextX2][nextY2] == 1:
return False
# 동, 서
if d == 0 or d == 2:
if board[nextX2][y2] == 1:
return False
# 남, 북
else:
if board[x2][nextY2] == 1:
return False
return True
visited[0][0][0] = True
visited[dx[0]][dy[0]][2] = True
x, y, d, t = 0, 0, 0, 0
dq = deque()
dq.append((x, y, d, t))
while dq:
(x, y, d, t) = dq.popleft()
if (x == n-1 and y == n-1) or (x + dx[d] == n-1 and y + dy[d] == n-1):
return t
# x,y 기준 dir 변경! +1 or -1
for i in [-1, 1]:
nextD = (d + i) % 4
if 0 <= x + dx[nextD] < n and 0 <= y + dy[nextD] < n:
if visited[x][y][nextD] == False and canChangeDir(x, y, d, nextD):
visited[x][y][nextD] = True
dq.append((x, y, nextD, t+1))
# x2,y2 기준 dir 변경! +1 or -1
for i in [-1, 1]:
counterD = (d+2) % 4
counterX = x + dx[d]
counterY = y + dy[d]
nextD = (counterD + i) % 4
if 0 <= counterX + dx[nextD] < n and 0 <= counterY + dy[nextD] < n:
if visited[counterX][counterY][nextD] == False and canChangeDir(counterX, counterY, counterD, nextD):
visited[counterX][counterY][nextD] = True
dq.append((counterX, counterY, nextD, t+1))
# 동서남북 이동!
for i in range(4):
nextX = x + dx[i]
nextY = y + dy[i]
if 0 <= nextX < n and 0 <= nextY < n:
if visited[nextX][nextY][d] == False and canGo(nextX, nextY, d):
visited[nextX][nextY][d] = True
dq.append((nextX, nextY, d, t+1))
print(solution([[0, 0, 0, 1, 1],
[0, 0, 0, 1, 0],
[0, 1, 0, 1, 1],
[1, 1, 0, 0, 1],
[0, 0, 0, 0, 0]]))
| shwjdgh34/algorithms-python | codingTest/2020kakao/블록이동하기.py | 블록이동하기.py | py | 2,786 | python | en | code | 2 | github-code | 36 |
20052985170 | import pandas as pd
from src.utils.path import DATA_ROOTPATH
# Composite OxCGRT indices (aggregate scores built from individual measures).
POLICY_INDICES = [
    'StringencyIndex',
    'GovernmentResponseIndex',
    'ContainmentHealthIndex',
    'EconomicSupportIndex'
]

# Individual OxCGRT policy indicators (containment C*, economic E*,
# health H*, miscellaneous M*), including the per-measure scope flags.
POLICY_MEASURES = [
    'C1_School_closing',
    'C1_Flag',
    'C2_Workplace_closing',
    'C2_Flag',
    'C3_Cancel_public_events',
    'C3_Flag',
    'C4_Restrictions_on_gatherings',
    'C4_Flag',
    'C5_Close_public_transport',
    'C5_Flag',
    'C6_Stay_at_home_requirements',
    'C6_Flag',
    'C7_Restrictions_on_internal_movement',
    'C7_Flag',
    'C8_International_travel_controls',
    'E1_Income_support',
    'E1_Flag',
    'E2_Debt_contract_relief',
    'E3_Fiscal_measures',
    'E4_International_support',
    'H1_Public_information_campaigns',
    'H1_Flag',
    'H2_Testing_policy',
    'H3_Contact_tracing',
    'H4_Emergency_investment_in_healthcare',
    'H5_Investment_in_vaccines',
    'M1_Wildcard'
]
def load_oxford_policy_measures():
    """Return a dict mapping each OxCGRT policy measure name to its DataFrame."""
    return _load_oxford(POLICY_MEASURES, "measure")
def load_oxford_policy_indices():
    """Return a dict mapping each OxCGRT composite index name to its DataFrame."""
    return _load_oxford(POLICY_INDICES, "index")
def _load_oxford(policies, suffix):
    """Load one cleaned Oxford CSV per policy, keyed by policy name.

    Each CSV's first column is used as a parsed-date index.
    """
    return {
        policy: pd.read_csv(
            DATA_ROOTPATH / f"clean/coronavirus_oxford_{policy}_{suffix}.csv",
            index_col=0,
            parse_dates=[0],
        )
        for policy in policies
    }
| Thopiax/pydemic | src/data/covid19/oxford.py | oxford.py | py | 1,337 | python | en | code | 1 | github-code | 36 |
16418467084 | #!/usr/bin/env python3
import re
import random
import time
import sys
from math import ceil
from datetime import datetime
def get_mem_usage():
    """Parse /proc/meminfo and return (mem_free, mem_total) in bytes.

    Either value is None if its line is not found.
    """
    pattern = re.compile(r'^(.*):[\s]*([\d]+)[\s]*(.B).*$')
    mem_total = None
    mem_free = None
    # /proc/meminfo reports sizes in kB.
    bytes_by_units = {'kB': 1024}
    # Bug fix: use a context manager so the file handle is always closed
    # (the original opened the file and left it to the garbage collector).
    with open('/proc/meminfo') as meminfo:
        for raw_line in meminfo:
            matches = pattern.fullmatch(raw_line.strip('\n'))
            if not matches:
                continue
            if 'MemTotal' == matches.group(1):
                mem_total = int(matches.group(2)) * bytes_by_units[matches.group(3)]
            elif 'MemFree' == matches.group(1):
                mem_free = int(matches.group(2)) * bytes_by_units[matches.group(3)]
    return mem_free, mem_total
def allocate_random_array(num_bytes):
    """Return a tuple of 64-bit random integers sized from num_bytes.

    NOTE(review): the element count is ceil(num_bytes / 64), presumably
    budgeting ~64 bytes of Python object overhead per int rather than the
    raw 8 bytes of payload -- confirm intent.
    """
    count = ceil(num_bytes / 64)
    values = []
    for _ in range(count):
        values.append(random.getrandbits(64))
    return tuple(values)
def allocate_ram_perc(target_pct):
    """Allocate ~1% of total RAM per step until system usage reaches target_pct.

    Returns the list of allocated chunks so the caller keeps them alive.
    """
    chunks = []
    started = time.time()
    while True:
        mem_free, mem_total = get_mem_usage()
        used_pct = int(((mem_total - mem_free) / mem_total) * 100)
        now = time.time()
        print('time={:.2f} total={} free={} alloc {}% target {}%'.format(now - started, mem_total, mem_free, used_pct, target_pct))
        if used_pct >= target_pct:
            return chunks
        chunks.append(allocate_random_array(int(mem_total / 100)))
def do_some_array_math(data):
    """Sum every allocated chunk and return the list of sums.

    Bug fix: the original returned a lazy generator expression that the
    caller never consumed, so no arithmetic was ever performed. Returning
    a list forces the work (and touches the allocated memory), which is
    the whole point of this stress tool.
    """
    return [sum(chunk) for chunk in data]
def spinning_cursor():
    """Infinite generator cycling through the spinner frames '|', '/', '-'."""
    frames = '|/-'
    idx = 0
    while True:
        yield frames[idx % len(frames)]
        idx += 1
def main():
    """Fill RAM to the percentage given as argv[1], then spin forever.

    While spinning, periodically runs do_some_array_math over the held
    allocations and animates a one-character cursor on stdout.
    """
    data = allocate_ram_perc(int(sys.argv[1]))
    sys.stdout.write("done ")
    sys.stdout.flush()
    spinner = spinning_cursor()
    while True:
        sys.stdout.write(next(spinner))
        sys.stdout.flush()
        do_some_array_math(data)
        time.sleep(0.25)
        # backspace so the next frame overwrites the current one
        sys.stdout.write('\b')
if __name__ == '__main__':
    main()
| arighi/opportunistic-memory-reclaim | stress-vm.py | stress-vm.py | py | 1,894 | python | en | code | 0 | github-code | 36 |
36549687329 | from setuptools import setup, find_packages
import rbnfrbnf
# Long description placeholder (a README could be loaded here instead).
readme = ""
setup(
    name='rbnfrbnf',
    version=rbnfrbnf.__version__,
    keywords='parser generation, LR parser, efficient, JIT',
    description='A best LR parser generator',
    long_description=readme,
    long_description_content_type='text/markdown',
    license='MIT',
    python_requires='>=3.6.0',
    url='https://github.com/thautwarm/rbnfrbnf',
    author='thautwarm, lfkdsk',
    author_email='twshere@outlook.com',
    packages=find_packages(),
    # entry_points={'console_scripts': ['yapypy=yapypy.cmd.cli:python_ex_cli']},
    install_requires=['rbnf'],
    # ship the bootstrap grammar files alongside the package code
    package_data={'rbnfrbnf': ['bootstrap/*.rbnf']},
    platforms='any',
    classifiers=[
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: Implementation :: CPython'
    ],
    zip_safe=False)
| thautwarm/rbnfrbnf | setup.py | setup.py | py | 922 | python | en | code | 4 | github-code | 36 |
2705037908 | import os
from PIL import ImageFont
def FindFonts():
    """Scan the Windows system font directory for regular TrueType fonts.

    Returns a dict mapping font family name -> font file name for every
    .ttf file whose style is "Regular".
    """
    fontdir = 'C:\\Windows\\Fonts'
    fonts = dict()
    for fname in os.listdir(fontdir):
        # Bug fix: the original used f.split('.')[1], which raises
        # IndexError for names without a dot and misclassifies names with
        # multiple dots; splitext is robust and we match case-insensitively.
        if os.path.splitext(fname)[1].lower() == '.ttf':
            tmp = ImageFont.truetype(os.path.join(fontdir, fname), 1)
            if tmp.font.style == "Regular":
                fonts[tmp.font.family] = fname
    return fonts
19815428666 | import json
import urllib.request
# Base URL of the deployed services; register and tracking listen on
# different ports of the same host.
url = 'http://ec2-35-158-239-16.eu-central-1.compute.amazonaws.com'
post_port = 8000
tracking_port = 8001
headers = {"Content-Type":"application/json"}
# Fixed test fixture: one parcel registration payload.
packet = {'sender_name' : 'Otto Hahn',
          'sender_street' : 'Veilchenweg 2324',
          'sender_zip' : '12345',
          'sender_city' : 'Hamburg',
          'receiver_name' : 'Lise Meitner',
          'receiver_street' : 'Amselstraße 7',
          'receiver_zip' : '01234',
          'receiver_city' : 'Berlin',
          'size' : 'big',
          'weight' : '200'}
def registerPacket():
    """POST the test packet to the register endpoint and return the parsed JSON.

    Exits the process with a non-zero status if the request fails.
    """
    registerRequest = urllib.request.Request(url + ':' + str(post_port) + '/register',
                            data=json.dumps(packet).encode('utf8'),
                            headers = {"Content-Type":"application/json"})
    try:
        response = urllib.request.urlopen(registerRequest)
        responseJson = json.loads(response.read().decode('utf8'))
        print('Register completed.')
        return responseJson
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt/SystemExit,
    # and exit(0) reported success to the shell even though the test failed.
    except Exception as err:
        print('Register went wrong:', err)
        exit(1)
def trackPacket(packet_id):
    """GET the status of a registered packet and return the parsed JSON.

    Exits the process with a non-zero status if the request fails.
    """
    trackingRequest = urllib.request.Request(url + ':' + str(tracking_port) + '/packetStatus/' + packet_id,
                            headers = {"Content-Type":"application/json"})
    try:
        response = urllib.request.urlopen(trackingRequest)
        responseJson = json.loads(response.read().decode('utf8'))
        print('Tracking completed.')
        return responseJson
    except Exception as e:
        # Bug fix: report the actual error and exit non-zero; the original
        # ignored `e` and exited with status 0 (success) on failure.
        print('Tracking went wrong:', e)
        exit(1)
if __name__ == '__main__':
    # End-to-end smoke test: register a packet, then query its status.
    packet_id = registerPacket()['packet_id']
    trackPacket(packet_id)
| CodingCamp2017/pakete | services/tests/test_rest_tracking_service.py | test_rest_tracking_service.py | py | 1,711 | python | en | code | 0 | github-code | 36 |
37735076181 | from __future__ import division
import os
import time
import math
from glob import glob
import tensorflow as tf
import numpy as np
from six.moves import xrange
from ops import *
from utils import *
class DCGAN(object):
    """DCGAN for 28x28 grayscale images (MNIST).

    The TF1 graph is built once in __init__ via build_model(); train()
    alternates Adam steps on the discriminator and generator variable sets.
    """
    def __init__(self, sess, input_size=28,
                 batch_size=64, sample_num=64, output_size=28,
                 z_dim=62, c_dim=1, dataset_name='default',
                 checkpoint_dir=None, sample_dir=None):
        """
        Args:
            sess: TensorFlow session
            input_size: The size of input image.
            batch_size: The size of batch. Should be specified before training.
            z_dim: (optional) Dimension of dim for Z. [100]
            c_dim: (optional) Dimension of image color. For grayscale input, set to 1. [1]
        """
        self.sess = sess
        self.batch_size = batch_size
        self.sample_num = sample_num
        self.input_size = input_size
        self.output_size = output_size
        self.z_dim = z_dim
        self.c_dim = c_dim
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir
        # global step counter; overwritten from the checkpoint on load()
        self.counter = 1
        self.build_model()
    def discriminator(self, image, reuse=False, train=True):
        """Conv discriminator; returns (sigmoid probability, raw logit)."""
        with tf.variable_scope("discriminator", reuse=reuse):
            #######################################################
            # TODO: Define discrminator network structure here. op.py
            # includes some basic layer functions for you to use.
            # Please use batch normalization layer after conv layer.
            # And use 'train' argument to indicate the mode of bn.
            #######################################################
            d = lrelu(conv2d(image, 32, 4, 4, 2, 2, name="d_conv1"), name="d_lrelu1")
            # self.d1_shape = d.shape
            d = lrelu(batch_norm(conv2d(d, 64, 4, 4, 2, 2, name="d_conv2"), train=train, name="d_bn2"), name="d_lrelu2")
            # self.d2_shape = d.shape
            d = lrelu(batch_norm(conv2d(d, 256, 4, 4, 2, 2, name="d_conv2_1"), train=train, name="d_2_1_bn"),
                      name="d_2_1_lrelu")
            d = tf.reshape(d, [self.batch_size, -1])
            # self.d2_flat_shape = d.shape
            d = lrelu(batch_norm(linear(d, 512, 'd_conv3'), train=train, name="d_bn3"), name="d_lrelu3")
            # self.d3_shape = d.shape
            out_logit = linear(d, 1, "d_fc4")
            out = tf.nn.sigmoid(out_logit)
            return out, out_logit
        #######################################################
        # end of your code
        #######################################################
    def generator(self, z, reuse=False, train=True):
        """Deconv generator mapping z -> (batch, 28, 28, 1) images."""
        with tf.variable_scope("generator", reuse=reuse):
            #######################################################
            # TODO: Define decoder network structure here. The size
            # of output should match the size of images. Image scale
            # in DCGAN is [-1, +1], so you need to add a tanh layer
            # before the output. Also use batch normalization layer
            # after deconv layer, and use 'train' argument to indicate
            # the mode of bn layer. Note that when sampling images
            # using trained model, you need to set train='False'.
            #######################################################
            g = tf.nn.relu(batch_norm(linear(z, 1024, "g_fc1"), train=train, name="g_bn1"))
            g = tf.nn.relu(batch_norm(linear(g, 128*7*7, "g_fc2"), train=train, name="g_bn2"))
            g = tf.reshape(g, [self.batch_size, 7, 7, 128])
            g = tf.nn.relu(batch_norm(deconv2d(g, [self.batch_size, 14, 14, 64], 4, 4, 2, 2, name="g_deconv3"),
                                      train=train, name="g_bn3"))
            # NOTE(review): output activation is sigmoid ([0, 1]) while the
            # training data is rescaled to [-1, 1] in train() -- confirm this
            # range mismatch is intentional.
            g = tf.nn.sigmoid(deconv2d(g, [self.batch_size, 28, 28, 1], 4, 4, 2, 2, name="g_deconv4"))
            return g
        #######################################################
        # end of your code
        #######################################################
    def build_model(self):
        """Define placeholders, D/G sub-graphs and the two loss terms."""
        #######################################################
        # TODO: In this build_model function, define inputs,
        # operations on inputs and loss of DCGAN. For input,
        # you need to define it as placeholders. Discriminator
        # loss has two parts: cross entropy for real images and
        # cross entropy for fake images generated by generator.
        # Set reuse=True for discriminator when calculating the
        # second cross entropy. Define two different loss terms
        # for discriminator and generator, and save them as
        # self.d_loss and self.g_loss respectively.
        #######################################################
        # Inputs
        self.x = tf.placeholder(tf.float32, shape=[self.batch_size, self.input_size, self.input_size, self.c_dim],
                                name='real_images')
        self.z = tf.placeholder(tf.float32, shape=[self.batch_size, self.z_dim], name='z')
        # Gaussian White noise for training.
        # NOTE(review): self.counter is read once here at graph-build time
        # (counter == 1), so the stddev is fixed rather than annealed over
        # training steps -- confirm intent.
        g_noise = tf.random_normal(shape=self.x.shape, mean=0, stddev= 1 / (self.counter**0.5))
        # Real data with Discriminator
        D_real, D_real_logits = self.discriminator(self.x + g_noise, train=True, reuse=False) #
        # Fake data from Generator with Discriminator
        G = self.generator(self.z, train=True, reuse=False)
        D_fake, D_fake_logits = self.discriminator(G, train=True, reuse=True)
        # Loss of Discriminator (real labels smoothed to 0.7)
        d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_real_logits,
                                                                             labels=tf.ones_like(D_real) * 0.7))
        d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits,
                                                                             labels=tf.zeros_like(D_fake)))
        self.d_loss = d_loss_real + d_loss_fake
        # Loss of Generator
        self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits,
                                                                             labels=tf.ones_like(D_fake)))
        # Test-time sampler (batch norm in inference mode)
        self.x_fake = self.generator(self.z, reuse=True, train=False)
        #######################################################
        # end of your code
        #######################################################
        # define var lists for generator and discriminator
        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver()
    def train(self, config):
        """Train the GAN on MNIST per `config` (epoch, learning_rate, ...)."""
        # create two optimizers for generator and discriminator,
        # and only update the corresponding variables.
        self.sample_z = np.random.uniform(-1, 1, size=(self.batch_size, self.z_dim))
        # discriminator learns 5x slower than the generator
        d_optim = tf.train.AdamOptimizer(config.learning_rate / 5, beta1=config.beta1) \
            .minimize(self.d_loss, var_list=self.d_vars)
        g_optim = tf.train.AdamOptimizer(config.learning_rate, beta1=config.beta1) \
            .minimize(self.g_loss, var_list=self.g_vars)
        try:
            self.sess.run(tf.global_variables_initializer())
        except:
            # fallback for very old TF versions
            tf.initialize_all_variables().run()
        # load MNIST data and rescale to [-1, 1]
        mnist = tf.contrib.learn.datasets.load_dataset("mnist")
        data = mnist.train.images
        data = data.astype(np.float32)
        data_len = data.shape[0]
        data = np.reshape(data, [-1, 28, 28, 1])
        data = data * 2.0 - 1.0
        could_load, checkpoint_counter = self.load(self.checkpoint_dir)
        if could_load:
            self.counter = checkpoint_counter
            print(" [*] Load SUCCESS")
        else:
            print(" [!] Load failed...")
        for epoch in xrange(config.epoch):
            batch_idxs = min(data_len, config.train_size) // config.batch_size
            for idx in xrange(0, batch_idxs):
                batch_images = data[idx * config.batch_size:(idx + 1) * config.batch_size, :]
                #######################################################
                # TODO: Train your model here. Sample hidden z from
                # standard uniform distribution. In each step, run g_optim
                # twice to make sure that d_loss does not go to zero.
                # print the loss terms at each training step to monitor
                # the training process. Print sample images every
                # config.print_step steps.You may use function
                # save_images in utils.py to save images.
                #######################################################
                batch_z = np.random.uniform(-1, 1, [self.batch_size, self.z_dim]).astype(np.float32)
                _, d_loss = self.sess.run([d_optim, self.d_loss], feed_dict={self.x: batch_images, self.z: batch_z})
                _, g_loss = self.sess.run([g_optim, self.g_loss], feed_dict={self.x: batch_images, self.z: batch_z})
                if np.mod(self.counter, 10) == 1:
                    print("Epoch: [%2d] [%4d/%4d], d_loss: %.8f, g_loss: %.8f" % (epoch, idx, batch_idxs, d_loss,
                                                                                  g_loss))
                #######################################################
                # end of your code
                #######################################################
                self.counter += 1
                # periodic checkpointing and sample-image dumps
                if np.mod(self.counter, 500) == 1:
                    self.save(config.checkpoint_dir, self.counter)
                if np.mod(self.counter, 100) == 0:
                    samples = self.sess.run(self.x_fake, feed_dict={self.z: self.sample_z})
                    save_images(samples, image_manifold_size(samples.shape[0]),
                                './{}/train_{:02d}_{:04d}.png'.format(config.sample_dir, epoch, idx + 1))
    @property
    def model_dir(self):
        """Checkpoint subdirectory name derived from dataset and sizes."""
        return "{}_{}_{}_{}".format(
            self.dataset_name, self.batch_size,
            self.output_size, self.output_size)
    def save(self, checkpoint_dir, step):
        """Write a checkpoint tagged with `step` under checkpoint_dir/model_dir."""
        model_name = "DCGAN.model"
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        if not os.path.exists(checkpoint_dir):
            os.makedirs(checkpoint_dir)
        self.saver.save(self.sess,
                        os.path.join(checkpoint_dir, model_name),
                        global_step=step)
    def load(self, checkpoint_dir):
        """Restore the latest checkpoint; returns (loaded_ok, step_counter)."""
        import re
        print(" [*] Reading checkpoints...")
        checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir)
        ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
        if ckpt and ckpt.model_checkpoint_path:
            ckpt_name = os.path.basename(ckpt.model_checkpoint_path)
            self.saver.restore(self.sess, os.path.join(checkpoint_dir, ckpt_name))
            # the step counter is the trailing number in the checkpoint name
            self.counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0))
            print(" [*] Success to read {}".format(ckpt_name))
            return True, self.counter
        else:
            print(" [*] Failed to find a checkpoint")
            return False, 0
| riemanli/UCLA_STATS_232A_Statistical_Modeling_and_Learning_in_Vision_and_Cognition | project4/gan/model_gan.py | model_gan.py | py | 11,459 | python | en | code | 0 | github-code | 36 |
35219043762 | from itertools import product
import sys
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import json
import re
sys.path.append('../../..')
from lib import excelUtils
from lib import httpUtils
from lib import textUtil
from lib.htmlEleUtils import getNodeText
from lib.htmlEleUtils import getInnerHtml
import math
# Accumulators shared by the scraping functions below:
# products1 holds one dict per scraped product, headers1 the column order.
products1 = []
headers1=[
    'link','Breadcrumb','Product Name','size/price','Category'
]
def addHeader(header, title):
    """Record a new spreadsheet column name, skipping blanks and duplicates."""
    is_new = title not in header
    if is_new and len(title) > 0:
        header.append(title)
def getProductInfo(url):
    """Scrape one product page and append its fields to the shared products1.

    Also grows the shared headers1 column list as new attribute/spec names
    appear on the page.
    """
    print(str(len(products1))+"====="+url)
    sope = httpUtils.getHtmlFromUrl(url)
    nav = sope.find("div", attrs={"class":"breadcrumbs"})
    pInfo={
        "link":url
    }
    pInfo["Breadcrumb"] = getNodeText(nav)
    pInfo["Product Name"] = getNodeText(sope.find("h1", attrs={"class":"page-title"}))
    # labelled attribute blocks (e.g. the SKU box): <strong> label, value div
    attrs = sope.find_all("div", attrs={"class":"product attribute sku"})
    for attr in attrs:
        title = getNodeText(attr.find("strong"))
        value = getNodeText(attr.find("div", attrs={"class":"value"}))
        pInfo[title] = value
        addHeader(headers1, title)
    # size options concatenated into one "<label>-<price>," cell
    sizes = sope.find_all("div", attrs={"class":"field choice admin__field admin__field-option required"})
    sizeStr = ""
    for size in sizes:
        option = size.find("input")
        sizeStr += getNodeText(size.find("label")) + "-" + option["price"]+","
    pInfo["size/price"] = sizeStr
    category = sope.find("div", attrs={"class":"product category"})
    pInfo["Category"] = getNodeText(category.find("div", attrs={"class":"value"}))
    # spec table: keep rows with exactly one <th> (name) and one <td> (value)
    trs = sope.find_all("tr")
    for tr in trs:
        tds = tr.find_all("td")
        ths = tr.find_all("th")
        if len(tds) == 1 and len(ths) == 1:
            title = getNodeText(ths[0])
            value = getNodeText(tds[0])
            pInfo[title] = value
            addHeader(headers1, title)
    products1.append(pInfo.copy())
def getProductList(url):
    """Scrape every product linked from one search-result page."""
    page = httpUtils.getHtmlFromUrl(url)
    for item in page.find_all("li", attrs={"class":"item product product-item"}):
        link = item.find("a")
        getProductInfo(link["href"])
# Walk result pages 1..8 of the "autoimmune" search, then export everything
# to a single-sheet workbook.
for pIndex in range(1, 9):
    getProductList('https://www.arp1.com/catalogsearch/result/index/?p='+str(pIndex)+'&product_list_limit=100&q=autoimmune')
# getProductInfo('https://www.arp1.com/aire-antibody-csb-pa001502ha01hu.html')
excelUtils.generateExcelMultipleSheet('arp1.xlsx', [
    {
        "name": 'arp1',
        "header": headers1 ,
        "data": products1
    }
])
34153413716 | import datetime
def add_records(obj, db):
    """Insert the race entries from a parsed event object into `db`.

    @param obj = JSON object: 'Name' (event name), 'Date' (dict with
                 'Year'/'Month'/'Day') plus one entry dict per finishing
                 position, keyed by that position.
    @param db = open SQLite connection whose `entry` table exists.

    Unlike the original, this no longer mutates the entry dicts inside
    `obj`, and stores the event date as an ISO string instead of relying
    on sqlite3's implicit date adapter (deprecated since Python 3.12).
    """
    entries = dict()
    for key, entry in obj.items():
        if str(key) != 'Name' and str(key) != 'Date':
            entries[int(key)] = entry
    event_name = str(obj['Name'])
    event_date = datetime.date(int(obj['Date']['Year']),
                               int(obj['Date']['Month']),
                               int(obj['Date']['Day'])).isoformat()
    rows = [(entry['Hash'], position, entry['Team Name'],
             entry['Number'], entry['Class'], entry['Year'],
             entry['Make'], entry['Model'], entry['Laps'],
             entry['Best Time'], entry['BS Penalty Laps'],
             entry['Black Flag Laps'], event_name, event_date)
            for position, entry in entries.items()]
    cursor = db.cursor()
    # executemany: one prepared statement for all rows
    cursor.executemany(
        "INSERT OR IGNORE INTO entry VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
        rows)
    db.commit()
def create_table(db):
    """Create the `entry` results table if it does not already exist."""
    ddl = """
    CREATE TABLE IF NOT EXISTS entry (
    hash BLOB PRIMARY KEY,
    position int,
    team_name text,
    vic_no int,
    class CHAR(1),
    year int,
    make text,
    model text,
    laps int,
    best_time text,
    bs_laps int,
    flag_laps int,
    event_name text,
    event_date text,
    UNIQUE (hash));
    """
    db.cursor().execute(ddl)
    db.commit()
| segfaultmagnet/sweet-db | util/sqlloader.py | sqlloader.py | py | 1,535 | python | en | code | 0 | github-code | 36 |
2336217386 | #!/usr/bin/env python
# coding: utf-8
import sys
import getopt
from classifier import l1c_classifier
import os
import warnings
warnings.filterwarnings('ignore')
def error():
    """Print the usage string and abort the process."""
    usage = 'main.py -i <inputdirectory> -o <outputdirectory>'
    print(usage)
    sys.exit()
def getRelevantDirectories(argv):
    """Parse -i/-o options and return absolute (input, output, model) dirs.

    Aborts via error()/sys.exit() on bad options, -h, an empty option
    value, or a missing required directory.
    """
    input_dir, output_dir, model_dir = '', '', ''
    try:
        opts, _ = getopt.getopt(argv, "hi:o:", ["ifile=", "ofile="])
    except getopt.GetoptError:
        error()
    for flag, value in opts:
        if flag == '-h' or not value:
            error()
        if flag in ("-i", "--ifile"):
            input_dir = value
        if flag in ("-o", "--ofile"):
            output_dir = value
    if not opts or not input_dir or not output_dir:
        print('main.py -i <inputdirectory> -o <outputdirectory>')
        sys.exit()
    # model_dir stays '' so abspath resolves it to the current directory
    return (os.path.abspath(input_dir),
            os.path.abspath(output_dir),
            os.path.abspath(model_dir))
def main():
    """CLI entry point: parse the directories, then run the L1C classifier."""
    dirs = getRelevantDirectories(sys.argv[1:])
    l1c_classifier(*dirs)
if __name__ == '__main__':
    main()
71749583783 | from z3 import substitute, Not, And
from collections import defaultdict
class Synthesizer:
    """Ranks interpolant clauses from a counterexample trace.

    Splits the given z3 clauses into "safe" candidates (hold only at the
    failing step) and "trigger" candidates (hold at the failing step and
    nowhere later), then scores them by lexical overlap with the property.
    """
    def __init__(self, clauses, model, all_vars, step, prop, hist, length):
        cond = hist.pc_ante[0]
        self.all_clauses = set(clauses)
        self.safe_clauses = set(clauses)
        self.trigger_clauses = set(clauses)
        self.model = model
        self.prop = prop
        self.all_vars = all_vars
        sbst = self.get_subs_for_var_to_next_var()
        # also consider the negation of each clause shifted to next-state vars
        self.safe_clauses.update(
            Not(substitute(interp, sbst)) for interp in list(self.safe_clauses)
        )
        # safe clauses: false before `step`, true at `step`
        for i in range(0, step):
            self.check_clauses_on_model_and_step(
                self.safe_clauses, i, negate=True, cond=cond
            )
        self.check_clauses_on_model_and_step(
            self.safe_clauses, step, negate=False, cond=cond
        )
        # trigger clauses: true at `step`, false at every later step
        self.check_clauses_on_model_and_step(
            self.trigger_clauses, step, negate=False, cond=cond
        )
        for i in range(step + 1, length - 1):
            self.check_clauses_on_model_and_step(
                self.trigger_clauses, i, negate=True, cond=cond
            )
        self.used_vars = defaultdict(int)
        self.used_funcs = defaultdict(int)
        self.used_consts = defaultdict(int)
        # innermost term of the last consequent of the property under repair
        inner_prop = self.prop.consequents[-1].children()[0].children()[0]
        self.setup_ranking_dicts(inner_prop)
        self.rank_clauses()
    def check_clauses_on_model_and_step(self, clauses, step, negate, cond):
        """Filter `clauses` in place, keeping those whose conjunction with
        `cond` evaluates (negate=False) or fails to evaluate (negate=True)
        to true on the counterexample model at the given step."""
        old_clauses = list(clauses)
        clauses.clear()
        for cur_clause in old_clauses:
            sub_clause = substitute(
                substitute(And(cur_clause, cond), self.get_subs_for_cex_step(step)),
                self.get_subs_for_next_cex_step(step + 1),
            )
            eval_bool = self.model.eval(sub_clause)
            if negate and not eval_bool or not negate and eval_bool:
                clauses.add(cur_clause)
    def get_top_interpolant(self):
        """Return ("trigger"|"safe", clause) for the highest-ranked clause.

        Falls back to an arbitrary clause (tagged "trigger") if ranking is
        empty.
        """
        try:
            top = sorted(self.ranking)[-1]
            top_interp = self.ranking[top]
            if top_interp in self.trigger_clauses:
                return "trigger", top_interp
            else:
                return "safe", top_interp
        except IndexError as e:
            return "trigger", list(self.all_clauses)[-1]
    # def generate_interpolants(self):
    #     print("TRIGGERS:")
    #     for tc in self.trigger_clauses:
    #         print(tc)
    #     print("SAFE:")
    #     for sc in self.safe_clauses:
    #         print(sc)
    #     ranking = self.rank_clauses()
    #     print(f"Prop: {self.prop.consequents}")
    #     pprint.pprint(ranking)
    #     for rank in reversed(sorted(ranking)):
    #         interp = ranking[rank]
    #         if interp in self.safe_clauses:
    #             yield "safe", interp
    #         else:
    #             yield "trigger", interp
    def setup_ranking_dicts(self, cur_prop_term):
        """Recursively count variables, function symbols and constants that
        occur in the property term; used later to score clause relevance."""
        if cur_prop_term.children():
            self.used_funcs[str(cur_prop_term.decl())] += 1
            for child in cur_prop_term.children():
                self.setup_ranking_dicts(child)
        else:
            str_term = str(cur_prop_term)
            if self.is_var(str_term):
                self.used_vars[str_term] += 1
            else:
                self.used_consts[str_term] += 1
    def is_var(self, term):
        """True if `term` names one of the known system variables."""
        for var in self.all_vars:
            if var.match_name(term):
                return True
        return False
    def rank_clauses(self):
        """Build self.ranking: score -> clause (trigger scores doubled)."""
        ranking = {}
        for clause in self.trigger_clauses:
            # prefer triggers
            ranking[self.get_rank(clause) * 2] = clause
        for clause in self.safe_clauses:
            ranking[self.get_rank(clause)] = clause
        self.ranking = ranking
    def get_rank(self, clause):
        """Score a clause: shorter is better, overlap with the property's
        variables (x20), functions (x15) and constants (x10) is rewarded."""
        str_clause = str(clause)
        rank = -1 * len(str_clause)  # prefer shorter
        for v in self.used_vars:
            if v in str_clause:
                rank += 20 * self.used_vars[v]
        for f in self.used_funcs:
            if f in str_clause:
                rank += 15 * self.used_funcs[f]
        for c in self.used_consts:
            if c in str_clause:
                rank += 10 * self.used_consts[c]
        return rank
    def get_subs_for_cex_step(self, step):
        """Substitution pairs mapping each variable to its step-`step` copy."""
        return [var.make_step_var_sub(step) for var in self.all_vars]
    def get_subs_for_next_cex_step(self, step):
        """Substitution pairs for the next-state copies at step `step`."""
        return [var.make_step_next_var_sub(step) for var in self.all_vars]
    def get_subs_for_var_to_next_var(self):
        """Substitution pairs mapping each current-state var to its next-state var."""
        return [var.make_cur_var_to_next_sub() for var in self.all_vars]
| cvick32/ConditionalHistory | src/synthesizer.py | synthesizer.py | py | 4,666 | python | en | code | 5 | github-code | 36 |
9395923393 | import oci
import paramiko
import json
def submit_hadoop_job(job_params):
    """Run `hadoop jar ...` on the cluster head node over SSH.

    job_params keys: jar_path, job_class, input_path, output_path.
    Returns the raw stdout bytes of the hadoop command.
    """
    ssh_client = paramiko.SSHClient()
    ssh_client.load_system_host_keys()

    instance_ip = "YOUR_INSTANCE_IP"
    private_key_path = "/path/to/your/private/key"

    # Connect to the Hadoop cluster using SSH
    ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh_key = paramiko.RSAKey(filename=private_key_path)
    ssh_client.connect(hostname=instance_ip, username="opc", pkey=ssh_key)

    try:
        # Submit the Hadoop job using SSH
        command = f'hadoop jar {job_params["jar_path"]} {job_params["job_class"]} {job_params["input_path"]} {job_params["output_path"]}'
        stdin, stdout, stderr = ssh_client.exec_command(command)
        # Bug fix: read the output while the channel is still open -- the
        # original closed the connection first and read afterwards, which
        # can truncate or drop the command output entirely.
        output = stdout.read()
    finally:
        # Close SSH connection even if exec_command/read raises
        ssh_client.close()

    return output
def handle_request(request):
    """Extract job parameters from the request dict and submit the job.

    Returns a success payload with the job's stdout, or {"error": ...}.
    """
    try:
        job_params = {key: request.get(key)
                      for key in ("jar_path", "job_class", "input_path", "output_path")}
        job_status = submit_hadoop_job(job_params)
        return {
            "message": "Hadoop job submitted successfully",
            "job_status": job_status.decode('utf-8')
        }
    except Exception as e:
        return {
            "error": str(e)
        }
def handler(ctx, data):
    """OCI Functions entry point: decode the JSON payload and dispatch."""
    try:
        payload = json.loads(data.decode('utf-8'))
        return handle_request(payload)
    except Exception as e:
        return {
            "error": str(e)
        }
| rclevenger-hm/oci-hadoop-job-automation | function/submit_hadoop_job.py | submit_hadoop_job.py | py | 1,588 | python | en | code | 0 | github-code | 36 |
13780268319 | import sys
from collections import deque
# BOJ 1926: BFS flood-fill over an n x m grid of 0/1 cells.
# Prints the number of connected components of 1s, then the largest size.
n, m = map(int, sys.stdin.readline().strip().split())
paper = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(n)]
visited = [[0 for _ in range(m)] for _ in range(n)]
max_pic = 0
pic_cnt = 0
for i in range(n):
    for j in range(m):
        if not visited[i][j] and paper[i][j] == 1:
            # BFS one picture starting at (i, j), counting its cells
            q = deque([[i, j]])
            visited[i][j] = 1
            cnt = 0
            while q:
                x, y = q.popleft()
                cnt += 1
                for n_X, n_y in [[x - 1, y], [x + 1, y], [x, y - 1], [x, y + 1]]:
                    if 0<= n_X < n and 0<= n_y < m:
                        if visited[n_X][n_y] == 0 and paper[n_X][n_y] == 1:
                            visited[n_X][n_y] = 1
                            q.append([n_X, n_y])
            max_pic = max(max_pic, cnt)
            pic_cnt += 1
print(pic_cnt)
print(max_pic)
| Yangseyeon/BOJ | 03. Gold/1926.py | 1926.py | py | 922 | python | en | code | 0 | github-code | 36 |
34016955487 | import torch
import torch.nn as nn
import transformers
class BertForSeqClf(nn.Module):
    """BERT encoder with a dropout + linear head for sequence classification."""
    def __init__(self, pretrained_model_name: str, num_labels: int):
        """Load pretrained BERT weights and build the classification head.

        Args:
            pretrained_model_name: HuggingFace model identifier or local path.
            num_labels: number of target classes.
        """
        super().__init__()
        # config is loaded separately to size the classifier and dropout
        config = transformers.BertConfig.from_pretrained(pretrained_model_name,
                                                         num_labels=num_labels)
        self.num_labels = num_labels
        self.bert = transformers.BertModel.from_pretrained(pretrained_model_name)
        self.classifier = nn.Linear(config.hidden_size,
                                    num_labels)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids: torch.LongTensor,
                attention_mask: torch.LongTensor,
                token_type_ids: torch.LongTensor) -> torch.Tensor:
        """Return unnormalized class logits of shape (batch, num_labels)."""
        outputs = self.bert(input_ids,
                            attention_mask=attention_mask,
                            token_type_ids=token_type_ids)
        # outputs[1] is the pooled [CLS] representation
        pooled_output = outputs[1]
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits
| ayeffkay/Distillation | bert.py | bert.py | py | 1,092 | python | en | code | 1 | github-code | 36 |
29050704231 | from flask.ext.wtf import Form
from wtforms import StringField, BooleanField
from wtforms.validators import DataRequired
class LoginForm(Form):
    """OpenID login form; CSRF protection is deliberately disabled."""
    openid = StringField('openid', validators=[DataRequired()])
    remember_me = BooleanField('remember_me', default=False)
    def __init__(self, *args, **kwargs):
        # force CSRF off regardless of what the caller passes
        kwargs['csrf_enabled'] = False
        super(LoginForm, self).__init__(*args, **kwargs)
class SearchForm(Form):
    """Person search form (all fields optional); CSRF is disabled."""
    firstName = StringField('firstName')
    surname = StringField('surname')
    dob = StringField('dob')
    identifier = StringField('identifier')
    def __init__(self, *args, **kwargs):
        # force CSRF off regardless of what the caller passes
        kwargs['csrf_enabled'] = False
        super(SearchForm, self).__init__(*args, **kwargs)
| AndreasThinks/ASB_DB | app/forms.py | forms.py | py | 722 | python | en | code | 0 | github-code | 36 |
26473361587 | import cv2
import os
import matplotlib.pyplot as plt
import numpy as np
def show_image(img):
    """Display an image array with matplotlib (blocks until closed)."""
    plt.imshow(img)
    plt.show()
def show_class(idx):
    """Visualize where class `idx` occurs in the module-level `img`.

    NOTE(review): the comparison broadcasts over the channel axis, so
    `celing` is an HxWx3 float array rather than a single-channel mask --
    fine for display, confirm if a 2-D mask was intended.
    """
    celing = (img[:, :] == [idx, idx, idx]) * 1.0
    plt.imshow(celing)
    plt.show()
# input image in order calibration
INPUT_DIR = 'data/seg'
PATH = os.path.join(os.getcwd(), INPUT_DIR)
images = os.listdir(PATH)
# load the first segmentation map and convert BGR -> RGB
img = cv2.imread(os.path.join(PATH, images[0]))
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# class count inferred from the pixel value range (labels assumed contiguous)
mi, ma = np.min(img), np.max(img)
n_classes = ma - mi + 1
print('Class:{}'.format(n_classes))
# get shape of image
W, H, _ = img.shape
# preview the first six class masks
show_class(0)
show_class(1)
show_class(2)
show_class(3)
show_class(4)
show_class(5)
# celing = (img[:,:]==[187, 188,67])*1.0
# show_image(celing)
# celing [187, 188,67]
plt.imshow(img)
plt.show()
# mi, ma = np.min(img), np.max(img)
# n_classes = ma - mi + 1
| Naxalov/Seg2Dataset | main.py | main.py | py | 858 | python | en | code | 0 | github-code | 36 |
def verificaTermos(item):
    """Zero-pad a single-digit time component ('5' -> '05'); others unchanged."""
    if len(item) == 1:
        item = "0" + item
    return item
# URI 2152: read N events of "hour minute flag" and print a formatted
# door-open/door-close message for each one (output strings in Portuguese).
vezes = int(input())
cont = 0
while cont < vezes:
    entrada = input().split()
    hora = entrada[0]
    minuto = entrada[1]
    ocorrencia = entrada[2]
    # zero-pad hour and minute to two digits
    hora = verificaTermos(hora)
    minuto = verificaTermos(minuto)
    # flag "1" means the door opened, anything else means it closed
    if ocorrencia == str(1):
        print(hora + ":" + minuto + " - A porta abriu!")
    else:
        print(hora + ":" + minuto + " - A porta fechou!")
    cont += 1
| MarceloBritoWD/URI-online-judge-responses | Iniciante/2152.py | 2152.py | py | 477 | python | pt | code | 2 | github-code | 36 |
19989928968 | # Import Python packages
import json
import os
# Import Bottle
import bottle
from bottle import Bottle, request, Response, run, static_file
import requests
from truckpad.bottle.cors import CorsPlugin, enable_cors
# Define dirs
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# built frontend assets are served out of dist/
STATIC_DIR = os.path.join(BASE_DIR, 'dist')
# App Config
bottle.debug(False)
app = Bottle()
@app.get('/')
def show_index():
    """Show Index page (the frontend entry point from dist/)."""
    return static_file('index.html', root=STATIC_DIR)
@app.post('/comment')
def post_comment():
    """Forward a comment (and optional email) to the Kevalin events API.

    NOTE(review): request.json is None when the body isn't JSON, which
    would raise here and surface as a 500 -- confirm that's acceptable.
    """
    comment = request.json.get('comment')
    email = request.json.get('email')
    payload = {
        'eventData': {
            'eventType': 'Comment',
        }
    }
    # only include the optional fields when they were provided
    if comment:
        payload['eventData']['comment'] = comment
    if email:
        payload['eventData']['email'] = email
    res = requests.post('https://api.kevalin.io/v0/collections/SERVICE.R4R.COMMENTS/events',
                        headers={'Authorization': f'X-API-Key {os.getenv("KEVALIN_API_KEY")}'},
                        json=payload)
    # an upstream failure propagates as an exception (HTTP 500 to the client)
    res.raise_for_status()
# Static files route
@app.get('/<filename:path>')
def get_static_files(filename):
    """Serve any other path as a static asset from the dist/ directory."""
    return static_file(filename, root=STATIC_DIR)
app.install(CorsPlugin(origins=['*.rax.io']))
# Run server
run(app, server='auto', host='0.0.0.0', port=8080, reloader=True)
| IDPLAT/tes-engagement | tes-engagement/app.py | app.py | py | 1,373 | python | en | code | 1 | github-code | 36 |
23252374328 | """Convenience functions go here"""
import discord
# region Constants
is_modified = False # Set this to True if you modify the code for your own use.
GITHUB_URL = "https://github.com/Mehehehehe82/BotInnit"
postmessage = f"It's open source, check it out on github! {GITHUB_URL}"
# endregion
# region Functions
async def prettymsg(ctx,
        msg: str = "Sample text be like bruh",
        Header: str = "Hey!",
        RawText: str = "", # Good for mentioning people
        ThumbnailURI: str = "https://cdn.discordapp.com/avatars/783773656227512331/e6db612b2f469225fda5522f3e915d7a.webp",
        colorHex: int = 0xb86767
    ):
    '''Build and send a Discord embed with customizable defaults.

    ctx          -- command context used to send the message
    msg          -- embed body text
    Header       -- embed title
    RawText      -- plain message text outside the embed (useful for mentions)
    ThumbnailURI -- URL shown as the embed thumbnail
    colorHex     -- embed accent color
    '''
    embed=discord.Embed(title=Header, description=msg, color=colorHex)
    embed.set_thumbnail(url=ThumbnailURI)
    # Footer credits the original author; is_modified flags forks (see module top).
    if is_modified:
        embed.set_footer(text=f"Based on the Discord bot created by hyperboid. {postmessage}")
    else:
        embed.set_footer(text=f"Discord bot created by hyperboid. {postmessage}")
    await ctx.send(RawText, embed=embed)
# endregion
| polypoyo/DiscordBot | conv.py | conv.py | py | 999 | python | en | code | 0 | github-code | 36 |
7369258562 | from typing import Union
import pandas as pd
import numpy as np
from Functions.date_parser import parse_dates
from Functions.data_reader import read_data
def get_historical_volatility(main_df: pd.DataFrame,
                              period_start: Union[str],
                              period_end: Union[str, None],
                              lambda_factor: Union[None, float]):
    """Compute annualized historical volatility over [period_start, period_end].

    Returns a rounded float on success, or an "ERROR: ..." string on bad
    input (callers distinguish by type).  When lambda_factor is given, an
    exponentially-weighted (EWMA-style) volatility is computed instead of
    the equally-weighted one.  Assumes main_df has 'Date' and 'Price'
    columns — TODO confirm against read_data's output schema.
    """
    # Return error when start date is None
    if period_start is None:
        rate_of_return = "ERROR: Start Date is required"
        return rate_of_return
    # using defaults where passed value is none
    period_end = 'Latest' if period_end is None else period_end
    # Get parsed start and end dates; parse_dates signals failure by
    # returning a string (error message) or a null start date.
    start_date, end_date = parse_dates(period_start, period_end,
                                       main_df)
    if isinstance(start_date, str):
        volatility_val = start_date
        return volatility_val
    if pd.isnull(start_date):
        volatility_val = "ERROR: No data found prior to start date"
        return volatility_val
    # Filter data to the requested window (inclusive on both ends)
    main_df = main_df.set_index('Date')
    main_df = main_df[(main_df.index >= start_date) &
                      (main_df.index <= end_date)]
    # Order by date
    main_df = main_df.sort_values(by='Date')
    # Compute performance as log returns; the first row is NaN and dropped
    main_df['Performance'] = np.log(main_df.Price / main_df.
                                    Price.shift())
    main_df = main_df[1:]
    # Equally-weighted volatility (no lambda): annualized with 252 trading days
    if lambda_factor is None:
        main_df['Vol'] = (main_df['Performance'] -
                          main_df['Performance'].mean()) ** 2
        volatility_val = np.sqrt(((main_df['Vol'].sum() * 252)
                                  / main_df.shape[0]))
        volatility_val = np.round(volatility_val, 6)
    # Exponentially-weighted volatility: newest observation gets weight
    # (1 - lambda), older ones decay geometrically
    else:
        main_df = main_df.sort_values(by='Date', ascending=False)
        main_df['Weight'] = (1 - lambda_factor) * lambda_factor \
                            ** np.arange(len(main_df))
        volatility_val = np.round(
            np.sqrt(
                ((main_df['Weight'] * main_df['Performance'] ** 2).sum()
                 * 252) / (main_df['Weight'].sum())
            ), 6
        )
    return volatility_val
def historical_volatility(maven_asset_code: str, price_type: str,
                          currency: str, period_start: list,
                          period_end: list,
                          lambda_factor: Union[None, float] = None
                          ) -> dict:
    """Compute historical volatility for each (start, end) period pair.

    :param lambda_factor: optional EWMA decay factor passed through to
        get_historical_volatility
    :param currency: not supported; any non-None value raises
    :param maven_asset_code: Asset Code str
    :param price_type: Price Type str
    :param period_start: list of period-start specifiers (e.g. '1Y', '95D')
    :param period_end: list of period-end specifiers (same length)
    :return: {'Volatility': [value-or-None per period pair]}
    """
    # NotImplementedError for currency (will be removed later)
    if currency is not None:
        raise NotImplementedError('ERROR: Currency is not supported')
    # read data
    main_df = read_data(maven_asset_code, price_type)
    # Verify period_start and period_end have equal length without
    # consuming either list (iter/next idiom).
    list_it = iter([period_start, period_end])
    list_lens = len(next(list_it))
    if not all(len(l) == list_lens for l in list_it):
        raise ValueError('ERROR: Ensure all passed list are '
                         'of same length!')
    volatility_list = []
    for start_date, end_date in zip(period_start, period_end):
        try:
            volatility_val = get_historical_volatility(main_df,
                                                       start_date,
                                                       end_date,
                                                       lambda_factor)
            volatility_list.append(volatility_val)
        except (Exception,):
            # Deliberate best-effort: a failing period yields None rather
            # than aborting the whole batch.
            volatility_list.append(None)
    result_dict = {'Volatility': volatility_list}
    return result_dict
# #
# mvn_historical_volatility (SPY US, PR, , [1Y,2W,6M,3Q,95D,Inception],,0.9)
# maven_asset_code = 'SPY US'
# price_type = 'PR'
# period_start = ['1Y','2W','6M','3Q','95D','Inception']
# period_end = [None, None, None, None, None, None]
# # lambda_factor = 0.9
# #
# # result = historical_volatility(maven_asset_code, price_type, None,period_start, period_end, lambda_factor)
# # print(result)
# #
# result = historical_volatility(maven_asset_code, price_type, None,period_start, period_end)
# print(result) | fhashim/time_series_test | Functions/mvn_historical_volatility.py | mvn_historical_volatility.py | py | 4,342 | python | en | code | 0 | github-code | 36 |
37649351161 | '''
Recommendation Systems: the ML algorithm will learn our likes and
recommend what option would be best for us. These learning algorithms
are getting accurate as time passes
Types:
1)Collaborative Systems: predict what you like based on other similar
users have liked in the past
2)Content-Based: predict what you like based on what you have liked
in the past
eg:Netflix combines both approaches to predict your likes more accurately
APP: this script reads in a dataset of movie ratings and recommends new
movies for users
Dependencies: numpy, scipy, lightfm
lightfm: helps in performing bunch of recommendation algos,a great lib
to start with for building recommendation systems
'''
import numpy as np
#lets use the 'fetch_movielens' method from submodule datasets
#try diff methods to obtain diff results and compare the accuracy
from lightfm.datasets import fetch_movielens
from lightfm import LightFM
#fetch the dataset and format it
#we will be using MovieLens dataset(available on Kaggle)
# Fetch the MovieLens dataset, keeping only ratings of 4.0 and above;
# fetch_movielens returns a dict with sparse train/test interaction matrices.
data = fetch_movielens(min_rating=4.0)
# Inspect the interaction matrices' shapes/density.
print(repr(data['train']))
print(repr(data['test']))
# WARP = Weighted Approximate-Rank Pairwise loss, a ranking loss that is
# minimized during training.
model = LightFM(loss='warp')
# epochs = number of passes over the data; num_threads = parallel workers
model.fit(data['train'], epochs=30, num_threads=2)
def sample_recommendation(model, data, user_ids):
    """Print, for each user id, their top known-liked movies and the top
    movies the trained LightFM model recommends.

    model    -- fitted LightFM model
    data     -- fetch_movielens dict ('train' interaction matrix, 'item_labels')
    user_ids -- iterable of user row indices to report on
    """
    # number of users and movies from the interaction matrix shape
    n_users, n_items = data['train'].shape
    for user_id in user_ids:
        # csr = compressed sparse row; .indices of the user's row are the
        # item ids the user has interacted with (known positives)
        known_positives = data['item_labels'][data['train'].tocsr()[user_id].indices]
        # score every item for this user
        scores = model.predict(user_id, np.arange(n_items))
        # negate scores so argsort yields descending order
        top_items = data['item_labels'][np.argsort(-scores)]
        # report top-3 known positives and top-3 recommendations
        print("Users %s" % user_id)
        print(" Known Positives:")
        for x in known_positives[:3]:
            print(" %s" % x)
        print(" Recommended:")
        for x in top_items[:3]:
            print(" %s" %x)
'''
print('Enter 3 random ids:')
idList = []
for i in range(3):
idList = int(input('ENTER:'))
'''
#enter in 3 random userids
sample_recommendation(model, data, [4, 45, 89])
'''
def main():
input('Enter 3 random user ids:')
idList = []
for i in range(3):
idList = int(input('ENTER:'))
sample_recommendation(model, data, idList)
if __name__ == '__main__':
main()
''' | ketanp05/MovieRecommendation | app.py | app.py | py | 3,004 | python | en | code | 0 | github-code | 36 |
73118984424 | import socket
def encrypt_word(word):
    # Placeholder "encryption": returns the word unchanged. The visible code
    # performs no transformation; real encryption is presumably TODO.
    return word
def send_encrypted_words(words):
    """Send the "encrypted" words to the local server as one comma-joined
    message and return the server's comma-split reply as a list.

    Connects to localhost:12345 over TCP; the socket is closed by the
    `with` block.  NOTE(review): a single recv(1024) assumes the whole
    reply fits in one segment — confirm the protocol guarantees this.
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server_socket:
        server_socket.connect(('localhost', 12345))
        encrypted_words = [encrypt_word(word) for word in words]
        request = ','.join(encrypted_words).encode()
        server_socket.send(request)
        response = server_socket.recv(1024).decode().split(',')
        return response
if __name__ == '__main__':
    # Demo run: send three sample words and print the server's reply.
    words = ['apple', 'banana', 'cherry']
    decrypted_words = send_encrypted_words(words)
    print('Decrypted words:', decrypted_words)
| IlyaOrlov/PythonCourse2.0_September23 | Practice/tgridneva/Practica 11client.py | Practica 11client.py | py | 630 | python | en | code | 2 | github-code | 36 |
72673672743 | from itertools import combinations
import numpy as np
import copy
def converse_to_canonical(var_num, non_neg_rest_num, non_pos_rest_num, eq_rest_num, positive_indexes, func_coefs,
                          rest_coefs, rest_b):
    """Convert a general LP problem to canonical form (A x = b, x >= 0).

    Constraints are assumed ordered: >=-constraints first, then
    <=-constraints, then equalities.  Free (sign-unrestricted) variables
    are split into a difference of two non-negative ones, and slack
    variables are added to turn inequalities into equalities.

    Returns (new_matrix, transform_matrix, new_rest_b, new_func_coefs)
    where transform_matrix maps canonical-space vectors back to the
    original variables.
    """
    #############################
    # initial bookkeeping: copy inputs so the caller's data is untouched
    #############################
    start_vars_count = var_num
    new_rest_coefs = copy.deepcopy(rest_coefs)
    new_rest_b = rest_b.copy()
    new_func_coefs = func_coefs.copy()
    # turn >= constraints into <= by negating both sides
    for i in range(non_neg_rest_num):
        for j in range(len(new_rest_coefs[i])):
            new_rest_coefs[i][j] *= -1
        new_rest_b[i] *= -1
    # number of slack variables introduced when inequalities become equalities
    new_neq_vars_count = non_neg_rest_num + non_pos_rest_num
    for i in range(new_neq_vars_count):
        new_func_coefs.append(0)
    new_matrix = np.matrix(new_rest_coefs)
    # append an identity block (slacks) to the right of the inequality rows;
    # equality rows get zeros in those columns
    right_matrix = np.eye(new_neq_vars_count)
    if eq_rest_num > 0:
        right_matrix = np.vstack((right_matrix, np.zeros((eq_rest_num, new_neq_vars_count))))
    new_matrix = np.hstack((new_matrix, right_matrix))
    # substitute the sign-unrestricted (free) variables
    transform_matrix = []
    additional_matrix = []
    columns_deleted = 0
    for i in range(start_vars_count):
        if i in positive_indexes:
            # already non-negative: identity column in the back-transform
            new_column = np.zeros(start_vars_count)
            new_column[i] = 1
            transform_matrix.append(new_column.tolist())
        else:
            # replace the free variable with the difference of two new
            # non-negative variables (x = x+ - x-), appended on the right
            new_vars = np.zeros((new_matrix.shape[0], 2))
            for j in range(new_matrix.shape[0]):
                new_vars[j][0] = new_matrix.item((j, i - columns_deleted))
                new_vars[j][1] = -new_matrix.item((j, i - columns_deleted))
            new_matrix = np.delete(new_matrix, i - columns_deleted, 1)
            new_matrix = np.hstack((new_matrix, new_vars))
            new_func_coefs.append(new_func_coefs[i - columns_deleted])
            new_func_coefs.append(-new_func_coefs[i - columns_deleted])
            new_func_coefs.pop(i - columns_deleted)
            columns_deleted += 1
            # build the corresponding columns of the back-transform matrix
            new_column = np.zeros(start_vars_count)
            new_column[i] = 1
            additional_matrix.append(new_column.tolist())
            new_column[i] = -1
            additional_matrix.append(new_column.tolist())
    # slack variables map to zero in the original space
    for i in range(new_neq_vars_count):
        transform_matrix.append(np.zeros(start_vars_count).tolist())
    for i in additional_matrix:
        transform_matrix.append(i)
    transform_matrix = np.matrix(transform_matrix).transpose()
    return new_matrix, transform_matrix, new_rest_b, new_func_coefs
def find_all_matrices(A, M, N):
    """Enumerate every non-singular M x M column submatrix of A.

    Iterates over all combinations of M column indices out of N and keeps
    those whose determinant is non-negligible (|det| > 1e-7).

    :param A: constraint matrix in canonical form
    :param M: number of rows in A
    :param N: number of columns in A
    :return: (matrices, indexes) — matrices[k] is the submatrix built from
        the columns listed in indexes[k], in the same column order
    """
    full = np.matrix(A)
    matrices = []
    indexes = []
    for cols in combinations(range(N), M):
        candidate = full[:, cols]
        # Skip (near-)singular bases: they define no basic solution.
        if abs(np.linalg.det(candidate)) <= 1e-7:
            continue
        matrices.append(candidate)
        indexes.append(cols)
    return matrices, indexes
def find_all_vectors(A, b, M, N):
    """Find every basic feasible (support) vector of the system A x = b.

    Each non-singular column basis returned by ``find_all_matrices`` yields
    one candidate solution; candidates with negative components or
    absurdly large values (numerical blow-up) are discarded.

    :param A: constraint matrix in canonical form
    :param b: right-hand-side vector in canonical form
    :param M: number of rows in A
    :param N: number of columns in A
    :return: list of length-N basic feasible vectors
    """
    vectors = []
    if M >= N:
        return []
    basis_matrices, basis_columns = find_all_matrices(A, M, N)
    for matrix, columns in zip(basis_matrices, basis_columns):
        solution = np.linalg.solve(matrix, b)
        solution[abs(solution) < 1e-15] = 0   # clamp numeric noise to zero
        if len(solution[solution < 0]) != 0:  # infeasible: negative component
            continue
        if len(solution[solution > 1e+15]) != 0:  # numerically unstable basis
            continue
        vector = [0] * N
        for pos, col in enumerate(columns):
            vector[col] = solution[pos]
        vectors.append(vector)
    return vectors
def EnumMethod(A, b, c, M, N, transform, max=False):
    """
    Extreme-point enumeration method for linear programming.

    :param M: number of constraints
    :param N: number of variables
    :param A: constraint coefficient matrix (canonical form)
    :param b: right-hand-side vector of the constraints
    :param c: objective function coefficient vector
    :param transform: matrix mapping canonical-space vectors back to the
        original problem's variables (used for logging and the result)
    :param max: True to solve a maximization problem instead of minimization
        (implemented by negating c and the final result)
    :return: the optimal vector in original-problem coordinates, or [] if
        no basic feasible vector exists

    Side effect: writes a step-by-step log to 'EnumMethod.txt'.
    NOTE(review): the parameter `max` and local `min` shadow builtins, and
    the file handle leaks on the early `return []` path.
    """
    mult = -1 if max else 1
    if max:
        # maximize f <=> minimize -f
        for i in range(len(c)):
            c[i] *= mult
    f = open('EnumMethod.txt', 'w')
    vectors = find_all_vectors(A, b, M, N)
    if len(vectors) == 0:
        return []
    best_vector = vectors[0]
    min = np.dot(best_vector, c)
    i = 1
    min_i = 1
    # Evaluate the objective at every basic feasible vector, logging each
    # candidate (mapped back to original coordinates) as we go.
    for tmp in vectors:
        current_val = np.dot(tmp, c)
        f.write("step " + str(i) + ":\n")
        f.writelines(map(lambda x: str(x) + ' ', np.dot(transform, np.matrix(tmp).transpose()).transpose().tolist()[0]))
        f.write("\nf(X_" + str(i) + ") =" + str(current_val) + '\n')
        if current_val < min:
            min = current_val
            best_vector = tmp
            min_i = i
        i += 1
    f.write("\nbest vector on step " + str(min_i) + ":\n")
    f.writelines(
        map(lambda x: str(x) + ' ', np.dot(transform, np.matrix(best_vector).transpose()).transpose().tolist()[0]))
    f.write("\n\nsolution:")
    f.writelines(map(lambda y: str(y) + ' ', np.dot(transform, best_vector)))
    f.write("\nf(X) = " + str(np.dot(c, best_vector)))
    f.close()
    # Map back to original coordinates; *mult undoes the sign flip for max.
    return (np.array(np.dot(transform, best_vector)) * mult).tolist()
def print_canon_task_human_readable(A, c, b):
    """
    Print a canonical LP problem on screen in a human-readable format:
    the objective "f(X) = ... -> min" followed by one "... = b_i" line
    per constraint.  Coefficients with magnitude below 1e-13 are treated
    as zero and omitted.

    :param A: constraint matrix
    :param c: objective function coefficient vector
    :param b: right-hand-side vector of the constraints
    """
    new_A = np.matrix(A)
    new_c = np.matrix(c)
    new_b = np.matrix(b)
    s = "f(X) = "
    # Objective row: sign is printed explicitly except for the first term.
    for i in range(new_c.shape[1]):
        if abs(new_c.item(i)) > 1e-13:
            if new_c.item(i) > 0 and i != 0:
                s += "+ "
            elif i != 0:
                s += "- "
            s += str(abs(new_c.item(i))) + "x_" + str(i + 1) + ' '
    s += "-> min\n"
    # One line per constraint, same sign convention as the objective.
    for i in range(new_A.shape[0]):
        for j in range(new_A.shape[1]):
            if abs(new_A.item(i, j)) > 1e-13:
                if new_A.item(i, j) > 0 and j != 0:
                    s += "+ "
                elif j != 0:
                    s += "- "
                s += str(abs(new_A.item(i, j))) + "x_" + str(j + 1) + ' '
        s += "= " + str(new_b.item(i)) + '\n'
    print(s)
def convertToDual(var_num, non_neg_rest_num, non_pos_rest_num, eq_rest_num, positive_indexes, func_coefs,
                  rest_coefs, rest_b):
    """Build the dual of an LP problem.

    Constraint ordering convention matches converse_to_canonical:
    >=-constraints first, then <=-constraints, then equalities.
    Returns the dual problem in the same tuple layout as the input:
    (var_num, non_neg_rest_num, non_pos_rest_num, eq_rest_num,
    positive_indexes, func_coefs, rest_coefs, rest_b).
    """
    # 1: the primal RHS becomes the dual objective
    new_func_coefs = rest_b
    # 2: the primal objective becomes the dual RHS
    new_rest_b = func_coefs
    # 3: one dual variable per primal constraint
    new_var_num = non_neg_rest_num + non_pos_rest_num + eq_rest_num
    new_non_neg_rest_num = 0
    new_non_pos_rest_num = 0
    new_eq_rest_num = 0
    A = []
    new_positive_indexes = []
    # Dual variables for primal inequalities are sign-constrained;
    # <=-rows are negated so both inequality kinds share one convention.
    for i in range(non_neg_rest_num + non_pos_rest_num):
        new_positive_indexes.append(i)
        if i >= non_neg_rest_num:
            row = (-np.array(rest_coefs[i])).tolist()
        else:
            row = (np.array(rest_coefs[i])).tolist()
        A.append(row)
    positive_indexes_count = len(new_positive_indexes)
    # Equality rows pass through unchanged (their dual variables are free).
    for i in range(eq_rest_num):
        A.append(rest_coefs[i + non_neg_rest_num + non_pos_rest_num])
    # Dual constraint matrix is the transpose of the primal's.
    A = np.matrix(A).transpose().tolist()
    new_rest_coefs = []
    buf = []
    # Rows from sign-constrained primal variables become dual inequalities;
    # rows from free primal variables become dual equalities, listed last.
    for i in range(len(A)):
        if i in positive_indexes:
            new_rest_coefs.append(A[i])
            new_non_pos_rest_num += 1
        else:
            buf.append(A[i])
            new_eq_rest_num += 1
    for i in buf:
        new_rest_coefs.append(i)
    return new_var_num, new_non_neg_rest_num, new_non_pos_rest_num, new_eq_rest_num, new_positive_indexes, new_func_coefs, new_rest_coefs, new_rest_b
| Hembos/optimization-method | linear_programming/EnumerationSimplexMethod.py | EnumerationSimplexMethod.py | py | 10,391 | python | ru | code | 0 | github-code | 36 |
def fizzbuzz(n):
    """Return "fizz"/"buzz"/"fizzbuzz" for multiples of 3/5/both, else str(n)."""
    words = []
    if n % 3 == 0:
        words.append("fizz")
    if n % 5 == 0:
        words.append("buzz")
    return "".join(words) or str(n)
def fizzbuzz_test(f):
    """Smoke-test a fizzbuzz implementation *f* and print the verdict."""
    expected = {3: "fizz", 5: "buzz", 15: "fizzbuzz"}
    passed = all(f(n) == word for n, word in expected.items())
    print("Success!" if passed else "Nope. Try again.")
fizzbuzz_test(fizzbuzz)
| seafoodfriedrice/thinkful-python | examples/fizzbuzz_unit_test.py | fizzbuzz_unit_test.py | py | 323 | python | en | code | 0 | github-code | 36 |
32349305968 | from pythonds.graphs import PriorityQueue, Graph, Vertex
def prim(graph, source):
    """Prim's minimum-spanning-tree algorithm over a pythonds Graph.

    Grows a tree from *source*, repeatedly relaxing edges to vertices
    still in the priority queue, and returns the total MST weight.
    Vertices unreachable from *source* keep distance Inf and are not
    counted in the total.
    """
    pq = PriorityQueue()
    total_weight = 0
    for n in graph:
        n.setDistance(float('Inf'))
        n.setPred(None)
    # Must come AFTER the init loop: the original set the source distance
    # first and the loop immediately clobbered it back to Inf, so the
    # heap started with every key at Inf.
    source.setDistance(0)
    pq.buildHeap([(n.getDistance(), n) for n in graph])  # (distance, node)
    # `while pq:` relied on undefined truthiness of PriorityQueue; the
    # documented API is isEmpty().
    while not pq.isEmpty():
        curr_node = pq.delMin()
        dist = curr_node.getDistance()
        if dist != float('Inf'):
            # Count each vertex's chosen edge exactly once, when the vertex
            # leaves the queue. The original added every provisional
            # improvement, over-counting vertices that were relaxed twice.
            total_weight += dist
        for adj in curr_node.getConnections():  # all nodes adjacent to the current one
            weight = curr_node.getWeight(adj)
            if adj in pq and weight < adj.getDistance():
                # Found a cheaper edge into the not-yet-included vertex.
                adj.setPred(curr_node)
                adj.setDistance(weight)
                pq.decreaseKey(adj, weight)
    return total_weight
| bkim1/algorithms-bonus | src/prim.py | prim.py | py | 979 | python | en | code | 0 | github-code | 36 |
70862456745 | #! /usr/bin/env python3
import time
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
from wallwalking.srv import FindWall, FindWallResponse
class FindWallService():
    """ROS service that orients the robot toward the nearest wall.

    Subscribes to /scan (LaserScan), publishes velocity on /cmd_vel, and
    exposes the /find_wall service.  The scan callback continuously tracks
    the beam index of the closest obstacle; the service handler uses it to
    rotate toward the wall, approach it, then turn until the wall is on the
    robot's right side.
    """
    # Reusable velocity message published on /cmd_vel.
    move = Twist()
    # Latest range reading straight ahead (beam 360).
    value_front = int()
    # Beam index (0..719) of the closest obstacle in the latest scan.
    minimum_position = int()
    def __init__(self):
        """Wire up the subscriber, publisher and service; blocks until the
        /scan and /cmd_vel connections are established."""
        self.subScan = rospy.Subscriber('/scan', LaserScan, self.callback_scan)
        while self.subScan.get_num_connections() < 1:
            rospy.loginfo("Waiting for subsccription to /scan")
            time.sleep(0.1)
        self.pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
        while self.pub.get_num_connections() < 1:
            rospy.loginfo("Waiting for connection to /cmd_vel")
            time.sleep(0.1)
        self.srv = rospy.Service('/find_wall', FindWall, self.callback_srv)
        self.rate = rospy.Rate(10)
    def callback_scan(self, msg):
        """Scan callback: find the index of the minimum range over the
        first 720 beams and cache the straight-ahead (index 360) range.
        Assumes a 720-beam scan — TODO confirm against the laser config."""
        self.minimum_position = 0
        minimum_value = msg.ranges[0]
        for i in range(720):
            if msg.ranges[i] < minimum_value:
                minimum_value = msg.ranges[i]
                self.minimum_position = i
        self.value_front = msg.ranges[360]
        rospy.loginfo("[srv_scan] Minimum position: " +
                      str(self.minimum_position))
    def callback_srv(self, request):
        """Service handler: rotate until the closest obstacle is in front
        (index ~360), drive forward until it is within 0.3 m, then rotate
        until it sits on the right side (index ~180), and stop."""
        rospy.loginfo("[srv] Call Service Server")
        # Rotate until the nearest beam is within 15 indices of dead ahead.
        while abs(self.minimum_position - 360) > 15:
            rospy.loginfo("[srv] Rotate")
            self.move.linear.x = 0
            self.move.angular.z = 0.25
            self.pub.publish(self.move)
            rospy.sleep(0.5)
        rospy.loginfo("[srv] Wall is in front of the robot")
        # Approach the wall until the front range drops below 0.3 m.
        while self.value_front > 0.3:
            rospy.loginfo("[srv] Move forward")
            self.move.linear.x = 0.05
            self.move.angular.z = 0
            self.pub.publish(self.move)
            rospy.sleep(0.5)
        rospy.loginfo("[srv] Wall is closer than 30cm")
        # Rotate until the nearest beam is on the robot's right (~index 180).
        while abs(self.minimum_position - 180) > 15:
            self.move.linear.x = 0
            self.move.angular.z = 0.25
            self.pub.publish(self.move)
            rospy.sleep(0.5)
        rospy.loginfo("[srv] Wall is on the right side")
        # Stop the robot and report success.
        self.move.linear.x = 0
        self.move.angular.z = 0
        self.pub.publish(self.move)
        result = FindWallResponse()
        result.wallfound = True
        rospy.loginfo("[srv] Service Server Finished")
        return result
if __name__ == '__main__':
    # Start the ROS node, instantiate the service, and spin forever.
    rospy.init_node('find_wall_node')
    FindWallService()
    rospy.spin()
| eugene-elk/ros-basics-rosject | src/find_wall_service_server.py | find_wall_service_server.py | py | 2,561 | python | en | code | 0 | github-code | 36 |
26151403562 | from player import Player
from moves import Moves
class Board(Moves):
"""
The Board class allows the players to setup the game by creating
the necessary set of pieces needed for each player.
It also extends the class Moves, in which all the allowed moves
are calculated based on the current position of all the pieces that
exist on the board at a given point in time.
The Board class includes the following elements:
- player: a 2-dimensional array that contains all the pieces
of the board, grouped by player.
- turn: It controls which player turn is the current one.
"""
    def __init__(self):
        """Set up a new game: initialize move bookkeeping, create both
        players (white moving down-to-up convention per Player's args),
        link each player to its enemy, and give white the first turn."""
        Moves.__init__(self)
        self.p1 = Player('white', 'down')
        self.p2 = Player('black', 'up')
        # Cross-link so each player can query its opponent's pieces.
        self.p1.enemy = self.p2
        self.p2.enemy = self.p1
        self.turn = self.p1
"""
getMap(): creates a dictionary that contains all the pieces on the board
and uses its positions as keys.
This function allows to draw the board in conbination with the
chess_ui class.
"""
def getSimpleMap(self):
return dict((k, p.short) for k, p in self.getFullMap().iteritems())
"""
getPlayerMap(): creates a set that contains all the pieces on the board.
"""
def getFullMap(self):
fullMap = self.p1.map.copy()
fullMap.update(self.p2.map)
return fullMap | marcialpuchi/Chess | board.py | board.py | py | 1,289 | python | en | code | 0 | github-code | 36 |
2884996289 | # coding:utf-8
# @Time : 2020/6/4 14:10
# @Author: Xiawang
# Description:
import time
import pytest
from api_script.open_lagou_com.resume import get_resume_list, get_online_resume, get_attachment_resume, get_contact, \
get_interview, get_obsolete
from utils.util import assert_equal, assert_in
@pytest.mark.incremental
class TestResume:
    """Incremental end-to-end tests for the Lagou open-platform resume API.

    The parametrized list test stashes a resume_id in a module-level
    global, which the subsequent tests reuse; @pytest.mark.incremental
    makes later tests depend on earlier ones passing.
    """
    @pytest.mark.parametrize("stage", [('OBSOLETE'), ('LINK'), ('INTERVIEW'), ('NEW')])
    def test_get_resume_list(self, get_access_token, stage):
        # Throttle requests to avoid hitting the API rate limit.
        time.sleep(1.5)
        res = get_resume_list(access_token=get_access_token, stage=stage)
        # code == 0 signals a successful API response.
        assert_equal(0, res.get('code', 1), f'获取{stage}阶段请求成功', te='foxtang')
        if len(res.get('data', [])) > 0:
            assert_equal(stage, res['data'][0]['stage'], f'获取{stage}的简历用例通过', f'获取{stage}的简历用例失败', 'foxtang')
            # Remember one resume_id for the follow-up tests below.
            global resume_id
            resume_id = res['data'][0]['resume_id']
    def test_get_online_resume(self, get_access_token):
        # Fetch the online resume and check it echoes the requested id.
        res = get_online_resume(access_token=get_access_token, resume_id=resume_id)
        assert_equal(0, res.get('code', 1), f'获取在线简历信息请求成功', te='foxtang')
        assert_equal(resume_id, res['data']['resumes']['resume_id'], '获取在线简历用例通过', f'获取在线简历{resume_id}用例失败', 'foxtang')
    def test_get_attachment_resume(self, get_access_token):
        # Attachment download: verify HTTP 200 and a known document suffix.
        res = get_attachment_resume(access_token=get_access_token, resume_id=resume_id)
        assert_equal(200, res.status_code, f'获取附件简历信息请求成功', te='foxtang')
        assert_in(res.headers.get('Attachment-Suffix'), ['pdf', 'doc', 'docx'], '获取附件简历用例通过', f'获取附件简历{resume_id}用例失败',
                  'foxtang')
    def test_get_contact(self, get_access_token):
        # Mark the candidate as contacted (initial screening).
        res = get_contact(access_token=get_access_token, resume_id=resume_id)
        assert_equal(0, res.get('code', 1), f'标记初筛请求成功', te='foxtang')
        assert_equal(resume_id, int(res['data']['resumeVo']['id']), '标记初筛用例通过', f'标记初筛{resume_id}用例失败', 'foxtang')
    def test_get_interview(self, get_access_token):
        # Invite the candidate to an interview.
        res = get_interview(access_token=get_access_token, resume_id=resume_id)
        assert_equal(0, res.get('code', 1), f'邀约面试请求成功', te='foxtang')
        assert_equal(resume_id, int(res['data']['resumeVo']['id']), '邀约面试用例通过', f'邀约面试{resume_id}用例失败', 'foxtang')
    def test_get_obsolete(self, get_access_token):
        # Reject (obsolete) the candidate to finish the lifecycle.
        res = get_obsolete(access_token=get_access_token, resume_id=resume_id)
        assert_equal(0, res.get('code', 1), f'淘汰候选人请求成功', te='foxtang')
        assert_equal(resume_id, int(res['data']['resumeVo']['id']), '淘汰候选人用例通过', f'淘汰候选人{resume_id}用例失败', 'foxtang')
| Ariaxie-1985/aria | tests/test_open_api_lagou_com/test_resume.py | test_resume.py | py | 2,896 | python | en | code | 0 | github-code | 36 |
10346867984 | from PySide.QtGui import QLabel
from EncoderTools import EncoderTools
from GuiTools import CustomComboBox, CustomHFormLayout
from FileAudio import FileAudio
from Tools import CustomProcess
class EncoderFLACTools(EncoderTools):
    """Provides Tools like Widgets, methods and objects for the FLAC encoder."""
    def __init__(self):
        """Constructor: builds the preferences form with a compression-level
        combo box (default index 3 -> level "5") and a container combo box
        (default ".flac")."""
        super().__init__()
        self.layout = CustomHFormLayout(self.preferencesWidget)
        self.layout.setContentsMargin(0)
        self.compressionLevelBox = CustomComboBox()
        self.compressionLevelBox.addItems(self.compressionLevelsText)
        self.compressionLevelBox.setCurrentIndex(3)
        self.layout.addField(QLabel("Compression Level"), self.compressionLevelBox)
        self.containerBox = CustomComboBox()
        self.containerBox.addItems(self.containerList)
        self.containerBox.setCurrentIndex(0)
        self.layout.addField(QLabel("Container"), self.containerBox)
    def defineItems(self):
        """Defines the tool items: format name, compression levels 8..0
        (with Best/Default/Fast labels), and the supported containers."""
        self.formatName = "FLAC | Free Lossless Audio Codec"
        self.compressionLevels = "8 7 6 5 4 3 2 1 0".split(" ")
        self.compressionLevelsText = []
        for level in self.compressionLevels:
            if level == "8":
                self.compressionLevelsText.append(level+" (Best)")
            elif level == "5":
                self.compressionLevelsText.append(level+" (Default)")
            elif level == "0":
                self.compressionLevelsText.append(level+" (Fast)")
            else:
                self.compressionLevelsText.append(level)
        self.containerList = [".flac", ".ogg"]
    def defineTagsMapping(self):
        """Defines the mapping of the tags needed for the use in the encoder CLI
        (internal <tag> placeholders -> FLAC/Vorbis comment field names)."""
        self.tagsMapping["<title>"] = "TITLE"
        self.tagsMapping["<albumartist>"] = "ALBUMARTIST"
        self.tagsMapping["<artist>"] = "ARTIST"
        self.tagsMapping["<album>"] = "ALBUM"
        self.tagsMapping["<tracknumber>"] = "TRACKNUMBER"
        self.tagsMapping["<tracktotal>"] = "TOTALTRACKS"
        self.tagsMapping["<discnumber>"] = "DISCNUMBER"
        self.tagsMapping["<disctotal>"] = "TOTALDISCS"
        self.tagsMapping["<genre>"] = "GENRE"
        self.tagsMapping["<year>"] = "YEAR"
        self.tagsMapping["<comment>"] = "COMMENT"
        self.tagsMapping["<lyrics>"] = "LYRICS"
    def prepareProcess(self, audioFile: FileAudio, outputPath: str) -> CustomProcess:
        """Returns the CustomProcess with commandline arguments defined.
        Reads audio from stdin (trailing "-") and writes to outputPath."""
        process = CustomProcess()
        process.setProgram("resources\\tools\\flac")
        # -f overwrites existing output; --ignore-chunk-sizes tolerates
        # streamed WAV input with unknown length.
        process.extendArg(["--totally-silent", "-f", "--ignore-chunk-sizes"])
        if self.containerBox.currentIndex() == 1:
            process.appendArg("--ogg")
        process.appendArg("-"+self.compressionLevels[self.compressionLevelBox.currentIndex()])
        if audioFile.metadata["<coverfile>"] and audioFile.metadata["<covermime>"]:
            # --picture SPECIFICATION: "3" selects picture type 3
            # (front cover) per flac's documented picture spec.
            process.appendArg("--picture=3|{0}|||{1}".format(audioFile.metadata["<covermime>"], audioFile.metadata["<coverfile>"]))
        process.extendArg(self.getTagArgs(audioFile))
        process.appendArg('--output-name={0}'.format(outputPath))
        process.appendArg("-")
        return process
    def getTagArgs(self, audioFile: FileAudio) -> list:
        """Returns the tags values formatted for the use in the CLI. Recieves the audioFile
        with its corresponding tags.  Unmapped or None-valued fields are skipped."""
        args = []
        for field, value in audioFile.metadata.items():
            if field in self.tagsMapping and value is not None:
                args.append('--tag={0}={1}'.format(self.tagsMapping[field], value))
        return args
    def getExtension(self) -> str:
        """Returns the extension selected in the GUI"""
        if self.containerBox.currentIndex() == 1:
            return ".ogg"
        return ".flac"
| gregsanz182/PyRus | src/EncoderFLACTools.py | EncoderFLACTools.py | py | 3,935 | python | en | code | 0 | github-code | 36 |
23411369560 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated Django migration for the `clientes` app: introduces
    the ReservaEstado lookup model, renames/removes Reserva fields, and
    points Reserva.estado at the new model."""
    dependencies = [
        ('clientes', '0002_auto_20150927_2202'),
    ]
    operations = [
        migrations.CreateModel(
            name='ReservaEstado',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('reserva_estado', models.CharField(max_length=1)),
                ('descripcion', models.CharField(max_length=50)),
            ],
        ),
        migrations.RenameField(
            model_name='reserva',
            old_name='reserva_descripcion',
            new_name='descripcion',
        ),
        migrations.RenameField(
            model_name='reserva',
            old_name='reserva_fecha_hora',
            new_name='fecha_hora',
        ),
        migrations.RemoveField(
            model_name='reserva',
            name='reserva_estado',
        ),
        migrations.AlterField(
            model_name='cliente',
            name='fecha_nacimiento',
            # Auto-captured timestamp default from makemigrations.
            field=models.DateField(default=datetime.datetime(2015, 9, 28, 5, 32, 10, 889000, tzinfo=utc)),
        ),
        migrations.AddField(
            model_name='reserva',
            name='estado',
            # NOTE(review): default=b'V' as a ForeignKey default looks like
            # it should be a ReservaEstado pk — confirm it resolves at runtime.
            field=models.ForeignKey(default=b'V', to='clientes.ReservaEstado'),
        ),
    ]
| pmmrpy/SIGB | clientes/migrations_2/0003_auto_20150928_0132.py | 0003_auto_20150928_0132.py | py | 1,491 | python | en | code | 0 | github-code | 36 |
33146570859 | import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.cross_validation import StratifiedKFold
def training_and_testing(X_inputfile, Y_inputfile):
    """Train a RandomForestRegressor on CSV features/targets, print train
    and test R^2 scores, and scatter-plot predictions vs. actuals.

    X_inputfile -- CSV path with the input feature columns
    Y_inputfile -- CSV path with the target (gross) column
    """
    input_features = pd.read_csv(X_inputfile)
    # NOTE(review): DataFrame.as_matrix() was removed in modern pandas —
    # TODO migrate to .values / .to_numpy() when upgrading.
    X_values = input_features.as_matrix()
    target_values = pd.read_csv(Y_inputfile)
    Y_values = target_values.as_matrix()
    # 70/30 random train/test split.
    X_train,X_test, Y_train, Y_test = train_test_split(X_values,Y_values, train_size = 0.7)
    random_forest_clf = RandomForestRegressor(n_estimators=110, max_features='auto', max_depth=300, oob_score=True, min_impurity_decrease=0.000)
    model = random_forest_clf.fit(X_train,Y_train)
    print(X_train.shape)
    # R^2 on train and held-out test data.
    print(model.score(X_train, Y_train))
    print(model.score(X_test,Y_test))
    predictions = random_forest_clf.predict(X_test)
    plt.scatter(list(predictions), list(Y_test))
    plt.xlabel("gross")
    plt.ylabel("error rate")
    plt.legend(loc="upper right")
    plt.show()
| alexwaweru/MovieForests | training_and_testing_gross/training_and_testing.py | training_and_testing.py | py | 1,124 | python | en | code | 0 | github-code | 36 |
32181630269 | import sys
import markdown
import json
import os
import re
from bs4 import BeautifulSoup
# This is a WIP unused script to
# write data back to the GSD database
advisories_dir = sys.argv[1]
gsd_dir = sys.argv[2]
CVE_REGEX = r"CVE-\d{4}-\d{4,7}"
FILE_FORMAT = "/Security-Updates-{version}.md"
ADVISORY_URL = "https://github.com/vmware/photon/wiki/Security-Update-{slug}"
PHOTON_VERSIONS = range(1, 5)
def advisory_slug(os_version, advisory):
    """Build a wiki slug like "3.0-123" from a Photon OS major version and
    an advisory id of the form "PHSA-<year>-<number>" (leading zeros in the
    number are dropped)."""
    number = advisory.split("-")[2]
    return f"{os_version}.0-{int(number)}"
def generate_cve_mapping():
    """Parse the Photon wiki advisory tables and map each CVE id to the
    list of advisory slugs that reference it.

    Reads Security-Updates-<version>.md for each Photon version, renders
    the markdown tables to HTML, and extracts (advisory, CVEs) rows.
    Returns {cve_id: [slug, ...]}.
    """
    mapping = {}
    for version in PHOTON_VERSIONS:
        filename = FILE_FORMAT.format(version=version)
        file = advisories_dir + filename
        with open(file, "r") as f:
            # The tables extension is needed so the wiki tables survive
            # the markdown -> HTML conversion.
            table_html = markdown.markdown(
                f.read(), extensions=["markdown.extensions.tables"]
            )
        soup = BeautifulSoup(table_html, "html.parser")
        for tr in soup.find("tbody").find_all("tr"):
            # Column order in the wiki table; only advisory and cves are used.
            (advisory, severity, date, packages, cves) = [
                x.text for x in tr.find_all("td")
            ]
            cves = re.findall(CVE_REGEX, cves)
            for cve in cves:
                slug = advisory_slug(version, advisory)
                if cve in mapping:
                    mapping[cve].append(slug)
                else:
                    mapping[cve] = [slug]
    return mapping
def __main__():
    """Attach Photon advisory links to the matching GSD entries on disk.

    For every CVE found in the wiki tables, locate the corresponding
    GSD-<year>-<id>.json file under gsd_dir and merge the advisory URLs
    into its 'gsd'/'GSD' references, creating a minimal GSD stanza when
    none exists.  Files are rewritten in place with 4-space indentation.
    """
    mapping = generate_cve_mapping()
    for cve in mapping:
        (_, year, _id) = cve.split("-")
        grouping_id = _id[:-3] + "xxx"  # GSD shards files into .../NNNxxx/ dirs
        gsd = f"GSD-{year}-{_id}"
        path = f"{gsd_dir}/{year}/{grouping_id}/{gsd}.json"
        if os.path.exists(path):
            data = None
            with open(path, "r") as f:
                data = json.loads(f.read())
            slugs = mapping[cve]
            urls = [ADVISORY_URL.format(slug=slug) for slug in slugs]
            if 'gsd' in data:
                existing_links = [x['url'] for x in data['gsd']['references']]
                # BUGFIX: the original computed `existing_links - urls`,
                # which raises TypeError on lists (and was the wrong
                # direction anyway); we want the advisory URLs not yet
                # referenced, and we append only those — the old code
                # re-appended every URL, duplicating existing entries.
                missing_links = set(urls) - set(existing_links)
                for url in missing_links:
                    data['gsd']['references'].append({
                        "type": "ADVISORY",
                        "url": url
                    })
            elif 'GSD' in data and 'references' in data['GSD']:
                data['GSD']['references'].extend(urls)
            elif 'GSD' in data:
                data['GSD']['references'] = urls
            else:
                # No GSD stanza yet: pull a description from whichever
                # namespace is present and create a minimal entry.
                try:
                    description = data['namespaces']['cve.org']['description']['description_data'][0]['value']
                except KeyError:
                    description = data['namespaces']['nvd.nist.gov']['cve']['description']['description_data'][0]['value']
                data['GSD'] = {
                    "alias": cve,
                    "description": description,
                    "id": gsd,
                    "references": urls
                }
            with open(path, 'w') as f:
                f.write(json.dumps(data, indent=4))
        else:
            print(f"Could not find {cve}")
if __name__ == "__main__":
__main__()
| captn3m0/photon-os-advisories | update.py | update.py | py | 3,376 | python | en | code | 0 | github-code | 36 |
35612628358 | from flask import Flask, render_template, request
from flask_wtf import FlaskForm
from wtforms import StringField
from wtforms.validators import DataRequired
from datetime import datetime
from flask_moment import Moment
from flask_sqlalchemy import SQLAlchemy
from flask_security import Security, SQLAlchemyUserDatastore, UserMixin, RoleMixin, login_required
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_googlemaps import GoogleMaps, Map
app = Flask(__name__)
# NOTE(review): DEBUG mode and hard-coded secrets (SECRET_KEY, password
# salt, Google Maps API key) should come from environment/config files,
# not source control.
app.config['DEBUG'] = True
app.config['SECRET_KEY'] = '임희연'
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///test.db'
app.config['SECURITY_PASSWORD_SALT'] = '임희연'
app.config['GOOGLEMAPS_KEY'] = "AIzaSyCqXCWpsYcokf52FhcNNWfZ8Ib5ScUJv9U"
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
# Flask extension singletons bound to the app.
moment = Moment(app)
db = SQLAlchemy(app)
map = GoogleMaps(app)  # NOTE(review): shadows the builtin map()
admin = Admin(app, name='HeeYeon')
# Association table backing the many-to-many User <-> Role relationship.
roles_users = db.Table('roles_users', db.Column('user_id', db.Integer(), db.ForeignKey('user.id')),
                       db.Column('role_id', db.Integer(), db.ForeignKey('role.id')))
class SearchForm(FlaskForm):
    """Search-box form with a single required text field ('검색' = search)."""
    search = StringField('검색', validators=[DataRequired()])
class MyForm(FlaskForm):
    """Generic one-field form used by the /search view."""
    text = StringField('text', validators=[DataRequired()])
class Role(db.Model, RoleMixin):
    """Flask-Security role (e.g. admin, user)."""
    id = db.Column(db.Integer(), primary_key=True)
    name = db.Column(db.String(80), unique=True)
    description = db.Column(db.String(255))
class User(db.Model, UserMixin):
    """Application user account used by Flask-Security."""
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(255), unique=True)
    # NOTE(review): plain String column -- ensure Flask-Security password
    # hashing is active so raw passwords never land here.
    password = db.Column(db.String(255))
    active = db.Column(db.Boolean())
    confirmed_at = db.Column(db.DateTime())
    roles = db.relationship('Role', secondary=roles_users, backref=db.backref('users', lazy='dynamic'))
# Wire up Flask-Security and expose the User model in the admin UI.
user_datastore = SQLAlchemyUserDatastore(db, User, Role)
security = Security(app, user_datastore)
admin.add_view(ModelView(User, db.session))
def create_user():
    """Create the database schema and seed one demo account.

    NOTE(review): not called anywhere in this file and uses a hard-coded
    plaintext password -- intended for manual, local setup only.
    """
    db.create_all()
    user_datastore.create_user(email='heeyeon@gmail.net', password='password')
    db.session.commit()
# Views
@app.route('/login')
@login_required
def home():
    """Landing page shown only to authenticated users."""
    return render_template('index.html')
@app.route('/')
def index():
    """Public landing page, stamped with the current UTC time."""
    return render_template('index.html', dt=datetime.utcnow())
@app.route('/graph')
def graph():
    """Render the plot components (script + div) built by the data module."""
    # Imported lazily so the app can start without the plotting module.
    from data import plotdata
    script, div = plotdata()
    return render_template('graph.html', script=script, div=div)
@app.route('/0703', methods=['GET', 'POST'])
def hello_0703():
    """Dated page for 2018-07-03."""
    return render_template('0703.html', dt=datetime(2018, 7, 3))
@app.route('/0705', methods=['GET', 'POST'])
def hello_0705():
    """Dated page for 2018-07-05."""
    return render_template('0705.html', dt=datetime(2018, 7, 5))
@app.route('/0709', methods=['GET', 'POST'])
def hello_0709():
    """Dated page for 2018-07-09."""
    return render_template('0709.html', dt=datetime(2018, 7, 9))
@app.route('/0717', methods=['GET', 'POST'])
def hello_0717():
    """Dated page for 2018-07-17."""
    return render_template('0717.html', dt=datetime(2018, 7, 17))
@app.route('/0718', methods=['GET', 'POST'])
def hello_0718():
    """Dated page for 2018-07-18."""
    return render_template('0718.html', dt=datetime(2018, 7, 18))
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Render the search form; on a valid submission show the index page.

    BUG FIX: the original only called form.validate_on_submit() inside a
    `request.method == 'GET'` branch.  Flask-WTF's validate_on_submit()
    returns True only for submitting methods (POST), so the success branch
    could never execute.  Calling it unconditionally restores the intended
    POST-validate / GET-display flow.
    """
    form = MyForm()
    if form.validate_on_submit():
        # Valid POST submission.
        return render_template('index.html')
    # GET request, or POST that failed validation: show the form again.
    # The template refers to the form object as `form2`.
    return render_template('search.html', form2=form)
@app.route('/form', methods=['GET', 'POST'])
def pandas_index():
    """Render one column (?name=...) of the pandas_ DataFrame as HTML.

    NOTE(review): `name` comes straight from the query string and is used
    unvalidated as a column label -- an unknown name raises KeyError.
    NOTE(review): the output path is absolute and machine-specific; it
    should be derived from the app's template folder instead.
    """
    name = request.args.get('name')
    # Local import deliberately shadows this view's own name.
    from pandas_ import pandas_index
    data = pandas_index()
    data2 = data[[name]]
    data2.to_html('/Users/limheeyeon/PycharmProjects/0727/templates/hhhh.html')
    return render_template('hhhh.html')
@app.route('/booking')
def booking():
    """Render the static booking page."""
    return render_template('booking.html')
@app.route('/map')
def googlemap():
    """Render a Google map centred on Seoul with two labelled markers."""
    marker_points = [
        {
            'icon': 'http://maps.google.com/mapfiles/ms/icons/red-dot.png',
            'lat': 37.751855,
            'lng': 128.876057,
            'infobox': "<h1>강릉</h1>"
        },
        {
            'icon': 'http://maps.google.com/mapfiles/ms/icons/blue-dot.png',
            'lat': 35.3744136,
            'lng': 127.13759,
            'infobox': "<h1>순창</h1>"
        },
    ]
    # Full-viewport overlay styling for the embedded map element.
    map_style = (
        "height:100%;"
        "width:100%;"
        "top:64;"
        "left:0;"
        "position:absolute;"
        "z-index:200;"
    )
    sndmap = Map(
        identifier="sun",
        lat=37.5665,
        lng=126.9780,
        zoom=7,
        style=map_style,
        language='ko',
        markers=marker_points,
    )
    return render_template('map.html', sndmap=sndmap)
@app.route('/crime')
def crime_map():
    """Render a map whose markers come from the local crime module."""
    from crime import pandas_index2
    marker = pandas_index2()
    sndmap = Map(
        identifier="sun",
        lat=39.72606,
        lng=-104.949973,
        style=(
            "height:100%;"
            "width:100%;"
            "top:64;"
            "left:0;"
            "position:absolute;"
            "z-index:200;"
        ),
        language='ko',
        markers=marker
    )
    return render_template('map1.html', sndmap=sndmap)
if __name__ == '__main__':
    # Start Flask's built-in development server.
    app.run()
| gmldusdkwk/Big-Data | 0727/app.py | app.py | py | 5,629 | python | en | code | 0 | github-code | 36 |
36840712959 | """Unit tests for the resmokelib.testing.executor module."""
import logging
import threading
import unittest
import mock
from opentelemetry.context.context import Context
from buildscripts.resmokelib import errors
from buildscripts.resmokelib.testing import job
from buildscripts.resmokelib.testing import queue_element
from buildscripts.resmokelib.testing.fixtures import interface as _fixtures
from buildscripts.resmokelib.testing.fixtures.fixturelib import FixtureLib
from buildscripts.resmokelib.utils import queue as _queue
# pylint: disable=protected-access
class TestJob(unittest.TestCase):
    """Tests for job.Job._run's repeat-count and repeat-time scheduling.

    The seven test methods previously repeated an identical queue/clock/run
    block verbatim; that block is now factored into _run_job().
    """

    TESTS = ["jstests/core/and.js", "jstests/core/or.js"]

    @staticmethod
    def mock_testcase(test_name):
        """Return a mock that looks like a registered js_test test case."""
        testcase = mock.Mock()
        testcase.test_name = test_name
        testcase.REGISTERED_NAME = "js_test"
        testcase.logger = logging.getLogger("job_unittest")
        return testcase

    @staticmethod
    def mock_interrupt_flag():
        """Return a flag object that always reports it is not set."""
        interrupt_flag = mock.Mock()
        interrupt_flag.is_set = lambda: False
        return interrupt_flag

    @staticmethod
    def get_suite_options(num_repeat_tests=None, time_repeat_tests_secs=None,
                          num_repeat_tests_min=None, num_repeat_tests_max=None):
        """Build mock suite options carrying the given repeat settings."""
        suite_options = mock.Mock()
        suite_options.num_repeat_tests = num_repeat_tests
        suite_options.time_repeat_tests_secs = time_repeat_tests_secs
        suite_options.num_repeat_tests_min = num_repeat_tests_min
        suite_options.num_repeat_tests_max = num_repeat_tests_max
        return suite_options

    @staticmethod
    def queue_tests(tests, queue, queue_elem_type, suite_options):
        """Wrap each test in a queue element of the given type and enqueue it."""
        for test in tests:
            queue_elem = queue_elem_type(TestJob.mock_testcase(test), {}, suite_options)
            queue.put(queue_elem)

    @staticmethod
    def expected_run_num(time_repeat_tests_secs, test_time_secs):
        """Return the number of times a test is expected to run."""
        return time_repeat_tests_secs / test_time_secs

    def _run_job(self, suite_options, queue_elem_type, increment=1):
        """Enqueue self.TESTS, run a UnitJob against a mocked clock, return it."""
        queue = _queue.Queue()
        mock_time = MockTime(increment)
        job_object = UnitJob(suite_options)
        self.queue_tests(self.TESTS, queue, queue_elem_type, suite_options)
        job_object._get_time = mock_time.time
        job_object._run(queue, self.mock_interrupt_flag())
        return job_object

    def test__run_num_repeat(self):
        num_repeat_tests = 1
        suite_options = self.get_suite_options(num_repeat_tests=num_repeat_tests)
        job_object = self._run_job(suite_options, queue_element.QueueElem)
        self.assertEqual(job_object.total_test_num, num_repeat_tests * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], num_repeat_tests)

    def test__run_time_repeat_time_no_min_max(self):
        increment = 1
        time_repeat_tests_secs = 10
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs)
        job_object = self._run_job(suite_options, queue_element.QueueElemRepeatTime, increment)
        self.assertEqual(job_object.total_test_num, expected_tests_run * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_time_no_min(self):
        increment = 1
        time_repeat_tests_secs = 10
        num_repeat_tests_max = 100
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_max=num_repeat_tests_max)
        job_object = self._run_job(suite_options, queue_element.QueueElemRepeatTime, increment)
        self.assertLess(job_object.total_test_num, num_repeat_tests_max * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_time_no_max(self):
        increment = 1
        time_repeat_tests_secs = 10
        num_repeat_tests_min = 1
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min)
        job_object = self._run_job(suite_options, queue_element.QueueElemRepeatTime, increment)
        self.assertGreater(job_object.total_test_num, num_repeat_tests_min * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_time(self):
        increment = 1
        time_repeat_tests_secs = 10
        num_repeat_tests_min = 1
        num_repeat_tests_max = 100
        expected_tests_run = self.expected_run_num(time_repeat_tests_secs, increment)
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min,
                                               num_repeat_tests_max=num_repeat_tests_max)
        job_object = self._run_job(suite_options, queue_element.QueueElemRepeatTime, increment)
        self.assertGreater(job_object.total_test_num, num_repeat_tests_min * len(self.TESTS))
        self.assertLess(job_object.total_test_num, num_repeat_tests_max * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], expected_tests_run)

    def test__run_time_repeat_min(self):
        increment = 1
        time_repeat_tests_secs = 2
        num_repeat_tests_min = 3
        num_repeat_tests_max = 100
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min,
                                               num_repeat_tests_max=num_repeat_tests_max)
        job_object = self._run_job(suite_options, queue_element.QueueElemRepeatTime, increment)
        self.assertEqual(job_object.total_test_num, num_repeat_tests_min * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], num_repeat_tests_min)

    def test__run_time_repeat_max(self):
        increment = 1
        time_repeat_tests_secs = 30
        num_repeat_tests_min = 1
        num_repeat_tests_max = 10
        expected_time_repeat_tests = self.expected_run_num(time_repeat_tests_secs, increment)
        suite_options = self.get_suite_options(time_repeat_tests_secs=time_repeat_tests_secs,
                                               num_repeat_tests_min=num_repeat_tests_min,
                                               num_repeat_tests_max=num_repeat_tests_max)
        job_object = self._run_job(suite_options, queue_element.QueueElemRepeatTime, increment)
        self.assertEqual(job_object.total_test_num, num_repeat_tests_max * len(self.TESTS))
        for test in self.TESTS:
            self.assertEqual(job_object.tests[test], num_repeat_tests_max)
            self.assertLess(job_object.tests[test], expected_time_repeat_tests)
class MockTime(object):
    """Deterministic stand-in for time.time that advances by a fixed step."""

    def __init__(self, increment):
        """Remember the step by which each time() call advances the clock."""
        self._increment = increment
        self._time = 0

    def time(self):
        """Return the current fake time, then advance it by the step."""
        now = self._time
        self._time = now + self._increment
        return now
class UnitJob(job.Job):
    """Job subclass that counts executions per test instead of running them."""
    def __init__(self, suite_options):
        super(UnitJob, self).__init__(0, logging.getLogger("job_unittest"), None, [], None, None,
                                      suite_options, logging.getLogger("job_unittest"))
        # Total number of _execute_test calls across all tests.
        self.total_test_num = 0
        # Per-test-name execution counters.
        self.tests = {}
    def _execute_test(self, test, hook_failure_flag=None):
        # Record the execution; no real test is run.
        self.total_test_num += 1
        if test.test_name not in self.tests:
            self.tests[test.test_name] = 0
        self.tests[test.test_name] += 1
class TestFixtureSetupAndTeardown(unittest.TestCase):
    """Test cases for error handling around setup_fixture() and teardown_fixture()."""
    def setUp(self):
        # Build a Job with no real fixture/hooks/report; only the manager's
        # setup/teardown entry points are exercised, and they are mocked.
        logger = logging.getLogger("job_unittest")
        self.__job_object = job.Job(job_num=0, logger=logger, fixture=None, hooks=[], report=None,
                                    archival=None, suite_options=None, test_queue_logger=logger)
        self.__context = Context(trace_id=0, span_id=0, is_remote=False)
        # Initialize the Job instance such that its setup_fixture() and teardown_fixture() methods
        # always indicate success. The settings for these mocked method will be changed in the
        # individual test cases below.
        self.__job_object.manager.setup_fixture = mock.Mock(return_value=True)
        self.__job_object.manager.teardown_fixture = mock.Mock(return_value=True)
    def __assert_when_run_tests(self, setup_succeeded=True, teardown_succeeded=True):
        # Run the job end-to-end and assert which of the three flags were
        # raised given the mocked setup/teardown outcomes.
        queue = _queue.Queue()
        interrupt_flag = threading.Event()
        setup_flag = threading.Event()
        teardown_flag = threading.Event()
        self.__job_object(queue, interrupt_flag, self.__context, setup_flag, teardown_flag)
        self.assertEqual(setup_succeeded, not interrupt_flag.is_set())
        self.assertEqual(setup_succeeded, not setup_flag.is_set())
        self.assertEqual(teardown_succeeded, not teardown_flag.is_set())
        # teardown_fixture() should be called even if setup_fixture() raises an exception.
        self.__job_object.manager.setup_fixture.assert_called()
        self.__job_object.manager.teardown_fixture.assert_called()
    def test_setup_and_teardown_both_succeed(self):
        self.__assert_when_run_tests()
    def test_setup_returns_failure(self):
        self.__job_object.manager.setup_fixture.return_value = False
        self.__assert_when_run_tests(setup_succeeded=False)
    def test_setup_raises_logging_config_exception(self):
        self.__job_object.manager.setup_fixture.side_effect = errors.LoggerRuntimeConfigError(
            "Logging configuration error intentionally raised in unit test")
        self.__assert_when_run_tests(setup_succeeded=False)
    def test_setup_raises_unexpected_exception(self):
        self.__job_object.manager.setup_fixture.side_effect = Exception(
            "Generic error intentionally raised in unit test")
        self.__assert_when_run_tests(setup_succeeded=False)
    def test_teardown_returns_failure(self):
        self.__job_object.manager.teardown_fixture.return_value = False
        self.__assert_when_run_tests(teardown_succeeded=False)
    def test_teardown_raises_logging_config_exception(self):
        self.__job_object.manager.teardown_fixture.side_effect = errors.LoggerRuntimeConfigError(
            "Logging configuration error intentionally raised in unit test")
        self.__assert_when_run_tests(teardown_succeeded=False)
    def test_teardown_raises_unexpected_exception(self):
        self.__job_object.manager.teardown_fixture.side_effect = Exception(
            "Generic error intentionally raised in unit test")
        self.__assert_when_run_tests(teardown_succeeded=False)
class TestNoOpFixtureSetupAndTeardown(unittest.TestCase):
    """Test cases for NoOpFixture handling in setup_fixture() and teardown_fixture()."""
    def setUp(self):
        logger = logging.getLogger("job_unittest")
        fixturelib = FixtureLib()
        # Real NoOpFixture, but with setup/teardown replaced by mocks so we
        # can assert they are invoked by the manager.
        self.__noop_fixture = _fixtures.NoOpFixture(logger=self.logger, job_num=0,
                                                    fixturelib=fixturelib)
        self.__noop_fixture.setup = mock.Mock()
        self.__noop_fixture.teardown = mock.Mock()
        # Report always claims the (synthetic) test passed.
        test_report = mock.Mock()
        test_report.find_test_info().status = "pass"
        self.__job_object = job.Job(job_num=0, logger=self.logger, fixture=self.__noop_fixture,
                                    hooks=[], report=test_report, archival=None, suite_options=None,
                                    test_queue_logger=self.logger)
    def test_setup_called_for_noop_fixture(self):
        self.assertTrue(self.__job_object.manager.setup_fixture(self.logger))
        self.__noop_fixture.setup.assert_called_once_with()
    def test_teardown_called_for_noop_fixture(self):
        self.assertTrue(self.__job_object.manager.teardown_fixture(self.logger))
        self.__noop_fixture.teardown.assert_called_once_with(finished=True)
| mongodb/mongo | buildscripts/tests/resmokelib/testing/test_job.py | test_job.py | py | 13,936 | python | en | code | 24,670 | github-code | 36 |
29620255242 | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
import time
import codecs
import json
import os
class AiqichademoPipeline:
    """Scrapy pipeline that dumps every scraped item into a dated JSON file.

    BUG FIX: the original streamed '<json>,\\n' fragments and then used
    seek(-2)/truncate() in close_spider() to strip the trailing comma.
    Seeking backwards on a codecs text stream uses byte offsets that do
    not line up with multi-byte characters, and when zero items were
    scraped it truncated the opening '[' entirely, producing invalid
    JSON.  The items are now buffered and serialised once on close.
    """

    def __init__(self):
        super().__init__()
        # One output file per day, e.g. scraped_data2024-01-31.json.
        today = time.strftime('%Y-%m-%d', time.localtime())
        self.path = 'scraped_data' + today + '.json'
        # Buffered items, written out in close_spider().
        self.items = []

    def process_item(self, item, spider):
        """Buffer the item (as a plain dict) and pass it on unchanged."""
        self.items.append(dict(item))
        return item

    def close_spider(self, spider):
        """Write all buffered items as a single valid JSON array."""
        with codecs.open(self.path, 'w', encoding='utf-8') as fp:
            fp.write(json.dumps(self.items, ensure_ascii=False))
self.fp.close() # 关闭文件
| hua345/myBlog | python/scrapy/aiqichaDemo/aiqichaDemo/pipelines.py | pipelines.py | py | 1,210 | python | en | code | 0 | github-code | 36 |
35883196632 | from src.domain.complex import Complex
import copy
class ComplexServices:
    """Service layer around a list of Complex numbers with undo support."""
    def __init__(self):
        # Undo stack of deep-copied snapshots; the initial empty snapshot
        # represents the state before any number was added.
        self._stack = [[]]
        self._complex_numbers = []
        ComplexServices.start_up(self)
    def add_number(self, a, b):
        """
        :param a: the real part of the number we add
        :param b: the imaginary part of the number we add
        :return : list of complex numbers
        """
        self._complex_numbers.append(Complex(a, b))
        self.save_stack()
        return self._complex_numbers
    def start_up(self):
        """
        Generate 10 entries at start-up
        """
        self.add_number(2, -1)
        self.add_number(-3, 5)
        self.add_number(4, 2)
        self.add_number(-1, 1)
        self.add_number(6, -3)
        self.add_number(-2, 0)
        self.add_number(0, 1)
        self.add_number(3, 3)
        self.add_number(-1, 0)
        self.add_number(9, -2)
    def generate_list(self):  # returns the current list of complex numbers
        return self._complex_numbers
    def filter_list(self, start, end):
        """
        :param start: first position for filter functionality (1-based, inclusive)
        :param end: last position for filter functionality (1-based, inclusive)
        Creates a new list containing the filtered values
        """
        new_list = []
        if start > end:
            raise IndexError("Start position must be before end position")
        index = 1
        for c in self._complex_numbers:
            if start <= index <= end:
                new_list.append(c)
            index += 1
        self._complex_numbers = new_list
        self.save_stack()
    def undo_last_command(self):
        # Restore the snapshot taken before the last mutating command.
        # Slice assignment keeps external references to the list valid.
        if len(self._stack) != 1:
            self._complex_numbers[:] = copy.deepcopy(self._stack[-2])
            self._stack.pop()
    def save_stack(self):
        """
        Saves the list of complex numbers in stack every time before certain commands, so that we can undo that command
        """
        # Append, then replace the entry with a deep copy so later mutations
        # of the live list cannot alter the stored snapshot.
        self._stack.append(self._complex_numbers)
        self._stack[-1] = copy.deepcopy(self._complex_numbers)
def test_add_number():
    # Smoke test: the newly added number must land at the tail of the list.
    obj = ComplexServices()
    test_list = obj.add_number(2, -2)
    assert test_list[-1].get_a == 2
    assert test_list[-1].get_b == -2
# NOTE(review): runs at import time; consider moving it under a
# `if __name__ == "__main__":` guard or into a dedicated test module.
test_add_number()
| Cibu-Clara/University-Projects | Semester1/FP/A5/services/services.py | services.py | py | 2,253 | python | en | code | 2 | github-code | 36 |
29965495383 | #!/usr/bin/env python
# import sys
import logging
import struct
import serial
from . import errors
from . import fio
from . import ops
# import settings this as settings_module to avoid name conflicts
from . import settings as settings_module
# Module-level logger.
# NOTE(review): attaching a handler and forcing DEBUG at import time is
# intrusive for library code; normally the application configures logging.
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)
class Interface(object):
    """SUMP-protocol client for a logic analyzer attached to a serial port.

    Handles device reset, metadata queries, trigger/divider/flag
    configuration and sample capture.  (Large blocks of commented-out
    legacy write code were removed; a genuine ``self.self.settings`` typo
    in the 'Simple' trigger path is fixed below.)
    """

    # undivided clock rate, in Hz, from testing with OBLS
    protocol_version = '1.0'

    def __init__(
            self, path='/dev/ttyACM0', baud=115200,
            timeout=None, settings=None, **kwargs):
        """Open the port, reset the device, read metadata and push settings.

        When a Settings object is supplied, extra keyword arguments are
        applied to it as attribute overrides; otherwise a fresh Settings
        is built from the keyword arguments.
        """
        self.timeout = timeout
        if settings is None:
            self.settings = settings_module.Settings(**kwargs)
        else:
            self.settings = settings
            if len(kwargs):
                for kw in kwargs:
                    setattr(self.settings, kw, kwargs[kw])
        self.port = serial.Serial(path, baud, timeout=self.timeout)
        self.debug_logger = None
        self.reset()
        self.metadata = self.query_metadata()
        self.send_settings()

    def reset(self):
        """Send five zero bytes: the SUMP soft-reset sequence."""
        logger.debug("reset")
        self.port.write('\x00\x00\x00\x00\x00')

    def capture(self, send_settings=True):
        '''Request a capture and return the list of samples.'''
        logger.debug("capture")
        if send_settings:
            self.send_settings()
        # Build one byte-unpack function per enabled channel group; a set
        # bit in channel_groups disables that group, so no unpacker (and
        # no byte read) is created for it.
        logger.debug("building unpack functions")
        ufs = []
        for i in xrange(4):
            if not (self.settings.channel_groups & (0b1 << i)):
                # bind i at definition time via the default argument
                ufs.append(lambda c, si=i: ord(c) << (8 * si))
        d = []
        self.port.timeout = self.settings.timeout
        logger.debug("starting capture")
        self.port.write('\x01')  # start the capture (SUMP "run" command)
        logger.debug("reading capture")
        for i in xrange(self.settings.read_count):
            v = 0
            for uf in ufs:
                v |= uf(self.port.read(1))
            d.append(v)
        self.reset()  # TODO is this needed?
        if self.settings.latest_first:
            return d[::-1]
        else:
            return d

    def save(self, capture, filename, meta=None):
        """Persist a capture (plus current settings) via the fio helper."""
        logger.debug("save %s", filename)
        fio.save(capture, filename, self.settings, meta)

    def id_string(self):
        '''Return device's SUMP ID string.'''
        logger.debug("id_string")
        self.port.write('\x02')
        # TODO check protocol version here
        val = self.port.read(4)  # 4 bytes, little-endian on the wire
        return val[::-1]

    def xon(self):
        """Resume transmission (XON flow control byte)."""
        logger.debug("xon")
        self.port.write('\x11')

    def xoff(self):
        """Pause transmission (XOFF flow control byte)."""
        logger.debug("xoff")
        self.port.write('\x13')

    def _send_trigger_mask(self, stage, mask):
        """Program the 32-bit trigger mask for the given stage (0xC0 family)."""
        logger.debug("send_trigger_mask %s %s", stage, mask)
        msg = struct.pack('<Bi', 0xC0 | (stage << 2), mask)
        self.port.write(msg)

    def _send_trigger_value(self, stage, value):
        """Program the 32-bit trigger value for the given stage (0xC1 family)."""
        logger.debug("send_trigger_value %s %s", stage, value)
        msg = struct.pack('<Bi', 0xC1 | (stage << 2), value)
        self.port.write(msg)

    def _send_trigger_configuration(
            self, stage, delay, channel, level, start, serial):
        """Program delay/channel/level/start/serial bits for a trigger stage."""
        logger.debug(
            "send_trigger_configuration %s %s %s %s %s %s",
            stage, delay, channel, level, start, serial)
        msg = struct.pack(
            '<BHBB',
            0xC2 | (stage << 2),
            delay,
            ((channel & 0x0F) << 4) | level,
            (start << 3) | (serial << 2) | ((channel & 0x10) >> 4))
        self.port.write(msg)

    def send_divider_settings(self, settings):
        """Program the sample-rate divider (device stores divider - 1)."""
        logger.debug("send_divider_settings %s", settings.divider)
        d = settings.divider - 1  # offset 1 correction for SUMP hardware
        msg = struct.pack('<cHBx', '\x80', d & 0xFFFF, d >> 16)
        self.port.write(msg)

    def send_read_and_delay_count_settings(self, settings):
        """Program read/delay counts; both are sent in units of 4 samples.

        Side effect: rounds ``settings.read_count`` and
        ``settings.delay_count`` down to multiples of 4 so they match what
        the device will actually capture.
        """
        logger.debug("send_read_and_delay_count_settings")
        r = (settings.read_count // 4)
        settings.read_count = r * 4
        d = (settings.delay_count // 4)
        settings.delay_count = d * 4
        msg = struct.pack('<cHH', '\x81', r, d)
        self.port.write(msg)

    def send_flags_settings(self, settings):
        """Program the flags register (inversion, clock source, groups, filter, demux)."""
        logger.debug("send_flag_settings")
        msg = struct.pack(
            '<cBxxx', '\x82',
            (settings.inverted << 7) | (settings.external << 6) |
            (settings.channel_groups << 2) | (settings.filter << 1) |
            settings.demux)
        self.port.write(msg)

    def send_settings(self):
        """Push divider, trigger, count and flag settings to the device.

        The order of these writes is CRITICAL for SUMP hardware.
        """
        logger.debug("send_settings")
        self.send_divider_settings(self.settings)
        trigger_enable = self.settings.trigger_enable
        if trigger_enable == 'None':
            # send always-trigger trigger settings
            for stage in xrange(self.settings.trigger_max_stages):
                self._send_trigger_configuration(stage, 0, 0, 0, True, False)
                self._send_trigger_mask(stage, 0)
                self._send_trigger_value(stage, 0)
        elif trigger_enable == 'Simple':
            # set settings from stage 0, no-op for stages 1..3
            self._send_trigger_configuration(
                0, self.settings.trigger_stages[0].delay,
                self.settings.trigger_stages[0].channel,
                0, True, self.settings.trigger_stages[0].serial)
            self._send_trigger_mask(0, self.settings.trigger_stages[0].mask)
            self._send_trigger_value(0, self.settings.trigger_stages[0].value)
            # BUG FIX: this read "self.self.settings" before, raising
            # AttributeError whenever a Simple trigger was configured.
            for stage in xrange(1, self.settings.trigger_max_stages):
                self._send_trigger_configuration(stage, 0, 0, 0, False, False)
                self._send_trigger_mask(stage, 0)
                self._send_trigger_value(stage, 0)
        elif trigger_enable == 'Complex':
            for (i, stage) in enumerate(self.settings.trigger_stages):
                # OLS needs things in this order
                self._send_trigger_mask(i, stage.mask)
                self._send_trigger_value(i, stage.value)
                self._send_trigger_configuration(
                    i, stage.delay, stage.channel, stage.level, stage.start,
                    stage.serial)
        else:
            raise errors.TriggerEnableError
        self.send_read_and_delay_count_settings(self.settings)
        self.send_flags_settings(self.settings)

    def query_metadata(self):
        '''Return metadata identifying the SUMP device,
        firmware, version, etc.'''
        logger.debug("query_metadata")
        result = []
        self.reset()
        r = self.port.read
        timeout = self.port.timeout  # save timeout setting to restore later
        try:
            # only wait 2 seconds for devices that don't do metadata
            self.port.timeout = 2
            self.port.write('\x04')
            while True:
                token = r(1)
                if not token:  # end-of-file
                    break
                token = ord(token)
                if not token:  # binary 0 end-of-metadata marker
                    break
                elif token <= 0x1F:  # C-string follows token
                    v = []
                    while True:
                        x = r(1)
                        if x != '\0':
                            v.append(x)
                        else:
                            break
                    result.append((token, ''.join(v)))
                elif token <= 0x3F:  # 32-bit int follows token
                    result.append((token, ops.big_endian(r(4))))
                elif token <= 0x5F:  # 8-bit int follows token
                    result.append((token, ord(r(1))))
                else:  # bare token with no payload
                    result.append((token, None))
        finally:
            self.port.timeout = timeout  # restore timeout setting
        return result

    def close(self):
        """Close and forget the serial port."""
        logger.debug("close")
        self.port.close()
        self.port = None
self.port = None
def open_interface(port='/dev/ttyACM0', baud=115200, **kwargs):
    """Convenience wrapper: build and return an Interface for the given port."""
    return Interface(port, baud, **kwargs)
| braingram/pysump | sump/interface.py | interface.py | py | 10,286 | python | en | code | 1 | github-code | 36 |
10507224268 | #! /mnt/NewDiskSim/stefano/stefano/CondaInstallation/envs/Experiments/bin/python
import os
import subprocess
import sys
import speech_recognition as sr
import tensorflow as tf
from spellchecker import SpellChecker
import pyautogui
# from utilities_sm import *
# Predefined voice-command actions.
def open_terminal():
    # Launch a new GNOME terminal window.
    subprocess.run(["gnome-terminal"])
def play_music():
    # Play a fixed local file with Rhythmbox.
    subprocess.run(["rhythmbox", "music.mp3"])
def open_website(url):
    # Open the given URL in Firefox.
    subprocess.run(["firefox", url])
# Mapping of predefined (Italian) voice commands to the action they trigger.
# NOTE(review): open_website requires a url argument but the recognizers
# below invoke matched commands with no arguments -- confirm intended usage.
commands = [ ("Apri terminale", open_terminal),
            ("riproduci musica", play_music),
            ("apri sito web", open_website)]
def correct_text(text):
    """Spell-correct *text* word by word and return the corrected sentence.

    BUG FIX: pyspellchecker's SpellChecker.correction() operates on single
    words; the original passed the whole recognised sentence, which usually
    yields None.  Each word is now corrected independently, falling back to
    the original word when no correction is known.  Empty input still
    returns None so existing callers' None-checks keep working.
    """
    spell = SpellChecker()
    words = text.split()
    if not words:
        return None  # preserve the old "nothing to correct" signal
    return " ".join(spell.correction(w) or w for w in words)
def use_google_speech_recognition(audio,r):
    """Recognise Italian speech via Google, spell-correct it, and act on it."""
    recognized_text = ""
    # Handle the case where no text could be recognised at all.
    try:
        recognized_text = r.recognize_google(audio,language="it-IT", show_all=False)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
        return
    print("Google Speech Recognition thinks you said: " + recognized_text)
    corrected_text = correct_text(recognized_text)
    # Check if the corrected_text is a NoneType
    if corrected_text is None:
        print("Google Speech Recognition could not understand audio")
    else:
        print("Google Speech Recognition thinks you said: " + corrected_text)
        # Save the captured audio to disk.
        # NOTE(review): despite the filename, this writes the original
        # recorded audio, not audio of the corrected text.
        with open("corrected_text.wav", "wb") as f:
            f.write(audio.get_wav_data())
        if corrected_text == "aggiungi comando rapido":
            new_command = input("What is the new quick command you want to add? ")
            new_function = input("What function should be executed when the command is triggered? ")
            # SECURITY: eval() on free-form user input executes arbitrary
            # code; only safe in a fully trusted, local context.
            commands.append((new_command, eval(new_function)))
            with open("quick_commands.txt", "a") as f:
                f.write(f"{new_command},{new_function}\n")
        else:
            # Type the recognised (not the corrected) text at the cursor.
            # NOTE(review): typing recognized_text instead of corrected_text
            # may be unintentional -- confirm.
            pyautogui.typewrite(recognized_text)
        for command, function in commands:
            if recognized_text == command:
                print(command, function)
                function()
def use_deep_speech(audio, model):
    """Recognise speech with a loaded DeepSpeech model and act on it.

    NOTE(review): calling .predict() on a tf.saved_model.load() result and
    feeding raw WAV bytes as float32 looks unlikely to work as written --
    verify against the DeepSpeech/TensorFlow inference API.
    """
    audio_data = audio.get_wav_data()
    input_data = tf.constant(audio_data, dtype=tf.float32)
    input_data = tf.reshape(input_data, [1, -1, 1])
    prediction = model.predict(input_data)
    recognized_text = prediction.numpy()[0]
    print("DeepSpeech thinks you said: " + recognized_text)
    if recognized_text == "aggiungi comando rapido":
        new_command = input("What is the new quick command you want to add? ")
        new_function = input("What function should be executed when the command is triggered? ")
        # SECURITY: eval() on free-form user input executes arbitrary code.
        commands.append((new_command, eval(new_function)))
        with open("quick_commands.txt", "a") as f:
            f.write(f"{new_command},{new_function}\n")
    else:
        # Type the recognised text at the current cursor position.
        pyautogui.typewrite(recognized_text)
    for command, function in commands:
        if recognized_text == command:
            function()
def inizialized():
    """Interactive start-up: pick a recogniser and load saved quick commands.

    Returns a configured speech_recognition.Recognizer.

    BUG FIXES: the original left ``r`` unbound when the user chose "d" or
    an invalid option (UnboundLocalError on ``return r``), and the
    quick-command loading below sat *after* the return statement, making
    it unreachable dead code.  The recogniser is now always created and
    the quick commands are loaded before returning.
    """
    print("Start Inizialization")
    choice = input("Do you want to use Google Speech Recognition or DeepSpeech? (g/d): ")
    # Always create a recogniser so every path returns a usable object.
    r = sr.Recognizer()
    if choice == "g":
        # Minimum audio energy to consider for recording.
        r.energy_threshold = 4000
    elif choice == "d":
        # Load the DeepSpeech model.  NOTE(review): the main loop below
        # still calls the Google recogniser regardless of this choice.
        model = tf.saved_model.load("deepspeech-0.6.1-models/deepspeech-0.6.1-models")
    else:
        print("Invalid choice")
    # Load previously saved quick commands (was dead code after `return`).
    try:
        with open("quick_commands.txt", "r") as f:
            for line in f:
                command, function = line.strip().split(",")
                # SECURITY: eval() on file contents -- trusted input only.
                commands.append((command, eval(function)))
    except FileNotFoundError:
        # Create an empty file in the current directory if it doesn't exist
        with open("quick_commands.txt", "w") as f:
            pass
    return r
r = inizialized()
while True:
    # Capture audio from the default microphone, then hand it to the
    # Google recogniser.
    with sr.Microphone() as source:
        print("Say something!")
        audio = r.listen(source)
    use_google_speech_recognition(audio,r=r)
| StefanoMuscat/SpeechRec | Main02.py | Main02.py | py | 4,590 | python | en | code | 0 | github-code | 36 |
27102778456 | from django.shortcuts import render
import json
from django.core import serializers
from django.http import (
HttpResponse,
HttpResponseRedirect,
JsonResponse,
)
from django.template import loader
from django.core.urlresolvers import reverse_lazy
from django.contrib.auth.decorators import login_required
from django.views.generic import (
ListView,
View
)
from django.views.generic.detail import DetailView
from django.views.generic.edit import (
CreateView,
UpdateView,
DeleteView
)
from django.http import JsonResponse
from django.views.generic import TemplateView
from django.contrib.messages.views import SuccessMessageMixin
from .models import MateriaPrima
from proveedores.mixins import JSONResponseMixin
from .forms import MateriaPrimaForm
from loginusers.mixins import LoginRequiredMixin
class ListarMateriaPrima(LoginRequiredMixin, JSONResponseMixin, ListView):
    """JSON autocomplete endpoint listing raw materials, filterable by name."""

    model = MateriaPrima
    template_name = 'materiaprima_list.html'
    paginate_by = 5

    def get(self, request, *args, **kwargs):
        # Always answer with JSON; the template is never rendered directly.
        self.object_list = self.get_queryset()
        return self.render_to_json_response()

    def get_data(self):
        # Shape expected by the autocomplete widget: id + display value.
        return [{'id': mp.id, 'value': mp.nombre} for mp in self.object_list]

    def get_queryset(self):
        term = self.request.GET.get('term', None)
        if not term:
            return super(ListarMateriaPrima, self).get_queryset()
        return self.model.objects.filter(nombre__icontains=term)
class CrearMateriaPrima(LoginRequiredMixin, SuccessMessageMixin, CreateView):
    """Create a raw material, flashing a success message and returning to the form."""
    model = MateriaPrima
    success_url = reverse_lazy('materiaprim:materiaPrimaForm')
    form_class = MateriaPrimaForm
    success_message = 'La materia prima %(nombre)s se registro en el sistema'
class ModificarMateriaPrima(LoginRequiredMixin, SuccessMessageMixin, UpdateView):
    """Edit a raw material looked up by its `id` slug, flashing a success message."""
    model = MateriaPrima
    slug_field = 'id'
    slug_url_kwarg = 'id'
    form_class = MateriaPrimaForm
    success_url = reverse_lazy('materiaprim:materiaPrimaForm')
    success_message = 'Los datos de la materia prima %(nombre)s se actualizaron'
class ActualizarEstadoView(JSONResponseMixin, View):
    """Disable (soft-delete) a raw material via POST and report the result as JSON."""

    object = None
    relacion = None

    def post(self, request):
        id = self.request.POST.get('id', None)
        materia = None
        try:
            materia = MateriaPrima.objects.get(id=id)
        except MateriaPrima.DoesNotExist:
            # Lookup failed: leave self.object as None so get_data reports it.
            self.object = materia
        if materia is not None:
            materia.estado = False  # soft delete: flag the row instead of removing it
            materia.save()
            self.object = materia
        return self.render_to_json_response()

    def get_data(self):
        if self.object is not None:
            data = {
                # bug fix: message previously read "materia rima"
                'message': 'Se inhabilito la materia prima',
            }
        else:
            # NOTE(review): this branch is reached when the material was not
            # found, yet the message claims it is "asociada" -- confirm intent.
            data = {
                'message': 'Esta materia prima se encuentra asociada'
            }
        return data
class ConsultarMateriaPrima(LoginRequiredMixin, JSONResponseMixin, DetailView):
    """Return one raw material as JSON, looked up by pk or by the `id` slug."""
    model = MateriaPrima
    slug_field = 'id'
    slug_url_kwarg = 'id'
    def get(self, request, *args, **kwargs):
        self.object = self.get_object()
        return self.render_to_json_response()
    def get_data(self):
        # Serialize the found object, or a 404-style payload when it is None.
        if self.object is not None:
            data = {
                'status': 200,
                'materia':{
                    'id': self.object.id,
                    'nombre': self.object.nombre,
                    'descripcion': self.object.descripcion,
                    'unidad_medida': self.object.unidad_medida.nombre,
                    'categoria': self.object.categoria.nombre,
                    'cantidad': self.object.cantidad,
                    'estado': self.object.estado
                }
            }
        else:
            data = {
                'status': 404,
                'message': 'La materia prima no se encuentra registrada'
            }
        return data
    def get_object(self, queryset=None):
        # Mirrors Django's SingleObjectMixin.get_object, but returns None
        # instead of raising Http404 when the row does not exist, so get_data
        # can emit the JSON 404 payload above.
        if queryset is None:
            queryset = self.get_queryset()
        pk = self.kwargs.get(self.pk_url_kwarg)
        slug = self.kwargs.get(self.slug_url_kwarg)
        if pk is not None:
            queryset = queryset.filter(pk=pk)
        if slug is not None and (pk is None or self.query_pk_and_slug):
            slug_field = self.get_slug_field()
            queryset = queryset.filter(**{slug_field: slug})
        if pk is None and slug is None:
            raise AttributeError("Generic detail view %s must be called with "
                                 "either an object pk or a slug."
                                 % self.__class__.__name__)
        try:
            obj = queryset.get()
        except queryset.model.DoesNotExist:
            obj = None
        return obj
class MateriaPrimaView(LoginRequiredMixin, TemplateView):
    """Render the raw-material form page with an unbound form instance."""

    template_name = 'materiaprima/materiaprima_form.html'

    def get_context_data(self, **kwargs):
        context = super(MateriaPrimaView, self).get_context_data(**kwargs)
        context['form'] = MateriaPrimaForm()
        return context
| IvanVilla1585/RefrescosChupiFlum | ChupiFlum/materiaprima/views.py | views.py | py | 5,202 | python | en | code | 1 | github-code | 36 |
7027004421 | from cgitb import small
from string import ascii_lowercase
def createDict(brailleLetters, brailleKey):
    """Fill brailleKey with ord('a')..ord('z') -> braille pattern, in order.

    Consumes brailleLetters front-to-back, exactly like the original
    remove-first-then-break nested loop did.
    """
    for code in (ord(letter) for letter in ascii_lowercase):
        if not brailleLetters:
            break
        brailleKey[code] = brailleLetters.pop(0)
def split(word):
    """Return the characters of `word` as a list."""
    return list(word)
def printLetters(dict, word):
    """Translate `word` into a concatenated string of 6-bit braille cells."""
    cells = []
    for letter in word:
        code = ord(letter)
        if code in dict:
            cells.append(dict[code])
        elif code < 97 and code != 32:
            # Uppercase letter: emit the capital-sign cell, then the cell of
            # the corresponding lowercase letter (ord offset +32).
            cells.append("000001" + dict[code + 32])
        else:
            # Space (or anything unmapped): a blank cell.
            cells.append("000000")
    return "".join(cells)
def capitalCalc(dict, unicode):
    """Return the capital-sign cell plus the lowercase cell for uppercase `unicode`."""
    return "000001" + dict[unicode + 32]
def solution(word):
    """Translate `word` to its braille representation (Google Foobar challenge)."""
    patterns = ["100000", "110000", "100100", "100110", "100010", "110100", "110110", "110010", "010100", "010110",
                "101000", "111000", "101100", "101110", "101010", "111100", "111110", "111010", "011100", "011110",
                "101001", "111001", "010111", "101101", "101111", "101011"]
    mapping = {}
    createDict(brailleLetters=patterns, brailleKey=mapping)
    return printLetters(mapping, word)
solution("code") | AllenNotAlan/googleFoobar | brailleSolution.py | brailleSolution.py | py | 1,412 | python | en | code | 0 | github-code | 36 |
41649203624 | from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, Http404
from django.urls import reverse
from app.models import Product, Cart, CartItem, Category
def add_product(request):
    """Render the add-product form (GET) or create a product from POST data."""
    if request.method == 'GET':
        return render(request, 'add_product.html', {})
    if request.method == 'POST':
        name = request.POST.get('name')
        quantity = request.POST.get('quantity')
        try:
            Product.objects.create(name=name, quantity=quantity)
            return render(request, 'add_product.html', {'error': 'product created successfully'})
        except ValueError:
            # Presumably raised when `quantity` is not a valid number -- TODO confirm.
            return render(request, 'add_product.html', {'error': 'complete all fields'})
def product_list(request):
    """List all products, optionally filtered by the ?q= substring on the name."""
    if request.method == 'GET':
        query = request.GET.get('q')
        products = Product.objects.all()
        if query:
            products = products.filter(name__icontains=query)
        # Categories without children -- presumably the leaf categories shown
        # as filters; confirm against the Category model's related names.
        leaf_categories = Category.objects.filter(child__isnull=True)
        return render(request, 'product_list.html', {'p_list': products, 'cat_list': leaf_categories})
def product_info(request, pk):
    """Show the detail page for a single product; 404 if it does not exist."""
    product = get_object_or_404(Product, pk=pk)
    return render(request, 'product_info.html', {'product': product})
@login_required
def add_to_cart(request):
    """Add `quantity` units of product `pk` to the user's open cart (POST only).

    NOTE(review): a non-POST request falls through and implicitly returns
    None -- confirm the URLconf only routes POST requests here.
    """
    if request.method == 'POST':
        quantity = request.POST.get('quantity') or '0'
        pk = request.POST.get('pk')
        user = request.user
        # Reuse the user's open cart, creating one on first use.
        cart, created = Cart.objects.get_or_create(user=user, status='open')
        product = get_object_or_404(Product, pk=pk)
        cart_item, created2 = CartItem.objects.get_or_create(products=product, cart=cart)
        cart_item.quantity += int(quantity)
        cart_item.save()
        return HttpResponseRedirect(reverse('cart'))
def cart(request):
    """Show the items of the current user's open cart, if one exists."""
    user = request.user
    cart_obj = user.carts.filter(status='open')
    if cart_obj:
        return render(request, 'cart.html', {'cart_items': cart_obj[0].items.all()})
    else:
        # bug fix: message previously read "You have any cart."
        return render(request, 'cart.html', {'error': "You don't have any cart."})
def remove_from_cart(request):
    """Delete one cart item identified by POST `pk`, then return to the cart page."""
    if request.method == 'POST':
        pk = request.POST.get('pk')
        cart_item = get_object_or_404(CartItem, pk=pk)
        cart_item.delete()
        return HttpResponseRedirect(reverse('cart'))
def category_view(request, name):
    """List the products of a category; 404 for unknown or non-leaf categories."""
    if request.method == 'GET':
        category = get_object_or_404(Category, name=name)
        # Categories that have children cannot be browsed directly.
        if category.child.count() >= 1:
            raise Http404()
        return render(request, 'category.html', {'p_list': category.product_set.all()})
| arash-ataei-solut/shop-practice | app/views.py | views.py | py | 2,928 | python | en | code | 0 | github-code | 36 |
17106762661 | import os
import copy
from typing import Set
from collections import defaultdict
# Puzzle input lives next to this script; keep only non-empty lines.
inputPath = os.path.join(os.path.dirname(__file__), "input")
with open(inputPath, "r") as inputFile:
    lines = [line.strip() for line in inputFile.readlines() if line.strip()]
class Pos:
    """An integer 3-D grid position, hashable and comparable by coordinates."""

    def __init__(self, x: int, y: int, z: int) -> None:
        self.x = x
        self.y = y
        self.z = z

    def neighbours(self, reach=1, includeSelf=False):
        """Yield every position within `reach` along each axis, z-major order.

        The self position (all offsets zero) is included only when
        `includeSelf` is True; this unifies the two duplicated loops of the
        original into a single filtered generator.
        """
        span = range(-reach, reach + 1, 1)
        for dz in span:
            for dy in span:
                for dx in span:
                    if includeSelf or not (dz == 0 and dy == 0 and dx == 0):
                        yield Pos(self.x + dx, self.y + dy, self.z + dz)

    def __hash__(self) -> int:
        return hash(repr(self))

    def __eq__(self, o) -> bool:
        return self.x == o.x and self.y == o.y and self.z == o.z

    def __str__(self) -> str:
        return "(%s,%s,%s)" % (self.x, self.y, self.z)

    __repr__ = __str__
class HyperPos(Pos):
    """A 4-D grid position extending Pos with a w coordinate."""

    def __init__(self, x: int, y: int, z: int, w: int) -> None:
        super(HyperPos, self).__init__(x, y, z)
        self.w = w

    def neighbours(self, reach=1, includeSelf=False):
        """Yield every position within `reach` along the four axes (w innermost)."""
        span = range(-reach, reach + 1, 1)
        for dz in span:
            for dy in span:
                for dx in span:
                    for dw in span:
                        if includeSelf or not (dz == 0 and dy == 0 and dx == 0 and dw == 0):
                            yield HyperPos(self.x + dx, self.y + dy, self.z + dz, self.w + dw)

    def __hash__(self) -> int:
        return hash(repr(self))

    def __eq__(self, o) -> bool:
        return self.x == o.x and self.y == o.y and self.z == o.z and self.w == o.w

    def __str__(self) -> str:
        return "(%s,%s,%s,%s)" % (self.x, self.y, self.z, self.w)

    __repr__ = __str__
class Dimension:
    """An infinite 3-D Conway-cubes grid, stored sparsely as the set of active cubes."""
    def __init__(self, cubes: Set[Pos]):
        self.cubes = cubes
    @staticmethod
    def FromLines(lines):
        """Parse the 2-D puzzle input into active cubes on the z=0 plane."""
        cubes: Set[Pos] = set()
        for y, line in enumerate(lines):
            for x, char in enumerate(list(line)):
                active = char == "#"
                if active:
                    pos = Pos(x, y, 0)
                    cubes.add(pos)
        return Dimension(cubes)
    def neighbourhood(self) -> Set[Pos]:
        """Return every cube that is active or adjacent to an active cube."""
        neighbourhood = set()
        for cube in self.cubes:
            for neighbour in cube.neighbours(reach=1, includeSelf=True):
                neighbourhood.add(neighbour)
        return neighbourhood
    def isActive(self, pos: Pos) -> bool:
        return pos in self.cubes
    def neighboursActiveInRange(self, pos: Pos, min=None, max=None):
        """Return True if the count of active neighbours of `pos` is in [min, max]."""
        activeNeighbours = 0
        for neighbour in pos.neighbours(reach=1):
            if self.isActive(neighbour):
                activeNeighbours += 1
                # Early exit: once the count exceeds `max` the rule cannot hold.
                if activeNeighbours > max:
                    return False
        return activeNeighbours >= min
    def firstRule(self, cube: Pos) -> bool:
        """Return True if the cube shall be activated by the first rule:
        The cube must be active.
        If exactly 2 or 3 neighbors are active, the cube remains active.
        Otherwise, the cube becomes inactive.
        """
        assert self.isActive(cube)
        return self.neighboursActiveInRange(cube, min=2, max=3)
    def secondRule(self, cube: Pos) -> bool:
        """Return True if the cube shall be activated by the second rule:
        The cube must be inactive.
        If exactly 3 neighbors are active, the cube becomes active.
        Otherwise, the cube remains inactive.
        """
        assert not self.isActive(cube)
        return self.neighboursActiveInRange(cube, min=3, max=3)
    def layer(self, z: int) -> str:
        """Render slice z as '#'/'.' text.

        NOTE: sizes the canvas from the module-global `lines`, not from the
        cube coordinates, so far-away cubes could index out of range.
        """
        radius = len(lines[0]) * 2
        center = int((len(lines[0])) / 2)
        cubes = [["." for i in range(radius)] for i in range(radius)]
        for cube in [c for c in self.cubes if c.z == z]:
            cubes[cube.y + center][cube.x + center] = "#"
        return "\n".join("".join(c) for c in cubes)
    def cycle(self):
        """Advance the grid one step by applying both rules over the neighbourhood."""
        buffer = set()
        for pos in self.neighbourhood():
            if self.isActive(pos):
                if self.firstRule(pos):
                    buffer.add(pos)
            else:
                if self.secondRule(pos):
                    buffer.add(pos)
        self.cubes = copy.deepcopy(buffer)
class HyperDimension(Dimension):
    """A Dimension of 4-D HyperPos cubes.

    `cycle` is inherited unchanged: the previous override duplicated
    Dimension.cycle verbatim and has been removed.
    """

    @staticmethod
    def FromLines(lines):
        """Parse the 2-D puzzle input into active cubes on the z=0, w=0 slice."""
        cubes: Set[HyperPos] = set()
        for y, line in enumerate(lines):
            for x, char in enumerate(list(line)):
                if char == "#":
                    cubes.add(HyperPos(x, y, 0, 0))
        return HyperDimension(cubes)

    def layer(self, z: int, w: int) -> str:
        """Render one (z, w) slice as '#'/'.' text, centred on the input grid."""
        radius = len(lines[0]) * 2
        center = int((len(lines[0])) / 2)
        cubes = [["." for i in range(radius)] for i in range(radius)]
        for cube in [c for c in self.cubes if c.z == z and c.w == w]:
            cubes[cube.y + center][cube.x + center] = "#"
        return "\n".join("".join(c) for c in cubes)
def solve1():
    """Part 1: run six cycles on the 3-D dimension and count active cubes."""
    dim = Dimension.FromLines(lines)
    print(dim.layer(0))
    for _ in range(6):
        dim.cycle()
    return len(dim.cubes)
def solve2():
    """Part 2: run six cycles on the 4-D hyperdimension and count active cubes."""
    dim = HyperDimension.FromLines(lines)
    print(dim.layer(0, 0))
    for _ in range(6):
        dim.cycle()
    return len(dim.cubes)
# Print the answers for both puzzle parts when the script is executed.
print(solve1())
print(solve2())
| mmmaxou/advent-of-code | 2020/day-17/answer.py | answer.py | py | 6,664 | python | en | code | 0 | github-code | 36 |
73881391462 | import os
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import nltk
from KaggleWord2VecUtility import KaggleWord2VecUtilityClass
from textblob import TextBlob
# if __name__ == '__main__':
# Read the labelled training set, the unlabelled test set, and the extra
# unlabelled reviews shipped with the Kaggle "Bag of Words" tutorial data.
train = pd.read_csv(os.path.join(os.path.dirname(
    __file__), 'data', 'labeledTrainData.tsv'), header=0, delimiter="\t", quoting=3)
test = pd.read_csv(os.path.join(os.path.dirname(__file__),
                   'data', 'testData.tsv'), header=0, delimiter="\t", quoting=3)
unlabeled_train = pd.read_csv(os.path.join(os.path.dirname(
    __file__), 'data', "unlabeledTrainData.tsv"), header=0, delimiter="\t", quoting=3)
print("The first review is:")
print(train["review"][0])
input("Press Enter to continue...")
# [2] Clean the training and test sets
# print("Download text data sets.")
# nltk.download()  # Download text data sets, including stop words
clean_train_reviews = []
print("Cleaning and parsing the training set movie reviews...\n")
# Normalize each review to a space-joined word list (HTML/stopword handling
# lives in the project-local KaggleWord2VecUtilityClass helper).
for i in range(0, len(train["review"])):
    clean_train_reviews.append(
        " ".join(KaggleWord2VecUtilityClass.review_to_wordlist(train["review"][i], True)))
print("Creating the bag of words...\n")
# Bag-of-words features capped at the 5000 most frequent terms.
vectorizer = CountVectorizer(analyzer="word", tokenizer=None,
                             preprocessor=None, stop_words=None, max_features=5000)
train_data_features = vectorizer.fit_transform(clean_train_reviews)
train_data_features = train_data_features.toarray()
print("Training the random forest (this may take a while)...")
forest = RandomForestClassifier(n_estimators=1000)
forest = forest.fit(train_data_features, train["sentiment"])
clean_test_reviews = []
print("Cleaning and parsing the test set movie reviews...\n")
for i in range(0, len(test["review"])):
    clean_test_reviews.append(
        " ".join(KaggleWord2VecUtilityClass.review_to_wordlist(test["review"][i], True)))
# Reuse the fitted vocabulary: transform (not fit_transform) the test set.
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
print("Predicting test labels...\n")
result = forest.predict(test_data_features)
output = pd.DataFrame(data={"id": test["id"], "sentiment": result})
output.to_csv(os.path.join(os.path.dirname(__file__), 'data',
                           'Bag_of_Words_model.csv'), index=False, quoting=3)
print("Wrote results to Bag_of_Words_model.csv")
# TextBlob sentiment analysis on the same cleaned reviews, for comparison.
predicted_sentiments = []
for review in clean_test_reviews:
    analysis = TextBlob(review)
    # TextBlob returns polarity in the range [-1, 1].
    # We'll classify reviews with polarity > 0 as positive (sentiment = 1)
    if analysis.sentiment.polarity > 0:
        predicted_sentiments.append(1)
    else:
        predicted_sentiments.append(0)
output = pd.DataFrame(
    data={"id": test["id"], "sentiment": predicted_sentiments})
output.to_csv(os.path.join(os.path.dirname(__file__), 'data',
                           'TextBlob_Predictions.csv'), index=False, quoting=3)
print("Wrote results to TextBlob_Predictions.csv")
# The block below is disabled evaluation code kept as a string literal.
"""# [3] Evaluate the model
# 1. Load the CSV file into a DataFrame
df = pd.read_csv('Bag_of_Words_model.csv')
# 2. Extract the ratings from the `id` column
df['rating'] = df['id'].str.split('_').str[-1].astype(int)
# 3. Compute the predicted sentiment based on the extracted ratings
df['predicted_sentiment'] = df['rating'].apply(lambda x: 1 if x >= 5 else 0)
# 4. Compare the predicted sentiment with the actual sentiment to compute the accuracy
correct_predictions = (df['sentiment'] == df['predicted_sentiment']).sum()
total_predictions = len(df)
accuracy = correct_predictions / total_predictions * 100
print(f'Accuracy: {accuracy:.2f}%')"""
| Jacques-Ludik/SentimentAnalysis | main.py | main.py | py | 3,702 | python | en | code | 0 | github-code | 36 |
6348823299 | # This is a sample Python script.
# Press Shift+F10 to execute it or replace it with your code.
# Press Double Shift to search everywhere for classes, files, tool windows, actions, and settings.
# Press the green button in the gutter to run the script.
import requests
import bs4
def get_document(url):
    """Fetch `url` and return a parsed BeautifulSoup document, or None on failure."""
    response = requests.get(url)
    if response.status_code != 200:
        print("Getting code is failure")
        return None
    # Parse the HTML body and hand back the bs4 instance.
    return bs4.BeautifulSoup(response.text, "html.parser")
def find_image_links(soup):
    """Return the href of every <a> that is a direct child of a div.separator."""
    anchors = soup.select("div.separator > a")
    return [anchor.get('href') for anchor in anchors]
# Target blog post whose inline images we want to extract.
url = "https://deblur99.blogspot.com/2021/07/uploading21-07-21.html"
soup = get_document(url)
f = open("./link_list.txt", 'w')
if soup is not None:
    linkList = find_image_links(soup)
    # Manual exclusions: blank out unwanted entries by index if needed.
    # linkList[6] = ""
    # linkList[24] = ""
    for link in linkList:
        if link != "":
            f.write(f'<img src=\"{link}\">\n')
f.close()
# See PyCharm help at https://www.jetbrains.com/help/pycharm/
| deblur99/getURLsFromBlogger | main.py | main.py | py | 1,225 | python | en | code | 0 | github-code | 36 |
11369140618 | from http import HTTPStatus
from typing import Any
import httpx
from config import config
class UserClient:
    """HTTP client for the /api/v1/users/ endpoints."""
    def __init__(self, url: str):
        self.url = f'{url}/api/v1/users/'
    def registrate(self, username: str, tgid: int):
        """Register a Telegram user; return False if they already exist (HTTP 409)."""
        users = {'username': username, 'tgid': tgid}
        response = httpx.post(self.url, json=users)
        if response.status_code == HTTPStatus.CONFLICT:
            return False
        response.raise_for_status()
        return True
    def get_by_tgid(self, tgid: int):
        """Fetch a user record by Telegram id."""
        response = httpx.get(f'{self.url}telegram/{tgid}')
        response.raise_for_status()
        return response.json()
    def get_products_by_user(self, user_id: int):
        """Fetch the products associated with one user."""
        response = httpx.get(f'{self.url}{user_id}/products/')
        response.raise_for_status()
        return response.json()
class CategoriesClient:
    """HTTP client for the /api/v1/categories/ endpoints."""

    def __init__(self, url: str):
        self.url = f'{url}/api/v1/categories/'

    def get_categories(self) -> list[dict[str, Any]]:
        """Return every category."""
        resp = httpx.get(self.url)
        resp.raise_for_status()
        return resp.json()

    def get_categories_by_name(self, name: str) -> list[dict[str, Any]]:
        """Return categories whose title matches `name`."""
        resp = httpx.get(self.url, params={'title': name})
        resp.raise_for_status()
        return resp.json()

    def get_products(self, category_id: int) -> list[dict[str, Any]]:
        """Return the products belonging to one category."""
        resp = httpx.get(f'{self.url}{category_id}/products/')
        resp.raise_for_status()
        return resp.json()
class ProductsClient:
    """HTTP client for the /api/v1/products/ endpoint."""

    def __init__(self, url: str):
        self.url = f'{url}/api/v1/products/'

    def add(self, category_id: int, title: str, user_id: int) -> dict[str, Any]:
        """Create a product and return the created resource."""
        body = {
            'category_id': category_id,
            'title': title,
            'user_id': user_id,
        }
        resp = httpx.post(self.url, json=body)
        resp.raise_for_status()
        return resp.json()
class ChoosesClient:
    """HTTP client for the /api/v1/chooses/ endpoint."""
    def __init__(self, url: str):
        self.url = f'{url}/api/v1/chooses/'
    def choose_products(self, source_product_id: int, target_product_id: int) -> dict[str, Any]:
        """Record a choice between two products and return the created resource."""
        payload = {
            'source_product_id': source_product_id,
            'target_product_id': target_product_id,
        }
        response = httpx.post(self.url, json=payload)
        response.raise_for_status()
        return response.json()
class ApiClient:
    """Facade bundling every sub-client for one API base URL."""
    def __init__(self, url: str):
        self.products = ProductsClient(url=url)
        self.categories = CategoriesClient(url=url)
        self.users = UserClient(url=url)
        self.chooses = ChoosesClient(url=url)
# Module-level singleton used by the bot handlers.
api = ApiClient(url=config.http_key)
| learn-python-sfnl/tgbot | tgbot/api.py | api.py | py | 2,675 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.