| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
47206175294 | # -*- coding: utf-8 -*-
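# A simple data holder for a jukebox record: artist, A-side, B-side, release year, and genre.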
class Record(object):
def __init__(self,artist,aside,bside,year,genre):
self.artist = artist
self.aside = aside
self.bside = bside
self.year = year
self.genre = genre
a_1 = Record("MC5","Kick Out The Jams","Motor City Is Burning",1969,"Rock")
a_2 = Record("808 State","Pacific-707","Pacific-B",1989,"Techno")
a_3 = Record("De La Soul","Breakadawn","En Focus",1993,"Rap")
a_4 = Record("Iron Maiden","2 Minutes To Midnight","Rainbows Gold",1984,"Metal")
a_5 = Record("Doug E. Fresh And The Get Fresh Crew","The Show","La Di Da Di",1985,"Rap")
a_6 = Record("Fela Kuti & Africa 70","Lady","Shakara Oloje",1974,"World")
a_7 = Record("Gil Scott-Heron","The Revolution Will Not Be Televised","Home Is Where The Hatred Is",1971,"Soul")
a_8 = Record("Donna Summer","I Feel Love","Spring Affair",1977,"Disco")
b_1 = Record("Anthrax","I Am The Law","Bud E. Luvbomb And Satan\'s Lounge Band",1987,"Metal")
b_2 = Record("Wu-Tang Clan","C.R.E.A.M. (Cash Rules Everything Around Me)","Da Mystery Of Chessboxin\'",1993,"Rap")
b_3 = Record("Prince","1999","How Come U Don\'t Call Me Anymore?",1982,"Funk")
b_4 = Record("Black Sabbath","Sweet Leaf","After Forever",1971,"Metal")
b_5 = Record("The Stooges","Search And Destroy","Penetration",1973,"Rock")
b_6 = Record("Big Moe","Purple Stuff","Confidential Playa",2000,"Rap")
b_7 = Record("Fear","I Love Livin In The City","Now Your Dead (Musta Bin Somthin You Said)",1978,"Rock")
b_8 = Record("Adonis","Do It Properly","No Way Back",1986,"House")
c_1 = Record("XTC","Earn Enough For Us","Sacrificial Bonfire",1986,"Rock")
c_2 = Record("Dick Dale", "Misirlou Twist","Peppermint Man",1963,"Rock")
c_3 = Record("Megadeth","Wake Up Dead","Black Friday",1986,"Metal")
c_4 = Record("Run-D.M.C.","My Adidas","Peter Piper",1986,"Rap")
c_5 = Record("Aretha Franklin","Chain Of Fools","Prove It",1967,"Soul")
c_6 = Record("The Selecter","Three Minute Hero","James Bond",1980,"Ska")
c_7 = Record("Screamin\' Jay Hawkins","I Put A Spell On You","Little Demon",1956,"Blues")
c_8 = Record("Lipps, Inc.","Funkytown","All Night Dancing",1979,"Disco")
d_1 = Record("The Police","Message In A Bottle","Landlord",1979,"Rock")
d_2 = Record("Faith No More","From Out Of Nowhere","Cowboy Song",1989,"Metal")
d_3 = Record("James Brown","Get Up Offa That Thing","Release The Pressure",1976,"Funk")
d_4 = Record("Killing Joke","Wardance","Pssyche",1980,"Rock")
d_5 = Record("LFO","LFO (The Leeds Warehouse Mix)","Track 4",1990,"Techno")
d_6 = Record("Marvin Gaye","What\'s Going On","God Is Love",1970,"Soul")
d_7 = Record("Biz Markie","Biz Is Goin\' Off","The Do Do (Bonus Beats)",1988,"Rap")
d_8 = Record("David Bowie","Heroes","V2 Schneider",1977,"Rock")
e_1 = Record("Johnny Cash","Folsom Prison Blues","The Folk Singer",1968,"Country")
e_2 = Record("Nina Simone","Four Women","What More Can I Say",1966,"Jazz")
e_3 = Record("Michael Jackson","P.Y.T. (Pretty Young Thing)","Working Day And Night",1982,"Pop")
e_4 = Record("Mobb Deep","Shook Ones, Part II", "Shook Ones, Part I", 1995,"Rap")
e_5 = Record("Frank Zappa","Cosmik Debris","Uncle Remus",1974,"Rock")
e_6 = Record("Yes","Roundabout","Long Distance Runaround",1972,"Rock")
e_7 = Record("Funkadelic","I\'ll Bet You","Qualify & Satisfy",1969,"Funk")
e_8 = Record("Dead Kennedys","Halloween","Saturday Night Holocaust",1982,"Rock")
f_1 = Record("LL Cool J","Go Cut Creator Go","Kanday",1987,"Rap")
f_2 = Record("Madonna","Vogue","Keep It Together",1990,"Pop")
f_3 = Record("Pete Rock & C.L. Smooth", "They Reminisce Over You (T.R.O.Y.)","Straighten It Out", 1992, "Rap")
f_4 = Record("The Beach Boys","Good Vibrations","Wouldn\'t It Be Nice",1967,"Pop")
f_5 = Record("Public Enemy", "Bring The Noise","Sophisticated",1987,"Rap")
f_6 = Record("Madness","One Step Beyond","Mistakes",1979,"Ska")
f_7 = Record("The White Stripes","Seven Nation Army","Good To Me",2003,"Rock")
f_8 = Record("Talking Heads","Once In A Lifetime","Seen And Not Seen",1980,"Rock")
g_1 = Record("Simon & Garfunkel","Mrs. Robinson","Scarborough Fair (Canticle)",1968,"Folk")
g_2 = Record("Creedence Clearwater Revival","Run Through The Jungle","Up Around The Bend",1970,"Rock")
g_3 = Record("Ice Cube","Steady Mobbin\'","Us",1991,"Rap")
g_4 = Record("Laura Branigan","Self Control","Silent Partners",1984,"Pop")
g_5 = Record("St. Vincent","Digital Witness","Rio",2014,"Rock")
g_6 = Record("The Meters","Cissy Strut","Here Comes The Meter Man",1969,"Funk")
g_7 = Record("Echo & The Bunnymen","The Killing Moon","Do It Clean",1983,"Rock")
g_8 = Record("Howlin\' Wolf","Smoke Stack Lightning","You Can\'t Be Beat",1956,"Blues")
h_1 = Record("Jerry Reed","Amos Moses","The Preacher And The Bear",1970,"Country")
h_2 = Record("Raekwon","Ice Cream","Incarcerated Scarfaces",1995,"Rap")
h_3 = Record("Patsy Cline","Crazy","Who Can I Count On",1961,"Country")
h_4 = Record("France Gall","La Cloche","Jazz à Gogo",1964,"World")
h_5 = Record("João Gilberto & Stan Getz","The Girl From Ipanema","Vivo Sonhando",1964,"World")
h_6 = Record("The Flamingos","I Only Have Eyes For You","Goodnight Sweetheart",1959,"Doo Wop")
h_7 = Record("Bobby \'Blue\' Bland","I\'ll Take Care Of You","That\'s Why",1959,"Soul")
h_8 = Record("DJ Quik","Jus Like Compton","Tonite",1992,"Rap")
i_1 = Record("Gang Starr","Take It Personal","DWYCK",1992,"Rap")
i_2 = Record("Jon Lucien","Lady Love","Satan",1973,"Soul")
i_3 = Record("The Beatles","Strawberry Fields Forever","Penny Lane",1967,"Rock")
i_4 = Record("Blondie","Heart of Glass","Hanging On The Telephone",1978,"Rock")
i_5 = Record("Depeche Mode","Personal Jesus","Dangerous",1989,"Pop")
i_6 = Record("The Go! Team","Ladyflash","Ladyflash (Hot Chip Remix)",2004,"Pop")
i_7 = Record("Otis Redding","(Sittin\' On) The Dock Of The Bay","Sweet Lorene",1967,"Soul")
i_8 = Record("Talk Talk","It\'s My Life","Does Caroline Know?",1984,"Pop")
j_1 = Record("Nirvana","Heart-Shaped Box","Marigold",1993,"Rock")
j_2 = Record("Stevie Wonder","Higher Ground","Too High",1973,"Funk")
j_3 = Record("Tears For Fears","Everybody Wants To The Rule The World","Pharaohs",1985,"Pop")
j_4 = Record("The Fall","Cruiser\'s Creek","L.A.",1985,"Rock")
j_5 = Record("Sly & The Family Stone","Thank You (Falettinme Be Mice Elf Agin)","Everybody Is A Star",1969,"Funk")
j_6 = Record("The Kinks","Lola","Victoria",1970,"Rock")
j_7 = Record("Motorhead","Ace Of Spades","Dirty Love",1980,"Metal")
j_8 = Record("The Surfaris","Wipe Out","Surfer Joe",1963,"Rock")
| rubinoAM/JukeboxPy | record.py | record.py | py | 6,468 | python | en | code | 0 | github-code | 13 |
24367938105 | # Exercise 02
def every_other(array):
    # For every element sitting at an even index in the array,
    # print its sums with all the other numbers in the array
for index, num in enumerate(array): #O(n)
if index % 2 == 0:
for ob in array: #O(n)
print(str(num) + " + " + str(ob))
    # Complexity: O(n²)
every_other([1, 2, 3, 4, 5])
| julio-1610/LaboratorioADA-GC | Laboratorio04/every_other.py | every_other.py | py | 370 | python | es | code | 0 | github-code | 13 |
6434022729 | # pylint: disable=R0903
"""
'E1102': ('%s is not callable',
'Used when an object being called has been infered to a non \
callable object'),
"""
__revision__ = None
__revision__()
def correct():
"""callable object"""
return 1
__revision__ = correct()
class Correct(object):
"""callable object"""
class MetaCorrect(object):
"""callable object"""
def __call__(self):
return self
INSTANCE = Correct()
CALLABLE_INSTANCE = MetaCorrect()
CORRECT = CALLABLE_INSTANCE()
INCORRECT = INSTANCE()
LIST = []
INCORRECT = LIST()
DICT = {}
INCORRECT = DICT()
TUPLE = ()
INCORRECT = TUPLE()
INT = 1
INCORRECT = INT()
| raymondbutcher/perfectpython | pylib/pylint/test/input/func_typecheck_non_callable_call.py | func_typecheck_non_callable_call.py | py | 671 | python | en | code | 11 | github-code | 13 |
7407915901 | import os
import json
import pandas as pd
from barbell2_castor.api import CastorApiClient
""" -------------------------------------------------------------------------------------------
"""
class CastorToDict:
COLUMNS_TO_SKIP = [
'Participant Id',
'Participant Status',
'Site Abbreviation',
'Participant Creation Date',
'dpca_BMI',
]
FIELD_TYPES_TO_SKIP = ['remark', 'calculation']
""" -------------------------------------------------------------------------------------------
"""
class CastorApiToDict(CastorToDict):
def __init__(self, study_name, client_id, client_secret):
self.client = CastorApiClient(client_id, client_secret)
self.study_id = self.client.get_study_id(self.client.get_study(study_name))
def execute(self):
return self.client.get_study_data(self.study_id)
""" -------------------------------------------------------------------------------------------
"""
class CastorExcelToDict(CastorToDict):
def __init__(self, excel_file):
self.excel_file = excel_file
def execute(self):
# load options
df_opts = pd.read_excel(self.excel_file, sheet_name='Field options')
option_groups = {}
for _, row in df_opts.iterrows():
option_group_name = row['Option group name']
if option_group_name not in option_groups.keys():
option_groups[option_group_name] = {}
option_groups[option_group_name][str(row['Option value'])] = row['Option name']
# load variable definitions
df_vars = pd.read_excel(self.excel_file, sheet_name='Study variable list')
data = {}
for _, row in df_vars.iterrows():
field_type = row['Original field type']
if field_type in CastorToDict.FIELD_TYPES_TO_SKIP:
continue
field_name = row['Variable name']
data[field_name] = {
'field_type': field_type,
'field_options': None,
'field_values': [],
}
if field_type == 'radio' or field_type == 'dropdown':
data[field_name]['field_options'] = option_groups[row['Optiongroup name']]
else:
pass
# load data
df_data = pd.read_excel(self.excel_file, sheet_name='Study results')
for _, row in df_data.iterrows():
for field_name in row.keys():
if field_name in CastorToDict.COLUMNS_TO_SKIP or field_name.endswith('_calc'):
continue
field_value = str(row[field_name])
if pd.isna(field_value) or field_value == 'nan':
data[field_name]['field_values'].append('')
else:
try:
field_type = data[field_name]['field_type']
if field_type == 'radio' or field_type == 'dropdown':
data[field_name]['field_values'].append(str(int(float(field_value))))
elif field_type == 'numeric':
data[field_name]['field_values'].append(str(float(field_value)))
elif field_type == 'string' or field_type == 'textarea':
data[field_name]['field_values'].append(str(field_value))
elif field_type == 'date':
data[field_name]['field_values'].append(str(field_value))
elif field_type == 'year':
data[field_name]['field_values'].append(str(int(float(field_value))))
else:
raise RuntimeError(f'Unknown field type: {field_type}')
except:
print()
# check that field_value arrays are all the same length
required_length = 0
for column in data.keys():
length = len(data[column]['field_values'])
if required_length == 0:
required_length = length
if length != required_length:
raise RuntimeError(f'Length for column {column} is not correct, should be {required_length} but is {length}')
return data
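# Flattens the nested field dictionary produced above into a pandas DataFrame,
# one-hot encoding radio/dropdown option groups into <field>$<option> columns.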
class CastorDictToDataFrame:
def __init__(self, data):
self.data = data
def execute(self):
data = {}
# create table columns by expanding option group with one-hot encoding
for field_name in self.data.keys():
field_type = self.data[field_name]['field_type']
if field_type == 'radio' or field_type == 'dropdown':
field_options = self.data[field_name]['field_options']
for option_value in field_options.keys():
k = f'{field_name}${option_value}'
data[k] = []
else:
data[field_name] = []
# add values to columns
for field_name in self.data.keys():
field_type = self.data[field_name]['field_type']
if field_type == 'radio' or field_type == 'dropdown':
field_options = self.data[field_name]['field_options']
for value in self.data[field_name]['field_values']:
for option_value in field_options.keys():
k = f'{field_name}${option_value}'
if option_value == value:
data[k].append('1')
else:
data[k].append('0')
else:
for value in self.data[field_name]['field_values']:
data[field_name].append(value)
return pd.DataFrame(data=data)
if __name__ == '__main__':
def main():
CSV_FILE = 'castor.csv'
CASTOR_STUDY_NAME = 'ESPRESSO_v2.0_DPCA'
a2d = CastorApiToDict(
study_name=CASTOR_STUDY_NAME,
client_id=open(os.path.join(os.environ['HOME'], 'castorclientid.txt')).readline().strip(),
client_secret=open(os.path.join(os.environ['HOME'], 'castorclientsecret.txt')).readline().strip(),
)
data = a2d.execute()
d2df = CastorDictToDataFrame(data)
df = d2df.execute()
df.to_csv(CSV_FILE, index=False, sep=';', decimal='.')
main()
| rbrecheisen/barbell2_castor | barbell2_castor/castor2df.py | castor2df.py | py | 6,438 | python | en | code | 0 | github-code | 13 |
34445068158 | from __future__ import print_function
from dronekit import VehicleMode, mavutil, LocationGlobal, LocationGlobalRelative
from pymavlink import mavutil # Needed for command message definitions
import time
import math
def arm_and_takeoff(aTargetAltitude, vehicle):
"""
Arms vehicle and fly to aTargetAltitude.
"""
print(vehicle)
print("Basic pre-arm checks")
# Don't let the user try to arm until autopilot is ready
# while not vehicle.is_armable:
# print(" Waiting for vehicle to initialise...")
# time.sleep(1)
print("Arming motors")
# Copter should arm in GUIDED mode
vehicle.mode = VehicleMode("GUIDED")
vehicle.armed = True
while not vehicle.armed:
print(" Waiting for arming...")
time.sleep(1)
print("Taking off!")
vehicle.simple_takeoff(aTargetAltitude) # Take off to target altitude
# Wait until the vehicle reaches a safe height before processing the goto (otherwise the command
# after Vehicle.simple_takeoff will execute immediately).
while True:
print(" Altitude: ", vehicle.location.global_relative_frame.alt)
# Trigger just below target alt.
if vehicle.location.global_relative_frame.alt >= aTargetAltitude*0.95:
print("Reached target altitude")
return True
# break
time.sleep(.2)
def condition_yaw(heading, vehicle, relative=False):
"""
Send MAV_CMD_CONDITION_YAW message to point vehicle at a specified heading (in degrees).
This method sets an absolute heading by default, but you can set the `relative` parameter
to `True` to set yaw relative to the current yaw heading.
By default the yaw of the vehicle will follow the direction of travel. After setting
the yaw using this function there is no way to return to the default yaw "follow direction
of travel" behaviour (https://github.com/diydrones/ardupilot/issues/2427)
For more information see:
http://copter.ardupilot.com/wiki/common-mavlink-mission-command-messages-mav_cmd/#mav_cmd_condition_yaw
"""
if relative:
is_relative = 1 # yaw relative to direction of travel
else:
is_relative = 0 # yaw is an absolute angle
# create the CONDITION_YAW command using command_long_encode()
msg = vehicle.message_factory.command_long_encode(
0, 0, # target system, target component
mavutil.mavlink.MAV_CMD_CONDITION_YAW, # command
0, # confirmation
heading, # param 1, yaw in degrees
0, # param 2, yaw speed deg/s
1, # param 3, direction -1 ccw, 1 cw
is_relative, # param 4, relative offset 1, absolute angle 0
0, 0, 0) # param 5 ~ 7 not used
# send command to vehicle
vehicle.send_mavlink(msg)
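# Switch the vehicle into LAND mode so the autopilot performs an automatic landing.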
def land(vehicle):
vehicle.mode = VehicleMode('LAND')
def goto(dNorth, dEast, vehicle, gotoFunction):
"""
Moves the vehicle to a position dNorth metres North and dEast metres East of the current position.
The method takes a function pointer argument with a single `dronekit.lib.LocationGlobal` parameter for
the target position. This allows it to be called with different position-setting commands.
By default it uses the standard method: dronekit.lib.Vehicle.simple_goto().
The method reports the distance to target every two seconds.
"""
currentLocation = vehicle.location.global_relative_frame
targetLocation = get_location_metres(currentLocation, dNorth, dEast)
targetDistance = get_distance_metres(currentLocation, targetLocation)
gotoFunction(targetLocation)
#print "DEBUG: targetLocation: %s" % targetLocation
#print "DEBUG: targetLocation: %s" % targetDistance
# Stop action if we are no longer in guided mode.
while vehicle.mode.name == "GUIDED":
#print "DEBUG: mode: %s" % vehicle.mode.name
remainingDistance = get_distance_metres(
vehicle.location.global_relative_frame, targetLocation)
print("Distance to target: ", remainingDistance)
# Just below target, in case of undershoot.
if remainingDistance <= targetDistance*0.01:
print("Reached target")
break
time.sleep(2)
def get_location_metres(original_location, dNorth, dEast):
"""
Returns a LocationGlobal object containing the latitude/longitude `dNorth` and `dEast` metres from the
specified `original_location`. The returned LocationGlobal has the same `alt` value
as `original_location`.
The function is useful when you want to move the vehicle around specifying locations relative to
the current vehicle position.
The algorithm is relatively accurate over small distances (10m within 1km) except close to the poles.
For more information see:
http://gis.stackexchange.com/questions/2951/algorithm-for-offsetting-a-latitude-longitude-by-some-amount-of-meters
"""
earth_radius = 6378137.0 # Radius of "spherical" earth
# Coordinate offsets in radians
dLat = dNorth/earth_radius
dLon = dEast/(earth_radius*math.cos(math.pi*original_location.lat/180))
# New position in decimal degrees
newlat = original_location.lat + (dLat * 180/math.pi)
newlon = original_location.lon + (dLon * 180/math.pi)
if type(original_location) is LocationGlobal:
targetlocation = LocationGlobal(newlat, newlon, original_location.alt)
elif type(original_location) is LocationGlobalRelative:
targetlocation = LocationGlobalRelative(
newlat, newlon, original_location.alt)
else:
raise Exception("Invalid Location object passed")
return targetlocation
def get_distance_metres(aLocation1, aLocation2):
"""
Returns the ground distance in metres between two LocationGlobal objects.
This method is an approximation, and will not be accurate over large distances and close to the
earth's poles. It comes from the ArduPilot test code:
https://github.com/diydrones/ardupilot/blob/master/Tools/autotest/common.py
"""
dlat = aLocation2.lat - aLocation1.lat
dlong = aLocation2.lon - aLocation1.lon
return math.sqrt((dlat*dlat) + (dlong*dlong)) * 1.113195e5
| Johnnysboys/Dilbert | flight.py | flight.py | py | 6,217 | python | en | code | 0 | github-code | 13 |
15023015194 | import sys
import os.path
import subprocess
from collections import defaultdict
import traceback
import json
from PyQt5.QtCore import (
QObject,
QThread,
QTimer,
QPoint,
QRect,
QSize,
Qt,
pyqtSignal,
)
from PyQt5.QtGui import (
QIcon,
QPixmap,
)
import patricia
import polytaxis_monitor.common as ptcommon
import appdirs
from .qtwrapper import *
from .common import *
from .settings import res
# TODO
# element types to vars
# save queries
# tag editor
# launcher editor
one_file = '{one-file}'
all_files = '{all-files}'
icon_size = QSize(24, 24)
icon_remove = None
icons = {}
eltype_labels = {
'inc': 'include',
'exc': 'exclude',
'sort_asc': 'sort ascending',
'sort_desc': 'sort descending',
'sort_rand': 'randomize',
'col': 'show column',
}
known_columns = patricia.trie()
elements = []
drag_targets = []
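# Decorator that immediately calls the function it wraps and returns the result
# (used below to instantiate generator-based work items).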
def to_gen(outer):
return outer()
unwrap_root = os.path.join(
appdirs.user_data_dir('polytaxis-unwrap', 'zarbosoft'),
'mount',
)
def unwrap(path):
return os.path.join(unwrap_root, path[1:])
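# Debounce helper: wraps `callback` so rapid successive calls are coalesced,
# firing once with the latest arguments after a 200 ms quiet period.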
def collapse(callback):
timer = QTimer()
timer.setSingleShot(True)
timer.setInterval(200)
def out(*pargs, **kwargs):
out.pargs = pargs
out.kwargs = kwargs
timer.start()
out.stop = lambda: timer.stop()
timer.timeout.connect(lambda: callback(*out.pargs, **out.kwargs))
return out
class DragTarget(QFrame):
dropped = pyqtSignal()
def __init__(self, *pargs, parent=None, **kwargs):
super(DragTarget, self).__init__(*pargs, **kwargs)
self.setAcceptDrops(True)
def dragEnterEvent(self, event):
self.setProperty('drag-hover', True)
self.style().unpolish(self)
self.style().polish(self)
self.update()
event.accept()
def dragLeaveEvent(self, event):
self.setProperty('drag-hover', False)
self.style().unpolish(self)
self.style().polish(self)
self.update()
super().dragLeaveEvent(event)
def dropEvent(self, event):
self.dropped.emit()
@enable_drag
class ElementButton(LayoutPushButton):
pass
class ElementBuilder(QObject):
worker_result = pyqtSignal(int, list)
def __init__(self):
super(ElementBuilder, self).__init__()
self.element = None
self.worker = None
self.query_unique = 0
self.text = None
self.label = QLabel(tags=['builder-label'])
self.entry = QLineEdit(tags=['builder-entry'])
self.entry_layout = QHBoxLayout(tags=['builder-head-layout'])
self.entry_layout.addWidget(self.label)
self.entry_layout.addWidget(self.entry)
self.results = QListWidget(tags=['builder-list'])
layout = QVBoxLayout(tags=['builder-layout'])
layout.addLayout(self.entry_layout)
layout.addWidget(self.results)
self.outer_widget = QFrame(tags=['builder-widget'])
self.outer_widget.setLayout(layout)
suppress_row_select = [False]
@self.entry.textEdited.connect
def edited(text):
self.element.set_value(text)
self.element.last_query = text
self.change_query()
@self.results.currentTextChanged.connect
def selected(text):
if suppress_row_select[0]:
return
if not text:
return
self.element.set_value(text)
self.entry.setText(text)
@self.worker_result.connect
def handle_result(unique, values):
if unique != self.query_unique:
return
for row, value in enumerate(values):
self.results.addItem(value)
if value == self.text:
suppress_row_select[0] = True
self.results.setCurrentRow(row)
suppress_row_select[0] = False
@collapse
def _reset_query(self):
self.query_unique += 1
self.text = self.entry.text()
if self.element.type in ('sort_asc', 'sort_desc', 'sort_rand', 'col'):
for row, column in enumerate(known_columns.iter(self.element.last_query)):
self.results.addItem(column)
if column == self.text:
self.results.setCurrentRow(row)
elif self.element.type in ('inc', 'exc'):
self.worker.build_query.emit(self.query_unique, self.element.last_query)
def change_query(self):
self.worker.build_query.emit(-1, '')
self.results.clear()
self._reset_query()
def set_element(self, element):
if element == self.element:
return
if self.element:
self.element.auto_deselect()
self.element = element
if element is None:
self.worker.build_query.emit(-1, '')
self.outer_widget.hide()
else:
self.label.setPixmap(icons[element.type])
self.entry.setText(element.value)
self.change_query()
self.outer_widget.show()
self.entry.setFocus()
class Display(QObject):
worker_result = pyqtSignal(int, list)
def __init__(self):
super(Display, self).__init__()
self.worker = None
self.query_unique = 0
self.columns = None
self.sort = None
self.filters = None
self.includes = None
self.excludes = None
self.raw = []
self.launchers = []
context_menu = QMenu()
open_menu = QMenu()
self.results = MouseLMRTreeWidget(tags=['display-tree'])
self.results.setSelectionMode(QTreeWidget.ExtendedSelection)
@self.results.customContextMenuRequested.connect
def callback(point):
context_menu.exec(point)
self.results.header().hide()
actions = QToolBar(tags=['display-toolbar'])
tool_open = actions.addAction('Open')
tool_open.setMenu(open_menu)
tool_open.setEnabled(False)
layout = QVBoxLayout(tags=['display-layout'])
layout.addWidget(self.results)
layout.addWidget(actions)
self.outer_widget = QFrame(tags=['display-widget'])
self.outer_widget.setLayout(layout)
def spawn(command):
print('spawning {}'.format(command))
proc = subprocess.Popen(
command,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
proc.stdin.close()
proc.stdout.close()
proc.stderr.close()
def do_open(option):
print('callin open')
rows = [
self.raw[index.row()]
for index in self.results.selectedIndexes()
if index.column() == 0
]
if not rows:
rows = self.raw
if not rows:
return
paths = [
unwrap(path) if option.get('unwrap', True) else path
for path in [
next(iter(row['tags']['path']))
for row in rows
]
]
if one_file in option['command']:
for path in paths:
spawn([
path if arg == one_file else arg
for arg in option['command']
])
else:
if all_files not in option['command']:
raise RuntimeError('Option {} doesn\'t have any filepath arguments.'.format(option['name']))
args = []
for arg in option['command']:
if arg == all_files:
args.extend(paths)
else:
args.append(arg)
spawn(args)
@collapse
def update_launch_concensus():
launch_sums = defaultdict(lambda: {'count': 0, 'launcher': None})
rows = [
self.raw[index.row()]
for index in self.results.selectedIndexes()
]
if not rows:
rows = self.raw
for data in rows:
for key in data['tags']['launch_keys']:
launcher_bunch = keyed_launchers.get(key)
if launcher_bunch:
launch_sum = launch_sums.get(id(launcher_bunch))
if not launch_sum:
launch_sum = {
'count': 0,
'launcher_bunch': launcher_bunch,
}
launch_sums[id(launcher_bunch)] = launch_sum
launch_sum['count'] += 1
self.launchers = []
for top in sorted(launch_sums.items(), key=lambda x: x[1]['count']):
if len(self.launchers) > 5:
break
launcher_bunch = top[1]['launcher_bunch']
if not launcher_bunch:
continue
self.launchers.extend(launcher_bunch)
self.launchers.extend(
wildcard_launchers
)
def build_action(menu, launcher):
action = menu.addAction(launcher['name'])
def launch():
do_open(launcher)
action.triggered.connect(launch)
for menu in [open_menu, context_menu]:
menu.clear()
for launcher in self.launchers:
build_action(menu, launcher)
if self.launchers:
tool_open.setText('Open with ' + self.launchers[0]['name'])
tool_open.setEnabled(bool(self.launchers))
@self.worker_result.connect
def handle_result(unique, rows):
if unique != self.query_unique:
return
for row in rows:
for key in row['tags'].keys():
known_columns[key] = None
self.raw.extend(rows)
self._redisplay()
update_launch_concensus()
@self.results.m_clicked_anywhere.connect
def handle_m_clicked():
self.results.clearSelection()
@self.results.itemSelectionChanged.connect
def callback():
update_launch_concensus()
@self.results.r_clicked_anywhere.connect
def handle_r_clicked(position):
context_menu.exec(position)
@self.results.doubleClicked.connect
def handle_clicked(index):
if not self.launchers:
return
do_open(self.launchers[0])
@tool_open.triggered.connect
def handle_clicked(index):
if not self.launchers:
return
do_open(self.launchers[0])
@collapse
def _reset_query(self, unique):
self.worker.display_query.emit(unique, self.includes, self.excludes)
def _redisplay(self):
self.raw = ptcommon.sort(self.sort, self.raw)
self.results.clear()
#self.results.setRowCount(0)
for row in self.raw:
self.results.addTopLevelItem(QTreeWidgetItem([
', '.join(list(row['tags'].get(column, [])))
for column in self.columns]))
self.results.header().resizeSections(QHeaderView.ResizeToContents)
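    # Rebuild the query state (include/exclude tags, visible columns, sort spec) from the
    # global `elements` list, re-querying the worker only when the filter set actually changed.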
def change_query(self):
self.worker.display_query.emit(-1, set(), set())
cleared = [False]
def clear():
if not cleared[0]:
cleared[0] = True
self.results.clear()
includes = {
element.value for element in elements if element.type == 'inc'}
excludes = {
element.value for element in elements if element.type == 'exc'}
columns = []
sort = []
for element in elements:
if (
element.type in ('col', 'sort_asc', 'sort_desc', 'sort_rand')
and element.value
and element.value not in columns
):
columns.append(element.value)
if element.type == 'sort_asc':
sort.append(('asc', element.value))
elif element.type == 'sort_desc':
sort.append(('desc', element.value))
elif element.type == 'sort_rand':
sort.append(('rand', element.value))
if includes != self.includes or excludes != self.excludes:
self.query_unique += 1
            global known_columns
            known_columns = patricia.trie()
self.raw = []
self.includes = includes
self.excludes = excludes
clear()
if self.includes or self.excludes:
self._reset_query(self.query_unique)
if columns != self.columns or sort != self.sort:
self.columns = columns
self.sort = sort
if self.columns:
self.results.setColumnCount(len(self.columns))
self.results.setHeaderLabels(self.columns)
self.results.header().show()
else:
self.results.header().hide()
self.results.setColumnCount(1)
self.columns = ['filename']
self.sort.append(('asc', 'filename'))
clear()
self._redisplay()
wildcard_launchers = []
keyed_launchers = defaultdict(lambda: [])
def main():
launchers_path = os.path.join(
appdirs.user_config_dir('polytaxis-adventure'),
'launchers.json',
)
try:
with open(launchers_path, 'r') as launchers_file:
launchers = json.load(launchers_file)
for launcher in launchers:
for key in launcher['keys']:
if key == '*':
wildcard_launchers.append(launcher)
else:
keyed_launchers[key].append(launcher)
except:
print('Failed to load {}:\n{}'.format(
launchers_path,
traceback.format_exc())
)
app = QApplication(sys.argv)
global icon_remove
icon_remove = QPixmap(res('icon_remove.png'))
global icons
icons = {
key: QPixmap(res('icon_{}.png'.format(key)))
for key in [
'exc',
'inc',
'col',
'sort_asc',
'sort_desc',
'sort_rand',
]
}
icons['logo'] = QPixmap(res('logo.png'))
for icon in icons.values():
if not icon:
raise RuntimeError('Unable to load icon {}'.format(icon))
class Worker(QObject):
build_query = pyqtSignal(int, str)
display_query = pyqtSignal(int, set, set)
def __init__(self):
super(Worker, self).__init__()
self.build = None
self.display = None
self.current_build_query = None
self.current_display_query = None
self.db = ptcommon.QueryDB()
self.queue = []
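            # A zero-interval timer drives the work queue: each tick advances one queued
            # generator by a single step so queries can be interleaved and cancelled.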
self.idle = QTimer()
self.idle.setInterval(0)
@self.idle.timeout.connect
def idle():
if not self.queue:
self.idle.stop()
return
work = self.queue.pop()
try:
next(work)
except StopIteration:
return
self.queue.append(work)
self.idle.start()
@self.build_query.connect
def build_query_handler(unique, arg):
try:
self.queue.remove(self.current_build_query)
except ValueError:
pass
if unique == -1:
self.current_build_query = None
return
@to_gen
def work():
count = 0
work = self.db.query_tags('prefix', arg)
while True:
rows = list(limit(100, work))
if not rows:
                            return
count += len(rows)
self.build.worker_result.emit(unique, rows)
if count >= 1000:
break
yield
self.queue.append(work)
self.current_build_query = work
self.idle.start()
@self.display_query.connect
def display_query_handler(unique, includes, excludes):
try:
self.queue.remove(self.current_display_query)
except ValueError:
pass
if unique == -1:
self.current_display_query = None
self.db.clear_cache()
return
@to_gen
def work():
count = 0
work = self.db.query(
includes, excludes,
add_path=True)
while True:
rows = list(limit(100, work))
if not rows:
                            return
count += len(rows)
for row in rows:
path = next(iter(row['tags']['path']))
filename = ptcommon.split_abs_path(path)[-1]
row['tags']['filename'] = {filename}
filename_splits = filename.split('.')
launch_keys = set()
for index in range(1, len(filename_splits)):
launch_keys.add(
'.' + '.'.join(filename_splits[index:]))
row['tags']['launch_keys'] = launch_keys
self.display.worker_result.emit(unique, rows)
if count >= 1000:
break
yield
self.queue.append(work)
self.current_display_query = work
self.idle.start()
worker_thread = QThread()
worker_thread.start()
worker = Worker()
worker.moveToThread(worker_thread)
# Query element specification
build = ElementBuilder()
build.worker = worker
worker.build = build
# Result display and interaction
display = Display()
display.worker = worker
worker.display = display
# Query bar
appicon = QToolButton(tags=['appicon'])
appicon.setIcon(QIcon(icons['logo']))
appicon.setIconSize(QSize(48, 48))
appicon.setToolTip('Clear query')
@appicon.clicked.connect
def callback(checked):
for element in elements[:]:
element.destroy()
query = FlowLayout(tags=['query-layout'])
query_empty = QLabel('empty query', tags=['query-empty'])
query.addWidget(query_empty)
query_toolbar = QToolBar(tags=['query-toolbar'])
query_toolbar.setToolButtonStyle(Qt.ToolButtonTextBesideIcon)
def create_query_element_action(eltype):
ellabel = eltype_labels[eltype]
action = QAction(QIcon(icons[eltype]), ellabel, query_toolbar, tags=['query-tool', eltype])
query_toolbar.addAction(action)
def create(ign1):
class Element():
type = eltype
value = ''
last_query = ''
def recreate_widgets(self):
layout = QHBoxLayout(tags=['element-layout', eltype])
icon = QLabel(tags=['element-icon', eltype])
icon.setPixmap(icons[eltype])
layout.addWidget(icon)
self.text = QLabel(self.value, tags=['element-text', eltype])
layout.addWidget(self.text)
self.delete = QToolButton(tags=['element-remove', eltype])
self.delete.setIcon(QIcon(icon_remove))
layout.addWidget(self.delete)
self.toggle = ElementButton(tags=['element-toggle', eltype])
self.toggle.setLayout(layout)
self.delete.hide()
@self.toggle.toggled.connect
def click_action(checked):
if checked:
self.select()
else:
self.deselect()
def clear_drag():
for target in drag_targets:
query.removeWidget(target)
del drag_targets[:]
@self.toggle.drag_start.connect
def callback():
sindex = elements.index(self)
for index in range(len(elements) + 1):
if index in (sindex, sindex + 1):
continue
target = DragTarget(tags=['drag-target'])
@target.dropped.connect
def callback(index=index, sindex=sindex):
clear_drag()
dindex = index if index < sindex else index - 1
elements.insert(dindex, elements.pop(sindex))
display.change_query()
was_checked = self.toggle.checked()
query.removeWidget(self.toggle)
self.recreate_widgets()
query.insertWidget(dindex + 1, self.toggle)
if was_checked:
self.select()
query.insertWidget(index + len(drag_targets) + 1, target)
drag_targets.append(target)
@self.toggle.drag_stop.connect
def callback():
clear_drag()
@self.delete.clicked.connect
def delete_action(checked):
self.destroy()
def __init__(self):
self.recreate_widgets()
query.addWidget(self.toggle)
query_empty.hide()
def set_value(self, value):
self.value = value
self.text.setText(value)
display.change_query()
def auto_deselect(self):
self.toggle.setChecked(False)
self.delete.hide()
def deselect(self):
build.set_element(None)
self.delete.hide()
def select(self):
build.set_element(self)
self.toggle.setChecked(True)
self.delete.show()
def destroy(self):
self.deselect()
elements.remove(self)
display.change_query()
query.removeWidget(self.toggle)
if not elements:
query_empty.show()
element = Element()
element.select()
elements.append(element)
action.triggered.connect(create)
create_query_element_action('inc')
create_query_element_action('exc')
create_query_element_action('sort_asc')
create_query_element_action('sort_desc')
create_query_element_action('sort_rand')
create_query_element_action('col')
query_layout = QVBoxLayout(tags=['outer-query-layout'])
query_layout.addLayout(query, 1)
query_layout.addWidget(query_toolbar)
total_query_layout = QHBoxLayout(tags=['top-layout'])
total_query_layout.addWidget(appicon)
total_query_layout.addLayout(query_layout, 1)
# Assemblage
bottom_splitter = QSplitter(Qt.Horizontal, tags=['bottom-splitter'])
bottom_splitter.addWidget(build.outer_widget)
bottom_splitter.addWidget(display.outer_widget)
total_layout = QVBoxLayout(tags=['window-layout'])
total_layout.addLayout(total_query_layout)
total_layout.addWidget(bottom_splitter, 1)
window = QFrame(tags=['window'])
window.setObjectName('window')
window.setLayout(total_layout)
window.show()
build.outer_widget.hide()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| rendaw/ptadventure | polytaxis_adventure/main.py | main.py | py | 24,735 | python | en | code | 0 | github-code | 13 |
31114568479 | """changed users role_id
Revision ID: 443b821478c1
Revises: 25a3ee0f9951
Create Date: 2022-04-11 02:19:53.722244
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '443b821478c1'
down_revision = '25a3ee0f9951'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('role_id', sa.Integer(), nullable=True))
op.drop_constraint('fk_users_roleId_roles', 'users', type_='foreignkey')
op.create_foreign_key(op.f('fk_users_role_id_roles'), 'users', 'roles', ['role_id'], ['id'], ondelete='SET NULL')
op.drop_column('users', 'roleId')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('roleId', sa.INTEGER(), autoincrement=False, nullable=True))
op.drop_constraint(op.f('fk_users_role_id_roles'), 'users', type_='foreignkey')
op.create_foreign_key('fk_users_roleId_roles', 'users', 'roles', ['roleId'], ['id'], ondelete='SET NULL')
op.drop_column('users', 'role_id')
# ### end Alembic commands ###
| vishIGOR/MyCodeHedgehog | project/migrations/versions/443b821478c1_changed_users_role_id.py | 443b821478c1_changed_users_role_id.py | py | 1,185 | python | en | code | 0 | github-code | 13 |
20843373574 | from __future__ import with_statement, print_function, absolute_import
from itertools import *
from functools import *
import numpy
import pandas
from stitch.core.stitch_parser import StitchParser
from stitch.core.utils import *
# ------------------------------------------------------------------------------
'''
.. module:: stitch_interpreter
:platform: Unix
:synopsis: Stitch Query Langauge interpreter
.. moduleauthor:: Alex Braun <alexander.g.braun@gmail.com>
'''
class StitchInterpreter(StitchParser):
'''
Subclass of StitchParser used for performing stitchql queries on supplied DataFrames
Attributes:
last_search (str): Last stitchql query generated. Default: None.
search_stats(str): Print statistics about the last query made.
'''
def __init__(self):
super(StitchInterpreter, self).__init__()
# --------------------------------------------------------------------------
def _gen_dataframe_query(self, dataframe, fields=['all'], operator='==', values=[''], field_operator='=='):
'''
Semi-private method for processing invidual stitchql queries.
Args:
dataframe (DataFrame): DataFrame to query.
fields (list, optional): Fields to query. Default: ['all'].
operator (str, optional): stitchql operator to use in the query. Default '=='.
values (list, optional): Values to look for. Default [''].
Returns:
Results DataFrame
'''
columns = dataframe.columns.to_series()
if fields != ['all']:
mask = columns.apply(lambda x: bool_test(x, field_operator, fields))
columns = columns[mask]
columns = columns.tolist()
mask = dataframe[columns].applymap(lambda x: bool_test(x, operator, values))
# This method avoids including prexisting nan values in the mask
mask[mask == False] = numpy.nan
mask.dropna(how='all', subset=columns, inplace=True)
return mask.index
def dataframe_query(self, dataframe, field_operator='=='):
'''
Query supplied DataFrame using last search.
Args:
dataframe (DataFrame): DataFrame to query.
field_operator (str, optional): Operator used for determining matching fields. Default '=='.
Returns:
Results DataFrame
'''
if dataframe.index.has_duplicates:
raise IndexError('DataFrame has non-unique values in its index')
mask = pandas.Index([])
for queries in self._last_search:
and_mask = dataframe.index
for q in queries:
and_mask = self._gen_dataframe_query(dataframe.ix[and_mask], q['fields'], q['operator'], q['values'], field_operator=field_operator)
mask = mask.union(and_mask)
return dataframe.ix[mask]
# ------------------------------------------------------------------------------
def main():
'''
Run help if called directly
'''
import __main__
help(__main__)
__all__ = ['StitchInterpreter']
if __name__ == '__main__':
main()
| theNewFlesh/stitch | python/stitch/core/stitch_interpreter.py | stitch_interpreter.py | py | 2,783 | python | en | code | 2 | github-code | 13 |
8310443184 | import numpy as np
import scipy.optimize
import time
import matplotlib.pyplot as plt
import pandas as pd
import math
def log_reg(theta,x,y):
"""
Arguments:
theta - A vector containing the parameter values to optimize.
X - The examples stored in a matrix.
X(i,j) is the i'th coordinate of the j'th example.
y - The target value for each example. y(j) is the target for example j.
Basic function to compute cost and gradient for given arguments.
"""
no_of_ex = x.shape[1]
cost = 0
grad = np.zeros(theta.shape)
for i in range(no_of_ex):
val = np.sum(theta[:]*x[:,i])
val = 1/ (1 + math.exp(-val))
cost = cost + (y[i]*math.log(val)) + (1-y[i])*math.log(1-val)
grad = grad + x[:,i]*(val-y[i])
cost = -cost
return cost,grad
def log_rec_vec(theta,x,y):
"""
An optimized function to compute cost and gradient for given arguments
"""
val = np.dot(theta,x)
val = 1/(1+np.exp(-val))
grad = np.transpose(np.dot(x,np.transpose(val - y)))
cost = -np.sum(y*np.log(val) + (1-y)*np.log(1-val))
return grad,cost
def cost_fun(theta,x,y):
"""
Function to calculate cost
"""
val = np.dot(theta,x)
val = 1/(1+np.exp(-val))
cost = -np.sum(y*np.log(val) + (1-y)*np.log(1-val))
return cost
def grad_fun(theta,x,y):
"""
Function to calculate gradient
"""
val = np.dot(theta,x)
val = 1/(1+np.exp(-val))
grad = np.transpose(np.dot(x,np.transpose(val - y)))
return grad
def safe_log(x):
"""
Function to calculate safe_log i.e. replace nan/inf with -1e+4
"""
l = np.log(x)
l[np.logical_or(np.isnan(l),np.isinf(l)) ] = -1e+4
return l
def safe_cost_fun(theta,x,y):
"""
Function to calculate cost using safe_log
"""
val = np.dot(theta,x)
val = 1/(1+np.exp(-val))
cost = -np.sum(y*safe_log(val) + (1-y)*safe_log(1-val))
return cost
def accuracy(theta,x,y):
"""
Function to calculate accuracy of the logistic regression model
"""
val = np.dot(theta,x)
val = 1/(1+np.exp(-val))
correct = np.sum(np.equal(y, val>0.5))
return correct/y.size
data = pd.read_csv("mnist.csv") #specify the path to csv file of MNIST database
data = np.array(data)
data = np.insert(data,1,1,axis=1)
np.random.shuffle(data)
train = data[0:30000]
test = data[30000:]
#taking data rows with label digit = 0 or label digit = 1
train_data = train[np.logical_or(train[:,0] == 0, train[:,0] == 1), 1:]
train_label = train[np.logical_or(train[:,0] == 0, train[:,0] == 1), 0]
test_data = test[np.logical_or(test[:,0] == 0, test[:,0] == 1), 1:]
test_label = test[np.logical_or(test[:,0] == 0, test[:,0] == 1), 0]
#normalizing database
train_data[train_data>0] = 1
test_data[test_data>0] = 1
train_data = np.transpose(train_data)
test_data = np.transpose(test_data)
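# First run: L-BFGS-B on the plain cost function.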
j_hist = []
t0 = time.time()
res = scipy.optimize.minimize(
fun=cost_fun,
x0=np.random.rand(train_data.shape[0])*0.001,
args=(train_data, train_label),
method='L-BFGS-B',
jac=grad_fun,
options={'maxiter': 100, 'disp': True},
callback=lambda x: j_hist.append(cost_fun(x, train_data, train_label)),
)
t1 = time.time()
optimal_theta = res.x
print ("Optimization using lbfgs took %r seconds" %(t1-t0))
plt.plot(j_hist, marker='o')
plt.xlabel('Iterations')
plt.ylabel('J(theta)')
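# Second run: BFGS with the log-safeguarded cost to avoid NaN/inf from log(0).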
j_hist = []
t0 = time.time()
res = scipy.optimize.minimize(
fun=safe_cost_fun,
x0=np.random.rand(train_data.shape[0])*0.001,
args=(train_data, train_label),
method='bfgs',
jac=grad_fun,
options={'maxiter': 100, 'disp': True},
callback=lambda x: j_hist.append(safe_cost_fun(x, train_data, train_label)),
)
t1 = time.time()
optimal_theta = res.x
print ("Optimization using bfgs and safe log took %r seconds" %(t1-t0))
plt.plot(j_hist, marker='o')
plt.xlabel('Iterations')
plt.ylabel('J(theta)')
print ("training accuracy = %r" %(accuracy(optimal_theta,train_data,train_label)))
print ("testing accuracy = %r" %(accuracy(optimal_theta,test_data,test_label)))
| Tandon-A/ufldl-python-solutions | Logitsic_Regression.py | Logitsic_Regression.py | py | 4,119 | python | en | code | 2 | github-code | 13 |
37933960492 | import random
import math
def buyTickets(masterList, lottoNums, tixs):
bought = []
for j in range(0, 5):
bought.append(random.randint(0, lottoRange))
bought.sort()
bought.append(random.randint(0, powerRange))
if bought == lottoNums:
print("Winning ticket numbers: " + str(bought))
print("Purchased a total of " + str(tixs) + " tickets!")
exit()
else:
for i in masterList:
if i == bought:
print("Alrady picked those numbers.")
return False
print(bought)
masterList.append(bought)
return True
print("\n================POWER BALL!!!!================")
while True:
lottoNums = []
bought = []
masterList = []
try:
lottoRange = int(input("What is the range of the numbers: "))
powerRange = int(input("What is the range of the power number: "))
if lottoRange < 0 or powerRange < 0:
print("Invalid choice")
else:
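            # Jackpot odds: C(lottoRange, 5) ways to choose the five main numbers,
            # times powerRange choices for the power ball.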
odds = math.factorial(lottoRange)
odds = odds / (math.factorial(5) * math.factorial(lottoRange - 5))
odds *= powerRange
print("The odds are 1 to: " + str(odds))
buy = int(input("How many tickets do you want to buy: "))
break
except ValueError:
print("Invaild choice.")
for i in range(0, 5):
lottoNums.append(random.randint(0, lottoRange))
lottoNums.sort()
lottoNums.append(random.randint(0, powerRange))
print("\n==========Tonights winning numbers are============")
print("\t" + str(lottoNums))
input("Press enter to start the purchases")
while True:
count = 0
tickets = 1
while count < buy:
if buyTickets(masterList, lottoNums, tickets):
tickets += 1
else:
count -= 1
count += 1
print("You didnt win.")
buy = int(input("How many more tickets do you want to buy: "))
if buy < 1:
exit()
| RiskyClick/40Challenges | PowerballSimulation.py | PowerballSimulation.py | py | 1,956 | python | en | code | 0 | github-code | 13 |
70662519059 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from settings import settings
class PlotService:
def __init__(self, df: pd.DataFrame):
self.df = df
def confusion_matrix(self, confusion_matrix: list[list[int]], categories: np.array, showfliers = True, filename = "confusion_matrix.png"):
"""Given a matrix and a list o labels, plot the confusion_matrix.
Args:
confusion_matrix (list[list[int]]): Confusion Matrix.
categories (np.array): Columns used in the confusion matrix
            showfliers (bool): Whether to show outliers (not used by this heatmap).
            filename (str): Name of the file to save the figure.
"""
row_sums = confusion_matrix.sum(axis=1)
normalized_confusion_matrix = confusion_matrix / row_sums[:, np.newaxis]
plt.figure(figsize=(8, 6))
sns.heatmap(normalized_confusion_matrix, annot=True, fmt=".4f", cmap="Blues",
xticklabels=categories, yticklabels=categories)
plt.xlabel("Predicted")
plt.ylabel("Real")
plt.tight_layout()
plt.savefig(f"{settings.Config.out_dir}/{filename}")
plt.show()
plt.close()
def roc_curve(self, categories, TVPs, TFPs, filename = "roc_curve.png"):
"""Given a list of categories, true positive rates and false positive rates, plot the ROC curve.
Args:
categories (list): Categories used in the ROC curve.
TVPs (dict(list)): True positive rates.
TFPs (dict(list)): False positive rates.
filename (str): Name of the file to save the ROC curve.
"""
plt.figure(figsize=(8, 6))
for category in categories:
plt.plot(TFPs[category], TVPs[category], label=f"{category}", marker='o', markersize=3, alpha=0.7)
plt.plot([0, 1], [0, 1], color='black', linestyle='--', label="Random Classifier")
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.legend()
plt.tight_layout()
plt.savefig(f"{settings.Config.out_dir}/{filename}")
plt.show()
plt.close()
| lvvittor/ml | tps/tp1/app/services/plot_service.py | plot_service.py | py | 2,195 | python | en | code | 0 | github-code | 13 |
38340516426 | from PyQt5.QtGui import QPainter, QPixmap, QColor
from PyQt5.QtCore import QCoreApplication, QEventLoop
from math import isclose
from geometry import Point, Segment, Vector
from errors import callInfo, callError
EPS = 1e-6
class Cutter:
def __init__(self, scene, painter, img, color):
self.scene = scene
self.painter = painter
self.img = img
self.color = color
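    # Inward-pointing normal of polygon edge `index`, oriented by checking it against the
    # following edge (assumes a convex selector).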
def getNormal(self, selector, index):
edgeVec = Vector(
selector[index],
selector[(index + 1) % len(selector)]
)
normal = Vector(1, 0) if not edgeVec.x else Vector(-edgeVec.y / edgeVec.x, 1)
if Vector(
selector[(index + 1) % len(selector)],
selector[(index + 2) % len(selector)]
).scalarProd(normal) < 0:
normal.neg()
return normal
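    # Parametric (Cyrus-Beck) clipping of a segment against the convex polygon:
    # tIn/tOut track the entering and leaving parameters along the segment.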
def cutSegment(self, seg, selector):
tIn = 0
tOut = 1
vecDir = Vector(seg.begin, seg.end)
vertNum = len(selector)
for i in range(vertNum):
weight = Vector(selector[i], seg.begin)
normal = self.getNormal(selector, i)
Dsc = vecDir.scalarProd(normal)
Wsc = weight.scalarProd(normal)
if isclose(Dsc, 0, abs_tol=EPS):
if Wsc < 0 and not isclose(Wsc, 0, abs_tol=EPS):
return
else:
continue
t = - Wsc / Dsc
if Dsc > 0:
if t > 1 and not isclose(t, 1, abs_tol=EPS):
return
if t > tIn:
tIn = t
else:
if t < 0 and not isclose(t, 0, abs_tol=EPS):
return
if t < tOut:
tOut = t
if tIn < tOut:
p1 = Point(
round(seg.begin.x + vecDir.x * tIn),
round(seg.begin.y + vecDir.y * tIn)
)
p2 = Point(
round(seg.begin.x + vecDir.x * tOut),
round(seg.begin.y + vecDir.y * tOut)
)
self.painter.drawLine(
p1.x,
p1.y,
p2.x,
p2.y
)
def run(self, segments, selector):
if not selector.isConvex():
callError(
"Невыпуклый отсекатель!",
"Отсекатель не является выпуклым!"
)
return
for seg in segments:
self.cutSegment(seg, selector)
| MyMiDiII/bmstu-cg | lab_08/cut.py | cut.py | py | 2,571 | python | en | code | 0 | github-code | 13 |
36168109999 | from chembl_webresource_client.new_client import new_client
import pandas as pd
import math
from rdkit.Chem import PandasTools
def BioactivityIC50(CHEMBLID):
bioact = bioactivities.filter(target_chembl_id = CHEMBLID) \
.filter(type = 'IC50') \
.only('activity_id', 'assay_description', 'assay_type', \
'molecule_chembl_id','type', 'units', 'relation', 'value', \
'target_chembl_id')
bioact_df = pd.DataFrame.from_records(bioact)
return(bioact_df)
def Compoundlist(record):
#record = bioact_df
cmpd_id_list = list(record['molecule_chembl_id'])
compound_list = compounds.filter(molecule_chembl_id__in = cmpd_id_list) \
.only('molecule_chembl_id','molecule_structures')
compound_df = pd.DataFrame.from_records(compound_list)
compound_df = compound_df.drop_duplicates('molecule_chembl_id', keep = 'first')
for i, cmpd in compound_df.iterrows():
        if compound_df.loc[i]['molecule_structures'] is not None:
            compound_df.loc[i, 'molecule_structures'] = cmpd['molecule_structures']['canonical_smiles']
output_df = pd.merge(record[['target_chembl_id','molecule_chembl_id','units','value']], compound_df, on='molecule_chembl_id')
output_df = output_df.rename(columns= {'molecule_structures':'smiles', 'value':'IC50'})
return(output_df)
targets = new_client.target
compounds = new_client.molecule
bioactivities = new_client.activity
#chembl_id = 'CHEMBL2052035' ## Define the input CHEMBL targte ID
listID = ['CHEMBL4222','CHEMBL2052035','CHEMBL4968','CHEMBL3559677','CHEMBL6147','CHEMBL3486','CHEMBL2169724']
#listID = ['CHEMBL4222']
for i in listID:
print(i)
bioact = BioactivityIC50(i)
print(bioact.shape[0])
if bioact.shape[0] > 0:
complist = Compoundlist(bioact)
name = i+".csv"
complist.to_csv("/home/pawan/Downloads/"+name+"")
print(complist)
| Pawansit/LigandAnalysis | Chembl-Target-Info.py | Chembl-Target-Info.py | py | 1,901 | python | en | code | 1 | github-code | 13 |
27449572416 | import aiofiles
__all__ = ['srun','read_text']
def srun(async_func, *args,extra_context_var: dict={} ,show_progress=False, **kwargs):
"""
    Run an async function synchronously by driving its coroutine a single step.
    Input:
        async_func (coroutine function): function to run
*args: arguments to pass to function
extra_context_var (dict): extra variable to pass to function
show_progress (bool): show progress bar
**kwargs: keyword arguments to pass to function
Output:
result (object): result of function
"""
try:
context_vars = {}
context_vars.update(extra_context_var)
core = async_func(*args, context_vars,**kwargs)
core.send(None)
core.close()
except StopIteration as e:
return e.value
async def read_text(filepath, size: int = None, context_vars: dict = {}) -> str:
"""An asyn function that opens a text file and reads the content.
Parameters
----------
filepath : str
path to the file
size : int
size to read from the beginning of the file, in bytes. If None is given, read the whole
file.
context_vars : dict
a dictionary of context variables within which the function runs. It must include
`context_vars['async']` to tell whether to invoke the function asynchronously or not.
Returns
-------
str
the content read from file
"""
if context_vars["async"]:
async with aiofiles.open(filepath, mode="rt") as f:
return await f.read(size)
else:
with open(filepath, mode="rt") as f:
return f.read(size)
| bigmb/mb_pandas | mb_pandas/src/aio.py | aio.py | py | 1,612 | python | en | code | 0 | github-code | 13 |
37261514299 | import os
import sys
import psycopg2
from aiogram import types
from PIL import Image, ImageFont, ImageDraw, ImageFilter, ImageOps
from bot.dispatcher import bot
def get_main_menu():
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
buttons = ["Моя карта", "Другой игрок"]
keyboard.add(*buttons)
buttons = ["Техническая поддержка"]
keyboard.add(*buttons)
    # it would be nice to expand this menu
return keyboard
def get_card_menu():
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
buttons = ["Изменить изображение", "Изменить никнейм"]
keyboard.add(*buttons)
buttons = ["Главное меню"]
keyboard.add(*buttons)
return keyboard
def goto_menu():
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
buttons = ["Главное меню"]
keyboard.add(*buttons)
return keyboard
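# Crop the freshly uploaded avatar to a circle using the alpha mask, then delete the temporary source image.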
def profile_circular_process(nickname):
try:
mask = Image.open('/~/BonchMafia/bot/pictures/profile/avatar_mask.png').convert('L')
profile_image = Image.open(f'/~/BonchMafia/bot/pictures/profile/{nickname}_temp.png')
output = ImageOps.fit(profile_image, mask.size, centering=(0.5, 0.5))
output.putalpha(mask)
output.save(f'/~/BonchMafia/bot/pictures/profile/{nickname}.png')
if os.path.exists(f'/~/BonchMafia/bot/pictures/profile/{nickname}_temp.png'):
os.remove(f'/~/BonchMafia/bot/pictures/profile/{nickname}_temp.png')
else:
print(f"File f'/~/BonchMafia/bot/pictures/profile/{nickname}_temp.png' doesn't exist")
except Exception as e:
print(f'Found an exception at profile_circular_process: {e}')
def get_textbox(draw, msg, font):
_, _, w, h = draw.textbbox((0, 0), msg, font=font)
return w, h
def card_process(nickname, league, don, don_total, mafia, mafia_total, sheriff, sheriff_total, citizen, citizen_total, won, lost, total, mentor):
try:
background = Image.open(f"/~/BonchMafia/bot/pictures/{league}/background.png")
backplate = Image.open(f"/~/BonchMafia/bot/pictures/{league}/backplate.png")
profile_picture = Image.open(f"/~/BonchMafia/bot/pictures/profile/{nickname}.png")
logo = Image.open(f"/~/BonchMafia/bot/pictures/{league}/logo.png")
don_icon = Image.open(f"/~/BonchMafia/bot/pictures/{league}/don.png")
citizen_icon = Image.open(f"/~/BonchMafia/bot/pictures/{league}/citizen.png")
mafia_icon = Image.open(f"/~/BonchMafia/bot/pictures/{league}/mafia.png")
sheriff_icon = Image.open(f"/~/BonchMafia/bot/pictures/{league}/sheriff.png")
total_icon = Image.open(f"/~/BonchMafia/bot/pictures/{league}/total.png")
icon_size = (55, 55)
don_icon = don_icon.resize(icon_size)
citizen_icon = citizen_icon.resize(icon_size)
mafia_icon = mafia_icon.resize(icon_size)
sheriff_icon = sheriff_icon.resize(icon_size)
total_icon = total_icon.resize(icon_size)
stats_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-Bold.otf", 24)
your_league_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-Regular.otf", 36)
league_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-ExtraBold.otf", 36)
nickname_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-SemiBold.otf", 48)
your_mentor_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-Light.otf", 20)
mentor_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-Light.otf", 20)
roles_font = ImageFont.truetype("/~/BonchMafia/bot/pictures/fonts/VelaSans-Light.otf", 14)
card = background.copy()
backplate_size = (360, 360)
backplate = backplate.resize(backplate_size)
card.paste(backplate, (180, 182), backplate)
card.paste(profile_picture, (185, 187), profile_picture)
card.paste(logo, (499, 1032), logo)
card.paste(total_icon, (333, 913), total_icon)
card.paste(sheriff_icon, (483, 699), sheriff_icon)
card.paste(citizen_icon, (182, 699), citizen_icon)
card.paste(don_icon, (483, 795), don_icon)
card.paste(mafia_icon, (182, 795), mafia_icon)
draw = ImageDraw.Draw(im=card)
if league == "calibration":
league_text = "ОПРЕДЕЛЯЕТСЯ"
stats_color = "#BB7F41"
your_league_color = "#BB7F41"
nickname_color = "#BB7F41"
mentor_color = "#BB7F41"
elif league == "bronze":
stats_color = "#E6BC97"
your_league_color = "#E6BC97"
nickname_color = "#E6BC97"
mentor_color = "#E6BC97"
league_text = "БРОНЗА"
elif league == "silver":
league_text = "СЕРЕБРО"
stats_color = "#ECEAE4"
your_league_color = "#ECEAE4"
nickname_color = "#ECEAE4"
mentor_color = "#ECEAE4"
elif league == "gold":
league_text = "ЗОЛОТО"
stats_color = "#EEBB6C"
your_league_color = "#EEBB6C"
nickname_color = "#EEBB6C"
mentor_color = "#EEBB6C"
elif league == "platinum":
league_text = "ПЛАТИНА"
stats_color = "#C5C5C5"
your_league_color = "#C5C5C5"
nickname_color = "#C5C5C5"
mentor_color = "#C5C5C5"
elif league == "ruby":
league_text = "РУБИН"
stats_color = "#FF8585"
your_league_color = "#FF8585"
nickname_color = "#FF8585"
mentor_color = "#FF8585"
elif league == "diamond":
league_text = "АЛМАЗ"
stats_color = "#5ED2F8"
your_league_color = "#5ED2F8"
nickname_color = "#5ED2F8"
mentor_color = "#5ED2F8"
else:
league_text = "UNKNOWN"
stats_color = "#f"
your_league_color = "#f"
nickname_color = "#f"
mentor_color = "#f"
print(f"Found wrong league at card processing: {league}")
w, h = get_textbox(draw, nickname, nickname_font)
draw.text(((720-w)/2, 563), nickname, nickname_color, nickname_font)
draw.text((423, 699), "ШЕРИФ", stats_color, roles_font, align="right")
draw.text((430, 718), f"{sheriff}/{sheriff_total}", stats_color, stats_font, align="left")
draw.text((247, 699), "МИРНЫЙ", stats_color, roles_font, align="left")
draw.text((247, 718), f"{citizen}/{citizen_total}", stats_color, stats_font, align="right")
draw.text((443, 795), "ДОН", stats_color, roles_font, align="right")
draw.text((430, 814), f"{don}/{don_total}", stats_color, stats_font, align="left")
draw.text((247, 795), "МАФИЯ", stats_color, roles_font, align="left")
draw.text((247, 814), f"{mafia}/{mafia_total}", stats_color, stats_font, align="right")
draw.text((314, 862), "ВСЕГО ПОБЕД", stats_color, roles_font)
w, h = get_textbox(draw, f"{won}/{total}", stats_font)
draw.text(((720-w)/2, 879), f"{won}/{total}", stats_color, stats_font, align="center")
w, h = get_textbox(draw, "ТВОЙ РАНГ:", your_league_font)
draw.text(((720-w)/2, 62), "ТВОЙ РАНГ:", your_league_color, your_league_font)
w, h = get_textbox(draw, league_text, league_font)
draw.text(((720-w)/2, 110), league_text, your_league_color, league_font)
if (mentor!='-') and (mentor!='blank'):
w, h = get_textbox(draw, "НАСТАВНИК:", mentor_font)
draw.text(((728-w)/2, 624), "НАСТАВНИК:", mentor_color, mentor_font)
w, h = get_textbox(draw, mentor, mentor_font)
draw.text(((723-w)/2, 647), mentor, mentor_color, mentor_font)
card.save(f"/~/BonchMafia/bot/pictures/cards/{nickname}.png")
return 0
    except Exception as e:
        print(f'Found an exception at controllers.card_process: {e}')
        return 1
def get_league(won, total):
    # winrate is a percentage in 0-100; under 10 games the league is still calibrating,
    # and from 10 to 48 games the rate is computed against a fixed 48-game denominator.
    if total < 10:
        winrate = -1
    elif 10 <= total <= 48:
        winrate = round(won / 48 * 100)
    else:
        winrate = round(won / total * 100)
if winrate == -1:
league = "calibration"
elif 0 <= winrate <= 16:
league = "bronze"
elif 17 <= winrate <= 26:
league = "silver"
elif 27 <= winrate <= 37:
league = "gold"
elif 38 <= winrate <= 48:
league = "platinum"
elif 49 <= winrate <= 59:
league = "ruby"
elif winrate >= 60:
league = "diamond"
return league
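# Worked example: won=30, total=60 -> winrate = round(30 / 60 * 100) = 50, which falls in
# the 49-59 band and maps to "ruby"; with total=8 the league is still "calibration".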
def get_admin_menu():
keyboard = types.InlineKeyboardMarkup(row_width=2)
buttons = [
#types.InlineKeyboardButton(text='Сделать оповещение', callback_data='admin_notify'),
#types.InlineKeyboardButton(text='Забанить', callback_data='admin_ban'),
#types.InlineKeyboardButton(text='Разбанить', callback_data='admin_pardon',),
types.InlineKeyboardButton(text='Назначить наставника', callback_data='admin_mentor'),
types.InlineKeyboardButton(text='Внести протокол игры', callback_data='admin_game')
]
keyboard.add(*buttons)
return keyboard
async def get_username(user_id):
try:
chat = await bot.get_chat(user_id)
username = chat.username
return username
except Exception as e:
print("Error while getting username:", e)
return "404n0tF0uNd" | hiimluck3r/BonchMafia | bot/controllers.py | controllers.py | py | 9,831 | python | en | code | 0 | github-code | 13 |
38711443021 | class People:
    def __init__(self, name, age):
        self.name = name
        self.age = age

    def full_info(self):
        full_information = f"Name-{self.name} age -{self.age}"
        print(full_information)


name_second = input("Write name")
age_second = int(input("Write age"))
frolova_nata = People(name_second, age_second)
frolova_nata.full_info()
| VigularIgnat/python | mygr/03.10/input_class.py | input_class.py | py | 355 | python | en | code | 0 | github-code | 13 |
30793992926 | # Debugger module by Ky Eltis.
class DebugStream():
"""Houses debug data for one stream/table."""
def __init__(self, header, row1) -> None:
"""Init."""
self.header: list[str] = header
self.rows: list[list[str]] = [row1]
self.lens: list[list[int]] = [[len(str(title)) for title in header],
[len(str(item)) for item in row1]]
def add_row(self, row: list[str]) -> None:
"""Gathers debug data to be passed onto debug_table."""
self.rows.append(row)
self.lens.append([len(str(var)) for var in row])
def merge_data(self, var_names, row) -> None:
"""Merge incompatible debug calls on the same stream."""
pass
class Debugger():
"""Debug Class."""
def __init__(self):
"""Init."""
self.streams: dict[str, DebugStream] = {}
def debug(self, locals: dict, var_string: str,
stream: str = 'Stream 1') -> None:
"""Gathers debug data to be passed onto debug_table.
Example use: debug(locals(), "[var1,var2]")
"""
vars = {var.strip(): locals[var.strip()]
for var in var_string.strip('][').split(',')}
var_names = [var for var in vars]
try:
mystream = self.streams[stream]
if all([var in mystream.header for var in var_names]):
# All variables already accounted for in header
mystream.add_row([locals[var] for var in vars])
else:
# Merge data streams
mystream.merge_data(var_names, vars)
except KeyError:
# New data stream
row1 = [locals[var] for var in vars]
self.streams[stream] = DebugStream(var_names, row1)
def table(self, stream: str = 'Stream 1', max_size: int = 15,
min_size: int = 8, precision: int = -1,
quiet: bool = False) -> list:
"""Tabulates debug data."""
# lens = [max(len(str(locals[var])), debug.lens[i], 6)
# for i, var in enumerate(vars)]
try:
mystream = self.streams[stream]
header = mystream.header
rows = mystream.rows
lens = [max(i) for i in zip(*mystream.lens)]
col_sizes = [max(min_size, min(i, max_size)) for i in lens]
sep = ["-"*i for i in col_sizes]
table: list[list[str]] = []
warning = False
for i, row_items in enumerate([header, sep, *rows]):
row: list[str] = []
for j, item in enumerate(row_items):
size = col_sizes[j]
if i == 0:
row.append(("{:^%i}" % (size)).format(item))
else:
try:
_ = int(item)
formatter = "{:> %i.%ig}"
prec = size-3 if precision == -1 else precision
except (ValueError, TypeError):
item = str(item)
if len(item) > size and not warning:
print("---DEBUGGER--- \tWARNING! \t" +
f"Item truncated for '{stream}'")
warning = True
formatter = "{:<%i.%i}"
prec = size if precision == -1 else precision
finally:
row.append((formatter % (size, prec)).format(item))
table.append(row)
if not quiet:
print(f"---DEBUGGER--- \tTABLE {stream} BEGIN")
[print(' \t'.join(row)) for row in table]
print(f"---DEBUGGER--- \tTABLE {stream} END")
return table
except KeyError:
print(f"---DEBUGGER--- ERROR! \tNo debug set for '{stream}'")
if stream == "Stream 1":
print("Did you set the table stream for d.table()?")
return []
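# A minimal usage sketch of the Debugger API above; the variable names are arbitrary examples.
if __name__ == "__main__":
    d = Debugger()
    for x in range(3):
        y = x * x
        d.debug(locals(), "[x, y]")
    d.table()  # prints a small two-column table for x and y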
| Skylite73/Python-Debugging-Modules | Modules/debugger.py | debugger.py | py | 4,077 | python | en | code | 0 | github-code | 13 |
40997458925 | # # #
# make the aerosol time series plot using xarray. Way shorter and faster. Should re-write it all at some point.
# July 2018
# # #
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import xarray as xr
if __name__ == "__main__":
datafile = xr.open_mfdataset('/home/kimberlee/OsirisData/Level2/pre-release/aerosol/*.nc').load()
datafile = datafile.swap_dims({'profile_id': 'time'}, inplace=True)
datafile = datafile.sel(time=slice('20020101', '20151231'))
tropics = datafile.volume_extinction_coefficient_in_air_due_to_ambient_aerosol_particles.where(
(datafile.latitude > -20) & (datafile.latitude < 20))
tropics = tropics.resample(time='D').mean('time') # daily mean
tropics = tropics.where(tropics > 0, drop=True) # shouldn't be negative extinctions
# remove values outside 3 standard deviations from the mean
means = tropics.mean('time', skipna=True) # mean at each altitude
stdevs = tropics.std('time', skipna=True) # std at each altitude
high = np.where(tropics > means + (3 * stdevs))
low = np.where(tropics < means - (3 * stdevs))
tropics.values[high] = np.nan
tropics.values[low] = np.nan
# Calculate anomaly
means = tropics.mean('time', skipna=True)
anomalies = tropics - means
anomalies = 100 * anomalies / means
# Interpolate missing values
anomalies = anomalies.interpolate_na(dim='time', method='linear')
# Smooth
anomalies = anomalies.rolling(time=6, center=True).mean()
sns.set(context="talk", style="white", rc={'font.family': [u'serif']})
fig, ax = plt.subplots(figsize=(8, 4))
fax = anomalies.plot.contourf(x='time', y='altitude', robust=True, levels=np.arange(-100, 100, 2),
cmap="seismic", extend='both', add_colorbar=0)
plt.ylim([19.5, 34.5])
plt.ylabel('Altitude [km]')
plt.xlabel('Year')
locs, labels = plt.xticks()
plt.setp(labels, rotation=0, horizontalalignment='center')
sns.set(context="talk", style="white", rc={'font.family': [u'serif']})
cb = fig.colorbar(fax, orientation='horizontal', fraction=0.2, aspect=50, pad=0.2)
cb.set_label("Anomaly [%]")
plt.tight_layout()
plt.savefig("/home/kimberlee/Masters/Thesis/Figures/aerosolsmooth.png", format='png', dpi=150)
plt.show() | KimDube/Masters-O3-Aerosol-Temp | AltTimeSeries_xarray_version.py | AltTimeSeries_xarray_version.py | py | 2,327 | python | en | code | 0 | github-code | 13 |
30928221425 | import io
import torch
from .hdfs_io import hopen
def load(filepath: str, **kwargs):
""" load model """
if not filepath.startswith("hdfs://"):
return torch.load(filepath, **kwargs)
with hopen(filepath, "rb") as reader:
accessor = io.BytesIO(reader.read())
state_dict = torch.load(accessor, **kwargs)
del accessor
return state_dict
def save(obj, filepath: str, **kwargs):
""" save model """
if filepath.startswith("hdfs://"):
with hopen(filepath, "wb") as writer:
torch.save(obj, writer, **kwargs)
else:
torch.save(obj, filepath, **kwargs)
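# A minimal usage sketch; the paths are placeholders. Any path starting with "hdfs://"
# is routed through hdfs_io.hopen, everything else uses plain torch.load / torch.save.
#   state = load("hdfs://namenode/models/checkpoint.pt", map_location="cpu")
#   save(state, "/tmp/checkpoint_local.pt")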
| zengyan-97/X-VLM | utils/torch_io.py | torch_io.py | py | 636 | python | en | code | 411 | github-code | 13 |
19093079343 | # coding=utf-8
"""Decorators for ``cmd2`` commands"""
import argparse
import types
from typing import TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from . import constants
from .exceptions import Cmd2ArgparseError
from .parsing import Statement
if TYPE_CHECKING: # pragma: no cover
import cmd2
def with_category(category: str) -> Callable:
"""A decorator to apply a category to a ``do_*`` command method.
:param category: the name of the category in which this command should
be grouped when displaying the list of commands.
:Example:
>>> class MyApp(cmd2.Cmd):
>>> @cmd2.with_category('Text Functions')
>>> def do_echo(self, args)
>>> self.poutput(args)
For an alternative approach to categorizing commands using a function, see
:func:`~cmd2.utils.categorize`
"""
def cat_decorator(func):
from .utils import categorize
categorize(func, category)
return func
return cat_decorator
##########################
# The _parse_positionals and _swap_args decorators allow for additional positional args to be preserved
# in cmd2 command functions/callables. As long as the 2-ple of arguments we expect to be there can be
# found we can swap out the statement with each decorator's specific parameters
##########################
def _parse_positionals(args: Tuple) -> Tuple[Union['cmd2.Cmd', 'cmd2.CommandSet'], Union[Statement, str]]:
"""
Helper function for cmd2 decorators to inspect the positional arguments until the cmd2.Cmd argument is found
Assumes that we will find cmd2.Cmd followed by the command statement object or string.
:arg args: The positional arguments to inspect
:return: The cmd2.Cmd reference and the command line statement
"""
for pos, arg in enumerate(args):
from cmd2 import Cmd, CommandSet
if (isinstance(arg, Cmd) or isinstance(arg, CommandSet)) and len(args) > pos:
if isinstance(arg, CommandSet):
arg = arg._cmd
next_arg = args[pos + 1]
if isinstance(next_arg, (Statement, str)):
return arg, args[pos + 1]
# This shouldn't happen unless we forget to pass statement in `Cmd.onecmd` or
# somehow call the unbound class method.
raise TypeError('Expected arguments: cmd: cmd2.Cmd, statement: Union[Statement, str] Not found') # pragma: no cover
def _arg_swap(args: Union[Tuple[Any], List[Any]], search_arg: Any, *replace_arg: Any) -> List[Any]:
"""
Helper function for cmd2 decorators to swap the Statement parameter with one or more decorator-specific parameters
:param args: The original positional arguments
:param search_arg: The argument to search for (usually the Statement)
:param replace_arg: The arguments to substitute in
:return: The new set of arguments to pass to the command function
"""
index = args.index(search_arg)
args_list = list(args)
args_list[index:index + 1] = replace_arg
return args_list
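# For example, _arg_swap([app, statement, extra], statement, ns) returns [app, ns, extra]:
# the Statement positional is swapped out for the decorator-specific argument(s) in place.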
def with_argument_list(*args: List[Callable], preserve_quotes: bool = False) -> Callable[[List], Optional[bool]]:
"""
A decorator to alter the arguments passed to a ``do_*`` method. Default
passes a string of whatever the user typed. With this decorator, the
decorated method will receive a list of arguments parsed from user input.
:param args: Single-element positional argument list containing ``do_*`` method
this decorator is wrapping
:param preserve_quotes: if ``True``, then argument quotes will not be stripped
:return: function that gets passed a list of argument strings
:Example:
>>> class MyApp(cmd2.Cmd):
>>> @cmd2.with_argument_list
>>> def do_echo(self, arglist):
>>> self.poutput(' '.join(arglist)
"""
import functools
def arg_decorator(func: Callable):
@functools.wraps(func)
def cmd_wrapper(*args, **kwargs: Dict[str, Any]) -> Optional[bool]:
"""
Command function wrapper which translates command line into an argument list and calls actual command function
:param args: All positional arguments to this function. We're expecting there to be:
cmd2_app, statement: Union[Statement, str]
contiguously somewhere in the list
:param kwargs: any keyword arguments being passed to command function
:return: return value of command function
"""
cmd2_app, statement = _parse_positionals(args)
_, parsed_arglist = cmd2_app.statement_parser.get_command_arg_list(command_name,
statement,
preserve_quotes)
args_list = _arg_swap(args, statement, parsed_arglist)
return func(*args_list, **kwargs)
command_name = func.__name__[len(constants.COMMAND_FUNC_PREFIX):]
cmd_wrapper.__doc__ = func.__doc__
return cmd_wrapper
if len(args) == 1 and callable(args[0]):
# noinspection PyTypeChecker
return arg_decorator(args[0])
else:
# noinspection PyTypeChecker
return arg_decorator
# noinspection PyProtectedMember
def _set_parser_prog(parser: argparse.ArgumentParser, prog: str):
"""
Recursively set prog attribute of a parser and all of its subparsers so that the root command
is a command name and not sys.argv[0].
:param parser: the parser being edited
:param prog: new value for the parser's prog attribute
"""
# Set the prog value for this parser
parser.prog = prog
# Set the prog value for the parser's subcommands
for action in parser._actions:
if isinstance(action, argparse._SubParsersAction):
# Set the _SubParsersAction's _prog_prefix value. That way if its add_parser() method is called later,
# the correct prog value will be set on the parser being added.
action._prog_prefix = parser.prog
# The keys of action.choices are subcommand names as well as subcommand aliases. The aliases point to the
# same parser as the actual subcommand. We want to avoid placing an alias into a parser's prog value.
# Unfortunately there is nothing about an action.choices entry which tells us it's an alias. In most cases
# we can filter out the aliases by checking the contents of action._choices_actions. This list only contains
# help information and names for the subcommands and not aliases. However, subcommands without help text
# won't show up in that list. Since dictionaries are ordered in Python 3.6 and above and argparse inserts the
# subcommand name into choices dictionary before aliases, we should be OK assuming the first time we see a
# parser, the dictionary key is a subcommand and not alias.
processed_parsers = []
# Set the prog value for each subcommand's parser
for subcmd_name, subcmd_parser in action.choices.items():
# Check if we've already edited this parser
if subcmd_parser in processed_parsers:
continue
subcmd_prog = parser.prog + ' ' + subcmd_name
_set_parser_prog(subcmd_parser, subcmd_prog)
processed_parsers.append(subcmd_parser)
# We can break since argparse only allows 1 group of subcommands per level
break
def with_argparser_and_unknown_args(parser: argparse.ArgumentParser, *,
ns_provider: Optional[Callable[..., argparse.Namespace]] = None,
preserve_quotes: bool = False) -> \
Callable[[argparse.Namespace, List], Optional[bool]]:
"""
Deprecated decorator. Use `with_argparser(parser, with_unknown_args=True)` instead.
A decorator to alter a cmd2 method to populate its ``args`` argument by parsing
arguments with the given instance of argparse.ArgumentParser, but also returning
unknown args as a list.
:param parser: unique instance of ArgumentParser
:param ns_provider: An optional function that accepts a cmd2.Cmd object as an argument
and returns an argparse.Namespace. This is useful if the Namespace
needs to be prepopulated with state data that affects parsing.
:param preserve_quotes: if ``True``, then arguments passed to argparse maintain their quotes
:return: function that gets passed argparse-parsed args in a ``Namespace`` and a list
of unknown argument strings. A member called ``__statement__`` is added to the
``Namespace`` to provide command functions access to the :class:`cmd2.Statement`
object. This can be useful if the command function needs to know the command line.
:Example:
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay')
>>> parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE')
>>> parser.add_argument('-r', '--repeat', type=int, help='output [n] times')
>>>
>>> class MyApp(cmd2.Cmd):
>>> @cmd2.with_argparser(parser, with_unknown_args=True)
>>> def do_argprint(self, args, unknown):
>>> "Print the options and argument list this options command was called with."
>>> self.poutput('args: {!r}'.format(args))
>>> self.poutput('unknowns: {}'.format(unknown))
"""
import warnings
warnings.warn('This decorator will be deprecated. Use `with_argparser(parser, with_unknown_args=True)`.',
PendingDeprecationWarning, stacklevel=2)
return with_argparser(parser, ns_provider=ns_provider, preserve_quotes=preserve_quotes, with_unknown_args=True)
def with_argparser(parser: argparse.ArgumentParser, *,
ns_provider: Optional[Callable[..., argparse.Namespace]] = None,
preserve_quotes: bool = False,
with_unknown_args: bool = False) -> Callable[[argparse.Namespace], Optional[bool]]:
"""A decorator to alter a cmd2 method to populate its ``args`` argument by parsing arguments
with the given instance of argparse.ArgumentParser.
:param parser: unique instance of ArgumentParser
:param ns_provider: An optional function that accepts a cmd2.Cmd object as an argument and returns an
argparse.Namespace. This is useful if the Namespace needs to be prepopulated with
state data that affects parsing.
:param preserve_quotes: if True, then arguments passed to argparse maintain their quotes
:param with_unknown_args: if true, then capture unknown args
:return: function that gets passed the argparse-parsed args in a Namespace
A member called __statement__ is added to the Namespace to provide command functions access to the
Statement object. This can be useful if the command function needs to know the command line.
:Example:
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay')
>>> parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE')
>>> parser.add_argument('-r', '--repeat', type=int, help='output [n] times')
>>> parser.add_argument('words', nargs='+', help='words to print')
>>>
>>> class MyApp(cmd2.Cmd):
>>> @cmd2.with_argparser(parser, preserve_quotes=True)
>>> def do_argprint(self, args):
>>> "Print the options and argument list this options command was called with."
>>> self.poutput('args: {!r}'.format(args))
:Example with unknown args:
>>> parser = argparse.ArgumentParser()
>>> parser.add_argument('-p', '--piglatin', action='store_true', help='atinLay')
>>> parser.add_argument('-s', '--shout', action='store_true', help='N00B EMULATION MODE')
>>> parser.add_argument('-r', '--repeat', type=int, help='output [n] times')
>>>
>>> class MyApp(cmd2.Cmd):
>>> @cmd2.with_argparser(parser, with_unknown_args=True)
>>> def do_argprint(self, args, unknown):
>>> "Print the options and argument list this options command was called with."
>>> self.poutput('args: {!r}'.format(args))
>>> self.poutput('unknowns: {}'.format(unknown))
"""
import functools
def arg_decorator(func: Callable):
@functools.wraps(func)
def cmd_wrapper(*args: Any, **kwargs: Dict[str, Any]) -> Optional[bool]:
"""
Command function wrapper which translates command line into argparse Namespace and calls actual
command function
:param args: All positional arguments to this function. We're expecting there to be:
cmd2_app, statement: Union[Statement, str]
contiguously somewhere in the list
:param kwargs: any keyword arguments being passed to command function
:return: return value of command function
:raises: Cmd2ArgparseError if argparse has error parsing command line
"""
cmd2_app, statement = _parse_positionals(args)
statement, parsed_arglist = cmd2_app.statement_parser.get_command_arg_list(command_name,
statement,
preserve_quotes)
if ns_provider is None:
namespace = None
else:
# The namespace provider may or may not be defined in the same class as the command. Since provider
# functions are registered with the command argparser before anything is instantiated, we
# need to find an instance at runtime that matches the types during declaration
provider_self = cmd2_app._resolve_func_self(ns_provider, args[0])
                namespace = ns_provider(provider_self if provider_self is not None else cmd2_app)
try:
if with_unknown_args:
new_args = parser.parse_known_args(parsed_arglist, namespace)
else:
new_args = (parser.parse_args(parsed_arglist, namespace), )
ns = new_args[0]
except SystemExit:
raise Cmd2ArgparseError
else:
setattr(ns, '__statement__', statement)
def get_handler(ns_self: argparse.Namespace) -> Optional[Callable]:
return getattr(ns_self, constants.SUBCMD_HANDLER, None)
setattr(ns, 'get_handler', types.MethodType(get_handler, ns))
args_list = _arg_swap(args, statement, *new_args)
return func(*args_list, **kwargs)
# argparser defaults the program name to sys.argv[0], but we want it to be the name of our command
command_name = func.__name__[len(constants.COMMAND_FUNC_PREFIX):]
_set_parser_prog(parser, command_name)
# If the description has not been set, then use the method docstring if one exists
if parser.description is None and func.__doc__:
parser.description = func.__doc__
# Set the command's help text as argparser.description (which can be None)
cmd_wrapper.__doc__ = parser.description
# Set some custom attributes for this command
setattr(cmd_wrapper, constants.CMD_ATTR_ARGPARSER, parser)
setattr(cmd_wrapper, constants.CMD_ATTR_PRESERVE_QUOTES, preserve_quotes)
return cmd_wrapper
# noinspection PyTypeChecker
return arg_decorator
def as_subcommand_to(command: str,
subcommand: str,
parser: argparse.ArgumentParser,
*,
help: Optional[str] = None,
aliases: Iterable[str] = None) -> Callable[[argparse.Namespace], Optional[bool]]:
"""
Tag this method as a subcommand to an existing argparse decorated command.
:param command: Command Name. Space-delimited subcommands may optionally be specified
:param subcommand: Subcommand name
:param parser: argparse Parser for this subcommand
:param help: Help message for this subcommand which displays in the list of subcommands of the command we are adding to.
This is passed as the help argument to ArgumentParser.add_subparser().
:param aliases: Alternative names for this subcommand. This is passed as the alias argument to
ArgumentParser.add_subparser().
:return: Wrapper function that can receive an argparse.Namespace
"""
def arg_decorator(func: Callable):
_set_parser_prog(parser, command + ' ' + subcommand)
# If the description has not been set, then use the method docstring if one exists
if parser.description is None and func.__doc__:
parser.description = func.__doc__
# Set some custom attributes for this command
setattr(func, constants.SUBCMD_ATTR_COMMAND, command)
setattr(func, constants.CMD_ATTR_ARGPARSER, parser)
setattr(func, constants.SUBCMD_ATTR_NAME, subcommand)
# Keyword arguments for ArgumentParser.add_subparser()
add_parser_kwargs = dict()
if help is not None:
add_parser_kwargs['help'] = help
if aliases is not None:
add_parser_kwargs['aliases'] = aliases[:]
setattr(func, constants.SUBCMD_ATTR_ADD_PARSER_KWARGS, add_parser_kwargs)
return func
# noinspection PyTypeChecker
return arg_decorator
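# A hedged usage sketch (invented names, not taken from the cmd2 documentation): the parent
# command owns a parser with sub-parsers, and ns.get_handler() -- attached to the Namespace in
# with_argparser above -- dispatches to whichever subcommand function argparse selected.
# The exact registration of the subcommand parser is handled by cmd2 at startup.
#
#   base_parser = argparse.ArgumentParser()
#   base_parser.add_subparsers(title='subcommands')
#
#   class MyApp(cmd2.Cmd):
#       @cmd2.with_argparser(base_parser)
#       def do_base(self, ns: argparse.Namespace):
#           handler = ns.get_handler()
#           if handler is not None:
#               handler(ns)
#           else:
#               self.do_help('base')
#
#       list_parser = argparse.ArgumentParser()
#
#       @cmd2.as_subcommand_to('base', 'list', list_parser, help='list tracked items')
#       def base_list(self, ns: argparse.Namespace):
#           self.poutput('listing...')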
| etherkit/OpenBeacon2 | macos/venv/lib/python3.8/site-packages/cmd2/decorators.py | decorators.py | py | 17,978 | python | en | code | 13 | github-code | 13 |
13726381394 | import numpy as np
import pickle
import pandas as pd
from ruffus import *
from tqdm import tqdm
from rdkit import Chem
import pickle
import os
from glob import glob
import time
import util
import nets
import relnets
from rdkit.Chem import AllChem
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem import rdMolDescriptors as rdMD
import torch
from torch import nn
from tensorboardX import SummaryWriter
from tqdm import tqdm
from netdataio import *
import netdataio
import itertools
DATASET_DIR = "graph_conv_many_nuc_pipeline.datasets"
td = lambda x : os.path.join(DATASET_DIR, x)
CV_SETS = [np.arange(4) + i*4 for i in range(5)]
default_spect_fname = 'dataset.named/spectra.nmrshiftdb_{}.feather'
solv_spect_fname = 'dataset.named/spectra.nmrshiftdb_{}_{}.feather'
NMRSHIFTDB_SUBSET_FILENAME = 'predict.atomic/molconf.nmrshiftdb_hconfspcl_nmrshiftdb.subsets.pickle'
spectra_sets = {#'13C_1H' : [('13C', default_spect_fname.format('13C')),
# ('1H', default_spect_fname.format('1H'))],
'13C' : [('13C', default_spect_fname.format('13C'))],
'1H' : [('1H', default_spect_fname.format('1H'))],
'13C_cdcl3' : [('13C', solv_spect_fname.format('13C', 'cdcl3'))],
'13C_13C_cdcl3' : [ ('13Call', default_spect_fname.format('13C')),
('13C', solv_spect_fname.format('13C', 'cdcl3'))],
'1H_cdcl3' : [('1H', solv_spect_fname.format('1H', 'cdcl3'))],
# ### All Hs as bonded
# '13C_1HasBonded' : [('13C', default_spect_fname.format('13C')),
# ('1H', default_spect_fname.format('hconfspcl_1H_as_bonded'))],
# ## Hs bonded to C, as H
# '13C_1Hcbonded' : [('13C', default_spect_fname.format('13C')),
# ('1H', default_spect_fname.format('hconfspcl_1H_Cbonded'))],
# ### only Hs bonded to C
# '13C_1HcbondedasBonded' : [('13C', default_spect_fname.format('13C')),
# ('1H', default_spect_fname.format('hconfspcl_1HCbonded_as_bonded'))]
}
def dataset_params():
for CV_I in [0, 1, 2, 3, 4]: # range(len(CV_SETS)):
for kekulize_prop in ['aromatic']: # ['kekulize', 'aromatic']:
for dataset_name in ['nmrshiftdb_hconfspcl_nmrshiftdb']:
for spectra_set_name, spectra_config in spectra_sets.items():
for MAX_N in [64]: # [32, 64]:
outfile = 'graph_conv_many_nuc_pipeline.data.{}.{}.{}.{}.{}.mol_dict.pickle'.format(spectra_set_name, dataset_name,
kekulize_prop, MAX_N, CV_I)
mol_filename = f'dataset.named/molconf.{dataset_name}.pickle'
yield ([sc[1] for sc in spectra_config] + [NMRSHIFTDB_SUBSET_FILENAME] + [mol_filename],
td(outfile), CV_I,
kekulize_prop, spectra_set_name, spectra_config,
MAX_N)
qm9_spectra_sets = {'13C_1H' : [('13C', "dataset.named/spectra.qm9.cheshire_g09_01_nmr.13C.feather"),
('1H', "dataset.named/spectra.qm9.cheshire_g09_01_nmr.1H.feather")],
'13C' : [('13C', "dataset.named/spectra.qm9.cheshire_g09_01_nmr.13C.feather"),],
'1H' : [('1H', "dataset.named/spectra.qm9.cheshire_g09_01_nmr.1H.feather")],
}
QM9_SUBSET_FILENAME = 'dataset.named/qm9.subsets.pickle'
for CV_I in [0]: # range(len(CV_SETS)):
for kekulize_prop in ['kekulize', 'aromatic']:
for dataset_name in ['qm9']:
for spectra_set_name, spectra_config in qm9_spectra_sets.items():
for MAX_N in [32]:
outfile = 'graph_conv_many_nuc_pipeline.data.{}.{}.{}.{}.{}.mol_dict.pickle'.format(spectra_set_name, dataset_name,
kekulize_prop, MAX_N, CV_I)
mol_filename = f'dataset.named/molconf.{dataset_name}.pickle'
yield ([sc[1] for sc in spectra_config] + [QM9_SUBSET_FILENAME] + [mol_filename],
td(outfile), CV_I,
kekulize_prop, spectra_set_name, spectra_config,
MAX_N)
@mkdir(DATASET_DIR)
@files(dataset_params)
def create_dataset(infiles, outfile, cv_i, kekulize_prop,
spectra_set_name, spectra_config, MAX_N):
mol_filename = infiles[-1]
mol_subset_filename = infiles[-2]
cv_mol_subset = CV_SETS[cv_i]
mol_subsets = pickle.load(open(mol_subset_filename, 'rb'))['splits_df']
tgt_nucs = [sc[0] for sc in spectra_config]
spectra_dfs = []
for nuc, spectra_filename in spectra_config:
df = pd.read_feather(spectra_filename)
df = df.rename(columns={'id' : 'peak_id'})
df['nucleus'] = nuc
spectra_dfs.append(df)
spectra_df = pd.concat(spectra_dfs)
molecules_df = pickle.load(open(mol_filename, 'rb'))['df']
molecules_df['atom_n'] = molecules_df.rdmol.apply(lambda x: x.GetNumAtoms())
molecules_df = molecules_df[molecules_df.atom_n <= MAX_N]
def s_dict(r):
return dict(zip(r.atom_idx, r.value))
spect_dict_df = spectra_df.groupby(['molecule_id', 'spectrum_id', 'nucleus']).apply(s_dict )
data_df = spect_dict_df.reset_index()\
.rename(columns={0 : 'value'})\
.join(molecules_df, on='molecule_id').dropna()
for row_i, row in tqdm(data_df.iterrows(), total=len(data_df)):
mol = row.rdmol
try:
Chem.SanitizeMol(mol, Chem.rdmolops.SanitizeFlags.SANITIZE_ALL,
catchErrors=True)
mol.UpdatePropertyCache()
Chem.SetAromaticity(mol)
if kekulize_prop == 'kekulize':
Chem.rdmolops.Kekulize(mol)
except ValueError:
pass
### Combine and create the cartesian product across different nuclei
### of various spectra
mol_data = []
for mol_id, mol_df in tqdm(data_df.groupby('molecule_id')):
sp = {n : list([(-1, {})]) for n in tgt_nucs}
for row_i, row in mol_df.iterrows():
sp[row.nucleus].append((row.spectrum_id, row.value))
for nuc_lists in itertools.product(*[sp[n] for n in tgt_nucs]):
spectra_ids = [a[0] for a in nuc_lists]
if (np.array(spectra_ids) == -1).all():
continue
values = [a[1] for a in nuc_lists]
mol_data.append({'molecule_id' : mol_id,
'rdmol' : row.rdmol,
'spectra_ids' : spectra_ids,
'value': values})
#mol_data.append({'molecule_id' : mol_id, ''})
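    # Example: with tgt_nucs = ['13C', '1H'], a molecule with one 13C spectrum s1 and two 1H
    # spectra h1, h2 yields the rows (s1, -1), (s1, h1), (s1, h2), (-1, h1), (-1, h2); only the
    # all-placeholder combination (-1, -1) is skipped above.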
data_df = pd.DataFrame(mol_data)
### Train/test split
train_test_split = mol_subsets.subset20_i.isin(cv_mol_subset)
train_mols = mol_subsets[~train_test_split].index.values
test_mols = mol_subsets[train_test_split].index.values
train_df = data_df[data_df.molecule_id.isin(train_mols)]
test_df = data_df[data_df.molecule_id.isin(test_mols)]
pickle.dump({'train_df' : train_df,
'test_df' : test_df,
'MAX_N' : MAX_N,
'spectra_config' : spectra_config,
'tgt_nucs' : tgt_nucs},
open(outfile, 'wb'), -1)
if __name__ == "__main__":
pipeline_run([create_dataset])
| stefhk3/nmrfilter | respredict/create_datasets_many_nuc.py | create_datasets_many_nuc.py | py | 7,919 | python | en | code | 1 | github-code | 13 |
14137405318 | from .loghandler import logger
from .lxdClient import lxd
from .lxdClient import lxdException
from . import overlay
# dispatch based on cli args
def dispatch(cmd, args):
# launch a new container
if cmd == "launch":
# get container name
if args:
containerName = args[0]
else:
import requests
link = "https://frightanic.com/goodies_content/docker-names.php"
logger.debug("fetching docker-like name")
containerName = requests.get(link).text.strip().replace("_", "-")
# create base if it doesn't exist
try:
lxd.containers.get('base')
except lxdException.NotFound:
logger.debug("initializing base environment")
overlay.create_base()
# new container on overlayfs
try:
logger.debug("creating {}".format(containerName))
container = overlay.launch(containerName)
logger.debug("mounting overlayfs")
overlay.mount(container.name)
container.start(wait=True)
logger.debug("{} state {}".format(container.name, container.status))
logger.info("created {}".format(container.name))
except Exception:
container.delete()
raise
# umount overlay and delete container
elif cmd == "delete":
if args:
container = lxd.containers.get(args[0])
else:
raise RuntimeError("which one?")
# test if we need to stop it
if container.status == "Running":
logger.debug("{} state {}".format(container.name, container.status))
raise RuntimeError("{} is running, refusing to delete".format(container.name))
logger.info("note: umount not yet implemented")
container.delete(wait=True)
logger.info("deleted {}".format(container.name))
# catch-all
else:
raise RuntimeError("{}: unknown action".format(cmd))
| Jayfrown/pycor | pycor/dispatcher.py | dispatcher.py | py | 1,986 | python | en | code | 3 | github-code | 13 |
9376065077 | from pathlib import Path
import streamlit as st
from PIL import Image
# Path setup
this_dir = Path(__file__).parent if "__file__" in locals() else Path.cwd()
assets_dir = this_dir / "assets"
styles_dir = this_dir / "styles"
css_file = styles_dir / "main.css"
# General settings. Edit with your product's details
stripe_chekout_url = "url de stripe"
contact_email = "tu email"
product_name = "nombre del producto"
product_tagline = "Lema del producto"
demo_video_url = "https://www.youtube.com/watch?v=jNQXAC9IVRw"
product_description = """
Esta es la descripción del producto digital que prentendemos vender en nuestra landing page. Vamos a poner a continuación algunos puntos interesantes a saber sobre nuestro producto:
- Punto 1
- Punto 2
- Punto 3
- Punto 4
***esto es un ejemplo de texto en negrita***
"""
# Visual settings for the page. No editing needed
def local_css(file_name):
with open(file_name) as f:
st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
# Page configuration. No editing needed
st.set_page_config(
page_title= product_name,
page_icon= ":star:",
layout= "centered",
initial_sidebar_state= "collapsed",
)
local_css(css_file)
# MAIN SECTION. No editing needed
st.header(product_name)
st.subheader(product_tagline)
left_column, right_column = st.columns((2,1))
with left_column:
st.text("")
st.write(product_description)
st.markdown(
f'<a href="{stripe_chekout_url}" class="button">👉🏼 Comprar aquí</a>',
unsafe_allow_html=True,
)
with right_column:
product_image = Image.open(assets_dir / "product_image.png")
st.image(product_image, width=450)
# FEATURES SECTION. Edit the headings and descriptions of your product's features
st.write("")
st.write("---")
st.subheader(":rocket: Características")
features = {
"feature_1.png": ["cabecera de la característica 1",
"Aquí vendría la descripción de la característica 1",],
"feature_2.png": ["cabecera de la característica 2",
"Aquí vendría la descripción de la característica 2",],
"feature_3.png": ["cabecera de la característica 3",
"Aquí vendría la descripción de la característica 3",],
}
for image, description in features.items():
image = Image.open(assets_dir / image)
st.write("")
left_column, right_column = st.columns(2)
left_column.image(image, use_column_width=True)
right_column.subheader(f"**{description[0]}**")
right_column.write(description[1])
# DEMO VIDEO SECTION. Edit with the link to your demo video. If you don't have one, you can remove this section
st.write("")
st.write("---")
st.subheader(":tv: Demo video")
st.video(demo_video_url, format="video/mp4", start_time=0)
# FAQ SECTION. Edit with your product's frequently asked questions
st.write("")
st.write("---")
st.subheader(":raising_hand: FAQ")
faq = {
"¿Pregunta 1?": "Respuesta 1",
"¿Pregunta 2?": "Respuesta 2",
"¿Pregunta 3?": "Respuesta 3",
"¿Pregunta 4?": "Respuesta 4",
"¿Pregunta 5?": "Respuesta 5",
}
for question, answer in faq.items():
with st.expander(question):
st.write(answer)
# CONTACT SECTION. No editing needed here, but you must set your contact email in the general settings
st.write("")
st.write("---")
st.subheader(":mailbox: Tienes alguna duda? Contacta con nosotros")
contact_form = f"""
<form action="https://formsubmit.co/{contact_email}" method="POST">
<input type="hidden" name="_captcha" value="false">
<input type="text" name="name" placeholder = "Tu nombre" required>
<input type="email" name="email" placeholder = "Tu email" required>
<textarea name="message" placeholder = "Tu mensaje aquí" required></textarea>
<button type="submit" class ="button" >Enviar ✉️</button>
</form>
"""
st.markdown(contact_form, unsafe_allow_html=True)
| valantoni/pagina-web-venta-pd | app.py | app.py | py | 3,949 | python | es | code | 0 | github-code | 13 |
39724701567 | #!/usr/bin/env python3
a, b = input().split('+')
_rdecode = dict(zip('MDCLXVI', (1000, 500, 100, 50, 10, 5, 1)))
def decode_roman(roman):
    # Subtractive notation: a numeral smaller than its right neighbour is subtracted.
    result = 0
for r, r1 in zip(roman, roman[1:]):
rd, rd1 = _rdecode[r], _rdecode[r1]
result += -rd if rd < rd1 else rd
return result + _rdecode[roman[-1]]
anums = [1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1]
rnums = "M CM D CD C XC L XL X IX V IV I".split()
def encode_roman(x):
    # Greedy encoding: emit as many of each denomination (including CM, XL, IV, ...) as fit.
    ret = []
for a,r in zip(anums, rnums):
n,x = divmod(x,a)
ret.append(r*n)
return ''.join(ret)
a = decode_roman(a)
b = decode_roman(b)
print (encode_roman(a + b))
| esix/competitive-programming | e-olymp/0xxx/0007/main.py | main.py | py | 653 | python | en | code | 15 | github-code | 13 |
42591082173 | import sys
from Block import Block
from Physics import Physics
class Player(Block):
player_width = 16
player_height = 32
color = (97, 169, 188)
velocity_x = float(0)
velocity_y = float(0)
gravity_x = float(0)
gravity_y = float(30)
acceleration_x = float(100)
acceleration_y = float(500)
direction = 1
max_velocity_x = 7
max_velocity_y = 30
is_blocked_left = False
is_blocked_right = False
is_blocked_top = False
is_on_ground = False
physic = Physics()
def __init__(self, game, x, y):
self.player_width = 16
self.player_height = 32
super(Player, self).__init__(game, x, y, self.player_width, self.player_height, self.color)
self.x_raw = float(self.x)
self.y_raw = float(self.y)
self.debug_value_1 = ''
self.debug_value_2 = ''
def update(self, delta_time, events):
super(Player, self).update(delta_time, events)
self.move(delta_time=delta_time)
def draw(self, screen):
super(Player, self).draw(screen)
def move(self, x=0, y=0, delta_time=0):
self.apply_gravity(delta_time)
self.jump(delta_time)
self.detect_objective_y()
self.move_y()
self.walk(delta_time)
self.detect_objective_x()
self.move_x()
def apply_gravity(self, delta_time):
if abs(self.velocity_y) < self.max_velocity_y:
self.velocity_y += self.gravity_y * delta_time
def jump(self, delta_time):
if self.game.input.jump and self.is_on_ground:
self.velocity_y = 0
self.velocity_y -= self.acceleration_y * delta_time
self.is_on_ground = False
def detect_objective_y(self):
self.is_on_ground = False
self.is_blocked_top = False
for objective in self.game.map.objectives:
self.physic.detect_collision_y(self, objective)
if self.is_on_ground or self.is_blocked_top:
sys.exit()
def move_y(self):
self.y_raw += self.velocity_y
self.y = round(self.y_raw, 0)
for block in self.game.map.blocks:
self.physic.detect_collision_y(self, block)
self.physic.resolve_collision_y(self, block)
self.y = round(self.y_raw, 0)
def walk(self, delta_time):
if self.game.input.is_exclusive_direction():
if self.game.input.right:
if self.velocity_x < 0 and self.is_on_ground:
self.velocity_x = 0
if self.velocity_x <= self.max_velocity_x:
self.velocity_x += self.acceleration_x * delta_time
if self.velocity_x > self.max_velocity_x:
self.velocity_x = self.max_velocity_x
else:
if self.velocity_x > 0 and self.is_on_ground:
self.velocity_x = 0
if abs(self.velocity_x) <= self.max_velocity_x:
self.velocity_x -= self.acceleration_x * delta_time
if abs(self.velocity_x) > self.max_velocity_x:
self.velocity_x = -self.max_velocity_x
def detect_objective_x(self):
if self.is_on_ground:
self.velocity_x = self.velocity_x * .8
self.is_blocked_left = False
self.is_blocked_right = False
for objective in self.game.map.objectives:
self.physic.detect_collision_x(self, objective)
if self.is_blocked_left or self.is_blocked_right:
sys.exit()
def move_x(self):
self.x_raw += self.velocity_x
self.x = round(self.x_raw, 0)
for block in self.game.map.blocks:
self.physic.detect_collision_y(self, block)
self.physic.resolve_collision_x(self, block)
self.x = round(self.x_raw, 0)
| Axdecces/Bounce | Player.py | Player.py | py | 3,844 | python | en | code | 0 | github-code | 13 |
27169930776 | from benchopt import BaseSolver
from benchopt import safe_import_context
from benchopt.utils.stream_redirection import SuppressStd
with safe_import_context() as import_ctx:
import numpy as np
from scipy.optimize import fmin_l_bfgs_b
class Solver(BaseSolver):
name = "scipy L-BFGS"
install_cmd = "conda"
requirements = ["scipy"]
def set_objective(self, X, y, reg):
self.X, self.y = X, y
self.reg = reg
def run(self, n_iter):
_, n = self.X.shape
x0 = np.zeros((n,))
def func(w):
residual = self.X.dot(w) - self.y
f = 0.5 * residual.dot(residual) + 0.5 * self.reg * w.dot(w)
grad = self.X.T @ residual + self.reg * w
return f, grad
bounds = [(0, np.inf)] * n
out = SuppressStd()
try:
self.w, _, _ = fmin_l_bfgs_b(
func, x0, bounds=bounds, pgtol=0.0, factr=0.0, maxiter=n_iter
)
except BaseException:
print(out.output)
raise
def get_result(self):
return self.w
| agramfort/benchmark_ridge_positive | solvers/lbfgs_scipy.py | lbfgs_scipy.py | py | 1,092 | python | en | code | 0 | github-code | 13 |
15172121998 | # coding=utf-8
import os
from gensim import corpora
from sklearn.externals import joblib
from scipy.sparse import csr_matrix
from setup import *
import re
import json
from src.score import *
def init():
cate_ids = init_setup(config_cat_id)
return cate_ids
def cleanhtml(raw_html):
"""
Clear tag <.*?> in the content of news
"""
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
def get_sparse_representation(doc, vocab_size):
""" Run program classify news """
row = []
col = []
data = []
total_sum = 0.0
for i in range(len(doc)):
(wid, fre) = doc[i]
row.append(0)
col.append(wid)
data.append(fre)
total_sum += fre
for i in range(len(doc)):
data[i] = data[i] / total_sum
vector = csr_matrix((data, (row, col)), shape=(1, vocab_size), dtype=float)
return vector
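# Worked example: doc = [(0, 2), (3, 1)] with vocab_size = 5 yields a 1 x 5 sparse row whose
# dense form is [2/3, 0, 0, 1/3, 0] -- the raw counts normalised to sum to 1.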
def get_cat(text, file_model,file_dict):
""" Return the id of news """
classifier = joblib.load(file_model)
dictionary = corpora.Dictionary.load_from_text(file_dict)
doc = dictionary.doc2bow(text.split())
caterogy = []
if (len(doc) > 0):
vecto = get_sparse_representation(doc, len(dictionary.token2id))
caterogy = classifier.predict(vecto)
return caterogy[0]
def get_news_none_cat():
"""
:return: the array of news dont have id of category and update
if it have id of category
"""
cate_ids = init()
data_all = get_content_none_cateId()
result = []
for news in data_all :
if news[u'topic_name'].lower() not in category_id :
result.append(news)
elif news[u'topic_name'].lower() in category_id :
print(news[u'topic_name'].lower(),news['id'] ,cate_ids[news[u'topic_name'].lower()] )
update_cateId(news['id'], cate_ids[news[u'topic_name'].lower()])
return result
def update_category_news(data):
for news in data :
title = news['title']
sapo = news['sapo']
content = cleanhtml(news['content'])
text = title.lower() + " " + sapo.lower() + " " + content.lower()
cate_id = get_cat(text, file_model, file_dict)
update_cateId(news['id'], cate_id)
| loilethanh/news_trending | src/category_classify.py | category_classify.py | py | 2,266 | python | en | code | 0 | github-code | 13 |
30477232073 | N = int(input())
S = input()
a = 0
b = N - 1
ans = ""
while a <= b:
left = False
for i in range(b - a + 1):
if S[a + i] < S[b - i]:
left = True
break
elif S[a + i] > S[b - i]:
left = False
break
if left:
ans += S[a]
a += 1
else:
ans += S[b]
b -= 1
print(ans) | ShimizuKo/study | 蟻本/2章/p45.py | p45.py | py | 319 | python | en | code | 1 | github-code | 13 |
27074968495 | import requests
import os
import time
import re
import json
from openpyxl import Workbook
from openpyxl import load_workbook
from selenium import webdriver
from selenium.webdriver import ChromeOptions
from time import sleep
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from threading import Timer
import threading
capa = DesiredCapabilities.CHROME
capa["pageLoadStrategy"] = "eager"
option=ChromeOptions()
# option.add_argument('--headless')
option.add_argument('--no-sandbox')
option.add_argument('log-level=3') #INFO = 0 WARNING = 1 LOG_ERROR = 2 LOG_FATAL = 3 default is 0
option.add_experimental_option('excludeSwitches',['enable-automation'])
# option.add_argument('--blink-settings=imagesEnabled=false')
# option.add_experimental_option("debuggerAddress", "127.0.0.1:9527")
import multiprocessing
from multiprocessing import Process,Lock
# import multiprocessing_win
headers={
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'
}
baseurl='https://api.plantnet.org/v1/projects/'
countrycodels=['the-plant-list','useful','weeds','invasion','prota','prosea',
'weurope','canada','namerica','central-america','antilles','colombia','guyane','brazil','lapaz','martinique',
'afn','aft','reunion','maurice','comores','medor','malaysia','japan',
'nepal','endemia','hawai','polynesiefr']
countryls=['WorldFlora','UsefulPlants','Weeds','InvasivePlants','UsefulPlantsTropicalAfrica','UsefulPlantsAsia',
'WesternEurope','Canada','USA','CentralAmerica','Caribbean','Colombia','Amazonia','Brazil','TropicalAndes','Martinique',
'NorthAfrica','TropicalAfrica','Reunion','Mauritius','ComoroIslands','EasternMediterranean','Malaysia','Japan',
'Nepal','NewCaledonia','Hawaii','FrenchPolynesia']
def get(i):
    if not os.path.exists(countryls[i]+'.xlsx'):
wb=Workbook()
ws=wb.active
ws.append(['Species','common name','photo','observation','Family'])
wb.save(countryls[i]+'.xlsx')
else:
        print('please remove '+countryls[i]+'.xlsx'+' and then try again')
return
for page in range(10000):
time1=time.mktime(time.localtime())
data=[[countryls[i]]]
url2=baseurl+countrycodels[i] + '/species?pageSize=400&page='+str(page)+'&lang=en&sortBy=images_count&sortOrder=desc&illustratedOnly=true'
browser.get(url2)
r=browser.find_element(By.CSS_SELECTOR,'pre').text
# r=requests.get(url2,headers=headers)
b=re.findall('name(.*?)searchTerms"',r)
time2=time.mktime(time.localtime())
print('---------------------------')
print(countryls[i]+' page NO.' + str(page)+' lenth:'+str(len(b)))
if (len(b)==0):
print(countryls[i]+' over')
return
for item in b:
name=re.findall('":"(.*?)","auth',item)
auth=re.findall('author":"(.*?)"',item)
commonname=re.findall('commonNames":\["(.*?)"',item)
photo=re.findall('imagesCount":(.*?),"',item)
observation=re.findall('observationsCount":(.*?),"',item)
family=re.findall('family":"(.*?)"',item)
if len(name)==0:
name='NULL'
else:
name=name[0]
if len(auth) == 0:
auth='NULL'
else:
auth=auth[0]
if len(commonname)==0:
commonname='NULL'
else:
commonname=commonname[0]
if len(photo)==0:
photo='NULL'
else:
photo=photo[0]
if len(observation)==0:
observation='NULL'
else:
observation=observation[0]
if len(family)==0:
family='NULL'
else:
family=family[0]
name=name+' '+auth
data.append([name,commonname,photo,observation,family])
t=multiprocessing.Process(target=savedata,args=(data,))
t.start()
# wb=load_workbook(countryls[i]+'.xlsx')
# ws=wb.active
# ws.append([name+' '+auth,commonname,photo,observation,family])
# wb.save(countryls[i]+'.xlsx')
print('timecost ' +str(time2-time1))
print('---------------------------')
specie_type_ls=['Flower',
'Leaf',
'Fruit',
'Bark',
'Habit',
'Other'
]
def savedata(data):
bookname=data[0][0]
data.remove(data[0])
wb=load_workbook(bookname+'.xlsx')
ws=wb.active
for item in data:
ws.append(item)
wb.save(bookname+'.xlsx')
def savedata_2(data):
name=data[0][0]
specie_type=data[0][1]
data.remove(data[0])
wb=load_workbook(name+"//"+name+"_"+specie_type+'.xlsx')
ws=wb.active
for item in data:
ws.append(item)
wb.save(name+"//"+name+"_"+specie_type+'.xlsx')
def download_species(name,num,typeindex):
if not os.path.exists(name+'//'):
os.makedirs(name+'//')
else:
print(name+' already exists,pass')
return
print(name+' searching...')
itemnum=0
nameurl='https://api.plantnet.org/v1/projects/the-plant-list/species?pageSize=50&page=0&lang=en&search='+name+'&sortBy=images_count&sortOrder=desc'
r=requests.get(nameurl,headers=headers)
try:
auth=re.findall('"author":"(.*?)","fam',r.text)[0]
except:
print(name+' not found please try again later')
return
searchname=name.replace(' ','%20')
if auth!='':
searchname=searchname+'%20'+auth
url='https://identify.plantnet.org/the-plant-list/species/'+searchname+'/data'
print(searchname)
browser.get(url)
js="var q=document.documentElement.scrollTop=100000"
browser.execute_script(js)
sleep(120)
try:
father=browser.find_element(By.CSS_SELECTOR,'nav.card-header')
except:
print(name+' not found please try again later')
# browser.quit()
return
if not os.path.exists(name+'//'+name+'_'+specie_type_ls[typeindex]+'.xlsx'):
wb=Workbook()
ws=wb.active
ws.append(['图片名','来源','网址'])
wb.save(name+'//'+name+'_'+specie_type_ls[typeindex]+'.xlsx')
flag=True
for i in range(6):
# i=species_t%6
if flag==False:
break
type=father.find_elements(By.CSS_SELECTOR,'li.nav-item')
print(name+' types: '+str(len(type)))
if i>=len(type):
break
b=type[i].find_element(By.CSS_SELECTOR,'a')
b.send_keys(Keys.ENTER)
species_type=b.find_element(By.CSS_SELECTOR,'img').get_attribute('alt')
species_type=species_type.capitalize()
print(name + ' '+species_type)
if typeindex!=9:
# print(name + ' '+species_type+' '+specie_type_ls[typeindex])
if species_type!=specie_type_ls[typeindex]:
# print('ssssssssssssssssssssssssssssssssssssssssssssssssssssss')
continue
else:
flag=False
# print('--------------------------------------------------')
sleep(4)
# species_type=specie_type_ls[i]
while True:
a=browser.find_elements(By.CSS_SELECTOR,"img.img-fluid")
js="var q=document.documentElement.scrollTop=100000"
browser.execute_script(js)
sleep(8)
# hei=0
# for y in range(5):
# hei+=1000
# browser.execute_script(f'window.scrollto(0,{hei})')
# for item in a:
# item.send_keys(Keys.ENTER)
# sleep(10)
# info=browser.find_element(By.CSS_SELECTOR,'header.modal-header')
# print(info)
# sleep(5)
if itemnum==len(a):
print('not increasing')
break
itemnum=len(a)
# print(itemnum)
if itemnum>num:
break
print(name+' '+species_type+' total:'+str(itemnum))
ls1=[]
ls2=[]
ls3=[]
j=0
for i in range(itemnum):
if i>5000:
break
picurl=a[i].get_attribute('src')
if j>num-1:
break
if picurl==None:
# print('-1')
picurl=a[i].get_attribute('data-src')
if picurl==None:
print('-2')
continue
else:
pass
# print('ok '+picurl)
j=j+1
picurl=picurl.replace('/s/','/o/')
ls1.append(picurl)
picid=re.findall('/o/(.*)',picurl)[0]
ls2.append(picid)
ls3.append(name+'_'+species_type+'_'+picid+'.jpg')
# if species_t>5:
data=[[name,species_type]]
for i in range (len(ls3)):
data.append([ls3[i],"",ls1[i]])
t=multiprocessing.Process(target=savedata_2,args=(data,))
t.start()
# # print(picid)
# r=requests.get(picurl,headers=headers)
# r.raise_for_status()
# # if os.path.exists(name+'\\'+name+'_'+picid+'.jpg'):
# # continue
# f=open(name+'//'+name+'_'+species_type+'_'+picid+'.jpg','wb')
# f.write(r.content)
# print(name+' '+species_type+' +1')
# f.close()
ppls=[]
i=0
flag=1
while flag:
for j in range(20):
if i>len(ls1)-1:
flag=0
break
data=[ls1[i],ls2[i],name,species_type]
pp=Process(target=download,args=(data,))
i=i+1
# if i>len(bb):
# break
pp.start()
ppls.append(pp)
# f=open(target+'\\'+target+'.txt','a')
# f.write('\n'+als[i]+'_'+str(i)+'.jpg''\t'+bls[i]+'\t'+bls[i].replace('midthumb','smthumb')+'\t'+cls[i]+'\t'+dls[i])
# f.close()
for thread in ppls:
thread.join()
# browser.quit()
def download(data):
picurl=data[0]
picid=data[1]
name=data[2]
species_type=data[3]
# print(picurl)
if os.path.exists(name+'\\'+name+'_'+species_type+'_'+picid+'.jpg'):
print(name+'_'+species_type+'_'+picid+'.jpg already exists')
return
try:
r=requests.get(picurl,headers=headers)#,verify=False)
except:
print('error')
return
f=open(name+'\\'+name+'_'+species_type+'_'+picid+'.jpg','wb')
# r.raise_for_status()
f.write(r.content)
# i=i+1
print(name+' '+species_type+' ''+1')
f.close()
if __name__ == "__main__":
multiprocessing.freeze_support()
browser=webdriver.Chrome(options=option,desired_capabilities=capa)
browser.implicitly_wait(6)
#1. get list workbook
# print('请输入要下载的序号,如0,1,2,输入999下载全部')
# i=0
# for item in countryls:
# print(str(i)+'.'+item+'\t')
# i=i+1
# choice=input()
# if(choice=='999'):
# for i in range(len(countrycodels)):
# get(i)
# else:
# get(int(choice))
#2. download
print('请输入下载的品种编号:0.Flower 1.Leaf 2.Fruit 3.Bark 4.Habit 5.Other 9.All')
typeindex=int(input())
print('请输入每个品种下载个数:')
num=int(input())
# num=40
f = open("plantnet.txt",encoding='utf-8')
line = f.readline()
ls=[]
# errorls=[]
while line:
ls.append(line.replace('\n',''))
line = f.readline()
f.close()
for item in ls:
download_species(item,num,typeindex)
# download_species(name,num)
os.system ("pause") | carryyangorz/pythonprojects | plantnet.py | plantnet.py | py | 12,996 | python | en | code | 0 | github-code | 13 |
549007509 | # coding: utf-8
__author__ = "wolf"
"""
File sender (client side)
"""
import socket
import os
import sys
import struct
import re
def socket_client():
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
while 1:
IP = input("请求建立socket通道ip地址为:")
if check_Ip(IP):
break
ip_port = (IP, 9911)
s.connect(ip_port)
except socket.error as msg:
print (msg)
sys.exit(1)
print (s.recv(1024))
while 1:
filepath = input('root:输入文件路径: ')
if os.path.isfile(filepath):
            # Define the struct header format: '128s' holds a 128-byte file name, 'l' holds the file size as a long
fileinfo_size = struct.calcsize('128sl')
            # Pack the file header containing the file name and file size
fhead = struct.pack('128sl', bytes(os.path.basename(filepath).encode('utf-8')), os.stat(filepath).st_size)
s.send(fhead)
#print ('root:文件路径: {0}'.format(filepath))
fp = open(filepath, 'rb')
while 1:
data = fp.read(1024)
if not data:
print ('root:文件-{0} 传输完毕!'.format(filepath))
break
s.send(data)
s.close()
break
def get_host_ip():
"""
    Look up this machine's IP address
:return: ip
"""
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(('1.1.1.1', 80))
ip = s.getsockname()[0]
finally:
s.close()
return ip
# Validate the entered IP address
def check_Ip(ip):
if re.match(r"^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$",
ip):
print ('root:当前输入 '+ip+'正则通过!')
return True
else:
print ('root:当前输入 '+ip + '正则失败,重新输入!')
return False
if __name__ == '__main__':
socket_client()
| nhz94259/transport_socket | send.py | send.py | py | 2,028 | python | en | code | 0 | github-code | 13 |
3791633151 | from flask import Flask, render_template, request, url_for, flash, redirect
from init_db import init_db
import sqlite3
from werkzeug.exceptions import abort
app = Flask(__name__)
app.config['SECRET_KEY'] = 'bravaproject'
init_db()
# establish database connection, returns rows as python dicts
def get_db_connection():
conn = sqlite3.connect('database.db')
conn.row_factory = sqlite3.Row
return conn
# display a blog post
def get_post(post_id):
conn = get_db_connection()
post = conn.execute('SELECT * FROM posts WHERE id = ?',
(post_id,)).fetchone()
conn.close()
if post is None:
abort(404)
return post
@app.route('/')
def index():
return render_template('index.html')
@app.route('/community')
def community():
conn = get_db_connection()
posts = conn.execute('SELECT * FROM posts').fetchall()
conn.close()
return render_template('community.html', posts=posts)
@app.route('/new', methods=('GET', 'POST'))
def new():
if request.method == 'POST':
option = request.form['option']
email = request.form['email']
title = request.form['title']
content = request.form['content']
if not title:
flash('Title is required.')
else:
conn = get_db_connection()
conn.execute('INSERT INTO posts (option, email, title, content) VALUES (?, ?, ?, ?)',
(option, email, title, content))
conn.commit()
conn.close()
return redirect(url_for('community'))
return render_template('newpostform.html')
@app.route('/<int:post_id>')
def post(post_id):
post = get_post(post_id)
return render_template('post.html', post=post)
if __name__ == '__main__':
app.run()
| sakuray10/BravaProject | app.py | app.py | py | 1,796 | python | en | code | 0 | github-code | 13 |
41489427539 | import math, torch
import torch.nn as nn
import torch.nn.functional as F
# RoI-wise attention vector generating network
class RAVNet(nn.Module):
def __init__(self, num_classes, d_k=64):
super(RAVNet, self).__init__()
# list of number of instances(RoI predictions) in each image
# len(num_instances): RoI-head batch size
# num_instances[x]: number of RoIs in image x
self.num_instances = None
# class relevance information
self.prior_rel = nn.Parameter(torch.zeros(81, 81), requires_grad=False)
# single-head self attention
self.query_fc = nn.Linear(num_classes+1, d_k)
self.key_fc = nn.Linear(num_classes+1, d_k)
self.value_fc = nn.Linear(num_classes+1, d_k)
# unpack FC
self.unpack_fc = nn.Linear(d_k, num_classes+1)
# save class relevance information
def set_rel(self, rel):
self.prior_rel.data = rel
# save number of instances
def pass_hint(self, num_instances):
self.num_instances = num_instances
def forward(self, x):
# parse predictions by RoI source and add 0 paddings
out = torch.split(x, self.num_instances)
out = [F.pad(y, pad=[0, 0, 0, max(self.num_instances)-y.size(0)], mode='constant', value=0) for y in out]
out = torch.stack(out)
# calculate query, key, and relevance based value
q = self.query_fc(out)
k = self.key_fc(out)
v = self.value_fc(torch.matmul(out, self.prior_rel))
# calculate single head attention
out = torch.matmul(q, k.transpose(1, 2))
out = out / math.sqrt(k.size(2))
for y in range(0, len(self.num_instances)):
out[y][:, self.num_instances[y]:] = -math.inf
out = F.softmax(out, dim=2)
# multiply attention and value
out = torch.matmul(out, v)
# recover dimension
out = self.unpack_fc(out)
# flatten the attention scores
out = out.view(-1, out.size(2))
d = max(self.num_instances)
out = torch.cat([out[d*y: d*y+self.num_instances[y]] for y in range(0, len(self.num_instances))])
return out
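# Minimal usage sketch (an assumption, not taken from the original repo); the shapes are
# illustrative: 3 RoIs from image 0 and 2 RoIs from image 1, num_classes + 1 = 81 scores each.
#
#   net = RAVNet(num_classes=80)
#   net.set_rel(torch.eye(81))    # identity as a placeholder class-relevance matrix
#   net.pass_hint([3, 2])         # how many RoIs each image contributed
#   logits = torch.randn(5, 81)   # 5 RoI prediction vectors in total
#   out = net(logits)             # -> shape (5, 81), one refined score vector per RoI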
| rach-rgb/RoI-wise-Attention-Vector | AttentionVector/src/components/rav_net.py | rav_net.py | py | 2,172 | python | en | code | 0 | github-code | 13 |
26148839048 | """Classifier tests."""
from sklearn.naive_bayes import GaussianNB
import strlearn as sl
def get_stream():
return sl.streams.StreamGenerator(n_chunks=10)
def test_ACS_Prequential():
"Bare ACS for Prequential"
stream = get_stream()
clf = sl.classifiers.ASC(base_clf=GaussianNB())
evaluator = sl.evaluators.Prequential()
evaluator.process(stream, clf)
def test_ACS_fit_and_proba():
"Bare ASC fit and predict_proba"
stream = get_stream()
X, y = stream.get_chunk()
clf = sl.classifiers.ASC(base_clf=GaussianNB())
clf.fit(X, y)
proba = clf.predict_proba(X)
def test_MetaEstimator_TestThanTrain():
"SampleWeightedMetaEstimator wrapped in OOB for TestThenTrain"
stream = get_stream()
base = sl.classifiers.SampleWeightedMetaEstimator(base_classifier=GaussianNB())
clf = sl.ensembles.OOB(base_estimator=base)
evaluator = sl.evaluators.TestThenTrain()
evaluator.process(stream, clf)
def test_MetaEstimator_fit():
"SampleWeightedMetaEstimator fit and predict"
stream = get_stream()
X, y = stream.get_chunk()
clf = sl.classifiers.SampleWeightedMetaEstimator(base_classifier=GaussianNB())
clf.fit(X, y)
clf.predict(X)
| w4k2/stream-learn | strlearn/tests/test_classifiers.py | test_classifiers.py | py | 1,137 | python | en | code | 58 | github-code | 13 |
9051914597 | import discord
class SelectMenu(discord.ui.Select):
def __init__(self,command_name:str,args:list,placeholder:str=""):
options=[]
for item in args:
options.append(discord.SelectOption(label=item, value=f"{command_name}:{item}", description=""))
super().__init__(placeholder=placeholder, min_values=1, max_values=1, options=options)
async def callback(self, interaction: discord.Interaction):
command_name, command_option = self.values[0].split(":")[:2]
command = interaction._client.tree.get_command(command_name)
cogs = interaction._client.cogs.get(command_name.capitalize())
await interaction.response.defer(thinking=True)
await command.callback(cogs, interaction, command_option) | sai11121209/Discord-EFT-V2-Bot | src/cogs/select_menu.py | select_menu.py | py | 767 | python | en | code | 0 | github-code | 13 |
37432022725 | from django.shortcuts import render
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
from django.http import HttpResponseRedirect,HttpResponse,FileResponse
import os,uuid
blob_lists = []
conn_string = os.environ["AZURE_STORAGE_CONNECTION_STRING"]
blob_service_client = BlobServiceClient.from_connection_string(conn_string)
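# Note (assumption about the deployment environment, not part of the original code):
# AZURE_STORAGE_CONNECTION_STRING is expected to hold a standard storage connection
# string of the form
#   DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net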
container_name="blob-container-01"
def index(request):
if request.method =='POST':
uploaded_file = request.FILES['document']
blob_client = BlobClient.from_connection_string(
conn_string,
container_name="blob-container-01",
blob_name=uploaded_file.name,
)
blob_client.upload_blob(uploaded_file)
print(f"Uploaded {uploaded_file.name} to {blob_client.url}")
container_client = blob_service_client.get_container_client(container_name)
blob_lists = container_client.list_blobs()
return render(request,'index.html',{'blob_list':blob_lists})
def download(request, i):
file_name = i
container_client = blob_service_client.get_container_client(container= container_name)
with open(file=file_name, mode="wb") as download_file:
download_file.write(container_client.download_blob(file_name).readall())
response= FileResponse(open(file_name,'rb'))
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response | RekhaBalamurugan/Upload-Download-Files-Django | upload_download/views.py | views.py | py | 1,390 | python | en | code | 0 | github-code | 13 |
11997795563 |
'''
Functions for case 9 simulations
Author: Telma Afonso
'''
from phenomenaly.simulation import fba, fva, pfba, lmoma
from phenomenaly.variables import Media
from types import *
import pickle
import string
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import linregress
from matplotlib.backends.backend_pdf import PdfPages
import warnings
warnings.filterwarnings('ignore')
pd.set_option('display.max_colwidth', -1)
from yeastpack_test import PhenomenalySim
class Case9 (PhenomenalySim):
def __init__(self, cobra_model = None):
super(Case9, self).__init__(cobra_model)
def dictsForCase9 (self):
# Carbon source lb
# cs_lb = {'glucose': -1.5} #Shophia's values
cs_lb = {'batch': -15.9, 'chemostat': -1.17} # Authors' values
# Carbon source exchange reaction
cs_reaction = {'batch': 'r_1714', 'chemostat': 'r_1714'}
self.cs_lb, self.cs_reaction = cs_lb, cs_reaction
def loadExperimentalRes (self, filename_exp, sep = ';'):
exp_fluxes = pd.read_csv(filename_exp, sep = sep)
exp_fluxes.set_index('Yeast7_ID', inplace = True)
reactions = exp_fluxes.iloc[:,0]
sub_exp_fluxes = exp_fluxes.drop([col for col in list(exp_fluxes.columns) if 'exp flux' not in col], 1)
sub_exp_fluxes = sub_exp_fluxes.apply(lambda x: x.str.replace(',', '.'))
#sub_exp_fluxes = sub_exp_fluxes.apply(lambda x: x.astype('float') if str(x).isdigit() else None)
#Add Biomass Values (r_4041)
reactions['r_4041'] = 'Biomass'
sub_exp_fluxes.loc['r_4041'] = [0.37, 0.1] #growth rate
return sub_exp_fluxes, reactions
def getColumnWithoutNAs (self, dataframe, column_index, na = 'x'):
df = dataframe[dataframe.ix[:, column_index] != na].ix[:, column_index]
return df.astype('float')
def getDFWithoutExtremeFluxes (self, dataframe, column_index = 1, val = 900):
df = dataframe[(dataframe.ix[:, column_index] > -val) & (dataframe.ix[:, column_index] < val)]
return df.astype('float')
def createDatasetExpVsSimul (self, exp_fluxes, sim_fluxes):
dsim = sim_fluxes.copy()
df = pd.concat([exp_fluxes, sim_fluxes], axis = 1, join = 'inner')
return df
def createDatasetWithAbsRelError (self, dataset_exp_vs_sim):
df = dataset_exp_vs_sim.copy()
ae = self.absoluteError(df.ix[:, 0], df.ix[:, 1])
df.insert(loc = 2, column = 'Abs Error', value = ae)
re = self.relativeError(df.ix[:, 0], df.ix[:, 1])
df.insert(loc = 3, column = 'Rel Error', value = re)
return df
def simulationPipeline (self, exp_dataset, cs = 'glucose', o2_lb = None, geneko = None, type = 'fba', res_exists = False, fname = None):
if res_exists:
res = self.loadObjectFromFile(fname)
else:
res = self.singleSimulation(carbon_source = self.cs_reaction[cs], cs_lb = self.cs_lb[cs], geneko = geneko, o2_lb = o2_lb, type = type)
self.saveObjectToFile(res, fname)
if type == 'fva':
fluxes = res #pandas df
elif hasattr(res, 'x_dict'): #if legacy solution
fluxes = pd.DataFrame(list(res.x_dict.items())).set_index(0)
else:
fluxes = res.fluxes
# Dataset with experimental vs simulated fluxes
df_exp_sim = self.createDatasetExpVsSimul(exp_dataset, self.correctReversedReactions(fluxes))
if type != 'fva':
df_exp_sim = df_exp_sim.rename(columns = {1: 'Sim Flux'})
# Dataset with absolute and relative errors
df_exp_sim_errors = self.createDatasetWithAbsRelError(df_exp_sim)
return res, df_exp_sim, df_exp_sim_errors
def testO2EthanolProd (self, g_knockout = None, react_id = 'r_2115', cs = 'glucose', range_o2 = list(np.arange(-20, 0, 2))):
res = {}
for i in range_o2:
with self.model as m:
m.set_carbon_source(self.cs_reaction[cs], lb = self.cs_lb[cs])
m.reactions.get_by_id('r_1992').lower_bound = float(i)
if g_knockout is not None:
m.set_environmental_conditions(gene_knockout = g_knockout)
r = pfba(m)
if hasattr(r, 'x_dict'): #if legacy solution
fluxes = pd.DataFrame(list(r.x_dict.items())).set_index(0)
else:
fluxes = r.fluxes
res[str(i)] = fluxes.loc[react_id]
for key, val in sorted(res.items()): print(key, '\t', val)
return res
def plotO2vsEtOH (self, dict_EtOH_res, real_EtOH_flux = 0, xlab = 'O2 Flux', ylab = 'EtOH Flux', title = 'Ethanol production with O2 flux', legend = 'Wild Type', fname = None):
plt.figure(figsize = (10, 5))
try:
x = sorted([float(x) for x in dict_EtOH_res.keys()])
y = [float(dict_EtOH_res[str(key)]) for key in x]
except:
x = sorted([int(x) for x in dict_EtOH_res.keys()])
y = [int(dict_EtOH_res[str(key)]) for key in x]
slope, intercept, r_value, p_value, std_err = linregress(x, y)
line = [slope * x + intercept for x in x]
real_O2 = lambda y0: (y0 - intercept) / slope
y0 = real_EtOH_flux
plt.plot(x, y, 'o', x, line)
plt.axhline(y = real_EtOH_flux, ls = 'dashed')
plt.xlabel(xlab)
plt.ylabel(ylab)
plt.title(title)
plt.legend([legend, 'R2: %.4f' % r_value**2, 'Real EtOH flux: %.2f (O2 flux of %.2f)' % (real_EtOH_flux, real_O2(y0))])
plt.show()
if fname is not None:
plt.savefig(fname)
return round(real_O2(y0), 4)
if __name__ == '__main__':
#Initialization
case9 = Case9()
case9.model = case9.loadObjectFromFile('model_yeast_76.sav')
case9.model.solver = 'optlang-cplex'
case9.setMedium('MINIMAL')
case9.dictsForCase9()
#General datasets
exp_dataset, reactions = case9.loadExperimentalRes('Results/Case 9/case9_experimental_fluxes.csv')
# ====== BATCH ======
b_exp_df = case9.getColumnWithoutNAs(exp_dataset, 0, 'X')
# O2 FLUX ESTIMATION - O2 flux of -2.02
# b_etOH = case9.testO2EthanolProd(cs = 'batch', range_o2 = list(np.arange(-3, -1, 0.1)))
# case9.saveObjectToFile(b_etOH, 'Results/Case 9/b_dict_etOH_O2_fluxes.sav')
# b_etOH = case9.loadObjectFromFile('Results/Case 9/b_dict_etOH_O2_fluxes.sav')
# b_o2_lb = case9.plotO2vsEtOH(b_etOH, real_EtOH_flux = 25.2333, legend = 'Batch Culture', fname = 'Results/Case 9/b_etOH_plot.png')
# plt.close('all')
#FBA
b_fba_res, b_fba_exp_sim, b_fba_exp_sim_errors = case9.simulationPipeline(b_exp_df, o2_lb = -2.02, cs = 'batch', type = 'fba', res_exists = True, fname = 'Results/Case 9/res_fba_batch_case9.sav')
b_fba_exp_sim_errors = case9.getDFWithoutExtremeFluxes(b_fba_exp_sim_errors) #without extreme fluxes (for plotting)
case9.plotExpVsSim(b_fba_exp_sim_errors, save_fig_path = 'Results/Case 9/b_fba_exp_sim_plot.png', title = 'FBA Batch')
plt.close('all')
#pFBA
b_pfba_res, b_pfba_exp_sim, b_pfba_exp_sim_errors = case9.simulationPipeline(b_exp_df, o2_lb = -2.02, cs = 'batch',type = 'pfba', res_exists = True, fname = 'Results/Case 9/res_pfba_batch_case9.sav')
case9.plotExpVsSim(b_pfba_exp_sim_errors, save_fig_path = 'Results/Case 9/b_pfba_exp_sim_plot.png', title = 'pFBA Batch')
plt.close('all')
# case9.getListOfMetabolitesSummary(b_pfba_res)
# case9.getMetaboliteSummaryWithNames('s_0727', b_pfba_res)
#FVA
b_fva_res, b_fva_exp_sim, _ = case9.simulationPipeline(b_exp_df, o2_lb = -2.02, cs = 'batch', type = 'fva', res_exists = True, fname = 'Results/Case 9/res_fva_batch_case9.sav')
# ====== CHEMOSTAT ======
c_exp_df = case9.getColumnWithoutNAs(exp_dataset, 1, 'X')
# O2 FLUX ESTIMATION - No EtOH production experimentally (EtOH production starts above O2 flux of -1.90)
# c_etOH = case9.testO2EthanolProd(cs = 'chemostat', range_o2 = list(np.arange(-2, 0, 0.1)))
# case9.saveObjectToFile(c_etOH, 'Results/Case 9/c_dict_etOH_O2_fluxes.sav')
# c_etOH = case9.loadObjectFromFile('Results/Case 9/c_dict_etOH_O2_fluxes.sav')
# c_o2_lb = case9.plotO2vsEtOH(c_etOH, real_EtOH_flux = 0, legend = 'Chemostat Culture', fname = 'Results/Case 9/c_etOH_plot.png')
# plt.close('all')
#FBA
c_fba_res, c_fba_exp_sim, c_fba_exp_sim_errors = case9.simulationPipeline(c_exp_df, cs = 'chemostat', type = 'fba', res_exists = True, fname = 'Results/Case 9/res_fba_chemostat_case9.sav')
c_fba_exp_sim_errors = case9.getDFWithoutExtremeFluxes(c_fba_exp_sim_errors) #without extreme fluxes (for plotting)
case9.plotExpVsSim(c_fba_exp_sim_errors, save_fig_path = 'Results/Case 9/c_fba_exp_sim_plot.png', title = 'FBA Chemostat')
plt.close('all')
#pFBA 0.73 r2 com o2_lb = 0.52
c_pfba_res, c_pfba_exp_sim, c_pfba_exp_sim_errors = case9.simulationPipeline(c_exp_df, cs = 'chemostat', type = 'pfba', res_exists = True, fname = 'Results/Case 9/res_pfba_chemostat_case9.sav')
case9.plotExpVsSim(c_pfba_exp_sim_errors, save_fig_path = 'Results/Case 9/c_pfba_exp_sim_plot.png', title = 'pFBA Chemostat')
plt.close('all')
# case9.getListOfMetabolitesSummary(c_pfba_res)
# case9.getMetaboliteSummaryWithNames('s_0727', c_pfba_res)
#FVA
c_fva_res, c_fva_exp_sim, _ = case9.simulationPipeline(c_exp_df, cs = 'chemostat', type = 'fva', res_exists = True, fname = 'Results/Case 9/res_fva_chemostat_case9.sav')
# =========================================
# Save all results into a binary file
# =========================================
all_res = {'d9_batch_fba': b_fba_exp_sim, 'd9_batch_pfba': b_pfba_exp_sim, 'd9_batch_fva': b_fva_exp_sim,
'd9_chem_fba': c_fba_exp_sim, 'd9_chem_pfba': c_pfba_exp_sim, 'd9_chem_fva': c_fva_exp_sim, 'd9_reactions': reactions}
case9.saveObjectToFile(all_res, 'Results/case9_all_res.sav')
| TelmaAfonso/DeYeast | case_9.py | case_9.py | py | 9,995 | python | en | code | 0 | github-code | 13 |
33466459696 |
from keras.engine import Input
from keras.engine import Model
from keras.layers import Convolution2D, MaxPooling2D, Deconvolution2D, Dropout, Activation, Reshape
from keras.layers import merge
class Fcn_8(object):
'''
example:
model = Fcn_8(batch_size=batch_size, input_shape=(block_size,block_size), n_channels=3, no_classes=11)
model = model.build_model()
block_size = 240
'''
def __init__(self, batch_size, input_shape, n_channels, no_classes, weight_file=None):
self.batch_size = batch_size
self.patch_size = input_shape[0], input_shape[1]
self.input_channels = n_channels
self.input_shape = (self.batch_size,) + self.patch_size + (self.input_channels,)
self.out_channels = no_classes
self.output_shape = [self.batch_size, input_shape[0], input_shape[1], self.out_channels]
self.no_classes = no_classes
self.weight_file = weight_file
def upconv2_2(self, input, concat_tensor, no_features):
out_shape = [dim.value for dim in concat_tensor.get_shape()]
up_conv = Deconvolution2D(no_features, 4, 4, output_shape=out_shape, subsample=(2, 2))(input)
# up_conv = Convolution2D(no_features, 2, 2)(UpSampling2D()(input))
merged = merge([concat_tensor, up_conv], mode='concat', concat_axis=3)
return merged
def build_model(self):
input = Input(batch_shape=self.input_shape, name='input_1')
fileter_size = 3
# Block 1
conv1_1 = Convolution2D(64, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv1_1')(
input)
conv1_2 = Convolution2D(64, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv1_2')(
conv1_1)
conv1_out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same', name='pool1')(conv1_2)
# Block 2
conv2_1 = Convolution2D(128, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv2_1')(
conv1_out)
conv2_2 = Convolution2D(128, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv2_2')(
conv2_1)
conv2_out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same', name='pool2')(conv2_2)
# Block 3
conv3_1 = Convolution2D(256, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv3_1')(
conv2_out)
conv3_2 = Convolution2D(256, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv3_2')(
conv3_1)
conv3_3 = Convolution2D(256, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv3_3')(
conv3_2)
conv3_out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same', name='pool3')(conv3_3)
# Block 4
conv4_1 = Convolution2D(512, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv4_1')(
conv3_out)
conv4_2 = Convolution2D(512, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv4_2')(
conv4_1)
conv4_3 = Convolution2D(512, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv4_3')(
conv4_2)
conv4_out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same', name='pool4')(conv4_3)
# Block 5
conv5_1 = Convolution2D(512, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv5_1')(
conv4_out)
conv5_2 = Convolution2D(512, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv5_2')(
conv5_1)
conv5_3 = Convolution2D(512, fileter_size, fileter_size, activation='relu', border_mode='same', name='conv5_3')(
conv5_2)
conv5_out = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), border_mode='same', name='pool5')(conv5_3)
# Block 6
conv6_1 = Convolution2D(4096, 7, 7, activation='relu', border_mode='same', name='conv6_1')(conv5_out)
conv6_out = Dropout(0.5)(conv6_1)
# Block 7
conv7_1 = Convolution2D(4096, 1, 1, activation='relu', border_mode='same', name='conv7_1')(conv6_out)
conv7_out = Dropout(0.5)(conv7_1)
# De1
score_conv7_out = Convolution2D(self.no_classes, 1, 1, border_mode='same')(conv7_out)
score_pool4 = Convolution2D(self.no_classes, 1, 1, border_mode='same')(conv4_out)
out_shape = [dim.value for dim in score_pool4.get_shape()]
up_conv_1 = Deconvolution2D(self.no_classes, 4, 4, output_shape=out_shape, border_mode="same",
subsample=(2, 2))(score_conv7_out)
upscore_1 = merge([score_pool4, up_conv_1], mode='sum', concat_axis=-1)
# De2
score_pool3 = Convolution2D(self.no_classes, 1, 1, border_mode='same')(conv3_out)
out_shape = [dim.value for dim in score_pool3.get_shape()]
up_conv_2 = Deconvolution2D(self.no_classes, 4, 4, output_shape=out_shape, border_mode="same",
subsample=(2, 2))(upscore_1)
upscore_2 = merge([score_pool3, up_conv_2], mode='sum', concat_axis=-1)
# up_conv1 = self.upconv2_2(conv7_out, conv4_out, 512)
# conv6_out = self.convfileter_size_fileter_size(up_conv1, 512)
# up_conv2 = self.upconv2_2(up_conv1, conv3_out, 256)
# conv7_out = self.convfileter_size_fileter_size(up_conv2, 256)
out_shape = [dim.value for dim in input.get_shape()]
out_shape = [self.batch_size] + out_shape[1:fileter_size] + [self.no_classes]
output = Deconvolution2D(self.no_classes, 16, 16, output_shape=out_shape, border_mode="same", subsample=(8, 8))(upscore_2)
output = Reshape((self.input_shape[1] * self.input_shape[2], self.no_classes))(output)
output = Activation(activation='softmax', name='class_out')(output)
model = Model(input, output)
return model
# model = Fcn_8(batch_size=1, input_shape=(480,480), n_channels=3, no_classes=11)
# model = model.build_model()
# #print model.summary()
#
# for layer in model.layers:
# layer_configuration = layer.get_config()
# print "layer name is", layer_configuration["name"]
# print "the input shape", layer.input_shape
# print "the output shape", layer.output_shape
| gawain93/handsegment_fcn | model.py | model.py | py | 6,403 | python | en | code | 0 | github-code | 13 |
14115853149 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
name = "sender"
pub_topic = "my_topic"
rospy.init_node(name)
pub = rospy.Publisher(pub_topic, Int32, queue_size=1)
rate = rospy.Rate(1000)
count = 1
while pub.get_num_connections() == 0:  # wait until at least one subscriber is connected
count = 1
while not rospy.is_shutdown():
pub.publish(count)
count +=1
rate.sleep()
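# Illustrative matching subscriber (an assumption, not part of this package); a node like
# the sketch below would consume the Int32 messages published above.
#
#   import rospy
#   from std_msgs.msg import Int32
#
#   def callback(msg):
#       rospy.loginfo("received %d", msg.data)
#
#   rospy.init_node("receiver")
#   rospy.Subscriber("my_topic", Int32, callback)
#   rospy.spin()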
| JaeHongChoe/ROS_study | src/msg_send/src/sender_overflow.py | sender_overflow.py | py | 354 | python | en | code | 1 | github-code | 13 |
7517129902 | import scipy as sp
from simulated_tools import get_simulated_im, WINDOW_WIDTH
from caffe_tools import fill_database
"""IDs of file to be used to build the training set"""
TRAINING_IDS = ['exp_low ({0})'.format(i) for i in range(1, 25)]
"""The mapping from types of pixels to classifier labels"""
LABEL_ENUM = {'inside': 1,
'outside': 0,
'inside_damaged': 1,
'outside_damaged': 0,
'block_border': 0,
'between': 0}
def make_damaged_spot_mask(truth):
"""Returns a mask indicating which pixels lie inside damaged spots"""
damaged_pixel = (0.75 < truth) & (truth < 1)
damaged_area = sp.ndimage.binary_closing(damaged_pixel, structure=sp.ones((3, 3)))
damaged_spot = damaged_area & (0.75 < truth)
return damaged_spot
def make_outside_near_damaged_spot_mask(truth):
"""Returns a mask indicating which pixels lie just outside damaged spots"""
damaged_spot = make_damaged_spot_mask(truth)
near_damaged_spot = sp.ndimage.binary_dilation(damaged_spot, structure=sp.ones((3,3)), iterations=5)
outside_near_damaged_spot = near_damaged_spot & (truth < 0.25)
return outside_near_damaged_spot
def make_block_border_mask(truth):
"""Returns a mask indicating which pixels lie just outside a block of spots"""
very_near_block = sp.ndimage.binary_dilation(0.75 < truth , structure=sp.ones((3,3)), iterations=3)
near_block = sp.ndimage.binary_dilation(0.75 < truth , structure=sp.ones((3,3)), iterations=15)
block_border = near_block & ~very_near_block
return block_border
def make_between_spot_mask(truth):
"""Returns a mask indicating which pixels lie between two spots"""
near_spot = sp.ndimage.binary_dilation(0.75 < truth, structure=sp.ones((3, 3)), iterations=4)
outside_near_spot = near_spot & (truth < 0.25)
return outside_near_spot
def get_centers_single_image(truth, im_no, border=20):
"""Returns a dict of arrays, one for each pixel type. The arrays are compatible with caffe_tools.fill_database.
The last row of each array is equal to ``im_num``, indicating which image those centers were created from."""
indices = sp.indices(truth.shape)
im_nos = im_no*sp.ones((1, truth.shape[0], truth.shape[1]), dtype=int)
indices = sp.concatenate((indices, im_nos))
away_from_border = sp.zeros(truth.shape, dtype=bool)
away_from_border[border:-border, border:-border] = True
results = {
'inside': indices[:, (0.75 < truth) & away_from_border]
,'outside': indices[:, (truth < 0.25) & away_from_border]
,'inside_damaged' : indices[:, make_damaged_spot_mask(truth) & away_from_border]
,'outside_damaged': indices[:, make_outside_near_damaged_spot_mask(truth) & away_from_border]
,'block_border': indices[:, make_block_border_mask(truth) & away_from_border]
,'between': indices[:, make_between_spot_mask(truth) & away_from_border]
}
return results
def get_centers(truths, border=20):
"""Uses the truths to create a dict of arrays indexed by pixel type. The arrays are compatible with
``caffe_tools.fill_database``."""
centers = []
for i, truth in enumerate(truths):
centers.append(get_centers_single_image(truth, i, border=border))
result = {}
for name in centers[0]:
result[name] = sp.concatenate([cs[name] for cs in centers], 1)
return result
def make_labelled_sets(centers, test_split=0.1):
"""Uses a dict of arrays like those created by ``get_centers`` to build test and training sets for training
a Caffe model to distinguish different types of pixel. The arrays returned are centers and labels compatible with
``caffe_tools.fill_database``"""
counts = {'inside': 2e5, 'outside': 1e5, 'inside_damaged': 2e5, 'outside_damaged': 1e5, 'block_border': 1e5, 'between': 1e5}
choices = {name: sp.random.choice(sp.arange(centers[name].shape[1]), counts[name]) for name in centers}
center_sets = {name: centers[name][:, choices[name]] for name in centers}
label_sets = {name: sp.repeat(LABEL_ENUM[name], counts[name]) for name in centers}
center_set = sp.concatenate([center_sets[name] for name in centers], 1)
label_set = sp.concatenate([label_sets[name] for name in centers])
order = sp.random.permutation(sp.arange(center_set.shape[1]))
ordered_centers = center_set[:, order]
ordered_labels = label_set[order]
n_training = int((1-test_split)*center_set.shape[1])
training_centers = ordered_centers[:, :n_training]
training_labels = ordered_labels[:n_training]
test_centers = ordered_centers[:, n_training:]
test_labels = ordered_labels[n_training:]
return training_centers, training_labels, test_centers, test_labels
def create_caffe_input_file(file_ids, width):
"""Creates LMDB databases containing training and test sets derived from the ground truths of the simulated data.
``width`` is the size of the windows to use."""
im_padding = ((width/2, width/2), (width/2, width/2), (0, 0))
ims = [get_simulated_im(file_id)[0] for file_id in file_ids]
ims = [(im - im.mean())/im.std() for im in ims]
ims = [sp.pad(im, im_padding, mode='reflect') for im in ims]
truth_padding = ((width/2, width/2), (width/2, width/2))
truths = [get_simulated_im(file_id)[1] for file_id in file_ids]
truths = [sp.pad(truth, truth_padding, mode='reflect') for truth in truths]
centers = get_centers(truths, width/2)
training_centers, training_labels, test_centers, test_labels = make_labelled_sets(centers)
fill_database('temporary/train_simulated.db', ims, training_centers, training_labels, width)
fill_database('temporary/test_simulated.db', ims, test_centers, test_labels, width)
def make_training_files():
"""Uses the ground truths of the simulated data to create LMDB databases containing training and test
sets for a Caffe neural network.
The databases can be found in the ``temporary`` directory."""
create_caffe_input_file(TRAINING_IDS, WINDOW_WIDTH) | andyljones/NeuralNetworkMicroarraySegmentation | simulated_training.py | simulated_training.py | py | 6,144 | python | en | code | 5 | github-code | 13 |
35794302213 | from dolfin import Vector
import numpy as np
class TimeDependentVector:
"""
A class to store time dependent vectors.
Snapshots are stored/retrieved by specifying
the time of the snapshot.
Times at which the snapshots are taken must be
specified in the constructor.
"""
def __init__(self, times, tol=1e-10):
"""
Constructor:
- times: time frame at which snapshots are stored
- tol : tolerance to identify the frame of the
snapshot.
"""
self.nsteps = len(times)
self.data = [];
for i in range(self.nsteps):
self.data.append( Vector() )
self.times = times
self.tol = tol
def copy(self, other):
"""
Copy all the time frames and snapshot from other to self.
"""
self.nsteps = other.nsteps
self.times = other.times
self.tol = other.tol
self.data = []
for v in other.data:
self.data.append( v.copy() )
def initialize(self,M,dim):
"""
Initialize all the snapshot to be compatible
with the range/domain of an operator M.
"""
for d in self.data:
M.init_vector(d,dim)
d.zero()
def randn_perturb(self,std_dev):
"""
Add a random perturbation eta_i ~ N(0, std_dev^2 I)
to each snapshot.
"""
for d in self.data:
noise = std_dev * np.random.normal(0, 1, len(d.array()))
d.set_local(d.array() + noise)
def axpy(self, a, other):
"""
Compute x = x + a*other snapshot per snapshot.
"""
for i in range(self.nsteps):
self.data[i].axpy(a,other.data[i])
def zero(self):
"""
Zero out each snapshot.
"""
for d in self.data:
d.zero()
def store(self, u, t):
"""
Store snapshot u relative to time t.
If t does not belong to the list of time frame an error is raised.
"""
i = 0
while i < self.nsteps-1 and 2*t > self.times[i] + self.times[i+1]:
i += 1
assert abs(t - self.times[i]) < self.tol
self.data[i].set_local( u.array() )
def retrieve(self, u, t):
"""
Retrieve snapshot u relative to time t.
If t does not belong to the list of time frame an error is raised.
"""
i = 0
while i < self.nsteps-1 and 2*t > self.times[i] + self.times[i+1]:
i += 1
assert abs(t - self.times[i]) < self.tol
u.set_local( self.data[i].array() )
def norm(self, time_norm, space_norm):
"""
Compute the space-time norm of the snapshot.
"""
assert time_norm == "linf"
s_norm = 0
for i in range(self.nsteps):
tmp = self.data[i].norm(space_norm)
if tmp > s_norm:
s_norm = tmp
return s_norm
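# Minimal usage sketch (assumed, not part of the original file): `M` is any operator
# exposing init_vector(vec, dim) (e.g. a dolfin/PETSc matrix) and `u` is a Vector with
# the matching layout.
#
#   times = [0.0, 0.1, 0.2]
#   tdv = TimeDependentVector(times)
#   tdv.initialize(M, 0)            # allocate one snapshot per time frame, compatible with M
#   tdv.store(u, 0.1)               # save u as the snapshot at t = 0.1
#   tdv.retrieve(u, 0.2)            # overwrite u with the snapshot at t = 0.2
#   print(tdv.norm("linf", "l2"))   # space-time norm: max over time of the l2 norms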
| kamccormack/EQporoelasticity | local_lib/hippylib/timeDependentVector.py | timeDependentVector.py | py | 3,153 | python | en | code | 6 | github-code | 13 |
71838069779 | from ella.utils.settings import Settings
ACTIVITY_NOT_YET_ACTIVE = 0
ACTIVITY_ACTIVE = 1
ACTIVITY_CLOSED = 2
IP_VOTE_TRESHOLD = 10 * 60
POLL_COOKIE_NAME = 'polls_voted'
POLL_JUST_VOTED_COOKIE_NAME = 'polls_just_voted_voted'
POLL_NO_CHOICE_COOKIE_NAME = 'polls_no_choice'
POLL_MAX_COOKIE_LENGTH = 20
POLL_MAX_COOKIE_AGE = 604800
SURVEY_COOKIE_NAME = 'surveys_voted'
SURVEY_JUST_VOTED_COOKIE_NAME = 'surveys_just_voted_voted'
SURVEY_NO_CHOICE_COOKIE_NAME = 'surveys_no_choice'
SURVEY_MAX_COOKIE_LENGTH = 20
SURVEY_MAX_COOKIE_AGE = 604800
USER_NOT_YET_VOTED = 0
USER_JUST_VOTED = 1
USER_ALLREADY_VOTED = 2
USER_NO_CHOICE = 3
polls_settings = Settings('ella_polls.conf', 'POLLS')
| ella/ella-polls | ella_polls/conf.py | conf.py | py | 683 | python | en | code | 3 | github-code | 13 |
247542092 | from .settings import *
DEBUG = False
ALLOWED_HOSTS = ['1ww.me', 'www.1ww.me', 'api.1ww.me', ]
# ALLOWED_HOSTS = ['*', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'haumea',
'USER': 'haumea',
'PASSWORD': 'haumea123',
# 'HOST': '/tmp/mysql.sock',
'HOST': '10.136.62.181',
'PORT': 3306,
'OPTIONS': {
'charset': 'utf8',
'init_command': 'SET storage_engine=INNODB',
}
}
}
STATIC_URL = '//static.1ww.me/static/'
STATIC_ROOT = '/data/static/haumea/'
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.solr_backend.SolrEngine',
'URL': 'http://127.0.0.1:8983/solr'
},
} | edison7500/haumea | haumea/settings/production.py | production.py | py | 767 | python | en | code | 0 | github-code | 13 |
6616278304 | import numpy as np
import cvxpy as cp
import json
POS = {
"W" : 0,
"N" : 1,
"E" : 2,
"S" : 3,
"C" : 4
}
MAT = {
"0" : 0,
"1" : 1,
"2" : 2
}
ARW = {
"0" : 0,
"1" : 1,
"2" : 2,
"3" : 3
}
MM = {
"D" : 0,
"R" : 1
}
HEL = {
"0" : 0,
"25" : 1,
"50" : 2,
"75" : 3,
"100" : 4
}
ACT = {
"UP" : 0,
"LEFT" : 1,
"DOWN" : 2,
"RIGHT" : 3,
"STAY" : 4,
"SHOOT" : 5,
"HIT" : 6,
"CRAFT" : 7,
"GATHER" : 8,
"NONE" : 9
}
rPOS = {
0 : "W",
1 : "N",
2 : "E",
3 : "S",
4 : "C"
}
rMAT = {
0 :"0",
1 : "1",
2 : "2"
}
rARW = {
0 : "0",
1 : "1",
2 : "2",
3 : "3"
}
rMM = {
0 : "D",
1 : "R"
}
rHEL = {
0 : "0",
1 : "25",
2 : "50",
3 : "75",
4 : "100"
}
rACT = {
0 : "UP",
1 :"LEFT",
2 : "DOWN",
3 : "RIGHT",
4 : "STAY",
5 : "SHOOT",
6 : "HIT",
7 : "CRAFT",
8 : "GATHER",
9 : "NONE"
}
def getIndex(pos, mat, arw, mm, hel):
return 120*POS[pos]+40*MAT[mat]+10*ARW[arw]+5*MM[mm]+HEL[hel]
def getState(ind):
pos = ind//120
ind %= 120
mat = ind//40
ind %= 40
arw = ind//10
ind %= 10
mm = ind//5
ind = ind % 5
hel = ind
return (rPOS[pos], rMAT[mat], rARW[arw], rMM[mm], rHEL[hel])
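# Worked example of the state encoding above (illustrative): the state
# (pos="C", mat="2", arw="3", mm="R", hel="100") maps to
#   120*4 + 40*2 + 10*3 + 5*1 + 4 = 480 + 80 + 30 + 5 + 4 = 599,
# and getState(599) recovers ("C", "2", "3", "R", "100"), so indices 0..599
# enumerate all 5*3*4*2*5 = 600 states.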
def actionPossible(pos, mat, arw, mm, hel, act):
if hel == "0" and act != "NONE":
return False
if act == "UP":
if pos == "C" or pos == "S":
return True
return False
if act == "DOWN":
if pos == "C" or pos == "N":
return True
return False
if act == "LEFT":
if pos == "C" or pos == "E":
return True
return False
if act == "RIGHT":
if pos == "C" or pos == "W":
return True
return False
if act == "STAY":
return True
if act == "SHOOT":
if arw != "0" and pos != "N" and pos != "S":
return True
return False
if act == "HIT":
if pos == "C" or pos == "E":
return True
return False
if act == "CRAFT":
if pos == "N" and mat != "0":
return True
return False
if act == "GATHER":
if pos == "S":
return True
if act == "NONE":
if hel == "0":
return True
return False
def takeAction(pos, mat, arw, mm, hel, act):
ret = []
if act == "UP":
if pos == "C":
ret.append({0.85 : ("N", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if pos == "S":
ret.append({0.85 : ("C", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if act == "DOWN":
if pos == "C":
ret.append({0.85 : ("S", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if pos == "N":
ret.append({0.85 : ("C", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if act == "LEFT":
if pos == "C":
ret.append({0.85 : ("W", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if pos == "E":
ret.append({1.00 : ("C", mat, arw, mm, hel)})
if act == "RIGHT":
if pos == "C":
ret.append({1.00 : ("E", mat, arw, mm, hel)})
if pos == "W":
ret.append({1.00 : ("C", mat, arw, mm, hel)})
if act == "STAY":
if pos == "C":
ret.append({0.85 : ("C", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if pos == "N":
ret.append({0.85 : ("N", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if pos == "S":
ret.append({0.85 : ("S", mat, arw, mm, hel)})
ret.append({0.15 : ("E", mat, arw, mm, hel)})
if pos == "W":
ret.append({1.00 : ("W", mat, arw, mm, hel)})
if pos == "E":
ret.append({1.00 : ("E", mat, arw, mm, hel)})
if act == "SHOOT":
if pos == "C":
ret.append({0.50 : (pos, mat, str(int(arw)-1), mm, str(int(hel)-25))})
ret.append({0.50 : (pos, mat, str(int(arw)-1), mm, hel)})
if pos == "E":
ret.append({0.90 : (pos, mat, str(int(arw)-1), mm, str(int(hel)-25))})
ret.append({0.10 : (pos, mat, str(int(arw)-1), mm, hel)})
if pos == "W":
ret.append({0.25 : (pos, mat, str(int(arw)-1), mm, str(int(hel)-25))})
ret.append({0.75 : (pos, mat, str(int(arw)-1), mm, hel)})
if act == "HIT":
if pos == "C":
ret.append({0.10 : (pos, mat, arw, mm, str(max(0, int(hel)-50)))})
ret.append({0.90 : (pos, mat, arw, mm, hel)})
if pos == "E":
ret.append({0.20 : (pos, mat, arw, mm, str(max(0, int(hel)-50)))})
ret.append({0.80 : (pos, mat, arw, mm, hel)})
if act == "CRAFT":
ret.append({0.50 : (pos, str(int(mat)-1), str(min(3, int(arw)+1)), mm, hel)})
ret.append({0.35 : (pos, str(int(mat)-1), str(min(3, int(arw)+2)), mm, hel)})
ret.append({0.15 : (pos, str(int(mat)-1), str(min(3, int(arw)+3)), mm, hel)})
if act == "GATHER":
ret.append({0.75 : (pos, str(min(2, int(mat)+1)), arw, mm, hel)})
ret.append({0.25 : (pos, mat, arw, mm, hel)})
return ret
stateActions = {}
rstateActions = {}
ind = 0
for i in range(600):
for act in ACT:
pos, mat, arw, mm, hel = getState(i)
if actionPossible(pos, mat, arw, mm, hel, act):
stateActions[(pos, mat, arw, mm, hel, act)] = ind
rstateActions[ind] = (pos, mat, arw, mm, hel, act)
ind += 1
x = cp.Variable(shape=(len(stateActions), 1), name="x")
A = [[0 for i in range(len(stateActions))] for j in range(600)]
alpha = [0 for i in range(600)]
R = [0 for i in range(len(stateActions))]
mmReach = ["C", "E"]
gamma = 0.999
stepCost = -20
atkRew = -40
np.set_printoptions(threshold=np.inf)
for i in range(600):
pos, mat, arw, mm, hel = getState(i)
if pos == "C" and mat == "2" and arw == "3" and mm == "R" and hel == "100":
alpha[i] = 1
for i in range(len(stateActions)):
pos, mat, arw, mm, hel, act = rstateActions[i]
ret = takeAction(pos, mat, arw, mm, hel, act)
if hel == "0":
if act == "NONE":
A[getIndex(pos, mat, arw, mm, hel)][i] += 1
continue
if mm == "D":
for it in ret:
prob = list(it.keys())[0]
dat = it[prob]
A[getIndex(dat[0], dat[1], dat[2], dat[3], dat[4])][i] -= prob*0.8
A[getIndex(dat[0], dat[1], dat[2], "R", dat[4])][i] -= prob*0.2
A[getIndex(pos, mat, arw, mm, hel)][i] += prob
R[i] += prob*stepCost
else:
if pos in mmReach:
R[i] += 0.5*atkRew
for it in ret:
prob = list(it.keys())[0]
dat = it[prob]
A[getIndex(dat[0], dat[1], dat[2], dat[3], dat[4])][i] -= prob*0.5
A[getIndex(pos, mat, arw, mm, hel)][i] += prob
if pos in mmReach:
A[getIndex(pos, mat, "0", "D", str(min(100, int(hel)+25)))][i] -= prob*0.5
else:
A[getIndex(dat[0], dat[1], dat[2], "D", dat[4])][i] -= prob*0.5
R[i] += prob*stepCost
A = np.array(A)
R = np.array(R)
alpha = np.array(alpha)
alpha = alpha.reshape((600, 1))
constraints = [cp.matmul(A, x) == alpha, x >= 0.0]
objective = cp.Maximize(cp.matmul(R, x))
problem = cp.Problem(objective, constraints)
solution = problem.solve()
policy = []
for i in range(600):
mx = -100000
optAct = "NONE"
for j in range(10):
act = rACT[j]
pos, mat, arw, mm, hel = getState(i)
if not actionPossible(pos, mat, arw, mm, hel, act):
continue
ind = stateActions[(pos, mat, arw, mm, hel, act)]
if x.value[ind] > mx:
optAct = act
mx = x.value[ind]
policy.append([getState(i), optAct])
dump = {}
dump["a"] = A.tolist()
dump["r"] = R.tolist()
dump["x"] = x.value.flatten().tolist()
dump["alpha"] = alpha.flatten().tolist()
dump["policy"] = policy
dump["objective"] = solution
f = open("part_3_output.json", "w+")
json.dump(dump, f) | Aa-Aanegola/ML-Assignments | Assignment_2/Part3/linear_programming.py | linear_programming.py | py | 8,394 | python | en | code | 0 | github-code | 13 |
38999127760 | import datetime
from timeit import default_timer as timer
from random import shuffle
# Task 1
# version using timestamps
class Benchmark1:
def __init__(self, function, *arguments):
self.code_start_timestamp = datetime.datetime.now()
self.function = function
self.arguments = arguments
def __enter__(self):
self.function_output = self.function(*self.arguments)
return self.function_output
def __exit__(self, exc_type, exc_val, exc_tb):
self.code_end_timestamp = datetime.datetime.now()
self.time_delta = (self.code_end_timestamp - self.code_start_timestamp).total_seconds() * 1000
print('Function execution started at {}'.format(self.code_start_timestamp.time()))
print('Function execution finished at {}'.format(self.code_end_timestamp.time()))
print('Total execution time: {} ms'.format(round(self.time_delta)))
# version using timer <- this one seems to be recommended more often
class Benchmark2:
def __init__(self, function, *arguments):
self.code_start_timestamp = timer()
self.function = function
self.arguments = arguments
def __enter__(self):
self.function_output = self.function(*self.arguments)
return self.function_output
def __exit__(self, exc_type, exc_val, exc_tb):
self.code_end_timestamp = timer()
self.time_delta = (self.code_end_timestamp - self.code_start_timestamp) * 1000
print('Function execution started at {}'.format(self.code_start_timestamp))
print('Function execution finished at {}'.format(self.code_end_timestamp))
print('Total execution time: {} ms'.format(round(self.time_delta)))
# Task 2
# program from task 2_4
import copy
# Task 1
def recipes_file_to_dict(filename):
recipes_dict = {}
with open(filename) as file:
while True:
dish_name = file.readline().strip()
last_line_reached = len(dish_name) == 0
if last_line_reached:
return recipes_dict
ingredients_count = int(file.readline().strip())
ingredients_read = 0
while ingredients_count > ingredients_read:
# print(file.readline().strip().split(' | '))
ingredient_name, quantity, measure = file.readline().strip().split(' | ')
ingredient = {
'ingredient_name': ingredient_name,
'quantity': int(quantity),
'measure': measure
}
if dish_name not in recipes_dict:
recipes_dict[dish_name] = []
recipes_dict[dish_name].append(ingredient)
ingredients_read += 1
# read empty line before next dish
file.readline().strip()
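# Illustrative recipes.txt layout implied by the parser above (the dish and ingredient
# names are made up): each block is a dish name, the number of ingredients, one
# "name | quantity | measure" line per ingredient, and a blank line before the next dish.
#
#   Omelette
#   2
#   Egg | 2 | pcs
#   Milk | 100 | ml
#
#   Fajitas
#   ...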
# Task 2
def get_ingredients_per_dish(dishes_list, recipes_dict):
shop_list = {}
for dish_name in dishes_list:
try:
dish_recipe = recipes_dict[dish_name]
except KeyError:
print('Dish "{}" is not in the recipes file'.format(dish_name))
else:
for ingredient in dish_recipe:
ingredient_name, measure, quantity = \
[ingredient[key] for key in ('ingredient_name', 'measure', 'quantity')]
if ingredient_name not in shop_list.keys():
shop_list[ingredient_name] = {
'measure': measure,
'quantity': quantity,
}
else:
shop_list[ingredient_name]['quantity'] += ingredient['quantity']
return shop_list
def multiply_list_by_person_count(shop_list_per_person, person_count):
shop_list = copy.deepcopy(shop_list_per_person)
for ingredient_data in shop_list.values():
ingredient_data['quantity'] *= person_count
return shop_list
def get_shop_list_by_dishes(dishes_list, person_count):
try:
1 / person_count  # raises ZeroDivisionError if person_count is 0
1 / len(dishes_list)  # raises ZeroDivisionError if the dish list is empty
except ZeroDivisionError:
print('The dish list or the number of guests was not provided')
else:
recipes_dict = recipes_file_to_dict('recipes.txt')
shop_list_per_person = get_ingredients_per_dish(dishes_list, recipes_dict)
if person_count == 1:
return shop_list_per_person
else:
shop_list = multiply_list_by_person_count(shop_list_per_person, person_count)
return shop_list
if __name__ == '__main__':
def sort_list(input_list):
input_list.sort()
return input_list
test_list = list(range(100000))
shuffle(test_list)
with Benchmark1(sort_list, test_list) as result:
# print(result)
pass
shuffle(test_list)
with Benchmark2(sort_list, test_list) as result:
# print(result)
pass
with Benchmark1(get_shop_list_by_dishes, ['Фахитос', 'Омлет'], 3) as result:
print(result)
pass
with Benchmark2(get_shop_list_by_dishes, ['Запеченный картофель', 'Омлет'], 2) as result:
print(result)
pass
| ArteDuzz/python_lessons | home_work_2_5.py | home_work_2_5.py | py | 4,688 | python | en | code | 0 | github-code | 13 |
22563427011 | from GenObj import *
class Atacker(GenObj):
"""Basic enemy class"""
# speed = [0, 0]
# __Alive = True
def __init__(self, name, hp, img, rect, boundries):
GenObj.__init__(self, name, hp, img, rect, boundries)
self.__HP = hp
# self.__Alive = True
self.setAlive()
self.name = name
self.initMove()
self.hp = hp
self.img = img
self.rect = rect.copy()
self.bound = boundries
self.rect.x = self.bound[0] - self.rect.width - 1
self.setRndPosition(False, True)
def ai_decision(self):
""" Take some "smart" decision """
decision = {}
decision['direction_change'] = False
decision['fire'] = False
# print "AI decision"
if self.isAlive():
# print "HE HE HE: {}".format(self.isAlive())
decision['direction_change'] = self.changeDirection()
# if not (decision['direction_change']):
decision['fire'] = self.fire()
return decision | mgx259/PyGameTest1 | Atacker.py | Atacker.py | py | 1,039 | python | en | code | 0 | github-code | 13 |
30845767352 |
# Get input: s is the plain text and k is the key (the number of rows in the grid)
s=input("Enter string:")
k=int(input("Enter key:"))
# To create the grid I use a nested list (NumPy arrays could be used here).
# To initialize it, first fill every cell with ' ' (a single space).
enc=[[" " for i in range(len(s))] for j in range(k)]
print(enc)
# row tracks which row the next character goes into; flag marks whether we are travelling upward or downward
flag=0
row=0
for i in range(len(s)):
enc[row][i]=s[i]
if row==0:
flag=0 # flag=0: keep moving in the downward direction
elif row==k-1: # at the last row, set the flag to 1
flag=1 # and travel in the upward direction
if flag==0:
row+=1
else:
row-=1
# The plaintext characters have now been filled into the grid; print each row
for i in range(k):
print("".join(enc[i])) # join() converts the list of characters to a string
#convert ct list to string
ct = []
for i in range(k):
for j in range(len(s)):
if enc[i][j] != ' ':
ct.append(enc[i][j])
cipher = "".join(ct)
print("Cipher Text: ", cipher)
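# Worked example (illustrative): for s = "HELLOWORLD" and k = 3 the zigzag grid is
#   H . . . O . . . L .
#   . E . L . W . R . D
#   . . L . . . O . . .
# and reading the rows top to bottom gives the cipher text "HOLELWRDLO".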
#Railfence by 171311066 | Rakibul66/Cryptography-Lab | Cryptography Lab/Rail felce.py | Rail felce.py | py | 1,234 | python | en | code | 0 | github-code | 13 |
70931156818 | import pytest
from tokamak.radix_tree import utils
def test_dyn_parse_node_init():
with pytest.raises(ValueError):
utils.DynamicParseNode("raw", "09ab")
with pytest.raises(ValueError):
utils.DynamicParseNode("raw", "")
dyn = utils.DynamicParseNode("raw", "ab01", regex="[0-9]+")
assert dyn.regex == "[0-9]+"
dyn = utils.DynamicParseNode("raw", "ab01", regex=None)
assert dyn.regex == utils.DynamicParseNode.MATCH_UP_TO_SLASH
dyn = utils.DynamicParseNode("raw", "ab01", regex="*")
assert dyn.regex == utils.DynamicParseNode.MATCH_UP_TO_SLASH
def test_dyn_parse_node_pattern():
dyn_noregex = utils.DynamicParseNode("raw", "ab01", regex=None)
assert dyn_noregex.pattern is not None
dyn = utils.DynamicParseNode("raw", "ab01", regex="[0-9]+")
assert dyn._pattern is None
assert dyn.pattern is not None
assert dyn._pattern is not None
assert dyn._pattern == dyn.pattern
@pytest.mark.parametrize(
"test_val,should_match,start,end",
(
("0000AABB34xxxx---/", True, 0, 10),
("xxxx---/0000AABB34xxxx---/", False, None, None), # `match` only matches start
),
)
def test_dyn_parse_node_match(test_val, should_match, start, end):
dyn = utils.DynamicParseNode(
"{name:[0-2]+AABB[3-4]{2}}", "name", regex="[0-2]+AABB[3-4]{2}"
)
result = dyn.match(test_val)
if not should_match:
assert result == (-1, None)
else:
assert result[0] == end
assert result[1]["name"] == test_val[start:end]
# TODO: Add hypothesis
@pytest.mark.parametrize(
"val,expected",
(
(("ab", "ab"), 2),
(("aba", "abc"), 2),
(("", "california"), 0),
(("california", ""), 0),
(("", ""), 0),
),
)
def test_first_nonequal_idx(val, expected):
assert utils.first_nonequal_idx(*val) == expected
@pytest.mark.parametrize(
"test_val,expected",
(
("/first/second/third", "/first/second/third"),
("/first/ /third", "/first/ /third"),
("/first/schloß/third", "/first/schloß/third"),
),
)
def test_parse_dyn_boring_input(test_val, expected):
assert next(utils.parse_dynamic(test_val)) == expected
@pytest.mark.parametrize(
"test_val",
(
"::::::::",
"}{",
"///}:{",
"}:{",
"}:",
"/api/{version{bad}}/data",
"/api/{version{bad}:^[a-z]{2}}/data",
"/api/{version{bad1}:^[a-z]{2}:123}{bad2}/data",
),
)
def test_parse_dyn_error_input(test_val):
with pytest.raises(AssertionError):
list(utils.parse_dynamic(test_val))
@pytest.mark.parametrize("incomplete", ("{", "{:", "{aaa", "{:aaaa"))
def test_parse_dyn_incomplete_input(incomplete):
assert list(utils.parse_dynamic(incomplete)) == []
@pytest.mark.parametrize(
"bad_data",
(
"/api/{}/data",
"/api/{version:^[a-z]{2}}{bad}/data",
"/optional/{name?:[a-zA-Z]+}/{word?}",
),
)
def test_parse_dyn_bad(bad_data):
with pytest.raises(ValueError):
list(utils.parse_dynamic(bad_data))
@pytest.mark.parametrize(
"test_val,parts",
(
(
"/api/{param1}/data",
["/api/", utils.DynamicParseNode("{param1}", "param1"), "/data"],
),
(
"/api/{name:[0-2]+}/data",
[
"/api/",
utils.DynamicParseNode("{name:[0-2]+}", "name", regex="[0-2]+"),
"/data",
],
),
(
"/api/{version:^[a-z]{2}}/data",
[
"/api/",
utils.DynamicParseNode(
"{version:^[a-z]{2}}", "version", regex="^[a-z]{2}"
),
"/data",
],
),
# ("/api/{param1}_{param2}/data", []),
# ("/api/{param1:[a-z]{3}}_{param2}/data", []),
# ("/api/prefix{param1:[a-z]{3}}_{param2}suffix/data", []),
),
)
def test_parse_dyn(test_val, parts):
assert list(utils.parse_dynamic(test_val)) == parts
| erewok/tokamak | tests/radix_tree/test_utils.py | test_utils.py | py | 4,033 | python | en | code | 8 | github-code | 13 |
72014401299 | from core.models import TimeStampModel
from django.db import models
class TrainTrack(TimeStampModel):
source = models.CharField(max_length=255, help_text="the source name of the track")
destination = models.CharField(
max_length=255, help_text="the destination name of the track"
)
is_busy = models.BooleanField(
default=False,
help_text="False by default; set to True when the post master assigns this line to a train, and automatically reset to False after three hours.",
)
class Parcel(TimeStampModel):
parcel_owner = models.ForeignKey(
"core.User",
on_delete=models.CASCADE,
help_text="the owner who created the parcel for shipping.",
)
parcel_name = models.CharField(max_length=255, db_index=True)
parcel_weight = models.DecimalField(
max_digits=12, decimal_places=2, help_text="weight of the parcel in Kg"
)
parcel_volume = models.DecimalField(
max_digits=12, decimal_places=2, help_text="volume of the parcel in cm^3"
)
withdraw_bids = models.BooleanField(
default=False,
help_text="False by default; set to True if the owner withdraws the parcel for any reason.",
)
class Meta:
db_table = "parcel"
def __str__(self):
return f"parcel:- {self.parcel_name}"
class Train(TimeStampModel):
train_operator = models.ForeignKey(
"core.User",
on_delete=models.CASCADE,
help_text="the train operator who posts offers for this train",
)
train_name = models.CharField(max_length=255, help_text="The name of the train")
capacity = models.DecimalField(
max_digits=12,
decimal_places=2,
help_text="capacity of the train",
)
cost = models.DecimalField(
max_digits=12, decimal_places=2, help_text="the cost of the shipping"
)
is_available = models.BooleanField(
default=True,
help_text="True by default; set to False once the train has shipped.",
)
withdraw_bids = models.BooleanField(
default=False,
help_text="False by default; set to True if the train operator withdraws the posted offer for any reason.",
)
lines_they_operate = models.ManyToManyField(
"TrainTrack",
related_name="operate_track",
help_text="all lines that the train operator can operate",
)
class Meta:
db_table = "Train"
def __str__(self):
return f"Train:- {self.train_name}"
class ShippedParcel(TimeStampModel):
train = models.ForeignKey(
"Train", on_delete=models.CASCADE, related_name="shipped_train"
)
parcel = models.ForeignKey(
"Parcel", on_delete=models.CASCADE, related_name="shipped_parcel"
)
assigned_lines = models.ForeignKey(
"TrainTrack", on_delete=models.CASCADE, related_name="shipped_track"
)
class Meta:
db_table = "ShippedParcel"
| viprathore/Python | mail_service/service/shipping_parcel/models.py | models.py | py | 3,054 | python | en | code | 0 | github-code | 13 |
21862133170 | '''
Adapted from the video in the Functions secitons of Runestone's FOPP
Changes from house_1.py:
implemented draw_triangle()
implemented draw_rectangle()
'''
import turtle
skippy = turtle.Turtle()
win = turtle.Screen()
skippy.shape("turtle")
house_llx = 0
house_lly = 0
house_size = 200
eve_size = 10
window_size = house_size / 8
door_width = house_size / 8
door_height = house_size / 3
# draw a square with the given size, and with its lower-left
# corner at (x,y).
def draw_square(t, size, x, y):
t.up()
t.goto(x, y)
t.down()
for _ in range(4):
t.forward(size)
t.left(90)
# draw an equilateral triangle with the given size, and with
# its lower-left corner at (x,y).
def draw_triangle(t, size, x, y):
t.up()
t.goto(x, y)
t.down()
for _ in range(3):
t.forward(size)
t.left(120)
# draw a rectangle with the given size, and with its lower-left
# corner at (x,y).
def draw_rectangle(t, width, height, x, y):
t.up()
t.goto(x, y)
t.down()
for i in range(4):
if i % 2 == 0:
t.forward(width)
else:
t.forward(height)
t.left(90)
# Draw the square for the whole house.
draw_square(skippy, house_size, 0, 0)
# Draw a triangle for the roof.
draw_triangle(skippy, house_size + 2 * eve_size, house_llx-eve_size, house_size)
# Draw window
draw_square(skippy, window_size, house_size / 5, house_size / 3)
# Draw window
draw_square(skippy, window_size, house_size - house_size / 5 - window_size, house_size / 3)
# Draw door
draw_rectangle(skippy, door_width, door_height, house_size / 2 - door_width / 2, 0)
win.exitonclick()
| NormandaleWells/CS111Demos | functions/house_2.py | house_2.py | py | 1,660 | python | en | code | 1 | github-code | 13 |
20624458875 |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.cuda.amp as amp
'''
proposed in the BMVC2019 paper: Large Margin in Softmax Cross-Entropy Loss
(link to paper: https://staff.aist.go.jp/takumi.kobayashi/publication/2019/BMVC2019.pdf)
'''
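# Reading of the implementations below (an interpretation of the code, not a quote from
# the paper): with K classes, target y, and q = softmax over the non-target logits only,
# versions 1 and 2 compute (and version 3 is intended as the cpp/cuda equivalent)
#   loss = CE(logits, y) + (lam / 2) * sum_{j != y} (q_j - 1/(K-1)) * log q_j
# i.e. the usual cross-entropy plus a term that pushes the non-target distribution q
# towards the uniform distribution 1/(K-1), which enlarges the margin.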
##
# version 1: use torch.autograd
class LargeMarginSoftmaxV1(nn.Module):
def __init__(self, lam=0.3, reduction='mean', ignore_index=255):
super(LargeMarginSoftmaxV1, self).__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.lam = lam
self.ce_crit = nn.CrossEntropyLoss(
reduction='none', ignore_index=ignore_index)
def forward(self, logits, label):
'''
Same usage method as nn.CrossEntropyLoss:
>>> criteria = LargeMarginSoftmaxV1()
>>> logits = torch.randn(8, 19, 384, 384) # nchw, float/half
>>> lbs = torch.randint(0, 19, (8, 384, 384)) # nhw, int64_t
>>> loss = criteria(logits, lbs)
'''
# overcome ignored label
logits = logits.float()
logits.retain_grad()
logits.register_hook(lambda grad: grad)
with torch.no_grad():
num_classes = logits.size(1)
coeff = 1. / (num_classes - 1.)
lb = label.clone().detach()
mask = label == self.ignore_index
lb[mask] = 0
idx = torch.zeros_like(logits).scatter_(1, lb.unsqueeze(1), 1.)
lgts = logits - idx * 1.e6
q = lgts.softmax(dim=1)
q = q * (1. - idx)
log_q = lgts.log_softmax(dim=1)
log_q = log_q * (1. - idx)
mg_loss = ((q - coeff) * log_q) * (self.lam / 2)
mg_loss = mg_loss * (1. - idx)
mg_loss = mg_loss.sum(dim=1)
ce_loss = self.ce_crit(logits, label)
loss = ce_loss + mg_loss
loss = loss[mask == 0]
if self.reduction == 'mean':
loss = loss.mean()
if self.reduction == 'sum':
loss = loss.sum()
return loss
##
# version 2: hand-derived gradient computation
class LargeMarginSoftmaxV2(nn.Module):
def __init__(self, lam=0.3, reduction='mean', ignore_index=255):
super(LargeMarginSoftmaxV2, self).__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.lam = lam
def forward(self, logits, labels):
'''
Same usage method as nn.CrossEntropyLoss:
>>> criteria = LargeMarginSoftmaxV2()
>>> logits = torch.randn(8, 19, 384, 384) # nchw, float/half
>>> lbs = torch.randint(0, 19, (8, 384, 384)) # nhw, int64_t
>>> loss = criteria(logits, lbs)
'''
logits = logits.float()
mask = labels == self.ignore_index
lb = labels.clone().detach()
lb[mask] = 0
loss = LargeMarginSoftmaxFuncV2.apply(logits, lb, self.lam)
loss = loss[mask == 0]
if self.reduction == 'mean':
loss = loss.mean()
elif self.reduction == 'sum':
loss = loss.sum()
return loss
class LargeMarginSoftmaxFuncV2(torch.autograd.Function):
@staticmethod
@amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, logits, labels, lam=0.3):
num_classes = logits.size(1)
coeff = 1. / (num_classes - 1.)
idx = torch.zeros_like(logits).scatter_(1, labels.unsqueeze(1), 1.)
lgts = logits.clone()
lgts[idx.bool()] = -1.e6
q = lgts.softmax(dim=1)
log_q = lgts.log_softmax(dim=1)
losses = q.sub_(coeff).mul_(log_q).mul_(lam / 2.)
losses[idx.bool()] = 0
losses = losses.sum(dim=1).add_(F.cross_entropy(logits, labels, reduction='none'))
ctx.variables = logits, labels, idx, coeff, lam
return losses
@staticmethod
@amp.custom_bwd
def backward(ctx, grad_output):
'''
compute gradient
'''
logits, labels, idx, coeff, lam = ctx.variables
num_classes = logits.size(1)
p = logits.softmax(dim=1)
lgts = logits.clone()
lgts[idx.bool()] = -1.e6
q = lgts.softmax(dim=1)
qx = q * lgts
qx[idx.bool()] = 0
grad = qx + q - q * qx.sum(dim=1).unsqueeze(1) - coeff
grad = grad * lam / 2.
grad[idx.bool()] = -1
grad = grad + p
grad.mul_(grad_output.unsqueeze(1))
return grad, None, None
#
# version 3: implemented with cpp/cuda to save memory and accelerate
class LargeMarginSoftmaxV3(nn.Module):
def __init__(self, lam=0.3, reduction='mean', ignore_index=255):
super(LargeMarginSoftmaxV3, self).__init__()
self.reduction = reduction
self.ignore_index = ignore_index
self.lam = lam
def forward(self, logits, labels):
'''
Same usage method as nn.CrossEntropyLoss:
>>> criteria = LargeMarginSoftmaxV3()
>>> logits = torch.randn(8, 19, 384, 384) # nchw, float/half
>>> lbs = torch.randint(0, 19, (8, 384, 384)) # nhw, int64_t
>>> loss = criteria(logits, lbs)
'''
logits = logits.float()
losses = LargeMarginSoftmaxFuncV3.apply(
logits, labels, self.lam, self.ignore_index)
if self.reduction == 'mean':
n_valid = (labels != self.ignore_index).sum()
losses = losses.sum() / n_valid
elif self.reduction == 'sum':
losses = losses.sum()
return losses
import large_margin_cpp
class LargeMarginSoftmaxFuncV3(torch.autograd.Function):
'''
use cpp/cuda to accelerate and shrink memory usage
'''
@staticmethod
@amp.custom_fwd(cast_inputs=torch.float32)
def forward(ctx, logits, labels, lam=0.3, ignore_index=255):
losses = large_margin_cpp.l_margin_forward(logits, labels, lam, ignore_index)
ctx.variables = logits, labels, lam, ignore_index
return losses
@staticmethod
@amp.custom_bwd
def backward(ctx, grad_output):
'''
compute gradient
'''
logits, labels, lam, ignore_index = ctx.variables
grads = large_margin_cpp.l_margin_backward(
logits, labels, lam, ignore_index)
grads.mul_(grad_output.unsqueeze(1))
return grads, None, None, None
if __name__ == '__main__':
import torchvision
import torch
import numpy as np
import random
torch.manual_seed(15)
random.seed(15)
np.random.seed(15)
torch.backends.cudnn.deterministic = True
class Model(nn.Module):
def __init__(self):
super(Model, self).__init__()
net = torchvision.models.resnet18(pretrained=False)
self.conv1 = net.conv1
self.bn1 = net.bn1
self.maxpool = net.maxpool
self.relu = net.relu
self.layer1 = net.layer1
self.layer2 = net.layer2
self.layer3 = net.layer3
self.layer4 = net.layer4
self.out = nn.Conv2d(512, 3, 3, 1, 1)
def forward(self, x):
feat1 = self.conv1(x)
feat2 = self.bn1(feat1)
feat3 = self.relu(feat2)
# feat4 = self.maxpool(feat3)
feat5 = self.layer1(feat3)
feat6 = self.layer2(feat5)
feat7 = self.layer3(feat6)
feat8 = self.layer4(feat7)
feat9 = self.out(feat8)
out = feat9
feat8.retain_grad()
feat8.register_hook(lambda grad: grad*100000)
return out, feat8
net1 = Model()
net2 = Model()
from copy import deepcopy
net2.load_state_dict(deepcopy(net1.state_dict()))
# criteria1 = nn.CrossEntropyLoss(reduction='mean')
# criteria2 = nn.CrossEntropyLoss(reduction='mean')
criteria1 = LargeMarginSoftmaxV1(reduction='mean')
criteria2 = LargeMarginSoftmaxV3(reduction='mean')
net1.cuda()
net2.cuda()
net1.train()
net2.train()
criteria1.cuda()
criteria2.cuda()
optim1 = torch.optim.SGD(net1.parameters(), lr=1e-2)
optim2 = torch.optim.SGD(net2.parameters(), lr=1e-2)
bs = 32
for it in range(1000):
inten = torch.randn(bs, 3, 256, 256).cuda()
lbs = torch.randint(0, 3, (bs, 16, 16)).cuda()
lbs[16:, :, :10] = 255
# s = lbs.cpu().detach().numpy()
# np.save('../lb.npy', s)
logits, feat = net1(inten.clone())
loss1 = criteria1(logits, lbs.clone())#.div(bs * 8 * 8)
optim1.zero_grad()
loss1.backward()
optim1.step()
# s = logits.cpu().detach().numpy()
# np.save('../logitsv2.npy', s)
logits, feat = net2(inten.clone())
loss2 = criteria2(logits, lbs.clone())#.div(bs * 8 * 8)
optim2.zero_grad()
loss2.backward()
optim2.step()
# s = logits.cpu().detach().numpy()
# np.save('../logitsv3.npy', s)
# print(logits[0, :, 0, 0])
# print(lbs[0, 0, 0])
# print('net2.weight: ', net2.out.weight[0, 0, :, 0])
# net2.load_state_dict(net1.state_dict())
with torch.no_grad():
if (it+1) % 50 == 0:
# if True:
# print(loss1.item())
# print(loss2.item())
# break
print('iter: {}, ================='.format(it+1))
print('out.weight: ', torch.mean(torch.abs(net1.out.weight - net2.out.weight)).item())
print('conv1.weight: ', torch.mean(torch.abs(net1.conv1.weight - net2.conv1.weight)).item())
# print(net1.out.weight.mean().item())
# print(net2.out.weight.mean().item())
print('\nloss: ', loss1.item() - loss2.item())
| CoinCheung/pytorch-loss | large_margin_softmax.py | large_margin_softmax.py | py | 9,752 | python | en | code | 2,048 | github-code | 13 |
31200586528 | import datetime
import mock
import six
from st2common.transport.publishers import PoolPublisher
from st2common.persistence.reactor import TriggerInstance
from st2common.models.db.reactor import TriggerInstanceDB
from tests import FunctionalTest
http_client = six.moves.http_client
@mock.patch.object(PoolPublisher, 'publish', mock.MagicMock())
class TestTriggerController(FunctionalTest):
@classmethod
def setUpClass(cls):
super(TestTriggerController, cls).setUpClass()
cls._setupTriggerTypes()
cls._setupTriggers()
cls._setupTriggerInstances()
def test_get_all(self):
resp = self.app.get('/v1/triggerinstances')
self.assertEqual(resp.status_int, http_client.OK)
self.assertEqual(len(resp.json), self.triggerinstance_count, 'Get all failure.')
def test_get_one(self):
triggerinstance_id = str(self.triggerinstance_1.id)
resp = self._do_get_one(triggerinstance_id)
self.assertEqual(resp.status_int, http_client.OK)
self.assertEqual(self._get_id(resp), triggerinstance_id)
triggerinstance_id = str(self.triggerinstance_2.id)
resp = self._do_get_one(triggerinstance_id)
self.assertEqual(resp.status_int, http_client.OK)
self.assertEqual(self._get_id(resp), triggerinstance_id)
triggerinstance_id = str(self.triggerinstance_3.id)
resp = self._do_get_one(triggerinstance_id)
self.assertEqual(resp.status_int, http_client.OK)
self.assertEqual(self._get_id(resp), triggerinstance_id)
def test_get_one_fail(self):
resp = self._do_get_one('1')
self.assertEqual(resp.status_int, http_client.NOT_FOUND)
@classmethod
def _setupTriggerTypes(cls):
TRIGGERTYPE_0 = {
'name': 'st2.test.triggertype0',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None},
'parameters_schema': {}
}
TRIGGERTYPE_1 = {
'name': 'st2.test.triggertype1',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None},
}
TRIGGERTYPE_2 = {
'name': 'st2.test.triggertype2',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'payload_schema': {'tp1': None, 'tp2': None, 'tp3': None},
'parameters_schema': {'param1': {'type': 'object'}}
}
cls.app.post_json('/v1/triggertypes', TRIGGERTYPE_0, expect_errors=False)
cls.app.post_json('/v1/triggertypes', TRIGGERTYPE_1, expect_errors=False)
cls.app.post_json('/v1/triggertypes', TRIGGERTYPE_2, expect_errors=False)
@classmethod
def _setupTriggers(cls):
TRIGGER_0 = {
'name': 'st2.test.trigger0',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'type': 'dummy_pack_1.st2.test.triggertype0',
'parameters': {}
}
TRIGGER_1 = {
'name': 'st2.test.trigger1',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'type': 'dummy_pack_1.st2.test.triggertype1',
'parameters': {}
}
TRIGGER_2 = {
'name': 'st2.test.trigger2',
'pack': 'dummy_pack_1',
'description': 'test trigger',
'type': 'dummy_pack_1.st2.test.triggertype2',
'parameters': {
'param1': {
'foo': 'bar'
}
}
}
cls.app.post_json('/v1/triggers', TRIGGER_0, expect_errors=False)
cls.app.post_json('/v1/triggers', TRIGGER_1, expect_errors=False)
cls.app.post_json('/v1/triggers', TRIGGER_2, expect_errors=False)
@classmethod
def _setupTriggerInstances(cls):
cls.triggerinstance_count = 0
cls.triggerinstance_1 = cls._create_trigger_instance(
'dummy_pack_1.st2.test.trigger0',
{'tp1': 1, 'tp2': 2, 'tp3': 3})
cls.triggerinstance_2 = cls._create_trigger_instance(
'dummy_pack_1.st2.test.trigger1',
{'tp1': 'a', 'tp2': 'b', 'tp3': 'c'})
cls.triggerinstance_3 = cls._create_trigger_instance(
'dummy_pack_1.st2.test.trigger2',
{'tp1': None, 'tp2': None, 'tp3': None})
@classmethod
def _create_trigger_instance(cls, trigger_ref, payload):
trigger_instance = TriggerInstanceDB()
trigger_instance.trigger = trigger_ref
trigger_instance.payload = payload
trigger_instance.occurrence_time = datetime.datetime.utcnow()
created = TriggerInstance.add_or_update(trigger_instance)
cls.triggerinstance_count += 1
return created
@staticmethod
def _get_id(resp):
return resp.json['id']
def _do_get_one(self, triggerinstance_id):
return self.app.get('/v1/triggerinstances/%s' % triggerinstance_id, expect_errors=True)
| gtmanfred/st2 | st2api/tests/unit/controllers/v1/test_triggerinstances.py | test_triggerinstances.py | py | 5,055 | python | en | code | null | github-code | 13 |
10839976616 | from flask import Flask
from flask import render_template
from flask import request
import webbrowser,json,os
def webrun():
app = Flask(__name__)
@app.route('/index',methods=['GET','POST'])
def index():
if request.method == 'POST':
filename = request.form['filename']
data1 = filename+"1"
data2 = filename+"2"
return render_template('index.html',data1=data1,data2=data2)
return render_template('index.html')
@app.route('/')
def search():
return render_template('search.html')
webbrowser.open('http://127.0.0.1:5000')
app.run()
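# (editor's example, hedged) With the server above running locally, the /index form can be
# exercised from another process; the 'filename' field matches the form handling in index().
# The requests library is assumed to be available and is not used elsewhere in this script.
def _post_index_example():
    import requests
    resp = requests.post('http://127.0.0.1:5000/index', data={'filename': 'house'})
    print(resp.status_code)
    print(resp.text[:200])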
webrun() | dan890407/OCR_finalproject | OCR/house_web/house_web.py | house_web.py | py | 639 | python | en | code | 0 | github-code | 13 |
161088098 | # -*- coding: utf-8 -*-
from selenium import webdriver
import time
from bs4 import BeautifulSoup
import urllib.request
def parser():
req=urllib.request.Request('https://www.nccst.nat.gov.tw/Vulnerability')
response = urllib.request.urlopen(req)
the_page = response.read()
soup = BeautifulSoup(the_page, "lxml")
divs = soup.find('ul', 'news-list-group')
soups=divs.select('li')
link=divs.find_all('a', href=True)
#print (link[0]['href'])
#print(soups[0].text)
for i in range(len(soups)):
print("%d.%s"%(i+1,soups[i].text.strip("\n")))
print ("https://www.nccst.nat.gov.tw/"+link[i]['href']+"\n")
def main():
parser()
if __name__=="__main__":
main()
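# (editor's note, hedged) Some servers reject urllib's default User-Agent; a slightly more robust
# fetch inside parser() could add a browser-like header and a timeout, for example:
#
#   req = urllib.request.Request(
#       'https://www.nccst.nat.gov.tw/Vulnerability',
#       headers={'User-Agent': 'Mozilla/5.0'})
#   response = urllib.request.urlopen(req, timeout=10)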
| owen51251/linux-tcpreverse | parser_nccst.py | parser_nccst.py | py | 742 | python | en | code | 0 | github-code | 13 |
21949248818 | # encoding:utf-8
from kdmer import *
from collections import defaultdict
import sys
class GrafoDeBruijn:
def __init__(self, kdmers, k, d, rosalind=False):
self.kdmers = kdmers
self.k = int(k)
self.d = int(d)
self.rosalind = rosalind
self.construirGrafo()
def construirGrafo(self):
        # Graph layout:
        # a list of tuples where each tuple is
        # (vertice_origem, aresta, vertice_destino)
        # e.g.: [(vertice1, aresta1, vertice2), (vertice2, aresta2, vertice3)]
self.grafo = []
# dicionário contendo a lista de adjacência
# a chave é o vértice
# o valor é uma lista de vértices
self.dict_lista_adj = defaultdict(list) # dicionário de listas
# dicionário para conter os vértices para poder acessar
# rapidamente as arestas do caminho euleriano
self.dict_arestas = {}
# percorre a lista de kdmers
# cada elemento da lista de kdmers é uma tupla de 2 elementos
for tupla in self.kdmers:
vertice_origem = tupla[0][0:self.k-1] + tupla[1][0:self.k-1]
aresta = tupla[0] + tupla[1]
vertice_destino = tupla[0][1:] + tupla[1][1:]
self.grafo.append((vertice_origem, aresta, vertice_destino))
# adiciona um elemento da lista ao dicionário
self.dict_lista_adj[vertice_origem].append(vertice_destino)
# adiciona no dicionário de arestas
self.dict_arestas[vertice_origem + vertice_destino] = aresta
# gera a lista de adjacência
self.gerarListaAdjacencia()
# gera os caminhos eulerianos
self.gerarCaminhosEulerianos()
    # returns the graph (a list of tuples)
    # each tuple: (vertice1, aresta, vertice2)
def getGrafo(self):
return self.grafo
# função que gera e grava a lista de adjacência em arquivo (DeBruijn.txt)
def gerarListaAdjacencia(self):
lista_adj = ""
# percorrendo o dicionário de listas
for vertice_chave in self.dict_lista_adj:
lista_adj += vertice_chave + " -> "
# obtém a lista de vértices adjacentes ao vertice_chave
lista_vertices = self.dict_lista_adj[vertice_chave]
# percorre a lista de vértices adjacentes
for vertice in lista_vertices:
lista_adj += vertice + " -> "
lista_adj += "\n"
if not self.rosalind: # grava no arquivo
arq = open('DeBruijn.txt', 'w')
arq.write(lista_adj)
arq.close()
# função que retorna o dicionário que representa a lista de adjacência
def getListaAdjacencia(self):
return self.dict_lista_adj
# função que gera os caminhos eulerianos
def gerarCaminhosEulerianos(self):
self.existeCaminhoEuleriano = False
        # check whether an Eulerian circuit/path exists:
        # if every vertex has equal in-degree and out-degree, pick any vertex to start from
        # if all vertices except 2 have equal in-degree and out-degree, and one of those 2 vertices
        # has out-degree exactly 1 greater than its in-degree while the other has in-degree exactly
        # 1 greater than its out-degree, then start from the vertex whose out-degree exceeds its
        # in-degree by 1
        # in any other case there is no Eulerian circuit or path.
        # dictionaries with the in-degree and out-degree of each vertex
        # key: vertex, value: degree count
self.graus_entrada, self.graus_saida = {}, {}
# obtém o dicionário de listas
dict_lista_adj = self.getListaAdjacencia()
# inicializando os a quantidade de graus de saída e entrada dos vértices
for vertice_chave in dict_lista_adj:
self.graus_saida[vertice_chave], self.graus_entrada[vertice_chave] = 0, 0
lista_vertices = dict_lista_adj[vertice_chave]
for vertice in lista_vertices:
self.graus_saida[vertice], self.graus_entrada[vertice] = 0, 0
# percorre o dicionário de listas
# a chave é o vértice e o valor é uma lista de vértices adjacentes
for vertice_chave in dict_lista_adj:
# obtém todos os vértices adjacentes ao vertice_chave
lista_vizinhos = dict_lista_adj[vertice_chave]
# a quantidade de graus de saída do vertice_chave é o tamanho dessa lista
self.graus_saida[vertice_chave] = len(lista_vizinhos)
# percorre todos os vértices adjacentes ao vertice_chave
for vizinho in lista_vizinhos:
self.graus_entrada[vizinho] += 1
# variáveis para verificar os graus dos vértices
todos_tem_mesmo_grau = True
qte_vertices_grau_diferente = 0
vertices_grau_diferente = []
# verifica o primeiro caso (todos os vértices tem o mesmo de entrada e saída)
for vertice in self.graus_entrada:
if (self.graus_entrada[vertice] != self.graus_saida[vertice]):
todos_tem_mesmo_grau = False
if (qte_vertices_grau_diferente > 2):
break
else:
vertices_grau_diferente.append(vertice)
qte_vertices_grau_diferente += 1
        # vertex to start from
        vertice_inicio = ""
        if (todos_tem_mesmo_grau == True):
            self.existeCaminhoEuleriano = True
            # pick any vertex to start from
            if (len(self.graus_entrada) > 0):
                vertice_inicio = next(iter(self.graus_entrada))
            else:
                vertice_inicio = next(iter(self.graus_saida))
        else:
            # check the second case (all but 2 vertices have equal in/out degree)
            # only runs when not all degrees match and the number of
            # vertices with unbalanced degree is exactly 2
            if (qte_vertices_grau_diferente == 2):
                # grab the two vertices whose in-degree differs from their out-degree
                vertice1, vertice2 = vertices_grau_diferente[0], vertices_grau_diferente[1]
                if (abs(self.graus_saida[vertice1] - self.graus_entrada[vertice1]) == 1) and \
                    (abs(self.graus_entrada[vertice2] - self.graus_saida[vertice2]) == 1):
                    self.existeCaminhoEuleriano = True
                    # start from the vertex whose out-degree is 1 greater than its in-degree
                    if (self.graus_saida[vertice1] > self.graus_entrada[vertice1]):
                        vertice_inicio = vertice1[:]
                    else:
                        vertice_inicio = vertice2[:]
# a segunda parte do algoritmo só executa se tiver caminho euleriano
if (self.existeCaminhoEuleriano == True):
pilha, self.circuito = [], []
vertice_corrente = vertice_inicio
while(True):
# condição de parada: vértice corrente NÃO possuir vizinhos e a pilha estiver vazia
if(self.graus_saida[vertice_corrente] == 0 and len(pilha) == 0):
break
else:
# verifica se o vértice NÃO possui vizinhos (grau de saída 0)
if(self.graus_saida[vertice_corrente] == 0):
# adiciona ao circuito
self.circuito.append(vertice_corrente)
# remove o último elemento da pilha e seta ele como corrente
vertice_corrente = pilha.pop()
else:
# se caiu aqui, é porque o vertice_corrente possui vizinhos
# adiciona o vértice corrente na pilha
pilha.append(vertice_corrente)
# seleciona qualquer vizinho e remove ele da lista de vizinhos
vizinho = self.dict_lista_adj[vertice_corrente].pop()
# atualiza o grau de saída do vertice_corrente
self.graus_saida[vertice_corrente] -= 1
# atualiza o grau de entrada do vizinho
self.graus_entrada[vizinho] -= 1
# seta o vizinho como o vertice corrente
vertice_corrente = vizinho[:]
# adiciona o vertice_inicio ao circuito
self.circuito.append(vertice_inicio)
# inverte a lista para obter a ordem certa
self.circuito = self.circuito[::-1]
# grava o caminho euleriano no arquivo "Eulerianos.txt"
caminho_euleriano = vertice_inicio[:]
tam_caminho = len(self.circuito)
for i in range(1, tam_caminho - 1):
caminho_euleriano += " -> " + self.circuito[i]
caminho_euleriano += " -> " + self.circuito[tam_caminho - 1]
if not self.rosalind: # grava no arquivo
arq = open('Eulerianos.txt', 'w')
arq.write(caminho_euleriano)
arq.close()
# chama a função para reconstruir a sequência
self.reconstruirSequencia()
# função que remonta a sequência através das arestas
def reconstruirSequencia(self):
# obtém todas as arestas
arestas, tam_circuito = [], len(self.circuito)
# obtém todas as arestas para poder remontar a sequência
for i in range(tam_circuito):
if(i < tam_circuito - 1):
chave = self.circuito[i] + self.circuito[i + 1]
arestas.append(self.dict_arestas[chave])
# obtém o tamanho das arestas
len_arestas = len(arestas)
# obtém a string até primeira metade da aresta (isso só vale para a primeira)
self.sequencia = arestas[0][:self.k]
# sufixos utilizados na reconstrução da sequência
sufixos = ''
# a partir da segunda, obtém somente o último caractere da primeira metade da aresta
for i in range(1, len_arestas):
if i == (len_arestas - self.d - 1):
# acessa a aresta (len_arestas - self.d - 1)
sufixos += arestas[len_arestas - self.d - 1][self.k:self.k + self.d]
if self.k < self.d: # caso especial: k < d
# a partir de uma determinada posição, pega o último caractere da segunda metade (sufixo) da aresta
if i > (len_arestas - self.d - 1):
sufixos += arestas[i][-1]
# obtém somente o último caractere da primeira metade (prefixo) da aresta
self.sequencia += arestas[i][:self.k][-1]
self.sequencia += sufixos
if self.k >= self.d:
# obtém toda a segunda metade da última aresta
# essa segunda metade já é obtida quando k < d
self.sequencia += arestas[-1][self.k:]
# escrevendo a sequência reconstruída no arquivo
arq = open('sequencia_reconstruida.txt', 'w')
arq.write(self.sequencia)
arq.close()
# retorna se existe caminho euleriano
def existeEuleriano(self):
return self.existeCaminhoEuleriano
# retorna o circuito
def getCircuito(self):
return self.circuito
# retorna a sequência reconstruída
def getSequencia(self):
return self.sequencia
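# (editor's example, hedged) Tiny usage sketch; the (k,d)-mer pairs below are the classic Rosalind
# sample for k=4, d=2. Whether a sequence is actually reconstructed depends on an Eulerian path
# existing in the resulting graph, so treat this as illustration only.
def _toy_usage():
    kdmers = [('GAGA', 'TTGA'), ('TCGT', 'GATG'), ('CGTG', 'ATGT'),
              ('TGGT', 'TGAG'), ('GTGA', 'TGTT'), ('GTGG', 'GTGA'),
              ('TGAG', 'GTTG'), ('GGTC', 'GAGA'), ('GTCG', 'AGAT')]
    g = GrafoDeBruijn(kdmers, k=4, d=2, rosalind=True)
    if g.existeEuleriano():
        print(g.getSequencia())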
# obtém a quantidade de argumentos
len_args = len(sys.argv)
if len_args != 2 and len_args != 3:
print('\nExecute:\n\tpython assembler.py <arquivo_de_entrada>\n')
else:
if len_args == 2: # entrada normal do programa
try:
obj_fasta = ArquivoFasta(sys.argv[1])
kdmer = KDMer(obj_fasta.getSequencia(), obj_fasta.getK(), obj_fasta.getD())
grafo = GrafoDeBruijn(kdmer.getMers(), obj_fasta.getK(), obj_fasta.getD())
# verifica se existe caminho euleriano
if (grafo.existeEuleriano()):
# teste para verificar se as sequências batem
if (grafo.getSequencia() == obj_fasta.getSequencia()):
print('\nSequência reconstruída com sucesso!\n\nForam gerados os arquivos:\n\t DeBruijn.txt, Eulerianos.txt, kdmers.txt, sequencia_reconstruida.txt\n')
else:
print('Falha: foi gerada uma sequência diferente da original.')
print('Tamanho da sequência original: %d' % len(obj_fasta.getSequencia()))
print('Tamanho da sequência reconstruída: %d' % len(grafo.getSequencia()))
else:
print('Não existe caminho euleriano!')
except:
print('\nErro: verifique se o caminho do arquivo existe!\n')
else: # entrada do problema do Rosalind:
if sys.argv[2] == 'rosalind':
try:
# lê o arquivo no formato do Rosalind
arquivo = open(sys.argv[1], 'r')
# lê todas as linhas do arquivo
linhas = arquivo.readlines()
# obtém o "k" e o "d"
k, d = linhas[0].replace('\n', '').split(' ')
# obtém a quantidade de linhas
len_linhas = len(linhas)
# no formato do Rosalind os kdmers já são fornecidos (não precisa gerá-los)
kdmers = []
# obtém todos os kdmers
for i in range(1, len_linhas):
mer1, mer2 = linhas[i].replace('\n', '').split('|')
kdmers.append((mer1, mer2))
grafo = GrafoDeBruijn(kdmers, k, d, rosalind=True)
print('\nFoi gerado o arquivo: sequencia_reconstruida.txt\n')
# fecha o arquivo
arquivo.close()
except:
print('\nErro: verifique se o caminho do arquivo existe!\n')
else:
print('\nExecute:\n\tpython assembler.py <arquivo_rosalind> rosalind\n')
| marcoscastro/bruijn_graph | src/assembler.py | assembler.py | py | 12,056 | python | pt | code | 3 | github-code | 13 |
33527990756 | # -*- coding = utf-8 -*-
# @Time : 5/5/2023 7:49 PM
# @Author : zyxiao
# @File : 计算均值和方差.py
# @Software : {PyCharm}
import torch
from torchvision.datasets import ImageFolder
# def getStat(train_data):
# '''
# Compute mean and variance for training data
# :param train_data: a custom Dataset (an ImageFolder also works)
# :return: (mean, std)
# '''
# print('Compute mean and variance for training data.')
# print(len(train_data))
# train_loader = torch.utils.data.DataLoader(
# train_data, batch_size=1, shuffle=False, num_workers=0,
# pin_memory=True)
# mean = torch.zeros(3)
# std = torch.zeros(3)
# for X, _ in train_loader:
# for d in range(3):
# mean[d] += X[:, d, :, :].mean()
# std[d] += X[:, d, :, :].std()
# mean.div_(len(train_data))
# std.div_(len(train_data))
# return list(mean.numpy()), list(std.numpy())
#
#
# if __name__ == '__main__':
# train_dataset = ImageFolder(root=r'D:\cifar10_images\test')
# print(getStat(train_dataset))
import os
import numpy as np
import cv2
files_dir = r'F:\pycharm-workspace\selfCreateDataset\images/'
files = os.listdir(files_dir)
R = 0.
G = 0.
B = 0.
R_2 = 0.
G_2 = 0.
B_2 = 0.
N = 0
for file in files:
img = cv2.imread(files_dir+file)
    print(files_dir+file+" is being processed")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = np.array(img)
h, w, c = img.shape
N += h*w
R_t = img[:, :, 0]
R += np.sum(R_t)
R_2 += np.sum(np.power(R_t, 2.0))
G_t = img[:, :, 1]
G += np.sum(G_t)
G_2 += np.sum(np.power(G_t, 2.0))
B_t = img[:, :, 2]
B += np.sum(B_t)
B_2 += np.sum(np.power(B_t, 2.0))
R_mean = R/N
G_mean = G/N
B_mean = B/N
R_std = np.sqrt(R_2/N - R_mean*R_mean)
G_std = np.sqrt(G_2/N - G_mean*G_mean)
B_std = np.sqrt(B_2/N - B_mean*B_mean)
print("R_mean: %f, G_mean: %f, B_mean: %f" % (R_mean, G_mean, B_mean))
print("R_std: %f, G_std: %f, B_std: %f" % (R_std, G_std, B_std))
# R_mean: 113.414338, G_mean: 104.663787, B_mean: 90.194004
# R_std: 71.098730, G_std: 67.739814, B_std: 69.458805 | xzyxiaohaha/PythonUtil | pythonUtil/数据集操作工具类/计算均值和方差.py | 计算均值和方差.py | py | 2,215 | python | en | code | 0 | github-code | 13 |
33501959431 | from Generator import DataGenerator
from Model import BuildModel
import json
import pickle
from keras import backend as K  # needed below for K.batch_get_value; assumes a Keras backend
if __name__ == "__main__":
with open("processedData/meta.json") as fl:
meta = json.load(fl)
# parameter to test model
test = True
train_ids = range(1, 100)
val_ids = range(100, 140)
batch_size = 2
if test == False:
train_ids = range(1, 100000)
val_ids = range(100000, 120000)
batch_size = 14
num_context_token = meta["max_context"]
num_query_token = meta["max_question"]
num_hidden_state = 100
training_generator = DataGenerator(train_ids, batch_size=batch_size)
validation_generator = DataGenerator(val_ids, batch_size=batch_size)
model = BuildModel(num_context_token, num_query_token, num_hidden_state, meta["vocab_length"])
# parameter to load and retrain model
load_model = False
if load_model == True:
model.load_weights('weights.h5')
model._make_train_function()
with open('optimizer.pkl', 'rb') as f:
weight_values = pickle.load(f)
model.optimizer.set_weights(weight_values)
model.fit_generator(generator=training_generator, validation_data=validation_generator, epochs=5)
model.save_weights('weights.h5')
symbolic_weights = getattr(model.optimizer, 'weights')
weight_values = K.batch_get_value(symbolic_weights)
with open('optimizer.pkl', 'wb') as f:
pickle.dump(weight_values, f) | ParthTandel/Bidaf | Main.py | Main.py | py | 1,448 | python | en | code | 0 | github-code | 13 |
25810773494 | #! /usr/bin/env python3
'''
Truck package
Note: truck csv file is checked with module load.
'''
import os
import math
import pandas as pd
import config as cfg
trucks_file = os.path.join(cfg.data_dir, cfg.trucks_csv)
if not os.path.isfile(trucks_file): raise FileNotFoundError
class Truck:
def __init__(self) -> None:
'''Load truck csv into a pandas dataframe'''
self.data = pd.read_csv(trucks_file)
def count(self) -> int:
'''Count trucks loaded from csv'''
return len(self.data.index)
def range(self) -> tuple:
'''Return (northern, southern) and (western, eastern limits), based on available trucks'''
return ( (round(self.data['lat'].max()), round(self.data['lat'].min())),
(round(self.data['lng'].min()), round(self.data['lng'].max())) )
def locate(self, lat, lng, range_limit=5, results_limit=3) -> dict:
'''
Return trucks as near as possible to the informed coords.
Arguments:
lat (float): latitude, as in 34.79981
lng (float): longitude, as in -87.677251
range_limit (int): search limit (optional, defaults to 5 degrees or 500+ km)
Return:
dict: up to three closest trucks (ordered by proximity)
'''
lat_limits = [ int(lat*10) ] * 2
lng_limits = [ int(lng*10) ] * 2
for degree in range(1, range_limit*10, 3): # increases range in 0.3 degrees
lat_limits = [ lat_limits[0]-degree, lat_limits[1]+degree ]
lng_limits = [ lng_limits[0]-degree, lng_limits[1]+degree ]
mask = self.data['lat'].between(lat_limits[0]/10, lat_limits[1]/10) \
& self.data['lng'].between(lng_limits[0]/10, lng_limits[1]/10)
df = self.data.loc[mask]
if len(df) >= results_limit: break
trucks = {}
for i, row in df.iterrows():
h = math.hypot(abs((lat)-(row.lat)), abs((lng)-(row.lng)))
trucks[h] = { 'id': i }
res = { 'location': { 'lat': lat, 'lng': lng }, 'options': [] }
for h in sorted(trucks):
i = trucks[h]['id']
res['options'].append(self.data.iloc[i].to_dict())
return res
def print(self, limit=None) -> None:
'''
Print list of trucks
Arguments:
limit (int): prints N records (optional, defaults to All records)
'''
print(self.data.head(limit))
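# (editor's example, hedged) Quick usage sketch, assuming the package's config and truck CSV are
# in place; the coordinates are the ones quoted in the locate() docstring above.
if __name__ == '__main__':
    truck = Truck()
    print('trucks loaded:', truck.count(), 'coverage:', truck.range())
    nearby = truck.locate(34.79981, -87.677251)
    for option in nearby['options']:
        print(option)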
| nandoabreu/geo-coordinate-calc-and-pandas | truck/__init__.py | __init__.py | py | 2,464 | python | en | code | 0 | github-code | 13 |
32471450025 | from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from common.views import CommentList, FavoriteList, FavoriteDelete, Search
from server import settings
urlpatterns = patterns('',
url(r'^$', 'common.views.index', name='index'),
url(r'^user/', include('common.urls')),
url(r'^ingredient/', include('ingredient.urls')),
url(r'^product/', include('product.urls')),
url(r'^recipe/', include('recipe.urls')),
url(r'^supplier/', include('supplier.urls')),
url(r'^gastronomist/', include('gastronomist.urls')),
url(r'^nutrition/', include('nutrition.urls')),
url(r'^(?P<type>\w+)/(?P<pk>\d+)/comment/$', CommentList.as_view()),
url(r'^(?P<type>\w+)/(?P<pk>\d+)/favorite/$', FavoriteList.as_view()),
url(r'^(?P<type>\w+)/(?P<id>\d+)/favorite/(?P<pk>\d+)/$', FavoriteDelete.as_view()),
url(r'^search/$', Search.as_view()),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^o/', include('oauth2_provider.urls', namespace='oauth2_provider')),
) \
+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) \
+ static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| wpstan/Nourriture | source/server/server/urls.py | urls.py | py | 1,854 | python | en | code | 0 | github-code | 13 |
5471398164 | # -*- coding: utf-8 -*-
import pyupbit
def get_coin(coin):
if coin == None:
pass
# tickers = pyupbit.get_tickers()
# return tickers
elif coin != None:
coin = coin.replace(" ","")
ticker = coin.replace("코인","")
price = format(pyupbit.get_current_price(ticker), ',')
pricelow = pyupbit.get_current_price(ticker)
df = pyupbit.get_ohlcv(ticker, count=2, interval="day")
df.drop(['volume', 'high', 'low'], axis=1, inplace=True)
df['N'] = ['1', '2']
df['Date'] = df.index
df.set_index('N', inplace=True)
df["open"] = [format(df['open'][0], ','), format(df['open'][1], ',')]
df["close"] = [format(df['close'][0], ','), format(df['close'][1], ',')]
yesterdaylow = float(df['close'][0].replace(",", ""))
pricefloat = float(pricelow)
per = (pricefloat / yesterdaylow * 100) - 100
per = "%.2f%%" %per
return df, price, per
if __name__ == '__main__':
print('im main') | redplug/slackbot | get_coin.py | get_coin.py | py | 1,026 | python | en | code | 0 | github-code | 13 |
20644297624 | from django import forms
from .models import Tracker, Finance, Task, TaskStatus
class TrackerForm(forms.ModelForm):
class Meta:
model = Tracker
fields = ['init_name', 'supplier', 'type', 'scope', 'comments', 'theo', 'start_date', 'end_date', 'business_owner', 'division', 'proc_owner', 'category', 'contract_value', 'status']
labels = {'init_name': 'Initiative Name', 'supplier': 'Supplier Name', 'type': 'Activity Type', 'scope': 'Activity Scope',
'comments': 'Comments', 'theo': 'Planned Project?', 'start_date': 'Activity Start Date (YYYY-MM-DD)', 'end_date': 'Activity End Date (YYYY-MM-DD)',
'business_owner': 'Business Owner', 'division': 'SBU', 'proc_owner': 'Procurement Owner', 'category': 'Category', 'contract_value': 'Total Contract Value', 'status': 'Status' }
class FinanceForm(forms.ModelForm):
class Meta:
model = Finance
fields = ['record','term_mo', 'yr_savings', 'spend_cur_fy', 'spend_prev_fy', 'contract_start', 'contract_end',
'savings_start', 'savings_end', 'budget_type', 'cs_type', 'confidence', 'nature']
labels = {'record': 'Tracker', 'term_mo': 'Term in Months', 'yr_savings': '12 mo Savings', 'spend_cur_fy': 'Current FY Spend', 'spend_prev_fy': 'Previous FY Spend', 'contract_start': 'Contract Start Date (YYYY-MM-DD)', 'contract_end': 'Contract End Date (YYYY-MM-DD)',
'savings_start': 'Savings Start Date (YYYY-MM-DD)', 'savings_end': 'Savings End Date (YYYY-MM-DD)', 'budget_type': 'Budget Type', 'cs_type': 'Cost Savings Type', 'confidence': 'Confidence Level', 'nature': 'Nature of Deal'}
class TaskForm(forms.Form):
description = forms.CharField(max_length = 255, strip=True)
due = forms.DateField()
status = forms.ModelChoiceField(queryset=TaskStatus.objects.all())
| eric-oaktree/spat | tracker/forms.py | forms.py | py | 1,847 | python | en | code | 1 | github-code | 13 |
40807770887 | from sklearn import datasets
from sklearn.model_selection import train_test_split
from scipy.spatial import distance
from sklearn.metrics import accuracy_score
def euc(a, b): # gives distance between two points
return distance.euclidean(a,b)
class MyClassifier:
def fit(self, x_train, y_train):
self.x_train = x_train
self.y_train = y_train
def predict(self, x_test):
predictions = []
for row in x_test:
label = self.closest(row)
predictions.append(label)
return predictions
def closest(self, row):
best_dist = euc(row, self.x_train[0])
best_index = 0
for i in range(1, len(self.x_train)):
dist = euc(row, self.x_train[i])
if dist < best_dist:
best_dist = dist
best_index = i
return self.y_train[best_index]
iris = datasets.load_iris()
x = iris.data
y = iris.target
# split total data into train and test data sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.5)
#call our own classifier class
my_classifier = MyClassifier()
my_classifier.fit(x_train, y_train) # provide training data to the classifier
predictions = my_classifier.predict(x_test)
print(accuracy_score(y_test, predictions))
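# (editor's addition, hedged) A natural extension of the 1-nearest-neighbour logic above is a
# majority vote over the k closest training points; this subclass is not part of the original script.
from collections import Counter

class MyKNNClassifier(MyClassifier):
    def __init__(self, k=3):
        self.k = k

    def closest(self, row):
        # rank training points by distance and vote among the k nearest labels
        order = sorted(range(len(self.x_train)), key=lambda i: euc(row, self.x_train[i]))
        votes = [self.y_train[i] for i in order[:self.k]]
        return Counter(votes).most_common(1)[0][0]

knn = MyKNNClassifier(k=5)
knn.fit(x_train, y_train)
print(accuracy_score(y_test, knn.predict(x_test)))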
| surazad99/Machine-Learning | MyOwnClassifier.py | MyOwnClassifier.py | py | 1,295 | python | en | code | 1 | github-code | 13 |
27685531249 | import moderngl
import numpy as np
from typing import Tuple
Color = Tuple[float, float, float, float]
Point2 = Tuple[float, float]
_SIGNAL_VERTEX_SHADER = '''
#version 330
uniform int Start;
uniform float XScale;
uniform mat4 MainRect;
in int in_vert;
void main() {
float x = 2. * (Start + gl_VertexID) * XScale - 1.;
float y = in_vert / 32768.;
gl_Position = vec4(x, y, 0.0, 1.0) * MainRect;
}
'''
_BASIC_FRAGMENT_SHADER = '''
#version 330
uniform vec4 Color;
out vec4 f_color;
void main() {
f_color = Color;
}
'''
_BASIC_VERTEX_SHADER = '''
#version 330
out int inst;
void main() {
inst = gl_InstanceID;
}
'''
_LINE_GEOMETRY_SHADER = '''
#version 330
layout (points) in;
layout (line_strip, max_vertices = 2) out;
uniform mat4x2 LineMatrix;
uniform int Segments;
uniform mat4 MainRect;
void main() {
float start = float(2 * gl_PrimitiveIDIn) / Segments;
gl_Position = vec2(start, 1) * LineMatrix * MainRect;
EmitVertex();
float end = float(2 * gl_PrimitiveIDIn + 1) / Segments;
gl_Position = vec2(end, 1) * LineMatrix * MainRect;
EmitVertex();
EndPrimitive();
}
'''
_GRID_GEOMETRY_SHADER = '''
#version 330
layout (points) in;
layout (line_strip, max_vertices = 2) out;
in int inst[1];
uniform mat4 MainRect;
uniform int HCount;
uniform int VCount;
uniform int Segments;
void vline(float n, float m) {
float x = 0.;
if (HCount != 0)
x = 2 * n / HCount - 1;
float y1 = 4 * m / Segments - 1;
float y2 = (4 * m + 2) / Segments - 1;
gl_Position = vec4(x, y1, 0, 1) * MainRect;
EmitVertex();
gl_Position = vec4(x, y2, 0, 1) * MainRect;
EmitVertex();
EndPrimitive();
}
void hline(float n, float m) {
float y = 0;
if (VCount != 0)
y = 2 * n / VCount - 1;
float x1 = 4 * m / Segments - 1;
float x2 = (4 * m + 2) / Segments - 1;
gl_Position = vec4(x1, y, 0, 1) * MainRect;
EmitVertex();
gl_Position = vec4(x2, y, 0, 1) * MainRect;
EmitVertex();
EndPrimitive();
}
void main() {
if (inst[0] <= HCount)
vline(inst[0], gl_PrimitiveIDIn);
else
hline(inst[0] - HCount - 1, gl_PrimitiveIDIn);
}
'''
_TEXTURE_VERTEX_SHADER = '''
#version 330
uniform mat4 MainRect;
uniform mat2 TexScale;
uniform int TexOffset;
uniform int Height;
in vec2 in_vert;
in vec2 in_texcoord;
out vec2 v_texcoord;
void main() {
v_texcoord = (in_texcoord - vec2(0, float(TexOffset) / Height))
* TexScale;
gl_Position = vec4(in_vert[0], in_vert[1], 0.0, 1.0) * MainRect;
}
'''
_TEXTURE_FRAGMENT_SHADER = '''
#version 330
uniform sampler2D Texture;
uniform int Palette;
in vec2 v_texcoord;
out vec4 f_color;
vec4 palette0(float v) {
return vec4(v*v, v*v*v, v*(1-v)+v*v*v, 1.);
}
vec4 palette1(float v) {
return vec4(v, v, v*v*v*v, v);
}
vec4 palette2(float v) {
return vec4(v, 0, 0, v);
}
void main() {
float val = texture(Texture, v_texcoord)[0];
if (Palette == 2) {
f_color = palette2(val);
}
else if (Palette == 1) {
f_color = palette1(val);
}
else {
f_color = palette0(val);
}
if (f_color.a < 0.1)
discard;
}
'''
class VertexArrayWrapper:
def __init__(self, ctx: moderngl.Context, program: moderngl.Program,
contents: list, main_rect: np.ndarray):
self.vao = ctx.vertex_array(program, contents)
self.program['MainRect'] = tuple(main_rect.flatten())
self.mode = None
@property
def program(self) -> 'moderngl.Program':
return self.vao.program
def render(self, mode=None, vertices=-1, **kargs):
r_mode = mode if mode is not None else self.mode
self.vao.render(mode=r_mode, vertices=vertices, **kargs)
class SignalVA(VertexArrayWrapper):
VERTEX_WIDTH = 2
def __init__(self, ctx: moderngl.Context, main_rect: np.ndarray,
max_count: int):
program = ctx.program(
vertex_shader=_SIGNAL_VERTEX_SHADER,
fragment_shader=_BASIC_FRAGMENT_SHADER)
self.buffer = ctx.buffer(
reserve=max_count * self.VERTEX_WIDTH)
super().__init__(ctx, program, [(self.buffer, 'i2', 'in_vert')],
main_rect)
self.mode = moderngl.LINE_STRIP
def render_data(self, points: np.ndarray, count: int, scale: int,
start: int, color: Color):
self.buffer.orphan()
self.buffer.write(points)
self.program['Color'] = color
self.program['XScale'] = 1 / (scale - 1)
self.program['Start'] = start
super().render(vertices=count)
class GridVA(VertexArrayWrapper):
# Grid color
GRID_COLOR = (0.4, 0.4, 0.4, 1.0)
# Color for frame, center lines and ticks
FRAME_COLOR = (0.8, 0.8, 0.8, 1.0)
# Number of segments of grid lines
GRID_SEGMENTS = 101
def __init__(self, ctx: moderngl.Context, main_rect: np.ndarray,
horizontal_div: int, vertical_div: int):
program = ctx.program(
vertex_shader=_BASIC_VERTEX_SHADER,
geometry_shader=_GRID_GEOMETRY_SHADER,
fragment_shader=_BASIC_FRAGMENT_SHADER)
super().__init__(ctx, program, [], main_rect)
self.mode = moderngl.POINTS
self.horizontal_div = horizontal_div
self.vertical_div = vertical_div
def render_fragment(self, hcount: int, vcount: int, segments: int,
color: Color, ticks: bool = False):
self.program['HCount'] = hcount
self.program['VCount'] = vcount
self.program['Color'] = color
self.program['Segments'] = segments
segs = segments // 2 + 1 if not ticks else 1
super().render(vertices=segs, instances=hcount + vcount + 2)
# super().render(vertices=segs, instances=1)
def render_grid(self):
# Render basic grid
self.render_fragment(self.horizontal_div, self.vertical_div,
self.GRID_SEGMENTS, self.GRID_COLOR)
# Render central horizontal and vertical lines
self.render_fragment(0, 0, self.GRID_SEGMENTS, self.FRAME_COLOR)
def render_frame(self):
# Render frame
self.render_fragment(1, 1, 1, self.FRAME_COLOR)
# Render ticks
self.render_fragment(self.horizontal_div * 5, self.vertical_div * 5,
200, self.FRAME_COLOR, ticks=1)
class LineVA(VertexArrayWrapper):
def __init__(self, ctx: moderngl.Context, main_rect: np.ndarray):
program = ctx.program(
vertex_shader=_BASIC_VERTEX_SHADER,
geometry_shader=_LINE_GEOMETRY_SHADER,
fragment_shader=_BASIC_FRAGMENT_SHADER)
super().__init__(ctx, program, [], main_rect)
self.mode = moderngl.POINTS
def render_line(self, start: Point2, end: Point2, color: Color,
segments: int):
self.program['LineMatrix'] = (end[0] - start[0], start[0],
end[1] - start[1], start[1],
0, 0,
0, 1)
self.program['Color'] = color
self.program['Segments'] = segments
super().render(vertices=segments // 2 + 1)
def render_hline(self, y: float, color: Color, segments: int):
self.render_line((-1, y), (1, y), color, segments)
def render_vline(self, x: float, color: Color, segments: int):
self.render_line((x, -1), (x, 1), color, segments)
class TextureVA(VertexArrayWrapper):
def __init__(self, ctx: moderngl.Context, main_rect: np.ndarray,
width: int, height: int,
palette: int = 0):
program = ctx.program(
vertex_shader=_TEXTURE_VERTEX_SHADER,
fragment_shader=_TEXTURE_FRAGMENT_SHADER)
self.width = width
self.height = height
self.buffer = ctx.buffer(
np.array([
-1.0, -1.0, 0, 0, # lower left
-1.0, 1.0, 0, 1, # upper left
1.0, -1.0, 1, 0, # lower right
1.0, 1.0, 1, 1, # upper right
], dtype=np.float32))
super().__init__(ctx, program,
[(self.buffer, '2f4 2f4', 'in_vert', 'in_texcoord')],
main_rect)
self.program['Height'] = height
self.program['TexScale'] = (1, 0, 0, 1)
self.program['Palette'] = palette
self.mode = moderngl.TRIANGLE_STRIP
self.texture = ctx.texture((width, height), 1, dtype='f4')
# self.texture.filter = moderngl.NEAREST, moderngl.NEAREST
self.texture.swizzle = 'R001'
self.texture_buffer = ctx.buffer(reserve=width * height * 4)
self.position = 0
def render(self):
self.texture.use(location=0)
super().render()
def write_data(self, data: np.ndarray):
self.texture_buffer.write(data, offset=(
self.height - self.position - 1)
* self.width * 4)
self.texture.write(self.texture_buffer)
self.program['TexOffset'] = self.position
self.position = (self.position + 1) % self.height
def render_data(self, data: np.ndarray):
self.write_data(data)
self.render()
def set_scale(self, scale_x: float):
self.program['TexScale'] = (1 / scale_x, 0, 0, 1)
def clear(self):
self.texture_buffer.clear()
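# (editor's example, hedged) Offscreen smoke test for GridVA; it needs a GPU/driver capable of a
# standalone moderngl context, so treat it as a sketch rather than a unit test.
if __name__ == '__main__':
    ctx = moderngl.create_standalone_context()
    fbo = ctx.simple_framebuffer((640, 480))
    fbo.use()
    ctx.clear(0.0, 0.0, 0.0, 1.0)
    grid = GridVA(ctx, np.identity(4, dtype=np.float32), horizontal_div=10, vertical_div=8)
    grid.render_grid()
    grid.render_frame()
    print('rendered into', fbo.size)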
| monkeyman79/mutt | mutt/ui/shaders.py | shaders.py | py | 9,906 | python | en | code | 0 | github-code | 13 |
39419187505 | from requests import Session
import json
import psycopg2
import websocket, json
from requests import Session
import psycopg2
from symbol import simil,coins
class Crypto:
def CoinCaP(self,simil):
def getInfo (): # Function to get the info
url = 'https://pro-api.coinmarketcap.com/v1/cryptocurrency/quotes/latest' # Coinmarketcap API url
# coins= "BTCUSD,ETHUSD,XLMUSD,LTCUSD,SHIBUSD,ETCUSD,XRPUSD,XMRUSD,XVGUSD,EOSUSD"
s=simil.values()
cs=list(s)
cs=cs
car=""
for coin in cs:
car+=coin+","
coins= car[:-1]
coins=coins.lower()
coins=coins.replace(" ", "-")
parameters = { 'slug': coins, 'convert': 'USD' } # API parameters to pass in for retrieving specific cryptocurrency data
# key1='4041310f-6b20-4c98-85c4-8244f356b32f'
key2='4041310f-6b20-4c98-85c4-8244f356b32f'
headers = {'Accepts': 'application/json','X-CMC_PRO_API_KEY': key2} # Replace 'YOUR_API_KEY' with the API key you have recieved in the previous step
session = Session()
session.headers.update(headers)
response = session.get(url, params=parameters)
info = json.loads(response.text)
return info
data=getInfo()
# data.keys()
date,vl,mcap,sym,sm= [],[],[],[],[]
for i in data['data'].keys():
# data=getInfo()
df=data['data'][i]
sym.append(df['name'])
sm.append(df['symbol']+'-USD')
date.append(df['date_added'][:4])
vl.append(df['quote']['USD']['volume_24h'])
mcap.append(df['quote']['USD']['market_cap'])
#Establishing the connection
conn = psycopg2.connect(
database="d6tqr9tjolth23", user='qgiojkehvmogex', password='f29a356850f099b62c0f3eecd3e15bebb3ab54e9b761838a454084848db26fc5', host='ec2-52-18-116-67.eu-west-1.compute.amazonaws.com', port= '5432'
)
#Creating a cursor object using the cursor() method
cursor = conn.cursor()
cursor.execute("DROP TABLE IF EXISTS coincap")
sql='''Create Table coincap(
Symbol varchar(40) not null,
date VARCHAR(5) not null,
vl_24 float not null,
MarketCap float not null
)
'''
cursor.execute(sql)
for i in range(len(date)):
cursor.execute('INSERT INTO coincap ( Symbol,date, vl_24 , MarketCap ) VALUES(%s,%s,%s,%s)',(sym[i],date[i],vl[i],mcap[i]))
conn.commit()
conn.close()
print('Coincap done')
def Polygon(self,coins):
key=""
for d in coins:
key+="XA."+d +"-USD,"
key=key[:-1]
# coins=['Bitcoin', 'Ethereum','Stellar','Litecoin', 'Shiba Inu', 'Ethereum Classic', 'XRP', 'Monero', 'Verge', 'EOS']
def on_open(ws):
auth_data = {
"action": "auth",
"params": 'n8pb4Jngx0gmpLLvz0aYb0flDhIs9JfO'
}
ws.send(json.dumps(auth_data))
channel_data = {
"action": "subscribe",
"params": key
}
ws.send(json.dumps(channel_data))
def on_error(ws, error):
print(error)
        sym,pair,open,close,low,high,volw,vol,e,s= [],[],[],[],[],[],[],[],[],[]
        data = {}  # shared result dict; filled by on_message and returned by Stream()
        def on_close(ws):
            print( "### closed ###")
        def on_message(ws, message):
            msg=json.loads(message)
            m=msg[0]
            if m['ev']=='XA':
                if len(pair)!=100 and m['pair'] not in pair:
                    pair.append(m['pair'])
                    sym.append(simil[m['pair']])
                    open.append(m['o'])
                    close.append(m['c'])
                    low.append(m['l'])
                    high.append(m['h'])
                    volw.append(m['vw'])
                    vol.append(m['v'])
                    e.append(m['e'])
                    s.append(m['s'])
                else:
                    ws.close()
            # update (not rebind) the shared dict so Stream() can return it
            data.update({'symbol':sym,'pair':pair,'open':open,'close':close,'low':low,'high':high,'vol_week':volw,'volume':vol})
def Stream():
websocket.enableTrace(True)
crypto = "wss://socket.polygon.io/crypto"
ws = websocket.WebSocketApp(crypto,
on_open=on_open,
on_message=on_message,
on_error=on_error,
on_close=on_close,keep_running=False)
ws.run_forever() # Set dispatcher to automatic reconnection
return data
data=Stream()
# for k in data.keys():
# locals()[k]=data[k]
# dt={'symbol':symbol,'pair':pair,'open':open,'close':close,'low':low,'high':high,'vol_week':vol_week,'volume':volume}
def postgres(data):
conn = psycopg2.connect( database="d6tqr9tjolth23", user='qgiojkehvmogex', password='f29a356850f099b62c0f3eecd3e15bebb3ab54e9b761838a454084848db26fc5', host='ec2-52-18-116-67.eu-west-1.compute.amazonaws.com', port= '5432')
cursor = conn.cursor()
#Doping EMPLOYEE table if already exists.
cursor.execute("DROP TABLE IF EXISTS crypto")
#Creating table as per requirement
sql ='''CREATE TABLE crypto(
pair CHAR(100) NOT NULL ,
Symbol CHAR(80) NOT NULL,
Price Float NOT NULL,
open Float NOT NULL,
close Float NOT NULL,
low Float NOT NULL,
high Float NOT NULL,
volume Float NOT NULL
) '''
cursor.execute(sql)
for i in range(len(data['pair'])):
# try:
cursor.execute('insert into crypto values(%s,%s,%s,%s,%s,%s,%s,%s)',(data['pair'][i],data['symbol'][i],data['open'][i],data['open'][i],data['close'][i],data['low'][i],data['high'][i],data['volume'][i]))
print(i)
# except:
# print('ok')
# pass
conn.commit()
cursor.close()
return "Table created successfully........"
postgres(data)
return data
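# (editor's note, hedged) The database credentials and API keys above are hard-coded; reading them
# from the environment is safer. The variable names below are assumptions, not project config.
import os

def _db_params():
    return dict(
        database=os.environ.get('PG_DATABASE', ''),
        user=os.environ.get('PG_USER', ''),
        password=os.environ.get('PG_PASSWORD', ''),
        host=os.environ.get('PG_HOST', 'localhost'),
        port=os.environ.get('PG_PORT', '5432'),
    )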
| D0723/forecaster-backend | pipeline.py | pipeline.py | py | 8,269 | python | en | code | 0 | github-code | 13 |
32085588458 | #!/usr/bin/python3
# This program makes the plot for L2 error of two series of data
# Author : Bruno Blais
#Python imports
import os
import sys
import numpy
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import LogLocator, FormatStrFormatter
import pylab
from scipy import stats
from matplotlib import rcParams
# User parameter
outputPDF=False
outputPNG=True
showGraphic=True
# Modify font of the graphic
font = {'weight' : 'normal',
'size' : 18}
plt.rc('font', **font)
plt.rcParams['legend.numpoints'] = 1
params = {'backend': 'ps',
'axes.labelsize': 24,
'text.fontsize': 28,
'legend.fontsize': 17,
'xtick.labelsize': 15,
'ytick.labelsize': 15,
'text.usetex': True,
}
plt.rcParams.update(params)
#================================
# FUNCTIONS
#================================
def rsquared(x, y):
""" Return R^2 where x and y are array-like."""
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
return r_value**2
#================================
# MAIN PROGRAM
#================================
syms = ["^","o","s", ">"]
labels = ["ILU-GMRES/AMG-GMRES"]
fig = plt.figure()
ax = fig.add_subplot(111) # Create plot object
#ax.set_yscale('log')
ax.set_xscale('log')
plt.ylabel('$t_{AMG-GMRES}/t_{ILU-GMRES}$')
plt.xlabel('Number of DOFs')
assert(len(sys.argv)==3)
fnameGMRES= sys.argv[1]
fnameAMG = sys.argv[2]
#Input file
print ("R-> %s" %fnameGMRES)
matGMRES = numpy.loadtxt(fnameGMRES)
timeGMRES=matGMRES[:,2]
nxGMRES=4*(2**matGMRES[:,0])**3
matAMG = numpy.loadtxt(fnameAMG)
timeAMG=matAMG[:,2]
nxAMG=matAMG[:,0]**2.
ax.plot(nxGMRES,timeGMRES/timeAMG,"-"+syms[0],label=labels[0])
ax.plot([nxGMRES[0],nxGMRES[-1]], [1,1], "k--")
ax.legend()
plt.tight_layout()
if (outputPNG): plt.savefig("./AMG-GMRES.png",dpi=300)
if (showGraphic): plt.show()
| lethe-cfd/lethe-utils | python/mms/compareSolutionTime.py | compareSolutionTime.py | py | 1,918 | python | en | code | 5 | github-code | 13 |
40322269014 | from django.shortcuts import render
from rest_framework.generics import GenericAPIView,CreateAPIView,RetrieveAPIView,UpdateAPIView
from rest_framework.views import APIView
from users.serializers import CreateUserSerializer,UserDetailSerializer,EmailSerializer,UserAddressSerializer,AddressTitleSerializer,UserBrowserHistorySerializer
from users.models import User,Address
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from rest_framework import status
from rest_framework.generics import GenericAPIView
from rest_framework.viewsets import GenericViewSet
from rest_framework.mixins import UpdateModelMixin
from rest_framework.decorators import action
# Create your views here.
class UserView(CreateAPIView):
    # specify the serializer class
serializer_class = CreateUserSerializer
class UsernameCountView(APIView):
"""判断用户是否已注册"""
def get(self, request, username):
# 查询user表
count = User.objects.filter(username=username).count()
# 包装响应数据 username不一定用的上我们也传回去
data = {
'username':username,
'count':count
}
# 响应
return Response(data)
class MobileCountView(APIView):
"""判断手机号是否已注册"""
def get(self, request, mobile):
# 查询user表
count = User.objects.filter(mobile=mobile).count()
# 包装响应数据
data = {
'mobile':mobile,
'count':count
}
# 响应
return Response(data)
class UserDetail(RetrieveAPIView):
serializer_class = UserDetailSerializer
    # queryset = User.objects.all()  # the old way: declare a queryset and look the user up by pk; it costs an extra database query, so we avoid it
    permission_classes = [IsAuthenticated]  # only authenticated users may access this view
def get_object(self):
"""重写get_object方法 返回要展示的用户模型对象"""
return self.request.user # 经过认证之后的 是哪一个用户 是否需要自己写认证类?
class EmailView(UpdateAPIView):
"""更新邮箱"""
serializer_class = EmailSerializer
permission_classes = [IsAuthenticated]
def get_object(self):
"""重写get_object方法 返回要展示的用户模型对象"""
return self.request.user
class EmailVerifyView(APIView):  # no serializer is needed here, a plain APIView is enough
    """Activate the user's email address."""
def get(self, request):
        # get the token the frontend passed as a query-string parameter
        token = request.query_params.get('token')
        # decode the token and look up the matching user object (the logic lives on the model)
        user = User.check_verify_email_token(token)
        # set the user object's email_active field to True
        if user is None:
            return Response({'message':'激活失败'}, status=status.HTTP_400_BAD_REQUEST)
        # Every file in the project (views, serializers, utils, models) can work with model objects
        # using plain Python syntax: assign to attributes (to change a record in the database),
        # fetch a single user or a queryset, create and save a new user, and so on.
        # e.g. user.check_password / user.set_password, user = oauthqqmodel.user  # the user object
        # linked by a foreign key, or return Response({
        # 'token':token,
        # 'username':user.username,
        # 'user_id':user.id
        # })
        # When creating a user, note user.set_password(validated_data.get('password'))
        # followed by user.save(); the password is special and cannot go through objects.create().
        # Also note user = User.objects.get(id=id,email=email)
        # except User.DoesNotExist:
        user.email_active = True
        user.save()
        # respond
return Response({'message':'ok'})
class AdressSetView(UpdateModelMixin, GenericViewSet):
    """CRUD views for the user's shipping addresses."""
    permission_classes = [IsAuthenticated]
    serializer_class = UserAddressSerializer
    # override get_queryset to return the queryset these views operate on
    def get_queryset(self):
        # self.request.user.addresses.all() is a queryset; it is filtered further here, so .all() can be omitted
        return self.request.user.addresses.filter(is_deleted=False)
    def create(self, request):
        """Create is overridden because the number of shipping addresses is capped at 20."""
        user = request.user
        # count = user.addresses.all().count()  # either way works
        count = Address.objects.filter(user=user).count()
        if count >= 20:
            return Response({'message':"收获地址数量达到上限"}, status = status.HTTP_400_BAD_REQUEST)
        else:
            pass
        # validate the incoming data: build the serializer, deserialize, save, and return a response
        serializer = self.get_serializer(data = request.data)
        serializer.is_valid(raise_exception = True)
        serializer.save()
        return Response(serializer.data, status= status.HTTP_201_CREATED)
# GET /addresses/
def list(self, request, *args, **kwargs):
"""
        List the user's address data.
"""
queryset = self.get_queryset()
serializer = self.get_serializer(queryset, many=True)
user = self.request.user
return Response({
'user_id': user.id,
'default_address_id': user.default_address_id,
'limit': 20,
'addresses': serializer.data,
})
# delete /addresses/<pk>/
def destroy(self, request, *args, **kwargs):
"""
处理删除
"""
address = self.get_object()
# 进行逻辑删除
address.is_deleted = True
address.save()
return Response(status=status.HTTP_204_NO_CONTENT)
    # put /addresses/pk/title/
    # requires the request-body parameter: title
    @action(methods=['put'], detail=True)  # both actions use PUT, but the generated routes differ because the method name is appended to the URL
    def title(self, request, pk=None):
        """
        Update the address title.
        """
address = self.get_object()
serializer = AddressTitleSerializer(instance=address, data=request.data)
serializer.is_valid(raise_exception=True)
serializer.save()
return Response(serializer.data)
# put /addresses/pk/status/
@action(methods=['put'], detail=True)
def status(self, request, pk=None):
"""
        Set the default address.
"""
address = self.get_object()
request.user.default_address = address
request.user.save()
return Response({'message': 'OK'}, status=status.HTTP_200_OK)
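# (editor's note, hedged) A GenericViewSet like AdressSetView is normally wired up with a DRF
# router in urls.py rather than addressed directly; a sketch, with the module path assumed:
#
#   from rest_framework.routers import DefaultRouter
#   from users import views
#
#   router = DefaultRouter()
#   router.register(r'addresses', views.AdressSetView, basename='addresses')
#   urlpatterns += router.urls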
class UserBrowserHistory(CreateAPIView):
"""用户浏览记录"""
# 指定序列化器
serializer_class = UserBrowserHistorySerializer
permission_classes = [IsAuthenticated] # 指定权限 | frankky-cyber/meiduo2 | meiduo_mail/meiduo_mail/apps/users/views.py | views.py | py | 7,122 | python | en | code | 0 | github-code | 13 |
7404235796 | #!/usr/bin/python3
""" 6. POST an email #1 """
import requests
import sys
if __name__ == '__main__':
url = sys.argv[1]
email = sys.argv[2]
data = {'email': email}
response = requests.post(url, data=data)
print(response.text)
| AmrShoukry/alx-higher_level_programming | 0x11-python-network_1/6-post_email.py | 6-post_email.py | py | 249 | python | en | code | 0 | github-code | 13 |
34615264355 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('OrderPlacer', '0007_auto_20141204_2107'),
]
operations = [
migrations.RenameField(
model_name='medicalservice',
old_name='specialty',
new_name='speciality',
),
]
| 9929105/lini | src/OrderPlacer/migrations/backup/0008_auto_20141210_0544.py | 0008_auto_20141210_0544.py | py | 406 | python | en | code | 0 | github-code | 13 |
19902330877 | from matplotlib import pyplot as plt
import numpy as np
from scipy.integrate import odeint
class Parameters():
def __init__(self):
# system parameters
self.m = 1
self.c = 1
self.k = 1
# control gains
self.kp = 0.1
self.kd = -self.c + 2 * np.sqrt((self.k + self.kp) * self.m)
def smd_rhs(z, t, m, c, k, kp, kd):
x, xdot = z[0], z[1]
# w/o control
# xdotdot = -(c*xdot + k*x)/m
# with pd control - feedback linearization
xdotdot = -((c+kd)*xdot + (k+kp)*x)/m
return [xdot, xdotdot]
def plot(t, z):
plt.figure(1)
plt.plot(t, z[:, 0])
plt.xlabel('t')
plt.ylabel('position')
plt.title('Plot of position vs time')
plt.show()
if __name__ == '__main__':
params = Parameters()
m, c, k, kp, kd = params.m, params.c, params.k, params.kp, params.kd
# let's assume mass object initially located in 0.5 point
x0, xdot0 = 0.5, 0
t0, tend = 0, 20
t = np.linspace(t0, tend)
z0 = np.array([x0, xdot0])
result = odeint(smd_rhs, z0, t, args=(m, c, k, kp, kd))
plot(t, result)
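    # (editor's addition, hedged) kd in Parameters is chosen so that the closed loop
    #   m*xddot + (c + kd)*xdot + (k + kp)*x = 0
    # is critically damped, i.e. (c + kd)**2 = 4*m*(k + kp); quick numerical check:
    residual = (c + kd) ** 2 - 4 * m * (k + kp)
    print('critical damping residual (should be ~0):', residual)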
| kimsooyoung/robotics_python | lec12_feedback_linearization/1_simple_control_partitioning/smd_main.py | smd_main.py | py | 1,124 | python | en | code | 18 | github-code | 13 |
18314472395 | from paterdal import Paterdal
def test_instance_is_correct():
"""
Verifies that a Paterdal instance initializes correctly
:return:
"""
class ExpectedData:
def __init__(self):
self.col_offset = 10
self.row_offset = 10
self.h_tile_size = 80
self.v_tile_size = 80
self.line_width = 4
self.line_color = (0, 0, 0)
self.back_color = (255, 255, 255)
self.cols = 5
self.rows = 6
actual = Paterdal()
expected = ExpectedData()
assert expected.col_offset == actual.col_offset
assert expected.row_offset == actual.row_offset
assert expected.h_tile_size == actual.h_tile_size
assert expected.v_tile_size == actual.v_tile_size
assert expected.line_width == actual.line_width
assert expected.line_color == actual.line_color
assert expected.back_color == actual.back_color
assert expected.cols == actual.cols
assert expected.rows == actual.rows
def test_calculate_points_col_0_row_0():
p = Paterdal()
expected = [
(10, 10),
(90, 10),
(90, 90),
(10, 90)
]
actual = p.calculate_points(0, 0)
assert expected == actual
def test_get_size_with_all_defaults():
"""
This tests get_size with all it's default values.
:return:
"""
p = Paterdal()
expected = (420, 500)
actual = p.get_size()
assert expected == actual
def test_calculate_new_size_200x300():
"""
This tests function with min possible
screen size. Note: also covers get_test
as well.
:return:
"""
p = Paterdal()
expected = (250, 300)
actual = p.calculate_new_size(200, 300)
assert expected == actual
def test_calculate_new_size_2560x1009():
"""
Tests calculate_new_size
:return:
"""
p = Paterdal()
expected = (2560, 2966)
actual = p.calculate_new_size(2560, 1009)
assert expected == actual
| SeanWH/paterdal | tests/tests_paterdal.py | tests_paterdal.py | py | 1,977 | python | en | code | 0 | github-code | 13 |
14505763267 | from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.action_chains import ActionChains
from bs4 import BeautifulSoup
from time import sleep
import random
from project_solo_app.models import Background_Task, Definition, Word
def definition_search(word_id=0, limit=0):
if word_id == 0 and limit == 0:
return f'Nothing to do. Job requires either a word id or a limit to proceed.'
current_collect = Background_Task.objects.filter(activity='definition_search', status=1)
if len(current_collect) !=0:
return f"Can't scrape now. Existing definition scrape may be occurring."
current_collect_object = Background_Task.objects.create(
activity = 'definition_search',
status = 1,
current = 0,
total = 0
)
if word_id > 0:
current_collect_object.current = 1
current_collect_object.total = 1
current_collect_object.save()
else:
pass
word_object = Word.objects.filter(id=word_id)
if len(word_object) == 1:
word = word_object[0].word
URL = 'https://google.com/search?q=' + word
options = Options()
options.headless = True
options.add_argument("--window-size=1920,1080")
search_driver = webdriver.Chrome('/usr/local/bin/chromedriver', options=options)
search_driver.get(URL)
search_driver.add_cookie({'name' : 'lang' , 'value' : 'v=2&lang=en-us'})
sleep(2)
action = ActionChains(search_driver)
search_page = search_driver.page_source
search_soup = BeautifulSoup(search_page, "html.parser")
span_items = search_soup.find_all('span')
for span_count in range(len(span_items)):
span_text = span_items[span_count].text
if span_text != word and span_text.find(word) > -1:
definition_object = Definition.objects.filter(word=word_object[0], source='Google')
if len(definition_object) == 0:
definition_object = Definition.objects.create(
definition = span_text,
source = 'Google',
source_url = URL,
word = word_object[0]
)
print(f'item {span_count}: {span_text}')
break
print('End of processing.')
current_collect_object.delete()
search_driver.quit()
return 'done'
# current_collect_object.total = len(li_jobs)
# current_collect_object.save() | catalystTGJ/01_project_solo | tasks/webscrape_search.py | webscrape_search.py | py | 2,570 | python | en | code | 0 | github-code | 13 |
4787290913 | import csv
path = r'C:\Users\Helen\Documents\ingest_files\test.csv'
with open(path) as csvfile:
m = list(csv.DictReader(csvfile))
columns = [c.replace('\t', '') for c in m[0].keys()]  # strip stray tabs from the header names
print(columns)
print(m[:2])
| lenapy/learn_python | coursera/introduction_to_data_science/week_1/data_files_and_summary_statistics.py | data_files_and_summary_statistics.py | py | 203 | python | en | code | 0 | github-code | 13 |
42304737139 | class Person:
name = "人間"
def __init__(self, height, weight):
self.age = 30
self.height = height
self.weight = weight
def walk(self):
print("歩きます")
def eat(self):
print("食べます")
@classmethod
def say_classmethod(cls):
print("classmethod")
@staticmethod
def say_staticmethod():
print("staticmethod")
@property
def birthday(self):
return self.__birthday
@birthday.setter
def birthday(self, birthday):
self.__birthday = birthday
class Child(Person):
def walk(self):
print("ハイハイします")
def eat(self):
super().eat()
print(self.height)
# インスタンス化
person = Person(170, 60)
# メソッド
person.walk()
person.eat()
# クラスメソッド
Person.say_classmethod()
# 静的メソッド
Person.say_staticmethod()
# インスタンス変数
print(person.height)
print(person.weight)
person.birthday = "1990/01/01"
print(person.birthday)
# Class variable
print(Person.name)
print(person.name)
Person.name = "人間2"
print(Person.name)
print(person.name)
person.height = 170
# Subclass
child = Child(70, 10)
child.walk()
print(child.height)
child.eat()
| tokitsubaki/labcode | class/class_python.py | class_python.py | py | 1,257 | python | en | code | 0 | github-code | 13 |
19117563824 | def rod(length, price, cost):
    # value[i] holds the best obtainable revenue for a rod of length i,
    # assuming price is 1-indexed and every cut costs `cost`
    value = [0] * (length + 1)
    for i in range(1, length + 1):
        m = price[i]  # option: sell the piece of length i without cutting
        for j in range(1, i):
            # cut off a piece of length j and pay the cutting cost once
            m = max(m, price[j] + value[i - j] - cost)
        value[i] = m
    return value[length] | natthakan2000/ICCS313 | a4/RodCutting.py | RodCutting.py | py | 257 | python | en | code | 0 | github-code | 13 |
18644820539 |
import array
import paho.mqtt.client as mqtt
array_size = 10
# Create an array to store the ADC data
adc_data = array.array("H", [0] * array_size)
array_pratap = []
# The callback for when the client receives a CONNACK response from the server.
def on_connect(client, userdata, flags, rc):
print("Connected with result code " + str(rc))
# Subscribing in on_connect() means that if we lose the connection and
# reconnect then subscriptions will be renewed.
client.subscribe("outTopicPratap")
# The callback for when a PUBLISH message is received from the server.
def on_message(client, userdata, msg):
for i in range(array_size):
print(msg.topic + " " + str(msg.payload))
print(str(msg.payload, "utf-8"))
array_pratap = msg.payload
print(type(array_pratap))
array_pratap = int(array_pratap)
# Read the ADC data and store it in the array
adc_data[i] = array_pratap
print (adc_data)
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.connect("broker.hivemq.com", 1883, 8000)
# Blocking call that processes network traffic, dispatches callbacks and
# handles reconnecting.
# Other loop*() functions are available that give a threaded interface and a
# manual interface.
client.loop_forever()
# Create an analog input object
# Define the size of the array
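# --- Companion publisher sketch (added for illustration; not part of the original) ---
# The subscriber above expects integer payloads on "outTopicPratap". A minimal
# way to feed it test data from another process could look like this (the
# random payload range is an assumption):
#
#   import time, random
#   import paho.mqtt.client as mqtt
#   pub = mqtt.Client()
#   pub.connect("broker.hivemq.com", 1883, 60)
#   for _ in range(10):
#       pub.publish("outTopicPratap", str(random.randint(0, 4095)))
#       time.sleep(1)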
| prataprobotics/2023-coding | PY_CODING/pythonProject/subscribe_adc_filter_random_ma.py | subscribe_adc_filter_random_ma.py | py | 1,388 | python | en | code | 0 | github-code | 13 |
11855131414 | import torch.nn as nn
import torch
from utils import TopPool, BottomPool, LeftPool, RightPool
class convolution(nn.Module):
def __init__(self, k, inp_dim, out_dim, stride=1, with_bn=True):
super(convolution, self).__init__()
pad = (k - 1) // 2
self.conv = nn.Conv2d(inp_dim, out_dim, (k, k), padding=(pad, pad), stride=(stride, stride), bias=not with_bn)
self.bn = nn.BatchNorm2d(out_dim) if with_bn else nn.Sequential()
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
conv = self.conv(x)
bn = self.bn(conv)
relu = self.relu(bn)
return relu
class pool(nn.Module):
def __init__(self, dim, pool1, pool2, pool3, pool4):
super(pool, self).__init__()
self.p1_conv1 = convolution(3, dim, 128)
self.p2_conv1 = convolution(3, dim, 128)
self.p_conv1 = nn.Conv2d(256, dim, (3, 3), padding=(1, 1), bias=False)
self.p_bn1 = nn.BatchNorm2d(dim)
self.conv1 = nn.Conv2d(dim, dim, (1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(dim)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = convolution(3, dim*2, dim)
self.pool1 = pool1()
self.pool2 = pool2()
self.pool3 = pool3()
self.pool4 = pool4()
self._initialize_weights()
def forward(self, x):
# pool 1
p1_conv1 = self.p1_conv1(x)
pool1 = self.pool1(p1_conv1)
pool1 = self.pool2(pool1)
# pool 2
p2_conv1 = self.p2_conv1(x)
pool2 = self.pool3(p2_conv1)
pool2 = self.pool4(pool2)
# pool 1 + pool 2
p_conv1 = self.p_conv1(torch.cat([pool1, pool2], 1))
p_bn1 = self.p_bn1(p_conv1)
conv1 = self.conv1(x)
bn1 = self.bn1(conv1)
relu1 = self.relu1(torch.cat([p_bn1, bn1], 1))
conv2 = self.conv2(relu1)
return conv2
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
class direction_pool(pool):
def __init__(self, dim):
super(direction_pool, self).__init__(dim, TopPool, BottomPool, LeftPool, RightPool)
def make_pool_layer(dim):
return direction_pool(dim) | xinyu-ch/ProgressiveTextDetection | models/pool_direction.py | pool_direction.py | py | 2,546 | python | en | code | 3 | github-code | 13 |
25667347990 | import sys
import numpy as np
from extract_squares_from_image import extract_cube_faces_from_stream, capture_faces, load_imgs_from_dir, capture_faces_from_images
from color_classifier import get_classifier, label_images
from Cube import Cube
from Solver import Solver
from SolutionGallery import SolutionGallery
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
from matplotlib.patches import Rectangle
from matplotlib.image import AxesImage
from matplotlib import get_backend
import cv2 as cv
from ManualSquareExtractor import ManualSquareExtractor
class ClickableRectangle:
def __init__(self, fig, ax, colors_cube, f, i_f, j_f):
self.f = f
self.i_f = i_f
self.j_f = j_f
self.colors_cube = colors_cube
self.x = i_f
self.y = j_f
self.color = self.colors_cube[f, i_f, j_f]
self.fig = fig
self.ax = ax
self.rect = Rectangle(
(self.x, self.y),
width=1,
height=1,
facecolor=self.color,
linewidth=2,
edgecolor='k',
picker=True
)
self.patch = self.ax.add_patch(self.rect)
self.active = False
clicker = self.fig.canvas.mpl_connect(
'button_press_event', lambda e: self.onclick(e))
presser = self.fig.canvas.mpl_connect(
'key_press_event', lambda e: self.keypress(e))
def onclick(self, event):
# Was the click on the same axis as the rectangle?
if event.inaxes != self.rect.axes:
self.active = False
return
# Was the click inside the rectangle?
contains, attrd = self.rect.contains(event)
if not contains:
self.active = False
return
# Only concerned with double click events
if event.dblclick:
# Set active
self.active = True
def keypress(self, event):
if not self.active:
return
elif event.key in ['w', 'W']:
self.color = 'white'
elif event.key in ['o', 'O']:
self.color = 'orange'
elif event.key in ['r', 'R']:
self.color = 'red'
elif event.key in ['g', 'G']:
self.color = 'green'
elif event.key in ['b', 'B']:
self.color = 'blue'
elif event.key in ['y', 'Y']:
self.color = 'yellow'
elif event.key == 'enter':
self.active = False
self.colors_cube[self.f, self.i_f, self.j_f] = self.color
self.patch.set_facecolor(self.color)
self.fig.canvas.draw()
class RectContainer:
def __init__(self, fig, ax_img, ax_squares, f, orig_img, faces, colors_cube, clf_yuv):
self.fig = fig
self.ax_img = ax_img
self.ax_squares = ax_squares
self.f = f
self.orig_img = orig_img
self.faces = faces
self.colors_cube = colors_cube
self.clf_yuv = clf_yuv
# Plot the extracted faces on the left of the image
self.ax_img.set_xlim(0, 150)
self.ax_img.set_ylim(0, 150)
self.ax_img.axis('equal')
self.ax_img.axis('off')
self.ax_img.imshow(self.faces)
img_clicker = self.fig.canvas.mpl_connect(
'button_press_event', lambda e: self.onclick(e))
# Plot the colors on the right of the image
self.clickable_rects = []
for s in range(9):
i_f = s % 3
j_f = int(s / 3)
cr = ClickableRectangle(
self.fig, self.ax_squares, self.colors_cube, self.f, i_f, j_f)
self.clickable_rects.append(cr)
self.ax_squares.set_xlim(0, 3)
self.ax_squares.set_ylim(0, 3)
self.ax_squares.axis('equal')
self.ax_squares.axis('off')
self.fig.canvas.draw()
def update_squares(self):
# Plot the colors on the right of the image
for s in range(9):
i_f = s % 3
j_f = int(s / 3)
cr = self.clickable_rects[s]
cr.patch.set_facecolor(self.colors_cube[self.f, i_f, j_f])
def onclick(self, event):
if event.inaxes != self.ax_img.axes:
return
if event.dblclick:
mse = ManualSquareExtractor(self.orig_img)
plt.show()
while mse.complete is False:
plt.draw()
plt.pause(0.5)
self.faces = mse.faces
pred_colors_yuv, pred_proba_yuv = label_images(
self.clf_yuv,
[self.faces.reshape(1, 150, 150, 3)],
faces_per_image=1
)
pred_colors_yuv2 = np.array(pred_colors_yuv).reshape((1, 3, 3))
self.colors_cube[self.f, :, :] = pred_colors_yuv2
del mse
self.ax_img.imshow(self.faces)
self.update_squares()
self.fig.canvas.draw()
def onpick(event):
if isinstance(event.artist, Rectangle):
patch = event.artist
print('onpick patch:', patch.get_path())
patch.set_edgecolor('lime')
event.canvas.draw()
elif isinstance(event.artist, AxesImage):
im = event.artist
A = im.get_array()
print('onpick image', A.shape)
def check_images(captured_imgs, captured_faces, colors_cube, clf_yuv):
fig, axs = plt.subplots(2, 6, figsize=(24, 6))
for f in range(6):
RectContainer(
fig,
axs[0][f],
axs[1][f],
f,
cv.cvtColor(captured_imgs[f], cv.COLOR_BGR2RGB),
captured_faces[f],
colors_cube,
clf_yuv
)
fig.tight_layout()
return
def main(input_dir=None):
# Input the faces of the Cube
if input_dir is None:
captured_faces, captured_imgs = capture_faces()
else:
input_imgs = load_imgs_from_dir(input_dir)
captured_faces, captured_imgs = capture_faces_from_images(input_imgs)
# Get the color classifier
clf_yuv = get_classifier()
# Predict the face colors from the input images
faces = np.array(captured_faces)
pred_colors_yuv, pred_proba_yuv = label_images(clf_yuv, [faces])
colors_cube = np.array(pred_colors_yuv).reshape((6, 3, 3))
    # Inspect / adjust results if necessary. This step can modify colors_cube in place.
check_images(captured_imgs, captured_faces, colors_cube, clf_yuv)
plt.show()
# Define the cube using the updated colors
c = Cube(colors_cube)
# Solve and retain moves
initial_state = c.export_state()
s = Solver(c)
s.solve()
solve_moves = c.recorded_moves
# Display the solution
sg = SolutionGallery(initial_state, solve_moves)
plt.show()
if __name__ == '__main__':
if len(sys.argv) > 1:
img_dir = sys.argv[1]
main(img_dir)
else:
main()
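# Usage sketch (added note): `python end_to_end.py` captures the six cube faces
# from a camera, while `python end_to_end.py <image_dir>` loads the face images
# from a directory instead (see the __main__ branch above).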
| pkepley/rubiksolver | src/rubiksolver/end_to_end.py | end_to_end.py | py | 6,935 | python | en | code | 0 | github-code | 13 |
70662527379 | import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from settings import settings
class StepPerceptron():
def __init__(
self, learning_rate: float, inputs: np.array, expected_outputs: np.array
):
"""Constructor method
Args:
learning_rate (float): learning rate of the perceptron
inputs (np.array): inputs of the perceptron (x_1, x_2, ..., x_n)
expected_outputs (np.array): expected outputs of the perceptron (y_1, y_2, ..., y_n)
"""
self.learning_rate = learning_rate
# add bias x_0 = 1 to each input => (1, x_1, x_2, ..., x_n)
self.inputs = np.insert(inputs, 0, 1, axis=1)
self.expected_outputs = expected_outputs
# first weight is the bias => (w_0, w_1, w_2, ..., w_n)
self.weights = np.zeros(self.inputs.shape[1])
# Data for plotting
self.historical_weights = []
self.historical_outputs = []
# Momentum
self.previous_deltas = np.zeros(self.weights.shape)
def train(self, epochs = 1000):
"""
Trains the perceptron for a given number of epochs
Args:
epochs (Optional[int]): number of epochs to train the perceptron. Defaults to 1000.
Returns:
int: number of epochs needed to converge
bool: whether the perceptron converged or not
"""
for epoch in range(epochs):
# save the weights
self.update_weights()
self.historical_weights.append(self.weights)
self.historical_outputs.append(self.get_outputs())
if self.is_converged():
break
return epoch + 1, self.is_converged()
def predict(self, X):
# Compute the perceptron's excitation for each input, including the sum of the bias
excitations = np.dot(X, self.weights[1:]) + self.weights[0]
# Apply the activation function to each element of the array
return np.vectorize(self.activation_func)(excitations)
def update_weights(self):
deltas = self.compute_deltas()
aux = deltas.copy()
deltas += 0.9 * self.previous_deltas
self.previous_deltas = aux
self.weights = self.weights + np.sum(deltas, axis=0)
def get_outputs(self):
"""Returns the perceptron's output for each input"""
# Compute the perceptron's excitation for each input, including the sum of the bias
excitations = np.dot(self.inputs, self.weights)
# Apply the activation function to each element of the array
return np.vectorize(self.activation_func)(excitations)
def activation_func(self, value):
return 1 if value >= 0 else -1 # step function
def get_error(self):
return np.sum(abs(self.expected_outputs - self.get_outputs()))
def compute_deltas(self) -> np.array:
# Get the difference between the expected outputs and the actual outputs
output_errors = self.expected_outputs - self.get_outputs()
# Compute the delta weights for each input
deltas = self.learning_rate * output_errors.reshape(-1, 1) * self.inputs
return deltas
def is_converged(self):
return self.get_error() <= 0
def save_animation_frames(self, file_name = "step_perceptron", count = 100):
# remove bias term
_inputs = self.inputs[:, 1:]
for i, (weights, outputs) in reversed(list(enumerate(
zip(self.historical_weights, self.historical_outputs)
))):
# plot the points
sns.scatterplot(
x=_inputs[:, 0],
y=_inputs[:, 1],
hue=outputs,
style=outputs,
palette=["blue", "red"],
marker="o",
)
if count == 0:
break
else:
count -= 1
xmin, xmax = np.min(_inputs[:, 0]), np.max(_inputs[:, 0])
x = np.linspace(xmin - 100, xmax + 100, 1000)
            # w1*x + w2*y + w0 = 0 => y = -(w1*x + w0) / w2
if weights[2] == 0:
y = np.zeros(len(x))
else:
y = -(weights[1] * x + weights[0]) / weights[2]
lineplot = sns.lineplot(x=x, y=y, color="black")
plt.xlim([0, 5])
plt.ylim([0, 5])
plt.legend(markerscale=2)
plt.title(f"Step Perceptron Epoch {i}")
# save the plot to a file
fig = lineplot.get_figure()
fig.savefig(f"{settings.Config.out_dir}/{file_name}_{i}.png")
# clear the current figure to prevent overlapping of plots
plt.clf()
def save_animation(self, file_name = "step_perceptron"):
# remove bias term
_inputs = self.inputs[:, 1:]
fig, ax = plt.subplots()
def update(i):
ax.clear()
weights, outputs = self.historical_weights[i], self.historical_outputs[i]
# plot the points
sns.scatterplot(
x=_inputs[:, 0],
y=_inputs[:, 1],
hue=outputs,
style=outputs,
palette=["blue", "red"],
marker="o",
)
xmin, xmax = np.min(_inputs[:, 0]), np.max(_inputs[:, 0])
x = np.linspace(xmin - 100, xmax + 100, 1000)
# w1*x + w2*y + w0 = 0 => y = -(w1*x + w0) / w2
if weights[2] == 0:
y = np.zeros(len(x))
else:
y = -(weights[1] * x + weights[0]) / weights[2]
# plot the separating hyperplane
ax.plot(x, y, c="k")
ax.set_xlim([0, 5])
ax.set_ylim([0, 5])
ax.set_title(f"Step Perceptron Epoch {i}")
anim = FuncAnimation(
fig, update, frames=len(self.historical_weights), interval=500
)
anim.save(
f"{settings.Config.out_dir}/{file_name}.gif", writer="imagemagick"
)
fig.clf()
def visualize_step_perceptron(self, X_test, y_test, filename = "step_perceptron"):
def get_hyperplane_value(x, w, b):
"""Returns the y-value of the hyperplane at point `x`"""
# offset = b + x * w1 + y * w2
return (-w[0] * x - b) / w[1]
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
# Plot testing set
plt.scatter(X_test[:, 0], X_test[:, 1], marker="o", c=y_test)
# Get the x-values for the most external points
x0_1 = np.amin(X_test[:, 0])
x0_2 = np.amax(X_test[:, 0])
# Get the y-values for the hyperplane, at the most external points
x1_1 = get_hyperplane_value(x0_1, self.weights[1:], self.weights[0])
x1_2 = get_hyperplane_value(x0_2, self.weights[1:], self.weights[0])
ax.plot([x0_1, x0_2], [x1_1, x1_2], "y--")
ax.set_ylim([0, 5])
ax.set_xlim([0, 5])
plt.savefig(f"{settings.Config.out_dir}/{filename}.png")
plt.clf()
def __str__(self) -> str:
output = "Expected - Actual\n"
for expected, actual in zip(self.expected_outputs, self.get_outputs()):
output += f"{expected:<10} {actual}\n"
output += f"\nWeights: {self.weights}"
return output
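# --- Usage sketch (added for illustration; the toy data below is made up) ---
# Trains the step perceptron on a tiny, linearly separable 2D dataset and
# prints its predictions. Labels must be -1/1 to match the step activation.
if __name__ == "__main__":
    toy_inputs = np.array([[1.0, 1.0], [1.5, 2.0], [3.0, 4.0], [4.0, 3.5]])
    toy_labels = np.array([-1, -1, 1, 1])
    perceptron = StepPerceptron(learning_rate=0.1, inputs=toy_inputs,
                                expected_outputs=toy_labels)
    epochs_needed, converged = perceptron.train(epochs=100)
    print(f"Converged: {converged} after {epochs_needed} epochs")
    # predict() takes raw inputs; the bias term is applied internally
    print(perceptron.predict(toy_inputs))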
| lvvittor/ml | tps/tp3/app/step_perceptron.py | step_perceptron.py | py | 7,495 | python | en | code | 0 | github-code | 13 |
433305424 | from PythonANN import Program
from ProgramParameters import ProgramParameters
def setNewNetwork(layers, nodes, divisbleBy, iterations, learningRate,algoNum):
input = ProgramParameters();
input._init_(layers, nodes, divisbleBy, iterations, learningRate,algoNum);
return Program(input);
userInput = True;
choice = 0;
network = None;
if (userInput):
print("This ANN judges if a number is divisble by a number");
while (choice < 3):
print("Input Arguments as follows");
print("layerSize,nodeSizes per layer as an array,divisbleBy,numOfIterations,learningRate");
print("Example:\n3,[8 4 1],3,1000,0.3");
inputs = input().split(',');
nodeSize = inputs[1][1: len(inputs[1]) - 1].split(' ');
nodeSizeInputs = [0]*len(nodeSize);
for i in range(len(nodeSize)):
nodeSizeInputs[i] = int(nodeSize[i]);
inputsAsNums = [0.0]*5;
for i in range(4):
if (i != 1):
inputsAsNums[i] = int(inputs[i])
inputsAsNums[4] = float(inputs[4]);
network = setNewNetwork(inputsAsNums[0], nodeSizeInputs, inputsAsNums[2], inputsAsNums[3], inputsAsNums[4],1);
choice = 1;
while (choice == 1):
choice = 0;
network.resetWeights();
while (choice == 0):
print("\nAccuracy: " + str(network.testOp()) + "\n\n");
print("Select an option:\n0: Run with current weights\n1: Reset weights and run\n2: Restart\n3: Exit ");
choice = int(input());
else:
print("Algorithm comparison test initiated");
node0 = [ 8, 4, 4, 2, 2, 1 ];
node1 = [ 6, 4, 4, 2, 2, 1 ];
node2 = [ 8, 2, 4, 3, 2, 1 ];
node3 = [ 4, 4, 3, 2, 2, 1 ];
node4 = [ 4, 4, 4, 2, 1, 1 ];
node5 = [ 2, 4, 4, 2, 1, 1 ];
node6 = [ 6, 6, 4, 2, 2, 1 ];
nodes = [ node0, node1, node2, node3,node4,node5,node6];
thisSet = [0.0]*100;
for aN in range(3):
topInfo = "";
toptop = 0;
for l in range(1,6):
for n in range(len(nodes)):
lR = 0.001;
while(lR<1.0):
network = setNewNetwork(l, nodes[n], 2, 10, lR, aN);
network.resetWeights();
topOutput = 0;
for i in range(50):
thisSet[i] = network.testOp();
if (topOutput < thisSet[i]):
topOutput = thisSet[i];
if (toptop < topOutput):
topInfo = "Algorithm: " + str(aN) + "\nLayers:" + str(l) + " Nodes Index: " + str(n) + " Learning Rate: " + str(lR) + "\nBest Output: " + str(topOutput);
toptop = topOutput;
lR *= 5.0;
print("Best learner:\n" + topInfo);
| MagicalPlayGames/Artifical-Neural-Networks | interface.py | interface.py | py | 2,821 | python | en | code | 0 | github-code | 13 |
30013739784 | import numpy as np
def get_data(path):
# both train and test data files have the same format
# so we only need one function to parse the data
# returns a list of tuples, of the form tuple(the_text, the_class_name)
# the list to store our tuples
the_data = []
with open(path) as dataFile:
for line in dataFile:
# each line begins with the name of the class and then follows the actual body of text
# we turn this into a tuple of the form tuple(the_text, the_class_name)
# and add it to the data list item
the_data.append(get_single_data_item(line))
return the_data
def get_single_data_item(line):
# get the tuple item from each line of data text
words = line.split()
# the class of the document is always the first term
the_class_name = words[0]
# after the first term, the data follows
text_data = " ".join(words[1:])
data_item = (text_data, the_class_name)
return data_item
def get_train_data(path="r8-train-stemmed.txt"):
# get the train data from the file r8-train-stemmed.txt
# return a list of the format: tuple(the_text, the_class_name)
# the file should be in the same folder with this file
the_train_data = get_data(path)
return the_train_data
def get_test_data(path="r8-test-stemmed.txt"):
# get the test data from the file r8-test-stemmed.txt
# return a list of the format: tuple(the_text, class_name)
# the file should be in the same folder with this file
the_test_data = get_data(path)
return the_test_data
def load_train_data_and_labels(path="r8-train-stemmed.txt"):
return load_data_and_labels(path)
def load_test_data_and_labels(path="r8-test-stemmed.txt"):
return load_data_and_labels(path)
def load_data_and_labels(path):
data_list_of_tuples = get_data(path)
# number of data texts
train_data_size = len(data_list_of_tuples)
texts = []
labels = []
# max size of single data text
max_size_of_text = 1
max_size_of_label = 1
for i in range(train_data_size):
texts.append(data_list_of_tuples[i][0])
labels.append(data_list_of_tuples[i][1])
temp_text_size = len(data_list_of_tuples[i][0])
temp_label_size = len(data_list_of_tuples[i][1])
if temp_text_size > max_size_of_text:
max_size_of_text = temp_text_size
if temp_label_size > max_size_of_label:
max_size_of_label = temp_label_size
my_nd_type_texts = np.dtype((str, max_size_of_text))
my_nd_type_labels = np.dtype((str, max_size_of_label))
data_texts_as_ndarray = np.array(texts, dtype=my_nd_type_texts)
labels_as_ndarray = np.array(labels, dtype=my_nd_type_labels)
return data_texts_as_ndarray, labels_as_ndarray
def get_metrics(labels_test, labels_predicted):
print("Metrics calculation started")
# count TP, FP, TN, FN for micro-averaging and macro-averaging
tp_index = 0
fp_index = 1
tn_index = 2
fn_index = 3
# metrics data description: "class_name":[TP_COUNT, FP_COUNT, TN_COUNT, FN_COUNT]
metrics = {"acq": [0.0, 0.0, 0.0, 0.0], "crude": [0.0, 0.0, 0.0, 0.0], "earn": [0.0, 0.0, 0.0, 0.0],
"grain": [0.0, 0.0, 0.0, 0.0], "interest": [0.0, 0.0, 0.0, 0.0], "money-fx": [0.0, 0.0, 0.0, 0.0],
"ship": [0.0, 0.0, 0.0, 0.0], "trade": [0.0, 0.0, 0.0, 0.0]}
for i in range(len(labels_test)):
# if the prediction is correct: TP for the correct class and TN for the rest of the classes
if labels_predicted[i] == labels_test[i]:
# true positive detected for correct class
metrics[labels_predicted[i]][tp_index] += 1
# true negative detected for the rest of the classes
for other_class in metrics.keys():
# skip the same class
if other_class == labels_predicted[i]:
continue
# increase the TN on the rest classes
metrics[other_class][tn_index] += 1
# if the prediction is wrong: FP for the predicted class,
# FN for the correct class and TN for the rest of the classes
else:
metrics[labels_predicted[i]][fp_index] += 1
metrics[labels_test[i]][fn_index] += 1
for other_class in metrics.keys():
if other_class == labels_predicted[i] or other_class == labels_test[i]:
continue
metrics[other_class][tn_index] += 1
print("Metrics calculation ended")
# for each class calculate precision and recall
# then for the system calculate micro-averaging and macro-averaging
print("Precision, Recall, Micro-Averaging and Macro-Averaging calculation started")
precisions = {"acq": 0, "crude": 0, "earn": 0,
"grain": 0, "interest": 0, "money-fx": 0,
"ship": 0, "trade": 0}
recalls = {"acq": 0, "crude": 0, "earn": 0,
"grain": 0, "interest": 0, "money-fx": 0,
"ship": 0, "trade": 0}
micro_averaging_precision = 0
micro_averaging_recall = 0
macro_averaging_precision = 0
macro_averaging_recall = 0
for key in precisions:
try:
precisions[key] = float(metrics[key][tp_index]) / float(metrics[key][tp_index] + metrics[key][fp_index])
except ZeroDivisionError:
print("Precision Error calculation for class:", key, "division by zero")
for key in recalls:
try:
recalls[key] = float(metrics[key][tp_index]) / float(metrics[key][tp_index] + metrics[key][fn_index])
except ZeroDivisionError:
print("Recall Error calculation in class:", key, "division by zero")
# calculate sums for all classes for: TPs, TPs+FPs, TPs+FNs
# which are needed in micro-averaging and macro-averaging
tp_sum = 0
tp_fp_sum = 0
tp_fn_sum = 0
precisions_sum = 0
recalls_sum = 0
for class_name in metrics:
tp_sum += metrics[class_name][tp_index]
tp_fp_sum += metrics[class_name][tp_index] + metrics[class_name][fp_index]
tp_fn_sum += metrics[class_name][tp_index] + metrics[class_name][fn_index]
precisions_sum += precisions[class_name]
recalls_sum += recalls[class_name]
micro_averaging_precision = float(tp_sum) / float(tp_fp_sum)
micro_averaging_recall = float(tp_sum) / float(tp_fn_sum)
macro_averaging_precision = float(precisions_sum) / float(len(metrics.keys()))
macro_averaging_recall = float(recalls_sum) / float(len(metrics.keys()))
print("Micro and Macro Averaging calculation ended")
print("")
print("Micro Averaging Precision:", micro_averaging_precision, "Micro Averaging Recall:", micro_averaging_recall)
print("Macro Averaging Precision:", macro_averaging_precision, "Macro Averaging Recall:", macro_averaging_recall)
for class_name in precisions:
print("Class", class_name, "precision:", precisions[class_name], "recall:", recalls[class_name])
class RandomClassifier:
# simple Random Classifier to be used as a borderline
# comparison of the rest of the approaches used
# pass in the frequencies of each document class
# then each prediction is random
# but the more the occurrences of each class,
# the higher the probability it will predict it
def __init__(self, freqs):
# argument freqs should be a dictionary object
# with key value pairs of the form:
# class_name: number_of_occurrences
self.frequencies = freqs
def predict(self, the_test_data):
        # returns a list of predicted class names,
        # one entry per item in the_test_data
        low = 1
        high = float(sum(self.frequencies.values()))
        predictions_np = np.random.uniform(low=low, high=high, size=(len(the_test_data), ))
        class_names_list = list(self.frequencies.keys())
limits = []
for class_name in class_names_list:
limits.append(self.frequencies[class_name])
for i in range(1, len(limits)):
limits[i] += limits[i-1]
predictions = []
for predicted_value in predictions_np:
if predicted_value < limits[0]:
predictions.append(class_names_list[0])
elif predicted_value < limits[1]:
predictions.append(class_names_list[1])
elif predicted_value < limits[2]:
predictions.append(class_names_list[2])
elif predicted_value < limits[3]:
predictions.append(class_names_list[3])
elif predicted_value < limits[4]:
predictions.append(class_names_list[4])
elif predicted_value < limits[5]:
predictions.append(class_names_list[5])
elif predicted_value < limits[6]:
predictions.append(class_names_list[6])
elif predicted_value < limits[7]:
predictions.append(class_names_list[7])
else:
print("Error of predicted value, shouldn't be such high value: ", predicted_value)
return predictions
def get_class_frequencies(labels):
freqs = {}
for i in range(len(labels)):
if labels[i] in freqs.keys():
freqs[labels[i]] += 1
else:
freqs[labels[i]] = 1
return freqs
if __name__ == "__main__":
train_data, train_labels = load_train_data_and_labels()
test_data, test_labels = load_test_data_and_labels()
class_frequencies = get_class_frequencies(train_labels)
random_classifier = RandomClassifier(class_frequencies)
predicted_labels = random_classifier.predict(test_data)
print("Metrics for Random Classifier")
print("Accuracy = " + str(np.mean(predicted_labels == test_labels)))
get_metrics(test_labels, predicted_labels)
print("==============================")
print("==============================")
print("")
| billkouts/Reuters_document_classification | randomClassifier.py | randomClassifier.py | py | 9,995 | python | en | code | 0 | github-code | 13 |
40105255949 | import requests
from bs4 import BeautifulSoup
url="https://www.instagram.com/{}/"
def pic_downloader(username):
full_url=url.format(username)
r=requests.get(full_url)
s=BeautifulSoup(r.text,"lxml")
p=s.find("meta",property="og:image").attrs['content']
with open(username+".jpg","wb") as pic:
binary=requests.get(p).content
pic.write(binary)
user_name=input("Please enter account name :")
data=pic_downloader(username=user_name)
print(data)
#"garima.__.singla"
| aryan091/WEB-SCRAPING | Instagram Pic Download.py | Instagram Pic Download.py | py | 523 | python | en | code | 0 | github-code | 13 |
38701608385 | """
Contains unit and integration tests for checking the models of the web application.
"""
import logging
import sys
from django.test import TestCase
from questions.models import QuestionCategory
from users.models import MyUser
from ..models import Post
if len(sys.argv) > 1 and sys.argv[1] == 'test':
logging.disable(logging.CRITICAL)
class TestPostModel(TestCase):
"""Test class for the Post model."""
@classmethod
def setUpTestData(cls):
"""Set up non-modified MyUser, QuestionCategory and Post objects used by all test methods."""
MyUser.objects.create(first_name='Quentin', last_name='Tarantino', username='QweenTeen',
email='queenteen@mail.ru', is_active=True)
QuestionCategory.objects.create(name='TestCategory')
author = MyUser.objects.first()
category = QuestionCategory.objects.first()
Post.objects.create(title='TestPost', author=author, category=category, body='some text')
def test_title_max_length(self):
"""Testing the maximum length of the post title."""
post = Post.objects.first()
max_length = post._meta.get_field('title').max_length
self.assertEquals(max_length, 150)
def test_string_output_of_the_post_model(self):
"""Post model string output text test."""
post = Post.objects.first()
expected_string_output = f'{post.title}'
self.assertEquals(expected_string_output, str(post))
def test_new_post_is_not_available_by_default(self):
"""Testing the initial inactivity of the post."""
post = Post.objects.first()
self.assertFalse(post.available)
| Lalluviadel/interview_quiz | posts/tests/test_models.py | test_models.py | py | 1,668 | python | en | code | 0 | github-code | 13 |
2960969217 | """Sex terms."""
from spacy import registry
from traiter.actions import text_action
from traiter.patterns.matcher_patterns import MatcherPatterns
from odonata.pylib.const import COMMON_PATTERNS, REPLACE
COLORED = """ colored """.split()
SIMILAR = """ like similar as than exactly """.split()
TRAITS = """color color_mod body_part body_part_loc""".split()
COLOR_LIKE = MatcherPatterns(
'color_like',
on_match='odonata.color_like.v1',
decoder=COMMON_PATTERNS | {
'adp': {'POS': {'IN': ['ADP']}},
'cconj': {'POS': {'IN': ['CCONJ']}},
'det': {'POS': {'IN': ['DET']}},
'sconj': {'POS': {'IN': ['SCONJ', 'ADP']}},
'sex': {'ENT_TYPE': 'sex'},
'similar': {'LOWER': {'IN': SIMILAR}},
'colored': {'LOWER': {'IN': COLORED}},
'adv': {'DEP': 'advmod'},
'mod': {'ENT_TYPE': 'color_mod'},
'prep': {'DEP': 'prep'},
'loc': {'ENT_TYPE': 'part_loc'},
},
patterns=[
'similar adp? sex',
'sconj det? adp sex',
'colored adv? similar sex',
'mod prep? loc prep* sex',
],
)
@registry.misc(COLOR_LIKE.on_match)
def color_like(ent):
"""Enrich the match."""
text_action(ent, REPLACE)
| rafelafrance/traiter_odonata | odonata/patterns/color_like.py | color_like.py | py | 1,213 | python | en | code | 0 | github-code | 13 |
29749092015 | #!/usr/bin/env python
# coding=utf-8
# author=hades
# @Time : 2018/8/17 9:24
from django.conf.urls import patterns
from django.conf.urls import url
from openstack_dashboard.dashboards.project.access_and_security.keypairs \
import views
from openstack_dashboard.dashboards.project.access_and_security.qos \
import views as qos_views
QOS = r'^(?P<qos_id>[^/]+)/%s$'
urlpatterns = patterns(
'',
url(r'^create/$', qos_views.CreateQosView.as_view(), name='create_qos'),
url(QOS % 'detail', qos_views.DetailQosView.as_view(), name='detail_qos'),
url(r'^(?P<qos_id>[^/]+)/add_rule/$',
qos_views.AddRuleView.as_view(),
name='add_rule'),
url(r'^(?P<qos_id>[^/]+)/update/$',
qos_views.UpdateView.as_view(),
name='update'),
)
| leejshades/openstack-dashboard | openstack_dashboard/dashboards/project/access_and_security/qos/urls.py | urls.py | py | 783 | python | en | code | 0 | github-code | 13 |
70416640337 | import warnings
warnings.filterwarnings("ignore")
import os
import sys
import yaml
import logging
import pandas as pd
import multiprocessing as mp
from geckoml.data import *
from geckoml.metrics import save_analysis_plots
from argparse import ArgumentParser
from functools import partial
import tqdm
# Get the GPU
# is_cuda = torch.cuda.is_available()
# device = torch.device(torch.cuda.current_device()) if is_cuda else torch.device("cpu")
# Set the default logger
logger = logging.getLogger(__name__)
# def seed_everything(seed=1234):
# """
# Set seeds for determinism
# """
# random.seed(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
# np.random.seed(seed)
# torch.manual_seed(seed)
# if torch.cuda.is_available():
# torch.cuda.manual_seed(seed)
# torch.backends.cudnn.benchmark = True
# torch.backends.cudnn.deterministic = True
def worker(replica,
species=None,
output_path=None,
hidden_dim=None,
n_layers=None,
val_in_array=None,
y_scaler=None,
output_vars=None,
val_out=None,
val_out_col_idx=None,
log_trans_cols=None,
tendency_cols=None,
stable_thresh=10,
start_times=[0]):
"""
Load GRU ensemble model, run box simulations, and compute performance metrics
Args:
replica: ensemble member ID
species: Modeled species
output_path: Output path (str)
hidden_dim: size of the GRU hidden state
        n_layers: number of hidden layers in the GRU
        val_in_array: validation input data to the GRU
        y_scaler: sklearn scaler transformation
        output_vars: List of names of the prediction outputs
        val_out: output validation data
        val_out_col_idx: column indices of the output variables
        log_trans_cols: list of log-transformed columns
        tendency_cols: list of tendency columns
"""
import os
import torch
from geckoml.models import GRUNet
from geckoml.box import rnn_box_test
is_cuda = torch.cuda.is_available()
device = torch.device(torch.cuda.current_device()) if is_cuda else torch.device("cpu")
logger.info(f"Loading ensemble member {replica} model weights")
# Get the shapes of the input and output data
input_size = val_in_array.shape[-1]
output_size = val_out.shape[-1]
# Load the model
model = GRUNet(hidden_dim, n_layers, 0.0)
model.build(input_size,
output_size,
os.path.join(output_path, f"models/{species}_gru_{replica}.pt"))
model = model.to(device)
# MAE loss
val_criterion = torch.nn.L1Loss()
# Predict on the validation split and get the performance metrics
logger.info(f"Running box simulations for all experiments using model {replica}")
scaled_box_mae, box_mae, metrics, y_preds, y_true = rnn_box_test(
model,
val_criterion,
val_in_array,
val_out,
y_scaler,
output_vars,
val_out_col_idx,
log_trans_cols,
tendency_cols,
stable_thresh=10,
start_times=start_times
)
# add extra field to the pred and truth arrays to be used later
y_true['member'] = replica
y_preds['member'] = replica
metrics["ensemble_member"] = replica
# put the results into a dictionary
results_dict = {
"replica": replica,
"y_preds": y_preds,
"y_true": y_true,
"metrics": metrics
}
# return the results
return results_dict
if __name__ == '__main__':
parser = ArgumentParser(
description="Run t 1-step GRU models using t threads, where t is integer"
)
parser.add_argument(
"-c",
dest="model_config",
type=str,
default=False,
help="Path to the model configuration (yml) containing your inputs."
)
parser.add_argument(
"-t",
dest="threads",
type=int,
default=1,
help="The number of threads to use to run box simulations. Default is 1."
)
args_dict = vars(parser.parse_args())
config_file = args_dict.pop("model_config")
threads = int(args_dict.pop("threads"))
if not os.path.isfile(config_file):
logger.warning(f"The model config does not exist at {config_file}. Failing with error.")
sys.exit(1)
with open(config_file) as cf:
conf = yaml.load(cf, Loader=yaml.FullLoader)
# How many CPUs available for multiprocessing
n_cpus = min(os.cpu_count(), threads)
############################################################
root = logging.getLogger()
root.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s:%(name)s:%(message)s')
# Stream output to stdout
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
ch.setFormatter(formatter)
root.addHandler(ch)
# Save the log file
logger_name = os.path.join(conf["output_path"], f"metrics/log.txt")
fh = logging.FileHandler(logger_name,
mode="w",
encoding='utf-8')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
root.addHandler(fh)
############################################################
# seed_everything()
species = conf['species']
data_path = conf['dir_path']
aggregate_bins = conf['aggregate_bins']
input_vars = conf['input_vars']
output_vars = conf['output_vars']
tendency_cols = conf['tendency_cols']
log_trans_cols = conf['log_trans_cols']
output_path = conf['output_path']
scaler_type = conf['scaler_type']
ensemble_members = conf["ensemble_members"]
seed = conf['random_seed']
# Get the shapes of the input and output data
input_size = len(input_vars)
output_size = len(output_vars)
# Model settings
rnn_conf = conf["model_configurations"]["RNN"]["GRU_1"]
epochs = rnn_conf["epochs"]
batch_size = rnn_conf["batch_size"]
learning_rate = rnn_conf["lr"]
weight_decay = rnn_conf["l2_weight"] if rnn_conf["l2_weight"] > 1e-12 else 0.0
n_layers = rnn_conf["n_layers"]
hidden_dim = rnn_conf["hidden_size"]
rnn_dropout = rnn_conf["rnn_dropout"]
hidden_weight = rnn_conf["hidden_weight"]
loss_weights = [
rnn_conf["precursor_weight"],
rnn_conf["gas_weight"],
rnn_conf["aero_weight"]
]
verbose = rnn_conf["verbose"]
lr_patience = rnn_conf["lr_patience"]
stopping_patience = rnn_conf["stopping_patience"]
member = rnn_conf["member"]
model_name = "GRU"
# Validation starting times
start_times = rnn_conf["validation_starting_times"]
# Load the data
logger.info(f"Loading the train and validation data for {species}, this may take a few minutes")
for folder in ['models', 'plots', 'metrics']:
os.makedirs(join(output_path, folder), exist_ok=True)
data = load_data(data_path, aggregate_bins, species, input_vars, output_vars, log_trans_cols)
transformed_data, x_scaler, y_scaler = transform_data(
data,
output_path,
species,
tendency_cols,
log_trans_cols,
scaler_type,
output_vars,
train=False
)
# Batch the training data by experiment
train_in_array = transformed_data['train_in'].copy()
n_exps = len(train_in_array.index.unique(level='id'))
n_timesteps = len(train_in_array.index.unique(level='Time [s]'))
n_features = len(input_vars)
out_col_idx = train_in_array.columns.get_indexer(output_vars)
train_in_array = train_in_array.values.reshape(n_exps, n_timesteps, n_features)
# Batch the validation data by experiment
val_in_array = transformed_data['val_in'].copy()
n_exps = len(val_in_array.index.unique(level='id'))
n_timesteps = len(val_in_array.index.unique(level='Time [s]'))
val_out_col_idx = val_in_array.columns.get_indexer(output_vars)
val_in_array = val_in_array.values.reshape(n_exps, n_timesteps, n_features)
### STOPPED HERE
n_cpus = min(ensemble_members, n_cpus)
logger.info(f"Using {n_cpus} threads to run box simulations for {ensemble_members} GRU ensemble members")
truth = {}
predictions = {}
metrics = {}
with mp.Pool(n_cpus) as p:
work = partial(worker,
species=species,
output_path=output_path,
hidden_dim=hidden_dim,
n_layers=n_layers,
val_in_array=val_in_array,
y_scaler=y_scaler,
output_vars=output_vars,
val_out=data['val_out'],
val_out_col_idx=val_out_col_idx,
log_trans_cols=log_trans_cols,
tendency_cols=tendency_cols,
stable_thresh=10,
start_times=[0])
for results_dict in tqdm.tqdm(p.imap(work, range(ensemble_members)), total=ensemble_members):
replica = results_dict['replica']
truth[f'gru_{replica}'] = results_dict['y_true']
predictions[f'gru_{replica}'] = results_dict['y_preds']
metrics[f'member_{replica}'] = results_dict['metrics']
logger.info(f'Saving the predictions to {os.path.join(output_path, "metrics")}')
all_truth = pd.concat(truth.values())
all_preds = pd.concat(predictions.values())
all_preds.to_parquet(join(output_path, f'metrics/{species}_{model_name}_preds.parquet'))
all_truth.to_parquet(join(output_path, f'metrics/{species}_{model_name}_truth.parquet'))
logger.info("Saving and plotting ensembled performance for random experiments")
save_metrics(metrics, output_path, model_name, ensemble_members, 'box')
save_analysis_plots(all_truth, all_preds, data["train_in"], data["val_in"], output_path,
output_vars, species, model_name)
# logger.info(f'Creating and saving the fourier analysis plot using the averaged values for quantites')
# fourier_analysis(all_preds, output_path, species, model_name)
| NCAR/gecko-ml | applications/run_gecko_rnn_emulators.py | run_gecko_rnn_emulators.py | py | 10,182 | python | en | code | 1 | github-code | 13 |
31291147964 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 3 10:21:27 2022
@author: dtjgp
"""
#!/usr/bin/python3
import random
from queue import Queue, PriorityQueue
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats
# ******************************************************************************
# Constants
# ******************************************************************************
SERVICE = 10.0 # av service time; kept fixed in the no-buffer case
TYPE1 = 1
arrivals=0
BusyServer=False # True: server is currently busy; False: server is currently idle
MM1=[]
# ******************************************************************************
# To take the measurements
# ******************************************************************************
class Measure:
def __init__(self,Narr,Ndep,NAveraegUser,OldTimeEvent,AverageDelay):
self.arr = Narr
self.dep = Ndep
self.ut = NAveraegUser
self.oldT = OldTimeEvent
self.delay = AverageDelay
# ******************************************************************************
# Client
# ******************************************************************************
class Client:
def __init__(self,type,arrival_time):
self.type = type
self.arrival_time = arrival_time
# ******************************************************************************
# Server
# ******************************************************************************
class Server(object):
# constructor
def __init__(self):
# whether the server is idle or not
self.idle = True
# ******************************************************************************
# arrivals *********************************************************************
def arrival_nobuffer(time, FES, queue): # arrival handler for the no-buffer case
global users
#print("Arrival no. ",data.arr+1," at time ",time," with ",users," users" )
    # cumulate statistics, used to compute the time-related measures
data.arr += 1
data.ut += users*(time-data.oldT)
data.oldT = time
# sample the time until the next event
inter_arrival = random.expovariate(lambd=1.0/ARRIVAL)
# schedule the next arrival
    FES.put((time + inter_arrival, "arrival")) # time of the next arrival
users += 1
# create a record for the client
client = Client(TYPE1,time)
# insert the record in the queue
queue.append(client)
# print("the number of users before arrival calculation: ", users)
# if the server is idle start the service
if users==1:
# sample the service time
service_time = random.expovariate(1.0/SERVICE)
# print("service_time is : ", service_time)
#service_time = 1 + random.uniform(0, SEVICE_TIME)
# schedule when the client will finish the server
        FES.put((time + service_time, "departure")) # departure time
    else: # every packet arriving while the server is busy is dropped, so undo the increment
users -= 1
queue.pop()
# print("*******************")
# print("The number of users after arrival calculation is:", users)
# print("&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&")
# ******************************************************************************
# departures *******************************************************************
def departure_nobuffer(time, FES, queue):
global users
#print("Departure no. ",data.dep+1," at time ",time," with ",users," users" )
# cumulate statistics
data.dep += 1
data.ut += users*(time-data.oldT)
data.oldT = time
# get the first element from the queue
client = queue.pop(0)
# do whatever we need to do when clients go away
data.delay += (time-client.arrival_time)
# print("The number of users before departure is: ", users)
users -= 1
# print("The number of users after departure is: ", users)
# print("&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&^&")
    # users can only be 0 or 1: departure_nobuffer runs only when users==1,
    # so after it completes users==0 and it never exceeds 1
# see whether there are more clients to in the line
# if users >0:
# # sample the service time
# service_time = random.expovariate(1.0/SERVICE)
# # schedule when the client will finish the server
# FES.put((time + service_time, "departure"))
def mean_confidence_interval(data, confidence=0.95):
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m-h, m+h
# ******************************************************************************
# the "main" of the simulation
# ******************************************************************************
def statistical_result(seed, ARRIVAL):
global users
global load
# the simulation time
time = 0
# users=0
load = SERVICE/ARRIVAL
# the list of events in the form: (time, type)
FES = PriorityQueue()
# schedule the first arrival at t=0
FES.put((0, "arrival"))
# arrival_num += 1
# simulate until the simulated time reaches a constant
while time < SIM_TIME:
(time, event_type) = FES.get()
if event_type == "arrival":
arrival_nobuffer(time, FES, MM1)
# arrival_num += 1
elif event_type == "departure":
departure_nobuffer(time, FES, MM1)
# served_num += 1
# print output data
# print("MEASUREMENTS \n\nNo. of users in the queue:",users,"\nNo. of arrivals =",
# data.arr,"- No. of departures =",data.dep)
# print("Load: ",SERVICE/ARRIVAL)
# print("\nArrival rate: ",data.arr/time," - Departure rate: ",data.dep/time)
# print("\nAverage number of users: ",data.ut/time)
# print("Average delay: ",data.delay/data.dep)
# print("Actual queue size: ",len(MM1))
# if len(MM1)>0:
# print("Arrival time of the last element in the queue:",MM1[len(MM1)-1].arrival_time)
# MM1.pop(0)
# print("\nThe loss rate is:", (data.arr - data.dep)/data.arr)
return (data.arr - data.dep)/data.arr, SERVICE/(ARRIVAL+SERVICE), SERVICE/ARRIVAL
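# Note (added): the second value returned by statistical_result() is the
# theoretical Erlang-B loss for an M/M/1/1 system. With load rho = SERVICE/ARRIVAL,
# the blocking probability is rho / (1 + rho), which simplifies to
# SERVICE / (ARRIVAL + SERVICE) -- exactly the expression used above.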
# LOAD=0.85
# ARRIVAL = SERVICE/LOAD
'''
investigate the system performance under different arrival rates,
keeping a fixed value for the average service rate
'''
arrival_list = []
Loss_rate_ave_list = []
theoryloss_ave_list = []
Load_list=[]
confidence_loss = []
SIM_TIME = 500000
for ARRIVAL in range(1, 21, 1):
Loss_rate_list = []
theoryloss_list = []
load_list = []
for i in range(1,21,1):
users = 0
data = Measure(0,0,0,0,0)
loss_real, loss_theo, load = statistical_result(i, ARRIVAL)
Loss_rate_list.append(loss_real)
Loss_rate = np.array(Loss_rate_list)
Loss_rate_mean = np.mean(Loss_rate)
theoryloss_list.append(loss_theo)
theoryloss = np.array(theoryloss_list)
theoryloss_mean = np.mean(theoryloss)
load_list.append(load)
load_array = np.array(load_list)
load_mean = np.mean(load_array)
Loss_rate_ave_list.append(Loss_rate_mean)
theoryloss_ave_list.append(theoryloss_mean)
Load_list.append(load_mean)
arrival_list.append(ARRIVAL)
#calculate the loss rate with confidence interval
confidence_loss.append(mean_confidence_interval(Loss_rate_list))
#calculate the loss rate with confidence interval errorbar
confiloss = np.array(confidence_loss).reshape((20,2))
lossavg = np.array(Loss_rate_ave_list).reshape((20,1))
yerror_range = abs(confiloss - lossavg)
yerror_range_tran = np.transpose(yerror_range)
#plot the lossRate
plt.figure(1)
plt.plot(arrival_list,Loss_rate_ave_list,color='red',marker='o',label='simulated loss probability')
plt.plot(arrival_list,theoryloss_ave_list,color='green', label='theoretical loss probability')
plt.xlabel("average inter-arrival time [ms]")
plt.ylabel("loss probability")
plt.title("M/M/1/1")
plt.xticks(arrival_list)
plt.legend()
plt.grid()
plt.savefig("loss probability vs average inter-arrival time", dpi=300)
#plot the load
plt.figure(2)
plt.plot(arrival_list,Load_list,color='red',marker='o')
plt.xlabel("average inter-arrival time [ms]")
plt.ylabel("load")
plt.title("M/M/1/1")
plt.xticks(arrival_list)
plt.legend()
plt.grid()
plt.savefig("load vs average inter-arrival time", dpi=300)
#plot the confidence interval
plt.figure(3)
plt.plot(arrival_list,Loss_rate_ave_list,' ',color='red',marker='o')
plt.plot(arrival_list,confidence_loss,' ', marker='x',color='green')
plt.xlabel("average inter-arrival time [ms]")
plt.ylabel("loss probability with confidence interval")
plt.title("M/M/1/1")
plt.xticks(arrival_list)
plt.legend()
plt.grid()
plt.savefig("loss probability with confidence interval vs average inter-arrival time", dpi=300)
#plot the confidence interval errorbar
plt.figure(4)
plt.errorbar(arrival_list,Loss_rate_ave_list,yerr=yerror_range_tran, ls=" ",fmt='.',ecolor='g',color='r')
plt.xlabel("average inter-arrival time [ms]")
plt.ylabel("loss probability with confidence interval errorbar")
plt.title("M/M/1/1")
plt.xticks(arrival_list)
plt.legend()
plt.grid()
plt.savefig("loss probability with confidence interval errorbar vs average inter-arrival time", dpi=300)
'''
When there is no buffer, the base station (BS) behaves as follows:
1. While no packet is present it stays idle as usual.
2. When a packet arrives it is served immediately; any packet that arrives
   before that service finishes is dropped.
lab1_task1 assumes a fixed service rate.
average waiting delay: every packet that actually enters service is served
    immediately, so with no buffer the average waiting delay is 0.
average buffer occupancy: with no buffer the average buffer occupancy is 0.
loss probability: the fraction of all packets that reach the BS but are never served.
busy time: the total time the server spends serving packets.
''' | dtjgp/management-and-content-delievery-for-smart-network | lab1_task1.py | lab1_task1.py | py | 10,332 | python | en | code | 1 | github-code | 13 |
42804444907 |
from pyquil.gates import *
from pyquil.quil import Program
LAYER_XZ_OPTIONS_DEFAULT={
'nlayers': 1,
'dist': 1,
}
def layer_xz(params, qubits_chosen, options=LAYER_XZ_OPTIONS_DEFAULT):
"""
Variational circuit alternating between single-X rotations and constant
distance controlled-Z gates (see Schuld et al. arXiv:1804.00633
[quant-ph]).
Args:
params: list[float]
A list of parameters used for the circuit.
qubits_chosen: list[int]
List of indices for the qubits that the circuit acts on.
options: dictionary
Further information specifying the details of the
variational circuit. Entries include
nlayers: int
Number of layers.
dist: int
Distance between the control and target qubits.
See the 'r' parameter in (Fig. 4, Schuld et al).
Return:
A pyquil Program object representing the circuit.
"""
out = Program()
nlayers = options['nlayers']
dist = options['dist']
# iterate through the layers
for i in range(0, nlayers):
# controlled-z gate layer
out = out + layer_controlled_z(qubits_chosen, dist)
# single-x gate layer
out = out + layer_single_x(params, qubits_chosen)
return out
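# Usage sketch (added for illustration; the angles below are arbitrary):
# build a two-layer circuit on four qubits with nearest-neighbour CZ entanglers.
#
#   prog = layer_xz([0.1, 0.2, 0.3, 0.4], [0, 1, 2, 3],
#                   options={'nlayers': 2, 'dist': 1})
#   print(prog)  # pyquil Program containing the CZ and RX instructions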
def layer_single_x(params, qubits_chosen):
"""
A layer of single X rotations.
Args:
params: list[float]
Rotation angles for each qubit.
qubits_chosen: list[int]
List of indices of the qubits that current layer acts
on.
Return:
A pyquil Program object representing the circuit layer.
"""
output = Program()
nqubits = len(qubits_chosen)
for i in range(0, nqubits):
output = output + Program(RX(params[i], qubits_chosen[i]))
return output
def layer_controlled_z(qubits_chosen, distance):
"""
An entangling layer which applies CZ gate on pairs of qubits that are
separated by a specified distance.
Args:
qubits_chosen: list[int]
List of indices of qubits that current layer acts on.
distance: int
Distance between the control and the target qubit for
each CZ gate.
Return:
A pyquil Program object representing the circuit layer.
"""
out = Program()
nqubits = len(qubits_chosen)
if nqubits == 2:
return Program(CZ(qubits_chosen[0], qubits_chosen[1]))
for i in range(0, nqubits):
out = out + Program(CZ(qubits_chosen[i],
qubits_chosen[(i+distance) % nqubits]))
return out
| zapatacomputing/QClassify | src/qclassify/proc_circ.py | proc_circ.py | py | 2,317 | python | en | code | 26 | github-code | 13 |
70101632979 | import pika
# remote server IP addr
remote_server_addr = 'a.b.c.d'
credentials = pika.PlainCredentials('guest', 'guest')
parameters = pika.ConnectionParameters(remote_server_addr,
5672,
'/',
credentials)
connection = pika.BlockingConnection(parameters)
channel = connection.channel()
channel.queue_declare(queue='hello')
def callback(ch, method, properties, body):
print("Received %s" % (body,))
channel.basic_consume(callback,
queue='hello',
no_ack=True)
# loop forever
channel.start_consuming()
| CuteLemon/Learn | RabbitMQ/receiver.py | receiver.py | py | 671 | python | en | code | 0 | github-code | 13 |
7511155226 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, LSTM,Dropout
import tensorflow as tf
import yfinance as yf
from textblob import TextBlob
import nltk
import pandas_datareader as data
import requests
from bs4 import BeautifulSoup
import pandas as pd
import re
from datetime import date
import datetime as datetime
import streamlit as st
import time
from dateutil.relativedelta import relativedelta
API="EQ6PLX2EW5FA0DFL"
def sentiment(Symbol,API,time_from):
url = "https://www.alphavantage.co/query"
params = {
"function": "NEWS_SENTIMENT",
"tickers": Symbol,
"apikey": API,
"time_from":time_from,
"limit":"200"
}
response = requests.get(url, params=params)
data = response.json()
return data
#Historical data function
def history(Symbol,API):
url = "https://www.alphavantage.co/query?"
params = {
"function":"TIME_SERIES_DAILY_ADJUSTED",
"symbol":Symbol,
"apikey":API,
"outputsize":"full"
}
response_HD = requests.get(url, params=params)
data = response_HD.json()
return data
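# Usage sketch (added note): both helpers return the parsed JSON payload from
# Alpha Vantage, e.g.
#   news = sentiment("PG", API, "20220304T0000")  # news feed with sentiment scores
#   daily = history("PG", API)                    # full daily adjusted OHLCV history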
with open('C:\\Users\\admin\\Desktop\\BCS 4.2\\Final year project\\Implementation\\Project semi-final\\styles.css') as f:
css = f.read()
st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)
header= st.container()
# Add widgets to the columns
with header:
intro=st.title("Stock Market Analysis and Prediction ")
#st.markdown(
# )
Symbol=st.sidebar.text_input('Enter the name of your stock',"PG").upper()
time_from=st.sidebar.text_input('Enter the start date(YYYYMMDDTHHMM)',"20220304T0000")
start_date = time_from[:4] + "-" + time_from[4:6] + "-" + time_from[6:8]
# buttons
with open('C:\\Users\\admin\\Desktop\\BCS 4.2\\Final year project\\Implementation\\Project semi-final\\stockstyle.css') as f:
style= f.read()
headers=["Historical Data", "Chart", "Sentiments","Predict","About us"]
tab1, tab2, tab3,tab4,tab5 = st.tabs(headers)
with tab1:
#Historical Data
hist_data=history(Symbol,API)
stock_data = hist_data["Time Series (Daily)"]
stock_data =pd.DataFrame(stock_data).T
#stock_data.columns
stock_data=stock_data.rename(columns={'1. open':"Open","2. high":"High","3. low":"Low","4. close":"Close","6. volume":"Volume"})
hd_cols=["Open","High","Low","Close","Volume"]
stock_data=stock_data[hd_cols].astype(float)
stock_data= stock_data.loc[:start_date,:]
stock_data.index=pd.to_datetime(stock_data.index,format='%Y-%m-%d')
#stock_data.tail()
s=stock_data
s.index=s.index.date
s=stock_data.style.set_properties(**{'background-color': 'transparent',
'color': 'black',
'border-color': 'black'})
st.markdown("<p class=parag>Historical data upto the previous day</p> "
f"<style>{style}</style>"
,unsafe_allow_html=True )
with st.spinner('Please wait...'):
time.sleep(10)
st.dataframe(s,5000,2000)
with tab2:
# Create selectbox
options = ['Open', 'Close', 'High', 'Low']
selected_option = st.sidebar.selectbox('Select chart to display', options)
# Filter data based on selected option
if selected_option == 'Open':
price_col = stock_data['Open']
price="Open price"
elif selected_option == 'Close':
price_col = stock_data['Close']
price="Close price"
elif selected_option == 'High':
price_col = stock_data['High']
price="High price"
else:
price_col = stock_data['Low']
price="Low price"
# Plot the stock data
fig, ax = plt.subplots(figsize=(10,7))
price_col.plot(ax=ax, color="red", linestyle="-", linewidth=2)
# Add grid and legend
plt.grid(True)
plt.legend([price], loc="upper left")
# Set the chart title and axis labels
plt.title(f"{Symbol} Stock Prices")
plt.xlabel("Date")
plt.ylabel("Price ($)")
# Show the chart in Streamlit
st.pyplot(fig)
with tab3:
#SENTIMENT DATA
data=sentiment(Symbol,API,time_from)
news_items = data['feed']
feed=pd.DataFrame(news_items)
feed["time_published"] = pd.to_datetime(feed["time_published"])
feed = feed.query('time_published.dt.dayofweek<5')
feed['time_published'] = feed['time_published'].dt.date
#feed
cols=['title','time_published','summary','overall_sentiment_score','overall_sentiment_label']
sentiment_data=feed[cols]
#text polarity
# function to get sentiment score using TextBlob
def get_sentiment_score(text):
blob = TextBlob(text)
return blob.sentiment.polarity
text=sentiment_data['summary']+" "+sentiment_data['title']
# add sentiment score column to dataframe
sentiment_data['Sentiment_Score_2'] = text.apply(get_sentiment_score)
#st.header(f"Latest News Headlines for {Symbol}")
st.markdown(f"<p class=parag>Latest News Headlines for {Symbol}</p> "
f"<style>{style}</style>"
,unsafe_allow_html=True )
user_cols=['title','time_published','summary','overall_sentiment_label']
user_view=sentiment_data[user_cols]
st.dataframe(user_view)
#st.set_page_config(title="Stock Market Analysis and Prediction ", layout="wide")
#t.image()
with open('C:\\Users\\admin\\Desktop\\BCS 4.2\\Final year project\\Implementation\\Project semi-final\\styles.css') as f:
css = f.read()
st.markdown(f'<style>{css}</style>', unsafe_allow_html=True)
#st.markdown("",unsafe_allow_html=True)
contents= st.container()
with contents:
#prepare sentiment input data
input_col=['time_published','Sentiment_Score_2']
sentiment_input=sentiment_data[input_col]
sentiment_input=sentiment_input.rename(columns={'time_published':'Date'})
#sentiment_input
grouped_sent=sentiment_input.groupby(['Date']).mean().reset_index()
grouped_sent["Date"]=pd.to_datetime(grouped_sent["Date"],format="%Y-%m-%d")
grouped_sent["Date"]=grouped_sent["Date"].dt.date
score_col=['Sentiment_Score_2']
grouped_sent[score_col]=grouped_sent[score_col].astype(float)
#grouped_sent
#Combine sentiment data with historical data
stock_data=stock_data.reset_index()
stock_data=stock_data.rename(columns={'index':'Date'})
input_data= pd.merge(grouped_sent,stock_data, on='Date', how='inner')
#input_data
input_data.dropna(axis=0,inplace=True)
#input_data
#TECHNICAL DATA
#calculate exponential Moving Averages
input_data['EMA_50']=input_data['Close'].ewm(span=50,adjust=False).mean()
input_data['EMA_200']=input_data['Close'].ewm(span=200,adjust=False).mean()
#calculate trend where 1 represents rising and 0 represents falling
def get_trend(row):
if row['EMA_50'] > row['EMA_200'] and row['Open'] > row['EMA_50'] and row['Close'] > row['EMA_50']:
return 1
else:
return 0
input_data['Trend'] = input_data.apply(get_trend, axis=1)
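# An equivalent vectorized form of the trend rule above (shown here only as an
# illustrative comment, not executed) would be:
#   input_data['Trend'] = np.where((input_data['EMA_50'] > input_data['EMA_200'])
#                                  & (input_data['Open'] > input_data['EMA_50'])
#                                  & (input_data['Close'] > input_data['EMA_50']), 1, 0)
# which avoids the row-wise apply() and is faster on large frames.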
#drop any null rows
input_data.dropna(inplace=True)
#input_data
#FUNDAMENTAL DATA
#Income Statement quarterly
url = 'https://www.alphavantage.co/query'
params = {
"function": "INCOME_STATEMENT",
"symbol": Symbol,
"apikey": API
}
response = requests.get(url, params=params)
data_inc = response.json()
#print(data_inc.keys())
income_data=pd.DataFrame(data_inc['quarterlyReports'])
#income_data.head()
#Balance Sheet
url = 'https://www.alphavantage.co/query'
params = {
"function": "BALANCE_SHEET",
"symbol": Symbol,
"apikey": API
}
response = requests.get(url, params=params)
data_bals = response.json()
#print(data_bals.keys())
balance_sheet=pd.DataFrame(data_bals['quarterlyReports'])
#balance_sheet.head()
#income statement columns
income_cols=['fiscalDateEnding','netIncome']
inc_df=income_data[income_cols]
net_income=['netIncome']
inc_df[net_income]=inc_df[net_income].astype(float)
inc_df['fiscalDateEnding']=pd.to_datetime(inc_df['fiscalDateEnding'],format='%Y-%m-%d')
#inc_df
#balancesheet columns
bals_cols=['fiscalDateEnding','totalShareholderEquity','commonStockSharesOutstanding']
bals_df=balance_sheet[bals_cols]
share_cols=['totalShareholderEquity','commonStockSharesOutstanding']
bals_df['fiscalDateEnding']=pd.to_datetime(bals_df['fiscalDateEnding'],format='%Y-%m-%d')
bals_df[share_cols]=bals_df[share_cols].astype(float)
#bals_df
fd_data=pd.merge(bals_df,inc_df,on='fiscalDateEnding',how='inner')
#fd_data.columns
# Calculate the financial ratios
fd_data["ROE"] = fd_data["netIncome"]/fd_data["totalShareholderEquity"]
fd_data["EPS"] = fd_data["netIncome"]/fd_data["commonStockSharesOutstanding"]
fd_data["P/E"] = stock_data["Close"]/fd_data["EPS"]
# Select the desired columns
selected_columns = ["fiscalDateEnding" ,"ROE", "EPS", "P/E"]
fd_data['Quarter'] = pd.PeriodIndex(fd_data['fiscalDateEnding'], freq='Q')
fd_data = fd_data[selected_columns]
#create an empty row to hold first row values after shift
fd_data.loc[-1] = [None] * len(fd_data.columns)
fd_data.index =fd_data.index + 1
fd_data = fd_data.sort_index()
#shift rows
fd_data[['ROE', 'EPS', 'P/E']] = fd_data[['ROE', 'EPS', 'P/E']].shift(-1)
#fd_data
#assign next quarter date to first row of first column
new_date = fd_data.loc[1,"fiscalDateEnding"] + relativedelta(months=3)
fd_data.loc[0,"fiscalDateEnding"]= new_date
#add a column quarter
fd_data['Quarter'] = pd.PeriodIndex(fd_data['fiscalDateEnding'], freq='Q')
#fd_data=fd_data.dropna()
#Add fundamental data to our input data
#Add quarter column to input data
input_data['Quarter'] = pd.PeriodIndex(input_data['Date'], freq='Q')
#input_data
final_input=pd.merge(input_data,fd_data,on='Quarter',how='left')
symbol_label = {
'DOW': 0,
'GOOG': 1,
'AMZN': 2,
'MSFT':3,
'AAPL':4,
'TSLA':5,
'PG':6,
'META':7,
'AMD':8,
'NFLX':9,
'TSM':10,
'KO':11,
'F':12,
'COST':13,
'DIS':14,
'VZ':15,
'CRM':16,
'INTC':17,
'BA':18,
'BX':19,
'NOC':20,
'PYPL':21,
'ENPH':22,
'NIO':23,
'ZS':24,
'XPEV':25
}
symbol_vec = symbol_label[Symbol]
final_input['symbol_label'] = symbol_vec
final_input.sort_values(by='Date', ascending = False, inplace = True)
final_input=final_input.reset_index()
#final_input
with tab4:
st.write("Click here to get your predictions")
predictbtn=st.button('Predict')
if predictbtn:
with st.spinner('Analyzing data...'):
time.sleep(10)
from tensorflow.keras.models import *
stock_model = "C:\\Users\\admin\\Desktop\\BCS 4.2\\Final year project\\Implementation\\Project semi-final\\Stock_prediction_Timeseries final.h5"
#final_input=pd.read_csv("C:\\Users\\admin\\Desktop\\BCS 4.2\\Final year project\\Implementation\\AMZN_final_input.csv")
cols = ['Open', 'Close', 'Sentiment_Score_2', 'EMA_50','EMA_200','Trend', 'ROE', 'EPS', 'P/E' ,'symbol_label','High', 'Low']
model_input= final_input[cols]
model_input
#normalize test data
scaler = MinMaxScaler(feature_range=(0, 1))
model_data_scaled=scaler.fit_transform(model_input)
y_test_scaled=[]
x_test_scaled=[]
#Declare input and target test lists
for i in range(5,len(model_input)-1):
x_test_scaled.append(model_data_scaled[i-5:i,:])
y_test_scaled.append(model_data_scaled[i+1,[10,11]])
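# Shape note (assuming the 12 feature columns selected in `cols` above): each
# x_test_scaled sample is a (5, 12) window of the 5 preceding rows, and each
# y_test_scaled entry holds the next row's scaled High/Low (columns 10 and 11).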
#convert to arrays
y_test_scaled,x_test_scaled=np.array(y_test_scaled),np.array(x_test_scaled)
#y_test_scaled
model = load_model(stock_model)
prediction_scaled=model.predict(x_test_scaled)
# create new scaler object with n_features = 2
scaler_new = MinMaxScaler(feature_range=(0, 1))
scaler_new.n_features_in_ = 2
scaler_new.min_, scaler_new.scale_ = scaler.min_[[10,11]], scaler.scale_[[10,11]]
# inverse transform predictions
prediction = scaler_new.inverse_transform(prediction_scaled)
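# Equivalent manual computation (illustrative comment): MinMaxScaler stores the
# affine map X_scaled = X * scale_ + min_, so copying min_/scale_ for columns
# 10 and 11 (High, Low) lets inverse_transform recover prices as
#   prediction = (prediction_scaled - scaler.min_[[10, 11]]) / scaler.scale_[[10, 11]]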
prediction_dic={'Predicted High':prediction[:,0],'Predicted Low':prediction[:,1]}
prediction_df=pd.DataFrame(prediction_dic)
#prediction_df.iloc[0,:]
st.markdown(f"<p class=val>Todays High will be : {prediction_df.iloc[0,0]}</p> "
f"<p class=val>Todays Low will be : {prediction_df.iloc[0,1]}</p> "
f"<style>{style}</style>"
,unsafe_allow_html=True )
| Kennedy-Wanjiku/finalyearapp | StockMarket_UI.py | StockMarket_UI.py | py | 15,466 | python | en | code | 0 | github-code | 13 |
42084563138 | import itertools
import sys
from collections import Counter
sys.setrecursionlimit(10 ** 9)
input = sys.stdin.readline
M = 1000000007
N = int(input())
left = list(map(int, input().split()))
counter = Counter(left)
def dp(t, n1, n2, n3):
cached = t.get((n1, n2, n3))
if cached is not None:
return cached
remaining = n1 + n2 + n3
assert remaining > 0
last_cnt = left[remaining - 1] + 1
res = 0
if last_cnt == n1:
res += dp(t, *sorted([n1 - 1, n2, n3]))
res %= M
if last_cnt == n2:
res += dp(t, *sorted([n1, n2 - 1, n3]))
res %= M
if last_cnt == n3:
res += dp(t, *sorted([n1, n2, n3 - 1]))
res %= M
# print(f"{remaining}: ({n1},{n2},{n3}) => {res}")
t[n1, n2, n3] = res
return res
def solve():
h = [0, 0, 0]
for i in range(N):
k = counter[i]
if k == 3:
h[2] = h[1] = h[0] = i + 1
elif k == 2:
h[2] = h[1] = i + 1
elif k == 1:
h[2] = i + 1
else:
break
if sum(h) != N:
return 0
t = dict()
t[0, 0, 0] = 1
res = dp(t, *h)
return (res * len(set(itertools.permutations(h)))) % M
print(solve())
| keijak/comp-pub | atcoder/sumitrust2019/E/main_dp.py | main_dp.py | py | 1,219 | python | en | code | 0 | github-code | 13 |
27736111316 | import tkinter as tk
def convert_temperature():
try:
temperature = float(entry.get())
if var.get() == 1: # Fahrenheit to Celsius
result.set((temperature - 32) * 5/9)
elif var.get() == 2: # Celsius to Fahrenheit
result.set((temperature * 9/5) + 32)
except ValueError:
result.set("Invalid input")
def on_validate_input(P):
    # Accept empty/partial input while typing, otherwise require a parseable number.
    if P in ("", "-", ".", "-."):
        return True
    try:
        float(P)
        return True
    except ValueError:
        return False
root = tk.Tk()
root.title("Temperature Converter")
var = tk.IntVar()
frame = tk.Frame(root)
frame.pack(pady=20)
label = tk.Label(frame, text="Enter Temperature:")
label.grid(row=0, column=0)
validate_input = root.register(on_validate_input)
entry = tk.Entry(frame, validate="key", validatecommand=(validate_input, "%P"))
entry.grid(row=0, column=1)
radio_fahrenheit = tk.Radiobutton(frame, text="Fahrenheit to Celsius", variable=var, value=1)
radio_fahrenheit.grid(row=1, column=0, padx=10)
radio_celsius = tk.Radiobutton(frame, text="Celsius to Fahrenheit", variable=var, value=2)
radio_celsius.grid(row=1, column=1, padx=10)
convert_button = tk.Button(frame, text="Convert", command=convert_temperature)
convert_button.grid(row=2, columnspan=2, pady=10)
result = tk.StringVar()
result.set("")
output_label = tk.Label(frame, textvariable=result)
output_label.grid(row=3, columnspan=2)
root.mainloop()
| Swapnil-Singh-99/PythonScriptsHub | Temperature Converter/temperature_converter.py | temperature_converter.py | py | 1,404 | python | en | code | 19 | github-code | 13 |
13340326045 | from matplotlib import pyplot as plt
import statsmodels.api as sm
import seaborn as sns
import pandas as pd
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from statsmodels.datasets.longley import load_pandas
# check the pairplot (scatter plot)
dfy = load_pandas().endog
dfX = load_pandas().exog
df = pd.concat([dfy, dfX], axis=1)
sns.pairplot(dfX)
plt.show()
# correlation matrix
dfX.corr()
# heat map
cmap = sns.light_palette("darkgray", as_cmap=True)
sns.heatmap(dfX.corr(), annot=True, cmap=cmap)
plt.show()
# the condition number is high because of multicollinearity
def get_model1(seed):
df_train, df_test = train_test_split(df, test_size=0.5, random_state=seed)
model = sm.OLS.from_formula(
"TOTEMP ~ GNPDEFL + POP + GNP + YEAR + ARMED + UNEMP", data=df_train
)
return df_train, df_test, model.fit()
df_train, df_test, result1 = get_model1(3)
print(result1.summary())
# Splitting the data into training and validation sets and comparing regression performance shows that overfitting has occurred.
def calc_r2(df_test, result):
target = df.loc[df_test.index].TOTEMP
predict_test = result.predict(df_test)
RSS = ((predict_test - target) ** 2).sum()
TSS = ((target - target.mean()) ** 2).sum()
return 1 - RSS / TSS
test1 = []
for i in range(10):
df_train, df_test, result = get_model1(i)
test1.append(calc_r2(df_test, result))
# we can see that overfitting occurred (val < 1)
print(test1)
# (Remedies)
# remove dependent variables via variable selection
# remove dependent components via PCA (principal component analysis)
# use a regularized method
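# Minimal sketch of the "regularized method" remedy listed above (not part of the
# original analysis; alpha=1.0 and L1_wt=0.0 are arbitrary illustrative values,
# with L1_wt=0.0 giving a pure ridge penalty):
def get_model_ridge(seed, alpha=1.0):
    df_train, df_test = train_test_split(df, test_size=0.5, random_state=seed)
    model = sm.OLS.from_formula(
        "TOTEMP ~ GNPDEFL + POP + GNP + YEAR + ARMED + UNEMP", data=df_train
    )
    return df_train, df_test, model.fit_regularized(alpha=alpha, L1_wt=0.0)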
# compute VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
vif = pd.DataFrame()
vif["VIF Factor"] = [
variance_inflation_factor(dfX.values, i) for i in range(dfX.shape[1])
]
vif["features"] = dfX.columns
print(vif)
# Selecting independent variables using the correlation coefficients and VIF shows that the three variables GNP, ARMED, and UNEMP alone give a similar level of performance.
def get_model2(seed):
df_train, df_test = train_test_split(df, test_size=0.5, random_state=seed)
model = sm.OLS.from_formula(
"TOTEMP ~ scale(GNP) + scale(ARMED) + scale(UNEMP)", data=df_train
)
return df_train, df_test, model.fit()
df_train, df_test, result2 = get_model2(3)
print(result2.summary())
# When multicollinearity is removed, the gap between training performance and validation performance shrinks.
test2 = []
for i in range(10):
df_train, df_test, result = get_model2(i)
test2.append(calc_r2(df_test, result))
test2
plt.subplot(121)
plt.plot(test1, "ro", label="검증 성능")
plt.hlines(result1.rsquared, 0, 9, label="학습 성능")
plt.legend()
plt.xlabel("시드값")
plt.ylabel("성능(결정계수)")
plt.title("다중공선성 제거 전")
plt.ylim(0.5, 1.2)
plt.subplot(122)
plt.plot(test2, "ro", label="검증 성능")
plt.hlines(result2.rsquared, 0, 9, label="학습 성능")
plt.legend()
plt.xlabel("시드값")
plt.ylabel("성능(결정계수)")
plt.title("다중공선성 제거 후")
plt.ylim(0.5, 1.2)
plt.suptitle("다중공선성 제거 전과 제거 후의 성능 비교", y=1.04)
plt.tight_layout()
plt.show()
## Applying this to the Boston house price prediction problem
from sklearn.datasets import load_boston
import numpy as np
boston = load_boston()
dfX0 = pd.DataFrame(boston.data, columns=boston.feature_names)
from patsy import dmatrix
formula = (
"scale(CRIM) + scale(I(CRIM ** 2)) + "
+ "scale(ZN) + scale(I(ZN ** 2)) + scale(INDUS) + "
+ "scale(NOX) + scale(RM) + scale(AGE) + "
+ "scale(np.log(DIS)) + scale(RAD) + scale(TAX) + "
+ "scale(np.log(PTRATIO)) + scale(B) + scale(np.log(LSTAT)) + CHAS"
)
dfX = dmatrix(formula, dfX0, return_type="dataframe")
dfy = pd.DataFrame(boston.target, columns=["MEDV"])
idx_outlier = np.array(
[
7,
54,
148,
152,
160,
214,
253,
267,
364,
365,
367,
368,
369,
371,
372,
374,
380,
385,
397,
398,
399,
400,
401,
405,
409,
410,
412,
413,
414,
415,
416,
418,
419,
426,
445,
489,
490,
492,
505,
161,
162,
163,
166,
186,
195,
204,
225,
257,
267,
283,
368,
369,
370,
371,
372,
]
)
idx = list(set(range(len(dfX))).difference(idx_outlier))
dfX = dfX.iloc[idx, :].reset_index(drop=True)
dfy = dfy.iloc[idx, :].reset_index(drop=True)
# draw the correlation heatmap
cmap = sns.light_palette("black", as_cmap=True)
sns.heatmap(dfX.corr(), annot=True, fmt="3.1f", cmap=cmap)
plt.show()
# check VIF
vif = pd.DataFrame()
vif["VIF Factor"] = [
variance_inflation_factor(dfX.values, i) for i in range(dfX.shape[1])
]
vif["features"] = dfX.columns
vif = vif.sort_values("VIF Factor").reset_index(drop=True)
vif
model_boston1 = sm.OLS(np.log(dfy), dfX)
result_boston1 = model_boston1.fit()
print(result_boston1.summary())
| doheelab/backend-study | Statistics/regression/다중공선성과 변수 선택.py | 다중공선성과 변수 선택.py | py | 5,366 | python | ko | code | 0 | github-code | 13 |
41478229173 | from flask import Flask
from flask import jsonify
import json
import requests
from pymongo import MongoClient
import random
app = Flask(__name__)
smhi_url = 'https://opendata-download-warnings.smhi.se/api/version/2.json'
data ={"events": [{"id": 0, "name": "average wind speed at sea", "severity": "Moderate", "description": "Lördag kväll tillfälligt sydväst 14 m/s."}, {"id": 1, "name": "heavy snow SMHI-B", "description": "Lördag sent eftermiddag och kväll, i den västra och nordligaste delen, snö eller blötsnö som kan ge 1-4 cm. I övriga delar faller nederbörden mest som regn eller snöblandat regn.", "severity": "Hazardous"}]}
images = {"wind": "http://cdn.video.nationalgeographic.com/45/af/7613e67c456588dedde7d7da0fae/nw-dly-ds1702001-238-tornado-storm-chasing-vin-spd-op-p170629.jpg",
"rain": "https://d1u4oo4rb13yy8.cloudfront.net/article/71489-klgwvidznp-1508409046.jpg",
"thunder": "https://media.phillyvoice.com/media/images/05152018_lightning_stock_Pexels.2e16d0ba.fill-735x490.jpg"}
def get_smhi_data():
req = requests.get("https://opendata-download-warnings.smhi.se/api/version/2/alerts.json").json()
data = {"events": []}
id_ = 0
for alert in req["alert"]:
print(alert)
eventCode = list(filter(lambda e: e["valueName"] == "system_event_level_sv-SE", alert["info"]["eventCode"]))[0]
name = eventCode["value"]
severity = alert["info"]["severity"]
description = alert["info"]["description"]
event = random.choice(["wind", "rain", "thunder"])
image = images[event]
id_ += 1
data["events"].append({"name": name, "severity": severity, "description": description, "id": id_, "imageUrl": image})
return data
@app.route("/smhi")
def smhi():
# resp = requests.get(url=smhi_url)
# data = resp.json()
return app.response_class(response=json.dumps(get_smhi_data()), mimetype="application/json")
@app.route("/")
def hello():
return "Hello world!"
@app.route("/mongodb")
def mongo():
client = MongoClient('localhost', 27017)
# db = client.get_default_database()
db = client['mydb']
events = db['events']
events.insert_many(data['events'])
query = {'severity': 'Moderate' }
cursor = events.find(query)
ourdata = []
for doc in cursor:
    doc["_id"] = str(doc["_id"])  # ObjectId is not JSON serializable
    ourdata.append(doc)
return app.response_class(response=json.dumps(ourdata), mimetype="application/json") | ThisIsCodeCommunity/disaster_relief_back_end | index.py | index.py | py | 2,434 | python | en | code | 0 | github-code | 13 |
10807516381 | import platform
import sys
import numpy as np
import cv2
from rknn.api import RKNN
import torch
model = 'mnist_cnn.pt'
rknn_model = 'mnist_cnn.rknn'
input_size_list = [[1, 28, 28]]
jpg_data_path = './test.jpg'
npy_data_path = './test.npy'
dataset_path = 'dataset.txt'
def postprocess(input_data):
index = input_data.argmax()
print(' The digit number is {}, with predicted confidence as {}'.format(index, input_data[0,index]))
def prepare_data(jpg_data_path, npy_data_path, dataset_path):
img = cv2.imread(jpg_data_path)
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray_img = gray_img.reshape(28, 28, 1) # hw --> hwc, this is important, don't miss it
np.save(npy_data_path, gray_img)
with open(dataset_path, 'w') as F:
F.write(npy_data_path)
if __name__ == '__main__':
# Prepare input data
prepare_data(jpg_data_path, npy_data_path, dataset_path)
# Default target and device_id
target = 'rv1126'
device_id = None
# Parameters check
if len(sys.argv) == 1:
print("Using default target rv1126")
elif len(sys.argv) == 2:
target = sys.argv[1]
print('Set target: {}'.format(target))
elif len(sys.argv) == 3:
target = sys.argv[1]
device_id = sys.argv[2]
print('Set target: {}, device_id: {}'.format(target, device_id))
elif len(sys.argv) > 3:
print('Too many arguments')
print('Usage: python {} [target] [device_id]'.format(sys.argv[0]))
print('Such as: python {} rv1126 c3d9b8674f4b94f6'.format(
sys.argv[0]))
exit(-1)
# Create RKNN object
rknn = RKNN()
# pre-process config
print('--> Config model')
rknn.config(mean_values=[[0.1307*255]],
std_values=[[0.3081*255]],
target_platform=[target])
print('done')
# Load Pytorch model
print('--> Loading model')
ret = rknn.load_pytorch(model=model, input_size_list=input_size_list)
if ret != 0:
print('Load Pytorch model failed!')
rknn.release()
exit(ret)
print('done')
# Build model
print('--> Building model')
ret = rknn.build(do_quantization=True, dataset=dataset_path)
if ret != 0:
print('Build model failed!')
rknn.release()
exit(ret)
print('done')
# Export RKNN model
print('--> Export RKNN model')
ret = rknn.export_rknn(rknn_model)
if ret != 0:
print('Export RKNN model failed!')
rknn.release()
exit(ret)
print('done')
# Init runtime environment
print('--> Init runtime environment')
if target.lower() == 'rk3399pro' and platform.machine() == 'aarch64':
print('Run demo on RK3399Pro, using default NPU.')
target = None
device_id = None
ret = rknn.init_runtime(target=target, device_id=device_id)
if ret != 0:
print('Init runtime environment failed')
rknn.release()
exit(ret)
print('done')
# Load data
input_data = np.load(npy_data_path)
# Inference with RKNN
print('--> Running model')
outputs = rknn.inference(inputs=[input_data])
print('done')
print('--> RKNN result')
postprocess(torch.tensor(outputs[0]))
rknn.release()
# Inference with PyTorch
pt_model = torch.jit.load(model)
pt_input_data = input_data.transpose(2,0,1) # hwc -> chw
pt_input_data = pt_input_data.reshape(1, *pt_input_data.shape) # chw -> nchw
pt_input_data = (pt_input_data/255.0 - 0.1307)/0.3081
pt_input_data = torch.tensor(pt_input_data).float()
pt_result = pt_model(pt_input_data)
print('--> PT result')
postprocess(pt_result)
| rockchip-linux/rknn-toolkit | examples/common_function_demos/single_channel_input/mnist/test.py | test.py | py | 3,673 | python | en | code | 658 | github-code | 13 |
11026482382 | from anduril import constants
from anduril.arrayio import get_array
from fasta import fasta_itr
import anduril.main
def fasta_merge(cf):
"""Merge an array of fastafiles."""
outfh = open(cf.get_output('output'), 'w')
fastafiles = get_array(cf, 'fastafiles')
cf.write_log(str(fastafiles))
for key, fastafile in fastafiles:
for rec in fasta_itr(fastafile):
outfh.write(str(rec) + "\n")
outfh.close()
return constants.OK
anduril.main(fasta_merge)
| mc-assemblage/nembase | components/FASTAMerge/fastamerge.py | fastamerge.py | py | 459 | python | en | code | 0 | github-code | 13 |
16673421757 | def take(count, iterable):
counter = 0
for item in iterable:
if counter == count:
return
counter += 1
yield item
def distinct(iterable):
seen = set()
for item in iterable:
if item in seen:
continue
yield item
seen.add(item)
def run_pipeline():
items = [3, 6, 2, 1, 1]
for item in take(3, distinct(items)):
print(item)
"""Generators are lazy and only evaluate the values they need to.
itertools is a package with a bunch of iterable generators
"""
run_pipeline()
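# The itertools module mentioned in the note above offers ready-made equivalents
# of these helpers; this is a small illustrative sketch (not part of the original
# script): islice() plays the role of take(), and dict.fromkeys() drops duplicates
# while preserving order, like distinct().
import itertools

def run_pipeline_itertools():
    items = [3, 6, 2, 1, 1]
    for item in itertools.islice(dict.fromkeys(items), 3):
        print(item)

run_pipeline_itertools()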
| nickmcsimpson/python_playground | corepy/generators.py | generators.py | py | 578 | python | en | code | 0 | github-code | 13 |
2123553279 | import logging
import json
from kafka import KafkaConsumer
from kafka import KafkaProducer
from flask import Flask
app = Flask(__name__)
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
logger = logging.getLogger(__name__)
queues = {}
producer = KafkaProducer(bootstrap_servers='localhost:9092',
key_serializer=str.encode,
value_serializer=str.encode)
def kafka_receive_loop():
consumer = KafkaConsumer(bootstrap_servers=['localhost:9092'],
group_id="fl",
auto_offset_reset='latest',
key_deserializer=bytes.decode,
value_deserializer=bytes.decode)
consumer.subscribe(['worker-to-fl'])
for message in consumer:
logger.info(message)
value = json.loads(message.value)
if "IN_FL" == value['error']:
logger.error("error in fl do nothing")
else:
response = {"install": "installed",
"validate": "validated"}
producer.send('fl-to-worker', key=message.key,
value='{} {}'.format(response.get(value.get('command'), ""),
value['ruleid']))
if __name__ == "__main__":
logger.info('Start kafka loop...')
kafka_receive_loop()
| chrisatnoviflow/open-kilda | docs/design/hub-and-spoke/v7/poc/nb&fl/fl.py | fl.py | py | 1,395 | python | en | code | null | github-code | 13 |
12345036805 | """
Given a string consisting of lowercase letters
count the number of non-increasing and non-decreasing sequences in the input
Sample Input :gfcbdhdd
Output :3(gfcb | dh | dd)
Approach :)
At the start we consider the sequence type to be none and define the type of
sequence (whether it's non-increasing or non-decreasing) based on the first two
characters; for subsequent characters we define the sequence based on the
previous character we've seen.
We define sequence types using numbers as
increasing sequence with 1
decreasing sequence with -1
same character with 0
While traversing the string, if we find that the current character does not
obey the previous characters' sequence, we reset the sequence type to none and
determine the sequence type again from the current character onwards.
We increase the sequence count whenever the sequence type becomes none.
"""
def getParts(s):
if(not s):
return 0
count = 0
seq = None
index = 0
n = len(s)
prev = ''
curr = ''
while index < n:
if(seq is None):
#Getting the new sequence type and
#increasing the count
count += 1
prev = s[index]
index += 1
if(index >= n):
return count
curr = s[index]
if(ord(curr) > ord(prev)):
seq = 1
elif(ord(curr) == ord(prev)):
seq = 0
else:
seq = -1
curr = s[index]
if(ord(curr) >= ord(prev) and (seq == 1 or seq == 0)):
if(ord(curr) > ord(prev)):
seq = 1
prev = curr
index += 1
elif(ord(curr) <= ord(prev) and (seq == -1 or seq == 0)):
if(ord(curr) < ord(prev)):
seq = -1
prev = curr
index += 1
else:
seq = None
return count
#Driver Code for tesing
print("gfcbdhdd", getParts("gfcbdhdd"))
print("ffdhbbbdeeggbb", getParts('ffdhbbbdeeggbb'))
print("aabccad", getParts("aabccad"))
| souravs17031999/100dayscodingchallenge | strings/count_non_increasing_non_decreasing_sequences.py | count_non_increasing_non_decreasing_sequences.py | py | 2,061 | python | en | code | 43 | github-code | 13 |
17785456993 | import os
import matplotlib.pyplot
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.datasets import mnist
import tensorflow_datasets as tfds
(ds_train, ds_test), ds_info = tfds.load(
"mnist",
split=["train", "test"],
shuffle_files=True,
as_supervised=True, # to return a tuple
with_info=True
)
def normalize_img(image, label):
# normalize images
return tf.cast(image, tf.float32)/255.0, label
AUTOTUNE = tf.data.experimental.AUTOTUNE
BATCH_SIZE = 64
ds_train = ds_train.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_train = ds_train.cache()
ds_train = ds_train.shuffle(ds_info.splits["train"].num_examples)
ds_train = ds_train.batch(BATCH_SIZE)
ds_train = ds_train.prefetch(AUTOTUNE)
ds_test = ds_test.map(normalize_img, num_parallel_calls=AUTOTUNE)
ds_test = ds_test.batch(128)
ds_test = ds_test.prefetch(AUTOTUNE)
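# Note on the input-pipeline ordering above: map() then cache() stores the already
# normalized images, shuffle() runs before batch() so individual examples (not
# whole batches) are shuffled, and prefetch() overlaps data preparation with training.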
model = keras.Sequential([
keras.Input((28, 28, 1)),
layers.Conv2D(32, 3, activation='relu'),
layers.Flatten(),
layers.Dense(10),
])
model.compile(
loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
optimizer=keras.optimizers.Adam(learning_rate=0.001),
metrics=["accuracy"],
)
model.fit(ds_train, epochs=5)
model.evaluate(ds_test)
| iampaigedlamini/TensorFlow | TensorFlow_datasets.py | TensorFlow_datasets.py | py | 1,325 | python | en | code | 0 | github-code | 13 |
32808079501 |
from setuptools import setup
from codecs import open
from os import path
import argparse
from distutils.extension import Extension
import numpy as np
# If building on OSX, may need to do this first:
# export CFLAGS="-I/Users/nielsond/miniconda3/lib/python3.6/site-packages/numpy/core/include $CFLAGS"
DEBUG = False
USE_CYTHON = True
if USE_CYTHON:
from Cython.Build import cythonize
from Cython.Compiler.Options import get_directive_defaults
if USE_CYTHON and DEBUG:
print("Using Cython with Debug Flags")
extensions = [
Extension('meld.cluster_topdown', ['meld/cluster_topdown.pyx'], define_macros=[('CYTHON_TRACE', '1')]),
Extension('meld.tfce',
['meld/tfce.pyx'],
define_macros=[('CYTHON_TRACE', '1')],
include_dirs=[np.get_include()]),
Extension('meld.pycluster', ['meld/pycluster.pyx'])
]
directive_defaults = get_directive_defaults()
directive_defaults['linetrace'] = True
directive_defaults['binding'] = True
extensions = cythonize(extensions, gdb_debug=True)
elif USE_CYTHON:
print("Using Cython")
extensions = [
Extension('meld.cluster_topdown', ['meld/cluster_topdown.pyx']),
Extension('meld.tfce',
['meld/tfce.pyx'],
include_dirs=[np.get_include()]),
Extension('meld.pycluster', ['meld/pycluster.pyx'])
]
extensions = cythonize(extensions)
else:
extensions = [
Extension('meld.cluster_topdown', ['meld/cluster_topdown.c']),
Extension('meld.tfce',
['meld/tfce.cpp'],
include_dirs=[np.get_include()]),
Extension('meld.pycluster', ['meld/pycluster.cpp'])
]
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='meld',
version='0.1.0',
description='Mixed effects models for large datasets',
long_description=long_description,
packages=['meld'],
package_dir={"meld": "meld"},
package_data={'meld': ['*.pxd']},
author=['Per B. Sederberg', 'Dylan M. Nielson'],
maintainer=['Per B. Sederberg', 'Dylan M. Nielson'],
maintainer_email=['psederberg@gmail.com', 'dylan.nielson@gmail.com'],
url=['http://github.com/compmem/meld'],
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Mathematics',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3.5',
],
keywords='mixed effects models rpy2',
install_requires=['numpy', 'scipy', 'rpy2', 'joblib', 'jinja2', 'pandas'],
ext_modules=extensions,
zip_safe=False
)
| compmem/MELD | setup.py | setup.py | py | 3,295 | python | en | code | 3 | github-code | 13 |
73126796498 | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class account_journal(models.Model):
_inherit = "account.journal"
deferred_check = fields.Boolean('Chèque différé', help="Cocher cette case si c'est un journal pour les chèques différé dans le pos")
check= fields.Boolean('Chèque', help="Cocher cette case si c'est un journal pour les chèques dans le pos")
check_kdo= fields.Boolean('Chèque KDO', help="Cocher cette case si c'est un journal pour les chèques KDO dans le pos")
avoir_journal = fields.Boolean('Journal d\'avoir', help='Cocher cette case pour considérer ce journal pour les avoir')
type = fields.Selection(selection_add=[('avoir_type', 'Avoir')], ondelete={'avoir_type': 'cascade'})
@api.onchange('type')
def on_change_type_user(self):
self.deferred_check = False
@api.onchange('type')
def on_change_type_user_journal(self):
self.check = False
@api.onchange('type')
def on_change_type_journal_kdo(self):
self.check_kdo = False
@api.onchange('type')
def onchange_type_general(self):
self.avoir_journal = False | hilinares1/MADEMO | tit_pos_paiement/models/account_journal.py | account_journal.py | py | 1,099 | python | fr | code | 0 | github-code | 13 |