index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
22,300 | f1194c84475b4499c6cb95a033362dfff761611c | # -*- coding: utf-8 -*-
from __future__ import division
import os,sys,datetime,pickle
import requests, json
import bs4 as BeautifulSoup
class BaseCollection:
    """Base dict-backed collection: maps a column/key to a stored value.

    Bug fixes vs. the original:
    - count() called the nonexistent dict.key(); len(self.items) is used.
    - find() used Python-2-only dict.has_key(); `in` works on 2 and 3.
    - iterItems() called the nonexistent dict.iterItems(); items() is an
      iterable on both Python 2 (list) and Python 3 (view).
    """
    def __init__(self):
        # key -> stored value (subclasses decide what the values are)
        self.items = {}
    def clear(self):
        """Remove every entry."""
        self.items.clear()
    def count(self):
        """Number of keys stored."""
        return len(self.items)
    def countItem(self, column):
        """Number of elements stored under *column* (KeyError if absent)."""
        return len(self.items[column])
    def find(self, column):
        """Return the value under *column*, or None when absent."""
        if column in self.items:
            return self.items[column]
        return None
    def iterItems(self):
        """Iterate over (key, value) pairs."""
        return self.items.items()
class StockCodeItem:
    """One listed stock: market type, short code, full code, company name."""

    def __init__(self, market_type, code, full_code, company):
        # Plain value object; attributes mirror the constructor arguments.
        self.company = company
        self.full_code = full_code
        self.code = code
        self.market_type = market_type
class StockCode:
    """Registry of StockCodeItem objects keyed by stock code.

    Fixes: iterItems() used Python-2-only dict.iteritems(), and dump()
    used the Python 2 print statement; both now work on Python 2 and 3
    (single-argument parenthesized print is valid in both).
    """
    def __init__(self):
        # code -> StockCodeItem
        self.items = {}
    def count(self):
        """Number of registered codes."""
        return len(self.items)
    def clear(self):
        """Drop all registered codes."""
        self.items.clear()
    def add(self, market_type, code, full_code, company):
        """Register (or replace) the item stored under *code*."""
        a_item = StockCodeItem(market_type, code, full_code, company)
        self.items[code] = a_item
    def remove(self, stock_code):
        """Delete *stock_code*; raises KeyError when absent."""
        del self.items[stock_code]
    def find(self, stock_code):
        """Return the item for *stock_code*; raises KeyError when absent."""
        return self.items[stock_code]
    def iterItems(self):
        """Iterate over (code, StockCodeItem) pairs."""
        return self.items.items()
    def dump(self):
        """Print every registered code with its metadata."""
        index = 0
        for key, value in self.items.items():
            print("%s : %s - Code=%s, Full Code=%s, Company=%s" % (index, value.market_type, key, value.full_code, value.company))
            index += 1
class PortfolioItem:
    """A single holding inside a model portfolio."""

    def __init__(self, index, column, code, company):
        self.index = index      # position within the model's item list
        self.column = column    # source column label
        self.code = code        # stock code
        self.company = company  # company name
        self.df = None          # dataframe attached later via setData()
        self.score = 0          # ranking score; 0 until computed

    def setData(self, df):
        """Attach the item's dataframe."""
        self.df = df
class Portfolio(BaseCollection):
    """Groups PortfolioItem entries per model name.

    self.items maps model -> list of PortfolioItem.
    """
    def findCode(self, model, code):
        """Return the item with *code* under *model*, or None.

        Bug fix: the original rebound `model` to the list returned by
        find() and then used that list as a dict key, which raises
        TypeError (unhashable) instead of returning the item.
        """
        entries = self.find(model)
        if entries is None:
            return None
        for a_item in entries:
            if a_item.code == code:
                return a_item
        return None
    def add(self, column, model, code, company):
        """Append a new PortfolioItem under *model*, creating the bucket."""
        portfolio = self.find(model)
        if portfolio is None:
            self.items[model] = []
        #if self.findCode(model,code) is not None:
        #    return
        a_item = PortfolioItem(self.countItem(model), column, code, company)
        self.items[model].append( a_item )
    def makeUniverse(self, column, model, stock_dict):
        """Add every code -> company pair in *stock_dict* under *model*."""
        for key in stock_dict.keys():
            self.add(column, model, key, stock_dict[key])
    def dump(self):
        """Print the portfolio contents (parenthesized single-argument
        prints work on both Python 2 and 3)."""
        print(">>> Portfolio.dump <<<")
        for key in self.items.keys():
            print("- model=%s" % (key))
            for a_item in self.items[key]:
                print("... column=%s : index=%s, code=%s, company=%s" % (a_item.column, a_item.index, a_item.code, a_item.company))
        print("--- Done ---")
class TradeItem:
    """One recorded trade: model, stock code, data-row index and position."""

    def __init__(self, model, code, row_index, position):
        #self.trade_date = trade_date
        self.position = position
        self.row_index = row_index
        self.code = code
        self.model = model
|
22,301 | a003febcdf20a008c52f7b183a743f6266d15efb | """
"""
from __future__ import absolute_import
import logging
import sys
import os
from PyQt4 import QtCore, QtGui, uic
import praxes
from .ui import resources
from .phynx import FileModel, FileView, ExportRawCSV, ExportCorrectedCSV
from praxes.io import phynx
#logger = logging.getLogger(__file__)
class MainWindow(QtGui.QMainWindow):
    """
    Top-level praxes window: hosts the phynx file tree, the tool/export
    menus and (optionally) the spec acquisition interface.
    """
    def __init__(self, log_level=logging.CRITICAL, parent=None):
        super(MainWindow, self).__init__(parent)
        uic.loadUi(resources['mainwindow.ui'], self)
        # Assign the corners so dock widgets tile into the side areas.
        self.setCorner(QtCore.Qt.TopLeftCorner, QtCore.Qt.LeftDockWidgetArea)
        self.setCorner(QtCore.Qt.BottomLeftCorner, QtCore.Qt.LeftDockWidgetArea)
        self.setCorner(QtCore.Qt.TopRightCorner, QtCore.Qt.RightDockWidgetArea)
        self.setCorner(QtCore.Qt.BottomRightCorner, QtCore.Qt.BottomDockWidgetArea)
        self.setDockNestingEnabled(True)
        # spec-file key -> open h5 file (see getH5FileFromKey).
        self._specFileRegistry = {}
        self.fileModel = FileModel(self)
        self.fileView = FileView(self.fileModel, self)
        self.setCentralWidget(self.fileView)
        # TODO: will acquisition work on other platforms?
        if sys.platform != 'linux2':
            self.menuAcquisition.setEnabled(False)
        self.expInterface = None
        self.statusBar.showMessage('Ready', 2000)
        self._currentItem = None
        # QAction -> tool class offering offersService()/construction.
        self._toolActions = {}
        self._setupToolActions()
        # Restore window geometry/state saved by closeEvent().
        settings = QtCore.QSettings()
        settings.beginGroup('MainWindow')
        self.restoreGeometry(settings.value('Geometry').toByteArray())
        self.restoreState(settings.value('State').toByteArray())
        import praxes
        # TODO: this should be a factory function, not a method of the main win:
        praxes.application.registerService('ScanView', self.newScanWindow)
        praxes.application.registerService('FileInterface', self)
    def _createToolAction(
        self, name, target, helptext=None, icon=None
    ):
        # *target* must be able to say whether it applies to a tree item.
        assert hasattr(target, 'offersService')
        action = QtGui.QAction(name, self)
        action.setVisible(False)
        self._toolActions[action] = target
        action.triggered.connect(self.toolActionTriggered)
        return action
    def _setupToolActions(self):
        # MCA analysis is optional -- skip its menu entry if unavailable.
        try:
            from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
            self.menuTools.addAction(
                self._createToolAction("Analyze MCA", McaAnalysisWindow)
            )
        except ImportError:
            pass
        self.menuExport.addAction(
            self._createToolAction("Raw data", ExportRawCSV)
        )
        self.menuExport.addAction(
            self._createToolAction("Corrected data", ExportCorrectedCSV)
        )
    @QtCore.pyqtSignature("")
    def on_actionAboutQt_triggered(self):
        QtGui.qApp.aboutQt()
    @QtCore.pyqtSignature("")
    def on_actionAboutPraxes_triggered(self):
        QtGui.QMessageBox.about(self, self.tr("About Praxes"),
            self.tr("Praxes Application, version %s\n\n"
                "Praxes is a user interface for controlling synchrotron "
                "experiments and analyzing data.\n\n"
                "Praxes depends on several programs and libraries:\n\n"
                "    spec: for controlling hardware and data acquisition\n"
                "    SpecClient: a python interface to the spec server\n"
                "    PyMca: a set of programs and libraries for analyzing "
                "X-ray fluorescence spectra"%praxes.__version__))
    @QtCore.pyqtSignature("")
    def on_actionImportSpecFile_triggered(self, force=False):
        # Pick a spec file, choose an h5 target, convert, then open it.
        f = '%s'% QtGui.QFileDialog.getOpenFileName(
            self,
            'Open File',
            os.getcwd(),
            "Spec datafiles (*.dat *.mca);;All files (*)"
        )
        if f:
            # Keep prompting until a usable target filename is chosen.
            while 1:
                h5_filename = str(
                    QtGui.QFileDialog.getSaveFileName(
                        self,
                        'Save HDF5 File',
                        os.path.join(os.getcwd(), f+'.h5'),
                        'HDF5 files (*.h5 *.hdf5 *.hdf *.nxs)'
                    )
                )
                if h5_filename and os.path.isfile(h5_filename):
                    res = QtGui.QMessageBox.question(
                        self,
                        'overwrite?',
                        'Do you want to overwrite the existing file?',
                        QtGui.QMessageBox.Yes,
                        QtGui.QMessageBox.No
                    )
                    if res == QtGui.QMessageBox.Yes:
                        os.remove(h5_filename)
                    else:
                        continue
                break
            if h5_filename:
                self.statusBar.showMessage('Converting spec data...')
                #QtGui.qApp.processEvents()
                from praxes.io.phynx.migration.spec import convert_to_phynx
                h5file = convert_to_phynx(
                    f, h5_filename=h5_filename, force=True, report=True
                )
                h5file.close()
                self.statusBar.clearMessage()
                self.openFile(h5_filename)
    @QtCore.pyqtSignature("")
    def on_menuTools_aboutToShow(self):
        # Show only the tool actions applicable to the selected tree item.
        index = self.fileView.currentIndex()
        self._currentItem = self.fileModel.getNodeFromIndex(index)
        if self._currentItem is not None:
            for action, tool in self._toolActions.items():
                action.setVisible(tool.offersService(self._currentItem))
    @QtCore.pyqtSignature("")
    def on_actionOffline_triggered(self):
        if self.expInterface is None: return
        if self.expInterface.name == 'spec':
            self.connectToSpec(False)
    @QtCore.pyqtSignature("")
    def on_actionOpen_triggered(self):
        self.openFile()
    @QtCore.pyqtSignature("bool")
    def on_actionSpec_toggled(self, bool):
        self.connectToSpec(bool)
    def connectToSpec(self, bool):
        # bool=True: connect and dock the spec widgets; False: tear down.
        if bool:
            from praxes.instrumentation.spec.specinterface import ConnectionAborted
            try:
                from praxes.instrumentation.spec.specinterface import SpecInterface
                self.expInterface = SpecInterface(self)
            except ConnectionAborted:
                return
            if self.expInterface:
                self.actionConfigure.setEnabled(True)
                for key, (item, area, action) in \
                        self.expInterface.dockWidgets.items():
                    self.menuView.addAction(action)
                    self.addDockWidget(area, item)
            else:
                # Connection failed -- flip the UI back to offline.
                self.actionOffline.setChecked(True)
        else:
            if self.expInterface:
                self.actionConfigure.setEnabled(False)
                for key, (item, area, action) in \
                        self.expInterface.dockWidgets.items():
                    self.removeDockWidget(item)
                    self.menuView.removeAction(action)
                self.expInterface.close()
                self.expInterface = None
    def closeEvent(self, event):
        # Refuse to close while child views remain open.
        for view in praxes.application.openViews:
            view.close()
        if praxes.application.openViews:
            event.ignore()
            return
        self.connectToSpec(False)
        # Persist geometry/state for the next session (see __init__).
        settings = QtCore.QSettings()
        settings.beginGroup("MainWindow")
        settings.setValue('Geometry', QtCore.QVariant(self.saveGeometry()))
        settings.setValue('State', QtCore.QVariant(self.saveState()))
        #self.fileModel.close()
        return event.accept()
    def getH5FileFromKey(self, key):
        # Cache h5 files per spec-file key; prompt to create on first use.
        h5File = self._specFileRegistry.get(key, None)
        if not h5File:
            default = key + '.h5'
            h5File = self.saveFile(default)
            if h5File:
                self._specFileRegistry[key] = h5File
        return h5File
    ## TODO: The following two methods needs to be generalized
    ## given a scan, offer analyses options
    def getScanView(self, scan):
        # this is a shortcut for now, in the future the view would be
        # an overview of the entry with ability to open different analyses
        if isinstance(scan, phynx.registry['Entry']):
            from ..fluorescence.mcaanalysiswindow import McaAnalysisWindow
            if len(scan['measurement'].mcas) > 0:
                return McaAnalysisWindow(scan, self)
            else:
                msg = QtGui.QErrorMessage(self)
                msg.showMessage(
                    'The entry you selected has no MCA data to process'
                )
    def newScanWindow(self, scan):
        # Returns the new view, or None when the scan offers nothing to show.
        self.statusBar.showMessage('Configuring New Analysis Window ...')
        scanView = self.getScanView(scan)
        if scanView is None:
            self.statusBar.clearMessage()
            return
        scanView.show()
        self.statusBar.clearMessage()
        return scanView
    def openFile(self, filename=None):
        if filename is None:
            filename = QtGui.QFileDialog.getOpenFileName(
                self,
                'Open File',
                os.getcwd(),
                "hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
            )
        if filename:
            self.fileModel.openFile(str(filename))
    def saveFile(self, filename=None):
        # NOTE(review): filename=None would raise inside os.path.isfile;
        # callers appear to always pass a string -- confirm.
        if os.path.isfile(filename):
            return self.fileModel.openFile(filename)
        else:
            newfilename = QtGui.QFileDialog.getSaveFileName(
                self,
                "Save File",
                os.path.join(os.getcwd(), filename),
                "hdf5 files (*.h5 *.hdf5 *.hdf *.nxs)"
            )
            if newfilename:
                newfilename = str(newfilename)
                # Force a recognized hdf5 extension.
                if os.path.splitext(newfilename)[-1] not in (
                    '.h5', '.hdf5', '.hdf', '.nxs'
                ):
                    newfilename = newfilename + '.h5'
                return self.fileModel.openFile(newfilename)
    def toolActionTriggered(self):
        # Instantiate the tool registered for the QAction that fired.
        self.statusBar.showMessage('Configuring...')
        action = self.sender()
        if action is not None and isinstance(action, QtGui.QAction):
            tool = self._toolActions[action](self._currentItem, self)
            if isinstance(tool, QtGui.QWidget):
                tool.show()
        self.statusBar.clearMessage()
#def main():
# import sys
# from .application import PraxesApplication
#
# app = PraxesApplication(sys.argv)
# app.setOrganizationName('Praxes')
# app.setApplicationName('praxes')
# mainwindow = MainWindow()
# mainwindow.show()
#
# sys.exit(app.exec_())
#
#
#if __name__ == "__main__":
# main()
|
22,302 | b715f5fe81844621f3db8cab6a3ff8d6f63ff852 | #!/usr/bin/env python
import roslib; roslib.load_manifest('c3_trajectory_generator')
import rospy
import actionlib
import tf
from tf import transformations
from std_msgs.msg import Header
from visualization_msgs.msg import InteractiveMarker, InteractiveMarkerControl, MenuEntry
from interactive_markers.interactive_marker_server import InteractiveMarkerServer
from geometry_msgs.msg import Quaternion, Twist, Pose, Point
from uf_common.msg import MoveToAction, MoveToGoal, PoseTwist
from kill_handling.listener import KillListener
# Menu entry ids for the interactive marker's context menu.
CENTER_TO_VEHICLE = 1
SET_DEPTH = 9
SET_DEPTH_VAL = 10  # id of the first "Set depth" submenu entry
# Preset depths offered in the submenu; titled '%.1f m' when built.
depth_vals = [0.3, 0.5, 1, 1.5, 2, 3, 4]
class Node(object):
    """Publishes an interactive 'moveto' marker in rviz and forwards its
    pose to the 'moveto' action server once the operator stops moving it."""
    def __init__(self):
        self._tf_listener = tf.TransformListener()
        self._server = InteractiveMarkerServer("moveto_marker")
        self._move_client = actionlib.SimpleActionClient('moveto', MoveToAction)
        self._move_client.wait_for_server()
        self._kill_listener = KillListener(self.on_killed)
        # Last marker pose, when it last changed, and whether it was sent.
        self._marker_pose = None
        self._marker_pose_time = None
        self._marker_pose_sent = False
        self.setup_markers()
    def setup_markers(self):
        # Seed the marker at the vehicle's current pose; keep retrying
        # until tf can provide the map->base_link transform.
        while not rospy.is_shutdown():
            try:
                self._tf_listener.waitForTransform('map', 'base_link',
                    rospy.Time(0), rospy.Duration(1))
                trans, rot = self._tf_listener.lookupTransform('map', 'base_link',
                    rospy.Time(0))
                break
            except tf.Exception as ex:
                print ex
                continue
        self._marker_pose = Pose(position=Point(*trans),
            orientation=Quaternion(*rot))
        int_marker = InteractiveMarker()
        int_marker.header.frame_id = "/map"
        int_marker.name = "moveto"
        int_marker.description = "moveto"
        int_marker.pose = self._marker_pose
        # One MOVE_AXIS control per translation axis.
        for axis, dir in [('x', (1, 0, 0)),
                          ('y', (0, 1, 0)),
                          ('z', (0, 0, 1))]:
            control = InteractiveMarkerControl()
            control.name = axis
            control.orientation = Quaternion(*(dir + (1,))) # w must be 1?
            control.interaction_mode = InteractiveMarkerControl.MOVE_AXIS
            int_marker.controls.append(control);
        # Single rotation control about the vertical axis.
        yaw_control = InteractiveMarkerControl()
        yaw_control.name = "yaw"
        yaw_control.orientation = Quaternion(0, 1, 0, 1)
        yaw_control.interaction_mode = InteractiveMarkerControl.ROTATE_AXIS
        int_marker.controls.append(yaw_control);
        # Context menu: recenter entry plus a depth submenu.
        reset_menu = MenuEntry()
        reset_menu.id = CENTER_TO_VEHICLE
        reset_menu.title = 'Center to vehicle'
        int_marker.menu_entries.append(reset_menu)
        depth_menu = MenuEntry()
        depth_menu.id = SET_DEPTH
        depth_menu.title = 'Set depth'
        int_marker.menu_entries.append(depth_menu)
        # One child entry per preset depth, ids counted up from SET_DEPTH_VAL.
        for i, depth in enumerate(depth_vals):
            menu = MenuEntry()
            menu.id = SET_DEPTH_VAL + i
            menu.parent_id = SET_DEPTH
            menu.title = '%.1f m' % depth
            int_marker.menu_entries.append(menu)
        self._server.insert(int_marker, self.on_feedback)
        self._server.applyChanges()
    def on_feedback(self, feedback):
        # Dispatch on which menu entry (or drag) produced the feedback.
        applyChanges = True
        if feedback.menu_entry_id == CENTER_TO_VEHICLE:
            # Snap to the vehicle position, keeping only its yaw.
            trans, rot = self._tf_listener.lookupTransform('map', 'base_link',
                rospy.Time(0))
            yaw = transformations.euler_from_quaternion(rot)[2]
            self._marker_pose = Pose(position=Point(*trans),
                orientation=Quaternion(*transformations.quaternion_from_euler(0, 0, yaw)))
        elif feedback.menu_entry_id >= SET_DEPTH_VAL:
            # Depth submenu ids map back onto depth_vals; depth is -z.
            self._marker_pose.position.z = -depth_vals[feedback.menu_entry_id - SET_DEPTH_VAL]
        else:
            self._marker_pose = feedback.pose
            applyChanges = False # Don't need to apply feedback back to rviz
        if not self._kill_listener.get_killed():
            # Restart the debounce timer (see run()).
            self._marker_pose_time = rospy.Time.now()
            self._marker_pose_sent = False
        if applyChanges:
            self._server.setPose('moveto', self._marker_pose)
            self._server.applyChanges()
    def run(self):
        # Debounce loop: send the goal only once the pose has been
        # stable for more than 0.5 s and has not been sent yet.
        while not rospy.is_shutdown():
            rospy.sleep(.2)
            if self._marker_pose_time is None:
                continue
            if self._marker_pose_sent:
                continue
            if rospy.Time.now() - self._marker_pose_time > rospy.Duration(.5):
                self.send_goal()
    def on_killed(self):
        # Kill switch fired: withdraw the outstanding move goal.
        self._move_client.cancel_goal()
    def send_goal(self):
        """Send the current marker pose as a MoveTo goal and mark it sent."""
        self._move_client.send_goal(MoveToGoal(
            header=Header(frame_id='/map'),
            posetwist=PoseTwist(
                pose=self._marker_pose)))
        self._marker_pose_sent = True
if __name__ == "__main__":
    # Start the ROS node and loop forever forwarding marker goals.
    rospy.init_node("interactive_marker_moveto")
    node = Node()
    node.run()
|
22,303 | c8dc1428b0ac3890e66adecdf57a0088368332c4 | import pandas as pd
import numpy as np
from sklearn.neighbors import NearestNeighbors
from scipy.sparse import csr_matrix
from . models import Book
class BookSuggestionEngine():
    """k-NN (cosine) book recommender over the Book-Crossing CSV dumps."""

    def __init__(self, book_name):
        self.book_name = book_name
        # Paths are relative to the working directory of the Django app.
        self.books = "recommender/BX-Books.csv"
        self.ratings = "recommender/BX-Book-Ratings.csv"

    def prepare_data(self):
        """Build the title-by-user rating matrix and return the titles of
        the nearest neighbours of self.book_name.

        Returns a list of pandas Index objects (one per neighbour row).
        Raises ValueError when self.book_name is not in the filtered data.
        """
        df_books = pd.read_csv(
            self.books,
            encoding = "ISO-8859-1",
            sep=";",
            header=0,
            names=['isbn', 'title', 'author'],
            usecols=['isbn', 'title', 'author'],
            dtype={'isbn': 'str', 'title': 'str', 'author': 'str'})
        df_ratings = pd.read_csv(
            self.ratings,
            encoding = "ISO-8859-1",
            sep=";",
            header=0,
            names=['user', 'isbn', 'rating'],
            usecols=['user', 'isbn', 'rating'],
            dtype={'user': 'int32', 'isbn': 'str', 'rating': 'float32'})
        # Drop implicit (zero) ratings.
        df_cleaned_ratings = df_ratings[df_ratings.rating != 0]
        # Keep only users who gave at least `rating_treshold` ratings.
        rating_treshold = 30
        valid_users = df_cleaned_ratings["user"].value_counts() >= rating_treshold
        user_index = valid_users[valid_users].index
        df_cleaned_ratings = df_cleaned_ratings[df_cleaned_ratings["user"].isin(user_index)]
        # Merge Books and Ratings to attach titles to every rating.
        clean_dataset = df_cleaned_ratings.merge(df_books, on="isbn")
        # Per-title rating counts; keep titles with at least 10 ratings.
        count_rating = clean_dataset.groupby('title')['rating'].count().reset_index()
        count_rating.rename(columns={"rating":"rating_counts"}, inplace=True)
        final_dataset = count_rating.merge(clean_dataset, on="title")
        mask_ratings = final_dataset["rating_counts"] >= 10
        final_dataset = final_dataset[mask_ratings].reset_index(drop=True)
        # Bug fix: drop_duplicates returns a new frame -- the original call
        # discarded the result, so duplicate (user, title) votes survived.
        final_dataset = final_dataset.drop_duplicates(["user","title"])
        # Title x user matrix, missing ratings filled with 0.
        pivot = final_dataset.pivot_table(index="title",columns="user",values="rating")
        pivot.fillna(0,inplace=True)
        row_index = pivot.index.tolist().index(self.book_name)
        book_sparse = csr_matrix(pivot)
        model = NearestNeighbors(metric = 'cosine', algorithm='auto',n_neighbors=5)
        model.fit(book_sparse)
        distances, suggestions = model.kneighbors(pivot.iloc[row_index, :].values.reshape(1, -1))
        book_titles = []
        for i in range(len(suggestions)):
            book_titles.append(pivot.index[suggestions[i]])
            print(pivot.index[suggestions[i]])
        return book_titles
"""
if __name__ == '__main__':
like_book_1 = 'Dark Justice' ## méně než 10 hodnocení
like_book_2 = "Where the Heart Is (Oprah's Book Club (Paperback))"
like_book_3 = 'The Queen of the Damned (Vampire Chronicles (Paperback))'
like_book_4 = 'The Fellowship of The Ring (the lord of the rings, part 1)'
like_book_5 = "The Fellowship of the Ring (The Lord of the Rings, Part 1)"
like_book_6 = "Harry Potter and the Sorcerer's Stone (Book 1)"
a = BookSuggestionEngine(like_book_2)
print(a)
b = a.prepare_data()
print(b)
c = a.book_suggestions(b)
print(c)
"""
"""
if __name__ == '__main__':
qs = Book.objects.all()
df = qs.to_dataframe()
print(df.head())
""" |
22,304 | 44f1ed3efe8f89094d72ff33b6c1013afeb84caa | from django.test import TestCase
from .models import Hydrostn30Subbasin
# Create your tests here.
class SubbasinTestCase(TestCase):
    """Tests for the Hydrostn30Subbasin model (maps to a database view)."""
    # Django multi-db: allow this test to query the argentina_01min database.
    databases = ['argentina_01min']
    def test_query_subbasin(self):
        # Test can query Subbasin model (maps to database view); the view
        # is expected to expose exactly 48077 rows.
        self.assertEqual(Hydrostn30Subbasin.objects.count(), 48077)
22,305 | 6a6a991c7b424f5d635f37b2b1c6c7f8ca5d6200 | from django.shortcuts import render, redirect
from django.urls import reverse
from ..forms import AddressForm
def address_details(request):
    """Render and process the address form.

    GET ?clear=true discards any saved initial data.  A valid POST stores
    the cleaned form data in the session and moves on to draft-letter; an
    invalid POST bounces back to the site root.
    """
    initial = request.session.get('address', None)
    if request.GET.get('clear') == 'true':
        initial = []
    form = AddressForm(request.POST or None, initial=initial)
    if request.method == 'POST':
        if form.is_valid():
            # Bug fix: the original looped over every cleaned_data field,
            # re-assigning the identical value each iteration.  A single
            # assignment is equivalent; the guard preserves the original
            # edge case of leaving the session untouched when
            # cleaned_data is empty.
            if form.cleaned_data:
                request.session['cleaned_address'] = form.cleaned_data
            return redirect(reverse('draft-letter'))
        else:
            return redirect('/')
    return render(request, 'orderform/address_details.html', {'form': form })
|
22,306 | 8421a9077d8ae9c56d1dada237298c200ecdb9bc | import ecs
import ecs_actions
|
22,307 | 5366a83f93ab551348e1c0b130870d3be76fea60 | #! /usr/bin/env python3
# Dependencies:
# pip install ahk keyboard
import os
import pprint as pretty
import sys
from ahk import AHK
from ahk.window import Window
import keyboard
import logging
# Log to a fixed file so the hotkey daemon can be debugged after the fact;
# filemode='w' truncates the log on every start.
logging.basicConfig(
    level=logging.INFO,
    filename='c:/logs/ahk.log',
    filemode='w')
def print_doc(obj):
    """Print the public attribute names of *obj*, one per line."""
    for attr_name in dir(obj):
        if not attr_name.startswith('_'):
            print(attr_name)
# AutoHotkey executable installed via scoop; $USERPROFILE expanded at runtime.
ahk = AHK(executable_path=os.path.expandvars('$USERPROFILE/scoop/apps/autohotkey/current/AutoHotkeyU64.exe'))
# Exploration snippets kept for reference:
# win = ahk.active_window # get the active window
# print(win)
# win = ahk.win_get('slack.exe') # by title
# win = ahk.win_get('slack.exe') # by title
# print(win)
# win = list(ahk.windows()) # list of all windows
# print(win)
# win = ahk.Window(ahk, ahk_id='0xabc123') # by ahk_id
# print(win)
# win = ahk.Window.from_mouse_position(ahk) # a window under the mouse cursor
# print(win)
# win = ahk.Window.from_pid('20366') # by process ID
# print_doc(win)
def conv(text):
    """Identity passthrough; kept as the single hook where AHK byte-string
    decoding would happen if ever needed."""
    # text = text.decode('ascii')
    return text
def title_contains(title_substring):
    """Partial match a title against a substring.

    title_contains(string) -> function
    """
    # AHK window titles arrive as byte strings, so encode the needle.
    needle = title_substring.encode('ascii')

    def match(win):
        return needle in conv(win.title)

    return match
def window_class_match(expected_class):
    """Exact match a window against an AHK class.

    window_class_match(string) -> function
    """
    # AHK has byte strings and I can't manage to convert them to str, so go the
    # other way.
    wanted = expected_class.encode('ascii')

    def match(win):
        return wanted in conv(win.class_name)

    return match
def exe_match(expected_name):
    """Exact match a window against an executable name.

    exe_match(string) -> function
    """
    # expected_name = expected_name.encode('ascii')
    def match(win):
        return conv(win.process_name) == expected_name

    return match
# win = ahk.find_window(title_contains("Slack"))
# print(win.rect)
# # I don't see any way to get the number of monitors or their sizes.
# win.rect = (70, 124, 1140, 789)
# for win in ahk.windows():
# print(f"{win.title} {win.engine} {win.id} {win.text}")
def move_and_restore(win_filter_fn, xywh):
    """Move window to x,y and resize to w,h.

    Returns True if the window was found.
    move_and_restore(string, [int*4]) -> bool
    """
    x, y, w, h = xywh[0], xywh[1], xywh[2], xywh[3]
    target = ahk.find_window(win_filter_fn)
    if target:
        # Un-maximize first so the move/resize sticks.
        target.restore()
        target.move(x, y, w, h)
    return target is not None
class Monitor(object):
    """A display rectangle plus helpers computing corner placement rects."""

    def __init__(self, index, x, y, w, h):
        self.index = index
        # X is slightly off
        self.x = x - 10
        self.y = y
        self.width = w
        self.height = h

    def __str__(self):
        return "Monitor[{}] at ({},{}) dimensions ({},{})".format(
            self.index, self.x, self.y, self.width, self.height)

    def topright(self, w, h):
        # Monitor 0: avoid touching the screen edge or the y-position is
        # wrong (probably due to mismatched monitor sizes).
        fudge = 10 if self.index == 0 else 0
        return [self.x + self.width - w - fudge, self.y, w, h]

    def botright(self, w, h):
        return [self.x + self.width - w, self.y + self.height - h, w, h]

    def botleft(self, w, h):
        return [self.x, self.y + self.height - h, w, h]

    def topleft(self, w, h):
        return [self.x, self.y, w, h]
def get_monitor_layout():
    """Return the hard-coded two-monitor arrangement.

    win32api.EnumDisplayMonitors returned incorrect numbers (height
    sometimes 0, positions off -- maybe because of text scaling), e.g. a
    maximized window on the left monitor reported
    x: -2989 y: 282 w: 3000 h: 1750, so the layout is pinned instead:
    # import win32api
    # return [Monitor(index, *dimensions) for index,(h1,h2,dimensions) in enumerate(win32api.EnumDisplayMonitors())]
    """
    # Left width excludes the task bar and is shrunk until it doesn't
    # overlap with the next monitor.
    left = Monitor(0, -2978, 282, 3005 - 10, 1750 - 5)
    right = Monitor(1, 1, -11, 3862, 2182)
    return [left, right]
def organize_desktop():
    """Layout my desktop.

    organize_desktop() -> None
    """
    logging.info('organize_desktop')
    monitor = get_monitor_layout()
    # pretty.pprint([str(m) for m in monitor])
    avoid_right_monitor = len(monitor) <= 2
    # Lay out windows for my three monitors with centre as the work machine.
    # Roughly in order of left-to-right appearance.
    left_slack_width = monitor[0].width * 0.5
    move_and_restore(exe_match("slack.exe"), monitor[0].topleft(left_slack_width, monitor[0].height))
    move_and_restore(window_class_match("Vim"), monitor[1].topleft(monitor[1].width//2, monitor[1].height))
    # Game and log go here (but they position themselves).
    if avoid_right_monitor:
        move_and_restore(exe_match("chrome.exe"), monitor[0].topright(monitor[0].width - left_slack_width, monitor[0].height))
        # Using Chrome size on terminal doesn't produce the same size window?
        # move_and_restore(exe_match("ubuntu.exe"), monitor[0].x + left_slack_width, monitor[0].y, monitor[0].width - left_slack_width, monitor[0].height - 200)
        move_and_restore(exe_match("ubuntu.exe"), monitor[0].topright(1419, monitor[0].height-50))
    else:
        move_and_restore(exe_match("chrome.exe"), monitor[2].topleft(974, 1080))
        move_and_restore(exe_match("ubuntu.exe"), monitor[2].topright(974, 1087))
    # Tortoise has lots of windows and they all have the same ahk_exe
    # (TortoiseProc.exe) and ahk_class (#32770). We could do try to match on
    # text inside the window, but the title should be pretty consistent so use
    # that instead.
    if avoid_right_monitor:
        move_and_restore(title_contains("Working Copy - TortoiseSVN"), monitor[0].botright(1395,722))
    else:
        # Bug fix: a misplaced closing parenthesis passed the rect to
        # title_contains() (TypeError) instead of move_and_restore().
        move_and_restore(title_contains("Working Copy - TortoiseSVN"), monitor[2].botright(974, 605))
def shim(fn):
    """Invoke *fn* for its side effects; exceptions propagate.

    The try/except is a hook point (e.g. for logging) kept from the
    original; the return value of *fn* is deliberately discarded.
    """
    try:
        fn()
    except Exception as err:
        raise err
logging.info('Starting...')
# suppress=True seems to work better, but prevents any shortcut with the
# windows key from working. Seems like this just stops working after some time.
keyboard.add_hotkey('windows+f12', organize_desktop, suppress=False)
keyboard.add_hotkey('windows+ctrl+f11', organize_desktop, suppress=False)
# 'run-once' argument: organize immediately and exit instead of waiting
# for hotkeys.
run_once = 'run-once' in sys.argv
run_loop = not run_once
logging.info(f'run_once: {run_once}')
try:
    # Wait for hotkeys to get hit.
    while run_loop:
        keyboard.wait()
        logging.info('Done waiting')
    if not run_loop:
        organize_desktop()
except Exception as e:
    logging.exception(str(e))
    raise e
logging.info('Exiting')
|
22,308 | 46e9bd44e4b7cda72b39099207cd1e35e400f15c | """
https://leetcode.com/problems/set-matrix-zeroes/
Runtime: 144 ms, faster than 92.22% of Python3 online submissions for Set Matrix Zeroes.
Memory Usage: 13.3 MB, less than 97.44% of Python3 online submissions for Set Matrix Zeroes.
"""
class Solution:
    def setZeroes(self, matrix: List[List[int]]) -> None:
        """
        Do not return anything, modify matrix in-place instead.
        """
        # Pass 1: locate the original zeros, then tag every non-zero cell
        # sharing a row or column with one using the sentinel 'c' (so later
        # scanning can't confuse tagged cells with original zeros).
        # Pass 2: turn every sentinel into a real zero.
        n_rows, n_cols = len(matrix), len(matrix[0])
        zero_cells = [(r, c)
                      for r in range(n_rows)
                      for c in range(n_cols)
                      if matrix[r][c] == 0]
        for r, c in zero_cells:
            for rr in range(n_rows):
                if matrix[rr][c] != 0:
                    matrix[rr][c] = 'c'
            for cc in range(n_cols):
                if matrix[r][cc] != 0:
                    matrix[r][cc] = 'c'
        for r in range(n_rows):
            for c in range(n_cols):
                if matrix[r][c] == 'c':
                    matrix[r][c] = 0
|
22,309 | 897c21dd1a85ee6123e627c30254794e3429f107 | #!/usr/bin/python
#-*-coding:utf-8-*-
import sys
import re
# A parenthesized run of 3-6 characters containing no kanji/hiragana/katakana
# -- a crude kaomoji/emoticon pattern.
e = re.compile(r"\([^一-龠ぁ-んァ-ヴ]{3,6}\)")
# Print the first emoticon-like match found on each line of the input file.
for line in open(sys.argv[1]):
    emoji = e.search(line)
    if emoji is None:
        pass
    else:
        print emoji.group(0)
|
22,310 | a217c1f7c0522edeb5054f2c804fabd1939584e6 | import pandas as pd
from functools import partial
import colorama
import sys, math
# CLI: every argument is a CSV file to validate; bail out when none given.
filenames = sys.argv[1:]
if len(filenames) == 0:
    print('please specify some CSVs')
    raise SystemExit
def red_print(s):
    """Print *s* wrapped in colorama red/reset escape codes."""
    print('{}{}{}'.format(colorama.Fore.RED, s, colorama.Fore.RESET))
def maybe_red_print(s, b):
    """Print *s*; colorize it red when *b* is falsy (i.e. a failure)."""
    prefix = '' if b else colorama.Fore.RED
    suffix = '' if b else colorama.Fore.RESET
    print('{}{}{}'.format(prefix, s, suffix))
def is_growing(series):
    """Return True if *series* is strictly increasing.

    Bug fix: the original never updated its "previous value" variable, so
    it compared every element against -inf and returned True for almost
    any input.
    """
    prev = -math.inf
    for n in series:
        if prev >= n:
            return False
        prev = n
    return True
def is_interval(xs, interval):
    """Check that consecutive elements differ by exactly *interval*.

    Each mismatch is reported in red; returns False if any were found.
    """
    ok = True
    prev = xs[0]
    for row_index, x in enumerate(xs[1:]):
        expected = prev + interval
        if expected != x:
            red_print('expected #%d to have value %d but it had %d instead' % (row_index, expected, x))
            ok = False
        prev = x
    return ok
def series_is_float_str(xs):
    """Check that every element parses as a float.

    Offending values are reported in red; returns False if any exist.
    """
    ok = True
    for row_index, x in enumerate(xs):
        try:
            float(x)
        except ValueError:
            red_print('series contains non-floatish string "%s" at row %d' % (x, row_index))
            ok = False
    return ok
def handle_filename(filename):
    """Run all sanity checks against one candles CSV.

    Returns True only when every check passed.  Bug fix: the original
    returned `success` -- the status of the *last* check only -- instead
    of the accumulated `result`, masking earlier failures.
    """
    result = True
    candles_df = pd.read_csv(filename)
    # check if Timestamp column is growing
    timestamp_series = candles_df['Timestamp']
    success = is_growing(timestamp_series)
    maybe_red_print('timestamp series is%s growing' % ('' if success else ' not'), success)
    if not success: result = False
    # Timestamps must also be spaced exactly one minute apart.
    success = is_interval(timestamp_series, 60)
    maybe_red_print('timestamp series is%s on an interval of 60 seconds' % ('' if success else ' not'), success)
    if not success: result = False
    # Every price/volume column must parse entirely as floats.
    for column in ['Open', 'High', 'Low', 'Close', 'Volume_(BTC)', 'Volume_(Currency)', 'Weighted_Price']:
        success = series_is_float_str(candles_df[column])
        maybe_red_print('%s series is%s 100%% floatish strings' % (column, '' if success else ' not'), success)
        if not success: result = False
    return result
# Validate each file and print a colored per-file summary line.
for filename in filenames:
    success = handle_filename(filename)
    maybe_red_print('%s: %s' % (filename, 'passed all tests' if success else 'failed some test(s)'), success)
    print()
|
22,311 | c7b378fd3b3e18634829fa57b88b03878818f81b | import streamlit
import pandas
import requests
import snowflake.connector
# Streamlit demo app: static menu, fruit-smoothie picker backed by a CSV,
# a FruityVice API lookup, and a Snowflake-backed fruit list.
streamlit.title("My Parents New Healthy Dinner")
streamlit.header('Breakfast Menu')
streamlit.text('Omega 3 & Blueberry Oatmeal')
streamlit.text('Kale, Spinach & Rocket Smoothie')
streamlit.text('Hard-Boiled Free-Range Egg')
streamlit.header('🍌🥭 Build Your Own Fruit Smoothie 🥝🍇')
# Nutrition table keyed by fruit name.
my_fruit_list = pandas.read_csv("https://uni-lab-files.s3.us-west-2.amazonaws.com/dabw/fruit_macros.txt")
my_fruit_list = my_fruit_list.set_index('Fruit')
fruites_selected = streamlit.multiselect("Pick some fruits : ", list(my_fruit_list.index), ['Avocado', 'Strawberries'])
fruites_to_show = my_fruit_list.loc[fruites_selected]
streamlit.dataframe(fruites_to_show)
streamlit.header('FruityVice Fruit Advice')
user_input = streamlit.text_input('Choose a fruit ?', 'Kiwi')
# Flatten the JSON reply into a one-row dataframe for display.
resp = requests.get('https://www.fruityvice.com/api/fruit/' + user_input)
resp_norm = pandas.json_normalize(resp.json())
streamlit.dataframe(resp_norm)
# Snowflake connection credentials come from streamlit secrets.
my_cnx = snowflake.connector.connect(**streamlit.secrets["snowflake"])
my_cur = my_cnx.cursor()
streamlit.header('What fruit would you like to add?')
user_input = streamlit.text_input('Choose a fruit ?', 'Jackfruit')
my_cur.execute("SELECT * from fruit_load_list")
my_data_rows = my_cur.fetchall()
streamlit.text("The fruit list contains")
streamlit.dataframe(my_data_rows)
|
22,312 | 41fd17d4f59af788f613b5b05cca317856977bd2 | import sys
import math
def main():
num = int(sys.stdin.readline().rstrip())
num_list = sys.stdin.readline().rstrip().split(' ')
count=0
for i in num_list:
flag = 1
number = int(i)
if number == 1:
continue
for j in range(2,int(math.sqrt(number))+1):
if number%j ==0:
flag = 0
if flag ==1:
count+=1
print(count)
if __name__ == "__main__":
    # Script entry point.
    main()
22,313 | 8d67a3b11f56be742177b0d5f57cdbd867d0bf10 | class DataWriter():
def insert_statement_operation_SQL(self, table_name, df):
insert_query = """ INSERT INTO """ + table_name \
+ """(form, year, quarter, net_revenue, cost_of_sales, gross_margin, r_and_d,
operating_cost, net_total) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s) """
record_to_insert = (
df['Form'], df['Year'], df['Quarter'], df['NetRevenue'], df['CostOfSales'], df['GrossMargin'],
df['ResearchAndDevelopment'], df['OperatingCost'], df['NetTotal'])
# print sql
return insert_query, record_to_insert
|
22,314 | 9972a19e8a9e214c194199d107754ec4718b36dd | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 29 18:11:38 2016
@author: kiaph
"""
def main():
print()
print(" This program will take inputed values and use them to determine an employee's weekly earning based on hours worked.")
print()
name = input("What is the employee's name: ")
weeklyhours = input("Hours worked this week?: ")
hourlypay = input("?Hourly pay this week?: " )
print()
if float(weeklyhours) > 40:
pay = 40*float(hourlypay)
overtime = float(weeklyhours) - 40
overpay = 1.5*float(hourlypay)*float(overtime)
pay = float(overpay) + float(pay)
else:
pay = float(weeklyhours)*float(hourlypay)
print("The weekly wages earned for ",name," is: $",pay, sep="")
# Run the pay calculator when the module is executed.
main()
|
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import seaborn as sn
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score
plt.rcParams["font.family"] = "serif"
plt.rcParams["font.size"] = 11
# Two-stage identification: stage 1 predicts the hand/finger category of a
# print image; stage 2 predicts the identity with a per-category classifier.
df = pd.read_pickle('10_first_ppl_100_rots.pkl')
categ = "Hand"  # The category for classification ("Hand" or "Finger")
# Number of second-stage models: one per finger (5) or per hand (2).
if categ == "Finger":
    it = 5
else:
    it = 2
# --------------------------------------
# SECOND STAGE MODEL TRAINING
# --------------------------------------
model2 = []  # For storing trained models
X_train2 = [0]*it  # For storing training images, sorted by finger
X_test2 = [0]*it  # For storing testing images, sorted by finger
y_train2 = [0]*it  # For storing training identities, sorted by finger
y_test2 = [0]*it  # For storing testing identities, sorted by finger
for i in range(0, it):
    # One identity classifier per category value, trained on an 80/20 split.
    X = list(df[df[categ] == i]["Image"])
    y = df[df[categ] == i]["Identity"]
    X_train2[i], X_test2[i], y_train2[i], y_test2[i] = train_test_split(X, y, test_size=0.2)
    model2.append(RandomForestClassifier())
    model2[i].fit(X_train2[i], y_train2[i])
# --------------------------------------
# END OF SECOND STAGE MODEL TRAINING
# --------------------------------------
# -------------------------------------
# FIRST STAGE MODEL TRAINING
# -------------------------------------
# Reuse the per-category splits so both stages see consistent train/test data.
X_train = []
X_test = []
for i in range(len(X_train2)):
    X_train += X_train2[i]
for i in range(len(X_test2)):
    X_test += X_test2[i]
y_train_id = pd.concat(y_train2)
y_test_id = pd.concat(y_test2)
y_train = []  # [category, original df index] per training sample
y_test = []   # [category, original df index] per testing sample
for i in range(len(y_train_id)):
    j = y_train_id.index[i]
    y_train.append([df[categ][j], j])
for i in range(len(y_test_id)):
    j = y_test_id.index[i]
    y_test.append([df[categ][j], j])
y_train_indexfree = [i[0] for i in y_train]
model_finger = RandomForestClassifier()
model_finger.fit(X_train, y_train_indexfree)  # Fit the model to training data
# -------------------------------------
# END OF FIRST STAGE MODEL TRAINING
# -------------------------------------
# --------------------------------------
# MODEL IMPLEMENTATION
# --------------------------------------
# Route each test image to the identity model of its *predicted* category.
pred_finger = model_finger.predict(X_test)
X_pred = [[] for _ in range(it)]  # Images sorted by predicted finger
y_true = [[] for _ in range(it)]  # True identities sorted by predicted finger
for i in range(len(pred_finger)):
    finger = pred_finger[i]
    X_pred[finger].append(X_test[i])
    index = y_test[i][1]
    y_true[finger].append(df["Identity"][index])
pred_id = [[] for _ in range(it)]
for i in range(it):
    if X_pred[i]:
        pred_id[i] = model2[i].predict(X_pred[i])
# --------------------------------------
# END OF MODEL IMPLEMENTATION
# --------------------------------------
# --------------------------------------
# EVALUATION OF RESULTS
# --------------------------------------
# Calculate accuracy
succs = 0  # Number of successful predictions
for i in range(len(pred_id)):
    for j in range(len(pred_id[i])):
        if pred_id[i][j] == y_true[i][j]:
            succs += 1
print("Accuracy: " + str(succs/len(X_test)))
print("Successful predictions: " + str(succs))
# --------------------------------------
# END OF EVALUATION OF RESULTS
# --------------------------------------
pred_id = np.concatenate(pred_id)
y_true = np.concatenate(y_true)
# Confusion matrix
# Bug fix: sklearn's signature is confusion_matrix(y_true, y_pred); the
# original passed predictions first, transposing the matrix relative to the
# axis labels below.  Truth now maps to rows (y-axis), predictions to columns.
cm = confusion_matrix(y_true, pred_id, normalize='all')  # Remove 'normalize="all"' to get absolute numbers
plt.figure()
sn.heatmap(cm, annot=False, cmap='RdPu')
# plt.title('Confusion matrix for 2-stage prediction of identity')
plt.xlabel('Predicted')
plt.ylabel('Truth')
22,316 | 0a4e24da357a92beba7feecc92715ff21f9be8c0 | from .models import MetadataContainer
from . import settings
from .connection import client
class MetadataMixin(object):
    """Mixin that attaches a shared MetadataContainer to a model class.

    The container resolves its storage key lazily, per instance, through
    the ``metadata_key`` property below.
    """

    # Shared container; the key callable defers to the instance property.
    metadata = MetadataContainer(connection=client,
                                 key=lambda instance: instance.metadata_key)

    @property
    def metadata_key(self):
        """Build the storage key from a METADATA_KEY template.

        The template is taken from the class attribute if present, falling
        back to ``settings.METADATA_KEY``.  Returns None when no template
        is configured.
        """
        template = getattr(self, 'METADATA_KEY', settings.METADATA_KEY)
        if not template:
            return None
        return template % {
            'identifier': self.__class__.__name__.lower(),
            'id': self.pk
        }
|
22,317 | ae8c4dd36dd9507536d520fe5d49063ac88dcc14 | # app/home/views.py
from flask import abort, render_template, request, jsonify, send_from_directory, make_response, send_file, Response
from ..models import Expert, Factor, Operator, Node, Edge, Temp_imp, Spat_aspect, Temp_aspect, pageTexts, Sensitivity, Con_strength
from flask_login import current_user, login_required
from helpers import createJSON, export_network_data, convert, give_arrows, give_dashes, give_strength
from requests_toolbelt import MultipartEncoder
from forms import NodeForm
from . import home
from .. import db
from ..models import Node, Edge
from sqlalchemy import text
import pandas as pd
import numpy as np
import pickle
from random import randint, choice
import json
import zipfile
import os
import io
import re
import pathlib
@home.route('/')
def homepage():
    """
    Render the homepage template on the / route

    Builds two template payloads:
      * pagesClassIDs -- editable page texts looked up from pageTexts by
        (pageID, htmlName) for the "index" page sections.
      * circleData    -- nested JSON (via createJSON) describing accepted,
        non-admin experts grouped by affiliation/discipline/specialization,
        used by the circle-packing visualisation.
    """
    # Section ids on the index page whose texts are stored in the DB.
    pagesClassIDs = {
        "index": {
            "bannertitle": [],
            "subtitle": [],
            "firstText": [],
            "secondText": []
        }
    }
    for key in pagesClassIDs["index"].keys():
        pagesClassIDs["index"][key].append(
            str(
                pageTexts.query.filter_by(pageID=key,
                                          htmlName="index").first()))
    experts = Expert.query.all()
    data = {
        "modules": [],
        "discipline": [],
        "subdivision": [],
        "publications": [],
        "People": []
    }
    # Only accepted, non-admin experts are shown; core experts are tagged
    # "(T)", the rest "(N)".
    for expert in experts:
        if not expert.is_admin and str(expert.accepted) == "Yes":
            data["modules"].append(str(expert.affiliation))
            data["discipline"].append(str(expert.discipline))
            data["subdivision"].append(str(expert.specialization))
            data["publications"].append(0)  # publication counts not tracked yet
            if str(expert.core_exp) == "Yes":
                data["People"].append(
                    str(expert.title) + " " + str(expert.first_name) + " " +
                    str(expert.last_name) + "(T)")
            else:
                data["People"].append(
                    str(expert.title) + " " + str(expert.first_name) + " " +
                    str(expert.last_name) + "(N)")
    # Collapse experts sharing module/discipline/subdivision into one row:
    # sum the publication counts and comma-join the people names.
    crdata = pd.DataFrame(data).groupby(
        ['modules', 'discipline', 'subdivision'], as_index=False).agg({
            'publications':
            'sum',
            'People':
            lambda x: ','.join(x)
        })
    circleData = createJSON(crdata)
    return render_template(
        'home/index.html',
        pageDicts=pagesClassIDs,
        bannertitle="Introduction to Research",
        subtitle="Interactive Causal Mapping",
        title="Home",
        circleData=circleData)
@home.route('/fase1')
@login_required
def fase1():
    """
    Render the fase1 template on the /fase1 route

    Builds factorDict, a two-level mapping
    spatial-aspect name -> temporal-aspect name -> [factor names]
    over all stored nodes, for the phase-1 factor overview.
    """
    pagesClassIDs = {
        "fase1": {
            "bannertitle": [],
            "subtitle": [],
            "firstText": [],
            "secondText": []
        }
    }
    for key in pagesClassIDs["fase1"].keys():
        pagesClassIDs["fase1"][key].append(
            str(
                pageTexts.query.filter_by(pageID=key,
                                          htmlName="fase1").first()))
    spat_aspects = Spat_aspect.query.all()
    spat_aspectsList = [
        spat_aspect.__dict__["name"] for spat_aspect in spat_aspects
    ]
    temp_aspects = Temp_aspect.query.all()
    temp_aspectsList = [
        temp_aspect.__dict__["name"] for temp_aspect in temp_aspects
    ]
    nodes = Node.query.all()
    nodesList = [node.__dict__ for node in nodes]
    for nd in nodesList:
        # Drop SQLAlchemy's internal bookkeeping so the dicts serialize cleanly.
        del nd['_sa_instance_state']
    factorDict = {}
    for node in nodesList:
        # Aspect ids are 1-based in the DB; the name lists are 0-based.
        spat_asp = int(node["spat_aspect_id"]) - 1
        temp_asp = int(node["temp_aspect_id"]) - 1
        factorDict.setdefault(spat_aspectsList[spat_asp], {})
        factorDict[spat_aspectsList[spat_asp]].setdefault(
            temp_aspectsList[temp_asp], [])
        factorDict[spat_aspectsList[spat_asp]][
            temp_aspectsList[temp_asp]].append(node["factor"])
    print(nodesList)  # debug output
    print(factorDict)  # debug output
    return render_template(
        'home/fase1.html',
        factorDict=factorDict,
        nodes=nodesList,
        spat_aspects=spat_aspectsList,
        temp_aspects=temp_aspectsList,
        pageDicts=pagesClassIDs,
        title="fase1")
@home.route('/fase2')
@login_required
def fase2():
    """
    Render the fase2 template on the /fase2 route

    Prepares stored nodes/edges plus a Sankey-diagram payload (links
    weighted by connection strength) and the dropdown options for the
    phase-2 relation-editing view.
    """
    pagesClassIDs = {
        "fase2": {
            "bannertitle": [],
            "subtitle": [],
            "firstText": [],
            "secondText": []
        }
    }
    for key in pagesClassIDs["fase2"].keys():
        pagesClassIDs["fase2"][key].append(
            str(
                pageTexts.query.filter_by(pageID=key,
                                          htmlName="fase2").first()))
    nodes = Node.query.all()
    # id -> raw node dict, used to resolve edge endpoints to factor names.
    # NOTE: the values are the same dict objects as in nodesList below, so
    # the _sa_instance_state deletion affects both collections.
    nodesList2 = {node.__dict__["id"] : node.__dict__ for node in nodes}
    nodesList = [node.__dict__ for node in nodes]
    for nd in nodesList:
        del nd['_sa_instance_state']
    edges = Edge.query.all()
    edgesList = [edge.__dict__ for edge in edges]
    for ed in edgesList:
        del ed['_sa_instance_state']
    # Sankey payload: one link per stored edge, weight = connection strength * 10.
    sankeyData = {}
    sankeyData["links"] = [{"source":nodesList2[edge["factor_A"]]["factor"], "target":nodesList2[edge["factor_B"]]["factor"], "value":edge["con_strength_id"]*10,"optimal":"yes"} for edge in edgesList]
    nodeSet = set()
    for link in sankeyData["links"]:
        nodeSet.add(link["source"])
        nodeSet.add(link["target"])
    sankeyData["nodes"] = [{"name":node} for node in nodeSet]
    con_strengths = Con_strength.query.all()
    temp_aspects = Temp_aspect.query.all()
    spat_aspects = Spat_aspect.query.all()
    print(temp_aspects)  # debug output
    dropDowns = {
        "temp_aspects": [str(x) for x in temp_aspects],
        "spat_aspects": [str(x) for x in spat_aspects],
        "con_strengths": [str(x) for x in con_strengths]
    }
    return render_template(
        'home/fase2.html',
        pageDicts=pagesClassIDs,
        dropDowns=dropDowns,
        nodes=nodesList,
        edges=edgesList,
        sankeyData=sankeyData,
        title="fase2")
@home.route('/fase3')
@login_required
def fase3():
    """
    Render the fase3 template on the / route

    Assembles the full causal network for the graph view: all nodes with
    display attributes (grouped/levelled by temporal aspect), all edges
    with arrow/strength styling, plus one pinned legend node per temporal
    aspect on the left of the canvas.
    """
    pagesClassIDs = {
        "fase3": {
            "bannertitle": [],
            "subtitle": [],
            "firstText": [],
            "secondText": [],
            "thirdText": []
        }
    }
    for key in pagesClassIDs["fase3"].keys():
        pagesClassIDs["fase3"][key].append(
            str(
                pageTexts.query.filter_by(pageID=key,
                                          htmlName="fase3").first()))
    # Marker shapes/colors assigned per distinct operator (cycled lists).
    shapes = [
        "triangle", "square", "diamond", "triangle", "square", "diamond",
        "triangle", "square", "diamond"
    ]
    colors = [
        '#d53e4f', '#f46d43', '#fdae61', '#fee08b', '#e6f598', '#abdda4',
        '#66c2a5', '#3288bd', '#ffffbf'
    ]
    nodes = Node.query.all()
    edges = Edge.query.all()
    temp_imps = Temp_imp.query.all()
    spat_aspects = Spat_aspect.query.all()
    temp_aspects = Temp_aspect.query.all()
    sensitivities = Sensitivity.query.all()
    con_strengths = Con_strength.query.all()
    operators = Operator.query.all()
    experts = Expert.query.all()
    # NOTE(review): acceptedList is computed but not used in this view.
    acceptedList = [
        expert.id for expert in experts if str(expert.accepted) == "Yes"
    ]
    dist_ops = Operator.query.distinct(Operator.name)
    op_dict = {str(x): shapes[i] for i, x in enumerate(dist_ops)}
    cl_dict = {str(x): colors[i] for i, x in enumerate(dist_ops)}
    dropDowns = {
        "type": op_dict.keys(),
        "temp_imps": [str(x) for x in temp_imps],
        "spat_aspects": [str(x) for x in spat_aspects],
        "temp_aspects": [str(x) for x in temp_aspects],
        "sensitivity_id": [str(x) for x in sensitivities],
        "con_strengths": [str(x) for x in con_strengths],
        "operators": [str(x) for x in operators]
    }
    randicon = ["onset", "maintenance", "relapse"]
    # randicon = ["stock", "cloud", "unknown", "onset", "maintenance", "relapse"]
    # randicon = ["variable", "stock", "cloud", "unknown"]
    data = {"nodes": [], "edges": []}
    data["nodes"] = [{
        "id": int(node.id),
        "sensitivity_id": int(node.sensitivity_id),
        "spat_aspect_id": str(node.spat_aspect_id),
        "temp_aspect_id": str(node.temp_aspect_id),
        "temp_imp_id": str(node.temp_imp_id),
        "notes": str(node.notes),
        "notes_factor": str(node.notes_factor),
        "created_date": str(node.created_date),
        "font": {
            "multi": 'html',
            "size": 20,
            "color": 'black',
            "bold": True
        },
        # Underscores in factor names become per-word bold line breaks.
        "label": '<b>'+str(node.factor).replace("_", "</b>\n<b>")+'</b>',
        "group": str(node.temp_aspect_id),
        "x": None,
        "y": None,
        "value": int(node.sensitivity_id),
        "sup_lit": None,
        "fixed": False,
        "physics": True,
        "level": node.temp_aspect_id
    } for node in nodes]
    data["edges"] = []
    for edge in edges:
        fromIndex = next((index for (index, d) in enumerate(data["nodes"]) if d["id"] == edge.factor_A), None)
        # Bug fix: the original looked up factor_A for BOTH endpoints, so
        # fromIndex always equalled toIndex and every edge was "curvedCW".
        toIndex = next((index for (index, d) in enumerate(data["nodes"]) if d["id"] == edge.factor_B), None)
        # Edges that cross temporal-aspect levels are drawn straight,
        # same-level edges are curved.
        # NOTE(review): the graph library spells this "continuous" -- confirm
        # "continous" is the intended value.
        if data["nodes"][fromIndex]["temp_aspect_id"] != data["nodes"][
                toIndex]["temp_aspect_id"]:
            smoothType = "continous"
        else:
            smoothType = "curvedCW"
        data["edges"].append({
            "arrows": give_arrows(edge.con_strength_id),
            "dashes": bool(1),
            "from": str(edge.factor_A),
            "to": str(edge.factor_B),
            "id": int(edge.id),
            "created_date": str(edge.created_date),
            "value": give_strength(edge.con_strength_id),
            "temp_imp_id": str(edge.temp_imp_id),
            "temp_aspect_id": str(edge.temp_aspect_id),
            "con_strength": int(edge.con_strength_id),
            "operator_id": str(edge.operator_id),
            "notes_relation": str(edge.notes_relation),
            "sup_lit": str(edge.sup_lit),
            "smooth": {
                "type": smoothType,
                "forceDirection": 'vertical',
                "roundness": 0.4
            }
        })
    # One fixed legend node per temporal aspect, pinned at x=-1000.
    for index, group in enumerate(dropDowns["temp_aspects"]):
        data["nodes"].append({"id": 1000 + index,
                              "x": -1000,
                              "y": index * 100,
                              "font": {"multi": 'html',
                                       "size": 24,
                                       "color": 'black',
                                       "bold": True},
                              "notes": "This is just a Legend Node",
                              "notes_factor": "LegendNode999",
                              "sup_lit": None,
                              "created_date": None,
                              "label": group,
                              "group": str(index+1),
                              "sensitivity": None,
                              "temp_aspect_id": None,
                              "temp_imp_id": None,
                              "value": 7,
                              "fixed": True,
                              "level": str(index+1),
                              "physics": False})
    return render_template(
        'home/fase3.html',
        pageDicts=pagesClassIDs,
        dropDowns=dropDowns,
        causalData=data,
        op_dict=op_dict,
        cl_dict=cl_dict,
        title="fase3")
@home.route('/admin/dashboard')
@login_required
def admin_dashboard():
    """Render the admin dashboard; non-admin users receive a 403."""
    if not current_user.is_admin:
        # Only administrators may view this page.
        abort(403)
    context = {
        "bannertitle": "Welcome Admin",
        "subtitle": "A place to display your admin powers",
        "title": "AdminDashboard",
    }
    return render_template('home/admin_dashboard.html', **context)
@home.route('/admin/presentation')
@login_required
def admin_presentation():
    """Render the admin presentation page.

    Builds the full causal network (nodes/edges with randomized display
    sizes), operator shape/color legends, and the expert circle-packing
    data.  Admin-only; others receive a 403.
    """
    # prevent non-admins from accessing the page
    if not current_user.is_admin:
        abort(403)
    # Marker shapes/colors assigned per distinct operator (cycled lists).
    shapes = [
        "triangle", "square", "diamond", "triangle", "square", "diamond",
        "triangle", "square", "diamond"
    ]
    colors = [
        '#d53e4f', '#f46d43', '#fdae61', '#fee08b', '#e6f598', '#abdda4',
        '#66c2a5', '#3288bd', '#ffffbf'
    ]
    nodes = Node.query.all()
    edges = Edge.query.all()
    temp_imps = Temp_imp.query.all()
    temp_aspects = Temp_aspect.query.all()
    operators = Operator.query.all()
    experts = Expert.query.all()
    # NOTE(review): acceptedList is computed but not used in this view.
    acceptedList = [
        expert.id for expert in experts if str(expert.accepted) == "Yes"
    ]
    dist_ops = Operator.query.distinct(Operator.name)
    op_dict = {str(x): shapes[i] for i, x in enumerate(dist_ops)}
    cl_dict = {str(x): colors[i] for i, x in enumerate(dist_ops)}
    data = {"nodes": [], "edges": []}
    # Node/edge "value" fields are randomized purely for presentation sizing.
    data["nodes"] = [{
        "id": int(node.id),
        "sensitivity": int(node.sensitivity_id),
        "temp_aspect_id": str(node.temp_aspect_id),
        "temp_imp_id": str(node.temp_imp_id),
        "notes": str(node.notes),
        "notes_factor": str(node.notes_factor),
        "created_date": str(node.created_date),
        "font": {
            "multi": 'html',
            "size": 20,
            "color": 'white',
            "bold": True
        },
        "label": '<b>' + str(node.factor) + '</b>',
        "x": None,
        "y": None,
        "value": np.random.uniform(8, 20),
        "sup_lit": None
    } for node in nodes]
    data["edges"] = [{
        "arrows": "to",
        "from": str(edge.factor_A),
        "to": str(edge.factor_B),
        "id": int(edge.id),
        "created_date": str(edge.created_date),
        "value": np.random.uniform(0, 4),
        "temp_imp_id": str(edge.temp_imp_id),
        "temp_aspect_id": str(edge.temp_aspect_id),
        "operator_id": str(edge.operator_id),
        "notes_relation": str(edge.notes_relation),
        "sup_lit": str(edge.sup_lit)
    } for edge in edges]
    circdat = {
        "modules": [],
        "discipline": [],
        "subdivision": [],
        "publications": [],
        "People": []
    }
    # Same grouping as the homepage: accepted, non-admin experts only.
    for expert in experts:
        if not expert.is_admin and str(expert.accepted) == "Yes":
            circdat["modules"].append(str(expert.affiliation))
            circdat["discipline"].append(str(expert.discipline))
            circdat["subdivision"].append(str(expert.specialization))
            circdat["publications"].append(0)
            # NOTE(review): homepage compares str(expert.core_exp) == "Yes";
            # here the raw attribute is compared -- confirm both behave alike.
            if expert.core_exp == "Yes":
                circdat["People"].append(
                    str(expert.title) + " " + str(expert.first_name) + " " +
                    str(expert.last_name) + "(T)")
            else:
                circdat["People"].append(
                    str(expert.title) + " " + str(expert.first_name) + " " +
                    str(expert.last_name) + "(N)")
    crdata = pd.DataFrame(circdat).groupby(
        ['modules', 'discipline', 'subdivision'], as_index=False).agg({
            'publications':
            'sum',
            'People':
            lambda x: ','.join(x)
        })
    circleData = createJSON(crdata)
    return render_template(
        'home/presentation.html',
        circleData=circleData,
        temp_imps1=enumerate(temp_imps),
        temp_aspects1=enumerate(temp_aspects),
        temp_imps=enumerate(temp_imps),
        temp_aspects=enumerate(temp_aspects),
        operators=enumerate(operators),
        causalData=data,
        op_dict=op_dict,
        cl_dict=cl_dict,
        title="Presentation")
@home.route('/submitcausalmap', methods=['GET', 'POST'])
@login_required
def submitcausalmap():
    """Persist a causal map posted as JSON ({"nodes": [...], "edges": [...]}).

    Each node/edge is committed individually, so one bad record does not
    roll back the rest; failures are collected into an error string that is
    returned in the JSON response.
    """
    data = request.json
    errors = ""
    post = ""
    # print("******************* DB EXECUTE ***************")
    # result = db.engine.execute(text("SELECT * FROM nodes WHERE factor LIKE '%Ne%'"))
    # nodes = [factor for factor in result]
    # print(nodes)
    # print("---------------------------")
    for i, node in enumerate(data["nodes"]):
        try:
            nodePost = Node(
                # Strip the <b> markup added for graph display (see fase3).
                factor=node["label"].replace("<b>", "").replace("</b>", ""),
                expert_id=current_user.get_id(),
                sensitivity_id=node["sensitivity_id"],
                spat_aspect_id=node["spat_aspect_id"],
                temp_aspect_id=node["temp_aspect_id"],
                temp_imp_id=node["temp_imp_id"],
                notes=node["notes"],
                notes_factor=node["notes_factor"])
            db.session.add(nodePost)
            db.session.commit()
            post = "It worked!"
        except Exception as e:
            errors += " (" + str(i) + " " + str(e) + ")"
            post = "Something went wrong with adding nodes: "
    for i, edge in enumerate(data["edges"]):
        try:
            edgePost = Edge(
                expert_id=current_user.get_id(),
                factor_A=edge["from"],
                factor_B=edge["to"],
                con_strength_id=edge["con_strength"],
                operator_id=edge["operator_id"],
                temp_aspect_id=edge["temp_aspect_id"],
                notes_relation=edge["notes_relation"],
                sup_lit=edge["sup_lit"])
            db.session.add(edgePost)
            db.session.commit()
            post = "It worked!"
        except Exception as e:
            errors += " (" + str(i) + " " + str(e) + ")"
            post = "Something went wrong with adding edges: "
    if errors == "":
        return jsonify(post)
    else:
        return jsonify(post + errors)
@home.route('/submitNewNodes', methods=['GET', 'POST'])
@login_required
def submitNewNodes():
    """Persist factors posted as JSON.

    Expects an object mapping factor name -> {"spat_aspect", "temp_aspect"}.
    Each node is committed individually; failures are collected and the
    aggregate status string is returned as JSON.
    """
    data = request.json
    errors = ""
    post = ""
    for i, key in enumerate(data):
        try:
            nodePost = Node(
                factor=key,
                expert_id=current_user.get_id(),
                spat_aspect_id=data[key]["spat_aspect"],
                temp_aspect_id=data[key]["temp_aspect"])
            db.session.add(nodePost)
            db.session.commit()
            post = "It worked!"
        except Exception as e:
            errors += " (" + str(i) + " " + str(e) + ")"
            post = "Something went wrong with adding nodes: "
    if errors == "":
        return jsonify(post)
    else:
        return jsonify(post + errors)
@home.route('/submitNewEdges', methods=['GET', 'POST'])
@login_required
def submitNewEdges():
    """Persist a batch of edges posted as JSON.

    Expects a list of objects with keys "source", "target", "value"
    (connection strength id) and "temp_aspect".  Each edge is committed
    individually so one bad record does not roll back the rest; failures
    are collected and reported in the JSON response.
    """
    data = request.json
    errors = ""
    post = ""
    for i, edge in enumerate(data):
        try:
            edgePost = Edge(
                factor_A=edge["source"],
                factor_B=edge["target"],
                con_strength_id=edge["value"],
                temp_aspect_id=edge["temp_aspect"],
                expert_id=current_user.get_id())
            db.session.add(edgePost)
            db.session.commit()
            post = "It worked!"
        except Exception as e:
            errors += " (" + str(i) + " " + str(e) + ")"
            # Fixed copy-pasted message: this loop adds edges, not nodes.
            post = "Something went wrong with adding edges: "
    if errors == "":
        return jsonify(post)
    else:
        return jsonify(post + errors)
@home.route('/export_data', methods=['GET', 'POST'])
@login_required
def export_data():
    """Export the network (nodes + edges) in the format requested by the client.

    The JSON body carries the data and a "format" field.  csv/pkl exports
    produce two files (presumably nodes and edges -- verify in helpers.py)
    streamed back as a multipart response; other formats are sent as a
    single file download.
    """
    # Static data folder of the app ([:-4] strips the trailing "home" package dir).
    filepath = os.path.dirname(os.path.realpath(__file__))[:-4] + "static/data/"
    print(filepath)  # debug output
    data = request.json["data"]
    if not data:
        return "No file"
    data = convert(data)
    # For csv/pkl, filename is expected to be a pair of paths; otherwise a
    # single file name relative to filepath -- TODO confirm against helpers.
    filename, contentType = export_network_data(data, filepath)
    if data["format"] == "csv":
        m = MultipartEncoder(
            fields={
                'field1': (filename[0], open(filename[0], 'rb'), 'text/csv'),
                'field2': (filename[1], open(filename[1], 'rb'), 'text/csv')
            })
        return Response(m.to_string(), mimetype=m.content_type)
    if data["format"] == "pkl":
        m = MultipartEncoder(
            fields={
                'field1': (filename[0], open(filename[0], 'rb'), 'text'),
                'field2': (filename[1], open(filename[1], 'rb'), 'text')
            })
        return Response(m.to_string(), mimetype=m.content_type)
    # NOTE(review): attachment_filename is the pre-Flask-2.0 keyword (renamed
    # to download_name) -- confirm the pinned Flask version.
    return send_file(
        filepath + filename,
        mimetype=contentType,
        attachment_filename=filename,
        as_attachment=True)
22,318 | 30fe30969bf76ca00c48adeda78982e43b85b75c | from contextlib import suppress
import sqlalchemy as sa
from django.db import DEFAULT_DB_ALIAS
from django.utils.module_loading import import_string
from .sqlalchemy import SQLAlchemy
from .transaction import TransactionContext
from .url import get_settings, make_url
class dbdict(dict):
    """Holds all configured :py:class:`..sqlalchemy.SQLAlchemy` instances,
    keyed by database alias."""

    def get(self, alias=None, cls=SQLAlchemy, **kwargs):
        """Returns a :py:class:`..sqlalchemy.SQLAlchemy` instance from
        configuration and registers it.

        Can return a custom
        :py:class:`..sqlalchemy.SQLAlchemy` instance thru args or thru ``SQLALCHEMY`` database setting in configuration.
        """
        alias = alias or DEFAULT_DB_ALIAS
        if alias in self:
            return self[alias]
        # A custom SQLAlchemy subclass may be named in settings; any error
        # (missing key, bad import path) silently keeps the default class.
        with suppress(Exception):
            settings = get_settings(alias)
            cls = import_string(settings.get("SQLALCHEMY"))
        assert SQLAlchemy in cls.mro(), "'%s' needs to subclass from SQLAlchemy" % cls.__name__
        url, _kwargs = make_url(alias)
        _kwargs.update(kwargs)
        _kwargs["alias"] = alias
        return self.setdefault(alias, cls(url, **_kwargs))

    def update(self, *args, **kwargs):
        # Route every assignment through __setitem__ so its validation runs
        # (plain dict.update would bypass the overridden __setitem__).
        for arg in args:
            other = dict(arg)
            for key in other:
                self[key] = other[key]
        for key in kwargs:
            self[key] = kwargs[key]

    def __setitem__(self, alias, val):
        """Register a database, rejecting duplicate aliases, duplicate
        instances, and values that are not SQLAlchemy objects."""
        if alias in self:
            raise RuntimeError("Database alias `{alias}` has already been created".format(alias=alias))
        if val in self.values():
            raise RuntimeError("Database alias `{alias}` has already been created".format(alias=alias))
        if not isinstance(val, SQLAlchemy):
            raise RuntimeError("Database alias `{alias}` has wrong type".format(alias=alias))
        super().__setitem__(alias, val)

    def rollback(self):
        """Applies rollback on all registered databases."""
        for db in self.values():
            db.rollback()

    def flush(self):
        """Applies flush on all registered databases."""
        for db in self.values():
            db.flush()

    def commit(self):
        """Applies commit on all registered databases."""
        for db in self.values():
            db.commit()

    def remove(self):
        """Applies remove on all registered databases."""
        for db in self.values():
            db.remove()

    def atomic(self, savepoint=True):
        """Returns a context manager/decorator that guarantee atomic execution
        of a given block or function across all configured and initialized
        SQLAlchemy instances."""
        # Bug fix: the savepoint argument was previously ignored (hard-coded
        # True); forward the caller's choice to the transaction context.
        return TransactionContext(*self.values(), savepoint=savepoint)
def _index_foreign_keys(tbl):
    """Create an index for each foreign-key constraint of *tbl* that does
    not already have one covering the same columns."""
    existing = {tuple(sorted(col.name for col in ix.columns)) for ix in tbl.indexes}
    for constraint in tbl.foreign_key_constraints:
        cols = tuple(sorted(col.name for col in constraint.columns))
        if cols in existing:
            continue
        # Unnamed index; use_alter defers creation until after the tables.
        sa.Index(None, *list(constraint.columns), use_alter=True)
def index_foreign_keys(*args):
    """Generates indexes for all foreign keys for a table or metadata
    tables."""
    for target in args:
        if isinstance(target, sa.MetaData):
            # Expand a MetaData argument to all of its tables.
            for table in target.tables.values():
                _index_foreign_keys(table)
        elif isinstance(target, sa.Table):
            _index_foreign_keys(target)
|
22,319 | 29216377493fe8fe4f1c874c449effe71d0100d3 | import web3
import solc
import time
import threading
import hashlib
import os
w3 = None
cache = {}
def connect():
    """Ensure a single module-level Web3 connection exists and return it.

    Bug fix: ``w3.isConnected`` is a method; the original tested and
    asserted the bound-method object itself, which is always truthy, so a
    dead connection was never detected and the assert never fired.
    """
    global w3
    if w3 is None or not w3.isConnected():
        # large request timeout require for performance tests
        w3 = web3.Web3(web3.HTTPProvider('http://127.0.0.1:8545', request_kwargs={'timeout': 60 * 10}))
        assert w3.isConnected()
    return w3
def filehash(path):
    """Return the hex MD5 digest of the file at *path*."""
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        digest.update(fh.read())
    return digest.hexdigest()
def compile_contract(contract_name):
    """ compiles the given contract (from the ./contracts folder)
    and returns its ABI interface

    Results are memoized in the module-level ``cache``, keyed by the MD5
    of the source file, so an unchanged contract is compiled only once.
    """
    # Resolve the contracts folder whether we run from the repo root or
    # from the client/ subdirectory.
    path = os.getcwd()
    if path.endswith('client'):
        path = f'../contracts/{contract_name}.sol'
    else:
        path = f'./contracts/{contract_name}.sol'
    h = filehash(path)
    interface = cache.get(h)
    if interface:
        return interface
    with open(path) as f:
        src = f.read()
    # compile_source returns a dict of interfaces; take the first entry.
    for i in solc.compile_source(src, optimize=True).values():
        interface = i
        break
    cache[h] = interface
    return interface
def get_contract(contract_name, contract_address, patch_api=True):
    """ gets the instance of an already deployed contract
    if patch_api is set, all transactions are automatically syncronized, unless wait=False is specified in the tx
    """
    connect()
    interface = compile_contract(contract_name)
    instance = w3.eth.contract(
        address=contract_address,
        abi=interface['abi'],
        ContractFactoryClass=web3.contract.ConciseContract,
    )
    if patch_api:
        # Wrap every concise method so transactions block until mined
        # (see _tx_executor) unless the caller passes wait=False.
        for name, func in instance.__dict__.items():
            if isinstance(func, web3.contract.ConciseMethod):
                instance.__dict__[name] = _tx_executor(func)
    # add event handling stuff to the instance object
    # (the concise wrapper lacks the event API, so borrow it from a
    # regular contract factory built from the same ABI)
    contract = w3.eth.contract(abi=interface['abi'], bytecode=interface['bin'])
    instance.eventFilter = contract.eventFilter
    instance.events = contract.events
    return instance
def _tx_executor(contract_function):
    """Wrap *contract_function* so transactions block until mined.

    The wrapper waits for the transaction receipt whenever the call
    carries a 'transact' option, unless the caller opts out with
    wait=False.  Plain (constant) calls pass straight through.
    """
    def wrapper(*args, **kwargs):
        wait = kwargs.pop('wait', True)
        if not wait or 'transact' not in kwargs:
            return contract_function(*args, **kwargs)
        tx_hash = contract_function(*args, **kwargs)
        return w3.eth.waitForTransactionReceipt(tx_hash)
    return wrapper
def deploy_contract(
    contract_name, account=None, patch_api=True, return_tx_receipt=False
):
    """ compiles and deploy the given contract (from the ./contracts folder)
    returns the contract instance

    Deploys from *account* (default: the last node account), waits for the
    deployment receipt, then re-fetches the instance via get_contract so
    the optional API patching is applied.
    """
    connect()
    if account is None:
        account = w3.eth.accounts[-1]
    interface = compile_contract(contract_name)
    contract = w3.eth.contract(abi=interface['abi'], bytecode=interface['bin'])
    # increase max gas t
    # tx_hash = contract.constructor().transact({'from': account, 'gas': 7_500_000})
    tx_hash = contract.constructor().transact({'from': account, 'gas': 5_000_000})
    tx_receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    c = get_contract(contract_name, tx_receipt['contractAddress'], patch_api)
    if return_tx_receipt:
        return c, tx_receipt
    return c
def flatten(list_of_lists):
    """Concatenate one level of nesting: [[a, b], [c]] -> [a, b, c]."""
    flat = []
    for sub in list_of_lists:
        flat.extend(sub)
    return flat
def get_events(contract_instance, event_name, from_block=0, to_block=None):
    """Return all *event_name* log entries emitted by *contract_instance*
    within the given block range."""
    # eventFilter = contract.eventFilter(event_name, {'fromBlock': 0})
    event = contract_instance.events.__dict__[event_name]
    event_filter = event.createFilter(fromBlock=from_block, toBlock=to_block)
    # Filters match by topic across all deployments, so keep only entries
    # emitted at this contract's address.
    return [entry for entry in event_filter.get_all_entries()
            if entry.address == contract_instance.address]
def wait_for(predicate, check_interval=1.0):
    """Block until *predicate*() is truthy, polling every *check_interval*
    seconds."""
    while True:
        if predicate():
            return
        time.sleep(check_interval)
def mine_block():
    """Force the dev node to mine one block via the evm_mine RPC call."""
    connect()
    # Direct provider request: evm_mine is a test-RPC-only method, so it is
    # not exposed through the regular w3.eth API.
    w3.providers[0].make_request('evm_mine', params='')
def mine_blocks(num_blocks):
    """Mine *num_blocks* blocks, one at a time."""
    for _ in range(num_blocks):
        mine_block()
def mine_blocks_until(predicate):
    """Keep mining blocks until *predicate*() becomes truthy."""
    while True:
        if predicate():
            break
        mine_block()
def blockNumber():
    """Return the current chain height from the shared connection."""
    connect()
    return w3.eth.blockNumber
def run(func_or_funcs, args=()):
    """Execute work in parallel threads and block until all have finished.

    Either a list of functions (each paired with args[i] when *args* is a
    list, or the shared *args* otherwise), or a single function applied to
    every argument set in the *args* list.
    """
    workers = []
    if isinstance(func_or_funcs, list):
        for idx, fn in enumerate(func_or_funcs):
            fn_args = args[idx] if isinstance(args, list) else args
            # Wrap a bare value so Thread receives an argument tuple.
            if fn_args is not None and not isinstance(fn_args, tuple):
                fn_args = (fn_args,)
            workers.append(threading.Thread(target=fn, args=fn_args))
    else:
        assert isinstance(args, list)
        workers = [
            threading.Thread(target=func_or_funcs,
                             args=a if isinstance(a, tuple) else (a,))
            for a in args
        ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
|
22,320 | 4bcb9def6ad8e422fe8a1a31bdb7a88cd93c6b72 | import sys, os
import subprocess
import shlex
import fileinput
from src.utils import check_type, create_folder, writeLogFile
from src.exceptions import BlastDBException, MultiFastaException, MetaCVException
from src.ftp_functions import ftp_functions
class DBCreation:
    '''class for database creation functionalities - contains functions to generate multifasta
    files from downloaded content and run external scripts from the software suits blast and
    metacv for creation of the databases'''

    # Class-level defaults; the real values are set per instance in __init__.
    DB_OUT = None            # output folder for the generated database
    DOWNLOAD_FOLDER = None   # folder holding the downloaded fasta files
    BLAST_MF = None          # path of the blast multifasta input file
    METACV_MF = None         # path of the metacv multifasta input file
    DB_TYPE = None           # blast database type passed to makeblastdb
    PARSE_SEQIDS = True      # pass -parse_seqids / deduplicate GI numbers
    METACV = False           # also build a MetaCV database
    DEBUG = False            # verbose diagnostics
    EXECUTABLE = None        # path of the external makeblastdb/metacv binary

    def __init__(self, DB_OUT, DOWNLOAD_FOLDER, DB_TYPE, PARSE_SEQIDS, DEBUG, EXECUTABLE):
        self.DB_OUT = DB_OUT
        self.DOWNLOAD_FOLDER = DOWNLOAD_FOLDER
        self.DB_TYPE = DB_TYPE
        self.BLAST_MF = DOWNLOAD_FOLDER + os.sep + 'blast.multi.fasta'
        self.METACV_MF = DOWNLOAD_FOLDER + os.sep + 'metacv.multi.fasta'
        # Bug fix: the constructor used to hard-code True here, silently
        # ignoring the caller's PARSE_SEQIDS argument.
        self.PARSE_SEQIDS = PARSE_SEQIDS
        self.EXECUTABLE = EXECUTABLE
        self.DEBUG = DEBUG

    def set_METACV(self, var):
        self.METACV = var

    def get_METACV(self):
        return self.METACV

    def get_local_index(self, subfolder):
        '''generate an index of all local files needed for multifasta creation'''
        filelist = []
        # generate a list of all files contained in selected folders
        for folder in subfolder:
            for dir, subdir, filename in os.walk(self.DOWNLOAD_FOLDER + os.sep + folder):
                for item in filename:
                    if str(item).endswith(check_type(self.DB_TYPE)):
                        filelist.append(dir + os.sep + item)
        # check the found files for duplicated GI Numbers
        if self.PARSE_SEQIDS or self.METACV:
            file_list = {}
            #self.get_taxid_index()
            for item in filelist:
                with open(item, 'r') as f:
                    # assumes '>xx|GI|...' style headers -- the GI number is
                    # the second pipe-separated field of the first line
                    first_line = f.readline().split('|')[1]
                # keep only the first file seen for each GI number
                if first_line not in file_list:
                    file_list[first_line] = item
                else:
                    if self.DEBUG: sys.stdout.write("Remove GI " + first_line + "\n")
            # overwrite complete list with filtered list
            filelist = file_list.values()
        return filelist

    # def get_taxid_index(self):
    #     gi_map_file = self.DOWNLOAD_FOLDER + os.sep + 'gi_taxid_prot.dmp'
    #     if os.path.exists(gi_map_file):
    #         gi_map = []
    #         with open(gi_map_file) as f:
    #             gi_map.append()
    #     sys.stderr.out("\nError: file %s not found" % (gi_map_file))
    #     print gi_map

    # generate a multi fasta file consisting of all single fasta
    # files downloaded by pyBlastDB
    def createBlast_MF(self, subfolder):
        ''' generate a multifasta file from downloaded content, that will be used as input
        file for the external database creation script of blast'''
        # get a list of all suitable input files
        fasta_list = self.get_local_index(subfolder)
        try:
            sys.stdout.write("Create multifasta input database creation ...\n")
            # open the multifasta file
            with open(self.BLAST_MF, 'w') as fout:
                # open every file in fasta_list and write the content to multifasta file
                for line in fileinput.input(fasta_list):
                    fout.write(line)
            # return file location
            return self.BLAST_MF
        except Exception:
            raise MultiFastaException()

    def createMetaCV_MF(self, subfolder):
        '''generate a multifasta file from downloaded content, that will be used as input
        file for the external database creation script of MetaCV'''
        # get a list of all suitable input files
        fasta_list = self.get_local_index(subfolder)
        try:
            sys.stdout.write("Create multifasta input database creation ...\n")
            with open(self.METACV_MF, 'w') as fout:
                #update_progress(count)
                for line in fileinput.input(fasta_list):
                    fout.write(line)
            return self.METACV_MF
        except Exception:
            raise MultiFastaException()

    def get_parse_seqids_stmt(self):
        ''' test for parse_seqids argument and return the correct stmt'''
        # Bug fix: returning None made the shell command contain the literal
        # word "None" (via "%s" formatting); an empty string drops the flag.
        if self.PARSE_SEQIDS:
            return '-parse_seqids'
        return ''

    def createBlastDB(self, name):
        ''' wrapper for 'makeblastdb' script of the blast software suit'''
        # create output folder
        create_folder(self.DB_OUT)
        # select all downloaded folders as input for multifasta file creation
        subfolder = '.'
        # create multifasta file as input for 'makeblastdb'
        multi_fasta = self.createBlast_MF(subfolder)
        try:
            sys.stdout.write("Create BlastDB ...\n")
            # run 'makeblastdb'
            p = subprocess.Popen(shlex.split("%s -in %s -dbtype %s -input_type %s -title %s -out %s %s"
                                             % (self.EXECUTABLE,
                                                multi_fasta,
                                                self.DB_TYPE,
                                                'fasta',
                                                name,
                                                self.DB_OUT + os.sep + name,
                                                self.get_parse_seqids_stmt())),
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            p.wait()
            # print statistics
            sys.stdout.write("Creation of %s BlastDB successfull!%s\nDatabase Location: %s\n" %
                             (self.DB_TYPE, p.stdout.read(), self.DB_OUT + os.sep + name))
        except Exception:
            raise BlastDBException()

    def get_taxonomy(self):
        '''checks if taxonomical annotation for metacv database is existing and actual
        if not the files will be downloaded from ncbi ftp server'''
        # FTP Server information
        ncbiFTP = 'ftp.ncbi.nih.gov'
        taxonomy = '/pub/taxonomy/'
        ncbi_files = ['gi_taxid_prot.dmp.gz', 'taxdump.tar.gz']
        files = []
        # establish connection
        ncbi = ftp_functions(ncbiFTP, taxonomy, self.DOWNLOAD_FOLDER, self.DEBUG)
        ncbi.connect()
        # go to taxonomy dir
        ncbi.go_down(taxonomy)
        for item in ncbi_files:
            # download actual files and extract needed files
            if item in ncbi_files[0]:
                files.append(ncbi.get_gi_map(item))
            else:
                [files.append(x) for x in ncbi.get_taxdump(item)]
        # close connection
        ncbi.close()
        return files

    def get_functional_annotation(self):
        '''checks if functional annotation if function annotion exists and is actual
        if not the annotation will be downloaded from uniprot ftp server'''
        # FTP Server information
        uniprotFTP = 'ftp.uniprot.org'
        functional = 'pub/databases/uniprot/current_release/knowledgebase/idmapping'
        idmapping = 'idmapping.dat.gz'
        # establish connection
        # Bug fix: the original referenced the undefined global DOWNLOAD_FOLDER
        # (NameError at runtime); use the instance attribute.
        uniprot = ftp_functions(uniprotFTP, functional, self.DOWNLOAD_FOLDER, self.DEBUG)
        uniprot.connect()
        # go to functional dir
        uniprot.go_down(functional)
        # download file and extract it
        idmapping = uniprot.get_idmapping(idmapping)
        # close connection
        uniprot.close()
        return idmapping

    def createMetaCVDB(self, name, subfolder):
        '''wrapper for "metacv formatdb" script to generate a MetaCV database'''
        # check additional_files and get location
        taxonomy = self.get_taxonomy()
        #functional = self.get_functional_annotation()
        create_folder(self.DB_OUT)
        # generate multifasta file
        multi_fasta = self.createMetaCV_MF(subfolder)
        # needed to for run the external script
        full_path_exe = os.path.abspath(self.EXECUTABLE)
        try:
            sys.stdout.write("Create MetaCV DB ...\n")
            # metacv cannot pipe the output to other folder, so it
            # have to be run in the same older as the output
            os.chdir(self.DB_OUT)
            # start metacv formatdb with standard parameter
            p = subprocess.Popen(shlex.split("%s formatdb %s %s %s"
                                             % (full_path_exe,
                                                '../' + (multi_fasta),
                                                ' '.join(map(str, taxonomy)),
                                                name)))
            p.wait()
            # print statistics
            sys.stdout.write("Creation of MetaCV DB successfull!\nDatabase Location: %s\n" %
                             (self.DB_OUT + os.sep + name))
        except Exception:
            # Bug fix: the exception was instantiated but never raised,
            # silently swallowing every failure.
            raise MetaCVException()
|
class Solution:
    def leastBricks(self, wall: List[List[int]]) -> int:
        """Return the minimum number of bricks a vertical line must cross.

        For each row, count the positions of the internal brick edges (the
        wall's outer borders are excluded by stopping before the last
        brick).  The best vertical line passes through the most common edge
        position.  O(total bricks) time, O(distinct edges) space.
        """
        edge_counts = {}
        for row in wall:
            position = 0
            # slice off the last brick so the right border never counts
            for width in row[:-1]:
                position += width
                edge_counts[position] = edge_counts.get(position, 0) + 1
        # default=0 replaces the original `if not edges.keys()` branch:
        # a wall of single-brick rows has no inner edges at all
        return len(wall) - max(edge_counts.values(), default=0)
|
import copy
import multiprocessing
import random
from deap import creator, base, tools, algorithms
from train import Train
# Fitness weight order matches the 4-tuple returned by eval_gem:
# (control cost, success flag, final speed, |distance - position|) —
# minimise cost/speed/position error, maximise success.
creator.create("FitnessMin", base.Fitness, weights=(-1.0, 1.0, -0.1, -0.1))
creator.create("Individual", list, fitness=creator.FitnessMin)
toolbox = base.Toolbox()
# genes are initialised to the constant 1 (randint over [1, 1])
toolbox.register("attr_bool", random.randint, 1, 1)
toolbox.register("individual", tools.initRepeat, creator.Individual, toolbox.attr_bool, n=81)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
# shared simulator instance; eval_gem deep-copies it for each evaluation
t = Train(distance=3200)
print(t.control(t.ascii2list('')))
def eval_gem(individual):
    # deep copy so pooled evaluations never share mutable simulator state
    tc = copy.deepcopy(t)
    return tc.control(individual), tc.is_success(), tc.speed, abs(tc.distance - tc.position)
toolbox.register("evaluate", eval_gem)
toolbox.register("mate", tools.cxTwoPoint)
toolbox.register("mutate", tools.mutUniformInt, low=-2, up=2, indpb=0.2)
toolbox.register("select", tools.selTournament, tournsize=30)
if __name__ == "__main__":
    # evaluate individuals in parallel across 8 worker processes
    pool = multiprocessing.Pool(processes=8)
    toolbox.register("map", pool.map)
    population = toolbox.population(n=2400)
    print(" Evaluated %i individuals" % len(population))
    gen = 0
    while True:
        gen += 1
        # crossover + mutation, then (re)score every offspring
        offspring = algorithms.varAnd(population, toolbox, cxpb=0.5, mutpb=0.2)
        fits = toolbox.map(toolbox.evaluate, offspring)
        for fit, ind in zip(fits, offspring):
            ind.fitness.values = fit
        population = toolbox.select(offspring, k=len(population))
        if gen % 10 == 0:
            # periodic progress report showing the current best genome
            best_ind = tools.selBest(population, 1)[0]
            print('%d\t[%s]\t[%s]' % (gen, Train.list2ascii(best_ind), best_ind.fitness.values))
        if gen > 1000:
            break
    pool.close()
    print("-- End of (successful) evolution --")
    best_ind = tools.selBest(population, 1)[0]
    print("Best individual is %s, %s" % (best_ind, best_ind.fitness.values))
22,323 | bbc709676589b943f9c61d0dcb654f9b3906882a | ### 商品クラス
import pandas as pd
import datetime
import eel
class Item:
    """A single product (商品情報): code, display name and unit price."""

    def __init__(self, item_code, item_name, price):
        self.item_code = item_code  # product code, e.g. "001"
        self.item_name = item_name  # display name
        self.price = price          # unit price in yen

    def get_price(self):
        """Return the unit price."""
        return self.price
### オーダークラス
class Order:
    """Collects who ordered which products, backed by an Item master list."""

    def __init__(self, item_master):
        self.ordering_party_list = []   # names of the people ordering
        self.item_order_list = []       # ordered product codes
        self.item_quantity_list = []    # quantities, parallel to item_order_list
        self.item_master = item_master  # list of Item

    def add_ordering_party(self, ordering_party):
        """Record the name of the person placing the order."""
        self.ordering_party_list.append(ordering_party)

    def add_item_order(self, item_code):
        """Record a product code."""
        self.item_order_list.append(item_code)

    def add_quantity_order(self, item_quantity):
        """Record an order quantity."""
        self.item_quantity_list.append(item_quantity)

    def view_item_list(self, item_dict, deposit_amount):
        """Format the receipt message (price, deposit and change).

        NOTE(review): the return sits inside the loop, so only the first
        (code, quantity) pair is ever reported — kept as-is to preserve
        behaviour; confirm whether an aggregated receipt was intended.
        Returns None when no items were ordered.
        """
        for code, qty in zip(self.item_order_list, self.item_quantity_list):
            total = int(item_dict[code][1]) * int(qty)
            change = int(deposit_amount) - total
            return "商品コード:{}が{}個で{}円になります。お預かりしている金額は{}円なので{}円お返しいたします".format(code, qty, total, deposit_amount, change)
### メイン処理
def main():
    """Interactive POS flow: build the master, take one order, save a receipt."""
    # マスタ登録 — product master
    item_master = [
        Item("001", "りんご", 100),
        Item("002", "なし", 120),
        Item("003", "みかん", 150),
    ]
    # ask the user for the order details
    ordering_party = input('お名前をお願いします:')
    item_code = input('購入予定商品の商品コードの入力をしてください。例:002:')
    item_quantity = input('欲しい個数を入力してください。例:2')
    deposit_amount = input('入金額を入力してください:')
    # master as {code: [name, price]} for quick lookup
    item_dict = {item.item_code: [item.item_name, item.price] for item in item_master}
    print(f'商品コード{item_code}は{item_dict[item_code][0]}で{item_dict[item_code][1]}円です')
    # dump the product master to a CSV file
    csv_data = {
        'code': [item.item_code for item in item_master],
        'name': [item.item_name for item in item_master],
        'price': [item.price for item in item_master],
    }
    df = pd.DataFrame(csv_data)
    df.to_csv('/Users/toguchitaichi/Desktop/study-04-pos-system-01-master/masta.csv', encoding='utf-8')
    # register the order
    order = Order(item_master)
    order.add_ordering_party(ordering_party)
    order.add_item_order(item_code)
    order.add_quantity_order(item_quantity)
    # write the receipt to a timestamped text file
    stamp = datetime.datetime.now().strftime('%Y年%m月%d日 %H:%M:%S')
    with open(stamp + '.txt', mode='w') as f:
        f.write(order.view_item_list(item_dict, deposit_amount))


main()
22,324 | 4e071c6e39b64cddaf417002101917394f598ef9 | from django import forms
from django.utils.safestring import mark_safe
from django.template.loader import render_to_string
from django.forms.utils import flatatt
class MarkdownWidget(forms.Textarea):
    """Textarea rendered through the widget/markdown.html template."""

    def render(self, name, value, attrs=None, renderer=None):
        """Render the widget via the markdown template.

        `renderer` is accepted (and ignored) for compatibility with
        Django >= 2.1, where Widget.render() is called with a renderer
        argument; without it the call raises TypeError.

        NOTE(review): build_attrs(attrs, name=name) matches the pre-1.11
        signature; on Django >= 1.11 build_attrs takes (base_attrs,
        extra_attrs) — verify against the project's Django version.
        """
        value = value or ""
        final_attrs = self.build_attrs(attrs, name=name)
        return mark_safe(render_to_string("widget/markdown.html", {"attrs": flatatt(final_attrs), "value": value}))
class MarkdownFormField(forms.CharField):
    """CharField that always renders with MarkdownWidget.

    The widget is assigned after super().__init__ so it overrides any
    widget passed by the caller — this appears intentional (the field
    exists to force the markdown editor).
    """
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.widget = MarkdownWidget()
|
from django.contrib import admin
from django.urls import path, include
from django.views.generic import TemplateView, RedirectView
from collection import views
from collection.backends import MyRegistrationView
from django.contrib.auth.views import (
    PasswordResetView, PasswordResetDoneView,
    PasswordResetConfirmView, PasswordResetCompleteView,
)

urlpatterns = [
    path('', views.index, name='home'),
    path('about/',
         TemplateView.as_view(template_name='about.html'),
         name='about'),
    path('contact/',
         TemplateView.as_view(template_name='contact.html'),
         name='contact'),
    # legacy URLs redirect permanently to the canonical browse page
    path('blogs/', RedirectView.as_view(pattern_name='browse', permanent=True)),
    path('blogs/<slug>/', views.blog_detail,
         name='blog_detail'),
    path('blogs/<slug>/edit/',
         views.edit_blog, name='edit_blog'),
    path('browse/', RedirectView.as_view(pattern_name='browse', permanent=True)),
    path('browse/name/',
         views.browse_by_name, name='browse'),
    path('browse/name/<initial>/',
         views.browse_by_name, name='browse_by_name'),
    path('accounts/', include('registration.backends.simple.urls')),
    path('accounts/password/reset/',
         PasswordResetView.as_view(template_name='registration/password_reset_form.html'),
         name="password_reset"),
    # BUG FIX: this route must use PasswordResetDoneView; the original
    # instantiated PasswordResetView here, re-rendering the reset form
    # instead of the "email sent" confirmation page.
    path('accounts/password/reset/done/',
         PasswordResetDoneView.as_view(template_name='registration/password_reset_done.html'),
         name="password_reset_done"),
    path('accounts/password/reset/<uidb64>/<token>/',
         PasswordResetConfirmView.as_view(template_name='registration/password_reset_confirm.html'),
         name="password_reset_confirm"),
    path('accounts/password/done/',
         PasswordResetCompleteView.as_view(template_name='registration/password_reset_complete.html'),
         name="password_reset_complete"),
    path('accounts/register/',
         MyRegistrationView.as_view(), name='registration_register'),
    path('accounts/create_blog/',
         views.create_blog, name='registration_create_blog'),
    # (duplicate "path('accounts/', include('registration.backends.simple.urls'))"
    # removed — the identical include above always matched first)
    path('admin/', admin.site.urls),
]
|
22,326 | d97e6d131d0d03ca4ebe16c49344de5b70835942 | """Image collector module."""
import hashlib
import logging
import mimetypes
import os
import re
from typing import Dict, Iterable, List, Optional, Tuple, Union
import requests
from lxml import etree
class ImageNotFound(Exception):
    """Raised when an image cannot be loaded from URL, disk or replacement."""
class CollectImages:
    """Find links to images in HTML code and create a list of contents.

    Images referenced by <img> / <input type="image"> tags are loaded from
    URLs or local folders (optionally through a cache), de-duplicated by
    content hash and rewritten to cid: references suitable for embedding
    in multipart e-mail.
    """

    def __init__(self, cache=None, folders_root: List[str] = None,
                 requests_timeout: Union[int, Tuple[int, int]] = None) -> None:
        self.pretty_print = False
        # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
        self.requests_timeout = requests_timeout
        # cache must expose .get(key) / .set(key, value) (duck-typed)
        self.cache = cache
        self.folders_root = ["."] if folders_root is None else folders_root

    def log_error(self, error: Exception) -> None:
        """Log the error (subclasses may extend this hook)."""
        logging.error(error)

    def conditionally_raise(self, error: ImageNotFound) -> None:
        """Hook for subclasses: raise *error* when strict behaviour is wanted.

        The base implementation swallows the error so missing images are
        skipped rather than fatal.
        """

    def get_replacement_file(self, path) -> Optional[bytes]:
        """Get replacement file content when the original is missing."""
        return None

    def cache_set(self, key: str, value: bytes) -> None:
        """Set value to the cache (no-op without a cache)."""
        if self.cache is not None:
            self.cache.set(key, value)

    def cache_get(self, key: str) -> Optional[bytes]:
        """Get value from the cache (None without a cache or on miss)."""
        if self.cache is not None:
            return self.cache.get(key)
        return None

    def load_file_from_url(self, url: str) -> bytes:
        """Load file content from *url*, consulting the cache first."""
        cached_content = self.cache_get(url)
        if cached_content is not None:
            return cached_content
        try:
            req = requests.get(url, timeout=self.requests_timeout)
            req.raise_for_status()
            content = req.content
            self.cache_set(url, content)
        except requests.RequestException as err:
            self.log_error(err)
            repl_content = self.get_replacement_file(url)
            if repl_content is None:
                # chain the network error explicitly for debugging
                raise ImageNotFound(err) from err
            content = repl_content
        return content

    def load_file_from_folders(self, path: str) -> bytes:
        """Load file content from the first root folder containing *path*."""
        for root in self.folders_root:
            fullpath = os.path.join(root, path)
            if os.path.isfile(fullpath):
                with open(fullpath, "rb") as handle:
                    return handle.read()
        content = self.get_replacement_file(path)
        if content is not None:
            return content
        # include the missing path so the error is actionable
        raise ImageNotFound(path)

    def load_file(self, src: str) -> bytes:
        """Load image content from a URL or from the root folders."""
        if re.match("https?://", src):
            content = self.load_file_from_url(src)
        else:
            content = self.load_file_from_folders(src)
        return content

    def init_cid(self) -> None:
        """Initialize the counter used to generate content IDs."""
        self.position = 0

    def get_next_cid(self) -> str:
        """Get next CID ("img1", "img2", ...) for related content."""
        self.position += 1
        return "img{}".format(self.position)

    def _get_mime_type(self, path: str) -> List[str]:
        """Guess [maintype, subtype] for *path*; empty strings when unknown."""
        ctype = mimetypes.guess_type(path)[0]
        if ctype is None or "/" not in ctype:
            return ["", ""]
        return ctype.split('/', 1)

    def collect_images(self, html_body: str, encoding: str = "UTF-8") -> Tuple[str, List[Tuple[str, str, str, bytes]]]:
        """Collect images from html code.

        Return html with image src=cid and a list of tuples
        (maintype, subtype, cid, imagebytes).  Identical image contents
        share one CID; unloadable images are skipped (or raised, per
        conditionally_raise).
        """
        images = []
        reader = etree.HTMLParser(recover=True, encoding=encoding)
        root = etree.fromstring(html_body, reader)
        self.init_cid()
        same_content = {}  # type: Dict[bytes, str]
        # Search elements <img src="..."> and <input type="image" src="...">
        for image in root.xpath("//img | //input[@type='image']"):
            image_src = image.attrib["src"]
            try:
                image_content = self.load_file(image_src)
            except ImageNotFound as err:
                self.log_error(err)
                self.conditionally_raise(err)
                continue
            # md5 is used only for de-duplication, not security
            content_hash = hashlib.md5(image_content).digest()
            if content_hash in same_content:
                cid = same_content[content_hash]
            else:
                cid = self.get_next_cid()
                same_content[content_hash] = cid
            maintype, subtype = self._get_mime_type(image_src)
            images.append((maintype, subtype, cid, image_content))
            image.attrib["src"] = "cid:{}".format(cid)
        html_content = etree.tostring(root, encoding=encoding, pretty_print=self.pretty_print)
        return html_content.decode(encoding), images

    def collect_attachments(self, paths_or_urls: Iterable[str]) -> List[Tuple[str, str, str, bytes]]:
        """Collect attachment contents from paths or urls.

        Duplicate contents (by md5) are included only once.
        """
        attachments = []
        # PERF: set membership is O(1); the original list made the
        # duplicate check quadratic over many attachments
        seen_hashes = set()
        for src in paths_or_urls:
            try:
                content = self.load_file(src)
            except ImageNotFound as err:
                self.log_error(err)
                self.conditionally_raise(err)
                continue
            content_hash = hashlib.md5(content).digest()
            if content_hash in seen_hashes:
                continue
            seen_hashes.add(content_hash)
            maintype, subtype = self._get_mime_type(src)
            filename = os.path.basename(src)
            attachments.append((maintype, subtype, filename, content))
        return attachments
|
22,327 | 0be1fbc6cc6af9279588630633d6fe67177aa3b2 | # encoding: utf-8
import time
import math
from random import random, randint
import numpy as np
def es(fitness, bounds_min, bounds_max, mu, lambda_, dimension, sigma_init=1, sigma_min=float('-inf'), tau=None, maxiter=float('inf'), max_execution_time=float('inf')):
    """(mu + lambda) evolution strategy minimising *fitness*.

    Individuals are (x, sigma, fitness(x)) triples with one self-adapted
    step size per individual.  Terminates when the best sigma drops below
    *sigma_min*, *maxiter* generations have run, or *max_execution_time*
    seconds have elapsed.  Returns (best_individual, fitness_evolution),
    where fitness_evolution records the best fitness after each generation.
    """
    if not tau:
        # standard ES learning rate 1/sqrt(2*n) for the sigma update
        tau = 1/math.sqrt(2*dimension)
    population_x = np.random.uniform(bounds_min, bounds_max, size=(1, mu, dimension))[0]
    population = [(xi, sigma_init, fitness(xi)) for xi in population_x]
    iterations = 0
    start_time = time.time()
    fitness_evolution = []
    while True:
        for l in range(lambda_):
            recombinant = recombine(population, mu, fitness)
            # step-size self-adaptation
            # NOTE(review): random() is uniform on [0, 1), not the usual
            # N(0, 1) of log-normal self-adaptation — confirm intent
            offspring_individual_sigma = recombinant[1] * math.exp(tau*random())
            mutation = offspring_individual_sigma*np.random.randn(1,dimension)[0]
            offspring_individual_x = recombinant[0]+mutation
            #print mutation
            offspring_individual_fitness = fitness(offspring_individual_x)
            population.append((offspring_individual_x, offspring_individual_sigma, offspring_individual_fitness))
        # (mu + lambda) survivor selection: keep the mu best of parents+offspring
        population = sort_poulation(population, mu)
        iterations += 1
        fitness_evolution.append(population[0][2])
        if population[0][1] < sigma_min or maxiter < iterations or start_time+max_execution_time < time.time():
            return population[0], fitness_evolution
def recombine(population, mu, fitness):
    """Build one recombinant from the mu best parents.

    Discrete recombination: every coordinate is copied from a uniformly
    chosen parent; the strategy parameter is the mean sigma of the chosen
    parents.  Returns the usual (x, sigma, fitness) triple.
    """
    parents = sort_poulation(population, mu)
    n_dims = len(parents[0][0])
    child_x = []
    sigma_sum = 0
    for coord in range(n_dims):
        donor = parents[randint(0, mu - 1)]
        child_x.append(donor[0][coord])
        sigma_sum += donor[1]
    return (child_x, sigma_sum / mu, fitness(child_x))
def sort_poulation(population, mu):
    """Return the mu fittest individuals, ranked by ascending fitness.

    Individuals are (x, sigma, fitness) triples; lower fitness is better.
    (Function name kept as-is — callers use this spelling.)
    """
    ranked = sorted(population, key=lambda individual: individual[2])
    return ranked[:mu]
if __name__ == '__main__':
    def rastrigin(x):
        """Rastrigin benchmark function (global minimum 0 at the origin)."""
        n = len(x)
        value = 10*n + sum([x[i]**2 - 10*math.cos(2*math.pi*x[i]) for i in range(n)])
        return value
    result = es(fitness=rastrigin, bounds_min=-5.12, bounds_max=5.12, mu=20, lambda_=5, dimension=5, maxiter=200, sigma_init=20)
    import matplotlib.pyplot as plt
    plt.plot(range(len(result[1])), result[1])
    # BUG FIX: "print result[0]" is Python 2 statement syntax and a
    # SyntaxError on Python 3; the call form works on both versions.
    print(result[0])
    plt.savefig('es.png')
|
22,328 | 714d0effd6891b675f564fcc0ea5fa53d129c893 | from django.contrib import admin
from api.models import Item, Preference, \
Supplier, SupplierRating
# @admin.register(Movie)
# class MovieAdmin(admin.ModelAdmin):
# fields = ('title', 'description')
# list_display = ['title', 'description']
# search_fields = ('title', 'description')
#
#
# @admin.register(Rating)
# class RatingAdmin(admin.ModelAdmin):
# fields = ('user', 'movie', 'stars')
# list_display = ['user', 'movie', 'stars']
# search_fields = ('movie',)
#kraya
@admin.register(Item)
class ItemAdmin(admin.ModelAdmin):
    """Admin config for Item: edit-form fields, list columns and search."""
    fields = ('user', 'name', 'description', 'uom', 'target_price', 'currency')
    list_display = ['user', 'name', 'description', 'uom', 'target_price', 'currency']
    search_fields = ('uom', 'currency')
@admin.register(Preference)
class PreferenceAdmin(admin.ModelAdmin):
    """Admin config for Preference (no search fields configured yet)."""
    fields = ('item', 'delivery_address', 'note_to_buyer', 'emergency_contact')
    list_display = ['item', 'delivery_address', 'note_to_buyer', 'emergency_contact']
    # search_fields = ('')
@admin.register(Supplier)
class SupplierAdmin(admin.ModelAdmin):
    """Admin config for Supplier: searchable by location/category/success."""
    fields = ('item', 'name', 'country', 'city', 'category', 'success_rate', 'description')
    list_display = ['item', 'name', 'country', 'city', 'category', 'success_rate', 'description']
    search_fields = ('country', 'city', 'category', 'success_rate')
@admin.register(SupplierRating)
class SupplierRatingAdmin(admin.ModelAdmin):
    """Admin config for SupplierRating: user/supplier/stars columns."""
    fields = ('user', 'supplier', 'stars')
    list_display = ['user', 'supplier', 'stars']
    search_fields = ('supplier',)
|
22,329 | 4a87a7a62b35db869ac9e6f360e2e4c93c851e9f | # -*- coding: utf-8 -*-
"""
Network Motifs
===============
Simple network motifs in Networkx.DiGraph format that can be directly loaded.
"""
# Copyright (C) 2021 by
# Alex Gates <ajgates@gmail.com>
# Rion Brattig Correia <rionbr@gmail.com>
# All rights reserved.
# MIT license.
import networkx as nx
def network_motif(name=None):
    """Graph motifs from :cite:`Milo:2012`.

    Args:
        name (string): The name of the motif.
            Possible values are : ``FeedForward``, ``Fan``, ``FeedForwardSelf1``,
            ``FeedForwardSelf2``, ``FeedForwardSelf3``, ``FeedForwardSelf123``,
            ``BiFan``, ``CoRegulated``, ``CoRegulating``, ``BiParallel``,
            ``TriParallel``, ``Dominating4``, ``Dominating4Undir``, ``3Loop``,
            ``4Loop``, ``3LoopSelf123``, ``FourLoop``, ``FourCoLoop``,
            ``DirectedTwoLoop``, ``BiParallelLoop``, ``5Chain``, ``3Chain``,
            ``KeffStudy3``, ``KeffStudy4``, ``CoRegulatedSelf``, ``KeffLine4``,
            ``KeffLineLoop4``, ``3Full``, ``6Pyramid``, ``4Split``, ``5BiParallel``,
            ``6BiParallelDilation``, ``6BiParallelDilationLoop``, ``5combine``, ``4tree``.

    Returns:
        (networkx.DiGraph) : The directed graph motif.

    Raises:
        TypeError: If *name* is not one of the motifs listed above.
    """
    # Each motif is an explicit edge list (same edges, same insertion order
    # as the original branch-per-motif implementation).
    motif_edges = {
        "FeedForward": [(0, 1), (0, 2), (1, 2)],
        "Fan": [(0, 1), (0, 2)],
        "FeedForwardSelf1": [(0, 1), (0, 2), (1, 2), (0, 0)],
        "FeedForwardSelf2": [(0, 1), (0, 2), (1, 2), (1, 1)],
        "FeedForwardSelf3": [(0, 1), (0, 2), (1, 2), (2, 2)],
        "FeedForwardSelf123": [(0, 1), (0, 2), (1, 2), (0, 0), (1, 1), (2, 2)],
        "BiFan": [(0, 2), (0, 3), (1, 2), (1, 3)],
        "CoRegulated": [(0, 1), (0, 2), (1, 2), (2, 1)],
        "CoRegulating": [(0, 1), (0, 2), (1, 0), (1, 2)],
        "BiParallel": [(0, 1), (0, 2), (1, 3), (2, 3)],
        "TriParallel": [(0, 1), (0, 2), (1, 3), (2, 3), (0, 3)],
        "Dominating4": [(0, 1), (0, 2), (0, 3), (1, 2), (2, 3), (3, 1)],
        "Dominating4Undir": [(0, 1), (0, 2), (0, 3), (1, 0), (2, 0), (3, 0),
                             (1, 2), (2, 3), (3, 1)],
        "3Loop": [(0, 1), (1, 2), (2, 0)],
        "4Loop": [(0, 1), (1, 2), (2, 3), (3, 0)],
        "3LoopSelf123": [(0, 1), (1, 2), (2, 0), (0, 0), (1, 1), (2, 2)],
        "FourLoop": [(0, 1), (1, 2), (2, 3), (3, 1)],
        "FourCoLoop": [(0, 1), (0, 2), (1, 2), (2, 3), (3, 1)],
        "DirectedTwoLoop": [(0, 1), (1, 0), (2, 3), (3, 2), (0, 2), (1, 3)],
        "BiParallelLoop": [(0, 1), (0, 2), (1, 3), (2, 3), (3, 0)],
        "5Chain": [(0, 1), (1, 2), (2, 3), (3, 4)],
        "3Chain": [(0, 1), (1, 2)],
        "KeffStudy3": [(0, 1), (1, 0), (0, 2), (2, 0)],
        "KeffStudy4": [(0, 1), (1, 0), (0, 2), (2, 0), (0, 3), (3, 0)],
        "CoRegulatedSelf": [(0, 1), (0, 2), (1, 2), (2, 1), (0, 0)],
        "KeffLine4": [(0, 1), (0, 3), (1, 2), (1, 3), (2, 3)],
        "KeffLineLoop4": [(0, 1), (0, 3), (1, 2), (1, 3), (2, 3), (3, 0)],
        "3Full": [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2),
                  (2, 0), (2, 1), (2, 2)],
        "6Pyramid": [(0, 1), (0, 2), (1, 3), (1, 4), (1, 5),
                     (2, 3), (2, 4), (2, 5)],
        "4Split": [(0, 1), (0, 2), (1, 3), (2, 3), (0, 3)],
        "5BiParallel": [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4)],
        "6BiParallelDilation": [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4),
                                (2, 5), (3, 5), (4, 5)],
        "6BiParallelDilationLoop": [(0, 2), (0, 3), (0, 4), (1, 2), (1, 3), (1, 4),
                                    (2, 5), (3, 5), (4, 5), (5, 1)],
        "5combine": [(0, 1), (1, 2), (0, 2), (1, 3), (3, 0), (3, 4), (4, 2)],
        "4tree": [(0, 1), (1, 2), (1, 3)],
    }
    edges = motif_edges.get(name)
    if edges is None:
        # same exception type and message as the original implementation
        raise TypeError('The motif name could not be found.')
    graph = nx.DiGraph()
    graph.add_edges_from(edges)
    return graph
|
22,330 | b4945ec53df29ace216bc010def240e7b2326aa9 | # Generated by Django 2.0.1 on 2019-03-03 12:50
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration (Django 2.0.1, 2019-03-03).

    Relaxes nullability/choices on Product fields and widens
    PromoCard.type choices.  NOTE: generated file — prefer regenerating
    with `makemigrations` over hand edits.
    """
    dependencies = [
        ('shop', '0002_auto_20190302_1746'),
    ]
    operations = [
        migrations.AlterField(
            model_name='product',
            name='category',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category'),
        ),
        migrations.AlterField(
            model_name='product',
            name='condition',
            field=models.CharField(blank=True, choices=[('U', 'Used'), ('N', 'New')], default='N', max_length=10, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='item_status',
            field=models.CharField(choices=[('I', 'In Stock'), ('S', 'Subscription Only'), ('O', 'Out of Stock')], default='O', max_length=20),
        ),
        migrations.AlterField(
            model_name='promocard',
            name='type',
            field=models.CharField(choices=[('coverpage_bottom', 'Coverpage Bottom'), ('banner_right', 'Banner Right'), ('banner_left', 'Banner Left'), ('coverpage_center', 'Coverpage Center'), ('coverpage_top', 'Coverpage Top')], default='coverpage_top', max_length=100),
        ),
    ]
|
22,331 | b0545df97554d70045b4d15ecd2d8601b937012c | import json
from pprint import pprint
import psycopg2
from psycopg2 import sql
class DataHandler:
    """PostgreSQL persistence layer for scraped article records.

    NOTE(review): connection parameters are hard-coded below — move them
    to environment variables or a config file before deploying.
    """

    COMMANDS = {'create_table' : """
                        CREATE TABLE articles (
                            article_id SERIAL PRIMARY KEY,
                            title VARCHAR(255) NOT NULL,
                            publish_date VARCHAR(255),
                            summary TEXT,
                            link VARCHAR(255),
                            authors TEXT ARRAY,
                            source VARCHAR(255),
                            literature_type VARCHAR(255)
                            );
                        """,
                'delete_table' : """
                        DROP TABLE articles;
                        """,
                'check_existence' : """
                        SELECT EXISTS (
                            SELECT 1
                            FROM pg_tables
                            WHERE schemaname = 'public'
                            AND tablename = 'articles'
                            );
                        """,
                'update_table' : """
                        INSERT INTO articles(title, publish_date, summary, link, authors, source, literature_type) VALUES(%s, %s, %s, %s, %s, %s, %s);
                        """,
                'select_all' : """
                        SELECT * FROM articles;
                        """,
                'check_if_such_already_exists': ("""
                        select exists ( select title from articles where {} = %(value)s);
                        """),
                'select_column': """
                        SELECT {} FROM articles;
                        """,
                }

    def connect_to_db(self):
        """Open and return a psycopg2 connection; re-raise on failure."""
        try:
            connection = psycopg2.connect(user = "valentin",
                                    password = "mydb",
                                    host = "127.0.0.1",
                                    port = "5432",
                                    database = "valentin")
            print(connection.get_dsn_parameters(), "\n")
        except (Exception, psycopg2.Error) as error:
            print("Error while connecting " , error)
            # BUG FIX: the original fell through to `return connection`,
            # raising UnboundLocalError and masking the real failure
            raise
        return connection

    def db_set_up(self, connection):
        """(Re)create the articles table: drop if present, then create."""
        cursor = connection.cursor()
        cursor.execute(self.COMMANDS['check_existence'])
        if cursor.fetchone()[0]:
            cursor.execute(self.COMMANDS['delete_table'])
            print('table deleted')
        cursor.execute(self.COMMANDS['create_table'])
        print('table was created')
        connection.commit()
        cursor.close()

    def db_select_all(self, connection):
        """Pretty-print every row of the articles table."""
        cursor = connection.cursor()
        cursor.execute(self.COMMANDS['select_all'])
        pprint(cursor.fetchall())
        cursor.close()

    def db_select_column(self, connection, column):
        """Pretty-print a single column (identifier safely quoted)."""
        cursor = connection.cursor()
        cursor.execute(sql.SQL(self.COMMANDS['select_column']).format(sql.Identifier(column)))
        pprint(cursor.fetchall())
        cursor.close()

    def db_check_if_record_exists(self, connection, column, value):
        """Return True if a row with column == value already exists."""
        cursor = connection.cursor()
        cursor.execute(sql.SQL(self.COMMANDS['check_if_such_already_exists']).format(sql.Identifier(column)), {'value' : value})
        # close the cursor before returning (the original leaked it)
        exists = cursor.fetchone()[0]
        cursor.close()
        return exists

    def db_update(self, connection, api_output):
        """Insert new records from api_output, skipping duplicate titles.

        api_output maps keys to per-record dicts whose value order matches
        the INSERT column order (title first).
        """
        cursor = connection.cursor()
        duplicate_counter = 0
        for _, data_dict in api_output.items():
            record_to_write = list(data_dict.values())
            record_title = record_to_write[0]
            if self.db_check_if_record_exists(connection, 'title', record_title):
                duplicate_counter += 1
                continue
            cursor.execute(self.COMMANDS['update_table'], tuple(record_to_write))
        print('table updated')
        print(duplicate_counter, ' duplicates were found')
        connection.commit()
        cursor.close()

    def write_to_file(self, dict_data):
        """Dump dict_data as JSON and print a per-source result count."""
        with open('json_data/query_results.json', 'w') as output:
            json.dump(dict_data, output)
        print('written into file')
        for source, source_records in dict_data.items():
            # len() replaces the original manual counting loop
            count_of_results = len(source_records)
            print(source + ' has ' + str(count_of_results) + ' results')
def calculate_years(principal, interest, tax, desired):
    """Count whole years until *principal* grows to at least *desired*.

    Each year the balance earns principal * interest, from which *tax*
    (a fraction of the interest only) is deducted.  Returns 0 when the
    target is already met.
    """
    years = 0
    while principal < desired:
        gross = principal * interest
        # same arithmetic order as the original to keep float results identical
        principal = principal + (gross - gross * tax)
        years += 1
    return years


print(calculate_years(1000, 0.05, 0.18, 1100))
|
22,333 | 14565abf9ce8c6d99f540a5478e8b8269d2a53b7 | #!usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:Liujj
@file: ExceptionTest.py
@time: 2018/04/25
"""
def temp_convert(var):
    """Convert *var* to int; on failure print a notice and return None."""
    try:
        return int(var)
    except ValueError as err:
        # message string kept byte-identical to the original output
        print("参数没有包含数字\n", err)


# exercise the failure path
temp_convert("xy")
22,334 | 43c27bc0fd88b561342b2f8e0d08b201a5c54eba | from operator import itemgetter
from django.test import TestCase, Client
from django.conf import settings
from rest_framework import status
from .models import Post
from .views import PostSerializer
class PostTestCase(TestCase):
    """API tests for /posts: pagination, ordering, limit/offset validation.

    NOTE(review): the expected title sequences below contain exactly five
    entries, implicitly assuming REST_FRAMEWORK['PAGE_SIZE'] == 5 —
    confirm against settings before changing the page size.
    """
    def setUp(self):
        # 30 sequentially numbered posts: Title 1 .. Title 30
        self.client = Client()
        posts = [Post(story_id=i + 1, title='Title {}'.format(i + 1), url='https://example.com/{}'.format(i + 1)) for i in range(30)]
        Post.objects.bulk_create(posts, ignore_conflicts=False)
    def test_default_posts_count(self):
        # default response is one page of PAGE_SIZE posts
        response = self.client.get('/posts')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), settings.REST_FRAMEWORK.get('PAGE_SIZE'))
    def test_serializer_posts_count(self):
        # endpoint output must match the serializer over the same queryset
        response = self.client.get('/posts')
        posts = Post.objects.all()[:settings.REST_FRAMEWORK.get('PAGE_SIZE')]
        serializer = PostSerializer(posts, many=True)
        self.assertEqual(response.data, serializer.data)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
    def test_get_single_post(self):
        post = Post.objects.get(story_id=5)
        self.assertEqual(post.title, 'Title 5')
    def test_order(self):
        # ascending / descending by id; unknown field is a 400
        response = self.client.get('/posts?order=id')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), settings.REST_FRAMEWORK.get('PAGE_SIZE'))
        self.assertSequenceEqual(list(map(itemgetter('title'), response.json())), ['Title 1', 'Title 2', 'Title 3', 'Title 4', 'Title 5'])
        response = self.client.get('/posts?order=-id')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), settings.REST_FRAMEWORK.get('PAGE_SIZE'))
        self.assertSequenceEqual(list(map(itemgetter('title'), response.json())), ['Title 30', 'Title 29', 'Title 28', 'Title 27', 'Title 26'])
        response = self.client.get('/posts?order=abc')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_limit(self):
        # valid limit; then non-numeric, negative and over-MAX are 400s
        response = self.client.get('/posts?limit=2')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), 2)
        self.assertSequenceEqual(list(map(itemgetter('title'), response.json())), ['Title 1', 'Title 2'])
        response = self.client.get('/posts?limit=abc')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.get('/posts?limit={}'.format(-10))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.get('/posts?limit={}'.format(settings.MAX_PAGE_SIZE + 10))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_offset(self):
        # valid offset shifts the window; non-numeric and negative are 400s
        response = self.client.get('/posts?offset=2')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), settings.REST_FRAMEWORK.get('PAGE_SIZE'))
        self.assertSequenceEqual(list(map(itemgetter('title'), response.json())), ['Title 3', 'Title 4', 'Title 5', 'Title 6', 'Title 7'])
        response = self.client.get('/posts?offset=abcde')
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
        response = self.client.get('/posts?offset={}'.format(-10))
        self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
    def test_multiple_params(self):
        # limit + offset combine; order applies before the window is taken
        response = self.client.get('/posts?limit=2&offset=2')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), 2)
        self.assertSequenceEqual(list(map(itemgetter('title'), response.json())), ['Title 3', 'Title 4', ])
        response = self.client.get('/posts?order=-id&limit=2&offset=2')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.json()), 2)
        self.assertSequenceEqual(list(map(itemgetter('title'), response.json())), ['Title 28', 'Title 27', ])
    def tearDown(self):
        Post.objects.all().delete()
class FetchPostsTestCase(TestCase):
    """Integration test for the /posts/_fetch import endpoint."""
    def test_fetch_posts(self):
        # the fetch view is expected to create exactly 30 posts
        # NOTE(review): presumably it calls an external API — confirm it
        # is stubbed/mocked when running in CI
        self.assertEqual(Post.objects.count(), 0)
        self.client.get('/posts/_fetch')
        self.assertEqual(Post.objects.count(), 30)
    def tearDown(self):
        Post.objects.all().delete()
|
22,335 | 291308f8c3079fa7b1a08573f331098517919d99 | import numpy as np
from exibicao import exibe_resultado_teste
from exibicao import exibe_resultado_teste_primeira_imagem
def testa_modelo(imagens_treino, identificacao_treino, imagens_teste, identificacao_teste, modelo):
    """Evaluate the trained model on the test set and display the results.

    NOTE(review): imagens_treino and identificacao_treino are unused here —
    presumably kept for signature symmetry with the training step; confirm.
    """
    predicoes = modelo.predict(imagens_teste)
    exibe_resultado_teste(predicoes, identificacao_teste)
    prediz_primeira_imagem(imagens_teste, modelo)
def prediz_primeira_imagem(imagens_teste, modelo):
    """Predict the label of the first test image and display the outcome."""
    amostra = imagens_teste[0]
    # The model expects a batch dimension, so add a leading axis before
    # calling predict.
    lote = np.expand_dims(amostra, 0)
    predicao = modelo.predict(lote)
    exibe_resultado_teste_primeira_imagem(predicao)
22,336 | c777f5335c787448f76b380e5006632396f87abd | import numpy as np
from sklearn.cluster import AgglomerativeClustering
from plotting import plot_points
def agg(X: np.ndarray, n_clusters: int = 6):
    """
    Agglomerative Clustering algorithm used to clusterize input data.

    References
    ----------
    [1] https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html

    Parameters
    ----------
    X : ndarray
        Array of points coordinates as a list of tuple/lists in shape
        [(x_0, y_0), (x_1, y_1), ...].
    n_clusters : int, optional
        Number of clusters to form. Defaults to 6, the previously
        hard-coded value, so existing callers are unaffected.

    Returns
    -------
    agg
        AgglomerativeClustering fitted clusterizer.
    """
    # _check_agg_params(X)
    # NOTE(review): `affinity` was renamed to `metric` in newer
    # scikit-learn releases; keep `affinity` while the project pins an
    # older version.
    clusterer = AgglomerativeClustering(n_clusters=n_clusters,
                                        affinity='euclidean',
                                        linkage='ward')
    # The labels returned by fit_predict were unused, so a plain fit()
    # is sufficient; the fitted estimator is returned either way.
    clusterer.fit(X)
    return clusterer
def _check_agg_params(X: np.ndarray):
    """
    Grid-search AgglomerativeClustering parameters (linkage, affinity,
    cluster count) and show each clustering on a chart.

    Parameters
    ----------
    X : ndarray
        Array of points coordinates as a list of lists/tuples in shape
        [(x_1, y_1), (x_2, y_2), ...].
    """
    n_min, n_max = 6, 6
    linkages = ['ward', 'complete', 'average', 'single']
    affinities = ["euclidean", "l1", "l2", "manhattan", "cosine"]
    for linkage in linkages:
        for aff in affinities:
            # Ward linkage only supports the euclidean affinity; skip the
            # combinations scikit-learn would reject with a ValueError.
            if linkage == 'ward' and aff != 'euclidean':
                continue
            for i in range(n_min, n_max + 1):
                # Bug fix: n_clusters now uses the loop variable `i`.
                # Previously it was hard-coded to 6, so the sweep never
                # varied the cluster count and the plot title was wrong.
                agg = AgglomerativeClustering(n_clusters=i,
                                              affinity=aff,
                                              linkage=linkage)
                y_agg = agg.fit_predict(X)
                plot_points(X, y_agg, title=f"link={linkage}, n={i}, aff={aff}")
|
22,337 | f47f2cb817fb36851eec0f615e78dd944daebd55 | import networkx as nx
import sys
import random
from pprint import pprint
def randomRegularGraph(n, d, sname=None):
    """Build a random d-regular graph on n nodes.

    If sname is given, the edge list is written to that path.
    Returns the generated graph so callers can use it directly
    (previously the graph was discarded unless sname was given;
    returning it is backward-compatible).
    """
    g = nx.random_regular_graph(d, n)
    if sname is not None:
        nx.write_edgelist(g, sname)
    return g
def corePeriphery(n1, n2, sname=None):
    """Generate a core-periphery graph: n1 core nodes plus n2 periphery nodes.

    Edge probabilities: pc (0.80) inside the core, pp (0.20) between core
    and periphery, and 0 inside the periphery. Prints the graph summary;
    writes an edge list to sname when given. (Python 2 code: uses xrange.)
    """
    pp = 0.20
    pc = 0.80
    # Symmetric adjacency matrix filled stochastically.
    mat = [[0 for _ in xrange(0, n1 + n2)] for _ in xrange(0, n1 + n2)]
    for i in xrange(0, n1 + n2):
        for j in xrange(i, n1 + n2):
            if i < n1 and j < n1:
                # Both endpoints in the core.
                v = 1 if random.random() < pc else 0
            elif (i < n1 and j >= n1) or (i >= n1 and j < n1) :
                # One endpoint in the core, the other in the periphery.
                v = 1 if random.random() < pp else 0
            else:
                # Periphery-periphery pairs are never linked.
                v = 0
            mat[i][j] = v
            mat[j][i] = v
    # Create the graph
    g = nx.Graph()
    for i in xrange(0, len(mat)):
        for j in xrange(i, len(mat[i])):
            if mat[i][j] == 1:
                g.add_edge(i,j)
    print(nx.info(g))
    if sname is not None:
        nx.write_edgelist(g, sname)
def corePeriphery2(n1, n2, n3, sname=None):
    """Generate a three-tier core-periphery graph.

    n1 core nodes are densely linked (p = pp = 0.9); n2 mid-tier nodes are
    attached to sampled core nodes with a per-batch degree budget that
    shrinks by one each batch; n3 pendant nodes each get exactly one random
    core neighbour. (Python 2 code: uses xrange.)
    """
    pp = 0.9
    pc = 0.1
    mat = [[0 for _ in xrange(0, n1 + n2 + n3)] for _ in xrange(0, n1 + n2 + n3)]
    # Dense core: each core pair linked with probability pp.
    for i in xrange(0, n1):
        for j in xrange(i, n1):
            v = 1 if random.random() < pp else 0
            mat[i][j] = v
            mat[j][i] = v
    core = range(0, n1)
    # Mid tier is processed in 5 equal batches.
    step = int(n2/5)
    p = pc
    nodes = [range(n1 + i, n1 + i + step) for i in xrange(0, n2, step)]
    # Initial per-node core-degree budget; decremented once per batch.
    count = int(pp * n1) - 5
    for n in nodes:
        # NOTE(review): `p` is decremented but never used since the
        # `count = int(n1 * p)` line was commented out — confirm intent.
        p = p - 0.1
        #count = int(n1 * p)
        count -= 1
        for i in n:
            # NOTE(review): the loop variable `n` (the batch) is rebound
            # here to the sampled core nodes. Iteration is unaffected in
            # Python, but the shadowing is confusing — verify intended.
            n = random.sample(core, count)
            for j in n:
                mat[i][j] = 1
                mat[j][i] = 1
    # Pendant tier: one random core neighbour per node.
    nodes = range(n1 + n2, n1 + n2 + n3)
    for n in nodes:
        v = random.choice(core)
        mat[n][v] = 1
        mat[v][n] = 1
    # Create the graph
    g = nx.Graph()
    for i in xrange(0, len(mat)):
        for j in xrange(i, len(mat[i])):
            if mat[i][j] == 1:
                g.add_edge(i,j)
    print(nx.info(g))
    if sname is not None:
        nx.write_edgelist(g, sname)
if __name__ == '__main__':
    """
    sname = sys.argv[1]
    d = int(sys.argv[2])
    n = int(sys.argv[3])
    randomRegularGraph(n, d, sname)
    """
    # CLI: output-file, core size, mid-tier size, pendant count.
    sname = sys.argv[1]
    n1 = int(sys.argv[2])
    n2 = int(sys.argv[3])
    n3 = int(sys.argv[4])
    corePeriphery2(n1, n2, n3, sname)
|
22,338 | 89038200c547c9a26759ee579bc359305ec9b47c | from django.contrib import admin
from .models import *
@admin.register(Plc)
class PlcAdmin(admin.ModelAdmin):
    # Default admin for Plc; no customization needed yet.
    pass
@admin.register(Area)
class AreaAdmin(admin.ModelAdmin):
    # Default admin for Area; no customization needed yet.
    pass
@admin.register(Dato)
class DatoAdmin(admin.ModelAdmin):
    # Changelist columns; "get_created_at" is the formatted date below.
    list_display = ["id", "area", "get_created_at", "dato", "procesado",]
    list_filter = ["procesado",]

    def get_created_at(self, obj: Dato):
        """Format the record's creation timestamp for the changelist."""
        # NOTE(review): the `datetime` annotation relies on the name being
        # re-exported by `from .models import *`; local annotations are not
        # evaluated at runtime so this cannot raise, but confirm the name
        # actually exists for readers/tools.
        date: datetime = obj.created_at
        return date.strftime("%d-%m-%y %H:%M:%S")
    get_created_at.short_description = "Created"
@admin.register(Tag)
class TagAdmin(admin.ModelAdmin):
    # Default admin for Tag; no customization needed yet.
    pass
@admin.register(Fila)
class FilaAdmin(admin.ModelAdmin):
    # Changelist columns describing where each field lives in the raw data.
    list_display = ["id", "area", "name", "byte", "bit", "tipo_dato",]
    # def get_value(self, obj: Fila):
    #     return obj.read_value(obj.area.datos.first().dato)
    # get_value.short_description = "Last value"
@admin.register(DatoProcesado)
class DatoProcesadoAdmin(admin.ModelAdmin):
    # Changelist columns; "get_date" is the formatted date defined below.
    list_display = [
        "id",
        "name",
        "get_date",
        "dato",
        "area",
        "fila",
        "raw_dato",
        "created_at",
        "mod_at",
    ]
    list_filter = ["area", "fila",]

    def get_date(self, obj: DatoProcesado):
        """Format the record's date for the changelist."""
        return obj.date.strftime("%d-%m-%y %H:%M:%S")
    get_date.short_description = "Date"
22,339 | a583e8174da57126c8af128f653b2b4e0a9f7de7 | class Solution:
# @param A : list of integers
# @return an integer
def removeDuplicates(self, A):
na = len(A)
i = 0
j = 1
if na <= 2:
return na
prev = A[i]
alr_same = False
while j < na:
a = A[j]
if a != prev:
i += 1
A[i] = a
prev = a
alr_same = False
elif not alr_same:
i += 1
A[i] = a
alr_same = True
j += 1
return i + 1
|
22,340 | 0d7932f3243740a6eb62515bfc81e6592a7e5548 | from flask import (Flask, request, render_template, redirect, session)
from flask_debugtoolbar import DebugToolbarExtension
from jinja2 import StrictUndefined
app = Flask(__name__)
app.secret_key = "papabear"
app.jinja_env.undefined = StrictUndefined
@app.route('/')
def index():
    """Render the homepage."""
    # show homepage
    print("\n\nshow homepage\n\n")
    return render_template("homepage.html")
if __name__ == "__main__":
app.debug = True
app.jinja_env.auto_reload = app.debug
DebugToolbarExtension(app)
# connect_to_db(app, 'postgresql:///journals')
app.run(port=5000, host='0.0.0.0') |
22,341 | ad382b4fdcf6a5c2a3c33ca8a82fe72ac7a9afc6 | import math
# Shannon-entropy estimates for a text file: monogram and bigram entropies,
# with and without spaces (spaces are shown as '*'), in bits per symbol.
#
# Fixes over the previous version:
#   * the with-space monogram entropy divided by the length of the
#     no-space text instead of the with-space text;
#   * the overlapping with-space bigram entropy used the natural log
#     (math.log) instead of log base 2, unlike every other estimate;
#   * the non-overlapping with-space bigram entropy summed over every
#     bigram occurrence instead of over distinct bigrams, over-counting
#     duplicates.


def _read_lower(path):
    """Read the file, drop newlines, and lowercase the text."""
    with open(path, 'r', encoding='utf-8') as fh:
        return ''.join(line.replace('\n', '') for line in fh).lower()


def _frequencies(symbols):
    """Count occurrences of each symbol (or bigram) in the sequence."""
    freq = dict()
    for sym in symbols:
        freq[sym] = freq.get(sym, 0) + 1
    return freq


def _entropy(freq, total):
    """Shannon entropy in bits for a frequency table over `total` samples."""
    return sum(-(cnt / total) * math.log2(cnt / total) for cnt in freq.values())


text = _read_lower('file.txt')
c = ''.join(s for s in text if s.isalpha())              # letters only
d = ''.join(s for s in text if s == ' ' or s.isalpha())  # letters + spaces

print(_entropy(_frequencies(c), len(c)), 'Монограмма без пробела')
# Fixed: normalize by len(d), the with-space length.
print(_entropy(_frequencies(d), len(d)), 'Монограмма с пробелом')

# Visualize spaces as '*' before building the with-space bigrams.
d = d.replace(' ', '*')

overlap_c = [c[i - 1] + c[i] for i in range(1, len(c))]
print(_entropy(_frequencies(overlap_c), len(overlap_c)),
      'Биграмма без пробелов (пересекающ.)')

overlap_d = [d[i - 1] + d[i] for i in range(1, len(d))]
print(_entropy(_frequencies(overlap_d), len(overlap_d)),
      'Биграмма с пробелами(пересекающ.)')

disjoint_c = [c[i - 1] + c[i] for i in range(1, len(c), 2)]
print(_entropy(_frequencies(disjoint_c), len(disjoint_c)),
      'Биграмма без пробелов(непересекающ.)')

disjoint_d = [d[i - 1] + d[i] for i in range(1, len(d), 2)]
print(_entropy(_frequencies(disjoint_d), len(disjoint_d)),
      'Биграмма с пробелами(непересекающ.)')
22,342 | 90edb2944e6cf2f4975a00b97bc29a7cb480ce46 | from common import *
# Reads T test cases; each case is an (ignored) count followed by an array
# of observations. readInt/readIntArr/writeLine come from the `common`
# helper module.
T = readInt()
for t in range(T):
    _,obs = readInt(),readIntArr()
    c1, c2, rate = 0,0,0
    # c1: total of all decreases between consecutive observations.
    # rate: the largest single-step decrease observed.
    for i in range(len(obs)-1):
        rate = max(rate, obs[i]-obs[i+1])
        if obs[i]-obs[i+1] > 0:
            c1 += obs[i]-obs[i+1]
    # c2: assuming a constant per-step drop of `rate`, sum the amount
    # consumed, capped by what is available at each step.
    # NOTE(review): contest-style answer; semantics inferred from the
    # arithmetic — confirm against the problem statement.
    for i in range(len(obs)-1):
        c2 += min(rate, obs[i])
    writeLine("Case #{}: {} {}".format(t+1, c1, c2))
22,343 | cce6fc9b8f59b46f227e74ae87ed7897ee3396da | import numpy as np
import cv2 as cv
#load the image in gray scale
# Load the image in grayscale (flag 0 == cv.IMREAD_GRAYSCALE).
img = cv.imread("mandril_color.tif",0)
# Make the window resizable by the user.
cv.namedWindow("image",cv.WINDOW_NORMAL)
cv.imshow('image', img)
# waitKey is the keyboard function.
# If 0 is passed, it waits indefinitely for a key stroke.
k = cv.waitKey(0)
if k==27:
    cv.destroyAllWindows() # Esc: destroy all open windows
elif k ==ord('s'):
    cv.imwrite("mandril_color.png", img) # 's': save the image elsewhere
    cv.destroyAllWindows()
22,344 | 0a6cf818c744ed392568360912446b17fd2689ef | import astropy.constants as const
import astropy.units as u
import numpy as np
import pytest
from plasmapy.particles._isotopes import data_about_isotopes
from plasmapy.particles.atomic import (
_is_electron,
atomic_number,
charge_number,
common_isotopes,
electric_charge,
half_life,
ionic_levels,
is_stable,
isotopic_abundance,
known_isotopes,
mass_number,
particle_mass,
periodic_table_block,
periodic_table_category,
periodic_table_group,
periodic_table_period,
reduced_mass,
stable_isotopes,
standard_atomic_weight,
)
from plasmapy.particles.exceptions import (
ChargeError,
InvalidElementError,
InvalidIsotopeError,
InvalidParticleError,
MissingParticleDataError,
ParticleWarning,
)
from plasmapy.particles.particle_class import Particle
from plasmapy.particles.symbols import atomic_symbol, element_name, isotope_symbol
from plasmapy.utils._pytest_helpers import run_test
# function to be tested, argument(s), expected result/outcome
# The following lists (with the name of a function
table_functions_args_kwargs_output = [
[
atomic_symbol,
[
1,
],
{},
"H",
],
[atomic_symbol, [1], {}, "H"],
[atomic_symbol, ["H"], {}, "H"],
[atomic_symbol, ["p"], {}, "H"],
[atomic_symbol, ["T"], {}, "H"],
[atomic_symbol, ["deuterium"], {}, "H"],
[atomic_symbol, ["deuteron"], {}, "H"],
[atomic_symbol, ["Tritium"], {}, "H"],
[atomic_symbol, ["triton"], {}, "H"],
[atomic_symbol, ["H-2"], {}, "H"],
[atomic_symbol, ["D"], {}, "H"],
[atomic_symbol, ["T"], {}, "H"],
[atomic_symbol, ["H-3"], {}, "H"],
[atomic_symbol, ["Hydrogen-3"], {}, "H"],
[atomic_symbol, ["helium"], {}, "He"],
[atomic_symbol, [2], {}, "He"],
[atomic_symbol, ["alpha"], {}, "He"],
[atomic_symbol, ["gold"], {}, "Au"],
[atomic_symbol, ["Gold"], {}, "Au"],
[atomic_symbol, [79], {}, "Au"],
[atomic_symbol, ["79"], {}, "Au"],
[atomic_symbol, ["P"], {}, "P"],
[atomic_symbol, [118], {}, "Og"],
[atomic_symbol, ["N-14"], {}, "N"],
[atomic_symbol, ["N"], {}, "N"],
[atomic_symbol, ["H +1"], {}, "H"],
[atomic_symbol, ["H 1+"], {}, "H"],
[atomic_symbol, ["hydrogen 1+"], {}, "H"],
[atomic_symbol, ["deuterium 1+"], {}, "H"],
[atomic_symbol, ["Fe 24+"], {}, "Fe"],
[atomic_symbol, ["Fe +24"], {}, "Fe"],
[atomic_symbol, ["Fe 2-"], {}, "Fe"],
[atomic_symbol, ["Fe -2"], {}, "Fe"],
[atomic_symbol, ["Fe+"], {}, "Fe"],
[atomic_symbol, ["Fe++"], {}, "Fe"],
[atomic_symbol, ["Fe-"], {}, "Fe"],
[atomic_symbol, ["Fe++++++++++++++"], {}, "Fe"],
[isotope_symbol, ("He", 4), {}, "He-4"],
[isotope_symbol, ("helium-4",), {}, "He-4"],
[isotope_symbol, ("H-2",), {}, "D"],
[isotope_symbol, ("Deuterium",), {}, "D"],
[isotope_symbol, ("deuterium",), {}, "D"],
[isotope_symbol, ("deuteron",), {}, "D"],
[isotope_symbol, ("tritium",), {}, "T"],
[isotope_symbol, ("triton",), {}, "T"],
[isotope_symbol, ("Hydrogen-3",), {}, "T"],
[isotope_symbol, ("hydrogen-3",), {}, "T"],
[isotope_symbol, ("H-3",), {}, "T"],
[isotope_symbol, (1, 2), {}, "D"],
[isotope_symbol, ("Hydrogen", 3), {}, "T"],
[isotope_symbol, ("tritium",), {}, "T"],
[isotope_symbol, ("H", 2), {}, "D"],
[isotope_symbol, ("Alpha",), {}, "He-4"],
[isotope_symbol, ("alpha",), {}, "He-4"],
[isotope_symbol, (79, 197), {}, "Au-197"],
[isotope_symbol, ("p",), {}, "H-1"],
[isotope_symbol, ("beryllium-8",), {}, "Be-8"],
[isotope_symbol, ("N-13",), {}, "N-13"],
[isotope_symbol, ("p",), {}, "H-1"],
[isotope_symbol, ("proton",), {}, "H-1"],
[isotope_symbol, ("protium",), {}, "H-1"],
[isotope_symbol, ("N-13 2+",), {}, "N-13"],
[isotope_symbol, ("Hydrogen-3 +1",), {}, "T"],
[atomic_number, ["H"], {}, 1],
[atomic_number, ["D"], {}, 1],
[atomic_number, ["deuterium"], {}, 1],
[atomic_number, ["Deuterium"], {}, 1],
[atomic_number, ["tritium"], {}, 1],
[atomic_number, ["p"], {}, 1],
[atomic_number, ["P"], {}, 15],
[atomic_number, ["Alpha"], {}, 2],
[atomic_number, ["C-12"], {}, 6],
[atomic_number, ["Argon"], {}, 18],
[atomic_number, ["protium"], {}, 1],
[atomic_number, ["H-3"], {}, 1],
[atomic_number, ["p+"], {}, 1],
[atomic_number, ["Be-8"], {}, 4],
[atomic_number, ["N"], {}, 7],
[atomic_number, ["N 2+"], {}, 7],
[atomic_number, ["N +1"], {}, 7],
[atomic_number, ["N+++"], {}, 7],
[mass_number, ["helium-3"], {}, 3],
[mass_number, ["Au-197"], {}, 197],
[mass_number, ["deuterium"], {}, 2],
[mass_number, ["D"], {}, 2],
[mass_number, ["H-2"], {}, 2],
[mass_number, ["tritium"], {}, 3],
[mass_number, ["T"], {}, 3],
[mass_number, ["alpha"], {}, 4],
[mass_number, ["p"], {}, 1],
[mass_number, ["Be-8"], {}, 8],
[mass_number, ["N-13"], {}, 13],
[mass_number, ["N-13 2+"], {}, 13],
[mass_number, ["N-13 +2"], {}, 13],
[mass_number, ["N-13+++"], {}, 13],
[element_name, ["D"], {}, "hydrogen"],
[element_name, ["deuterium"], {}, "hydrogen"],
[element_name, ["Au"], {}, "gold"],
[element_name, ["alpha"], {}, "helium"],
[element_name, ["helium-4"], {}, "helium"],
[element_name, ["H-2"], {}, "hydrogen"],
[element_name, ["Deuterium"], {}, "hydrogen"],
[element_name, ["Hydrogen-3"], {}, "hydrogen"],
[element_name, ["hydrogen-3"], {}, "hydrogen"],
[element_name, ["H-3"], {}, "hydrogen"],
[element_name, ["tritium"], {}, "hydrogen"],
[element_name, ["Alpha"], {}, "helium"],
[element_name, ["alpha"], {}, "helium"],
[element_name, [1], {}, "hydrogen"],
[element_name, [26], {}, "iron"],
[element_name, [79], {}, "gold"],
[element_name, ["p"], {}, "hydrogen"],
[element_name, ["P"], {}, "phosphorus"],
[element_name, ["Be-8"], {}, "beryllium"],
[element_name, ["Li-7"], {}, "lithium"],
[element_name, ["N"], {}, "nitrogen"],
[element_name, ["N+++"], {}, "nitrogen"],
[element_name, ["D-"], {}, "hydrogen"],
[standard_atomic_weight, ["H"], {}, (1.008 * u.u).to(u.kg)],
[standard_atomic_weight, [1], {}, (1.008 * u.u).to(u.kg)],
[standard_atomic_weight, ["Hydrogen"], {}, (1.008 * u.u).to(u.kg)],
[standard_atomic_weight, ["Au"], {}, u.kg],
[particle_mass, ["proton"], {}, const.m_p],
[particle_mass, ["H-1+"], {}, const.m_p],
[particle_mass, ["H-1 +1"], {}, const.m_p],
[particle_mass, ["H-1 1+"], {}, const.m_p],
[particle_mass, ["H-1"], {"Z": 1}, const.m_p],
[particle_mass, ["hydrogen-1"], {"Z": 1}, const.m_p],
[particle_mass, ["p+"], {}, const.m_p],
[particle_mass, ["F-19"], {"Z": 3}, u.kg],
[particle_mass, ["H"], {}, standard_atomic_weight("H")],
[is_stable, ["H-1"], {}, True],
[is_stable, [1, 1], {}, True],
[is_stable, ["N-14"], {}, True],
[is_stable, ["N", 14], {}, True],
[is_stable, ["P-31"], {}, True],
[is_stable, ["P", 31], {}, True],
[is_stable, ["p"], {}, True],
[is_stable, ["alpha"], {}, True],
[is_stable, ["Xe-124"], {}, True],
[is_stable, ("Fe",), {"mass_numb": 56}, True],
[is_stable, ["Fe-56"], {}, True],
[is_stable, ["iron-56"], {}, True],
[is_stable, ["Iron-56"], {}, True],
[is_stable, [26, 56], {}, True],
[is_stable, ["Be-8"], {}, False],
[is_stable, ["U-235"], {}, False],
[is_stable, ["uranium-235"], {}, False],
[is_stable, ["T"], {}, False],
[is_stable, [4, 8], {}, False],
[is_stable, ["tritium"], {}, False],
[is_stable, ["Pb-209"], {}, False],
[is_stable, ["lead-209"], {}, False],
[is_stable, ["Lead-209"], {}, False],
[is_stable, ("Pb",), {"mass_numb": 209}, False],
[is_stable, [82, 209], {}, False],
[charge_number, ["H+"], {}, 1],
[charge_number, ["D +1"], {}, 1],
[charge_number, ["tritium 1+"], {}, 1],
[charge_number, ["H-"], {}, -1],
[charge_number, ["Fe -2"], {}, -2],
[charge_number, ["Fe 2-"], {}, -2],
[charge_number, ["N--"], {}, -2],
[charge_number, ["N++"], {}, 2],
[charge_number, ["alpha"], {}, 2],
[charge_number, ["proton"], {}, 1],
[charge_number, ["deuteron"], {}, 1],
[charge_number, ["triton"], {}, 1],
[charge_number, ["electron"], {}, -1],
[charge_number, ["e-"], {}, -1],
[charge_number, ["e+"], {}, 1],
[charge_number, ["positron"], {}, 1],
[charge_number, ["n"], {}, 0],
[charge_number, ["neutron"], {}, 0],
[charge_number, ["p-"], {}, -1],
[charge_number, ["antiproton"], {}, -1],
[electric_charge, ["p"], {}, u.C],
[electric_charge, ["p"], {}, 1.6021766208e-19 * u.C],
[electric_charge, ["e"], {}, -1.6021766208e-19 * u.C],
[electric_charge, ["alpha"], {}, 3.2043532416e-19 * u.C],
[electric_charge, ["n"], {}, 0 * u.C],
[half_life, ["H-1"], {}, u.s],
[half_life, ["tritium"], {}, u.s],
[half_life, ["H-1"], {}, np.inf * u.s],
]
@pytest.mark.parametrize(
    ("tested_function", "args", "kwargs", "expected_output"),
    table_functions_args_kwargs_output,
)
def test_functions_and_values(tested_function, args, kwargs, expected_output):
    """Run each (function, args, kwargs, expected) row of the table above
    through plasmapy's run_test helper."""
    run_test(tested_function, args, kwargs, expected_output)
class TestInvalidPeriodicElement:
    """Periodic-table lookups must reject collection inputs: passing a
    tuple or list of symbols raises TypeError."""

    def test_periodic_table_period(self):
        with pytest.raises(TypeError):
            periodic_table_period(("Ne", "Na"))

    def test_periodic_table_block(self):
        with pytest.raises(TypeError):
            periodic_table_block(("N", "C", "F"))

    def test_periodic_table_category(self):
        with pytest.raises(TypeError):
            periodic_table_category(["Rb", "He", "Li"])

    def test_periodic_table_group(self):
        with pytest.raises(TypeError):
            periodic_table_group(("B", "Ti", "Ge"))
# Next we have tests that do not fall nicely into equality comparisons.
def test_standard_atomic_weight_value_between():
"""Test that `standard_atomic_weight` returns approximately the
correct value for phosphorus."""
assert (
30.973 < standard_atomic_weight("P").to(u.u).value < 30.974
), "Incorrect standard atomic weight for phosphorus."
def test_particle_mass_berkelium_249():
"""Test that `particle_mass` returns the correct value for Bk-249."""
assert np.isclose(
particle_mass("berkelium-249").to(u.u).value, 249.0749877
), "Incorrect isotope mass for berkelium."
def test_particle_mass_for_hydrogen_with_no_mass_number():
"""Test that `particle_mass` does not return the proton mass when no
mass number is specified for hydrogen. In this case, the
standard atomic weight should be used to account for the small
fraction of deuterium."""
assert particle_mass("H", Z=1) > const.m_p
assert particle_mass("hydrogen", Z=1) > const.m_p
def test_particle_mass_helium():
"""Test miscellaneous cases for `particle_mass`."""
assert particle_mass("alpha") > particle_mass("He-3 2+")
# (arg1, kwargs1, arg2, kwargs2, expected)
equivalent_particle_mass_args = [
["e+", {}, "positron", {}, const.m_e],
["alpha", {}, "He-4++", {}, None],
["alpha", {}, "helium-4 2+", {}, None],
["deuteron", {}, "H", {"Z": 1, "mass_numb": 2}, None],
["D+", {}, "H-2+", {}, None],
["D+", {}, "D 1+", {}, None],
["Deuterium+", {}, "D", {"Z": 1}, None],
["triton", {}, "H", {"Z": 1, "mass_numb": 3}, None],
["T+", {}, "H-3+", {}, None],
["T+", {}, "T 1+", {}, None],
["Tritium+", {}, "T", {"Z": 1}, None],
[
"Fe-56 1+",
{},
"Fe",
{"mass_numb": 56, "Z": 1},
particle_mass("Fe-56 1-") - 2 * const.m_e,
],
["Fe-56 +1", {}, 26, {"mass_numb": 56, "Z": 1}, None],
]
@pytest.mark.parametrize(
("arg1", "kwargs1", "arg2", "kwargs2", "expected"), equivalent_particle_mass_args
)
def test_particle_mass_equivalent_args(arg1, kwargs1, arg2, kwargs2, expected):
"""Test that `particle_mass` returns equivalent results for
equivalent positional and keyword arguments."""
result1 = particle_mass(arg1, **kwargs1)
result2 = particle_mass(arg2, **kwargs2)
assert u.isclose(result1, result2), (
f"particle_mass({arg1!r}, **{kwargs1}) = {result1!r}, whereas "
f"particle_mass({arg2!r}, **{kwargs2}) = {result2!r}. "
f"These results are not equivalent as expected."
)
if expected is not None:
assert u.isclose(result1, result2) and u.isclose( # noqa: PT018
result2, expected
), (
f"particle_mass({arg1!r}, **{kwargs1}) = {result1!r} and "
f"particle_mass({arg2!r}, **{kwargs2}) = {result2!r}, but "
f"these results are not equal to {expected!r} as expected."
)
@pytest.mark.slow()
def test_known_common_stable_isotopes():
"""Test that `known_isotopes`, `common_isotopes`, and
`stable_isotopes` return the correct values for hydrogen."""
known_should_be = ["H-1", "D", "T", "H-4", "H-5", "H-6", "H-7"]
common_should_be = ["H-1", "D"]
stable_should_be = ["He-3", "He-4"]
assert known_isotopes("H") == known_should_be, (
f"known_isotopes('H') should return {known_should_be}, but is "
f"instead returning {known_isotopes('H')}"
)
assert common_isotopes("H") == common_should_be, (
f"common_isotopes('H') should return {common_should_be}, but is "
f"instead returning {common_isotopes('H')}"
)
assert stable_isotopes("He") == stable_should_be, (
f"stable_isotopes('He') should return {stable_should_be}, but is "
f"instead returning {stable_isotopes('He')}"
)
def test_half_life():
"""Test that `half_life` returns the correct values for various
isotopes."""
assert np.isclose(
half_life("tritium").to(u.s).value, (12.32 * u.yr).to(u.s).value, rtol=2e-4
), "Incorrect half-life for tritium."
def test_half_life_unstable_isotopes():
"""Test that `half_life` returns `None` and raises an exception for
all isotopes that do not yet have half-life data."""
for isotope in data_about_isotopes:
if (
"half_life" not in data_about_isotopes[isotope]
and not data_about_isotopes[isotope]
):
with pytest.raises(MissingParticleDataError):
half_life(isotope)
def test_half_life_u_220():
"""Test that `half_life` returns `None` and issues a warning for an
isotope without half-life data."""
isotope_without_half_life_data = "No-248"
with pytest.raises(MissingParticleDataError):
half_life(isotope_without_half_life_data)
pytest.fail(
f"This test assumes that {isotope_without_half_life_data} does "
f"not have half-life data. If half-life data is added for this "
f"isotope, then a different isotope that does not have half-life "
f"data should be chosen for this test."
)
def test_known_common_stable_isotopes_cases():
"""Test that known_isotopes, common_isotopes, and stable_isotopes
return certain isotopes that fall into these categories."""
assert "H-1" in known_isotopes("H")
assert "D" in known_isotopes("H")
assert "T" in known_isotopes("H")
assert "Be-8" in known_isotopes("Be")
assert "Og-294" in known_isotopes(118)
assert "H-1" in common_isotopes("H")
assert "H-4" not in common_isotopes(1)
assert "H-1" in stable_isotopes("H")
assert "D" in stable_isotopes("H")
assert "T" not in stable_isotopes("H")
assert "Fe-56" in common_isotopes("Fe", most_common_only=True)
assert "He-4" in common_isotopes("He", most_common_only=True)
@pytest.mark.slow()
def test_known_common_stable_isotopes_len():
"""Test that `known_isotopes`, `common_isotopes`, and
`stable_isotopes` each return a `list` of the expected length.
The number of common isotopes may change if isotopic composition
data has any significant changes.
The number of stable isotopes may decrease slightly if some isotopes
are discovered to be unstable but with extremely long half-lives.
The number of known isotopes will increase as new isotopes are
discovered, so a buffer is included in the test.
"""
assert len(common_isotopes()) == 288, (
"The length of the list returned by common_isotopes() is "
f"{len(common_isotopes())}, which is not the expected value."
)
assert len(stable_isotopes()) == 254, (
"The length of the list returned by stable_isotopes() is "
f"{len(stable_isotopes())}, which is not the expected value."
)
assert 3352 <= len(known_isotopes()) <= 3400, (
"The length of the list returned by known_isotopes() is "
f"{len(known_isotopes())}, which is not within the expected range."
)
@pytest.mark.parametrize("func", [common_isotopes, stable_isotopes, known_isotopes])
def test_known_common_stable_isotopes_error(func):
"""Test that `known_isotopes`, `common_isotopes`, and
`stable_isotopes` raise an `~plasmapy.utils.InvalidElementError` for
neutrons."""
with pytest.raises(InvalidElementError):
func("n")
pytest.fail(f"{func} is not raising a ElementError for neutrons.")
def test_isotopic_abundance():
"""Test that `isotopic_abundance` returns the appropriate values or
raises appropriate errors for various isotopes."""
assert isotopic_abundance("H", 1) == isotopic_abundance("protium")
assert np.isclose(isotopic_abundance("D"), 0.000115)
assert isotopic_abundance("Be-8") == 0.0, "Be-8"
assert isotopic_abundance("Li-8") == 0.0, "Li-8"
with pytest.warns(ParticleWarning):
isotopic_abundance("Og", 294)
with pytest.raises(InvalidIsotopeError):
isotopic_abundance("neutron")
pytest.fail("No exception raised for neutrons.")
with pytest.raises(InvalidParticleError):
isotopic_abundance("Og-2")
# Bug fix: the elements were previously a generator, and the isotopes
# generator below consumed it lazily. zip() over the two then interleaved
# consumption, pairing element N with the isotopes of element N+1.
# Materializing the elements as a list lets both downstream generators
# iterate it independently and keeps the pairs aligned.
# NOTE(review): atomic_number() applied to an int looks like an identity
# lookup — confirm atomic_symbol() was not intended here.
isotopic_abundance_elements = [
    atomic_number(atomic_numb) for atomic_numb in range(1, 119)
]
isotopic_abundance_isotopes = (
    common_isotopes(element) for element in isotopic_abundance_elements
)
# Only elements that actually have common isotopes are parametrized.
isotopic_abundance_sum_table = (
    (element, isotopes)
    for element, isotopes in zip(
        isotopic_abundance_elements, isotopic_abundance_isotopes
    )
    if isotopes
)
@pytest.mark.parametrize(("element", "isotopes"), isotopic_abundance_sum_table)
def test_isotopic_abundances_sum(element, isotopes):
"""Test that the sum of isotopic abundances for each element with
isotopic abundances is one."""
sum_of_iso_abund = sum(isotopic_abundance(isotope) for isotope in isotopes)
assert np.isclose(
sum_of_iso_abund, 1, atol=1e-6
), f"The sum of the isotopic abundances for {element} does not equal 1."
class TestReducedMassInput:
def test_incorrect_units(self):
with pytest.raises(InvalidParticleError):
reduced_mass("N", 6e-26 * u.l)
def test_missing_atomic_data(self):
assert u.isclose(reduced_mass("Og", "H"), np.nan * u.kg, equal_nan=True)
def test_ion_list_example():
ions = ionic_levels("He-4")
np.testing.assert_equal(ions.charge_number, [0, 1, 2])
assert ions.symbols == ["He-4 0+", "He-4 1+", "He-4 2+"]
@pytest.mark.parametrize(
("particle", "min_charge", "max_charge", "expected_charge_numbers"),
[
("H-1", 0, 1, [0, 1]),
("p+", 1, 1, [1]),
(Particle("p+"), 0, 0, [0]),
("C", 3, 5, [3, 4, 5]),
],
)
def test_ion_list(particle, min_charge, max_charge, expected_charge_numbers):
"""Test that inputs to ionic_levels are interpreted correctly."""
particle = Particle(particle)
ions = ionic_levels(particle, min_charge, max_charge)
np.testing.assert_equal(ions.charge_number, expected_charge_numbers)
assert ions[0].element == particle.element
if particle.is_category("isotope"):
assert ions[0].isotope == particle.isotope
@pytest.mark.parametrize(
("element", "min_charge", "max_charge"), [("Li", 0, 4), ("Li", 3, 2)]
)
def test_invalid_inputs_to_ion_list(element, min_charge, max_charge):
with pytest.raises(ChargeError):
ionic_levels(element, min_charge, max_charge)
str_electron_table = [
("e-", True),
("e+", False),
("e", True),
("electron", True),
("ELECTRON", True),
("H", False),
("positron", False),
("Carbon", False),
(("e", "e-"), False),
(["e+", "proton"], False),
("merry-go-round", False),
]
@pytest.mark.parametrize(("particle", "electron"), str_electron_table)
def test_is_electron(particle, electron):
    """_is_electron must accept electron aliases (case-insensitive) and
    reject everything else, including tuples/lists and unrelated strings."""
    assert _is_electron(particle) == electron
def test_ionic_levels_example():
"""
Test that `ionic_levels` can be used to create a |ParticleList|
containing all the ions for a particular element.
"""
ions = ionic_levels("He-4")
np.testing.assert_equal(ions.charge_number, [0, 1, 2])
assert ions.symbols == ["He-4 0+", "He-4 1+", "He-4 2+"]
@pytest.mark.parametrize(
("particle", "min_charge", "max_charge", "expected_charge_numbers"),
[
("H-1", 0, 1, [0, 1]),
("p+", 1, 1, [1]),
(Particle("p+"), 0, 0, [0]),
("C", 3, 5, [3, 4, 5]),
],
)
def test_ion_list2(particle, min_charge, max_charge, expected_charge_numbers):
"""Test that inputs to ionic_levels are interpreted correctly."""
particle = Particle(particle)
ions = ionic_levels(particle, min_charge, max_charge)
np.testing.assert_equal(ions.charge_number, expected_charge_numbers)
assert ions[0].element == particle.element
if particle.is_category("isotope"):
assert ions[0].isotope == particle.isotope
@pytest.mark.parametrize(
("element", "min_charge", "max_charge"), [("Li", 0, 4), ("Li", 3, 2)]
)
def test_invalid_inputs_to_ion_list2(element, min_charge, max_charge):
with pytest.raises(ChargeError):
ionic_levels(element, min_charge, max_charge)
|
22,345 | 9f9865f70f0b871b195a1c894f85b56d3ff8f0fd | from django.contrib import admin
from .models import Notice
from .models import AboutPage
# Expose Notice and AboutPage in the Django admin with default options.
admin.site.register(Notice)
admin.site.register(AboutPage)
|
22,346 | 7d4cb686690ab67facb1d24390634e113bf71dcc | import json
import os
from collections import namedtuple
import yaml
from ansible.executor.task_queue_manager import TaskQueueManager
from ansible.inventory import Inventory
from ansible.parsing.dataloader import DataLoader
from ansible.playbook.play import Play
from ansible.vars import VariableManager
from inventory import dynlxc
from mock import Mock, patch
class TestAllInventory(object):
    """Run every inventory fixture through the dynlxc inventory script and,
    when a playbook of the same name exists, execute it with Ansible's
    TaskQueueManager.

    Python 2 code (note the print statement); subprocess.Popen is patched
    class-wide so dynlxc's shell calls return canned output.
    """

    @classmethod
    def setup_class(cls):
        # Patch Popen once for the class: every subprocess started by the
        # inventory script "succeeds" (returncode 0) with empty stdout.
        cls.popen_patcher = patch('inventory.dynlxc.subprocess.Popen')
        cls.mock_popen = cls.popen_patcher.start()
        cls.mock_rv = Mock()
        cls.mock_rv.returncode = 0
        cls.mock_popen.return_value = cls.mock_rv
        cls.mock_rv.stdout.read.return_value = ''
        # Stand-in for Ansible's CLI options object, with exactly the
        # fields TaskQueueManager expects.
        Options = namedtuple(
            'Options', ['listtags', 'listtasks', 'listhosts', 'syntax',
                        'connection', 'module_path', 'forks', 'remote_user',
                        'private_key_file', 'ssh_common_args',
                        'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args',
                        'become', 'become_method', 'become_user', 'verbosity',
                        'check'])
        cls.options = Options(listtags=False, listtasks=False, listhosts=False,
                              syntax=False, connection='ssh', module_path=None,
                              forks=100, remote_user='slotlocker',
                              private_key_file=None, ssh_common_args=None,
                              ssh_extra_args=None, sftp_extra_args=None,
                              scp_extra_args=None, become=True,
                              become_method=None, become_user='root',
                              verbosity=None, check=True)

    @classmethod
    def teardown_class(cls):
        # Undo the class-wide Popen patch.
        cls.popen_patcher.stop()

    def test_base(self):
        """For each file in test/inventory: run dynlxc.main on it, feed the
        result back through an Ansible Inventory, then run the matching
        playbook (if any) and require a zero exit code."""
        test_inv_dir = 'test/inventory'
        for inv in os.listdir(test_inv_dir):
            print "Processing ", inv
            res = dynlxc.main(os.path.join(test_inv_dir, inv), '')
            variable_manager = VariableManager()
            loader = DataLoader()
            # The patched Popen now returns the dynlxc result as JSON, so
            # Inventory() below re-parses it as a dynamic inventory script.
            self.mock_rv.communicate.return_value = [
                json.dumps(res), 'mocked_err']
            try:
                inventory = Inventory(
                    loader=loader,
                    variable_manager=variable_manager,
                    host_list='inventory/dynlxc.py'
                )
            except Exception as err:
                raise Exception("Inventory file {0} processing result '{1}' "
                                "failed with {2}".format(inv, res, err))
            variable_manager.set_inventory(inventory)
            play_source = dict(name="Ansible Play", hosts='localhost',
                               gather_facts='no')
            # If a playbook with the same name as the inventory fixture
            # exists, merge its first play into the play source.
            playbook = os.path.abspath(os.path.join(test_inv_dir,
                                                    '../playbooks', inv))
            if os.path.isfile(playbook):
                with open(playbook) as fh:
                    real_playbook = yaml.load(fh)[0]
                play_source.update(real_playbook)
            play = Play().load(play_source, variable_manager=variable_manager,
                               loader=loader)
            tqm = None
            try:
                tqm = TaskQueueManager(
                    inventory=inventory,
                    variable_manager=variable_manager,
                    loader=loader,
                    options=self.options,
                    passwords=None,
                    stdout_callback='default',
                )
                result = tqm.run(play)
                assert result == 0, ("Ansible playbook exitcode "
                                     "different from 0")
            finally:
                # Always release TaskQueueManager resources.
                if tqm is not None:
                    tqm.cleanup()
22,347 | 292e8ecdc1cc0d054d5aedc18618783cf8e8c0ff | from flask import *
river_app = Flask(__name__)
import mlab
from river import river
mlab.connect()
@river_app.route("/")
def all_rivers_in_africa_continent():
    """Render ex-8.html with every river whose continent is "Africa"."""
    all_rivers = river.objects(continent = "Africa")
    return render_template("ex-8.html", all_rivers = all_rivers)
@river_app.route("/ex-9")
def all_rivers_in_south_america_continent_length_lt_1000():
    """Render ex-9.html with South-American rivers of length < 1000."""
    all_rivers = river.objects(continent = "S. America", length__lt = 1000)
    return render_template("ex-9.html", all_rivers = all_rivers)
if __name__ == '__main__':
river_app.run(debug=True)
|
22,348 | 84a5780a93922d90dd2ab6859a3b0fce404851e1 | import threading
class Completer(object):
    """Build editor completion entries from libclang code-completion results.

    Python 2 code: relies on the three-argument form of ``sorted`` and on
    ``filter``/``map`` returning lists.
    """
    def __init__(self, editor, translation_unit_accessor, complete_flags):
        self._editor = editor
        self._translation_unit_accessor = translation_unit_accessor
        self._complete_flags = complete_flags
    def format_results(self, result):
        """Convert one libclang completion result into a dict with keys
        word/abbr/menu/info/args_pos/dup/kind (Vim completion shape)."""
        completion = dict()
        return_value = None
        abbr = ""
        args_pos = []
        cur_pos = 0
        word = ""
        for chunk in result.string:
            # Informative chunks are not part of the text to insert.
            if chunk.isKindInformative():
                continue
            # Remember the result type; it is prepended to the menu below.
            if chunk.isKindResultType():
                return_value = chunk
                continue
            chunk_spelling = chunk.spelling
            # The typed-text chunk is the identifier used as abbreviation.
            if chunk.isKindTypedText():
                abbr = chunk_spelling
            chunk_len = len(chunk_spelling)
            # Record [start, end) spans of placeholder (argument) chunks.
            if chunk.isKindPlaceHolder():
                args_pos += [[cur_pos, cur_pos + chunk_len]]
            cur_pos += chunk_len
            word += chunk_spelling
        menu = word
        if return_value:
            menu = return_value.spelling + " " + menu
        completion['word'] = word
        completion['abbr'] = abbr
        completion['menu'] = menu
        completion['info'] = word
        completion['args_pos'] = args_pos
        completion['dup'] = 1
        # Replace the number that represents a specific kind with a better
        # textual representation.
        completion['kind'] = kinds[result.cursorKind]
        return completion
    def get_current_completions(self, base):
        """Complete at the current cursor position on a worker thread,
        filter results by the *base* prefix, sort per the editor's
        configured algorithm, and format each result."""
        sorting = self._editor.sort_algorithm()
        thread = CompleteThread(self._editor,
                                self._translation_unit_accessor,
                                self._complete_flags,
                                self._editor.current_line(),
                                self._editor.current_column())
        thread.start()
        # Join in short slices so an editor abort request is honored.
        while thread.is_alive():
            thread.join(0.01)
            if self._editor.abort_requested():
                return []
        completionResult = thread.result
        if completionResult is None:
            return []
        results = completionResult.results
        if base != "":
            results = filter(lambda x: self.get_abbr(x.string).startswith(base), results)
        if sorting == 'priority':
            # Lower libclang priority values sort first.
            get_priority = lambda x: x.string.priority
            key = get_priority
            results = sorted(results, None, key)
        if sorting == 'alpha':
            get_abbreviation = lambda x: self.get_abbr(x.string).lower()
            key = get_abbreviation
            results = sorted(results, None, key)
        return map(self.format_results, results)
    def get_abbr(self, strings):
        """Return the typed-text spelling of a completion string, or ""."""
        for chunks in strings:
            if chunks.isKindTypedText():
                return chunks.spelling
        return ""
class CompleteThread(threading.Thread):
    """Worker thread that runs one libclang code completion.

    A class-wide lock serializes completions, since the shared translation
    units are not thread-safe.  The outcome is left in ``self.result``
    (``None`` on failure)."""
    lock = threading.Lock()
    def __init__(self, editor, translation_unit_accessor, complete_flags, line, column):
        threading.Thread.__init__(self)
        self._editor = editor
        self._complete_flags = complete_flags
        self._line = line
        self._column = column
        self._translation_unit_accessor = translation_unit_accessor
        # Snapshot file state while still on the calling (editor) thread.
        self._current_file = editor.current_file()
        self._file_name = editor.file_name()
        self.result = None
    def run(self):
        try:
            CompleteThread.lock.acquire()
            self.result = self.get_current_completion_results(
                self._line, self._column)
        except Exception, e:
            # Python 2 syntax; report the failure, result stays None.
            self._editor.display_message("Exception thrown in completion thread: " + str(e))
        finally:
            CompleteThread.lock.release()
    def get_current_completion_results(self, line, column):
        """Run codeComplete against the translation unit for the current
        file, passing the unsaved buffer contents."""
        def _do_it(translation_unit):
            return translation_unit.codeComplete(
                self._file_name, line, column, [self._current_file], self._complete_flags)
        return self._translation_unit_accessor.translation_unit_do(self._current_file, _do_it)
kinds = dict({
# Declarations
1: 't', # CXCursor_UnexposedDecl (A declaration whose specific kind is not
# exposed via this interface)
2: 't', # CXCursor_StructDecl (A C or C++ struct)
3: 't', # CXCursor_UnionDecl (A C or C++ union)
4: 't', # CXCursor_ClassDecl (A C++ class)
5: 't', # CXCursor_EnumDecl (An enumeration)
6: 'm', # CXCursor_FieldDecl (A field (in C) or non-static data member
# (in C++) in a struct, union, or C++ class)
7: 'e', # CXCursor_EnumConstantDecl (An enumerator constant)
8: 'f', # CXCursor_FunctionDecl (A function)
9: 'v', # CXCursor_VarDecl (A variable)
10: 'a', # CXCursor_ParmDecl (A function or method parameter)
11: '11', # CXCursor_ObjCInterfaceDecl (An Objective-C @interface)
12: '12', # CXCursor_ObjCCategoryDecl (An Objective-C @interface for a
# category)
13: '13', # CXCursor_ObjCProtocolDecl (An Objective-C @protocol declaration)
14: '14', # CXCursor_ObjCPropertyDecl (An Objective-C @property declaration)
15: '15', # CXCursor_ObjCIvarDecl (An Objective-C instance variable)
16: '16', # CXCursor_ObjCInstanceMethodDecl (An Objective-C instance method)
17: '17', # CXCursor_ObjCClassMethodDecl (An Objective-C class method)
18: '18', # CXCursor_ObjCImplementationDec (An Objective-C @implementation)
19: '19', # CXCursor_ObjCCategoryImplDecll (An Objective-C @implementation
# for a category)
20: 't', # CXCursor_TypedefDecl (A typedef)
21: 'f', # CXCursor_CXXMethod (A C++ class method)
22: 'n', # CXCursor_Namespace (A C++ namespace)
23: '23', # CXCursor_LinkageSpec (A linkage specification, e.g. 'extern "C"')
24: '+', # CXCursor_Constructor (A C++ constructor)
25: '~', # CXCursor_Destructor (A C++ destructor)
26: '26', # CXCursor_ConversionFunction (A C++ conversion function)
27: 'a', # CXCursor_TemplateTypeParameter (A C++ template type parameter)
28: 'a', # CXCursor_NonTypeTemplateParameter (A C++ non-type template
# parameter)
29: 'a', # CXCursor_TemplateTemplateParameter (A C++ template template
# parameter)
30: 'f', # CXCursor_FunctionTemplate (A C++ function template)
31: 'p', # CXCursor_ClassTemplate (A C++ class template)
32: '32', # CXCursor_ClassTemplatePartialSpecialization (A C++ class template
# partial specialization)
33: 'n', # CXCursor_NamespaceAlias (A C++ namespace alias declaration)
34: '34', # CXCursor_UsingDirective (A C++ using directive)
35: '35', # CXCursor_UsingDeclaration (A using declaration)
\
# References
40: '40', # CXCursor_ObjCSuperClassRef
41: '41', # CXCursor_ObjCProtocolRef
42: '42', # CXCursor_ObjCClassRef
43: '43', # CXCursor_TypeRef
44: '44', # CXCursor_CXXBaseSpecifier
45: '45', # CXCursor_TemplateRef (A reference to a class template, function
# template, template template parameter, or class template partial
# specialization)
46: '46', # CXCursor_NamespaceRef (A reference to a namespace or namespace
# alias)
47: '47', # CXCursor_MemberRef (A reference to a member of a struct, union,
# or class that occurs in some non-expression context, e.g., a
# designated initializer)
48: '48', # CXCursor_LabelRef (A reference to a labeled statement)
49: '49', # CXCursor_OverloadedDeclRef (A reference to a set of overloaded
# functions or function templates that has not yet been resolved to
# a specific function or function template)
\
# Error conditions
#70 : '70', # CXCursor_FirstInvalid
70: '70', # CXCursor_InvalidFile
71: '71', # CXCursor_NoDeclFound
72: 'u', # CXCursor_NotImplemented
73: '73', # CXCursor_InvalidCode
\
# Expressions
100: '100', # CXCursor_UnexposedExpr (An expression whose specific kind is
# not exposed via this interface)
101: '101', # CXCursor_DeclRefExpr (An expression that refers to some value
# declaration, such as a function, varible, or enumerator)
102: '102', # CXCursor_MemberRefExpr (An expression that refers to a member
# of a struct, union, class, Objective-C class, etc)
103: '103', # CXCursor_CallExpr (An expression that calls a function)
104: '104', # CXCursor_ObjCMessageExpr (An expression that sends a message
# to an Objective-C object or class)
105: '105', # CXCursor_BlockExpr (An expression that represents a block
# literal)
\
# Statements
200: '200', # CXCursor_UnexposedStmt (A statement whose specific kind is not
# exposed via this interface)
201: '201', # CXCursor_LabelStmt (A labelled statement in a function)
\
# Translation unit
300: '300', # CXCursor_TranslationUnit (Cursor that represents the
# translation unit itself)
\
# Attributes
400: '400', # CXCursor_UnexposedAttr (An attribute whose specific kind is
# not exposed via this interface)
401: '401', # CXCursor_IBActionAttr
402: '402', # CXCursor_IBOutletAttr
403: '403', # CXCursor_IBOutletCollectionAttr
\
# Preprocessing
500: '500', # CXCursor_PreprocessingDirective
501: 'd', # CXCursor_MacroDefinition
502: '502', # CXCursor_MacroInstantiation
503: '503' # CXCursor_InclusionDirective
})
|
22,349 | a7c49003c7492d2ecd10e24fab8ff6b5d53874e8 | import hashlib
import pickle
def encode(str, code='utf-8'):
    """Encode a text string into bytes using codec *code* (default utf-8).

    The first parameter keeps its original name (shadowing the builtin
    ``str``) so keyword callers are unaffected.
    """
    encoded_bytes = str.encode(code)
    return encoded_bytes
def decode(str, code='utf-8'):
    """Decode a bytes object into text using codec *code* (default utf-8).

    The first parameter keeps its original name (shadowing the builtin
    ``str``) so keyword callers are unaffected.
    """
    text = str.decode(code)
    return text
def sum256_byte(*args) -> bytes:
    """SHA-256 over the concatenation of all arguments.

    input type: bytes (each argument must already be encoded)
    output type: bytes (32-byte raw digest)
    """
    digest = hashlib.sha256()
    for chunk in args:
        digest.update(chunk)
    return digest.digest()
def sum256_hex(*args) -> str:
    """SHA-256 over the concatenation of all arguments.

    input type: bytes (each argument must already be encoded)
    output type: 64-character lowercase hex string
    """
    digest = hashlib.sha256()
    for chunk in args:
        digest.update(chunk)
    return digest.hexdigest()
def serialize(obj):
    """Return the pickle byte representation of *obj*."""
    blob = pickle.dumps(obj)
    return blob
def deserialize(obj):
    """Reconstruct an object from a pickle byte blob.

    Only use on trusted data: unpickling can execute arbitrary code.
    """
    value = pickle.loads(obj)
    return value
class ContinueIt(Exception):
    """Marker exception.  The name suggests it is raised to continue an
    outer loop from nested code; no raising sites are visible here."""
    pass
22,350 | c02ca66b6810fdfc3f2862388d37972537d40fd0 | from sklearn.metrics import brier_score_loss
import numpy as np
def var_brier_score(y_test, y_proba):
    """Estimate the variance of the Brier score for binary labels *y_test*
    and predicted probabilities *y_proba* (element-wise numpy arrays).

    Computes E[(p - y)^4] - (E[(p - y)^2])^2.  The first five terms are the
    binomial expansion of (p - y)^4 simplified with y^k == y for binary y:
    p^4 - 4 p^3 y + 6 p^2 y - 4 p y + y.
    """
    val = np.mean(y_proba**4)
    val -= 4*np.mean(y_proba**3 * y_test)
    val += 6*np.mean(y_proba**2 * y_test)
    val -= 4*np.mean(y_proba * y_test)
    val += np.mean(y_test)
    # Subtract the squared mean: brier_score_loss is E[(p - y)^2].
    val -= brier_score_loss(y_test, y_proba)**2
    return val
def var_auc(auc, num_pos, num_neg):
    """Variance of an AUC estimate.

    Resembles the Hanley & McNeil (1982) standard-error formula, but with
    (n/2 - 1) in place of (num_pos - 1) and (num_neg - 1) — i.e. it assumes
    a roughly balanced sample.  Returns the squared standard error.
    """
    n = num_pos + num_neg
    prop = num_pos / n
    q1_term = (n / 2 - 1) * (1 - auc) / (2 - auc)
    q2_term = (n / 2 - 1) * auc / (1 + auc)
    variance = auc * (1 - auc) * (1 + q1_term + q2_term) / (n ** 2 * prop * (1 - prop))
    se = np.sqrt(variance)
    return se ** 2
def var_nb(sens, spec, prop, n, th=0.5):
    """Variance of net benefit at decision threshold *th*, given sensitivity
    *sens*, specificity *spec*, prevalence *prop* and sample size *n*."""
    # Weight applied to false positives: odds of the threshold scaled by
    # the non-event/event ratio.
    w = (1 - prop) / prop * th / (1 - th)
    sens_term = sens * (1 - sens) / prop
    spec_term = w ** 2 * spec * (1 - spec) / (1 - prop)
    fp_term = w ** 2 * (1 - spec) ** 2 / (prop * (1 - prop))
    return 1 / n * (sens_term + spec_term + fp_term)
|
22,351 | 1b052325cb25ed35e8907f15e58d6c352c3c132f | from django.contrib import admin
from django.contrib.auth.admin import UserAdmin
from .models import MyUsers, Allergen, Diet, FoodList, Favorites
admin.site.register(Allergen)
admin.site.register(Diet)
admin.site.register(FoodList)
admin.site.register(Favorites)
admin.site.register(MyUsers, UserAdmin)
|
22,352 | 17baa60c25f7faffb3033fb3e855024e363a1e6d | # Bir sayının basamaklarındaki tüm rakamların ayrı ayrı küplerinin
# toplamı kendisine eşitse bu sayıya Armstrong sayı denir...
num = input('Enter a positive integer number to see if it is an Armstrong Number :')
result = 0
if (not (num.isdigit())) or int(num) < 0:
print('It is an invalid entry. Don\'t use non-numeric, float, or negative values!')
else:
for i in range(len(num)):
result += int(num[i]) ** len(num)
if result == int(num):
print(f'{num} is an Armstrong Number')
else:
print(f'{num} is not an Armstrong Number') |
22,353 | e8ea9eef32393e55182e5fc3df3161c89a8e2d9b | '''
@description:# 数据库查询程序,数据已经通过处理并放在了服务器中的mysql数据库中,四个字段分别表示的是id,颜色,车型和车辆id
@time:2019年11月30日20:01:39
@author:西安交通大学软件学院场景检索与检测组
'''
import pymysql
# Smoke test: verify that the MySQL server is reachable.
def search_version():
    """Connect to the remote MySQL server and print its version string."""
    db = pymysql.connect("139.199.193.78", "root", "123456", "vehicle")
    # Create a cursor object with cursor().
    cursor = db.cursor()
    # Run the SQL query with execute().
    cursor.execute("SELECT VERSION()")
    # Fetch a single row with fetchone().
    data = cursor.fetchone()
    print("Database version : %s " % data)
    # Close the database connection.
    db.close()
# Look up the image paths for a predicted vehicle id; returns the paths.
def get_paths_by_id(vid):
    """Return the image file names ('<7-digit pid>.jpg') for vehicle *vid*.

    Queries vehicle.vehicle_info for every picture id belonging to the
    vehicle and zero-pads each pid to seven digits.  Returns whatever was
    collected so far if the query fails (best-effort, like the original).
    """
    db = pymysql.connect("139.199.193.78", "root", "123456", "vehicle")
    paths = []
    try:
        cursor = db.cursor()
        # Parameterized query instead of %-interpolating into the SQL text.
        cursor.execute(
            "select pid from vehicle.vehicle_info where vehicle_id = %s",
            (vid,))
        for row in cursor.fetchall():
            # Zero-pad the picture id to 7 digits: 1234 -> '0001234.jpg'.
            paths.append(str(row[0]).zfill(7) + '.jpg')
    except Exception:
        # Narrowed from a bare except; keep the original best-effort report.
        print("Error: unable to fetch data")
    finally:
        db.close()  # the original leaked the connection
    return paths
if __name__ == '__main__':
results = get_paths_by_id(5049)
print(results)
# print(len(str(7888)))
|
22,354 | 45aa86cb6b3403e1b8d8c0f52633649e305e1096 | #!/usr/bin/env python
"""Shear analysis."""
import sys
from clusters import main
sys.exit(main.shear())
|
22,355 | caaec4282c2330b316b3821b7f5ddd5ef9047365 | #!/usr/bin/env python3
# Copyright 2009-2017 BHG http://bw.org/
# Sample channel values for the bit-shift demo.
r = 228
g = 0   # was written `00`; plain zero is clearer
b = 80
# BUG FIX: x and y were never defined, so this script raised NameError on
# every run.  Bind them to the sample values above so the demo executes;
# adjust if different operands were intended.
x = r
y = g
z = x >> y
print(f'(hex) x is {x:02x}, y is {y:02x}, z is {z:02x}')
print(f'(bin) x is {x:08b}, y is {y:08b}, z is {z:08b}')
|
22,356 | c3d3230410ce22727ce43c85246ea9173128d550 | from .coreference import Coreference
from .dependency import Dependency
from .document import Document
from .mention import Mention
from .sentence import Sentence
from .token import Token
__all__ = \
['Coreference', 'Dependency', 'Document', 'Mention', 'Token', 'Sentence']
|
22,357 | 9db61b31556af9857236659e06abd66eec060713 | from sys import argv
script, filename = argv

print("your file is: %s" % filename)
# BUG FIX: both file handles were opened and never closed; use context
# managers so they are released promptly.
with open(filename) as txt:
    print(txt.read())

file_again = input("> ")
with open(file_again) as txt_again:
    print(txt_again.read())
22,358 | 5575bef19f991615158a477feb160bd568460c95 | # modules
import os
import json
import pandas as pd
# # the json file to parse
# names = ['Trello_08042020.json', 'NAFC_task_08042020.json', 'CV_08042020.json']
names = ['Trello_08042020.json']
sv_names = ['Expectations_08042020']
file_extension = '.txt' # the file extension you'd like to use
end_dir = '/home/manuel/trello-json-parser/data/' # dir to store your cards
year_limit = 2018
for ind_name, name in enumerate(names):
print(name)
json_file = '/home/manuel/trello-json-parser/data/'+name
# open json
with open(json_file) as data_file:
data = json.load(data_file)
# variables
cards = data["cards"]
total_cards = len(data["cards"])
written_cards = 0
# list of strings
# Calling DataFrame constructor on list
df_tmp = {'Name': [], 'Description': [], 'Assignee': [],
'Follower': [], 'Due': [], 'date': [], 'Start date': [],
'Section/Column': [], 'Notes': [], 'Completed At': [], 'Tags': []}
# relevant_projects = ['paper/book', 'RNN project', 'students', 'DMS',
# 'Expectations', 'metaWork', 'NeuroGym']
# loop
disc_sects = ['565febb3fabcb515abac1dfd']
sel_lbl = ['Expectations']
for card in cards:
year = float(card["dateLastActivity"][:4])
# process labels
labels = ''
lbl_in = False
for item in card['labels']:
labels += item['name'] + ','
if item['name'] in sel_lbl:
lbl_in = True
process_card =\
year >= year_limit and card['idList'] not in disc_sects and lbl_in
if process_card:
# NAME
df_tmp['Name'].append(card["name"])
# DESCRIPTION
df_tmp['Description'].append(card["desc"])
# ASSIGNEE
df_tmp['Assignee'].append('')
# FOLLOWERS
df_tmp['Follower'].append('')
# DUE DATE
if card["due"] is None:
due = ''
else:
due = card['due'][:10]
df_tmp['Due'].append(due)
df_tmp['date'].append(card["dateLastActivity"][:10])
df_tmp['Start date'].append(card["dateLastActivity"][:10])
# NOTES (add trello url)
df_tmp['Notes'].append(card['shortUrl'])
# WHEN WAS COMPLETED (not processed by asana)
if card['closed']:
# df_tmp['Completed At'].append(card["dateLastActivity"][:10])
df_tmp['Completed At'].append('')
else:
df_tmp['Completed At'].append('')
written_cards += 1
# TAGS (not processed by asana)
df_tmp['Tags'].append(labels[:-1])
# ASSIGN TO SPECIFIC COLUMN
if card['closed']:
df_tmp['Section/Column'].append('Done')
elif card['idList'] == '5703e6e28a257311cab6c1f2':
df_tmp['Section/Column'].append('Papers/books')
elif card['idList'] == '5d9db08e1c943d86fc9174f7':
df_tmp['Section/Column'].append('Useful stuff')
elif card['idList'] == '565374092595fae22d8a1832':
df_tmp['Section/Column'].append('To-do')
elif card['idList'] == '5653740e7b1cf15997807b3f':
df_tmp['Section/Column'].append('Doing')
elif card['idList'] == '5653741a546f5787b821970b':
df_tmp['Section/Column'].append('Ideas')
else:
df_tmp['Section/Column'].append(str(card["idList"]))
if card['name'] == 'NY':
print(card.keys())
print(card["idList"])
# message
df = pd.DataFrame(df_tmp)
df.to_csv(end_dir+'csv_file_'+sv_names[ind_name]+'.csv', index=False)
print(str(written_cards) + " out of " + str(total_cards) +
" written successfully! :)")
print("======")
|
22,359 | db3501ebf7ab974b4f3a496bf62e9c4e779c633d | /usr/bin/env python
import redis, time, socket, argparse, os
zabbix_conf = "/etc/zabbix/zabbix_agentd.conf" # (optional) Zabbix configuration file
redis_ip = '127.0.0.1'
redis_port = 6379
parser = argparse.ArgumentParser(description='Zabbix Redis status script')
parser.add_argument('-a','--auth',dest='redis_pass',action='store',help='Redis server pass',default=None)
parser.add_argument('-i', '--interval', required=True, type=int, help='Refresh interval in secs.')
args = parser.parse_args()
def main():
    """Poll Redis every *interval* seconds and push its stats to Zabbix.

    Python 2 code (uses os.popen2).  Each cycle collects every INFO field,
    sums the lengths of all list-typed keys into one 'llenall' metric, and
    pipes the lines to zabbix_sender via stdin.
    """
    while args.interval:
        client = redis.StrictRedis(host=redis_ip, port=redis_port, password=args.redis_pass)
        server_info = client.info()
        stats = []
        for i in server_info:
            stats.append("- redis[{0}] {1}".format(i, server_info[i]))
        # Aggregate length of every list key into a single metric.
        llensum = 0
        for key in client.scan_iter('*'):
            if client.type(key) == 'list':
                llensum += client.llen(key)
        stats.append("- redis[llenall] {}".format(llensum))
        # Send stats to zabbix
        hostname = socket.gethostname()
        stdin,stdout = os.popen2("zabbix_sender -s {0} -c {1} -i -".format(hostname, zabbix_conf))
        stdin.write('\n'.join(stats))
        stdin.close()
        stdout.close()
        time.sleep(args.interval)
if __name__ == '__main__':
main()
|
22,360 | 5b415397545cb4f199fc16c81f6103080c173b60 | import array
import os
import pickle
import json
from os import listdir
import random
import numpy as np
import pandas as pd
from keras.models import load_model
from learner.evolution.sascorer import calculateScore
from learner.models import coeff_determination
from learner.seq2seq.sampler import latent_to_smiles
from rdkit import Chem
from rdkit.Chem import Descriptors, AllChem
from rdkit.DataStructs.cDataStructs import TanimotoSimilarity
from sklearn.externals import joblib
import matplotlib.pyplot as plt
import seaborn
from deap import algorithms, base, benchmarks, tools, creator
from eva import uniform, get_models
df = pd.read_csv('C:\PycharmProjects\ml.services\Source\sds_tools\learner\evolution\can2enum_sol.csv',)
df = df.iloc[:,3:259]
counter = 0
non_zero_index = []
non_zero_columns = []
flag = True
for column in df:
flag = True
for value in df[column].values:
if value != 0.0:
flag = False
if flag is False:
non_zero_index.append(counter)
non_zero_columns.append(column)
counter+=1
print(len(non_zero_index))
df = df[non_zero_columns]
BOUND_UP = list(df.max())
BOUND_LOW = list(df.min())
stds = list(df.std(axis=0))
means = list(df.mean(axis=0))
df = df.iloc[200:301]
df.to_json(orient='values',path_or_buf='starters.json')
BOUND_UP = [x+0.0000000001 for x in BOUND_UP]
BOUND_LOW = [x for x in BOUND_LOW]
smi_to_lat_model = load_model('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\smi2lat.h5')
latent_to_states_model = load_model('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\lat2state.h5')
sample_model = load_model('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\samplemodel.h5')
charset = pickle.load(open('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\charset.obj','rb'))
char_to_int = pickle.load(open('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\char_to_int.obj','rb'))
smiles_len = pickle.load(open('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\smiles_len.obj','rb'))
int_to_char = pickle.load(open('C:\PycharmProjects\ml.services\Source\sds_tools\learner\seq2seq\CAN2ENUM\int_to_char.obj','rb'))
print(len(non_zero_index))
def problem_drug_likeness(individual):
    """DEAP fitness function: decode a reduced latent vector into a SMILES
    string and score the molecule.

    Returns (logP-penalty + synthetic-accessibility score,
    dissimilarity to benzene), or (9999, 9999) when decoding or scoring
    fails — see valid(), which filters that sentinel out.
    """
    # Re-expand the reduced individual into the full 256-dim latent vector,
    # leaving the globally all-zero dimensions at 0.0.
    final_vector = [0.0 for x in range(256)]
    individual_latent_vector = [x for x in individual]
    counter = 0
    for i in range(256):
        if i in non_zero_index:
            final_vector[i] = individual_latent_vector[counter]
            counter += 1
    final_vector = np.reshape(final_vector,(1, 256))
    smiles = latent_to_smiles(
        charset,smiles_len,char_to_int,int_to_char,
        latent_to_states_model,sample_model,final_vector,
        type='2_layers'
    )
    molecule = Chem.MolFromSmiles(smiles)
    if molecule:
        try:
            logP = Descriptors.MolLogP(molecule)
            # Squared distance from a target logP of 1.575.
            logP_score = (1.575 - logP)**2
            SA_score = calculateScore(molecule)
            print(Chem.MolToSmiles(molecule))
            bad_drug = logP_score + SA_score
            mol_fp = AllChem.GetMorganFingerprintAsBitVect(molecule, 2)
            # Reference fingerprint: benzene.
            ref = AllChem.GetMorganFingerprintAsBitVect(Chem.MolFromSmiles('c1ccccc1'),2)
            dissimilarity_to_ref = (1 - TanimotoSimilarity(mol_fp,ref))
            print((bad_drug,dissimilarity_to_ref))
            return bad_drug, dissimilarity_to_ref
        except:
            # Any rdkit scoring failure marks the individual invalid.
            return 9999,9999
    else:
        return 9999,9999
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
plt.rcParams['text.latex.preamble'] ='\\usepackage{libertine}\n\\usepackage[utf8]{inputenc}'
seaborn.set(style='whitegrid')
seaborn.set_context('notebook')
creator.create("FitnessMin", base.Fitness, weights=(-0.001,-1.0))
creator.create("Individual", list, fitness=creator.FitnessMin)
def initIndividual(icls, content):
    """Wrap *content* in the individual class *icls* (DEAP seeding helper)."""
    individual = icls(content)
    return individual
def initPopulation(pcls, ind_init, filename):
    """Load a JSON list of seeds from *filename*, build one individual per
    entry with *ind_init*, and return them wrapped in container *pcls*."""
    with open(filename, "r") as handle:
        seeds = json.load(handle)
    individuals = (ind_init(entry) for entry in seeds)
    return pcls(individuals)
toolbox = base.Toolbox()
NDIM = len(non_zero_index)
toolbox.register("attr_float", uniform, BOUND_LOW, BOUND_UP, NDIM)
toolbox.register("individual", tools.initIterate, creator.Individual, toolbox.attr_float)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("individual_guess", initIndividual, creator.Individual)
toolbox.register("population_guess", initPopulation, list, toolbox.individual_guess, "starters.json")
toolbox.register("evaluate", problem_drug_likeness)
toolbox.register("mate", tools.cxSimulatedBinaryBounded, low=BOUND_LOW, up=BOUND_UP, eta=10)
toolbox.register("mutate", tools.mutPolynomialBounded, low=BOUND_LOW, up=BOUND_UP, eta=10, indpb=0.01)
toolbox.register("select", tools.selNSGA2)
toolbox.pop_size = 250
mu = 100
toolbox.max_gen = 1000
toolbox.mut_prob = 0.5
toolbox.cross_prob = 0.5
def valid(individual):
    """True when the individual's fitness does not contain the 9999
    failure sentinel produced by problem_drug_likeness."""
    return 9999 not in individual.fitness.values
def run_ea(toolbox, stats=None, verbose=False):
    """Seed a population containing only valid (decodable) individuals,
    then run DEAP's (mu + lambda) evolutionary algorithm on it."""
    final_pop = []
    # Keep sampling until pop_size individuals with valid fitness exist.
    while len(final_pop) < toolbox.pop_size:
        pop = toolbox.population(toolbox.pop_size - len(final_pop))
        # Evaluate the entire population
        fitnesses = list(map(toolbox.evaluate, pop))
        for ind, fit in zip(pop, fitnesses):
            ind.fitness.values = fit
        # Drop individuals carrying the 9999 failure sentinel.
        pop = list(filter(valid, pop))
        final_pop.extend(pop)
        print('#############################################################' + str(len(final_pop)))
    pop = toolbox.select(final_pop, len(final_pop))
    return algorithms.eaMuPlusLambda(pop, toolbox, mu=mu,
                                     lambda_=toolbox.pop_size,
                                     cxpb=toolbox.cross_prob,
                                     mutpb=toolbox.mut_prob,
                                     stats=stats,
                                     ngen=toolbox.max_gen,
                                     verbose=verbose)
res,_ = run_ea(toolbox)
fronts = tools.emo.sortLogNondominated(res, len(res))
df = pd.DataFrame(fronts)
df.to_csv('results.csv')
|
22,361 | e90ff5ffb265655b8be1945ed6adcb60a8a30427 | """
You are required to write a program to sort the (name, age, score) tuples by ascending order where name is a string, age
and score are numbers. The tuples are input by console. The sort criteria are:
1: Sort based on name
2: Then sort based on age
3: Then sort by score
The priority is that name > age > score
If the following tuples are given as input to the program:
Tom,19,80
John,20,90
Jony,17,91
Jony,17,93
Json,21,85
Then, the output of the program should be:
[('John', '20', '90'), ('Jony', '17', '91'), ('Jony', '17', '93'), ('Json', '21', '85'), ('Tom', '19', '80')]
"""
from operator import itemgetter
output = []
user_in = raw_input("Input details as name,age,score :")
while True:
str_in = raw_input("Input details as name,age,score or blank: ")
if not str_in:
break
output.append(tuple(str_in.split(",")))
print sorted(output, key=itemgetter(0,1,2))
|
22,362 | df3b9f7ff99b664d4519525920d26287ccdda02b | import discord
import json
from discord.ext import commands, tasks
import random
import datetime
def get_prefix(client, message):
    """Return the per-guild command prefix stored in json/prefixes.json,
    keyed by the message's guild id (raises KeyError for unknown guilds)."""
    with open('./json/prefixes.json', 'r') as f:
        prefixes = json.load(f)
    return prefixes[str(message.guild.id)]
bot = commands.Bot(command_prefix=">", case_insensitive=True)
Bot = discord.client
client = bot
client.remove_command('help')
class Help(commands.Cog):
    """Cog that serves the bot's custom embed-based help pages."""
    def __init__(self, bot, *args, **kwargs):
        self.bot = bot
    # Events
    @commands.Cog.listener()
    async def on_ready(self):
        print('Help Cog is on')
    # HELP COMMANDS
    @commands.command()
    async def help(self, ctx):
        """Top-level help page listing the category help commands."""
        value = random.randint(0, 0xffffff)  # random embed colour
        embed = discord.Embed(
            colour=value,
            timestamp=datetime.datetime.utcnow()
        )
        embed.set_author(name="Commands")
        embed.add_field(name="HelpF", value="Gives all the entertainment commands.", inline=False)
        embed.add_field(name="HelpM", value="Gives all the moderation commands.", inline=False)
        embed.add_field(name="Miscellaneous", value="Other commands. (also can use 'Misc'", inline=False)
        embed.set_footer(text=f"Just helped{ctx.author}", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed)
    @commands.command(aliases=['helpf','help_fun','help1'])
    async def helpfun(self, ctx):
        """Help page for the fun / entertainment commands."""
        value1 = random.randint(0, 0xffffff)
        embed1 = discord.Embed(
            colour=value1,
            timestamp=datetime.datetime.utcnow()
        )
        embed1.set_author(name="Fun Commands")
        embed1.add_field(name="Coinflip", value="Heads or Tails", inline=False)
        embed1.add_field(name="8ball", value="Ask a question and the bot will give an answer", inline=False)
        embed1.add_field(name="WhoIs", value="Will give a certain user's info.", inline=False)
        embed1.add_field(name="Avatar", value="Shows the selected user's profile picture.", inline=False)
        embed1.add_field(name="Dm", value="Dm someone with the bot", inline=False)
        embed1.add_field(name="Meme", value="Get a Meme from Reddit", inline=False)
        embed1.set_footer(text=f"Just helped{ctx.author}", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed1)
    @commands.command(aliases=['helpmod','helpM','helpstaff','help_staff'])
    async def helpmoderation(self, ctx):
        """Help page for the moderation commands."""
        value2 = random.randint(0, 0xffffff)
        embed2 = discord.Embed(
            colour=value2,
            timestamp=datetime.datetime.utcnow()
        )
        embed2.set_author(name="Moderation Commands")
        embed2.add_field(name="Ban", value="Bans Users", inline=False)
        embed2.add_field(name="Unban", value="Unbans Users", inline=False)
        embed2.add_field(name="Kick", value="Kicks Users", inline=False)
        embed2.add_field(name="Mute", value="Mutes Users", inline=False)
        embed2.add_field(name="Unmute", value="Unmutes muted users", inline=False)
        embed2.add_field(name="Clear", value="Clears messages (clearall deletes all the messages from the channel)", inline=False)
        embed2.add_field(name="Warn", value="Warns users", inline=False)
        embed2.set_footer(text=f"Just helped{ctx.author}", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=embed2)
    @commands.command(aliases=['Miscellaneous'])
    async def Misc(self, ctx):
        """Help page for the miscellaneous commands."""
        value = random.randint(0, 0xffffff)
        misc = discord.Embed(
            colour=value,
            timestamp=datetime.datetime.utcnow()
        )
        misc.set_author(name="Miscellaneous Commands")
        misc.add_field(name="Stats", value="Bot Stats", inline=False)
        misc.add_field(name="MemberCount", value="Number of members in the server", inline=False)
        misc.set_footer(text=f"Just helped{ctx.author}", icon_url=ctx.author.avatar_url)
        # BUG FIX: previously sent `embed=crypto`, an undefined name that
        # raised NameError; send the `misc` embed built above instead.
        await ctx.send(embed=misc)
def setup(bot):
    """Extension entry point: register the Help cog with the bot."""
    bot.add_cog(Help(bot))
    print('Help Loaded')
|
22,363 | a62b949bdc0e8824de275c20c72a54f21f6f1d8d | import six
if six.PY3:
from .trader import Trader
else:
from .trader import Trader
from . import endpoints
from . import crypto_endpoints
|
22,364 | faf9a53eff9de9118220c3f805a33b124c6d114f | import random
import civilization
import star
civnum = 10
starnum = star.starnum
civiposlist = random.sample(list(range(star.starnum)), civnum)
civs = {} # {civ_id: civ},星系序号civiposlist[id],位置是 star.maplist[civiposlist[id]]['pos']
stafflist = [] # ('attack',self_id,target_id,self.state)
def move():
    """Dispatch and clear every queued staff order.

    Each entry looks like (movement, self_id, target_id, *state).
    BUG FIX: the original removed items from stafflist while iterating over
    the same list, which silently skipped every other entry; process a
    snapshot and clear the shared queue in place instead.
    """
    print('move')
    pending = stafflist[:]
    # Clear in place so the module-level list object keeps its identity.
    del stafflist[:]
    for staff in pending:
        print('staff:', staff)
        event(staff[0], staff[1], staff[2], staff[3:])
def event(movement, id_start, id_part, start_state=None):
    """Resolve one interaction between civilisations *id_start* and *id_part*.

    *movement* is one of 'attack', 'trap', 'communicate', 're_trap' or
    're_communicate'; *start_state* carries the initiator's state values.
    """
    print('event:', (movement, id_start, id_part, start_state, civs.keys()))
    if movement == 'attack':
        # An attack succeeds when the attacker's state exceeds 70% of the
        # defender's; the defeated civilisation is removed.
        if start_state[0] > 0.7 * civs[id_part].state:
            civs[id_part].live = 0
            civs[id_part].move()
            civs.pop(id_part)
    # BUG FIX: the original condition was `movement == 'trap' or
    # 'communicate'`, which is always truthy ('communicate' is a non-empty
    # string), so every non-attack movement took this branch and the
    # 're_trap'/'re_communicate' branches below were unreachable.
    elif movement in ('trap', 'communicate'):
        civs[id_part].reply(movement, id_start)
    elif movement == 're_trap':
        print('re_trapped')
        # The trapped civ's stars become visible and it is queued for attack.
        for stars in civs[id_start].stars_landed:
            civs[id_part].stars_not_seen.remove(stars)
        civs[id_part].staff.append(['attack', civiposlist[id_start],
                                    star.distance(id_part, civiposlist[id_start])])
    elif movement == 're_communicate':
        # Mutual exchange: share speed, state, contacts and star charts.
        civs[id_start].speed += civs[id_part].speed / 40
        civs[id_part].speed += civs[id_start].speed / 40
        civs[id_part].state += civs[id_start].state / 5
        civs[id_start].state += civs[id_part].state / 5
        civs[id_start].civ_with_communication.append(id_part)
        civs[id_part].civ_with_communication.append(id_start)
        for stars in civs[id_start].stars_landed:
            civs[id_part].stars_not_seen.remove(stars)
        for stars in civs[id_part].stars_landed:
            civs[id_start].stars_not_seen.remove(stars)
    else:
        print('movement_ERROR:', movement)
def dist(id_start, id_part):
    """Distance between one randomly chosen landed star of each civilisation."""
    return star.distance((random.sample(civs[id_part].stars_landed, 1)[-1]),
                         (random.sample(civs[id_start].stars_landed, 1))[-1])
for i in range(civnum):
civ_i = civilization.civ(i, civiposlist[i])
civs.update({i: civ_i})
pass
|
22,365 | 06a4beccb447e5f903073407f883e3db9990c4db | #!/usr/bin/python3
"""Script for linux to make and scale scans. Relies on scan_core, which
in turn uses scanimage (SANE) for the scanning and ImageMagick for the
image manipulation."""
#========================================================
# Copyright Jody M Sankey 2010 - 2018
#========================================================
# AppliesTo: linux
# AppliesTo: client
# RemoveExtension: True
# PublicPermissions: True
#========================================================
import os
import re
import sys
import scan_core
SCAN_PATH = os.path.expanduser('~/tmp/scan')
def print_option_set(option_set, leader):
    """Print one line per option in *option_set*, prefixed with *leader*.

    Each option is a dict with 'labels' (list of str) and 'description';
    the comma-joined labels are padded to a 20-character column.
    """
    for option in option_set:
        labels = ",".join(option['labels'])
        # Build the line in its own variable; the original clobbered the
        # `option_set` parameter as a line buffer inside the loop.
        line = leader + labels + " "*(20-len(labels)) + "- " + option['description']
        print(line)
def print_usage():
    """Print standard help string then quit"""
    # Indent used by print_option_set for the option tables below.
    leader = "      "
    print("\n Usage: scanning [-v|-c|-k=N] SOURCE PAPER SCALE COLOR [basename]\n")
    print(" SOURCE Paper source:")
    print_option_set(scan_core.SOURCES, leader)
    print(" PAPER Paper size:")
    print_option_set(scan_core.PAPERS, leader)
    print(" SCALE Scaling factor:")
    print_option_set(scan_core.SCALES, leader)
    print(" COLOR Colour mode:")
    print_option_set(scan_core.COLORS, leader)
    print(" basename Desired base filename, optionally including path")
    print(" -v View each scan when conversion is complete")
    print(" -c Confirm each scan before saving in final location")
    print(" -d Print the scanning a conversion commands used for debugging")
    print(" -k=N Do not convert page N of scan\n")
    print("SCANNING Script (c)2010 Jody Sankey")
    version = sys.version_info
    print("Currently running in Python v{}.{}.{}\n".format(*version))
    # Exits with status 0: printing usage is not treated as an error.
    sys.exit()
def die(print_string):
    """Report *print_string* on stdout, then abort with exit status 1."""
    print(print_string)
    raise SystemExit(1)
def main():
    """Run the scanning function using parameters from the command line."""
    if len(sys.argv) < 2:
        print_usage()
    args = sys.argv[1:]
    #Declare and initialize the variables controlled by switch
    check = False
    view = False
    debug = False
    kills = []
    #Eat any switches from the front
    while args and args[0].startswith('-'):
        arg = args.pop(0).lower()
        print("eating " + arg)
        # -k=N collects page numbers to exclude from conversion.
        mko = re.search(r"-k=([1-9]+)$", arg)
        if mko is not None:
            kills.append(int(mko.groups()[0]))
        elif arg == '-c':
            check = True
        elif arg == '-v':
            view = True
        elif arg == '-d':
            debug = True
        elif arg == '--help':
            print_usage()
        else:
            die("ERROR: Switch '{}' not recognized".format(arg))
    # Do we have enough parameters left?
    if len(args) not in range(4, 6):
        print(args)
        die("ERROR: Wrong number of parameters supplied")
    # Optional 5th positional argument is the output basename under SCAN_PATH.
    dest = os.path.join(SCAN_PATH, args[4]) if len(args) == 5 else None
    scan_core.perform_scan(dest, args[0], args[1], args[2], args[3],
                           view=view, check=check, kills=kills, debug=debug)
if __name__ == '__main__':
main()
|
22,366 | 01d378fdbdb7b7217983dbc5c68419a43b46cbf0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This is the main file of the engine of the game.
# In here, the commands are received and the response are sent back
# to whoever sent the command.
from board import Board
class Game:
    '''
    Main engine class for an ultimate tic-tac-toe game.
    Receives the opponent's last move, keeps the boards in sync, and asks
    the AI for the bot's reply.
    Definitions:
    - Player 0 = Player X
    - Player 1 = Player O
    - Upper-case X and O !!
    '''
    def __init__(self, player_id, starting_player, small_boards, big_board, pAI):
        '''
        Constructor of Game class.
        [player_id] is the id number of the bot.
        [starting_player] is the id of the player that will start the game.
        [player_id] must be 0 or 1.
        [starting_player] must be 0 or 1.
        '''
        # NOTE(review): starting_player is accepted but never stored -- confirm
        # whether callers still need it.
        self.small_boards = small_boards
        self.big_board = big_board
        self.bot_id = player_id
        self.AI = pAI
    def move(self, last_move):
        # If the game is already decided (check_state() >= 0), answer with the
        # sentinel "no move" quadruple.
        if self.check_state() >= 0:
            return [-1, -1, -1, -1]
        # Making sure the move made by the other player is updated in this instance.
        # last_move[0] == -1 means there was no previous move (we start).
        if last_move[0] != -1:
            # The opponent plays the opposite mark of this bot.
            if self.bot_id == 0:
                self.small_boards[last_move[0]][last_move[1]].fill(last_move[2],
                                                                   last_move[3], 'O')
            else:
                self.small_boards[last_move[0]][last_move[1]].fill(last_move[2],
                                                                   last_move[3], 'X')
            # Propagate a freshly decided small board onto the big board.
            new_state = self.small_boards[last_move[0]][last_move[1]].check_state()
            if new_state == 0:
                self.big_board.fill(last_move[0], last_move[1], 'X')
            elif new_state == 1:
                self.big_board.fill(last_move[0], last_move[1], 'O')
        # Delegate the actual move choice to the injected AI strategy.
        return self.AI.move(self.big_board, self.small_boards,
                            self.bot_id, last_move[2],
                            last_move[3])
    def check_state(self):
        '''
        This function returns an integer value.
        0 if player 0 has won the game,
        1 if player 1 has won the game,
        2 if it ended in a draw,
        -1 if the game has not ended yet.
        '''
        state = self.big_board.check_state()
        if state != -1:
            return state
        # Big board undecided: the game continues while any small board can
        # still be played; otherwise it is a draw.
        for i in range(3):
            for j in range(3):
                small_state = self.small_boards[i][j].check_state()
                if small_state == -1 or small_state == 2:
                    if not self.small_boards[i][j].board_fulfilled():
                        return -1
        return 2
|
22,367 | 45c3ce81a649d6c2539114d8f8fb50b4290dbaeb | from __future__ import unicode_literals
from django.db import models
#from django.contrib.auth.models import User
# class PinUser(models.Model):
# user = models.OneToOneField(User, on_delete=models.CASCADE)
# token = models.CharField(max_length=255)
class Pin(models.Model):
    # Snapshot of a Pinterest pin; the pin's own id string is reused as the
    # primary key.  All API values are stored verbatim as text.
    id = models.CharField(primary_key=True, max_length=200)
    link = models.CharField(max_length=200)
    url = models.CharField(max_length=200)
    created_at = models.CharField(max_length=200)
    note = models.CharField(max_length=200)
    color = models.CharField(max_length=20)
    image = models.CharField(max_length=200)
    title = models.CharField(max_length=200)
    ''' other more complicated fields are shown in api docs '''
    def __str__(self):
        return self.url + " " + self.title
class MealDay(models.Model):
    # A calendar day that meals are grouped under; the date is kept as text.
    date = models.CharField(max_length=20)
    def __str__(self):
        return self.date
class Meal(models.Model):
    """A planned meal (link, image, title) belonging to one MealDay."""
    url = models.CharField(max_length=200)
    image = models.CharField(max_length=200)
    title = models.CharField(max_length=200)
    meal_day = models.ForeignKey(MealDay, on_delete=models.CASCADE)
    def __str__(self):
        return "{0} {1}".format(self.title, self.meal_day.date)
|
22,368 | a978c7955f51e3070e09828a2798b445bf9a0aaa | import pandas as pd
from data_prep import load_data
def category_column_correlation(column, target):
    '''
    Input: Series - Column to be dummied, Series - Target, undummified
    Output: DataFrame - Correlation coefficient matrix of dummies of column
            and target (rows = column dummies, cols = target dummies).
            Non-redundant.
    '''
    column_dummies = pd.get_dummies(column)
    target_dummies = pd.get_dummies(target)
    correlation_df = pd.concat([column_dummies, target_dummies], axis=1).corr()
    # .ix was deprecated and removed from pandas (1.0); .loc performs the same
    # label-based row/column selection here.
    return correlation_df.loc[column_dummies.columns, target_dummies.columns]
if __name__ == '__main__':
    # Smoke-test hook: loads the project dataset (see data_prep.load_data).
    df = load_data()
|
22,369 | 7b4ad04b5f20ca6b41930f4c8cc60a80bc225d1f | import pandas as pd
from pandas import DataFrame
from pandasql import sqldf, load_meat, load_births
# Demo frame: five names paired with sequential integers.
df = DataFrame({'name': ['zf', 'gy', 'a', 'b', 'c'], 'data1': range(5)})
print(df)
# pandasql runs SQL against any DataFrame found in the supplied namespace.
pysqldf = lambda sql: sqldf(sql, globals())
sql = "select * from df where name='zf'"
print(pysqldf(sql))
|
22,370 | afab85ea5e9b0f45eb6f35f7059f2a3602294515 | # -*- coding: utf-8 -*-
import re
from odoo import api, models, fields, _
class PurchaseCurrency(models.Model):
    # Adds the order total expressed in the company currency to purchase orders.
    _inherit = 'purchase.order'
    company_currency_amount = fields.Float(string='Company Currency Total', compute='find_amount')
    def find_amount(self):
        # Convert each order's total from the order currency into the company
        # currency using res.currency's conversion helper.
        # NOTE(review): no @api.depends on amount_total/currency_id -- recompute
        # triggers may be missing; confirm against the Odoo version in use.
        for this in self:
            price = self.env['res.currency']._compute(this.currency_id, this.company_id.currency_id, this.amount_total)
            this.company_currency_amount = price
|
22,371 | 0105ba80e6c98b119fd8fe9098ad1dd61e10408f | import unittest
from repl import *
class TestRepl(unittest.TestCase):
    # Smoke test for the repl module's parse/evaluate pipeline.
    def test_simple_code(self) -> None:
        # "1 int 2 int +" pushes the integers 1 and 2 and adds them.
        code = afc("1 int 2 int +")
        assert do_repl("test", code) == 3
|
22,372 | ef5fd0bc4b83d90fad0b5a6df165c9eb533f0100 | import win32com.client as wincl
# Registry token of the Thai (Pattara) voice for the modern OneCore engine.
# Raw strings prevent backslash escape interpretation in registry paths.
TH_voice_id = r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech_OneCore\Voices\Tokens\MSTTS_V110_thTH_Pattara"
speaker_number = 1
# --- Direct SAPI via COM -------------------------------------------------
spk = wincl.Dispatch("SAPI.SpVoice")
vcs = spk.GetVoices()
SVSFlag = 11
print(vcs.Item(speaker_number).GetAttribute("Name"))  # speaker name
spk.SetVoice(vcs.Item(speaker_number))  # set voice (see Windows Text-to-Speech settings)
spk.Speak("35.5องศา")
spk.Speak("อุณหภูมิเกิน 37.5องศา กรุณาลองใหม่อีกครั้ง")
spk.Speak("กรุณาลองใหม่อีกครั้ง")
spk.Speak("ผ่านครับ")
# --- Same output through pyttsx3 ----------------------------------------
import pyttsx3
engine = pyttsx3.init()
TH_voice_id = r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Speech\Voices\Tokens\MSTTS_V110_thTH_Pattara"
engine.setProperty('volume', 1)  # Volume 0-1
engine.setProperty('rate', 180)  # words per minute (was tried at 148)
engine.setProperty('voice', TH_voice_id)
engine.say('กรุณาสวมหน้ากากอนามัยด้วยครับ')
engine.runAndWait()
|
22,373 | 0310cfb68ef59fd6f08f2c0fe0c8f7d49fa0df8e | #Random Forest Regression
# Importing libraries
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Importing the dataset (expects Position_Salaries.csv in the working directory)
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Fitting Random Forest Regression to the dataset (100 trees, fixed seed)
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=100, random_state=0)
regressor.fit(X, y)
# Predicting a new result for position level 6.5
y_pred = regressor.predict([[6.5]])
# Visualizing the regression results on a fine 0.1-step grid so the
# piecewise-constant forest prediction renders smoothly
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('Random Forest Regression')
plt.xlabel('Position Label')
plt.ylabel('Salary')
plt.show()
22,374 | f62ff62c3d46e4412a61321cdd8b38812c3e5048 | import sys
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
import random
from openpyxl import Workbook, load_workbook
def outputToExcel(fileName, list1, list2):
    """Write two parallel single-column lists into columns A and B of a new
    workbook, printing progress every 10,000 rows, then save to *fileName*."""
    book = Workbook()
    ws = book.active
    # openpyxl rows are 1-based while the source lists are 0-based.  The
    # original iterated i in 1..len(list1) and indexed list1[i], which skipped
    # element 0 and raised IndexError on the final iteration.
    for i in range(len(list1)):
        row = i + 1
        ws['A' + str(row)] = list1[i][0]
        ws['B' + str(row)] = list2[i][0]
        if row % 10000 == 0:
            print(str(row * 2) + "%")
    book.save(fileName)
def parse(string):
    """Strip formatting characters (quotes, $, parentheses, %, commas and
    spaces) from a raw report line, then split the remainder on tabs."""
    ignore = set('"$()%, ')
    cleaned = "".join(ch for ch in string if ch not in ignore)
    return cleaned.split("\t")
if __name__ == "__main__":
    # Read file
    fileName = "pharmacydata.txt"
    fileReader = open(fileName,"r")
    lines = fileReader.readlines()
    totalSales = []
    acq = []
    awp = []
    # Shuffle all data rows (skipping the header) so the train/test split
    # below is random.
    randomizedLines = lines[1:]
    random.shuffle(randomizedLines)
    # Fetch data that fall within certain criteria
    for line in randomizedLines:
        d,ndc,desc,qty,p,s,c,sales,acqCosts,profit,margin,dawcode,manu,acqUnit,price= parse(line)
        sales = float(sales.strip())
        acqCosts = float(acqCosts.strip())
        margin = float(margin.strip())
        price = float(price.strip())
        # Keep only rows with a plausible margin and a price under $5000.
        if 400 > margin > 0 and price < 5000:
            totalSales.append([sales])
            acq.append([acqCosts])
            awp.append([price])
    # Split data into training data and testing data
    # NOTE(review): trainingSize comes from the unfiltered line count but is
    # used to slice the *filtered* lists -- confirm this split is intended.
    x = acq
    y = awp
    trainingSize = len(lines) // 5
    xTrain = x[:trainingSize]
    xTest = x[trainingSize:]
    yTrain = y[:trainingSize]
    yTest = y[trainingSize:]
    # Create linear regression model
    model = linear_model.LinearRegression()
    # Train model
    model.fit(xTrain, yTrain)
    model.score(xTrain, yTrain)
    # Make predictions using testing set
    predictions = model.predict(xTest)
    print("Coefficients: ", model.coef_)
    print("Intercept: ", model.intercept_)
    print("Regression Equation: y = %.4fx+%.4f" %(model.coef_, model.intercept_))
    print("Mean squared error: %.2f" % mean_squared_error(yTest, predictions))
    print('Variance score: %.2f' % r2_score(yTest, predictions))
    '''
    # Plot outputs
    plt.scatter(xTest, yTest, color='black')
    plt.plot(xTest, predictions, color='blue', linewidth=3)
    plt.xticks(())
    plt.yticks(())
    plt.show()
    '''
    # Interactive loop: predict AWP from a user-supplied acquisition cost.
    # NOTE(review): raw_input is Python 2 only -- under Python 3 this raises
    # NameError; confirm which interpreter this script targets.
    while True:
        inputVal = raw_input("Enter acquisition costs to determine AWP or press Enter to exit:")
        if len(inputVal) == 0:
            break
        try:
            calculatedAWP = model.coef_ * float(inputVal) + model.intercept_
            print("Predicted AWP is : $%.2f" %(calculatedAWP))
        except Exception:
            print("Error:", Exception)
    # outputToExcel(newFile, acq, awp)
    # newFile = "results.csv"
    # fileWriter = open(newFile,"w")
    # for i in range(len(x)):
    #     toWrite = str(x[i][0]) + "," + str(y[i][0]) + "\n"
    #     fileWriter.write(toWrite)
22,375 | e2a74eeab5622d530180b515d2aac1ee83608c1a | from openerp import models, fields, _, api
import logging
import ipdb as pdb
_logger = logging.getLogger(__name__)
class account_voucher(models.Model):
    # Debug aid: logs every voucher creation payload before delegating to core.
    _inherit = 'account.voucher'
    @api.model
    def create(self, vals):
        # pdb.set_trace()
        _logger.warning("VOUCHER CREATE: {0}".format(vals))
        return super(account_voucher, self).create(vals)
class account_move(models.Model):
    # Debug aid: logs every journal-entry creation payload before delegating.
    _inherit = 'account.move'
    @api.model
    def create(self, vals):
        # pdb.set_trace()
        # Label fixed: this model logs account.move creations, not vouchers
        # (the original message was copy-pasted from account_voucher).
        _logger.warning("MOVE CREATE: {0}".format(vals))
        return super(account_move, self).create(vals)
|
22,376 | e452a8914c9353f38b775ab8a32233c521b31e56 | # -*- coding:utf-8 -*-
# configparser is a standard-library module for reading configuration files;
# its usage is straightforward.
# Sample configuration file: test.conf
"""
[section1]
name = tank
age = 28
[section2]
ip = 192.168.1.1
port = 8080
"""
# Python code
import configparser
conf = configparser.ConfigParser()
conf.read(r"C:\Users\PCPC\cloud\S12学习\note\test.conf")
# Read the value of a specific option in a specific section
name = conf.get("section1", "name")
print(name)
age = conf.get("section1", "age")
print(age)
# List all sections
sections = conf.sections()
print(sections)
# Writing the configuration
# Update the value of an existing section/option
conf.set("section2", "port", "8081")
# Add new options to an existing section
conf.set("section2", "IEPort", "80")
conf.set("section2", "IE", "800")
conf.set("section2", "google", "1080")
# Add a brand-new section
conf.add_section("new_section")
conf.set("new_section", "new_option", "http://www.baidu.com/")
# Write everything back to the file; the context manager closes the handle
# (the original passed a bare open(...) and leaked the file object).
with open("C:\\Users\\PCPC\\cloud\\S12学习\\note\\test.conf", 'w') as conf_file:
    conf.write(conf_file)
|
22,377 | 6ce759c24beae94d58f2b74e78d8b525e2d4709d | #!/usr/bin/env python
import sys
import pycurl
class Console(object):
    """Singleton that accumulates curl output for deferred printing."""
    _instance = None
    def __new__(cls, *args, **kwargs):
        # Lazily create the single shared instance on first use.
        if cls._instance is None:
            cls._instance = super(Console, cls).__new__(cls,
                                                        *args, **kwargs)
        return cls._instance
    def __init__(self):
        # NOTE: runs on every Console() call, wiping any buffered content.
        self.content = ""
    def buffer_content(self, buf):
        self.content = self.content + buf
    def print_content(self):
        print(self.content)
def curl_setup(word, console):
req = pycurl.Curl()
req.setopt(pycurl.URL,
"dict://dict.org/d:{0}".format(word))
req.setopt(pycurl.CONNECTTIMEOUT, 5)
req.setopt(pycurl.TIMEOUT, 8)
req.setopt(pycurl.FAILONERROR, True)
req.setopt(pycurl.WRITEFUNCTION, console.buffer_content)
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stderr.write("Usage: define [word]")
sys.exit(1)
console = Console()
curl_setup(sys.argv[1], console)
try:
req.perform()
console.print_content()
except pycurl.error, error:
errno, errstr = error
sys.stderr.write("Error {0}: {1}".format(errno, errstr))
|
22,378 | 5a05e3cb0f14f06ff216e140120a6280c4c78c2d | # -*- coding: utf-8 -*-
from odoo import models, fields, api
class CICF(models.Model):
    """Customer Incident/Complaint Form: tracks a client complaint about a
    project from draft until it is marked solved."""
    _name = 'philsteel.cicf'
    jobsite_image = fields.Binary()
    cicf_no = fields.Char(string="CICF. No:")
    concern_dept = fields.Char(string='Concerned Department')
    complain_date = fields.Date(string='Date')
    # NOTE(review): required='True' passes a (truthy) string, not the boolean
    # True -- it works, but confirm the intent.
    name = fields.Many2one(
        'philsteel.projects', 'Project Name', ondelete='cascade', required='True'
    )
    client = fields.Char(string='Client')
    location = fields.Text(string="Location")
    ic_no = fields.Char(string="I.C. No:" )
    sc_no = fields.Char(string="S.C. No:")
    dept = fields.Many2one(
        'philsteel.departmentx', 'Concerned Departments', ondelete='cascade'
    )
    client_code = fields.Char(string='Customer Code')
    #--------REFERENCES---------#
    nica_signed_date = fields.Date(string="NICA Date")
    jobsite_contacts = fields.Many2one(
        'philsteel.sitecontacts', 'Site Contact', ondelete='cascade'
    )
    designation = fields.Many2one('philsteel.positions', string='Designation', ondelete='cascade')
    tel_no = fields.Char(string='Contact Person Tele.No')
    date_action_required = fields.Char(string='Date Action Required')
    jobsite_sketch = fields.Binary()
    #--------particulars---------#
    particularss = fields.Text(string="Particular")
    #--------signatory---------#
    prep_by = fields.Many2one(
        'philsteel.contacts', 'Prepared By', ondelete='cascade'
    )
    prep_date = fields.Date(string='Prepared Date')
    endorsed_by = fields.Many2one(
        'philsteel.contacts', 'Endorsed By', ondelete='cascade'
    )
    endorsed_date = fields.Date(string='Endorsed Date')
    approved_by = fields.Many2one(
        'philsteel.contacts', 'Approved By', ondelete='cascade'
    )
    approved_date = fields.Date(string='Approved Date')
    acknowledge_by = fields.Many2one(
        'philsteel.contacts', 'Acknowledged By', ondelete='cascade'
    )
    acknowledge_date = fields.Date(string='Acknowledged Date')
    statuss = fields.Selection([
        ('draft', 'Draft'),
        ('solved', 'Solved'),
    ], string='Status', readonly=True, copy=False, index=True, track_visibility='onchange', default='draft')
    @api.multi
    def action_approved(self):
        # Mark the complaint(s) as solved.
        for visit in self:
            visit.statuss = 'solved'
        return True
    @api.onchange('name')
    def get_proj_details(self):
        # Mirror the selected project's client/reference fields into the form.
        for recorda in self:
            recorda.client = recorda.name.customer_name
            recorda.ic_no = recorda.name.ic_no
            recorda.sc_no = recorda.name.sc_no
            recorda.location = recorda.name.location
|
22,379 | 3fdf65c730790898965d13bc21ea73abe59528bc | from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/', methods=['GET', 'POST'])
def index():
    """BMI calculator view.

    GET renders the empty form; POST reads integer ``weight`` (kg) and
    ``height`` (cm) fields, computes BMI rounded to two decimals, and
    renders the result with a weight-category label.
    """
    bmi = ''
    if request.method == 'POST' and 'weight' in request.form:
        try:
            weight = int(request.form.get('weight'))
            height = int(request.form.get('height'))
        except (TypeError, ValueError):
            # Missing or non-numeric fields: fall through to the bare form
            # instead of raising a 500 error.
            return render_template("bmi_calc.html", bmi=bmi)
        if height <= 0:
            # Avoid division by zero for a nonsensical height.
            return render_template("bmi_calc.html", bmi=bmi)
        bmi = round((weight / ((height / 100) ** 2)), 2)
        # Category thresholds follow the common WHO bands.
        if bmi < 16:
            condition = "You are severely underweight"  # spelling fixed
        elif bmi < 18.5:
            condition = "You are Underweight"
        elif bmi < 25:
            condition = "You are Normal"
        elif bmi < 30:
            condition = "You are Overweight"
        else:
            condition = "You are Obese"
        return render_template("bmi_calc.html", bmi=bmi, condition=condition)
    return render_template("bmi_calc.html", bmi=bmi)
if __name__ == '__main__':
    # Run the Flask development server (defaults: localhost:5000, debug off).
    app.run()
22,380 | 500d498ebd1ae1cc5561f333225e088514bc5a11 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 08 00:34:08 2017
@author: micsh
"""
# Import regular expression and BeautifulSoup package
import requests
from bs4 import BeautifulSoup as bsoup
# NOTE(review): this script uses Python 2 print statements and py2 unicode
# handling -- it will not run under Python 3 as-is.
# Request contents from the url
my_wm_username = 'yshao06'
search_url = 'http://publicinterestlegal.org/county-list/'
response = requests.get(search_url, headers={
    "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36"}).content
# Parse the url content using bsoup and compile all attributes of 'tr'
parsed_url = bsoup(response, 'lxml')
target_rows = parsed_url.find_all('tr')
# Create an empty list to append all the results
result_list = []
# Go into each block of 'tr', which contains 'County', 'State' and 'Registration Rate'
# Create an empty list to compile data from each row
for row in target_rows:
    record = []
    # In each block, iterate through 'td' to extract value
    # The .text.encode call converts unicode into ascii (dropping the u'' prefix)
    # Append the value to the list of row, and further append the row to the master list
    for x in row.find_all('td'):
        record.append(x.text.encode("ascii", 'ignore'))
        # for y in x.find_all('strong'):
        #     record.append(y.text.encode("ascii", 'ignore'))
    result_list.append(record)
# Print out all results as required
print my_wm_username
print len(result_list)
print result_list
|
22,381 | cf7278e7d55798fd0fcb0dbd9fbfbf0fe8b6368e | # -*- coding: utf-8 -*-
# Python 3.6
# YourTurnFourB.py
"""
Author: Kebur Fantahun
Created: Tue Aug 14 22:12:40 2018
Modified: Tue Aug 14 22:12:40 2018
Description: 4B from the book
"""
import numpy as np, matplotlib.pyplot as plt
num_curves = 3
# 51 evenly spaced sample points on [0, 1]
x = np.linspace(0, 1, 51)
y = np.zeros((x.size,num_curves))
# Column n holds sin(2*pi*(n+1)*x): one full period, then two, then three.
for n in range(num_curves):
    y[:,n] = np.sin((n+1) * x * 2 * np.pi)
plt.plot(x,y)
# open and close any extra figures with the below code
plt.figure('Jack')
plt.close('Jack')
# legend embellishment
ax = plt.gca()
ax.legend((r"$\sin(2 \pi x)$",r"$\sin(4 \pi x)$",r"$\sin(6 \pi x)$"))
|
22,382 | 996bb9013bc23689ebf96130e64aed03b6b397ea | import hashlib
import json
import re
from typing import Dict, Optional, List, Tuple
from rdflib import URIRef, Graph
from .literal import Literal
from ..util_models import URIRelation
class Rule(object):
    """
    Rules are assumed to have their consequent always with first argument ?a and second ?b
    """
    def __init__(self,
                 antecedents: List[Literal] = None,
                 consequents: List[Literal] = None,
                 standard_confidence: float = 1.0,
                 pca_confidence: float = 1.0):
        """
        Create a rule from a premise and a conclusion along with two confidence scores produced by AMIE. The premise can
        contain multiple literals, while the conclusion only contains one literal.
        The measures are further explained in the AMIE paper(http://resources.mpi-inf.mpg.de/yago-naga/amie/amie.pdf)
        :param antecedents: list of literals that describe the premise
        :param consequents: list of literals that describe the conclusion (should only contain one literal in AMIE)
        :param standard_confidence: takes all facts that are not in the KB as negative evidence. Thus it is the ratio
        of its predictions that are in the kB
        :param pca_confidence: the confidence of the partial completeness assumption (PCA). It identifies more
        productive rules than the other measures
        """
        self.antecedents: List[Literal] = antecedents or []
        self.consequents: List[Literal] = consequents or []
        self.standard_confidence = standard_confidence
        self.pca_confidence = pca_confidence
    def __str__(self):
        rule_string = ""
        # add premise literals
        for antecedent in self.antecedents:
            rule_string += antecedent.__str__() + " "
        # add implication arrow
        rule_string += "=> "
        # add conclusion literals
        for consequent in self.consequents:
            rule_string += consequent.__str__() + " "
        return rule_string
    def full_query_pattern(self) -> str:
        """Build the full SPARQL query (projection + pattern) for the premise."""
        query_pattern = ""
        for antecedent in self.antecedents:
            query_pattern += antecedent.sparql_patterns()
        if "?b" not in query_pattern and "?a" not in query_pattern:
            query_projection = "ask "
        else:
            # insert the selectors for subject and object into the select query if they exist in the query pattern
            query_projection = "select where "
            # the resulting query would look like "select ?a ?b ..." if both cases are true
            if "?b" in query_pattern:
                query_projection = query_projection.replace("select ", "select ?b ")
            if "?a" in query_pattern:
                query_projection = query_projection.replace("select ", "select ?a ")
        # build remaining part of the query and execute it
        query_pattern = "{" + query_pattern + "}"
        return query_projection + query_pattern
    def antecedents_patterns(self,
                             graph: Graph,
                             subject_uri: URIRef,
                             relation_uri: URIRef,
                             object_uri: URIRef) -> Tuple[str, Optional[Literal]]:
        """
        Creates the SPARQL pattern to filter the graph according to the premise of this rule (i.e., all literals in the
        premise).
        :param graph: the synthesized graph
        :param subject_uri: uri of the subject in the new fact
        :param relation_uri: uri of the relation in the new fact
        :param object_uri: uri of the object in the new fact
        :return: tuple of the full SPARQL pattern of the premise and the literal of the premise with a matching relation
        type as the new fact, if such a literal exists
        """
        # contains the concatenated SPARQL patterns of the literals, i.e. the SPARQL filter to match nodes that conform
        # with all literals in the premise
        patterns = ""
        # subject of a matching literal
        matched_literal_subject = None
        # object of a matching literal
        matched_literal_object = None
        # the literal that matches the new fact
        matched_literal = None
        # test if a literal in the premise handles the same relation that is in the new fact
        # save the literal and its subject and object if such an literal exists
        for antecedent in self.antecedents:
            antecedent_relation_uri = antecedent.relation.uri
            if antecedent_relation_uri == relation_uri:
                matched_literal_subject = f"?{antecedent.literal_subject}"
                matched_literal_object = f"?{antecedent.literal_object}"
                matched_literal = antecedent
                break
        # concatenate the SPARQL pattern of every literal to query nodes matching all literals
        # exclude the literal with a matching relation type since it is already satisfied by the new fact that will be
        # added
        for antecedent in self.antecedents:
            # Bug fix: the original compared the relation object itself against
            # the relation URI (`antecedent.relation != relation_uri`), which is
            # always true, so the matched literal was never excluded.  Exclude
            # exactly the literal found above.
            if antecedent is not matched_literal:
                patterns += antecedent.sparql_patterns()
        subject_entity = f"<{subject_uri}>"
        object_entity = f"<{object_uri}>"
        if matched_literal_subject is not None:
            patterns = patterns.replace(matched_literal_subject, subject_entity)
        if matched_literal_object is not None:
            patterns = patterns.replace(matched_literal_object, object_entity)
        return patterns, matched_literal
    def to_dict(self) -> dict:
        """Serialize this rule as a dict containing its full SPARQL pattern."""
        return {
            "pattern": self.full_query_pattern()
        }
    def to_rudik(self):
        """Convert this AMIE rule into the equivalent RuDiK rule representation."""
        # this can't be a top level import since that will cause circular imports
        from .rudik_rule import RudikRule
        conclusion_literal = self.consequents[0]
        id_to_role = {
            str(conclusion_literal.literal_subject): "subject",
            str(conclusion_literal.literal_object): "object"
        }
        def convert_param(param: str):
            # map any non-conclusion variable to a fresh v0, v1, ... name
            if param not in id_to_role:
                v_index = len(id_to_role) - 2
                id_to_role[param] = f"v{v_index}"
            return id_to_role[param]
        def convert_literal(literal: Literal) -> dict:
            subject_param = str(literal.literal_subject)
            subject_param = convert_param(subject_param)
            predicate = str(literal.relation)
            object_param = str(literal.literal_object)
            object_param = convert_param(object_param)
            return {
                "subject": subject_param,
                "predicate": predicate,
                "object": object_param
            }
        rudik_premise = [convert_literal(literal) for literal in self.antecedents]
        rudik_conclusion = convert_literal(conclusion_literal)
        hashcode = hashlib.sha1(f"{json.dumps(rudik_premise)} {json.dumps(rudik_conclusion)}".encode()).hexdigest()
        return RudikRule(premise=self.antecedents,
                         conclusion=self.consequents,
                         rudik_premise=rudik_premise,
                         rudik_conclusion=rudik_conclusion,
                         hashcode=hashcode,
                         rule_type=True,
                         graph_iri=None)
    def is_negative(self):
        """AMIE rules are always positive rules."""
        return False
    def produce(self,
                graph: Graph,
                subject_uri: URIRef,
                relation_uri: URIRef,
                object_uri: URIRef) -> List[Tuple[URIRef, URIRef, URIRef]]:
        """
        Produces new facts according to this rule given a new input fact.
        :param graph: the synthesized graph
        :param subject_uri: uri of the subject in the new fact
        :param relation_uri: uri of the relation in the new fact
        :param object_uri: uri of the object in the new fact
        :return: a list of facts produced by this rule
        """
        # contains the facts produced by this rule
        new_facts: List[Tuple[URIRef, URIRef, URIRef]] = []
        # QUESTION: apparently AMIE rules can only have one triple in their conclusion. Is this actually the case?
        # if there is only one literal in the premise, simply check if it matches
        # a new fact is only produced if both subject and object of the input fact also appear in the premise literal
        if len(self.antecedents) == 1:
            # relation of the (only) literal in the conclusion
            new_relation = self.consequents[0].relation
            if isinstance(new_relation, URIRelation):
                new_relation_uri = new_relation.uri
            else:
                new_relation_uri = URIRelation(new_relation).uri
            # if the subject and object of the premise and the conclusion are the same entities
            if (
                self.antecedents[0].literal_subject_id == self.consequents[0].literal_subject_id
                and self.antecedents[0].literal_object_id == self.consequents[0].literal_object_id
            ):
                new_facts.append((subject_uri, new_relation_uri, object_uri))
            # if the subject and object of the premise are swapped in the conclusion
            if (
                self.antecedents[0].literal_subject_id == self.consequents[0].literal_object_id
                and self.antecedents[0].literal_object_id == self.consequents[0].literal_subject_id
            ):
                new_facts.append((object_uri, new_relation_uri, subject_uri))
            return new_facts
        else:
            # there are multiple literals in the premise
            # to check for triples matching every literal, a sparql query is built from them
            # build the where part of the sparql query and find the literal matching the relation type of the input fact
            # if such a literal exists
            query_patterns, new_literal = self.antecedents_patterns(graph, subject_uri, relation_uri, object_uri)
            # if the patterns of the sparql query do not contain either the subject or the object, only query for
            # possible solutions to the query
            # an ask query only queries if the pattern has a solution, i.e. do any nodes match the pattern
            # it will return a yes/no answer
            if "?b" not in query_patterns and "?a" not in query_patterns:
                query_projection = "ask "
            else:
                # insert the selectors for subject and object into the select query if they exist in the query pattern
                query_projection = "select where "
                # the resulting query would look like "select ?a ?b ..." if both cases are true
                if "?b" in query_patterns:
                    query_projection = query_projection.replace("select ", "select ?b ")
                if "?a" in query_patterns:
                    query_projection = query_projection.replace("select ", "select ?a ")
            # build remaining part of the query and execute it
            query_patterns = "{" + query_patterns + "}"
            sparql_query = query_projection + query_patterns
            query_result = graph.query(sparql_query)
            # relation type of the resulting triple
            new_relation = self.consequents[0].relation
            if isinstance(new_relation, URIRelation):
                new_relation_uri = self.consequents[0].relation.uri
            else:
                new_relation_uri = URIRelation(self.consequents[0].relation).uri
            # handle every possible projection of the query
            if "?a" in query_projection and "?b" in query_projection:
                # both subject and object for each of the new facts were queried
                # add every result tuple as a new fact with the relation of the conclusion
                for new_subject, new_object in query_result:
                    new_facts.append((new_subject, new_relation_uri, new_object))
            elif "?a" in query_projection:
                # only the subject for each of the new facts was queried
                # select the subject or the object of the premise as object for new fact depending on the naming
                # i.e., a subject_id == 2 represents a "b", therefore the subject would be the new object
                if new_literal.literal_subject_id == 2:
                    new_object = subject_uri
                else:
                    # the object in the premise was named "b"
                    new_object = object_uri
                # add every result subject with the previously determined object as new fact with the relation of the
                # conclusion
                for new_subject, in query_result:
                    new_facts.append((new_subject, new_relation_uri, new_object))
            elif "?b" in query_projection:
                # only the object for each of the new facts was queried
                # select the subject or the object of the premise as subject for new fact depending on the naming
                # i.e., a subject_id == 1 represents an "a", therefore the subject would be the new subject
                if new_literal.literal_subject_id == 1:
                    new_subject = subject_uri
                else:
                    # the object in the premise was named "a"
                    new_subject = object_uri
                # add every result object with the previously determined subject as new fact with the relation of the
                # conclusion
                for new_object, in query_result:
                    new_facts.append((new_subject, new_relation_uri, new_object))
            elif bool(query_result):
                # if the result is non empty, or an ask query response is yes
                # if the subject was named "a" and the object named "b", the new fact will have the same subject and
                # object. otherwise they are swapped
                if new_literal.literal_subject_id == 1:
                    new_subject = subject_uri
                else:
                    new_subject = object_uri
                if new_literal.literal_object_id == 2:
                    new_object = object_uri
                else:
                    new_object = subject_uri
                # add the new fact with the original subject and object (possibly swapped) and the relation of the
                # conclusion
                new_facts.append((new_subject, new_relation_uri, new_object))
            return new_facts
    def validate(self,
                 graph: Graph,
                 subject_uri: URIRef,
                 relation_uri: URIRef,
                 object_uri: URIRef) -> bool:
        """
        Validate a new fact against this rule (not implemented for AMIE rules).
        :param graph: the synthesized graph
        :param subject_uri: uri of the subject in the new fact
        :param relation_uri: uri of the relation in the new fact
        :param object_uri: uri of the object in the new fact
        :raises NotImplementedError: always
        """
        raise NotImplementedError
    @staticmethod
    def parse_amie(line: str, relation_to_id: Dict[URIRef, int]) -> Optional['Rule']:
        """
        Parses an AMIE rule from a line in a file, translates the relation URI to an id and creates a rule object.
        :param line: line of a file that contains an AMIE rule
        :param relation_to_id: dictionary pointing from relation URIs to the ids used in the models
        :return: rule object containing the parsed AMIE rule
        """
        # extract fields from tsv-formatted AMIE rule
        cells = line.split("\t")
        rule_string = cells[0]
        std_confidence = float(cells[2].strip())
        pca_confidence = float(cells[3].strip())
        # split rule into premise and conclusion
        assert "=>" in rule_string, "Rule string does not contain \"=>\" substring!"
        premise, conclusion = [rule_part.strip() for rule_part in rule_string.split("=>") if rule_part]
        # insert a "|" separator between consecutive literals so they can be
        # split below (raw strings avoid invalid escape-sequence warnings)
        premise = re.sub(r"(\?\w+)\s+\?", r"\g<1>|?", premise)
        conclusion = re.sub(r"(\?\w+)\s+\?", r"\g<1>|?", conclusion)
        # split premise into single literals (i.e., triples)
        antecedents = []
        for antecedent in premise.split("|"):
            literal = Literal.parse_amie(antecedent, relation_to_id)
            if literal is None:
                return None
            antecedents.append(literal)
        # split conclusion into single literals (i.e., triples)
        consequents = []
        for consequent in conclusion.split("|"):
            literal = Literal.parse_amie(consequent, relation_to_id)
            if literal is None:
                return None
            consequents.append(literal)
        return Rule(antecedents, consequents, std_confidence, pca_confidence)
|
22,383 | 56daf3a0dd935426dcfbf69d759cc387564eb7bb | from django.contrib import admin
from .models import Detail, Comment
# Register your models here.
# Expose detail pages and their comments in the Django admin with default options.
admin.site.register(Detail)
admin.site.register(Comment)
|
22,384 | df498dca5cb827780d1bdec47b510a321cbe4e1d |
from defaults import normalise_alias, replace_value, literal
from conditional import o
from connection import CONN
DEFAULT_REPORT = ("SELECT\n"
" service.description,\n"
" cost_price,\n"
" sales_price,\n"
" subscription.qty,\n"
" first_name,\n"
" last_name,\n"
" company,\n"
" entity.code,\n"
" subscription.service,\n"
" service.supplier,\n"
" service.type\n"
"FROM ecn.entity, ecn.subscription, ecn.service, ecn.service_type\n"
"WHERE ecn.entity.code = ecn.subscription.client\n"
"AND ecn.service.type = ecn.service_type.type\n"
"AND ecn.subscription.service = ecn.service.code;")
CLIENT_TOTALS = ("SELECT\n"
" service.description,\n"
" sum(cost_price*subscription.qty) as total_cost,\n"
" sum(sales_price*subscription.qty) as total_sales,\n"
" sum(subscription.qty) as total_qty,\n"
" first_name,\n"
" last_name,\n"
" company,\n"
" entity.code,\n"
" subscription.service,\n"
" service.supplier,\n"
" service.type\n"
"FROM ecn.entity, ecn.subscription, ecn.service, ecn.service_type\n"
"WHERE ecn.entity.code = ecn.subscription.client\n"
"AND ecn.service.type = ecn.service_type.type\n"
"AND ecn.subscription.service = ecn.service.code\n"
"GROUP BY entity.code;")
def default_report(title, where, order=('name DESC', 'sales_price DESC')):
    """Build a fixed-width text report of subscriptions matching `where`.

    :param title: heading printed on the first line
    :param where: conditional (see `conditional.o`) used to refine the rows
    :param order: ORDER BY clauses applied to the merged result set
    :return: the complete report as a single newline-joined string
    """
    hl = '-' * 89  # horizontal rule matching the row width
    report = [title, '{:^30} {:^3} {:^10} {:^10} {:^30}'.format('Description', 'Qty', 'Cost', 'Sales', 'Client'), hl]
    row_str = '|{:<30}|{:^3}|{:>10}|{:>10}|{:>30}|'
    total_cost, total_sales, total_qty = 0, 0, 0
    # 'name' is synthesised from first/last/company via normalise_alias, so the
    # default ordering on 'name DESC' only exists after the merge step.
    for row in CONN.query(DEFAULT_REPORT).\
        merge('name', normalise_alias, 'first_name', 'last_name', 'company').refine(where).order_by(*order):
        # per-row totals are price * subscribed quantity
        cost_price = row['cost_price'] * row['qty']
        sales_price = row['sales_price'] * row['qty']
        total_cost += cost_price
        total_sales += sales_price
        total_qty += row['qty']
        report.append(row_str.format(row['description'], row['qty'], cost_price, sales_price, row['name']))
    report.append(hl)
    report.append(' {:<30} {:^3} {:>10} {:>10}'.format('Total', total_qty, total_cost, total_sales))
    return '\n'.join(report)
def internet_solutions_domain():
    """Print the Internet Solutions domain-services reconciliation report."""
    criteria = o(type=literal('domain'), supplier=literal('is0001'))
    print(default_report('IS Domain Reconciliation', criteria))
def internet_solutions_mobile():
    """Print the Internet Solutions mobile-services reconciliation report."""
    criteria = o(type=literal('mobile'), supplier=literal('is0001'))
    print(default_report('IS Mobile Reconciliation', criteria))
def internet_solution_adsl():
    """Print the IS per-account/uncapped and per-GB reconciliation reports."""
    per_account = o(type=(literal('peracc'), literal('uncapped')),
                    supplier=literal('is0001'))
    print(default_report('IS Per Account Reconciliation', per_account))
    print()
    per_gb = o(type=literal('pergb'), supplier=literal('is0001'))
    print(default_report('IS Per GB Reconciliation', per_gb))
def axxess():
    """Print the Axxess supplier reconciliation report."""
    criteria = o(supplier=literal('axx001'))
    print(default_report('Axxess Reconciliation', criteria))
def client_totals():
    """Print per-client cost/sales totals (rows grouped by entity in CLIENT_TOTALS SQL)."""
    hl = "-" * 54
    report = ["Client Totals", " {:^30} {:^10} {:^10}".format("Client", "Cost", "Sales"), hl]
    total_cost, total_sales, total_qty = 0, 0, 0
    for row in CONN.query(CLIENT_TOTALS).merge('name', normalise_alias, 'first_name', 'last_name', 'company'):
        # replace_value substitutes 0 for missing aggregates so the grand
        # totals never break on NULL sums.
        total_cost += replace_value(row['total_cost'], 0)
        total_sales += replace_value(row['total_sales'], 0)
        total_qty += replace_value(row['total_qty'], 0)
        report.append("|{:<30}|{:>10}|{:>10}|".format(row['name'], row['total_cost'], row['total_sales']))
    report.append(hl)
    report.append(" {:<30} {:>10} {:>10}".format("Total", total_cost, total_sales))
    print('\n'.join(report))
if __name__ == '__main__':
# axxess()
# internet_solution_adsl()
# internet_solutions_domain()
# internet_solutions_mobile()
client_totals()
pass
|
22,385 | ed8c2de4a600faf5b50e424286a14e3f93ff1625 | from moviepy.editor import *
import argparse
import datetime
import os
parser = argparse.ArgumentParser(description='Clip video')
parser.add_argument('--starttime',type=str,default='00:00:00')
parser.add_argument('--endtime',type=str,default='00:00:00')
parser.add_argument('--path',type=str,default='')#视频路径
parser.add_argument('--output',type=str,default='./')#输出文件夹
opt=parser.parse_args()
# 时间字符串转换为秒
def timeTransform(time_str):
    """Convert an "HH:MM:SS" string to a total number of seconds.

    Uses plain integer arithmetic instead of the previous datetime round-trip,
    which raised ValueError for hour values of 24 or more (relevant when
    clipping very long recordings).

    :param time_str: time as "HH:MM:SS"
    :return: int, total seconds
    """
    hour, minute, second = time_str.split(':')
    return int(hour) * 3600 + int(minute) * 60 + int(second)
def main():
    """Clip the video at opt.path to [opt.starttime, opt.endtime] and save it
    into opt.output as "<name>_clip.<ext>"."""
    start_sec = timeTransform(opt.starttime)
    end_sec = timeTransform(opt.endtime)
    if start_sec > end_sec:
        print('出错:开始时间大于结束时间')
        return
    file_name = os.path.basename(opt.path)
    # os.path.splitext is robust for names containing dots ("my.video.mp4");
    # the previous `file_name.split('.')` unpacking crashed on them.
    name, ext = os.path.splitext(file_name)
    print("开始剪辑:{}-{},共{}秒".format(opt.starttime, opt.endtime, end_sec - start_sec))
    clip = VideoFileClip(opt.path).subclip(start_sec, end_sec)
    # splitext keeps the dot in `ext`, so no '.' is inserted here
    new_file = name + '_clip' + ext
    clip.write_videofile(os.path.join(opt.output, new_file))
main()
|
22,386 | b4dd92a7f72e0b56abb336bc81a2584836acdcbd | from SICXE import Assembler
import unittest
class TestAssembler(unittest.TestCase):
    """Smoke tests for the two-pass SIC/XE assembler.

    Most cases only assert non-None results and print the generated tables
    (OPTAB, SYMTAB, literals, object program) for visual inspection.
    """
    def setUp(self):
        # Fresh assembler per test, preloaded with the sample source program.
        self.asm = Assembler()
        self.asm.load_file("SICXE.txt")
    def test_read_source(self):
        # Source lines are dicts with 'symbol' / 'operator' / 'operand' keys.
        self.assertIsNotNone(self.asm.source, "Can't read file")
        print("=====Source code=====")
        for i in self.asm.source:
            print("{1}\t{0}\t{2}".format(i['operator'],
                i['symbol'] if i['symbol'] else "\t",
                i['operand'] if i['operand'] else "\t"))
    def test_load_operators(self):
        load_op = self.asm.load_operators('Operators.dat')
        self.assertIsNotNone(load_op)
        print(load_op)
    def test_append_operator(self):
        # Register ADD (opcode 0x18, format 3) and check it is accepted.
        add_op = self.asm.append_operator('ADD', '0x18', 3)
        self.assertIsNotNone(add_op)
        print(add_op)
    def test_operator_table(self):
        print("=======OPTAB========")
        for i, val in self.asm.OPTAB.items():
            print(" {:6}\t{:2}\t{:02X}".format(i, val['format'], int(val['opcode'], 16)))
    def test_symbol_table(self):
        # Both passes must run before SYMTAB is populated.
        self.asm.pass_one()
        self.asm.pass_two()
        print("======SYMTAB======")
        print("{:^8}\t{:^5}".format('"symbol"', '"val"'))
        for i, val in self.asm.SYMTAB.items():
            print(" {:8}\t{:04X}".format(i, val))
    def test_literal(self):
        self.asm.pass_one()
        self.asm.pass_two()
        print("=======LITERALS========")
        for i, val in self.asm.LITERAL.items():
            print(" {:7}\t{:04X}".format(i, val))
    def test_record(self):
        self.asm.pass_one()
        self.asm.pass_two()
        print("======Object Program=====")
        print(self.asm.object_program)
if __name__ == "__main__":
unittest.main()
|
22,387 | a701f82758930e7b5cf0e27bfe1414f941624cd4 | import pandas
from sklearn.neighbors.kde import KernelDensity
import numpy as np
from matplotlib import pyplot
import scipy.stats as stats
from scipy.stats import norm
from scipy.stats.mstats import winsorize
from collections import Counter
EMPRUNT = 5
PREVISION = 6
SECTEUR1 = 1
SECTEUR2 = 2
SECTEUR_PARTICULIER = 3
def plot_normal_distribution(numeric_data):
    """Plot each numeric series as a density histogram with a fitted normal pdf.

    :param numeric_data: iterable of numeric sequences; by convention the first
        is CapaciteEmprunt and the second PrevisionnelAnnuel (drives the title).
    """
    for i, data in enumerate(numeric_data, start=1):
        mu, std = norm.fit(data)
        # `density=True` replaces the `normed` argument removed in matplotlib 3.x;
        # it normalises the histogram so it is on the same scale as the pdf.
        pyplot.hist(data, bins=25, density=True, alpha=0.6, color='b')
        xmin, xmax = pyplot.xlim()
        x = np.linspace(xmin, xmax, 100)
        p = norm.pdf(x, mu, std)
        pyplot.plot(x, p, 'k', linewidth=2)
        if i == 1:
            pyplot.title("Distribution normale de la variable CapaciteEmprunt")
        else:
            pyplot.title("Distribution normale de la variable PrevisionnelAnnuel")
        pyplot.legend()
        pyplot.show()
def plot_binary_distribution(full_dataset):
    """Pie-chart the value counts of the three binary sector columns (cols 1-3).

    :param full_dataset: DataFrame whose columns 1..3 are Secteur1, Secteur2
        and SecteurParticulier respectively.
    """
    count_secteur = []
    colors = ['red', 'green', 'blue']  # NOTE(review): defined but never used below
    count_secteur.append(Counter(full_dataset.iloc[:, 1]))
    count_secteur.append(Counter(full_dataset.iloc[:, 2]))
    count_secteur.append(Counter(full_dataset.iloc[:, 3]))
    i = 1
    # One subplot per sector column, side by side on a single row.
    for count in count_secteur:
        sub = pyplot.subplot(1, 3, i)
        if i==1:
            sub.set_title("Distribution de Secteur1")
        elif i==2:
            sub.set_title("Distribution de Secteur2")
        elif i==3:
            sub.set_title("Distribution de SecteurParticulier")
        sub.pie(list(count.values()), labels=list(count.keys()))
        sub.legend()
        i+=1
    pyplot.suptitle("Distribution des variables binaires")
    pyplot.legend()
    pyplot.show()
def apply_winsorisation(full_dataset):
    """Return [CapaciteEmprunt, PrevisionnelAnnuel] with their top 1% winsorised.

    Columns are selected positionally via the module constants EMPRUNT and
    PREVISION; only the upper tail is capped (limits=(0, 0.01)).
    """
    emprunt = full_dataset.iloc[:, EMPRUNT]
    prevision = full_dataset.iloc[:, PREVISION]
    return [winsorize(emprunt, limits=(0, 0.01)),
            winsorize(prevision, limits=(0, 0.01))]
def plot_data_distribution():
    """Load the cleaned learning CSV, plot numeric and binary distributions
    (before and after winsorisation), then winsorise the file on disk."""
    import treatement  # NOTE(review): appears unused here — possibly imported for side effects; confirm
    np.random.seed(1)
    full_training_dataframe = pandas.read_csv("../data/cleaned_learning.csv", sep=";")
    # Sorted copies of the two numeric columns (EMPRUNT / PREVISION) for plotting.
    numeric_data = [sorted(full_training_dataframe.iloc[:, EMPRUNT]),sorted(full_training_dataframe.iloc[:, PREVISION])]
    plot_normal_distribution(numeric_data)
    plot_binary_distribution(full_training_dataframe)
    # Re-plot the numeric variables after capping their top 1%.
    wd = apply_winsorisation(full_training_dataframe)
    plot_normal_distribution([sorted(x) for x in wd])
    # Persist the winsorised dataset back to disk.
    winsorize_learning_dataset()
def winsorize_learning_dataset():
    """Winsorise the numeric columns of the learning CSV and rewrite it in place.

    Reads ../data/cleaned_learning.csv, caps the top 1% of CapaciteEmprunt and
    PrevisionnelAnnuel via apply_winsorisation, and writes the result back to
    the same file.
    """
    full_training_dataframe = pandas.read_csv("../data/cleaned_learning.csv", sep=";")
    # (removed an unused `numeric_data` computation that sorted both columns
    #  without ever using the result)
    wd = apply_winsorisation(full_training_dataframe)
    full_training_dataframe["CapaciteEmprunt"] = wd[0]
    full_training_dataframe["PrevisionnelAnnuel"] = wd[1]
    full_training_dataframe.to_csv("../data/cleaned_learning.csv", sep=";", index=False)
if __name__=='__main__' :
plot_data_distribution()
|
22,388 | 108e8fc47d571f63221269bcfb138e2ac9d7a7d9 | import os
import sys
# SCons build script: builds the demo programs, each linked against the
# bundled Lapack (f2c translation) and LinearAlgebra C sources.
env = Environment()
objs = []
# Lapack
objs += Glob('../../Lapack/Blas/*.c')
objs += Glob('../../Lapack/F2c/*.c')
objs += Glob('../../Lapack/Install/*.c')
objs += Glob('../../Lapack/Scr/*.c')
# Linear Algebra
objs += Glob('../../LinearAlgebra/*.c')
# get current directory
cwd = os.getcwd()
# Include search path: this directory, the project root, and every Lapack subdir.
path = [os.path.join(cwd, './')]
path += [os.path.join(cwd, '../../')]
path += [os.path.join(cwd, '../../LinearAlgebra')]
path += [os.path.join(cwd, '../../Lapack/Blas')]
path += [os.path.join(cwd, '../../Lapack/F2c')]
path += [os.path.join(cwd, '../../Lapack/Install')]
path += [os.path.join(cwd, '../../Lapack/Scr')]
# One executable per demo source file; all link against libm.
env.Program(target = "pc_linsolve", source = objs + Glob('./pc_linsolve.c'), CPPPATH = path, LIBS=['m'])
env.Program(target = "pc_lu", source = objs + Glob('./pc_lu.c'), CPPPATH = path, LIBS=['m'])
env.Program(target = "pc_qr", source = objs + Glob('./pc_qr.c'), CPPPATH = path, LIBS=['m'])
env.Program(target = "pc_eig", source = objs + Glob('./pc_eig.c'), CPPPATH = path, LIBS=['m'])
env.Program(target = "pc_svd", source = objs + Glob('./pc_svd.c'), CPPPATH = path, LIBS=['m'])
env.Program(target = "pc_state_space", source = objs + Glob('./pc_state_space.c'), CPPPATH = path, LIBS=['m'])
env.Program(target = "pc_model_predictive_control", source = objs + Glob('./pc_model_predictive_control.c'), CPPPATH = path, LIBS=['m'])
|
22,389 | 063006ae124517ca9f2874429fb5cdf7d3d4a379 | #!/usr/bin/python3
# classes.py by Akim Delli
# Copyright 2010 The BearHeart Group, LLC
class Duck:
    """Minimal duck with quack/walk behaviours (duck-typing demo)."""

    # Class-level mapping shared by all instances; unused by the methods.
    private_variable = {}

    def quack(self):
        print('Quaaack!')

    def walk(self):
        print('Walks like a duck.')
def main():
    """Exercise both Duck behaviours once."""
    donald = Duck()
    donald.quack()
    donald.walk()


if __name__ == "__main__":
    main()
|
22,390 | 4d1d1ae2660a727518de87bb700505fbce7b26cd |
# These classes get all the data from the json and sets them to variables for easy use
# This class deals with the board including all snakes and food
# Holds the parsed game-state for one turn of the board.
class board:
    """Snapshot of the game board: dimensions plus food, snakes and health data."""

    def __init__(self, height, width, food, snakes, health):
        self.height = height
        self.width = width
        self.food = food
        self.snakes = snakes
        self.health = health

    def printBoard(self):
        """Show the board dimensions."""
        print("Board is: ", self.height, " x ", self.width)

    def printFood(self):
        """Show where food currently sits on the board."""
        print("Food is at position(s): ", self.food)

    def printHealth(self, name, health):
        """Show the health of the named snake."""
        print("Health of \"", name, "\" is: ", health)
# This class deals with our snakes id, name, health, and body position
# Holds our snake's identity and cached head/tail coordinates.
class ourSnake:
    """Our snake: id, name, health, body and head/tail positions."""

    def __init__(self, sid, name, health, body, x, y, tailX, tailY):
        self.sid = sid
        self.name = name
        self.health = health
        self.body = body
        self.x = x
        self.y = y
        self.tailX = tailX
        self.tailY = tailY

    def printHead(self):
        """Show the head coordinates."""
        print("Head is at: ", self.x, " : ", self.y)
22,391 | a1d132e8648e154998f843f4ff52f98c41097845 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the boolean column 'is_active' (default True) to sighting_sighting."""
        # Adding field 'Sighting.active'
        # keep_default=False backfills existing rows with True but drops the
        # database-level default afterwards (standard South behaviour).
        db.add_column(u'sighting_sighting', 'is_active',
                      self.gf('django.db.models.fields.BooleanField')(default=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse: drop the 'is_active' column from sighting_sighting."""
        # Deleting field 'Sighting.active'
        db.delete_column(u'sighting_sighting', 'is_active')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'location.coordinate': {
'Meta': {'object_name': 'Coordinate'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'latitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '6'}),
'longitude': ('django.db.models.fields.DecimalField', [], {'max_digits': '10', 'decimal_places': '6'}),
'polygon': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'coordinates'", 'null': 'True', 'to': u"orm['location.Polygon']"})
},
u'location.country': {
'Meta': {'ordering': "['name']", 'object_name': 'Country'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
u'location.polygon': {
'Meta': {'object_name': 'Polygon'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'reserve.reserve': {
'Meta': {'ordering': "['name']", 'object_name': 'Reserve'},
'border': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reserves'", 'to': u"orm['location.Polygon']"}),
'country': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'reserves'", 'symmetrical': 'False', 'to': u"orm['location.Country']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'species': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'reserves'", 'symmetrical': 'False', 'to': u"orm['wildlife.Species']"}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200'})
},
u'sighting.sighting': {
'Meta': {'ordering': "['-date_of_sighting']", 'object_name': 'Sighting'},
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_sighting': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'estimated_number': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['location.Coordinate']"}),
'reserve': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sightings'", 'to': u"orm['reserve.Reserve']"}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'species': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sightings'", 'to': u"orm['wildlife.Species']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sightings'", 'to': u"orm['auth.User']"}),
'with_kill': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'with_young': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'sighting.sightingimage': {
'Meta': {'object_name': 'SightingImage'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
'sighting': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'images'", 'null': 'True', 'to': u"orm['sighting.Sighting']"})
},
u'wildlife.species': {
'Meta': {'ordering': "['common_name']", 'object_name': 'Species'},
'classification': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'common_name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'default_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
'female_info': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'female_info'", 'to': u"orm['wildlife.SpeciesInfo']"}),
'general_info': ('django.db.models.fields.CharField', [], {'max_length': '250'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inverted_default_image': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
'male_info': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'male_info'", 'to': u"orm['wildlife.SpeciesInfo']"}),
'marker': ('django.db.models.fields.files.ImageField', [], {'max_length': '250'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scientific_name': ('django.db.models.fields.CharField', [], {'max_length': '75'}),
'similiar_species': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'similiar_species_rel_+'", 'null': 'True', 'to': u"orm['wildlife.Species']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'})
},
u'wildlife.speciesinfo': {
'Meta': {'object_name': 'SpeciesInfo'},
'height': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '1', 'blank': 'True'}),
'horn_length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '1', 'blank': 'True'}),
'mass': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '7', 'decimal_places': '1', 'blank': 'True'})
}
}
complete_apps = ['sighting']
|
22,392 | e2b3a872ccf6da6437b764b95b9a6aa7ee58305b | import pandas as pd
import numpy as np
from itertools import combinations
from itertools import permutations
from math import floor
class Apriori:
    """Apriori association-rule miner over a fixed-width insurance data table.

    Frequent-itemset levels are kept in ``self._sets`` as
    ``{level: {itemset: support}}`` where an itemset is a tuple of
    ``(column_name, value)`` pairs.  Candidate rules live in ``self._rules``
    as ``{(lhs, rhs): [confidence, leverage, lift]}``.
    """
    def __init__(self, minSupport, minConfidence):
        self._minSupport = minSupport
        self._minConfidence = minConfidence
        self._sets = {}          # level number -> {itemset: support}
        self._currentSet = 0     # size of the itemsets currently being built

    def readFile(self, path):
        """Reads a text file and constructs the base table.

        Args:
            path (str): the path to the whitespace-separated text file
        Returns:
            None (sets ``self._dataTable`` from columns 3..14 of the raw file)
        """
        _file = pd.read_csv(path, sep='\s+', engine='python', header=None)
        self._dataTable = pd.DataFrame(_file.iloc[:, 3:15])
        self._dataTable.columns = ['MGEMLEEF Avg age', 'MOSHOOFD Customer main type', 'MGODRK Roman catholic',
                                   'MGODPR Protestant', 'MGODOV Other religion', 'MGODGE No religion', 'MRELGE Married',
                                   'MRELSA Living together', 'MRELOV Other relation', 'MFALLEEN Singles',
                                   'MFGEKIND Household without children', 'MFWEKIND Household with children']

    def unique(self):
        """Builds level 1: one candidate itemset per unique (column, value) pair."""
        self._currentSet = 1
        self._uniqueValue = {}
        table = self._dataTable  # renamed local: the original shadowed the pandas import as `pd`
        for col in table:
            for value in table[col].unique():
                self._uniqueValue[((col, value),)] = 0
        self._sets[self._currentSet] = self._uniqueValue

    def getSupport(self, data):
        """Calculates the support of an itemset.

        Args:
            data: tuple of (column, value) pairs
        Returns:
            float: fraction of table rows matching every pair
        """
        # Transaction count derived from the table (was hard-coded to 5822,
        # which only worked for the original dataset size).
        num_of_trans = len(self._dataTable)
        mask = np.ones(num_of_trans, dtype=bool)
        for column, value in data:
            mask = np.logical_and(mask, np.array(self._dataTable[column]) == value)
        return mask.sum() / num_of_trans

    def getConfidence(self, LeftTup, RightTup):
        """Confidence of LeftTup -> RightTup: support(L ∪ R) / support(L)."""
        return self.getSupport(LeftTup + RightTup) / self.getSupport(LeftTup)

    def getlift(self, LeftTup, RightTup):
        """Lift of LeftTup -> RightTup: support(L ∪ R) / (support(L) * support(R))."""
        return self.getSupport(LeftTup + RightTup) / (self.getSupport(LeftTup) * self.getSupport(RightTup))

    def getLeverage(self, LeftTup, RightTup):
        """Leverage of LeftTup -> RightTup: support(L ∪ R) - support(L) * support(R)."""
        return self.getSupport(LeftTup + RightTup) - self.getSupport(LeftTup) * self.getSupport(RightTup)

    def eliminate(self):
        """Drops itemsets of the current level whose support is below the minimum."""
        level = self._sets[self._currentSet]
        for key in [k for k, support in level.items() if support < self._minSupport]:
            del level[key]

    def eliminateRules(self):
        """Drops rules whose confidence (stats[0]) is below the minimum."""
        for key in [k for k, stats in self._rules.items() if stats[0] < self._minConfidence]:
            del self._rules[key]

    def calculateAllSupport(self):
        """Fills in the support of every itemset of the current level."""
        level = self._sets[self._currentSet]
        for key in level:
            # single support computation per key (the original computed it
            # twice and discarded the first result)
            level[key] = self.getSupport(key)

    def calculateAllConfidence(self):
        """Initialises each rule's stats list with its confidence."""
        for key in self._rules:
            self._rules[key] = [self.getConfidence(key[0], key[1])]

    def construct(self):
        """Builds the next level of candidate itemsets by pairwise joins."""
        newSet = {}
        keys = list(self._sets[self._currentSet])
        for idx, key_1 in enumerate(keys):
            for key_2 in keys[idx + 1:]:
                join = tuple(set(key_1 + key_2))  # union of the two itemsets
                # NOTE: only the FIRST (k+1)-combination of the union is kept,
                # mirroring the original implementation's candidate generation.
                combined = tuple(combinations(join, self._currentSet + 1))
                combined = tuple(sorted(combined[0]))
                if len(combined) != 0:
                    newSet[combined] = 0
        self._currentSet += 1
        self._sets[self._currentSet] = newSet

    def getRules(self):
        """Builds candidate rules (both directions) from the last frequent level."""
        self._rules = {}
        frequent = self._sets[self._currentSet - 1]
        for itemset in frequent:
            if len(itemset) < 2:
                # Fix: was `pass`, which fell through and generated rules with
                # an empty side for singleton itemsets.
                continue
            # NOTE: split-size bound kept exactly as in the original algorithm.
            for size in range(1, max(floor(len(itemset) / 2), 2)):
                for lhs in combinations(itemset, size):
                    rhs = tuple(item for item in itemset if item not in lhs)
                    self._rules[(lhs, rhs)] = 0
                    self._rules[(rhs, lhs)] = 0

    def ArrangeRulesByLev(self):
        """Appends each rule's leverage to its stats list (index 1)."""
        for key in self._rules:
            self._rules[key].append(self.getLeverage(key[0], key[1]))

    def ArrangeRulesByLift(self):
        """Appends each rule's lift to its stats list (index 2)."""
        for key in self._rules:
            self._rules[key].append(self.getlift(key[0], key[1]))

    def printRules(self):
        """Prints every surviving rule with its confidence, leverage and lift."""
        for key, value in self._rules.items():
            # ":3f" is a minimum field width of 3, not a precision (probably
            # ".3f" was intended) — output format kept as-is; the "left" label
            # typo was corrected to "lift".
            print(f"{key[0]} ----> {key[1]} with conf. = {value[0]:3f}, lev. = {value[1]:3f}, lift = {value[2]:3f} ")
def aprioriAlgorithm(path, minSuppor, minConfidence):
    """Runs the full Apriori pipeline on a data file and prints the rules.

    Args:
        path (str): path to the whitespace-separated data file
        minSuppor (float): minimum support threshold (sic: parameter name kept)
        minConfidence (float): minimum confidence threshold for rules
    Returns:
        None (prints progress and the surviving association rules)
    """
    apriori = Apriori(minSuppor, minConfidence)
    print("Loading Data...")
    apriori.readFile(path)
    # Level 1: unique (column, value) candidates, scored and pruned.
    apriori.unique()
    apriori.calculateAllSupport()
    apriori.eliminate()
    if (apriori._sets[1].__len__() == 0):
        print("There is No rules")
        return
    print("Constructing Itemsets...")
    # Grow levels until a level has no surviving frequent itemsets.
    while (len(apriori._sets[apriori._currentSet]) != 0):
        apriori.construct()
        apriori.calculateAllSupport()
        apriori.eliminate()
    # _currentSet == 2 here means no level beyond singletons survived.
    if apriori._currentSet == 2:
        print("There is No rules")
        return
    print("Getting Association Rules...")
    apriori.getRules()
    print("Calculating the Confidence... \n")
    apriori.calculateAllConfidence()
    apriori.eliminateRules()
    print("Association Rules are:")
    print(len("Association Rules are:") * ".",'\n')
    # Leverage then lift are appended to each rule's stats before printing.
    apriori.ArrangeRulesByLev()
    apriori.ArrangeRulesByLift()
    apriori.printRules()
|
22,393 | 0f2d88ca0d2853079874ded45f2eacccfc7dd4cb | from django.db import models
from django.conf import settings
class ModelWithFileField(models.Model):
    """Model holding a single uploaded file."""
    # NOTE(review): `upload_to` is conventionally a path *relative* to
    # MEDIA_ROOT; passing settings.MEDIA_ROOT (typically an absolute path)
    # here looks wrong — confirm uploads land where intended before changing.
    uploadedImage = models.FileField(upload_to=settings.MEDIA_ROOT)
|
22,394 | ea471e6b6008f413143c1bbb1c28aea52a9ecae9 | """
This is the machinnery that runs your agent in an environment.
This is not intented to be modified during the practical.
"""
class Runner:
    """Drives an (environment, agent) pair through games of bounded length.

    Per the module docstring, this machinery is not intended to be modified.
    """
    def __init__(self, environment, agent):
        self.environment = environment
        self.agent = agent
    def step(self,i):
        """Run one interaction at iteration `i`; returns the full transition tuple."""
        observation = self.environment.observe()
        action = self.agent.act(observation)
        (reward, stop) = self.environment.act(action)
        # The agent consumes the reward and reports its Q-values and stats.
        Q, stats =self.agent.reward(observation, action, reward,i)
        return (observation, action, reward, stop,Q, stats)
    def loop(self, games, max_iter):
        """Play `games` games of at most `max_iter` steps each.

        Returns the cumulative reward over all games; also asks the agent to
        plot episode statistics at the end.
        """
        cumul_reward = 0.0
        stats=0
        for g in range(1, games+1):
            print ("Game Number {}:".format(g))
            curr_reward = 0.0
            # Fresh agent/environment state for every game.
            self.agent.reset()
            self.environment.reset()
            for i in range(1, max_iter+1):
                print ("Iteration Number {}:".format(i))
                self.environment.display()
                (obs, act, rew, stop,Q, stats) = self.step(i)
                cumul_reward += rew
                curr_reward += rew
                print (" reward: {}".format(rew))
                print (" current game reward: {}".format(curr_reward))
                # A non-None `stop` signals a terminal event; end the game early.
                if stop is not None:
                    print ("Terminal event: {}".format(stop))
                if stop is not None:
                    break
            print ("Finished game number: {} ".format(g))
            print ("cumulative reward: {}".format(cumul_reward))
            print ("current game Final reward: {}".format(curr_reward))
        self.environment.display()
        self.agent.plot_episode_stats(stats)
        return cumul_reward
|
22,395 | 586187bfb73c9fe7a7624cd78c3a13e3b586f07b | import numpy as np
import ajustador as aju
from ephys_feature_extract_func import data_lists,data_extract, stat, plot, cv_plot,parse_args
import matplotlib.pyplot as plt
import importlib
import sys
#usage: python3 ephys_feature_extrat.py -n list of neuron types -w data_repo
#example python3 ephys_feature_extrat.py -n LR non -w A2Acre
desired_params=['baseline', 'latency', 'response', 'spike_height', 'spike_width','trace_inj','startspikes', 'spike_count', 'spike_ahp', 'falling_curve', 'rectification', 'ap_time', 'ap_amp', 'mean_isi']
##Above is the list of the parameters that you want to be considered for each neuron
commandline=sys.argv[1:]
neuron_types,data_name=parse_args(commandline)
print(neuron_types,data_name)
wavedir=importlib.import_module(data_name)
##this produces a dictionary with the desired types of neurons
n_list=data_lists(neuron_types)
##Next, all the neuron names in the desired list (for this example, alldata in A2Acre) are sorted into the appropriate neuron types. Here, n_list should provide you with a dictionary that has all your data sorted by the type of neuron that each neuron is.
for exp in wavedir.alldata.keys():
    for nm in neuron_types:
        if exp.startswith(nm):
            n_list[nm].append(exp)
## NOTE: If you do not consistently name your neurons in alldata, this dictionary will not contain all the necessary data.
##This creates another dictionary that holds the information from the parameters for each individual neuron. Dictionary style is {neuron type #1: {param1: [], param2:[]}} It then stores the data for each neuron
info=data_lists(neuron_types)
for ntype in n_list.keys():
    print(ntype)
    info[ntype] = data_extract(n_list[ntype],wavedir.alldata,desired_params)
##nstats is another dictionary that will hold all the data with the numpy nan spacers, the standard deviation, mean, and coefficient of variation for each neuron type.
nstats=data_lists(neuron_types)
for ntype in nstats:
    nstats[ntype]=data_lists(desired_params)
    for nkey in desired_params:
        ##The following check ensures that you are not changing the trace injection data or the start spikes data. The statistical analysis is then performed and assigned properly.
        if nkey != 'trace_inj' and nkey!= 'startspikes':
            print('my neuron is:', ntype,'my key is:', nkey )
            nwstat=stat(info[ntype][nkey],info[ntype]['trace_inj'], info[ntype]['startspikes'], nkey)
            nstats[ntype][nkey]=nwstat
##This uses the function plot to plot the data for parameters listed in desired_params. Each graph contains the parameter vs. trace injection and includes each neuron of the specific neuron type. For example, the first graph will produce a graph of baseline vs. trace_inj for all arky neurons.
for ntype in nstats.keys():
    for nstat in nstats[ntype].keys():
        if nstat != 'trace_inj' and nstat!= 'startspikes':
            plot(info[ntype]['trace_inj'], nstats[ntype][nstat]['adjdata'], nstat, n_list[ntype], nstats[ntype][nstat]['mean'], ntype)
##This provides 1 graph of all the CVs for all the parameters vs. trace_inj for each neuron type. You should get the same number of graphs as neuron types.
for nkey in nstats.keys():
    plt.figure()
    for nstat in nstats[nkey].keys():
        if nstat != 'trace_inj' and nstat!= 'startspikes':
            cv_plot(nstat, info[nkey]['trace_inj'],nstats[nkey][nstat],nkey)
## Inverse of |mean CV| per parameter, collected per neuron type (NaNs ignored by nanmean).
CVs={ntype:[] for ntype in nstats.keys()}
for ntype in nstats.keys():
    for nkey in nstats[ntype].keys():
        if nkey != 'trace_inj' and nkey != 'startspikes':
            print(ntype, nkey, nstats[ntype][nkey]['cv'], np.abs(np.nanmean(nstats[ntype][nkey]['cv'])))
            CVs[ntype].append(1/np.abs(np.nanmean(nstats[ntype][nkey]['cv'])))
    print(ntype,CVs[ntype],np.log(CVs[ntype]))
|
22,396 | 5aeb3b27e27d67b5198995b74132c4545f5514aa | """
url patterns for answers
"""
from django.urls import path
from .views import AnswersPostView, UpdateAnswer, GetAnswerView, DeleteAnswer, UpvoteAnswer, DownvoteAnswer
# Routes for answers nested under a meetup's question.
#
# The '<meetupId>'/'<questionId>' parameters in the update route previously
# omitted the explicit 'str:' converter; it has been added for consistency
# with the other routes. This does not change matching behavior -- Django's
# default path converter is already 'str'.
#
# NOTE(review): trailing-slash usage is inconsistent across these routes
# (some end in '/', some do not). Left untouched because changing it would
# change the matched URLs -- confirm the intended convention with the API
# consumers before normalizing.
urlpatterns = [
    # POST a new answer to a question.
    path('<str:meetupId>/questions/<str:questionId>/answers/', AnswersPostView.as_view(), name='post_answer'),
    # Update an existing answer.
    path('<str:meetupId>/questions/<str:questionId>/answers/<str:answerId>', UpdateAnswer.as_view(), name='update_answer'),
    # List all answers for a question.
    path('<str:meetupId>/questions/<str:questionId>/answers', GetAnswerView.as_view(), name='Get_all_answers'),
    # Delete an answer.
    path('<str:meetupId>/questions/<str:questionId>/answers/<str:answerId>/', DeleteAnswer.as_view(), name='delete_answer'),
    # Vote endpoints for an answer.
    path('<str:meetupId>/questions/<str:questionId>/answers/<str:answerId>/upvote', UpvoteAnswer.as_view(), name='upvote_answer'),
    path('<str:meetupId>/questions/<str:questionId>/answers/<str:answerId>/downvote', DownvoteAnswer.as_view(), name='downvote_answer')
]
|
22,397 | 67a37ce3f0c89fcd70819f5bca5047fae19fdf7a | # Generated by Django 2.2 on 2020-08-16 17:58
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django 2.2 on 2020-08-16; narrows/alters the
    # 'kanban' foreign key on the Deal model.

    # Must be applied after the app's initial migration.
    dependencies = [
        ('deal', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='deal',
            name='kanban',
            # NOTE(review): the field carries pipeline-stage choices
            # (first call / contract / payment / goods received) yet its
            # FK target is 'work.Kontakt' -- an unusual combination.
            # Confirm against the current models before relying on it;
            # deleting/editing an applied migration is not safe.
            field=models.ForeignKey(choices=[('ПЗ', 'Первий звонок'), ('ДК', 'Договор'), ('$', 'Проплата услуги == товара'), ('ПТ', 'Получение товара')], on_delete=django.db.models.deletion.CASCADE, to='work.Kontakt'),
        ),
    ]
|
22,398 | 83aa4ce3cb1becd772882aef7eb26f94bac4ec5e | """
WSGI config for onlinerequest project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import sys

# Put the virtualenv's site-packages (and the project package inside it)
# on the import path BEFORE Django is imported; order matters here.
sys.path.insert(1, '/home/data/html/onlinerequest/lib/python2.7/site-packages')
sys.path.append('/home/data/html/onlinerequest/lib/python2.7/site-packages/onlinerequest')

import os

from django.core.wsgi import get_wsgi_application

# Direct assignment (not setdefault) was chosen deliberately so a stale
# DJANGO_SETTINGS_MODULE already present in the environment cannot win.
#os.environ.setdefault("DJANGO_SETTINGS_MODULE", "onlinerequest.settings")
os.environ["DJANGO_SETTINGS_MODULE"] = "onlinerequest.settings"

# The module-level WSGI callable the server looks for.
application = get_wsgi_application()
|
22,399 | a388bd3c9d2d82b963e8a0c976c9acd83ce48cfe | from typing import Optional
from ray.data.block import Block, BlockAccessor
from ray.data.impl.arrow_block import DelegatingArrowBlockBuilder
class Batcher:
    """Chunks blocks into batches.

    Implementation Note: When there are multiple batches per block,
    this batcher will slice off and return each batch and add the
    remaining block back to the buffer instead of optimally slicing and
    returning all batches from the block at once. This will result in
    extra (and nested) block slicing. However, since slices are
    zero-copy views, we sacrifice what should be a small performance
    hit for better readability.
    """

    def __init__(self, batch_size: Optional[int]):
        """Construct a batcher.

        Args:
            batch_size: Target number of rows per batch, or ``None`` to
                treat each added block as one whole batch.
        """
        self._batch_size = batch_size
        # FIFO buffer of blocks not yet emitted as batches.
        self._buffer = []

    def add(self, block: Block):
        """Add a block to the block buffer.

        Args:
            block: Block to add to the block buffer.
        """
        self._buffer.append(block)

    def has_batch(self) -> bool:
        """Whether this Batcher has any full batches."""
        # BUGFIX: the previous `self._buffer and (...)` returned the raw
        # buffer list (e.g. `[]` when empty) instead of a bool, violating
        # the declared return type. Wrap in bool() so callers always get
        # True/False. Truthiness of the expression is unchanged.
        return bool(self._buffer) and (
            self._batch_size is None or sum(
                BlockAccessor.for_block(b).num_rows()
                for b in self._buffer) >= self._batch_size)

    def has_any(self) -> bool:
        """Whether this Batcher has any data."""
        return any(
            BlockAccessor.for_block(b).num_rows() > 0 for b in self._buffer)

    def next_batch(self) -> Block:
        """Get the next batch from the block buffer.

        Returns:
            A batch represented as a Block.
        """
        # If no batch size, short-circuit: the buffer holds exactly one
        # block and it is the batch.
        if self._batch_size is None:
            assert len(self._buffer) == 1
            block = self._buffer[0]
            self._buffer = []
            return block
        output = DelegatingArrowBlockBuilder()
        leftover = []
        needed = self._batch_size
        for block in self._buffer:
            accessor = BlockAccessor.for_block(block)
            if needed <= 0:
                # We already have a full batch, so add this block to
                # the leftovers.
                leftover.append(block)
            elif accessor.num_rows() <= needed:
                # We need this entire block to fill out a batch.
                output.add_block(block)
                needed -= accessor.num_rows()
            else:
                # We only need part of the block to fill out a batch.
                output.add_block(accessor.slice(0, needed, copy=False))
                # Add the rest of the block to the leftovers.
                leftover.append(
                    accessor.slice(needed, accessor.num_rows(), copy=False))
                needed = 0
        # Move the leftovers into the block buffer so they're the first
        # blocks consumed on the next batch extraction.
        self._buffer = leftover
        return output.build()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.