text stringlengths 38 1.54M |
|---|
# Example of low-level Python wrapper for rpi_ws281x library.
# Author: Tony DiCola (tony@tonydicola.com), Jeremy Garff (jer@jers.net)
#
# This is an example of how to use the SWIG-generated _rpi_ws281x module.
# You probably don't want to use this unless you are building your own library,
# because the SWIG generated module is clunky and verbose. Instead look at the
# high level Python port of Adafruit's NeoPixel Arduino library in strandtest.py.
#
# This code will animate a number of WS281x LEDs displaying rainbow colors.
import time
import _rpi_ws281x as ws
# LED strip configuration.
LED_CHANNEL = 0
LED_COUNT = 8          # Number of LEDs to light.
LED_FREQ_HZ = 800000   # Signal frequency; should be 800 kHz or 400 kHz.
LED_DMA_NUM = 10       # DMA channel to use (0-14).
LED_GPIO = 21          # GPIO carrying the LED data signal; must support PWM.
LED_BRIGHTNESS = 255   # 0 = darkest, 255 = brightest.
LED_INVERT = 0         # Set to 1 to invert the signal, e.g. when driving the
                       # strip through an NPN transistor used as a 3.3V->5V
                       # level converter; 0 for a normal signal.
#LED_STRIP = ws.WS2811_STRIP_RGB
#LED_STRIP = ws.WS2811_STRIP_GBR
#LED_STRIP = ws.SK6812_STRIP_RGBW
LED_STRIP = ws.SK6812W_STRIP

# Animation palette: unsigned 32-bit values whose low 24 bits are 0xRRGGBB.
DOT_COLORS = [
    0x200000,  # red
    0x201000,  # orange
    0x202000,  # yellow
    0x002000,  # green
    0x002020,  # lightblue
    0x000020,  # blue
    0x100010,  # purple
    0x200010,  # pink
]

# Allocate the ws2811_t structure. It lives on the heap, so it must be
# released with delete_ws2811_t() once we are done with it.
leds = ws.new_ws2811_t()

# Zero out both hardware channels before configuring the one we use.
for chan_index in range(2):
    chan = ws.ws2811_channel_get(leds, chan_index)
    ws.ws2811_channel_t_count_set(chan, 0)
    ws.ws2811_channel_t_gpionum_set(chan, 0)
    ws.ws2811_channel_t_invert_set(chan, 0)
    ws.ws2811_channel_t_brightness_set(chan, 0)

# Configure the active channel from the constants above.
channel = ws.ws2811_channel_get(leds, LED_CHANNEL)
ws.ws2811_channel_t_count_set(channel, LED_COUNT)
ws.ws2811_channel_t_gpionum_set(channel, LED_GPIO)
ws.ws2811_channel_t_invert_set(channel, LED_INVERT)
ws.ws2811_channel_t_brightness_set(channel, LED_BRIGHTNESS)
ws.ws2811_channel_t_strip_type_set(channel, LED_STRIP)
ws.ws2811_t_freq_set(leds, LED_FREQ_HZ)
ws.ws2811_t_dmanum_set(leds, LED_DMA_NUM)

# Initialize the library with the LED configuration.
resp = ws.ws2811_init(leds)
if resp != ws.WS2811_SUCCESS:
    message = ws.ws2811_get_return_t_str(resp)
    raise RuntimeError('ws2811_init failed with code {0} ({1})'.format(resp, message))

# try/finally guarantees the cleanup calls run once the library is initialized.
try:
    offset = 0
    while True:
        # Fill the buffer, picking a color per LED shifted by the animation offset.
        for i in range(LED_COUNT):
            color = DOT_COLORS[(i + offset) % len(DOT_COLORS)]
            ws.ws2811_led_set(channel, i, color)
        # Push the color buffer out to the hardware.
        resp = ws.ws2811_render(leds)
        if resp != ws.WS2811_SUCCESS:
            message = ws.ws2811_get_return_t_str(resp)
            raise RuntimeError('ws2811_render failed with code {0} ({1})'.format(resp, message))
        # Small delay between frames.
        time.sleep(0.25)
        # Advance the animation; eventual overflow is harmless.
        offset += 1
finally:
    # Ensure ws2811_fini is called before the program quits.
    ws.ws2811_fini(leds)
    # Freeing the structure isn't strictly necessary at program exit,
    # but it is good practice.
    ws.delete_ws2811_t(leds)
|
import sys
import csv
import requests
from bs4 import BeautifulSoup
import re
import urllib3
import datetime
# Load the tag list once, normalising each tag to lowercase.
# FIX: the original left the file handle open; `with` closes it deterministically.
with open('stanford_tags.txt', 'r') as tags_file:
    tags = [line.strip().lower() for line in tags_file]

with open('output_files/final.tsv', 'r') as in_file:
    file_reader = csv.reader(in_file, delimiter='\t', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    for row in file_reader:
        main_url = row[7]
        # Skip the header row and rows without a usable URL.
        if main_url == "main_url" or main_url == "n/a":
            continue
        if main_url[0:4] != "http":
            main_url = "https://" + str(main_url)
        print(main_url)
        try:
            webpage = requests.get(main_url)
        except Exception:
            # Broken URL: DNS failure, timeout, malformed address, ...
            print("url " + str(main_url) + " is broken")
            continue
        print(webpage)
        if webpage.status_code != 200 or len(webpage.history) > 1:
            print(str(main_url) + " bad status code")
            continue
        # FIX: the original tested `"302" in webpage.history[0]`, which compares
        # a string against a Response object and can never match; check the
        # recorded status code of the first hop instead.
        if len(webpage.history) > 0 and webpage.history[0].status_code == 302:
            print("redirect")
            continue
        print("webpage url " + str(webpage.url))
        print("main url " + str(main_url))
        print(webpage.status_code)
        print(webpage.history)
        soup = BeautifulSoup(webpage.text, 'html.parser')
        if soup is None or soup.title is None or soup.title.string is None:
            continue
        title = soup.title.string
        # Reject pages whose title marks them as an error page.
        if any(marker in title for marker in ("404", "error", "Error", "Forbidden", "forbidden", "403")):
            print("url " + str(main_url) + " was bad title")
            continue
        print(title)
        # Count occurrences of each tag in the page text.
        # FIX: re.escape keeps regex metacharacters inside a tag from being
        # interpreted as a pattern.
        occurrences = {tag: len(soup.find_all(string=re.compile(re.escape(tag)), recursive=True))
                       for tag in tags}
        print(occurrences)
        with open('output_files/found_stanford_tags.csv', 'a') as out_file:
            writer = csv.writer(out_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            row[0] = main_url
            row[1] = str(occurrences)
            writer.writerow(row)
|
import tkinter as tk
#import subprocess import call

# Create & configure the root window.
root = tk.Tk()
# FIX: corrected the "Keybaord" typo in the user-visible window title.
root.title("ELEC 490: Eye Tracking Keyboard")
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 0, weight=1)

# Create & configure the frame holding the keyboard grid.
frame = tk.Frame(root)
frame.grid(row=0, column=0, sticky='NSEW')

# Program state.
eyeDetection = -1  # Placeholder for eye-detection status (not used yet).
# Labels for the 3x3 grid: eight letter cells plus the sentence cell (index 4).
displayTextBoxes = ["a", "b", "c", "d", "sentence", "e", "f", "g", "h"]

# One StringVar per button label (nine buttons).
btn_str = [tk.StringVar() for _ in range(9)]
def button_click(buttonNum):
    """Append the letter of button *buttonNum* to the sentence cell."""
    global btn_str
    global displayTextBoxes
    print("button pressed!")
    # Grow the sentence with this button's letter and refresh both labels.
    displayTextBoxes[4] += displayTextBoxes[buttonNum]
    btn_str[4].set(displayTextBoxes[4])
    btn_str[buttonNum].set("button pressed")
def middleButton_click():
    """Handle a press of the central (sentence) button."""
    # FIX: dropped `global windowPosition` -- no such module-level name is
    # ever defined, so the declaration was dead code.
    global btn_str
    print("middle button pressed!")
    btn_str[4].set("new view")
# Build the nine grid buttons. The centre button (index 4) is the sentence
# display and gets its own handler; the rest insert their letter.
button = []
for i in range(9):
    if i == 4:
        handler = middleButton_click
    else:
        handler = (lambda n=i: button_click(n))
    button.append(tk.Button(frame, textvariable=btn_str[i], command=handler))

# (weight-config row, weight-config column, grid placement) for each button,
# reproducing the original layout: a 4x4 grid with the sentence button
# spanning the centre 2x2 region.
_layout = [
    (0, 0, dict(row=0, column=0)),
    (0, 1, dict(row=0, column=1, columnspan=2)),
    (0, 3, dict(row=0, column=3)),
    (1, 0, dict(row=1, column=0, rowspan=2)),
    (1, 1, dict(row=1, column=1, columnspan=2, rowspan=2)),
    (1, 3, dict(row=1, column=3, rowspan=2)),
    (3, 0, dict(row=3, column=0)),
    (2, 2, dict(row=3, column=1, columnspan=2)),
    (3, 3, dict(row=3, column=3)),
]
for btn, (cfg_row, cfg_col, placement) in zip(button, _layout):
    tk.Grid.rowconfigure(frame, cfg_row, weight=1)
    tk.Grid.columnconfigure(frame, cfg_col, weight=1)
    btn.grid(sticky='NSEW', **placement)

btn_str[0].set("a")

# Enter the Tk event loop.
root.mainloop()
|
# pyowm is the open weather api client, use ide and docs to find methods https://openweathermap.org/api
# to look at client class import pyowm.weatherapi25.weather.Weather
import pyowm
from pyowm.exceptions.api_response_error import NotFoundError, UnauthorizedError, APIResponseError
# allows for conversion from degrees to wind direction
from weather.helper.utils import wind_deg_to_text
class WeatherApiWrapper:
    """
    Facade over the pyowm OpenWeatherMap client.
    """

    def __init__(self, options):
        """Build the wrapper; *options* must supply a non-empty 'api_key'."""
        api_key = options['api_key']
        if not api_key:
            # A missing key makes the client unusable, so fail fast.
            raise WeatherApiException("No api key found, exiting")
        self.client = pyowm.OWM(options['api_key'])

    def _query_location(self, location):
        """Private helper: resolve a place string (e.g. 'London,GB') to a weather object."""
        return self.client.weather_at_place(location).get_weather()

    def get_current_weather_at_location(self, location, temperature_unit, wind_unit):
        """
        Gets current weather status at a specific location
        returns a dict of form
        {'status': 'Clouds', 'icon_url': 'http://openweathermap.org/img/w/04d.png', 'temp': 14.32, 'temp_max': 15.0,
        'temp_min': 12.78, 'temp_kf': None}
        """
        try:
            weather_location = self._query_location(location)
        except (NotFoundError, UnauthorizedError) as e:
            # Unknown place or bad key -- report rather than crash.
            return {'errors': [e]}
        weather_dict = {
            "status": weather_location.get_status(),
            "icon_url": weather_location.get_weather_icon_url(),
        }
        try:
            # get_temperature/get_wind return dicts, so merge them into ours.
            weather_dict.update(weather_location.get_temperature(temperature_unit))
            weather_dict.update(weather_location.get_wind(wind_unit))
            weather_dict.update({"wind_sector": wind_deg_to_text(weather_dict['deg'])})
        except APIResponseError as e:
            return {'errors': [e]}
        return weather_dict

    def get_current_weather_at_coordinates(self, longtitude, latitude, unit):
        """Gets the current weather at a specified longitude and latitude"""
        try:
            forecast = self.client.weather_at_coords(latitude, longtitude)
            weather_location = forecast.get_weather()
        except (ValueError, UnauthorizedError) as e:
            return {'errors': [e]}
        weather_dict = {
            "status": weather_location.get_status(),
            "icon_url": weather_location.get_weather_icon_url(),
        }
        weather_dict.update(weather_location.get_temperature(unit))
        return weather_dict
##
class WeatherApiException(Exception):
    """Generic exception raised by the weather API wrapper."""
|
# ======================================================================================================================
# PROJECT NAME: Parking Sensor Mock
# FILE NAME: Main
# FILE VERSION: 1.0
# DATE: 19.05.2019
# AUTHOR: Piotr Skalski [github.com/SkalskiP]
# ======================================================================================================================
# File contains function used to define id of parking spot to be released
# ======================================================================================================================
import curses
import zeep
import requests
from ..utils.views_names import ViewsNames
from ..utils.soap_client import SoapClient
from ..utils.error_type import ErrorType
def select_spot_to_release_view(router):
    """Prompt for a parking-spot id and mark that spot as released.

    On success the SOAP response and NO_ERROR are stored on *router*;
    parse/SOAP/connection failures store the matching ErrorType instead.
    Either way the router advances to the outcome view, unless the user
    typed 'q', which returns to the actions menu.
    """
    router.screen.clear()
    router.screen.addstr(1, 4, "Insert id of parking spot you want to mark as released...", curses.A_BOLD)
    # FIX: corrected user-visible typo "aboard" -> "abort".
    router.screen.addstr(2, 4, "Type in id or insert q to abort:")
    s = router.read_text_from_user(4, 4)
    # FIX: the original used `s is not "q"` -- an identity comparison that is
    # unreliable for strings (and a SyntaxWarning on modern Python).
    if s != "q":
        try:
            spot_id = int(s)
            router.spot_id = spot_id
            router.soap_response = SoapClient.release_parking_spot(spot_id)
            router.error_type = ErrorType.NO_ERROR
        except ValueError:
            # Input was not an integer.
            router.error_type = ErrorType.PARSING
        except zeep.exceptions.Fault:
            # The service rejected the id.
            router.error_type = ErrorType.WRONG_ID
        except requests.exceptions.ConnectionError:
            router.error_type = ErrorType.NO_CONNECTION
        finally:
            router.current_view = ViewsNames.SHOW_RELEASE_SPOT_OUTCOME
    else:
        router.current_view = ViewsNames.ACTIONS_MENU
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/MainWindow.ui'
#
# Created: Tue Dec 26 22:57:29 2017
# by: pyside-uic 0.2.13 running on PySide 1.2.2
#
# WARNING! All changes made in this file will be lost!
from PySide import QtCore, QtGui
class Ui_MainWindow(object):
    """Auto-generated PySide UI class for the main window.

    Generated by pyside-uic from ui/MainWindow.ui; regenerate from the .ui
    file instead of editing this class by hand (changes here will be lost).
    """
    def setupUi(self, MainWindow):
        # Build the widget tree and layouts for MainWindow.
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(766, 565)
        # Central widget hosting the tabbed mode switcher.
        self.centralwidget = QtGui.QWidget(MainWindow)
        self.centralwidget.setAutoFillBackground(False)
        self.centralwidget.setObjectName("centralwidget")
        self.gridLayout = QtGui.QGridLayout(self.centralwidget)
        self.gridLayout.setContentsMargins(0, 0, 0, 0)
        self.gridLayout.setObjectName("gridLayout")
        # Tab widget switching between the "Story" tab and a second tab.
        self.ModeSwitcher = QtGui.QTabWidget(self.centralwidget)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.ModeSwitcher.sizePolicy().hasHeightForWidth())
        self.ModeSwitcher.setSizePolicy(sizePolicy)
        self.ModeSwitcher.setObjectName("ModeSwitcher")
        # First tab: story view (text browser) above a plain-text input box.
        self.StoryMode = QtGui.QWidget()
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.StoryMode.sizePolicy().hasHeightForWidth())
        self.StoryMode.setSizePolicy(sizePolicy)
        self.StoryMode.setObjectName("StoryMode")
        self.verticalLayout = QtGui.QVBoxLayout(self.StoryMode)
        self.verticalLayout.setSpacing(0)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setObjectName("verticalLayout")
        self.StoryMode_Text = QtGui.QTextBrowser(self.StoryMode)
        self.StoryMode_Text.setObjectName("StoryMode_Text")
        self.verticalLayout.addWidget(self.StoryMode_Text)
        self.StoryMode_Input = QtGui.QPlainTextEdit(self.StoryMode)
        sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Maximum)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.StoryMode_Input.sizePolicy().hasHeightForWidth())
        self.StoryMode_Input.setSizePolicy(sizePolicy)
        # NOTE(review): minimum height (100) exceeds maximum height (50) --
        # this comes from the .ui file and looks inconsistent; confirm intent
        # in the designer rather than here.
        self.StoryMode_Input.setMinimumSize(QtCore.QSize(0, 100))
        self.StoryMode_Input.setMaximumSize(QtCore.QSize(16777215, 50))
        self.StoryMode_Input.setContextMenuPolicy(QtCore.Qt.NoContextMenu)
        self.StoryMode_Input.setAcceptDrops(False)
        self.StoryMode_Input.setUndoRedoEnabled(False)
        self.StoryMode_Input.setTextInteractionFlags(QtCore.Qt.TextEditable)
        self.StoryMode_Input.setObjectName("StoryMode_Input")
        self.verticalLayout.addWidget(self.StoryMode_Input)
        self.ModeSwitcher.addTab(self.StoryMode, "")
        # Second (placeholder) tab.
        self.tab_2 = QtGui.QWidget()
        self.tab_2.setObjectName("tab_2")
        self.ModeSwitcher.addTab(self.tab_2, "")
        self.gridLayout.addWidget(self.ModeSwitcher, 0, 0, 1, 1)
        MainWindow.setCentralWidget(self.centralwidget)
        # Standard menu bar and status bar.
        self.menubar = QtGui.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 766, 23))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtGui.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        self.ModeSwitcher.setCurrentIndex(0)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
    def retranslateUi(self, MainWindow):
        # Set all user-visible strings (kept separate so they can be retranslated).
        MainWindow.setWindowTitle(QtGui.QApplication.translate("MainWindow", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.ModeSwitcher.setTabText(self.ModeSwitcher.indexOf(self.StoryMode), QtGui.QApplication.translate("MainWindow", "Story", None, QtGui.QApplication.UnicodeUTF8))
        self.ModeSwitcher.setTabText(self.ModeSwitcher.indexOf(self.tab_2), QtGui.QApplication.translate("MainWindow", "Tab 2", None, QtGui.QApplication.UnicodeUTF8))
|
from django.urls import path, include,re_path
from user import views
# URL routes for the user app.
urlpatterns = [
    # News list with pictures.
    path('newslistpic/', views.newlistpic),
    # Article detail content.
    path('detail_con/', views.detail_con),
    # Picture list.
    path('listpic/', views.listpic),
]
|
import os
from .indices import (
read_ids, read_path_ids, ids_match, trim_ids,
ids_startswith, ids_tail_match_head, ids_match_tail,
)
__all__ = [
'find_calc_dir', 'find_data_dir', 'find_data_fname',
'find_data_file', 'get_data_fname', 'get_data_file',
'find_data_subdir',
]
def iter_subdir(topdir):
    """Yield the names of the immediate subdirectories of *topdir*."""
    for entry in os.listdir(topdir):
        if os.path.isdir(os.path.join(topdir, entry)):
            yield entry
def find_data_fname(ids, where='.', sep='-', prefix=None, tag=None,
                    ext=None,
                    recursive=True,
                    ids_include_directory=False,
                    accumulate_ids=True,
                    ):
    """
    Search for a file with specific ids, within a certain directory.

    Raise an exception if the file is not found.

    Arguments:
        ids: id sequence (or single id) the file name must match.
        where: directory to search.
        sep: separator between ids in file names.
        prefix: if given, only consider files starting with this prefix.
        tag: if given, only consider files containing this substring.
        ext: if given, only consider files with this extension.
        recursive: also descend into subdirectories.
        ids_include_directory: match ids against the full path, not just
            the file name.
        accumulate_ids: consume leading ids matched by subdirectory names
            before recursing.

    Examples:
    >>> find_data_file([116, 400], where='116-xct-ph-test/Data/')
    '116-xct-ph-test/Data/data-116-400-xctph.nc'
    >>> find_data_file([116, 400, 3], where='116-xct-ph-test/Data',
    ...                ids_include_directory=True, accumulate_ids=True)
    '116-xct-ph-test/Data/116-400-bse-qgrid/data-3-bse.nc'
    >>> find_data_file([116, 116, 400], where='116-xct-ph-test/Data/',
    ...                ids_include_directory=True, accumulate_ids=True)
    '116-xct-ph-test/Data/data-116-400-xctph.nc'
    >>> find_data_file([3], where='116-xct-ph-test/Data')
    '116-xct-ph-test/Data/116-400-bse-qgrid/data-3-bse.nc'
    """
    ids = listify(ids)
    # Scan all the files in the directory.
    for fname in os.listdir(where):
        fname_fullpath = os.path.join(where, fname)
        if os.path.isdir(fname_fullpath):
            continue
        if tag and tag not in fname:
            continue
        if prefix and not fname.startswith(prefix):
            continue
        if ext is not None and not fname.endswith(ext):
            continue
        # FIX: the original used fname.lstrip(prefix), but str.lstrip strips
        # *characters* of its argument, not a prefix -- it could eat leading
        # id digits. Slice the prefix off instead (startswith was checked above).
        fname_scan = fname[len(prefix):] if prefix else fname
        fname_scan = fname_scan.lstrip(sep)
        if ids_include_directory:
            fname_scan = os.path.join(where, fname_scan)
        fname_ids = read_path_ids(fname_scan)
        if ids_match(ids, fname_ids):
            return fname_fullpath
    # Descend into each subdirectory and look for files.
    if recursive:
        kwargs = dict(recursive=recursive, sep=sep, prefix=prefix, tag=tag,
                      ids_include_directory=ids_include_directory,
                      accumulate_ids=accumulate_ids)
        for subdir in iter_subdir(where):
            subdir_fullpath = os.path.join(where, subdir)
            if accumulate_ids:
                subdir_ids = read_ids(subdir)
                if not ids_startswith(ids, subdir_ids):
                    continue
                kwargs['ids'] = trim_ids(ids, subdir_ids)
            else:
                kwargs['ids'] = ids
            try:
                return find_data_fname(where=subdir_fullpath, **kwargs)
            except Exception:
                # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit propagate; a failed branch just means "not here".
                pass
    raise Exception(
        'Did not find any file matching ids {} in directory {}'.format(
            ids, where)
    )
# GKA: Cant seem to decide on a name...
# Backwards-compatible aliases: all four public names refer to the same function.
get_data_fname = find_data_fname
find_data_file = find_data_fname
get_data_file = find_data_fname
def find_data_subdir(ids, where='.'):
    """
    Scan recursively a directory to find one directory whose sequence
    matches ids.

    Raises Exception when no matching directory exists under *where*.
    """
    for subdir in iter_subdir(where):
        subdir_fullpath = os.path.join(where, subdir)
        subdir_ids = read_ids(subdir)
        subdir_path_ids = read_path_ids(subdir_fullpath)
        if ids_match(ids, subdir_ids) or ids_match_tail(ids, subdir_path_ids):
            return subdir_fullpath
        elif ids_tail_match_head(subdir_path_ids, ids):
            # Partial match: descend and keep looking.
            try:
                return find_data_subdir(ids, where=subdir_fullpath)
            except Exception:
                # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
                # SystemExit propagate; failure here just means "not in this branch".
                pass
    raise Exception('could not find calculation directory.\n' +
                    'directory: {}\n'.format(where) +
                    'ids: {}\n'.format(ids)
                    )
def find_calc_dir(ids, where=None):
    """
    Find a calculation directory inside the production directory (where)
    for which each subdirectory is identified with one element of ids.
    The subdirectory depth matches the length of ids.
    """
    ids = listify(ids)
    if not where:
        from .filebuttler import production_dir
        where = production_dir
    matched = 0
    current = where
    for target_id in ids:
        # Look for a child of the current directory whose leading id matches.
        for entry in os.listdir(current):
            entry_ids = read_ids(entry)
            if entry_ids and target_id == entry_ids[0]:
                current = os.path.join(current, entry)
                matched += 1
                break
    if matched < len(ids):
        raise Exception('could not find calculation directory.\n' +
                        'directory: {}\n'.format(where) +
                        'ids: {}\n'.format(ids)
                        )
    return current
def find_data_dir(ids, where=None, data_subdir_name='Data'):
    """
    Find a Data directory inside a calculation directory (identified with ids)
    inside the production directory (where)
    for which each subdirectory is identified with one element of ids.
    The subdirectory depth matches the length of ids.
    """
    calc_dir = find_calc_dir(listify(ids), where=where)
    if not data_subdir_name:
        # No data subdirectory requested: the calculation dir is the answer.
        return calc_dir
    data_dir = os.path.join(calc_dir, data_subdir_name)
    if os.path.exists(data_dir):
        return data_dir
    raise Exception(
        'Could not find data directory within calculation directory:\n'
        + calc_dir)
def listify(obj):
    """Return *obj* as a list: iterables are expanded, scalars are wrapped.

    Note that strings are iterable, so listify('ab') == ['a', 'b'].
    """
    try:
        return list(obj)
    except TypeError:
        # FIX: narrowed from a bare `except:`; only "not iterable" should
        # trigger the scalar fallback.
        return [obj]
|
class TreeNode:
    """A binary-tree node holding a value and optional left/right subtrees."""

    def __init__(self, data, left=None, right=None):
        self.data = data    # payload stored at this node
        self.left = left    # left subtree (TreeNode or None)
        self.right = right  # right subtree (TreeNode or None)

    def __str__(self):
        return 'TreeNode({}, {}, {})'.format(self.data, self.left, self.right)

    def to_list(self):
        """Return all values via an in-order (left, self, right) traversal."""
        left_part = self.left.to_list() if self.left is not None else []
        right_part = self.right.to_list() if self.right is not None else []
        return left_part + [self.data] + right_part
class BinarySearchTree:
    """Binary search tree built from a non-empty sequence of values.

    Values equal to a node go to its left subtree.
    """

    def __init__(self, tree_data):
        self.tree = TreeNode(tree_data[0])
        for value in tree_data[1:]:
            node = self.tree
            while True:
                if value <= node.data:
                    if node.left is None:
                        # FIX: the original inserted TreeNode(str(value)),
                        # coercing every non-root value to a string -- for
                        # numeric data the next comparison raised TypeError
                        # and sorted_data() returned mixed types.
                        node.left = TreeNode(value)
                        break
                    node = node.left
                else:
                    if node.right is None:
                        node.right = TreeNode(value)
                        break
                    node = node.right

    def data(self):
        """Return the root TreeNode."""
        return self.tree

    def sorted_data(self):
        """Return all stored values in ascending order."""
        return self.tree.to_list()
import os
from serif import Document
from serif.model.ingester import Ingester
from serif.theory.alert_author import ALERTAuthor
import csv
import sys
csv.field_size_limit(sys.maxsize)
class CSVIngester(Ingester):
    """Ingester that builds one serif Document per row of a CSV file."""

    def __init__(self, lang, headers, csv_file=None, corpus=None, **kwargs):
        super(CSVIngester, self).__init__(**kwargs)
        self.headers = headers.split(",")  # column names, in file order
        self.language = lang
        self.corpus = corpus

    def ingest(self, filepath):
        """Parse *filepath* and return a list of Documents, one per CSV row."""
        documents = []
        base_docid = CSVIngester.get_docid_from_filename(filepath)
        with open(filepath) as f:
            # Strip NUL bytes, which the csv module cannot handle.
            reader = csv.reader(line.replace('\0', '') for line in f)
            for row_index, row in enumerate(reader):
                assert len(row) == len(self.headers)
                # Prefer an explicit docid column; otherwise derive one from
                # the file name and the row index.
                if "docid" in self.headers:
                    cur_docid = row[self.headers.index("docid")]
                else:
                    cur_docid = base_docid + "_" + str(row_index)
                text = row[self.headers.index("text")]
                doc = Document.from_string(text, self.language, cur_docid)
                self.get_author_attributes_from_csv(row, doc)
                if self.corpus:
                    doc.alert_metadata.corpus = self.corpus
                documents.append(doc)
        return documents

    @staticmethod
    def get_docid_from_filename(filepath):
        """Return the file's base name with a trailing ".csv" removed."""
        basename = os.path.basename(filepath)
        return basename[0:-4] if basename.endswith(".csv") else basename

    def get_author_attributes_from_csv(self, row, doc):
        """Copy CSV columns that match author attributes onto a new author record."""
        doc.add_alert_metadata()
        author = doc.alert_metadata.add_new_author()
        for header, field in zip(self.headers, row):
            if hasattr(author, header):
                setattr(author, header, field)
from unittest import TestCase
from basketball_reference_web_scraper.data import OUTCOME_ABBREVIATIONS_TO_OUTCOME, Outcome
from basketball_reference_web_scraper.parsers import OutcomeAbbreviationParser, PlayerBoxScoreOutcomeParser
class TestPlayerBoxScoreOutcomeParser(TestCase):
    """Tests for PlayerBoxScoreOutcomeParser: abbreviation, outcome and
    margin-of-victory parsing for formatted strings like "W (+8)"."""

    def setUp(self):
        abbreviation_parser = OutcomeAbbreviationParser(
            abbreviations_to_outcomes=OUTCOME_ABBREVIATIONS_TO_OUTCOME
        )
        self.parser = PlayerBoxScoreOutcomeParser(
            outcome_abbreviation_parser=abbreviation_parser
        )

    def test_parse_win_abbreviation_for_single_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome_abbreviation(formatted_outcome="W (+8)"), "W")

    def test_parse_win_abbreviation_for_double_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome_abbreviation(formatted_outcome="W (+18)"), "W")

    def test_parse_loss_abbreviation_for_single_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome_abbreviation(formatted_outcome="L (-8)"), "L")

    def test_parse_loss_abbreviation_for_double_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome_abbreviation(formatted_outcome="L (-18)"), "L")

    def test_parse_win_outcome_for_single_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome(formatted_outcome="W (+8)"), Outcome.WIN)

    def test_parse_win_outcome_for_double_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome(formatted_outcome="W (+18)"), Outcome.WIN)

    def test_parse_loss_outcome_for_single_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome(formatted_outcome="L (-8)"), Outcome.LOSS)

    def test_parse_loss_outcome_for_double_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_outcome(formatted_outcome="L (-18)"), Outcome.LOSS)

    def test_parse_positive_single_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_margin_of_victory(formatted_outcome="W (+8)"), 8)

    def test_parse_positive_double_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_margin_of_victory(formatted_outcome="W (+18)"), 18)

    def test_parse_negative_single_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_margin_of_victory(formatted_outcome="L (-8)"), -8)

    def test_parse_negative_double_digit_margin_of_victory(self):
        self.assertEqual(
            self.parser.parse_margin_of_victory(formatted_outcome="L (-18)"), -18)
|
#!/usr/local/bin/python -tt
import sys
import argparse
import networkx as nx
import operator
import colorsys
# Module-wide state shared by the log-processing routines below.
graphFilePostfix = None;
graphType = None;
multipleAcquireWithoutRelease = 0;  # count of acquires seen while another acquire was pending
noMatchingAcquireOnRelease = 0;     # count of releases with no matching acquire
separator = " ";                    # field separator used in log records
tryLockWarning = 0;                 # count of suspicious trylock records
verbose = False;                    # emit extra diagnostics when True
#
# LogRecord contains all the fields we expect in the log record.
class LogRecord:
    """One parsed entry from the performance log.

    NOTE(review): this module is Python 2 code -- it relies on the `long`
    builtin (and elsewhere on dict.has_key), which do not exist in Python 3.
    """
    def __init__(self, func, op, thread, time, otherInfo):
        self.func = func;        # function name
        self.op = op;            # operation recorded in the log entry
        self.thread = thread;    # id of the thread that produced the entry
        self.time = long(time);  # timestamp; `long` is Python 2 only
        self.otherInfo = otherInfo;
        #
        # otherInfo typically includes argument values. We append
        # it to the function name.
        #
        if (otherInfo is not None):
            self.fullName = func + " " + otherInfo;
        else:
            self.fullName = func;
    def printLogRecord(self):
        # Dump all fields on one line for debugging.
        print(self.op + " " + self.func + " " + str(self.thread) + " "
              + str(self.time));
#
# LockRecord contains temporary information for generating lock-held times
class LockRecord:
    """Temporary record of an acquired lock, used to compute lock-held times
    once the matching release is seen."""
    def __init__(self, name, fname, thread, timeAcquired):
        self.name = name;           # lock identifier
        self.funcName = fname;      # function that performed the acquire
        self.thread = thread;       # acquiring thread id
        self.timeAcquired = long(timeAcquired);  # acquire timestamp (`long` is Python 2 only)
    def printLockRecord(self):
        # One-line debug dump of this record.
        print(self.name + ": [" + str(self.thread) + "] " +
              str(self.timeAcquired) + "\n");
#
# TraceStats class holds attributes pertaining to the performance
# trace.
class TraceStats:
    """Attributes of one performance trace: its name and time span."""

    def __init__(self, name):
        self.name = name
        self.startTime = 0  # 0 means "not yet set"
        self.endTime = 0    # 0 means "not yet set"

    def setStartTime(self, startTime):
        self.startTime = startTime

    def setEndTime(self, endTime):
        self.endTime = endTime

    def getTotalTime(self):
        """Return endTime - startTime, warning if either endpoint is unset."""
        if self.startTime == 0 or self.endTime == 0:
            print("Warning: start or end time not set for trace " +
                  self.name)
        return self.endTime - self.startTime
#
# PerfData class contains informtation about the function running
# times.
class PerfData:
    """Aggregate running-time statistics for one function.

    NOTE(review): Python 2 code -- uses the `long` builtin and dict.has_key().
    """
    def __init__(self, name, otherInfo, threadID):
        self.name = name;
        # If this is a lock function, then otherInfo
        # would contain the information for identifying
        # this lock.
        #
        self.lockName = otherInfo;
        self.threadID = threadID;
        self.numCalls = 0;                 # number of recorded invocations
        self.totalRunningTime = long(0);   # sum of all running times (ns)
        self.runningTimes = [];            # individual running-time samples
        self.maxRunningTime = 0;           # largest single running time seen
        self.maxRunningTimeTimestamp = 0;  # timestamp of that maximum
        self.filtered = False;             # True when excluded from reports
    def getAverage(self):
        # Mean running time in ns; assumes numCalls > 0.
        return (float(self.totalRunningTime) / float(self.numCalls));
    def printSelf(self, file):
        # Write a plain-text summary to `file` (stdout when None).
        if(file is None):
            file = sys.stdout;
        file.write("*** " + self.name + "\t" +
                   str(self.numCalls) + "\t" + str(self.totalRunningTime) + "\t"
                   + str(self.getAverage()) + "\n");
        file.write("\t Total running time: " +
                   '{:,}'.format(self.totalRunningTime) +
                   " ns.\n");
        file.write("\t Average running time: "
                   + '{:,}'.format(long(self.getAverage())) + " ns.\n");
        file.write("\t Largest running time: " +
                   '{:,}'.format(self.maxRunningTime) +
                   " ns.\n");
    def printSelfHTML(self, prefix, locksSummaryRecords):
        # Write a per-function report file under `prefix`; when this function
        # is a lock operation, append that lock's summary as well.
        with open(prefix + "/" + self.name + ".html", 'w+') as file:
            file.write("*** " + self.name + "\n");
            file.write("\t Total running time: " +
                       '{:,}'.format(self.totalRunningTime) +
                       " ns.\n");
            file.write("\t Average running time: "
                       + '{:,}'.format(long(self.getAverage())) + " ns.\n");
            file.write("\t Largest running time: " +
                       '{:,}'.format(self.maxRunningTime) +
                       " ns.\n");
            file.write("------------------\n");
            if (self.lockName is not None):
                # has_key is Python 2 only.
                if (locksSummaryRecords.has_key(self.lockName)):
                    lockData = locksSummaryRecords[self.lockName];
                    lockData.printSelfHTML(file);
#
# LockData class contains information about lock-related functions
class LockData:
    """Aggregated statistics for one lock: acquire/trylock/release counts,
    cumulative times, and the pending acquire used to compute held time."""

    def __init__(self, name):
        self.name = name
        self.numAcquire = 0
        self.numRelease = 0
        self.numTryLock = 0
        self.timeAcquire = 0
        self.timeTryLock = 0
        self.timeRelease = 0
        self.timeHeld = 0
        self.lastAcquireRecord = None  # pending LockRecord awaiting release
        self.lockHeldTimes = []

    def getAverageAcquire(self):
        """Mean time spent in acquire, or 0 if never acquired."""
        return float(self.timeAcquire) / float(self.numAcquire) if self.numAcquire > 0 else 0

    def getAverageRelease(self):
        """Mean time spent in release, or 0 if never released."""
        return float(self.timeRelease) / float(self.numRelease) if self.numRelease > 0 else 0

    def getAverageTryLock(self):
        """Mean time spent in trylock, or 0 if never try-locked."""
        return float(self.timeTryLock) / float(self.numTryLock) if self.numTryLock > 0 else 0

    def getAverageTimeHeld(self):
        """Mean lock-held time per release, or 0 if never released."""
        return float(self.timeHeld) / float(self.numRelease) if self.numRelease > 0 else 0

    def printSelf(self, file):
        # Plain-text report; defaults to stdout. The body is identical to
        # the HTML flavour, so delegate once the stream is resolved.
        if (file is None):
            file = sys.stdout
        self.printSelfHTML(file)

    def printSelfHTML(self, file):
        file.write("Lock \"" + self.name + "\":\n")
        file.write("\t Num acquire: " + str(self.numAcquire) + "\n")
        file.write("\t Num trylock: " + str(self.numTryLock) + "\n")
        file.write("\t Num release: " + str(self.numRelease) + "\n")
        file.write("\t Average time in acquire: "
                   + str(long(self.getAverageAcquire())) + " ns.\n")
        file.write("\t Average time in trylock: "
                   + str(long(self.getAverageTryLock())) + " ns.\n")
        file.write("\t Average time in release: "
                   + str(long(self.getAverageRelease())) + " ns.\n")
        file.write("\t Average time the lock was held: "
                   + str(long(self.getAverageTimeHeld())) + " ns.\n")
#
# The following data structures and functions help us decide what
# kind of lock-related action the function is doing:
# acquiring the lock, releasing the lock, of trying to acquire the lock.
#
# Substring hints used to classify a function name by the lock action it
# performs: acquiring, releasing, or try-acquiring a lock.
acquireStrings = ["acquire", "lock"]
trylockStrings = ["trylock"]
releaseStrings = ["release", "unlock"]


def looks_like_trylock(funcname):
    """True if the name suggests a non-blocking lock attempt."""
    return any(hint in funcname for hint in trylockStrings)


def looks_like_release(funcname):
    """True if the name suggests a lock release."""
    return any(hint in funcname for hint in releaseStrings)


def looks_like_acquire(funcname):
    """True if the name suggests a blocking acquire.

    Release and trylock names are excluded first, since e.g. "unlock" and
    "trylock" both contain the acquire hint "lock".
    """
    if looks_like_release(funcname) or looks_like_trylock(funcname):
        return False
    return any(hint in funcname for hint in acquireStrings)


def looks_like_lock(funcname):
    """True if the name suggests any lock-related operation at all."""
    return (looks_like_acquire(funcname) or
            looks_like_release(funcname) or
            looks_like_trylock(funcname))
def do_lock_processing(locksDictionary, logRec, runningTime,
                       lockName):
    """Update per-lock statistics for one completed lock-related call.

    locksDictionary maps lock name -> LockData; logRec is the exit log
    record of the lock function; runningTime is the time spent inside
    that function; lockName identifies the lock instance.
    Mutates the LockData entry in place and bumps the module-level
    warning counters (declared global below).
    NOTE: Python 2 code (``has_key``, ``long``).
    """
    global verbose;
    global multipleAcquireWithoutRelease;
    global tryLockWarning;
    global noMatchingAcquireOnRelease;
    func = logRec.func
    # Create the per-lock stats object on first sight of this lock.
    if(not locksDictionary.has_key(lockName)):
        lockData = LockData(lockName);
        locksDictionary[lockName] = lockData;
    lockData = locksDictionary[lockName];
    lastAcquireRecord = lockData.lastAcquireRecord;
    # If this is an acquire or trylock, simply update the stats in the
    # lockData object and remember the lastAcquire record, so we can
    # later match it with a corresponding lock release.
    #
    # If this is a release, update the stats in the lockData object and
    # get the corresponding acquire or trylock so we can compute the lock
    # held time.
    #
    if(looks_like_acquire(func) or looks_like_trylock(func)):
        lockRec = LockRecord(lockName, func, logRec.thread, logRec.time);
        if(looks_like_acquire(func)):
            if(lastAcquireRecord is not None):
                # Two acquires without an intervening release: only
                # counted/reported, the first acquire record is kept.
                if(verbose):
                    print("Another acquire record seen on acquire. "
                          " for lock " + lockName);
                    print("Current lock record:");
                    lockRec.printLockRecord();
                    print("Existing acquire record:");
                    lastAcquireRecord.printLockRecord();
                multipleAcquireWithoutRelease = multipleAcquireWithoutRelease \
                                                + 1;
            else:
                lockData.lastAcquireRecord = lockRec;
            lockData.numAcquire = lockData.numAcquire + 1;
            lockData.timeAcquire = lockData.timeAcquire + runningTime;
        elif(looks_like_trylock(func)):
            if(lastAcquireRecord is not None):
                if(lastAcquireRecord.funcName != func):
                    # A trylock while a different function holds the
                    # outstanding acquire record — flagged, not fatal.
                    if(verbose):
                        print("Warning: A trylock record seen, but not in the "
                              "same function as ours!");
                        print("Current lock record:");
                        lockRec.printLockRecord();
                        print("Existing acquire record:");
                        lastAcquireRecord.printLockRecord();
                    tryLockWarning = tryLockWarning + 1;
                else:
                    # If there is already an acquire record with the same func
                    # name as ours, this means that the lock was not acquired in
                    # the last try attempt. We update the timestamp, so that
                    # lock held time is subsequently calculated correctly.
                    lastAcquireRecord.timeAcquired = logRec.time;
            else:
                lockData.lastAcquireRecord = lockRec;
            lockData.numTryLock = lockData.numTryLock + 1;
            lockData.timeTryLock = lockData.timeTryLock + runningTime;
        else:
            # Unreachable: guarded by the enclosing if.
            print("PANIC!")
            sys.exit(-1);
    elif(looks_like_release(func)):
        if(lastAcquireRecord is None):
            if(verbose):
                print("Could not find a matching acquire for: ")
                logRec.printLogRecord();
                print("Lock name: " + lockName);
            noMatchingAcquireOnRelease = noMatchingAcquireOnRelease + 1;
        else:
            # Lock held time = release timestamp - matched acquire timestamp.
            lockHeldTime = logRec.time - lastAcquireRecord.timeAcquired;
            lockData.timeHeld = lockData.timeHeld + lockHeldTime;
            lockData.lockHeldTimes.append(long(lockHeldTime));
            # Reset the lockAcquire record to null
            lockData.lastAcquireRecord = None;
        lockData.numRelease = lockData.numRelease + 1;
        lockData.timeRelease = lockData.timeRelease + runningTime;
    else:
        print("PANIC! Unrecognized lock function: " + func);
        sys.exit(-1);
class HSL:
    """A color in hue/saturation/lightness space.

    h is in degrees [0, 360]; s and l are fractions in [0, 1].
    """
    def __init__(self, h, s, l):
        self.h = h
        self.s = s
        self.l = l

    #
    # Code borrowed from http://www.rapidtables.com/convert/color/hsl-to-rgb.htm
    #
    def toRGB(self):
        """Convert to an (r, g, b) tuple of ints in [0, 255].

        Returns (-1, -1, -1) when any component is out of range (callers
        should check before formatting, see toHex).
        """
        h = float(self.h)
        s = self.s
        l = self.l
        if h < 0 or h > 360:
            return -1, -1, -1
        if s < 0 or s > 1:
            return -1, -1, -1
        if l < 0 or l > 1:
            return -1, -1, -1
        C = (1 - abs(2*l - 1)) * s           # chroma
        X = C * (1 - abs(h / 60 % 2 - 1))    # second-largest component
        m = l - C/2                          # lightness offset
        # Pick (r', g', b') by 60-degree hue sector.
        if h >= 0 and h < 60:
            r, g, b = C, X, 0
        elif h >= 60 and h < 120:
            r, g, b = X, C, 0
        elif h >= 120 and h < 180:
            # BUG FIX: the blue component was 0 here, which broke the
            # conversion for hues in the green-cyan band; the reference
            # formula uses (0, C, X) for 120 <= h < 180.
            r, g, b = 0, C, X
        elif h >= 180 and h < 240:
            r, g, b = 0, X, C
        elif h >= 240 and h < 300:
            r, g, b = X, 0, C
        else:  # 300 <= h <= 360
            r, g, b = C, 0, X
        r = int(round((r + m) * 255))
        g = int(round((g + m) * 255))
        b = int(round((b + m) * 255))
        return r, g, b

    def toHex(self):
        """Render the color as an HTML hex string, e.g. "#FF0000".

        NOTE(review): an out-of-range color (toRGB's -1 sentinel) is not
        special-cased here and produces a malformed string, as before.
        """
        r, g, b = self.toRGB()
        hexString = "#" + "%0.2X" % int(r) + "%0.2X" % int(g) + "%0.2X" % int(b)
        return hexString
def isInt(s):
    """Return True when int(s) succeeds, False otherwise.

    Also catches TypeError so non-string, non-numeric values such as
    None return False instead of raising (the original only caught
    ValueError, so isInt(None) crashed).
    """
    try:
        int(s)
        return True
    except (ValueError, TypeError):
        return False
def buildColorList():
    """Build a list of 14 hex color strings, sweeping hue from 360 down
    to 100 degrees while gradually raising lightness (a progression from
    bright red toward pale green)."""
    saturation = 0.70
    baseLightness = 0.56
    lightnessStep = 0.02
    return [HSL(360 - 20 * i, saturation, baseLightness + lightnessStep * i).toHex()
            for i in range(14)]
#
# Figure out the percent execution time for each function relative to the total.
# Compute the colour based on the percent. Set the node colour accordingly.
# Update the node label with the percent value.
#
def augment_graph(graph, funcSummaryData, traceStats):
    """Color and label graph nodes by each function's share of runtime.

    funcSummaryData maps function name -> per-function perf record;
    traceStats supplies the total trace time. Mutates node attributes
    (label, style, fillcolor, URL) in place. Uses the module globals
    graphType and the colour table from buildColorList().
    NOTE: Python 2 code (``iteritems``).
    """
    # This dictionary is the mapping between function names and
    # their percent of execution time.
    #
    percentDict = {};
    # This is the dictionary of function names with colour codes
    # based on their contribution to the total runtime.
    #
    funcWithColorCode = {};
    # Generate a progression of colours from bright red to pale green
    #
    rgbArray = buildColorList();
    # Generate a dictionary keyed by function name, where the value is
    # the percent runtime contributed to total by this function.
    #
    traceRuntime = traceStats.getTotalTime();
    for func, pdr in funcSummaryData.iteritems():
        if(pdr.filtered):
            continue;
        percent = float(pdr.totalRunningTime) / float(traceRuntime) * 100;
        percentDict[func] = percent;
    # Sort the dictionary by percent. We get back a list of tuples.
    #
    sortedByPercentRuntime = sorted(percentDict.items(),
                                    key=operator.itemgetter(1));
    # Look up the colour for each function based on its contribution
    # to the total runtime. Greater contribution --> more intense color.
    #
    for funcPercentTuple in reversed(sortedByPercentRuntime):
        func = funcPercentTuple[0];
        percent = funcPercentTuple[1];
        percentStr = str(round(percent)) + "%";
        # Let's find the color for this percent value.
        #
        # 100/7.5 ~= 13.3, so idx stays within the 14-entry colour table.
        idx = int(round((100.0 - percent) / 7.5));
        funcWithColorCode[func] = [rgbArray[idx], percentStr];
    for func, attrs in funcWithColorCode.iteritems():
        # func_only graphs have one node per function; enter_exit graphs
        # have a matched "enter"/"exit" node pair to annotate.
        if graphType == 'func_only':
            allNames = [func];
        else:
            enterNodeName = "enter " + func;
            exitNodeName = "exit " + func;
            allNames = [enterNodeName, exitNodeName];
        for nodeName in allNames:
            graph.node[nodeName]['label'] = nodeName + "\n" + \
                                            attrs[1];
            graph.node[nodeName]['style'] = "filled";
            graph.node[nodeName]['fillcolor'] = attrs[0];
            # Each node links to its per-function HTML page.
            graph.node[nodeName]['URL'] = nodeName + ".html";
def update_graph(graph, nodeName, prevNodeName):
    """Add nodeName (and the prevNodeName -> nodeName edge) to the graph,
    or bump the edge's traversal-count label when the edge exists."""
    if (not graph.has_node(nodeName)):
        graph.add_node(nodeName, fontname="Helvetica");
    if (not graph.has_edge(prevNodeName, nodeName)):
        graph.add_edge(prevNodeName, nodeName, label = " 1 ",
                       fontname="Helvetica");
    else:
        graph[prevNodeName][nodeName]['label'] = \
            " " + str(int(graph[prevNodeName][nodeName]['label']) + 1) + " ";
    # Edges out of START carry no count; func_only edges are pinned to
    # " 1 " (counts are not accumulated in that mode).
    # NOTE(review): the original indentation was ambiguous — these two
    # fix-ups are applied on every call here; confirm they were not meant
    # to run only on the increment (else) path.
    if(prevNodeName == "START"):
        graph[prevNodeName][nodeName]['label'] = "";
    if (graphType == 'func_only'):
        graph[prevNodeName][nodeName]['label'] = " 1 "
def generate_func_only_graph(graph, logRecords, prevNodeName):
    """Build the function-level graph (one node per function, no
    enter/exit pairs) and return the name of the last completed function
    so the caller can attach the END node.

    NOTE(review): on 'enter' the edge is drawn from the last *completed*
    function (lastFuncName), not from the caller currently on top of the
    stack — confirm this is the intended edge semantics.
    """
    funcStack = [prevNodeName];
    lastFuncName = prevNodeName;
    for logRec in logRecords:
        if logRec.op == 'enter':
            update_graph(graph, logRec.func, lastFuncName)
            funcStack.append(logRec.func)
        elif logRec.op == 'exit':
            # Pop only when the exit matches the top of the stack; this
            # skips unmatched/malformed exit records.
            if funcStack[-1] == logRec.func:
                lastFuncName = funcStack.pop()
    return lastFuncName;
def generate_graph(logRecords):
    """Build the execution-flow DiGraph from the (filtered) log records.

    The graph always starts at a box-shaped START node and ends at a
    diamond-shaped END node; the body depends on the global graphType.
    """
    graph = nx.DiGraph()
    graph.add_node("START", fontname="Helvetica")
    graph.node["START"]['shape'] = 'box'
    tail = "START"
    if graphType == 'func_only':
        tail = generate_func_only_graph(graph, logRecords, tail)
    else:
        # One node per enter/exit event, chained in log order.
        for record in logRecords:
            eventNode = record.op + " " + record.fullName
            update_graph(graph, eventNode, tail)
            tail = eventNode
    graph.add_node("END", fontname="Helvetica")
    graph.add_edge(tail, "END")
    graph.node["END"]['shape'] = 'diamond'
    return graph
#
# When we compute the execution flow graph, we will not include any functions
# whose percent execution time is below that value.
#
# Minimum percent of total runtime a function must contribute to be kept.
percentThreshold = 0.0;
# When True, additionally drop functions whose longest invocation is
# shorter than maxRuntimeThreshold.
useMaxRuntimeFilter = False;
maxRuntimeThreshold = 3300000; # in clock cycles
def filterLogRecords(logRecords, funcSummaryRecords, traceStats):
    """Return the log records whose functions pass the percent and
    max-runtime filters; marks the summary records of filtered-out
    functions with pdr.filtered = True."""
    kept = []
    totalTime = traceStats.getTotalTime()
    for rec in logRecords:
        # A log may have no corresponding function record if we stopped
        # logging before the function exit record was generated, as can
        # be with functions that start threads.
        #
        if not funcSummaryRecords.has_key(rec.fullName):
            print("Warning: no performance record for function " + rec.func)
            continue
        pdr = funcSummaryRecords[rec.fullName]
        percent = float(pdr.totalRunningTime) / float(totalTime) * 100
        if percent <= percentThreshold or \
           (useMaxRuntimeFilter and pdr.maxRunningTime < maxRuntimeThreshold):
            pdr.filtered = True
        else:
            kept.append(rec)
    return kept
def generateHTML(htmlFileName, imageFileName, mapFileName):
    """Emit an HTML page showing the graph image with its clickable map.

    NOTE(review): the original body was truncated and did not compile
    (`with open htmlFileName as htmlFile:` is a syntax error and the
    with-blocks were empty). This is a minimal reconstruction: it inlines
    the dot-generated cmapx from *mapFileName* and embeds
    *imageFileName*; the usemap name must match the map's name attribute
    emitted by dot (the graph name, "%3"/"G" by default) — confirm
    against the generated cmapx.
    """
    with open(htmlFileName, "w") as htmlFile:
        with open(mapFileName) as mapFile:
            htmlFile.write("<html><body>\n")
            htmlFile.write(mapFile.read())
            htmlFile.write("<img src=\"" + imageFileName
                           + "\" usemap=\"#G\" />\n")
            htmlFile.write("</body></html>\n")
def parse_file(fname, prefix):
    """Parse one trace log (or stdin when fname is None), accumulate
    per-function and per-lock statistics, and emit the output artifacts:
    a <prefix>.txt copy of the log, per-function HTML pages, the flow
    graph image + cmapx + HTML, and a <prefix>.summary text report.

    Expected line format: "-->|<-- <func> <thread> <time> [extra...]"
    split on the global ``separator``.
    NOTE: Python 2 code (``print`` statements, ``long``, ``has_key``,
    ``iteritems``).
    """
    startTime = 0;
    endTime = 0;
    stack = [];           # pending function-enter records
    lockStack = [];       # NOTE(review): never used below
    outputFile = None;
    if(fname is not None):
        try:
            logFile = open(fname, "r");
            print "Parsing file " + fname;
        except:
            print "Could not open file " + fname;
            return;
    else:
        print "Reading from stdin";
        logFile = sys.stdin;
    try:
        outputFile = open(prefix+".txt", "w");
        print("Output file is " + prefix + ".txt");
    except:
        print("Could not open output file with prefix " + prefix);
        outputFile = sys.stdout;
    funcSummaryRecords = {}   # full function name -> PerfData
    locksSummaryRecords = {}  # lock name -> LockData
    traceStats = TraceStats(prefix);
    logRecords = [];
    # NOTE(review): this graph is superseded by generate_graph() below.
    graph = nx.DiGraph();
    graph.add_node("START", fontname="Helvetica");
    graph.node["START"]['shape']='box'
    prevNodeName = "START";
    for line in logFile:
        words = line.split(separator);
        thread = 0;
        time = 0;
        func = "";
        otherInfo = None;
        # Need at least direction, function, thread and timestamp.
        if(len(words) < 4):
            continue;
        try:
            func = words[1];
            thread = int(words[2]);
            time = long(words[3]);
            if (len(words) > 4):
                # Anything after the timestamp (e.g. argument values or a
                # lock name) is kept as free-form text.
                parts = words[4:len(words)];
                otherInfo = (" ".join(parts)).rstrip();
        except ValueError:
            print "Could not parse: " + line;
            continue;
        if (words[0] == "-->"):
            op = "enter";
        elif (words[0] == "<--"):
            op = "exit";
        else:
            continue;
        rec = LogRecord(func, op, thread, time, otherInfo);
        # First timestamp seen becomes the trace start; every later one
        # pushes the trace end forward.
        if(startTime == 0):
            startTime = time;
        else:
            endTime = time;
        if(op == "enter"):
            # Timestamp for function entrance
            # Push each entry record onto the stack.
            stack.append(rec);
            # Add this log record to the array
            logRecords.append(rec);
            # If we are told to write the records to the output
            # file, do so.
            if(outputFile is not None):
                outputFile.write(line);
        else:
            if(outputFile is not None):
                # If this is a function exit record, we may need to add
                # the name of the lock to the end of the line, if this
                # happens to be a lock function. So for now we write the
                # line without the newline character at the end. Later we
                # will either add the lock name with the newline character
                # (if this happens to be a lock function, or just the
                # newline character otherwise.
                outputFile.write(line.rstrip());
            found = False;
            # Timestamp for function exit. Find its
            # corresponding entry record by searching
            # the stack.
            while(len(stack) > 0):
                stackRec = stack.pop();
                if(stackRec is None):
                    print("Ran out of opening timestamps when searching "
                          "for a match for: " + line);
                    break;
                # If the name of the entrance record
                # on the stack is not the same as the name
                # in the exit record we have on hand, complain
                # and continue. This means that there are errors
                # in the instrumentation, but we don't want to fail
                # because of them.
                if(not (stackRec.func == rec.func)):
                    continue;
                else:
                    # We have a proper function record. Let's add the data to
                    # the file's dictionary for this function.
                    runningTime = long(rec.time) - long(stackRec.time);
                    if(not funcSummaryRecords.has_key(stackRec.fullName)):
                        newPDR = PerfData(stackRec.fullName, otherInfo, thread);
                        funcSummaryRecords[stackRec.fullName] = newPDR;
                    pdr = funcSummaryRecords[stackRec.fullName];
                    pdr.totalRunningTime = pdr.totalRunningTime + runningTime;
                    pdr.numCalls = pdr.numCalls + 1;
                    pdr.runningTimes.append(runningTime);
                    if (runningTime > pdr.maxRunningTime):
                        pdr.maxRunningTime = runningTime;
                        pdr.maxRunningTimeTimeStamp = stackRec.time;
                    found = True
                    # Full name is the name of the function, plus whatever other
                    # info was given to us, usually values of arguments. This
                    # information is only printed for the function entry record,
                    # so if we are at the function exit record, we must copy
                    # that information from the corresponding entry record.
                    #
                    rec.fullName = stackRec.fullName;
                    logRecords.append(rec);
                    # If this is a lock-related function, do lock-related
                    # processing. stackRec.otherInfo variable would contain
                    # the name of the lock, since only the function enter
                    # record has this information, not the exit record.
                    if(stackRec.otherInfo is not None
                       and looks_like_lock(func)):
                        do_lock_processing(locksSummaryRecords, rec,
                                           runningTime,
                                           stackRec.otherInfo);
                        if(outputFile is not None):
                            outputFile.write(" " + stackRec.otherInfo);
                    break;
            if(not found):
                print("Could not find matching function entrance for line: \n"
                      + line);
            # Terminate the exit line we wrote without a newline above.
            if(outputFile is not None):
                outputFile.write("\n");
    traceStats.setStartTime(startTime);
    traceStats.setEndTime(endTime);
    # Filter the log records according to criteria on their attributes
    filteredLogRecords = filterLogRecords(logRecords, funcSummaryRecords,
                                          traceStats);
    # Generate HTML files summarizing function stats for all functions that
    # were not filtered.
    print("Generating per-function HTML files...");
    for pdr in funcSummaryRecords.values():
        if (not pdr.filtered):
            pdr.printSelfHTML(".", locksSummaryRecords);
    # Augment graph attributes to reflect performance characteristics
    graph = generate_graph(filteredLogRecords);
    augment_graph(graph, funcSummaryRecords, traceStats);
    # Prepare the graph
    aGraph = nx.drawing.nx_agraph.to_agraph(graph);
    aGraph.add_subgraph("START", rank = "source");
    aGraph.add_subgraph("END", rank = "sink");
    # Generate files
    nameNoPostfix = prefix + "." + graphType + "."+ str(percentThreshold) + "%."
    imageFileName = nameNoPostfix + graphFilePostfix;
    print("Graph image is saved to: " + imageFileName);
    aGraph.draw(imageFileName, prog = 'dot');
    mapFileName = nameNoPostfix + "cmapx";
    aGraph.draw(mapFileName, prog = 'dot');
    # NOTE(review): ``graphFileName`` is not defined anywhere — this line
    # raises NameError at runtime; it almost certainly should print
    # ``mapFileName``.
    print("Graph image map is saved to: " + graphFileName);
    generateHTML(nameNoPostfix + "html", imageFileName, mapFileName);
    if(outputFile is not None):
        outputFile.close();
    # Write the summary to the output file.
    try:
        summaryFileName = prefix + ".summary";
        summaryFile = open(summaryFileName, "w");
        print("Summary file is " + summaryFileName);
    except:
        print("Could not create summary file " + summaryFileName);
        summaryFile = sys.stdout;
    summaryFile.write(" SUMMARY FOR FILE " + prefix + ":\n");
    summaryFile.write("------------------------------\n");
    summaryFile.write("Total trace time: "
                      + str(traceStats.getTotalTime()) + "\n");
    summaryFile.write(
        "Function \t Num calls \t Runtime (tot) \t Runtime (avg)\n");
    for fkey, pdr in funcSummaryRecords.iteritems():
        pdr.printSelf(summaryFile);
        summaryFile.write("------------------------------\n");
    lockDataDict = locksSummaryRecords;
    summaryFile.write("\nLOCKS SUMMARY\n");
    for lockKey, lockData in lockDataDict.iteritems():
        lockData.printSelf(summaryFile);
        summaryFile.write("------------------------------\n");
    summaryFile.close();
def getPrefix(fname):
    """Derive the output-file prefix from a log file name.

    Everything before the first ".txt" is the prefix; a name that does
    not contain ".txt" is returned unchanged. (str.split always returns
    at least one element, so the original ``len(words) > 0`` check and
    its else branch were dead code.)
    """
    return fname.split(".txt")[0]
def main():
    """Parse command-line arguments, set the module-level configuration
    globals, and run parse_file() over each input file (or stdin).

    NOTE: Python 2 code — the ``print(...),`` trailing commas below
    suppress the newline of the print *statement*.
    """
    global firstNodeName;
    global graphFilePostfix;
    global graphType;
    global lastNodeName;
    global multipleAcquireWithoutRelease;
    global noMatchingAcquireOnRelease;
    global percentThreshold;
    global separator;
    global tryLockWarning;
    global verbose;
    parser = argparse.ArgumentParser(description=
                                     'Process performance log files');
    parser.add_argument('files', type=str, nargs='*',
                        help='log files to process');
    parser.add_argument('--prefix', dest='prefix', type=str);
    parser.add_argument('--verbose', dest='verbose', action='store_true');
    parser.add_argument('-g', '--graphtype', dest='graphtype',
                        default='enter_exit',
                        help='Default=enter_exit; \
                        Possible values: enter_exit, func_only');
    parser.add_argument('-p', '--percent-threshold', dest='percentThreshold',
                        type=float, default = 0.0,
                        help='Default=0.0; \
                        When we compute the execution flow graph, we will not \
                        include any functions, whose percent execution time \
                        is smaller that value.')
    parser.add_argument('--graph-file-postfix', dest='graphFilePostfix',
                        default='png');
    parser.add_argument('-s', '--separator', dest='separator', default=' ');
    args = parser.parse_args();
    # Publish the parsed options through the module globals that the
    # rest of the script reads.
    if(args.verbose):
        verbose = True;
    graphType = args.graphtype;
    graphFilePostfix = args.graphFilePostfix;
    percentThreshold = args.percentThreshold;
    separator = args.separator;
    if(len(args.files) > 0):
        for fname in args.files:
            # Figure out the prefix for the output files
            if (args.prefix is None):
                prefix = getPrefix(fname);
            else:
                prefix = args.prefix;
            print("Prefix is " + prefix);
            parse_file(fname, prefix);
    else: # We are reading from stdin
        # stdin input has no file name to derive a prefix from, so
        # --prefix becomes mandatory.
        if(args.prefix is None):
            print("I am told to read from stdin (no files are provided), "),
            print("but there is no prefix for the output file. "),
            print("Please use --prefix to provide it.");
            sys.exit();
        else:
            parse_file(None, args.prefix);
# Script entry point.
if __name__ == '__main__':
    main()
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: NoticeSqlBase.py
通知书接口/签名交易接口
契约作业通知书签名查库方法
Description :
1 . 获取建议书状态与类型
2 . 查询契约通知书
3 . 查询投保人签名明文PLAINAPPNTSIGN
4 . 查询投保人签名密文CIPHERAPPNTSIGN
5 . 查询被保人签名明文PLAININSUREDSIGN
6 . 查询被保人签名密文CIPHERINSUREDSIGN
Author : 张 悦
date : 2018/04/24
UpdateTimeFirst : 2018/04/27
-------------------------------------------------
"""
from aeonlifebase.CaseBase import CaseBase
from aeonlifebase.notice.Config import Config
from aeonlifebase.notice.EnumOfNotice import EnumOfNotice
from aeonlifebase.notice.NoticeDao import NoticeDao
import logging
import requests
import json
class NoticeSqlBase(CaseBase):
    """Database lookup helpers for the notice / signature-transaction
    test cases (contract-work notice signing).

    NOTE(review): every method below is declared WITHOUT a ``self``
    parameter, so they only work when invoked unbound through the class
    (e.g. ``NoticeSqlBase.getNoticeInfoByQueryParam(data)``); signatures
    are kept unchanged for backward compatibility.
    NOTE(review): the SQL below is assembled with ``%`` string
    interpolation from request data — a SQL-injection risk; acceptable
    only against a trusted test database. Prefer parameterized queries
    if CaseBase.executeSql supports them.
    """
    def __init__(self):
        CaseBase.__init__(self, Config())

    # 1. Fetch the notice's status and type, matching on
    #    agent_code == userCode and notice_no == noticeNo.
    def getNoticeInfoByQueryParam(data):
        """Return {status, noticeType, noticeInfoId, signRole} for the
        notice, or an empty dict when no row matches."""
        userCode = data['userCode']
        noticeNo = data['noticeNo']
        info = {
            "agent_code" : userCode,
            "notice_no" : noticeNo
        }
        sqlGetNoticeNoByAgentCode = '''SELECT notice_no FROM `notice_agent_relation` nar
        WHERE nar.agent_code = %s ''' % info['agent_code']
        sqlGetNoticeInfo = '''SELECT status, notice_type, notice_info_id, sign_role FROM `notice_info` WHERE notice_no
        in (%s) AND notice_no = '%s' ''' % (sqlGetNoticeNoByAgentCode, info['notice_no'])
        results = CaseBase.executeSql(sqlGetNoticeInfo)
        noticeInfo = {}
        if results:
            noticeInfo = {"status" : results[0][0],
                          "noticeType" : results[0][1],
                          "noticeInfoId" : results[0][2],
                          "signRole" : results[0][3]
                          }
        return noticeInfo

    # 2. Look up file-relation rows for contract-work / underwriting-work /
    #    underwriting-decision / transfer-charge-failure notices.
    def oneOfNoticeFileRelationInfo(data, noticeInfo):
        """Return a list of {noticeFileRelationId} dicts, one per entry
        of data['fileInfoList'] that has a matching
        notice_file_relation row."""
        NoticeInfoId = noticeInfo['noticeInfoId']
        noticeNo = data['noticeNo']
        fileType = noticeInfo['noticeType']
        info = {
            "notice_info_id" : NoticeInfoId,
            "notice_no" : noticeNo,
            "file_type" : fileType,
            "file_id" : None,
            "file_order" : None,
            "url" : None
        }
        fileInfoList = data.get('fileInfoList', None)
        businessWorkInfoResults = []
        if fileInfoList:
            for fileInfo in fileInfoList:
                info['file_id'] = fileInfo['fileId']
                info['file_order'] = fileInfo['order']
                info['url'] = fileInfo['fileUrl']
                # Renamed from ``str``, which shadowed the builtin.
                condition = CaseBase.strAppend(info)
                sql = '''SELECT notice_file_relation_id FROM `notice_file_relation` WHERE %s''' % condition
                result = CaseBase.executeSql(sql)
                if result:
                    noticeFileRelationInfo = {'noticeFileRelationId' : result[0][0]}
                    businessWorkInfoResults.append(noticeFileRelationInfo)
        return businessWorkInfoResults

    # 3. Query the applicant's plaintext signature (PLAINAPPNTSIGN).
    def getPlainAppntSignPLAINAPPNTSIGN(data, noticeInfo):
        """Return [{notice_file_relation_id: ...}] for the applicant's
        plaintext signature record, or [] when absent.
        NOTE(review): file_id and url are both taken from
        plainAppntSignUrl — confirm the file_id column really stores the
        URL in this schema."""
        NoticeInfoId = noticeInfo['noticeInfoId']
        noticeNo = data['noticeNo']
        fileType = EnumOfNotice.PLAINAPPNTSIGN.value
        fileOrder = "1"
        fileId = data['plainAppntSignUrl']
        url = data['plainAppntSignUrl']
        info = {
            "notice_info_id" : NoticeInfoId,
            "notice_no" : noticeNo,
            "file_type" : fileType,
            "file_id" : fileId,
            "file_order" : fileOrder,
            "url" : url
        }
        # Renamed from ``str``, which shadowed the builtin.
        condition = CaseBase.strAppend(info)
        sql = '''SELECT notice_file_relation_id FROM `notice_file_relation` WHERE %s''' % condition
        result = CaseBase.executeSql(sql)
        plainInsuredSignResults = []
        if result:
            noticeFileRelationInfo = {'notice_file_relation_id' : result[0][0]}
            plainInsuredSignResults.append(noticeFileRelationInfo)
        return plainInsuredSignResults

    # 4. Query the applicant's encrypted signature (CIPHERAPPNTSIGN).
    def getPlainAppntSignCIPHERAPPNTSIGN(data, noticeInfo):
        """Return [{notice_file_relation_id: ...}] for the applicant's
        encrypted signature record, or [] when absent."""
        NoticeInfoId = noticeInfo['noticeInfoId']
        noticeNo = data['noticeNo']
        fileType = EnumOfNotice.CIPHERAPPNTSIGN.value
        fileOrder = "1"
        fileId = data['cipherAppntSignUrl']
        url = data['cipherAppntSignUrl']
        info = {
            "notice_info_id" : NoticeInfoId,
            "notice_no" : noticeNo,
            "file_type" : fileType,
            "file_id" : fileId,
            "file_order" : fileOrder,
            "url" : url
        }
        # Renamed from ``str``, which shadowed the builtin.
        condition = CaseBase.strAppend(info)
        sql = '''SELECT notice_file_relation_id FROM `notice_file_relation` WHERE %s''' % condition
        result = CaseBase.executeSql(sql)
        plainInsuredSignResults = []
        if result:
            noticeFileRelationInfo = {'notice_file_relation_id' : result[0][0]}
            plainInsuredSignResults.append(noticeFileRelationInfo)
        return plainInsuredSignResults

    # 5. Query the insured's plaintext signature (PLAININSUREDSIGN).
    def getPlainInsuredSignPLAININSUREDSIGN(data, noticeInfo):
        """Return [{notice_file_relation_id: ...}] for the insured's
        plaintext signature record, or [] when absent. The URL key is
        optional in *data* (may legitimately be missing)."""
        NoticeInfoId = noticeInfo['noticeInfoId']
        noticeNo = data['noticeNo']
        fileType = EnumOfNotice.PLAININSUREDSIGN.value
        fileOrder = "1"
        fileId = data.get('plainInsuredSignUrl', None)
        url = data.get('plainInsuredSignUrl', None)
        info = {
            "notice_info_id" : NoticeInfoId,
            "notice_no" : noticeNo,
            "file_type" : fileType,
            "file_id" : fileId,
            "file_order" : fileOrder,
            "url" : url
        }
        # Renamed from ``str``, which shadowed the builtin.
        condition = CaseBase.strAppend(info)
        sql = '''SELECT notice_file_relation_id FROM `notice_file_relation` WHERE %s''' % condition
        result = CaseBase.executeSql(sql)
        plainInsuredSignResults = []
        if result:
            noticeFileRelationInfo = {'notice_file_relation_id' : result[0][0]}
            plainInsuredSignResults.append(noticeFileRelationInfo)
        return plainInsuredSignResults

    # 6. Query the insured's encrypted signature (CIPHERINSUREDSIGN).
    def getPlainInsuredSignCIPHERINSUREDSIGN(data, noticeInfo):
        """Return [{notice_file_relation_id: ...}] for the insured's
        encrypted signature record, or [] when absent. The URL key is
        optional in *data*."""
        NoticeInfoId = noticeInfo['noticeInfoId']
        noticeNo = data['noticeNo']
        fileType = EnumOfNotice.CIPHERINSUREDSIGN.value
        fileOrder = "1"
        fileId = data.get('cipherInsuredSignUrl', None)
        url = data.get('cipherInsuredSignUrl', None)
        info = {
            "notice_info_id" : NoticeInfoId,
            "notice_no" : noticeNo,
            "file_type" : fileType,
            "file_id" : fileId,
            "file_order" : fileOrder,
            "url" : url
        }
        # Renamed from ``str``, which shadowed the builtin.
        condition = CaseBase.strAppend(info)
        sql = '''SELECT notice_file_relation_id FROM `notice_file_relation` WHERE %s''' % condition
        result = CaseBase.executeSql(sql)
        plainInsuredSignResults = []
        if result:
            noticeFileRelationInfo = {'notice_file_relation_id' : result[0][0]}
            plainInsuredSignResults.append(noticeFileRelationInfo)
        return plainInsuredSignResults

    # 7. Request helper dedicated to the signature-transaction interface.
    def interfaceTest(request_url, request_data, request_method, Config):
        """POST *request_data* to http://<Config.apihost><request_url>.

        Returns [status, parsed_json] on HTTP 200, (status, text) on
        other statuses, and (400, request_method) when the method is not
        POST. NOTE(review): the inconsistent list/tuple return shapes are
        preserved for compatibility; the ``Config`` parameter shadows the
        imported Config class.
        """
        headers = {
            'content-type': "multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW;charset=UTF-8",
            'Cache-Control': "no-cache",
            'Postman-Token': "ba05d087-2a07-18eb-6249-79c4edb12739"
        }
        logging.info('===start request===')
        logging.info(str(request_method) + ', ' + str(request_url) + ', ' + str(request_data).replace('\'', '\"').replace('None', 'null'))
        if request_method == 'POST':
            request = requests.post('http://' + Config.apihost + request_url, data=request_data,
                                    headers=headers)
        else:
            # Only POST is supported; report and bail out early.
            logging.error('request请求方法错误,请确认[Request Method]字段是否正确!!!')
            logging.info('===end request===')
            CaseBase.checkInterfaseHead([400, request_method])
            return 400, request_method
        status = request.status_code
        respjson = request.json()
        resptext = request.text
        resp = resptext.encode("utf-8")
        if status == 200:
            logging.info('成功,' + str(status) + ', ' + resptext)
            logging.info(' ===end request===')
            # CaseBase.checkInterfaseHead([status, resptext])
            CaseBase.checkInterfaseHead([status, json.loads(resp)])
            return [status, json.loads(resp)]
        else:
            logging.error(' 失败!!!, [ ' + str(status) + ' ], ')
            logging.info('===end request===')
            CaseBase.checkInterfaseHead([status, json.loads(resp)])
            return status, resptext

    # 8. Insert the signature-transaction test fixtures.
    def insertData():
        """Insert one success-case and one failure-case notice fixture
        for each of the four notice types (underwriting work 1057,
        contract work 1051, underwriting decision 1069, transfer-charge
        failure 1050). Returns the list of deletion descriptors produced
        by NoticeDao.insertTestData, for use with deleteData()."""
        # Success cases
        # 1057: underwriting-work notice
        insertParamsSuccess1057 = {
            'noticeNo': '10570000',
            'notice_info':{
                'notice_type':EnumOfNotice.UWWORK.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # 1051: contract-work notice
        insertParamsSuccess1051 = {
            'noticeNo': '10510000',
            'notice_info':{
                'notice_type':EnumOfNotice.NEW_BUSINESS_WORK.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # 1069: underwriting-decision notice
        insertParamsSuccess1069 = {
            'noticeNo': '10690000',
            'notice_info':{
                'notice_type':EnumOfNotice.UWDECISION.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # 1050: transfer-charge-failure notice
        insertParamsSuccess1050 = {
            'noticeNo': '10500000',
            'notice_info':{
                'notice_type':EnumOfNotice.TRANSFERPAY.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # Failure cases
        # 1057: underwriting-work notice
        insertParamsFaild1057 = {
            'noticeNo': '10571111',
            'notice_info':{
                'notice_type':EnumOfNotice.UWWORK.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # 1051: contract-work notice
        insertParamsFaild1051 = {
            'noticeNo': '10511111',
            'notice_info':{
                'notice_type':EnumOfNotice.NEW_BUSINESS_WORK.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # 1069: underwriting-decision notice
        insertParamsFaild1069 = {
            'noticeNo': '10691111',
            'notice_info':{
                'notice_type':EnumOfNotice.UWDECISION.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        # 1050: transfer-charge-failure notice
        insertParamsFaild1050 = {
            'noticeNo': '10501111',
            'notice_info':{
                'notice_type':EnumOfNotice.TRANSFERPAY.value,
                'status':'0'
            },
            'notice_agent_relation':[{}],
            'notice_customer_relation':[{}]
        }
        insertParamsList = [insertParamsSuccess1057, insertParamsSuccess1051, insertParamsSuccess1069,
                            insertParamsSuccess1050, insertParamsFaild1057, insertParamsFaild1051, insertParamsFaild1069, insertParamsFaild1050]
        deltDictList = []
        for insertParams in insertParamsList:
            # (removed an unused ``deltDict = {}`` assignment here)
            delDict = NoticeDao.insertTestData(insertParams)
            deltDictList.append(delDict)
        return deltDictList

    # 9. Delete the signature-transaction test fixtures.
    def deleteData(deltDictList):
        """Remove every fixture previously created by insertData()."""
        for deltDict in deltDictList:
            NoticeDao.delTestDataByNoticeNo(deltDict)
|
from django.test import TestCase
from couchdbkit import ResourceConflict, ResourceNotFound
from corehq.util.couch_helpers import ResumableDocsByTypeIterator, TooManyRetries
from dimagi.utils.couch.database import get_db
class TestResumableDocsByTypeIterator(TestCase):
    """Tests for ResumableDocsByTypeIterator: ordered iteration over
    couch docs by type, resumable across iterator instances, with
    per-document retry support."""

    @classmethod
    def setUpClass(cls):
        # Three docs of each of three types; ids are e.g. "foo-0".
        cls.db = get_db()
        cls.docs = []
        for i in range(3):
            cls.create_doc("Foo", i)
            cls.create_doc("Bar", i)
            cls.create_doc("Baz", i)
        cls.doc_types = ["Foo", "Bar", "Baz"]

    @classmethod
    def tearDownClass(cls):
        # Best-effort cleanup; a doc may already have been deleted by a test.
        for doc_id in set(d["_id"] for d in cls.docs):
            try:
                cls.db.delete_doc(doc_id)
            except ResourceNotFound:
                pass

    def setUp(self):
        self.domain = "TEST"
        # Expected id order: sorted by (type, ident) lowercased.
        self.sorted_keys = ["{}-{}".format(n, i)
                            for n in ["bar", "baz", "foo"]
                            for i in range(3)]
        self.itr = self.get_iterator()

    def tearDown(self):
        # Drop persisted resume state so tests stay independent.
        self.itr.discard_state()

    @classmethod
    def create_doc(cls, doc_type, ident):
        doc = {
            "_id": "{}-{}".format(doc_type.lower(), ident),
            "doc_type": doc_type,
        }
        cls.docs.append(doc)
        try:
            cls.db.save_doc(doc)
        except ResourceConflict:
            # Doc already exists from a previous run; reuse it.
            pass
        return doc

    def get_iterator(self):
        # Chunk size 2: the iterator checkpoints in two-doc batches.
        return ResumableDocsByTypeIterator(self.db, self.doc_types, "test", 2)

    def test_iteration(self):
        self.assertEqual([doc["_id"] for doc in self.itr], self.sorted_keys)

    def test_resume_iteration(self):
        itr = iter(self.itr)
        self.assertEqual([next(itr)["_id"] for i in range(6)], self.sorted_keys[:6])
        # stop/resume iteration
        # NOTE(review): resumption restarts at index 4, not 6 — presumably
        # because only fully-consumed chunks of 2 are checkpointed, so the
        # last chunk is replayed; confirm against the iterator's contract.
        self.itr = self.get_iterator()
        self.assertEqual([doc["_id"] for doc in self.itr], self.sorted_keys[4:])

    def test_resume_iteration_after_complete_iteration(self):
        self.assertEqual([doc["_id"] for doc in self.itr], self.sorted_keys)
        # resume iteration
        self.itr = self.get_iterator()
        self.assertEqual([doc["_id"] for doc in self.itr], [])

    def test_iteration_with_retry(self):
        # A retried doc is re-yielded at the end of the iteration.
        itr = iter(self.itr)
        doc = next(itr)
        self.itr.retry(doc)
        self.assertEqual(doc["_id"], "bar-0")
        self.assertEqual(["bar-0"] + [d["_id"] for d in itr],
                         self.sorted_keys + ["bar-0"])

    def test_iteration_complete_after_retry(self):
        # Finishing an iteration that included a retry leaves no residue.
        itr = iter(self.itr)
        self.itr.retry(next(itr))
        list(itr)
        self.itr = self.get_iterator()
        self.assertEqual([doc["_id"] for doc in self.itr], [])

    def test_iteration_with_max_retry(self):
        # The third retry of the same doc must raise TooManyRetries.
        itr = iter(self.itr)
        doc = next(itr)
        ids = [doc["_id"]]
        self.assertEqual(doc["_id"], "bar-0")
        self.itr.retry(doc)
        retries = 1
        for doc in itr:
            ids.append(doc["_id"])
            if doc["_id"] == "bar-0":
                if retries < 3:
                    self.itr.retry(doc)
                    retries += 1
                else:
                    break
        self.assertEqual(doc["_id"], "bar-0")
        with self.assertRaises(TooManyRetries):
            self.itr.retry(doc)
        self.assertEqual(ids, self.sorted_keys + ["bar-0", "bar-0", "bar-0"])
        self.assertEqual(list(itr), [])
        self.assertEqual(list(self.get_iterator()), [])

    def test_iteration_with_missing_retry_doc(self):
        # A retried doc that was deleted before re-yield is silently skipped.
        itr = iter(self.itr)
        doc = next(itr)
        self.assertEqual(doc["_id"], "bar-0")
        self.itr.retry(doc)
        self.db.delete_doc(doc)
        try:
            self.assertEqual(["bar-0"] + [d["_id"] for d in itr],
                             self.sorted_keys)
        finally:
            # Restore the doc so the rest of the suite sees a full set.
            self.create_doc("Bar", 0)
|
# NOTE(review): this module-level name shadows the builtin ``dict``; it is
# kept for backward compatibility with anything importing it by name.
# The original literal spelled the 'Name' key twice ('Zara', then
# 'Manni'); in a dict literal the LAST duplicate wins, so the value was
# always 'Manni' — the dead first entry is removed here.
dict = {'Name': 'Manni', 'Age': 7}
dict.get('Name')
# 'Manni'
dict.keys()
# dict_keys(['Name', 'Age'])  (a plain list on Python 2)
dict.items()
# dict_items([('Name', 'Manni'), ('Age', 7)])
dict.values()
# dict_values(['Manni', 7])
|
# -*- coding: utf-8 -*-
from sys import argv
# Unpack the command line: expects exactly one argument besides the
# script name, otherwise this raises ValueError.
script, name = argv
# Fixed typo: the variable was previously spelled "promot".
prompt = ">: "
print("Hi,{0}.I'm {1}".format(script, name))
# Read three lines from the user, then echo them back.
a = input(prompt)
b = input(prompt)
c = input(prompt)
print("The result is {0},{1},{2}".format(a, b, c))
|
#!/usr/bin/env python3
"""mapper.py"""
import sys
from datetime import datetime as dt
# Hadoop-streaming mapper: read the CSV on stdin and emit
# "ticker<TAB>date<TAB>close" for every row dated in 2017.
stream = sys.stdin
# Discard the header row before processing data lines.
next(stream)
for raw_line in stream:
    # Column layout: ticker, open, close, adj_close, low, high, volume, date.
    (ticker, open_str, close_str, adj_close,
     low_str, high_str, volume, date_str) = raw_line.strip().split(",")
    trade_date = dt.strptime(date_str, '%Y-%m-%d')
    if trade_date.year == 2017:
        print('%s\t%s\t%s' % (ticker, date_str, close_str))
try:
    # The flask.ext.* namespace was removed in Flask 1.0; prefer the
    # direct package import and fall back for legacy installs.
    from flask_sqlalchemy import SQLAlchemy
except ImportError:
    from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.orm import relationship, backref
from app import db, app
class Person(db.Model):
    """A person tied to a login user; may be managed by another Person."""
    id = db.Column(db.Integer, primary_key=True)
    # Owning login account.
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    first_name = db.Column(db.String(128))
    last_name = db.Column(db.String(128))
    # Self-referencing manager link (nullable for unmanaged people).
    managed_by_id = db.Column(db.Integer, db.ForeignKey('person.id'))

    def __init__(self, user_id, first_name, last_name, managed_by_id = None):
        self.user_id = user_id
        self.first_name = first_name
        self.last_name = last_name
        self.managed_by_id = managed_by_id

    def __repr__(self):
        return '<Person %r>' % self.first_name
class Entry(db.Model):
    """A dated entry written by one person (author) about another (subject)."""
    id = db.Column(db.Integer, primary_key=True)
    author_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    subject_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    date = db.Column(db.Date)

    def __init__(self, author_id, subject_id, date):
        self.author_id = author_id
        self.subject_id = subject_id
        self.date = date
class Note(db.Model):
    """A typed note attached to an Entry; author/subject are copied from
    the entry at construction time."""
    id = db.Column(db.Integer, primary_key=True)
    entry_id = db.Column(db.Integer, db.ForeignKey('entry.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    subject_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    note_type = db.Column(db.String(16))
    body = db.Column(db.Text)
    is_pinned = db.Column(db.Boolean)
    # Optional link to a Feedback row this note originated from.
    linked_feedback = db.Column(db.Integer, db.ForeignKey('feedback.id'))
    # NOTE(review): misleadingly named — this relationship targets Entry,
    # not Person; renaming would break existing callers, so it is only
    # flagged here.
    person = relationship("Entry", backref="notes")

    def __init__(self, entry, note_type, body, is_pinned=False, linked_feedback=None):
        # Denormalize author/subject from the owning entry.
        self.entry_id = entry.id
        self.subject_id = entry.subject_id
        self.author_id = entry.author_id
        self.note_type = note_type
        self.body = body
        self.linked_feedback = linked_feedback
        self.is_pinned = is_pinned
class Feedback(db.Model):
    # One piece of feedback given by one person (from_id) to another (to_id).
    # At most one Note can reference it (one-to-one via uselist=False).
    id = db.Column(db.Integer, primary_key=True)
    from_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    to_id = db.Column(db.Integer, db.ForeignKey('person.id'))
    # presumably: whether the feedback has been delivered — TODO confirm
    has_communicated = db.Column(db.Boolean)
    body = db.Column(db.Text)
    # Reverse link from Note.linked_feedback; one-to-one.
    note = relationship("Note", backref="feedback", uselist=False)
    # Explicit foreign_keys needed because two FKs point at person.id.
    from_person = relationship("Person", backref="feedback_given", foreign_keys=[from_id])
    to_person = relationship("Person", backref="feedback_taken", foreign_keys=[to_id])
|
#import sys
#import threading
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from edc.subject.registration.models import RegisteredSubject
from ...models import SubjectIdentifier
class Command(BaseCommand):
args = '--check <subject_type> --update <subject_type>'
help = 'Check / Update SubjectIdentifier model from RegisteredSubject Model'
option_list = BaseCommand.option_list + (
make_option('--update',
action='store_true',
dest='update',
default=False,
help=('Update SubjectIdentifier. (DATA WILL BE CHANGED.).')), )
option_list += (
make_option('--check',
action='store_true',
dest='check',
default=False,
help=('Check SubjectIdentifier. (Safe).')), )
def handle(self, *args, **options):
try:
subject_type = args[0]
except:
raise CommandError('missing parameter <subject_type>')
if options['check']:
self.check(subject_type)
elif options['update']:
self.update(subject_type)
else:
raise CommandError('Unknown option, Try --help for a list of valid options')
def check(self, subject_type):
self._process(subject_type, 'check')
def update(self, subject_type):
self._process(subject_type, 'update')
def _process(self, subject_type, action):
n = 0
tot = RegisteredSubject.objects.filter(subject_identifier__isnull=False, subject_type=subject_type).count()
for rs in RegisteredSubject.objects.filter(subject_identifier__isnull=False, subject_type=subject_type):
if not SubjectIdentifier.objects.filter(identifier=rs.subject_identifier).exists():
n += 1
print ' {0} / {1} {2}, missing.'.format(n, tot, rs.subject_identifier)
if action == 'update':
SubjectIdentifier.objects.create(identifier=rs.subject_identifier)
print ' created'
else:
print ' {0} / {1} {2}, found.'.format(n, tot, rs.subject_identifier)
if action == 'check':
print '{0} / {1} identifiers NOT found in RegisteredSubject but not in SubjectIdentifier'.format(n, tot)
print 'Done.'
|
#!/usr/bin/env python3
"""
YOLO: You Only Look Once
arxiv paper: https://arxiv.org/pdf/1506.02640.pdf
"""
import os
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras
import numpy as np
from keras import layers
from keras import models
from keras.applications import vgg16
#from keras.applications import inception_v3, resnet50, mobilenet
from keras import losses
from keras.layers import advanced_activations
class YOLODetectionNetwork(object):
    """YOLO-style detection network: VGG16 ImageNet backbone plus extra
    convolutional blocks and a dense head, compiled with MSE loss."""
    def __init__(self):
        self.model = models.Sequential()
        # From Block #1 to Block #4 - ImageNet
        # VGG16 convolutional base stands in for the paper's blocks 1-4.
        self.model.add(vgg16.VGG16(weights='imagenet', include_top=False, input_shape=(224, 224, 3)))
        #""" Block #1 """
        #self.model.add(layers.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), activation=advanced_activations.LeakyReLU, input_size=(224, 224, 3)))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        #""" Block #2 """
        #self.model.add(layers.Conv2D(filters=192, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        #""" Block #3 """
        #self.model.add(layers.Conv2D(filters=128, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        #""" Block #4 """
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=256, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU))
        #self.model.add(layers.MaxPool2D(pool_size=(2, 2), strides=(2, 2)))
        """ Block #5
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), input_shape=(None, 7, 7, 512), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(2, 2), strides=(2, 2), activation=advanced_activations.LeakyReLU()))
        """
        """ Block #5 """
        # NOTE(review): the active block uses kernel_size=(1, 1) everywhere,
        # while the disabled reference above (and the YOLO paper) use (3, 3)
        # for most of these layers — confirm whether this was intentional.
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), input_shape=(None, 7, 7, 512), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=512, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        """ Block #6
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation=advanced_activations.LeakyReLU()))
        # Linear activation function for the final layer
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))
        """
        """ Block #6 """
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation=advanced_activations.LeakyReLU()))
        # Linear activation function for the final layer
        self.model.add(layers.Conv2D(filters=1024, kernel_size=(1, 1), activation='relu'))
        """ Block #7 """
        # NOTE(review): Flatten is commented out, so Dense is applied to the
        # 4-D conv output (per-position), not to a flattened vector as in the
        # paper's detection head — confirm this is the intended shape.
        self.model.add(layers.Dense(units=1024))
        #self.model.add(layers.Flatten())
        self.model.add(layers.Dense(units=30))
        self.model.summary()
        """ Compile """
        self.model.compile(optimizer='adam', loss=losses.mean_squared_error, metrics=['acc'])
if __name__ == '__main__':
    # Building the network downloads VGG16 weights (if absent) and prints
    # a layer summary via model.summary().
    detector = YOLODetectionNetwork()
'''
4.Implement the sieve of Eratosthenes algorithm for generating all prime numbers less than a
given bound
'''
def CiurulEratostene(n):
    """Print every prime number strictly less than ``n``, one per line.

    Implements the sieve of Eratosthenes in O(n log log n) time and
    O(n) space. Prints nothing for n < 3.
    """
    # Mark every candidate 0..n as potentially prime (0 and 1 are never read).
    prime = [True] * (n + 1)
    p = 2
    # Only divisors up to sqrt(n) need to be sieved.
    while p * p <= n:
        if prime[p]:
            # Cross out multiples of p starting from p*p; smaller multiples
            # were already crossed out by smaller primes.
            for multiple in range(p * p, n + 1, p):
                prime[multiple] = False
        p += 1
    # Emit all primes below the bound.
    for p in range(2, n):
        if prime[p]:
            print(p)
if __name__=='__main__':
    #input the number
    # Interactive driver: loops forever asking for a bound (exit via Ctrl-C).
    while True:
        n = input("Give your n = ")
        #validating the input
        # Accept only a non-empty string of decimal digits.
        if n and n.isdigit():
            print("The result is")
            CiurulEratostene(int(n))
        else:
            print("Invalid input")
from flask_sqlalchemy import SQLAlchemy
import graphene
from main.models import User, Post
from main.schema import PostObject
# NOTE(review): this creates a brand-new SQLAlchemy() instance rather than
# importing the one the models were defined against — presumably the session
# still works because flask-sqlalchemy shares state, but verify this is bound
# to the app before relying on db.session below.
db = SQLAlchemy()
class CreatePost(graphene.Mutation):
    """GraphQL mutation that stores a new Post, attributed to ``username``
    when such a user exists (the post is saved either way)."""

    class Arguments:
        title = graphene.String(required=True)
        body = graphene.String(required=True)
        username = graphene.String(required=True)

    post = graphene.Field(lambda: PostObject)

    def mutate(self, info, title, body, username):
        author = User.query.filter_by(username=username).first()
        new_post = Post(title=title, body=body)
        # Attach the author only when the username resolves to a user.
        if author is not None:
            new_post.author = author
        db.session.add(new_post)
        db.session.commit()
        return CreatePost(post=new_post)
class Mutation(graphene.ObjectType):
    # Root mutation type; exposes createPost in the GraphQL schema.
    create_post = CreatePost.Field()
|
import re
import gnomad
import ensembl
import csv
import sys
import time
def find_norm_freq(protein_name):
    """Collect amino-acid-changing variants of *protein_name* from gnomAD.

    Looks up the canonical transcript, fetches its variants, and keeps those
    whose 'consequence' looks like an HGVS protein change (e.g. "p.Ala123Gly").

    Returns a list of [protein_name, consequence, allele_frequency] entries.
    """
    canonical_id = gnomad.get_canonical_id(protein_name)
    mutations = gnomad.get_variants(canonical_id)
    # Matches the residue number inside "p.Xaa123Yaa"; compiled once instead
    # of on every loop iteration.
    match_pattern = re.compile(r'(?<=p\.[A-Z][a-z][a-z])\d+(?=[A-Z][a-z]{2}$)')
    results = []  # final results
    for mutation in mutations:
        consequence = mutation.get('consequence')
        af = mutation.get('af')
        # Guard: records without a consequence string previously crashed
        # (re.search(None) raises TypeError).
        if consequence and match_pattern.search(consequence):
            results.append([protein_name, consequence, af])
            print("Results found")
    return results
# Script entry: dump the OBSCN variant rows as a tab-separated text file,
# one variant per line.
results = find_norm_freq("OBSCN")
# NOTE(review): hard-coded, user-specific output path.
with open("/Users/jennyxu/Desktop/phospho-files/obscn.txt", 'w') as f:
    for result in results:
        for item in result:
            f.write(str(item))
            f.write("\t")
        f.write("\n")
#
|
from sym import Sym
from num import Num
from test import O
import sys
import re
class Data:
    # Holds one csv-style table: column roles (weights / class / independent),
    # per-column Num/Sym summaries, and the raw row data.
    def __init__(self):
        self.w = {}          # col -> objective weight: -1 for '<', +1 for '>'
        self.syms = {}       # col -> Sym summary (symbolic columns)
        self.nums = {}       # col -> Num summary (names containing <, > or $)
        self._class = None   # index of the class column (name contains '!')
        self.rows = []       # raw row data as read
        self.name = []       # names of the columns actually used
        self._use = []       # csv indices of used columns (those not '?'-skipped)
        self.indeps = []     # independent columns (no weight, not the class)
    def indep(self, c):
        # A column is independent when it carries no weight and is not the class.
        return c not in self.w and self._class != c
    def dep(self, c):
        return not self.indep(c)
    def header(self, cells):
        # Classify each header cell; names starting with '?' are skipped entirely.
        for i, v in enumerate(cells):
            if not re.match(r'^\?', v):
                c = len(self._use)
                self._use.append(i)
                self.name.append(v)
                # '<', '>' or '$' in the name marks a numeric column.
                if re.search('[<>$]', v):
                    self.nums[c] = Num([])
                else:
                    self.syms[c] = Sym([])
                # '<' = minimize, '>' = maximize, '!' = class, else independent.
                if re.search('<', v):
                    self.w[c] = -1
                elif re.search('>', v):
                    self.w[c] = 1
                elif re.search('!', v):
                    self._class = c
                else:
                    self.indeps.append(c)
    def row(self, cells):
        # Append one data row; update the matching Num/Sym summary for every
        # cell except the '?' (missing value) placeholder.
        r = len(self.rows)
        self.rows.append([])
        for c, c0 in enumerate(self._use):
            x = cells[c0]
            if x != '?':
                if self.nums.get(c) is not None:
                    self.nums[c].numInc(float(x))
                else:
                    self.syms[c].symInc(x)
            self.rows[r].append(x)
def rows1(src):
    """Read CSV lines from iterable *src* into a Data table, then print the
    per-column summaries (mode/frequency for symbols, mu/sd for numbers)."""
    data = Data()
    first = True
    for line in src:
        # Strip tabs/CR/LF and '#...' end-of-line comments before splitting.
        # The quantifier must be '+': with the original '*' the empty
        # alternative matched (zero-width) at every position, so the '#.*'
        # branch was never tried and comments were never removed.
        line = re.sub('[\t\r\n]+|#.*', "", line)
        cells = [i.strip() for i in line.split(',')]
        # ''.split(',') == [''] so this is always true; kept for clarity.
        if len(cells) > 0:
            if first:
                data.header(cells)
            else:
                data.row(cells)
            first = False
    print("\t\t\tn\tmode\tfrequency")
    for k, v in data.syms.items():
        print(f'{k+1}\t{data.name[k]}\t{v.n}\t{v.mode}\t{v.most}')
    print('\n')
    print('\t\t\tn\tmu\tsd')
    for k, v in data.nums.items():
        print(f'{k+1}\t{data.name[k]}\t{v.n}\t{v.mu:.2f}\t{v.sd:.2f}')
def lines(src=None):
    """Yield input lines from one of three sources.

    src is None      -- read from stdin (lines keep their newline);
    src ends .csv/.dat -- treat src as a file path and stream the file;
    otherwise        -- treat src as a literal string (splitlines strips
                        the newlines).
    """
    if src is None:
        for line in sys.stdin:
            yield line
    # The original test was `src[-3:] in ["csv", ".dat"]`: a 3-character
    # slice can never equal ".dat", so .dat paths silently fell through to
    # the splitlines branch. endswith() handles both extensions correctly.
    elif src.endswith((".csv", ".dat")):
        with open(src) as fs:
            for line in fs:
                yield line
    else:
        for line in src.splitlines():
            yield line
def rows(s):
    """Parse source *s* (file path, literal string, or None for stdin) and
    print its per-column summaries."""
    rows1(lines(s))
@O.k
def test():
    # Smoke test: summarize three sample csv files (paths relative to CWD).
    print("\nweather.csv\n")
    rows("weather.csv")
    print("\nweatherLong.csv\n")
    rows("weatherLong.csv")
    print("\nauto.csv\n")
    rows("auto.csv")
from django.urls import path
from . import views
# Namespace for reversing these routes (e.g. 'posts:detail').
app_name = 'posts'
# Route table for the posts app. Note 'feed/' still matches despite coming
# after '<int:pk>/' because the int converter rejects non-numeric segments.
urlpatterns = [
    path('', views.PostList.as_view(), name='list'),
    path('<int:pk>/', views.PostDetail.as_view(), name='detail'),
    path('<int:post_id>/comments/', views.CommentList.as_view(), name='comment-list'),
    path('<int:post_id>/comments/<int:pk>/', views.CommentDetail.as_view(), name='comment-detail'),
    path('<int:pk>/like/', views.PostLike.as_view(), name='like'),
    path('feed/', views.PostFeed.as_view(), name='feed'),
]
|
import pandas as pd
def __remove_percentile_outliers(data: pd.DataFrame, col, lower=0, upper=0) -> pd.DataFrame:
    """Drop the bottom ``lower`` and top ``upper`` fractions of rows of
    *data*, ranked by column *col*. Fractions are rounded to whole rows."""
    lower_values_to_remove = round(data.shape[0] * lower)
    upper_values_to_remove = round(data.shape[0] * upper)
    data = data.sort_values(col)
    # Chop the largest values first, then the smallest.
    data = data.head(data.shape[0] - upper_values_to_remove)
    data = data.tail(data.shape[0] - lower_values_to_remove)
    return data
def remove_percentile_outliers_from_group(data: pd.DataFrame, dimensions_for_grouping=['dataset_size', 'epsilon', 'query'], metric='result', lower=0, upper=0) -> pd.DataFrame:
    '''
    Remove percentile outliers of *metric* within each group defined by
    *dimensions_for_grouping*. No row order is preserved.
    '''
    # DataFrame.append was removed in pandas 2.0; collect the trimmed group
    # frames and concatenate once instead (also O(n) rather than O(n^2)).
    trimmed = [
        __remove_percentile_outliers(group, col=metric, lower=lower, upper=upper)
        for _, group in data.groupby(dimensions_for_grouping, dropna=False)
    ]
    if not trimmed:
        return pd.DataFrame()
    return pd.concat(trimmed)
import argparse
import cv2
from ocr.detector import image_pyramid, sliding_window_batch
from ocr.helper import save_image_batch
from keras.models import load_model
import ntpath
import numpy as np
# Default hyper-parameters for the image pyramid and the sliding window.
pyramid_scale, pyramid_min_width, pyramid_min_height = 0.8, 150, 150
sl_w_step, sl_w_width, sl_w_height = 5, 30, 30
ap = argparse.ArgumentParser(description='Detecting text blocks with sliding window algorithm.')
ap.add_argument('image', help='Path to the image')
ap.add_argument('model', type=str, help='Path to Keras model')
ap.add_argument('output', type=str, help='Output directory (default: .)')
# Pyramid arguments
ap.add_argument('-ps', type=float, default=pyramid_scale, metavar='SCALE',
                help='Pyramid scale rate (default: {})'.format(pyramid_scale))
ap.add_argument('-pw', type=int, default=pyramid_min_width, metavar='P_WIDTH',
                help='Minimum width of scaled image (default: {})'.format(pyramid_min_width))
ap.add_argument('-ph', type=int, default=pyramid_min_height, metavar='P_HEIGHT',
                help='Minimum height of scaled image (default: {})'.format(pyramid_min_height))
# Sliding window arguments
ap.add_argument('-s', type=int, default=sl_w_step, metavar='STEP',
                help='Sliding window step size in pixels (default: {})'.format(sl_w_step))
ap.add_argument('-sw', type=int, default=sl_w_width, metavar='SW_WIDTH',
                help='Sliding window width (default: {})'.format(sl_w_width))
ap.add_argument('-sh', type=int, default=sl_w_height, metavar='SW_HEIGHT',
                help='Sliding window height (default: {})'.format(sl_w_height))
args = ap.parse_args()
# Ugly assigning
image_path, model_path, directory_to_write, pyramid_scale, pyramid_min_width, pyramid_min_height, sl_w_step, sl_w_width, sl_w_height \
    = args.image, args.model, args.output, args.ps, args.pw, args.ph, args.s, args.sw, args.sh
# Preparing image and model
model = load_model(model_path)
print('Model {} loaded.'.format(model_path))
image = cv2.imread(image_path)
# Processing image
# NOTE(review): assumes directory_to_write ends with a path separator.
output_directory = '{}{}/'.format(directory_to_write, ntpath.basename(image_path))
# pyramid_number counts pyramid levels but is not otherwise used.
pyramid_number = 0
batch_size = 2048
for pyramid_image in image_pyramid(image, scale=pyramid_scale, min_size=(pyramid_min_height, pyramid_min_width)):
    # Getting batches of images from image window algorithm
    for window_batch in sliding_window_batch(pyramid_image, sl_w_step, (sl_w_height, sl_w_width), batch_size):
        # Getting prediction from keras model
        # NOTE(review): this reshape assumes every batch has exactly
        # batch_size windows — confirm sliding_window_batch pads the last,
        # smaller batch, otherwise this raises. /255 normalizes to [0, 1].
        predictions = model.predict(window_batch.reshape(batch_size, 3, 30, 30)/255)
        # Process the batch only if at least one window scores >= 0.9.
        if bool(np.any(predictions[:, 0] >= 0.9)):
            # Bucket windows into 1%-wide confidence bands from 90 to 99+.
            predictions_by_percent = {
                '99': (predictions[:, 0] >= 0.99),
                '98': (predictions[:, 0] < 0.99) & (predictions[:, 0] >= 0.98),
                '97': (predictions[:, 0] < 0.98) & (predictions[:, 0] >= 0.97),
                '96': (predictions[:, 0] < 0.97) & (predictions[:, 0] >= 0.96),
                '95': (predictions[:, 0] < 0.96) & (predictions[:, 0] >= 0.95),
                '94': (predictions[:, 0] < 0.95) & (predictions[:, 0] >= 0.94),
                '93': (predictions[:, 0] < 0.94) & (predictions[:, 0] >= 0.93),
                '92': (predictions[:, 0] < 0.93) & (predictions[:, 0] >= 0.92),
                '91': (predictions[:, 0] < 0.92) & (predictions[:, 0] >= 0.91),
                '90': (predictions[:, 0] < 0.91) & (predictions[:, 0] >= 0.90),
            }
            # Saving relevant images by corresponding subdirectories
            for percentage_bound in predictions_by_percent:
                if bool(np.any(predictions_by_percent[percentage_bound])) is True:
                    save_image_batch(window_batch[predictions_by_percent[percentage_bound]],
                                     '{}{}/'.format(output_directory, percentage_bound))
    pyramid_number += 1
|
"""Benchmark Search algorithm"""
# pylint: disable=missing-docstring, invalid-name
import netCDF4
import bench
import util
import obsoper.grid
class BenchmarkRealData(bench.Suite):
    # Benchmarks obsoper.grid.Search.lower_left over real observation and
    # model-grid data, scaling the observation count from 1 to 10^5.
    def setUp(self):
        # Download/ensure the sample files, then load observation positions
        # and the model grid (transposed so longitude varies along axis 0).
        for path in ["sample_class4.nc",
                     "sample_prodm.nc"]:
            util.grab(path)
        with netCDF4.Dataset("data/sample_class4.nc") as dataset:
            self.observed_longitudes = dataset.variables["longitude"][:]
            self.observed_latitudes = dataset.variables["latitude"][:]
        with netCDF4.Dataset("data/sample_prodm.nc") as dataset:
            self.grid_longitudes = dataset.variables["TLON"][:].T
            self.grid_latitudes = dataset.variables["TLAT"][:].T
            self.grid_values = dataset.variables["aice"][:]
        # Correct longitudes [0, 360) to [-180, 180)
        self.grid_longitudes[self.grid_longitudes > 180] -= 360.
    def bench_interpolate_given_10e0_observations(self):
        self.run_interpolate(1)
    def bench_interpolate_given_10e1_observations(self):
        self.run_interpolate(10)
    def bench_interpolate_given_10e2_observations(self):
        self.run_interpolate(100)
    def bench_interpolate_given_10e3_observations(self):
        self.run_interpolate(1000)
    def bench_interpolate_given_10e4_observations(self):
        self.run_interpolate(10**4)
    def bench_interpolate_given_10e5_observations(self):
        self.run_interpolate(10**5)
    def run_interpolate(self, number):
        # Builds the search structure, then locates the lower-left grid
        # corner for the first `number` observations.
        interpolator = obsoper.grid.Search(self.grid_longitudes,
                                           self.grid_latitudes)
        interpolator.lower_left(self.observed_longitudes[:number],
                                self.observed_latitudes[:number])
|
import pandas as pd
import numpy as np
from tqdm import tqdm
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.style as style
# Apply the fivethirtyeight look to every plot produced by this module.
style.use('fivethirtyeight')
# Enable high resolution plots
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('retina')
def plot_dataframe(df, plot_title, x_name, y_name, legend_loc, legend_labels):
    """plot a dataframe using the given specifications
    options for legend_loc: best, upper right, upper left, lower left, lower right, right, center left, center right, lower center, upper center, center
    example: helperfun.plot_dataframe(timeseries_2030[['DE_load', 'DE_wind']], 'Titel', 'timesteps', 'load and wind', 'lower right', ['load','wind'])
    """
    ax = sns.lineplot(data=df)
    ax.set(xlabel=x_name, ylabel=y_name)
    plt.title(plot_title)
    plt.legend(loc=legend_loc, labels=legend_labels)
    #plt.xticks(rotation=45)
    # plt.show() takes no positional axes argument (its only parameter is the
    # keyword-only `block`); passing `ax` raised a TypeError on matplotlib 3.x.
    plt.show()
    return None
def make_EE_dict(settings, timeseries_2030):
    """make EE dictionary.
    Arguments:
        settings -- dictionary of settings
        timeseries_2030 -- dataframe with timeseries data for 2030
    Returns:
        EE -- dictionary with hourly electricity generation data for each country
    Side effects:
        None
    """
    countries = settings['countries']
    sources = settings['electricity_sources']
    EE = {}
    for t in tqdm(settings['timesteps'], ascii=True, desc='generation of EE dictionary'):
        for s in countries:
            # One record per (timestep, country): total plus per-source values.
            record = {'sum': timeseries_2030.loc[t, s+'_EE_sum']}
            for source in sources:
                record[source] = timeseries_2030.loc[t, s+'_'+source]
            EE[t, s] = record
    return EE
def make_EV_dict(settings, timeseries_2030):
    """make EV dictionary.
    Arguments:
        settings -- dictionary of settings
        timeseries_2030 -- dataframe with timeseries data for 2030
    Returns:
        EV -- dictionary with hourly electricity demand data for each country
    Side effects:
        None
    """
    # One load value per (timestep, country) pair.
    return {
        (t, s): timeseries_2030.loc[t, s+'_load']
        for t in tqdm(settings['timesteps'], ascii=True, desc='generation of EV dictionary')
        for s in settings['countries']
    }
def make_V_df_from_V_dict(settings, V):
    """make dataframes out of gurobi variable solutions, which come as dictionaries.
    Arguments:
        settings -- dictionary of settings
        V -- dictionary of dictionaries with all optimal variable values
    Returns:
        EV -- dictionary of dataframes with all optimal variable values
    Side effects:
        None
    """
    S_neighbours = settings['neighbours']
    V_df = dict()
    for V_key in settings['plot_variables']:
        # Hourly per-country variables: keys are (timestep, country).
        if V_key in ['H','GtP','PtG','EI','EX']:
            # NOTE(review): columns/8760 hours are hard-coded — presumably
            # fixed to the three-country, one-year model; confirm.
            V_df[V_key] = pd.DataFrame(columns=['DE', 'FR', 'NL'], index=range(8760))
            for (row, column), value in tqdm( V[V_key].items(), ascii=True, desc=str('building '+V_key+' dataframe from '+V_key+' dict' )):
                V_df[V_key].loc[row, column] = value
            V_df[V_key] = V_df[V_key].astype('float')
        # Hourly transport variables: keys are (timestep, (from, to)).
        elif V_key in ['HT','ET']:
            V_df[V_key] = pd.DataFrame(columns=[str(x[0]+' --> '+x[1]) for x in S_neighbours], index=range(8760))
            for (row, (s1,s2)), value in tqdm( V[V_key].items(), ascii=True, desc=str('building '+V_key+' dataframe from '+V_key+' dict' )):
                # Only keep pairs that are actual neighbours.
                if (s1,s2) in S_neighbours:
                    V_df[V_key].loc[row, str(s1+' --> '+s2)] = value
            V_df[V_key] = V_df[V_key].astype('float')
        # Scalar transport limits: keys are (from, to).
        elif V_key in ['HTL','ETL']:
            print(str('building '+V_key+' dataframe from '+V_key+' dictionary'))
            V_df[V_key] = dict()
            for (s1,s2), value in V[V_key].items():
                V_df[V_key][str(s1+' --> '+s2)] = [value]
            V_df[V_key] = pd.DataFrame.from_dict(V_df[V_key])
        # Scalar per-country limits: wrap each value in a 1-element list so
        # from_dict produces a single-row frame.
        elif V_key in ['GtPL','PtGL','HL']:
            print(str('building '+V_key+' dataframe from '+V_key+' dictionary.'))
            V[V_key] = {k: [v] for k, v in V[V_key].items()}
            V_df[V_key] = pd.DataFrame.from_dict(V[V_key])
        else:
            print(str('Building a dataframe for the '+V_key+' variable has not been implemented yet.'))
    return V_df
def get_limits(settings):
    """get limits for rolling horizon optimization.
    Arguments:
        settings -- dictionary of settings (reads 'limits_source')
    Returns:
        HTL, ETL, GtPL, PtGL, HL -- transport and production limit dicts
    Raises:
        NotImplementedError -- for the not-yet-implemented 'recherche' source
        ValueError -- for any unknown 'limits_source'
    Side effects:
        Reads the pickled limits file for 'basismodell'.
    """
    if settings['limits_source'] == 'basismodell':
        # `with` closes the file handle (the original leaked it).
        with open('./data/internal_data/optimal_limits/limits.p', "rb") as fh:
            limits = pickle.load(fh)
        HTL = limits['HTL']
        ETL = limits['ETL']
        # Scalar limits are stored as 1-element lists; unwrap them.
        GtPL = {k: v[0] for k, v in limits['GtPL'].items()}
        PtGL = {k: v[0] for k, v in limits['PtGL'].items()}
        HL = {k: v[0] for k, v in limits['HL'].items()}
        return HTL, ETL, GtPL, PtGL, HL
    if settings['limits_source'] == 'recherche':
        # Previously this only printed a message and then crashed with
        # UnboundLocalError on the shared return; fail explicitly instead.
        raise NotImplementedError(
            "settings['limits_source'] == 'recherche' wurde noch nicht implementiert")
    raise ValueError(
        "Bitte checke settings['limits_source'] in master_RH.py und die gegebenen Optionen.")
import sys
import re
import sqlite3
import os
import keyring
import getpass
import subprocess
import tempfile
import numpy as np
import pandas as pd
from trm import cline
from trm.cline import Cline
import hipercam as hcam
from hipercam.utils import target_lookup
__all__ = [
"calsearch",
]
#######################################################################
#
# calsearch -- search for calibration runs matching a set of input runs
#
#######################################################################
def calsearch(args=None):
    description = \
    """``calsearch runs output``
    Given a csv file from |logsearch| (possibly with rows edited, but
    with the same columns), this searches for matching calibration
    files. This is to aid data export. It first searches for flat
    fields, then for the combined list of data files and flats, it
    searches for bias frames. The flats do not try to determine if
    the filter matches in the case of ultracam because the filters
    can be unreliable.
    It works by searching through the database .db files which are
    accessed by password in the same way via access to your keyring
    as described in |logsearch|.
    Arguments::
    runs : str
    csv input file
    diff : int
    maximum time difference in days to allow between a frame and
    a matching calibration file. Mainly here to prevent
    excessive numbers of matches. The "night" column is
    used. Thus diff=0 only allows calibrations from the same
    night to be considered, but diff=1 allows any preceding or
    following nights too.
    output : str
    Name of CSV file to store the results. The results include
    the original runs along with matching flats and biases and
    biases for the flats as well. This is readable by oocalc for
    instance (UTF-8, semi-colon separators disabled). """
    command, args = cline.script_args(args)
    with Cline("HIPERCAM_ENV", ".hipercam", command, args) as cl:
        # register parameters
        cl.register("runs", Cline.LOCAL, Cline.PROMPT)
        cl.register("diff", Cline.LOCAL, Cline.PROMPT)
        cl.register("output", Cline.LOCAL, Cline.PROMPT)
        runs = cl.get_value(
            "runs", "input csv file of runs",
            cline.Fname('results', '.csv')
        )
        diff = cl.get_value(
            "diff", "maximum time difference for matching calibrations (days)", 10, 0
        )
        output = cl.get_value(
            "output", "name of spreadsheet of results ['none' to ignore]",
            cline.Fname('results', '.csv', cline.Fname.NEW), ignore="none"
        )
    # Read the runs into pandas
    runs_df = pd.read_csv(runs)
    # Get database files.
    # First create directory for them if need be
    dbases_dir = os.path.join(
        os.environ.get(
            'HIPERCAM_ENV',
            os.path.join(os.environ["HOME"], '.hipercam')
        ),
        'dbases'
    )
    os.makedirs(dbases_dir, 0o700, True)
    # Then download them. Passwords will be prompted and, if the
    # subsequent download is successful, will be stored in the
    # system keyring
    server = 'https://cygnus.astro.warwick.ac.uk/phsaap/'
    dbases = []
    for dbase in ('ultracam', 'ultraspec', 'hipercam'):
        pword = keyring.get_password("Data logs", dbase)
        prompted = False
        if pword is None:
            pword = getpass.getpass(f'{dbase} logs password: ')
            prompted = True
        # accumulate list of files and equivalent table names
        fname = os.path.join(dbases_dir, f'{dbase}.db')
        dbases.append((fname, dbase))
        if pword != "":
            # use 'curl' to download. Check timestamp to see if
            # file is updated.
            if os.path.exists(fname):
                start_time = os.path.getmtime(fname)
            else:
                start_time = ''
            args = [
                'curl', '-u', f'{dbase}:{pword}', '-o', fname,
                '-z', fname, f'{server}/{dbase}/logs/{dbase}.db'
            ]
            result = subprocess.run(
                args, capture_output=True, universal_newlines=True
            )
            if result.returncode and not os.path.exists(fname):
                # the second string was missing its f-prefix, so the curl
                # output was never interpolated into the error message
                raise hcam.HipercamError(
                    f'Failed to download {dbase}.db. Return from curl:'
                    + f'stdout={result.stdout}, stderr={result.stderr}'
                )
            elif result.returncode:
                print(
                    f'Failed to download {dbase}.db. Will use old'
                    'local copy although it may be out of date'
                )
            elif prompted:
                # successful, will store password in the keyring
                keyring.set_password("Data logs", dbase, pword)
                print(f'Downloaded {dbase}.db')
                print(f'   stored password for {dbase} in keyring')
            end_time = os.path.getmtime(fname)
            if end_time == start_time:
                print(f'   {dbase}.db unchanged on server')
            else:
                print(f'   {dbase}.db updated from server')
        else:
            print(f'No attempt to update {fname}')
        print()
    # write runs to be checked to junk file. this is because
    # i can't get in memory option to work for some reason
    dbname = 'zzz_junk.db'
    cnx = sqlite3.connect(dbname)
    runs_df.to_sql(name='tab', con=cnx, if_exists='replace')
    cnx.commit()
    cnx.close()
    # Search for flat field frames
    results = []
    for dbase, dtable in dbases:
        # connect to big database
        conn = sqlite3.connect(f"file:{dbase}?mode=ro", uri=True)
        # Add database / table runs.tab representing the runs we wish
        # to search over
        cursor = conn.cursor()
        cursor.execute(f'ATTACH "{dbname}" as runs')
        # Build query string to locate matching flat frames.
        #
        # Designed to:
        #
        # 1) only return entries from big table
        # 2) only from matching observing runs
        # 3) should not be the same run
        # 4) should have 1x1 binning and more than 10 frames
        # 5) have some indication by name, type or comment that it is a flat
        # 6) be taken in twilight (sun between -15 and 0 deg, or unknown)
        # 7) main table referred to as m, runs we seek flats for as t
        query = f'SELECT DISTINCT m.* FROM main.{dtable} AS m\n'
        query += f"""INNER JOIN runs.tab AS t
        ON (m.obs_run = t.obs_run AND m.instrument = t.instrument)
        WHERE (m.night != t.night OR m.run_no != t.run_no)
        AND m.binning = '1x1' AND m.nframe > 10
        AND (m.target LIKE '%flat%' OR m.run_type = 'flat' OR m.comment LIKE '%flat%')
        AND ABS(JULIANDAY(m.night)-JULIANDAY(t.night)) <= {diff}
        AND ((m.sun_alt_start IS NULL OR (m.sun_alt_start > -15 AND m.sun_alt_start < 0))
        OR (m.sun_alt_end IS NULL OR (m.sun_alt_end > -15 AND m.sun_alt_end < 0)))
        """
        # NOTE(review): the original code did `query += f` for ultraspec here,
        # where `f` was an undefined name — every ultraspec search crashed
        # with NameError. The intended ultraspec-specific clause is unknown,
        # so the branch is dropped until it can be reconstructed.
        print(f'Searching for flats in "{dbase}" with SQL code:\n\n{query}\n')
        res = pd.read_sql_query(query, conn)
        if len(res):
            print(res)
            results.append(res)
        # close connection
        conn.close()
    # Add the "flats" to the selected runs.
    dbname = 'zzz_junk.db'
    cnx = sqlite3.connect(dbname)
    for res in results:
        res.to_sql(name='tab', con=cnx, if_exists='append')
    cnx.commit()
    cnx.close()
    # Now search the whole lot for bias frames
    results = [runs_df,]
    for dbase, dtable in dbases:
        # connect to big database
        conn = sqlite3.connect(f"file:{dbase}?mode=ro", uri=True)
        # Add database / table runs.tab representing the runs we wish
        # to search over
        cursor = conn.cursor()
        cursor.execute(f'ATTACH "{dbname}" as runs')
        # Build query string to locate matching bias frames.
        #
        # Designed to:
        #
        # 1) only return entries from big table
        # 2) only from matching observing runs
        # 3) should not be the same run
        # 4) should match read speed and binning
        # 5) have more than 10 frames
        # 6) have some indication by name, type or comment that it is a bias
        # 7) main table referred to as m, runs we seek biases for as t
        query = f'SELECT DISTINCT m.* FROM main.{dtable} AS m\n'
        query += f"""INNER JOIN runs.tab AS t
        ON (m.obs_run = t.obs_run AND m.instrument = t.instrument)
        WHERE (m.night != t.night OR m.run_no != t.run_no)
        AND m.read_speed = t.read_speed AND m.binning = t.binning AND m.nframe > 10
        AND (m.target LIKE '%bias%' OR m.run_type = 'bias' OR m.comment LIKE '%bias%')
        AND ABS(JULIANDAY(m.night)-JULIANDAY(t.night)) <= {diff}"""
        print(f'Searching for biases in "{dbase}" with SQL code:\n\n{query}\n')
        res = pd.read_sql_query(query, conn)
        if len(res):
            print(res)
            results.append(res)
        # close connection
        conn.close()
    # Save the results.
    biglist = pd.concat(results, sort=False)
    biglist.to_csv(output)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import sys
try:
    # NOTE(review): pypandoc.convert was removed in newer pypandoc releases
    # (replaced by convert_file / convert_text) — this only works with old
    # pypandoc versions.
    import pypandoc
    readme = pypandoc.convert('README.md', 'rst')
except(IOError, ImportError):
    # Fall back to the raw markdown when pandoc/pypandoc is unavailable.
    with open('README.md') as f:
        readme = f.read()
# Runtime dependencies.
install_requires = [
    'cachetools>=1.1.5',
    'requests>=2.7.0',
    'xmltodict>=0.9.2',
]
# Test-only dependencies (pulled in by pytest-runner).
tests_require = [
    'pytest',
    'requests-mock==0.7.0'
]
setup(
    name='pinkopy',
    version='2.2.dev',
    description='Python wrapper for Commvault api',
    long_description=readme,
    author='Herkermer Sherwood',
    author_email='theherk@gmail.com',
    url='https://github.com/theherk/pinkopy',
    download_url='https://github.com/theherk/pinkopy/archive/2.2.dev.zip',
    packages=find_packages(),
    platforms=['all'],
    license='MIT',
    install_requires=install_requires,
    setup_requires=['pytest-runner'],
    tests_require=tests_require,
    classifiers=[
        'Development Status :: 4 - Beta',
        'License :: Other/Proprietary License',
        'License :: OSI Approved :: MIT License',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Unix',
        'Operating System :: POSIX',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Utilities',
    ],
)
|
def solve(n):
    """Return the last number counted before all ten decimal digits have
    appeared among the digits of n, 2n, 3n, ...

    Returns the string "INSOMNIA" for n == 0, since the digit set would
    never grow beyond {'0'}.
    """
    if n == 0:
        return "INSOMNIA"
    seen = set(str(n))          # digits observed so far
    current = n
    while len(seen) < 10:
        current += n
        seen.update(str(current))
    return str(current)
if __name__ == "__main__":
t = int(raw_input())
for i in xrange(1, t+1):
n = int(raw_input())
print "Case #%d: %s" % (i, solve(n))
|
# coding: utf-8
from gpiozero import Button
import ap310
import time
if __name__ == "__main__":
token = ap310.login()
vermelho = Button(2)
verde = Button(3)
while True:
if vermelho.is_pressed and verde.is_pressed:
print('Ambos')
r = ap310.changeLed(token, "yellow", "blink")
ap310.apply(token)
time.sleep(5)
elif vermelho.is_pressed:
print('Vermelho')
r = ap310.changeLed(token, "red", "pulse")
ap310.apply(token)
time.sleep(3)
elif verde.is_pressed:
print('Verde')
r = ap310.changeLed(token, "green", "on")
ap310.apply(token)
|
import scipy.io as sio
import numpy as np
import sklearn
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.svm import SVC
from sklearn.grid_search import GridSearchCV
#from sklearn.preprocessing import normalize
#from MySVM import TrainMySVM, TestMySVM
from TrainMyClassifier import TrainMyClassifier as MC
from TestMyClassifier import TestMyClassifier as TC
'''
def normalize2D(array):
if array.dtype != float:
array = array.astype(float)
for i in range(array.shape[0]):
array[i] = np.divide(array[i], np.sum(array[i]))
return array
def GetClassLabels(svm_predict_prob):
#print np.sum(svm_predict_prob)
ClassLabels = np.zeros((svm_predict_prob.shape[0], svm_predict_prob.shape[1]+1))
for i in range(svm_predict_prob.shape[0]):
ClassLabels[i, :-1] = svm_predict_prob[i]
sorted_prob = np.sort(svm_predict_prob[i])[::-1]
interval = np.subtract(sorted_prob[:-1], sorted_prob[1:])
ClassLabels[i, -1] = 1.0 - interval[0] / np.sum(interval)
#ClassLabels = normalize(ClassLabels, axis=1)
ClassLabels = normalize2D(ClassLabels)
#print ClassLabels
#print np.sum(ClassLabels[0])
#raw_input()
return ClassLabels
def TrainMySVM(XEstimate, XValidate, ClassLabelsEstimate, ClassLabelsValidate, Parameters=None):
# Each label in ClassLabelsEstimate and ClassLabelsValidate is 5 dimensional vector like [1,-1,-1,-1,-1];
# So ClassLabelsEstimate and ClassLabelsValidate are both N-by-5 numpy arrays.
train_labels = np.int8(np.zeros(ClassLabelsEstimate.shape[0]))
cv_labels = np.int8(np.zeros(ClassLabelsValidate.shape[0]))
for i in range(ClassLabelsEstimate.shape[0]):
train_labels[i] = np.where(ClassLabelsEstimate[i] == 1)[0]
for i in range(ClassLabelsValidate.shape[0]):
cv_labels[i] = np.where(ClassLabelsValidate[i] == 1)[0]
'''
"""
C_range = 10. ** np.arange(-2, 3) # 10 is best
gamma_range = 10. ** np.arange(-2, 3) # 1 is best
param_grid = dict(gamma=gamma_range, C=C_range)
grid = GridSearchCV(SVC(max_iter=100, decision_function_shape='ovo', probability=True, verbose=True), param_grid=param_grid, cv=5)
grid.fit(XEstimate, train_labels)
svm = grid.best_estimator_
"""
'''
svm = SVC(C=10, gamma=1, max_iter=100, decision_function_shape='ovo', probability=True, verbose=True)
svm.fit(XEstimate, train_labels)
cv_predict_prob = svm.predict_proba(XValidate)
#cv_predict = svm.predict(XValidate)
#cv_acc = sum([1 for i in range(cv_labels.shape[0]) if cv_predict[i] == cv_labels[i]]) / float(cv_labels.shape[0])
#print svm.score(XValidate, cv_labels)
#print "==== CV Accuracy: %f ===="%cv_acc
Yvalidate = GetClassLabels(cv_predict_prob)
EstParameters = {}
InternalParams = ['support_', 'support_vectors_', 'n_support_', 'dual_coef_', 'coef_',
'intercept_', '_sparse', 'shape_fit_', '_dual_coef_', '_intercept_', 'probA_', 'probB_', '_gamma', 'classes_']
EstParameters['HyperParameters'] = svm.get_params()
EstParameters['EstimatedParameters'] = {}
for p in InternalParams:
try:
EstParameters['EstimatedParameters'][p] = eval('svm.%s'%p)
except:
continue
return Yvalidate, EstParameters
def TestMySVM(XTest, EstParameters, Parameters=None):
hyperParams = EstParameters['HyperParameters']
trainedParams = EstParameters["EstimatedParameters"]
svm = SVC(C=hyperParams['C'], gamma=hyperParams['gamma'], kernel=hyperParams['kernel'], max_iter=hyperParams['max_iter'], decision_function_shape='ovo', probability=True)
InternalParams = ['support_', 'support_vectors_', 'n_support_', 'dual_coef_', 'coef_',
'intercept_', '_sparse', 'shape_fit_', '_dual_coef_', '_intercept_', 'probA_', 'probB_', '_gamma', 'classes_']
for p in InternalParams:
try:
exec('svm.%s = trainedParams[p]'%p)
except:
continue
test_predict_prob = svm.predict_proba(XTest)
Ytest = GetClassLabels(test_predict_prob)
return Ytest
'''
if __name__ == '__main__':
    # Load the feature vectors and their targets from the MATLAB files.
    # Each target row is a +/-1 vector like [1, -1, -1, -1, -1].
    data = sio.loadmat('Proj2FeatVecsSet1.mat')
    data = data['Proj2FeatVecsSet1']
    labels = sio.loadmat('Proj2TargetOutputsSet1.mat')
    labels = labels['Proj2TargetOutputsSet1']
    # Scalar class index per sample (position of the +1 entry); needed
    # for stratified splitting below.
    scalar_labels = np.uint(np.zeros(labels.shape[0]))
    for i in range(scalar_labels.shape[0]):
        scalar_labels[i] = np.where(labels[i] == 1)[0]
    #print labels
    #print sum([1 for i in range(20000, 25000) if np.where(labels[i]==1)[0]==4])
    #raw_input()
    # Dead experimental code kept by the original author as a string
    # literal (concatenated data+labels array).
    '''
    class_num = 5
    #test_data = data[20000:]
    #test_labels = labels[20000:]
    data_comb = np.zeros((data.shape[0], data.shape[1]+class_num))
    #print data_comb.shape
    #raw_input()
    data_comb[:, :data.shape[1]] = data
    data_comb[:, data.shape[1]:] = labels
    '''
    #print sum([1 for i in range(20000) if np.where(data_comb[i, data[:20000].shape[1]:]==1)[0]==4])
    #raw_input()
    #print data_comb[:, data[:20000].shape[1]:].shape
    #print labels[0]
    #print labels[:2, :class_num-5].shape
    #print data_comb
    #print data_comb.shape
    #input()
    #print data.shape
    #print labels.shape
    # Experiment configuration.
    class_num = 5
    nFold = 5
    C = 1
    gamma = 1 # C=1 & gamma=1 has the best performance
    #max_iter = 500
    param = {'type':'GPC'}
    # Outer split: train/test folds.  Inner split: estimate/validate.
    kf_cv = StratifiedKFold(n_splits=nFold, shuffle=True)
    kf_test = StratifiedKFold(n_splits=nFold, shuffle=True)
    i = 0
    for train_idx, test_idx in kf_test.split(data, scalar_labels):
        #print cv_idx
        i += 1
        print "------- Fold-%d -------" % i
        # More dead experimental code preserved as a string literal.
        '''
        train_data_labels = data_comb[train_idx]
        train_data = train_data_labels[:, :data.shape[1]]
        train_labels = np.int8(np.round(train_data_labels[:, data.shape[1]:])) # Each label is like [1, -1, -1, -1, -1]
        #print train_labels.shape
        #raw_input()
        cv_data_labels = data_comb[cv_idx]
        cv_data = cv_data_labels[:, :data.shape[1]]
        cv_labels = np.int8(np.round(cv_data_labels[:, data.shape[1]:])) # Each label is like [1, -1, -1, -1, -1]
        '''
        train_data = data[train_idx]
        train_labels = labels[train_idx]
        train_scalar_labels = scalar_labels[train_idx]
        test_data = data[test_idx]
        test_labels = labels[test_idx]
        # NOTE(review): this inner loop only keeps the est/cv arrays of
        # the LAST split — confirm a single estimate/validate split per
        # fold is the intent.
        for est_idx, cv_idx in kf_cv.split(train_data, train_scalar_labels):
            est_data = train_data[est_idx]
            est_labels = train_labels[est_idx]
            cv_data = train_data[cv_idx]
            cv_labels = train_labels[cv_idx]
        print param['type']
        # Train and evaluate the project classifier (TrainMyClassifier /
        # TestMyClassifier).
        Yvalidate, EstParameters, vecNum = MC(est_data, cv_data, est_labels, cv_labels, param)
        print 'Vector Number:%d'%vecNum
        test_predict = TC(test_data, EstParameters, param) # Each label is like [0.1, 0.2, 0.31, 0.1, 0.13, 0.16]
        correct_predict = 0
        out_any_class = 0
        # NOTE(review): this loop reuses `i`, clobbering the outer fold
        # counter used in the fold banner above — confirm.
        for i in range(test_predict.shape[0]):
            #print test_predict[i], np.argmax(test_predict[i])
            #print cv_labels[i], np.argmax(cv_labels[i])
            #raw_input()
            #print test_predict[i]
            #raw_input()
            if np.argmax(test_predict[i]) == np.argmax(test_labels[i]):
                correct_predict += 1
            if np.argmax(test_predict[i]) == class_num:
                # Index class_num is the extra "none of the classes" slot.
                out_any_class += 1
        test_acc = correct_predict / float(test_predict.shape[0])
        out_percentage = out_any_class / float(test_predict.shape[0])
        #test_acc = sum([1 for i in range(test_predict.shape[0]) if np.argmax(test_predict[i]) == np.argmax(cv_labels[i])]) / float(cv_labels.shape[0])
        print "==== Test Accuracy: %f Samples out of classes: %d(%f) ====" % (test_acc, out_any_class, out_percentage)
        # Pause between folds (Python 2).
        raw_input()
#coding: utf8
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
import unittest
from tests.integration.for_sqlite.helper import Student, Course, Score
from sweet_orm.orm import atomic, Model
class TestTransactionSQLite(unittest.TestCase):
    """Integration tests for sweet_orm transactions on SQLite.

    Covers the `atomic` decorator (bare, called without args, and called
    with an explicit db) and manual commit/rollback via the context
    manager.
    """

    def setUp(self):
        Model.db = Student.db
        self.remove_record()

    def tearDown(self):
        Model.db = None
        self.remove_record()

    def remove_record(self):
        # Delete children (Score) before their parents to keep foreign
        # keys satisfied.
        Score.delete_all()
        Student.delete_all()
        Course.delete_all()

    def _insert_records(self):
        # Shared fixture: 2 students, 2 courses, 3 scores.
        s1 = Student.create(name='lily')
        s2 = Student.create(name='jon')
        c1 = Course.create(name='math')
        c2 = Course.create(name='sport')
        Score.create(student=s1, course=c1, value=100)
        Score.create(student=s1, course=c2, value=90)
        Score.create(student=s2, course=c1, value=95)

    def test_atomic_transaction_successful(self):
        @atomic
        def insert():
            self._insert_records()
        insert()
        self.assertEqual(2, Student.count())
        self.assertEqual(2, Course.count())
        self.assertEqual(3, Score.count())

    def test_atomic_without_db_transaction_successful(self):
        @atomic()
        def insert():
            self._insert_records()
        insert()
        self.assertEqual(2, Student.count())
        self.assertEqual(2, Course.count())
        self.assertEqual(3, Score.count())

    def test_atomic_with_db_transaction_successful(self):
        @atomic(Student.db)
        def insert():
            self._insert_records()
        insert()
        self.assertEqual(2, Student.count())
        self.assertEqual(2, Course.count())
        self.assertEqual(3, Score.count())

    def test_atomic_transaction_failed(self):
        @atomic
        def insert():
            self._insert_records()
            # Fixed: the original raised the undefined name `Exeption`,
            # which surfaced as a NameError whose message is not
            # "Fake Exception", silently breaking the assertion below.
            raise Exception("Fake Exception")
        with self.assertRaises(Exception) as err:
            insert()
        self.assertEqual("Fake Exception", str(err.exception))
        # The whole atomic block must have been rolled back.
        self.assertEqual(0, Student.count())
        self.assertEqual(0, Course.count())
        self.assertEqual(0, Score.count())

    def test_manual_transaction_with(self):
        with Student.transaction() as t:
            s1 = Student.create(name='lily')
            s2 = Student.create(name='jon')
            t.commit()
            c1 = Course.create(name='math')
            c2 = Course.create(name='sport')
            t.commit()
            Score.create(student=s1, course=c1, value=100)
            Score.create(student=s1, course=c2, value=90)
            Score.create(student=s2, course=c1, value=95)
            t.rollback()
        # Only the uncommitted scores are rolled back.
        self.assertEqual(2, Student.count())
        self.assertEqual(2, Course.count())
        self.assertEqual(0, Score.count())
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
import logging
from django.core.management.base import BaseCommand
from django.db.models import Q
from django.utils import timezone
from squalaetp.models import Xelon, ProductCategory, Indicator
from psa.models import Multimedia, Ecu
from utils.conf import XLS_SQUALAETP_FILE, XLS_DELAY_FILES, XLS_TIME_LIMIT_FILE, string_to_list
from utils.django.models import defaults_dict
from utils.django.validators import comp_ref_isvalid
from utils.data.analysis import ProductAnalysis
from ._excel_squalaetp import ExcelSqualaetp
from ._excel_analysis import ExcelDelayAnalysis, ExcelTimeLimitAnalysis
logger = logging.getLogger('command')
class Command(BaseCommand):
    """Import/update the Squalaetp tables from Excel exports and
    maintain the relations and indicators derived from them."""

    help = 'Interact with the Squalaetp tables in the database'
    # Total row count for _progress_bar; set by _xelon_name_update.
    MAX_SIZE = None

    def add_arguments(self, parser):
        """Declare the command-line options accepted by this command."""
        parser.add_argument(
            '-S',
            '--squalaetp_file',
            dest='squalaetp_file',
            help='Specify import Excel Squalaetp file',
        )
        parser.add_argument(
            '-D',
            '--delay_files',
            dest='delay_files',
            help='Specify import Excel Delay files',
        )
        parser.add_argument(
            '-T',
            '--time_limit_file',
            dest='time_limit_file',
            help='Specify import Excel Time Limit file',
        )
        parser.add_argument(
            '--xelon_update',
            action='store_true',
            dest='xelon_update',
            help='Update Xelon table',
        )
        parser.add_argument(
            '--relations',
            action='store_true',
            dest='relations',
            help='Add the relationship between the xelon and corvet tables',
        )
        parser.add_argument(
            '--prod_category',
            action='store_true',
            dest='prod_category',
            help='Add values in ProductCategory table',
        )
        parser.add_argument(
            '--xelon_name_update',
            action='store_true',
            dest='xelon_name',
            help='Update Xelon name'
        )

    def handle(self, *args, **options):
        """Dispatch to the requested sub-task.

        Each Excel option falls back to the path configured in
        utils.conf when not supplied on the command line.
        """
        self.stdout.write("[SQUALAETP] Waiting...")
        if options['xelon_update']:
            if options['squalaetp_file'] is not None:
                squalaetp = ExcelSqualaetp(options['squalaetp_file'])
            else:
                squalaetp = ExcelSqualaetp(XLS_SQUALAETP_FILE)
            if options['delay_files']:
                delay_files = string_to_list(options['delay_files'])
            else:
                delay_files = XLS_DELAY_FILES
            if options['time_limit_file']:
                time_limit = ExcelTimeLimitAnalysis(options['time_limit_file'])
            else:
                time_limit = ExcelTimeLimitAnalysis(XLS_TIME_LIMIT_FILE)
            self._squalaetp_file(Xelon, squalaetp)
            self._delay_files(Xelon, squalaetp, delay_files)
            self._time_limit_files(Xelon, squalaetp, time_limit)
            self._indicator()
        elif options['relations']:
            self._foreignkey_relation()
        elif options['prod_category']:
            self._product_category()
        elif options['xelon_name']:
            self._xelon_name_update()

    def _foreignkey_relation(self):
        """Re-save Xelon rows missing a corvet or product link.

        Saving re-triggers the model's save() logic, which is what
        establishes the missing relation.
        """
        self.stdout.write("[SQUALAETP_RELATIONSHIPS] Waiting...")
        nb_xelon, nb_category = 0, 0
        for xelon in Xelon.objects.filter(corvet__isnull=True):
            xelon.save()
            nb_xelon += 1
        self.stdout.write(
            self.style.SUCCESS("[SQUALAETP] Relationships update completed: CORVET/XELON = {}".format(nb_xelon))
        )
        for xelon in Xelon.objects.filter(product__isnull=True):
            xelon.save()
            nb_category += 1
        self.stdout.write(
            self.style.SUCCESS("[SQUALAETP] Relationships update completed: CATEGORY/XELON = {}".format(nb_category))
        )

    def _squalaetp_file(self, model, excel):
        """Create/update Xelon rows from the main Squalaetp export.

        Rows absent from the export are deactivated; rows with manual
        OLD_VIN/OLD_PROD actions are left untouched.
        """
        self.stdout.write("[XELON] Waiting...")
        nb_prod_before, nb_prod_update = model.objects.count(), 0
        nb_category_change = 0
        if not excel.ERROR:
            # Deactivate rows not in the export, unless already closed
            # or missing a return date.
            model.objects.exclude(Q(numero_de_dossier__in=excel.xelon_number_list()) |
                                  Q(type_de_cloture__in=['Réparé', 'Rebut', 'N/A']) |
                                  Q(date_retour__isnull=True)).update(type_de_cloture='N/A', is_active=False)
            for row in excel.read({'is_active': True}):
                xelon_number = row.get("numero_de_dossier")
                defaults = defaults_dict(model, row, "numero_de_dossier")
                try:
                    # Skip files that carry manual correction actions.
                    if self._actions_check(xelon_number):
                        self.stdout.write(f"[XELON] Xelon file {xelon_number} not modified")
                        continue
                    obj, created = model.objects.update_or_create(numero_de_dossier=xelon_number, defaults=defaults)
                    if not created:
                        nb_prod_update += 1
                    # Files stored in a ZONECE location with no animator
                    # are reclassified as study ("ETUDE") products.
                    if "ZONECE" in obj.lieu_de_stockage and obj.product.animator is None:
                        obj.product.category = "ETUDE"
                        obj.product.save()
                        nb_category_change += 1
                except Exception as err:
                    logger.error(f"[XELON_CMD] {xelon_number} - {err}")
            model.objects.exclude(numero_de_dossier__in=excel.xelon_number_list()).update(is_active=False)
            nb_prod_after = model.objects.count()
            self.stdout.write(f"[SQUALAETP_FILE] '{XLS_SQUALAETP_FILE}' => OK")
            self.stdout.write(self.style.SUCCESS(f"[XELON] Product category changed: {nb_category_change}"))
            self.stdout.write(
                self.style.SUCCESS(
                    "[XELON] data update completed: EXCEL_LINES = {} | ADD = {} | UPDATE = {} | TOTAL = {}".format(
                        excel.nrows, nb_prod_after - nb_prod_before, nb_prod_update, nb_prod_after
                    )
                )
            )
        else:
            self.stdout.write(f"[SQUALAETP_FILE] {excel.ERROR}")

    @classmethod
    def _actions_check(cls, xelon_number):
        """Return True when the file carries an OLD_VIN/OLD_PROD manual
        action and must therefore not be overwritten by the import."""
        action_filter = Q(content__startswith='OLD_VIN') | Q(content__startswith='OLD_PROD')
        query = Xelon.objects.filter(numero_de_dossier=xelon_number, actions__isnull=False).first()
        if query and query.actions.filter(action_filter):
            return True
        return False

    def _delay_files(self, model, squalaetp, delay_files):
        """Create/update Xelon rows from the delay-analysis exports.

        The first file of the list is read with datedelta=0; ValueError
        rows are collected and reported in one log line.
        """
        self.stdout.write("[DELAY] Waiting...")
        nb_prod_before, nb_prod_update, nrows, value_error_list = model.objects.count(), 0, 0, []
        xelon_list, delay_list = squalaetp.xelon_number_list(), []
        for count, file in enumerate(delay_files):
            if count == 0:
                delay = ExcelDelayAnalysis(file, datedelta=0)
            else:
                delay = ExcelDelayAnalysis(file)
            if not delay.ERROR:
                delay_list += delay.xelon_number_list()
                for row in delay.table():
                    xelon_number = row.get("numero_de_dossier")
                    product_model = row.get("modele_produit")
                    defaults = defaults_dict(model, row, "numero_de_dossier", "modele_produit")
                    try:
                        obj, created = model.objects.update_or_create(numero_de_dossier=xelon_number, defaults=defaults)
                        if not created:
                            nb_prod_update += 1
                        # Only fill the product model when it is empty;
                        # never overwrite an existing value.
                        if product_model and not obj.modele_produit:
                            obj.modele_produit = product_model
                            obj.save()
                    except ValueError:
                        value_error_list.append(xelon_number)
                    except Exception as err:
                        logger.error(f"[DELAY_CMD] {xelon_number} - {err}")
                if value_error_list:
                    logger.error(f"[DELAY_CMD] ValueError row: {', '.join(value_error_list)}")
                self.stdout.write(f"[DELAY_FILE] '{file}' => OK")
                nrows += delay.nrows
            else:
                self.stdout.write(f"[DELAY_FILE] {delay.ERROR}")
        nb_prod_after = model.objects.count()
        self.stdout.write(
            self.style.SUCCESS(
                f"[DELAY_CMD] data update completed: EXCEL_LINES = {nrows} | " +
                f"ADD = {nb_prod_after - nb_prod_before} | UPDATE = {nb_prod_update} | TOTAL = {nb_prod_after}"
            )
        )
        self.stdout.write(f"[DELAY] Nb dossiers xelon: {len(xelon_list)} - Nb dossiers delais: {len(delay_list)}")

    def _time_limit_files(self, model, squalaetp, excel):
        """Create/update Xelon rows from the time-limit export."""
        self.stdout.write("[TIME_LIMIT] Waiting...")
        nb_prod_before, nb_prod_update, value_error_list = model.objects.count(), 0, []
        if not excel.ERROR:
            for row in excel.read_all():
                xelon_number = row.get("numero_de_dossier")
                defaults = defaults_dict(model, row, "numero_de_dossier")
                try:
                    obj, created = model.objects.update_or_create(numero_de_dossier=xelon_number, defaults=defaults)
                    if not created:
                        nb_prod_update += 1
                except ValueError:
                    value_error_list.append(xelon_number)
                except Exception as err:
                    logger.error(f"[TIME_LIMIT_CMD] {xelon_number} - {err}")
            if value_error_list:
                logger.error(f"[TIME_LIMIT_CMD] ValueError row: {', '.join(value_error_list)}")
            nb_prod_after = model.objects.count()
            self.stdout.write(f"[TIME_LIMIT_FILE] '{XLS_TIME_LIMIT_FILE}' => OK")
            self.stdout.write(
                self.style.SUCCESS(
                    "[TIME_LIMIT] data update completed: EXCEL_LINES = {} | ADD = {} | UPDATE = {} | TOTAL = {}".format(
                        excel.nrows, nb_prod_after - nb_prod_before, nb_prod_update, nb_prod_after
                    )
                )
            )
        else:
            self.stdout.write(f"[TIME_LIMIT_FILE] {excel.ERROR}")

    def _product_category(self):
        """Populate ProductCategory from the distinct product models,
        bucketed by workshop island (ilot) / product family rules."""
        xelons = Xelon.objects.exclude(modele_produit="")
        psa = xelons.filter(Q(ilot='PSA') | Q(famille_produit__exact='TBORD PSA')).exclude(famille_produit='CALC MOT')
        clarion = xelons.filter(ilot='CLARION')
        etude = xelons.filter(ilot='LaboQual').exclude(famille_produit='CALC MOT')
        autre = xelons.filter(ilot='ILOTAUTRE').exclude(Q(famille_produit='CALC MOT') |
                                                        Q(famille_produit__exact='TBORD PSA'))
        calc_mot = xelons.filter(famille_produit='CALC MOT')
        defaut = xelons.filter(ilot='DEFAUT').exclude(famille_produit='CALC MOT')
        cat_list = [
            (psa, "PSA"), (clarion, "CLARION"), (etude, "ETUDE"), (autre, "AUTRE"), (calc_mot, "CALCULATEUR"),
            (defaut, "DEFAUT")
        ]
        cat_old = ProductCategory.objects.count()
        for model, category in cat_list:
            values_list = list(model.values_list('modele_produit').distinct())
            values_list = list(set(values_list))
            for prod in values_list:
                # get_or_create: an existing product keeps its category.
                ProductCategory.objects.get_or_create(product_model=prod[0], defaults={'category': category})
        cat_new = ProductCategory.objects.count()
        self.stdout.write(
            self.style.SUCCESS(f"[SQUALAETP] ProductCategory update completed: ADD = {cat_new - cat_old}")
        )

    def _indicator(self):
        """Store today's product-analysis counters in the Indicator table."""
        self.stdout.write("[INDICATOR] Waiting...")
        prod = ProductAnalysis()
        defaults = {
            "products_to_repair": prod.pending,
            "express_products": prod.express,
            "late_products": prod.late,
            "output_products": 0,
        }
        obj, created = Indicator.objects.update_or_create(date=timezone.now(), defaults=defaults)
        # Link every pending Xelon file to today's indicator.
        for query in prod.pendingQueryset:
            obj.xelons.add(query)
        self.stdout.write(self.style.SUCCESS("[INDICATOR] data update completed"))

    def _xelon_name_update(self):
        """Propagate Xelon product names to the Multimedia/Ecu tables,
        keyed by the matching CORVET component reference."""
        self.stdout.write("[ECU & MEDIA] Waiting...")
        xelons = Xelon.objects.filter(
            corvet__isnull=False, product__isnull=False, date_retour__isnull=False).order_by('date_retour')
        self.MAX_SIZE, number = xelons.count(), 0
        for xelon in xelons:
            corvet, product = xelon.corvet, xelon.product
            if corvet and product:
                # CORVET type code -> component reference field.
                ecu_dict = {
                    "NAV": corvet.electronique_14x, "RAD": corvet.electronique_14f,
                    "EMF": corvet.electronique_14l, "CMB": corvet.electronique_14k, "BSI": corvet.electronique_14b,
                    "CMM": corvet.electronique_14a, "HDC": corvet.electronique_16p, "BSM": corvet.electronique_16b
                }
                for corvet_type, comp_ref in ecu_dict.items():
                    if product.corvet_type == corvet_type and comp_ref_isvalid(comp_ref):
                        # NAV/RAD references live in Multimedia, the
                        # rest in Ecu.
                        if product.corvet_type in ["NAV", "RAD"]:
                            obj, created = Multimedia.objects.update_or_create(
                                hw_reference=comp_ref,
                                defaults={'xelon_name': xelon.modele_produit, 'type': product.corvet_type})
                        else:
                            obj, created = Ecu.objects.update_or_create(
                                comp_ref=comp_ref,
                                defaults={'xelon_name': xelon.modele_produit, 'type': product.corvet_type}
                            )
                        break
            # Refresh the progress bar every 100 rows.
            # NOTE(review): tested before the increment, so the first
            # redraw happens at number == 1 — confirm intended.
            if number % 100 == 1:
                self._progress_bar(number)
            number += 1
        self.stdout.write(self.style.SUCCESS(f"\r\n[ECU & MEDIA] data update completed: NB_UPDATE={self.MAX_SIZE}"))

    def _progress_bar(self, current_size, bar_length=80):
        """Redraw an in-place textual progress bar on stdout.

        current_size -- rows processed so far (against self.MAX_SIZE).
        bar_length -- total width of the bar in characters.
        """
        if self.MAX_SIZE is not None:
            percent = float(current_size) / self.MAX_SIZE
            arrow = '-' * int(round(percent*bar_length) - 1) + '>'
            spaces = ' ' * (bar_length - len(arrow))
            print("\r[{0}]{1}% ".format(arrow + spaces, int(round(percent*100))), end="", flush=True)
|
import diffcp.cones as cone_lib
import numpy as np
from scipy import sparse
def scs_data_from_cvxpy_problem(problem):
    """Return the SCS-format data (A, b, c, cone_dims) of a cvxpy problem."""
    import cvxpy as cp
    problem_data = problem.get_problem_data(cp.SCS)[0]
    to_solver_dict = cp.reductions.solvers.conic_solvers.scs_conif.dims_to_solver_dict
    cone_dims = to_solver_dict(problem_data["dims"])
    return problem_data["A"], problem_data["b"], problem_data["c"], cone_dims
def least_squares_eq_scs_data(m, n, seed=0):
    """Generate a conic problem with unique solution."""
    import cvxpy as cp
    np.random.seed(seed)
    assert m >= n
    # Draw order matters for reproducibility with the seeded RNG:
    # right-hand side first, then the coefficient matrix.
    rhs = np.random.randn(m)
    coeffs = np.random.randn(m, n)
    assert np.linalg.matrix_rank(coeffs) == n
    x = cp.Variable(n)
    objective = cp.Minimize(cp.pnorm(coeffs @ x - rhs, 1))
    problem = cp.Problem(objective, [x >= 0, cp.sum(x) == 1.0])
    return scs_data_from_cvxpy_problem(problem)
def get_random_like(A, randomness):
    """Build a sparse matrix sharing A's sparsity pattern.

    `randomness` is a callable that, given a length, returns a random
    vector of that length; its values fill A's nonzero positions.
    """
    row_idx, col_idx = A.nonzero()
    vals = randomness(A.nnz)
    return sparse.csc_matrix((vals, (row_idx, col_idx)), shape=A.shape)
|
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 25 14:36:19 2019
@author: kg
"""
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
# Font that contains CJK glyphs so the Chinese labels/title render.
# NOTE(review): hard-coded Windows font path — only works on Windows.
font = FontProperties(fname=r"C:\Windows\Fonts\simhei.ttf", size=14)
# Two interleaved bar series comparing the two clustering algorithms.
plt.bar([1, 3, 5, 7], [19, 23, 50, 68], label='我们的聚类算法')
plt.bar([2, 4, 6, 8], [18, 20, 47, 65], label='ClusStream')
# plt.bar parameters:
#   x: bar positions; height: bar heights; width: bar width (default 0.8)
#   bottom: y coordinate of the bar base (default 0)
#   align: 'center'/'edge' — whether x marks the bar center or its edge
plt.legend()
plt.xlabel('K的数量')
plt.ylabel('误差平方和')
# Fixed: the Text keyword is `fontproperties` (lowercase); the original
# `FontProperties=font` is not a valid property, so the CJK title fell
# back to the default font.
plt.title(u'运行时间对比', fontproperties=font)
plt.show()
|
#matplotlib 사용하기
from matplotlib import pyplot
# Monthly average temperatures for 2018, one bar per month.
months = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12')
avg_temps = [0.5, 2.0, 6.7, 12.7, 18.6, 21.7, 24.4, 25.2, 20.5, 15.0, 10.0, 3.5]
pyplot.bar(months, avg_temps,
           width=0.4, color='red', label='temp 2018')
pyplot.xlabel('Month')
pyplot.ylabel('Average Temperature')
pyplot.title("Weather Bar Chart")
pyplot.legend(loc='upper right')
pyplot.show()
import time
import random
start = time.time()
print("start:%0.2fs" % start)
# Keep offering new rounds until the player answers anything but 'y'.
while True:
    play = input('play the game(y/n)?')
    if play != 'y':
        break
    number = random.randint(0, 1000)
    guess = int(input('guess a number: '))
    # Prompt higher/lower until the player hits the number.
    while guess != number:
        if guess < number:
            guess = int(input("guess a bigger number: "))
        else:
            guess = int(input("guess a smaller number: "))
    end = time.time()
    print("end:%0.2fs" % end)
    print("bingo! ")
    print(u"%0.2fs耗时:" % (end - start))
|
"""
Support instabot's methods.
"""
import sys
import os
import codecs
def check_if_file_exists(file_path, quiet=False):
    """Return True when `file_path` exists.

    When it does not, report it on stdout (unless `quiet`) and return
    False.
    """
    if os.path.exists(file_path):
        return True
    if not quiet:
        print("Can't find '%s' file." % file_path)
    return False
def read_list_from_file(file_path, quiet=False):
    """
    Reads list from file. One line - one item.
    Returns the list if file items.
    """
    try:
        if not check_if_file_exists(file_path, quiet=quiet):
            return []
        with codecs.open(file_path, "r", encoding="utf-8") as handle:
            lines = handle.readlines()
        # Python 2 compatibility: turn unicode lines into byte strings.
        if sys.version_info[0] < 3:
            lines = [str(line.encode('utf8')) for line in lines]
        # Note: the emptiness test runs BEFORE stripping, so a line that
        # holds only a newline still yields an empty-string item.
        return [line.strip() for line in lines if len(line) > 0]
    except Exception as e:
        # Best-effort reader: report and fall back to an empty list.
        print(str(e))
        return []
def check_whitelists(self):
    """
    Check whitelists in folder with script
    """
    # Candidate file names, most specific first.
    candidates = ('whitelist.txt',
                  'friends_{0}.txt'.format(self.username),
                  'friends_{0}.txt'.format(self.user_id),
                  'friends.txt')
    for candidate in candidates:
        users = read_list_from_file(candidate, quiet=True)
        if not users:
            continue
        self.logger.info('Found whitelist: {0} ({1} users)'.format(candidate, len(users)))
        return users
    return []
def add_whitelist(self, file_path):
    """Load `file_path` into self.whitelist as user ids; True when non-empty."""
    entries = read_list_from_file(file_path)
    self.whitelist = [self.convert_to_user_id(entry) for entry in entries]
    return bool(self.whitelist)
def add_blacklist(self, file_path):
    """Load `file_path` into self.blacklist as user ids; True when non-empty."""
    entries = read_list_from_file(file_path)
    self.blacklist = [self.convert_to_user_id(entry) for entry in entries]
    return bool(self.blacklist)
def console_print(verbosity, text):
    """Print `text` only when `verbosity` is truthy."""
    if not verbosity:
        return
    print(text)
|
from __future__ import print_function
import ctypes
import json
import logging
import os
import shutil
import subprocess
import sys
import time
import zipfile
import jinja2
import requests
from gerrit_mq import common
from gerrit_mq import orm
def add_or_update_account_info(sql, ai_obj):
    """
    Update the AccountInfo object from gerrit json, or create it if it's new.
    """
    existing = (sql
                .query(orm.AccountInfo)
                .filter(orm.AccountInfo.rid == ai_obj.account_id)
                .limit(1))
    if existing.count() > 0:
        # Known account: refresh the mutable fields in place.
        record = existing.first()
        for field in ['name', 'email', 'username']:
            setattr(record, field, getattr(ai_obj, field))
    else:
        # New account: map gerrit's _account_id onto our rid column and
        # backfill any missing fields with a placeholder.
        kwargs = ai_obj.as_dict()
        kwargs['rid'] = kwargs.pop('_account_id')
        for key in ['name', 'email', 'username']:
            kwargs.setdefault(key, '<none>')
        sql.add(orm.AccountInfo(**kwargs))
def get_next_poll_id(sql):
    """
    Return the next unused poll id
    """
    from sqlalchemy.sql.expression import func
    last_poll_id = sql.query(func.max(orm.ChangeInfo.poll_id)).scalar()
    # An empty table yields NULL -> start numbering at 1.
    return 1 if last_poll_id is None else last_poll_id + 1
def poll_gerrit(gerrit, sql, poll_id):
    """
    Hit gerrit REST and read off the current queue of merge requests. Update the
    local cache database entries for any changes that have been updated since
    our last poll. Write the resulting ordered queue to the queue file.
    """
    for changeinfo in gerrit.get_merge_requests():
        # Keep the AccountInfo table fresh while we have the owner data.
        add_or_update_account_info(sql, changeinfo.owner)
        row = orm.ChangeInfo(
            project=changeinfo.project,
            branch=changeinfo.branch,
            change_id=changeinfo.change_id,
            subject=changeinfo.subject,
            current_revision=changeinfo.current_revision,
            owner_id=changeinfo.owner.account_id,
            message_meta=json.dumps(changeinfo.message_meta),
            queue_time=changeinfo.queue_time,
            poll_id=poll_id,
            priority=changeinfo.message_meta.get('Priority', 100))
        sql.add(row)
    sql.commit()
    # Rows still carrying an older poll_id vanished from gerrit's queue;
    # drop them from the cache.
    (sql.query(orm.ChangeInfo)
     .filter(orm.ChangeInfo.poll_id != poll_id)
     .delete())
    sql.commit()
def get_queue(sql, project_filter=None, branch_filter=None,
              offset=None, limit=None):
    """
    Return list of ChangeInfo objects matching the given project and branch
    filters (as SQL LIKE expressions)
    Returns a tuple of (`count`, `result_list`) where `count` is the size of
    the query without `offset` or `limit`.
    TODO(josh): filter out any which are IN_PROGRESS?
    """
    query = sql.query(orm.ChangeInfo)
    for column, pattern in ((orm.ChangeInfo.project, project_filter),
                            (orm.ChangeInfo.branch, branch_filter)):
        if pattern is not None:
            query = query.filter(column.like(pattern))
    query = query.order_by(orm.ChangeInfo.poll_id.desc(),
                           orm.ChangeInfo.priority.asc(),
                           orm.ChangeInfo.queue_time.asc())
    # Count before pagination so callers see the full result size.
    total = query.count()
    if offset is not None and offset > 0:
        query = query.offset(offset)
    if limit is not None and limit > 0:
        query = query.limit(limit)
    return total, [common.ChangeInfo(**row.as_dict()) for row in query]
def get_history(sql, project_filter, branch_filter, offset, limit):
    """
    Return json serializable list of MergeStatus dictionaries for available
    merge requests matching the given project and branch filters (as regular
    expressions)
    TODO(josh): filter out any which are IN_PROGRESS?
    """
    query = sql.query(orm.MergeStatus)
    for column, pattern in ((orm.MergeStatus.project, project_filter),
                            (orm.MergeStatus.branch, branch_filter)):
        if pattern is not None:
            query = query.filter(column.like(pattern))
    query = query.order_by(orm.MergeStatus.end_time.desc())
    # Count before pagination so callers see the full result size.
    total = query.count()
    if offset > 0:
        query = query.offset(offset)
    if limit > 0:
        query = query.limit(limit)
    return dict(count=total, result=[row.as_dict() for row in query])
def sync_account_db(gerrit, sql):
    """
    Synchronize local account table to gerrit account table
    """
    page_size = 25
    # Page through gerrit's account listing (bounded to guard against a
    # server that never reports the end of the list).
    for page_idx in range(10000):
        offset = page_idx * page_size
        json_list = gerrit.get('accounts/?start={offset}&n={page_size}&o=DETAILS'
                               .format(offset=offset, page_size=page_size))
        for ai_json in json_list:
            query = (sql
                     .query(orm.AccountInfo)
                     .filter(orm.AccountInfo.rid == ai_json['_account_id']))
            if query.count() > 0:
                # Existing account: refresh its fields.
                record = query.first()
                record.name = ai_json.get('name', '<none>')
                record.email = ai_json.get('email', '<none>')
                record.username = ai_json.get('username', '<none>')
                sql.commit()
            else:
                # New account: map _account_id to rid and fill gaps.
                kwargs = dict(ai_json)
                kwargs['rid'] = kwargs.pop('_account_id')
                for key in ['name', 'email', 'username']:
                    kwargs.setdefault(key, '<none>')
                sql.add(orm.AccountInfo(**kwargs))
                sql.commit()
        # Gerrit marks the final page by omitting _more_accounts on the
        # last entry (or returning an empty page).
        if len(json_list) < 1 or '_more_accounts' not in json_list[-1]:
            break
def migrate_db(gerrit, input_path, from_version, output_path, to_version):
    """
    Migrate a database from one schema version to another
    """
    # Dispatch on the (from, to) version pair; unknown pairs are a no-op.
    version_pair = (from_version, to_version)
    if version_pair == ('0.1.0', '0.2.0'):
        migrate_db_v0p1p0_to_v0p2p0(gerrit, input_path, output_path)
    elif version_pair == ('0.2.0', '0.2.1'):
        migrate_db_v0p2p0_to_v0p2p1(input_path, output_path)
def migrate_db_v0p2p0_to_v0p2p1(input_path, output_path):
    """
    Split merge_history into merge_history and merge_changes.
    """
    # Work on a temporary copy so a failed migration never corrupts the
    # input database.
    tmp_path = input_path + '.mq_migration'
    if os.path.exists(tmp_path):
        logging.info('Removing stale temporary %s', tmp_path)
        os.remove(tmp_path)
    logging.info('Copying %s to %s', input_path, tmp_path)
    shutil.copyfile(input_path, tmp_path)
    logging.info('Creating merge_changes table')
    import sqlite3
    conn = sqlite3.connect(tmp_path)
    cur = conn.cursor()
    # Drop the old indices and move the v0.2.0 table out of the way so
    # the ORM can create the new-schema tables under the original name.
    cur.execute('SELECT name FROM SQLITE_MASTER WHERE type="index"'
                ' AND tbl_name="merge_history"')
    indices = [row[0] for row in cur]
    for index in indices:
        logging.info('Dropping index %s from merge_history', index)
        cur.execute('DROP INDEX {}'.format(index))
    cur.execute('ALTER TABLE merge_history RENAME TO merge_history_v0p2p0')
    conn.commit()
    conn.close()
    try:
        os.remove(output_path)
    except OSError:
        pass
    os.rename(tmp_path, output_path)
    logging.info('Migrating rows')
    sql = orm.init_sql('sqlite:///{}'.format(output_path))()
    prev_migration_query = (sql.query(orm.MergeStatus)
                            .order_by(orm.MergeStatus.rid.desc())
                            .limit(1))
    source_query = sql.query(orm.MergeStatusV0p2p0)
    if prev_migration_query.count() > 0:
        prev_migration_last = -1
        for prev_status in prev_migration_query:
            prev_migration_last = prev_status.rid
            break
        logging.info('Detected previous migration, will migrate increment'
                     ' starting at row id %d', prev_migration_last)
        # Fixed: the incremental filter referenced the v0.1.0 model
        # (MergeStatusV0p1p0.id) although this migration reads from the
        # v0.2.0 table; filter on the source table's own row id.
        source_query = source_query.filter(orm.MergeStatusV0p2p0.rid >
                                           prev_migration_last)
    last_print_time = 0
    query_count = source_query.count()
    for idx, old_status in enumerate(source_query):
        # merge_history keeps the merge-level columns ...
        kwargs = {key: getattr(old_status, key) for key in
                  ['rid', 'project', 'branch', 'start_time', 'end_time', 'status']}
        sql.add(orm.MergeStatus(**kwargs))
        # ... while the per-change columns move to merge_changes, linked
        # back through merge_id.
        kwargs = {key: getattr(old_status, key) for key in
                  ['owner_id', 'change_id', 'request_time', 'msg_meta']}
        kwargs['merge_id'] = old_status.rid
        sql.add(orm.MergeChange(**kwargs))
        sql.commit()
        # Throttled progress display (at most twice per second).
        if time.time() - last_print_time > 0.5:
            last_print_time = time.time()
            progress = 100.0 * (idx + 1) / query_count
            sys.stdout.write('{:6d}/{:6d} [{:6.2f}%]\r'
                             .format(idx, query_count, progress))
            sys.stdout.flush()
    sys.stdout.write('{:6d}/{:6d} [{:6.2f}%]\n'
                     .format(query_count, query_count, 100.0))
    sql.close()
    # Finally drop the renamed source table from the migrated database.
    conn = sqlite3.connect(output_path)
    cur = conn.cursor()
    cur.execute('DROP TABLE merge_history_v0p2p0')
    conn.commit()
    conn.close()
def migrate_db_v0p1p0_to_v0p2p0(gerrit, input_path, output_path):
  """
  Migrate a database from schema v0.1.0 to schema v0.2.0.

  The input database is copied to a temporary file, its merge_history table
  is renamed out of the way, and every row is rewritten into the output
  database in the new schema.  Fields missing from the old schema (owner_id,
  msg_meta) are backfilled by polling gerrit; rows still marked IN_PROGRESS
  are recorded as CANCELED.  Supports resuming: rows at or below the highest
  already-migrated rid are skipped.

  Parameters
  ----------
  gerrit :
      gerrit REST client used to look up change owner / message metadata.
  input_path : str
      Path to the v0.1.0 sqlite database (left untouched).
  output_path : str
      Path to the v0.2.0 sqlite database to fill.
  """
  tmp_path = input_path + '.mq_migration'
  if os.path.exists(tmp_path):
    logging.info('Removing stale temporary %s', tmp_path)
    os.remove(tmp_path)
  logging.info('Copying %s to %s', input_path, tmp_path)
  shutil.copyfile(input_path, tmp_path)

  # Rename the old table so the ORM can read it under the versioned name.
  logging.info('Renaming old table')
  import sqlite3
  conn = sqlite3.connect(tmp_path)
  cur = conn.cursor()
  cur.execute('ALTER TABLE merge_history RENAME TO merge_history_v0p1p0')
  conn.commit()
  conn.close()

  logging.info('Migrating rows')
  source_sql = orm.init_sql('sqlite:///{}'.format(tmp_path))()
  dest_sql = orm.init_sql('sqlite:///{}'.format(output_path))()

  # If the destination already holds migrated rows, resume after the last one.
  prev_migration_query = (dest_sql.query(orm.MergeStatus)
                          .order_by(orm.MergeStatus.rid.desc())
                          .limit(1))
  source_query = source_sql.query(orm.MergeStatusV0p1p0)
  if prev_migration_query.count() > 0:
    prev_migration_last = -1
    for prev_status in prev_migration_query:
      prev_migration_last = prev_status.rid
      break
    logging.info('Detected previous migration, will migrate increment'
                 ' starting at row id %d', prev_migration_last)
    source_query = source_query.filter(orm.MergeStatusV0p1p0.id >
                                       prev_migration_last)

  last_print_time = 0
  query_count = source_query.count()
  for idx, old_status in enumerate(source_query):
    # Historical rows cannot still be running; normalize to CANCELED.
    if old_status.result == orm.StatusKey.IN_PROGRESS.value:
      status = orm.StatusKey.CANCELED.value
    else:
      status = old_status.result

    # Poll gerrit (with retries) for the change owner and message metadata.
    changeinfo = None
    msg_meta = {}
    max_tries = 10
    sleep_duration = 2
    for try_idx in range(max_tries):
      try:
        if not changeinfo:
          changeinfo = gerrit.get_change(old_status.change_id)
        if not msg_meta:
          msg_meta = gerrit.get_message_meta(old_status.change_id,
                                             changeinfo.current_revision)
        break
      except requests.RequestException:
        # NOTE: logging.warn() is a deprecated alias; use logging.warning().
        logging.warning('Failed to poll gerrit for change %s %d/%d',
                        old_status.change_id, try_idx, max_tries)
        time.sleep(sleep_duration)
    if changeinfo is not None:
      owner_id = changeinfo.owner.account_id
    else:
      owner_id = -1  # sentinel: owner could not be fetched

    new_status = orm.MergeStatusV0p2p0(rid=old_status.id,
                                       project='aircam',
                                       branch=old_status.target_branch,
                                       owner_id=owner_id,
                                       change_id=old_status.change_id,
                                       request_time=old_status.request_time,
                                       start_time=old_status.start_time,
                                       end_time=old_status.end_time,
                                       msg_meta=json.dumps(msg_meta),
                                       status=status)
    dest_sql.add(new_status)
    dest_sql.commit()

    # Throttled progress display, refreshed at most twice per second.
    if time.time() - last_print_time > 0.5:
      last_print_time = time.time()
      progress = 100.0 * (idx + 1) / query_count
      sys.stdout.write('{:6d}/{:6d} [{:6.2f}%]\r'
                       .format(idx, query_count, progress))
      sys.stdout.flush()
  sys.stdout.write('{:6d}/{:6d} [{:6.2f}%]\n'
                   .format(query_count, query_count, 100.0))
  os.remove(tmp_path)
# Owner ids referenced by merge_history rows that have no matching row in
# account_info -- i.e. the accounts we still need to fetch from gerrit.
# Used by fetch_missing_account_info().
MISSING_IDS_QUERY = """
SELECT DISTINCT(owner_id)
FROM merge_history LEFT JOIN account_info
ON owner_id = account_info.rid
WHERE account_info.rid is NULL
ORDER BY owner_id ASC
"""
def fetch_missing_account_info(gerrit, db_path):
  """
  Fetch any account info from gerrit that is missing in the database.

  Runs MISSING_IDS_QUERY to find owner_ids referenced by merge_history but
  absent from account_info, then fetches each account record from gerrit and
  upserts it.  Failures for individual accounts are logged and skipped.

  Parameters
  ----------
  gerrit :
      gerrit REST client.
  db_path : str
      Path to the sqlite database to inspect and update.
  """
  import sqlite3
  logging.info('Querying missing ids')
  sql = orm.init_sql('sqlite:///{}'.format(db_path))()
  conn = sqlite3.connect(db_path)
  cur = conn.cursor()
  cur.execute(MISSING_IDS_QUERY)
  missing_ids = [row[0] for row in cur]
  num_missing = len(missing_ids)
  conn.close()

  logging.info('Fetching %d account infos from gerrit', num_missing)
  last_print_time = 0
  for idx, owner_id in enumerate(missing_ids):
    # Throttled progress display, refreshed at most once per second.
    if time.time() - last_print_time > 1:
      last_print_time = time.time()
      progress = 100.0 * (idx + 1) / num_missing
      sys.stdout.write('{:6d}/{:6d} [{:6.2f}%]\r'
                       .format(idx, num_missing, progress))
      sys.stdout.flush()
    try:
      ai_json = gerrit.get('accounts/{}'.format(owner_id))
      ai_obj = common.AccountInfo(**ai_json)
      add_or_update_account_info(sql, ai_obj)
    except requests.RequestException:
      # NOTE: logging.warn() is a deprecated alias; use logging.warning().
      logging.warning('Failed to get account info for owner_id=%d', owner_id)
      continue
    except ValueError:
      logging.warning('Malformed json for owner_id=%d', owner_id)
      continue
  sys.stdout.write('{:6d}/{:6d} [{:6.2f}%]\n'
                   .format(num_missing, num_missing, 100.0))
  sql.close()
def gzip_old_logs(srcdir, destdir):
  """
  For any logfiles that are not already gzipped, gzip them and then create a
  zero sized stub.

  Scans up to max_num_logs sequentially-numbered logs ('NNNNNN.log',
  '.stderr', '.stdout') in srcdir.  Each one without an existing .gz in
  destdir is compressed there via the external gzip tool, and an empty stub
  with the original filename is written alongside the archive.
  """
  last_progress_time = 0
  max_num_logs = 1000000
  gzip_jobs = 0
  for log_idx in range(max_num_logs):
    # Throttled progress line, refreshed at most once per second.
    if time.time() - last_progress_time > 1.0:
      last_progress_time = time.time()
      percent = 100.0 * (log_idx + 1) / max_num_logs
      sys.stdout.write('{:6d}/{:6d} [{:6.2f}%] ({:6d})\r'
                       .format(log_idx, max_num_logs, percent, gzip_jobs))
      sys.stdout.flush()
    for extension in ['log', 'stderr', 'stdout']:
      filename = '{:06d}.{}'.format(log_idx, extension)
      logpath = os.path.join(srcdir, filename)
      stub_path = os.path.join(destdir, filename)
      gzip_path = os.path.join(destdir, filename + '.gz')
      if os.path.exists(logpath) and not os.path.exists(gzip_path):
        gzip_jobs += 1
        # Stream the external tool's stdout straight into the .gz file.
        with open(gzip_path, 'w') as outfile:
          subprocess.check_call(['gzip', '--stdout', logpath], stdout=outfile)
        # Leave a zero-byte stub so the original filename still exists.
        with open(stub_path, 'w') as _:
          pass
  sys.stdout.write('{:6d}/{:6d} [{:6.2f}%] ({:6d})\n'
                   .format(max_num_logs, max_num_logs, 100.0, gzip_jobs))
def path_prefix_in(query_path, prefix_list):
  """
  Return True if query_path lies at or under any path in prefix_list.

  Comparison is done on whole '/'-separated components, so '/a/bc' is NOT
  under the prefix '/a/b'.
  """
  query_parts = query_path.split('/')
  for prefix in prefix_list:
    prefix_parts = prefix.split('/')
    # BUG FIX: the comparison was reversed -- it tested whether the query was
    # a prefix of the candidate.  A candidate prefix must match the leading
    # components of the query.
    if query_parts[:len(prefix_parts)] == prefix_parts:
      return True
  return False
# This struct will be passed as a pointer,
# so we don't have to worry about matching the full native layout.
class dl_phdr_info(ctypes.Structure): # pylint: disable=invalid-name
  """
  Minimal ctypes mirror of glibc's ``struct dl_phdr_info``.

  Only the leading fields are declared; the struct is only ever accessed
  through a pointer, so the trailing native fields do not need to be
  spelled out here.
  """
  _fields_ = [
      ('padding0', ctypes.c_void_p), # dlpi_addr -- unused, skipped over
      ('dlpi_name', ctypes.c_char_p), # library path; remaining fields omitted
  ]
class LibListStore(object):
  """
  Accumulates shared-object names reported by dl_iterate_phdr().
  """
  def __init__(self):
    # Filled by handle_libinfo() as the callback fires once per loaded object.
    self.liblist = []

  def handle_libinfo(self, info, size, data): # pylint: disable=unused-argument
    """
    dl_iterate_phdr() callback: record the library name and keep iterating
    (a return value of 0 tells dl_iterate_phdr to continue).
    """
    self.liblist.append(info.contents.dlpi_name)
    return 0
def get_loaded_libraries():
  """
  Return a list of file paths for libraries which are loaded into the current
  interpreter process.

  Walks the loaded shared objects with glibc's dl_iterate_phdr(); the
  callback collects each object's dlpi_name.  Linux/glibc only.
  """
  # NOTE(josh): c_void_p changed to c_char_p
  callback_t = ctypes.CFUNCTYPE(ctypes.c_int,
                                ctypes.POINTER(dl_phdr_info),
                                ctypes.POINTER(ctypes.c_size_t),
                                ctypes.c_char_p)
  dl_iterate_phdr = ctypes.CDLL('libc.so.6').dl_iterate_phdr
  # NOTE(josh): c_void_p replaced with c_char_p
  dl_iterate_phdr.argtypes = [callback_t, ctypes.c_char_p]
  dl_iterate_phdr.restype = ctypes.c_int
  list_store = LibListStore()
  # BUG FIX: c_char_p requires bytes on python 3 -- a str argument raises
  # ctypes.ArgumentError.  b'dummy' works on both python 2 and 3.
  dl_iterate_phdr(callback_t(list_store.handle_libinfo), b'dummy')
  return list_store.liblist
def get_watch_manifest(ignore_prefixes=None):
  """
  Return a list of file paths and mtimes for files that should trigger a
  restart if they change.

  The manifest covers zipfiles on sys.path, the source files of currently
  imported modules, and shared libraries loaded into this process.

  Parameters
  ----------
  ignore_prefixes : list, optional
      Path prefixes; any module or library under one of them is excluded.

  Returns
  -------
  Sorted list of (realpath, mtime) tuples.
  """
  if ignore_prefixes is None:
    ignore_prefixes = []

  # Zipfiles (e.g. zipapps) found on the import path.
  zipfiles_on_path = []
  for component in sys.path:
    if component and zipfile.is_zipfile(component):
      realpath_to_zip = os.path.realpath(component)
      zipfiles_on_path.append((realpath_to_zip,
                               os.path.getmtime(realpath_to_zip)))
  zipfiles = [zfile for (zfile, _) in zipfiles_on_path]

  # Source files of imported modules, excluding those that live inside a
  # zipfile (already covered by the zipfile entry) or an ignored prefix.
  module_files = []
  for _, module in sys.modules.items():
    # NOTE: __file__ may exist but be None (e.g. namespace packages); those
    # would crash os.path.realpath, so skip them too.
    if getattr(module, '__file__', None):
      realpath_to_module = os.path.realpath(module.__file__)
      if path_prefix_in(realpath_to_module, zipfiles):
        logging.info('Skipping zipfile module %s', realpath_to_module)
        continue
      elif path_prefix_in(realpath_to_module, ignore_prefixes):
        logging.info('Skipping ignored module %s', realpath_to_module)
        continue
      else:
        module_files.append((realpath_to_module,
                             os.path.getmtime(realpath_to_module)))

  # Shared libraries loaded into this process.
  so_files = []
  for so_path in get_loaded_libraries():
    realpath_to_so = os.path.realpath(so_path)
    # BUG FIX: previously tested realpath_to_module here, which checked the
    # last module seen above instead of the library being considered.
    if path_prefix_in(realpath_to_so, ignore_prefixes):
      logging.info('Skipping ignored so %s', realpath_to_so)
      continue
    else:
      so_files.append((realpath_to_so, os.path.getmtime(realpath_to_so)))
  return sorted(zipfiles_on_path + module_files + so_files)
def get_changelist(manifest):
  """
  Given a list of (fullpath, mtime) return a list of paths whose current
  mtime is newer than the manifest mtime.
  """
  # The 0.1s slack absorbs filesystem timestamp granularity.
  return [fullpath for fullpath, recorded_mtime in manifest
          if os.path.getmtime(fullpath) - recorded_mtime > 0.1]
def get_real_argv():
  """
  Return the actual command line of this process, read from /proc (linux
  only).
  """
  with open('/proc/self/cmdline', 'r') as cmdline_file:
    raw_cmdline = cmdline_file.read()
  # cmdline is NUL-delimited with a trailing NUL, hence the dropped last field.
  return raw_cmdline.split('\0')[:-1]
def restart_if_modified(watch_manifest, pidfile_path):
  """
  Restart the process if any file in the manifest has changed.

  If a change is detected the pidfile is removed and the current process is
  replaced in-place via os.execvp -- this function does not return in that
  case.
  """
  changelist = get_changelist(watch_manifest)
  if changelist:
    logging.info('Detected a sourcefile change: \n '
                 + '\n '.join(changelist))
    argv = get_real_argv()
    # Remove the pidfile first so the replacement process can recreate it.
    os.remove(pidfile_path)
    # Re-exec the same interpreter with the original argv; never returns.
    os.execvp(sys.executable, argv)
class ZipFileLoader(jinja2.BaseLoader):
  """
  Implements a template loader which reads templates from a zipfile.

  Templates are looked up as '<base_directory>/<template>' inside the
  archive; the archive stays open for the loader's lifetime.
  """

  def __init__(self, zipfile_path, base_directory):
    self.zipf = zipfile.ZipFile(zipfile_path)
    self.basedir = base_directory

  def __del__(self):
    self.zipf.close()

  def get_source(self, environment, template):
    """Return (source, filename, uptodate) per the jinja2 loader API."""
    # Hoisted out of the try so it is always bound for the error message.
    fullpath = '{}/{}'.format(self.basedir, template)
    try:
      with self.zipf.open(fullpath) as fileobj:
        source = fileobj.read()
    except (IOError, KeyError):
      # BUG FIX: ZipFile.open raises KeyError for a missing archive member
      # (not IOError), so KeyError must also be translated into
      # TemplateNotFound.
      raise jinja2.TemplateNotFound(template,
                                    message='Fullpath: {}'.format(fullpath))
    # uptodate() returning False forces a reload on every render.
    return (source, None, lambda: False)
def render_templates(config, outdir):
  """
  Render jinja2 templates into the specified documentroot.

  Renders each html page template, then copies script.js (with the
  configured gerrit URL substituted in) and style.css.  Template data is
  read either from the package zipfile (when running as a zipapp) or from
  the source tree.
  """
  # Parent of the package directory: either a real directory or a zipfile.
  pardir = os.path.dirname(__file__)
  pardir = os.path.dirname(pardir)
  logging.info('pardir: %s', pardir)
  if zipfile.is_zipfile(pardir):
    logging.info('reading data from zipfile')
    loader = ZipFileLoader(pardir, 'gerrit_mq/templates')
  else:
    logging.info('reading data from package directory')
    loader = jinja2.PackageLoader('gerrit_mq', 'templates')
  env = jinja2.Environment(loader=loader)
  for page in ['daemon', 'detail', 'history', 'index', 'queue']:
    template_name = '{}.html.tpl'.format(page)
    template = env.get_template(template_name)
    outpath = os.path.join(outdir, '{}.html'.format(page))
    with open(outpath, 'w') as outfile:
      outfile.write(template.render()) # pylint: disable=no-member
      outfile.write('\n')
  # script.js and style.css are copied rather than template-rendered, again
  # from either the zipfile or the source tree.
  script_path = 'gerrit_mq/templates/script.js.tpl'
  style_path = 'gerrit_mq/templates/style.css'
  if zipfile.is_zipfile(pardir):
    with zipfile.ZipFile(pardir) as zfile:
      with zfile.open(script_path) as infile:
        js_lines = infile.readlines()
      with zfile.open(style_path) as infile:
        style_content = infile.read()
  else:
    with open(os.path.join(pardir, script_path)) as infile:
      js_lines = infile.readlines()
    with open(os.path.join(pardir, style_path)) as infile:
      style_content = infile.read()
  # Rewrite the kGerritURL declaration with the configured gerrit URL.
  outpath = os.path.join(outdir, 'script.js')
  with open(outpath, 'w') as outfile:
    for line in js_lines:
      if line.startswith('var kGerritURL'):
        outfile.write('var kGerritURL = "{}";\n'
                      .format(config['gerrit.rest.url']))
      else:
        outfile.write(line)
  outpath = os.path.join(outdir, 'style.css')
  with open(outpath, 'w') as outfile:
    outfile.write(style_content)
|
#!/usr/bin/env python
import tensorflow as tf
import numpy as np
from file_parser import *
# converting text into vectors
#################################################
def predict_category_based_on_description(description):
    """
    Classify an expense description with a naive-Bayes-style model.

    Uses the module-level per-category word tables (Travel, ...,
    Office_Supplies) plus Key_Words and Epsilon.  Returns the name of the
    category with the highest posterior probability.
    """
    description = description.lower()
    # Category label / word-count table pairs, in tie-breaking order.
    categories = [
        ("Travel", Travel),
        ("Meals and Entertainment", Meals_and_Entertainment),
        ("Computer - Hardware", Computer_Hardware),
        ("Computer - Software", Computer_Software),
        ("Office Supplies", Office_Supplies),
    ]
    total_records = 0.0
    for _, table in categories:
        total_records = total_records + table['NUM_RECORDS']
    # Prior P(category) = fraction of training records in that category.
    priors = [table['NUM_RECORDS'] / total_records for _, table in categories]
    # Example: P(Travel | Taxi ride)
    #   = P(Taxi | Travel) * P(ride | Travel)
    #     * P(other words NOT present | Travel) * P(Travel)
    #     / (SUM over all categories of the analogous numerator)
    present_words = description.split(" ")  # words in this description
    scores = [1.0 for _ in categories]
    for word in Key_Words:
        if word in present_words:
            for pos, (_, table) in enumerate(categories):
                if word in table:
                    scores[pos] = scores[pos] * table[word] / table['NUM_RECORDS']
                else:
                    # NOTE(review): this RESETS the accumulated product to
                    # Epsilon (matching the original code) rather than
                    # multiplying by it -- TODO confirm intent.
                    scores[pos] = Epsilon
        else:
            # Word absent from the description: fold in the complement.
            for pos, (_, table) in enumerate(categories):
                if word in table:
                    scores[pos] = scores[pos] * (1 - (table[word] / table['NUM_RECORDS']))
    # Fold in the priors, then normalize by the shared denominator.
    scores = [score * prior for score, prior in zip(scores, priors)]
    denominator = 0.0
    for score in scores:
        denominator = denominator + score
    scores = [score / denominator for score in scores]
    best = max(scores)
    # First category (in declaration order) achieving the maximum wins.
    for (label, _), score in zip(categories, scores):
        if score == best:
            return label
def category_key_words(training_list, category_name):
    """
    Return a dict of word occurrence counts for one category.

    The returned dict maps each word (stop words excluded) to its occurrence
    count as a float, plus the special key 'NUM_RECORDS' holding the number
    of training records in the category.  Also registers every word seen in
    the module-level Key_Words dict.
    """
    counts = {}
    for record in training_list:
        if record[1] != category_name:
            continue
        # NUM_RECORDS: number of records for the current category.
        counts['NUM_RECORDS'] = counts.get('NUM_RECORDS', 0.0) + 1
        # Tally occurrences of each (non-stop) word in the description.
        for token in record[3].lower().split(" "):
            if token in ("to", "the", "with"):
                continue
            Key_Words[token] = 1
            counts[token] = counts.get(token, 0.0) + 1
    return counts
#################################################
# Main program

# Probability assigned to a keyword never seen for a category (smoothing).
Epsilon = 0.01

#training_list, training_header = parse_csv_data("training_data_example.csv")
training_list, training_header = parse_csv_data("validation_data_example.csv")

Key_Words = {} # all key words (in all categories)

# Per-category tables: occurrence count per word, plus 'NUM_RECORDS'.
Travel = category_key_words(training_list, "Travel")
Meals_and_Entertainment = category_key_words(training_list, "Meals and Entertainment")
Computer_Hardware = category_key_words(training_list, "Computer - Hardware")
Computer_Software = category_key_words(training_list, "Computer - Software")
Office_Supplies = category_key_words(training_list, "Office Supplies")

# Guard against categories with no training rows at all.
if "NUM_RECORDS" not in Travel: Travel['NUM_RECORDS'] = 0
if "NUM_RECORDS" not in Meals_and_Entertainment: Meals_and_Entertainment['NUM_RECORDS'] = 0
if "NUM_RECORDS" not in Computer_Hardware: Computer_Hardware['NUM_RECORDS'] = 0
if "NUM_RECORDS" not in Computer_Software: Computer_Software['NUM_RECORDS'] = 0
if "NUM_RECORDS" not in Office_Supplies: Office_Supplies['NUM_RECORDS'] = 0

# Evaluate the classifier record-by-record.
# NOTE(review): this evaluates on the same list used for training, which
# inflates the reported success rate.
success = 0;
for record in training_list:
    print record
    if (predict_category_based_on_description(record[3]) == record[1]):
        success+=1
        print "Predicted Category: ", predict_category_based_on_description(record[3]), ": CORRECT"
    else:
        print "Predicted Category: ", predict_category_based_on_description(record[3]), ": INCORRECT"
# NOTE(review): under python 2 this is integer division, truncating the
# percentage to a whole number.
print "### Success Rate is:", (success*100/len(training_list)), "%"
|
# String tutorial covering:
#   Indexing
#   Slicing -> Substring
#   Reversing
#   Contains
#   Concatenating
#   Repeat
#   Working with loops

'''
Indexing and Slicing
'''
var1 = "Python"
var2 = "Tutorial"

# Positive indexing
# BUG FIX: labels below previously said "var1[0]:" / "var2[1:5]:" regardless
# of the expression actually printed; they now match the code.
print("var1[2]:", var1[2])
# Negative indexing
print("var1[-3]:", var1[-3])
# Positive slicing
print("var2[1:5]:", var2[1:5])
# Negative slicing
print("var2[-5:-2]:", var2[-5:-2])
# Reverse a complete string
print("Reverse a complete string", var2[::-1])
# Reverse a substring
print("Reverse a substring", var2[5:2:-1])

'''
Operators with string
'''
# Membership operators: in / not in
print("in")
print("y" in var1)
print("not in")
print("u" not in var1)
# Arithmetic operators: + (concatenate) and * (repeat)
print(var1 + var2)
print(var1 * 10)

# Working with loops
# By indexing
name = "Don Bosco"
for i in range(0, len(name)):
    print(name[i])
# By iterable
name = "Don Bosco"
for c in name:
    print(c)
|
import scrapy
import re
from datetime import datetime
from dateutil.relativedelta import relativedelta
import dateparser
from tpdb.BasePerformerScraper import BasePerformerScraper
class siteAuntJudysPerformerSpider(BasePerformerScraper):
    """
    Performer scraper for the Aunt Judys site.

    Bio fields (height, bust/cup size, measurements) live in text nodes that
    follow a "Bio Extra Fields" HTML comment, so they are extracted with
    comment-anchored XPath selectors and then cleaned with regexes.
    """
    selector_map = {
        'name': '//div[@class="title_bar"]/span/text()',
        'image': '//div[@class="cell_top cell_thumb"]/img/@src0_2x',
        'height': '//comment()[contains(.,"Bio Extra Fields")]/following-sibling::text()[contains(.,"Height")]',
        'cupsize': '//comment()[contains(.,"Bio Extra Fields")]/following-sibling::text()[contains(.,"Bust")]',
        'measurements': '//comment()[contains(.,"Bio Extra Fields")]/following-sibling::text()[contains(.,"Measurements")]',
        'pagination': '/tour/models/models_%s.html',
        # NOTE: raw string so the backslashes survive python 3 escape rules.
        'external_id': r'models\/(.*)\/'
    }

    name = 'AuntJudysPerformer'
    network = "Aunt Judys"

    start_urls = [
        'https://www.auntjudys.com',
    ]

    def get_gender(self, response):
        # Site lists female performers only.
        return 'Female'

    def get_performers(self, response):
        """Queue a parse_performer request for every model linked on the page."""
        performers = response.xpath('//div[@class="update_details"]/a/@href').getall()
        for performer in performers:
            yield scrapy.Request(
                url=self.format_link(response, performer),
                callback=self.parse_performer
            )

    def get_height(self, response):
        """Extract the 'Height:' bio field with spaces removed, or ''."""
        if 'height' in self.selector_map:
            height = self.process_xpath(response, self.get_selector_map('height')).getall()
            if height:
                height = " ".join(height)
                if "Height:" in height:
                    # Spaces are stripped first; tabs/CRs become the only
                    # whitespace left for the '\s+' in the regex to match.
                    # NOTE: regexes below are raw strings to avoid invalid
                    # escape sequences under python 3.
                    height = height.replace(" ", "").replace("\n", "").replace("\t", " ").replace("\r", " ").strip()
                    height = re.sub(r"\s\s+", " ", height).strip()
                    height = re.search(r'Height:\s+(\d+.*)', height).group(1)
                    if height:
                        height = height.replace(" ", "")
                        return height.strip()
        return ''

    def get_measurements(self, response):
        """Extract the 'Measurements:' bio field, alphanumerics and '-', or ''."""
        if 'measurements' in self.selector_map:
            measurements = self.process_xpath(response, self.get_selector_map('measurements')).getall()
            if measurements:
                measurements = " ".join(measurements)
                # NOTE(review): the pattern ends '\d+\d+' (two adjacent digit
                # runs); '\d+-\d+' may have been intended -- TODO confirm.
                if "Measurements:" in measurements and re.search(r'(\d+\w+-\d+\d+)', measurements):
                    measurements = measurements.replace(" ", "").replace("\n", "").replace("\t", " ").replace("\r", " ").strip()
                    measurements = re.sub(r"\s\s+", " ", measurements).strip()
                    measurements = re.search(r'Measurements:\s+(\d+.*)', measurements).group(1)
                    if measurements:
                        measurements = re.sub('[^a-zA-Z0-9-]', '', measurements)
                        return measurements.strip()
        return ''

    def get_cupsize(self, response):
        """Extract the 'Bust:' bio field with spaces removed, or ''."""
        if 'cupsize' in self.selector_map:
            cupsize = self.process_xpath(response, self.get_selector_map('cupsize')).getall()
            if cupsize:
                cupsize = " ".join(cupsize)
                if "Bust:" in cupsize:
                    cupsize = cupsize.replace(" ", "").replace("\n", "").replace("\t", " ").replace("\r", " ").strip()
                    cupsize = re.sub(r"\s\s+", " ", cupsize).strip()
                    cupsize = re.search(r'Bust:\s+(\d+.*)', cupsize).group(1)
                    if cupsize:
                        cupsize = cupsize.replace(" ", "")
                        return cupsize.strip()
        return ''

    def get_image(self, response):
        """Return the absolute performer image URL, or ''."""
        if 'image' in self.selector_map:
            image = self.process_xpath(response, self.get_selector_map('image')).get()
            # Fall back from the retina attribute to the plain src.
            if not image:
                image = response.xpath('//div[@class="cell_top cell_thumb"]/img/@src0_2x').get()
            if not image:
                image = response.xpath('//div[@class="cell_top cell_thumb"]/img/@src').get()
            if image:
                image = "https://www.auntjudys.com" + image
                return image.strip()
        return ''
|
import numpy as np
import Node2VecFeatures as n2v
import RefexFeatures as refex
def calculate_features(self, order='linear'):
    """
    Compute the combined feature matrix (ReFeX features followed by node2vec
    features, stacked column-wise), cache it on self.F / self.NumF, and
    return it.
    """
    combined = np.concatenate(
        (refex.calculate_features(self, order),
         n2v.calculate_features(self, order)),
        axis=1)
    self.NumF = combined.shape[1]
    self.F = combined
    return combined
def update_features(self, node, order='linear'):
    """
    Recompute the combined feature matrix (ReFeX + node2vec) after a change
    to the given node, cache it on self.F / self.NumF, and return it.
    """
    combined = np.concatenate(
        (refex.update_features(self, node, order),
         n2v.update_features(self, node, order)),
        axis=1)
    self.NumF = combined.shape[1]
    self.F = combined
    return combined
|
#!/usr/bin/env python3
import numpy as np
import tensorflow.keras as keras
from tensorflow.keras.layers import Input
## Basic blocks ##
def skip_connection(x, xskip):
    """
    Long skip connection: concatenate an encoder tensor onto a decoder tensor
    along the channel dimension (axis 3, channels-last layout).

    Parameters
    ----------
    x :
        Input tensor from the decoder phase.
    xskip :
        Tensor from the encoder phase.
    """
    return keras.layers.Concatenate(axis=3)([x, xskip])
def convolution_block(x, filters, kernel_size, padding, strides, act, dropval, name=None, first_block=False, upsample=False):
    """
    Fundamental convolution block with three possible configurations:

    1) conv -> activation -> batchnorm                        (first_block)
    2) dropout -> conv -> activation -> batchnorm             (default)
    3) upsample -> dropout -> conv -> activation -> batchnorm (upsample)

    Used in both the encoder and decoder; decoder upsampling is a dedicated
    layer, downsampling is a strided convolution.

    Parameters
    ----------
    x :
        Input tensor.
    filters : int
        Number of activation maps in the convolutional layer.
    kernel_size : int
        Square filter dimension (single integer only).
    padding : str
        Convolutional layer padding.
    strides : int
        Conv stride; when upsample=True it is instead the upsampling factor
        and the conv uses stride 1.
    act :
        Activation function.
    dropval :
        Drop rate for the dropout layer (unused when first_block=True).
    name : str
        Name applied to only the batch normalization layer.
    first_block : boolean
        If True, the dropout layer is omitted (configuration 1).
    upsample : boolean
        If True, a 2D upsampling layer precedes the dropout layer
        (configuration 3).

    Returns
    -------
    Output tensor after batch normalization.
    """
    if first_block:
        # Configuration (1): bare convolution in front of act/batchnorm.
        out = keras.layers.Conv2D(filters, kernel_size=kernel_size,
                                  padding=padding, strides=strides)(x)
    elif upsample:
        # Configuration (3): upsample, then dropout, then a stride-1 conv.
        out = keras.layers.UpSampling2D(strides)(x)
        out = keras.layers.Dropout(dropval)(out)
        out = keras.layers.Conv2D(filters, kernel_size=kernel_size,
                                  padding=padding, strides=1)(out)
    else:
        # Configuration (2): dropout in front of a (possibly strided) conv.
        out = keras.layers.Dropout(dropval)(x)
        out = keras.layers.Conv2D(filters, kernel_size=kernel_size,
                                  padding=padding, strides=strides)(out)
    out = keras.layers.Activation(act)(out)
    return keras.layers.BatchNormalization(name=name)(out)
def residual_block(x, filters, kernel_size, padding, strides, act, dropval, name=[None, None], first_block=False, upsample=[False, False, False], filters_changed=False, mid_skip=None):
    """
    Two stacked convolution blocks wrapped by a residual (shortcut) connection.

    Parameters
    ----------
    x :
        Input tensor.
    filters : list
        A list with two elements. The items in the list, filters[0] and filters[1],
        are the number of filters in the first and second convolution block,
        respectively.
    kernel_size : int
        Dimension of filter in convolutional layer. Only a single integer is accepted.
        The kernel_size provided is applied to both convolutional blocks.
    padding : str
        Convolutional layer padding. Given string is applied to both convolution
        blocks.
    strides : list
        A list with three elements. Two items in the list, strides[0] and strides[1],
        are the stride values for the first and second convolutional block, respectively.
        The third item, strides[2], is the stride that will be applied to the convolutional
        layer or upsampling layer in the residual connection.
    act : list
        A list with two elements. The items in the list, act[0] and act[1], are the
        activation functions for the first and second convolutional block, respectively.
    dropval :
        Drop rate for dropout layer. Given value is applied to both convolution
        blocks.
    name : list
        A list with two elements. The items in the list, name[0] and name[1], are the
        names that will be applied to only the batch normalization layer of the first
        and second convolution block, respectively.
    first_block : boolean
        If True, dropout layer will be omitted from the first convolution block.
    upsample : list
        A list with three elements. Two items in the list, upsample[0] and upsample[1],
        indicate if a 2D upsampling layer should be added before the dropout layer in
        the first and second convolution block, respectively. The third item, upsample[2],
        indicates if a 2D upsampling layer should be used in the residual connection.
        All elements in the list must be boolean.
    filters_changed : boolean
        Set to True if the input to the residual block and the output have a different
        number of channels (filters). If True, a convolutional layer, if not already present
        due to automatic triggers, will be placed in the residual connection to fix the channel
        dimension.
    mid_skip :
        A keras tensor. This keras tensor will be concatenated with the output of the
        first convolution block.

    Returns
    -------
    con0 :
        Output tensor of the first convolution block.
    con1 :
        Output tensor of the second convolution block. This is before the residual
        connection is applied.
    resoutput :
        Output tensor from running the entire residual block. Residual connection is applied.
    """
    # First convolution block (optionally first-block / upsampled form).
    con0 = convolution_block(x, filters[0], kernel_size, padding, strides[0], act[0], dropval, name=name[0], first_block=first_block, upsample=upsample[0])
    # Optionally concatenate an encoder tensor into the middle of the block.
    if mid_skip is not None:
        con0 = skip_connection(con0, mid_skip)
    con1 = convolution_block(con0, filters[1], kernel_size, padding, strides[1], act[1], dropval, name=name[1], upsample=upsample[1])
    # Shape the shortcut path so it can be added to con1:
    if strides[2] != 1 and upsample[2] == True:
        # Spatial upsampling; a 1x1 conv follows only if channels changed.
        x = keras.layers.UpSampling2D(strides[2])(x)
        if filters_changed == True:
            x = keras.layers.Conv2D(filters[1], kernel_size=1, padding=padding, strides=1)(x)
    elif (strides[2] != 1 and upsample[2] == False):
        # Strided 1x1 conv downsamples and fixes channels in one step.
        x = keras.layers.Conv2D(filters[1], kernel_size=1, padding=padding, strides=strides[2])(x)
    elif filters_changed == True:
        # Same spatial size, different channel count: 1x1 conv only.
        x = keras.layers.Conv2D(filters[1], kernel_size=1, padding=padding, strides=1)(x)
    rescon = keras.layers.BatchNormalization()(x)
    resoutput = keras.layers.Add()([rescon, con1])
    return con0, con1, resoutput
def ResUNet_CMB(params):
    """
    ResUNet-CMB Network

    Network used in "Reconstructing Patchy Reionization with Deep Learning."

    Takes lensed Q and U polarization maps as inputs and produces three
    output maps (tau, unlensed E, kappa) from a shared encoder/decoder trunk
    with per-output residual branches.

    Parameters
    ----------
    params:
        params is a container for the variables defined in the configuration file.
        An instance of class resunet.utils.Params.

    Returns
    -------
    model :
        model object.
    """
    # Two single-channel square input maps (channels-last).
    input_img1 = Input(shape=(params.imagesize, params.imagesize, 1), dtype=np.float32, name="qlen")
    input_img2 = Input(shape=(params.imagesize, params.imagesize, 1), dtype=np.float32, name="ulen")
    # encoder
    enc_0 = keras.layers.Concatenate(axis=3)([input_img1, input_img2])
    enc_1 = residual_block(enc_0, [64,64], 5, "same", [1,1,1], ["selu","selu"], params.dropval, first_block=True, filters_changed=True)[2]
    # enc_2 keeps the full (con0, con1, resoutput) tuple: its first conv
    # output enc_2[0] is reused as a mid-block skip in the decoder below.
    enc_2 = residual_block(enc_1, [64,128], 5, "same", [1,2,2], ["selu","selu"], params.dropval, filters_changed=True)
    enc_3 = residual_block(enc_2[2], [128,128], 5, "same", [1,1,1], ["selu","selu"], params.dropval)[2]
    # bridge between encoder and decoder (downsample then upsample back)
    enc_dec = residual_block(enc_3, [256,128], 5, "same", [2,2,1], ["selu","selu"], params.dropval, upsample=[False, True, False])[2]
    lskip1 = skip_connection(enc_dec, enc_3)
    dec_1 = residual_block(lskip1, [128,128], 5, "same", [1,1,1], ["selu","selu"], params.dropval, filters_changed=True)[2]
    dec_2 = residual_block(dec_1, [64, 64], 5, "same", [2,1,2], ["selu","selu"], params.dropval, upsample=[True, False, True], mid_skip=enc_2[0], filters_changed=True)[2]
    # Block all branches use for final residual connection
    dec_3 = convolution_block(dec_2, 64, 5, "same", 1, "selu", params.dropval)
    # kappa branch (lensing convergence); residual connection back to dec_2
    kappa_1 = convolution_block(dec_3, 64, 5, "same", 1, "selu", params.dropval)
    kappa_res_1 = keras.layers.BatchNormalization()(dec_2)
    kappa_1 = keras.layers.Add()([kappa_res_1, kappa_1])
    kappa_2 = convolution_block(kappa_1, 64, 5, "same", 1, "selu", params.dropval)
    kappa_3 = convolution_block(kappa_2, 1, 5, "same", 1, "linear", params.dropval, name="kappa")
    # primordial E branch; same structure as the kappa branch
    unle_1 = convolution_block(dec_3, 64, 5, "same", 1, "selu", params.dropval)
    unle_res_1 = keras.layers.BatchNormalization()(dec_2)
    unle_1 = keras.layers.Add()([unle_res_1, unle_1])
    unle_2 = convolution_block(unle_1, 64, 5, "same", 1, "selu", params.dropval)
    unle_3 = convolution_block(unle_2, 1, 5, "same", 1, "linear", params.dropval, name="unle")
    # tau branch (patchy reionization); same structure as the kappa branch
    tau_1 = convolution_block(dec_3, 64, 5, "same", 1, "selu", params.dropval)
    tau_res_1 = keras.layers.BatchNormalization()(dec_2)
    tau_1 = keras.layers.Add()([tau_res_1, tau_1])
    tau_2 = convolution_block(tau_1, 64, 5, "same", 1, "selu", params.dropval)
    tau_3 = convolution_block(tau_2, 1, 5, "same", 1, "linear", params.dropval, name="tau")
    model = keras.models.Model(inputs=[input_img1, input_img2], outputs=[tau_3, unle_3, kappa_3])
    return model
def ResUNet_CMB_4out(params):
    """
    ResUNet-CMB 4-output Network
    Network used in "Reconstructing Cosmic Polarization Rotation with ResUNet-CMB"
    Parameters
    ----------
    params:
        params is a container for the variables defined in the configuration file.
        An instance of class resunet.utils.Params.
    Returns
    -------
    model :
        model object.
    """
    input_img1 = Input(shape=(params.imagesize, params.imagesize, 1), dtype=np.float32, name="qlen")
    input_img2 = Input(shape=(params.imagesize, params.imagesize, 1), dtype=np.float32, name="ulen")

    def _output_branch(trunk, shortcut_src, name):
        # Shared output head (previously copy-pasted four times): conv ->
        # residual add with a batch-normalized shortcut from the decoder ->
        # conv -> single-channel linear conv producing the named output map.
        branch = convolution_block(trunk, 64, 5, "same", 1, "selu", params.dropval)
        shortcut = keras.layers.BatchNormalization()(shortcut_src)
        branch = keras.layers.Add()([shortcut, branch])
        branch = convolution_block(branch, 64, 5, "same", 1, "selu", params.dropval)
        return convolution_block(branch, 1, 5, "same", 1, "linear", params.dropval, name=name)

    # encoder
    enc_0 = keras.layers.Concatenate(axis=3)([input_img1, input_img2])
    enc_1 = residual_block(enc_0, [64,64], 5, "same", [1,1,1], ["selu","selu"], params.dropval, first_block=True, filters_changed=True)[2]
    enc_2 = residual_block(enc_1, [64,128], 5, "same", [1,2,2], ["selu","selu"], params.dropval, filters_changed=True)
    enc_3 = residual_block(enc_2[2], [128,128], 5, "same", [1,1,1], ["selu","selu"], params.dropval)[2]
    # bridge between encoder and decoder
    enc_dec = residual_block(enc_3, [256,128], 5, "same", [2,2,1], ["selu","selu"], params.dropval, upsample=[False, True, False])[2]
    lskip1 = skip_connection(enc_dec, enc_3)
    dec_1 = residual_block(lskip1, [128,128], 5, "same", [1,1,1], ["selu","selu"], params.dropval, filters_changed=True)[2]
    dec_2 = residual_block(dec_1, [64, 64], 5, "same", [2,1,2], ["selu","selu"], params.dropval, upsample=[True, False, True], mid_skip=enc_2[0], filters_changed=True)[2]
    # Block all branches use for the final residual connection
    dec_3 = convolution_block(dec_2, 64, 5, "same", 1, "selu", params.dropval)
    # One identical head per physical output (kappa, primordial E, tau, alpha).
    kappa_3 = _output_branch(dec_3, dec_2, "kappa")
    unle_3 = _output_branch(dec_3, dec_2, "unle")
    tau_3 = _output_branch(dec_3, dec_2, "tau")
    cbf_3 = _output_branch(dec_3, dec_2, "cbf")
    model = keras.models.Model(inputs=[input_img1, input_img2], outputs=[tau_3, unle_3, kappa_3, cbf_3])
    return model
|
import praw, re
from urlextract import URLExtract
import json, csv
# SECURITY NOTE(review): the API client id/secret are hard-coded below; move them
# to environment variables or a config file before sharing this script.
_reddit = praw.Reddit(client_id='U-9whxE5yXShxA', client_secret='OQVVmWYy2rediR-6jm0zophXizM', user_agent='python:fact:1.00 (by /u/kalebr80)')
def writeToFiles(stances, body):
    """Dump the scraped stance and body dicts to the two FNC-style CSV files.

    Files are opened with newline='' as required by csv.writer; without it the
    writer produces doubled line endings (blank rows) on Windows.
    """
    with open('./fakeNews/stances_test_reddit.csv', 'w', newline='') as out:
        writer = csv.writer(out)
        stances_header = ['Headline', 'Body ID', 'submissionId', 'commentId']
        writer.writerow(stances_header)
        for stance in stances:
            writer.writerow([stances[stance]['title'],
                             stances[stance]['Body ID'],
                             stances[stance]['id'],
                             stances[stance]['comment_id']])
    with open('./fakeNews/body_test_reddit.csv', 'w', newline='') as bout:
        writer = csv.writer(bout)
        body_header = ['Body ID', 'articleBody']
        writer.writerow(body_header)
        for bod in body:
            writer.writerow([body[bod]['Body ID'], body[bod]['articleBody']])
def save_submission_db():
    """Scrape hot posts of r/politicalfactchecking and pair each comment with
    its submission, returning (stances, body) dicts keyed by a running id."""
    stances = {}
    body = {}
    count = 0
    for num, sub in enumerate(_reddit.subreddit('politicalfactchecking').hot(limit=550)):
        sub.comments.replace_more(limit=0)
        # Submission-level fields are the same for every comment of this post.
        sub_id = sub.id
        title = sub.title.strip().replace('\n', '')
        selftext = sub.selftext.strip().replace('\n', '')
        for y, comment in enumerate(sub.comments):
            print ('working on {}-{}'.format(num, y))
            stances[count] = {
                'id': sub_id,
                'title': title,
                'selftext': selftext,
                'Body ID': count,
                'comment_id': comment.id,
            }
            body[count] = {
                'Body ID': count,
                'articleBody': comment.body.strip().replace('\n', ''),
            }
            count += 1
    return stances, body
# Script entry: scrape Reddit, then dump the stance/body CSV files.
stances, body = save_submission_db()
writeToFiles(stances, body)
|
import discord
import cogs.utils.checks
from discord.ext import commands
from cogs.utils.embed import (passembed, errorembed)
class ErrorHandler(commands.Cog):
    """Global fallback handler for command errors raised anywhere in the bot."""

    def __init__(self, bot):
        self.bot = bot

    @commands.Cog.listener()
    async def on_command_error(self, ctx, error, bypass=False):
        # Do nothing if the command/cog has its own error handler.
        # FIX: the original condition read `A or B and not bypass`, which due
        # to operator precedence parsed as `A or (B and not bypass)`, and it
        # used `pass` (falling through to the handlers below) instead of
        # returning. Both are corrected here.
        if (
            hasattr(ctx.command, "on_error")
            or (ctx.command and hasattr(ctx.cog, f"{ctx.command.cog_name}_error"))
        ) and not bypass:
            return
        if isinstance(error, commands.MissingRequiredArgument):
            eembed = errorembed(description=f'Command Usage: ``{ctx.prefix}{ctx.command.name} <{error.param.name}>``')
            await ctx.send(embed=eembed)
        if isinstance(error, cogs.utils.checks.EconomyChannel):
            lootLake = discord.utils.get(ctx.message.guild.channels, name='loot-lake').mention
            pleasantPark = discord.utils.get(ctx.message.guild.channels, name='pleasant-park').mention
            tiltedTowers = discord.utils.get(ctx.message.guild.channels, name='tilted-towers').mention
            eembed = errorembed(description=f'{ctx.author.mention} Command can only be used in {lootLake}, {pleasantPark}, {tiltedTowers}')
            return await ctx.send(embed=eembed)
        elif isinstance(error, cogs.utils.checks.NoDonator):
            titanDonor = discord.utils.get(ctx.message.guild.roles, name='Titan Donator').mention
            mysticDonor = discord.utils.get(ctx.message.guild.roles, name='Mystic Donator').mention
            immortalDonor = discord.utils.get(ctx.message.guild.roles, name='Immortal Donator').mention
            # Message grammar fixed ("Want to be claim" -> "Want to claim").
            eembed = errorembed(description=f'{ctx.author.mention} Want to claim this **Supporter** kit? You have to be either a {titanDonor}, {mysticDonor} or {immortalDonor}')
            return await ctx.send(embed=eembed)
        elif isinstance(error, cogs.utils.checks.NotRegistered):
            eembed = errorembed(description=f'{ctx.author.mention} You are currently not registered yet. Kindly type ``.register`` to be registered.')
            return await ctx.send(embed=eembed)
# Adding the cog to main script
def setup(bot):
    """Entry point used by discord.py's load_extension to register this cog."""
    cog = ErrorHandler(bot)
    bot.add_cog(cog)
|
import os
_version_file = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'version.txt')
_version = None
class IncorrectVersion(Exception):
    """Raised when version.txt does not contain a usable version string."""
    pass
def get_version():
    """Return the application version, reading version.txt once and caching it."""
    global _version
    if _version:
        return _version
    _version = _read_version()
    return _version
def _read_version():
    """Read the first line of version.txt; raise IncorrectVersion if it is empty."""
    with open(_version_file) as fh:
        version = fh.readline().strip()
    if not version:
        raise IncorrectVersion("Incorrect version of application")
    return version
|
import math
from random import random
from config import *
def get_best_individual(population):
    """Return (path_length, individual) of the strictly shortest-path individual.

    If the population is empty (or every path length is infinite) the result
    is (inf, None), matching the strict `<` comparison.
    """
    best_length, best = float("Inf"), None
    for candidate in population.individuals:
        if candidate.path_length < best_length:
            best_length, best = candidate.path_length, candidate
    return best_length, best
def depot_cluster(depots, customers, bound=None):
    """Assign each customer to its nearest depot and record borderline depots.

    Parameters
    ----------
    depots : sequence of depot records (coordinates in [0] and [1]).
    customers : sequence of customer records; customer[1], customer[2] are x, y.
    bound : relative extra-distance threshold for calling a depot "borderline".
        Defaults to config.DISTANCE_BOUND. (The original unconditionally
        overwrote the argument with DISTANCE_BOUND, making it dead; an
        explicitly passed bound is honored now.)

    Returns
    -------
    (nearest_customers, borderline) : per-depot lists of customer indices.
    """
    if bound is None:
        bound = DISTANCE_BOUND
    nearest_customers = [[] for _ in depots]
    borderline = [[] for _ in depots]
    for i, customer in enumerate(customers):
        nearest_depot, nearest_depot_dist = get_nearest_depot(depots, customer)  # e.g. (1, 156.2)
        nearest_customers[nearest_depot].append(i)
        # e.g. [2, 4, 5]: depots almost as close as the nearest one
        for j in get_borderline_depots(depots, customer, nearest_depot, nearest_depot_dist, bound):
            borderline[j].append(i)
    return nearest_customers, borderline
def get_nearest_depot(depots, customer):
    """Return (index, distance) of the depot closest to the customer's (x, y)."""
    nearest, nearest_dist = None, float("Inf")
    point = (customer[1], customer[2])
    for idx, depot in enumerate(depots):
        candidate = euclideanDistance(depot, point)
        if candidate < nearest_dist:
            nearest, nearest_dist = idx, candidate
    return nearest, nearest_dist
def get_borderline_depots(depots, customer, nearest_depot, nearest_depot_dist, bound):
    """Indices of non-nearest depots whose relative extra distance is within bound."""
    point = (customer[1], customer[2])
    borderline_depots = []
    for idx, depot in enumerate(depots):
        if idx == nearest_depot:
            continue
        borderline_dist = euclideanDistance(depot, point)
        if (borderline_dist - nearest_depot_dist) / nearest_depot_dist <= bound:
            borderline_depots.append(idx)
    return borderline_depots
def euclideanDistance(coordinate1, coordinate2):
    """Straight-line distance between two 2-D points."""
    dx = coordinate1[0] - coordinate2[0]
    dy = coordinate1[1] - coordinate2[1]
    return (dx ** 2 + dy ** 2) ** 0.5
def flatten(array):
    """Flatten one level of nesting: a list of lists into a single list."""
    flat = []
    for sublist in array:
        flat.extend(sublist)
    return flat
def get_path_length(vehicles, depots_params, customers_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers):
    """Total tour length across all depots, plus per-depot vehicle lengths.

    `vehicles` is a list (one entry per depot) of lists of vehicle routes.
    Returns (path_length, lengths) where lengths[i] is the list of individual
    vehicle path lengths for depot i.  (The unused `curr_depot_coords` local
    from the original has been removed.)
    """
    path_length = 0
    lengths = []
    for depot_index, depot_vehicles in enumerate(vehicles):
        depot_total, vehicle_path_lengths = get_depot_path_length(
            depot_vehicles, depot_index, customers_params, depots_params,
            num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
        path_length += depot_total
        lengths.append(vehicle_path_lengths)
    return path_length, lengths
def get_depot_path_length(vehicles, depot_index, customers, depots, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers):
    """Path length for all vehicles of one depot, from precomputed distance tables.

    Each multi-customer route is depot -> first -> ... -> last -> depot; a
    single-customer route is charged the depot->customer distance twice, as in
    the original.  `customers`, `depots` and `num_vehicles` are kept for
    interface compatibility; distances come entirely from the lookup tables.

    Returns (total_length, per_vehicle_lengths); empty routes contribute nothing.
    """
    path_length = 0
    vehicle_path_lengths = []
    for vehicle in vehicles:
        if len(vehicle) > 1:
            # Depot -> first customer, consecutive customer legs, last -> depot.
            out_leg = depots_2_customers[depot_index][vehicle[0]]
            legs = sum(customer_2_customer[a][b] for a, b in zip(vehicle, vehicle[1:]))
            back_leg = customer_2_depots[vehicle[-1]][depot_index]
            vehicle_length = out_leg + legs + back_leg
            path_length += vehicle_length
            vehicle_path_lengths.append(vehicle_length)
        elif len(vehicle) == 1:
            round_trip = depots_2_customers[depot_index][vehicle[0]] * 2
            path_length += round_trip
            vehicle_path_lengths.append(round_trip)
    return path_length, vehicle_path_lengths
def get_route_load(vehicle, depots_params, customers_params):
    """Sum of the demands (column 4) of all customers on this route.

    `depots_params` is accepted for interface compatibility but not used.
    """
    return sum(customers_params[customer][4] for customer in vehicle)
def construct_vehicles(gene, customers, depots, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers):
    """Build the vehicle routes for every depot in the gene.

    The original declared four locals (`curr_route`, `route_load_cost`,
    `depot_max_load`, `depot_vehicles`) that were never used; they are removed.
    """
    return [
        construct_route(depot, i, customers, depots, num_vehicles,
                        customer_2_customer, customer_2_depots, depots_2_customers)
        for i, depot in enumerate(gene)
    ]
def randint(start, stop):
    """Uniform random integer in [start, stop) (stop exclusive, unlike random.randint).

    Note: truncation is toward zero, matching the original int() conversion.
    """
    span = stop - start
    return int(start + span * random())
def construct_route(depot, depot_index, customers, depots, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers):
    """Split one depot's ordered customer list into vehicle routes.

    Phase 1 greedily fills routes subject to the depot's load limit
    (depots[depot_index][3]) and max route duration (depots[depot_index][2]);
    a limit of 0 means unlimited.  Phase 2 then tries moving each route's last
    customer to the front of the next route, keeping the move only when it is
    feasible and shortens the total path (see the module comment below).
    """
    #vehicles = construct_route(depot, depot_index, customers, depots, num_vehicles)
    curr_depot_coords = (depots[depot_index][0], depots[depot_index][1])
    vehicles = []
    depot_max_load = depots[depot_index][3] if depots[depot_index][3] != 0 else float("Inf")
    route_load_cost = 0
    route_load_costs = []
    route_max_duration = depots[depot_index][2] if depots[depot_index][2] != 0 else float("Inf")
    curr_route = []
    for customer_index in depot:
        # Demand of a customer is stored in column 4 of its record.
        can_carry_load = route_load_cost + customers[customer_index][4] <= depot_max_load
        if(can_carry_load):
            curr_route.append(customer_index)
            route_load_cost += customers[customer_index][4]
            # If adding this customer pushed the route over its duration
            # limit, move it to the start of a fresh route.
            if(get_depot_path_length([curr_route], depot_index, customers, depots, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)[0] > route_max_duration):
                last_customer = curr_route.pop()
                vehicles.append(curr_route)
                curr_route = [last_customer]
                route_load_costs.append(route_load_cost)
                route_load_cost = customers[last_customer][4]
        else:
            # Load limit reached: close the current route and start a new one.
            vehicles.append(curr_route)
            curr_route = [customer_index]
            route_load_costs.append(route_load_cost)
            route_load_cost = customers[customer_index][4]
    if(len(curr_route) != 0):
        vehicles.append(curr_route)
        route_load_costs.append(route_load_cost)
    length, lengths = get_path_length([vehicles], customers, depots, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
    # NOTE(review): `lenghts` looks like a typo for `lengths`; the value is
    # never read afterwards, so it is a dead assignment -- confirm intent.
    lenghts = lengths[0]
    # Phase 2: relocate each route's last customer to the head of the next route.
    for i in range(len(vehicles)-1):
        if len(vehicles[i]) == 0:
            continue
        customer_to_move = vehicles[i].pop()
        vehicles[i+1].insert(0, customer_to_move)
        if route_load_costs[i+1] + customers[customer_to_move][4] > depot_max_load:
            # Infeasible for the receiving route: undo the move.
            #print("Cannot move:(")
            customer_to_move_back = vehicles[i+1].pop(0)
            vehicles[i].append(customer_to_move_back)
        else:
            new_length, new_lengths = get_path_length([vehicles], customers, depots, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
            if new_length > length:
                # Not an improvement: undo the move.
                #print("Was not shorter buu")
                customer_to_move_back = vehicles[i+1].pop(0)
                vehicles[i].append(customer_to_move_back)
            else:
                # Keep the improved configuration.
                #print("Found better path woo")
                length, lengths = new_length, new_lengths[0]
    return vehicles
'''
In Phase 2, the last customer of each route ri
is relocated to become the first customer of route ri+1.
If this removal and insertion maintains feasibility for route ri+1,
and the sum of costs of ri and ri+1 at Phase 2 is less than the sum of costs of ri + ri+1
at Phase 1, the routing configuration at Phase 2 is accepted;
otherwise the route network before Phase 2 (that is, at Phase 1) is maintained.
'''
def construct_child_gene(parent_gene1, parent_gene2, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers):
    """Crossover operator: pick a random depot, remove one random route of each
    parent from the other parent, then re-insert those routes via crossover().

    Returns a (child1, child2) pair of genes (deep copies of the parents when
    crossover is skipped).
    """
    copy_p1 = [x[:] for x in parent_gene1]
    copy_p2 = [x[:] for x in parent_gene2]
    rand_depot = randint(0, len(depots_params))
    #print("rand depot", rand_depot)
    routes_p1 = construct_route(parent_gene1[rand_depot], rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
    routes_p2 = construct_route(parent_gene2[rand_depot], rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
    # print("Routes1", routes_p1)
    # print("Routes2", routes_p2)
    # print("\n")
    # NOTE(review): `T` is not defined in this file; presumably it comes from
    # `from config import *` and acts as a global "disable crossover" switch --
    # confirm, otherwise this is a NameError at runtime.
    if(T):
        return copy_p1, copy_p2
    if(routes_p1 == []):
        return copy_p1, copy_p2
    if(routes_p2 == []):
        return copy_p1, copy_p2
    rand_route_p1 = routes_p1[randint(0, len(routes_p1))]
    rand_route_p2 = routes_p2[randint(0, len(routes_p2))]
    # Remove the customers of the selected routes from the opposite parent.
    for i in rand_route_p1:
        for j in range(len(copy_p2)-1, -1, -1):
            if(i in copy_p2[j]):
                copy_p2[j].remove(i)
    for i in rand_route_p2:
        for j in range(len(copy_p1)-1, -1, -1):
            if(i in copy_p1[j]):
                copy_p1[j].remove(i)
    child1 = crossover(copy_p2, rand_route_p1, rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
    child2 = crossover(copy_p1, rand_route_p2, rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
    return child1, child2
def crossover(parent, rand_route, rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers):
    """Re-insert the customers of `rand_route` into `parent[rand_depot]`.

    With probability 0.8 each customer is inserted at the first position (the
    feasibility check was commented out, so the inner loop always breaks at
    j == 0); otherwise candidate insertions are validated with construct_route
    and the shortest one is kept.
    """
    if(random() < 0.8):
        for i in rand_route:
            for j, _ in enumerate(parent[rand_depot]):
                temp_depot_copy = parent[rand_depot][:]
                temp_depot_copy.insert(j, i)
                #if construct_route(temp_depot_copy, rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers) != None: #Route with customer i in location j was is_valid
                parent[rand_depot] = temp_depot_copy[:]
                break
    else:
        for i in rand_route:
            valids = []
            for j, _ in enumerate(parent[rand_depot]):
                temp_depot_copy = parent[rand_depot][:]
                temp_depot_copy.insert(j, i)
                r = construct_route(temp_depot_copy, rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)
                if r != None: #Route with customer i in location j was is_valid
                    valids.append([temp_depot_copy, r])
        # NOTE(review): `valids` is reset inside the loop above, so only the
        # candidates for the LAST customer of rand_route reach the selection
        # below, and `i` is reused as the enumerate index -- confirm whether
        # the selection was meant to run once per customer instead.
        best_depot_length = float("Inf")
        best_depot_index = 0
        for i, v in enumerate(valids):
            vehicles = v[1]
            curr_length = get_depot_path_length(vehicles, rand_depot, customers_params, depots_params, num_vehicles, customer_2_customer, customer_2_depots, depots_2_customers)[0]
            if(curr_length < best_depot_length):
                best_depot_length = curr_length
                best_depot_index = i
        if(len(valids) == 0):
            return parent
        parent[rand_depot] = valids[best_depot_index][0]
    return parent
#depot = depot index in gene
#vehicle = vehicle index in depot
def enhance_vehicle_path(gene, depot, vehicle, depots_params, customers_params):
    """Placeholder for a local-search improvement of one vehicle's path (not implemented)."""
    pass
|
class ThreeStack1:
    """Naive "three stacks" container: just keeps the three lists side by side."""
    def __init__(self, l1, l2, l3):
        # Each input list is its own independent stack.
        self.stack = [l1, l2, l3]
class ThreeStack2:
    """Three stacks packed into one fixed-size backing list split into thirds.

    Fixes from the original: the missing ':' on __init__ (syntax error) and
    `for i in range(l)` iterating over a list instead of `range(len(l))`
    (TypeError).  The backing size is now a parameter defaulting to the
    original hard-coded 10**8.
    """
    def __init__(self, l1, l2, l3, size=10 ** 8):
        self.stack = [0] * size
        # Start offsets of the three regions.
        self.l1_st = 0
        self.l2_st = len(self.stack) // 3
        self.l3_st = len(self.stack) // 3 * 2
        for l, st in zip([l1, l2, l3], [self.l1_st, self.l2_st, self.l3_st]):
            for i in range(len(l)):
                self.stack[st + i] = l[i]
    # TODO movable version
# TODO movable version
|
import itchat
# NOTE: logging in at import time is a side effect (pops a QR code to scan).
itchat.auto_login()
import math
import math  # NOTE(review): duplicate import; harmless but could be removed.
import os
import PIL.Image as Image
def get_friends_lists():
    """Fetch the full WeChat friends list and return (friends, user).

    `user` is friends[0]["UserName"] -- presumably the logged-in account in
    itchat (confirm); it doubles as the output directory name.
    """
    friends = itchat.get_friends(update=True)[0:]
    print(friends)
    user = friends[0]["UserName"]
    print(user)
    # @c5a69b45e0b9ad6f910f282847a69e08ffb83f77f91b33f5ba39868a3eb66ae3
    # NOTE(review): os.mkdir raises FileExistsError if the folder already exists.
    os.mkdir(user)
    return (friends,user)
def get_friend_image(i,friend,user):
    """Download one friend's avatar via itchat and save it as <user>/<i>.jpg.

    Fix: the explicit f.close() inside the `with` block was redundant --
    the context manager already closes the file.
    """
    print("get_friend_image:进入函数")
    print("get_friend_image:获取img")
    img=itchat.get_head_img(userName=friend['UserName'])
    print("get_friend_image:img保存到文件")
    with open(user + "/" + str(i) + ".jpg",'wb') as f:
        f.write(img)
    print("get_friend_image:退出函数")
def count_for_distribution(friends):
    """Compute the mosaic layout for a 640x640 canvas.

    Returns (count, tile_size, tiles_per_row): tiles_per_row is
    ceil(sqrt(count)) and tile_size is the integer width that fits that many
    tiles in 640 pixels.
    """
    print("count_for_distribution:进入函数")
    total = len(friends)
    per_row = math.ceil(math.sqrt(float(total)))
    tile = int(640 / per_row)
    print("共有好友{0}个,每小图片的边长为{1},一行有{2}个图".format(total, tile, per_row))
    print("count_for_distribution:退出函数")
    return (total, tile, per_row)
def merge_to_total(friends,user):
    """Build the 640x640 avatar mosaic for all friends and save it as <user>.jpg.

    Fix: now returns the PIL Image -- main() assigns the result and passes it
    on, but the original implicitly returned None.
    """
    print("merge_to_total:开始进入函数")
    print("merge_to_total:下面计算布局")
    numpic, eachsize, numline = count_for_distribution(friends)
    print("merge_to_total:下面新建一张总图")
    # Create as RGBA then convert to RGB: saving RGBA as JPEG raises
    # "OSError: cannot write mode RGBA as JPEG".
    total_image = Image.new('RGBA', (640, 640)).convert('RGB')
    print("merge_to_total:下面开始遍历子图")
    for (i,friend) in enumerate(friends):
        print("遍历中:第{0}个朋友开始了".format(i))
        print("下面开始获取第{0}个图片".format(i))
        print("fiend type===",type(friend))
        print("fiend===",friend)
        get_friend_image(i,friend,user)
        print("下面开始合并第{0}个图片到总图片".format(i))
        each_to_total(i,friend, numline,eachsize,total_image,user)
        print("第{0}个图片处理完成,开始下一个".format(i))
    print("merge_to_total:遍历完成,下面开始保存总图")
    total_image.save(user + ".jpg")
    print("merge_to_total:退出函数")
    return total_image
def each_to_total(i,friend,numline,eachsize,total_image,user):
    """Paste the i-th saved avatar (<user>/<i>.jpg) into the mosaic at its grid cell."""
    print("each_to_total1:进入函数")
    try:
        # 打开图片 (open the saved avatar; convert to RGB so pasting into a
        # JPEG-bound canvas works)
        img = Image.open(user + "/" + str(i)+ ".jpg").convert('RGB')
        #下载的图片本身是jpg格式,但是拼接前需要转换成rgb格式,否则报错
    except IOError:
        print("Error: 没有找到文件或读取文件失败")
    else:
        # 缩小图片 (shrink to one grid tile)
        img = img.resize((eachsize, eachsize), Image.ANTIALIAS)
        # 拼接图片
        # 第i个图片的位置是……
        #这里需要根据i算x和y
        # NOTE(review): x is the row number and y the column, but paste() takes
        # (horizontal, vertical) -- so the grid fills top-to-bottom first.
        # Presumably intentional (transposed fill); confirm.
        x=int(i/numline)
        y=i%numline
        print("each_to_total2:位置信息{0} {1}".format(x,y))
        #假如numline=5,i=10,则x=2,y=0
        total_image.paste(img, (x * eachsize, y * eachsize))
        #total_image.save(user + ".jpg")
        print("each_to_total3:退出函数")
def send_to_user(user,total_image):
    """Send the saved mosaic (<user>.jpg) to WeChat's file helper.

    NOTE(review): `total_image` is accepted but unused -- the image is
    re-read from disk by filename instead.
    """
    itchat.send_image(user+".jpg",'filehelper')
def main():
    """Fetch friends, build the avatar mosaic, and send it to the file helper."""
    friends,user=get_friends_lists()
    print("main:已经获取到朋友们的信息了")
    print('main:共有好友{0}个'.format(len(friends)))
    print("main:下面开始生成总图片")
    print()
    total_image=merge_to_total(friends,user)
    print("main:下面把总图片发送给你")
    print()
    send_to_user(user,total_image)
if __name__=='__main__':
main() |
from csi_soap_test import CsiSoapTest
import csv
if __name__ == "__main__":
    # Compare csoap vs gsoap timing logs and append the results to a CSV.
    csoap_test = CsiSoapTest("d:\\csi_use_csoap.log")
    gsoap_test = CsiSoapTest("d:\\csi_use_gsoap.log")
    header = [''] + [i + 1 for i in range(20)]
    # `with` guarantees the CSV is flushed and closed (the original never
    # closed the handle).
    with open('d:\\test_result.csv', 'a', newline="") as out:
        csv_write = csv.writer(out, dialect='excel')
        csv_write.writerow(header)
        csv_write.writerow(['csoap query patient(s)'] + csoap_test.queryPatient())
        csv_write.writerow(['gsoap_query_patient(s)'] + gsoap_test.queryPatient())
        all_patient = csoap_test.allPatient()
        list_objects = csoap_test.listObjects()
        for patient in all_patient:
            csv_write.writerow([patient] + list_objects.get(patient))
        list_objects1 = gsoap_test.listObjects()
        for patient in all_patient:
            csv_write.writerow([patient] + list_objects1.get(patient))
        all_id = csoap_test.getPresentation()
        presentationstate1 = csoap_test.allPresentationId()
        presentationstate2 = gsoap_test.allPresentationId()
        # NOTE(review): both loops below write identical rows from `all_id`;
        # the second was presumably meant to use presentationstate1/2 (which
        # are otherwise unused) -- confirm before changing the output.
        for id in all_id:
            csv_write.writerow([id] + all_id.get(id))
        for id in all_id:
            csv_write.writerow([id] + all_id.get(id))
|
def isBalanced(self, root):
    """Return True if the binary tree rooted at `root` is height-balanced.

    A tree is height-balanced when, at every node, the heights of the left and
    right subtrees differ by at most one.  Single post-order pass returning
    (height, balanced) per subtree, so each node is visited once.
    (Idiom fix: `node == None` -> `node is None`.)
    """
    def helper(node):
        if node is None:
            # Empty subtree: height 0, trivially balanced.
            return (0, True)
        l_height, l_balanced = helper(node.left)
        r_height, r_balanced = helper(node.right)
        height = max(l_height, r_height) + 1
        balanced = l_balanced and r_balanced and abs(l_height - r_height) <= 1
        return (height, balanced)
    return helper(root)[-1]
#! /usr/local/bin/python3
import math
class BinarySearch:
    """Classic iterative binary search over a sorted list."""

    def search(self, element, sorted_list):
        """Return the index of `element` in `sorted_list`, or -1 if absent.

        Fix: the loop condition was `left < right`, which never examined the
        final one-element window and so missed matches at a collapsing
        boundary (e.g. the last element of the list, or a single-element
        list); it must be `left <= right`.
        """
        if sorted_list is None or len(sorted_list) <= 0:
            return -1
        left = 0
        right = len(sorted_list) - 1
        while left <= right:
            # Overflow-safe midpoint (kept from the original's left + (r-l)/2).
            mid = left + (right - left) // 2
            if element == sorted_list[mid]:
                return mid
            if element < sorted_list[mid]:
                right = mid - 1
            else:
                left = mid + 1
        return -1

    def test(self):
        """Ad-hoc timing demo: print the found index and elapsed milliseconds."""
        import time
        start = time.process_time()
        result = self.search(50, [0, 10, 21, 22, 33, 40, 44, 45, 50, 51])
        end = time.process_time()
        print(result, (end - start) * 1000)
if __name__ == '__main__':
    # Run the quick self-check when executed as a script.
    BinarySearch().test()
|
# https://www.hackerrank.com/challenges/write-a-function/problem
def is_leap(year):
    """Return True for a Gregorian leap year within the supported
    range 1900..10**5 (divisible by 4, except centuries not divisible by 400)."""
    leap = False
    if 1900 <= year <= 100000:
        leap = year % 4 == 0 and (year % 100 != 0 or year % 400 == 0)
    return leap
# Manual spot-checks against the known Gregorian leap-year rules.
# year = int(input())
year = 2000
print(is_leap(year)) # expected True
year = 2400
print(is_leap(year)) # expected True
year = 1992
print(is_leap(year)) # expected True
print()
year = 1800
print(is_leap(year)) # expected False
year = 1900
print(is_leap(year)) # expected False
year = 2100
print(is_leap(year)) # expected False
year = 2200
print(is_leap(year)) # expected False
year = 2300
print(is_leap(year)) # expected False
year = 2500
print(is_leap(year)) # expected False
|
import constants as c
import random
class ROBOT:
    """Star-shaped legged robot for a pyrosim-style simulator.

    NOTE(review): the sim.send_* calls appear to assign ids in call order, so
    the order of statements in every send_* method must be preserved exactly
    -- confirm against the simulator API before reordering anything.
    """
    def __init__(self,sim,wts):
        # Build the robot in dependency order: bodies, joints, sensors,
        # neurons, then the synapses weighted by `wts`.
        self.send_objects(sim)
        self.send_joints(sim)
        self.send_sensors(sim)
        self.send_neurons(sim)
        self.send_synapses(sim, wts)
    def send_objects(self, sim):
        """Create the central box O0, four upper legs O1-O4, four lower legs O5-O8."""
        # self.whiteObject = sim.send_cylinder( x=0 ,y=0 , z=0.6 , length=1.0 , radius=0.1)
        # self.redObject = sim.send_cylinder( x=0 , y=0.5 , z=1.1, r=1, g=0, b=0, r1=0, r2=1, r3=0 )
        self.O0 = sim.send_box(x=0, y=0, z=c.L + c.R, length=c.L, width=c.L, height=2 * c.R, r=0.5, g=0.5, b=0.5)
        self.O1 = sim.send_cylinder(x=0, y=c.L, z=c.L + c.R, length=c.L, radius =c.R, r=1, g=0, b=0, r1=0, r2=1, r3=0)
        self.O2 = sim.send_cylinder(x=c.L, y=0, z=c.L + c.R, length=c.L, radius=c.R, r=0, g=1, b=0, r1=1, r2=0, r3=0)
        self.O3 = sim.send_cylinder(x=0, y= -c.L, z=c.L + c.R, length=c.L,radius=c.R, r=0, g=0, b=1, r1=0, r2=1, r3=0)
        self.O4 = sim.send_cylinder(x=-c.L, y=0, z=c.L + c.R, length=c.L, radius=c.R, r=1, g=0, b=1, r1=1, r2=0, r3=0)
        self.O5 = sim.send_cylinder(x=0, y=1.5*c.L, z=c.L - 1.5*c.R, length=c.L, radius=c.R, r=1, g=0, b=0)
        self.O6 = sim.send_cylinder(x=1.5*c.L, y=0, z=c.L - 1.5*c.R, length=c.L, radius=c.R, r=0, g=1, b=0)
        self.O7 = sim.send_cylinder(x=0, y=-1.5*c.L, z=c.L - 1.5*c.R, length=c.L, radius=c.R, r=0, g=0, b=1)
        self.O8 = sim.send_cylinder(x=-1.5*c.L, y=0, z=c.L - 1.5*c.R, length=c.L, radius=c.R, r=1, g=0, b=1)
        # Index the bodies 0-8 for the dict-driven loops in later methods.
        self.O = {}
        self.O[0] = self.O0
        self.O[1] = self.O1
        self.O[2] = self.O2
        self.O[3] = self.O3
        self.O[4] = self.O4
        self.O[5] = self.O5
        self.O[6] = self.O6
        self.O[7] = self.O7
        self.O[8] = self.O8
    def send_joints(self, sim):
        """Hinge joints: body-to-upper-leg (even) and upper-to-lower-leg (odd), all limited to +/- pi/2."""
        self.J0 = sim.send_hinge_joint( first_body_id = self.O0 , second_body_id = self.O1, x=0, y=c.L/2, z=c.L + c.R, n1=-1, n2=0, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J1 = sim.send_hinge_joint( first_body_id = self.O1, second_body_id = self.O5, x=0, y=1.5*c.L, z=c.L + c.R, n1=-1, n2=0, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J2 = sim.send_hinge_joint( first_body_id = self.O0, second_body_id = self.O2, x=c.L/2, y=0, z=c.L + c.R, n1=0, n2=1, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J3 = sim.send_hinge_joint( first_body_id = self.O2, second_body_id = self.O6, x=1.5*c.L, y=0, z=c.L + c.R, n1=0, n2=1, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J4 = sim.send_hinge_joint( first_body_id = self.O0, second_body_id = self.O3, x=0, y=-c.L/2, z=c.L + c.R, n1=1, n2=0, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J5 = sim.send_hinge_joint( first_body_id = self.O3, second_body_id = self.O7, x=0, y=-1.5*c.L, z=c.L + c.R, n1=1, n2=0, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J6 = sim.send_hinge_joint( first_body_id = self.O0, second_body_id = self.O4, x=-c.L/2, y=0, z=c.L + c.R, n1=0, n2=-1, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        self.J7 = sim.send_hinge_joint( first_body_id = self.O4, second_body_id = self.O8, x=-1.5*c.L, y=0, z=c.L + c.R, n1=0, n2=-1, n3=0, lo=-3.14159/2 , hi=3.14159/2)
        # Index the joints 0-7 for send_neurons().
        self.J = {}
        self.J[0] = self.J0
        self.J[1] = self.J1
        self.J[2] = self.J2
        self.J[3] = self.J3
        self.J[4] = self.J4
        self.J[5] = self.J5
        self.J[6] = self.J6
        self.J[7] = self.J7
    def send_sensors(self, sim):
        """Touch sensors on the four lower legs plus a position sensor on the body."""
        self.T0 = sim.send_touch_sensor( body_id = self.O5 )
        self.T1 = sim.send_touch_sensor( body_id = self.O6 )
        self.T2 = sim.send_touch_sensor( body_id = self.O7 )
        self.T3 = sim.send_touch_sensor( body_id = self.O8 )
        self.P4 = sim.send_position_sensor( body_id = self.O0 )
        # Only the touch sensors feed the neural net; P4 is for fitness readout.
        self.S = {}
        self.S[0] = self.T0
        self.S[1] = self.T1
        self.S[2] = self.T2
        self.S[3] = self.T3
        # self.P2 = sim.send_proprioceptive_sensor( joint_id = self.joint )
        # self.R3 = sim.send_ray_sensor( body_id = self.redObject , x = 0 , y = 1.1 , z = 1.1 , r1 = 0 , r2 = 1, r3 = 0)
        #R3 = sim.send_ray_sensor( body_id = redObject , x = 0 , y = 0.5 , z = 1.0 , r1 = 0, r2 = 0, r3 = -1)
    def send_neurons(self, sim):
        """One sensor neuron per touch sensor and one motor neuron per joint (tau=0.3)."""
        self.SN = {}
        self.MN = {}
        for s in self.S:
            self.SN[s] = sim.send_sensor_neuron(sensor_id = self.S[s])
        for j in self.J:
            self.MN[j] = sim.send_motor_neuron(joint_id = self.J[j], tau = 0.3)
        # self.SN0 = sim.send_sensor_neuron( sensor_id = self.T0 )
        # self.SN1 = sim.send_sensor_neuron( sensor_id = self.T1 )
        # self.SN2 = sim.send_sensor_neuron( sensor_id = self.P2 )
        # self.SN3 = sim.send_sensor_neuron( sensor_id = self.R3 )
        #
        # self.sensorNeurons = {}
        # self.sensorNeurons[0] = self.SN0
        # self.sensorNeurons[1] = self.SN1
        # self.sensorNeurons[2] = self.SN2
        # self.sensorNeurons[3] = self.SN3
        #
        # self.MN2 = sim.send_motor_neuron( joint_id = self.joint )
        # self.motorNeurons = {}
        # self.motorNeurons[0] = self.MN2
    def send_synapses(self, sim, wts):
        """Fully connect sensor neurons to motor neurons using the weight matrix wts[j, i]."""
        # for sn in self.SN:
        # firstMN = min(self.MN, key=self.MN.get)
        # sim.send_synapse(source_neuron_id = self.SN[sn] , target_neuron_id = self.MN[firstMN] , weight = random.random()*2 - 1 )
        # for s in self.sensorNeurons:
        # for m in self.motorNeurons:
        # sim.send_synapse( source_neuron_id = self.sensorNeurons[s] , target_neuron_id = self.motorNeurons[m] , weight = wts[s] )
        for j in self.SN:
            for i in self.MN:
                sim.send_synapse(source_neuron_id = self.SN[j], target_neuron_id = self.MN[i], weight = wts[j,i])
        #sim.send_synapse( source_neuron_id = SN0 , target_neuron_id = MN2 , weight = -1.0 )
#sim.send_synapse( source_neuron_id = SN0 , target_neuron_id = MN2 , weight = -1.0 )
|
#TODO: IDEAS TO INCREASE RESULT QUALITY AND USABILITY
#TODO: Analyze markup for business related information? For example, yelp uses the class
#TODO: biz-country-ca for canadian companies and biz-country-us for american companies
#TODO: When comparing against other accurate data, if we know the country/state/etc... are the same
#TODO: Then that information can be used for calculating a score for the selector because if the
#TODO: Selectors are very different, then odds are they aren't what we want since we know at least those
#TODO: values should be the same. E.G: It would filter out things like reviews that mention the city since
#TODO: lots of information in the tag is likely only good if it's something like "555 road, 90210, saskatoon, sk, ca"
#TODO: instead of "Hey, I went to saskatoon and tried this great food"
#TODO: Make sure accurate data includes edge cases and multiple cities/states/countries in case there are
#TODO: classes/attributes/tags that are location specific.
#TODO: Could pass the soup/comparison soups into get_single_occurrence_selectors so that it can be more easily
#TODO: used to compare segments of a page instead of just different pages. This would make it usable for finding
#TODO: things like multiple reviews on a page, or listings when all the listings are on one page, or analyzing
#TODO: search results
#TODO: Build python code that gets executed instead of using select? Would this be faster/have more options?
#TODO: Would it be a good fallback if select is actually faster, but more limited (it is more limited)?
#TODO: make skip_substring_check go down to a datatype level on the url. So if we want to check state, then
#TODO: skip_substring_check will return False if the state is a substring of another datatype value in that data,
#TODO: but if we are checking city then skip_substring_check may return differently. E.G.: If city is a substring of
#TODO: company name, then it would return True
#TODO: in get_selector_for_each_data_instance we may want to use a couple for initial comparison,
#TODO: or build the unique selectors for every successful case in any of the pages
#TODO: so that if this page happens to suck we don't just fail
import re
from bs4 import BeautifulSoup
from itertools import combinations, chain
from operator import add
from urllib2 import urlopen
DEBUG = False  # enables verbose tracing via SourceScraper._debug()
INFO = False   # NOTE(review): not referenced in the visible portion of the file
class SourceScraper(object):
    """Derive reusable CSS selectors for scraping listing data from a source site.

    Given example pages whose correct data is already known
    (accurate_source_data), it searches each page's DOM for that data and
    builds/scores CSS selectors that locate it, so the same selectors can be
    reused on other pages of the same source.
    NOTE(review): Python 2 code (urllib2, iteritems/iterkeys, print statements,
    xrange) — porting to Python 3 would touch nearly every method.
    """
    CONTENT_TYPES = ['name', 'phone_number', 'city', 'state', 'zip', 'address']
    # BeautifulSoup's name for the root of the parse tree; used to stop walking up.
    TERMINAL_TAG = '[document]'
    BAD_TAGS = ['script']
    BAD_CLASSES = [] # Might want to avoid using classes for things like page formatting in jquery
    BAD_ATTRIBUTES_NAMES = []
    GOOD_ATTRIBUTE_PRIORITY = ['itemprop']
    GOOD_TAG_PRIORITY = ['address', 'title', 'a', 'span', 'li']
    GOOD_CLASS_PRIORITY = []
    MIN_SELECTOR_SCORE = 0 # Out of 100
    # Relative weights applied when combining sub-scores in get_selector_score.
    SUB_SCORE_WEIGHTS = {
        'good_content': 1,
        'bad_content': 1,
        'selector_quality': 1,
        'selector_success': 1,
    }
    # Templates for selector segments built from a tag's class attribute.
    CLASS_SELECTION_TEMPLATES = [
        u'.{classes}',
        u'{tag}.{classes}',
        u'{parent}{separator}.{classes}',
        u'{parent}{separator}{tag}.{classes}',
    ]
    ATTR_SELECTION_TEMPLATE = u'{parent}{separator}{tag}[{attr}{sign}{attr_value}]'
    ID_SELECTION_TEMPLATE = u'#{0}'
    PRIMARY_POSITION_TEMPLATE = u'{parent}{separator}{tag}'
    ALTERNATE_POSITION_TEMPLATE = u'{parent}{separator}{tag}:nth-of-type({position})'
    def __init__(self, accurate_source_data, source_templates):
        """
        :arg accurate_source_data: A dictionary of listing information known to be correct in the format
            {
                '<url to listing>: {
                    '<data type>': '<content expected to be found>',
                    ...
                    '<data type with multiple segments>: {
                        '<segment name>': '<content expected to be found>',
                        ...
                    },
                    ...
                },
                ...
            }
        :arg source_templates: a dict of the templates that data types split into segments should follow in the format
            {
                '<data type with multiple segments>': '<string template for said segments>',
                ...
            }
        """
        self.accurate_source_data = accurate_source_data
        self.source_templates = source_templates
        # Fetch/parse every page up front; _format flattens segmented values.
        self._fetch_url_data()
        self._format_accurate_source_data()
    @staticmethod
    def _debug(indent, *args):
        """
        Print the text you want if DEBUG is True
        :param indent: how many tabs the line should be indented
        :param args: what to print in the format "(<title>, <value>), (...), ..."
        """
        if DEBUG:
            for name, value in args:
                print u'{indent}{name}:\t\t{value}'.format(indent='\t'*indent, name=name, value=value)
            print
    def _fetch_url_data(self):
        """
        Prefetch all pages for optimization since the data is reviewed multiple times
        Creates test soups with new lines and capitalization removed
        """
        # NOTE(review): network fetch at construction time; no timeout/error
        # handling, so one bad URL aborts the whole scraper.
        self.soups = {
            url: BeautifulSoup(self.clean_text(urlopen(url).read()))
            for url in self.accurate_source_data.iterkeys()
        }
    def _format_accurate_source_data(self):
        """
        Applies formatting templates to convert dictionaries of values to strings and removes
        any string formatting that was also removed from the soups to make comparisons easier
        """
        for url, correct_content in self.accurate_source_data.iteritems():
            for content_type, expected_value in correct_content.iteritems():
                if isinstance(expected_value, dict):
                    # Segmented value: flatten with the source's template first.
                    expected_value = self.source_templates[content_type].format(**expected_value)
                self.accurate_source_data[url][content_type] = self.clean_text(expected_value)
    def _get_scored_id_based_selectors(self, tag_id, key_data_type, parent_selector, tag_name):
        """
        Build the (at most one) id-based selector for a tag and score it.
        :param key_data_type: the type of data the selector should find
        :param tag_id: the tag's id attribute value, or None/'' when absent
        :param parent_selector: unused here (ids are document-unique)
        :param tag_name: unused here
        :return: [] or a single-item list [(selector, success score)]
        """
        if tag_id:
            new_selector = self.ID_SELECTION_TEMPLATE.format(tag_id)
            return [(new_selector, self.check_selector_success(new_selector, key_data_type))]
        else:
            return []
    def _get_scored_class_based_selectors(self, classes, key_data_type, parent_selector, tag_name):
        """
        Build every class-based selector variant for a tag and score each one.
        Tries every template against every non-empty combination of the tag's classes.
        :param key_data_type: the type of data the selector should find
        :param classes: list of the tag's CSS classes
        :param parent_selector: selector built so far for the ancestors ('' at the root)
        :param tag_name: the tag's name (e.g. 'span')
        :return: list of (selector, success score) tuples
        """
        return [
            (new_selector, self.check_selector_success(new_selector, key_data_type))
            for new_selector
            in chain(*[
                [
                    template.format(
                        parent=parent_selector,
                        separator=' > ' if parent_selector else '',
                        tag=tag_name,
                        classes=class_combo
                    )
                    # class_combo is a dotted join, e.g. 'a.b' for classes a and b
                    for class_combo
                    in (
                        '.'.join(x)
                        for x
                        in chain(*[
                            combinations(classes, length)
                            for length in xrange(1, 1 + len(classes))
                        ])
                    )
                ]
                for template
                in self.CLASS_SELECTION_TEMPLATES
            ])
        ]
    def _get_scored_attribute_based_selectors(self, attributes, key_data_type, parent_selector, tag_name):
        """
        Build one [attr=value] selector per usable tag attribute and score each.
        id/class are handled by the dedicated methods and skipped here.
        :param key_data_type: the type of data the selector should find
        :param attributes: the tag's attribute dict
        :param parent_selector: selector built so far for the ancestors ('' at the root)
        :param tag_name: the tag's name
        :return: list of (selector, success score) tuples
        """
        return [
            (new_selector, self.check_selector_success(new_selector, key_data_type))
            for new_selector
            in (
                self.ATTR_SELECTION_TEMPLATE.format(
                    parent=parent_selector,
                    separator=' > ' if parent_selector else '',
                    tag=tag_name,
                    attr=attr,
                    attr_value=attr_value,
                    sign='=' if attr_value else ''
                )
                for attr, attr_value
                in attributes.iteritems()
                if attr not in ['id', 'class']
                and isinstance(attr_value, str)
                #TODO: Remove this when BeautifulSoup supports dashes in attribute names
                #TODO: https://bugs.launchpad.net/beautifulsoup/+bug/1304007 as supposed to be a fix for the issue
                and '-' not in attr
                #TODO: Change this to allow spaces when BeautifulSoup supports
                #TODO: spaces in attribute values, will also require quotes
                and ' ' not in attr_value
                and attr_value.isalnum()
            )
        ]
    def _get_scored_position_based_selectors(self, sibling_position, key_data_type, parent_selector, tag_name):
        """
        Build the positional (nth-of-type) selector for a tag and score it.
        :param key_data_type: the type of data the selector should find
        :param sibling_position: 1-based position among same-named siblings
        :param parent_selector: selector built so far for the ancestors ('' at the root)
        :param tag_name: the tag's name
        :return: a single-item list [(selector, success score)]
        """
        if sibling_position == 1:
            template = self.PRIMARY_POSITION_TEMPLATE
        else:
            template = self.ALTERNATE_POSITION_TEMPLATE
        new_selector = template.format(
            separator=' > ' if parent_selector else '',
            parent=parent_selector,
            tag=tag_name,
            position=sibling_position
        )
        return [(new_selector, self.check_selector_success(new_selector, key_data_type))]
    def build_selector(self, selector_element_list, key_data_type, selector=''):
        """
        Creates a CSS selector that
        :param selector_element_list: element descriptions from document root down
            to the target tag (as produced by build_selector_element_list);
            consumed destructively via pop(0)
        :param key_data_type: the type of data the selector should find
        :return: A CSS selector that can be used to find the type of data that key_data_type is specifying
        """
        # Depth-first search: at each DOM level try the highest-scoring segment
        # first and recurse toward the target tag; '' signals a dead branch.
        if INFO:
            print '.',
        # Work with the topmost element and return the current selector if there are no elements left
        if selector_element_list:
            element = selector_element_list.pop(0)
        else:
            if INFO:
                print '^'
            return selector
        # Build the selector segment options
        args = [key_data_type, selector, element['name']]
        selector_score_map = self._get_scored_id_based_selectors(element['attributes'].get('id'), *args)
        selector_score_map += self._get_scored_class_based_selectors(element['attributes'].get('class', []), *args)
        selector_score_map += self._get_scored_attribute_based_selectors(element['attributes'], *args)
        selector_score_map += self._get_scored_position_based_selectors(element['sibling_position'], *args)
        # Order selector list based on quality from best at 0 to worst at -1
        selector_score_map.sort(key=lambda x: x[1], reverse=True)
        # Step down a level to the child element and add a selector segment to get to that
        for best_selector, score in selector_score_map:
            more_precise_selector = self.build_selector(selector_element_list, key_data_type, selector=best_selector)
            if more_precise_selector:
                if INFO:
                    print '>',
                return more_precise_selector
            if INFO:
                print 's',
        # Ran out of selector usable segments for this level so this branch failed to find the data
        if INFO:
            print 'x'
        return ''
    @classmethod
    def build_selector_element_list(cls, current_element):
        """
        Builds a list of the tags a selector can use to find the data in current_element
        NOTE: Any class listed in BAD_CLASSES is not included as a class in the response
        :param current_element: the BeautifulSoup navigable string for the tag in which the selector should find data
        :return: A list of tags a CSS selector can use to find the data in current_element in the format
            [
                <outermost element>,
                {'name': '<tag name>', 'attributes': {<tag attributes>}, 'sibling_position': <nth-of-type number>}
                , ...,
                <current_element>
            ]
        """
        path = []
        # Walk up the tree until the parent is the document root.
        while current_element.parent.name != cls.TERMINAL_TAG:
            if current_element.name in cls.BAD_TAGS:
                # Data living inside e.g. <script> cannot be targeted usefully.
                return None
            # Count same-named preceding siblings to get the nth-of-type index.
            sibling_position = 1
            previous_sibling = current_element.previous_sibling
            while previous_sibling is not None:
                if previous_sibling.name == current_element.name:
                    sibling_position += 1
                previous_sibling = previous_sibling.previous_sibling
            # NOTE(review): mutates the soup in place by stripping BAD_CLASSES.
            current_element.attrs['class'] = [
                klass for klass in current_element.attrs.get('class', []) if klass not in cls.BAD_CLASSES
            ]
            path.insert(0, {
                'name': current_element.name,
                'attributes': current_element.attrs,
                'sibling_position': sibling_position
            })
            current_element = current_element.parent
        return path
    def check_selector_success(self, selector, key_data_type):
        """
        Using the selector on each soup and finds out how many soups for which the selector found the correct data
        :param selector: the CSS selector to use on each page
        :param key_data_type: the type of data to look for
        :return: the rate of success this selector had (0 to 1)
        """
        #TODO: Possibly make this score or more than just how many results are returned.
        #TODO: Could be the same or similar logic to the final prioritization possibly
        #TODO: Take into account ATTRIBUTE_PRIORITY
        # 100 points per page where the first match contains the expected text,
        # averaged over all pages (Python 2 integer division).
        # NOTE(review): only the first select() result is checked.
        score = reduce(
            add,
            [
                100
                for url, soup in self.soups.iteritems()
                if self.non_substring_data_instance_exists(url, selector, key_data_type)
                and len(soup.select(selector))
                and self.accurate_source_data[url][key_data_type] in soup.select(selector)[0].text
            ] or [0]
        ) / len(self.soups)
        if DEBUG:
            for soup in self.soups.itervalues():
                find = soup.select(selector)
                self._debug(0, ('CHECK SUCCESS FOR', selector), ('RESULT', find[0].text if find else 'FAILED'))
            self._debug(0, ('SUCCESS SCORE', score))
        return score
    def non_substring_data_instance_exists(self, url, selector, key_data_type):
        """
        Finds out if the data type value found is just a substring of another data type value
        :return: True if this is a real find, False if it is only substring of other data type values
        # TODO: optimize this function with skip_substring_check
        if self.skip_substring_check[url]:
            return True
        accurate_data = self.accurate_source_data.viewvalues()[0]
        for url, soup in self.soups.iteritems():
            selected_text = soup.select(selector).text
            for data_type, data_value in accurate_data:
                index = 0
                keep_trying = True
                while keep_trying:
                    selected_text.find(data_value, start=index)
        # Find out which types of content show up using the selector
        existing_content_types = accurate_data.keys()
        selector_contents = lambda x: soup.select(x) and soup.select(x)[0].contents
            and soup.select(x)[0].contents[0] or ''
        occurrence_positions = {
            existing_content_type: [
                (x.start(), x.start()+len(accurate_data[url][existing_content_type]))
                for x
                in re.finditer(
                    self.clean_text_for_regex(accurate_data[url][existing_content_type]),
                    selected_text
                )
            ]
            for existing_content_type in existing_content_types + [data_type]
        }
        """
        #TODO: Make this function work
        # NOTE(review): the intended implementation above is parked inside the
        # docstring (dead code); the method is currently a stub that always
        # treats a find as real.
        return True
    @property
    def skip_substring_check(self):
        """
        One type of data could be a substring of another type of data which can screw up finding the best
        selector. E.G.: in {'state': 'sk', 'city':'saskatoon'} sk is a substring of saskatoon. If this
        doesn't happen in the data then this time consuming check doesn't have to be done, so this function
        finds out if the check needs to be done
        :return: dict mapping url -> True when substrings are NOT an issue on that
            page (the check can safely be skipped), False when one data value is a
            substring of another (the check is required)
        """
        # TODO: use of this function can be better optimized by narrowing down to data type instead of url
        # TODO: If we know which types are substrings of which types then those are all we need to worry about
        # Computed once and cached on the instance.
        if not hasattr(self, '_skip_substring_check'):
            self._skip_substring_check = {}
            for url, page_content in self.accurate_source_data.iteritems():
                self._skip_substring_check[url] = True
                for data_type, value in page_content.iteritems():
                    for substring_data_type, substring_value in page_content.iteritems():
                        if substring_data_type != data_type:
                            if substring_value in value:
                                self._skip_substring_check[url] = False
                                break
                    if not self._skip_substring_check[url]:
                        break
        return self._skip_substring_check
    @staticmethod
    def clean_text(string):
        """
        Prepares a string for data comparison and searching by removing characters we never want
        :param string: The string to clean
        :return: The cleaned up string
        """
        return string.replace('\n', '').replace('\r', '').strip(' ').lower()
    @staticmethod
    def clean_text_for_regex(string):
        """
        Prepares a string to be searched for in a regular expression
        :param string: The search string
        :return: The search string with regex special characters escaped
        """
        return ''.join(('\{0}'.format(char) if char in '.^$?+*{}[]()|' else char for char in string))
    def get_selector_for_each_data_instance(self, data_type):
        """
        Find every selector that locates data_type's value on the comparison page.
        :param data_type: one of CONTENT_TYPES
        :return: set of non-empty CSS selectors
        """
        # Pull any old page to use for finding selectors. The other data is to check how successful the selector is
        comparison_url = next(self.accurate_source_data.iterkeys())
        comparison_data = self.accurate_source_data.pop(comparison_url)
        comparison_soup = self.soups.pop(comparison_url)
        selectors = set()
        clean_search_string = self.clean_text_for_regex(comparison_data[data_type])
        #TODO: Remove this hack for apostrophes in text. Can't figure out how to handle apostrophes right now
        if "'" in clean_search_string:
            # Search for the longest apostrophe-free segment instead.
            segments = clean_search_string.split("'")
            clean_search_string = ''
            for segment in segments:
                if len(segment) >= len(clean_search_string):
                    clean_search_string = segment
        regex = re.compile(clean_search_string)
        navigable_strings = comparison_soup.findAll(text=regex)
        self._debug(0, ('CLEAN SEARCH STRING', clean_search_string), ('SEARCH RESULTS', navigable_strings))
        for navigable_string in navigable_strings:
            final_tag = navigable_string.parent
            element_list = self.build_selector_element_list(final_tag)
            self._debug(1, ('NAVIGABLE STR:', navigable_string), ('FINAL TAG', final_tag), ('ELEMENTS', element_list))
            if element_list:
                if INFO:
                    print '\nG',
                selector = self.build_selector(element_list, data_type)
                if INFO:
                    print
                selectors.add(selector)
                self._debug(2, ('SELECTOR', selector))
        # build_selector returns '' for failed branches; drop that sentinel.
        if '' in selectors:
            selectors.remove('')
        if INFO:
            print 'Selectors for: ', data_type
            for selector in selectors:
                print selector
            print
        # Restore the page that was popped for comparison.
        self.accurate_source_data[comparison_url] = comparison_data
        self.soups[comparison_url] = comparison_soup
        return selectors
    def get_single_occurrence_selectors(self):
        """
        Generate the CSS selectors for a source with them ordered from best first to worst last
        :return: {'<content_type':[<best css selector path>, ..., <worst css selector path>]}
        """
        ranked_selectors = {}
        for content_type in self.CONTENT_TYPES:
            selectors = []
            for selector in self.get_selector_for_each_data_instance(content_type):
                score = self.get_selector_score(selector)
                if score > self.MIN_SELECTOR_SCORE:
                    # NOTE(review): leftover debug print — consider removing.
                    print score
                    selectors.append((selector, score))
            selectors.sort(key=lambda x: x[1], reverse=True)
            ranked_selectors[content_type] = [ranked_selector for ranked_selector, _ in selectors]
        return ranked_selectors
    @classmethod
    def get_selector_score(cls, selector_path):
        """
        Combine the weighted sub-scores for a selector into one overall score.
        NOTE(review): every sub-score is currently hard-coded to 100 (TODOs below),
        so all selectors score equally until these are implemented.
        """
        sub_scores = {}
        # Build the score for the number of content types found with the selector
        sub_scores['good_content'] = 100
        # Build the score for the amount of extraneous data found with the selector
        sub_scores['bad_content'] = 100
        # Build the score for the quality of the selector itself
        sub_scores['selector_quality'] = 100
        # Build the score for the number of urls on which the selector found good data
        # TODO: Get this score from when it was computed earlier
        # TODO: Maybe just compute the whole score earlier in check_selector_success and keep the score?
        sub_scores['selector_success'] = 100
        # Build final score using weighted versions of all other scores
        score = reduce(add, (sub_scores[score_type] * weight for score_type, weight in cls.SUB_SCORE_WEIGHTS.items()))
        return score
# Demonstration of Python's late-binding closures: a lambda captures the
# *variable* x, not its value at definition time, so both calls below see the
# final value of x (20).
x = 10
a = lambda y: x+y
x = 20
b = lambda y: x+y
print(a(10)) # 30
print(b(10)) # 30
# Fix: bind the current value of x as a default argument, which is evaluated
# once at definition time.
x=10
a= lambda y,x=x: x+y
x=20
b= lambda y,x=x: x+y
print(a(10)) # 20
print(b(10)) # 30
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal, misc
from mpl_toolkits.mplot3d import Axes3D
from scipy import ndimage
#시그마값이 커지면, 가우시안의 높이는 낮지만 폭이 넓어지게 된다.
#즉, 시그마의 값이 커지게 되면, 블러링 되는 정도도 커지게 된다.
def Im_filtering(im, Filter, FilterSize, dummyNum):
    """Correlate a 2-D uint8 image with Filter using zero padding.

    The window size is 2*(FilterSize//2)+1 around each pixel; each response is
    offset by dummyNum and clamped to [0, 255] before being stored as uint8.
    """
    rows, cols = im.shape
    pad = int(FilterSize / 2)
    # Zero-padded working copy with the original image centred inside it.
    padded = np.zeros(shape=(rows + 2 * pad, cols + 2 * pad), dtype=np.uint8)
    padded[pad:rows + pad, pad:cols + pad] = im[:, :]
    filtered = np.zeros(shape=(rows, cols), dtype=np.uint8)
    for out_r in range(rows):
        for out_c in range(cols):
            # Window centred on the output pixel (offset by pad in `padded`).
            window = padded[out_r:out_r + 2 * pad + 1, out_c:out_c + 2 * pad + 1]
            response = np.sum(window * Filter) + dummyNum
            # Clamp to the uint8 range to avoid wrap-around on overflow.
            response = np.uint8(np.where(response > 255, 255, np.where(response < 0, 0, response)))
            filtered[out_r, out_c] = response
    return filtered
# Sample source image.
# NOTE(review): scipy.misc.imread was removed in SciPy >= 1.2; this script
# presumably ran on an older SciPy — migrate to imageio.imread when possible.
lena = misc.imread('./image_sample/lena_256.bmp')
row, col = lena.shape
sigma = 30.0
# 2-D Laplacian-of-Gaussian kernel built with explicit loops.
# As sigma grows the Gaussian gets lower and wider, i.e. more blurring.
# FIX: np.float was deprecated in NumPy 1.20 and later removed — use builtin float.
G = np.zeros(shape=(256, 256), dtype=float)
for x in range(-127, 128):
    for y in range(-127, 128):
        # Each factor of the LoG written separately for readability.
        s1 = -(1/np.pi * pow(sigma, 4))
        s2 = 1-((pow(x, 2)+pow(y, 2))/(2 * pow(sigma, 2)))
        v = -(pow(x,2)+pow(y,2))/(2*pow(sigma, 2))
        G[y+127, x+127] = s1*s2*np.exp(v)
# The resulting kernel is displayed as a 256x256 image.
# (a)
plt.imshow(G)
plt.title('LoG Filter, sigma:'+str(sigma))
plt.gray()
plt.show()
# 3-D surface plot of the same kernel, built with vectorized numpy.
# (b)
x = np.arange(-127,128, 1.0)
y = np.arange(-127,128, 1.0)
x1, y1 = np.meshgrid(x, y)
s1 = -(1 / np.pi * pow(sigma, 4))
s2 = 1 - ((pow(x1, 2) + pow(y1, 2)) / (2 * pow(sigma, 2)))
v = -(pow(x1, 2)+pow(y1, 2)) / (2 * pow(sigma, 2))
g_3d = s1*s2*np.exp(v)
fig = plt.figure()
ax = Axes3D(fig)
ax.plot_surface(x1, y1, g_3d)
plt.show()
# 9x9 LoG mask with a small sigma for actual filtering.
sigma = 0.8
G = np.zeros(shape=(9, 9), dtype=float)
for x in range(-4, 5):
    for y in range(-4, 5):
        s1 = -(1/np.pi * pow(sigma, 4))
        s2 = 1-((pow(x, 2)+pow(y, 2))/(2 * pow(sigma, 2)))
        v = -(pow(x,2)+pow(y,2))/(2*pow(sigma, 2))
        G[y+4, x+4] = s1*s2*np.exp(v)
# (c)
plt.imshow(G)
plt.title('LoG Filter, sigma:'+str(sigma))
plt.gray()
plt.show()
# (d) Apply the 9x9 LoG mask to the sample image and show both side by side.
Image_LoD = Im_filtering(lena, G, 9, 0)
plt.subplot(1,2,1)
plt.gray()
plt.axis('off')
plt.imshow(lena)
plt.title('Original Images')
plt.subplot(1,2,2)
plt.gray()
plt.axis('off')
plt.imshow(Image_LoD)
plt.title('LoD Images (FilterSize9, sigma:0.8)')
plt.show()
# (e)
# Where adjacent pixel values change sharply this filter draws a line; the
# larger the change, the brighter the output, which also gives an embossed
# look. In effect it is an edge-detection filter.
|
import sys
from pathlib import Path
import requests
# Resolve the two document paths from the command line.
document1_path = Path(sys.argv[1]).resolve()
document2_path = Path(sys.argv[2]).resolve()
# Read both documents; read_text() opens and closes the file for us.
content1 = document1_path.read_text().strip()
content2 = document2_path.read_text().strip()
data = {
    "document1": content1,
    "document2": content2
}
# NOTE(review): removed the unused `files` dict (it leaked an open file handle
# and was never sent) and the unused `headers` dict — a hand-written
# "multipart/form-data" Content-Type is wrong anyway, since requests must
# generate the multipart boundary itself.
response = requests.post("https://scholastic-bcmc.herokuapp.com/similarity", data=data)
# FIX: print the decoded body instead of the bytes repr (b'...').
print(response.text)
# tutorial41.py — tutorial on importing from a nested package; the commented
# lines show the alternative import forms discussed.
# import Game
# import Game.main
# print(Game.a)
# print(Game.main.b)
# import Game.Level.start
# from Game.Level import
from Game.Level.start import select_difficulty
select_difficulty(2)
# Game/__init__.py  (contents of the package files, pasted here for reference)
a = 10
# Game/main.py
b = 20
# start.py
def select_difficulty(d):
    # Placeholder implementation: just echo the chosen difficulty.
    print(d)
|
import time
# "Type out" the text one word at a time, forever, with a short pause
# between words.
# NOTE(review): removed the unused `username`, `randomSpace` and `eachText`
# variables left over from an unfinished feature.
text = "Snape is stupid"
textSplit = text.split()
while True:
    for x in textSplit:
        time.sleep(0.6)
        print(x)
|
# 2014.10.18 14:39:52 Central European Daylight Time
#Embedded file name: scripts/client/AuxiliaryFx/Roccat/__init__.py
pass
# FIX: the decompiler status lines below were emitted as bare text, which is a
# SyntaxError in Python; they are kept here commented out.
# +++ okay decompyling res/scripts/client/auxiliaryfx/roccat/__init__.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2014.10.18 14:39:52 Central European Daylight Time
import argparse
from producer import RedditProducer
if __name__ == '__main__':
    # Parse the single positional argument selecting which producer to run.
    cli = argparse.ArgumentParser(description='Reddit Kafka Producer')
    cli.add_argument('topic', type=str, help='Reddit Topics: [submission, comment, subreddit, redditor]')
    parsed = cli.parse_args()
    # Look up the producer subclass for the topic and start producing.
    RedditProducer.get_producer_subclass(parsed.topic).produce()
|
#!/usr/bin/env python
# coding=utf-8
### binary search by python
searchlist=[12,29,32,34,38,42,49,60,66,72,88]
low=0
high=len(searchlist)-1
print low,high ### searchlist.__len__() can also get length of list
k=int(raw_input("please input the num u r searching:"))
while(low<=high):
mid=(low+high)/2
if k<searchlist[mid]:
high=mid-1 ### 1,2,3,4,5 find 2 in 12345->find 2 in 12-> find 2
elif k>searchlist[mid]:
low=mid+1
else:
if k==searchlist[mid]:
print 'find at position:',mid
break
if low>high:
print 'not found'
|
"""
Урок 4. Полезные инструменты
1. Реализовать скрипт, в котором должна быть предусмотрена функция расчета заработной платы сотрудника.
В расчете необходимо использовать формулу: (выработка в часах * ставка в час) + премия.
Для выполнения расчета для конкретных значений необходимо запускать скрипт с параметрами.
"""
import sys
import argparse
def createParser():
    """Build the CLI parser: -production and -tariff are required ints,
    -bonus is an optional int percentage defaulting to 0."""
    cli = argparse.ArgumentParser()
    cli.add_argument('-production', type=int, nargs='?', required=True, help='выработка в часах')
    cli.add_argument('-tariff', type=int, nargs='?', required=True, help='ставка в час')
    # argparse applies `type` to a string default, so "0" becomes int 0.
    cli.add_argument('-bonus', type=int, nargs='?', help='премия', default="0")
    return cli
# Task 1: compute the salary from the command-line arguments.
# NOTE(review): the module docstring states the formula
# (hours * rate) + bonus, while this line treats bonus as a percentage of the
# base pay — confirm which is intended.
parser = createParser()
a = parser.parse_args(sys.argv[1:])
print(a.production)
print(f"ЗП сотрудника = {(a.production * a.tariff) + (a.production * a.tariff) * a.bonus / 100}")
# python.exe lesson_4.py -production=10 -tariff=20 -bonus=5
"""
2. Представлен список чисел. Необходимо вывести элементы исходного списка, значения которых больше предыдущего элемента.
Подсказка: элементы, удовлетворяющие условию, оформить в виде списка.
Для формирования списка использовать генератор.
Пример исходного списка: [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55].
Результат: [12, 44, 4, 10, 78, 123].
"""
l: list = [300, 2, 12, 44, 1, 1, 4, 10, 7, 1, 78, 123, 55]
result: list = [l[i] for i in range(1, len(l)) if l[i] > l[i - 1]]
print(result)
"""
3. Для чисел в пределах от 20 до 240 найти числа, кратные 20 или 21. Необходимо решить задание в одну строку.
Подсказка: использовать функцию range() и генератор.
"""
print([i for i in range(20, 240 + 1) if not i % 20 or not i % 21])
"""
4. Представлен список чисел. Определить элементы списка, не имеющие повторений.
Сформировать итоговый массив чисел, соответствующих требованию.
Элементы вывести в порядке их следования в исходном списке.
Для выполнения задания обязательно использовать генератор.
Пример исходного списка: [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11].
Результат: [23, 1, 3, 10, 4, 11]
"""
l: list = [2, 2, 2, 7, 23, 1, 44, 44, 3, 2, 10, 7, 4, 11]
# FIX: the previous comprehension abused `result.append` inside its filter,
# which produced the ordered *unique* values ([2, 7, 23, ...]) instead of the
# values that occur exactly once ([23, 1, 3, 10, 4, 11]).
result = [i for i in l if l.count(i) == 1]
print(result)
"""
5. Реализовать формирование списка, используя функцию range() и возможности генератора. В список должны войти четные числа от 100 до 1000 (включая границы). Необходимо получить результат вычисления произведения всех элементов списка.
Подсказка: использовать функцию reduce().
"""
from functools import reduce
# FIX: the task asks for even numbers only, so step range() by 2.
print(reduce(lambda x, y: x * y, range(100, 1001, 2)))
"""
6. Реализовать два небольших скрипта:
а) итератор, генерирующий целые числа, начиная с указанного,
б) итератор, повторяющий элементы некоторого списка, определенного заранее.
Подсказка: использовать функцию count() и cycle() модуля itertools.
Обратите внимание, что создаваемый цикл не должен быть бесконечным.
Необходимо предусмотреть условие его завершения.
Например, в первом задании выводим целые числа, начиная с 3, а при достижении числа 10 завершаем цикл.
Во втором также необходимо предусмотреть условие, при котором повторение элементов списка будет прекращено.
"""
from itertools import count, cycle
def cGenerator(end: int = 10, start: int = None, lst: list = None):
    """Yield ints start..end (count mode) or the first `end` items cycling over lst.

    Exactly one of `start` / `lst` should be given; with both or neither the
    generator yields nothing.
    """
    if (start and lst):
        return 0
    elif (start):
        # FIX: the old loop kept spinning on count() forever once the value
        # passed `end` (it simply stopped yielding), so iterating the
        # generator never terminated. Break out instead.
        for i in count(start):
            if i > end:
                break
            yield i
    elif (lst):
        cnt = 0
        for i in cycle(lst):
            if cnt >= end:
                break
            yield i
            cnt += 1
    else:
        return 0
# Demo: cycle over the list until 20 items have been produced.
g = cGenerator(lst=[2, 4, 3, 1], end=20)
for i in g:
    print(i)
# Demo: count from 2 up to 20.
# NOTE(review): verify that cGenerator's count() branch terminates once it
# passes `end`; as originally written this loop prints 2..20 and then hangs.
g = cGenerator(start=2, end=20)
for i in g:
    print(i)
"""
7. Реализовать генератор с помощью функции с ключевым словом yield, создающим очередное значение. При вызове функции должен создаваться объект-генератор.
Функция должна вызываться следующим образом: for el in fact(n).
Функция отвечает за получение факториала числа, а в цикле необходимо выводить только первые n чисел, начиная с 1! и до n!.
Подсказка: факториал числа n — произведение чисел от 1 до n. Например, факториал четырёх 4! = 1 * 2 * 3 * 4 = 24.
"""
from itertools import count
def fact(end: int = 4):
    """Generate the factorials 1!, 2!, ..., end!, one value per iteration."""
    n, running = 1, 1
    while n <= end:
        yield running
        n += 1
        running *= n
# Demo: prints the first four factorials — 1, 2, 6, 24.
func = fact(4)
for el in func:
    print(el)
|
from rrtnode import RRTNode
import line
class Graph:
    """
    An RRT graph.
    Args:
        start_angles: The initial angles of the arm.
        end_angles: The desired angles of the arm.
    Instance Attributes:
        start_node: Node containing cartesian coordinates and arm angles of the start position.
        end_node: Node containing cartesian coordinates and arm angles of the end position.
        nodes: List of all nodes in the graph.
        edges: List of all pairs (n1, n2) for which there is an edge from node n1 to node n2.
        success: True if there is a valid path from start_node to end_node.
        node_to_index: Maps nodes to indexes that are used to find the distance from start_node of each node.
        neighbors: Maps each node to its neighbors.
        distances: Maps each node to its shortest known distance from the start node.
        ranking: List of all intermediate nodes, ordered by distance between the end effector to the target position.
        sx, sy, sz: The distance between the start and end nodes.
    """
    def __init__(self, start_angles, end_angles, startpos, endpos):
        self.start_node = RRTNode(start_angles)
        self.end_node = RRTNode(end_angles)
        self.nodes = [self.start_node]
        self.edges = []
        self.success = False
        self.node_to_index = {self.start_node: 0}
        self.neighbors = {0: []}
        self.distances = {0: 0.}
        self.ranking = []
        self.sx = endpos[0] - startpos[0]
        self.sy = endpos[1] - startpos[1]
        self.sz = endpos[2] - startpos[2]

    def add_vex(self, node):
        """Add node to the graph if not already present; return its index."""
        try:
            idx = self.node_to_index[node]
        # FIX: was a bare `except:`, which would also swallow KeyboardInterrupt
        # and mask unrelated errors; only a missing key means "new node".
        except KeyError:
            idx = len(self.nodes)
            self.nodes.append(node)
            self.node_to_index[node] = idx
            self.neighbors[idx] = []
            # Keep the candidate list ordered by closeness to the target.
            self.ranking.append(node)
            self.ranking.sort(key=self.dist_to_end)
        return idx

    def add_edge(self, idx1, idx2, cost):
        """Record an undirected edge between the nodes at idx1 and idx2."""
        self.edges.append((idx1, idx2))
        self.neighbors[idx1].append((idx2, cost))
        self.neighbors[idx2].append((idx1, cost))

    def dist_to_end(self, node):
        """Distance from node's end effector to the goal end-effector position."""
        return line.distance(node.end_effector_pos, self.end_node.end_effector_pos)

    def get_parent(self, idx):
        """Return the node of the first-recorded neighbor (the parent) of idx.

        NOTE(review): relies on dict insertion order of node_to_index matching
        the node indices — holds because add_vex appends in index order.
        """
        near_neighbors = self.neighbors[idx]
        parent_idx = near_neighbors[0][0]
        node_list = list(self.node_to_index.keys())
        return node_list[parent_idx]
"""
Client that performs inferences on the tensorflow serving model using the REST API.
"""
# for pre/post-proccesing
import SimpleITK as sitk
from preprocessing.metadata import Patient
from preprocessing.preprocess import Preprocessor
from augmentation.augment_data import process
import numpy as np
from scipy.ndimage import zoom
# for getting requests
from predict_client.prod_client import ProdClient
# Constants needed for pre-processing: the stored record volume size and the
# cropped model-input size. Presumably ordered [slices, height, width] — TODO
# confirm against the training pipeline.
record_shape = [37,99,99]
feature_shape = [31,87,87]
def get_prediction(coords, path_to_img, host):
    """Run the full inference pipeline for one volume and one click position.

    :param coords: click coordinates as delivered by the papaya front-end
    :param path_to_img: path to the image volume on disk
    :param host: hostname of the tensorflow-serving instance (port 9200 assumed)
    :return: human-readable classification string with its probability
    Side effect: writes ./feature_map_image.nii via make_feature_image.
    """
    # account for papaya received image coordinate convention
    coords = change_coordinate_system(coords, path_to_img)
    # pre-process image so that it matches input of model
    processed_image = pre_process_image(coords, path_to_img)
    # specify where the client should look to make requests
    client = ProdClient(host+':9200', 'crohns', 1)
    # query tensorflow serving model for predictions and attention layer
    prob_values, max_prob_indx, attentions = query_client(processed_image, client)
    # process the feature map to get the average and resize it
    feature_maps_arr = process_feature_maps(attentions, processed_image[0].shape)
    # make the attention layer into a nifti file (written to disk)
    make_feature_image(coords, path_to_img, feature_maps_arr)
    # produce an output string to display on front-end
    classes = {0: 'healthy', 1: 'abnormal (Crohn\'s)'}
    predictions = classes[max_prob_indx]
    output_str = f'{predictions} with probability {round(prob_values[0][max_prob_indx], 3)}'
    return output_str
def change_coordinate_system(coords, path_image):
    """Translate papaya front-end [x, y, z] clicks into array-space [y, x, z].

    Papaya reports coordinates flipped relative to the numpy array returned by
    SimpleITK, so each component is subtracted from the matching axis length.
    """
    volume = sitk.ReadImage(path_image)
    dims = sitk.GetArrayFromImage(volume).shape
    flipped_x = dims[1] - coords[0]
    flipped_y = dims[2] - coords[1]
    flipped_z = dims[0] - coords[2]
    return [flipped_y, flipped_x, flipped_z]
def process_feature_maps(attention_layer, processed_image_shape):
    """Average the feature maps over their last axis and upsample the result
    to processed_image_shape via spline interpolation."""
    averaged = attention_layer.mean(4)
    # Per-axis zoom factor: target extent divided by current extent.
    factors = tuple(target / current
                    for target, current in zip(processed_image_shape, averaged.shape))
    return zoom(averaged, factors)
def make_feature_image(coords, path, feature_maps_arr):
    """Embed the attention maps into a zeroed copy of the original volume and
    write the result out as a NIfTI file (side effect on disk)."""
    original = sitk.ReadImage(path)
    original_arr = sitk.GetArrayFromImage(original).astype("float32")
    # Place the (normalised) maps inside an otherwise-zero volume.
    embedded = add_feature_arra_zero_arr(original_arr, feature_maps_arr, coords, feature_shape)
    # Save with the original image's metadata.
    make_arr_into_nifit_image(original, embedded)
def add_feature_arra_zero_arr(arr_image, arr_feature_map, pixel_center, physical_crop_size):
    """Return a zero volume shaped like arr_image with the min-max normalised
    feature map pasted into the crop box centred on pixel_center.

    Note the axis shuffle: the box is indexed [z, x, y] relative to
    physical_crop_size's ordering, matching the upstream crop.
    """
    crop_extent = np.array([physical_crop_size[1], physical_crop_size[2], physical_crop_size[0]])
    low = np.array(pixel_center - crop_extent / 2).astype(int)   # lower corner of the box
    high = (low + crop_extent).astype(int)                       # upper corner of the box
    # Plain Python ints so SITK-style indexing works downstream.
    low = [int(bound) for bound in list(low)]
    # Min-max normalise the feature map into [0, 1].
    span = arr_feature_map.max() - arr_feature_map.min()
    arr_feature_map = (arr_feature_map - arr_feature_map.min()) / span
    canvas = np.zeros(arr_image.shape)
    # Paste the map into the crop-box region of the zero canvas.
    canvas[low[2]:high[2], low[0]:high[0], low[1]:high[1]] = arr_feature_map
    return canvas.astype(np.float32)
def make_arr_into_nifit_image(base_image, new_image_arr):
    """Write new_image_arr to ./feature_map_image.nii, carrying over
    base_image's spatial metadata (origin/spacing/direction)."""
    nifti = sitk.GetImageFromArray(new_image_arr)
    nifti.CopyInformation(base_image)
    sitk.WriteImage(nifti, './feature_map_image.nii')
def pre_process_image(coords, path_to_img):
    """Crop and resize the volume around the ileum coordinates for the model.

    Uses the project's Patient/Preprocessor pipeline (defined elsewhere).
    'A'/36 appear to be placeholder patient id/age — TODO confirm.
    :return: numpy array of shape [1, 1] + feature_shape, ready for TF serving.
    """
    # pre-process image as per Robbie Holland's code
    patient = Patient('A', 36)
    patient.set_paths(path_to_img)
    patient.set_ileum_coordinates(coords)
    patient.load_image_data()
    # Note the axis reorder: constant_volume_size is [H, W, slices] here.
    preprocessor = Preprocessor(constant_volume_size=[record_shape[1], record_shape[2], record_shape[0]])
    [patient] = preprocessor.process([patient], ileum_crop=True, region_grow_crop=False, statistical_region_crop=False)
    image = process(sitk.GetArrayFromImage(patient.axial_image), out_dims=feature_shape)
    # add this extra dimension so that it is ready for the input of the tf serving model
    image = image.reshape([-1, 1] + feature_shape)
    return image
def query_client(image, client):
    """Send the image to the serving model.

    :return: (probability values, index of the most probable class,
              attention layer tensor)
    """
    # Request payload in the shape the prediction client expects.
    request = [{'in_tensor_name': 'Input',
                'in_tensor_dtype': 'DT_FLOAT',
                'data': image}]
    model_output = client.predict(request)
    probabilities = model_output['Output']
    best_class = np.argmax(np.squeeze(probabilities))
    return probabilities, best_class, model_output['attention_layer']
|
import os
import pandas as pd
from util.paths import ensure_dir
BASE_FOLDER_PATH = '/mnt/all1/ml20m_yt/videos_resized'
def youtube_video_link_by_id(youtube_id):
    """Return the full YouTube watch URL for a video id."""
    return 'https://www.youtube.com/watch?v={}'.format(youtube_id)
# Import dataframe containing the ML20M YT dataset
df = pd.read_csv('datasets/ml20m_youtube/youtube_extracted.csv', index_col=0)
# Import list of ids to be kept
with open('datasets/ml20m_youtube/ml20myt_available__cleaned.txt') as f:
    filter_list = [line.strip() for line in f.readlines()]
# Filter out movies that are not needed
filtered_df = df[df.index.isin(filter_list)]
for index, row in filtered_df.iterrows():
    # Build download location and ensure that the folder exists
    # or create otherwise
    current_path = '/'.join([BASE_FOLDER_PATH, str(row['custom.movielens_id'])]) + '/'
    if os.path.isdir(current_path):
        # FIX: isfile() was called with the bare name from listdir(), which is
        # resolved against the CWD — so already-downloaded trailers were never
        # detected and were re-downloaded. Join with the directory path.
        num_files_in_dir = len([
            name for name in os.listdir(current_path)
            if os.path.isfile(os.path.join(current_path, name))
        ])
        if num_files_in_dir > 0:
            continue
    ensure_dir(current_path)
    # Download trailer from youtube, recode to mp4 and rescale to 224x224.
    # NOTE(review): the closing paren previously sat after the "-o" options,
    # passing them *into* youtube_video_link_by_id (same final string, but
    # misleading); grouping clarified here.
    os.system("youtube-dl -v --recode-video=mp4 --exec 'mv {} temp; ffmpeg -i temp -vf scale=224x224,setsar=1:1 -c:v libx264 -crf 18 -c:a copy {}; rm temp' "
              + youtube_video_link_by_id(row['custom.youtube_id'])
              + " -o '" + current_path + "%(id)s.%(ext)s'")
# import code; code.interact(local=dict(globals(), **locals()))
|
import copy # fork a chain
import datetime # get real time for timestamps
import hashlib # hash
class MinimalChain():
    """A minimal in-memory blockchain: an ordered list of MinimalBlock."""

    def __init__(self):
        # Every chain starts from the same kind of genesis block.
        self.blocks = [self.get_genesis_block()]

    def __eq__(self, other):
        # Chains are equal when their entire state matches.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def get_genesis_block(self):
        """Return block #0 with a fixed payload and arbitrary previous hash."""
        return MinimalBlock(0, datetime.datetime.utcnow(), 'Genesis', 'arbitrary')

    def add_block(self, data):
        """Append a new block carrying *data*, linked to the current tip."""
        tip = self.blocks[len(self.blocks) - 1]
        self.blocks.append(
            MinimalBlock(len(self.blocks), datetime.datetime.utcnow(), data, tip.hash))

    def get_chain_size(self):
        """Number of blocks, excluding the genesis block."""
        return len(self.blocks) - 1

    def verify(self, verbose=True):
        """Check integrity of every non-genesis block; True when intact."""
        intact = True

        def report(message):
            # Record the failure and optionally explain it.
            nonlocal intact
            intact = False
            if verbose:
                print(message)

        for i in range(1, len(self.blocks)):
            prev, cur = self.blocks[i - 1], self.blocks[i]
            if not cur.verify():  # assume Genesis block integrity
                report(f'Wrong data type(s) at block {i}.')
            if cur.index != i:
                report(f'Wrong block index at block {i}.')
            if prev.hash != cur.previous_hash:
                report(f'Wrong previous hash at block {i}.')
            if cur.hash != cur.hashing():
                report(f'Wrong hash at block {i}.')
            if prev.timestamp >= cur.timestamp:
                report(f'Backdating at block {i}.')
        return intact

    def fork(self, head='latest'):
        """Deep-copy the chain; an integer *head* truncates after that block."""
        clone = copy.deepcopy(self)  # deepcopy since blocks are mutable
        if head not in ['latest', 'whole', 'all']:
            clone.blocks = clone.blocks[0:head + 1]
        return clone

    def get_root(self, chain_2):
        """Return the longest common prefix of self and *chain_2* as a fork."""
        min_chain_size = min(self.get_chain_size(), chain_2.get_chain_size())
        for i in range(1, min_chain_size + 1):
            if self.blocks[i] != chain_2.blocks[i]:
                return self.fork(i - 1)
        return self.fork(min_chain_size)
class MinimalBlock():
    """A single block: index, timestamp, payload and hash linkage."""

    def __init__(self, index, timestamp, data, previous_hash):
        self.index = index
        self.timestamp = timestamp
        self.data = data
        self.previous_hash = previous_hash
        # The hash is computed once, over the other four fields.
        self.hash = self.hashing()

    def __eq__(self, other):
        # Blocks are equal when every attribute (including hash) matches.
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def hashing(self):
        """Return the SHA-256 hex digest over the block's core fields."""
        digest = hashlib.sha256()
        for part in (self.index, self.timestamp, self.data, self.previous_hash):
            digest.update(str(part).encode('utf-8'))
        return digest.hexdigest()

    def verify(self):
        """Check that every non-payload attribute has the expected type."""
        expected = [(self.index, int),
                    (self.timestamp, datetime.datetime),
                    (self.previous_hash, str),
                    (self.hash, str)]
        return all(isinstance(value, type_) for value, type_ in expected)
'''
c = MinimalChain() # Start a chain
for i in range(1,20+1):
c.add_block(f'This is block {i} of my first chain.')
print(c.blocks[3].timestamp)
print(c.blocks[7].data)
print(c.blocks[9].hash)
print(c.get_chain_size())
print(c.verify())
c_forked = c.fork('latest')
print(c == c_forked)
c_forked.add_block('New block for forked chain!')
print(c.get_chain_size(), c_forked.get_chain_size())
''' |
#!/usr/bin/python3
from decimal import Decimal
from unittest import TestCase, mock
from wikidata import WikiData
class TestWikiData(TestCase):
    """Unit tests for the WikiData base class.

    Patching __abstractmethods__ to an empty set lets the (abstract)
    WikiData class be instantiated directly for testing.
    """
    @classmethod
    @mock.patch.multiple(WikiData, __abstractmethods__=set())
    def setUp(cls):
        # NOTE(review): setUp as a @classmethod is unusual -- unittest
        # normally calls it per-test on the instance; confirm this is
        # intentional (cls.wd is shared across tests here).
        cls.wd = WikiData('0000 0001 2197 5163')
    def test_format_float(self):
        # 8-digit rounding of a decimal string.
        self.assertEqual('0.12345679', WikiData.format_float('0.123456789', 8))
    def test_format_zero(self):
        # Scientific-notation zero must normalize to a numeric zero.
        self.assertEqual(0, Decimal(WikiData.format_float('+0E-7', 8)))
    def test_obtain_claims_empty_entity(self):
        # A fresh snak becomes a claim with matching property and value id.
        claim = self.wd.obtain_claim(WikiData.create_snak('P31', 'Q5'))
        self.assertEqual('P31', claim['mainsnak']['property'])
        self.assertEqual('Q5', claim['mainsnak']['datavalue']['value']['id'])
    @mock.patch('wikidata.WikiData.api_call', return_value=None)
    def test_load_items_none(self, api_call):
        # Multiple ids are joined with '|' for the wbgetentities call.
        self.assertIsNone(WikiData.load_items(['Q1', 'Q2']))
        api_call.assert_called_with('wbgetentities', {'props': 'claims|info|labels', 'ids': 'Q1|Q2'})
    @mock.patch('wikidata.WikiData.api_call', return_value=None)
    def test_load_items_single(self, api_call):
        # A single id is passed through unchanged.
        self.assertIsNone(WikiData.load_items(['Q3']))
        api_call.assert_called_with('wbgetentities', {'props': 'claims|info|labels', 'ids': 'Q3'})
    @mock.patch('logging.log')
    def test_trace_without_entity(self, info):
        # trace() logs at level 20 (INFO) with and without an entity set.
        self.wd.trace('test')
        info.assert_called_with(20, 'test')
        self.wd.entity = None
        self.wd.trace('test')
        info.assert_called_with(20, 'test')
    @mock.patch('wikidata.WikiData.api_call', return_value={'query': {'search': [{'title': 'Q1091618'}]}})
    def test_api_search(self, api_call):
        # api_search returns the title of the first search hit.
        value = WikiData.api_search('haswbstatement:"P3083=HD 1"')
        self.assertEqual('Q1091618', value)
        api_call.assert_called_with('query', {'list': 'search', 'srsearch': 'haswbstatement:"P3083=HD 1"'})
    def test_obtain_claim_self_reference(self):
        # A claim pointing at the entity's own qid must be rejected (None).
        self.wd.qid = 'Q5'
        self.wd.obtain_claim({'datavalue': {'value': 'id'}, 'property': 'P213'}) # should not throw an exception
        self.assertIsNone(self.wd.obtain_claim(WikiData.create_snak('P397', 'Q5')))
    @mock.patch('wikidata.WikiData.load_items', return_value=None)
    def test_prepare_data_null_items(self, load_items):
        # prepare_data must still attempt to load the entity's own qid.
        self.wd.qid = 'Q1'
        self.wd.prepare_data()
        load_items.assert_called_with(['Q1'])
    def test_date_parser(self):
        # Year / year-month / full-date inputs map to Wikidata time values
        # with precision 9 / 10 / 11 and the proleptic Gregorian calendar.
        self.assertIsNone(WikiData.parse_date(''))
        self.assertEqual('+1987-00-00T00:00:00Z', WikiData.parse_date('1987')['time'])
        self.assertEqual(9, WikiData.parse_date('1987')['precision'])
        self.assertEqual(0, WikiData.parse_date('1987')['timezone'])
        self.assertEqual(0, WikiData.parse_date('1987')['before'])
        self.assertEqual(0, WikiData.parse_date('1987')['after'])
        self.assertEqual('http://www.wikidata.org/entity/Q1985727', WikiData.parse_date('1987')['calendarmodel'])
        self.assertEqual('+2009-04-00T00:00:00Z', WikiData.parse_date('2009-04')['time'])
        self.assertEqual(10, WikiData.parse_date('2009-04')['precision'])
        self.assertEqual('+2009-04-12T00:00:00Z', WikiData.parse_date('2009-04-12')['time'])
        self.assertEqual(11, WikiData.parse_date('2009-4-12')['precision'])
        self.assertEqual('+2009-04-02T00:00:00Z', WikiData.parse_date('2009-04-2')['time'])
        self.assertEqual('+3456-02-01T00:00:00Z', WikiData.parse_date('1/2/3456')['time'])
        self.assertEqual('+1903-01-00T00:00:00Z', WikiData.parse_date('01/1903')['time'])
class TestAddRefs(TestCase):
    """Tests for WikiData.add_refs: reference snaks added to claims.

    db_property/db_ref configure the external-id property (P213) and the
    "stated in" source item (Q423048) used throughout.
    """
    @classmethod
    @mock.patch.multiple(WikiData, __abstractmethods__=set())
    def setUp(cls):
        wd = WikiData('0000 0001 2197 5163')
        WikiData.db_property = 'P213'
        WikiData.db_ref = 'Q423048'
        # Start every test with an entity that has no claims.
        wd.entity = {'claims': {}}
        cls.wd = wd
    def test_add_refs_when_no_external_id(self):
        # Without an existing external-id claim, both the source item (P248)
        # and the external id (P213) go into the reference.
        claim = {}
        self.wd.add_refs(claim, set())
        self.assertEqual('Q423048', claim['references'][0]['snaks']['P248'][0]['datavalue']['value']['id'])
        self.assertEqual('0000 0001 2197 5163', claim['references'][0]['snaks']['P213'][0]['datavalue']['value'])
    def test_add_missing_foreign_id(self):
        # When the entity already carries the external id as a claim, the
        # reference must not repeat it.
        self.wd.obtain_claim(self.wd.create_snak('P213', '0000 0001 2197 5163')) # add claim with external id
        claim = {}
        self.wd.add_refs(claim, set()) # add without external id
        self.assertNotIn('P213', claim['references'][0]['snaks'])
        self.assertEqual('Q423048', claim['references'][0]['snaks']['P248'][0]['datavalue']['value']['id'])
        self.wd.entity = {'claims': {}} # remove claim with external id
        self.wd.add_refs(claim, set())
        self.assertEqual('0000 0001 2197 5163', claim['references'][0]['snaks']['P213'][0]['datavalue']['value'])
    def test_add_refs_without_foreign_id_if_other_sources(self):
        # Extra source items suppress the external-id snak as well.
        claim = {}
        self.wd.add_refs(claim, {'Q51905050'})
        self.assertEqual('Q423048', claim['references'][0]['snaks']['P248'][0]['datavalue']['value']['id'])
        self.assertNotIn('P213', claim['references'][0]['snaks'])
    def test_add_refs_2_equal_sources(self):
        # Re-adding the same source set must not duplicate references
        # beyond the expected two (db_ref + the extra source).
        claim = {}
        self.wd.add_refs(claim, {'Q51905050'})
        self.wd.add_refs(claim, {'Q51905050'})
        self.assertEqual(2, len(claim['references']))
        self.assertIn(claim['references'][0]['snaks']['P248'][0]['datavalue']['value']['id'], ['Q51905050', 'Q423048'])
        self.assertIn(claim['references'][1]['snaks']['P248'][0]['datavalue']['value']['id'], ['Q51905050', 'Q423048'])
    def test_add_refs_empty_after_source(self):
        # Adding with an empty source set after a non-empty one must not raise.
        claim = {}
        self.wd.add_refs(claim, {'Q51905050'})
        self.wd.add_refs(claim, set())
    def test_remove_P143(self):
        # "imported from" (P143) snaks are stripped; P248 is preserved.
        claim = {'references': [{'snaks': {'P248': [WikiData.create_snak('P248', 'Q423048')],
                                           'P143': [WikiData.create_snak('P143', 'Q328')]}}]}
        self.wd.add_refs(claim, set())
        self.assertIn('P248', claim['references'][0]['snaks'])
        self.assertNotIn('P143', claim['references'][0]['snaks'])
    def test_try_to_add_second_id(self):
        # Changing external_id afterwards must not overwrite the original id.
        claim = {}
        self.wd.add_refs(claim, set())
        self.wd.external_id = '0000 0001 2146 438X'
        self.wd.add_refs(claim, set())
        self.assertEqual('0000 0001 2197 5163', claim['references'][0]['snaks']['P213'][0]['datavalue']['value'])
class TestFindClaim(TestCase):
    """Tests for WikiData.find_claim date matching."""
    @classmethod
    @mock.patch.multiple(WikiData, __abstractmethods__=set())
    def setUp(cls):
        cls.wd = WikiData('0000 0001 2197 5163')
    def testIgnoreInsignificantDatePart(self):
        # Precision 9 (year) means month/day in the stored time value are
        # not significant, so a year-only claim must still match.
        self.assertIsNotNone(WikiData.find_claim({'time': '+1999-12-31T00:00:00Z', 'precision': 9},
                                                 [self.wd.obtain_claim(WikiData.create_snak('P575', '1999'))]))
|
from django import forms
from .models import Recommendation, RecommendationCategory
from .widgets import CustomClearableFileInput
class RecommendationForm(forms.ModelForm):
    """Create/edit form for Recommendation with custom styling."""

    class Meta:
        model = Recommendation
        fields = ('category', 'name', 'image',
                  'intro', 'description', 'link_to_website',
                  'link_to_google_maps', 'distance'
                  )

    # Override the auto-generated field so the image uses the custom widget.
    image = forms.ImageField(label="Image", required=True,
                             widget=CustomClearableFileInput)

    def __init__(self, *args, **kwargs):
        """
        Add placeholders and classes, remove auto-generated
        labels and set autofocus on first field
        """
        super().__init__(*args, **kwargs)
        categories = RecommendationCategory.objects.all()
        # Show friendly display names instead of raw category names.
        friendly_names = [(c.id, c.friendly_name) for c in categories]
        self.fields['category'].choices = friendly_names
        placeholders = {
            'category': 'Category',
            'name': 'Name',
            'image': 'Image',
            'intro': 'Intro',
            'description': 'Description',
            'link_to_website': 'Link to website',
            'link_to_google_maps': 'Link to Google Maps',
            'distance': 'Distance',
        }
        self.fields['category'].widget.attrs['autofocus'] = True
        for field in self.fields:
            # Required fields get a trailing asterisk in the placeholder.
            if self.fields[field].required:
                placeholder = f'{placeholders[field]} *'
            else:
                placeholder = placeholders[field]
            self.fields[field].widget.attrs['placeholder'] = placeholder
            self.fields[field].widget.attrs['class'] = 'recommendation-form-fields'
            self.fields[field].label = False
        # FIX: these row counts are loop-invariant; they were previously
        # re-assigned on every iteration of the loop above.
        self.fields['intro'].widget.attrs['rows'] = 2
        self.fields['description'].widget.attrs['rows'] = 5
|
def shell(lista):
    """Shell sort *lista* in place with halving gaps; return the list."""
    gap = len(lista) // 2
    while gap > 0:
        for start in range(gap):
            # Gapped insertion sort over lista[start::gap] -- the same
            # work the brechainsercion() helper performs, inlined here.
            for i in range(start + gap, len(lista), gap):
                value = lista[i]
                pos = i
                while pos >= gap and lista[pos - gap] > value:
                    lista[pos] = lista[pos - gap]
                    pos -= gap
                lista[pos] = value
        gap //= 2
    return lista
def brechainsercion(lista, init, brecha):
    """Insertion-sort the gapped sublist lista[init::brecha] in place."""
    for idx in range(init + brecha, len(lista), brecha):
        valor = lista[idx]
        hueco = idx
        # Shift larger gap-neighbours to the right until valor fits.
        while hueco >= brecha and lista[hueco - brecha] > valor:
            lista[hueco] = lista[hueco - brecha]
            hueco -= brecha
        lista[hueco] = valor
# Demo: walk through one run of Shell sort on a fixed 10-element list,
# printing (in Spanish) a step-by-step explanation of each gap pass,
# then print the final sorted result.
lista = [8, 43, 17, 6, 40, 16, 18, 97, 11, 7]
print("Teniendo una lista desordenada ", lista)
print("Se crea un intervalo = n//2 tal que 10//2 = 5")
print("Se halla el elemento en la posicion del intervalo = 16, y lo compara con el elemento 5 posiciones atras = 8, 16 > 8 por lo tanto queda igual")
print("")
print("luego compara el siguiente al 16 = 18 con el siguiente al 8 = 43, 18 < 43 por lo tanto se intercambian, quedando tal que[8, 18, 17, 6, 40, 16, 43, 97, 11, 7] ")
print("Luego compara el proximo elemento a 43 = 97, con el proximo elemento a 18 = 17, 97 > 17, por lo tanto queda igual")
print("Compara despues el siguiente a 97 = 11, con el siguiente a 17 = 6, 11 > 6, por lo tanto queda igual")
print("por ultimo, Compara el siguiente a 11 = 7, con el siguiente a 6 = 40, 7 < 40, por lo tanto intercambia quedando [8, 18, 17, 6, 7, 16, 43, 97, 11, 40] ")
print("")
print("Una vez terminado este primer recorrido actualiza el intervalo con el anterior intervalo como n = 5, 5//2 = 3 y vuelve a lo mismo" )
print("Elemento en la posicion 3 = 6, elemento 3 posiciones anterior = 8, 6 < 8, por ende se intercambia, quedando [6, 18, 17, 8, 7, 16, 43, 97, 11, 40]")
print("Sigue realizando estas comparaciones tal como se hizo anteriormente dando como resultado a todos los intercambios = [6, 7, 16, 8, 18, 11, 40, 97, 17, 43] ")
print("")
print("Luego tomara de nuevo el anterior intervalo para n = 3, 3//2 = 2 y repite el proceso esta vez comparando de a 2 espacios")
print("el elemento en la posicion 2 = 16, el elemento 2 posiciones atras = 6, 16 > 6, no intercambia, y vuelta a empezar")
print("Seguira haciendo el proceso hasta llegar a [6, 7, 16, 8, 18,11 ,17 ,43 ,40, 97] ")
print("")
print("Una vez terminado este proceso, repetirá lo de crear un nuevo intervalo usando el anterior como n = 2, 2//2 = 1")
print("Ahora se hara cambios de una posicion, estando en la posicion 1 = 7, y el elemento 1 posicion atras = 6, 7 > 6, no hay cambio")
print("Al terminar estas comparaciones estaria terminado el ordenamiento dando como resultado: ")
# shell() sorts the list in place and returns it.
print(shell(lista))
# Done
from django.contrib.auth.hashers import make_password
from rest_framework import serializers
from core.serializers import UserSerializer
from .models import EmployeeRequest
class RequestSerializer(serializers.ModelSerializer):
    """Base serializer exposing the core EmployeeRequest fields."""
    class Meta:
        model = EmployeeRequest
        fields = (
            'id',
            'request_status',
            'title',
            'description',
            'processed_by'
        )
class RequestListSerializer(RequestSerializer):
    """Read serializer that expands related users into nested objects."""

    # Nested user representations for list/detail output.
    request_by = UserSerializer()
    processed_by = UserSerializer()

    class Meta:
        model = EmployeeRequest
        # FIX: 'processed_by' is already in the inherited field tuple;
        # listing it again duplicated the entry in Meta.fields.
        fields = RequestSerializer.Meta.fields + (
            'request_by',
            'created_at',
        )
|
#!/usr/bin/env python
"""
This run a user specified command and log its result.
./command.py [-a] [-c command] {logfilename}
logfilename : This is the name of the log file. Default is command.log.
-a : Append to log file. Default is to overwrite log file.
-c : spawn command. Default is the command 'ls -l'.
Example:
This will execute the command 'pwd' and append to the log named my_session.log:
./command.py -a -c 'pwd' my_session.log
"""
import os, sys, getopt
import traceback
import pexpect
# On a usage error: print the help text, then exit immediately.
def exit_with_usage():
    """Print the module docstring (usage text) and terminate the process."""
    # Single-argument print(...) works on both Python 2 and Python 3.
    print(globals()['__doc__'])
    # _exit skips cleanup handlers -- intentional for an abort path.
    os._exit(1)
def main():
    """Parse command-line options, run the requested command under pexpect,
    and log its output to the chosen log file.

    Exits via exit_with_usage() on any argument error.
    """
    ######################################################################
    # Parse the options, arguments, get ready, etc.
    ######################################################################
    try:
        optlist, args = getopt.getopt(sys.argv[1:], 'h?ac:', ['help', 'h', '?'])
    # getopt raises on any flag other than -a/-h/-c/-?/--help/--h/--?;
    # report it and show usage.  FIX: "except E as e" replaces the
    # Python-2-only "except E, e" form (valid on Py2.6+ and Py3).
    except Exception as e:
        print(str(e))
        exit_with_usage()
    options = dict(optlist)
    # At most one log file name may be given.
    if len(args) > 1:
        exit_with_usage()
    # Any help flag short-circuits to the usage text.
    if [elem for elem in options if elem in ['-h', '--h', '-?', '--?', '--help']]:
        print("Help:")
        exit_with_usage()
    # Log file name: the single positional argument, default command.log.
    if len(args) == 1:
        script_filename = args[0]
    else:
        script_filename = "command.log"
    # -a appends to an existing log; otherwise it is overwritten.
    if '-a' in options:
        fout = open(script_filename, "ab")
    else:
        fout = open(script_filename, "wb")
    # -c selects the command to run; default is 'ls -l'.
    if '-c' in options:
        command = options['-c']
    else:
        command = "ls -l"
    try:
        # Log file title line.  FIX: the file is opened in binary mode, so
        # write bytes (required on Python 3; also valid on Python 2).
        fout.write(b'==========Log Tile: IBM developerWorks China==========\n')
        # Spawn the command as a pexpect child, mirroring output to the log.
        p = pexpect.spawn(command)
        p.logfile = fout
        # Wait for EOF so the child's complete output lands in the log.
        p.expect(pexpect.EOF)
    finally:
        # FIX: the log file was previously left open if spawn/expect failed.
        fout.close()
    return 0
if __name__ == "__main__":
    try:
        main()
    # FIX: "except E as e" replaces the Python-2-only "except E, e" form.
    except SystemExit as e:
        # Let explicit exits propagate untouched.
        raise e
    except Exception as e:
        # Anything else: report, dump the traceback, abort hard.
        print("ERROR")
        print(str(e))
        traceback.print_exc()
        os._exit(1)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 8 14:01:16 2020
@author: gsd818
"""
import pandas as pd
import csv
configfile: "config.yaml"
## --------------------------------------------------------------------------------
##### Modules #####
include: "rules/gwasApproaches.smk"
include: "rules/polyadapt.smk"
include: "vcf2acf.smk"
## --------------------------------------------------------------------------------
##### Wildcards #####
wildcard_constraints:
pheno="[^-]+"
## --------------------------------------------------------------------------------
##### Target rules #####
def run_all_rules(_):
    """Snakemake input function: collect per-phenotype target files.

    For every phenotype whose candidate file has more than 4 lines,
    request the Genscores and QX report outputs.  Returns the list of
    input paths for rule `allrules`.
    """
    inputs = []
    # this will run one of the phenotypes
    #"UKBiobank/data/gwasfreqs-pops-102_irnt.tsv.gz",
    # this will run all the phenotypes
    # FIX: the comment above was missing its leading '#' (syntax error).
    # NOTE(review): 'phenoname_qx.txt' is opened for writing but nothing is
    # ever written to it -- confirm whether records should be emitted here.
    with open("phenoname_qx.txt", "w") as fout:
        for pheno in pd.read_table('phenoname.txt')['phenoname'].tolist():
            tsv = checkpoints.polyAdapt_freqs.get(pheno=pheno, level='pops').output.candi
            with open(tsv) as fin:
                if len(fin.readlines()) > 4:
                    inputs.append("UKBiobank/selection_UKBV2/Genscores-pops-{pheno}.txt".format(pheno=pheno))
                    inputs.append("UKBiobank/selection_UKBV2/QX_fm_report-pops-{pheno}.txt".format(pheno=pheno))
    # FIX: the input list was built but never returned; a Snakemake input
    # function must return the file list.
    return inputs
## targets
rule allrules:
input:
run_all_rules
|
from django.urls import path
from . import views
# Namespace used when reversing these routes (e.g. 'members:profile').
app_name = 'members'

# URL routes for the members app.  The redundant tuple wrapping around the
# list was dropped -- urlpatterns is the identical list object either way.
urlpatterns = [
    path("", views.index, name="index"),
    path("profile-list/", views.list_students, name="list_students"),
    path("profile/<slug:member_id>/", views.profile, name="profile"),
    path("signup/", views.signup, name="signup"),
    path("test/list/", views.test_list, name="test_list"),
    path("test/edit/<slug:test_id>/", views.edit_test, name="edit_test"),
    path("test/add/", views.add_test, name="add_test"),
]
# Print the elements of list1 that also appear in list2 (order of list1).
list1 = [1, 3, 6, 78, 35, 55]
list2 = [12, 24, 35, 24, 88, 120, 155]
# PERF: membership tests against a set are O(1); testing against the raw
# list made the comprehension O(len(list1) * len(list2)).
_list2_set = set(list2)
list3 = [num for num in list1 if num in _list2_set]
print(list3)
import torch
import torchaudio
import os
from pathlib import Path
from torch import Tensor
from torchaudio import transforms as T
from torch.utils.data import Dataset, DataLoader
from typing import Tuple
class SpeechCommandsv1(Dataset):
    """Google Speech Commands v1 keyword-spotting dataset.

    Loads .wav files from *root*, resamples them to *sample_rate*, and
    returns log-mel spectrograms paired with integer class targets.
    The wav's parent folder name is its class label.
    """
    # The 30 keyword classes of v1 (alphabetical).
    CLASSES = ['bed', 'bird', 'cat', 'dog', 'down', 'eight', 'five', 'four', 'go', 'happy', 'house', 'left', 'marvin', 'nine', 'no',
               'off', 'on', 'one', 'right', 'seven', 'sheila', 'six', 'stop', 'three', 'tree', 'two', 'up', 'wow', 'yes', 'zero']
    def __init__(self, root: str, split: str = 'train', sample_rate: int = 32000, win_length: int = 1024, n_mels: int = 64, fmin: int = 0, fmax: int = None, transform=None) -> None:
        super().__init__()
        assert split in ['train', 'val', 'test']
        self.num_classes = len(self.CLASSES)
        self.transform = transform
        # Positional args: n_fft = win_length, hop = sample_rate//100 (10 ms).
        self.mel_tf = T.MelSpectrogram(sample_rate, win_length, win_length, sample_rate//100, fmin, fmax, n_mels=n_mels, norm='slaney', mel_scale='slaney')
        # Source wavs are resampled 16 kHz -> sample_rate (assumes the
        # on-disk files are 16 kHz, per the fixed 16000 here -- TODO confirm).
        self.resample = T.Resample(16000, sample_rate)
        self.data, self.targets = self.get_data(root, split)
        print(f"Found {len(self.data)} {split} audios in {root}.")
    def get_data(self, root: str, split: str):
        """Return (files, targets) lists for the requested split."""
        root = Path(root)
        if split == 'train':
            # Train = all wavs minus the official test/validation lists
            # and the background-noise folder.
            files = root.rglob('*.wav')
            excludes = []
            with open(root / 'testing_list.txt') as f1, open(root / 'validation_list.txt') as f2:
                excludes += f1.read().splitlines()
                excludes += f2.read().splitlines()
            excludes = list(map(lambda x: str(root / x), excludes))
            files = list(filter(lambda x: "_background_noise_" not in str(x) and str(x) not in excludes, files))
        else:
            # val/test come straight from the official list files.
            split = 'testing' if split == 'test' else 'validation'
            with open(root / f'{split}_list.txt') as f:
                files = f.read().splitlines()
            files = list(map(lambda x: root / x, files))
        # Label = index of each wav's parent folder name within CLASSES.
        targets = list(map(lambda x: self.CLASSES.index(str(x.parent).rsplit(os.path.sep, maxsplit=1)[-1]), files))
        assert len(files) == len(targets)
        return files, targets
    def __len__(self) -> int:
        return len(self.data)
    def __getitem__(self, index: int) -> Tuple[Tensor, Tensor]:
        """Return (log-mel spectrogram, class index) for sample *index*."""
        audio, _ = torchaudio.load(self.data[index])
        audio = self.resample(audio)
        if audio.shape[1] < 32000: audio = torch.cat([audio, torch.zeros(1, 32000-audio.shape[1])], dim=-1) # if less than 1s, pad the audio
        audio = self.mel_tf(audio) # convert to mel spectrogram
        audio = 10.0 * audio.clamp_(1e-10).log10() # convert to log mel spectrogram (dB, clamped to avoid log(0))
        if self.transform: audio = self.transform(audio)
        target = int(self.targets[index])
        return audio, target
class SpeechCommandsv2(SpeechCommandsv1):
    """Speech Commands v2: same pipeline as v1 with the 35-word label set."""
    CLASSES = ['backward', 'bed', 'bird', 'cat', 'dog', 'down', 'eight', 'five', 'follow', 'forward', 'four', 'go', 'happy', 'house', 'learn', 'left',
               'marvin', 'nine', 'no', 'off', 'on', 'one', 'right', 'seven', 'sheila', 'six', 'stop', 'three', 'tree', 'two', 'up', 'visual', 'wow', 'yes', 'zero']
if __name__ == '__main__':
    # Smoke test: load one batch and print its shape/range.
    # NOTE(review): hard-coded local Windows dataset path -- not portable.
    dataset = SpeechCommandsv2('C:\\Users\\sithu\\Documents\\Datasets\\SpeechCommandsv2', 'train')
    dataloader = DataLoader(dataset, 4, True)
    for audio, target in dataloader:
        print(audio.shape, target)
        print(audio.min(), audio.max())
        break
|
from langdetect import detect_langs
minThreshold = 0.7
def DetectLanguage(message):
    """Return the language code langdetect assigns to *message*.

    An empty message yields 'unk'.  Raises LanguageDetectionError when no
    candidate is found or the best candidate is below minThreshold.
    """
    if message == '':
        return 'unk'
    candidates = detect_langs(message)
    # Reject both "no candidates" and "best candidate too uncertain".
    if len(candidates) == 0 or candidates[0].prob < minThreshold:
        raise LanguageDetectionError("The language of the message could not be identified.")
    return candidates[0].lang
# Canned "please use English" replies, keyed by langdetect language code.
# Each reply is written (transliterated where needed) in the target
# language itself; English maps to the empty string (no reply needed).
replies = {
    'af': "Ek praat nie Afrikaans nie. Gebruik asseblief Engels.",
    'ar': "'ana la 'atakallam alearabiati. yrja aistikhdam alllughat al'iinjaliziat.",
    'bg': "Az ne govorya bulgarski. Molya, izpolzvai?te anglii?ski.",
    'bn': "Ami banla balate pari na. Inreji byabahara karuna.",
    'ca': "No parlo català. Si us plau, utilitzeu anglès.",
    'cs': "Nemluvím cesky. Použijte anglictinu.",
    'cy': "Dydw i ddim yn siarad Cymraeg. Defnyddiwch Saesneg.",
    'da': "Jeg taler ikke dansk. Brug venligst engelsk.",
    'de': "Ich spreche kein Deutsch. Bitte verwende Englisch.",
    'el': "Den miló elliniká. Parakaloúme chrisimopoiíste Angliká.",
    'en': "",
    'es': "Yo no hablo español. Por favor usa inglés.",
    'et': "Ma ei räägi eesti keelt. Palun kasutage inglise keeles.",
    'fi': "En puhu suomea. Käytä Englanti.",
    'fr': "Je ne parle pas français. Veuillez utiliser l'anglais.",
    'gu': "Hu? gujarati nathi bolata. I?galisa upayoga karo.",
    'hi': "main hindee mein baat nahin karate. krpaya inglish istemaal karen.",
    'hr': "Ne govorim hrvatski. Molimo koristite engleski.",
    'hu': "Én nem beszélek magyarul. Használja angol.",
    'id': "Saya tidak berbicara bahasa Indonesia. Harap menggunakan bahasa Inggris.",
    'it': "Non parlo italiano. Si prega di usare l'inglese.",
    'ja': "Watashi wa nihongo o hanasemasen. Eigo o tsukatte kudasai.",
    'kn': "Nanu kanna?a matana?uvudilla. I?gli? ba?asi.",
    'ko': "naneun hangug-eoleul moshae. yeong-eoleul sayonghasibsio.",
    'lt': "Aš nekalbu lietuviškai. Prašome naudoti anglu kalba.",
    'lv': "Es nerunaju latviski. Ludzu, izmantojiet anglu.",
    'mk': "Jas ne zboruvam makedonski. Ve molime da go koristat angliskiot jazik.",
    'ml': "ñan malaya?a? sansarikkilla. i?gli? upayeagikkuka.",
    'mr': "Mi mara?hi bolata nahi. Kr?paya i?graji vapara.",
    'ne': "Ma nepali bolna chaina. A?greji prayoga garnuhos.",
    'nl': "Ik spreek geen Nederlands. Gebruik Engels.",
    'no': "Jeg snakker ikke norsk. Vennligst bruk engelsk.",
    'pa': "Mainu pajabi di gala na karade. Agarezi vica varata karo ji.",
    'pl': "Nie mówie po polsku. Prosze uzywac jezyka angielskiego.",
    'pt': "Eu não falo português. Por favor, use o inglês.",
    'ro': "Eu nu vorbesc române?te. Va rugam sa folosi?i limba engleza.",
    'ru': "YA ne govoryu po-russki. Pozhaluysta, ispol'zuyte angliyskiy yazyk.",
    'sk': "Nechcem hovorit slovenské. Použite anglictinu.",
    'sl': "Ne govorim slovensko. Prosimo, uporabite anglešcino.",
    'so': "Aanan u hadli Soomaali aadan. Fadlan isticmaal Ingiriisi.",
    'sq': "Unë nuk flas shqip. Ju lutem përdorni anglisht.",
    'sv': "Jag talar inte svenska. Använd engelska.",
    'sw': "Sizungumzi Kiswahili. Tafadhali kutumia lugha ya Kiingereza.",
    'ta': "Na? tami? peca mu?iyatu. A?kilam paya?pa?utta tayavu ceytu.",
    'te': "Nenu telugu ma?la?utaru ledu. Dayacesi i?gli? upayogin¯ca??i.",
    'th': "C¯h?n m?` phud p?has¯'a th?y kru?a chi^ p?has¯'a x?ngkvs¯'",
    'tr': "Türkçe bilmiyorum. Lütfen ingilizceyi kullan.",
    'uk': "YA ne hovoryu po-ukrayins'ky. Bud' laska, vykorystovuyte anhliys'ku movu.",
    'vi': "Tôi không nói du?c ti?ng Vi?t. Vui lòng s? d?ng ti?ng Anh.",
    'zh-cn': "Wo bù huì shuo zhongwén. Qing shiyòng yingyu.",
    'zh-tw': "Wo bù huì shuo zhongwén. Qing shiyòng yingyu."
}
def GenerateReply(language):
    """Return the canned reply for *language*.

    Raises LanguageIdentificationError for unsupported language codes.
    """
    # Guard clause: unknown code -> error listing the supported codes.
    if language not in replies:
        raise LanguageIdentificationError("Sorry, we could not identify your language. Supported languages are: {l}."
                                          .format(l=list(replies.keys())))
    return replies[language]
class LanguageIdentificationError(Exception):
    """Raised when a language code has no canned reply available."""

    def __init__(self, message):
        # Py3-style super(); also keep the text on the instance for callers.
        super().__init__(message)
        self.message = message
class LanguageDetectionError(Exception):
    """Raised when langdetect cannot confidently identify a message."""

    def __init__(self, message):
        # Py3-style super(); also keep the text on the instance for callers.
        super().__init__(message)
        self.message = message
|
import pandas as pd
# Fit a single-feature linear regression of salary on years of experience,
# then predict the salary for a user-supplied experience value.
dataset=pd.read_csv('Salary.csv')
# reshape(-1,1): sklearn expects a 2-D feature matrix, not a 1-D array.
x = dataset['YearsExperience'].values.reshape(-1,1)
y = dataset['Salary']
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x,y)
# NOTE(review): float(years) raises ValueError on non-numeric input.
years=input("Enter experience of an indiviual(in years):")
print("Salary:",model.predict([[float(years)]]))
|
# Module imports
import numpy as np
import cv2
import images
# Desciption:
# Class that defines camera properties and processes its images
# Attributes:
# camera: cv2.VideoCapture object
# resolution: camera resolution
class Camera:
    """Wraps a cv2.VideoCapture source and produces lane-processed frames.

    Attributes:
        camera: cv2.VideoCapture object
        resolution: scale factor applied to the 640x480 working size
        temp_medio: last averaged Hough line, reused when none is detected
    """

    # Fallback "average line" until the first successful detection.
    # FIX: get_final() previously declared `global temp_medio`, but the only
    # temp_medio was this class attribute -- the module-level global never
    # existed, so the IndexError fallback raised NameError on the first
    # frame without detected lines.  State is now kept per instance.
    temp_medio = [0, 0, 0, 0]

    def __init__(self, camera_port = 0, resolution = 1, source = 'estaaa.webm'):
        # NOTE(review): camera_port was already ignored by the original
        # code; the hard-coded video file is now the backward-compatible
        # default `source` (may also be a camera index).
        self.camera = cv2.VideoCapture(source)
        self.resolution = resolution
        self.temp_medio = list(Camera.temp_medio)
        if not self.camera.isOpened():
            raise ValueError('Camera disconnected or wrong port!')
        self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
        self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

    def __del__(self):
        """Destructor: release the capture device."""
        self.camera.release()
        print('Camera released!')

    def take_photo(self):
        """Return the next frame, or None when no frame is available."""
        check, img = self.camera.read()
        if check:
            return img
        return None

    def live_video(self):
        """Show frames in a window until 'q' is pressed or the stream ends."""
        while True:
            img = self.take_photo()
            # FIX: stop cleanly at end-of-stream instead of passing None
            # to cv2.imshow (which raises).
            if img is None:
                break
            cv2.imshow('Live video', img)
            if cv2.waitKey(1) == ord('q'):
                break
        cv2.destroyAllWindows()

    def get_final(self, a = 2, b = 15, c = 2):
        """Process the next frame and return the annotated result.

        a, b, c: Hough parameters forwarded to images.hough().
        Returns (final, x1, y1, x2, y2, lines): the blended display image,
        the endpoints of the averaged line, and the raw detected lines.
        """
        img = self.take_photo()
        img_resized = cv2.resize(img, (int(640 * self.resolution), int(480 * self.resolution)), interpolation = cv2.INTER_CUBIC)
        # Perspective ("bird's eye") warp of the road trapezoid.
        src_coordinates = np.float32(
            [[0  , 480],   # Bottom left
             [220, 150],   # Top left
             [420, 150],   # Top right
             [600, 480]])  # Bottom right
        src_coordinates = src_coordinates * self.resolution
        dst_coordinates = np.float32(
            [[0,   480],   # Bottom left
             [140, 0],     # Top left
             [500, 0],     # Top right
             [640, 480]])  # Bottom right
        dst_coordinates = dst_coordinates * self.resolution
        img_warped, M, Minv = images.warp(img_resized, src_coordinates, dst_coordinates)
        # Edge pipeline: grayscale -> gaussian blur -> canny.
        img_gray = cv2.cvtColor(img_warped, cv2.COLOR_RGB2GRAY)
        img_blur = images.gaussian(img_gray, 5)
        img_canny = images.canny(img_blur, 100, 130)
        height = img_resized.shape[0]
        length = img_resized.shape[1]
        # Region of interest: full width, everything below y = 80.
        vertices = np.array([[(0, height), (0, 80), (length, 80), (length, height)]], dtype = np.int32)
        img_region = images.region_of_interest(img_canny, vertices)
        img_hough, lines = images.hough(img_resized, img_region, a, b, c)
        try:
            medio = np.average(lines, axis = 0)  # average of detected lines
            self.temp_medio = np.copy(medio)     # remember for line-less frames
        except IndexError:
            # No lines detected this frame: reuse the last known average.
            medio = self.temp_medio
        carritox1, carritoy1, carritox2, carritoy2 = medio
        # Blend the Hough overlay back onto the original frame, masking out
        # the near-white overlay pixels.
        mask = cv2.inRange(img_hough, np.array([255, 255, 200]), np.array([255, 255, 255]))
        final0 = cv2.bitwise_or(img,
                                cv2.resize(img_hough, (640, 480), interpolation = cv2.INTER_AREA),
                                mask = cv2.bitwise_not(cv2.resize(mask, (640, 480), interpolation = cv2.INTER_AREA)))
        medio_image = images.punto_medio(img_resized, medio)
        final = cv2.addWeighted(final0, 0.8, medio_image, 1, 1)  # display image
        return final, carritox1, carritoy1, carritox2, carritoy2, lines
|
#!/usr/local/bin/python3
import subprocess as subp
from os import chdir, listdir
from os.path import join, isfile, realpath, dirname
import getpass
import MySQLdb
import MySQLdb.cursors
import re
def run(cmd):
    """Run *cmd* (an argv list) and return the CompletedProcess with
    stdout/stderr captured as bytes."""
    return subp.run(cmd, stdout=subp.PIPE, stderr=subp.PIPE)
def run2var(cmd):
    """Run *cmd* and return its stdout as text, minus the segment after the
    last newline (normally the trailing empty string)."""
    decoded = run(cmd).stdout.decode('utf-8')
    # Everything before the last '\n' -- same as '\n'.join(split[:-1]).
    return decoded.rpartition('\n')[0]
def get_db_config(base_path, filename='settings.php'):
    """Extract MySQL credentials from a PHP settings file.

    Scans for $db_host / $db_user / $db_pass / $db_name assignments and
    returns them as a dict keyed host/user/pass/name.
    """
    with open(join(base_path, filename)) as handle:
        contents = handle.read()
    # Each credential follows the same  $db_<key> = "<value>"  pattern.
    return {key: re.findall(r'\$db_%s = \"(.+?)\"' % key, contents)[0]
            for key in ('host', 'user', 'pass', 'name')}
def get_db(base_path):
    """Open a MySQL connection using credentials parsed from settings.php.

    Rows come back as dicts thanks to DictCursor.
    """
    conf = get_db_config(base_path)
    return MySQLdb.connect(
        host=conf['host'],
        user=conf['user'],
        passwd=conf['pass'],
        db=conf['name'],
        cursorclass=MySQLdb.cursors.DictCursor,
    )
USERNAME = getpass.getuser()
BASE_PATH = dirname(realpath(__file__))
chdir(BASE_PATH)
# Read the currently applied migration number from the database.
db = get_db(BASE_PATH)
cursor = db.cursor()
cursor.execute("SELECT * FROM configuration;")
config = cursor.fetchone()
db.close()
currM = int(config['current_migration'])
run(['git', 'pull'])
# Migration files are named NNNN_description.sql and applied in order.
dbp = join(BASE_PATH, 'db')
files = sorted([f for f in listdir(dbp) if re.match(r'\d\d\d\d_.*\.sql', f)])
print('Current migration', f'{currM:04}')
did_migrate = False
for f in files:
    if int(f[:4]) > currM:
        did_migrate = True
        print('Applying migration', f)
        conn = get_db(BASE_PATH)
        fullf = join(dbp, f)
        try:
            cur = conn.cursor()
            with open(fullf, 'r') as mf:
                query = mf.read()
            cur.execute(query)
            cur.close()
            conn.commit()
        except Exception as e:
            print('Error upgrading to', f)
            print(e)
            conn.rollback()
        finally:
            # FIX: the per-migration connection was never closed (leak).
            conn.close()
        # NOTE(review): currM advances even when the migration failed and
        # was rolled back -- confirm whether the loop should stop instead.
        currM = int(f[:4])
# Persist the new migration number (skipped when nothing was applied).
db = get_db(BASE_PATH)
cursor = db.cursor()
if not did_migrate:
    print('No new migrations found')
else:
    # currM only ever holds int-derived values, so the f-string is safe here.
    cursor.execute(
        f"UPDATE configuration SET current_migration = '{currM:04}';")
    cursor.close()
    db.commit()
# FIX: close the final connection in both branches.
db.close()
|
import os
# we can create another file for each project (folder)
# we can create another file for each project (folder)
def create_project_dir(directory):
    """Create *directory* (and any parents) if it does not already exist."""
    if not os.path.exists(directory):
        print('creating directory for now ' + directory)
        # FIX: exist_ok avoids a crash if the directory appears between the
        # exists() check and makedirs() (TOCTOU race).
        os.makedirs(directory, exist_ok=True)
# create queue and final crawled files (by some if) :)
def create_data_files(project_name, base_url):
    """Seed a project's queue.txt (with *base_url*) and FinalCrawled.txt."""
    queue_path = os.path.join(project_name, 'queue.txt')
    crawled_path = os.path.join(project_name, "FinalCrawled.txt")
    # Only create files that are missing; never clobber existing data.
    if not os.path.isfile(queue_path):
        write_file(queue_path, base_url)
    if not os.path.isfile(crawled_path):
        write_file(crawled_path, '')
# create a new file :)
def write_file(path, data):
    """Create (or overwrite) *path* with the text *data*."""
    with open(path, 'w') as out:
        out.write(data)
# add data onto my file by append method
def append_to_file(path, data):
    """Append *data* plus a newline to *path* (created when absent)."""
    with open(path, 'a') as handle:
        handle.write(data + '\n')
# delete the contents of a file by write mode
def delete_file_contents(path):
    """Truncate *path* to zero length."""
    # Opening in 'w' mode truncates; nothing needs to be written.
    with open(path, 'w'):
        pass
# Top the best solution every files are convert each line to set items
def file_to_set(file_name):
    """Read *file_name* and return its lines (newlines stripped) as a set."""
    with open(file_name, 'rt') as handle:
        # Duplicates collapse automatically in the set comprehension.
        return {line.replace('\n', '') for line in handle}
# through a set, convert set to file by iterative
def set_to_file(links, file_name):
    """Write *links* to *file_name*, one per line, in sorted order."""
    with open(file_name, "w") as handle:
        handle.writelines(link + "\n" for link in sorted(links))
|
import torch
import torch.nn as nn
from torch import autograd
from neat.phenotype.feed_forward import FeedForwardNet
import numpy
# from neat.visualize import draw_net
from time import time
from funk_svd import SVD
def train_one_epoch(model, inputs, targets, loss_fn, optimizer, epoch_no, device, verbose=1):
'trains the model for one epoch and returns the loss'
if verbose:
print("Epoch = {}".format(epoch_no))
# Training
# get user, item and rating data
t1 = time()
epoch_loss = []
# put the model in train mode before training
model.train()
# transfer the data to GPU
for k, feed_dict in enumerate(inputs):
"""FIXME: for key in feed_dict:
if type(feed_dict[key]) != type(None):
feed_dict[key] = feed_dict[key].to(dtype = torch.long, device = device)"""
# if(k % 100 == 0):
# print('Training with inputs. {0} out of {1} inputs'.format(k , len(inputs)))
# get the predictions
prediction = model(feed_dict)
# print(prediction.shape)
# get the actual targets
rating = targets[k]
# convert to float and change dim from [batch_size] to [batch_size,1]
rating = rating.float().view(prediction.size())
loss = loss_fn(prediction, rating)
# clear the gradients
optimizer.zero_grad()
# backpropagate **** QUESTION
loss.backward()
# update weights
optimizer.step()
# accumulate the loss for monitoring
epoch_loss.append(loss.item())
epoch_loss = numpy.mean(epoch_loss)
if verbose:
print("Epoch completed {:.1f} s".format(time() - t1))
print("Train Loss: {}".format(epoch_loss))
return epoch_loss
class RecsysConfig:
    """NEAT hyper-parameters and fitness evaluation for the recommender experiment."""
    DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    VERBOSE = True
    NUM_INPUTS = 2
    NUM_OUTPUTS = 1
    USE_BIAS = True
    ACTIVATION = 'ReLU'  # alternatives previously tried: 'sigmoid', 'tanh'
    SCALE_ACTIVATION = 4.9
    POPULATION_SIZE = 150
    NUMBER_OF_GENERATIONS = 150
    SPECIATION_THRESHOLD = 3.0
    CONNECTION_MUTATION_RATE = 0.80
    CONNECTION_PERTURBATION_RATE = 0.90
    ADD_NODE_MUTATION_RATE = .1
    ADD_CONNECTION_MUTATION_RATE = .8
    CROSSOVER_REENABLE_CONNECTION_GENE_RATE = 0.25
    # Top fraction of each species kept before mating.
    PERCENTAGE_TO_SAVE = 0.30
    FITNESS_THRESHOLD = 13.0

    def fitness_fn(self, genome, shared_data):
        """Evaluate *genome*: mean MSE over the test pairs, subtracted from max fitness.

        Side effect: stores the mean loss on ``genome.avgloss``.
        """
        net = FeedForwardNet(genome, self)
        net.to(self.DEVICE)
        criterion = nn.MSELoss()
        # NOTE(review): the optimizer is created but never stepped — the
        # training loop it served is gone; kept for behavioral parity.
        optimizer = torch.optim.Adam(net.parameters(), weight_decay=0.00001)
        total_loss = 0.0
        for sample, label in zip(shared_data.inputs_tst, shared_data.targets_tst):
            sample, label = sample.to(self.DEVICE), label.to(self.DEVICE)
            # Calling the module runs FeedForwardNet.forward on the sample.
            output = net(sample)
            total_loss += float(criterion(output, label))
        total_loss /= len(shared_data.targets_tst)
        genome.avgloss = total_loss
        return shared_data.max_fitness - total_loss

    def get_preds_and_labels(self, genome):
        """Return (predictions, labels) as parallel float lists for *genome*.

        NOTE(review): reads ``self.inputs`` / ``self.targets_tst``, which are not
        defined on this class — they must be attached externally before use.
        """
        net = FeedForwardNet(genome, self)
        net.to(self.DEVICE)
        predictions, labels = [], []
        for sample, label in zip(self.inputs, self.targets_tst):
            sample, label = sample.to(self.DEVICE), label.to(self.DEVICE)
            predictions.append(float(net(sample)))
            labels.append(float(label))
        return predictions, labels
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
from email.mime.text import MIMEText
import smtplib,sys
mail_host = 'smtp.163.com'  # SMTP server to connect to
# SECURITY(review): a real-looking account password is committed in source
# control; move these credentials to environment variables or a secrets store
# and rotate the password.
mail_user = 'nicefeiniu@163.com'  # SMTP login account
mail_passwd = '53557873ly'  # SMTP password — 163.com often requires an "authorization code" here; confirm
sender = 'nicefeiniu@163.com'  # envelope sender address
receivers = ['920036515@qq.com', '281188071@qq.com']  # destination addresses
def send_mails():
    """Send a short plain-text message to every address in ``receivers``.

    Uses the module-level SMTP configuration (``mail_host``, ``mail_user``,
    ``mail_passwd``, ``sender``, ``receivers``). SMTP errors are caught and
    printed rather than raised.
    """
    content = "Hello, my frend, where are you from? I'm from Rusha"
    msg = MIMEText(content, _subtype='plain', _charset='utf-8')
    msg['From'] = mail_user
    msg['To'] = ",".join(receivers)
    msg['Subject'] = '人生苦短,我用python!!!!'
    try:
        smtp_em = smtplib.SMTP(mail_host)
        smtp_em.login(mail_user, mail_passwd)
        # BUG FIX: send_message's signature is (msg, from_addr, to_addrs);
        # the old call passed the sender string where the Message object
        # belongs (send_message(sender, receivers, str(msg))), so the send
        # could never work.
        smtp_em.send_message(msg, sender, receivers)
        print('OK')
        smtp_em.quit()
    except smtplib.SMTPException as err:
        print(err)
if __name__ == '__main__':
send_mails() |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011, Florent Lamiraux, Thomas Moulard, JRL, CNRS/AIST
import os
from dynamic_graph.sot.dynamic_pinocchio.feet_follower import FeetFollowerFromFile
from dynamic_graph.sot.dynamic_pinocchio.tools import (
checkFinalConfiguration,
clt,
plug,
robot,
solver,
timeStep,
)
# NOTE(review): debug leftover — dumps the whole environment to stdout; confirm
# it is still wanted.
print(os.environ)
# Build the feet-follower entity and wire it into the robot's feature graph.
feetFollower = FeetFollowerFromFile("feet-follower")
feetFollower.feetToAnkleLeft = robot.dynamic.getAnklePositionInFootFrame()
feetFollower.feetToAnkleRight = robot.dynamic.getAnklePositionInFootFrame()
# Plug the follower's trajectory signals into the corresponding task references.
plug(feetFollower.signal("com"), robot.featureComDes.signal("errorIN"))
plug(feetFollower.signal("left-ankle"), robot.features["left-ankle"].reference)
plug(feetFollower.signal("right-ankle"), robot.features["right-ankle"].reference)
# Identical control gain on the CoM and both ankle tasks.
robot.comTask.signal("controlGain").value = 50.0
robot.tasks["left-ankle"].signal("controlGain").value = 50.0
robot.tasks["right-ankle"].signal("controlGain").value = 50.0
# Push tasks
# Operational points tasks
solver.sot.push(robot.name + ".task.right-ankle")
solver.sot.push(robot.name + ".task.left-ankle")
# Center of mass
solver.sot.push(robot.name + ".task.com")
# Main.
# Main loop
# Step the device 500 times; optionally mirror the state into the viewer client.
for i in range(500):
    robot.device.increment(timeStep)
    if clt:
        clt.updateElementConfig("hrp", robot.smallToFull(robot.device.state.value))
# Expected final joint configuration after 500 steps — presumably recorded from
# a known-good run; used as a regression reference below.
finalPosition = (
    -0.015361,
    -0.0049075500000000001,
    -0.00047065200000000001,
    -0.0172946,
    -0.020661800000000001,
    0.0374547,
    -0.037641599999999997,
    0.025434399999999999,
    -0.45398100000000002,
    0.86741800000000002,
    -0.39213799999999999,
    -0.0089269499999999995,
    -0.037646100000000002,
    0.025648199999999999,
    -0.46715499999999999,
    0.87717599999999996,
    -0.38872200000000001,
    -0.0091408199999999992,
    0.080488199999999996,
    -0.18355399999999999,
    -0.00036695100000000002,
    -0.0056776600000000002,
    -0.12173299999999999,
    -0.23972599999999999,
    -0.00637303,
    -0.56908000000000003,
    0.00296262,
    0.19108900000000001,
    0.100088,
    0.23896800000000001,
    0.21485599999999999,
    -0.18973400000000001,
    -0.49457699999999999,
    0.040646799999999997,
    0.16970299999999999,
    0.100067,
)
# Compare the reached state against the reference configuration.
checkFinalConfiguration(robot.device.state.value, finalPosition)
print("Exiting.")
|
from .future import DSSFuture
import json, warnings
from datetime import datetime
class DSSConnectionListItem(dict):
    """A single entry of a connection listing.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.list_connections` instead.
    """
    def __init__(self, client, data):
        super(DSSConnectionListItem, self).__init__(data)
        self.client = client

    def to_connection(self):
        """Return a :class:`DSSConnection` handle corresponding to this item."""
        return DSSConnection(self.client, self["name"])

    @property
    def name(self):
        """Identifier of the connection.

        :rtype: string
        """
        return self["id"]

    @property
    def type(self):
        """Type of the connection (e.g. PostgreSQL, EC2, Azure, ...).

        :rtype: string
        """
        return self["label"]
class DSSConnectionInfo(dict):
    """Read-only information about a connection.

    .. important::
        Do not instantiate directly, use :meth:`DSSConnection.get_info` instead.

    The main use case is retrieving the decrypted credentials of a connection,
    when the connection permissions allow it, via :meth:`get_basic_credential`
    or :meth:`get_aws_credential` depending on the connection kind.
    """
    def __init__(self, data):
        super(DSSConnectionInfo, self).__init__(data)

    def get_type(self):
        """Return the connection type (for example Azure, Snowflake, GCS, ...).

        :rtype: string
        """
        return self["type"]

    def get_params(self):
        """Return the connection parameters; fields depend on the connection type.

        :rtype: dict
        """
        return self["params"]

    def get_basic_credential(self):
        """Return the user/password pair for this connection.

        :return: a dict containing "user" and "password"
        :rtype: dict
        :raises ValueError: if no basic credential is attached
        """
        if "resolvedBasicCredential" not in self:
            raise ValueError("No basic credential available")
        return self["resolvedBasicCredential"]

    def get_aws_credential(self):
        """Return the AWS credential for this connection.

        The credential is either a keypair or an STS token triplet.

        :return: a dict containing "accessKey", "secretKey", and "sessionToken"
            (the latter only for STS tokens)
        :rtype: dict
        :raises ValueError: if no AWS credential is attached
        """
        if "resolvedAWSCredential" not in self:
            raise ValueError("No AWS credential available")
        return self["resolvedAWSCredential"]
class DSSConnection(object):
    """
    A connection on the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_connection` instead.
    """
    def __init__(self, client, name):
        self.client = client  # DSSClient performing the REST calls
        self.name = name      # identifier of the connection

    ########################################################
    # Location info
    ########################################################

    def get_location_info(self):
        """
        Get information about this connection.

        .. caution::
            Deprecated, use :meth:`~get_info()`
        """
        warnings.warn("DSSConnection.get_location_info is deprecated, please use get_info", DeprecationWarning)
        return self.get_info()

    def get_info(self, contextual_project_key=None):
        """
        Get information about this connection.

        .. note::
            This call requires permissions to read connection details

        :param string contextual_project_key: (optional) project key to use to resolve variables
        :return: an object containing connection information
        :rtype: :class:`DSSConnectionInfo`
        """
        additional_params = { "contextualProjectKey": contextual_project_key } if contextual_project_key is not None else None
        return DSSConnectionInfo(self.client._perform_json(
            "GET", "/connections/%s/info" % self.name, params=additional_params))

    ########################################################
    # Connection deletion
    ########################################################

    def delete(self):
        """
        Delete the connection
        """
        return self.client._perform_empty(
            "DELETE", "/admin/connections/%s" % self.name)

    def get_settings(self):
        """
        Get the settings of the connection.

        You must use :meth:`~DSSConnectionSettings.save()` on the returned object to make your changes effective
        on the connection.

        Usage example

        .. code-block:: python

            # make details of a connection accessible to some groups
            connection = client.get_connection("my_connection_name")
            settings = connection.get_settings()
            settings.details_readability.set_readability(False, "group1", "group2")
            settings.save()

        :return: the settings of the connection
        :rtype: :class:`DSSConnectionSettings`
        """
        # DOC FIX: the example previously called the non-existent
        # connection.settings() and settings.set_readability(); the method is
        # get_settings(), and readability lives on details_readability.
        settings = self.client._perform_json(
            "GET", "/admin/connections/%s" % self.name)
        return DSSConnectionSettings(self, settings)

    def get_definition(self):
        """
        Get the connection's raw definition.

        .. caution::
            Deprecated, use :meth:`get_settings()` instead.

        The exact structure of the returned dict is not documented and depends on the connection
        type. Create connections using the DSS UI and call :meth:`get_definition` to see the
        fields that are in it.

        .. note::
            This method returns a dict with passwords and secrets in their encrypted form. If you need
            credentials, consider using :meth:`get_info()` and :meth:`dataikuapi.dss.admin.DSSConnectionInfo.get_basic_credential()`.

        :return: a connection definition, as a dict. See :meth:`DSSConnectionSettings.get_raw()`
        :rtype: dict
        """
        return self.client._perform_json(
            "GET", "/admin/connections/%s" % self.name)

    def set_definition(self, definition):
        """
        Set the connection's definition.

        .. caution::
            Deprecated, use :meth:`get_settings()` then :meth:`DSSConnectionSettings.save()` instead.

        .. important::
            You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
            not create a new dict.

        :param dict definition: the definition for the connection, as a dict.
        """
        return self.client._perform_json(
            "PUT", "/admin/connections/%s" % self.name,
            body = definition)

    ########################################################
    # Security
    ########################################################

    def sync_root_acls(self):
        """
        Resync root permissions on this connection path.

        This is only useful for HDFS connections when DSS has User Isolation activated with "DSS-managed HDFS ACL"

        :return: a handle to the task of resynchronizing the permissions
        :rtype: :class:`~dataikuapi.dss.future.DSSFuture`
        """
        future_response = self.client._perform_json(
            "POST", "/admin/connections/%s/sync" % self.name,
            body = {'root':True})
        return DSSFuture(self.client, future_response.get('jobId', None), future_response)

    def sync_datasets_acls(self):
        """
        Resync permissions on datasets in this connection path.

        This is only useful for HDFS connections when DSS has User Isolation activated with "DSS-managed HDFS ACL"

        :return: a handle to the task of resynchronizing the permissions
        :rtype: :class:`~dataikuapi.dss.future.DSSFuture`
        """
        # BUG FIX: this method was a byte-for-byte copy of sync_root_acls and
        # sent {'root': True}, resyncing the root ACLs instead of the datasets'.
        # The datasets resync must request root=False.
        future_response = self.client._perform_json(
            "POST", "/admin/connections/%s/sync" % self.name,
            body = {'root':False})
        return DSSFuture(self.client, future_response.get('jobId', None), future_response)
class DSSConnectionSettings(object):
    """
    Settings of a DSS connection.

    .. important::
        Do not instantiate directly, use :meth:`DSSConnection.get_settings` instead.

    Use :meth:`save` to make any modification effective on the connection.
    """
    def __init__(self, connection, settings):
        self.connection = connection  # parent DSSConnection handle
        self.settings = settings      # raw settings dict, mutated in place

    def get_raw(self):
        """
        Get the raw settings of the connection.

        :return: a connection definition, as a dict. Notable fields are:

            * **type** : type of the connection (for example PostgreSQL, Azure, ...)
            * **params** : dict of the parameters specific to the connection type
            * **allowWrite** : if False, DSS will not perform write operations on the connection
            * **allowManagedDatasets** : whether DSS will allow creating managed datasets on the connection
            * **allowManagedFolders** : whether DSS will allow creating managed folders on the connection
            * **credentialsMode** : whether the credentials of the connection are per-user ("PER_USER") or global for all users ("GLOBAL")
            * **usableBy** : defines which DSS users can use the connection. Possible values: ALL, ALLOWED
            * **allowedGroups** : if **usableBy** is "ALLOWED", a list of group names
            * **detailsReadability** : definition of which DSS users can access the details of the connection, in particular the credentials in it

                * **readableBy** : defines which users can access the details. Possible values: NONE, ALL, ALLOWED.
                * **allowedGroups** : if **readableBy** is "ALLOWED", a list of group names

            * **indexingSettings** : for SQL-like connection, what to index
            * **maxActivities** : maximum number of concurrent activities in all jobs of the instance that use the connection
            * **useGlobalProxy** : if a proxy is defined in the DSS general settings, whether to use it or not

        :rtype: dict
        """
        return self.settings

    @property
    def type(self):
        """Type of the connection, like PostgreSQL, EC2, Azure, ...

        :rtype: string
        """
        return self.settings['type']

    @property
    def allow_managed_datasets(self):
        """Whether managed datasets may be created on the connection.

        :rtype: boolean
        """
        return self.settings['allowManagedDatasets']

    @allow_managed_datasets.setter
    def allow_managed_datasets(self, new_value):
        self.settings["allowManagedDatasets"] = new_value

    @property
    def allow_managed_folders(self):
        """Whether managed folders may be created on the connection.

        :rtype: boolean
        """
        return self.settings['allowManagedFolders']

    @allow_managed_folders.setter
    def allow_managed_folders(self, new_value):
        self.settings["allowManagedFolders"] = new_value

    @property
    def allow_write(self):
        """Whether data can be written through this connection; if False it is read-only from DSS's point of view.

        :rtype: boolean
        """
        return self.settings['allowWrite']

    @allow_write.setter
    def allow_write(self, new_value):
        self.settings["allowWrite"] = new_value

    @property
    def details_readability(self):
        """Access control over the connection details.

        :rtype: :class:`DSSConnectionDetailsReadability`
        """
        return DSSConnectionDetailsReadability(self.settings["detailsReadability"])

    @property
    def usable_by(self):
        """Usage access-control mode: "ALL" (anybody) or "ALLOWED" (only groups listed in :meth:`usable_by_allowed_groups`).

        :rtype: string
        """
        return self.settings["usableBy"]

    @property
    def usable_by_allowed_groups(self):
        """Groups allowed to use the connection; only meaningful when :meth:`usable_by` is ALLOWED.

        :rtype: list[string]
        """
        return self.settings["allowedGroups"]

    def set_usability(self, all, *groups):
        """
        Set who can use the connection.

        :param boolean all: if True, anybody can use the connection
        :param *string groups: a list of groups that can use the connection
        """
        if not all:
            self.settings["usableBy"] = 'ALLOWED'
            self.settings["allowedGroups"] = groups
        else:
            self.settings["usableBy"] = 'ALL'

    def save(self):
        """Push the current settings back to the connection on DSS."""
        self.connection.client._perform_json(
            "PUT", "/admin/connections/%s" % self.connection.name,
            body = self.settings)
class DSSConnectionDetailsReadability(object):
    """
    Access-control settings for reading a connection's details.

    Connection details mostly cover credentials, and some workloads need them:
    typically, having Spark processes access data directly implies handing the
    credentials to those processes, which in turn requires that the user can
    read the connection's details.
    """
    def __init__(self, data):
        self._data = data  # raw dict, mutated in place

    @property
    def readable_by(self):
        """Access mode: "NONE" (nobody), "ALL" (anybody) or "ALLOWED" (only groups in :meth:`allowed_groups`).

        :rtype: string
        """
        return self._data["readableBy"]

    @property
    def allowed_groups(self):
        """Groups allowed to read the details; only meaningful when :meth:`readable_by` is ALLOWED.

        :rtype: list[string]
        """
        return self._data["allowedGroups"]

    def set_readability(self, all, *groups):
        """
        Set who can get details from the connection.

        Pass ``all=False`` with no group to make details readable by nobody.

        :param boolean all: if True, anybody can read the details
        :param *string groups: groups that can read the details
        """
        if all:
            self._data["readableBy"] = 'ALL'
            return
        if not groups:
            self._data["readableBy"] = 'NONE'
            return
        self._data["readableBy"] = 'ALLOWED'
        self._data["allowedGroups"] = groups
class DSSUser(object):
    """
    A handle for a user on the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_user` instead.
    """
    def __init__(self, client, login):
        self.client = client
        self.login = login

    def delete(self):
        """Delete the user."""
        return self.client._perform_empty(
            "DELETE", "/admin/users/%s" % self.login)

    def get_settings(self):
        """
        Get the settings of the user.

        You must use :meth:`~DSSUserSettings.save()` on the returned object to
        make your changes effective on the user, e.g.::

            # disable some user
            user = client.get_user('the_user_login')
            settings = user.get_settings()
            settings.enabled = False
            settings.save()

        :return: the settings of the user
        :rtype: :class:`DSSUserSettings`
        """
        data = self.client._perform_json("GET", "/admin/users/%s" % self.login)
        return DSSUserSettings(self.client, self.login, data)

    def get_activity(self):
        """
        Get the activity of the user.

        :return: the user's activity
        :rtype: :class:`DSSUserActivity`
        """
        data = self.client._perform_json("GET", "/admin/users/%s/activity" % self.login)
        return DSSUserActivity(self.client, self.login, data)

    ########################################################
    # Legacy
    ########################################################

    def get_definition(self):
        """
        Get the definition of the user.

        .. caution::
            Deprecated, use :meth:`get_settings` instead

        :return: the user's definition, as a dict. Notable fields are **login**,
            **enabled**, **email**, **displayName**, **groups**, **sourceType**
            (LOCAL, LOCAL_NO_AUTH, LDAP, SAAS or PAM), **userProfile**,
            **credentials**, **secrets**, **adminProperties**/**userProperties**,
            and the read-only **activeWebSocketSesssions**
        :rtype: dict
        """
        warnings.warn("DSSUser.get_definition is deprecated, please use get_settings", DeprecationWarning)
        return self.client._perform_json("GET", "/admin/users/%s" % self.login)

    def set_definition(self, definition):
        """
        Set the user's definition.

        .. caution::
            Deprecated, use :meth:`dataikuapi.dss.admin.DSSUserSettings.save()` instead

        .. important::
            You should only use :meth:`set_definition` with an object that you obtained
            through :meth:`get_definition`, not create a new dict.

        .. note::
            This call requires an API key with admin rights

        Modifiable fields: email, displayName, enabled, groups, userProfile,
        password (not returned by :meth:`get_definition` but settable),
        userProperties, adminProperties, secrets, credentials.

        :param dict definition: the definition for the user, as a dict
        """
        warnings.warn("DSSUser.set_definition is deprecated, please use get_settings", DeprecationWarning)
        return self.client._perform_json("PUT", "/admin/users/%s" % self.login, body = definition)

    def get_client_as(self):
        """
        Get an API client that has the permissions of this user.

        This allows administrators to impersonate other users in order to
        perform actions on their behalf.

        :return: a client through which calls will be run as the user
        :rtype: :class:`dataikuapi.DSSClient`
        """
        from dataikuapi.dssclient import DSSClient
        proxy_headers = {"X-DKU-ProxyUser": self.login}
        # Prefer API-key authentication; fall back to an internal ticket.
        if self.client.api_key is not None:
            return DSSClient(self.client.host, self.client.api_key, extra_headers=proxy_headers)
        if self.client.internal_ticket is not None:
            return DSSClient(self.client.host, internal_ticket = self.client.internal_ticket,
                             extra_headers=proxy_headers)
        raise ValueError("Don't know how to proxy this client")
class DSSOwnUser(object):
    """
    A handle to interact with your own user.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_own_user` instead.
    """
    def __init__(self, client):
        self.client = client

    def get_settings(self):
        """
        Get your own settings.

        You must use :meth:`~DSSOwnUserSettings.save()` on the returned object
        to make your changes effective on the user.

        :rtype: :class:`DSSOwnUserSettings`
        """
        data = self.client._perform_json("GET", "/current-user")
        return DSSOwnUserSettings(self.client, data)
class DSSUserSettingsBase(object):
    """
    Settings for a DSS user.

    .. important::
        Do not instantiate directly, use :meth:`DSSUser.get_settings` or :meth:`DSSOwnUser.get_settings` instead.
    """
    def __init__(self, settings):
        self.settings = settings  # raw settings dict, mutated in place

    @staticmethod
    def _plugin_credential_name(plugin_id, param_set_id, preset_id, param_name):
        # Plugin-preset credentials are keyed by a JSON array with its brackets
        # stripped, e.g. '"PLUGIN", "plugin", "paramset", "preset", "param"'.
        return json.dumps(["PLUGIN", plugin_id, param_set_id, preset_id, param_name])[1:-1]

    def get_raw(self):
        """
        Get the raw settings of the user.

        Modifications made to the returned object are reflected when saving.

        :return: the settings dict (not a copy). Notable fields are **login**,
            **enabled**, **email**, **displayName**, **groups**, **sourceType**
            (LOCAL, LOCAL_NO_AUTH, LDAP, SAAS or PAM), **userProfile**,
            **credentials**, **secrets**, **adminProperties**/**userProperties**,
            and the read-only **activeWebSocketSesssions**
        :rtype: dict
        """
        return self.settings

    def add_secret(self, name, value):
        """
        Add a user secret, replacing any existing secret with the same name.

        :param string name: name of the secret
        :param string value: value of the secret
        """
        self.remove_secret(name)  # ensure no duplicate entry for this name
        return self.settings["secrets"].append({"name": name, "value": value, "secret": True})

    def remove_secret(self, name):
        """
        Remove a user secret based on its name; does nothing if absent.

        :param string name: name of the secret
        """
        self.settings["secrets"] = [s for s in self.settings["secrets"] if s["name"] != name]

    @property
    def user_properties(self):
        """
        User properties for this user (modify the dict in place, do not set the
        property). Unlike admin properties, these are visible to the user.

        :rtype: dict
        """
        return self.settings["userProperties"]

    def set_basic_connection_credential(self, connection, login, password):
        """
        Set per-user credentials for a connection that takes a user/password pair.

        :param string connection: name of the connection
        :param string login: login of the credentials
        :param string password: password of the credentials
        """
        self.settings["credentials"][connection] = {
            "type": "BASIC",
            "user": login,
            "password": password
        }

    def remove_connection_credential(self, connection):
        """
        Remove per-user credentials for a connection; does nothing if absent.

        :param string connection: name of the connection
        """
        self.settings["credentials"].pop(connection, None)

    def set_basic_plugin_credential(self, plugin_id, param_set_id, preset_id, param_name, login, password):
        """
        Set per-user credentials for a plugin preset that takes a user/password pair.

        :param string plugin_id: identifier of the plugin
        :param string param_set_id: identifier of the parameter set to which the preset belongs
        :param string preset_id: identifier of the preset
        :param string param_name: name of the credentials parameter in the preset
        :param string login: login of the credentials
        :param string password: password of the credentials
        """
        key = self._plugin_credential_name(plugin_id, param_set_id, preset_id, param_name)
        self.settings["credentials"][key] = {
            "type": "BASIC",
            "user": login,
            "password": password
        }

    def set_oauth2_plugin_credential(self, plugin_id, param_set_id, preset_id, param_name, refresh_token):
        """
        Set per-user credentials for a plugin preset that takes an OAuth refresh token.

        :param string plugin_id: identifier of the plugin
        :param string param_set_id: identifier of the parameter set to which the preset belongs
        :param string preset_id: identifier of the preset
        :param string param_name: name of the credentials parameter in the preset
        :param string refresh_token: value of the refresh token
        """
        key = self._plugin_credential_name(plugin_id, param_set_id, preset_id, param_name)
        self.settings["credentials"][key] = {
            "type": "OAUTH_REFRESH_TOKEN",
            "refreshToken": refresh_token
        }

    def remove_plugin_credential(self, plugin_id, param_set_id, preset_id, param_name):
        """
        Remove per-user credentials for a plugin preset; does nothing if absent.

        :param string plugin_id: identifier of the plugin
        :param string param_set_id: identifier of the parameter set to which the preset belongs
        :param string preset_id: identifier of the preset
        :param string param_name: name of the credentials parameter in the preset
        """
        key = self._plugin_credential_name(plugin_id, param_set_id, preset_id, param_name)
        self.settings["credentials"].pop(key, None)
class DSSUserSettings(DSSUserSettingsBase):
    """
    Settings for a DSS user.

    .. important::
        Do not instantiate directly, use :meth:`DSSUser.get_settings` instead.
    """
    def __init__(self, client, login, settings):
        super(DSSUserSettings, self).__init__(settings)
        self.client = client
        self.login = login

    @property
    def admin_properties(self):
        """
        Admin properties for this user (modify the dict in place, do not set
        the property). Visible and editable only by administrators, unlike
        user properties.

        :rtype: dict
        """
        return self.settings["adminProperties"]

    @property
    def enabled(self):
        """Whether this user may log into DSS.

        :rtype: boolean
        """
        return self.settings["enabled"]

    @enabled.setter
    def enabled(self, new_value):
        self.settings["enabled"] = new_value

    @property
    def creation_date(self):
        """
        Timestamp of when the user was created.

        :return: the creation date, or None if not recorded
        :rtype: :class:`datetime.datetime` or None
        """
        # Stored as epoch milliseconds; absent or zero means "unknown".
        ms = self.settings.get("creationDate")
        return datetime.fromtimestamp(ms / 1000) if ms else None

    def save(self):
        """Push the modified settings back to DSS."""
        self.client._perform_json("PUT", "/admin/users/%s" % self.login, body = self.settings)
class DSSOwnUserSettings(DSSUserSettingsBase):
    """
    Settings for the current DSS user.

    .. important::
        Do not instantiate directly, use :meth:`DSSOwnUser.get_settings()` instead.
    """
    def __init__(self, client, settings):
        super(DSSOwnUserSettings, self).__init__(settings)
        self.client = client

    def save(self):
        """Push the modified settings back to DSS."""
        self.client._perform_empty("PUT", "/current-user", body = self.settings)
class DSSUserActivity(object):
    """
    Activity (login and session timestamps) for a DSS user.

    .. important::
        Do not instantiate directly, use :meth:`DSSUser.get_activity` or :meth:`dataikuapi.DSSClient.list_users_activity()` instead.
    """
    def __init__(self, client, login, activity):
        self.client = client
        self.login = login
        self.activity = activity  # raw dict of millisecond timestamps

    def _ms_to_datetime(self, field):
        # DSS reports 0 (or a non-positive value) when the event never happened.
        ms = self.activity[field]
        return datetime.fromtimestamp(ms / 1000) if ms > 0 else None

    def get_raw(self):
        """
        Get the raw activity of the user as a dict.

        :return: the raw activity. Fields are **login**, and the millisecond
            timestamps **lastSuccessfulLogin**, **lastFailedLogin** and
            **lastSessionActivity** (last time the user opened a tab)
        :rtype: dict
        """
        return self.activity

    @property
    def last_successful_login(self):
        """
        Last successful login of the user, or None if there was none.

        :rtype: :class:`datetime.datetime` or None
        """
        return self._ms_to_datetime("lastSuccessfulLogin")

    @property
    def last_failed_login(self):
        """
        Last failed login of the user, or None if there was none.

        :rtype: :class:`datetime.datetime` or None
        """
        return self._ms_to_datetime("lastFailedLogin")

    @property
    def last_session_activity(self):
        """
        Last time the user opened a new DSS tab or refreshed their session,
        or None if there is no session activity yet.

        :rtype: :class:`datetime.datetime` or None
        """
        return self._ms_to_datetime("lastSessionActivity")
class DSSAuthorizationMatrix(object):
    """
    The authorization matrix of all groups and enabled users of the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_authorization_matrix` instead.
    """
    def __init__(self, authorization_matrix):
        # Plain holder around the server-provided matrix; no copy is made
        self.authorization_matrix = authorization_matrix

    @property
    def raw(self):
        """
        Get the raw authorization matrix as a dict

        :return: the authorization matrix. There are 2 parts in the matrix, each as a top-level field and with similar structures,
                 the **perUser** and **perGroup**:

                    * **users** (resp. **groups**) : list of user (resp. group) names
                    * **mayXXXX** (with different permissions as "XXXX") : list of booleans of the same length as **users** (resp. **groups**) indicating where the corresponding user has the permission
                    * **projectsGrants** : list of project permissions, each as a dict of:

                        * **projectKey** and **projectName** : identifiers of the project
                        * **grants** : list of dict of the same length as **users** (resp. **groups**) indicating which grants the corresponding user has on the project

        :rtype: dict
        """
        return self.authorization_matrix
class DSSGroup(object):
    """
    A group on the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_group` instead.
    """
    def __init__(self, client, name):
        self.client = client
        self.name = name

    ########################################################
    # Group deletion
    ########################################################

    def delete(self):
        """
        Deletes the group
        """
        group_path = "/admin/groups/%s" % self.name
        return self.client._perform_empty("DELETE", group_path)

    def get_definition(self):
        """
        Get the group's definition (name, description, admin abilities, type, ldap name mapping)

        :return: the group's definition. Top-level fields are:

                    * **name** : name of the group
                    * **sourceType** : type of group. Possible values: LOCAL, LDAP
                    * **description** : description of the group
                    * **canObtainAPITicketFromCookiesForGroupsRegex** : users in the group can impersonate users from groups whose name match this regex
                    * **admin** : whether users in the group have administrative rights in DSS
                    * **mayXXXX** : whether users in the group has the "XXXX" permission

        :rtype: dict
        """
        group_path = "/admin/groups/%s" % self.name
        return self.client._perform_json("GET", group_path)

    def set_definition(self, definition):
        """
        Set the group's definition.

        .. important::
            You should only use :meth:`set_definition` with an object that you obtained through :meth:`get_definition`,
            not create a new dict.

        :param dict definition: the definition for the group, as a dict
        """
        group_path = "/admin/groups/%s" % self.name
        return self.client._perform_json("PUT", group_path, body = definition)
class DSSGeneralSettings(object):
    """
    The general settings of the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_general_settings` instead.
    """
    def __init__(self, client):
        self.client = client
        self.settings = self.client._perform_json("GET", "/admin/general-settings")

    ########################################################
    # Update settings on instance
    ########################################################

    def save(self):
        """
        Save the changes that were made to the settings on the DSS instance

        .. note::
            This call requires an API key with admin rights
        """
        return self.client._perform_empty("PUT", "/admin/general-settings", body = self.settings)

    ########################################################
    # Value accessors
    ########################################################

    def get_raw(self):
        """
        Get the settings as a dictionary

        :return: the settings
        :rtype: dict
        """
        return self.settings

    def add_impersonation_rule(self, rule, is_user_rule=True):
        """
        Add a rule to the impersonation settings

        :param object rule: an impersonation rule, either a :class:`dataikuapi.dss.admin.DSSUserImpersonationRule`
            or a :class:`dataikuapi.dss.admin.DSSGroupImpersonationRule`, or a plain dict
        :param boolean is_user_rule: when the rule parameter is a dict, whether the rule is for users or groups
        """
        rule_raw = rule
        # Typed rule objects carry their own user/group nature and override is_user_rule
        if isinstance(rule, DSSUserImpersonationRule):
            rule_raw = rule.raw
            is_user_rule = True
        elif isinstance(rule, DSSGroupImpersonationRule):
            rule_raw = rule.raw
            is_user_rule = False
        impersonation = self.settings['impersonation']
        if is_user_rule:
            impersonation['userRules'].append(rule_raw)
        else:
            impersonation['groupRules'].append(rule_raw)

    @staticmethod
    def _filter_rules(rules, criteria):
        """
        Keep only the raw rules matching every non-None criterion.

        :param list[dict] rules: raw impersonation rules
        :param list criteria: (field name, expected value) pairs; pairs with a None value are ignored
        :return: the matching raw rules
        :rtype: list[dict]
        """
        for field, value in criteria:
            if value is not None:
                rules = [r for r in rules if value == r.get(field, None)]
        return rules

    def get_impersonation_rules(self, dss_user=None, dss_group=None, unix_user=None, hadoop_user=None, project_key=None, scope=None, rule_type=None, is_user=None, rule_from=None):
        """
        Retrieve the user or group impersonation rules that match the parameters

        :param string dss_user: a DSS user name
        :param string dss_group: a DSS group name
        :param string rule_from: a regex (which will be applied to user or group names)
        :param string unix_user: a name to match the target UNIX user
        :param string hadoop_user: a name to match the target Hadoop user
        :param string project_key: a project key
        :param string scope: project-scoped ('PROJECT') or global ('GLOBAL')
        :param string rule_type: the rule user or group matching method ('IDENTITY', 'SINGLE_MAPPING', 'REGEXP_RULE')
        :param boolean is_user: True if only user-level rules should be considered, False for only group-level rules, None to consider both
        :return: the matching rules, as rule objects
        :rtype: list
        """
        user_matches = self.settings['impersonation']['userRules'] if is_user is None or is_user == True else []
        user_matches = self._filter_rules(user_matches, [
            ('dssUser', dss_user), ('ruleFrom', rule_from), ('targetUnix', unix_user),
            ('targetHadoop', hadoop_user), ('projectKey', project_key), ('type', rule_type),
            ('scope', scope)])
        # Group rules carry no projectKey/scope fields, so those criteria are not applied to them
        group_matches = self.settings['impersonation']['groupRules'] if is_user is None or is_user == False else []
        group_matches = self._filter_rules(group_matches, [
            ('dssGroup', dss_group), ('ruleFrom', rule_from), ('targetUnix', unix_user),
            ('targetHadoop', hadoop_user), ('type', rule_type)])
        all_matches = []
        for m in user_matches:
            all_matches.append(DSSUserImpersonationRule(m))
        for m in group_matches:
            all_matches.append(DSSGroupImpersonationRule(m))
        return all_matches

    def remove_impersonation_rules(self, dss_user=None, dss_group=None, unix_user=None, hadoop_user=None, project_key=None, scope=None, rule_type=None, is_user=None, rule_from=None):
        """
        Remove the user or group impersonation rules that matches the parameters from the settings

        :param string dss_user: a DSS user name
        :param string dss_group: a DSS group name
        :param string rule_from: a regex (which will be applied to user or group names)
        :param string unix_user: a name to match the target UNIX user
        :param string hadoop_user: a name to match the target Hadoop user
        :param string project_key: a project key
        :param string scope: project-scoped ('PROJECT') or global ('GLOBAL')
        :param string rule_type: the rule user or group matching method ('IDENTITY', 'SINGLE_MAPPING', 'REGEXP_RULE')
        :param boolean is_user: True if only user-level rules should be considered, False for only group-level rules, None to consider both
        """
        for m in self.get_impersonation_rules(dss_user, dss_group, unix_user, hadoop_user, project_key, scope, rule_type, is_user, rule_from):
            if isinstance(m, DSSUserImpersonationRule):
                self.settings['impersonation']['userRules'].remove(m.raw)
            elif isinstance(m, DSSGroupImpersonationRule):
                self.settings['impersonation']['groupRules'].remove(m.raw)

    ########################################################
    # Admin actions
    ########################################################

    def push_container_exec_base_images(self):
        """
        Push the container exec base images to their repository

        :return: the response of the push action
        :rtype: dict
        """
        resp = self.client._perform_json("POST", "/admin/container-exec/actions/push-base-images")
        if resp is None:
            raise Exception('Container exec base image push returned no data')
        if resp.get('messages', {}).get('error', False):
            raise Exception('Container exec base image push failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
        return resp
class DSSUserImpersonationRule(object):
    """
    A user-level rule item for the impersonation settings
    """
    def __init__(self, raw=None):
        if raw is None:
            # A fresh rule starts as a global identity mapping
            raw = {'scope': 'GLOBAL', 'type': 'IDENTITY'}
        self.raw = raw

    def scope_global(self):
        """
        Make the rule apply to all projects
        """
        self.raw.update(scope='GLOBAL')
        return self

    def scope_project(self, project_key):
        """
        Make the rule apply to a given project

        :param string project_key: the project this rule applies to
        """
        self.raw.update(scope='PROJECT', projectKey=project_key)
        return self

    def user_identity(self):
        """
        Make the rule map each DSS user to a UNIX user of the same name
        """
        self.raw.update(type='IDENTITY')
        return self

    def user_single(self, dss_user, unix_user, hadoop_user=None):
        """
        Make the rule map a given DSS user to a given UNIX user

        :param string dss_user: a DSS user
        :param string unix_user: a UNIX user
        :param string hadoop_user: a hadoop user (optional, defaults to unix_user)
        """
        self.raw.update(type='SINGLE_MAPPING', dssUser=dss_user,
                        targetUnix=unix_user, targetHadoop=hadoop_user)
        return self

    def user_regexp(self, regexp, unix_user, hadoop_user=None):
        """
        Make the rule map a DSS users matching a given regular expression to a given UNIX user

        :param string regexp: a regular expression to match DSS user names
        :param string unix_user: a UNIX user
        :param string hadoop_user: a hadoop user (optional, defaults to unix_user)
        """
        self.raw.update(type='REGEXP_RULE', ruleFrom=regexp,
                        targetUnix=unix_user, targetHadoop=hadoop_user)
        return self
class DSSGroupImpersonationRule(object):
    """
    A group-level rule item for the impersonation settings
    """
    def __init__(self, raw=None):
        if raw is None:
            # A fresh rule starts as an identity mapping (group rules have no scope)
            raw = {'type': 'IDENTITY'}
        self.raw = raw

    def group_identity(self):
        """
        Make the rule map each DSS user to a UNIX user of the same name
        """
        self.raw.update(type='IDENTITY')
        return self

    def group_single(self, dss_group, unix_user, hadoop_user=None):
        """
        Make the rule map a given DSS user to a given UNIX user

        :param string dss_group: a DSS group
        :param string unix_user: a UNIX user
        :param string hadoop_user: a hadoop user (optional, defaults to unix_user)
        """
        self.raw.update(type='SINGLE_MAPPING', dssGroup=dss_group,
                        targetUnix=unix_user, targetHadoop=hadoop_user)
        return self

    def group_regexp(self, regexp, unix_user, hadoop_user=None):
        """
        Make the rule map a DSS users matching a given regular expression to a given UNIX user

        :param string regexp: a regular expression to match DSS groups
        :param string unix_user: a UNIX user
        :param string hadoop_user: a hadoop user (optional, defaults to unix_user)
        """
        self.raw.update(type='REGEXP_RULE', ruleFrom=regexp,
                        targetUnix=unix_user, targetHadoop=hadoop_user)
        return self
class DSSCodeEnv(object):
    """
    A code env on the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_code_env` instead.
    """
    def __init__(self, client, env_lang, env_name):
        self.client = client
        self.env_lang = env_lang
        self.env_name = env_name

    @staticmethod
    def _raise_on_error(resp, action):
        """
        Validate the response envelope shared by all code env actions.

        :param dict resp: JSON response of the action, possibly None
        :param string action: human-readable label of the action, used to build error messages
        :return: `resp` unchanged when the action succeeded
        :rtype: dict
        """
        if resp is None:
            raise Exception('%s returned no data' % action)
        if resp.get('messages', {}).get('error', False):
            raise Exception('%s failed : %s' % (action, json.dumps(resp.get('messages', {}).get('messages', {}))))
        return resp

    ########################################################
    # Env deletion
    ########################################################

    def delete(self):
        """
        Delete the code env

        .. note::
            This call requires an API key with admin rights
        """
        resp = self.client._perform_json(
            "DELETE", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name))
        return self._raise_on_error(resp, 'Env deletion')

    ########################################################
    # Code env description
    ########################################################

    def get_definition(self):
        """
        Get the code env's definition

        .. caution::
            Deprecated, use :meth:`get_settings` instead

        .. note::
            This call requires an API key with admin rights

        :return: the code env definition
        :rtype: dict
        """
        return self.client._perform_json(
            "GET", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name))

    def set_definition(self, env):
        """
        Set the code env's definition. The definition should come from a call to :meth:`get_definition`

        .. caution::
            Deprecated, use :meth:`get_settings` then :meth:`DSSDesignCodeEnvSettings.save()` or
            :meth:`DSSAutomationCodeEnvSettings.save()` instead

        Fields that can be updated in design node:

        * env.permissions, env.usableByAll, env.desc.owner
        * env.specCondaEnvironment, env.specPackageList, env.externalCondaEnvName, env.desc.installCorePackages,
          env.desc.corePackagesSet, env.desc.installJupyterSupport, env.desc.yarnPythonBin, env.desc.yarnRBin
          env.desc.envSettings, env.desc.allContainerConfs, env.desc.containerConfs,
          env.desc.allSparkKubernetesConfs, env.desc.sparkKubernetesConfs

        Fields that can be updated in automation node (where {version} is the updated version):

        * env.permissions, env.usableByAll, env.owner, env.envSettings
        * env.{version}.specCondaEnvironment, env.{version}.specPackageList, env.{version}.externalCondaEnvName,
          env.{version}.desc.installCorePackages, env.{version}.corePackagesSet, env.{version}.desc.installJupyterSupport
          env.{version}.desc.yarnPythonBin, env.{version}.desc.yarnRBin, env.{version}.desc.allContainerConfs,
          env.{version}.desc.containerConfs, env.{version}.desc.allSparkKubernetesConfs,
          env.{version}.{version}.desc.sparkKubernetesConfs

        .. note::
            This call requires an API key with admin rights

        .. important::
            You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
            not create a new dict.

        :param dict env: a code env definition
        :return: the updated code env definition
        :rtype: dict
        """
        return self.client._perform_json(
            "PUT", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name), body=env)

    def get_version_for_project(self, project_key):
        """
        Resolve the code env version for a given project

        .. note::
            Version will only be non-empty for versioned code envs actually used by the project

        :param string project_key: project to get the version for
        :return: the code env version full reference for the version of the code env that the project use. Fields are

                    * **lang** : language of the code env (PYTHON or R)
                    * **name** : name of the code env
                    * **version** : identifier of the version
                    * **projectKey** : project key
                    * **bundleId** : identifier of the active bundle in the project

        :rtype: dict
        """
        return self.client._perform_json(
            "GET", "/admin/code-envs/%s/%s/%s/version" % (self.env_lang, self.env_name, project_key))

    def get_settings(self):
        """
        Get the settings of this code env.

        .. important::
            You must use :meth:`DSSCodeEnvSettings.save()` on the returned object to make your changes effective
            on the code env.

        .. code-block:: python

            # Example: setting the required packagd
            codeenv = client.get_code_env("PYTHON", "code_env_name")
            settings = codeenv.get_settings()
            settings.set_required_packages("dash==2.0.0", "bokeh<2.0")
            settings.save()
            # then proceed to update_packages()

        :rtype: :class:`DSSDesignCodeEnvSettings` or :class:`DSSAutomationCodeEnvSettings`
        """
        data = self.client._perform_json(
            "GET", "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name))
        # you can't just use deploymentMode to check if it's an automation code
        # env, because some modes are common to both types of nodes. So we rely
        # on a non-null field that only the automation code envs have
        if data.get("versions", None) is not None:
            return DSSAutomationCodeEnvSettings(self, data)
        else:
            return DSSDesignCodeEnvSettings(self, data)

    ########################################################
    # Code env actions
    ########################################################

    def set_jupyter_support(self, active):
        """
        Update the code env jupyter support

        .. note::
            This call requires an API key with admin rights

        :param boolean active: True to activate jupyter support, False to deactivate
        """
        resp = self.client._perform_json(
            "POST", "/admin/code-envs/%s/%s/jupyter" % (self.env_lang, self.env_name),
            params = {'active':active})
        return self._raise_on_error(resp, 'Env update')

    def update_packages(self, force_rebuild_env=False):
        """
        Update the code env packages so that it matches its spec

        .. note::
            This call requires an API key with admin rights

        :param boolean force_rebuild_env: whether to rebuild the code env from scratch
        :return: list of messages collected during the operation. Fields are:

                    * **anyMessage** : whether there is at least 1 message
                    * **success**, **warning**, **error** and **fatal** : whether there is at least one message of the corresponding category
                    * **messages** : list of messages. Each message is a dict, with at least **severity** and **message** sufields.

        :rtype: dict
        """
        resp = self.client._perform_json(
            "POST", "/admin/code-envs/%s/%s/packages" % (self.env_lang, self.env_name),
            params={"forceRebuildEnv": force_rebuild_env})
        return self._raise_on_error(resp, 'Env update')

    def update_images(self, env_version=None):
        """
        Rebuild the docker image of the code env

        .. note::
            This call requires an API key with admin rights

        :param string env_version: (optional) version of the code env. Applies only to versioned code envs.
        :return: list of messages collected during the operation. Fields are:

                    * **anyMessage** : whether there is at least 1 message
                    * **success**, **warning**, **error** and **fatal** : whether there is at least one message of the corresponding category
                    * **messages** : list of messages. Each message is a dict, with at least **severity** and **message** sufields.

        :rtype: dict
        """
        resp = self.client._perform_json(
            "POST", "/admin/code-envs/%s/%s/images" % (self.env_lang, self.env_name),
            params={"envVersion": env_version})
        return self._raise_on_error(resp, 'Env image build')

    def list_usages(self):
        """
        List usages of the code env in the instance

        :return: a list of objects where the code env is used. Each usage has fields:

                    * **envLang** and **envName** : identifiers of the code env
                    * **envUsage** : type of usage. Possible values: PROJECT, RECIPE, NOTEBOOK, PLUGIN, SCENARIO, SCENARIO_STEP, SCENARIO_TRIGGER, DATASET_METRIC, DATASET_CHECK, DATASET, WEBAPP, REPORT, API_SERVICE_ENDPOINT, SAVED_MODEL, MODEL, CODE_STUDIO_TEMPLATE
                    * **projectKey** and **objectId** : identifier of the object where the code env is used
                    * **accessible** : if False, the **projectKey** and **objectId** are obfuscated and point to an object of a project that the user can't access

        :rtype: list[dict]
        """
        return self.client._perform_json(
            "GET", "/admin/code-envs/%s/%s/usages" % (self.env_lang, self.env_name))

    def list_logs(self):
        """
        List logs of the code env in the instance

        :return: a list of log descriptions. Each log description has fields:

                    * **name** : name of the log file
                    * **totalSize** : size in bytes of the log
                    * **lastModified** : timestamp in milliseconds of the last change to the log
                    * **tail** : structure holding the tail of the log file

        :rtype: list[dict]
        """
        return self.client._perform_json(
            "GET", "/admin/code-envs/%s/%s/logs" % (self.env_lang, self.env_name))

    def get_log(self, log_name):
        """
        Get the logs of the code env

        :param string log_name: name of the log to fetch
        :return: the raw log
        :rtype: string
        """
        return self.client._perform_text(
            "GET", "/admin/code-envs/%s/%s/logs/%s" % (self.env_lang, self.env_name, log_name))
class DSSCodeEnvSettings(object):
    """
    Base settings class for a DSS code env.

    .. important::
        Do not instantiate directly, use :meth:`DSSCodeEnv.get_settings` instead.

    Use :meth:`save` to save your changes
    """
    def __init__(self, codeenv, settings):
        self.codeenv = codeenv
        self.settings = settings

    @property
    def env_lang(self):
        """
        Get the language of the code env

        :return: a language (possible values: PYTHON, R)
        :rtype: string
        """
        return self.codeenv.env_lang

    @property
    def env_name(self):
        """
        Get the name of the code env

        :rtype: string
        """
        return self.codeenv.env_name

    def save(self):
        """
        Save the changes to the code env's settings
        """
        endpoint = "/admin/code-envs/%s/%s" % (self.env_lang, self.env_name)
        self.codeenv.client._perform_json("PUT", endpoint, body=self.settings)
class DSSCodeEnvPackageListBearer(object):
    """
    Mixin exposing accessors for the pip and conda package specifications of a code env.

    Host classes are expected to provide a ``settings`` dict attribute.
    """
    def get_required_packages(self, as_list=False):
        """
        Get the list of required packages, as a single string

        :param boolean as_list: if True, return the spec as a list of lines; if False, return as a single multiline string

        :return: a list of packages specifications
        :rtype: list[string] or string
        """
        spec = self.settings.get("specPackageList", "")
        return spec.split('\n') if as_list else spec

    def set_required_packages(self, *packages):
        """
        Set the list of required packages

        :param list[string] packages: a list of packages specifications
        """
        self.settings["specPackageList"] = '\n'.join(packages)

    def get_required_conda_spec(self, as_list=False):
        """
        Get the list of required conda packages, as a single string

        :param boolean as_list: if True, return the spec as a list of lines; if False, return as a single multiline string

        :return: a list of packages specifications
        :rtype: list[string] or string
        """
        spec = self.settings.get("specCondaEnvironment", "")
        return spec.split('\n') if as_list else spec

    def set_required_conda_spec(self, *spec):
        """
        Set the list of required conda packages

        :param list[string] spec: a list of packages specifications
        """
        # Fixed: previously joined the undefined name `packages`, raising NameError on every call
        self.settings["specCondaEnvironment"] = '\n'.join(spec)
class DSSCodeEnvContainerConfsBearer(object):
    """
    Mixin exposing accessors for the container and Spark-on-Kubernetes build settings of a code env.

    Host classes are expected to provide a ``settings`` dict attribute.
    """
    def get_built_for_all_container_confs(self):
        """
        Whether the code env creates an image for each container config

        :rtype: boolean
        """
        return self.settings.get("allContainerConfs", False)

    def get_built_container_confs(self):
        """
        Get the list of container configs for which the code env builds an image (if not all)

        :return: a list of container configuration names
        :rtype: list[string]
        """
        return self.settings.get("containerConfs", [])

    def set_built_container_confs(self, *configs, **kwargs):
        """
        Set the list of container configs for which the code env builds an image

        :param boolean all: if True, an image is built for each config
        :param list[string] configs: list of configuration names to build images for
        """
        # read from kwargs to keep the public keyword name without shadowing the builtin `all`
        build_all = kwargs.get("all", False)
        self.settings['allContainerConfs'] = build_all
        if not build_all:
            # store a real list (not the *args tuple) so the settings dict stays JSON-roundtrippable
            self.settings['containerConfs'] = list(configs)

    def built_for_all_spark_kubernetes_confs(self):
        """
        Whether the code env creates an image for each managed Spark over Kubernetes config

        :rtype: boolean
        """
        return self.settings.get("allSparkKubernetesConfs", False)

    def get_built_spark_kubernetes_confs(self):
        """
        Get the list of managed Spark over Kubernetes configs for which the code env builds an image (if not all)

        :return: a list of spark configuration names
        :rtype: list[string]
        """
        return self.settings.get("sparkKubernetesConfs", [])

    def set_built_spark_kubernetes_confs(self, *configs, **kwargs):
        """
        Set the list of managed Spark over Kubernetes configs for which the code env builds an image

        :param boolean all: if True, an image is built for each config
        :param list[string] configs: list of configuration names to build images for
        """
        build_all = kwargs.get("all", False)
        self.settings['allSparkKubernetesConfs'] = build_all
        if not build_all:
            self.settings['sparkKubernetesConfs'] = list(configs)
class DSSDesignCodeEnvSettings(DSSCodeEnvSettings, DSSCodeEnvPackageListBearer, DSSCodeEnvContainerConfsBearer):
    """
    Base settings class for a DSS code env on a design node.
    .. important::
        Do not instantiate directly, use :meth:`DSSCodeEnv.get_settings` instead.
    Use :meth:`save` to save your changes
    """
    def __init__(self, codeenv, settings):
        # Pure pass-through: the package-list and container-confs accessors inherited from
        # the mixins read and write the same `settings` dict stored by the base class.
        super(DSSDesignCodeEnvSettings, self).__init__(codeenv, settings)
    def get_raw(self):
        """
        Get the raw code env settings
        The structure depends on the type of code env. Notable fields are:
        * **envLang** and **envName** : identifiers of the code env
        * **desc** : definition of the code env, persisted on disk
        * **deploymentMode** : type of code env. Possible values: DSS_INTERNAL, DESIGN_MANAGED, DESIGN_NON_MANAGED, PLUGIN_MANAGED, PLUGIN_NON_MANAGED, EXTERNAL_CONDA_NAMED
        * **conda** : if True, the code env is created using Conda. If False, using virtualenv (for Python) or by linking to the system R (for R)
        * **externalCondaEnvName** : for EXTERNAL_CONDA_NAMED code envs, the name of the associated conda env
        * **envSettings** : settings for the building of the code env
            * **inheritGlobalSettings** : if True, values come from the instance general settings
            * **condaInstallExtraOptions** : extra command line options to pass to `conda install`
            * **condaCreateExtraOptions** : extra command line options to pass to `conda create`
            * **pipInstallExtraOptions** : extra command line options to pass to `pip install`
            * **virtualenvCreateExtraOptions** : extra command line options to pass to `virtualenv`
            * **cranMirrorURL** : URL of CRAN mirror to use to pull package
        * **allContainerConfs** : if True, build container images for all container configs on code env updates. If False, build images only for configs in **containerConfs**
        * **containerConfs** : list of container config names
        * **allSparkKubernetesConfs** : if True, build container images for all spark configs on code env updates. If False, build images only for configs in **sparkKubernetesConfs**
        * **sparkKubernetesConfs** : list of spark config names
        * **rebuildDependentCodeStudioTemplates** : which code studio templates to rebuild on code env updates. Possible values are ASK (open modal to ask for user input), ALL, NONE
        * **owner** : login of the owner of the code env
        * **usableByAll** : if True, all users can use the code env. If false, **permissions** apply
        * **permissions** : list of permissions items. Each item has a group name and booleans for each permission
        * **yarnPythonBin** or **yarnRBin** : path to Python (resp. R) on the cluster nodes, for use in Spark jobs running on Yarn
        * **pythonInterpreter** : type of Python used. Possible values: PYTHON27, PYTHON34, PYTHON35, PYTHON36, PYTHON37, PYTHON38, PYTHON39, PYTHON310, PYTHON311, CUSTOM
        * **customInterpreter** : if **pythonInterpreter** is "CUSTOM", the path to the Python binary
        * **corePackagesSet** : which set of core packages to instal in the code env. Possible values: LEGACY_PANDAS023, PANDAS10, PANDAS11, PANDAS12, PANDAS13
        * **installJupyterSupport** : if True, the packages necessary for using the code env in notebooks are installed
        * **dockerImageResources** : behavior w.r.t. code env resources. Possible values: INIT (run initialization script), COPY (copy resources), NONE
        * several fields from **desc** are also copied to the top-level, notably **deploymentMode** and the fields around permission handling.
        * **canUpdateCodeEnv** and **canManageUsersCodeEnv** : (read-only) indicate whether the current user can update the code env or manage its permissions
        * **resourcesInitScript** : for Python code env, the contents resource script
        * **info** : (read-only) for Python code env, a dict with a **pythonVersion** field
        * **specPackageList** and **specCondaEnvironment** : list of packages requested by the user, as strings
        * **actualPackageList** and **actualCondaEnvironment** : (read-only) actual packages in the code env, as strings
        * **mandatoryPackageList** and **mandatoryCondaEnvironment** : (read-only) base packages added automatically by DSS on update, as strings
        :return: code env settings
        :rtype: dict
        """
        # Returns the live dict: mutations made by callers (or the mixin setters)
        # are persisted when save() is called on this object.
        return self.settings
class DSSAutomationCodeEnvSettings(DSSCodeEnvSettings, DSSCodeEnvContainerConfsBearer):
"""
Base settings class for a DSS code env on an automation node.
.. important::
Do not instantiate directly, use :meth:`DSSCodeEnv.get_settings` instead.
Use :meth:`save` to save your changes
"""
def __init__(self, codeenv, settings):
super(DSSAutomationCodeEnvSettings, self).__init__(codeenv, settings)
def get_raw(self):
"""
Get the raw code env settings
The structure depends on the type of code env. Notable fields are:
* **envLang** and **envName** : identifiers of the code env
* **deploymentMode** : type of code env. Possible values: DSS_INTERNAL, PLUGIN_MANAGED, PLUGIN_NON_MANAGED, AUTOMATION_VERSIONED, AUTOMATION_SINGLE, AUTOMATION_NON_MANAGED_PATH, EXTERNAL_CONDA_NAMED
* **allContainerConfs** : if True, build container images for all container configs on code env updates. If False, build images only for configs in **containerConfs**
* **containerConfs** : list of container config names
* **allSparkKubernetesConfs** : if True, build container images for all spark configs on code env updates. If False, build images only for configs in **sparkKubernetesConfs**
* **sparkKubernetesConfs** : list of spark config names
* **rebuildDependentCodeStudioTemplates** : which code studio templates to rebuild on code env updates. Possible values are ASK (open modal to ask for user input), ALL, NONE
* **owner** : login of the owner of the code env
* **usableByAll** : if True, all users can use the code env. If false, **permissions** apply
* **permissions** : list of permissions items. Each item has a group name and booleans for each permission
* **envSettings** : settings for the building of the code env
* **overrideImportedEnvSettings** : if False, values come from the instance general settings
* **condaInstallExtraOptions** : extra command line options to pass to `conda install`
* **condaCreateExtraOptions** : extra command line options to pass to `conda create`
* **pipInstallExtraOptions** : extra command line options to pass to `pip install`
* **virtualenvCreateExtraOptions** : extra command line options to pass to `virtualenv`
* **cranMirrorURL** : URL of CRAN mirror to use to pull package
* **canUpdateCodeEnv** and **canManageUsersCodeEnv** : (read-only) indicate whether the current user can update the code env or manage its permissions
* **currentVersion** : when **deploymentMode** is "AUTOMATION_SINGLE", the single version. Use :meth:`get_version()` to access
* **versions** : when **deploymentMode** is "AUTOMATION_VERSIONED", a list of code env versions. Use :meth:`get_version()` to access
* **noVersion** : when **deploymentMode** is neither "AUTOMATION_SINGLE" nor "AUTOMATION_VERSIONED", the spec of the code env. Use :meth:`get_version()` to access
:return: code env settings
:rtype: dict
"""
return self.settings
def get_version(self, version_id=None):
    """
    Get a specific code env version (for versioned envs) or the single version.

    :param string version_id: for a versioned code env, identifier of the desired version

    :return: the settings of a code env version
    :rtype: :class:`DSSAutomationCodeEnvVersionSettings`
    """
    deployment_mode = self.settings.get("deploymentMode", None)
    if deployment_mode in ['AUTOMATION_SINGLE']:
        # single-version envs ignore version_id and expose their spec under "currentVersion"
        return DSSAutomationCodeEnvVersionSettings(self.codeenv, self.settings.get('currentVersion', {}))
    elif deployment_mode in ['AUTOMATION_VERSIONED']:
        versions = self.settings.get("versions", [])
        version_ids = [v.get('versionId') for v in versions]
        if version_id is None:
            raise Exception("A version id is required in a versioned code env. Existing ids: %s" % ', '.join(version_ids))
        for version in versions:
            if version_id == version.get("versionId"):
                return DSSAutomationCodeEnvVersionSettings(self.codeenv, version)
        raise Exception("Version %s not found in : %s" % (version_id, ', '.join(version_ids)))
    elif deployment_mode in ['PLUGIN_NON_MANAGED', 'PLUGIN_MANAGED', 'AUTOMATION_NON_MANAGED_PATH', 'EXTERNAL_CONDA_NAMED']:
        # non-versioned envs keep their single spec under "noVersion"
        return DSSAutomationCodeEnvVersionSettings(self.codeenv, self.settings.get('noVersion', {}))
    else:
        # Fixed: the message was previously passed with a comma ("msg", mode), which made
        # deployment_mode a second exception argument instead of formatting it into the message.
        raise Exception("Unexpected deployment mode %s for an automation node code env. Alter the settings directly with get_raw()" % deployment_mode)
class DSSAutomationCodeEnvVersionSettings(DSSCodeEnvPackageListBearer):
    """
    Base settings class for a DSS code env version on an automation node.

    .. important::
        Do not instantiate directly, use :meth:`DSSAutomationCodeEnvSettings.get_version` instead.

        Use :meth:`save` on the :class:`DSSAutomationCodeEnvSettings` to persist your changes.
    """
    def __init__(self, codeenv_settings, version_settings):
        # keep a reference to the parent settings object and to this version's raw dict
        self.codeenv_settings = codeenv_settings
        self.settings = version_settings

    def get_raw(self):
        """
        Get the raw code env version settings.

        The structure depends on the type of code env. Notable fields are:

        * **versionId** : identifier of the code env version
        * **path** : (read-only) location of the version on disk
        * **versionId** : type of code env. Possible values: DSS_INTERNAL, DESIGN_MANAGED, DESIGN_NON_MANAGED, PLUGIN_MANAGED, PLUGIN_NON_MANAGED, EXTERNAL_CONDA_NAMED (note: this bullet appears mislabeled upstream — it describes the env type, not the version id; verify the actual field name)
        * **desc** : definition of the code env, persisted on disk
        * **conda** : if True, the code env is created using Conda. If False, using virtualenv (for Python) or by linking to the system R (for R)
        * **externalCondaEnvName** : for EXTERNAL_CONDA_NAMED code envs, the name of the associated conda env
        * **yarnPythonBin** or **yarnRBin** : path to Python (resp. R) on the cluster nodes, for use in Spark jobs running on Yarn
        * **pythonInterpreter** : type of Python used. Possible values: PYTHON27, PYTHON34, PYTHON35, PYTHON36, PYTHON37, PYTHON38, PYTHON39, PYTHON310, PYTHON311, CUSTOM
        * **customInterpreter** : if **pythonInterpreter** is "CUSTOM", the path to the Python binary
        * **corePackagesSet** : which set of core packages to install in the code env. Possible values: LEGACY_PANDAS023, PANDAS10, PANDAS11, PANDAS12, PANDAS13
        * **installJupyterSupport** : if True, the packages necessary for using the code env in notebooks are installed
        * **dockerImageResources** : behavior w.r.t. code env resources. Possible values: INIT (run initialization script), COPY (copy resources), NONE
        * **resourcesInitScript** : for Python code env, the contents of the resource script
        * **info** : (read-only) for Python code env, a dict with a **pythonVersion** field
        * **specPackageList** and **specCondaEnvironment** : list of packages requested by the user, as strings
        * **actualPackageList** and **actualCondaEnvironment** : (read-only) actual packages in the code env, as strings
        * **mandatoryPackageList** and **mandatoryCondaEnvironment** : (read-only) base packages added automatically by DSS on update, as strings

        :return: code env settings
        :rtype: dict
        """
        return self.settings
class DSSGlobalApiKey(object):
    """
    A global API key on the DSS instance
    """
    def __init__(self, client, key):
        self.client = client
        self.key = key

    def _resource(self):
        # REST path of this key on the admin API
        return "/admin/globalAPIKeys/%s" % self.key

    ########################################################
    # Key deletion
    ########################################################

    def delete(self):
        """
        Delete the api key

        .. note::
            This call requires an API key with admin rights
        """
        return self.client._perform_empty("DELETE", self._resource())

    ########################################################
    # Key description
    ########################################################

    def get_definition(self):
        """
        Get the API key's definition

        .. note::
            This call requires an API key with admin rights

        :return: the API key definition. Top-level fields are:

            * **id** : identifier of the key
            * **key** : value of the key
            * **label** : label of the key
            * **description** : longer description of the key
            * **createdOn** : timestamp of creation, in milliseconds
            * **createdBy** : login of user who created the key
            * **dssUserForImpersonation** : login of user that the key impersonates
            * **adminProperties** : dict of properties set by administrators
            * **userProperties** : dict of properties set by users with access to the key
            * **globalPermissions** : dict of instance-wide permissions (each field is a boolean for a permission)
            * **execSQLLike** : whether the key can run SQL queries
            * **projectFolders** : dict of project folder identifier to dict of permissions on that project folder
            * **projects** : dict of project key to dict of permissions on that project
            * **codeEnvs** : dict of code env name to dict of permissions on that code env
            * **clusters** : dict of cluster name to dict of permissions on that cluster
            * **codeStudioTemplates** : dict of code studio template identifier to dict of permissions on that code studio template
            * **plugins** : dict of plugin identifier to dict of permissions on that plugin
            * **pluginPresets** : dict of preset identifier to dict of permissions on that preset
            * **pluginParameterSets** : dict of parameter set name to dict of permissions on that parameter set
            * **unscopedDatasets** : list of dict giving permissions to specific datasets

        :rtype: dict
        """
        return self.client._perform_json("GET", self._resource())

    def set_definition(self, definition):
        """
        Set the API key's definition.

        .. note::
            This call requires an API key with admin rights

        .. important::
            You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
            not create a new dict.

        Usage example

        .. code-block:: python

            # make an API key able to create projects
            key = client.get_global_api_key('my_api_key_secret')
            definition = key.get_definition()
            definition["globalPermissions"]["mayCreateProjects"] = True
            key.set_definition(definition)

        :param dict definition: the definition for the API key
        """
        return self.client._perform_empty("PUT", self._resource(), body=definition)
class DSSGlobalApiKeyListItem(dict):
    """
    An item in a list of global API keys.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.list_global_api_keys` instead.
    """
    def __init__(self, client, data):
        super(DSSGlobalApiKeyListItem, self).__init__(data)
        self.client = client

    def to_global_api_key(self):
        """
        Gets a handle corresponding to this item

        :rtype: :class:`DSSGlobalApiKey`
        """
        return DSSGlobalApiKey(self.client, self["key"])

    @property
    def id(self):
        """
        Get the identifier of the API key

        :rtype: string
        """
        return self["id"]

    @property
    def user_for_impersonation(self):
        """
        Get the user associated to the API key

        :rtype: string
        """
        return self.get("dssUserForImpersonation")

    @property
    def key(self):
        """
        Get the API key

        :rtype: string
        """
        return self["key"]

    @property
    def label(self):
        """
        Get the label of the API key

        :rtype: string
        """
        return self["label"]

    @property
    def description(self):
        """
        Get the description of the API key

        :rtype: string
        """
        return self.get("description")

    @property
    def created_on(self):
        """
        Get the timestamp of when the API key was created

        :rtype: :class:`datetime.datetime`
        """
        ts = self["createdOn"]
        if ts <= 0:
            return None
        return datetime.fromtimestamp(ts / 1000)

    @property
    def created_by(self):
        """
        Get the login of the user who created the API key

        :rtype: string
        """
        return self["createdBy"]
class DSSPersonalApiKey(object):
    """
    A personal API key on the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_personal_api_key` instead.
    """
    def __init__(self, client, id_):
        self.client = client
        self.id_ = id_

    ########################################################
    # Key description
    ########################################################

    def get_definition(self):
        """
        Get the API key's definition

        :return: the personal API key definition. Top level fields are:

            * **id** : identifier of the key
            * **key** : value of the key
            * **user** : login of the user that this key acts on behalf of
            * **label** : label of the key
            * **description** : longer description of the key
            * **createdOn** : timestamp of creation, in milliseconds
            * **createdBy** : login of the user who create the key

        :rtype: dict
        """
        url = "/personal-api-keys/%s" % self.id_
        return self.client._perform_json("GET", url)

    ########################################################
    # Key deletion
    ########################################################

    def delete(self):
        """
        Delete the API key
        """
        url = "/personal-api-keys/%s" % self.id_
        return self.client._perform_empty("DELETE", url)
class DSSPersonalApiKeyListItem(dict):
    """
    An item in a list of personal API keys.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.list_personal_api_keys` or :meth:`dataikuapi.DSSClient.list_all_personal_api_keys` instead.
    """
    def __init__(self, client, data):
        super(DSSPersonalApiKeyListItem, self).__init__(data)
        self.client = client

    def to_personal_api_key(self):
        """
        Gets a handle corresponding to this item

        :rtype: :class:`DSSPersonalApiKey`
        """
        return DSSPersonalApiKey(self.client, self["id"])

    @property
    def id(self):
        """
        Get the identifier of the API key

        :rtype: string
        """
        return self["id"]

    @property
    def user(self):
        """
        Get the user associated to the API key

        :rtype: string
        """
        return self["user"]

    @property
    def key(self):
        """
        Get the API key

        :rtype: string
        """
        return self["key"]

    @property
    def label(self):
        """
        Get the label of the API key

        :rtype: string
        """
        return self["label"]

    @property
    def description(self):
        """
        Get the description of the API key

        :rtype: string
        """
        return self["description"]

    @property
    def created_on(self):
        """
        Get the timestamp of when the API key was created

        :rtype: :class:`datetime.datetime`
        """
        ts = self["createdOn"]
        if ts <= 0:
            return None
        return datetime.fromtimestamp(ts / 1000)

    @property
    def created_by(self):
        """
        Get the login of the user who created the API key

        :rtype: string
        """
        return self["createdBy"]
class DSSCluster(object):
    """
    A handle to interact with a cluster on the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_cluster` instead.
    """
    def __init__(self, client, cluster_id):
        self.client = client
        self.cluster_id = cluster_id

    ########################################################
    # Cluster deletion
    ########################################################

    def delete(self):
        """
        Deletes the cluster.

        .. important::
            This does not previously stop it.
        """
        self.client._perform_empty(
            "DELETE", "/admin/clusters/%s" % (self.cluster_id))

    ########################################################
    # Cluster description
    ########################################################

    def get_settings(self):
        """
        Get the cluster's settings. This includes opaque data for the cluster if this is
        a started managed cluster.

        The returned object can be used to save settings.

        :return: a :class:`DSSClusterSettings` object to interact with cluster settings
        :rtype: :class:`DSSClusterSettings`
        """
        settings = self.client._perform_json(
            "GET", "/admin/clusters/%s" % (self.cluster_id))
        return DSSClusterSettings(self.client, self.cluster_id, settings)

    def get_definition(self):
        """
        Get the cluster's definition. This includes opaque data for the cluster if this is
        a started managed cluster.

        .. caution::
            Deprecated, use :meth:`get_settings()`

        :return: the definition of the cluster. Fields are:

            * **id** : unique identifier of the cluster
            * **name** : name of the cluster, in the UI
            * **architecture** : kind of cluster (either HADOOP or KUBERNETES)
            * **origin** : agent who created the cluster (either MANUAL or SCENARIO)
            * **type** : type of cluster. Can be "manual" or a plugin cluster element type
            * **params** : for clusters from plugin components, the settings shown in the cluster's form.
            * **state** : (read-only) current state of the cluster. Possible values are NONE, STARTING, RUNNING, STOPPING
            * **data** : when in **state** "RUNNING", a dict of data for use by the cluster's plugin component. Contents depend on each cluster type.
            * **owner**, **usableByAll** and **permissions** : definition of permissions on cluster
            * **canUpdateCluster** : (read-only) whether the user can update the cluster's settings or state
            * **canManageUsersCluster** : (read-only) whether the user can manage the cluster's permissions
            * **XXXXSettings** : dict of settings (resp. override mask) for XXXX in Hadoop, Hive, Impala, Spark and Container. These settings apply on top of the corresponding settings in the instance's general settings

        :rtype: dict
        """
        return self.client._perform_json(
            "GET", "/admin/clusters/%s" % (self.cluster_id))

    def set_definition(self, cluster):
        """
        Set the cluster's definition. The definition should come from a call to the get_definition()
        method.

        .. caution::
            Deprecated, use :meth:`DSSClusterSettings.save()`

        .. important::
            You should only :meth:`set_definition` using an object that you obtained through :meth:`get_definition`,
            not create a new dict.

        :param dict cluster: a cluster definition

        :return: the updated cluster definition
        :rtype: dict
        """
        return self.client._perform_json(
            "PUT", "/admin/clusters/%s" % (self.cluster_id), body=cluster)

    def get_status(self):
        """
        Get the cluster's status and usage

        :return: The cluster status, as a :class:`DSSClusterStatus` object
        :rtype: :class:`DSSClusterStatus`
        """
        status = self.client._perform_json("GET", "/admin/clusters/%s/status" % (self.cluster_id))
        return DSSClusterStatus(self.client, self.cluster_id, status)

    ########################################################
    # Cluster actions
    ########################################################

    @staticmethod
    def _check_operation_response(resp):
        # Shared validation for start/stop responses: the backend either returns
        # nothing (treated as an error) or a dict whose 'messages' may flag a failure.
        if resp is None:
            raise Exception('Cluster operation returned no data')
        if resp.get('messages', {}).get('error', False):
            raise Exception('Cluster operation failed : %s' % (json.dumps(resp.get('messages', {}).get('messages', {}))))
        return resp

    def start(self):
        """
        Starts or attaches the cluster

        .. caution::
            This operation is only valid for a managed cluster.
        """
        resp = self.client._perform_json(
            "POST", "/admin/clusters/%s/actions/start" % (self.cluster_id))
        return self._check_operation_response(resp)

    def stop(self, terminate=True, force_stop=False):
        """
        Stops or detaches the cluster

        This operation is only valid for a managed cluster.

        :param boolean terminate: whether to delete the cluster after stopping it
        :param boolean force_stop: whether to try to force stop the cluster, useful if DSS expects
                                   the cluster to already be stopped
        """
        resp = self.client._perform_json(
            "POST", "/admin/clusters/%s/actions/stop" % (self.cluster_id),
            params={'terminate': terminate, 'forceStop': force_stop})
        # Fixed: this path previously raised 'Env update returned no data', a
        # copy-paste from the code env update path; it now reports a cluster error.
        return self._check_operation_response(resp)

    def run_kubectl(self, args):
        """
        Runs an arbitrary kubectl command on the cluster.

        .. caution::
            This operation is only valid for a Kubernetes cluster.

        .. note::
            This call requires an API key with DSS instance admin rights

        :param string args: the arguments to pass to kubectl (without the "kubectl")
        :return: a dict containing the return value, standard output, and standard error of the command
        :rtype: dict
        """
        return self.client._perform_json(
            "POST", "/admin/clusters/%s/k8s/actions/run-kubectl" % self.cluster_id,
            body={'args': args})

    def delete_finished_jobs(self, delete_failed=False, namespace=None, label_filter=None, dry_run=False):
        """
        Runs a kubectl command to delete finished jobs.

        .. caution::
            This operation is only valid for a Kubernetes cluster.

        :param boolean delete_failed: if True, delete both completed and failed jobs, otherwise only delete completed jobs
        :param string namespace: the namespace in which to delete the jobs, if None, uses the namespace set in kubectl's current context
        :param string label_filter: delete only jobs matching a label filter
        :param boolean dry_run: if True, execute the command as a "dry run"
        :return: a dict containing whether the deletion succeeded, a list of deleted job names, and
                 debug info for the underlying kubectl command
        :rtype: dict
        """
        return self.client._perform_json(
            "POST", "/admin/clusters/%s/k8s/jobs/actions/delete-finished" % self.cluster_id,
            params={'deleteFailed': delete_failed, 'namespace': namespace, 'labelFilter': label_filter, 'dryRun': dry_run})

    def delete_finished_pods(self, namespace=None, label_filter=None, dry_run=False):
        """
        Runs a kubectl command to delete finished (succeeded and failed) pods.

        .. caution::
            This operation is only valid for a Kubernetes cluster.

        :param string namespace: the namespace in which to delete the pods, if None, uses the namespace set in kubectl's current context
        :param string label_filter: delete only pods matching a label filter
        :param boolean dry_run: if True, execute the command as a "dry run"
        :return: a dict containing whether the deletion succeeded, a list of deleted pod names, and
                 debug info for the underlying kubectl command
        :rtype: dict
        """
        return self.client._perform_json(
            "POST", "/admin/clusters/%s/k8s/pods/actions/delete-finished" % self.cluster_id,
            params={'namespace': namespace, 'labelFilter': label_filter, 'dryRun': dry_run})

    def delete_all_pods(self, namespace=None, label_filter=None, dry_run=False):
        """
        Runs a kubectl command to delete all pods.

        .. caution::
            This operation is only valid for a Kubernetes cluster.

        :param string namespace: the namespace in which to delete the pods, if None, uses the namespace set in kubectl's current context
        :param string label_filter: delete only pods matching a label filter
        :param boolean dry_run: if True, execute the command as a "dry run"
        :return: a dict containing whether the deletion succeeded, a list of deleted pod names, and
                 debug info for the underlying kubectl command
        :rtype: dict
        """
        return self.client._perform_json(
            "POST", "/admin/clusters/%s/k8s/pods/actions/delete-all" % self.cluster_id,
            params={'namespace': namespace, 'labelFilter': label_filter, 'dryRun': dry_run})
class DSSClusterSettings(object):
    """
    The settings of a cluster.

    .. important::
        Do not instantiate directly, use :meth:`DSSCluster.get_settings` instead.
    """
    def __init__(self, client, cluster_id, settings):
        self.client = client
        self.cluster_id = cluster_id
        self.settings = settings

    def get_raw(self):
        """
        Gets all settings as a raw dictionary.

        Changes made to the returned object will be reflected when saving.

        Fields that can be updated:

        * **permissions**, **usableByAll**, **owner**
        * **params**

        :return: reference to the raw settings, not a copy. See :meth:`DSSCluster.get_definition()`
        :rtype: dict
        """
        return self.settings

    def get_plugin_data(self):
        """
        Get the opaque data returned by the cluster's start.

        .. caution::
            You should generally not modify this

        :return: the data stored by the plugin in the cluster, None if the cluster is not created by a plugin
        :rtype: dict
        """
        return self.settings.get("data", None)

    def save(self):
        """
        Saves back the settings to the cluster
        """
        url = "/admin/clusters/%s" % self.cluster_id
        return self.client._perform_json("PUT", url, body=self.settings)
class DSSClusterStatus(object):
    """
    The status of a cluster.

    .. important::
        Do not instantiate directly, use :meth:`DSSCluster.get_status` instead.
    """
    def __init__(self, client, cluster_id, status):
        self.client = client
        self.cluster_id = cluster_id
        self.status = status

    def get_raw(self):
        """
        Gets the whole status as a raw dictionary.

        :return: status information, with fields:

            * **state** : current state of the cluster. Possible values are NONE, STARTING, RUNNING, STOPPING
            * **clusterType** : type of cluster. Can be "manual" or a plugin cluster element type
            * **usages** : list of usages of the cluster. Each usage is a dict with either **projectKey** (when cluster is set in the project's settings), or **scenarioId**, **scenarioProjectKey** and **scenarioRunId** (when the cluster is created and used by a scenario run)
            * **otherProjectUsagesCount** : number of projects that use the cluster but that the user cannot access (these projects are not in **usages**)
            * **otherScenarioUsagesCount** : number of scenarios that use the cluster but that the user cannot access (these scenarios are not in **usages**)
            * **error** : if the cluster start failed, a dict with error information

        :rtype: dict
        """
        return self.status
class DSSInstanceVariables(dict):
    """
    Dict containing the instance variables.

    The variables can be modified directly in the dict and persisted using its :meth:`save` method.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_global_variables` instead.
    """
    def __init__(self, client, variables):
        # Fixed: previously called super(dict, self).__init__(), which targets the
        # wrong class in the MRO (object, skipping dict's own initializer) and then
        # relied on update(). Initialize the dict content through the proper super call.
        super(DSSInstanceVariables, self).__init__(variables)
        self.client = client

    def save(self):
        """
        Save the changes made to the instance variables.

        .. note::
            This call requires an API key with admin rights.
        """
        return self.client._perform_empty("PUT", "/admin/variables/", body=self)
class DSSGlobalUsageSummary(object):
    """
    The summary of the usage of the DSS instance.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_global_usage_summary` instead.
    """
    def __init__(self, data):
        self.data = data

    @property
    def raw(self):
        """
        Get the usage summary structure

        The summary report has top-level fields per object type, like **projectSummaries** or **datasets**, each
        containing counts, usually a **all** global count, then several **XXXXByType** dict with counts by object
        sub-type (for example, for datasets the sub-type would be the type of the connection they're using)

        :rtype: dict
        """
        return self.data

    @property
    def projects_count(self):
        """
        Get the number of projects on the instance

        :rtype: int
        """
        return self.data["projects"]

    @property
    def total_datasets_count(self):
        """
        Get the number of datasets on the instance

        :rtype: int
        """
        return self.data["datasets"]["all"]

    @property
    def total_recipes_count(self):
        """
        Get the number of recipes on the instance

        :rtype: int
        """
        return self.data["recipes"]["all"]

    @property
    def total_jupyter_notebooks_count(self):
        """
        Get the number of code notebooks on the instance

        :rtype: int
        """
        notebooks = self.data["notebooks"]
        return notebooks["nbJupyterNotebooks"]

    @property
    def total_sql_notebooks_count(self):
        """
        Get the number of sql notebooks on the instance

        :rtype: int
        """
        notebooks = self.data["notebooks"]
        return notebooks["nbSqlNotebooks"]

    @property
    def total_scenarios_count(self):
        """
        Get the number of scenarios on the instance

        :rtype: int
        """
        return self.data["scenarios"]["all"]

    @property
    def total_active_with_trigger_scenarios_count(self):
        """
        Get the number of active scenarios on the instance

        :rtype: int
        """
        return self.data["scenarios"]["activeWithTriggers"]
class DSSCodeStudioTemplateListItem(object):
    """
    An item in a list of code studio templates.

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.list_code_studio_templates`
    """
    def __init__(self, client, data):
        self.client = client
        self._data = data

    def to_code_studio_template(self):
        """
        Get the handle corresponding to this code studio template

        :rtype: :class:`DSSCodeStudioTemplate`
        """
        return DSSCodeStudioTemplate(self.client, self._data["id"])

    @property
    def label(self):
        """
        Get the label of the template

        :rtype: string
        """
        return self._data["label"]

    @property
    def id(self):
        """
        Get the identifier of the template

        :rtype: string
        """
        return self._data["id"]

    @property
    def build_for_configs(self):
        """
        Get the list of container configurations this template is built for

        :return: a list of configuration name
        :rtype: list[string]
        """
        return self._data.get("buildFor", [])

    @property
    def last_built(self):
        """
        Get the timestamp of the last build of the template

        :return: a timestamp, or None if the template was never built
        :rtype: :class:`datetime.datetime`
        """
        ts = self._data.get("lastBuilt", 0)
        return datetime.fromtimestamp(ts / 1000) if ts > 0 else None
class DSSCodeStudioTemplate(object):
    """
    A handle to interact with a code studio template on the DSS instance

    .. important::
        Do not instantiate directly, use :meth:`dataikuapi.DSSClient.get_code_studio_template`.
    """
    def __init__(self, client, template_id):
        self.client = client
        self.template_id = template_id

    ########################################################
    # Template description
    ########################################################

    def get_settings(self):
        """
        Get the template's settings.

        :return: a :class:`DSSCodeStudioTemplateSettings` object to interact with code studio template settings
        :rtype: :class:`DSSCodeStudioTemplateSettings`
        """
        raw = self.client._perform_json("GET", "/admin/code-studios/%s" % (self.template_id))
        return DSSCodeStudioTemplateSettings(self.client, self.template_id, raw)

    ########################################################
    # Building
    ########################################################

    def build(self):
        """
        Build or rebuild the template.

        .. note::
            This call needs an API key which has an user to impersonate set, or a personal API key.

        :return: a handle to the task of building the image
        :rtype: :class:`~dataikuapi.dss.future.DSSFuture`
        """
        resp = self.client._perform_json("POST", "/admin/code-studios/%s/build" % (self.template_id))
        return DSSFuture(self.client, resp.get('jobId', None), resp)
class DSSCodeStudioTemplateSettings(object):
    """
    The settings of a code studio template

    .. important::
        Do not instantiate directly, use :meth:`DSSCodeStudioTemplate.get_settings`
    """
    def __init__(self, client, template_id, settings):
        self.client = client
        self.template_id = template_id
        self.settings = settings

    def get_raw(self):
        """
        Gets all settings as a raw dictionary.

        :return: a reference to the raw settings, not a copy. Keys are

            * **id** : unique identifier of the template
            * **type** : type of the template. Builtin values are 'manual' and 'block_based'; more types can be added via plugin components
            * **label** : label of the template in the UI
            * **icon** : icon to use for code studios on this template
            * **tags** : list of tags (strings)
            * **isEditor** : whether the template defines a code studio that can be used to edit objects in DSS
            * **owner**, **defaultPermission** and **permissions** : definition of the permissions on the template
            * **defaultContainerConf** : container config to use on code studios created on this template
            * **allowContainerConfOverride** : if True, the container config of code studios on this template can be overriden at the project level
            * **allContainerConfs** : if True, build the container images for all configs of the instance
            * **containerConfs** : if **allContainerConfs** is False, a list of container config names to build images for
            * **params** : definition of the contents of the template. Depends on the **type**

        :rtype: dict
        """
        return self.settings

    def get_built_for_all_container_confs(self):
        """
        Whether the template builds an image for each container config

        :rtype: boolean
        """
        return self.settings.get("allContainerConfs", False)

    def get_built_container_confs(self):
        """
        Get the list of container configs for which the template builds an image (if not all)

        :return: a list of container configuration names
        :rtype: list[string]
        """
        return self.settings.get("containerConfs", [])

    def set_built_container_confs(self, *configs, **kwargs):
        """
        Set the list of container configs for which the template builds an image

        :param boolean all: if True, an image is built for each config
        :param list[string] configs: list of configuration names to build images for
        """
        # Renamed local to avoid shadowing the builtin all(); the kwarg name "all"
        # is kept for backward compatibility with existing callers.
        build_all = kwargs.get("all", False)
        self.settings['allContainerConfs'] = build_all
        if not build_all:
            # Fixed: store a real list instead of the *args tuple, matching the
            # documented "list of container config names" shape of the settings.
            self.settings['containerConfs'] = list(configs)

    def save(self):
        """
        Saves the settings of the code studio template
        """
        self.client._perform_empty("PUT", "/admin/code-studios/%s" % (self.template_id), body=self.settings)
|
# Generated by Django 3.1.13 on 2022-03-24 19:37
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import taggit.managers
class Migration(migrations.Migration):
    # Auto-generated Django migration (see header comment); schema changes only —
    # do not hand-edit beyond comments.
    # Introduces RSVP tracking for event volunteers:
    #   - RSVPVolunteerRelation: one row per volunteer RSVP to an event
    #     (optionally scoped to an event project)
    #   - TaggedRSVPVolunteerRole: taggit "through" model linking RSVPs to role tags

    dependencies = [
        ('democracylab', '0009_auto_20210302_2036'),
        ('taggit', '0003_taggeditem_add_unique_index'),
        ('civictechprojects', '0056_project_event_created_from'),
    ]

    operations = [
        migrations.CreateModel(
            name='RSVPVolunteerRelation',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # soft-delete flag rather than physical row deletion
                ('deleted', models.BooleanField(default=False)),
                ('application_text', models.CharField(blank=True, max_length=10000)),
                # defaults to the time the RSVP row is created
                ('rsvp_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvp_volunteers', to='civictechprojects.event')),
                # optional: an RSVP may target a specific project within the event
                ('event_project', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='rsvp_volunteers', to='civictechprojects.eventproject')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='TaggedRSVPVolunteerRole',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content_object', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='civictechprojects.rsvpvolunteerrelation')),
                ('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='civictechprojects_taggedrsvpvolunteerrole_items', to='taggit.tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        # added after CreateModel because the TaggableManager needs the through model above
        migrations.AddField(
            model_name='rsvpvolunteerrelation',
            name='role',
            field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='rsvp_roles', through='civictechprojects.TaggedRSVPVolunteerRole', to='taggit.Tag', verbose_name='Tags'),
        ),
        migrations.AddField(
            model_name='rsvpvolunteerrelation',
            name='volunteer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='rsvp_events', to='democracylab.contributor'),
        ),
    ]
|
#-*- coding:utf-8; mode:python; indent-tabs-mode: nil; c-basic-offset: 2; tab-width: 2 -*-
import copy, os, os.path as path
from collections import namedtuple
from bes.system.check import check
from bes.fs.file_check import file_check
from bes.fs.file_util import file_util
from .config_data import config_data
class config_file(namedtuple('config_file', 'root_dir, filename, data')):
  """One parsed config file: its project root dir, absolute filename and parsed data."""

  def __new__(clazz, filename):
    # Fixed: validate the argument *before* transforming it. Previously
    # path.abspath() ran first, so a non-string input failed with an opaque
    # TypeError instead of the library's check error.
    check.check_string(filename)
    filename = path.abspath(filename)
    file_check.check_file(filename)
    content = file_util.read(filename, codec = 'utf-8')
    # The config file lives one directory below its root.
    root_dir = path.normpath(path.join(path.dirname(filename), '..'))
    data = config_data.parse(content, filename = filename)
    return clazz.__bases__[0].__new__(clazz, root_dir, filename, data)

  def substitute(self, variables):
    """Return a new config_file whose data has *variables* substituted.

    "root_dir" is always injected into the variables; the caller's dict is
    deep-copied and left untouched.
    """
    variables = copy.deepcopy(variables)
    variables['root_dir'] = self.root_dir
    return self.__class__.__bases__[0].__new__(self.__class__,
                                               self.root_dir,
                                               self.filename,
                                               self.data.substitute(variables))

  @property
  def nice_filename(self):
    """The filename with the home directory abbreviated to "~/"."""
    home = path.expanduser('~/')
    return self.filename.replace(home, '~/')
|
"""
HTML inlines
Usage: [Text content: will be Markdown formatted]{ attributes to use }
Examples:
[A normal span tag with a class]{ .warning }
[A mark tag]{ /mark }
[A del tag]{ /del }
"""
import markdown
from markdown.util import etree
import re
from tag_attribute_parser import parse_attribute_string, attributes_to_element
# Matches the inline syntax [content]{ attributes }; both groups are non-greedy.
LINE_RE = r'\[(?P<content>.*?)\](?P<attributes>\{.*?\})'
class HtmlInlineExtension(markdown.Extension):
    """Markdown extension that wires up the [text]{ attrs } inline syntax."""

    def extendMarkdown(self, md):
        md.registerExtension(self)
        # Priority 21 beats 'html_block' (20) so block parsing cannot swallow the match.
        processor = HtmlInlineProcessor(LINE_RE)
        md.inlinePatterns.register(processor, 'html_attribute_inline', 21)
class HtmlInlineProcessor(markdown.inlinepatterns.InlineProcessor):
    """Turn a [content]{ attributes } match into an HTML element."""

    def handleMatch(self, m, data):
        attr_spec = parse_attribute_string(m.group('attributes'))
        if attr_spec:
            el = attributes_to_element(attr_spec, default='span')
        else:
            # No parseable attributes: fall back to a bare <span>.
            el = etree.Element('span')
        el.text = m.group('content')
        return el, m.start(0), m.end(0)
def makeExtension(*args, **kwargs):
    """Entry point used by markdown.Markdown(extensions=[...])."""
    return HtmlInlineExtension(*args, **kwargs)
|
"""
改良予定
現状は各変数の分布のみだが、相関分析できるようにする
"""
from configparser import ConfigParser
from pathlib import Path
import sys
from typing import List
from math import log2, ceil
import itertools
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.preprocessing import LabelEncoder
import numpy as np
# Module-level state, populated by get_args() / get_conf()
args = list()                  # raw sys.argv
path_input_dir = Path()        # folder containing the input CSVs
path_output_dir_root = Path()  # root folder for all output artifacts
input_charset = ''             # encoding of the input CSVs
threshould_hist = 0            # max unique values treated as categorical for histograms
corr_mode = 'all'              # correlation-calculation mode
def get_args():
    """Capture sys.argv into the module-global ``args``.

    Exits when more than one positional argument was given
    (argv[0] is the script name, so len >= 3 means two or more).
    """
    global args
    args = sys.argv
    if len(args) < 3:
        return
    print('引数が多すぎます。')
    sys.exit()
def get_conf():
    """Load settings from ``02_conf_analysis_correlation.ini`` into module globals.

    A command-line argument (``args[1]``), when present, overrides the
    configured input directory.
    """
    conf = ConfigParser()
    conf.read('02_conf_analysis_correlation.ini')
    global path_input_dir
    global path_output_dir_root
    global input_charset
    global threshould_hist
    # Bug fix: without this declaration the assignment below created a
    # function-local corr_mode and the module global silently kept 'all'.
    global corr_mode
    if len(args) == 2:
        path_input_dir = Path(args[1])
    else:
        # get_args() already validated argc, so this branch means "no argument given"
        path_input_dir = Path(conf['file']['input_dir'])
    path_output_dir_root = Path(conf['file']['output_dir'])
    input_charset = conf['file']['input_charset']
    threshould_hist = int(conf['graph']['threshould_hist'])
    corr_mode = conf['graph']['corr_mode']
    print(f'入力フォルダ: {path_input_dir}')
    print(f'出力フォルダ: {path_output_dir_root}')
    print(f'入力ファイルの文字コード: {input_charset}')
    print(f'相関係数算出モード: {corr_mode}')
def get_inputfilepaths():
    """Collect the input CSV files.

    Returns:
        List[pathlib.Path]: every ``*.csv`` directly under the input directory.
    """
    return list(path_input_dir.glob('*.csv'))
def make_output_dir(list_path_inputfiles):
    """Create one output folder per input CSV, named after the file (sans '.csv').

    Args:
        list_path_inputfiles (List[pathlib.Path]): input CSV paths
    Returns:
        List[pathlib.Path]: the created output directory paths
    """
    created_dirs = []
    for csv_path in list_path_inputfiles:
        out_dir = path_output_dir_root / csv_path.name.replace('.csv', '')
        out_dir.mkdir(parents=True, exist_ok=True)
        created_dirs.append(out_dir)
    return created_dirs
def calc_starges(num_samples):
    """Sturges' rule: histogram bin count for *num_samples* samples.

    Args:
        num_samples (int): number of data samples
    Returns:
        int: bin count, round(log2(n) + 1)
    """
    bins = log2(num_samples) + 1
    return int(round(bins, 0))
def set_log_scale(ax, sr_counts):
if sr_counts.max() // sr_counts.min() >= 1000:
ax.set_yscale('log')
ax.set_ylim([sr_counts.min() - (sr_counts.min() / 2), sr_counts.max() + (sr_counts.max() / 2)])
def make_hist(df_data, path_outputdir, num_bin, num_sample):
    """
    Draw one histogram per column and save them all in a single PNG.

    Columns whose every record is missing are skipped. Bin count:
    numeric columns use *num_bin* (Sturges); object-typed or
    low-cardinality columns (<= threshould_hist unique values) are
    drawn as bar charts of their value counts instead.

    Args:
        df_data (pd.DataFrame): input CSV data
        path_outputdir (pathlib.Path): output folder path
        num_bin (int): bin count for numeric histograms
        num_sample (int): number of DataFrame records
    """
    num_cols = len(df_data.columns)
    # Four plots per row of the figure grid.
    num_fig_rows = ceil(num_cols / 4)
    fig = plt.figure(figsize=(40,7 * num_fig_rows))
    for idx, col in enumerate(df_data.columns):
        if df_data[col].isna().sum() == num_sample:
            # Entirely missing: nothing to plot.
            print(f'{col}: ヒストグラム化不可')
            continue
        print(f'{col}: ヒストグラム化可')
        ax = fig.add_subplot(num_fig_rows,4,idx+1)
        if (df_data[col].dtype == 'object') or (len(df_data[col].unique()) <= threshould_hist):
            # Categorical / low-cardinality: bar chart of value counts.
            sr_counts = df_data[col].value_counts().sort_index()
            # NOTE(review): astype(str) runs before fillna, so NaN becomes the
            # string 'nan' and fillna('欠損') likely never fires — confirm intent.
            ax.bar(sr_counts.index.astype(str).fillna('欠損'), sr_counts)
            set_log_scale(ax, sr_counts)
        else:
            # Numeric: plain histogram with num_bin bins.
            sr_feature = df_data[col].dropna()
            ax.hist(sr_feature, bins=num_bin)
            # Approximate per-bin counts to decide whether a log scale is needed.
            sr_feature_bins = sr_feature // ((sr_feature.max() - sr_feature.min()) / num_bin)
            sr_feature_counts = sr_feature_bins.value_counts()
            set_log_scale(ax, sr_feature_counts)
        ax.set_title(col, fontsize=19)
        ax.set_xlabel('value', fontsize=15)
        ax.set_ylabel('count', fontsize=15)
        plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
    plt.tight_layout()
    outputfilepath = path_outputdir/(path_outputdir.name + '_histgram.png')
    plt.savefig(outputfilepath, dpi=100)
    plt.close()
    print(f'ヒストグラムファイルパス: {outputfilepath}')
def analysis_distribution(df_data, path_outputdir):
    """Draw distribution histograms for every column of *df_data*.

    Args:
        df_data (pd.DataFrame): input CSV data
        path_outputdir (pathlib.Path): output folder path
    """
    sample_count = len(df_data)
    num_bin = calc_starges(sample_count)
    print(f'ビンの数: {num_bin}')
    # Generate the histogram sheet.
    make_hist(df_data, path_outputdir, num_bin, sample_count)
def calc_pearson_corr(sr_subject, df_object_num):
    """Pearson correlation of *sr_subject* against each numeric partner column.

    Only samples present in both series (after dropna) enter each correlation.

    Args:
        sr_subject (pd.Series): feature of interest (NaNs already dropped)
        df_object_num (pd.DataFrame): the other numeric features
    Returns:
        pd.DataFrame: coefficient plus its type label per partner feature
    """
    result = pd.DataFrame(index=df_object_num.columns, columns=['相関係数', '相関係数タイプ'])
    for name in df_object_num.columns:
        partner = df_object_num[name].dropna()
        shared = list(set(sr_subject.index) & set(partner.index))
        result.loc[name, '相関係数'] = sr_subject[shared].corr(partner[shared])
    result['相関係数タイプ'] = '相関係数'
    return result
def rate_corr(sr_numeric_feature, sr_categorical_feature):
    """Correlation ratio (eta squared) between a numeric and a categorical series.

    Args:
        sr_numeric_feature (pd.Series): numeric values
        sr_categorical_feature (pd.Series): category labels
    Returns:
        float: between-class variation / total variation
    """
    numeric = sr_numeric_feature.name
    category = sr_categorical_feature.name
    df_calc = pd.concat([sr_numeric_feature, sr_categorical_feature], axis=1)
    grouped = df_calc.groupby(category)
    # Per-class means of the numeric column.
    sr_group_ave = grouped.mean()[numeric]
    # Within-class variation: squared deviations from each class mean.
    squared_devs = []
    for label in df_calc[category].unique().tolist():
        members = df_calc[df_calc[category] == label][numeric]
        squared_devs.append(pow(members - sr_group_ave[label], 2).sum())
    dev_inner_class = sum(squared_devs)
    # Between-class variation: class sizes times squared deviation of
    # class means from the grand mean.
    grand_mean = sr_numeric_feature.mean()
    sr_dev_between = pow(sr_group_ave - grand_mean, 2)
    sr_group_count = grouped.count()
    dev_inter_class = np.dot(sr_group_count.values.reshape(-1), sr_dev_between.values.reshape(-1))
    return dev_inter_class / (dev_inner_class + dev_inter_class)
def calc_rate_corr(sr_subject_feature, df_object, category_col=None):
    """Correlation ratio between the subject feature and every partner column.

    Args:
        sr_subject_feature (pd.Series): feature of interest
        df_object (pd.DataFrame): partner features
        category_col (str, optional): 'left' when the subject series is the
            categorical side, 'right' when the partner columns are.
    Raises:
        ValueError: when category_col is neither 'left' nor 'right'
    Returns:
        pd.DataFrame: correlation ratio plus its type label per partner feature
    """
    if category_col not in ('left', 'right'):
        raise ValueError(f'カテゴリ変数の指定が誤りがあります: category_col={category_col}')
    result = pd.DataFrame(index=df_object.columns, columns=['相関係数', '相関係数タイプ'])
    for name in df_object.columns:
        partner = df_object[name].dropna()
        shared = list(set(sr_subject_feature.index) & set(partner.index))
        if category_col == 'left':
            # Subject holds the categories, partner is numeric.
            value = rate_corr(partner[shared], sr_subject_feature[shared])
        else:
            # Subject is numeric, partner holds the categories.
            value = rate_corr(sr_subject_feature[shared], partner[shared])
        result.loc[name, '相関係数'] = value
    result['相関係数タイプ'] = '相関比'
    return result
def cramersV(x, y):
    """Cramer's V association coefficient for two categorical arrays.

    Args:
        x (np.ndarray, pd.Series): first categorical feature
        y (np.ndarray, pd.Series): second categorical feature
    Returns:
        float: Cramer's V in [0, 1]
    """
    observed = np.array(pd.crosstab(x, y)).astype(np.float32)
    total = observed.sum()
    row_totals = observed.sum(axis=1)
    col_totals = observed.sum(axis=0)
    expected = np.outer(row_totals, col_totals) / total
    chi2 = np.sum((observed - expected) ** 2 / expected)
    return np.sqrt(chi2 / (total * (np.min(observed.shape) - 1)))
def calc_cramers_corr(sr_subject_feature, df_object_cate):
    """Cramer's V between the subject feature and every categorical partner.

    Args:
        sr_subject_feature (pd.Series): feature of interest
        df_object_cate (pd.DataFrame): the other categorical features
    Returns:
        pd.DataFrame: coefficient plus its type label per partner feature
    """
    result = pd.DataFrame(index=df_object_cate.columns, columns=['相関係数', '相関係数タイプ'])
    for name in df_object_cate.columns:
        partner = df_object_cate[name].dropna()
        shared = list(set(sr_subject_feature.index) & set(partner.index))
        result.loc[name, '相関係数'] = cramersV(sr_subject_feature[shared], partner[shared])
    result['相関係数タイプ'] = '連関係数'
    return result
def drop_all_null_cols(df_data, num_sample):
    """Names of the columns that are not entirely missing.

    Args:
        df_data (pd.DataFrame): CSV file data
        num_sample (float): total sample count
    Returns:
        pd.Index: usable (not all-null) feature names
    """
    null_counts = df_data.isna().sum()
    return null_counts[null_counts != num_sample].index
def create_label_encoder(sr_feature):
    """Fit a LabelEncoder on the given feature's values.

    Args:
        sr_feature (pd.Series): feature data
    Returns:
        sklearn.preprocessing.LabelEncoder: encoder fitted to this feature
    """
    return LabelEncoder().fit(sr_feature)
def make_correlation(df_data, path_outputdir, num_sample):
    """
    All-pairs correlation computation and scatter-plot rendering.

    For every usable feature X, computes a correlation against every other
    feature Y with the measure matching the dtype pair (Pearson for
    numeric-numeric, correlation ratio for numeric-categorical, Cramer's V
    for categorical-categorical), renders one scatter sheet per X, and
    finally writes the full correlation matrix to correlation.csv.

    Args:
        df_data (pd.DataFrame): CSV file data
        path_outputdir (pathlib.Path): output folder path
        num_sample (float): total sample count
    """
    num_cols = len(df_data.columns)
    # Four plots per figure row.
    num_fig_rows = ceil(num_cols / 4)
    effective_features = drop_all_null_cols(df_data, num_sample)
    # Try every feature as the X axis.
    list_sr_correlation = list()
    for subject_feature in effective_features:
        sr_subject_feature = df_data[subject_feature].dropna()
        dtype_subject = sr_subject_feature.dtype
        object_features = effective_features.drop(subject_feature)
        print('【X軸】')
        print(subject_feature, dtype_subject)
        # Correlation computation: pair this X with every remaining Y.
        sr_dtype_subject = pd.Series(dtype_subject, index=object_features, name='subject')
        sr_dtype_object = df_data.drop(subject_feature, axis=1).dtypes
        sr_dtype_object.name = 'object'
        df_dtype_combination = pd.concat([sr_dtype_subject, sr_dtype_object], axis=1)
        # Split the partner features by which correlation measure applies.
        is_numeric_subject = dtype_subject in [int, float]
        list_df_corr_subject = list()
        if is_numeric_subject:
            # numeric x numeric -> Pearson
            query_num_num = (df_dtype_combination['subject'].apply(lambda x: x in [int, float])) & (df_dtype_combination['object'].apply(lambda x: x in [int, float]))
            list_num_num = df_dtype_combination.iloc[query_num_num.values.tolist(), :].index.tolist()
            # numeric x categorical -> correlation ratio
            query_num_cate = (df_dtype_combination['subject'].apply(lambda x: x in [int, float])) & ~(df_dtype_combination['object'].apply(lambda x: x in [int, float]))
            list_num_cate = df_dtype_combination.iloc[query_num_cate.values.tolist(), :].index.tolist()
            df_corr_num_num = calc_pearson_corr(sr_subject_feature, df_data[list_num_num])
            df_corr_num_cate = calc_rate_corr(sr_subject_feature, df_data[list_num_cate], category_col='right')
            list_df_corr_subject.append(df_corr_num_num)
            list_df_corr_subject.append(df_corr_num_cate)
        else:
            # categorical x numeric -> correlation ratio
            query_cate_num = ~(df_dtype_combination['subject'].apply(lambda x: x in [int, float])) & (df_dtype_combination['object'].apply(lambda x: x in [int, float]))
            list_cate_num = df_dtype_combination.iloc[query_cate_num.values.tolist(), :].index.tolist()
            # categorical x categorical -> Cramer's V
            query_cate_cate = ~(df_dtype_combination['subject'].apply(lambda x: x in [int, float])) & ~(df_dtype_combination['object'].apply(lambda x: x in [int, float]))
            list_cate_cate = df_dtype_combination.iloc[query_cate_cate.values.tolist(), :].index.tolist()
            df_corr_cate_num = calc_rate_corr(sr_subject_feature, df_data[list_cate_num], category_col='left')
            df_corr_cate_cate = calc_cramers_corr(sr_subject_feature, df_data[list_cate_cate])
            list_df_corr_subject.append(df_corr_cate_num)
            list_df_corr_subject.append(df_corr_cate_cate)
        df_corr_subject = pd.concat(list_df_corr_subject, axis=0)
        sr_corr_subject = df_corr_subject['相関係数']
        sr_corr_subject.name = subject_feature
        list_sr_correlation.append(sr_corr_subject)
        # Scatter plots for this X against every Y.
        fig = plt.figure(figsize=(40,7 * num_fig_rows))
        # Label-encode categorical X so it can be placed on a scatter axis.
        if dtype_subject == 'object':
            le_subject = create_label_encoder(sr_subject_feature)
            sr_subject_feature = pd.Series(le_subject.transform(sr_subject_feature), index=sr_subject_feature.index)
        print('【Y軸】')
        # Every Y feature except X itself.
        for idx, object_feature in enumerate(object_features):
            sr_subject_feature_tmp = sr_subject_feature.copy()
            sr_object_feature = df_data[object_feature].dropna()
            dtype_object = sr_object_feature.dtype
            print(object_feature, dtype_object)
            # Keep only the samples present in both features.
            list_match_index = list(set(sr_subject_feature_tmp.index) & set(sr_object_feature.index))
            if len(list_match_index) == 0:
                continue
            # Label-encode categorical Y for the scatter plot.
            if dtype_object == 'object':
                le_object = create_label_encoder(sr_object_feature)
                sr_object_feature = pd.Series(le_object.transform(sr_object_feature), index=sr_object_feature.index)
            # Scatter of the overlapping samples.
            ax = fig.add_subplot(num_fig_rows,4,idx+1)
            ax.scatter(sr_subject_feature_tmp[list_match_index], sr_object_feature[list_match_index])
            ax.set_title(f'{subject_feature} vs {object_feature}', fontsize=19)
            ax.set_xlabel(subject_feature, fontsize=15)
            ax.set_ylabel(object_feature, fontsize=15)
            # Restore string tick labels; drop tick values the LabelEncoder
            # never saw (inverse_transform raises ValueError on those).
            if dtype_subject == 'object':
                ar_xticks = np.unique(np.asarray(ax.get_xticks(), dtype=np.int32))
                while True:
                    try:
                        ar_xticks = ar_xticks[ar_xticks >= 0]
                        ax.set_xticks(ar_xticks)
                        ax.set_xticklabels(le_subject.inverse_transform(ar_xticks))
                        break
                    except ValueError:
                        ar_xticks[-1] = ar_xticks[-1] - 1
            if dtype_object == 'object':
                ar_yticks = np.unique(np.asarray(ax.get_yticks(), dtype=np.int32))
                while True:
                    try:
                        ar_yticks = ar_yticks[ar_yticks >= 0]
                        ax.set_yticks(ar_yticks)
                        ax.set_yticklabels(le_object.inverse_transform(ar_yticks))
                        break
                    except ValueError:
                        ar_yticks[-1] = ar_yticks[-1] - 1
            # Switch to log scale when the data spans >= 3 orders of magnitude (numeric axes only).
            if dtype_subject != 'object':
                if sr_subject_feature_tmp[list_match_index].max() // sr_subject_feature_tmp[list_match_index].min() >= 1000:
                    ax.set_xscale('log')
                    ax.set_xlim([sr_subject_feature_tmp[list_match_index].min() - (sr_subject_feature_tmp[list_match_index].min() / 2), sr_subject_feature_tmp[list_match_index].max() + (sr_subject_feature_tmp[list_match_index].max() / 2)])
            if dtype_object != 'object':
                if sr_object_feature[list_match_index].max() // sr_object_feature[list_match_index].min() >= 1000:
                    ax.set_yscale('log')
                    ax.set_ylim([sr_object_feature[list_match_index].min() - (sr_object_feature[list_match_index].min() / 2), sr_object_feature[list_match_index].max() + (sr_object_feature[list_match_index].max() / 2)])
            # Annotate the correlation value in the lower-left corner.
            str_corr_type = df_corr_subject.loc[object_feature, '相関係数タイプ']
            str_corr_val = str(df_corr_subject.loc[object_feature, '相関係数'])
            str_corr = f'{str_corr_type}: {str_corr_val}'
            ax.text(x=ax.get_xlim()[0], y=ax.get_ylim()[0], s=str_corr, fontsize=12)
        # After all Y features for this X: save one sheet per subject feature.
        plt.tight_layout()
        # NOTE(review): appears to swap '/' for a full-width slash so the
        # feature name is safe as a filename — confirm the replacement
        # character is intentionally U+FF0F.
        subject_feature = subject_feature.replace('/', '/')
        outputfilepath = path_outputdir/(f'{subject_feature}_scatters_correlations.png')
        plt.savefig(outputfilepath, dpi=100)
        plt.close()
    # Write the assembled correlation matrix.
    df_correlation = pd.concat(list_sr_correlation, axis=1, sort=False)
    path_corr_output = path_outputdir/'correlation.csv'
    df_correlation.to_csv(path_corr_output, encoding=input_charset)
def analysis_correlation(df_data, path_outputdir):
    """Compute pairwise correlations and draw scatter plots for *df_data*."""
    record_count = len(df_data)
    # Scatter rendering and correlation computation.
    make_correlation(df_data, path_outputdir, record_count)
def exec_analysis(list_path_inputfiles, list_path_outputdirs):
    """
    Run the graph analyses for every input CSV.

    Args:
        list_path_inputfiles (List[pathlib.Path]): input file paths
        list_path_outputdirs (List[pathlib.Path]): output folder paths
    """
    for inputfilepath, outputdirpath in zip(list_path_inputfiles, list_path_outputdirs):
        print(f'入力ファイル名: {inputfilepath}')
        df_data = pd.read_csv(inputfilepath, encoding=input_charset)
        print(f'カラム名: {df_data.columns}')
        print(f'レコード数: {len(df_data)}')
        print('データ例: ')
        print(df_data.head(5))
        # Distribution histograms first, then pairwise correlations/scatters.
        analysis_distribution(df_data, outputdirpath)
        analysis_correlation(df_data, outputdirpath)
if __name__ =='__main__':
    # Entry point: read CLI args and config, validate the input folder,
    # then analyze every CSV found there.
    get_args()
    get_conf()
    if not(path_input_dir.exists()):
        print(f'次のフォルダは存在しません。終了します。: {path_input_dir.name}')
        sys.exit()
    list_path_inputfiles = get_inputfilepaths()
    if len(list_path_inputfiles) == 0:
        print('入力フォルダにCSVファイルが存在しません。終了します。')
        sys.exit()
    list_path_outputdirs = make_output_dir(list_path_inputfiles)
    print(f'出力先フォルダパス: {list_path_outputdirs}')
    exec_analysis(list_path_inputfiles, list_path_outputdirs)
    # Done.
    print('全ての処理が終了しました。')
|
from IPython import display
from matplotlib import pyplot as plt
from mxnet import autograd,nd
import random
# Generate the dataset.
# 1000 training samples with 2 input features each; labels come from the
# true linear model y = X.w + b plus small Gaussian noise e.
num_inputs = 2
num_examples = 1000
true_w = [2,-3.4]
true_b = 4.2
features = nd.random.normal(scale=1,shape=(num_examples,num_inputs))
labels = true_w[0] * features[:,0] + true_w[1] * features[:,1] + true_b
labels = labels + nd.random.normal(scale=0.01 , shape=labels.shape)
# Each row of `features` is a length-2 vector.
# Each row of `labels` is a single scalar target.
# print(features[0],labels[0])
def use_svg_display():
    """Render matplotlib figures as crisp SVG inside IPython/Jupyter."""
    display.set_matplotlib_formats('svg')
def set_figsize(figsize=(3.5, 2.5)):
    """Set the default matplotlib figure size (SVG rendering enabled).

    Args:
        figsize: (width, height) in inches.
    """
    # Bug fix: the default was (3.5, 2, 5) — a 3-tuple caused by a comma
    # typo — which is not a valid figsize; the intended value is (3.5, 2.5).
    use_svg_display()
    plt.rcParams['figure.figsize'] = figsize
# Data loading: training iterates the dataset in small random batches.
def data_iter(batch_size, features, labels):
    """Yield shuffled minibatches of (features, labels).

    Args:
        batch_size: samples per batch (the last batch may be smaller).
        features: NDArray of shape (num_examples, num_inputs).
        labels: NDArray of per-example targets.
    """
    total = len(features)
    order = list(range(total))
    random.shuffle(order)
    for start in range(0, total, batch_size):
        picked = nd.array(order[start:min(start + batch_size, total)])
        yield features.take(picked), labels.take(picked)
batch_size = 10
# Smoke-check: read and print a single minibatch.
for x,y in data_iter(batch_size,features,labels):
    print(x,y)
    break
# Initialize model parameters:
# weights ~ N(0, 0.01), bias = 0.
w = nd.random.normal(scale=0.01,shape=(num_inputs,1))
b = nd.zeros(shape=(1,))
# Attach gradient buffers so autograd can compute updates for them
# during training.
w.attach_grad()
b.attach_grad()
# Define the model.
# Vectorized linear regression: matrix product via nd.dot.
def linreg(X,w,b):
    """Linear model: X @ w + b."""
    return nd.dot(X,w) + b
# Loss function.
def squared_loss(y_hat, y):
    """Half squared error; *y* is reshaped to match *y_hat*'s shape."""
    residual = y_hat - y.reshape(y_hat.shape)
    return residual ** 2 / 2
# Optimizer: minibatch stochastic gradient descent.
def sgd(params, lr, batch_size):
    """Apply one in-place SGD step to every parameter."""
    for param in params:
        # Gradient is summed over the batch, so divide by batch_size.
        param[:] = param - lr * param.grad / batch_size
# Train the model.
# Each epoch sweeps the dataset in minibatches: read (X, y), compute the
# loss, call backward() for the gradients, then apply one SGD step.
lr = 0.03
num_epochs = 3
net = linreg
loss = squared_loss
for epoch in range(num_epochs):
for X,y in data_iter(batch_size,features,labels):
with autograd.record():
l = loss() |
# O(n*m)
# n = len(matrix) | m = len(matrix[0])
class Solution:
    """LeetCode 85 — maximal rectangle of '1's in a binary matrix.

    O(n*m): build per-cell column heights, then solve a
    largest-rectangle-in-histogram problem for every row.
    """

    def maximalRectangle(self, matrix: List[List[str]]) -> int:
        """Return the area of the largest all-'1' rectangle in *matrix*."""
        heights = [[0] * len(row) for row in matrix]
        for r, row in enumerate(matrix):
            for c, cell in enumerate(row):
                if cell == "1":
                    above = heights[r - 1][c] if r > 0 else 0
                    heights[r][c] = above + 1
        best = 0
        for r in range(len(matrix)):
            best = max(best, self.bestRectangle(heights[r]))
        return best

    def bestRectangle(self, heights):
        """Largest rectangle area in a histogram, via a monotonic stack."""
        best = 0
        pending = []  # (start index, height), heights kept increasing
        for i, h in enumerate(heights):
            start = i
            while pending and h < pending[-1][1]:
                start, taller = pending.pop()
                best = max(best, (i - start) * taller)
            pending.append((start, h))
        # Bars still on the stack extend to the right edge.
        for start, taller in pending:
            best = max(best, (len(heights) - start) * taller)
        return best
|
import json
import argparse
import configparser
""" Compresses all json files into a single one and draws connections between them.
It has two uses:
1) Simply fetches all the
"""
FAILS = 0             # papers dropped because they lack a 'year' field
ALL_ENTRIES = dict()  # full corpus, keyed by paper id
## Read path to unlabelled papers and model from config
config = configparser.RawConfigParser()
config.read('config.cfg')
unlabelled_path = config.get('Data', 'unlabelled')
model_path = config.get('Model', 'model')
predictions_path = config.get('Data', 'predictions')
kumu_path = config.get('Data', 'outKumu')
semantics_pattern = config.get('Semantics', 'filePattern')
def fetch_data():
    """Load every corpus shard and return the entries that carry a 'year'.

    Shards are named <pattern>-000 .. <pattern>-180 plus 's2-corpus-additions';
    entries missing a 'year' key are counted in the global FAILS.
    """
    def inner_fetch(name):
        # Parse one JSON-lines shard file.
        global FAILS
        sem_dir = config.get('Semantics', 'pathToData')
        entries = []
        # Bug fix: the file handle was opened without ever being closed.
        with open(sem_dir + name, 'r') as f:
            for line in f:
                tmp = json.loads(line)
                if 'year' in tmp.keys():
                    entries.append(tmp)
                else:
                    FAILS += 1
        return entries
    def gen_name(digits, index):
        # e.g. pattern + '-00' + '7' -> zero-padded shard file name.
        return semantics_pattern + digits + str(index)
    all_entries = []
    for i in range(10):
        all_entries += inner_fetch(gen_name('-00', i))
    for i in range(10, 100):
        all_entries += inner_fetch(gen_name('-0', i))
    for i in range(100, 181):
        all_entries += inner_fetch(gen_name('-', i))
    all_entries += inner_fetch('s2-corpus-additions')
    return all_entries
# Load the corpus, sort it chronologically, and index it by paper id.
complete_entries = fetch_data()
complete_entries = sorted(complete_entries, key=lambda k: k['year'])
for item in complete_entries:
    ALL_ENTRIES[item['id']] = item
# Abbreviate entries: build a short citation-style label per paper,
# e.g. surname initials of the first three authors (+ '+' when more
# than two authors) followed by the two-digit year.
abbreviated_entries = list()
for paper in complete_entries:
    short_authors = [author['name'].split(' ')[-1] for author in paper['authors']]
    if len(short_authors) > 2:
        abbrv = [short_authors[i][0] for i in range(3)] + list("+")
        short_authors = abbrv
    complete_authors = [author['name'] for author in paper['authors']]
    short_year = str(paper['year'])[2:]
    abbreviated_entries.append({
        "label": "".join(author for author in short_authors) + short_year,
        "abstract": paper['paperAbstract'],
        "id": paper['id'],
        "venue": paper['venue'],
        "s2Url": paper['s2Url'],
        "authors": complete_authors,
        "title": paper['title'],
        "year": paper['year'],
    })
# Index the abbreviated records by paper id for quick patching.
abbreviated_entries_by_id = dict()
for paper in abbreviated_entries:
    abbreviated_entries_by_id[paper['id']] = paper
def add_corections():
    """Overlay fields from corrections.json onto the matching abbreviated entries."""
    with open("corrections.json", "r") as dataFile:
        corrections = json.load(dataFile)
    for paper in corrections:
        target = abbreviated_entries_by_id[paper['id']]
        for field in paper.keys():
            target[field] = paper[field]
# Apply manual corrections, then rebuild the flat list from the
# (now corrected) id index.
add_corections()
abbreviated_entries = list()
for key in abbreviated_entries_by_id.keys():
    paper = abbreviated_entries_by_id[key]
    abbreviated_entries.append(paper)
def output_all():
    """Return (kumu payload of every abbreviated entry, destination path)."""
    payload = {'elements': abbreviated_entries}
    return (payload, unlabelled_path)
def output_allplus():
    """Placeholder for the 'all+' output mode — not implemented, returns None."""
    pass
def output_sparse(limit):
    """Build the citation graph over predicted COED papers with >= *limit* in-citations.

    Args:
        limit (int): minimum number of in-citations for a paper to be kept.
    Returns:
        tuple: (kumu payload with 'elements' and 'connections', output file path)
    """
    # this outputs all papers that have at least limit citation and draws the graph between them
    # also makes sense to run this function after identified the coed_papers
    with open(predictions_path) as json_file:
        coed_papers = json.load(json_file)
    # get rid of annoying kumu format
    coed_papers = [p for p in coed_papers['elements'] if p['pred']]
    coed_papers = list(filter(lambda p: len(ALL_ENTRIES[p['id']]['inCitations']) >= limit, coed_papers))
    # put every coed paper id into a set to find them easily
    coed_paper_ids = set()
    for paper in coed_papers:
        coed_paper_ids.add(paper['id'])
    connections = list()
    NUM_CONNECTIONS = 0
    NUM_SKIPPED = 0
    for paper in coed_papers:
        paper_id = paper['id']
        paper['numInCitations'] = len(ALL_ENTRIES[paper_id]['inCitations'])
        paper.pop('pred', None)
        # outcitations are references inside paper
        # incitations are papers which cite the paper
        out_citations = ALL_ENTRIES[paper_id]['outCitations']
        for citation in out_citations:
            # only add a connection if it's between two COED papers
            if citation in coed_paper_ids:
                connections.append({
                    "id": NUM_CONNECTIONS,
                    "from": citation,
                    "to": paper_id,
                    "direction": "directed"
                })
                NUM_CONNECTIONS += 1
            else:
                NUM_SKIPPED += 1
    print("Total papers: ", len(coed_papers))
    print("Total connections: ", NUM_CONNECTIONS)
    print("Total skipped: ", NUM_SKIPPED)
    # now compress venues to avoid duplicates such as CCS, CCS '04, etc
    venues = list()
    # Bug fix: venues.txt was opened without ever being closed.
    with open('venues.txt', 'r') as f:
        for line in f:
            venues.append(json.loads(line))
    from difflib import SequenceMatcher
    for paper in coed_papers:
        best_venue = {'venue': paper['venue']}
        best_score = 0
        for venue in venues:
            for possible_name in venue['names']:
                match_score = SequenceMatcher(None, possible_name, paper['venue']).ratio()
                if match_score > best_score and match_score > 0.5:
                    best_score = match_score
                    best_venue = venue
        paper['venue'] = best_venue['venue']
    # Bug fix: the global kumu_path was mutated with '+=', so a second call
    # produced a doubled, broken path; build the output path locally instead.
    out_path = kumu_path + "outKumu" + str(limit) + ".json"
    return (dict({"elements": coed_papers, "connections": connections}), out_path)
def main():
    """CLI entry point: pick an output mode and write the resulting JSON.

    --output 0 = all entries, 1 = all+ (unimplemented), 2 = sparse graph;
    --limit applies only to the sparse mode.
    """
    parser = argparse.ArgumentParser(description='Process output type')
    parser.add_argument('--output', dest='output_type', type=int,
                        default=0, help='all/all+/sparse')
    parser.add_argument('--limit', dest='limit', type=int,
                        default=0, help='limit citations to')
    args = parser.parse_args()
    print(args.output_type)
    toWrite = None
    papers = None
    if args.output_type == 0:
        papers, toWrite = output_all()
    elif args.output_type == 1:
        papers = output_allplus()
    elif args.output_type == 2:
        papers, toWrite = output_sparse(args.limit)
    if toWrite is None:
        # Bug fix: output_allplus is a placeholder and unknown --output values
        # select nothing, which previously crashed on open(None).
        print("Nothing to write for --output", args.output_type)
        return
    with open(toWrite, "w", encoding='utf-8') as ku:
        json.dump(papers, ku, indent=2, ensure_ascii=False)
if __name__=="__main__":
    # Script entry point.
    main()
|
# Generated by Django 3.2.3 on 2021-07-01 05:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Replace rack.capacity with explicit storage-volume fields and add
    product dimension/weight columns.

    NOTE(review): the defaults are the string '0' rather than Decimal('0'),
    and decimal_places=10 with max_digits=19 leaves only 9 integer digits —
    presumably intentional; confirm against the model definitions.
    """
    dependencies = [
        ('inventory', '0021_auto_20210530_2228'),
    ]
    operations = [
        # rack.capacity is superseded by the *_storage_vol fields below.
        migrations.RemoveField(
            model_name='rack',
            name='capacity',
        ),
        migrations.AddField(
            model_name='products',
            name='product_height_cm',
            field=models.DecimalField(decimal_places=10, default='0', max_digits=19),
        ),
        migrations.AddField(
            model_name='products',
            name='product_length_cm',
            field=models.DecimalField(decimal_places=10, default='0', max_digits=19),
        ),
        migrations.AddField(
            model_name='products',
            name='product_weight_g',
            field=models.DecimalField(decimal_places=10, default='0', max_digits=19),
        ),
        migrations.AddField(
            model_name='products',
            name='product_width_cm',
            field=models.DecimalField(decimal_places=10, default='0', max_digits=19),
        ),
        migrations.AddField(
            model_name='rack',
            name='available_storage_vol',
            field=models.DecimalField(decimal_places=10, default='0', max_digits=19),
        ),
        migrations.AddField(
            model_name='rack',
            name='total_storage_vol',
            field=models.DecimalField(decimal_places=10, default='0', max_digits=19),
        ),
    ]
|
from tkinter import *
import tkinter.font as tkfont
from display import *
from internet import *
from app import new, last
class GUI():
def __init__(self, item):
self.root = Tk()
self.liked = False
self.item = item
self.name = "Windows Spotlight"
self.author = "made with ❤ by Shikher Srivastava"
self.bg = '#000000'
self.fg = '#eeeeee'
self.root.like = PhotoImage(file='icons/heart.png')
self.root.dislike = PhotoImage(file='icons/broken-heart.png')
self.root.author = PhotoImage(file='icons/author.png')
self.root.next = PhotoImage(file='icons/arrow-right.png')
self.root.previous = PhotoImage(file='icons/arrow-left.png')
self.root.outline = PhotoImage(file='icons/camera.png')
self.root.bing = PhotoImage(file='icons/bing.png')
self.root.overrideredirect(True)
# self.root.attributes('-topmost', True)
self.root.wm_attributes("-transparentcolor", "yellow")
# self.root.config(bg=self.bg)
self.root.attributes('-alpha', 0.75)
self.frame1 = Frame((self.root), bg='yellow', width=70)
self.frame2 = Frame((self.root), bg=self.bg)
self.like_buttons = Frame(self.frame2, bg=self.bg)
self.navigation_buttons = Frame(self.frame2, bg=self.bg)
self.link_buttons = Frame(self.frame2, bg=self.bg)
self.title_font = tkfont.Font(family='Helvetica',size=20,weight='bold')
self.copyright_font = tkfont.Font(family='Helvetica',size=8,weight='bold')
self.font = tkfont.Font(family='Helvetica',size=12,weight='bold')
self.title = Label(self.frame2, text=self.name, bg=self.bg, fg=self.fg, font=self.title_font)
# self.shikhersrivastava = Label( self.frame2,text=self.author , bg=self.bg, fg=self.fg, font=self.font)
self.copyright = Label(self.frame2, bg=self.bg, fg=self.fg, font=self.font)
self.copyright = Label(self.frame2, bg=self.bg, fg=self.fg, font=self.copyright_font)
self.outline = Button((self.frame1), image=self.root.outline, bg='#ffff00', fg='black', width=280, cursor='hand2', font=self.title_font, command=lambda: self.like(), borderwidth=0)
self.like_button = Button((self.like_buttons), text="Like", image=self.root.like, bg=self.bg, fg='black', cursor='hand2',font=self.font, command=lambda: self.like())
self.dislike_button = Button((self.like_buttons), text='Dislike', image=self.root.dislike, bg=self.bg, fg='black', cursor='hand2',font=self.font, command=lambda: self.dislike())
self.prev_button = Button((self.navigation_buttons), text='Previous', image=self.root.previous, bg=self.bg, fg='black', cursor='hand2',font=self.font, command=lambda: self.previous())
self.next_button = Button((self.navigation_buttons), text='Next', image=self.root.next, bg=self.bg, fg='black', cursor='hand2',font=self.font, command=lambda: self.next())
self.bing_button = Button((self.link_buttons), text='Bing', image=self.root.bing, bg=self.bg, fg=self.fg, cursor='hand2',font=self.font, command=lambda: self.link(self.item["url"]))
self.shikhersrivastava = Button((self.link_buttons), text=self.author, image=self.root.author, bg=self.bg, fg=self.fg, cursor='hand2', font=self.font, command=lambda: self.link("https://www.shikhersrivastava.com/windows-spotlight"))
self.exit_button = Button((self.frame2), text='EXIT', bg="#000000", fg=self.fg, cursor='hand2',font=self.font, command=lambda: self.exit())
self.outline.pack(side=TOP, fill=BOTH)
# Hover Event
self.frame1.bind('<Enter>', lambda event: self.enter(event))
self.frame2.bind('<Leave>', lambda event: self.leave(event))
self.width, self.height, self.y = get_resolution()
self.title_width = self.title_font.measure(self.name)
self.author_width = self.font.measure(self.author)
self.root.geometry("%dx%d+%d+%d" % (40, 40, (self.width-41), self.y))
self.title.pack(side=TOP, fill=BOTH)
self.copyright.pack(side=TOP, fill=BOTH)
self.like_button.pack(side=LEFT)
self.dislike_button.pack(side=RIGHT)
self.like_buttons.pack(side=TOP, fill=BOTH)
self.navigation_buttons.pack(side=TOP, fill=BOTH)
self.link_buttons.pack(side=TOP, fill=BOTH)
self.bing_button.pack(side=LEFT, fill=BOTH)
self.shikhersrivastava.pack(side=RIGHT, fill=BOTH)
self.exit_button.pack(side=BOTTOM, fill=BOTH)
self.prev_button.pack(side=LEFT, fill=BOTH)
self.next_button.pack(side=RIGHT, fill=BOTH)
self.outline.pack()
self.frame1.pack()
self.root.mainloop()
# Utility Functions
def set_text(self):
title= self.item['title']
self.title.config(text=self.item['title'])
self.title_width = self.title_font.measure(title)
copyright= self.item['copyright']
self.copyright.config(text=copyright)
self.copyright_width = self.copyright_font.measure(copyright)
# Callback functions
def link(self, url):
print("LINK")
open_link(url)
def like(self):
print("LIKE")
# self.like_button.config(state="disabled")
# self.dislike_button.config(state="normal")
like(self.item)
self.liked = True
self.update()
def dislike(self):
print("DISLIKE")
# self.dislike_button.config(image=self.root.dislike)
dislike(self.item)
self.liked = False
# self.like_button.config(state="normal")
# self.dislike_button.config(state="disabled")
item = self.next()
self.update()
# self.item = item
# self.set_text()
# self.root.destroy()
def previous(self):
print("PREVIOUS")
self.item = last()
self.set_text()
self.update()
def next(self):
print('NEXT')
self.item = new()
self.set_text()
self.liked = False
self.update()
def exit(self):
self.root.destroy()
# Hover Effects
def enter(self, event):
# on Hover display like and dislike buttons
self.frame1.pack_forget()
self.set_text()
self.frame2.pack()
self.update()
# self.root.geometry("%dx%d+%d+%d" % (280, 230, (self.width-m_len), self.y))
# self.root.winfo_toplevel().wm_geometry("")
def leave(self, event):
# on Hover Stop hide like and dislike buttons
self.frame2.pack_forget()
self.frame1.pack()
self.root.geometry("%dx%d+%d+%d" % (40, 40, (self.width-41), self.y))
# self.root.winfo_toplevel().wm_geometry("")
def update(self):
    """Sync the like button state with ``self.liked`` and resize the window
    to fit the widest piece of text currently displayed."""
    if self.liked:
        self.like_button.config(state="disabled")
    else:
        self.like_button.config(state="normal")
    # Widest of the three measured labels, plus padding.
    net_width = max(self.copyright_width, self.title_width, self.author_width) + 20
    # Fix: use integer division -- Tk widget widths must be integers, and
    # "/" yields a float on Python 3.
    half_width = net_width // 2
    for widget in (self.like_button, self.dislike_button,
                   self.prev_button, self.next_button,
                   self.bing_button, self.shikhersrivastava):
        widget.config(width=half_width)
    self.root.geometry("%dx%d+%d+%d" % (net_width, 300,
                                        self.width - net_width, self.y))
# item = {
# "copyright_text": {"tx": "Michael Jackson, Mississipi"},
# "copyright_text": {"tx": "\u00a9 Kevin Carden / Adobe Stock"},
# "copyright_destination_url": {"t": "url", "u": "https://www.bing.com/search?q=triple+falls+north+carolina&filters=IsConversation%3a%22True%22+BTEPKey:%22Encyclo_WL_TripleFallsNCarolina%22&FORM=EMSDS0"}
# }
# GUI(item, item) |
"""Parse a GPS track and add it to a DecoratedMap."""
from __future__ import print_function
import xml.sax
import os
from motionless import LatLonMarker, DecoratedMap
current_dir = os.path.dirname(os.path.abspath(__file__))
class GPXHandler(xml.sax.handler.ContentHandler):
    """SAX handler that feeds GPX track points into a DecoratedMap.

    Every ``<trkpt>`` is appended to the map's path; a green 'S' marker is
    placed on the first point and a red 'E' marker on the last point of the
    track.
    """

    def __init__(self, gmap):
        self.gmap = gmap   # DecoratedMap being populated
        self.first = True  # True until the first trkpt is seen
        self.prev = None   # (lat, lon) of the most recent trkpt

    def startElement(self, name, attrs):
        if name != 'trkpt':
            return
        lat, lon = attrs['lat'], attrs['lon']
        self.gmap.add_path_latlon(lat, lon)
        self.prev = (lat, lon)
        if self.first:
            self.first = False
            marker = LatLonMarker(lat, lon, color='green', label='S')
            self.gmap.add_marker(marker)

    def endElement(self, name):
        # Fix: guard against a <trk> with no <trkpt> children, which would
        # crash on self.prev[0] with self.prev still None.
        if name == 'trk' and self.prev is not None:
            marker = LatLonMarker(self.prev[0], self.prev[1],
                                  color='red', label='E')
            self.gmap.add_marker(marker)
# Make an empty map and fill it with the GPX track.
munich = DecoratedMap(size_x=640, size_y=640, pathweight=8, pathcolor='blue')
parser = xml.sax.make_parser()
parser.setContentHandler(GPXHandler(munich))
fpath = os.path.join(current_dir, 'gps_track.gpx')
# Fix: parse() replaces feed() -- the old code never called close() on the
# incremental parser, so the trailing SAX events might never be delivered.
parser.parse(fpath)

html_page = """
<html>
<body>
<h2>Munich</h2>
<i>Trip from Schwabing (S) to Airport (E). gps_track.gpx file taken of my
Garmin device and edited down to single track.</i>
<p/>
<p/>
<img src="%s"/>
</body>
</html>
""" % munich.generate_url()

# Write the rendered page next to the working directory.
with open("munich.html", "w", encoding="utf-8") as html:
    html.write(html_page)
print("munich.html file created")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.