index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
988,300 | cb5531a6176e2c6c642a310e4973708266ef0722 | # Given a string, find the first non-repeating character in it and return it's index. If it doesn't exist, return -1.
#
# Examples:
#
# s = "leetcode"
# return 0.
#
# s = "loveleetcode",
# return 2.
# First Approach:
class Solution(object):
    def firstUniqChar(self, s):
        """Return the index of the first non-repeating character in s, or -1.

        :type s: str
        :rtype: int
        """
        # Count occurrences; dict.get replaces the original bare except,
        # which would have swallowed any exception, not just KeyError.
        count = {}
        for element in s:
            count[element] = count.get(element, 0) + 1
        # Unique characters in order of first appearance.
        temp_lst = [i for i in s if count[i] == 1]
        if temp_lst:
            # str.index finds the first occurrence directly; no need to
            # convert s to a list first.
            return s.index(temp_lst[0])
        return -1
# Run Time Exceeds
# Second Approach:
class Solution(object):
    def firstUniqChar(self, s):
        """Return the index of the first non-repeating character in s, or -1.

        :type s: str
        :rtype: int
        """
        count = {}
        for element in s:
            # Catch only KeyError: the original bare except would hide any
            # unrelated failure as well.
            try:
                count[element] += 1
            except KeyError:
                count[element] = 1
        # Single ordered scan: the first character whose count is 1 wins.
        for i, element in enumerate(s):
            if count[element] == 1:
                return i
        return -1
# Other's one line version:
class Solution(object):
    def firstUniqChar(self, s):
        """One-liner: smallest index among characters occurring exactly once, or -1.

        set(s) replaces the original string.ascii_lowercase, which relied on
        an un-imported module and silently ignored any non-lowercase input.
        """
        return min([s.find(c) for c in set(s) if s.count(c) == 1] or [-1])
|
988,301 | b13c3b63d58a7cfdfd968c40c78dbe4a68512fe1 | """
Authors: Sean Moloney and Jon Killinger
Class: CS 415
Assignment: Project 3
Description: This project implents FFT, IFFT and component multiplication.
Executes by running: python2 project3.py
"""
import random
import cmath
import math
import sys
import timeit
import time
#sys.setrecursionlimit(10000000)
def appendZeros(array2append, n):
    """
    Input: Array, number of 0s to append
    Output: The same array, mutated in place, with n zeros appended.
    Description: Uses list.extend with a repeated literal instead of a
    Python-level loop of single appends.
    """
    array2append.extend([0] * n)
    return array2append
def genPoly(n):
    """
    Input: An integer n (the polynomial degree)
    Output: Float array of length 2 * (n + 1)
    Description: Generates a random polynomial of degree n. Coefficients are
    drawn uniformly from [0, 1) (the original docstring wrongly claimed they
    were 0 or 1). The array is padded with n + 1 zeros so the FFT-based
    multiplication has room for the degree-2n product polynomial.
    """
    v = [random.uniform(0, 1) for _ in range(n + 1)]
    v.extend([0] * (n + 1))
    return v
def splitArray(array2split):
    """
    Input: An array
    Output: 2 arrays, one with even-index elements, one with odd-index ones.
    Description: Partitions the input by index parity while preserving the
    relative order of the elements in each half.
    """
    evens = []
    odds = []
    for index, value in enumerate(array2split):
        bucket = evens if index % 2 == 0 else odds
        bucket.append(value)
    return (evens, odds)
def FFT(inputArray, w):
    """
    Input: Array of coefficients, w = the nth root of unity (n = len(inputArray))
    Output: Array with the Fast Fourier Transform performed on it.
    Description: Recursive Cooley-Tukey FFT. Splits the input into even- and
    odd-indexed halves (slicing replaces the splitArray helper), transforms
    each half with w squared, then combines them with the butterfly step.
    Uses // for the midpoint so the index arithmetic stays an integer on both
    Python 2 and Python 3 (the original plain / breaks under Python 3).
    """
    if len(inputArray) == 1:
        return inputArray
    s = FFT(inputArray[::2], w * w)
    sPrime = FFT(inputArray[1::2], w * w)
    half = len(inputArray) // 2
    r = [0] * len(inputArray)
    for j in range(half):
        twiddle = pow(w, j) * sPrime[j]
        r[j] = s[j] + twiddle
        r[j + half] = s[j] - twiddle
    return r
def compMult(u, v):
    """
    Input: 2 arrays of equal length
    Output: One array
    Description: Component-wise (Hadamard) product: result[i] = u[i] * v[i],
    written as a comprehension over zip instead of an index loop.
    """
    return [a * b for a, b in zip(u, v)]
def IFFT(inputArray, myW):
    """
    Input: An array and the nth root of unity
    Output: Inverse Fast Fourier Transform of the array
    Description: Runs the forward FFT, then reorders the result by keeping
    the first element in place and reversing the remaining elements, and
    finally scales every entry by 1/n. Only the real part is kept, rounded
    to two decimal places.
    """
    transformed = FFT(inputArray, myW)
    n = len(transformed)
    # [a0, a(n-1), ..., a1]: first element stays, the rest are reversed.
    reordered = [transformed[0]] + transformed[:0:-1]
    return [round((value * 1 / float(n)).real, 2) for value in reordered]
def slowPolyMult(a, b):
    """
    Input: 2 numeric arrays of equal length
    Output: Their convolution, truncated to len(a) terms, rounded to 2 dp.
    Description: Classic O(n^2) polynomial multiplication: coefficient k of
    the product is the sum of a[j] * b[k - j] for j in 0..k.
    """
    result = []
    for k in range(len(a)):
        coefficient = sum(a[j] * b[k - j] for j in range(k + 1))
        result.append(round(coefficient, 2))
    return result
def main():
    # NOTE: Python 2 script -- input() evaluates the typed expression, and
    # the "print >> f" statements below are Python 2 only syntax.
    degree = input('Enter degree: ')
    v = genPoly(degree)
    u = genPoly(degree)
    # Principal nth root of unity for n = len(v) = 2 * (degree + 1).
    myW = cmath.exp((2*cmath.pi*1j)/len(v))
    start_time = time.time()
    FFTV = FFT(v,myW)
    FFTU = FFT(u,myW)
    # Pointwise product in the frequency domain == convolution in time.
    componentMult = compMult(FFTV,FFTU)
    myIFFT = IFFT(componentMult,myW)
    print("FFT CPU time: ( %s seconds )" % (time.time() - start_time))
    start_time = time.time()
    mySlowConvolution = slowPolyMult(u,v)
    print("Slow Poly Multiply CPU time: ( %s seconds )" % (time.time() - start_time))
    if(degree <= 100):
        # Small inputs: print everything to the console.
        print("v: ",v)
        print("u: ",u)
        print("Result of homegrown inverse DFT convolution",myIFFT)
        print("Result of homegrown n-squared complexity convolution", mySlowConvolution)
    else:
        # Large outputs go to a file instead of the console.
        f = open('out.txt', 'w')
        #TODO: NEWLINES
        print >> f, 'Input degree n:', degree
        print >> f, ' Generated polynomial u:', u
        print >> f, ' Generated polynomial v:', v
        print >> f, ' My FFT convolve:', myIFFT
        print >> f, ' My slow convolve:', mySlowConvolution
main()
|
988,302 | 0f6622c19338d8cd1b2e2a0ac05b08a57531b745 | """Radio frequency types and allocators."""
import itertools
from dataclasses import dataclass
from typing import Dict, Iterator, List, Set
@dataclass(frozen=True)
class RadioFrequency:
    """A radio frequency.

    Not currently concerned with tracking modulation, just the frequency.
    """

    #: The frequency in hertz. (The original comment said "kilohertz", but
    #: __str__ and mhz below unambiguously treat this value as Hz.)
    hertz: int

    def __str__(self):
        if self.hertz >= 1000000:
            return self.format("MHz", 1000000)
        return self.format("kHz", 1000)

    def format(self, units: str, divisor: int) -> str:
        """Render the frequency with the given unit label and divisor."""
        converted = self.hertz / divisor
        if converted.is_integer():
            return f"{int(converted)} {units}"
        return f"{converted:0.3f} {units}"

    @property
    def mhz(self) -> float:
        """Returns the frequency in megahertz.

        Returns:
            The frequency in megahertz.
        """
        return self.hertz / 1000000
def MHz(num: int, khz: int = 0) -> RadioFrequency:
    """Build a RadioFrequency from whole megahertz plus optional kilohertz."""
    total_hertz = num * 1000000 + khz * 1000
    return RadioFrequency(total_hertz)


def kHz(num: int) -> RadioFrequency:
    """Build a RadioFrequency from whole kilohertz."""
    return RadioFrequency(num * 1000)
@dataclass(frozen=True)
class Radio:
    """A radio.

    Defines the minimum (inclusive) and maximum (exclusive) range of the radio.
    """

    #: The name of the radio.
    name: str
    #: The minimum (inclusive) frequency tunable by this radio.
    minimum: RadioFrequency
    #: The maximum (exclusive) frequency tunable by this radio.
    maximum: RadioFrequency
    #: The spacing between adjacent frequencies.
    step: RadioFrequency

    def __str__(self) -> str:
        return self.name

    def range(self) -> Iterator[RadioFrequency]:
        """Returns an iterator over the usable frequencies of this radio."""
        hertz_values = range(self.minimum.hertz, self.maximum.hertz,
                             self.step.hertz)
        return map(RadioFrequency, hertz_values)
class OutOfChannelsError(RuntimeError):
    """Raised when all channels usable by this radio have been allocated."""

    def __init__(self, radio: Radio) -> None:
        message = f"No available channels for {radio}"
        super().__init__(message)
class ChannelInUseError(RuntimeError):
    """Raised when attempting to reserve an in-use frequency."""

    def __init__(self, frequency: RadioFrequency) -> None:
        message = f"{frequency} is already in use"
        super().__init__(message)
# TODO: Figure out appropriate steps for each radio. These are just guesses.
#: List of all known radios used by aircraft in the game.
RADIOS: List[Radio] = [
    Radio("AN/ARC-164", MHz(225), MHz(400), step=MHz(1)),
    Radio("AN/ARC-186(V) AM", MHz(116), MHz(152), step=MHz(1)),
    Radio("AN/ARC-186(V) FM", MHz(30), MHz(76), step=MHz(1)),
    # The AN/ARC-210 can also use [30, 88) and [108, 118), but the current
    # implementation can't implement the gap and the radio can't transmit on the
    # latter. There's still plenty of channels between 118 MHz and 400 MHz, so
    # not worth worrying about.
    Radio("AN/ARC-210", MHz(118), MHz(400), step=MHz(1)),
    Radio("AN/ARC-222", MHz(116), MHz(174), step=MHz(1)),
    Radio("SCR-522", MHz(100), MHz(156), step=MHz(1)),
    Radio("A.R.I. 1063", MHz(100), MHz(156), step=MHz(1)),
    Radio("BC-1206", kHz(200), kHz(400), step=kHz(10)),
    # Note: The M2000C V/UHF can operate in both ranges, but has a gap between
    # 150 MHz and 225 MHz. We can't allocate in that gap, and the current
    # system doesn't model gaps, so just pretend it ends at 150 MHz for now. We
    # can model gaps later if needed.
    Radio("TRT ERA 7000 V/UHF", MHz(118), MHz(150), step=MHz(1)),
    Radio("TRT ERA 7200 UHF", MHz(225), MHz(400), step=MHz(1)),
    # Tomcat radios
    # # https://www.heatblur.se/F-14Manual/general.html#an-arc-159-uhf-1-radio
    Radio("AN/ARC-159", MHz(225), MHz(400), step=MHz(1)),
    # AN/ARC-182 can also operate from 30 MHz to 88 MHz, as well as from 225 MHz
    # to 400 MHz range, but we can't model gaps with the current implementation.
    # https://www.heatblur.se/F-14Manual/general.html#an-arc-182-v-uhf-2-radio
    Radio("AN/ARC-182", MHz(108), MHz(174), step=MHz(1)),
    # Also capable of [103, 156) at 25 kHz intervals, but we can't do gaps.
    Radio("FR 22", MHz(225), MHz(400), step=kHz(50)),
    # P-51 / P-47 Radio
    # 4 preset channels (A/B/C/D)
    # NOTE(review): "SCR522" duplicates the "SCR-522" entry above (different
    # name spelling and a 25 kHz vs 1 MHz step) -- confirm both are intended.
    Radio("SCR522", MHz(100), MHz(156), step=kHz(25)),
    Radio("R&S M3AR VHF", MHz(120), MHz(174), step=MHz(1)),
    Radio("R&S M3AR UHF", MHz(225), MHz(400), step=MHz(1)),
]
def get_radio(name: str) -> Radio:
    """Returns the radio with the given name.

    Args:
        name: Name of the radio to return.

    Returns:
        The radio matching name.

    Raises:
        KeyError: No matching radio was found.
    """
    for radio in RADIOS:
        if radio.name == name:
            return radio
    # Include the requested name in the error so callers can see what was
    # missing (the original raised a bare KeyError with no message).
    raise KeyError(name)
class RadioRegistry:
    """Manages allocation of radio channels.

    Allocation walks each radio's frequency range lazily, skipping anything
    already reserved. Possible future refinements: prefer frequencies usable
    by the fewest radios so wide-band radios like the AN/ARC-210 don't
    exhaust the channels available to narrow ones like the AN/ARC-186(V),
    or allocate with coarser increments first so the low bands don't fill up
    with every flight. In practice there are probably plenty of channels, so
    these can wait.
    """

    # Not a real radio, but useful for allocating a channel usable for
    # inter-flight communications.
    BLUFOR_UHF = Radio("BLUFOR UHF", MHz(225), MHz(400), step=MHz(1))

    def __init__(self) -> None:
        self.allocated_channels: Set[RadioFrequency] = set()
        every_radio = itertools.chain(RADIOS, [self.BLUFOR_UHF])
        self.radio_allocators: Dict[Radio, Iterator[RadioFrequency]] = {
            radio: radio.range() for radio in every_radio
        }

    def alloc_for_radio(self, radio: Radio) -> RadioFrequency:
        """Allocates a radio channel tunable by the given radio.

        Args:
            radio: The radio to allocate a channel for.

        Returns:
            A radio channel compatible with the given radio.

        Raises:
            OutOfChannelsError: All channels compatible with the given radio
                are already allocated.
        """
        for channel in self.radio_allocators[radio]:
            if channel in self.allocated_channels:
                continue
            self.reserve(channel)
            return channel
        raise OutOfChannelsError(radio)

    def alloc_uhf(self) -> RadioFrequency:
        """Allocates a UHF radio channel suitable for inter-flight comms.

        Returns:
            A UHF radio channel suitable for inter-flight comms.

        Raises:
            OutOfChannelsError: All channels compatible with the given radio
                are already allocated.
        """
        return self.alloc_for_radio(self.BLUFOR_UHF)

    def reserve(self, frequency: RadioFrequency) -> None:
        """Reserves the given channel.

        Reserving a channel ensures that it will not be allocated in the future.

        Args:
            frequency: The channel to reserve.

        Raises:
            ChannelInUseError: The given frequency is already in use.
        """
        if frequency in self.allocated_channels:
            raise ChannelInUseError(frequency)
        self.allocated_channels.add(frequency)
|
988,303 | 3690568bb1aced10755a166a66e085a91abbe1f7 | #import our library
# Third-party URL-shortening helper library.
import pyshorteners

# Read the URL to shorten from the user.
url = input("Enter your url: ")

# Shorten via the TinyURL backend (makes a network call) and print it.
print("Your url: ", pyshorteners.Shortener().tinyurl.short(url))
988,304 | 6747cd3f7f70fccfed192ed30d9d18b0a3056e53 | # 2020/03/08 MySQL 練習 4 -- rollback()
#
# MySQL practice 4 -- rollback().
# Every change must be finalized with db.commit(); when a statement fails,
# db.rollback() restores the last committed state.
import pymysql
from os import getpid

# Print this process's pid so the connection can be matched on the server.
print('該程式 pid:', getpid())

db = pymysql.connect(
    host="localhost",     # server to connect to
    port=3306,            # server port
    user="root",          # user name
    password="123456",    # user password
    database='school'     # switch to the school database up front
)

# Rows to insert; the third tuple deliberately carries one extra value so the
# INSERT fails and exercises the rollback path.
new_info = [(5279, '老G樂隊', 1, '1901-1-5', '台灣嗨嗨', 2),
            (5245, '老科樂隊', 0, '1901-1-5', '台灣嗨嗨', 2),
            (5245, '老科樂隊', 0, '1901-1-5', '台灣嗨嗨', 2, 6)]

for data in new_info:
    # NOTE: building SQL by str(data) only works for trusted literal tuples;
    # real input should use parameterized cursor.execute(sql, params).
    new_row = "INSERT into tb_student (stuid, stuname, stusex, stubirth, stuaddr, collid) values " + str(data)
    try:
        with db.cursor() as cursor:  # context manager releases the cursor
            ans = cursor.execute(new_row)
            if ans:
                print(data, '\n輸入成功!\n')
            db.commit()  # commit so the change reaches the database
    except pymysql.Error:
        # Narrowed from a bare except: only database errors roll back to the
        # previous commit; KeyboardInterrupt etc. propagate normally.
        db.rollback()

# Close the connection.
db.close()
|
988,305 | 0479d0485c545b291a3053fe56c4f98df9f445ce | #import requests
# The commented-out Skyscanner request that used to live here was removed:
# it embedded a hard-coded RapidAPI key, which must never be committed to
# source control. If the request is needed again, rebuild it with the key
# loaded from an environment variable (e.g. os.environ["RAPIDAPI_KEY"]).
print("Hello World")
988,306 | 0a8f9b87fee127aa2bba0348dcd26c826bf31606 | import sys
# For each stdin line holding two integers a and b: start at a*b and count
# down until the quotient upperLim / a is <= b - 1, then print upperLim + 1.
# NOTE(review): with Python 3 true division the loop appears to settle at
# upperLim == a * (b - 1), so the printed value is a*b - a + 1 -- confirm
# against the intended problem statement (and the intended interpreter;
# Python 2 floor division would behave differently).
for i in sys.stdin:
    a = i.split()
    nums = [int(a[0]), int(a[1])]
    if( len(nums) == 2):  # always true: nums is built with exactly two items
        upperLim = nums[0] * nums[1]
        test = upperLim / nums[0]
        while( test > nums[1] - 1 ):
            upperLim = upperLim - 1
            test = upperLim / nums[0]
        print(upperLim+1)
|
988,307 | b3e6799de0148a1cf96e08a7b1fa2649241f2b82 | #!/usr/local/bin/python
# author: Gary Wang
import subprocess
import sys
import time
import fcntl
import struct
import termios
import random as rand
class matrix(object):
    """Terminal "Matrix rain" renderer (Python 2 -- uses unichr)."""
    def __init__(self):
        # constants
        # Query the terminal size via the TIOCGWINSZ ioctl on stdout (fd 1).
        self.height, self.width = struct.unpack('hh', fcntl.ioctl(1, termios.TIOCGWINSZ, '1234'))
        # Half width: full-width (CJK) glyphs occupy two terminal columns.
        self.half = int(self.width / 2)
        # ANSI color escape sequences.
        self.green = '\033[92m'
        self.yellow = '\033[93m'
        self.red = '\033[91m'
        self.dark = '\033[90m'
        self.bold = '\033[1m' + self.green
        self.end = '\033[0m'
        self.colors = [self.green, self.yellow, self.red, self.dark, self.bold]
        # variables
        # Full-width grid (ASCII mode) and half-width grid (Japanese mode,
        # filled with U+3000 ideographic spaces).
        self.matrix = [[' ' for col in range(self.width)] for row in range(self.height)]
        self.mat = [['　' for col in range(self.half)] for row in range(self.height)]
        # Per-cell color escape currently applied (colorful mode only).
        self.status = [['' for col in range(self.width)] for row in range(self.height)]
        self.cnt = 0
        # Paint the initial blank screen; sum(..., []) flattens the rows.
        print(self.green + ''.join(sum(self.matrix, [])))
    def get_char(self):
        # Random glyph: ASCII, Latin Extended, or runic, weighted 50/30/20.
        r = rand.random()
        if r > 0.5:
            return unichr(rand.randrange(0x0021, 0x007E))
        elif r > 0.2:
            return unichr(rand.randrange(0x0180, 0x024F))
        else:
            return unichr(rand.randrange(0x16A0, 0x16F0))
    def get_jp(self):
        # Japanese mode glyph: a narrow ASCII char padded to two columns,
        # or a full-width hiragana character.
        r = rand.random()
        if r > 0.5:
            return unichr(rand.randrange(0x0021, 0x007E)) + ' '
        else:
            return unichr(rand.randrange(0x3041, 0x3097))
    def run(self, colorful=False, jp=True, head=False):
        """Advance the animation by one frame and print it."""
        matrix = self.mat if jp else self.matrix
        width = self.half if jp else self.width
        self.cnt += 1
        # keep most of the matrix
        for i in range(self.height - 1, 0, -1):
            for j in range(width - 1, -1, -1):
                # create flow effect: some columns only advance on some frames
                if j % 7 == 0 and self.cnt % 2 != 0:
                    continue
                elif j % 3 == 0 and self.cnt % 3 != 0:
                    continue
                elif j % 2 == 0 and self.cnt % 3 != 0:
                    continue
                # move one step, only generate new char if is not same
                if matrix[i][j].isspace() != matrix[i - 1][j].isspace():
                    if matrix[i - 1][j] == '　':
                        matrix[i][j] = '　'
                    elif matrix[i - 1][j] == ' ':
                        matrix[i][j] = ' '
                    elif colorful:
                        # Inherit the color of the cell above.
                        self.status[i][j] = self.status[i - 1][j]
                        char = self.get_jp() if jp else self.get_char()
                        if head:
                            matrix[i - 1][j] = matrix[i - 1][j].replace(self.end, '')
                            matrix[i][j] = self.status[i][j] + self.end + char
                        else:
                            matrix[i][j] = self.status[i][j] + char
                    else:
                        char = self.get_jp() if jp else self.get_char()
                        if head:
                            matrix[i - 1][j] = matrix[i - 1][j].replace(self.end, '')
                            matrix[i][j] = self.end + char + self.green
                        else:
                            matrix[i][j] = char
        # generate rain on the top row
        for k in range(width):
            r = rand.random()
            if matrix[1][k] == '　' or matrix[1][k] == ' ':
                if r > 0.05:
                    # NOTE(review): writes self.matrix even in jp mode (where
                    # `matrix` is self.mat) -- looks like it was meant to be
                    # matrix[0][k]; confirm before changing.
                    self.matrix[0][k] = '　' if jp else ' '
                elif colorful:
                    char = self.get_jp() if jp else self.get_char()
                    self.status[0][k] = rand.sample(self.colors, 1)[0]
                    matrix[0][k] = self.status[0][k] + char
                else:
                    char = self.get_jp() if jp else self.get_char()
                    matrix[0][k] = char
            else:
                char = self.get_jp() if jp else self.get_char()
                if r > 0.1:
                    matrix[0][k] = char
                else:
                    matrix[0][k] = '　' if jp else ' '
        # control speed
        time.sleep(0.08)
        # generate output
        self.matrix = matrix
        output = []
        for i in matrix:
            line = ''.join(i)
            if jp and self.width % 2 == 1:
                # Odd terminal width: pad the missing final column.
                line += ' '
            output.append(line)
        print(self.green + ''.join(output))
if __name__ == '__main__':
    # Idiomatic membership tests replace the redundant
    # "True if ... else False" conditionals.
    color = 'color' in sys.argv
    jp = 'no-jp' not in sys.argv
    head = 'head' in sys.argv
    rain = matrix()
    # Animate forever; Ctrl-C clears the screen and exits cleanly.
    while True:
        try:
            rain.run(color, jp, head)
        except KeyboardInterrupt:
            subprocess.call(["clear"])
            sys.exit()
|
988,308 | f9c018385247cf917085980e17228ac18781c4f3 | #
# Copyright (c) 2013-2021 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
from django.conf.urls import include # noqa
from django.conf.urls import url
from starlingx_dashboard.dashboards.admin.inventory.cpu_functions import \
views as cpu_functions_views
from starlingx_dashboard.dashboards.admin.inventory.devices import \
views as device_views
from starlingx_dashboard.dashboards.admin.inventory.filesystems import \
views as fs_views
from starlingx_dashboard.dashboards.admin.inventory.interfaces.address import \
views as address_views
from starlingx_dashboard.dashboards.admin.inventory.interfaces.route import \
views as route_views
from starlingx_dashboard.dashboards.admin.inventory.interfaces import \
views as interface_views
from starlingx_dashboard.dashboards.admin.inventory.kubernetes_labels import \
views as label_views
from starlingx_dashboard.dashboards.admin.inventory.lldp import \
views as lldp_views
from starlingx_dashboard.dashboards.admin.inventory.memories import \
views as memory_views
from starlingx_dashboard.dashboards.admin.inventory.ports import \
views as port_views
from starlingx_dashboard.dashboards.admin.inventory.sensors import \
views as sensor_views
from starlingx_dashboard.dashboards.admin.inventory.storages import \
urls as storages_urls
from starlingx_dashboard.dashboards.admin.inventory.storages import \
views as storage_views
from starlingx_dashboard.dashboards.admin.inventory.views import \
AddView
from starlingx_dashboard.dashboards.admin.inventory.views import \
DetailView
from starlingx_dashboard.dashboards.admin.inventory.views import \
IndexView
from starlingx_dashboard.dashboards.admin.inventory.views import \
UpdateView
# Route table for the admin inventory dashboard: host CRUD first, then
# per-host sub-resources (cpus, interfaces, ports, storage, memory,
# sensors, devices, lldp neighbours, labels, filesystems).
urlpatterns = [
    url(r'^$', IndexView.as_view(), name='index'),
    url(r'^(?P<host_id>[^/]+)/detail/$',
        DetailView.as_view(), name='detail'),
    url(r'^create/$', AddView.as_view(), name='create'),
    url(r'^(?P<host_id>[^/]+)/update/$',
        UpdateView.as_view(), name='update'),
    url(r'^(?P<host_id>[^/]+)/editcpufunctions/$',
        cpu_functions_views.UpdateCpuFunctionsView.as_view(),
        name='editcpufunctions'),
    # Interfaces and their addresses/routes.
    url(r'^(?P<host_id>[^/]+)/addinterface/$',
        interface_views.AddInterfaceView.as_view(),
        name='addinterface'),
    url(
        r'^(?P<host_id>[^/]+)/interfaces/(?P<interface_id>[^/]+)/update/$',
        interface_views.UpdateView.as_view(),
        name='editinterface'),
    url(
        r'^(?P<host_id>[^/]+)/interfaces/(?P<interface_id>[^/]+)/detail/$',
        interface_views.DetailView.as_view(),
        name='viewinterface'),
    url(
        r'^(?P<host_id>[^/]+)/interfaces/(?P<interface_id>[^/]+)/addaddress/$',
        address_views.CreateView.as_view(),
        name='addaddress'),
    url(
        r'^(?P<host_id>[^/]+)/interfaces/(?P<interface_id>[^/]+)/addroute/$',
        route_views.CreateView.as_view(),
        name='addroute'),
    # Ports.
    url(
        r'^(?P<host_id>[^/]+)/ports/(?P<port_id>[^/]+)/update/$',
        port_views.UpdateView.as_view(), name='editport'),
    url(
        r'^(?P<host_id>[^/]+)/ports/(?P<port_id>[^/]+)/detail/$',
        port_views.DetailView.as_view(),
        name='viewport'),
    # Storage: volumes, volume groups, physical volumes, partitions.
    url(r'^(?P<host_id>[^/]+)/addstoragevolume/$',
        storage_views.AddStorageVolumeView.as_view(),
        name='addstoragevolume'),
    url(r'^(?P<host_id>[^/]+)/updatememory/$',
        memory_views.UpdateMemoryView.as_view(),
        name='updatememory'),
    url(r'^(?P<host_id>[^/]+)/addlocalvolumegroup/$',
        storage_views.AddLocalVolumeGroupView.as_view(),
        name='addlocalvolumegroup'),
    url(r'^(?P<host_id>[^/]+)/addphysicalvolume/$',
        storage_views.AddPhysicalVolumeView.as_view(),
        name='addphysicalvolume'),
    url(r'^(?P<pv_id>[^/]+)/physicalvolumedetail/$',
        storage_views.DetailPhysicalVolumeView.as_view(),
        name='physicalvolumedetail'),
    url(r'^(?P<lvg_id>[^/]+)/localvolumegroupdetail/$',
        storage_views.DetailLocalVolumeGroupView.as_view(),
        name='localvolumegroupdetail'),
    url(r'^(?P<lvg_id>[^/]+)/storages/',
        include(storages_urls, namespace='storages')),
    # Sensors and sensor groups.
    url(r'^(?P<host_id>[^/]+)/addsensorgroup/$',
        sensor_views.AddSensorGroupView.as_view(),
        name='addsensorgroup'),
    url(r'^(?P<host_id>[^/]+)/sensorgroups/(?P<sensorgroup_id>[^/]+)/'
        'updatesensorgroup/$',
        sensor_views.UpdateSensorGroupView.as_view(),
        name='editsensorgroup'),
    url(r'^(?P<sensor_id>[^/]+)/sensordetail/$',
        sensor_views.DetailSensorView.as_view(),
        name='sensordetail'),
    url(r'^(?P<sensorgroup_id>[^/]+)/sensorgroupdetail/$',
        sensor_views.DetailSensorGroupView.as_view(),
        name='sensorgroupdetail'),
    # Devices.
    url(
        r'^(?P<host_id>[^/]+)/devices/(?P<device_uuid>[^/]+)/update/$',
        device_views.UpdateView.as_view(),
        name='editdevice'),
    url(
        r'^(?P<host_id>[^/]+)/devices/(?P<device_uuid>[^/]+)/detail/$',
        device_views.DetailView.as_view(),
        name='viewdevice'),
    url(
        r'^(?P<device_id>[^/]+)/devices/usage/$',
        device_views.UsageView.as_view(),
        name='viewusage'),
    url(
        r'^(?P<neighbour_uuid>[^/]+)/viewneighbour/$',
        lldp_views.DetailNeighbourView.as_view(), name='viewneighbour'),
    url(r'^(?P<host_id>[^/]+)/storages/(?P<stor_uuid>[^/]+)/'
        'editstoragevolume/$',
        storage_views.EditStorageVolumeView.as_view(),
        name='editstoragevolume'),
    url(r'^(?P<host_id>[^/]+)/createpartition/$',
        storage_views.CreatePartitionView.as_view(),
        name='createpartition'),
    url(r'^(?P<host_id>[^/]+)/storages/(?P<partition_uuid>[^/]+)/'
        'editpartition/$',
        storage_views.EditPartitionView.as_view(),
        name='editpartition'),
    url(r'^(?P<host_id>[^/]+)/assignlabel/$',
        label_views.AssignLabelView.as_view(),
        name='assignlabel'),
    url(r'^(?P<host_id>[^/]+)/updatefilesystems/$',
        fs_views.UpdateFilesystemsView.as_view(),
        name='updatefilesystems')
]
|
988,309 | 96e37c50cbce2b7b3718a71c8f0b931be95b85fb |
from google.cloud import datastore
from constants import PROJECT_ID, DATASTORE_DATA_KIND
# Module-level singleton Datastore client, created lazily on first use.
datastore_client = None


def initialize_datastore_client():
    """Create the shared Datastore client if it does not exist yet (idempotent)."""
    global datastore_client
    if datastore_client is not None:
        return
    datastore_client = datastore.Client(PROJECT_ID)
def save_to_datastore(name, values):
    """Upsert an entity keyed by name, copying every item of values into it."""
    initialize_datastore_client()
    entity_key = datastore_client.key(DATASTORE_DATA_KIND, name)
    entity = datastore.Entity(key=entity_key)
    for field, field_value in values.items():
        entity[field] = field_value
    datastore_client.put(entity)
def get_from_datastore(name):
    """Fetch the entity stored under name (whatever Client.get yields when
    the key is absent -- per the Datastore API that is None)."""
    initialize_datastore_client()
    entity_key = datastore_client.key(DATASTORE_DATA_KIND, name)
    return datastore_client.get(entity_key)
988,310 | 99a1f4e03f69437b656528308403e3b9b1551e66 | import random
from typing import Iterable, Tuple, List
from cities.city import get_cities
from distances.helpers import keep_in_circle,\
DistanceCalculator, BatchCalculator, GPSPoint
from distances.haversine.haversine import haversine, time_estimation
from here_wrapper.wrapper import Routing
def get_random_path(ref_city: 'City', cities: List['City'],
                    distance: float,
                    distance_calculator: DistanceCalculator,
                    batch_calculator: BatchCalculator)\
        -> Tuple['City', 'City']:
    '''
    Create a random path by selecting 2 cities.

    Each selected city lies roughly distance/3 from the reference city, so
    the reference and the two picks form a triangle-shaped route.
    '''
    # One leg of the triangle is a third of the total distance budget.
    _distance = distance / 3
    # Candidate (distance, city) pairs within one leg of the reference city.
    distances_ref = list(keep_in_circle(reference_point=ref_city,
                                        points=cities,
                                        distance_calculator=distance_calculator,
                                        batch_calculator=batch_calculator,
                                        radius=_distance))
    first_city = _get_first_city(distances_ref)
    # Exclude the first pick before searching for the second.
    other_cities = extract_city(first_city.name, cities)[1]
    # Candidate pairs within one leg of the first selected city.
    distances_first = list(keep_in_circle(reference_point=first_city,
                                          points=other_cities,
                                          distance_calculator=distance_calculator,
                                          batch_calculator=batch_calculator,
                                          radius=_distance))
    second_city = _get_second_city(distances_ref, distances_first)
    return (first_city, second_city)
def _get_second_city(l1: List[Tuple[float, 'City']],
l2: List[Tuple[float, 'City']])\
-> 'City':
'''
Get cities in union between lists and return the city farest
'''
points = [] # type: List[Tuple[float, 'City']]
for i in l1:
for j in l2:
if i[1] == j[1]:
points.append((i[0] + j[0], i, j))
break
return max(points, key=lambda x: x[0])[1][1]
def _get_first_city(cities_distances: List[Tuple[float, 'City']],
nb_random: int = 10) -> 'City':
'''
Select the city with the biggest distance from nb_random city select
'''
rd_cities = random.sample(cities_distances, nb_random)
return max(rd_cities, key=lambda c: c[0])[1]
def extract_city(city_name: str, cities: List['City'])\
        -> Tuple['City', List['City']]:
    '''
    Find the city named city_name in cities and return it together with all
    the cities that carry a different name.
    '''
    matching = [candidate for candidate in cities if candidate.name == city_name]
    others = [candidate for candidate in cities if candidate.name != city_name]
    if not matching:
        # Mirrors the original next() with no default on an empty match.
        raise StopIteration
    return (matching[0], others)
def d_calculator(**args) -> float:
    """Estimate travel time in minutes from the haversine distance at 30 km/h."""
    # The Routing-based calculation is intentionally disabled:
    # return Routing().calculate_route(**args)[1]
    return time_estimation(haversine(**args), 30)
def batch_calculator(**args) -> Iterable[Tuple[GPSPoint, float, float]]:
    """Batch route calculation via the HERE Routing wrapper.

    Distances are expressed as travel times in minutes rather than km.
    """
    # Base distance on time in min, not distance in km
    args['select_time'] = True
    return Routing().batch_calculate_route(**args)
if __name__ == '__main__':
    # Demo run: build a triangular path of total "distance" 240 (minutes of
    # travel time, given batch_calculator selects time) starting in Brussels.
    city_name = 'Bruxelles'
    distance = 240
    city, cities = extract_city(city_name, list(get_cities()))
    c1, c2 = get_random_path(ref_city=city, cities=cities,
                             distance=distance,
                             distance_calculator=None,#d_calculator,
                             batch_calculator=batch_calculator)
    print(city, c1.name, c2.name)
|
988,311 | 5eccdad56306755960eb260438b0b15a9288a713 | # @Author: Andrés Gúrpide <agurpide>
# @Date: 20-05-2021
# @Email: agurpidelash@irap.omp.eu
# @Last modified by: agurpide
# @Last modified time: 25-06-2021
# Script to compute extinction map
from astropy.io import fits
import argparse
import os
import numpy as np
import astropy.units as u
import glob
class C00:
    """Calzetti et al. (2000)-style attenuation curve k(lambda)."""
    def __init__(self, Rv):
        # Rv: ratio of total to selective extinction A(V)/E(B-V).
        self.Rv = Rv
    def evaluate(self, wavelength):
        """wavelength must be in wavelength units"""
        # Work in microns; the curve is a piecewise polynomial in 1/micron.
        micron = wavelength.to(u.micron).value
        x = 1 / micron
        # NOTE(review): both masks include exactly 0.63 micron, so for array
        # input the second assignment overwrites the first at the boundary --
        # confirm this is the intended handling.
        optical_indx = np.where(np.logical_and(0.63 <= micron, micron <= 2.20))
        ir_indx = np.where(np.logical_and(0.12 <= micron, micron <= 0.63))
        x = np.asarray(x)
        if x.ndim == 0:
            # Promote scalar input to a 1-element array so len()/indexing work.
            x = x[None]
        k = np.empty(len(x))
        k[optical_indx] = 2.659 * (-1.857 + 1.040 * x) + self.Rv
        k[ir_indx] = 2.659 * (-2.156 + 1.509 * x - 0.198 * x**2 + 0.011 * x**3) + self.Rv
        return k
# Command-line interface.
ap = argparse.ArgumentParser(description='Calculate an extinction map using the extinction curve of Calzetti for a given value of R')
ap.add_argument("-r", "--rootname", nargs='?', help="Root name of the input line files", type=str, default="cleancamel")
ap.add_argument("-R", "--ratio", help="Ratio of total to selective extinction Av/E(B-V)", type=float, default=4.05, nargs="?")
ap.add_argument("-i", "--intrinsic", help="Intrinsic Balmer decrement ratio", type=float, default=2.86, nargs="?")
ap.add_argument("-b", "--hbeta", help="Path to HBETA flux maps", type=str, required=True, nargs=1)
ap.add_argument("-a", "--halpha", help="Path to HALPHA flux maps", type=str, required=True, nargs=1)
args = ap.parse_args()
# Rest wavelengths of the Balmer lines.
halpha = 6563.0 * u.angstrom
hbeta = 4861.0 * u.angstrom
outdir = "deredden_momcheva"
# Emission lines whose flux maps will be dereddened below.
lines = ['OII3727', 'OII3729', 'HBETA', 'NII6548', 'NII6583', 'SII6716', 'SII6731', 'OIII4959', 'OIII5007', 'HALPHA']
if not os.path.isdir(outdir):
    os.mkdir(outdir)
# Rv = Av/E(B-V), the total-to-selective extinction ratio (the original
# comment had the fraction inverted; the -R help text is the authority).
Rv = args.ratio
curve = "calzetti"
extincton_curve = C00(Rv)
# Locate the flux maps; the [!e] glob excludes the error-map (eflux) files.
halpha_file = glob.glob('%s/*_*[!e]flux_*%s.fits' % (args.halpha[0], "HALPHA"))[0]
hbeta_file = glob.glob('%s/*_*[!e]flux_*%s.fits' % (args.hbeta[0], "HBETA"))[0]
print("HALPHA file found: %s" % halpha_file)
print("HBETA file found: %s" % hbeta_file)
halpha_map = fits.open(halpha_file, extname="IMAGE")
if halpha_map[0].header["WCSAXES"] == 3:
    # Collapse a degenerate spectral axis so the WCS is purely spatial.
    halpha_map[0].header["WCSAXES"] = 2
hbeta_map = fits.open(hbeta_file, extname="IMAGE")
# Observed Balmer decrement Halpha/Hbeta per pixel.
observed_ratios = halpha_map[0].data / hbeta_map[0].data
# get E(beta-halpha)
color_excess = 2.5 * np.log10(observed_ratios / args.intrinsic)
color_excess_map = fits.PrimaryHDU(data=color_excess, header=halpha_map[0].header)
color_excess_map.writeto("%s/decrement_color_excess.fits" % outdir, overwrite=True)
# E(alpha - beta) / A(V)
kbeta = extincton_curve.evaluate(hbeta)
kalpha = extincton_curve.evaluate(halpha)
curve_color_excess = (kbeta - kalpha)
print("Extinction curve at hbeta")
print(kbeta)
print("Extinction curve at Halpha")
print(kalpha)
print("k(beta) - k(halpha)")
print(curve_color_excess)
# E(B-V) map: decrement color excess divided by the curve differential.
Ebv = color_excess / curve_color_excess
color_excess_map = fits.PrimaryHDU(data=Ebv, header=halpha_map[0].header)
color_excess_map.writeto("%s/color_excess_%s_Rv%.1f_i%.2f.fits" % (outdir, curve, Rv, args.intrinsic), overwrite=True)
print("Color excess map saved to %s/total_color_excess.fits" % outdir)
print("Mean color excess: %.2f" % np.nanmean(Ebv))
# compute uncertainties
halpha_efile = glob.glob('%s/*_*eflux_*%s.fits' % (args.halpha[0], "HALPHA"))[0]
hbeta_efile = glob.glob('%s/*_*eflux_*%s.fits' % (args.hbeta[0], "HBETA"))[0]
print("HALPHA error file found: %s" % halpha_efile)
print("HBETA error file found: %s" % hbeta_efile)
halpha_emap = fits.open(halpha_efile, extname="IMAGE")
hbeta_emap = fits.open(hbeta_efile, extname="IMAGE")
# derivative of log10(x) --> x'/x log10e (derivative of dHa/Hb/dHa = 1/Hb; d(Ha/Hb)/dHb = Ha/Hb^2
err_ebv = 2.5 * np.sqrt((np.log10(np.e) * (1 / halpha_map[0].data) * halpha_emap[0].data) ** 2 + (np.log10(np.e) * (1 / hbeta_map[0].data) * hbeta_emap[0].data)**2) / curve_color_excess
ecolor_excess_map = fits.PrimaryHDU(data=err_ebv, header=halpha_map[0].header)
ecolor_excess_map.writeto("%s/ecolor_excess_%s_Rv%.1f_i%.2f.fits" % (outdir, curve, Rv, args.intrinsic), overwrite=True)
# Deredden every configured emission-line flux map pixel by pixel.
for line in lines:
    print("\tDereddening %s line" % line)
    wavemap = glob.glob('./camel_*/cleaned_images/%s_*[!e]wave_*%s.fits' % (args.rootname, line))
    if len(wavemap) == 0:
        print("Line map for %s line not found" % line)
        continue
    wavemap = wavemap[0]
    fluxmap = glob.glob('./camel_*/cleaned_images/%s_*[!e]flux_*%s.fits' % (args.rootname, line))[0]
    print("Using wavelength map: %s" % wavemap)
    print("Using flux map: %s" % fluxmap)
    wavelenghts = fits.open(wavemap, extname="IMAGE")[0].data
    fluxes_fits = fits.open(fluxmap, extname="IMAGE")[0]
    fluxes = fluxes_fits.data
    header = fluxes_fits.header
    # deredden fluxes
    for x in range(wavelenghts.shape[0]):  # loop on the x range
        for y in range(wavelenghts.shape[1]):  # loop on the y range
            if np.isnan(Ebv[x, y]):
                fluxes[x, y] = np.nan
            else:
                # F_corr = F_obs * 10^(0.4 * k(lambda) * E(B-V))
                fluxes[x, y] = fluxes[x, y] * 10 ** (0.4 * extincton_curve.evaluate(wavelenghts[x, y] * u.angstrom) * Ebv[x, y])
    dereddened_fits = fits.PrimaryHDU(data=fluxes, header=header)
    dereddened_fits.header['CURVE'] = "%s" % curve
    dereddened_fits.header['R_v'] = "%.1f" % args.ratio
    dereddened_fits.header['COMMENT'] = "Balmer ratio: %s/%s" % (halpha_file, hbeta_file)
    outfile = fluxmap.replace(".fits", "deredden.fits")
    dereddened_fits.writeto(outfile, overwrite=True)
    # NOTE(review): the command file is rewritten on every loop iteration
    # with identical content -- probably intended to run once after the loop.
    python_argument = "%s -R %.1f -i %.1f -a %s -b %s" % (__file__, Rv, args.intrinsic, args.halpha[0], args.hbeta[0])
    with open("%s/python_command.txt" % outdir, "w+") as f:
        f.write(python_argument)
    print("Dereddened flux map for line %s and stored it to %s" % (line, outfile))
|
988,312 | a01923acaf864bc63e4b07b3af8a7ca2356af469 | def towerofhanoi(n,beg,end,aux):
    # Print the moves that transfer n discs from rod beg to rod end via aux.
    if n==1:
        print('move disc',n,'from rod',beg,'to rod',end)
        return
    else:
        # Move n-1 discs out of the way, move the largest, then restack.
        towerofhanoi(n-1,beg,aux,end)
        print('move disc',n,'from rod',beg,'to rod',end)
        towerofhanoi(n-1,aux,end,beg)
# Script entry: read the disc count and solve from rod A to rod C.
n=int(input('enter no of disks '))
towerofhanoi(n,'A','C','B')
|
988,313 | 06231e7803a9ad774bc5cd99b2ea14a9f34d1188 | N = int(input())
# Competitive-programming style task: temperature at elevation H[i] is
# modelled as T - 0.006 * H[i]; print the 1-based index of the location
# whose temperature is closest to target A. (N is read on the line above.)
T,A = map(int,input().split())
H = list(map(int,input().split()))
C = 999  # best |difference| so far; assumes it stays below 999 -- TODO confirm
for i in H:
    tmp = T - 0.006 * i  # 0.006 degrees lost per metre of elevation
    a = abs(tmp - A)
    if a < C:
        C = a
        # NOTE(review): H.index(i) returns the FIRST occurrence of a
        # duplicate height; consistent with the strict < above, but verify.
        x = int(H.index(i))+1
print(x)
988,314 | 8adc475573705442312a86272f0b6d9e313f765f | def merge(P,Q):
    # Merge two ascending lists P and Q into one ascending list.
    # NOTE: Python 2 file (print statement below, raw_input at the bottom).
    ip = 0
    iq = 0
    res = []
    while True:
        if ip >= len(P) or iq >= len(Q):
            break
        if P[ip] > Q[iq]:
            res.append(Q[iq])
            iq += 1
        elif P[ip] <= Q[iq]:
            res.append(P[ip])
            ip += 1
    # Append whichever side still has elements remaining.
    if ip < len(P):
        res.extend(P[ip:])
    if iq < len(Q):
        res.extend(Q[iq:])
    # Python 2 print statement tracing each merge step.
    print str(P) + "+"+str(Q) + "=" +str(res)
    return res
def merge_sort(A):
    """Sort list A with recursive merge sort and return the sorted list.

    Uses floor division (//) for the midpoint so the slice indices stay
    integers on Python 3 — the original's plain `/` returns a float on
    Python 3 and raises TypeError when used as a slice bound.
    """
    if len(A) <= 1:
        # Empty and single-element lists are already sorted.
        return A
    mid = len(A) // 2
    left = merge_sort(A[:mid])
    right = merge_sort(A[mid:])
    return merge(left, right)
# Python 2 driver: read a space-separated list of ints and print it sorted.
# (raw_input and the print statement are Python-2-only.)
arr = map(int,str(raw_input()).split(" "))
print str(merge_sort(arr))
|
988,315 | 1734c852a4c9bbf477864a086ad4b08da4e9d99b | from sqlalchemy import Column, Integer, String
from app.models.base import Base
class Lab(Base):
    """ORM model for a lab entry (table name/metadata come from Base)."""
    id = Column(Integer, primary_key=True, index=True)  # surrogate key
    name = Column(String)     # short identifier
    title = Column(String)    # display title (presumably; confirm against UI usage)
    content = Column(String)  # body text
|
988,316 | 4e65916ed88ac12e8ca70137648ad8077f5b70f1 | from Rank import Rank
from functools import total_ordering
class HandIterator:
    """Sequential iterator over the cards of a Hand.

    Implements the full iterator protocol: ``__iter__`` returns self, so the
    iterator works in ``for`` loops and ``list()``. The original only defined
    ``__next__``, which made ``iter()`` on it fail.
    """

    def __init__(self, hand):
        self._hand = hand
        self._index = 0  # position of the next card to yield

    def __iter__(self):
        # An iterator must return itself from __iter__.
        return self

    def __next__(self):
        if self._index < (len(self._hand)):
            self._index += 1
            return self._hand[self._index - 1]
        raise StopIteration
@total_ordering
class Hand:
    """A collection of Cards with poker-style ranking.

    ``self.value`` caches the hand's Rank and is recomputed on draw/discard.
    Ordering compares that value, with card-by-card tie-breaking and special
    handling for Ace-low straights.
    """

    def __init__(self, *args):
        # Accept zero or more Card positional arguments.
        self._cards = [] if len(args) == 0 else [arg for arg in args]
        # Only evaluate when cards were given; an empty Hand has no value.
        self.value = self._set_value() if len(args) > 0 else None

    def __repr__(self):
        return ''.join([str(x) for x in self._cards if x is not None])

    def __str__(self):
        return ''.join([str(x) for x in self._cards if x is not None])

    def __iter__(self):
        return HandIterator(self._cards)

    def __getitem__(self, key=None):
        # NOTE(review): the default key=None can never be used — None fails the
        # isinstance check below and raises TypeError, so the `key is None`
        # branch is unreachable. Also key == len(_cards) passes this guard;
        # the list indexing below still raises IndexError in that case.
        if not isinstance(key, int):
            raise TypeError
        if key > len(self._cards):
            raise IndexError
        if key is None:
            key = 0
        return self._cards[key]

    def __len__(self):
        return len(self._cards)

    def __eq__(self, other):
        # Only same-sized hands are comparable.
        if len(self) != len(other):
            raise ValueError(f'Hands must have equal length.')
        # split ties
        if self.value == other.value:
            # Equal rank: hands are equal only if the sorted cards match.
            return self._cards == other.get_cards()
        return False

    def __lt__(self, other):
        if len(self) != len(other):
            raise ValueError(f'Hands must have equal length.')
        # split ties
        if self.value == other.value:
            # test for Ace-low straight
            lo_self = (self._is_straight()) and (self[0].rank == Rank.ACE) and (self[1].rank == Rank.FIVE)
            lo_other = (other._is_straight()) and (other[0].rank == Rank.ACE) and (other[1].rank == Rank.FIVE)
            # An Ace-low straight loses to any straight starting at SIX or higher.
            if lo_self and other.get_cards()[0].rank >= Rank.SIX:
                # print('self')
                return True
            elif lo_other and self._cards[0].rank >= Rank.SIX:
                # print('other')
                return False
            # otherwise compare Ranks card by card (already sorted from _set_value)
            for a, b in zip(self, other):
                if a < b:
                    return True
        return self.value < other.value

    def get_cards(self):
        """ :return: reference to List of Cards in Hand """
        return self._cards

    def draw(self, card):
        """ Add a (Card) to the Hand
        :param card: (Card)
        """
        self._cards.append(card)  # add card to hand
        self.value = self._set_value()  # determine value

    def discard(self, index=0):
        """ Remove a (Card) from the Hand at given index (default 0)
        :param index: (Integer)
        :return: (Card)
        """
        try:
            card = self._cards.pop(index)
            self.value = self._set_value()
            return card
        except IndexError:
            raise IndexError

    def _is_straight(self):
        """ :return: True if Hand is a Straight """
        # straight == Ranks of n, n-1, n-2, n-3, n-4
        # test for Ace-Low Straight (A,5,4,3,2 after the rank/count sort)
        if ((self[0].rank == Rank.ACE) and (self[1].rank == Rank.FIVE) and (self[2].rank == Rank.FOUR)
                and (self[3].rank == Rank.THREE) and (self[4].rank == Rank.TWO)):
            return True
        # Otherwise every adjacent pair must differ by exactly one rank.
        return all((Rank(self[x].rank) == Rank(self[x + 1].rank + 1) for x in range(len(self) - 1)))

    def _is_flush(self):
        """ :return: True if Hand is a Flush """
        # flush == Suit(0) = Suit(1) .... = Suit(n)
        suit = self._cards[0].suit
        return all((card.suit == suit for card in self._cards[1:]))

    def evaluate_hand(self):
        """ :return: Rank of Hand """
        if len(self) == 0:
            return None
        # Straights/flushes only make sense for exactly 5 cards.
        if len(self) == 5:
            straight = self._is_straight()
            flush = self._is_flush()
            if straight and flush:
                return Rank.STRAIGHT_FLUSH
            elif flush:
                return Rank.FLUSH
            elif straight:
                return Rank.STRAIGHT
        # create a tuple of the ranks for each card in the hand
        # (count(card) relies on Card equality — presumably rank-based; confirm)
        ranks = tuple(self._cards.count(card) for card in self._cards)
        if 4 in ranks:
            return Rank.FOUR_OF_A_KIND
        elif 3 in ranks and 2 in ranks or ranks.count(3) >= 6:
            # 2nd condition covers hands with >5 Cards
            return Rank.FULL_HOUSE
        elif 3 in ranks:
            return Rank.THREE_OF_A_KIND
        elif 2 in ranks and ranks.count(2) >= 4:
            # Each pair contributes two 2-entries, so two pairs -> count of 4.
            return Rank.TWO_PAIR
        elif 2 in ranks and ranks.count(2) == 2:
            return Rank.ONE_PAIR
        else:
            # High card: the top card's own rank after sorting.
            return self._cards[0].rank

    def _set_value(self):
        """ :return: Rank value of Hand """
        if len(self._cards) == 0:
            return None
        # Sort Cards in Hand by Rank, then by count
        self._cards = sorted(self._cards, reverse=True, key=lambda x: (self._cards.count(x), x.rank))
        # identify best possible 5-card hand from cards given
        # Flushes, Straights, & Straight-Flushes are only possible in Hands of 5 Cards
        # ie. no 6-card straights, just 2 straights in 1 hand eg. (2,3,4,5,6,7) == (2,3,4,5,6) and (3,4,5,6,7)
        # so, return the higher scoring combination
        if len(self._cards) <= 5:
            return self.evaluate_hand()
        else:
            from itertools import combinations
            combos = combinations(self._cards, 5)
            best_result = self._cards[0].rank
            # Evaluate every 5-card subset and keep the best rank.
            for combo in combos:
                h = Hand(*combo)
                temp_result = Hand.evaluate_hand(h)
                if temp_result > best_result:
                    best_result = temp_result
                # add test for high card with equal results
            return best_result
|
988,317 | 29012dadb46ae840f1ac7a2abf795fb522205812 | #!/usr/bin/env python3
print(sum(i == j for i, j in zip(*open(0).read().split())))
|
988,318 | e022921a41dcd73921a01f784c979202d803c5f1 | """
Мини сервер с использованием asyncio.
Из-за нового синтаксиса async/await нужен python 3.5 для запуска
Получает запрос в формате MSGPACK,
Проверяет поле test_date на соответствие заданному формату
Отсылает результат проверки в формате msgpack
"""
import re
import msgpack
from aiohttp import web
from msgpack.exceptions import UnpackException
"""
Регулярное выражение, которым будем проверять входящие данные.
т.к. в условии сказано, что это дата в формате день, месяц, год, я решил проверять также и правильность даты.
иначе можно подать серверу дату в формате 12.31.2016 (месяц, день, год) и он скажет, что всё ок.
И на 99.99.9999 тоже сказал бы, что всё ок.
"""
CHECKER = re.compile(r'(?P<day>[0-9]{2}).(?P<month>[0-9]{2}).[0-9]{4}')
# И сразу сделаем две заготовки для ответов, чтобы каждый запрос не вызывать msgpack
ANSWER_OK = msgpack.packb({'result': 'ok'})
ANSWER_ERR = msgpack.packb({'result': 'error'})
async def handle_post(request):
    """Validate the ``test_date`` field of an msgpack-encoded POST body.

    Replies with the pre-packed {'result': 'ok'} / {'result': 'error'} body,
    or HTTP 400 when the request cannot be decoded or lacks a usable
    ``test_date`` string.
    """
    # Fetch the raw request body.
    data = await request.read()
    # Unpack msgpack; a "broken" message yields a 400 (Bad Request).
    try:
        data = msgpack.unpackb(data, encoding='utf-8')
    except UnpackException:
        return web.HTTPBadRequest()
    # A body without a 'test_date' string is also a bad request. The original
    # crashed here with an unhandled KeyError/TypeError -> HTTP 500.
    try:
        test_date = data['test_date']
    except (KeyError, TypeError):
        return web.HTTPBadRequest()
    if not isinstance(test_date, str):
        return web.HTTPBadRequest()
    # Check the field format and send the matching answer.
    check_result = CHECKER.search(test_date)
    if check_result:
        # Additionally require day <= 31 and month <= 12.
        if int(check_result.group('day')) <= 31 and int(check_result.group('month')) <= 12:
            return web.Response(body=ANSWER_OK)
    return web.Response(body=ANSWER_ERR)
# Create the server application.
app = web.Application()
# Register the request handler.
app.router.add_route('POST', '/', handle_post)
# Run the server on port 9999.
# The server shuts down gracefully on Ctrl+C, answering all requests that
# had already arrived at shutdown time.
web.run_app(app, port=9999)
|
988,319 | 7da6e3c8461e4074b097bd71e0f437c1cb31163c | from typing import Tuple
import pandas as pd
import numpy as np
from scipy import stats
def remove_outliers(df: pd.DataFrame, numerical_variables: list, strategy: str = 'IQR') -> pd.DataFrame:
    """Remove rows of the input dataframe having at least one variable with outlier

    Args:
        df (pd.DataFrame): Input dataframe
        numerical_variables (list): list of the numerical variables names in the input dataframe
        strategy (str, optional): IQR or z-score outlier strategy. Defaults to 'IQR'.

    Returns:
        pd.DataFrame: transformed dataframe
    """
    assert strategy in [
        'IQR', 'z-score'], "You must choose IQR or z-score strategy"
    df_numerical = df[numerical_variables]
    if strategy == "IQR":
        # Tukey's fences: a value is an outlier outside [Q1-1.5*IQR, Q3+1.5*IQR].
        Q1 = df_numerical.quantile(0.25)
        Q3 = df_numerical.quantile(0.75)
        IQR = Q3 - Q1
        is_outlier = (df_numerical < (Q1 - 1.5 * IQR)
                      ) | (df_numerical > (Q3 + 1.5 * IQR))
    else:  # 'z-score'
        # BUGFIX: compute z-scores on the numerical columns only — the original
        # ran stats.zscore on the whole frame, which fails on non-numeric
        # columns. Also flag a row when ANY variable is an outlier: the
        # original's .all(axis=1) contradicted the documented behavior.
        z = np.abs(stats.zscore(df_numerical))
        is_outlier = pd.DataFrame(z >= 3, index=df_numerical.index,
                                  columns=df_numerical.columns)
    outliers = df_numerical[is_outlier.any(axis=1)]
    return df.drop(outliers.index, axis=0)
def replace_rare_labels(df: pd.DataFrame, categorical_variables: list, percentage_rare: float = 0.01) -> pd.DataFrame:
    """Gather rare labels under the same label name **Rare**.

    We will call rare labels for a categorical variable, a label that is shared
    by less than a certain percentage of the instances.
    Note: the input dataframe is modified in place and also returned.

    Args:
        df (pd.DataFrame): Input dataframe
        categorical_variables (list): list of the categorical variables names in the input dataframe
        percentage_rare (float, optional): Threshold percentage of occurency of a label. Defaults to 0.01.

    Returns:
        pd.DataFrame: transformed dataframe
    """
    for var in categorical_variables:
        # Fraction of rows carrying each label.
        per_occ = df[var].value_counts() / len(df)
        # BUGFIX: use the percentage_rare parameter — the original hard-coded
        # 0.01 here, silently ignoring the argument.
        rare_labels = per_occ[per_occ < percentage_rare].index.tolist()
        # Collapse the rare labels into the single sentinel label 'Rare'.
        df[var] = np.where(df[var].isin(rare_labels),
                           'Rare',
                           df[var])
    return df
def summarize_common_variables(df1: pd.DataFrame, df2: pd.DataFrame, print_of: bool = True) -> Tuple[list]:
    """Check the difference in variables between two dataframes

    Args:
        df1, df2 (pd.DataFrame): The two dataframes to compare

    Returns:
        tuple(list): A tuple of 3 lists:
            - the variables names in common
            - the variables in df1 but not in df2
            - the variables in df2 but not in df1
    """
    cols1 = list(df1.columns)
    cols2 = list(df2.columns)
    common_vars = [name for name in cols1 if name in cols2]
    vars_df1_not_df2 = [name for name in cols1 if name not in cols2]
    vars_df2_not_df1 = [name for name in cols2 if name not in cols1]
    if print_of:
        # Summary — one section per list, separated by a blank line, exactly
        # as the original printed it.
        sections = (
            ('In common', common_vars),
            ('In df1 and not in df2', vars_df1_not_df2),
            ('In df2 and not in df1', vars_df2_not_df1),
        )
        for pos, (label, names) in enumerate(sections):
            if pos:
                print('\n')
            print(f'{label}: {len(names)}')
            print(names)
    return common_vars, vars_df1_not_df2, vars_df2_not_df1
def complete_one_hot_variables(df: pd.DataFrame, var_names: list) -> pd.DataFrame:
    """Complete missing variables in a dataframe by columns of 0

    Args:
        df (pd.DataFrame): Input dataframe
        var_names (list): List of missing variables names

    Returns:
        pd.DataFrame: Dataframe with new variables
    """
    # Work on a copy so the caller's dataframe is left untouched.
    completed = df.copy()
    for missing_var in var_names:
        completed[missing_var] = 0  # constant all-zero column
    return completed
|
988,320 | 7599bf414480dc5086a14a17d451ad39f90119e0 | class Solution:
def countSquares(self, matrix: List[List[int]]) -> int:
res=0
if len(matrix)==0:
return 0
for i in range(len(matrix)):
for j in range(len(matrix[0])):
if matrix[i][j]==1:
if i==0 or j==0:
res+=1
else:
m=min(matrix[i-1][j-1],matrix[i-1][j],matrix[i][j-1])
matrix[i][j]=m+1
res+=matrix[i][j]
return res
|
988,321 | fd6cede6fde193ec7ac76c627f49d0ea2ba6d57c | import streamlit as st
import time
from load_css import local_css
import os, sqlite3
import pandas as pd
# Page chrome: set_page_config must run before any other Streamlit call.
st.set_page_config(
    page_title="SPI:Calc",
    page_icon="😸",
    layout="centered",
    initial_sidebar_state="collapsed",
)
# Inject the 'highlight'/'bold' CSS classes used by the markdown below.
local_css("style.css")
############### SQLITTE ###############
def create_table(db_path='spidata.db'):
    """Create the spilog table if it does not exist yet.

    Args:
        db_path: SQLite database file. Defaults to the app's spidata.db so
            existing callers are unchanged; parameterized to allow testing.
    """
    conn = sqlite3.connect(db_path)
    try:
        conn.execute("CREATE TABLE IF NOT EXISTS spilog(unix REAL, uname TEXT, spi TEXT, sem TEXT)")
        conn.commit()
    finally:
        # Always release the connection, even if the DDL fails.
        conn.close()
def data_entry(unix, uname, spi, sem, db_path='spidata.db'):
    """Insert one SPI log row.

    Uses a parameterized query: the original interpolated user input into the
    SQL with an f-string (SQL-injectable; the manual quote doubling only
    papered over single quotes).

    Args:
        unix: timestamp (stored in a REAL column).
        uname: user-entered name.
        spi: SPI value as a string.
        sem: semester label.
        db_path: SQLite database file (defaults to the app's spidata.db).
    """
    conn = sqlite3.connect(db_path)
    try:
        conn.execute("INSERT INTO spilog VALUES(?, ?, ?, ?)",
                     (unix, uname, spi, sem))
        conn.commit()
    finally:
        conn.close()
def read_data(sem, db_path='spidata.db'):
    """Return all spilog rows for the given semester.

    Uses a parameterized query (the original interpolated ``sem`` straight
    into the SQL) and closes the connection, which the original leaked.

    Args:
        sem: semester label to filter on.
        db_path: SQLite database file (defaults to the app's spidata.db).

    Returns:
        list of (unix, uname, spi, sem) tuples.
    """
    conn = sqlite3.connect(db_path)
    try:
        return conn.execute("SELECT * FROM spilog WHERE sem = ?", (sem,)).fetchall()
    finally:
        conn.close()
############## SQLITE ################
if "spidata.db" not in os.listdir():
create_table()
SEM = st.sidebar.radio("",("Semester-2","Semester-1","CPI"))
if SEM == "Semester-2":
placeTitle = st.empty()
placeTitle.markdown(
f"<div style='font-size: 50px;;color:grey;font-family:orbitron;'><center><b>SPI Calculator</b></center></div>",
unsafe_allow_html=True)
st.markdown(
f"<div style='font-size: 12px;'><center>By <a href='https://github.com/cmdev007/'><span class='highlight blue'><span class='bold'>cMDev007</span></span></a></center></div>",
unsafe_allow_html=True)
st.markdown("---")
placeInfo = st.empty()
placeInfo.info("For CPI check sidebar")
app_state = st.experimental_get_query_params()
if ("nInput" not in app_state.keys()) or ("elec" not in app_state.keys()):
with st.beta_expander("SPI Calculator",expanded=True):
with st.form(key='columns_in_form'):
nInput = st.text_input("Your name please (keep empty if want to hide identity)")
elec = st.selectbox("Technical Elective", ("","Information Retrieval","No SQL Databases","Advanced Image Processing","Multimedia Security and Forensics"))
submitted = st.form_submit_button('Submit')
if submitted:
if elec!="":
if nInput.strip()=="":
nInput = "anonymous"
else:
nInput = nInput.lower().strip()
st.experimental_set_query_params(**{"nInput": nInput, "elec": elec})
else:
st.markdown(
f"<div style='font-size: 16px;'><center><span class='highlight green'><span class='bold'>Please select Technical Elective</span></span></center></div>",
unsafe_allow_html=True)
st.markdown("")
app_state = st.experimental_get_query_params()
if ("nInput" in app_state.keys()) and ("elec" in app_state.keys()):
placeTitle.markdown(f"<div style='font-size:30px;color:grey;font-family:orbitron;'>\
<center><b>SPI Calculator</b></center></div>",
unsafe_allow_html=True)
placeInfo.empty()
app_state = st.experimental_get_query_params()
allSub = ["Big Data Processing", "Machine Learning", "Numerical Methods for Data Science", "Optimization",
"SAS based Mini Project - 1"]
allSub.extend(app_state["elec"])
nInput = app_state["nInput"][0]
st.write(f"Hello, {nInput.title()}! Please enter your grades.")
subIndex = {}
subCred = {"Big Data Processing":3, "Machine Learning":4, "Numerical Methods for Data Science":4,
"Optimization":3, "SAS based Mini Project - 1":2, "Information Retrieval":4,
"No SQL Databases":4, "Advanced Image Processing":4, "Multimedia Security and Forensics":3}
gradeNum = {"":0,"AA":10,"AB":9,"BB":8,"BC":7,"CC":6,"CD":5,"DD":4}
for i in allSub:
subIndex[i] = ""
with st.form(key='gradedata'):
for i in allSub:
subIndex[i] = gradeNum[st.select_slider(i, ("AA","AB","BB","BC","CC","CD","DD"))]
submitted = st.form_submit_button('Submit')
if submitted:
Numerator = 0
Denominator = 0
MFleg = False
for i in allSub:
if subIndex[i] == 0:
MFleg = True
break
Numerator += subIndex[i]*subCred[i]
Denominator += 10*subCred[i]
if not MFleg:
SPI = str(round((Numerator*10/Denominator),2))
st.markdown(f"<div style='font-size: 16px;'><center>Your SPI: <span class='highlight green'><span class='bold'>{SPI}</span></span></center></div>", unsafe_allow_html=True)
data_entry(int(time.time()), nInput, SPI, SEM)
else:
st.markdown(f"<div style='font-size: 16px;'><center>Please select grades of <span class='highlight green'><span class='bold'>All Subjects!</span></span></center></div>", unsafe_allow_html=True)
st.markdown("---")
# Leaderboard
with st.beta_expander("Leader Board", expanded=False):
# st.markdown(
# f"<div style='font-size: 20px;'><center><span class='highlight green'><span class='bold'>Coming Soon!</span></span></center></div>",
# unsafe_allow_html=True)
allSPI = []
for i in read_data(SEM):
allSPI.append(float(i[2]))
allSPI.sort(reverse=True)
allName = [f"Classmate {i+1}" for i in range(len(allSPI))]
st.write(pd.DataFrame({
'Anonymous Name': allName,
'SPI': allSPI,
}))
if SEM == "Semester-1":
placeTitle = st.empty()
placeTitle.markdown(
f"<div style='font-size: 50px;;color:grey;font-family:orbitron;'><center><b>SPI Calculator</b></center></div>",
unsafe_allow_html=True)
st.markdown(
f"<div style='font-size: 12px;'><center>By <a href='https://github.com/cmdev007/'><span class='highlight blue'><span class='bold'>cMDev007</span></span></a></center></div>",
unsafe_allow_html=True)
st.markdown("---")
placeInfo = st.empty()
placeInfo.info("For CPI check sidebar")
st.markdown(
f"<div style='font-size: 20px;'><center><span class='highlight green'><span class='bold'>Coming Soon!</span></span></center></div>",
unsafe_allow_html=True)
if SEM == "CPI":
placeTitle = st.empty()
placeTitle.markdown(
f"<div style='font-size: 50px;;color:grey;font-family:orbitron;'><center><b>CPI Calculator</b></center></div>",
unsafe_allow_html=True)
st.markdown(
f"<div style='font-size: 12px;'><center>By <a href='https://github.com/cmdev007/'><span class='highlight blue'><span class='bold'>cMDev007</span></span></a></center></div>",
unsafe_allow_html=True)
st.markdown("---")
app_state = st.experimental_get_query_params()
if ("elec" not in app_state.keys()):
with st.beta_expander("CPI Calculator", expanded=True):
with st.form(key='columns_in_form'):
elec = st.selectbox("Technical Elective", (
"", "Information Retrieval", "No SQL Databases", "Advanced Image Processing",
"Multimedia Security and Forensics"))
submitted = st.form_submit_button('Submit')
if submitted:
if elec != "":
st.experimental_set_query_params(**{"elec": elec})
else:
st.markdown(
f"<div style='font-size: 16px;'><center><span class='highlight green'><span class='bold'>Please select Technical Elective</span></span></center></div>",
unsafe_allow_html=True)
st.markdown("")
app_state = st.experimental_get_query_params()
if ("elec" in app_state.keys()):
placeTitle.markdown(f"<div style='font-size:30px;color:grey;font-family:orbitron;'>\
<center><b>CPI Calculator</b></center></div>",
unsafe_allow_html=True)
app_state = st.experimental_get_query_params()
allSub = ["Big Data Processing", "Machine Learning", "Numerical Methods for Data Science", "Optimization",
"SAS based Mini Project - 1"]
allSub.extend(app_state["elec"])
subCred = {"Big Data Processing": 3, "Machine Learning": 4, "Numerical Methods for Data Science": 4,
"Optimization": 3, "SAS based Mini Project - 1": 2, "Information Retrieval": 4,
"No SQL Databases": 4, "Advanced Image Processing": 4, "Multimedia Security and Forensics": 3}
sem1Cred = 4+4+4+4+2
sem2Cred = 0
for i in allSub:
sem2Cred += subCred[i]
with st.form(key="CPI"):
sem1SPI = st.text_input("Enter Semester-1 SPI:")
sem2SPI = st.text_input("Enter Semester-2 SPI:")
submittedSPI = st.form_submit_button('Submit')
if submittedSPI:
sem1SPI = float(sem1SPI)
sem2SPI = float(sem2SPI)
CPI = str(round(((sem1SPI*sem1Cred)+(sem2SPI*sem2Cred))/(sem1Cred+sem2Cred), 2))
st.markdown(
f"<div style='font-size: 16px;'><center>Your CPI: <span class='highlight green'><span class='bold'>{CPI}</span></span></center></div>",
unsafe_allow_html=True)
hide_streamlit_style = """
<style>
#MainMenu {visibility: hidden;}
footer {visibility: hidden;}
</style>
"""
st.markdown(hide_streamlit_style, unsafe_allow_html=True) |
988,322 | e0d1dbd780c33356ce48f4f5ce48176d0673dc4e | #!/usr/bin/env python
import collections
import contextlib
import datetime
from . import delay_record
import errno
import fcntl
import logging
import os
from . import ozwd_get_value
from . import ozwd_set_value
from . import ozwd_util
import pickle
from . import spicerack
import time
# Persisted timer state (a pickled delay_record.Delay). The -staging file is
# used for atomic first creation via os.link().
LEVEL_FILE = '/var/lib/homeauto/upstairs_bathroom'
LEVEL_FILE_STAGING = '/var/lib/homeauto/upstairs_bathroom-staging'
DELAY_DURATION = datetime.timedelta(minutes=30)  # idle time before dimming starts
DIM_DURATION_TOTAL = datetime.timedelta(minutes=30)  # total time to dim from 100 down to the floor
DIM_DURATION_MAX_INCREMENT = datetime.timedelta(seconds=127)  # cap per dim step (device ramp limit? confirm)
DIM_FLOOR = 5  # lowest dim level; at/below this the timer file is removed
DIMMER_VALUE = spicerack.Value.UPSTAIRS_BATHROOM
DIMMER_RAMP_TIME_VALUE = spicerack.Value.UPSTAIRS_BATHROOM_DIMMER_RAMP_TIME
@contextlib.contextmanager
def locked_level(create):
    """Yield the open level file, creating it atomically if requested.

    Existing file: opened r+b and held under an exclusive fcntl lock for the
    duration of the with-block. Missing file and create=False: yields None.
    Missing file and create=True: writes to the staging path (O_EXCL gives
    exclusivity) and hard-links it to the final name only on success.
    """
    try:
        level = open(LEVEL_FILE, 'r+b')
    except IOError as e:
        if e.errno != errno.ENOENT:
            raise
        if not create:
            # No timer file and we were not asked to create one.
            yield None
            return
        # Create the staging file exclusively; a concurrent creator fails here.
        fd = os.open(LEVEL_FILE_STAGING, os.O_CREAT | os.O_EXCL | os.O_WRONLY)
        try:
            level = os.fdopen(fd, 'wb')
        except:
            os.remove(LEVEL_FILE_STAGING)
            os.close(fd)
            raise
        else:
            with level:
                try:
                    yield level
                    # Publish atomically: link to the final name, drop staging.
                    os.link(LEVEL_FILE_STAGING, LEVEL_FILE)
                    os.unlink(LEVEL_FILE_STAGING)
                except:
                    os.remove(LEVEL_FILE_STAGING)
                    raise
    else:
        with level:
            # Serialize against other processes touching the timer file.
            fcntl.lockf(level, fcntl.LOCK_EX)
            try:
                yield level
            finally:
                fcntl.lockf(level, fcntl.LOCK_UN)
def record(position):
    """Record that the light was adjusted with a new timer"""
    logging.info('Recording position %d', position)
    with locked_level(create=True) as level:
        try:
            delay = pickle.load(level)
            logging.info('Loaded pickle: %r', delay)
            remaining_expected_positions = iter(delay.expected_positions)
            if position == next(remaining_expected_positions, None):
                # This is just the feedback from slow dimming that is already in progress
                logging.info('Ignoring feedback')
                # Consume the matched position and keep the rest of the plan.
                next_delay = delay_record.Delay(delay.when, delay.source_position, delay.target_position, list(remaining_expected_positions))
                expect(level, next_delay)
                return
        except IOError:
            # The file is not open for writing, so there is no expected position yet
            # (freshly-created staging file is write-only; pickle.load fails).
            pass
        # A real (manual) adjustment: restart the countdown from now.
        next_delay = delay_record.Delay(datetime.datetime.utcnow() + DELAY_DURATION, position, position, [])
        expect(level, next_delay)
def update():
    """If the timer is up, then dim the light a little more"""
    with locked_level(create=False) as level:
        if level is None:
            # There is no timer
            logging.info('There is currently no timer')
            return
        delay = pickle.load(level)
        if delay.source_position <= DIM_FLOOR:
            # Already at/below the floor: dimming is done, drop the timer.
            os.remove(LEVEL_FILE)
            return
        now = datetime.datetime.utcnow()
        remaining = delay.when - now
        if remaining >= datetime.timedelta(minutes=1):
            logging.info('Aborting because the timer still has: %s', remaining)
            return
        if remaining.total_seconds() > 0:
            # Close enough: release the lock, wait out the remainder, relock.
            logging.info('Sleeping because the timer still has: %s', remaining)
            fcntl.lockf(level, fcntl.LOCK_UN)
            time.sleep(remaining.total_seconds())
            fcntl.lockf(level, fcntl.LOCK_EX)
        if delay.expected_positions:
            # There shouldn't be any expected positions left, so something has interrupted the dimmer
            logging.info('Expected positions were not consumed, so reverting from %d to %d',
                         delay.target_position, delay.source_position)
            # NOTE(review): this assignment appears dead — `position` is
            # unconditionally recomputed below from target_position. Confirm
            # whether the revert path was meant to short-circuit.
            position = delay.source_position
        # Step size: enough to finish, but capped so each ramp stays short.
        position_increment = min(
            delay.target_position - DIM_FLOOR,
            max(
                1,
                int((100 - DIM_FLOOR) * DIM_DURATION_MAX_INCREMENT.total_seconds() / DIM_DURATION_TOTAL.total_seconds())
            )
        )
        if position_increment <= 0:
            return
        position = delay.target_position - position_increment
        # This will be near DIM_DURATION_MAX_INCREMENT but accounts for rounding
        ramp_time = datetime.timedelta(seconds=int(position_increment / (100 - DIM_FLOOR) * DIM_DURATION_TOTAL.total_seconds()))
        if delay.target_position > DIM_FLOOR:
            # The switch reports the old and then the new position when it dims
            next_delay = delay_record.Delay(now + ramp_time, delay.target_position, position, [delay.target_position, position])
            expect(level, next_delay)
        else:
            os.remove(LEVEL_FILE)
        logging.info('Dimming to %d over %s', position, ramp_time)
        # Temporarily set the ramp time, dim, then restore the 2s default.
        with ozwd_util.get_thrift_client() as thrift_client, (
                ozwd_util.get_stompy_client()) as stompy_client:
            ozwd_set_value.set_value_connected(DIMMER_RAMP_TIME_VALUE.value, ramp_time.total_seconds(), thrift_client)
            try:
                ozwd_set_value.set_value_connected(DIMMER_VALUE.value, position, thrift_client)
            finally:
                ozwd_set_value.set_value_connected(DIMMER_RAMP_TIME_VALUE.value, 2, thrift_client)
def expect(level, delay):
    """Persist *delay* into the already-open level file, replacing any
    previous pickle."""
    logging.info('Writing pickle: %r', delay)
    # Overwrite the existing contents
    level.seek(0)
    level.truncate()
    # FIXME: A crash here would leave an empty file
    pickle.dump(delay, level)
def main():
    # Entry point (e.g. from cron): advance the dimming state machine one step.
    update()

if __name__ == "__main__":
    main()
988,323 | dd524258a4fd68a0b6b66c1b2ff95aca00e68fc8 | from commands.pre_1_13.cmdex import CMDEx
from commands.upgrader.utils import command_upgrader_base
from ..utils import selector
# Command templates (pre-1.13 syntax) handled by this upgrader; the
# {type:name} placeholders are parsed by command_upgrader_base and passed
# to __upgrade as props.
CMDEXS = [
    CMDEx('gc help'),
    CMDEx('gc reload'),
    CMDEx('gc fulllevelup {selector:player}'),
    CMDEx('gc resetinfo {selector:player}'),
    CMDEx('gc getclan {selector:player}'),
    CMDEx('gc getguild {selector:player}'),
    CMDEx('gc getlevel {selector:player}'),
    CMDEx('gc gotoguild {selector:player} {str:guild}'),
    CMDEx('gc setclan {selector:player} {str:clan}'),
    CMDEx('gc setguild {selector:player} {str:guild}'),
    CMDEx('gc setlevel {selector:player} {int:level}'),
]
def __upgrade(order, props):
    """Render an upgraded command string from a token order and parsed props.

    Tokens starting with '#' are placeholders looked up in props; the
    'player' placeholder is additionally run through the selector upgrader.
    """
    parts = []
    for tok in order:
        if tok[0] == '#':
            key = tok[1:]
            if key == 'player':
                parts.append(selector.upgrade(props[key]))
            else:
                parts.append(str(props[key]))
        else:
            parts.append(tok)
    # join() replaces the original's quadratic string building and its
    # trailing-space strip, which raised IndexError on an empty order.
    return ' '.join(parts)
def upgrade(command: str) -> str:
    """Upgrade one pre-1.13 command string using the CMDEXS templates."""
    return command_upgrader_base.upgrade(CMDEXS, command, __upgrade)
988,324 | 8eda5e5d881a3f3725cdbe4eeac0d08ee03a209d | from flask_injector import Binder
from jamaah.provider import PROVIDERS
def configure(binder: Binder):
    """flask-injector hook: bind each provider class to itself so the
    injector can construct them on demand."""
    for provider in PROVIDERS:
        binder.bind(provider, to=provider)
|
988,325 | 14fd8f85ce8d7ceec16c75cb9a6bb2d1ef4fdde4 | import sys
from ckeditor.fields import RichTextField
from django.db import models
from django.utils.timezone import make_aware
from django.contrib.auth.models import User
# from ckeditor.fields import RichTextField
import datetime
class Student(User):
    """Auth User extended with campus fields; identity defaults to 'S'."""
    andrewid = models.CharField(max_length=20, blank = True, default = '')
    identity = models.CharField(max_length=10, blank = True, default = 'S') # 'S' for Student, P for Professor
    def __str__(self):
        return self.username
class Professor(User):
    """Auth User extended with campus fields; identity defaults to 'P'."""
    andrewid = models.CharField(max_length=20, blank = True, default = '')
    identity = models.CharField(max_length=10, blank = True, default = 'P') # 'S' for Student, P for Professor
    def __str__(self):
        return self.username
class Course(models.Model):
    """A course taught by one Professor and taken by many Students."""
    name = models.CharField(max_length=300)
    number = models.CharField(max_length=40)  # e.g. the course code
    instructor = models.ForeignKey(Professor,on_delete=models.CASCADE)
    students = models.ManyToManyField(Student, blank=True)
    def __str__(self):
        # Number and name concatenated, e.g. "15637Web Apps".
        return self.number + self.name
# def directory_path(instance, filename):
# # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>
# return '{0}/{1}'.format(instance.number, filename)
class Note(models.Model):
    """A file-based note uploaded by a user for a course."""
    author = models.ForeignKey(User,on_delete=models.CASCADE)
    course = models.ForeignKey(Course, on_delete=models.CASCADE)
    date = models.DateField(auto_now=True, auto_now_add=False)
    time = models.TimeField(auto_now=True, auto_now_add=False)
    access_type = models.CharField(max_length=20, blank=True, default='public')
    # file = models.FileField(upload_to=directory_path, blank=False, null=False)
    file = models.FileField(upload_to='notes/', blank=False, null=False,default = '')
    filename = models.CharField(max_length=300, default='file1')
    def __str__(self):
        # BUGFIX: the original returned self.content (no such field exists on
        # this model) and concatenated date/time objects to str — both raise
        # at render time. Use the fields that actually exist.
        return f"{self.filename}\n{self.date} {self.time}"
#datetime field
class TextNote(models.Model):
    """A rich-text note authored in the app (vs. an uploaded-file Note)."""
    course = models.ForeignKey(Course, on_delete=models.CASCADE, default = '')
    author = models.ForeignKey(User,on_delete=models.CASCADE, default = '')
    filepath=models.CharField(max_length=200, default = '')
    filename=models.CharField(max_length=200, default = '')
    body = RichTextField(blank=True,default = '')  # CKEditor HTML body
    # body = models.CharField(max_length=sys.maxsize, blank=True, default='')
    plaintext = models.CharField(max_length=100000,blank=True, default='')  # stripped text (for search? confirm)
    access_type = models.CharField(max_length=20, blank=True, default='private')
    def __str__(self):
        return self.filename
988,326 | 5553b3154bcb10289752d92a962dc8ac19e32829 | # -*- coding: utf-8 -*-
# Created by #chuyong, on 2019/11/19.
# Copyright (c) 2019 3KWan.
# Description :
import unittest
def hello():
    """Build and return a suite containing TestSomething's two tests."""
    class TestSomething(unittest.TestCase):
        def test_add(self):
            print("test add")

        def test_2(self):
            print("test 2")

    # unittest.makeSuite() was deprecated and removed in Python 3.13;
    # the TestLoader API is the supported equivalent.
    suite = unittest.defaultTestLoader.loadTestsFromTestCase(TestSomething)
    return suite
def world():
    """Build and return a suite containing TestOther's two tests.

    The original fell through and returned None (its suite-building code was
    commented out), which breaks any unittest consumer expecting a suite.
    """
    class TestOther(unittest.TestCase):
        def test_a(self):
            print("test a")

        def test_b(self):
            print("test b")

    return unittest.defaultTestLoader.loadTestsFromTestCase(TestOther)
if __name__ == '__main__':
    # Build a combined suite from hello() and run it verbosely.
    suite_1 = hello()
    # suite_2 = world()
    suite_base = unittest.TestSuite()
    suite_base.addTests(suite_1)
    # suite_base.addTest(suite_2)
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(suite_base)
988,327 | eac669c0072cedc0e428614c82c8e9923b205ae1 | def solve(s, k):
count = 0
s = list(s)
for i in range(len(s)-k + 1):
if s[i] == '-':
count += 1
for j in range(i, i+k):
if s[j] == '+':
s[j] = '-'
else:
s[j] = '+'
if '-' in s:
return 'IMPOSSIBLE'
else:
return count
if __name__ == "__main__":
t = int(raw_input())
for caseNr in xrange(1, t+1):
s = raw_input()
s = s.split()
print("Case #%i: %s" % (caseNr, solve(s[0], int(s[1]))))
|
988,328 | 1acaf9638d657e15b5b576c05d51cd7614c728d0 | import filter_keys
import boto3
from botocore.exceptions import ClientError
def get_service(function, dict_key, fields, extra_options=None):
    """Thin wrapper around filter_keys for the application-autoscaling client.

    ``extra_options`` defaults to None instead of a shared ``{}`` — a mutable
    default argument is aliased across calls and is a classic mutation hazard.
    """
    return filter_keys.filter_key('application-autoscaling', function, dict_key, fields,
                                  extra_options if extra_options is not None else {})
def get_all(remove_empty=False):
    """Collect scalable targets, scaling policies and scheduled actions.

    With remove_empty=True, keys whose value is falsy (empty) are dropped.
    """
    resources = {
        "scalable_targets": get_targets(),
        "scaling_policies": get_policies(),
        "scheduled_actions": get_schedules(),
    }
    if remove_empty:
        resources = {key: value for key, value in resources.items() if value}
    return resources
def get_targets():
    """Describe scalable targets across every supported service namespace."""
    namespaces = ["ecs", "elasticmapreduce", "ec2", "appstream",
                  "dynamodb", "rds", "sagemaker"]
    fields = ["ServiceNamespace", "ResourceId", "ScalableDimension"]
    per_namespace = (get_service("describe_scalable_targets", "ScalableTargets",
                                 fields, {"ServiceNamespace": ns})
                     for ns in namespaces)
    # Keep only namespaces that returned something.
    return [result for result in per_namespace if result]
def get_policies():
    """Describe scaling policies across every supported service namespace."""
    namespaces = ["ecs", "elasticmapreduce", "ec2", "appstream",
                  "dynamodb", "rds", "sagemaker"]
    fields = ["ServiceNamespace", "PolicyARN", "PolicyName", "ScalableDimension"]
    per_namespace = (get_service("describe_scaling_policies", "ScalingPolicies",
                                 fields, {"ServiceNamespace": ns})
                     for ns in namespaces)
    # Keep only namespaces that returned something.
    return [result for result in per_namespace if result]
def get_schedules():
    """Describe scheduled actions across every supported service namespace."""
    namespaces = ["ecs", "elasticmapreduce", "ec2", "appstream",
                  "dynamodb", "rds", "sagemaker"]
    fields = ["ServiceNamespace", "ScheduledActionARN", "ScheduledActionName", "ScalableDimension"]
    per_namespace = (get_service("describe_scheduled_actions", "ScheduledActions",
                                 fields, {"ServiceNamespace": ns})
                     for ns in namespaces)
    # Keep only namespaces that returned something.
    return [result for result in per_namespace if result]
|
988,329 | 86b77f3666760ba1f3275714748034c073a12347 |
""" #Computer programmers refer to blocks of text as strings. In our last exercise, we created the string “Hello world!”. In Python a string is either surrounded by double quotes ("Hello world") or single quotes ('Hello world').
#It doesn’t matter which kind you use, just be consistent. """
print('Anupriya') |
988,330 | c86683a3e1cfae7e99cb25895cb9669db67995de | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-09-29 09:51
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: make BuyingOrder.date default to now()
    and Item.stock default to 0."""

    dependencies = [
        ('mainapp', '0008_auto_20180927_1156'),
    ]

    operations = [
        migrations.AlterField(
            model_name='buyingorder',
            name='date',
            field=models.DateField(default=django.utils.timezone.now),
        ),
        migrations.AlterField(
            model_name='item',
            name='stock',
            field=models.IntegerField(default=0, verbose_name='Stock'),
        ),
    ]
|
988,331 | 7a642246656c5bd015a566df3b3e3ff3f67ba91f | """
https://leetcode.com/problems/multiply-strings/
43. Multiply Strings
Medium
--------------------
Given two non-negative integers num1 and num2 represented as strings, return the product of num1 and num2, also represented as a string.
Note: You must not use any built-in BigInteger library or convert the inputs to integer directly.
Example 1:
Input: num1 = "2", num2 = "3"
Output: "6"
Example 2:
Input: num1 = "123", num2 = "456"
Output: "56088"
Constraints:
1 <= num1.length, num2.length <= 200
num1 and num2 consist of digits only.
Both num1 and num2 do not contain any leading zero, except the number 0 itself.
"""
class Solution:
    def multiply(self, num1, num2):
        """Entry point: delegate to the digit-array implementation."""
        return self.multiply_2(num1, num2)
def multiply_2(self, num1, num2):
"""
参考思路:https://leetcode.com/problems/multiply-strings/discuss/17605/Easiest-JAVA-Solution-with-Graph-Explanation
验证通过,性能一般:
Runtime: 196 ms, faster than 15.10% of Python3 online submissions for Multiply Strings.
Memory Usage: 14.4 MB, less than 10.51% of Python3 online submissions for Multiply Strings.
:param num1:
:param num2:
:return:
"""
if not num1 or not num2:
return ""
if num1 == "0" or num2 == "0":
return "0"
input1 = num1[::-1]
input2 = num2[::-1]
ret = ""
ret_arr = [0 for i in range(len(num1) + len(num2))]
for i in range(len(input1)):
for j in range(len(input2)):
tmp = int(input1[i]) * int(input2[j])
#个位
tmp0 = tmp % 10 + ret_arr[i + j]
ret_arr[i + j] = tmp0 % 10
#十位
tmp1 = tmp // 10 + ret_arr[i + j + 1] + tmp0 // 10
ret_arr[i + j + 1] = tmp1 % 10
#百位
if tmp1 // 10 > 0:
ret_arr[i + j + 2] += tmp1 // 10
for i in range(len(ret_arr) - 1, -1, -1):
ret += str(ret_arr[i])
return ret.lstrip("0")
def multiply_1(self, num1, num2):
"""
思路:模拟多位数相乘的办法,先依次计算出乘数和被乘数的乘积(补零),然后所有乘积相加.
验证通过,性能一般
Runtime: 280 ms, faster than 6.31% of Python3 online submissions for Multiply Strings.
Memory Usage: 14.5 MB, less than 6.19% of Python3 online submissions for Multiply Strings.
:param num1:
:param num2:
:return:
"""
if not num1 or not num2:
return ""
# 特殊情况处理
if num1 == "0" or num2 == "0":
return "0"
cache = [[] for i in range(len(num1))]
outflow = 0
input1 = num1[::-1]
input2 = num2[::-1]
for i in range(len(input1)):
# 多位数乘法补零
for k in range(i):
cache[i].append(0)
for j in range(len(input2)):
tmp = int(input1[i]) * int(input2[j])
tmp += outflow
cache[i].append(tmp % 10)
outflow = tmp // 10
# 处理循环结束后outflow剩余的情况
while outflow > 0:
cache[i].append(outflow)
outflow = outflow // 10
# cache中依次相加,注意:此时cache中是低位在前,高位在后
ret_1 = [0]
ret_2 = []
outflow = 0
for i in range(len(cache)):
index = 0
while index < len(ret_1) and index < len(cache[i]):
tmp = ret_1[index] + cache[i][index] + outflow
ret_2.append(tmp % 10)
outflow = tmp // 10
index += 1
for j in range(index, len(ret_1)):
tmp = ret_1[j] + outflow
ret_2.append(tmp % 10)
outflow = tmp // 10
for j in range(index, len(cache[i])):
tmp = cache[i][j] + outflow
ret_2.append(tmp % 10)
outflow = tmp // 10
# 处理循环结束后outflow剩余的情况
while outflow > 0:
ret_2.append(outflow % 10)
outflow = outflow // 10
ret_1 = ret_2.copy()
ret_2 = []
ret_1.reverse()
return "".join(list(map(str, ret_1)))
def main():
    """Smoke-test Solution.multiply against a table of known cases.

    For each case the computed product is printed, followed by whether it
    matches the recorded expectation, followed by a separator line --
    exactly as the original one-case-at-a-time driver did.
    """
    cases = [
        ("2", "3", "6"),
        ("123", "456", "56088"),
        ("88989", "0", "0"),
        ("88989", "1000", "88989000"),
        ("99999", "99", "99899001"),
        ("99999", "999", "99899001"),
    ]
    for num1, num2, expected in cases:
        result = Solution().multiply(num1, num2)
        print(result)
        print(result == expected)
        print("---------------------")


if __name__ == "__main__":
    main()
|
988,332 | 1e7b6c473d4c7b54dbabb77a4cafab3afcec70c5 | from apscheduler.schedulers.background import BackgroundScheduler
from datetime import datetime

# BackgroundScheduler runs jobs on a background thread
# (a BlockingScheduler would block the calling thread instead).
scheduler = BackgroundScheduler()

# Print the current time, scheduled every day at 17:00:00.
@scheduler.scheduled_job('cron', id='my_job_id', hour=17, minute=0, second=0)
def job():
    print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

def aa():
    print("aa")

#scheduler.add_job(job, 'cron', day_of_week='1-5', hour=6, minute=30)
# Run aa() at midnight on the 1st of every month.
scheduler.add_job(aa, 'cron', day=1, hour=0, minute=0, second=0)
scheduler.start()
988,333 | 293fde634357d0aab780913fa7a0591776bd8437 | import pytest
import yaml
from tests.commands.run_test_utils import ALTERNATIVE_YAML, RunAPIMock, RunTestSetup
from tests.fixture_data import CONFIG_YAML, PROJECT_DATA
from valohai_cli.commands.execution.run import run
from valohai_cli.ctx import get_project
from valohai_cli.models.project import Project
adhoc_mark = pytest.mark.parametrize('adhoc', (False, True), ids=('regular', 'adhoc'))
@pytest.fixture(params=['regular', 'adhoc'], ids=('regular', 'adhoc'))
def run_test_setup(request, logged_in_and_linked, monkeypatch):
return RunTestSetup(monkeypatch=monkeypatch, adhoc=(request.param == 'adhoc'))
@pytest.fixture()
def patch_git(monkeypatch):
def mock_resolve_commits(mock_self, *, commit_identifier):
return [{'identifier': commit_identifier}]
monkeypatch.setattr(Project, 'resolve_commits', mock_resolve_commits)
def test_run_requires_step(runner, logged_in_and_linked):
assert 'Usage: run' in runner.invoke(run, catch_exceptions=False).output
@pytest.mark.parametrize('pass_env_var', ('custom', 'override-default'))
def test_run_env_var(run_test_setup, pass_env_var):
run_test_setup.args.extend(['-v', 'greeting=hello'])
run_test_setup.args.extend(['--var', 'enable=1'])
run_test_setup.args.extend(['-vdebug=yes'])
if pass_env_var == 'override-default':
run_test_setup.args.extend(['--var', 'testenvvar='])
expected_testenvvar = ''
else:
expected_testenvvar = 'test' # default from YAML
run_test_setup.values['environment_variables'] = {
'greeting': 'hello',
'enable': '1',
'debug': 'yes',
'testenvvar': expected_testenvvar,
}
run_test_setup.run()
def test_run_env(run_test_setup):
run_test_setup.args.append('--environment=015dbd56-2670-b03e-f37c-dc342714f1b5')
run_test_setup.values['environment'] = '015dbd56-2670-b03e-f37c-dc342714f1b5'
run_test_setup.run()
def test_run_tags(run_test_setup):
run_test_setup.args.extend(['--tag=bark', '--tag=bork', '--tag=vuh', '--tag=hau'])
run_test_setup.values['tags'] = ['bark', 'bork', 'vuh', 'hau']
run_test_setup.run()
def test_run_spot_restart(run_test_setup):
run_test_setup.args.append('--environment=018161d4-2911-7bbb-85ea-8820559cce89')
run_test_setup.values['environment'] = '018161d4-2911-7bbb-85ea-8820559cce89'
run_test_setup.args.append('--autorestart')
run_test_setup.run()
assert run_test_setup.run_api_mock.last_create_execution_payload["runtime_config"] == {
'autorestart': True
}
def test_run_with_yaml_path(run_test_setup):
run_test_setup.args.remove('train')
# Use a step which is only present in the evaluation YAML
run_test_setup.args.append('batch feature extraction')
run_test_setup.args.append(f'--yaml={ALTERNATIVE_YAML}')
output = run_test_setup.run(verify_adhoc=run_test_setup.adhoc)
# Adhoc success case already verified in `run()
if not run_test_setup.adhoc:
assert '--yaml can only be used with --adhoc' in output
else:
assert f'from configuration YAML at {ALTERNATIVE_YAML}' in output
def test_run_input(run_test_setup):
run_test_setup.args.append('--in1=http://url')
run_test_setup.args.append('--in1=http://anotherurl')
run_test_setup.values['inputs'] = {'in1': ['http://url', 'http://anotherurl']}
run_test_setup.run()
@pytest.mark.parametrize('pass_param', ('direct', 'file', 'mix'))
def test_run_params(tmpdir, run_test_setup, pass_param):
values = {
'parameters': { # default from YAML
'max_steps': 300,
'learning_rate': 0.1337,
},
}
if pass_param in ('direct', 'mix'):
run_test_setup.args.append('--max-steps=1801')
values['parameters']['max_steps'] = 1801
if pass_param in ('file', 'mix'):
params_yaml = tmpdir.join('params.yaml')
params_yaml.write(yaml.safe_dump({'learning-rate': 1700}))
run_test_setup.args.append(f'--parameter-file={params_yaml}')
values['parameters']['learning_rate'] = 1700
run_test_setup.values.update(values)
run_test_setup.run()
payload = run_test_setup.run_api_mock.last_create_execution_payload
if pass_param == 'direct':
assert payload['parameters']['max_steps'] == 1801
assert payload['parameters']['learning_rate'] == 0.1337
if pass_param == 'file':
assert payload['parameters']['max_steps'] == 300
assert payload['parameters']['learning_rate'] == 1700.0
if pass_param == 'mix':
assert payload['parameters']['max_steps'] == 1801
assert payload['parameters']['learning_rate'] == 1700.0
def test_param_type_validation_integer(runner, logged_in_and_linked, patch_git, default_run_api_mock):
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write(CONFIG_YAML)
rv = runner.invoke(run, ['train', '--max-steps=plonk'], catch_exceptions=False)
assert (
'\'plonk\' is not a valid integer' in rv.output or
'plonk is not a valid integer' in rv.output
)
def test_param_type_validation_flag(runner, logged_in_and_linked, patch_git, default_run_api_mock):
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write(CONFIG_YAML)
rv = runner.invoke(run, ['train', '--enable-mega-boost=please'], catch_exceptions=False)
assert (
'\'please\' is not a valid boolean' in rv.output or
'please is not a valid boolean' in rv.output
)
@pytest.mark.parametrize('value, result', [
# Various forms supported by `click.BOOL`...
('yes', True),
('no', False),
('1', True),
('FALSE', False),
('True', True),
])
def test_flag_param_coercion(tmpdir, run_test_setup, value, result):
run_test_setup.values['parameters'] = { # default from YAML
'max_steps': 300,
'learning_rate': 0.1337,
'enable_mega_boost': result,
}
run_test_setup.args.append(f'--enable-mega-boost={value}')
run_test_setup.run()
def test_run_no_git(runner, logged_in_and_linked):
project_id = PROJECT_DATA['id']
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write(CONFIG_YAML)
args = ['train']
with RunAPIMock(project_id, 'f' * 16, {}):
output = runner.invoke(run, args, catch_exceptions=False).output
assert 'is not a Git repository' in output
def test_param_input_sanitization(runner, logged_in_and_linked, patch_git, default_run_api_mock):
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write('''
- step:
name: Train model
image: busybox
command: "false"
inputs:
- name: Ridiculously Complex Input_Name
default: http://example.com/
parameters:
- name: Parameter With Highly Convoluted Name
pass-as: --simple={v}
type: integer
default: 1
''')
output = runner.invoke(run, ['train', '--help'], catch_exceptions=False).output
assert '--Parameter-With-Highly-Convoluted-Name' in output
assert '--parameter-with-highly-convoluted-name' in output
assert '--Ridiculously-Complex-Input-Name' in output
assert '--ridiculously-complex-input-name' in output
def test_multi_parameter_serialization(run_test_setup):
run_test_setup.run()
payload = run_test_setup.run_api_mock.last_create_execution_payload
assert payload['parameters']['multi-parameter'] == ["one", "two", "three"]
def test_multi_parameter_command_line_argument(run_test_setup):
run_test_setup.args.append('--multi-parameter=four')
run_test_setup.args.append('--multi-parameter=5')
run_test_setup.args.append('--multi-parameter="six"')
run_test_setup.run()
payload = run_test_setup.run_api_mock.last_create_execution_payload
assert payload['parameters']['multi-parameter'] == ["four", "5", "\"six\""]
def test_typo_check(runner, logged_in_and_linked, patch_git, default_run_api_mock):
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write(CONFIG_YAML)
args = ['train', '--max-setps=80'] # Oopsy!
output = runner.invoke(run, args, catch_exceptions=False).output
assert '(Possible options:' in output or 'Did you mean' in output
assert '--max-steps' in output
def test_run_help(runner, logged_in_and_linked):
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write(CONFIG_YAML)
output = runner.invoke(run, ['--help'], catch_exceptions=False).output
assert 'Train model' in output
def test_command_help(runner, logged_in_and_linked, patch_git, default_run_api_mock):
with open(get_project().get_config_filename(), 'w') as yaml_fp:
yaml_fp.write(CONFIG_YAML)
output = runner.invoke(run, ['Train model', '--help'], catch_exceptions=False).output
assert 'Parameter Options' in output
assert 'Input Options' in output
def test_remote(run_test_setup, tmpdir):
key = tmpdir.join("key.pub")
key.write_text("ssh blarp blep", "utf-8")
run_test_setup.args.append('--debug-port=8101')
run_test_setup.args.append(f'--debug-key-file={key}')
run_test_setup.run()
assert run_test_setup.run_api_mock.last_create_execution_payload["runtime_config"] == {
"remote_debug": {
'debug_key': 'ssh blarp blep',
'debug_port': 8101,
}
}
def test_remote_both_args(run_test_setup):
run_test_setup.args.append('--debug-port=8101')
assert "Both or neither" in run_test_setup.run(catch_exceptions=False, verify_adhoc=False)
|
988,334 | ca09162407e3d71412b3c0517c78cdba72dc8835 | from math import ceil
import pygame as pg
from functions import get_surface
from models.Float import Float
from models.GameObject import GameObject
class BarIndicatorColor:
    """Color scheme for a BarIndicator.

    background: fill behind the bar; border: frame color; full: color of the
    filled portion; empty: color for the unfilled portion (not read by
    BarIndicator.update_image as written).
    """
    def __init__(self, background, border, full, empty):
        self.background = background
        self.border = border
        self.full = full
        self.empty = empty
class BarIndicator:
    """Horizontal progress/health bar rendered onto a pygame-style surface.

    Shows `current` out of `max` as a filled strip inside a border frame.
    Depends on the project helpers get_surface (surface factory) and Float
    (numeric wrapper) -- semantics assumed, confirm against their modules.
    """

    def __init__(self, width: int, height: int, color: BarIndicatorColor = None,
                 border_size: tuple = (4, 4), max_value=100, current=0):
        # Total indicator size in pixels.
        self.width = width
        self.height = height
        if color is None:
            # Default palette: dark background, grey border, red fill.
            color = BarIndicatorColor((10, 10, 10), (100, 100, 100), (255, 0, 0), (0, 0, 0))
        self.color = color
        self.max = max_value
        # (x, y) thickness of the border frame, in pixels.
        self.border_size = border_size
        self.current = 0
        self.image = get_surface(self.width, self.height)
        # Validates the initial value and renders the first image.
        self.set_current(current)

    def update_image(self):
        """Re-render the cached surface from the current value."""
        image = get_surface(self.width, self.height)
        # The border color fills the whole surface first; the inner rectangle
        # (inset by the border thickness) is painted over it.
        image.fill(self.color.border)
        offset_x, offset_y = self.border_size
        image.fill(self.color.background, (offset_x, offset_y, self.width - 2 * offset_x, self.height - 2 * offset_y))
        # Filled width is proportional to current/max, rounded up.
        cnt = self.width - 2 * offset_x
        cnt = int(Float(ceil(cnt * self.current / self.max)))
        image.fill(self.color.full, (offset_x, offset_y, cnt, self.height - 2 * offset_y))
        self.image = image

    def get_image(self, update=False):
        """Return the cached surface; re-render first when update=True."""
        if update:
            self.update_image()
        return self.image

    def get_current(self):
        # Returns the current value wrapped in the project's Float type.
        return Float(self.current)

    def set_current(self, value):
        """Set the value and re-render, but only when 0 <= value <= max;
        out-of-range values are silently ignored."""
        if 0 <= value <= self.max:
            self.current = value
            self.update_image()
|
988,335 | b913ea5e7b86e3913c38a1fe38d62178ff7d2fef | import argparse
import os
import pathlib
import json
import math
import numpy as np
from tqdm import tqdm
from functools import reduce
from yellowbrick.text import TSNEVisualizer
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
# Folder Settings
ROOT_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..')
DATA_FOLDER = os.path.join(ROOT_FOLDER, 'data')
SUBREDDIT_FOLDER = os.path.join(DATA_FOLDER, 'subreddits')
def visualize_scatter(data_2d, label_ids, id_to_label_dict, title, figsize=(20, 20)):
    """Scatter-plot 2-D embedded points, one color per label class, and show it.

    :param data_2d: 2-D point array indexed as data_2d[row, 0/1]
        (e.g. PCA/t-SNE output) -- assumed shape (n_samples, 2), confirm
    :param label_ids: integer label per sample, parallel to data_2d rows
    :param id_to_label_dict: maps label id -> display name for the legend
    :param title: plot title
    :param figsize: matplotlib figure size in inches
    """
    plt.figure(figsize=figsize)
    plt.grid()
    nb_classes = len(np.unique(label_ids))
    for label_id in np.unique(label_ids):
        plt.scatter(data_2d[np.where(label_ids == label_id), 0],
                    data_2d[np.where(label_ids == label_id), 1],
                    marker='o',
                    # Colors are spread across the Set1 colormap by class index.
                    color=plt.cm.Set1(label_id / float(nb_classes)),
                    # NOTE(review): string literal -- matplotlib expects a
                    # number for linewidth; confirm against the pinned version.
                    linewidth='1',
                    alpha=0.6,
                    label=id_to_label_dict[label_id])
    plt.title(title)
    plt.xlabel('z1')
    plt.ylabel('z2')
    plt.legend(loc='best')
    plt.show()
def get_data_from_json_file(filename):
    """Load a subreddit dump and extract post titles and scores.

    :param filename: path to a JSON file containing a list of post dicts,
        each with at least 'title' and 'ups' keys
    :return: tuple (titles, scores, scores_avg, scores_median); the average
        and median fall back to 0 when the file holds no posts.
        NOTE(review): "median" is the middle element in file order -- a true
        median only if the dump is sorted by score; confirm with the scraper.
    """
    with open(filename, 'r') as f:
        data_json = json.load(f)

    titles = [post['title'] for post in data_json]
    scores = [int(post['ups']) for post in data_json]

    if scores:
        scores_avg = sum(scores) / len(scores)
        scores_median = scores[len(scores) // 2]
    else:
        # Explicit empty-input handling replaces the previous bare `except:`
        # clauses, which silently masked unrelated errors (e.g. KeyError on
        # malformed records) as well as the intended empty-list case.
        scores_avg = 0
        scores_median = 0

    return titles, scores, scores_avg, scores_median
def vectorize_titles(titles):
    """TF-IDF vectorize post titles (unigrams + bigrams, English stop words).

    :param titles: iterable of title strings
    :return: sparse document-term matrix produced by TfidfVectorizer
    """
    # min_df=2 drops terms appearing in fewer than two documents; rows are
    # l2-normalized with smoothed idf weighting.
    vectorizer = TfidfVectorizer(min_df=2, stop_words='english',
                                 strip_accents='unicode', lowercase=True, ngram_range=(1, 2),
                                 norm='l2', smooth_idf=True, sublinear_tf=False, use_idf=True)
    X = vectorizer.fit_transform(titles)
    return X
if __name__ == '__main__':
# Args settings
parser = argparse.ArgumentParser()
parser.add_argument("-pc", "--post-count", type=int, default=25,
help='Minimum post count to be accepted into preprocessing.')
parser.add_argument("-xr", "--x-range", type=float, default=None, nargs=2,
help='Prints out pca coordinates that is within this range on the x axis')
parser.add_argument("-yr", "--y-range", type=float, default=None, nargs=2,
help='Prints out pca coordinates that is within this range on the y axis')
parser.add_argument("-smm", "--score-median-min", type=int, default=75,
help='Only accepts reddit posts with >=<value>. Assume hivemind.')
parser.add_argument("-p", "--peroid", type=str, default='all', choices=[
'all', 'year', 'month', 'week', 'day', 'hour'], help="Scrapped subreddit data folder path (subreddits/<peroid>)")
args = parser.parse_args()
# Folder settings
PEROID_TYPE = args.peroid
FOLDER_PATH = os.path.join(SUBREDDIT_FOLDER, PEROID_TYPE)
# Read coinmarketcap dump
with open(os.path.join(DATA_FOLDER, 'cmc_dump.json'), 'r') as f:
cmc_dump = json.load(f)
# Format cmc data
cmc_data = reduce(lambda x, y: {**x, y['id']: {**y}}, cmc_dump, {})
# Get all files in the folder
_path = pathlib.Path(FOLDER_PATH)
titles = []
labels = []
index_to_title = []
for json_filepath in tqdm(_path.iterdir()):
# Get vector and median/average score
t, s, s_avg, s_median = get_data_from_json_file(json_filepath)
coin_name = json_filepath.name.replace('.json', '')
# Only handle coins in CMC data
if coin_name not in cmc_data:
continue
# Only want subreddits which
# reached the threshold of post count
if len(s) < args.post_count:
continue
# If median score for reddit
# post is < args.score_median_min, ignore,
# not enough content quality
if s_median < args.score_median_min:
continue
# Oh no mutating state :(
# Such a pleb
t = t[:args.post_count]
s = s[:args.post_count]
# Extend to titles
titles.extend(t)
# Our labels
metric = float(cmc_data[coin_name]['market_cap_usd'])
if metric >= 1_000_000_000:
label_custom = 'Marketcap 1Bil++'
elif metric >= 250_000_000:
label_custom = 'Marketcap 250Mil++'
elif metric >= 50_000_000:
label_custom = 'Marketcap 50Mil++'
else:
label_custom = 'Marketcap <50Mil'
# Lazy af lmao
label = list(map(lambda x: label_custom, s))
labels.extend(label)
# Index to Title
index_to_title.extend(list(map(lambda x: '[{}]\t{}:\t{:.64s}'.format(
label_custom, cmc_data[coin_name]['symbol'], x), t)))
# Plotting and labelling stuff
label_to_id_dict = {v: i for i, v in enumerate(np.unique(labels))}
id_to_label_dict = {v: k for k, v in label_to_id_dict.items()}
label_ids = np.array([label_to_id_dict[x] for x in labels])
# Vectorize our titles
v = vectorize_titles(titles)
v_np = np.array(v.todense().tolist())
# Fit through PCA / TSNE
pca_result = PCA().fit_transform(v_np)
# Print stuff
x_range = args.x_range
if x_range is not None:
x_range.sort()
y_range = args.y_range
if y_range is not None:
y_range.sort()
for i in range(len(pca_result)):
if x_range is not None and y_range is None:
if pca_result[i][0] >= x_range[0] and pca_result[i][0] <= x_range[1]:
print(index_to_title[i])
elif x_range is None and y_range is not None:
if pca_result[i][1] >= y_range[0] and pca_result[i][1] <= y_range[1]:
print(index_to_title[i])
elif x_range is not None and y_range is not None:
if pca_result[i][0] >= x_range[0] and pca_result[i][0] <= x_range[1] and pca_result[i][1] >= y_range[0] and pca_result[i][1] <= y_range[1]:
print(index_to_title[i])
visualize_scatter(pca_result, label_ids, id_to_label_dict,
'Marketcap according to subreddit titles ({}) [Median Score: >{}, Post Count: >{}]'.format(args.peroid, args.score_median_min, args.post_count))
|
988,336 | 07e0899543767e59993565fcfd0239dba18544d4 | import cv2
import numpy as np
import imutils
def nothing(x):
    """No-op callback; cv2.createTrackbar requires an on-change callable."""
    pass
def main():
    """Interactive HSV range picker for the default webcam.

    Six trackbars set the lower/upper HSV bounds; three windows show the raw
    frame, the binary mask, and the masked result.  Press ESC to quit.
    """
    cap = cv2.VideoCapture(0)  # default camera
    cv2.namedWindow("Trackbars")
    # Lower and upper HSV bounds (OpenCV hue range is 0-179, S/V are 0-255).
    cv2.createTrackbar("L - H", "Trackbars", 0, 179, nothing)
    cv2.createTrackbar("L - S", "Trackbars", 0, 255, nothing)
    cv2.createTrackbar("L - V", "Trackbars", 0, 255, nothing)
    cv2.createTrackbar("U - H", "Trackbars", 179, 179, nothing)
    cv2.createTrackbar("U - S", "Trackbars", 255, 255, nothing)
    cv2.createTrackbar("U - V", "Trackbars", 255, 255, nothing)

    while (cap.isOpened()):
        ret, frame = cap.read()
        if ret is True:
            hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            # Read the current bounds from the trackbars each frame.
            lower_h = cv2.getTrackbarPos("L - H", "Trackbars")
            lower_s = cv2.getTrackbarPos("L - S", "Trackbars")
            lower_v = cv2.getTrackbarPos("L - V", "Trackbars")
            upper_h = cv2.getTrackbarPos("U - H", "Trackbars")
            upper_s = cv2.getTrackbarPos("U - S", "Trackbars")
            upper_v = cv2.getTrackbarPos("U - V", "Trackbars")
            lower_range = np.array([lower_h, lower_s, lower_v])
            upper_range = np.array([upper_h, upper_s, upper_v])
            # Binary mask of pixels inside the HSV range, then apply it.
            mask = cv2.inRange(hsv, lower_range, upper_range)
            result = cv2.bitwise_and(frame, frame, mask=mask)
            cv2.imshow("frame", frame)
            cv2.imshow("mask", mask)
            cv2.imshow("result", result)
            # ESC to break
            k = cv2.waitKey(1) & 0xFF
            if k == 27:
                break
        else:
            continue

    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
main() |
988,337 | 5d8555a54e50efe44538ac3e05e0ea54ad2f22bc | import sys
sys.path.append('../')
import argparse
import configparser
import numpy as np
from sklearn.metrics import make_scorer, f1_score, accuracy_score, recall_score, precision_score, classification_report, precision_recall_fscore_support
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import make_scorer, f1_score, accuracy_score, recall_score, precision_score, classification_report, precision_recall_fscore_support
from sklearn.utils import shuffle
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.model_selection import KFold
from sklearn.linear_model import LogisticRegression
from sklearn.externals import joblib
from sklearn.utils import shuffle
import gensim, sklearn
from collections import defaultdict
from batch_gen import batch_gen
from text_processor import TextProcessor
import json
import os
def load_files(dir_in):
    """Read every .json file (JSON-lines of tweet dicts) under dir_in.

    :param dir_in: directory path; must end with a path separator, since
        file paths are built by plain concatenation (dir_in + filename) --
        this also assumes a flat directory: files found by os.walk in
        subdirectories would not open correctly.
    :return: tuple (doc_list, tw_class) where doc_list holds the 'text'
        field of every line of every file (sorted filename order) and
        tw_class holds one label (the file stem) per *file*.
        NOTE(review): the lists differ in length when a file has more than
        one line, yet gen_data pairs them index-by-index -- confirm the
        intended pairing with the caller.
    """
    doc_list = list()
    tw_files = sorted([file for root, dirs, files in os.walk(dir_in)
                       for file in files if file.endswith('.json')])
    tw_class = list()
    for tw_file in tw_files:
        # The previous version also accumulated each file's texts in a local
        # `temp` list that was never read; it has been removed.
        with open(dir_in + tw_file) as data_file:
            for line in data_file:
                tweet = json.loads(line)
                doc_list.append(tweet['text'])
        tw_class.append(tw_file.split(".")[0])
    return doc_list, tw_class
def gen_data(tweets, tw_class):
    """Build mean-embedding feature vectors and integer class labels.

    Relies on the module globals EMBEDDING_DIM and word2vec_model being set
    (done in the __main__ block before this is called).

    :param tweets: list of tokenized tweets (each a list of words)
    :param tw_class: class label per tweet index (parallel to tweets)
    :return: tuple (X, y) -- list of mean word vectors, list of label ids
    """
    # Map each distinct class name to a stable integer id (sorted order).
    y_map = dict()
    for i, v in enumerate(sorted(set(tw_class))):
        y_map[v] = i
    print(y_map)

    X, y = [], []
    for i, tweet in enumerate(tweets):
        emb = np.zeros(EMBEDDING_DIM)
        for word in tweet:
            try:
                emb += word2vec_model[word]
            except KeyError:
                # Narrowed from a bare `except:`: an out-of-vocabulary word
                # contributes zero; other errors now surface instead of
                # being silently swallowed.
                pass
        # Mean over all tokens (select_tweets guarantees at least one token
        # is in-vocabulary, but the division is by the full token count).
        emb /= len(tweet)
        X.append(emb)
        y.append(y_map[tw_class[i]])
    return X, y
def select_tweets(tweets):
    """Keep only tweets with at least one word in the embedding vocabulary.

    Mirrors the selection done by the mean-glove-embedding method: a tweet
    with no in-vocabulary word would produce an all-zero vector, so it is
    dropped.  Relies on the module-global word2vec_model.

    :param tweets: list of tokenized tweets (each a list of words)
    :return: the filtered list of tweets (original order preserved)
    """
    # any() replaces the previous manual counter loop; the unused X, Y
    # accumulators have been removed.
    tweet_return = [tweet for tweet in tweets
                    if any(w in word2vec_model for w in tweet)]
    print('Tweets selected:', len(tweet_return))
    return tweet_return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='BagOfWords model validation')
parser.add_argument('-m', '--model', required=True)
parser.add_argument('-f', '--embeddingfile', required=True)
parser.add_argument('-d', '--dimension', required=True)
args = parser.parse_args()
MODEL_FILE = args.model
W2VEC_MODEL_FILE = args.embeddingfile
EMBEDDING_DIM = int(args.dimension)
cf = configparser.ConfigParser()
cf.read("../file_path.properties")
path = dict(cf.items("file_path"))
dir_w2v = path['dir_w2v']
dir_val = path['dir_val']
word2vec_model = gensim.models.Word2Vec.load(dir_w2v+W2VEC_MODEL_FILE)
tp = TextProcessor()
doc_list, tw_class = load_files(dir_val)
tweets = tp.text_process(doc_list, text_only=True)
tweets = select_tweets(tweets)
X, Y = gen_data(tweets, tw_class)
model = joblib.load(dir_w2v + MODEL_FILE)
result = model.predict(X)
print(classification_report(Y, result))
#python validation_BoW.py -m logistic.skl -f model_word2vec -d 100 |
988,338 | b14e9b1a5a96a285941e0a6856b2141c6ea23ee5 | import time
import jwt
import config
class JwtUtil(object):
    """Helpers around PyJWT for issuing and checking HS256 bearer tokens.

    Uses config.secret as the signing key and config.token_header as the
    bearer prefix (e.g. "Bearer ").
    """

    @staticmethod
    def create_token(username):
        """Issue a token valid for 7 days carrying the username and scopes.

        :param username: subject to embed in the token
        :return: the encoded JWT as a str
        """
        payload = {
            "iat": int(time.time()),
            "exp": int(time.time()) + 86400 * 7,  # valid for 7 days
            "username": username,
            "scopes": ['open']
        }
        token = str(jwt.encode(payload, config.secret, algorithm='HS256'), encoding="utf-8")
        return token

    @staticmethod
    def verify_bearer_token(token):
        """Return True iff the token decodes and validates (signature, exp).

        :param token: raw JWT string (no bearer prefix)
        """
        try:
            payload = jwt.decode(token, config.secret, algorithms=['HS256'])
            return bool(payload)
        except Exception:
            # Narrowed from a bare `except:`: any decode/validation failure
            # is still treated as "invalid", but KeyboardInterrupt and
            # SystemExit are no longer swallowed.
            return False

    @staticmethod
    def get_token_username(token):
        """Extract the username claim, stripping the bearer prefix if present.

        NOTE(review): unlike verify_bearer_token, this does not catch decode
        errors -- an invalid token raises; confirm callers expect that.
        """
        if config.token_header in token:
            token = token.split(config.token_header)[1]
        payload = jwt.decode(token, config.secret, algorithms=['HS256'])
        print(payload)
        return payload["username"]
|
988,339 | 0da7db33f7b7c1f09747e234f7d39c01386d547c | from state_machine import State,Event,acts_as_state_machine,after,before,InvalidStateTransition
@acts_as_state_machine
class Process:
    """OS-process lifecycle modeled with the state_machine library.

    States follow the classic seven-state process model; each Event lists
    the states it may fire from, and the @after hooks log every transition.
    """
    created = State(initial=True)
    waiting = State()
    running = State()
    terminated = State()
    blocked = State()
    swapped_out_waiting = State()
    swapped_out_blocked = State()

    # Legal transitions between the states above.
    wait = Event(from_states=(created, running, blocked,
                              swapped_out_waiting), to_state=waiting)
    run = Event(from_states=waiting, to_state=running)
    terminate = Event(from_states=running, to_state=terminated)
    block = Event(from_states=(running, swapped_out_blocked), to_state=blocked)
    swap_wait = Event(from_states=waiting, to_state=swapped_out_waiting)
    swap_block = Event(from_states=blocked, to_state=swapped_out_blocked)

    def __init__(self, name):
        # Display name used by the logging hooks below.
        self.name = name

    @after('wait')
    def wait_info(self):
        print('{} entered waiting mode'.format(self.name))

    @after('run')
    def run_info(self):
        print('{} is running'.format(self.name))

    @after('terminate')
    def terminate_info(self):
        print('{} terminated mode'.format(self.name))

    @after('block')
    def block_info(self):
        print('{} is blocked'.format(self.name))

    @after('swap_wait')
    def swap_wait_info(self):
        print('{} is swapped out and waiting'.format(self.name))

    @after('swap_block')
    def swap_block_info(self):
        print('{} is swapped out and blocked'.format(self.name))
def transition(process, event, event_name):
    """Fire a state-machine event, reporting (not raising) invalid transitions.

    :param process: object with .name and .current_state attributes
    :param event: bound event callable to invoke
    :param event_name: human-readable target/event name for the message
    """
    try:
        event()
    except InvalidStateTransition:
        # Bug fix: the original format string had two placeholders but was
        # given three arguments, so event_name was silently dropped and the
        # message read "transition of <name> to <current_state> failed".
        print('Error: transition of {} from {} to {} failed'.format(
            process.name, process.current_state, event_name))
def state_info(process):
    """Print a one-line trace of *process*'s current state."""
    print('state of %s:%s' % (process.name, process.current_state))
def main():
    """Demo: drive two Process instances through legal and illegal transitions,
    printing each state change and each rejected transition."""
    RUNNING = 'running'
    WAITING = 'waiting'
    BLOCKED = 'blocked'
    TERMINATED = 'terminated'

    p1, p2 = Process('process1'), Process('process2')
    [state_info(p) for p in (p1, p2)]

    print()
    # terminate from 'created' is illegal for p2 and is reported, not raised.
    transition(p1, p1.wait, WAITING)
    transition(p2, p2.terminate, TERMINATED)
    [state_info(p) for p in (p1, p2)]

    print()
    # run from 'created' is illegal for p2 at this point.
    transition(p1, p1.run, RUNNING)
    transition(p2, p2.wait, WAITING)
    [state_info(p) for p in (p1, p2)]

    print()
    transition(p2, p2.run, RUNNING)
    [state_info(p) for p in (p1, p2)]

    print()
    [transition(p, p.block, BLOCKED) for p in (p1, p2)]
    [state_info(p) for p in (p1, p2)]

    print()
    # terminate is only legal from 'running'; both are blocked here.
    [transition(p, p.terminate, TERMINATED) for p in (p1, p2)]
    [state_info(p) for p in (p1, p2)]
if __name__ == '__main__':
main() |
988,340 | 873afe542c4d17b2e530eb07917ff5884ea6b544 | import sys
from parser import *
ST = []
truthTable = ["True"]
class STV:
    """Symbol-table entry: an identifier and the line where it was defined."""
    def __init__(self, ident, lineNum):
        self.ident = ident  # identifier name
        self.lineNum = lineNum  # line number of the definition
def verify(ident, lineNum):
    """Check that identifier *ident* was declared before this use.

    Reads the module-global symbol table ST; on failure prints a diagnostic
    and flips the global truthTable[0] flag to "False" (statSemDriver later
    aborts on that flag).

    :param ident: identifier being referenced
    :param lineNum: line number of the reference, for the error message
    """
    # any() replaces the manual scan-and-flag loop; `global ST` was dropped
    # because the table is only read here, never rebound.
    if not any(ident == entry.ident for entry in ST):
        print("Static Semantic Error: Variable " + ident + " has been accessed before it was defined. Line Number: " + str(lineNum) + "\n")
        truthTable[0] = "False"
def statSem(root):
    """Recursive define-before-use check over the parse tree.

    <vars> nodes are declarations (duplicates are reported); any other node
    carrying an identifier is a use, checked via verify().  Errors set the
    global truthTable[0] to "False" for statSemDriver to act on.

    :param root: parse-tree node with .label, .ident, .lineNumber and
        children .left / .right / .child3 / .child4
    """
    global ST
    global truthTable
    hasId = True
    if(root.ident == None):
        hasId = False
    if(hasId == True and root.label == "<vars>"):
        # Declaration site: scan the symbol table for a prior definition.
        ident = root.ident
        lineNum = root.lineNumber
        for x in range(0, len(ST)):
            #Checking to see if the value we are defining has already been defined.
            if(ident == ST[x].ident):
                print("Static Semantic Error: Data "+ST[x].ident+" has already been defined on line number "+str(ST[x].lineNum)+
                      "\nProgram is attempting to redefine it on line: "+str(lineNum)+" which is not allowed.\n")
                truthTable[0] = "False"
        else:
            # for/else: there is no break above, so this runs whenever the
            # scan completes -- the identifier is recorded even right after
            # a duplicate error.  NOTE(review): a break in the duplicate
            # branch was probably intended; confirm.
            #Creating another value to add to the ST list.
            possSTV = STV(ident, lineNum)
            ST.append(possSTV)
    elif(hasId == True):
        # Use site: the identifier must already be in the symbol table.
        ident = root.ident
        lineNum = root.lineNumber
        verify(ident, lineNum)
    # Recurse into every child that exists.
    if root.left:
        statSem(root.left)
    if root.right:
        statSem(root.right)
    if root.child3:
        statSem(root.child3)
    if root.child4:
        statSem(root.child4)
def statSemDriver(root):
    """Run the static-semantic pass and terminate the program on any error.

    :param root: root node of the parse tree produced by the parser
    """
    global truthTable
    #print("Checking static semantics...\n")
    statSem(root)
    # statSem records failures by flipping the shared flag rather than raising.
    if(truthTable[0] == "False"):
        print("Above issues found with the static semantics. Need to be corrected. Exiting...")
        sys.exit()
    #print("Static Semantics are good.")
|
988,341 | dd7a909c3891ad761d91e8a2fe37cd6fe009c2a4 | #!/usr/bin/env python
"""
recognizer.py is a wrapper for pocketsphinx.
parameters:
~lm - filename of language model
~dict - filename of dictionary
~mic_name - set the pulsesrc device name for the microphone input.
e.g. a Logitech G35 Headset has the following device name: alsa_input.usb-Logitech_Logitech_G35_Headset-00-Headset_1.analog-mono
To list audio device info on your machine, in a terminal type: pacmd list-sources
publications:
~output (std_msgs/String) - text output
services:
~start (std_srvs/Empty) - start speech recognition
~stop (std_srvs/Empty) - stop speech recognition
"""
import roslib; roslib.load_manifest('handsfree_speech')
import rospy
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import pygst
pygst.require('0.10')
gobject.threads_init()
import gst
from std_msgs.msg import String
from std_srvs.srv import *
import os
import commands
class recognizer(object):
""" GStreamer based speech recognizer. """
    def __init__(self):
        """Set up the ROS node, choose an audio source, register the
        start/stop services, and start recognizing immediately when the
        lm/dict parameters are already set."""
        # Start node
        rospy.init_node("recognizer")

        self._device_name_param = "~mic_name"  # Find the name of your microphone by typing pacmd list-sources in the terminal
        self._lm_param = "~lm"
        self._dic_param = "~dict"
        self._hmm_param = "~hmm"

        # Configure mics with gstreamer launch config
        if rospy.has_param(self._device_name_param):
            # Explicit pulseaudio source, resolved from its name to an index.
            self.device_name = rospy.get_param(self._device_name_param)
            self.device_index = self.pulse_index_from_name(self.device_name)
            self.launch_config = "pulsesrc device=" + str(self.device_index)
            rospy.loginfo("Using: pulsesrc device=%s name=%s", self.device_index, self.device_name)
        elif rospy.has_param('~source'):
            # common sources: 'alsasrc'
            self.launch_config = rospy.get_param('~source')
        else:
            self.launch_config = 'gconfaudiosrc'

        rospy.loginfo("Launch config: %s", self.launch_config)

        # Pipeline: source -> convert/resample -> VAD -> pocketsphinx -> fakesink
        self.launch_config += " ! audioconvert ! audioresample " \
                            + '! vader name=vad auto-threshold=true ' \
                            + '! pocketsphinx name=asr ! fakesink'

        # Configure ROS settings
        self.started = False
        rospy.on_shutdown(self.shutdown)
        self.pub = rospy.Publisher('~output', String, queue_size=5)
        rospy.Service("~start", Empty, self.start)
        rospy.Service("~stop", Empty, self.stop)

        if rospy.has_param(self._lm_param) and rospy.has_param(self._dic_param):
            self.start_recognizer()
        else:
            rospy.logwarn("lm and dic parameters need to be set to start recognizer.")
    def start_recognizer(self):
        """Build the GStreamer pipeline, wire pocketsphinx callbacks, load
        the language model / dictionary / acoustic model, and start playing.

        Aborts (with a logged error) when any of the lm/dict/hmm parameters
        is missing.
        """
        rospy.loginfo("Starting recognizer... ")

        self.pipeline = gst.parse_launch(self.launch_config)
        self.asr = self.pipeline.get_by_name('asr')
        # Partial/final hypotheses are forwarded to the main thread via bus
        # messages (see asr_partial_result / asr_result).
        self.asr.connect('partial_result', self.asr_partial_result)
        self.asr.connect('result', self.asr_result)
        #self.asr.set_property('configured', True)
        self.asr.set_property('dsratio', 1)

        # Configure language model
        if rospy.has_param(self._lm_param):
            lm = rospy.get_param(self._lm_param)
        else:
            rospy.logerr('Recognizer not started. Please specify a language model file.')
            return
        if rospy.has_param(self._dic_param):
            dic = rospy.get_param(self._dic_param)
        else:
            rospy.logerr('Recognizer not started. Please specify a dictionary.')
            return
        if rospy.has_param(self._hmm_param):
            hmm = rospy.get_param(self._hmm_param)
        else:
            rospy.logerr('what is param hmm?')
            return

        self.asr.set_property('lm', lm)
        self.asr.set_property('dict', dic)
        self.asr.set_property('hmm', hmm)

        self.bus = self.pipeline.get_bus()
        self.bus.add_signal_watch()
        # Keep the handler id so stop_recognizer can disconnect it later.
        self.bus_id = self.bus.connect('message::application', self.application_message)
        self.pipeline.set_state(gst.STATE_PLAYING)
        self.started = True
def pulse_index_from_name(self, name):
    """Return the PulseAudio source index for the device called *name*.

    Shells out to ``pacmd list-sources`` and extracts the ``index:`` value
    from the line preceding the matching ``name: <...>`` entry.

    Raises an Exception when pacmd fails or no source matches. (The old
    check ``len(output) == 2`` was always true — getstatusoutput always
    returns a 2-tuple — so failures silently returned an empty string.)
    """
    status, index = commands.getstatusoutput(
        "pacmd list-sources | grep -B 1 'name: <" + name
        + ">' | grep -o -P '(?<=index: )[0-9]*'")
    if status == 0 and index:
        return index
    raise Exception("Error. pulse index doesn't exist for name: " + name)
def stop_recognizer(self):
if self.started:
self.pipeline.set_state(gst.STATE_NULL)
self.pipeline.remove(self.asr)
self.bus.disconnect(self.bus_id)
self.started = False
def shutdown(self):
""" Delete any remaining parameters so they don't affect next launch """
for param in [self._device_name_param, self._lm_param, self._dic_param]:
if rospy.has_param(param):
rospy.delete_param(param)
""" Shutdown the GTK thread. """
gtk.main_quit()
def start(self, req):
self.start_recognizer()
rospy.loginfo("recognizer started")
return EmptyResponse()
def stop(self, req):
self.stop_recognizer()
rospy.loginfo("recognizer stopped")
return EmptyResponse()
def asr_partial_result(self, asr, text, uttid):
""" Forward partial result signals on the bus to the main thread. """
struct = gst.Structure('partial_result')
struct.set_value('hyp', text)
struct.set_value('uttid', uttid)
asr.post_message(gst.message_new_application(asr, struct))
def asr_result(self, asr, text, uttid):
    """ Forward result signals on the bus to the main thread. """
    payload = gst.Structure('result')
    payload.set_value('uttid', uttid)
    payload.set_value('hyp', text)
    asr.post_message(gst.message_new_application(asr, payload))
def application_message(self, bus, msg):
    """ Receive application messages from the bus and dispatch them.

    Messages are posted by asr_partial_result/asr_result from the
    GStreamer thread; here they are handled on the main thread.
    """
    msgtype = msg.structure.get_name()
    if msgtype == 'partial_result':
        self.partial_result(msg.structure['hyp'], msg.structure['uttid'])
    elif msgtype == 'result':
        # elif: a structure has exactly one name, so the branches are
        # mutually exclusive (the original re-tested unconditionally).
        self.final_result(msg.structure['hyp'], msg.structure['uttid'])
def partial_result(self, hyp, uttid):
""" Delete any previous selection, insert text and select it. """
rospy.logdebug("Partial: " + hyp)
def final_result(self, hyp, uttid):
    """ Publish the final lower-cased hypothesis on the output topic. """
    out = String()
    out.data = str(hyp.lower())
    rospy.loginfo(out.data)
    self.pub.publish(out)
if __name__ == "__main__":
start = recognizer()
gtk.main()
|
988,342 | 16fafa4284e597a2a01e213ced28df989d564c9d | from django.contrib.auth.models import User
from rest_framework import generics, permissions
from oauth2_provider.contrib.rest_framework import (TokenHasReadWriteScope, TokenHasScope)
from api.models import Product, Address
from api.serializers import ProdListSerializer, ManufacturerSerializer, AdressSerializer, ProdDetailedSerializer, UserSerializer
class Productlist(generics.ListAPIView):
    """List products, optionally narrowed by query parameters.

    Supported filters: ``manufacturerCode``, ``itemno``, ``colour``
    (exact match), ``LTPrice`` (price <=) and ``GTPrice`` (price >=).
    Missing or empty parameter values are ignored, as before.
    """
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
    queryset = Product.objects.all()
    serializer_class = ProdListSerializer

    # Query-parameter name -> ORM lookup, applied in this order.
    # Replaces five copy-pasted if/if blocks with one data-driven loop.
    _FILTERS = {
        'manufacturerCode': 'manufacturerCode',
        'itemno': 'itemno',
        'LTPrice': 'price__lte',
        'GTPrice': 'price__gte',
        'colour': 'colour',
    }

    def get(self, request, *args, **kwargs):
        params = self.request.query_params
        for param, lookup in self._FILTERS.items():
            value = params.get(param, '')
            if value:  # skip absent and empty parameters
                self.queryset = self.queryset.filter(**{lookup: value})
        return super().get(request, *args, **kwargs)
class ProdDetailed(generics.ListAPIView):
    """Detailed product view, optionally filtered by item number."""
    permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
    queryset = Product.objects.all()
    serializer_class = ProdDetailedSerializer

    def get(self, request, *args, **kwargs):
        # A missing or empty 'itemno' parameter leaves the queryset untouched.
        itemno = self.request.query_params.get('itemno', '')
        if itemno:
            self.queryset = self.queryset.filter(itemno=itemno)
        return super().get(request, *args, **kwargs)
class Manufacturerlist(generics.ListAPIView):
permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
queryset = Product.objects.values('manufacturerCode').distinct()
serializer_class = ManufacturerSerializer
class AddressList(generics.ListCreateAPIView):
permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
queryset = Address.objects.all()
serializer_class = AdressSerializer
class Login(generics.ListCreateAPIView):
permission_classes = [permissions.IsAuthenticated, TokenHasReadWriteScope]
queryset = User.objects.all()
serializer_class = UserSerializer
def post(self, request, *args, **kwargs):
return super().post(request, *args, **kwargs) |
988,343 | 1e95b27ce7d7d91d1c0b02a5ad1dc1a6aa715772 | from main.fusioncharts import FusionCharts
from main.models import *
from main.apis import get_customer_used,get_customer_money
def customer_used_chart(customer=None,r = 12):
times = Month.objects.all()
top_times = times[:r]
top_times = top_times.reverse()
labels = [{'label': '%s / %s'%(time.month,time.year)} for time in top_times]
top_useds = [{'value': get_customer_used(customer=customer,month=time.month,year=time.year)}
for time in top_times
]
source = {}
source["chart"] = {
"caption": 'Thong ke su dung nuoc',
"subcaption": '12 thang gan nhat',
"xaxisname": "Thang",
"yaxisname": "So nuoc",
# "numberprefix": "VND",
"theme": "ocean"
}
source["categories"] = [{
"category": labels
}]
source["dataset"] = [{
"seriesname": "Tong so nuoc da su dung",
"data": top_useds
}
]
chart = FusionCharts("mscombi2d", "customer_used_chart", "100%", 400, "used_chart", "json", source)
return chart
def customer_money_chart(customer=None,r = 12):
times = Month.objects.all()
top_times = times[:r]
top_times = top_times.reverse()
labels = [{'label': '%s / %s'%(time.month,time.year)} for time in top_times]
top_useds = [{'value': get_customer_money(customer=customer,month=time.month,year=time.year)}
for time in top_times
]
source = {}
source["chart"] = {
"caption": 'Thong ke tien nuoc',
"subcaption": '12 thang gan nhat',
"xaxisname": "Thang",
"yaxisname": "Tien nuoc",
# "numberprefix": "VND",
"theme": "ocean"
}
source["categories"] = [{
"category": labels
}]
source["dataset"] = [{
"seriesname": "So tien phai chi tra",
"data": top_useds
}
]
chart = FusionCharts("mscombi2d", "customer_money_chart", "100%", 400, "money_chart", "json", source)
return chart
|
988,344 | d5ded2130fe21dbac19bc4d6587dd1f2626fe84c | dic = {1,2,3,4,7,5}
sum = 0
for i in dic:
sum = sum + i
print(sum)
|
988,345 | 3e266941ac2420c79cc51da63d1b82470156d81f |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import csv
import sys
tmp = []
with open('jk3.xyz') as csvfile:
r = csv.reader(csvfile, delimiter=' ')
# header = next(r)
for line in r:
p = list(map(float, line)) #-- convert each str to a float
tmp.append(p)
pts = np.array(tmp)
cm = np.zeros(pts.shape[0])
for i in range(pts.shape[0]):
cm[i] = abs(pts[i][3] - pts[i][2])
# print(cm.reshape(1000, 1))
# sys.exit()
# fig, ax = plt.subplots(1, 2)
fig, ax = plt.subplots(2, 2)
im0 = mpimg.imread('/Users/hugo/teaching/terrainbook/interpol/figs/jackknife/dem_01_preview.tiff')
imgplot = ax[0][0].imshow(im0)
im1 = ax[0][1].scatter(pts[:,0], pts[:,1], s=10, c=pts[:,2], alpha=0.5, cmap='Reds')
im2 = ax[1][0].scatter(pts[:,0], pts[:,1], s=10, c=cm, alpha=0.5, cmap='Reds')
im3 = ax[1][1].scatter(pts[:,2], pts[:,3], s=10, c=cm, alpha=0.5, cmap='Reds')
# ax[0].legend()
# ax.grid(True)
# ax[0].set_xlabel(r'$x$', fontsize=10)
# ax[0].set_ylabel(r'$y$', fontsize=10)
# ax[0].set_title('RMSE from IDW')
ax[0][0].get_xaxis().set_visible(False)
ax[0][0].get_yaxis().set_visible(False)
ax[0][1].get_xaxis().set_visible(False)
ax[0][1].get_yaxis().set_visible(False)
ax[1][0].get_xaxis().set_visible(False)
ax[1][0].get_yaxis().set_visible(False)
ax[1][1].get_xaxis().set_visible(False)
ax[1][1].get_yaxis().set_visible(False)
# fig.colorbar(im1)
cbar = fig.colorbar(im2, ax=ax.ravel().tolist(), shrink=0.8)
cbar = fig.colorbar(im1, ax=ax.ravel().tolist(), shrink=0.8)
# plt.show()
plt.savefig("total.pdf", bbox_inches='tight')
# import numpy as np
# import matplotlib.pyplot as plt
# import csv
# import sys
# tmp = []
# with open('jk3.xyz') as csvfile:
# r = csv.reader(csvfile, delimiter=' ')
# # header = next(r)
# for line in r:
# p = list(map(float, line)) #-- convert each str to a float
# tmp.append(p)
# pts = np.array(tmp)
# cm = np.zeros(pts.shape[0])
# for i in range(pts.shape[0]):
# cm[i] = abs(pts[i][3] - pts[i][2])
# # print(cm.reshape(1000, 1))
# # sys.exit()
# fig, ax = plt.subplots()
# plt.scatter(pts[:,0], pts[:,1], c=cm, alpha=0.5, cmap='Reds')
# # plt.scatter(pts[:,2], pts[:,3])
# # plt.legend()
# # plt.grid(True)
# # plt.set_xlabel(r'$x$', fontsize=15)
# # plt.set_ylabel(r'$y$', fontsize=15)
# # plt.set_title('RMSE from IDW')
# plt.grid(True)
# # fig.tight_layout()
# plt.colorbar()
# plt.show()
# # plt.savefig("foo.pdf", bbox_inches='tight') |
988,346 | fa8a0890dfb994c6fc210d652b148aaac555463c | from __future__ import print_function
import os
from bleualign.align import Aligner
if __name__ == '__main__':
current_path = os.path.dirname(os.path.abspath(__file__))
options = {
# source and target files needed by Aligner
# they can be filenames, arrays of strings or io objects.
'srcfile':os.path.join(current_path, '..', 'eval', 'eval1989.de'),
'targetfile': os.path.join(current_path, '..', 'eval', 'eval1989.fr'),
# translations of srcfile and targetfile, not influenced by 'factored'
# they can be filenames, arrays of strings or io objects, too.
'srctotarget': [os.path.join(current_path, '..', 'eval', 'eval1957.europarlfull.fr')],
'targettosrc': [],
# passing filenames or io object for them in respectly.
# if not passing anything or assigning None, they will use StringIO to save results.
'output-src': None, 'output-target': None,
# other options ...
}
a = Aligner(options)
a.mainloop()
output_src, output_target = a.results()
# output_src, output_target is StringIO because options['output-src'] is None
src = output_src.getvalue() # StringIO member function
trg = output_target.getvalue().splitlines() # array of string
print('output_src.getvalue()')
print(src[:30])
print()
print('output_target.getvalue().splitlines()')
print(trg[:3])
|
988,347 | a183cd565e917668b11ffd6321d09d7e1e355460 | """
Copyright (c) 2023, Florian GARDIN
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
"""
from .constants import *
class Melody:
"""
Main class to write a melody in MusicLang. A melody is a serie of notes
Examples
--------
For example here is an example of a melody that plays the seven note of a scale :
>>> from musiclang.write.library import *
>>> melody = s0 + s1 + s2 + s3 + s4 + s5 + s6
>>> melody
s0 + s1 + s2 + s3 + s4 + s5 + s6
You can also create it using the melody class :
>>> from musiclang import Melody
>>> melody = Melody([s0, s1, s2, s3, s4, s5, s6])
>>> melody
s0 + s1 + s2 + s3 + s4 + s5 + s6
"""
def __init__(self, notes, nb_bars=1, tags=None):
    """Create a melody from a list of notes.

    Parameters
    ----------
    notes: list[Note] or Note
        Notes of the melody; a single Note is wrapped in a one-element list.
    nb_bars: int
        Number of bars the melody spans (default 1).
    tags: iterable[str] or None
        Optional tags attached to the melody.
    """
    from .note import Note
    if isinstance(notes, Note):
        # BUG FIX: previously `notes = []`, which silently discarded the
        # note and produced an empty melody. __eq__ in this same class
        # wraps a Note as Melody([other]), confirming the intent.
        notes = [notes]
    self.notes = notes
    self.tags = set(tags) if tags is not None else set()
    self.nb_bars = nb_bars
def has_tag(self, tag):
"""
Check if the tag exists for this object
Returns a copy of the object
Parameters
----------
tag: str
Returns
-------
melody: Melody
"""
return tag in self.tags
def add_tag(self, tag):
"""
Add a tag to this object
Returns a copy of the object
Parameters
----------
tag: str
Returns
-------
melody: Melody
"""
cp = self.copy()
cp.tags.add(tag)
return cp
def add_tags(self, tags):
"""
Add several tags to the object.
Returns a copy of the object
Parameters
----------
tags: List[str]
tags to add
Returns
-------
melody: Melody
"""
cp = self.copy()
cp.tags = cp.tags.union(set(tags))
return cp
def remove_tags(self, tags):
"""
Remove several tags from the object.
Returns a copy of the object
Parameters
----------
tags: List[str]
Returns
-------
melody: Melody
"""
cp = self.copy()
cp.tags = cp.tags - set(tags)
return cp
def remove_tag(self, tag):
"""
Remove a tag from this object
Returns a copy of the object
Parameters
----------
tag: str
Returns
-------
melody: Melody
"""
cp = self.copy()
cp.tags.remove(tag)
return cp
def clear_tags(self):
"""
Clear all tags from this object
Returns a copy of the object
Parameters
----------
tag: str
Returns
-------
melody: Melody
"""
cp = self.copy()
cp.tags = set()
return cp
def set_amp(self, amp):
return Melody([n.set_amp(amp) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def set_duration(self, duration):
return self.augment(duration / self.duration)
def to_drum(self):
return Melody([n.to_drum() for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def to_melody(self):
return self.copy()
def remove_accidents(self):
return Melody([n.remove_accidents() for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def __getstate__(self):
return self.__dict__
def __setstate__(self, d):
self.__dict__.update(d)
def __add__(self, other):
from .note import Note
if other is None:
return self.copy()
if isinstance(other, Note):
return Melody(self.notes + [other], nb_bars=self.nb_bars, tags=set(self.tags))
if isinstance(other, Melody):
return Melody(self.notes + other.notes, nb_bars=self.nb_bars, tags=set(self.tags).union(other.tags))
def __or__(self, other):
from .note import Note
if other is None:
return self.copy()
if isinstance(other, Note):
assert other.duration == self.duration / self.nb_bars, f"Invalid duration of bar {other}: {other.duration}q, expected : {self.duration / self.nb_bars}q/bar"
return Melody(self.notes + [other], nb_bars=self.nb_bars + 1, tags=set(self.tags))
if isinstance(other, Melody):
assert other.duration / other.nb_bars == self.duration / self.nb_bars, \
f"Invalid duration of bar {other} : {other.duration}q with {other.nb_bars} bars, expected : {self.duration / self.nb_bars}q/bar"
return Melody(self.notes + other.notes, nb_bars=self.nb_bars + other.nb_bars, tags=set(self.tags).union(other.tags))
else:
raise Exception(f'Invalid type when adding melody, {other.__class__}')
def __hash__(self):
return hash(self.__repr__())
def __eq__(self, other):
from .note import Note
if isinstance(other, Note):
return self.__eq__(Melody([other]))
return isinstance(other, Melody) and str(other) == str(self)
def to_absolute_note(self, chord):
return Melody([n.to_absolute_note(chord) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def get_pitches(self, chord, track_idx, time, last_note_array=None):
"""
Parameters
----------
chord :
track_idx :
time :
last_note_array :
(Default value = None)
Returns
-------
"""
pitches = []
for note in self.notes:
result = note.pitch(chord, track_idx, time, last_note_array)
if not last_note_array[SILENCE] and not last_note_array[CONTINUATION]:
last_note_array = result
pitches.append(result)
return pitches
def decompose_duration(self):
""" """
return Melody(sum([note.decompose_duration() for note in self.notes], None).notes, nb_bars=self.nb_bars, tags=set(self.tags))
def replace_pitch(self, to_replace, new_note):
"""
Parameters
----------
to_replace :
new_note :
Returns
-------
"""
new_melody = []
for note in self.notes:
to_add = note.copy()
if note.val == to_replace.val and note.type == to_replace.type:
to_add.type = new_note.type
to_add.val = new_note.val
new_melody.append(to_add)
return sum(new_melody, None)
def replace(self, to_replace, new_note, **kwargs):
"""
Parameters
----------
to_replace :
new_note :
Returns
-------
"""
return Melody([n.replace(to_replace, new_note, **kwargs) for n in self.notes],
nb_bars=self.nb_bars, tags=set(self.tags))
def set_tempo(self, tempo):
new_melody = self.copy()
new_melody.notes[0] = new_melody.notes[0].set_tempo(tempo)
return new_melody
def accelerando(self, start, end):
"""
Accelerate or decelerate melody from start tempo to end tempo
Parameters
----------
start: int
Starting tempo
end: int
Ending tempo
"""
new_melody = self.copy()
L = len(new_melody)
for i in range(len(new_melody)):
local_tempo = int(end * (i / L) + start * ((L - i)/L))
new_melody.notes[i] = new_melody.notes[i].set_tempo(local_tempo)
return new_melody
def realize_tags(self, last_note=None, final_note=None):
new_melody = None
for idx, note in enumerate(self.notes):
last_note = self.notes[idx - 1] if idx - 1 >= 0 else last_note
next_note = self.notes[idx + 1] if idx + 1 < len(self.notes) else None
if idx == len(self.notes) - 1:
next_note = final_note
new_melody += note.realize_tags(last_note=last_note, next_note=next_note)
new_melody.nb_bars = self.nb_bars
new_melody.tags = self.tags
return new_melody
@property
def pedal_on(self):
new_melody = self.copy()
new_melody.notes[0] = new_melody.notes[0].pedal_on
return new_melody
@property
def pedal(self):
"""
Apply pedal on first note and release on last
"""
new_melody = self.copy()
new_melody.notes[0] = new_melody.notes[0].pedal_on
new_melody.notes[-1] = new_melody.notes[-1].pedal_off
return new_melody
@property
def pedal_off(self):
new_melody = self.copy()
new_melody.notes[0] = new_melody.notes[0].pedal_off
return new_melody
def remove_effects(self):
"""
Remove pedals and tempo change
Returns
-------
"""
return Melody([n.remove_effects() for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def remove_tempo(self):
"""
Remove pedals and tempo change
Returns
-------
"""
return Melody([n.remove_tempo() for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def remove_pedal(self):
"""
Remove pedals and tempo change
Returns
-------
"""
return Melody([n.remove_pedal() for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def to_sequence(self, chord, inst):
"""Transform in a list of [(start_time, end_time, pitch, self)]
:return:
Parameters
----------
chord :
inst :
Returns
-------
"""
time = 0
sequence = []
for note in self.notes:
pitch = chord.to_pitch(note)
start = time
end = time + note.duration
sequence.append([start, end, pitch, chord.to_chord(), inst, note])
time += note.duration
return sequence
def to_standard_note(self, chord):
return Melody([n.to_standard_note(chord) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def clear_note_tags(self):
return sum([n.clear_note_tags() for n in self.notes], None)
def to_extension_note(self, chord):
return Melody([n.to_extension_note(chord) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def to_chord_note(self, chord):
return Melody([n.to_chord_note(chord) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def to_code(self):
""" """
return " + ".join([n.to_code() for n in self.notes])
def convert_to_drum_note(self, chord):
return Melody([n.convert_to_drum_note(chord) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
@property
def is_continuation(self):
""" """
return all([n.is_continuation for n in self.notes])
@property
def starts_with_absolute_note(self):
""" """
if len(self.notes) > 0:
return self.notes[0].starts_with_absolute_note
else:
return False
@property
def had_absolute_note(self):
""" """
return any([n.starts_with_absolute_note for n in self.notes])
@property
def starts_with_absolute_or_silence(self):
""" """
if len(self.notes) > 0:
return self.notes[0].starts_with_absolute_or_silence
else:
return False
@property
def starts_with_note(self):
""" """
if len(self.notes) > 0:
return self.notes[0].starts_with_note
else:
return False
def augment(self, value):
"""
Parameters
----------
value :
Returns
-------
"""
return Melody([n.augment(value) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
@property
def duration(self):
""" """
return sum([n.duration for n in self.notes])
def __iter__(self):
return self.notes.__iter__()
def __getitem__(self, item):
return Melody(self.notes[item])
def __radd__(self, other):
if other is None:
return self.copy()
def __and__(self, other):
return Melody([n & other for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def __matmul__(self, other):
# Apply a function to each note
return Melody([n @ other for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def __mul__(self, other):
from .note import Note
if isinstance(other, int):
melody_copy = self.copy()
return Melody(melody_copy.notes * other, nb_bars=self.nb_bars, tags=set(self.tags))
if isinstance(other, Note):
return self * Melody([other.copy()], nb_bars=self.nb_bars, tags=set(self.tags))
else:
raise Exception('Cannot multiply Melody and ' + str(type(other)))
def __len__(self):
return len(self.notes)
def o(self, octave):
"""
Parameters
----------
octave :
Returns
-------
"""
return Melody([n.o(octave) for n in self.notes], nb_bars=self.nb_bars, tags=set(self.tags))
def __hasattr__(self, item):
try:
self.__getattr__(item)
except:
return False
return True
def __getattr__(self, item):
    """Broadcast attribute access to every note, returning a new Melody.

    E.g. ``melody.mp`` builds ``Melody([n.mp for n in melody.notes])``.
    Raises AttributeError when the broadcast fails, so hasattr()/getattr()
    behave normally.
    """
    try:
        return Melody([getattr(n, item) for n in self.notes],
                      nb_bars=self.nb_bars, tags=set(self.tags))
    except Exception:
        # Narrowed from a bare except: no longer swallows
        # KeyboardInterrupt/SystemExit. Any ordinary failure is
        # normalized to AttributeError, preserving the old contract.
        raise AttributeError(f"Not existing property : {item}")
def copy(self):
""" """
return Melody([s.copy() for s in self.notes], tags=set(self.tags), nb_bars=self.nb_bars)
def __repr__(self):
return self.to_code() |
988,348 | ef2dc3ca51f1027bc4c953fa212fec484defe9bb | story = "Harry is good.\nHe\tis ve\\ry good"
print(story)
# \n stands for new line
# \t stands for an extra step
# \\ add one backslash |
988,349 | 7cc0e354ee70591104e4b3c5664da25fdc49a855 | import pandas as pd
import csv
data_dir = "data/province_bps_crosswalk/"
rows = [["province_code", "province"]]
with open(data_dir+"pasted.txt", "r") as file:
lines = file.readlines()
line_num = 1
for line in lines:
if line_num > 8: # roughly with eyes
line = line.strip()
if line.startswith("21"):
num = 21
name = "twenty one"
elif line.startswith("76"):
num = 76
name = "West Sulawesi"
else:
if line.startswith("\\"):
line = line[16:-20].strip() # roughly with eyes
data = line.split(".", 1)
if len(data) < 2:
continue
num = data[0]
name = data[1].split(" ", 1)[0]
rows.append([num, name])
line_num+=1
with open(data_dir+"crosswalk.csv", "w+") as file:
writer = csv.writer(file)
writer.writerows(rows)
df = pd.read_csv(data_dir+"crosswalk.csv")
df.to_stata(data_dir+"crosswalk.dta")
|
988,350 | 317b40ae8161e838eca364679104dd3e4212ecf1 | from main.utils.graph import get_connected_components
from main.utils.read_csv import read_relationship_csv, read_grid_csv
import main.utils.data_pathnames as data_pathnames
import random
import os
import pickle
def find_non_ancestor_same_comp_negatives(dict_grid2comp, dict_comp2grid, dict_comp2ancestors):
    '''
    Finds negatives which are non ancestors in the same component for each grid id
    :param dict_grid2comp: dictionary of grid_id to comp_idx
    :param dict_comp2grid: dictionary of comp_idx to grid_id
    :param dict_comp2ancestors: dictionary of comp idx to (num_grid, dictionary grid id to ancestors)
    :return: dictionary of grid_id to negatives (capped at 1000 random samples)
    '''
    dict_grid2negatives = {}
    for grid_id, comp_idx in dict_grid2comp.items():
        ancestors = set(dict_comp2ancestors[comp_idx][1][grid_id])
        # Every other grid in the same component that is not an ancestor
        # of grid_id (and not grid_id itself) is a valid negative.
        candidates = set(dict_comp2grid[comp_idx]) - ancestors - {grid_id}
        negatives = list(candidates)
        if len(negatives) > 1000:
            negatives = random.sample(negatives, 1000)
        dict_grid2negatives[grid_id] = negatives
    return dict_grid2negatives
def find_ancestor(parent_of, grid_id):
    '''
    Finds all the ancestors of a node (parents, grandparents, ...), depth-first.
    :param parent_of: dictionary mapping a grid_id to the list of its parent grid_ids
    :param grid_id: grid_id to get ancestors of
    :return: list of ancestor grid_ids; may contain duplicates when the
             ancestry graph is diamond-shaped, matching the original behavior
    '''
    # Removed the dead local `old_parent_grid_id = str(parent_grid_id)`;
    # it was assigned and never read.
    list_ancestors = []
    for parent_grid_id in parent_of.get(grid_id, []):
        list_ancestors.append(parent_grid_id)
        list_ancestors.extend(find_ancestor(parent_of, parent_grid_id))
    return list_ancestors
def get_all_ancestors(parent_of, dict_comp2grid):
    '''
    Gets all the ancestors for every node
    :param parent_of: dictionary of parents to children
    :param dict_comp2grid: dictionary of component to grid_id
    :return: dictionary mapping comp_idx to (total ancestor count, dict of grid_id to its ancestors)
    :return: list of comp_idx whose components hold more than 1000 ancestors
    '''
    dict_comp2ancestors = {}
    list_large_comp_idx = []
    for comp_idx, list_grid_id in dict_comp2grid.items():
        if len(list_grid_id) <= 1:
            # Singleton component: its only node has no ancestors.
            dict_comp2ancestors[comp_idx] = (0, {list_grid_id[0]: []})
            continue
        dict_id2ancestors = {gid: find_ancestor(parent_of, gid) for gid in list_grid_id}
        num_anc = sum(len(anc) for anc in dict_id2ancestors.values())
        dict_comp2ancestors[comp_idx] = (num_anc, dict_id2ancestors)
        # Components with very many ancestors are tracked separately.
        if num_anc > 1000:
            list_large_comp_idx.append(comp_idx)
    return dict_comp2ancestors, list_large_comp_idx
def init_data_dict():
'''
Init data dict that will be reused for all folds
'''
dict_id2name = read_grid_csv()
parent_of, child_of = read_relationship_csv()
comp_dict = os.path.join("data", "dict", "comp_dict")
if not os.path.exists(comp_dict):
os.makedirs(comp_dict)
dict_grid2comp, dict_comp2grid = get_connected_components(dict_id2name, parent_of, child_of)
dict_comp2ancestors, large_comp_idx = get_all_ancestors(parent_of, dict_comp2grid)
dict_grid2non_ancestor_negative_same_comp = find_non_ancestor_same_comp_negatives(dict_grid2comp, dict_comp2grid, dict_comp2ancestors)
pickle.dump(dict_grid2comp, open(data_pathnames.DICT_GRID2COMP, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(dict_comp2grid, open(data_pathnames.DICT_COMP2GRID, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(dict_comp2ancestors, open(data_pathnames.DICT_COMP2ANCESTORS, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(large_comp_idx, open(data_pathnames.LARGE_COMP_IDX, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(dict_grid2non_ancestor_negative_same_comp, open(data_pathnames.DICT_GRID2NONANCESTOR_SAME_COMP_NEGATIVE, 'wb'),
protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
data_pathnames.init()
init_data_dict() |
988,351 | f334ace0f62634e2103b7197416decb839d10be5 | #!/usr/bin/env python3
"""
Test the performance of the 2D costmap_monitor layer by sending a very large
get plan cost service request. This is a useful baseline for comparing its
performance with the 3D version.
"""
import sys
import math
import random
import rospy
import tf2_ros
import tf2_geometry_msgs
import tf.transformations
import costmap_monitor_msgs.srv
import geometry_msgs.msg
if __name__ == "__main__":
    rospy.init_node("test_costmap3d_get_plan_cost_performance", anonymous=True)

    # Resolve the robot's current pose in the odom frame.
    tfBuffer = tf2_ros.Buffer()
    tfListener = tf2_ros.TransformListener(tfBuffer)
    try:
        xform = tfBuffer.lookup_transform('odom', 'base_footprint',
                rospy.Time(0.0), rospy.Duration(10.0))
    except tf2_ros.TransformException as e:
        rospy.logerr("Cannot determine base footprint transform")
        sys.exit(1)

    get_cost_srv = rospy.ServiceProxy("/move_base/local_costmap/costmap_monitor/get_plan_cost",
                                      costmap_monitor_msgs.srv.GetPlanCostService)
    req = costmap_monitor_msgs.srv.GetPlanCostServiceRequest()
    req.lazy = False
    req.header.frame_id = "odom"
    req.padding = 0.0

    # Robot footprint polygon as (x, y) vertices.
    # BUG FIX: the original appended ONE shared Point instance 16 times and
    # kept mutating it, so every footprint vertex ended up equal to the
    # last assignment. Each vertex now gets its own Point.
    footprint_xy = [
        (.372, 0.0), (.368, -.178), (.345, -.235), (.293, -.272),
        (.0114, -.285), (-0.262, -0.272), (-0.312, -0.242), (-0.343, -0.192),
        (-0.350, 0.0), (-0.343, 0.192), (-0.312, 0.242), (-0.262, 0.272),
        (0.0114, 0.285), (0.293, 0.272), (0.345, 0.235), (0.368, 0.178),
    ]
    for x, y in footprint_xy:
        pt = geometry_msgs.msg.Point()
        pt.x = x
        pt.y = y
        pt.z = 0.0
        req.footprint.append(pt)

    # Build a very large batch of random poses around the robot to stress
    # the service. Each pose is created fresh (no aliasing) and transformed
    # from base_footprint into odom.
    for i in range(0, 100000):
        pose = geometry_msgs.msg.PoseStamped()
        pose.header.frame_id = "base_footprint"
        pose.pose.position.x = random.uniform(-4.0, 4.0)
        pose.pose.position.y = random.uniform(-4.0, 4.0)
        pose.pose.position.z = 0.0
        theta = random.uniform(-math.pi, math.pi)
        q = tf.transformations.quaternion_from_euler(0.0, 0.0, theta)
        pose.pose.orientation.x = q[0]
        pose.pose.orientation.y = q[1]
        pose.pose.orientation.z = q[2]
        pose.pose.orientation.w = q[3]
        req.poses.append(tf2_geometry_msgs.do_transform_pose(pose, xform))

    res = get_cost_srv(req)
    rospy.loginfo("Result: " + str(res))
|
988,352 | 252b5415aeb413e64e87b927c93f63474bf5ce65 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 5 20:59:18 2020
@author: Odegov Ilya
"""
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
parent_dir = os.path.dirname(current_dir)
if parent_dir not in sys.path:
sys.path.append(parent_dir)
from time import time
from transformers import pipeline
from utils import (
set_seed, create_tokenizer, create_model
)
def _create_pipiline(tokenizer, model, device, framework):
    """Create a text-generation pipeline.

    :param tokenizer: tokenizer object for the pipeline.
    :param model: model object for the pipeline.
    :param device: device index (-1 for CPU).
    :param framework: "pt" or "tf". BUG FIX: this argument was previously
        accepted but ignored — the pipeline was always built with
        framework="pt".
    """
    tg_params = dict(
        task="text-generation", tokenizer=tokenizer, model=model,
        framework=framework, device=device,
    )
    text_generation_pipiline = pipeline(**tg_params)
    return text_generation_pipiline
class TextGenerator:
    """Text generator pipiline.

    Thin wrapper around a transformers text-generation pipeline; resolves
    tokenizer/model via the project's utils helpers.
    """
    def __init__(self, tokenizer, model, device=-1, framework="pt"):
        """Init class object.

        tokenizer and model are passed through utils.create_tokenizer /
        utils.create_model before building the pipeline. device=-1
        selects CPU.
        """
        # Seed RNGs from wall-clock time so successive runs generate
        # different continuations.
        set_seed(int(time()))
        tokenizer = create_tokenizer(tokenizer)
        model = create_model(model)
        self._text_generation_pipiline = _create_pipiline(
            tokenizer, model, device, framework)

    def __call__(self, seqs):
        """Call class object.

        Accepts a single string or a list of strings; returns the
        pipeline's generation results.
        NOTE(review): max_length is measured in tokens by transformers,
        but len(seq) here counts characters — confirm the "2x longest
        input" heuristic is intended.
        """
        seqs = [seqs] if isinstance(seqs, str) else seqs
        max_length = max(map(len, seqs)) * 2
        return self._text_generation_pipiline(seqs, max_length=max_length)
def create_generator(tokenizer, model, framework="pt", device=-1):
    """Build and return a ready-to-use TextGenerator."""
    return TextGenerator(tokenizer=tokenizer, model=model,
                         framework=framework, device=device)
def run():
"""Start script."""
gpt = "sberbank-ai/rugpt3large_based_on_gpt2"
generator = create_generator(gpt, gpt)
print(generator("")[0]["generated_text"])
if __name__ == "__main__":
run()
|
988,353 | 3a97cbdbc6e4cef8fced45e4b97c1fce34bf20d4 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import copy
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetOrganizationsOrganizationResult',
]
@pulumi.output_type
class GetOrganizationsOrganizationResult(dict):
    """A single organization entry returned by the organizations lookup.

    Auto-generated by the Pulumi Terraform Bridge (see file header) —
    do not edit by hand.
    """
    def __init__(__self__, *,
                 id: str,
                 organization_id: str,
                 organization_name: str):
        """
        :param str id: The ID of the Organization.
        :param str organization_id: The first ID of the resource.
        :param str organization_name: Company name.
        """
        pulumi.set(__self__, "id", id)
        pulumi.set(__self__, "organization_id", organization_id)
        pulumi.set(__self__, "organization_name", organization_name)
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The ID of the Organization.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="organizationId")
    def organization_id(self) -> str:
        """
        The first ID of the resource.
        """
        return pulumi.get(self, "organization_id")
    @property
    @pulumi.getter(name="organizationName")
    def organization_name(self) -> str:
        """
        Company name.
        """
        return pulumi.get(self, "organization_name")
|
988,354 | 7fdb9d0813784f341760f30f7a9547884975133c | import unittest
class TestEnvs(unittest.TestCase):
    """Tests for the environment generation methods."""
    def test_grid_env(self):
        """Placeholder for the grid environment test (not yet implemented)."""
        pass
    def test_merge_env(self):
        """Placeholder for the merge environment test (not yet implemented)."""
        pass
    def test_ring_env(self):
        """Placeholder for the ring environment test (not yet implemented)."""
        pass
class TestFlowDensityExperiment(unittest.TestCase):
    """Tests for the FlowDensityExperiment class."""
    def test_run(self):
        """Placeholder for the run() test (not yet implemented)."""
        pass
    def test_plotter(self):
        """Placeholder for the plotter test (not yet implemented)."""
        pass
class TestDataGenerationExperiment(unittest.TestCase):
    """Tests for the DataGenerationExperiment class."""
    def test_run(self):
        """Placeholder for the run() test (not yet implemented)."""
        pass
class TestGenData(unittest.TestCase):
    """Tests for data generation script."""
    def test_run_grid(self):
        """Placeholder for the grid data-generation test (not yet implemented)."""
        pass
    def test_run_merge(self):
        """Placeholder for the merge data-generation test (not yet implemented)."""
        pass
    def test_run_ring(self):
        """Placeholder for the ring data-generation test (not yet implemented)."""
        pass
if __name__ == '__main__':
unittest.main()
|
988,355 | 852cdcb1fd42958f436af3afe6972159f7585509 | from analyse import read_data
import numpy as np
import matplotlib.pyplot as plt
import glob
def main():
    """Entry point: render the Jupiter-mass stability plots."""
    plot_jupiter_stability()
def plot_jupiter_stability():
    """Plot Jupiter's distance to origin and total system energy over time
    for simulation runs with different Jupiter masses (out/EJS_unfix/*.bin)."""
    filenames = [f for f in glob.glob('out/EJS_unfix/*') if f.endswith('.bin')]
    fig, [ax1,ax2] = plt.subplots(2, sharex=True, figsize=(6,4))
    # Physical constants used to convert simulation units (AU, yr, solar
    # masses) into SI.
    AU = 149597870700
    year = 31556926
    sun_mass = 1.99e30
    # Energy conversion factor: (AU/yr)^2 * M_sun -> Joules.
    Astrojoule = (AU/year)**2 * sun_mass
    n_values = []  # NOTE(review): collected but never used below
    for i, f in enumerate(filenames):
        # Extract the Jupiter-mass multiplier from the file name.
        # NOTE(review): assumes the multiplier starts with the digit '1'
        # (e.g. '10', '1000'); anything else falls back to 'Normal'.
        temp = f.split('.')[0]
        num = temp[temp.find('1'):]
        print(num)
        if num[0] != '1':
            num = 'Normal'
        data = read_data(f, None)
        planet = data["pos"][:,1]  # presumably column 1 is Jupiter — confirm
        n_values.append(data["steps_per_year"])
        time = data["time"]
        kinetic, potential, total, angmom = data['energies']
        x, y, z = planet.T
        r = np.sqrt(x**2 + y**2 + z**2)
        ax1.plot(time, r, label='Jupiter mass = %s' %num)
        ax2.plot(time, total*Astrojoule, label='Jupiter mass = %s' %num)
    ax1.legend()
    ax2.set_title('Total energy (Jupiter + Earth)')
    ax1.set_ylabel('distance to origin [au]')
    ax2.set_xlabel("Time [yr]")
    ax2.set_ylabel("Energy [Joule]")
    plt.tight_layout()
    # Number the output file to avoid overwriting earlier figures.
    num = len(glob.glob('results/energyEarthJupiter*.pdf'))
    plt.savefig('results/energyEarthJupiter%d.pdf' %num)
    plt.show()
if __name__ == "__main__":
main()
|
988,356 | 26e8753696e8729abafd9a467f89773d5552b744 | import torch
from allennlp.nn.util import masked_softmax, masked_log_softmax
from pytorch_pretrained_bert.modeling import BertPreTrainedModel, BertModel
from torch import nn
from torch.nn import functional as F
from bert_model import layers
class BertQAYesnoHierarchicalNegHalf(BertPreTrainedModel):
    """
    BertForQuestionAnsweringForYesNo
    Model Hierarchical Attention:
        - Use Hierarchical attention module to predict Non/Yes/No.
        - Add supervised to sentence attention.
    This model is just to test that, if we use only the half of hidden vectors of bert, how will the final model perform.
    """
    def __init__(self, config, evidence_lambda=0.8, negative_lambda=1.0, add_entropy: bool = False,
                 split_num: int = 3, split_index: int = 0):
        # evidence_lambda: weight for the supervised sentence-attention loss.
        # negative_lambda: weight for the negative-sample loss.
        # split_num / split_index: slice the BERT hidden dim into split_num
        # contiguous segments and use only segment split_index downstream.
        super(BertQAYesnoHierarchicalNegHalf, self).__init__(config)
        print(f'The model {self.__class__.__name__} is loading...')
        print(f'The coefficient of evidence loss is {evidence_lambda}')
        print(f'The coefficient of negative samples loss is {negative_lambda}')
        print(f'Add entropy loss: {add_entropy}')
        layers.set_seq_dropout(True)
        layers.set_my_dropout_prob(config.hidden_dropout_prob)
        self.bert = BertModel(config)
        # self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # self.answer_choice = nn.Linear(config.hidden_size, 2)
        self.split_num = split_num
        self.split_size = config.hidden_size // self.split_num
        self.split_index = split_index
        # (start, end) offsets of each segment; the last segment absorbs any
        # remainder of hidden_size not divisible by split_num.
        self.split_sizes = [((i - 1) * self.split_size, i * self.split_size) for i in range(1, split_num)] + [
            ((split_num - 1) * self.split_size, config.hidden_size)]
        print(f'Split BERT output into {self.split_num}.')
        print(f'Current model use the {self.split_index}th hidden state.')
        print(f'Read hidden state in {self.split_sizes[self.split_index]}.')
        self.doc_sen_self_attn = layers.LinearSelfAttnAllennlp(self.split_size)
        self.que_self_attn = layers.LinearSelfAttn(self.split_size)
        self.word_similarity = layers.AttentionScore(self.split_size, 250, do_similarity=False)
        self.vector_similarity = layers.AttentionScore(self.split_size, 250, do_similarity=False)
        # self.yesno_predictor = nn.Linear(config.hidden_size, 2)
        # Predicts 3 classes (Non/Yes/No) from [sentence_hidden ; que_vec].
        self.yesno_predictor = nn.Linear(self.split_size * 2, 3)
        self.evidence_lam = evidence_lambda
        self.negative_lam = negative_lambda
        self.add_entropy = add_entropy
        self.apply(self.init_bert_weights)
    def select_separate_hidden(self, hidden_state):
        # Slice the selected segment out of the hidden dimension:
        # [batch, seq, hidden] -> [batch, seq, split_size] (last segment may
        # be wider — see split_sizes above).
        hidden_start, hidden_end = self.split_sizes[self.split_index]
        return hidden_state[:, :, hidden_start: hidden_end]
    def forward(self, input_ids, token_type_ids=None, attention_mask=None, answer_choice=None,
                sentence_span_list=None, sentence_ids=None, sentence_label=None):
        sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False)
        sequence_output = self.select_separate_hidden(sequence_output)
        # mask: 1 for masked value and 0 for true value
        # doc, que, doc_mask, que_mask = layers.split_doc_que(sequence_output, token_type_ids, attention_mask)
        doc_sen, que, doc_sen_mask, que_mask, sentence_mask = \
            layers.split_doc_sen_que(sequence_output, token_type_ids, attention_mask, sentence_span_list)
        # check_sentence_id_class_num(sentence_mask, sentence_ids)
        batch, max_sen, doc_len = doc_sen_mask.size()
        # que_len = que_mask.size(1)
        # Pool the question into a single vector via self-attention.
        que_vec = layers.weighted_avg(que, self.que_self_attn(que, que_mask)).view(batch, 1, -1)
        doc = doc_sen.reshape(batch, max_sen * doc_len, -1)
        # [batch, max_sen, doc_len] -> [batch * max_sen, doc_len]
        word_sim = self.word_similarity(que_vec, doc).view(batch * max_sen, doc_len)
        doc = doc_sen.reshape(batch * max_sen, doc_len, -1)
        doc_mask = doc_sen_mask.reshape(batch * max_sen, doc_len)
        # [batch * max_sen, doc_len] -> [batch * max_sen, 1, doc_len] -> [batch * max_sen, 1, h]
        word_hidden = masked_softmax(word_sim, 1 - doc_mask, dim=1).unsqueeze(1).bmm(doc)
        word_hidden = word_hidden.view(batch, max_sen, -1)
        # Per-sentence vectors from self-attention over each sentence's words.
        doc_vecs = layers.weighted_avg(doc, self.doc_sen_self_attn(doc, doc_mask)).view(batch, max_sen, -1)
        # [batch, 1, max_sen]
        sentence_sim = self.vector_similarity(que_vec, doc_vecs)
        sentence_scores = masked_softmax(sentence_sim, 1 - sentence_mask)
        sentence_hidden = sentence_scores.bmm(word_hidden).squeeze(1)
        yesno_logits = self.yesno_predictor(torch.cat([sentence_hidden, que_vec.squeeze(1)], dim=1))
        sentence_scores = sentence_scores.squeeze(1)
        max_sentence_score = sentence_scores.max(dim=-1)
        output_dict = {'yesno_logits': yesno_logits,
                       'sentence_logits': sentence_scores,
                       'max_weight': max_sentence_score[0],
                       'max_weight_index': max_sentence_score[1]}
        loss = 0
        if answer_choice is not None:
            # Main Yes/No/Non classification loss.
            choice_loss = F.cross_entropy(yesno_logits, answer_choice, ignore_index=-1)
            loss += choice_loss
        if sentence_ids is not None:
            # Supervised evidence-sentence loss on the sentence attention.
            log_sentence_sim = masked_log_softmax(sentence_sim.squeeze(1), 1 - sentence_mask, dim=-1)
            sentence_loss = self.evidence_lam * F.nll_loss(log_sentence_sim, sentence_ids, ignore_index=-1)
            loss += sentence_loss
            if self.add_entropy:
                # Optional entropy regularizer on examples without evidence.
                no_evidence_mask = (sentence_ids != -1)
                entropy = layers.get_masked_entropy(sentence_scores, mask=no_evidence_mask)
                loss += self.evidence_lam * entropy
        if sentence_label is not None:
            # sentence_label: batch * List[k]
            # [batch, max_sen]
            # log_sentence_sim = masked_log_softmax(sentence_sim.squeeze(1), 1 - sentence_mask, dim=-1)
            # Push probability mass away from known-negative sentences.
            sentence_prob = 1 - sentence_scores
            log_sentence_sim = - torch.log(sentence_prob + 1e-15)
            negative_loss = 0
            for b in range(batch):
                for sen_id, k in enumerate(sentence_label[b]):
                    negative_loss += k * log_sentence_sim[b][sen_id]
            negative_loss /= batch
            loss += self.negative_lam * negative_loss
        output_dict['loss'] = loss
        return output_dict
|
988,357 | f1e5226200ec7f6fa7b9f2558cdc9619ca2ee8e4 | import os
import shutil
def allFilePath(rootPath, allFIleList):
    """Recursively append the full path of every file under rootPath
    to allFIleList (directories are descended into, not listed)."""
    for entry in os.listdir(rootPath):
        fullPath = os.path.join(rootPath, entry)
        if os.path.isfile(fullPath):
            allFIleList.append(fullPath)
        else:
            allFilePath(fullPath, allFIleList)
# Collect every file under the current directory and print each path.
rootfile = r"./"
fileList =[]
allFilePath(rootfile,fileList)
for file in fileList:
    # if file.endswith(".txt"):
    print(file)
|
988,358 | dd4dec785be6afdf9aeca4afe1d7ae13672178ff | # class PeopleName:
# def __init__(self):
# self._names = ["XiaoWang", "ZhangSan", "LiYuan"]
# def __len__(self):
# return len(self._names)
# def __getitem__(self, position):
# return self._names[position]
# class DogName:
# def __init__(self):
# self._names = ["HaShiQi", "TuGou", "TaiDi"]
# def __len__(self):
# return len(self._names)
# def __getitem__(self, position):
# return self._names[position]
# FORMAT = "{0:<60}{1}"
# peopleName = PeopleName()
# print(FORMAT.format("Length of People Name:", len(peopleName)))
# for name in peopleName:
# print(FORMAT.format("People Name:", name))
# dogName = DogName()
# print(FORMAT.format("Length of Dog Name:", len(dogName)))
# for name in dogName:
# print(FORMAT.format("Dog Name:", name))
# #######################################
# class PeopleName:
# def __init__(self):
# self._names = ["XiaoWang", "ZhangSan", "LiYuan"]
# def __len__(self):
# return len(self._names)
# def __getitem__(self, position):
# return self._names[position]
# def __call__(self, a):
# return a
# FORMAT = "{0:<60}{1}"
# peopleName = PeopleName()
# print(FORMAT.format("Length of People Name:", len(peopleName)))
# for name in peopleName:
# print(FORMAT.format("People Name:", name))
# print(FORMAT.format("Output of the function:", peopleName("This is a test")))
# #################################
# class Example():
# def func1(self):
# print('Original One')
# def func2():
# print('Replace One')
# def func3():
# print('Replace Two')
# instance = Example()
# instance.func1()
# instance.func1 = func2
# instance.func1()
# instance.func1 = func3
# instance.func1()
# ##################################
# class Example():
# def func1(self):
# print('Original One')
# def func2(self):
# print('Replace One')
# def func3(self):
# print('Replace Two')
# instance = Example()
# instance.func1()
# Example.func1 = func2
# instance.func1()
# Example.func1 = func3
# instance.func1()
# ########################
# class MySeq:
# TEST_SEQ = ["A", "B", "C", "D", "E", "F", "G"]
# def __getitem__(self, index):
# return MySeq.TEST_SEQ[index]
# def __len__(self):
# return len(MySeq.TEST_SEQ)
# my_seq = MySeq()
# print(len(my_seq))
# print(my_seq[0])
# print(my_seq[2:6:3])
# #####################
# class MySeq:
# TEST_SEQ = ["A", "B", "C", "D", "E", "F", "G"]
# def __getitem__(self, index):
# return MySeq.TEST_SEQ[index]
# my_seq = MySeq()
# print(my_seq[0])
# print(my_seq[2:6:3])
# #####################
# class MySeq:
# TEST_SEQ = ["A", "B", "C", "D", "E", "F", "G"]
# def __len__(self):
# return len(MySeq.TEST_SEQ)
# my_seq = MySeq()
# print(len(my_seq))
# ######################
# class MySeq:
# def __getitem__(self, index):
# return index
# my_seq = MySeq()
# FORMAT = "{0:<10}{1:<60}{2:<10}{3}"
# print(FORMAT.format("Index:", str(my_seq[0]), "Type:", type(my_seq[0])))
# ############################
# class MySeq:
# def __getitem__(self, index):
# return index
# my_seq = MySeq()
# FORMAT = "{0:<10}{1:<60}{2:<10}{3}"
# print(FORMAT.format("Index:", str(my_seq[0: 10]), "Type:", type(my_seq[0: 10])))
# print(FORMAT.format("Index:", str(my_seq[1:4:2]), "Type:", type(my_seq[1:4:2])))
# print(FORMAT.format("Index:", str(my_seq[1:4:2, 9]), "Type:", type(my_seq[1:4:2, 9])))
# print(FORMAT.format("Index:", str(my_seq[1:4:2, 7:9]), "Type:", type(my_seq[1:4:2, 7:9])))
# ##################
# class MySeq:
# TEST_SEQ = ["A", "B", "C"]
# def __getitem__(self, index):
# print("Index: %s" % index)
# return MySeq.TEST_SEQ[index]
# for i in MySeq():
# print("Data: %s" % i)
# ##################
# class Foo:
# def __enter__(self):
# return 1
# def __exit__(self, exc_type, exc_val, exc_tb):
# return True
# with Foo() as handler:
# print("Returned value Of __enter__: %s" % handler)
# ########################
# class Foo:
# def __init__(self, name):
# self.name = name
# def __enter__(self):
# print('This is in __enter__')
# return self
# def __exit__(self, exc_type, exc_val, exc_tb):
# print("Exception Type: %s" % exc_type)
# print("Exception Value: %s" % exc_val)
# print("Traceback: %s" % exc_tb)
# return True
# with Foo('test') as handler:
# print("Name: %s" % handler.name)
# print("Done")
# ########################
# class Foo:
# def __init__(self, name):
# self.name = name
# def __enter__(self):
# print('This is in __enter__')
# return self
# def __exit__(self, exc_type, exc_val, exc_tb):
# print("Exception Type: %s" % exc_type)
# print("Exception Value: %s" % exc_val)
# print("Traceback: %s" % exc_tb)
# return False
# with Foo('test') as handler:
# print("Name: %s" % handler.name)
# 1 / 0
# print("Done")
# #######################
# class Foo:
# def __init__(self, name):
# self.name = name
# def __enter__(self):
# print('This is in __enter__')
# return self
# def __exit__(self, exc_type, exc_val, exc_tb):
# print("Exception Type: %s" % exc_type)
# print("Exception Value: %s" % exc_val)
# print("Traceback: %s" % exc_tb)
# return True
# with Foo('test') as handler:
# print("Name: %s" % handler.name)
# 1 / 0
# print("Done")
# #################
# import contextlib
# @contextlib.contextmanager
# def test(a, b):
# print('This is in __enter__')
# c = a + b
# yield c
# print('This is in __exit__')
# with test(1, 2) as handler:
# print(handler)
# print("Done")
# ################
# import contextlib
# @contextlib.contextmanager
# def test(a, b):
# print('This is in __enter__')
# c = a + b
# yield c
# print('This is in __exit__')
# with test(1, 2) as handler:
# print(handler)
# 1 / 0
# print("Done")
# ####################
# class Test:
# def __call__(self, a, b):
# return a + b
# test = Test()
# print(test.__call__(1, 2))
# #########
class Test:
    """Callable object that returns the sum of its two arguments."""

    def __call__(self, a, b):
        # Instances are invoked directly, e.g. Test()(1, 2).
        total = a + b
        return total
test = Test()
print(test(1, 2)) |
988,359 | 749438bc042cc6a9b875dd56c27ef3d4f966f751 | # Copyright 2015 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
"""Checks recipes for stylistic and hygenic issues.
Currently only checks that recipes only import python modules from a whitelist.
Imports are not safe in recipes if they depend on the platform or have functions
which otherwise directly interact with the OS (since all recipe code must run
correctly for all platforms under simulation).
"""
# TODO(luqui): Implement lint for recipe modules also.
from __future__ import absolute_import
import re
import types
ALLOWED_MODULES = [
r'ast',
r'base64',
r'collections',
r'contextlib',
r'copy',
r'datetime',
r'difflib',
r'functools',
r'hashlib',
r'itertools',
r'json',
r'math',
r're',
r'textwrap',
r'urlparse',
r'zlib',
# non stdlib
r'attr',
r'google\.protobuf',
# From recipe ecosystem
r'PB',
r'RECIPE_MODULES',
]
def ImportsTest(recipe, allowed_modules):
  """Tests that recipe_name only uses allowed imports.

  Yields one error string per non-whitelisted module import (this is a
  generator, not a list as the original docstring claimed).
  NOTE: Python 2 code (dict.iteritems).
  """
  for _, val in sorted(recipe.global_symbols.iteritems()):
    if isinstance(val, types.ModuleType):
      module_name = val.__name__
      for pattern in allowed_modules:
        if pattern.match(val.__name__):
          break
      else:
        # No whitelist pattern matched: report the offending import.
        yield ('In %s:\n'
               '  Non-whitelisted import of %s' % (recipe.path, module_name))
def add_arguments(parser):
  """Register the lint command's flags on parser and set its handler."""
  # TODO(iannucci): merge this with the test command, doesn't need to be top
  # level.
  parser.add_argument(
      '--whitelist',
      '-w',
      action='append',
      default=[],
      help=('A regexp matching module names to add to the default whitelist. '
            'Use multiple times to add multiple patterns,'))
  parser.set_defaults(func=main)
def main(args):
  """Lint every recipe in the main repo; return 1 if any errors, else 0.

  NOTE: Python 2 code (print statement, itervalues; map returns a list).
  """
  allowed_modules = map(re.compile, ALLOWED_MODULES + args.whitelist)
  errors = []
  for recipe in args.recipe_deps.main_repo.recipes.itervalues():
    errors.extend(ImportsTest(recipe, allowed_modules))
  if errors:
    print '\n'.join(str(e) for e in errors)
    return 1
  return 0
|
988,360 | 65d5db3a40f96abc95504efc9448ad80544d25b9 | from my_task.main import app
# Broker (task queue) connection URL
broker_url = "redis://127.0.0.1:6379/11"
# Result backend connection URL
result_backend = "redis://127.0.0.1:6379/12"
# Periodic (beat) task schedule
app.conf.beat_schedule = {
    'check_order_status': {
        # Name of the task to run periodically
        'task': 'check_order',
        # Execution period, in seconds
        'schedule': 30.0,
        # Positional arguments for the task, if any
        # 'args': (16, 16)
    },
} |
988,361 | df44982c5639196ddc656660b963a5c0fb71f111 | import neo4j
# NOTE(review): GraphDatabase.driver is conventionally called on the class;
# instantiating with `neo4j.GraphDatabase()` first is unusual — confirm it
# works with the driver version in use.
driver = neo4j.GraphDatabase().driver('bolt://0.0.0.0:7687', auth=('neo4j', 'dupablada'))
def get_stops(tx):
    """Return the result of matching every STOP node in the graph."""
    return tx.run("MATCH (n:STOP) RETURN n;")
def get_connections(tx, f, t):
    """Return all shortest paths (up to 20 hops) between stops named f and t.

    FIX: the stop names are now passed as Cypher query parameters instead of
    being %-interpolated into the query string, which was vulnerable to
    Cypher injection and broke on names containing quotes.
    """
    q = "MATCH (f:STOP{name:$f}), (t:STOP{name:$t})" \
        + " WITH f,t" \
        + " MATCH path = allShortestPaths((f)-[:STOPS_AT*..20]-(t))" \
        + " RETURN path;"
    return tx.run(q, f=f, t=t)
# NOTE: Python 2 script (print statements below).
with driver.session() as session:
    stops = session.read_transaction(get_stops)
    stops = list(stops)
    # Extract the stop names from the returned records.
    stops = [s['n']['name'] for s in list(stops)]
with driver.session() as session:
    # Query shortest paths for every ordered pair of distinct stops —
    # O(n^2) round-trips to the database.
    for f in stops:
        for t in stops:
            if f ==t : continue
            connections = session.read_transaction(get_connections,f,t)
            connections = list(connections)
            print "============"
            print connections
988,362 | 974e970ed7cf5a0c33d8c37bcb42def346223a0c | '''Crie um programa que vai ler vários números e colocar em uma lista.
Depois disso, mostre:
A) Quantos números foram digitados.
B) A lista de valores, ordenada de forma decrescente.
C) Se o valor 5 foi digitado e está ou não na lista.'''
numero = list()  # unique values entered by the user
while True:
    n = int(input("Digite um valor: "))
    # Only append values not already present (duplicates are rejected).
    if n not in numero:
        numero.append(n)
    else:
        print("Valor duplicado! Não adicionado!")
    resposta = str(input("Quer continuar? [S/N] ")).upper().strip()
    # NOTE(review): empty input makes `"" in "N"` True, so pressing Enter
    # also ends the loop — confirm this is intended.
    if resposta in "N":
        break
print("=+" *30)
print(f'Você digitou {len(numero)} elementos.')
print(f'Você digitou os valores {numero}')
numero.sort()
print(f'Os valores em ordem crescente são: {numero}')
numero.sort(reverse=True)
print(f'Os valores em ordem decrescente são: {numero}')
if 5 in numero:
    print("O valor 5 foi digitado.")
else:
    print("O valor 5 não foi digitado.")
988,363 | 5f3fc33169ee3ed427ad3de372c9407b2a3a2501 | # ### Scaling and Resizing
#
# cv2.resize(image, dsize(output image size), x scale, y scale, interpolation)
#
# Import necessary libraries
import cv2
import numpy as np
# Read/load input image
# NOTE(review): imshow/waitKey need a GUI environment, and imread silently
# returns None when '62274667.jpg' is missing — confirm the file exists.
input_image = cv2.imread('62274667.jpg')
cv2.imshow('Original Image', input_image)
cv2.waitKey()
# Making image 1/2 of it's original size
scaled_image1 = cv2.resize(input_image, None, fx=0.5, fy=0.5)
cv2.imshow('Scaled Image', scaled_image1)
cv2.waitKey()
# Making image 1.5 times the size of original image
scaled_image2 = cv2.resize(input_image, None, fx=1.5, fy=1.5, interpolation = cv2.INTER_AREA)
cv2.imshow('Scaling - Area Interpolation', scaled_image2)
cv2.waitKey()
# Skewing the resizing by setting exact dimensions
scaled_image3 = cv2.resize(input_image, (700, 250), interpolation = cv2.INTER_AREA)
cv2.imshow('Scaling - Skewed Size', scaled_image3)
cv2.waitKey()
cv2.destroyAllWindows()
# ### Image Pyramid
#Import necessary libraries
import cv2  # NOTE(review): duplicate import; cv2 is already imported above
#Load/Read an image
input_image = cv2.imread('62274667.jpg')
smaller_img = cv2.pyrDown(input_image) # this will convert to half of original size
larger_img = cv2.pyrUp(input_image) # this will conver to double of original size
cv2.imshow('Original', input_image )
cv2.imshow('Smaller ', smaller_img )
cv2.imshow('Larger ', larger_img )
cv2.waitKey(0)
cv2.destroyAllWindows()
|
988,364 | 4626698fce1c071893fe394d51c9d3159138fd3a | from .replay import ReplayMemory
|
988,365 | 7f5e8496aa695e841d0218d759054bc6d7d5f71c | import copy
def Floyd(graph: list) -> list:
    '''
    Floyd-Warshall algorithm: all-pairs shortest paths.
    :param graph: adjacency matrix of the whole graph (float('inf') for no edge)
    :return: the matrix of shortest path lengths
    '''
    # Deep-copy so the caller's matrix is left untouched.
    short_graph = copy.deepcopy(graph)
    n = len(graph)
    # Standard O(n^3) relaxation; the prints trace every comparison (very
    # verbose — intended as a teaching/debug aid).
    for k in range(n):
        for i in range(n):
            for j in range(n):
                print('Comparing short_graph[%s][%s] and {short_graph[%s][%s]+short_graph[%s][%s]}' % (i, j, i, k, k, j))
                print('Former short_graph[%s][%s] = %s' % (i, j, short_graph[i][j]))
                short_graph[i][j] = min(short_graph[i][j], short_graph[i][k] + short_graph[k][j])
                print('Present short_graph[%s][%s] = %s\n' % (i, j, short_graph[i][j]))
    return short_graph
if __name__ == "__main__":
# inf = float('inf')
# graph = [[0, 1, 12, inf, inf, inf],
# [inf, 0, 9, 3, inf, inf],
# [inf, inf, 0, inf, 5, inf],
# [inf, inf, 4, 0, 13, 15],
# [inf, inf, inf, inf, 0, 4],
# [inf, inf, inf, inf, inf, 0]]
# short_graph = Floyd(graph)
# print(short_graph)
|
988,366 | 7f763bef98df0475675942884c4e17e1e5f0a558 | from ..utils import extract_initial_data
def test_index_no_login_no_data(app_blank_db):
    """Anonymous GET / on an empty DB embeds empty pins and a null user."""
    client = app_blank_db.test_client()
    response = client.get('/')
    assert response.status_code == 200
    initial_data = extract_initial_data(response.data.decode())
    assert initial_data is not None
    assert 'pins' in initial_data
    assert len(initial_data['pins']) == 0
    assert 'user' in initial_data
    assert initial_data['user'] is None
    # The raw page must also carry the serialized initialData blob.
    assert b'const initialData = {' in response.data
    assert b'"user": null' in response.data
    assert b'"pins": []' in response.data
def test_index_no_login_with_data(app_filled_db):
    """Anonymous GET / on a seeded DB embeds both pins and a null user."""
    client = app_filled_db.test_client()
    response = client.get('/')
    assert response.status_code == 200
    initial_data = extract_initial_data(response.data.decode())
    assert initial_data is not None
    assert 'pins' in initial_data
    # The app_filled_db fixture seeds exactly two pins.
    assert len(initial_data['pins']) == 2
    assert 'user' in initial_data
    assert initial_data['user'] is None
|
988,367 | a24f798ae63d2c794b87f36ec35df5b48e4ddf88 | from Graph import Graph
def test_all_path():
    """print_graph(None) returns every node after a single edge is added."""
    # NOTE(review): `from Graph import Graph` then `Graph.Graph()` implies the
    # imported name is itself a module/namespace holding a Graph class —
    # confirm this is not meant to be just `Graph()`.
    graph = Graph.Graph()
    graph.add_path('June', 'Mary', 'cast away')
    path = graph.print_graph(None)
    assert path == ['June', 'Mary']
def test_from_one_path():
    """Traversal from either endpoint of a single edge visits both nodes."""
    graph = Graph.Graph()
    graph.add_path('June', 'Mary', 'cast away')
    path = graph.print_graph('Mary')
    assert path == ['Mary', 'June']
    path = graph.print_graph('June')
    assert path == ['June', 'Mary']
def test_from_two_path():
    """Traversal from a leaf of a two-edge star visits all three nodes."""
    graph = Graph.Graph()
    graph.add_path('June', 'Mary', 'cast away')
    graph.add_path('June', 'Kevin', 'cast away')
    path = graph.print_graph('Mary')
    assert path == ['Mary', 'June', 'Kevin']
    path = graph.print_graph('Kevin')
    assert path == ['Kevin', 'June', 'Mary']
def test_from_distinct_path():
    """Disconnected components are traversed independently."""
    graph = Graph.Graph()
    graph.add_path('June', 'Mary', 'cast away')
    assert len(graph.nodes) == 2
    graph.add_path('Julia', 'Kevin', 'pretty woman')
    assert len(graph.nodes) == 4
    path = graph.print_graph('Mary')
    assert path == ['Mary', 'June']
    path = graph.print_graph('Kevin')
    assert path == ['Kevin', 'Julia']
def test_multi_path():
    """Traversal order over a denser six-node graph from several starts."""
    graph = Graph.Graph()
    graph.add_path('June', 'Mary', 'cast away')
    graph.add_path('June', 'Kevin', 'cast away')
    graph.add_path('Mary', 'Kevin', 'cast away')
    graph.add_path('Kevin', 'Jake', 'Love')
    graph.add_path('Jake', 'Amber', 'pretty woman')
    graph.add_path('Jake', 'Mannie', 'pretty woman')
    graph.add_path('Mannie', 'Amber', 'pretty woman')
    assert len(graph.nodes) == 6
    path = graph.print_graph('Mary')
    assert path == ["Mary", "June", "Kevin", "Jake", "Amber", "Mannie"]
    path = graph.print_graph('June')
    assert path == ["June", "Mary", "Kevin", "Jake", "Amber", "Mannie"]
    path = graph.print_graph('Amber')
    assert path == ["Amber", "Jake", "Mannie", "Kevin", "June", "Mary"]
    path = graph.print_graph('Jake')
assert path == ["Jake", "Kevin", "Amber", "Mannie", "June", "Mary"] |
988,368 | 41fc518152226477c57bd8a58705abb9dcd831c1 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import queue
class Solution:
    def verticalTraversal(self, root: TreeNode) -> List[List[int]]:
        """BFS vertical-order traversal.

        Every node is tagged with (column, depth): the left child gets
        column - 1, the right child column + 1, and depth increases by one
        per level. Results are grouped by column (left to right), within a
        column by depth, and equal positions are sorted by node value.
        Assumes root is not None.
        """
        # Two parallel queues: one for nodes, one for their (column, depth).
        levels = queue.Queue()
        nodes = queue.Queue()
        nodes.put(root)
        levels.put((0,0))
        # out[column][depth] -> list of node values at that position.
        out = dict()
        while not nodes.empty():
            node = nodes.get()
            level = levels.get()
            if level[0] not in out:
                out[level[0]] = dict()
            if level[1] not in out[level[0]]:
                out[level[0]][level[1]] = list()
            out[level[0]][level[1]].append(node.val)
            if node.left:
                nodes.put(node.left)
                levels.put((level[0] - 1, level[1] + 1))
            if node.right:
                nodes.put(node.right)
                levels.put((level[0] + 1, level[1] + 1))
        ans = list()
        # Emit columns left-to-right; within a column top-to-bottom, with
        # ties at the same (column, depth) broken by value.
        for x in sorted(out):
            line = list()
            for y in sorted(out[x]):
                line.extend(sorted(out[x][y]))
            ans.append(line)
        return ans
|
988,369 | 04abd810137376eb2938c3f0569805dd2dcf5365 | # Name: Sergiy Ensary, Kyle Moses
# Date: October 31st
# Course: COSC 2316 Fall 2019 (Dr. Shebaro)
# Program Description: Character Creation
######### Algorithm/Psuedocode ########
# 1. Menu to let user create or load a character or exit
# 2. Method to ask for a player name
# 3. Method to ask for a player gender
# 4. Method to ask for a player class (With description)
# 5. Method to ask for a player race
# 6. Method to input player stats that are withing race bounds
# 7. Method to ask for a favored weapon
# 8. Method to allocate skill points
# 9. Method to write a character to file
# 10. Method to select a pre-made character file
# 11. Method to load a pre-made character from file
############# Python Code #############
import random
import os
from enum import Enum
from os import scandir
# Race construction
class Race:
    """A created character's race name plus its physical statistics."""

    # Class-level defaults so the attributes exist even before __init__.
    race, age, weight, speed, height = None, None, None, None, None

    def __init__(self, race, age, height, weight, speed):
        # `race` is a race enum class; store its display name, not the enum.
        self.race = race.type.value
        self.age = age
        self.height = height
        self.weight = weight
        self.speed = speed

    def display(self):
        """Pretty-print the stats to stdout."""
        print("Race: ", self.race,
              "\nAge: ", self.age,
              "\nHeight: ", self.height,
              "\nWeight: ", self.weight,
              "\nSpeed: ", self.speed)

    def formatForFile(self):
        """Return the save-file representation: one field per line."""
        parts = (self.race, self.age, self.height, self.weight, self.speed)
        return "".join(str(part) + "\n" for part in parts)
# Human stat bounds
# NOTE(review): the bounds below appear to be years / inches / pounds — units
# are never stated in this file; confirm before relying on them.
class Human(Enum):
    type = "Human"
    minAge, maxAge = 15, 90
    minHeight, maxHeight = 60, 80
    minWeight, maxWeight = 100, 200
    speed = 25
# Elf stat bounds
class Elf(Enum):
    type = "Elf"
    minAge, maxAge = 15, 90
    minHeight, maxHeight = 60, 86
    minWeight, maxWeight = 100, 200
    speed = 30
# Orc stat bounds
class Orc(Enum):
    type = "Orc"
    minAge, maxAge = 15, 90
    minHeight, maxHeight = 60, 90
    minWeight, maxWeight = 100, 225
    speed = 25
# Ogre stat bounds
class Ogre(Enum):
    type = "Ogre"
    minAge, maxAge = 15, 90
    minHeight, maxHeight = 60, 95
    minWeight, maxWeight = 120, 250
    speed = 25
# Vampire stat bounds
class Vampire(Enum):
    type = "Vampire"
    minAge, maxAge = 90, 190
    minHeight, maxHeight = 60, 86
    minWeight, maxWeight = 100, 200
    speed = 30
# Dwarf stat bounds
class Dwarf(Enum):
    type = "Dwarf"
    minAge, maxAge = 15, 90
    minHeight, maxHeight = 40, 60
    minWeight, maxWeight = 80, 180
    speed = 25
# Giant stat bounds
class Giant(Enum):
    type = "Giant"
    minAge, maxAge = 15, 200
    minHeight, maxHeight = 70, 120
    minWeight, maxWeight = 150, 400
    speed = 25
# Gnome stat bounds
class Gnome(Enum):
    type = "Gnome"
    minAge, maxAge = 15, 60
    minHeight, maxHeight = 20, 50
    minWeight, maxWeight = 60, 180
    speed = 30
# Halfling stat bounds
class Halfling(Enum):
    type = "Halfling"
    minAge, maxAge = 15, 90
    minHeight, maxHeight = 30, 50
    minWeight, maxWeight = 50, 150
    speed = 25
# Description: Find out if integer value is within given bounds
# Pre: all inputs are integers, minValue <= maxValue
# Post: returns true or false if value is within bounds
def inRange(value, minValue, maxValue):
    """Return True when minValue <= value <= maxValue (inclusive)."""
    return not (value < minValue or value > maxValue)
# Description: Pick a race that you want to make
# Pre: listRaces must be populated
# Post: returns a selected race
def selectRace():
    """Prompt until the user picks a valid race; return the race enum class.

    FIX: the upper bound of the valid menu range is now len(listRaces)
    instead of the hard-coded 9, so adding or removing races no longer
    breaks (or silently truncates) the menu validation.
    """
    raceSelected = False
    while not raceSelected:
        print("\n" + "Please Select a Race from options below:\n")
        for index in range(len(listRaces)):
            print(index + 1, ". ", listRaces[index].type.value)
        try:
            race = int(input("\n>>>"))
            if inRange(race, 1, len(listRaces)):
                race = listRaces[race - 1]
                raceSelected = True
        except ValueError:
            print("Invalid choice, try again\n")
    return race
# Description: Lets you decide the stats of your character
# Pre: value is a string (name of stat) min/max inputs are integers, min <= max
# Post: returns stats for your character
def getParameters(min, max, value):
    """Prompt until the user enters an integer in [min, max]; return it.

    value is the stat name shown in the prompt.
    NOTE(review): `min`/`max` shadow the builtins; renaming them would
    change the signature, so they are left as-is.
    """
    validChoice = False
    while not validChoice:
        try:
            print("\nNow select your", value, " in the range provided:\nmin -", min, " max -", max)
            parameter = int(input("\n>>>"))
            if inRange(parameter, min, max):
                return parameter
            else:
                print("Invalid Value")
        except ValueError:
            print("Please enter a correct value")
def createPlayer():
    """Interactively build and return a Race instance from user input.

    Asks for race, then age/height/weight within that race's bounds; speed
    is fixed per race.
    """
    race = selectRace()
    age = getParameters(race.minAge.value, race.maxAge.value, "age")
    height = getParameters(race.minHeight.value, race.maxHeight.value, "height")
    weight = getParameters(race.minWeight.value, race.maxWeight.value, "weight")
    print("The speed of ", race.type.value, " race is ", race.speed.value)
    player = Race(race, age, height, weight, race.speed.value)
    return player
# Function Description: Formats a string with all attributes of the character that the user created
# Pre: the user has gone through all the steps of the character creation
# Post: returns a string that is the correct format of what should be written to the text file
def formatPlayer():
    """Serialize the global playerList into the save-file text format.

    Emits playerList[0..2] one per line, then the Race block, playerList[4],
    and one '<Attribute> <points>' line per stat in playerList[5].
    """
    string = ""
    string += playerList[0] + "\n"
    string += playerList[1] + "\n"
    string += playerList[2] + "\n"
    string += playerList[3].formatForFile()
    string += playerList[4] + "\n"
    for key, value in playerList[5].items():
        string += key + " " + str(value) + "\n"
    return string
# Function Description: Writes a new text file with the characters name as the file title
# Pre: runs when the character creation is finished
# Post: Writes a file with the title as the characters name, using the formatPlayer function
def writeFile():
    """Write the created character (global playerList) to '<name>.txt'.

    FIX: uses a context manager so the file handle is closed even if
    formatPlayer() raises, instead of an explicit open/close pair.
    """
    with open(str(playerList[0]).format() + ".txt", "w+") as characterFile:
        characterFile.write(formatPlayer())
# Function Description:Takes all .txt files and lets you select the one you want to load
# Pre: called to load an existing character
# Post: returns a filepath to a selected .txt file
def getSelectedFile():
    """List saved character .txt files in cwd and return the chosen name.

    FIXES:
    - `except ValueError or UnboundLocalError` evaluated to
      `except ValueError` only (the `or` picks the first truthy class);
      a parenthesized tuple is required to catch multiple types.
    - An out-of-range menu number previously raised an uncaught IndexError
      (or silently selected the *last* file for 0/negative input via
      Python's negative indexing); it is now validated and re-prompted.
    """
    dir_entries = scandir(os.getcwd())
    listOfTxtFiles = []
    for entry in dir_entries:
        ext = os.path.splitext(entry.name)[-1].lower()
        if ext == ".txt":
            listOfTxtFiles.append(entry)
    if len(listOfTxtFiles) < 1:
        print("There are no saved characters")
        menu()
    print("Available Characters:")
    for index in range(len(listOfTxtFiles)):
        print(str(index + 1) + ". " + listOfTxtFiles[index].name + "\n")
    fileNotChosen = True
    while fileNotChosen:
        try:
            userInput = int(input("Which file do you want to load:\n"))
            if not 1 <= userInput <= len(listOfTxtFiles):
                raise IndexError
            return listOfTxtFiles[userInput - 1].name
        except (ValueError, IndexError):
            print("\nNot a Valid Input\n")
# Function Description: Reads a selected character file
# Pre: fileName is a valid file path
# Post: stores all values in the files into a player container
def readCharacterFromFile(fileName):
    """Load a saved character from fileName into the global playerList.

    File layout mirrors formatPlayer: lines 0-2 are three text fields,
    lines 3-7 the Race fields (race name, age, height, weight, speed),
    line 8 another text field, lines 9-14 six '<Attribute> <points>' pairs.
    NOTE(review): the file handle is never closed — consider `with open(...)`.
    """
    characterFile = open(fileName, "r")
    lines = characterFile.readlines()
    playerList[0] = lines[0].rstrip()
    playerList[1] = lines[1].rstrip()
    playerList[2] = lines[2].rstrip()
    race = None
    # Match the saved race name against the known race enum classes.
    for raceIndex in range(len(listRaces)):
        value = listRaces[raceIndex].type.value
        valueToSearch = str(lines[3].rstrip())
        if str(valueToSearch).format() == str(value).format():
            race = listRaces[raceIndex]
    playerList[3] = Race(race, int(lines[4]), int(lines[5]), int(lines[6]), int(lines[7]))
    playerList[4] = lines[8].rstrip()
    attributes = {}
    # Six attribute lines of the form "<Name> <points>".
    for line in range(6):
        attr = lines[line + 9].split()
        attributes.update({attr[0]: int(attr[1])})
    playerList[5] = attributes
# Function Description: This function has the user input their characters attribute stats
# Precondition: Runs as the last selection option
# Postcondition: Returns a dictionary with the specific stat and the number of points they inputted
def placeAttributes():
    """Interactively distribute 10 attribute points over the six attributes.

    Returns a dict mapping attribute name -> points assigned. Stops early
    once all points are spent.
    """
    attributes = {"Strength": 0, "Dexterity": 0, "Wisdom": 0, "Intelligence": 0, "Charisma": 0, "Constitution": 0}
    totalPoints = 10
    print("\n" + "***ATTRIBUTES***")
    print("These are the attributes... You have", totalPoints, "Attribute Points to start")
    # Derive the numbered menu from the dict itself instead of duplicating it.
    for number, name in enumerate(attributes, start=1):
        print("%d) %s " % (number, name))
    for key in attributes:
        while totalPoints > 0:
            try:
                selection = int(input("How many points would you like to put into " + key.upper() + ": "))
            except ValueError:
                # Non-numeric input: report and re-prompt instead of abusing
                # the magic sentinel value 100 the original used.
                print("That is not a number; enter a value between 0 and", totalPoints)
                continue
            if 0 <= selection <= totalPoints:
                attributes[key] += selection
                totalPoints -= selection
                print(selection, "points added to", key.upper())
                print("You have", totalPoints, "Attribute Points left")
                break
            print("You cannot put ", selection, "of points in", key.upper())
    return attributes
# Function Description: Asks the user what there name is, default value for name is "UN-NAMED"
# Pre: Recieves nothing and is run when the program starts
# Post: Returns the name as a string
def askCharacterName():
    """Prompt for a character name; an empty entry falls back to "UN-NAMED"."""
    print("Please enter your character's name (if no name entered, default name is: UN-NAMED")
    entered = input("CHARACTER NAME: ")
    # Any non-empty text overrides the default name.
    return entered if len(entered) != 0 else "UN-NAMED"
# Function Description: This function asks the user to select a class from 8 individual choices
# Pre: Is run as the third thing from the start of the program
# Post: Returns the selected class as a string to be stored into the playerList
def selectClass():
    """Let the user browse class descriptions and confirm a class.

    Returns the confirmed class name as a string.
    """
    print("\n" + "***CLASS SELECTION***")
    classes = ["Fighter", "Wizard", "Archer", "Rogue", "Sorcerer", "Barbarian", "Paladin", "Monk"]
    # Description per class; replaces the original eight-branch if/elif chain.
    descriptions = {
        "Fighter": """Fighter Description: A fighter uses speed and agility to vanquish their foes.
The fighter is a versatile, weapons-oriented warrior who fights using skill,
strategy, and tactics.""",
        "Wizard": """Wizard Description: Wizards are magic users, able to cast spells upon both the enemies and their
friends. Their power can range from fire, ice, regeneration to even mind control.""",
        "Archer": """Archer Description: The archer is crafty nibble and Nobel.
The archer usually stalks their prey from range, hunting them patiently, but in a pinch,
the hunter can quickly escape from their foe.""",
        "Rogue": """Rogue Description: The rogue is a versatile character,
capable of sneaky combat and nimble tricks. The rogue is stealthy and dexterous.""",
        "Sorcerer": """Sorcerer Description: The sorcerer calls upon dark magic to cast their spells upon enemies.
Dark and twisted, they are not afraid to trick friends or foes to get what they want""",
        "Barbarian": """Barbarian Description: The barbarian is filled with rage and power.
Their strength allows them to wield even the heaviest of weapons with great proficiency,
tearing through their enemies.""",
        "Paladin": """Paladin Description: The paladin is the holiest of warriors.
In the name of justice and honor, the paladin casts holy magic spells""",
        "Monk": """Monk Description: The monk is a master of martial arts and inner tranquility.
The fight mostly with their fists and with their minds.""",
    }
    # Print the numbered class menu once.
    for number, name in enumerate(classes, start=1):
        print(number, ") ", name)
    while True:
        try:
            classSelect = int(input("Enter Class Selection Number to See Class Description: "))
        except ValueError:
            # Non-numeric input falls through to the invalid-number message.
            classSelect = 0
        if 0 < classSelect <= len(classes):
            classChoice = classes[classSelect - 1]
            print(descriptions[classChoice])
            # Confirm, or loop so the user can keep browsing descriptions.
            if confirmClass():
                return classChoice
        else:
            print("You did not enter a valid class number")
# Function Description: selectClass helper method. asks the user to confirm if they want the selected class
# Pre: Run when the user selects a class
# Post: Returns true if the user inputs Y or y, and false if they input anything else
def confirmClass():
    """Ask the user to confirm the selected class; True only for 'Y'/'y'."""
    print("Would you like to confirm this as your class?")
    try:
        confirmation = input("Type Y or N: ")
    except EOFError:
        # Treat end-of-input as "no". The original bare `except:` also
        # swallowed KeyboardInterrupt, making Ctrl-C ineffective here.
        return False
    return confirmation.upper() == "Y"
# Function Description: This function asks the user select a weapon
# Pre: Runs during the program
# Post: returns a string of the weapon selection
def askWeaponType():
    """Prompt for a weapon; option 11 picks one at random.

    Returns the chosen weapon name as a string.
    """
    print("\n" + "***WEAPON SELECTION***")
    weaponChoice = "Sword"  # fallback default (overwritten before return)
    weapons = ["Shortbow", "Longbow", "Pike", "Shortsword", "Longsword", "Staff", "Dagger", "BroadSword", "Crossbow",
               "Halbert", "RandomSelection"]
    # Print the numbered weapon menu.
    for position, name in enumerate(weapons, start=1):
        print(position, ") ", name)
    while True:
        try:
            choice = int(input("Enter Weapon Selection Number: "))
        except ValueError:
            choice = 0
        if 0 < choice <= len(weapons):
            weaponChoice = weapons[choice - 1]
            if weaponChoice == "RandomSelection":
                # Random pick excludes the "RandomSelection" entry itself.
                weaponChoice = weapons[random.randrange(0, len(weapons) - 1)]
            break
        print("You did not select a valid weapon number...")
    return weaponChoice
# Function Description: This function asks the user select a gender
# Pre: Runs during the program
# Post: returns a string of the input gender
def askGender():
    """Prompt for the character's gender and return the entered string.

    input() always returns a str, so the original ValueError handler (and
    its recursive retry, whose result was discarded and returned None)
    was dead code; any text, including empty, is accepted.
    """
    print("Please enter your character's gender")
    return input("CHARACTER GENDER: ")
# Algorithm:
# 1. Use global playerList to display all choices made by user to the console after character creation is finished
# Function Description: prints out all the selections that the user made for their character
# Pre: Runs after the character creation is finished
# Post: prints out all selections to console
def outputToConsole():
    """Print every field of the global playerList summary."""
    print("\n" + "***YOUR CHARACTER***" + "\n")
    print("Name:", playerList[0])
    print("Gender:", playerList[1])
    print("Class:", playerList[2])
    # Slot 3 holds a Race instance that knows how to print itself.
    playerList[3].display()
    print("Weapon:", playerList[4])
    print("Attributes:", playerList[5])
# Function Description: Create or Load a character, or Exit program
# Pre: Starts the program
# Post: Lets you selected options in the program
def menu():
    """Top-level menu: create a character, load one from disk, or exit.

    Re-invokes itself after loading or on invalid input (original flow kept).
    """
    print("\nDo you want to create or load a character:\n"
          "1. Create\n"
          "2. Load\n"
          "3. Exit\n")
    try:
        userInput = int(input(">>>"))
        if userInput == 1:
            createCharacter()
        elif userInput == 2:
            file = getSelectedFile()
            readCharacterFromFile(file)
            for index in range(len(playerList)):
                if index == 3:
                    playerList[index].display()
                elif index == 5:
                    for key, value in playerList[index].items():
                        print(key + ": " + str(value))
                else:
                    # Plain string fields (name, gender, class, weapon).
                    # The original `elif index == 0 or 1 or 2 or 4 or 6:` was
                    # an always-true expression; `else` is what was meant.
                    print(playerList[index])
            input("Press Enter to continue back to menu")
            menu()
        elif userInput == 3:
            exit()
    except ValueError:
        print("Wrong input try again")
        menu()
menu()
# Function Description: Create a new character
# Pre: Call to create a new character
# Post: Prints everything about your character, populates the player list
def createCharacter():
    """Run the full interactive creation flow, save to file, then print."""
    playerList[0] = askCharacterName()
    playerList[1] = askGender()
    playerList[2] = selectClass()
    # createPlayer/formatPlayer/writeFile are defined earlier in this file
    # (outside this excerpt); presumably writeFile persists playerList -- verify.
    playerList[3] = createPlayer()
    playerList[4] = askWeaponType()
    playerList[5] = placeAttributes()
    formatPlayer()
    writeFile()
    outputToConsole()
# Global variable playerList -- used to store data from character
# All playable race classes (defined earlier in this file).
listRaces = [Human, Elf, Orc, Ogre, Vampire, Dwarf, Giant, Gnome, Halfling]
# playerList slots: 0=name, 1=gender, 2=class, 3=Race instance, 4=weapon, 5=attribute dict
playerList = [0 for i in range(6)]
# ----------- Driver Program -----------
menu()
|
988,370 | aa43ba4a40e1ecc12f07eda1f7f224fb15a9afbf | import facemesh as fm
import cv2
def main():
    """Align every portrait in fullSet, save each result and the average face."""
    # Instantiate the face-mesh helper with the dlib 68-landmark model file.
    fullSet = "C:/Users/fleis/Desktop/portraits"
    testSet = "C:/Users/fleis/Desktop/test"  # NOTE(review): unused -- kept for manual switching?
    faceReader = fm.FaceMesh("shape_predictor_68_face_landmarks.dat")
    outputImgs = faceReader.align(fullSet)
    # Save all the aligned images to the output folder, numbered from 1.
    counter = 0
    for img in outputImgs:
        counter+= 1
        cv2.imwrite("output/" + str(counter) + ".png", img.astype('uint8'))
    # Save one average image ("getAvarage"/"avrage" spellings match the project API).
    av = faceReader.getAvarage()
    cv2.imwrite("avrage.png", av.astype('uint8'))
if __name__ == '__main__':
    main()
988,371 | 651a7937a14eaf0a6e7e23590ddbdc285951a71f | # import datetime
# import os
# from datetime import datetime
# import time
#
# from apscheduler.schedulers.blocking import BlockingScheduler
#
#
# def TimeStampToTime(timestamp):
# timeStruct = time.localtime(timestamp)
# return time.strftime("%Y-%m-%d %H:%M:%S", timeStruct)
#
#
# def get_FileCreateTime(filePath):
# filePath = filePath, "utf8"
# t = os.path.getctime(filePath)
# return TimeStampToTime(t)
#
#
# def get_FileModifyTime(filePath):
# filePath = filePath, "utf8"
# t = os.path.getmtime(filePath)
# return TimeStampToTime(t)
#
#
# def deleteFiles():
# path = os.path.abspath(".")
# print("begin path",path)
#
# for item in os.listdir(path):
# print("sds",path)
#
# get_FileCreateTime(item)
# # print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
#
#
# # BlockingScheduler
# scheduler = BlockingScheduler()
# scheduler.add_job(deleteFiles, "cron", day_of_week="1-5", hour=6, minute=30)
# scheduler.start()
#
# if __name__ == "__main__":
# deleteFiles()
if __name__ == "__main__":
    # Build the list of "0".."99"; a comprehension replaces the append loop.
    temp = [str(i) for i in range(100)]
    print(temp)
|
988,372 | 899530e5a05c24a0204c69aef2ce28780946cfea | #!/usr/bin/env python
"""
Computes the min/max FMCW beat frequency expected for a given range vs. sweep time and RF bandwidth
You might consider planning your sweep frequency and beat frequencies to land within the range of a PC sound card, say 200Hz - 24kHz
(I try to avoid 60,120,180Hz for powerline harmonics)
Refs:
1) D. G. Luck, Frequency Modulated Radar. New York: McGraw-Hill, 1949.
2) M. Hirsch. “A Low-Cost Approach to L-band FMCW Radar: Thru-Wall Microwatt Radar".
Ottawa, Ontario: North American Radio Science Meeting, July 2007.
"""
import numpy as np
from scipy.constants import c
def range2beat(range_m: np.ndarray, tm: float, bw: float):
    """
    Convert one-way target range to FMCW beat frequency.

    range_m: one-way range to target in meters
    tm: time of sweep
    bw: FMCW linear chirp bandwidth
    """
    round_trip = 2 * np.asarray(range_m)
    return round_trip * bw / (tm * c)
def beat2range(beats: np.ndarray, tm: float, bw: float):
    """
    Convert beat frequencies from target returns to distance estimates (meters).

    beats: beat frequencies from target returns
    tm: time of sweep
    bw: FMCW linear chirp bandwidth
    """
    travel_time = beat2time(beats, tm, bw)  # two-way travel time, seconds
    return c * travel_time
def beat2time(beats: np.ndarray, tm: float, bw: float):
    """Two-way travel time (seconds) corresponding to beat frequencies."""
    return tm * beats / (bw * 2)
def bw2rangeres(bw: float):
    """Range resolution (meters) of a linear chirp with bandwidth bw (Hz)."""
    return 0.5 * c / bw
def beatlinear1d(x: np.ndarray, y: np.ndarray, tm: float, bw: float):
    """
    Linear FMCW beat frequencies for 1-D displacement x at perpendicular
    distance y from the radar antenna.

    x: vector of x-displacement [m]
    y: distance from radar
    tm: time of sweep
    bw: FMCW linear chirp bandwidth
    """
    slant_range = np.hypot(x, y)  # straight-line distance to each position
    return range2beat(slant_range, tm, bw)
def angle1d(x: float, y: float):
    """
    Angle in degrees of the right triangle with legs x and y.

    NOTE(review): the original docstring described "displacement in x
    relative to a reference y" but the code computes arctan(y / x) --
    confirm which convention callers expect.
    """
    return np.degrees(np.arctan(y / x))
def simtone(tm, fs, SNR, Ftone, Nobs):
    """Simulate Nobs noisy observations of a complex tone at Ftone Hz.

    tm: observation duration [s]; fs: sample rate [Hz]; SNR: target SNR [dB].
    Returns (t, y) where y has shape (Nobs, len(t)). Also prints the SNR
    measured from the first noise realization.
    """
    t = np.arange(0, tm, 1 / fs)  # time samples
    x = np.sqrt(2) * np.exp(1j * 2 * np.pi * Ftone * t)  # noise-free signal
    nvar = 10 ** (-SNR / 10.0)  # variance of noise
    # Complex white Gaussian noise, one row per observation.
    noise = np.sqrt(nvar) * (np.random.randn(Nobs, x.size) + 1j * np.random.randn(Nobs, x.size))
    print("SNR measured {:.1f} dB".format(snrest(x, noise[0, :])))
    y = x + noise  # noisy observation
    return t, y
def uvm2dbm(uvm: float, range_m: float = 3.0):
    """
    Convert field strength in microvolts per meter to dBm in a 50 ohm system.

    uvm: microvolts/meter
    range_m: standoff distance (meters)

    From power density vs. E-field: P = E^2 * r^2 / 30 [W], so in log terms
    dBm = 20*log10(uvm) - 120 + 30 + 10*log10(r^2/30)
        = 20*log10(uvm) - 95.2287874528 for r = 3 m (FCC).
    """
    dbuvm = 20.0 * np.log10(uvm)
    return dbuvm2dbm(dbuvm, range_m)
def dbuvm2dbm(dbuvm: float, range_m: float = 3.0):
    """
    Convert field strength in dBuV/m at distance range_m (meters) to dBm in
    a 50 ohm system.
    """
    friis_term = 10.0 * np.log10(range_m ** 2.0 / 30.0)
    return dbuvm - 90.0 + friis_term
# %% estimation
def rssq(x: np.ndarray, axis=None):
    """Root-sum-of-squares of x along the given axis."""
    return np.sqrt(ssq(np.asarray(x), axis))
def ssq(x: np.ndarray, axis=None):
    """
    Sum of squared magnitudes of x along the given axis.

    Implemented as (x * conj(x)).real.sum(), which handles complex input
    and is ~10% faster than (abs(x)**2).sum().
    """
    arr = np.asarray(x)
    magsq = (arr * arr.conj()).real
    return magsq.sum(axis)
def snrest(noisy: np.ndarray, noise: np.ndarray, axis=None):
    """
    Estimate SNR in dB from a signal+noise record ("noisy") and a
    noise-only record ("noise").
    """
    p_signal = ssq(noisy, axis)
    p_noise = ssq(noise)
    return 10 * np.log10(p_signal / p_noise)  # SNR in dB
def psd(x: np.ndarray, fs: int, zeropadfact: float = 1, wintype=np.hanning):
    """
    One-sided windowed-FFT PSD estimate; take 10*log10(Pxx) for [dB/Hz].

    https://www.mathworks.com/help/signal/ug/psd-estimate-using-fft.html
    """
    npts = x.size
    taper = wintype(npts)
    nfft = int(zeropadfact * npts)
    spectrum = np.fft.fft(taper * x, nfft, axis=-1)[: nfft // 2]
    Pxx = 1.0 / (fs * nfft) * abs(spectrum) ** 2.0
    Pxx[1:-1] = 2 * Pxx[1:-1]  # one-sided scaling; DC and Nyquist bins unscaled
    freqs = np.arange(0.0, fs / 2.0, fs / nfft)[: Pxx.size]  # shifted fft freq. bins
    return Pxx, freqs
|
988,373 | f4d86fa94038b2d22b83fce054179d810d75fc15 | import numpy as np
import torch
from torch import nn
from .head import NerdHead
from .ops import init_weights
from transformers import BertModel
class NerdEncoder(nn.Module):
    """Thin wrapper around a pretrained Chinese RoBERTa encoder."""

    def __init__(self):
        super().__init__()
        self.bert = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext")

    def init(self):
        # Re-downloads/reloads the pretrained weights, discarding any fine-tuning.
        self.bert = BertModel.from_pretrained("hfl/chinese-roberta-wwm-ext")

    def forward(self, sent, sent_mask, token_type=None):
        """Run BERT; token_type is forwarded only when provided."""
        if token_type is None:
            output = self.bert(sent, attention_mask=sent_mask)
        else:
            output = self.bert(sent, attention_mask=sent_mask, token_type_ids=token_type)
        return output
class NerdModel(nn.Module):
    """Encoder + classification head for named-entity disambiguation."""

    def __init__(self, config=None):
        super().__init__()
        self.encoder = NerdEncoder()
        self.cls_head = NerdHead(config)
        # self.cls_head.apply(init_weights)

    def init(self):
        # Explicit (re)initialisation: fresh head weights, reloaded encoder.
        self.cls_head.apply(init_weights)
        self.encoder.init()

    def forward(self, sent, sent_mask, entity_id, entity_pos, entity_mask, token_type=None):
        """Encode the sentence, then score the entity spans with the head."""
        # NerdEncoder returns (sequence embeddings, pooled [CLS]) -- unpacked here.
        embs, cls_pooled = self.encoder(sent, sent_mask, token_type)
        output = self.cls_head(
            embs,
            cls_pooled,
            sent_mask,
            entity_id,
            entity_pos,
            entity_mask
        )
        return output
|
988,374 | 96536f60f15b9f5683991a191ebbab88d9b67ea7 | from flask import Flask ,render_template ,request
import sys
from dbconnect import connection
import gc
row=[3,3,4,4,5,6,10]
col=[3,4,4,5,5,6,10]
app = Flask(__name__)
@app.route('/')
def homepage():
    """Landing page: the 'regular' shape with a default 2x2 grid."""
    return render_template('regular.html',h=2,r=2)

# Each shape route takes a 1-based size index t into the module-level
# row/col tables and renders its template with that grid size.
# NOTE(review): t=0 would index row[-1]/col[-1] (last entry) -- confirm
# templates never link to t=0.
@app.route('/regular/<int:t>')
def regular(t):
    h=row[t-1]
    r=col[t-1]
    return render_template('regular.html',h=h,r=r)

@app.route('/star/<int:t>')
def star(t):
    h=row[t-1]
    r=col[t-1]
    return render_template('star.html',h=h,r=r)

@app.route('/flower/<int:t>')
def flower(t):
    h=row[t-1]
    r=col[t-1]
    return render_template('flower.html',h=h,r=r)

@app.route('/straight/<int:t>')
def straight(t):
    h=row[t-1]
    r=col[t-1]
    return render_template('straight.html',h=h,r=r)

@app.route('/curve/<int:t>')
def curve(t):
    h=row[t-1]
    r=col[t-1]
    return render_template('curve.html',h=h,r=r)

@app.route('/thankyou',methods=["GET","POST"])
def thankyou():
    """Static thank-you page."""
    return render_template('thankyou.html')
@app.route('/data',methods=["GET","POST"])
def data():
    """Persist one experiment record POSTed as JSON, then show the thank-you page.

    Expected JSON shape: {"mydata": [{"time": ...}, {"gender": ...},
    {"age": ...}, {"shape": ...}, {"rsize": ...}, {"csize": ...},
    {"clicks": ...}]} -- one single-key object per field, in this order.
    """
    if request.method=="POST":
        data=request.get_json()
        # print(data)
        time=data["mydata"][0]["time"]
        gender=data["mydata"][1]["gender"]
        age=data["mydata"][2]["age"]
        shape=data["mydata"][3]["shape"]
        rsize=data["mydata"][4]["rsize"]
        csize=data["mydata"][5]["csize"]
        clicks=data["mydata"][6]["clicks"]
        c,conn=connection()
        # Parameterized query -- values are bound, not string-formatted.
        c.execute("INSERT INTO info (gender,time,age,shape,rsize,csize,clicks) VALUES (%s, %s, %s, %s,%s,%s,%s)",(gender,time,age,shape,rsize,csize,clicks))
        conn.commit()
        c.close()
        conn.close()
        gc.collect()
    return render_template('thankyou.html')
if __name__ == "__main__":
app.run() |
988,375 | fb5f87737a8d6c5987e26b272506f90add51be04 | import random
class QLearning():
    """Tabular Q-learning agent with epsilon-greedy action selection.

    learnRate: how strongly new information overwrites old estimates
        (closer to 0 = consider less info; closer to 1 = only recent info)
    discRate: value of future rewards vs. immediate ones
        (closer to 0 = short term; closer to 1 = long term)
    explorationRate: probability of picking a random action; decays after
        every decision down to a floor of 0.01
    numGames: number of games to play (stored for the caller's use)
    """

    def __init__(self, learnRate, discRate, explorationRate, numGames):
        self.learnRate = learnRate
        self.discRate = discRate
        self.numGames = numGames
        self.explorationRate = explorationRate
        self.explorationDecay = 0.995
        self.explorationMin = 0.01
        self.QTable = dict()
        self.validActions = list(range(4))

    def createQValue(self, obs):
        """Ensure obs has a Q-table row, zero-initialised per valid action."""
        if obs in self.QTable:
            return
        # obs[3] flags whether the fourth action is currently legal.
        self.validActions = list(range(4 if obs[3] else 3))
        self.QTable[obs] = {action: 0.0 for action in self.validActions}

    def getMaxQValue(self, obs):
        """Largest Q-value stored for obs (creating its row if needed)."""
        self.createQValue(obs)
        return max(self.QTable[obs].values())

    def decideAction(self, obs):
        """Pick an action epsilon-greedily, then decay the exploration rate."""
        self.createQValue(obs)
        if random.random() > self.explorationRate:
            # Exploit: break ties randomly among the best-valued actions.
            best = self.getMaxQValue(obs)
            candidates = [a for a, q in self.QTable[obs].items() if q == best]
            action = random.choice(candidates)
        else:
            # Explore: any currently-valid action.
            self.validActions = list(range(4 if obs[3] else 3))
            action = random.choice(self.validActions)
        self.explorationRate = max(self.explorationRate * self.explorationDecay,
                                   self.explorationMin)
        return action

    def updateQValue(self, obs, action, reward, nextObs):
        """Standard Q-learning update toward reward + discounted best next value."""
        target = reward + self.discRate * self.getMaxQValue(nextObs)
        self.QTable[obs][action] += self.learnRate * (target - self.QTable[obs][action])

    def getQTable(self):
        """Return the full Q-table (a mutable reference, not a copy)."""
        return self.QTable
988,376 | a0537a16993d1424dfc38139d6d8679eb460559f | # encoding: UTF-8
import re
import json
import datetime
# ro = {}
#
# p_text = re.compile(r'text\': u\'(\S+)\'')
# for i in p_text.findall(str(ro)):
# print i.decode('unicode_escape')
#
# p_resourceid = re.compile(r'resource-id\': u\'(\S+)\'')
# for i in p_resourceid.findall(str(ro)):
# print i
#
# #作者微信:2501902696
# from PIL import Image
# import pytesseract
# #上面都是导包,只需要下面这一行就能实现图片文字识别
# text=pytesseract.image_to_string(Image.open('denggao.jpeg'),lang='chi_sim')
# print(text)
#
# pip install PIL
# pip install pytesseract
# 安装识别引擎tesseract-ocr
class CJsonEncoder(json.JSONEncoder):
    """JSON encoder serialising date/datetime values as 'YYYY-MM-DD HH:MM:SS'."""

    def default(self, obj):
        # datetime.datetime is a subclass of datetime.date, so this single
        # branch covers both; anything else defers to the base class.
        if not isinstance(obj, datetime.date):
            return json.JSONEncoder.default(self, obj)
        return obj.strftime('%Y-%m-%d %H:%M:%S')
a = datetime.datetime.now()
print(a)
b = json.dumps([{'time':a}],cls=CJsonEncoder)
print(b)
|
988,377 | 7fa7a102a5f4ddabc868aa9b5f373affc240b98e | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras import Input, Model
from keras.layers import Wrapper
from attention import Attention, SelfAttention
from ffn import FeedFowardNetwork
from embedding import EmbeddingSharedWeights
from model_utils import *
from backend2 import sequence_beam_search
EOS_ID = 1
class Transformer(Model):
    """Keras implementation of the Transformer (encoder-decoder) model.

    Training path: call([inputs, targets]) -> vocabulary logits.
    Inference path: _get_predict_function builds a beam-search graph.
    """

    def __init__(self, params):
        super(Transformer, self).__init__(name='transformer')
        self.params = params
        # Embedding table shared between input lookup and the output projection.
        self.embedding_softmax_layer = EmbeddingSharedWeights(
            params.vocab_size, params.hidden_size)
        self.encoder_stack = EncoderStack(params)
        self.decoder_stack = DecoderStack(params)

    def call(self, inputs, train=True):
        """inputs = [source ids, target ids]; returns decoder logits."""
        assert isinstance(inputs, (list, tuple)) and len(inputs) == 2
        x = inputs[0]
        targets = inputs[1]
        attention_bias = get_padding_bias(x)
        encoder_outputs = self.encode(x, attention_bias, train=train)
        logits = self.decode(targets, encoder_outputs, attention_bias, train=train)
        return logits

    def compute_output_shape(self, input_shape):
        # NOTE(review): decode() returns vocabulary logits, yet this reports
        # hidden_size as the last dimension -- confirm which is intended.
        assert len(input_shape) == 2
        return input_shape[0] + (self.params.hidden_size,)

    def encode(self, inputs, attention_bias, train):
        """Embed source ids, add positional encoding, run the encoder stack."""
        embedded_inputs = self.embedding_softmax_layer(inputs)
        inputs_padding = get_padding(inputs)
        length = K.shape(embedded_inputs)[1]
        pos_encoding = get_position_encoding(length, self.params.hidden_size)
        encoder_inputs = embedded_inputs + pos_encoding
        if train:
            encoder_inputs = K.dropout(
                encoder_inputs, self.params.layer_postprocess_dropout)
        # TODO: add input padding
        return self.encoder_stack([encoder_inputs, attention_bias], train=train)

    def decode(self, targets, encoder_outputs, attention_bias, train):
        """Teacher-forced decode: shift targets right one step, return logits."""
        decoder_inputs = self.embedding_softmax_layer(targets)
        decoder_inputs_shape = K.shape(decoder_inputs)
        length = decoder_inputs_shape[1]
        # Pad one zero-vector step at the front and drop the last step --
        # the standard right-shift so position i attends to targets < i.
        decoder_inputs = K.slice(K2.pad(decoder_inputs, [[0, 0], [1, 0], [0, 0]]),
                                 (0, 0, 0),
                                 (decoder_inputs_shape[0],
                                  length,
                                  decoder_inputs_shape[2]))
        # decoder_inputs = K2.pad(decoder_inputs, [[0, 0], [1, 0], [0, 0]])[:, :-1, :]
        decoder_inputs += get_position_encoding(length, self.params.hidden_size)
        if train:
            decoder_inputs = K.dropout(
                decoder_inputs, self.params.layer_postprocess_dropout)
        decoder_self_attention_bias = get_decoder_self_attention_bias(length)
        outputs = self.decoder_stack(
            [decoder_inputs,
             encoder_outputs,
             decoder_self_attention_bias,
             attention_bias], train=train)
        # Project back to vocabulary logits via the shared embedding weights.
        logits = self.embedding_softmax_layer.linear(outputs)
        return logits

    @property
    def uses_learning_phase(self):
        return True

    def get_loss(self):
        """Return a label-smoothed cross-entropy loss closure over params."""
        smoothing = self.params.label_smoothing
        vocab_size = self.params.vocab_size

        def transformer_loss(y_true, y_pred, with_xent=False):
            """
            :param y_true: onehot labels: Tensor of size [batch_size, length_labels, vocab_size]
            :param y_pred: logits: Tensor of size [batch_size, length_logits, vocab_size]
            :return: loss
            """
            confidence = 1.0 - smoothing
            low_confidence = (1.0 - confidence) / float(vocab_size - 1)
            logits = y_pred
            soft_targets = y_true * (confidence - low_confidence) + low_confidence
            xentropy = K.categorical_crossentropy(soft_targets, logits, from_logits=True)
            # Calculate the best (lowest) possible value of cross entropy, and
            # subtract from the cross entropy loss.
            normalizing_constant = -(
                confidence * math.log(confidence) + float(vocab_size - 1) *
                low_confidence * math.log(low_confidence + 1e-20))
            xentropy = xentropy - normalizing_constant
            # Positions whose one-hot is id 0 are treated as padding and masked out.
            weights = 1.0 - K.squeeze(K.slice(y_true, (0, 0, 0), (-1, -1, 1)), axis=2)
            xentropy = xentropy * weights
            loss = K.sum(xentropy) / K.sum(weights)
            if with_xent:
                return loss, xentropy, weights
            else:
                return loss
        return transformer_loss

    def _get_symbols_to_logits_fn(self, max_decode_length):
        """Build the per-step decode closure used by beam search."""
        timing_signal = get_position_encoding(
            max_decode_length + 1, self.params.hidden_size)
        decoder_self_attention_bias = get_decoder_self_attention_bias(
            max_decode_length)

        def symbols_to_logits_fn(ids, i, cache):
            """Generate logits for next potential IDs.
            Args:
              ids: Current decoded sequences.
                int tensor with shape [batch_size * beam_size, i + 1]
              i: Loop index
              cache: dictionary of values storing the encoder output, encoder-decoder
                attention bias, and previous decoder attention values.
            Returns:
              Tuple of
                (logits with shape [batch_size * beam_size, vocab_size],
                 updated cache values)
            """
            # Set decoder input to the last generated IDs
            # ids_shape = K.shape(ids)
            decoder_input = K.slice(ids, (0, i), (-1, 1))
            # Preprocess decoder input by getting embeddings and adding timing signal.
            decoder_input = self.embedding_softmax_layer(decoder_input)
            decoder_input += K.slice(timing_signal, (i, 0), (1, -1))
            self_attention_bias = K.slice(decoder_self_attention_bias,
                                          (0, 0, i, 0),
                                          (-1, -1, 1, i + 1))
            decoder_outputs = self.decoder_stack(
                [decoder_input,
                 cache.get("encoder_outputs"),
                 self_attention_bias,
                 cache.get("encoder_decoder_attention_bias")], cache=cache, train=False)
            logits = self.embedding_softmax_layer.linear(decoder_outputs)
            logits = K.squeeze(logits, axis=1)
            return logits, cache
        return symbols_to_logits_fn

    def _get_predict_function(self):
        """Assemble the beam-search inference graph and wrap it in K.function."""
        # inputs: int tensor with shape [batch_size, input_length].
        inputs = Input(shape=(None,), dtype="int32")
        attention_bias = get_padding_bias(inputs)
        # Run the inputs through the encoder layer to map the symbol
        # representations to continuous representations.
        encoder_outputs = self.encode(inputs, attention_bias, train=False)
        eo_shape = K.shape(encoder_outputs)
        batch_size = eo_shape[0]
        input_length = eo_shape[1]
        max_decode_length = input_length + self.params.extra_decode_length
        symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length)
        # Create initial set of IDs that will be passed into symbols_to_logits_fn.
        initial_ids = K.zeros([batch_size], dtype='int32')
        # Create cache storing decoder attention values for each layer.
        cache = {
            "layer_%d" % layer: {
                "k": K.zeros([batch_size, 0, self.params.hidden_size]),
                "v": K.zeros([batch_size, 0, self.params.hidden_size]),
            } for layer in range(self.params.num_hidden_layers)}
        # Add encoder output and attention bias to the cache.
        cache["encoder_outputs"] = encoder_outputs
        cache["encoder_decoder_attention_bias"] = attention_bias
        # Use beam search to find the top beam_size sequences and scores.
        decoded_ids, scores = sequence_beam_search(
            symbols_to_logits_fn=symbols_to_logits_fn,
            initial_ids=initial_ids,
            initial_cache=cache,
            vocab_size=self.params.vocab_size,
            beam_size=self.params.beam_size,
            alpha=self.params.alpha,
            max_decode_length=max_decode_length,
            eos_id=EOS_ID)
        # Get the top sequence for each batch element
        top_decoded_ids = K.slice(decoded_ids, (0, 0, 1), (-1, 1, -1))
        top_decoded_ids = K.squeeze(top_decoded_ids, 1)
        # force 2 inputs to match fit inputs, the second input is always None when predict.
        predict_function = K.function([inputs, None],
                                      [top_decoded_ids], name='predict_function')
        return predict_function

    def _make_predict_function(self):
        # Lazily build and memoise the inference function.
        if self.predict_function is None:
            self.predict_function = self._get_predict_function()
class PrePostProcessingWrapper(Wrapper):
    """Wraps a sublayer with pre-layernorm, post-dropout and a residual add."""

    def __init__(self,
                 layer,
                 params,
                 **kwargs):
        super(PrePostProcessingWrapper, self).__init__(layer, **kwargs)
        self.layer = layer
        self.postprocess_dropout = params.layer_postprocess_dropout
        self.layer_norm = LayerNormalization(params.hidden_size)

    def compute_output_shape(self, input_shape):
        # Output matches the primary input (first element when given a list).
        if isinstance(input_shape, list):
            return input_shape[0]
        else:
            return input_shape

    def call(self, inputs, train=True, **kwargs):
        # The first element of a list input is the tensor the residual wraps;
        # remaining elements (e.g. attention bias) pass through untouched.
        if isinstance(inputs, (list, tuple)):
            x = inputs[0]
        else:
            x = inputs
        # Preprocessing: apply layer normalization
        y = self.layer_norm(x)
        # Get layer output
        if isinstance(inputs, (list, tuple)):
            layer_inputs = [y] + inputs[1:]
        else:
            layer_inputs = y
        y = self.layer(layer_inputs, train=train, **kwargs)
        # Postprocessing: apply dropout and residual connection
        if train:
            y = K.dropout(y, self.postprocess_dropout)
        return x + y
class EncoderStack(Layer):
    """Stack of [self-attention, feed-forward] encoder layers plus final norm."""

    def __init__(self, params):
        super(EncoderStack, self).__init__()
        self.layers = []
        for _ in range(params.num_hidden_layers):
            # Create sublayers for each layer.
            self_attention_layer = SelfAttention(params.hidden_size,
                                                 params.num_heads,
                                                 params.attention_dropout)
            feed_forward_network = FeedFowardNetwork(params.hidden_size,
                                                     params.filter_size,
                                                     params.relu_dropout)
            self.layers.append([
                PrePostProcessingWrapper(self_attention_layer, params),
                PrePostProcessingWrapper(feed_forward_network, params)])
        # Create final layer normalization layer.
        self.output_normalization = LayerNormalization(params.hidden_size)

    def compute_output_shape(self, input_shape):
        return input_shape[0]

    # input: x [batch_size, seq_len, hidden_size], attention_bias
    def call(self, inputs, train=False):
        assert isinstance(inputs, (list, tuple)) and len(inputs) == 2
        y = inputs[0]
        attention_bias = inputs[1]
        for n, layer in enumerate(self.layers):
            # Run inputs through the sublayers.
            self_attention_layer = layer[0]
            feed_forward_network = layer[1]
            y = self_attention_layer([y, attention_bias], train=train)
            y = feed_forward_network(y, train=train)
        return self.output_normalization(y)
# TODO: add get_config/from_config
class DecoderStack(Layer):
    """Stack of [self-attn, encoder-decoder attn, feed-forward] decoder layers.

    During incremental decoding, `cache` carries per-layer key/value tensors
    keyed "layer_<n>".
    """

    def __init__(self, params):
        super(DecoderStack, self).__init__()
        self.layers = []
        for _ in range(params.num_hidden_layers):
            self_attention_layer = SelfAttention(params.hidden_size,
                                                 params.num_heads,
                                                 params.attention_dropout)
            enc_dec_attention_layer = Attention(params.hidden_size,
                                                params.num_heads,
                                                params.attention_dropout)
            feed_forward_network = FeedFowardNetwork(params.hidden_size,
                                                     params.filter_size,
                                                     params.relu_dropout)
            self.layers.append([
                PrePostProcessingWrapper(self_attention_layer, params),
                PrePostProcessingWrapper(enc_dec_attention_layer, params),
                PrePostProcessingWrapper(feed_forward_network, params)])
        # Create final layer normalization layer.
        self.output_normalization = LayerNormalization(params.hidden_size)

    def compute_output_shape(self, input_shape):
        return input_shape[0]

    def call(self, inputs, cache=None, train=False):
        assert isinstance(inputs, (list, tuple)) and len(inputs) == 4
        decoder_inputs = inputs[0]
        encoder_outputs = inputs[1]
        decoder_self_attention_bias = inputs[2]
        attention_bias = inputs[3]
        y = decoder_inputs
        for n, layer in enumerate(self.layers):
            # Run inputs through the sublayers.
            self_attention_layer = layer[0]
            enc_dec_attention_layer = layer[1]
            feed_forward_network = layer[2]
            layer_name = "layer_%d" % n
            layer_cache = cache[layer_name] if cache is not None else None
            y = self_attention_layer(
                [y, decoder_self_attention_bias],
                cache=layer_cache,
                train=train)
            y = enc_dec_attention_layer(
                [y, encoder_outputs, attention_bias], train=train)
            y = feed_forward_network(y, train=train)
        return self.output_normalization(y)
# TODO: add get_config/from_config
988,378 | 9f8636d86bc78ef879bfcf356750280aafe7be21 | '''
To drive mysql in Python3.x.
Need mysqlclient
# pip install mysqlclient
'''
import pymysql
pymysql.install_as_MySQLdb()
# default_app_config = 'MetalFighter.apps.MetalfighterConfig' |
988,379 | 4b3654ffe11c7ad4f4b9cf90fe0d271e7c52785b | import os, sys
def make_fmt(formatter, specials):
    """Build a one-item formatter that prefers entries from *specials*,
    falling back to *formatter* for everything else.

    Note: *formatter* is invoked even for special items, since it
    produces the dict.get default eagerly.
    """
    def fmt(item):
        return specials.get(item, formatter(item))
    return fmt
def make_seq_fmt(sep, item_fmt=str):
    """Build a sequence formatter joining item_fmt(item) with *sep*."""
    def fmt(items):
        return sep.join(map(item_fmt, items))
    return fmt
class ReadWrite:
    """
    Utility class for Google CodeJam.
    Handles input/output, counting and formatting cases.

    With no file name, reads stdin and writes stdout; otherwise reads the
    given file and writes "<stem>.out" next to it.
    """
    def __init__(self, file_name=None, verbose=True, formatter=str):
        self.verbose = verbose
        self.formatter = formatter
        if file_name is None:
            self.in_file = sys.stdin
            self.out_file = sys.stdout
        else:
            self.in_file = open(file_name)
            self.out_file = open(os.path.splitext(file_name)[0] + '.out', 'w')
        # 1-based case counter used by write_case().
        self._case_idx = 1
    def msg(self, output, end='\n'):
        # Diagnostics go to stderr so they never pollute the answer file.
        sys.stderr.write(str(output) + end)
    def read_line(self, *types):
        """
        Read a line from the input file. Divide that line into
        space-separated words, use *types to convert each word in order. If
        there are more words in the line than provided types, the last
        provided type will be used for all subsequent words.

        A single-word line returns a scalar, not a one-element list.
        """
        words = self.in_file.readline().strip().split()
        if len(words) == 1:
            return types[0](words[0])
        return [types[min(i, len(types) - 1)](words[i])
                for i in range(len(words))]
    def write_case(self, output, pfx_char=' '):
        # Emit "Case #k:<pfx_char><formatted output>" and bump the counter.
        pfx = "Case #%d:" % self._case_idx
        self._case_idx += 1
        text = pfx + pfx_char + self.formatter(output)
        self.out_file.write(text + '\n')
        if self.verbose:
            self.msg(text)
        else:
            self.msg(pfx)
# Letter frequencies identify digits in two passes:
#  - INDICATORS_0: letters unique to one digit word among all ten
#    ('Z' only in ZERO, 'W' in TWO, 'U' in FOUR, 'X' in SIX, 'G' in EIGHT).
#  - INDICATORS_1: letters that become unambiguous once the first-pass
#    digits are removed ('O'->ONE, 'T'->THREE, 'F'->FIVE, 'S'->SEVEN,
#    'N'->NINE).
DIGITS = ("ZERO", "ONE", "TWO", "THREE", "FOUR", "FIVE", "SIX", "SEVEN",
          "EIGHT", "NINE")
INDICATORS_0 = ("Z", None, "W", None, "U", None, "X", None, "G", None)
INDICATORS_1 = (None, "O", None, "T", None, "F", None, "S", None, "N")

def take_one(digit, letters):
    """Try to remove the letters of DIGITS[digit] from *letters*.

    Returns (digit, remaining_letters) on success, or (None, letters)
    unchanged when the word cannot be formed.  (The unused local
    ``result = []`` in the original has been removed.)
    """
    remaining = list(letters)
    for c in DIGITS[digit]:
        if c not in remaining:
            return (None, letters)
        remaining.remove(c)
    return (digit, tuple(remaining))

def take_many(digit, ind, letters):
    """Extract *digit* repeatedly while its indicator letter is present.

    Returns (list_of_extracted_digits, remaining_letters).
    """
    result = []
    while len(letters) > 0 and ind in letters:
        d, letters = take_one(digit, letters)
        if d is None:
            return (result, letters)
        result.append(d)
    return (result, letters)

def solve(letters):
    """Recover the sorted digit string whose English words were scrambled
    into *letters*; asserts that every letter is consumed."""
    result = []
    digits = []
    for indicators in (INDICATORS_0, INDICATORS_1):
        for i in range(10):
            ind = indicators[i]
            if ind is not None:
                (digits, letters) = take_many(i, ind, letters)
                result.extend(digits)
    result.sort()
    assert len(letters) == 0
    return ''.join([str(d) for d in result])
if __name__ == '__main__':
    # Default input file when no path is given on the command line.
    input_name = sys.argv[1] if len(sys.argv) > 1 else 'A-small-attempt0.bin'
    rw = ReadWrite(input_name, formatter=str)
    T = rw.read_line(int)
    for t in range(T):
        # read_line(str) on a single word returns a scalar string;
        # tuple() then splits it into individual letters for solve().
        S = tuple(rw.read_line(str))
        rw.write_case(solve(S))
|
988,380 | d502032c09e03a3205afd77173e8f96cfea37693 | import os
import matplotlib.pyplot as plt
# Log directories for ppSCAN without (ppSCANNO) and with the SIMD
# set-intersection optimisation, on the KNL and gpu-23 machines.
ppscanno_knl_folder = '/home/yche/mnt/luocpu8/nfsshare/share/python_experiments/ppSCANNO_knl'
ppscan_knl_folder = '/home/yche/mnt/luocpu8/nfsshare/share/python_experiments/scalability_simd_paper2'
thread_num_knl = 256
ppscanno_gpu23_folder = '/home/yche/mnt/gpu-23/projects/python_experiments/ppSCANNO_gpu23'
ppscan_gpu23_folder = '/home/yche/mnt/gpu-23/projects/python_experiments/scalability_simd_paper_gpu23'
thread_num_gpu23 = 64
gpu23_tag = 'gpu23'
knl_tag = 'knl'
# min_pts parameter (passed as `min_pts` below).
u = 5
# eps = 0.1 .. 0.9.  NOTE: Python 2 module (xrange, print statements).
eps_lst = [float(i + 1) / 10 for i in xrange(9)]
data_set_lst = ['snap_orkut', 'webgraph_webbase', 'webgraph_twitter', 'snap_friendster']
# Log-line prefixes for each timed phase; the last entry is the total.
tag_lst = ['prune execution time', 'check core first-phase bsp time', 'check core second-phase bsp time',
           '3rd: core clustering time', 'non-core clustering time', 'Total time without IO']
legend_lst = ['1. prune', '2. check core', '3. cluster core', '4. cluster non-core', 'in total']
def filter_time_lst(runtime_tag, lines):
    """Extract every value for *runtime_tag* from the log lines.

    Lines look like "<tag> ...: <value>[ms]"; values without the "ms"
    suffix are divided by 1000 (presumably a us->ms conversion -- confirm
    against the log format).  NOTE(review): eval() on log contents is only
    safe for trusted, locally generated logs.  Python 2: map/filter
    return lists here.
    """
    runtime_lst = map(lambda time_str: eval(time_str.split('ms')[0]) if 'ms' in time_str else eval(time_str) / 1000,
                      map(lambda my_str: my_str.split(':')[-1], filter(lambda line: runtime_tag in line, lines)))
    return runtime_lst
def find_min(lst):
    """Return the index of the smallest element of *lst*, or -1 if empty.

    Replaces the previous magic-sentinel scan (999999999), which silently
    returned -1 whenever every element was >= the sentinel; ties resolve
    to the first occurrence, as before.
    """
    if not lst:
        return -1
    return min(range(len(lst)), key=lambda i: lst[i])
def get_algorithm_checking_core_runtime(dataset, eps, min_pts, thread_num, root_dir_path):
    """Parse one experiment log and return its check-core time.

    Picks the repetition with the smallest total time and returns the sum
    of the two check-core BSP phases for it.  Python 2: map() returns
    lists, which the indexing below relies on.
    """
    file_path = os.sep.join([root_dir_path, dataset, 'eps-' + str(eps), 'min_pts-' + str(min_pts),
                             '-'.join(['output', dataset, str(eps), str(min_pts), str(thread_num)]) + '.txt'])
    with open(file_path) as ifs:
        lines = ifs.readlines()
        # One time list per tag in tag_lst, aligned by repetition index.
        my_lst_lst = map(lambda tag: filter_time_lst(tag, lines), lines and tag_lst)
        min_idx = find_min(my_lst_lst[-1])
        min_lst = map(lambda lst: lst[min_idx], my_lst_lst)
        # Component times must sum to less than the reported total.
        assert sum(min_lst[:-1]) < min_lst[-1]
        # 1st, 2nd, 3rd, 4th, total
        breakdown_time_lst = [min_lst[0], min_lst[1] + min_lst[2], min_lst[3], min_lst[4], min_lst[5]]
        for i in xrange(len(tag_lst)):
            assert len(my_lst_lst[0]) == len(my_lst_lst[i])
        return breakdown_time_lst[1]
def display_comparison_txt():
    # Prints, per dataset and eps, the check-core time without and with
    # the SIMD optimisation plus their ratio.  Python 2 print statements.
    print '====================On KNL(AVX512)=========================='
    for data_set in data_set_lst:
        for eps in eps_lst:
            time_no_opt = get_algorithm_checking_core_runtime(data_set, eps, u, thread_num_knl, ppscanno_knl_folder)
            time_opt = get_algorithm_checking_core_runtime(data_set, eps, u, thread_num_knl, ppscan_knl_folder)
            print time_no_opt, time_opt, float(time_no_opt) / time_opt
        print
    print '====================On GPU23(AVX2)=========================='
    for data_set in data_set_lst:
        for eps in eps_lst:
            time_no_opt = get_algorithm_checking_core_runtime(data_set, eps, u, thread_num_gpu23, ppscanno_gpu23_folder)
            time_opt = get_algorithm_checking_core_runtime(data_set, eps, u, thread_num_gpu23, ppscan_gpu23_folder)
            print time_no_opt, time_opt, float(time_no_opt) / time_opt
        print
def get_time_lst(dataset, thread_num, folder):
    """Check-core time for every eps in eps_lst, divided by 1000
    (presumably ms -> seconds -- confirm units of the parsed logs)."""
    def generator_warp():
        for eps in eps_lst:
            yield get_algorithm_checking_core_runtime(dataset, eps, u, thread_num, folder)
    # Python 2: map() returns a list.
    return map(lambda integer: float(integer) / 1000, list(generator_warp()))
def get_speedup_lst(dataset, tag):
    """Element-wise speedup (ppSCAN-NO time / ppSCAN time), one value
    per eps, for the machine selected by *tag*."""
    if tag == knl_tag:
        ppscan_no_lst = get_time_lst(dataset, thread_num_knl, ppscanno_knl_folder)
        ppscan_lst = get_time_lst(dataset, thread_num_knl, ppscan_knl_folder)
        return map(lambda my_pair: my_pair[0] / my_pair[1], zip(ppscan_no_lst, ppscan_lst))
    elif tag == gpu23_tag:
        ppscan_no_lst = get_time_lst(dataset, thread_num_gpu23, ppscanno_gpu23_folder)
        ppscan_lst = get_time_lst(dataset, thread_num_gpu23, ppscan_gpu23_folder)
        return map(lambda my_pair: my_pair[0] / my_pair[1], zip(ppscan_no_lst, ppscan_lst))
    else:
        # Unknown tag: warn and implicitly return None.
        print 'err'
def draw_time():
    """Plot absolute check-core times (four configs x four datasets) and
    save the figure as a PDF."""
    # Locals shadow the module-level data_set_lst / eps_lst on purpose.
    data_set_lst = ['snap_orkut', 'webgraph_webbase', 'webgraph_twitter', 'snap_friendster']
    eps_lst = [float(i + 1) / 10 for i in xrange(9)]
    exp_figure, ax_tuple = plt.subplots(1, 4, sharex=True, figsize=(16, 3))
    for ax_idx, ax in enumerate(ax_tuple):
        time_lst_lst = []
        # (thread count, log folder) per curve: CPU no-opt, CPU opt,
        # KNL no-opt, KNL opt.
        param_lst = [(thread_num_gpu23, ppscanno_gpu23_folder), (thread_num_gpu23, ppscan_gpu23_folder),
                     (thread_num_knl, ppscanno_knl_folder), (thread_num_knl, ppscan_knl_folder)]
        for idx, param_pair in enumerate(param_lst):
            time_lst = get_time_lst(data_set_lst[ax_idx], param_pair[0], param_pair[1])
            time_lst_lst.append(time_lst)
            shape_lst = ['o-.', 's-.', '^:', 'v:', 'x-']
            ax.plot(eps_lst, time_lst, shape_lst[idx], markersize=10, markerfacecolor='none')
        ax.legend(['ppSCAN on CPU', 'ppSCAN-NO, CPU', 'ppSCAN, KNL', 'ppSCAN-NO, KNL'], ncol=2, columnspacing=0)
        ax.set_ylim(0, float(max(max(time_lst_lst))) * 1.5)
    sub_titles = ['(a) dataset = orkut', '(b) dataset = webbase', '(c) dataset = twitter', '(d) dataset = friendster']
    for idx, my_ax in enumerate(ax_tuple):
        my_ax.set_title(sub_titles[idx], fontsize=12)
        if idx == 0:
            my_ax.set_ylabel('Checking Core Times (s)', fontsize=12)
        my_ax.set_xlabel('$\\epsilon = $')
        my_ax.xaxis.set_label_coords(0.00, -0.045)
        my_ax.grid(True)
    exp_figure.subplots_adjust(wspace=0)
    plt.tight_layout()
    plt.savefig('set_intersection_opt_time' + '.pdf', dpi=1200)
    plt.close()
def draw_speedup():
    """Plot the per-eps speedup curves (CPU and KNL) per dataset and save
    the figure as a PNG."""
    # Locals shadow the module-level data_set_lst / eps_lst / tag_lst.
    data_set_lst = ['snap_orkut', 'webgraph_webbase', 'webgraph_twitter', 'snap_friendster']
    eps_lst = [float(i + 1) / 10 for i in xrange(9)]
    exp_figure, ax_tuple = plt.subplots(1, 4, sharex=True, figsize=(16, 2.5))
    for ax_idx, ax in enumerate(ax_tuple):
        time_lst_lst = []
        tag_lst = [gpu23_tag, knl_tag]
        for idx, tag in enumerate(tag_lst):
            time_lst = get_speedup_lst(data_set_lst[ax_idx], tag)
            time_lst_lst.append(time_lst)
            shape_lst = ['o-.', 's--', '^:', 'v:', 'x-']
            ax.plot(eps_lst, time_lst, shape_lst[idx], markersize=10, markerfacecolor='none')
        # ax.set_ylim(0, float(max(max(time_lst_lst))) * 1.2)
        # Hand-tuned y ranges per dataset.
        lim_lst = [(0.8, 2.5), (0.8, 4.2), (0.8, 5.2), (0.8, 3.2)]
        ax.set_ylim(lim_lst[ax_idx])
        if ax_idx == 2:
            ax.set_yticks([1, 2, 3, 4, 5])
    sub_titles = ['(a) dataset = orkut', '(b) dataset = webbase', '(c) dataset = twitter', '(d) dataset = friendster']
    for idx, my_ax in enumerate(ax_tuple):
        # my_ax.set_title(sub_titles[idx], fontsize=12)
        if idx == 0:
            my_ax.set_ylabel('Core Checking Speedup', fontsize=12)
        my_ax.set_xlabel('$\\epsilon $' + '\n' + sub_titles[idx], fontsize=12)
        my_ax.grid(True)
    exp_figure.subplots_adjust(wspace=0)
    plt.tight_layout()
    legend_lst = ['ppSCAN speedup over ppSCAN-NO on CPU (AVX2)',
                  'ppSCAN speedup over ppSCAN-NO on KNL (AVX512)']
    plt.subplots_adjust(top=0.85)
    plt.legend(legend_lst, ncol=len(legend_lst),
               prop={'size': 12, "weight": "bold"}, loc=2,
               bbox_to_anchor=(-3.25, 1.3, 4.0, 0.0), mode='expand')
    plt.savefig('set_intersection_opt_speedup' + '.png', dpi=300)
    plt.close()
if __name__ == '__main__':
    # Alternative entry points kept for reference:
    # display_comparison_txt()
    # draw_time()
    # print get_speedup_lst('snap_orkut', knl_tag)
    # print get_speedup_lst('snap_orkut', gpu23_tag)
    draw_speedup()
|
988,381 | 1f6b0f2652081cdbb79075d23573a17d640f5829 | from typing import List
from pyVmomi import pbm, vim, vmodl
from pyVmomi.VmomiSupport import ManagedObject
class ReplicationManager(ManagedObject):
    # Type-stub style declaration (bodies elided with `...`).
    # NOTE(review): QueryReplicationGroupResult is referenced before its
    # definition below; this only resolves in a .pyi stub or with deferred
    # annotations -- confirm how this file is consumed.
    def QueryReplicationGroups(self, entities: List[pbm.ServerObjectRef]) -> List[QueryReplicationGroupResult]: ...

class QueryReplicationGroupResult(vmodl.DynamicData):
    """Result record pairing a server object with its replication group
    id, or a fault when the query failed for that object."""
    @property
    def object(self) -> pbm.ServerObjectRef: ...
    @object.setter
    def object(self, value: pbm.ServerObjectRef):
        self._object = value
    @property
    def replicationGroupId(self) -> vim.vm.replication.ReplicationGroupId: ...
    @replicationGroupId.setter
    def replicationGroupId(self, value: vim.vm.replication.ReplicationGroupId):
        self._replicationGroupId = value
    @property
    def fault(self) -> vmodl.MethodFault: ...
    @fault.setter
    def fault(self, value: vmodl.MethodFault):
        self._fault = value
988,382 | 0b898e0ad39cf756b7c395715e3b1ecd5e374565 | from unittest import TestCase
from simplefsabstraction import SimpleFS
class TestSimpleFS(TestCase):
    """Unit tests for SimpleFS._check_extension."""
    def test_allowed_extension_fails(self):
        # 'png' is not in the allowed list ['jpg'] -> rejected.
        self.assertFalse(SimpleFS._check_extension('abc.png', ['jpg']))
    def test_allowed_extension_succeed(self):
        # 'png' is in the allowed list -> accepted.
        self.assertTrue(SimpleFS._check_extension('abc.png', ['png']))
|
988,383 | b01973ba98ae2dc194ee68c83f7c971961dcff17 | import sys
sys.path.append('..')
import numpy as np
import os
import argparse
import numpy as np
from dataset import *
from objects import *
from proposal import *
import multiprocessing
import shutil
import matplotlib.pyplot as plt
import copy
import joblib
def hit_target_in_path(zone_graph, depth, max_depth):
    """Single random rollout: follow one random extrusion per step and
    report whether the target shape is reached within max_depth steps.

    NOTE(review): `random` is presumably provided by the star-imports
    above (dataset/objects/proposal) -- confirm.
    """
    if zone_graph.is_done():
        return True
    if depth == max_depth:
        return False
    next_extrusions = get_proposals(zone_graph)
    random.shuffle(next_extrusions)
    next_zone_graph = zone_graph.update_to_next_zone_graph(next_extrusions[0])
    ret = hit_target_in_path(next_zone_graph, depth+1, max_depth)
    return ret
def generate_neg_steps(pos_steps):
    """For each positive step, search for a 'hard negative' extrusion:
    one from which random rollouts rarely reach the target.

    Returns a list of (zone_graph, extrusion) negative pairs; a step is
    silently skipped when no sufficiently bad extrusion is found within
    the time budget, so the result may be shorter than *pos_steps*.
    """
    neg_steps = []
    for i in range(len(pos_steps)):
        time_limit = 100  # seconds budget per positive step
        start_time = time.time()
        # Remaining depth to the target from this step.
        target_depth = len(pos_steps) - i
        sample_number = 10 * target_depth
        base_zone_graph = pos_steps[i][0]
        extrusions = get_proposals(base_zone_graph)
        random.shuffle(extrusions)
        min_hit_ratio = 0.999
        best_neg_extrusion = None
        print('candidate neg extrusions:', len(extrusions))
        for extrusion in extrusions:
            #display_extrusion(extrusion, base_zone_graph, show=True)
            next_zone_graph = base_zone_graph.update_to_next_zone_graph(extrusion)
            hit_count = 0
            for sample_index in range(0, sample_number):
                ret = hit_target_in_path(next_zone_graph, 0, target_depth)
                if ret:
                    hit_count += 1
            hit_ratio = hit_count/sample_number
            print('hit ratio', hit_ratio)
            # Keep the lowest hit ratio; <=0.2 filters easy negatives, and
            # the ground-truth extrusion itself is excluded by hash.
            if hit_ratio <= min_hit_ratio and hit_ratio <= 0.2 and extrusion.hash() != pos_steps[i][1].hash():
                min_hit_ratio = hit_ratio
                best_neg_extrusion = extrusion
                # A (near-)zero hit ratio cannot be improved on.
                if abs(hit_ratio) < 0.0000001:
                    break
            cur_time = time.time()
            elapsed_time = cur_time - start_time
            print('elapsed_time', elapsed_time)
            if elapsed_time > time_limit:
                break
        if best_neg_extrusion:
            print('neg extrusion found ')
            neg_steps.append((copy.deepcopy(base_zone_graph), copy.deepcopy(best_neg_extrusion)))
    return neg_steps
def process_single_data(seq_id, raw_data_path, processed_data_path):
    """Load one raw sequence, validate it by simulation, then dump the
    ground-truth steps plus positive/negative training pairs (joblib).

    Returns early (producing nothing) when loading or simulation fails.
    """
    data_mgr = DataManager()
    print('precessing episode:', seq_id)
    sequence_length = len(list(Path(os.path.join(raw_data_path, seq_id)).glob('*')))
    print('sequence_length', sequence_length)
    gt_seq = []
    #try:
    gt_seq, error_type = data_mgr.load_raw_sequence(os.path.join(raw_data_path, seq_id), 0, sequence_length)
    if len(gt_seq) == 0:
        return
    #except:
    #return
    print('start simulation--------------------------------------------------------------------')
    ret = data_mgr.simulate_sequence(gt_seq)
    print('simulation done------------------------------------------------------------------------------------', ret)
    if not ret:
        return
    # Dump ground-truth (graph, extrusion) pairs: <i>_g / <i>_e.
    seq_gt_folder = os.path.join(processed_data_path, seq_id, 'gt')
    if not os.path.exists(seq_gt_folder):
        os.makedirs(seq_gt_folder)
    for i in range(len(gt_seq)):
        gt_step = gt_seq[i]
        joblib.dump(gt_step[0], os.path.join(seq_gt_folder, str(i) + '_g.joblib'))
        joblib.dump(gt_step[1], os.path.join(seq_gt_folder, str(i) + '_e.joblib'))
    seq_train_folder = os.path.join(processed_data_path, seq_id, 'train')
    if not os.path.exists(seq_train_folder):
        os.makedirs(seq_train_folder)
    step_index = 0
    # Process long sequences in windows of k steps.
    k = 100
    start_index = 0
    while start_index < sequence_length:
        end_index = min(start_index + k, sequence_length)
        print('start_index', start_index, 'end_index', end_index)
        if sequence_length <= k:
            pos_seq = gt_seq
        else:
            pos_seq = []
            try:
                pos_seq, error_type = data_mgr.load_raw_sequence(os.path.join(raw_data_path, seq_id), start_index, end_index)
                if len(pos_seq) == 0:
                    break
            except:
                break
        start_index += k
        neg_steps = generate_neg_steps(pos_seq)
        # File names encode the label: <step>_1_* positive, <step>_0_* negative.
        for i in range(len(neg_steps)):
            pos_step = pos_seq[i]
            #display_zone_graph(pos_step[0], file=os.path.join(seq_train_folder, 'step' + str(step_index) + '_' + str('pos') + '_canvas.png'), show=False)
            #display_extrusion(pos_step[1], pos_step[0], file=os.path.join(seq_train_folder, 'step' + str(step_index) + '_' + str('pos') + '_extrusion.png'), show=False)
            joblib.dump(pos_step[0], os.path.join(seq_train_folder, str(step_index) + '_' + str(1) + '_g.joblib'))
            joblib.dump(pos_step[1], os.path.join(seq_train_folder, str(step_index) + '_' + str(1) + '_e.joblib'))
            neg_step = neg_steps[i]
            #display_zone_graph(neg_step[0], file=os.path.join(seq_train_folder, 'step' + str(step_index) + '_' + str('neg') + 'canvas.png'), show=False)
            #display_extrusion(neg_step[1], neg_step[0], file=os.path.join(seq_train_folder, 'step' + str(step_index) + '_' + str('neg') + 'extrusion.png'), show=False)
            joblib.dump(neg_step[0], os.path.join(seq_train_folder, str(step_index) + '_' + str(0) + '_g.joblib'))
            joblib.dump(neg_step[1], os.path.join(seq_train_folder, str(step_index) + '_' + str(0) + '_e.joblib'))
            step_index += 1
    print('single data processing complete !')
def process(raw_data_path, processed_data_path):
    """Process every sequence directory in a watchdog-guarded subprocess.

    Each worker gets a timeout proportional to its sequence length; a
    worker still alive after the deadline is terminated.
    """
    if not os.path.exists(processed_data_path):
        os.makedirs(processed_data_path)
    seq_ids = os.listdir(raw_data_path)
    for seq_id in seq_ids:
        sequence_length = len(list(Path(os.path.join(raw_data_path, seq_id)).glob('*')))
        print('sequence_length', sequence_length)
        worker_process = multiprocessing.Process(target=process_single_data, name="process_single_data", args=(seq_id, raw_data_path, processed_data_path))
        worker_process.start()
        # Timeout in seconds, scaled by sequence length.
        worker_process.join(200 + sequence_length * 100)
        if worker_process.is_alive():
            print ("process_single_data is running... let's kill it...")
            worker_process.terminate()
            worker_process.join()
    print('all data processing complete !')
def split_data_for_training(dataset_path):
    """Shuffle all sequence ids and split them 85/5/10 into train,
    validate and test id files.

    Fixes an off-by-one in the original slicing: ``lst[a + 1:b]`` dropped
    the element at index ``a`` at both split boundaries, silently losing
    two samples.  The splits are now contiguous and cover every id.
    """
    all_ids = os.listdir(dataset_path)
    random.shuffle(all_ids)
    length = len(all_ids)
    train_end = int(0.85 * length)
    valid_end = int(0.9 * length)
    train_ids = all_ids[:train_end]
    validate_ids = all_ids[train_end:valid_end]
    test_ids = all_ids[valid_end:]
    write_list_to_file('train_ids.txt', train_ids)
    write_list_to_file('test_ids.txt', test_ids)
    write_list_to_file('validate_ids.txt', validate_ids)
if __name__ == "__main__":
    # CLI: split ids into train/valid/test files, then preprocess all data.
    parser = argparse.ArgumentParser(description='train_preprocess')
    parser.add_argument('--data_path', default='../data/fusion_processed', type=str)
    parser.add_argument('--output_path', default='processed_data', type=str)
    args = parser.parse_args()
    split_data_for_training(args.data_path)
    process(args.data_path, args.output_path)
|
988,384 | 7272866a3fdc9fd3f7b6b7dc5ea3d1b2d9ee5d9e | '''Проверить, является ли введенная строка палиндромом.
Палиндром - это слово, которое одинаково читается
слева-направо и справа-налево'''
# Palindrome check: a string is a palindrome iff it equals its reverse.
message = input('Введите что-то: ').strip()
print(message == message[::-1])
|
988,385 | d9b232bc5946f73450f84a8916d18eb7ea151270 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Return all active recommendations on a given project.
python get_recommendation.py \
--project="[YOUR-PROJECT-ID]" \
--service_account_file_path="[FILE-PATH-TO-SERVICE-ACCOUNT]" \
--to_json="[FILE-PATH-TO-STORE-THE-DATA]"
"""
import argparse
import logging
import common
from googleapiclient.discovery import build
from google.oauth2 import service_account
# scopes for the credentials.
SCOPES = ["https://www.googleapis.com/auth/cloud-platform"]
# NOTE(review): RECOMMENDATION_TYPE is not referenced in this file; it may
# be consumed by importers -- confirm before removing.
RECOMMENDATION_TYPE = "google.iam.policy.Recommender"
def main():
    """Parse CLI args, fetch the project's recommendations, and either
    print them or write them to the requested JSON file."""
    parser = argparse.ArgumentParser(
        description="Get recommendations for a given project.")
    parser.add_argument(
        "--project_id",
        required=True,
        type=str,
        help="Enter project id for which you want the recommendation status.")
    parser.add_argument(
        "--service_account_file_path",
        required=True,
        type=str,
        help="Enter the location of service account key for the resources.")
    parser.add_argument(
        "--to_json",
        type=str,
        nargs="?",
        default="",
        help="Enter the json file name to store the recommendation data.")
    parser.add_argument("--log",
                        type=str,
                        nargs="?",
                        default="INFO",
                        help="Enter the log level.")
    parser.add_argument("--recommendation_state",
                        type=str,
                        nargs="?",
                        default="ACTIVE",
                        help="Enter the state of recommendation.")
    args = parser.parse_args()
    logging.basicConfig(format="%(levelname)s[%(asctime)s]:%(message)s",
                        level=args.log)
    credentials = service_account.Credentials.from_service_account_file(
        args.service_account_file_path, scopes=SCOPES)
    recommender = build("recommender",
                        "v1",
                        credentials=credentials,
                        cache_discovery=False)
    recommendation_data = common.get_recommendations(args.project_id,
                                                     recommender,
                                                     args.recommendation_state,
                                                     credentials)
    recommendations_jsonified = common.describe_recommendations(
        recommendation_data)
    # Print to stdout unless an output file was requested.
    if not args.to_json:
        print(recommendations_jsonified)
    else:
        common.writefile(recommendations_jsonified, args.to_json)
        logging.info("Find the project:%s recommendations at location %s.",
                     args.project_id, args.to_json)

if __name__ == "__main__":
    main()
|
988,386 | 66c5e566063525eb70aad8ff66b861bde3f5f246 | import os
# Azure AD B2C configuration; every setting is read from the environment.
b2c_tenant = os.getenv('TENANT_NAME')
signupsignin_user_flow = os.getenv('SIGNUPSIGNIN_USER_FLOW')
editprofile_user_flow = os.getenv('EDITPROFILE_USER_FLOW')
resetpassword_user_flow = os.getenv('RESETPASSWORD_USER_FLOW') # Note: Legacy setting.
# If you are using the new
# "Recommended user flow" (https://docs.microsoft.com/en-us/azure/active-directory-b2c/user-flow-versions),
# you can remove the resetpassword_user_flow and the B2C_RESET_PASSWORD_AUTHORITY settings from this file.
authority_template = "https://{tenant}.b2clogin.com/{tenant}.onmicrosoft.com/{user_flow}"
# Application (client) ID of app registration
CLIENT_ID = os.getenv("CLIENT_ID")
# Application's generated client secret: never check this into source control!
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
# One authority URL per user flow.
AUTHORITY = authority_template.format(tenant=b2c_tenant, user_flow=signupsignin_user_flow)
B2C_PROFILE_AUTHORITY = authority_template.format(tenant=b2c_tenant, user_flow=editprofile_user_flow)
B2C_RESET_PASSWORD_AUTHORITY = authority_template.format(tenant=b2c_tenant, user_flow=resetpassword_user_flow)
# If you are using the new
# "Recommended user flow" (https://docs.microsoft.com/en-us/azure/active-directory-b2c/user-flow-versions),
# you can remove the resetpassword_user_flow and the B2C_RESET_PASSWORD_AUTHORITY settings from this file.
REDIRECT_PATH = "/getAToken" # Used for forming an absolute URL to your redirect URI.
# The absolute URL must match the redirect URI you set
# in the app's registration in the Azure portal.
# This is the API resource endpoint (*not* the endpoint of this app!)
ENDPOINT = ''
# These are the scopes you've exposed in the web API app registration in the Azure portal
SCOPE = []
SESSION_TYPE = "filesystem" # Specifies the token cache should be stored in server-side session
|
988,387 | 9d87e0cf841a0a585c1d9d6f0593f3920ea98201 | from datetime import datetime
from pytz import timezone
class Workbook:
    """A user's workbook link for a subject, stamped with the
    Minsk-local time of its last modification."""

    # All timestamps are produced in Europe/Minsk local time.
    tz = timezone('Europe/Minsk')

    def __init__(self, user_id, user_first_name, subject, link, last_modified=None):
        self.user_id = user_id
        self.user_first_name = user_first_name
        self.subject = subject
        self.link = link
        # A falsy last_modified (e.g. None) defaults to "now" in the class tz.
        self.last_modified = last_modified if last_modified else datetime.now(Workbook.tz)

    def serialize(self):
        """Row form: [id placeholder, subject id, user id, link, formatted time]."""
        stamp = self.last_modified.strftime("%d.%m.%Y в %H:%M")
        return [None, self.subject.id, self.user_id, self.link, stamp]

    def __str__(self):
        return "{name} | {ts}".format(name=self.user_first_name, ts=self.last_modified)
|
988,388 | f0954e8341fe9066968860a72d3663484251a3e8 | from generics.utils import extract_a
from generics.spiders import JAVSpider
from . import get_article, article_json
def studios(links):
    """Yield one studio article per anchor tag found in *links*,
    persisting each article as JSON as a side effect."""
    for url, t in extract_a(links):
        studio = get_article(url, t)
        article_json(studio)
        yield studio
class StudioListSpider(JAVSpider):
    """Crawl AVE studio-list pages and emit one studio item per entry."""
    name = 'ave.studios'
    start_urls = (
        'http://aventertainments.com/studiolists.aspx?Dept_ID=29',
        'http://aventertainments.com/ppv/ppv_studiolists.aspx',
    )
    def parse(self, response):
        # Both selectors are tried; the two start pages presumably use
        # different markup (li.studio vs div.tb) -- confirm.
        yield from studios(response.css('li.studio'))
        yield from studios(response.css('div.tb'))
|
988,389 | f5ccf18e4d54b84a25cd4b327c4630549689ba4d | import math
def is_int(n):
    """Return True if *n* has no fractional part (5.0 -> True, 5.5 -> False).

    The comparison already yields a bool, so the redundant
    ``True if ... else False`` wrapper has been removed.
    """
    return int(n) == n
def dist(x, y):
    """Euclidean distance between the points *x* and *y* (sequences of
    coordinates, indexed over len(x))."""
    squared = [(x[i] - y[i]) ** 2 for i in range(len(x))]
    return math.sqrt(sum(squared))
# Read N points in D dimensions from stdin, then count the pairs whose
# Euclidean distance is an integer (O(N^2) pairwise scan).
N, D = list(map(lambda a: int(a), input().split(" ")))
X = []
for i in range(N):
    X.append(list(map(lambda x: int(x), input().split(" "))))
cnt = 0
for j in range(N):
    for k in range(j):
        if is_int(dist(X[j], X[k])):
            cnt += 1
print(cnt)
|
988,390 | 4dd19f97f37ca5a2b53f6d09f127d40c75358f6d | # This scripts performs two split
# The first group the labelled dataset per dialect and split into train, valid,
# and test set.
# The second does the same, but on the predictions from the entire twitter
# module.
### Settings #################################################################
# Input: tweets carrying both a gold `dialect` column and a model
# `dialect_predicted` column.
dataset_path = "data/twitter_all_predicted.csv"
# Dialect region codes (presumably Swiss German regions -- confirm).
label_names = "BE,CE,EA,GR,NW,VS,ZH".split(',')
out_dir = "data/dialect_specific"
################################################################################
import pandas as pd
from preprocessing.split_dataset import *
import os
from pathlib import Path
df = pd.read_csv(dataset_path, sep="\t")
#--- Group per known dialect --------------------------------------------------
print("*"*80 + "\nCompute known dialects sets\n" + "*"*80)
known_path = os.path.join(out_dir, "known_labels")
Path(known_path).mkdir(parents=True, exist_ok=True)
all_known = dict()
for dialect in label_names:
    print(dialect)
    # Rows whose gold label matches this dialect, split 80/10/10.
    sub = df[df.dialect == dialect]
    sets = split_dataset(sub, 0.8, 0.1)
    dialect_dir = os.path.join(known_path, dialect)
    Path(dialect_dir).mkdir(parents=True, exist_ok=True)
    for i, name in enumerate(["train", "valid", "test"]):
        print(name)
        path = os.path.join(dialect_dir, name + ".csv")
        cur_set = pd.DataFrame(sets[i].sentence)
        print(cur_set.shape[0])
        cur_set.to_csv(path, sep="\t", header=None, index=False)
        # Accumulate the per-dialect splits into all-dialect sets.
        all_known[name] = pd.concat([all_known.get(name, pd.DataFrame()), cur_set])
#--- Group per predicted dialect ---------------------------------------------
print("*"*80 + "\nCompute predicted dialects sets\n" + "*"*80)
predicted_path = os.path.join(out_dir, "predicted_labels")
Path(predicted_path).mkdir(parents=True, exist_ok=True)
all_pred = dict()
for dialect in label_names:
    print(dialect)
    # Same 80/10/10 split, but grouped by the model's predicted label.
    sub = df[df.dialect_predicted == dialect]
    sets = split_dataset(sub, 0.8, 0.1)
    dialect_dir = os.path.join(predicted_path, dialect)
    Path(dialect_dir).mkdir(parents=True, exist_ok=True)
    for i, name in enumerate(["train", "valid", "test"]):
        print(name)
        path = os.path.join(dialect_dir, name + ".csv")
        cur_set = pd.DataFrame(sets[i].sentence)
        print(cur_set.shape[0])
        cur_set.to_csv(path, sep="\t", header=None, index=False)
        all_pred[name] = pd.concat([all_pred.get(name, pd.DataFrame()), cur_set])
#--- Save the sets with all dialect ------------------------------------------
# First we need to remove all train or valid sentences that appear in one of the
# two test sets. This is to avoid training from scratch twice, one for known
# dialects and one for predicted dialects. Once we know which method is the
# best, we can remove the other one.
print("*"*80 + "\nSaving sets with all dialects\n" + "*"*80)
test_sentences = pd.concat([all_known["test"], all_pred["test"]])
test_sentences = set(test_sentences.sentence.values)
for name in ["train", "valid", "test"]:
    print(name)
    print("known")
    all_dir = os.path.join(known_path, "all")
    Path(all_dir).mkdir(parents=True, exist_ok=True)
    sub = all_known[name]
    print(sub.shape[0])
    # Drop any train/valid sentence that leaked into either test set.
    if name == "train" or name == "valid":
        sub = sub[~sub.iloc[:, 0].isin(test_sentences)]
        print(sub.shape[0])
    path = os.path.join(all_dir, name + ".csv")
    sub.to_csv(path, sep="\t", header=None, index=False)
    print("pred")
    all_dir = os.path.join(predicted_path, "all")
    Path(all_dir).mkdir(parents=True, exist_ok=True)
    sub = all_pred[name]
    print(sub.shape[0])
    if name == "train" or name == "valid":
        sub = sub[~sub.iloc[:, 0].isin(test_sentences)]
        print(sub.shape[0])
    path = os.path.join(all_dir, name + ".csv")
    sub.to_csv(path, sep="\t", header=None, index=False)
|
988,391 | b09012d71666be3f47ca1966a471a36642061a57 | import numpy as np
from os import listdir
from os.path import isfile, join
import pandas as pd
import matplotlib.pyplot as plt
from FFClust import IOFibers
def get_list_of_files(directory):
    """Split *directory*'s entries into (plain files, everything else)."""
    entries = listdir(directory)
    files = [entry for entry in entries if isfile(join(directory, entry))]
    dirs = [entry for entry in entries if not isfile(join(directory, entry))]
    return files, dirs
# Walk every subject's oversampled-fiber bundles and collect, per fibre,
# its polyline length and its number of sample points.
directory = '/home/cocobio/Documents/ARCHI_Database/'
fiber_dir = '/OverSampledFibers/'
files, subjects = get_list_of_files(directory)
subjects.sort()
# NOTE(review): fiber_data is never filled or used below -- confirm
# whether it can be removed.
fiber_data = []
distance = []
n_fibers = []
for subject in subjects:
    print('Processing', subject)
    subject_fibers_path = directory + subject + fiber_dir
    fibers, tmp = get_list_of_files(subject_fibers_path)
    del tmp
    fibers = [f for f in fibers if f.endswith('.bundles')]
    for f_path in fibers:
        f = subject_fibers_path + f_path
        bundles, name = IOFibers.read_bundles(f)
        for fibra in bundles[0]:
            # Sum consecutive segment lengths along the fibre polyline.
            length = 0.0
            for i in range(1, len(fibra)):
                length = length + np.linalg.norm(fibra[i] - fibra[i - 1])
            distance.append(length)
            n_fibers.append(len(fibra))
# Build the per-fibre statistics table directly.  (The former loop that
# filled an empty DataFrame column by column was dead code: its result
# was immediately overwritten by this constructor, so it was removed.)
new_table = pd.DataFrame({"Distance":distance, "N Fibers":n_fibers})
new_table.to_csv('Tabla de Datos')
print(new_table)
# DataFrame.info() prints to stdout and returns None, so this also prints "None".
print(new_table.info())
print(new_table.describe())
new_table.hist(bins=50, figsize=(20,15))
plt.show()
988,392 | adf12dfc176142afee4e33f3943507654e29d758 | #!/usr/bin/env python
#
# Project: Video Streaming with Flask
# Author: Log0 <im [dot] ckieric [at] gmail [dot] com>
# Date: 2014/12/21
# Website: http://www.chioka.in/
# Description:
# Modified to support streaming out with webcams, and not just raw JPEGs
# Modified by Jeff Bryant to support finding faces usin the OpenCV demo.
# Most of the code credits to Miguel Grinberg, except that I made a small tweak. Thanks!
# Credits: http://blog.miguelgrinberg.com/post/video-streaming-with-flask
#
# Usage:
# 1. Install Python dependencies: cv2, flask. (wish that pip install works like a charm)
# 2. Run "python main.py".
# 3. Navigate the browser to the local webpage.
import os
import cv2
from flask import Flask, render_template, Response
class VideoCamera(object):
    """Webcam wrapper returning JPEG frames annotated with faces and eyes
    detected via OpenCV Haar cascades."""
    def __init__(self):
        # Using OpenCV to capture from device 0. If you have trouble capturing
        # from a webcam, comment the line below out and use a video file
        # instead.
        self.video = cv2.VideoCapture(0)
        # If you decide to use video.mp4, you must have this file in the folder
        # as the main.py.
        # self.video = cv2.VideoCapture('video.mp4')
        # NOTE(review): hard-coded Windows path to the Haar cascade files.
        pdir = 'C:/Users/jeffrey.f.bryant/Desktop/FirstRobotics/haarcascades/'
        self.face_cascade = cv2.CascadeClassifier(pdir + 'haarcascade_frontalface_alt.xml')
        self.eye_cascade = cv2.CascadeClassifier(pdir + 'haarcascade_eye.xml')
    def __del__(self):
        self.video.release()
    def get_frame(self):
        # Capture, annotate at 640x480, then downscale to 320x240 for the stream.
        success, image = self.video.read()
        # We are using Motion JPEG, but OpenCV defaults to capture raw images,
        # so we must encode it into JPEG in order to correctly display the
        # video stream.
        image = cv2.resize(image,(640,480))
        image = self.findFaces(image)
        newImage = cv2.resize(image,(320,240))
        ret, jpeg = cv2.imencode('.jpg', newImage)
        return jpeg.tobytes()
    def findFaces(self,frame) :
        # Detect faces on a grayscale copy, then search each face region
        # for eyes; rectangles are drawn in place on the colour frame.
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = frame[y:y+h, x:x+w]
            eyes = self.eye_cascade.detectMultiScale(roi_gray)
            for (ex,ey,ew,eh) in eyes:
                cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)
        return frame
app = Flask(__name__)

@app.route('/')
def index():
    # Serve the page that embeds the /video_feed stream.
    loc = 'index.html'
    return render_template(loc)
def gen(camera):
    """Generator yielding multipart MJPEG chunks from *camera* forever."""
    counter = 0
    while True:
        frame = camera.get_frame()
        counter = counter+1
        # NOTE(review): counter cycles 0..3 but no frame is actually
        # skipped -- looks like an unfinished frame-throttling attempt;
        # confirm the intent before relying on it.
        if (counter > 3):
            counter = 0;
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n\r\n')
@app.route('/video_feed')
def video_feed():
    # multipart/x-mixed-replace keeps the HTTP response open and replaces
    # the image at each boundary -- the standard MJPEG streaming trick.
    return Response(gen(VideoCamera()),
                    mimetype='multipart/x-mixed-replace; boundary=frame')
if __name__ == '__main__':
    # Python 2 print statement -- this module only runs under Python 2 as written.
    print os.curdir
    app.run(host='0.0.0.0', debug=True)
|
988,393 | 26911cc0ea0e86cf0be4add7d0ce5f537d26270e | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'dlgscore.ui'
#
# Created by: PyQt5 UI code generator 5.9
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Dialog(object):
    """UI class (Qt Designer style) for the fantasy-cricket score dialog.

    ``setupUi`` builds the widget tree; ``calculate`` scores every player
    of the selected team for the selected match and shows per-player and
    team totals.

    Fixes over the generated original: duplicated ``QFont()`` creation and
    a dead ``teams`` list removed, SQL now uses bound parameters instead
    of string concatenation, the doubled ``bowlscore=bowlscore=...``
    assignment corrected, zero-denominator guards added, and the database
    connection in ``calculate`` is always closed.
    """

    # Matches selectable in cb1.  Also serves as the whitelist of table
    # names used by calculate(): table names cannot be bound as SQL
    # parameters, so they are validated against this tuple instead.
    MATCHES = ("Match1", "Match2", "Match3", "Match4", "Match5")

    @staticmethod
    def _font(family, size, bold=False, weight=None):
        """Build a QFont with the given family/size/bold/weight."""
        font = QtGui.QFont()
        font.setFamily(family)
        font.setPointSize(size)
        if bold:
            font.setBold(True)
        if weight is not None:
            font.setWeight(weight)
        return font

    def _add_separator(self, Dialog, name):
        """Create a sunken horizontal rule and append it to the main layout."""
        line = QtWidgets.QFrame(Dialog)
        line.setFont(self._font("Gill Sans MT", 12, bold=True))
        line.setFrameShape(QtWidgets.QFrame.HLine)
        line.setFrameShadow(QtWidgets.QFrame.Sunken)
        line.setObjectName(name)
        self.verticalLayout.addWidget(line)
        return line

    def setupUi(self, Dialog):
        """Create and lay out all widgets on *Dialog*."""
        heading = self._font("Gill Sans MT", 12, bold=True)
        combo_font = self._font("Eras Demi ITC", 10)

        Dialog.setObjectName("Dialog")
        Dialog.resize(500, 400)
        self.verticalLayout = QtWidgets.QVBoxLayout(Dialog)
        self.verticalLayout.setContentsMargins(25, -1, 25, -1)
        self.verticalLayout.setObjectName("verticalLayout")

        # --- selector row: team and match combo boxes ------------------
        self.horizontalLayout = QtWidgets.QHBoxLayout()
        self.horizontalLayout.setObjectName("horizontalLayout")

        self.label_2 = QtWidgets.QLabel(Dialog)
        self.label_2.setFont(heading)
        self.label_2.setObjectName("label_2")
        self.horizontalLayout.addWidget(self.label_2)

        self.cb0 = QtWidgets.QComboBox(Dialog)
        self.cb0.setFont(combo_font)
        self.cb0.setObjectName("cb0")
        self.horizontalLayout.addWidget(self.cb0)

        # Populate the team selector from the database.
        import sqlite3
        conn = sqlite3.connect('fantasy.db')
        try:
            for row in conn.execute("select name from teams"):
                self.cb0.addItem(row[0])
        finally:
            conn.close()

        self.label = QtWidgets.QLabel(Dialog)
        self.label.setFont(heading)
        self.label.setObjectName("label")
        self.horizontalLayout.addWidget(self.label)

        self.cb1 = QtWidgets.QComboBox(Dialog)
        self.cb1.setFont(combo_font)
        self.cb1.setObjectName("cb1")
        for _ in self.MATCHES:
            self.cb1.addItem("")  # texts are assigned in retranslateUi()
        self.horizontalLayout.addWidget(self.cb1)
        self.verticalLayout.addLayout(self.horizontalLayout)

        self.line = self._add_separator(Dialog, "line")

        # --- column headers --------------------------------------------
        header = self._font("Gill Sans MT", 12, bold=True, weight=75)
        self.horizontalLayout_4 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_4.setObjectName("horizontalLayout_4")
        self.label_5 = QtWidgets.QLabel(Dialog)
        self.label_5.setFont(header)
        self.label_5.setAlignment(QtCore.Qt.AlignCenter)
        self.label_5.setObjectName("label_5")
        self.horizontalLayout_4.addWidget(self.label_5)
        self.label_4 = QtWidgets.QLabel(Dialog)
        self.label_4.setFont(header)
        self.label_4.setAlignment(QtCore.Qt.AlignCenter)
        self.label_4.setObjectName("label_4")
        self.horizontalLayout_4.addWidget(self.label_4)
        self.verticalLayout.addLayout(self.horizontalLayout_4)

        self.line_2 = self._add_separator(Dialog, "line_2")

        # --- player / score lists --------------------------------------
        list_font = self._font("Tahoma", 10, bold=True, weight=75)
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.lw1 = QtWidgets.QListWidget(Dialog)
        self.lw1.setFont(list_font)
        self.lw1.setObjectName("lw1")
        self.horizontalLayout_2.addWidget(self.lw1)
        spacerItem = QtWidgets.QSpacerItem(
            200, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem)
        self.lw2 = QtWidgets.QListWidget(Dialog)
        self.lw2.setFont(list_font)
        self.lw2.setObjectName("lw2")
        self.horizontalLayout_2.addWidget(self.lw2)
        self.verticalLayout.addLayout(self.horizontalLayout_2)

        self.line_3 = self._add_separator(Dialog, "line_3")

        # --- action row: calculate button + team total -----------------
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout()
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.pushButton = QtWidgets.QPushButton(Dialog)
        self.pushButton.setFont(heading)
        self.pushButton.setObjectName("pushButton")
        self.pushButton.clicked.connect(self.calculate)
        self.horizontalLayout_3.addWidget(self.pushButton)
        spacerItem1 = QtWidgets.QSpacerItem(
            150, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem1)
        self.scoreline = QtWidgets.QLineEdit(Dialog)
        self.scoreline.setFont(self._font("Gill Sans MT", 12, bold=True, weight=75))
        self.scoreline.setObjectName("scoreline")
        self.horizontalLayout_3.addWidget(self.scoreline)
        self.verticalLayout.addLayout(self.horizontalLayout_3)

        self.retranslateUi(Dialog)
        QtCore.QMetaObject.connectSlotsByName(Dialog)

    def calculate(self):
        """Score every player of the selected team for the selected match.

        Fills lw1 with the team's players, lw2 with each player's score,
        and scoreline with the team total.
        """
        import sqlite3

        team = self.cb0.currentText()
        match = self.cb1.currentText()
        # Table names cannot be parameterized; accept only known tables.
        if match not in self.MATCHES:
            return

        conn = sqlite3.connect('fantasy.db')
        try:
            self.lw1.clear()
            # Bound parameter -- never concatenate widget text into SQL.
            row = conn.execute(
                "select players, value from teams where name=?", (team,)
            ).fetchone()
            self.lw1.addItems(row[0].split(','))

            self.lw2.clear()
            teamttl = 0
            for i in range(self.lw1.count()):
                nm = self.lw1.item(i).text()
                row = conn.execute(
                    "select * from " + match + " where player=?", (nm,)
                ).fetchone()

                # --- batting: row[1]=runs, row[2]=balls, row[3]/row[4]
                # presumably fours/sixes -- TODO confirm schema.
                batscore = int(row[1] / 2)
                # NOTE(review): these thresholds compare the halved score,
                # so 50 here means 100 runs -- confirm that is intended.
                if batscore >= 50:
                    batscore += 5
                if batscore >= 100:
                    batscore += 10
                if row[1] > 0 and row[2] > 0:  # guard: balls faced may be 0
                    sr = row[1] / row[2]
                    if 80 <= sr < 100:
                        batscore += 2
                    if sr >= 100:
                        batscore += 4
                batscore += row[3]
                batscore += 2 * row[4]

                # --- bowling: row[8]=wickets, row[7]/row[5] feed the
                # economy-rate calculation.
                bowlscore = row[8] * 10
                if row[8] >= 3:
                    bowlscore += 5
                if row[8] >= 5:
                    bowlscore += 10  # original had a doubled assignment here
                if row[7] > 0 and row[5] > 0:  # guard: denominator may be 0
                    er = 6 * row[7] / row[5]
                    if er <= 2:
                        bowlscore += 10
                    elif er <= 3.5:
                        bowlscore += 7
                    elif er <= 4.5:
                        bowlscore += 4

                # --- fielding: catches / stumpings / run-outs, 10 each.
                fieldscore = (row[9] + row[10] + row[11]) * 10

                ttl = batscore + bowlscore + fieldscore
                self.lw2.addItem(str(ttl))
                teamttl += ttl

            self.scoreline.setText(str(teamttl))
        finally:
            conn.close()

    def retranslateUi(self, Dialog):
        """Assign all user-visible strings (kept separate for translation)."""
        _translate = QtCore.QCoreApplication.translate
        Dialog.setWindowTitle(_translate("Dialog", "Dialog"))
        self.label_2.setText(_translate("Dialog", "Choose Team"))
        self.label.setText(_translate("Dialog", "Choose Match"))
        for idx, name in enumerate(self.MATCHES):
            self.cb1.setItemText(idx, _translate("Dialog", name))
        self.label_5.setText(_translate("Dialog", "Players"))
        self.label_4.setText(_translate("Dialog", "Score"))
        self.pushButton.setText(_translate("Dialog", "Calculate Score"))
        self.scoreline.setText(_translate("Dialog", "00"))
# Manual test harness: show the dialog standalone.
if __name__ == "__main__":
    import sys
    app = QtWidgets.QApplication(sys.argv)
    Dialog = QtWidgets.QDialog()
    ui = Ui_Dialog()
    ui.setupUi(Dialog)
    Dialog.show()
    # exec_() blocks until the dialog is closed; its status code becomes
    # the process exit code.
    sys.exit(app.exec_())
|
988,394 | 51207780e4adf7aeded6eb5d2ce3ca4054e6ca7a | # Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path
import gtk
import vte
import pango
from tab_interface import *
from debugger import *
class OutputTab(gtk.VBox):
    """Tab that multiplexes the debugger's pseudo-terminals (ptys).

    A combo box lists every pty the debugger has created; the VTE
    terminal bound to the selected pty is shown in the box below it.
    Implements the TabInterface contract (``id`` property, ``on_rerun``).
    """
    def __init__(self,mc):
        # Fail fast if this class stops satisfying the tab contract.
        TabInterface.validate_implementation(self)
        gtk.VBox.__init__(self)
        self._id = None
        self._mc = mc
        self._mc.settings.register("OutputTab_ScrollbackLines", int, 1000)
        # ls: one row per pty -- display text, the pty object, its terminal.
        self._ls = PListStore(Text = str, Pty = object, Term = object)
        # cbox: combo box backed directly by the list store above.
        cbox = gtk.ComboBox(self._ls)
        cell = gtk.CellRendererText()
        cbox.pack_start(cell, True);
        cbox.add_attribute(cell, 'text', self._ls.Text)
        self._cbox = cbox
        self.pack_start(cbox,False,False,0)
        cbox.connect('changed', self._on_active_pty_changed)
        # term box: container the currently selected terminal is swapped into.
        term_box = gtk.VBox() # gtk.ScrolledWindow()
        # term_box.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self._term_box = term_box
        self.pack_start(term_box,True,True,0)
        self.show_all()
        # connect up to debugger...
        mc.debugger.ptys.item_added.add_listener(self._on_pty_added)
        mc.debugger.ptys.item_deleted.add_listener(self._on_pty_deleted)
        mc.debugger.active_frame_changed.add_listener(self._on_active_frame_changed)
        # active pty (None until a selection is made)
        self._active_pty = None
        # add any ptys that were alive when we were created
        for i in range(len(mc.debugger.ptys)):
            pty = mc.debugger.ptys[i]
            self._on_pty_added(i,pty)
        if len(self._ls) and self._cbox.get_active() == -1:
            self._cbox.set_active(0)

    # tabbase interface
    @property
    def id(self):
        # Identifier assigned by the tab host.
        return self._id
    @id.setter
    def id(self,id):
        self._id = id
    def on_rerun(self): # called by MC when the program is re-run
        # Drop every pty row; the debugger re-adds ptys for the new run.
        self._ls.clear()

    # terminal creation
    def _on_pty_added(self,idx,pty):
        """Create a VTE terminal for a newly added pty and register it."""
        r = self._ls.append()
        r.Text = pty.name
        r.Pty = pty
        r.Term = vte.Terminal()
        r.Term.set_property('scrollback-lines', self._mc.settings.OutputTab_ScrollbackLines)
        # r.Term.set_size(80, 8)
        r.Term.set_pty(pty.master_fd)
        desc = r.Term.get_font().copy()
        desc.set_size(self._mc.resources.SMALL_FONT_SIZE*pango.SCALE)
        r.Term.set_font(desc)
        pty.name_changed.add_listener(self._on_pty_renamed)
        # Auto-select the first pty if nothing is selected yet.
        if len(self._ls) and self._cbox.get_active() == -1:
            self._cbox.set_active(0)
    def _on_pty_renamed(self,pty):
        # Keep the combo-box label in sync with the pty's name.
        r = self._ls.find(lambda x: x.Pty == pty)
        r.Text = pty.name
    def _on_pty_deleted(self,idx,pty):
        # The row is kept (marked defunct) rather than removed, so the
        # program's output remains viewable after the pty goes away.
        # if self._active_pty == pty:
        # if len(self._ls) and self._cbox.get_active() == -1:
        # self._cbox.set_active(0)
        # else:
        # self._cbox.set_active(-1)
        r = self._ls.find(lambda x: x.Pty == pty)
        r.Text = pty.name + " <defunct>"
        pty.name_changed.remove_listener(self._on_pty_renamed)
        # r = self._ls.find(lambda r: r.Pty == pty)
        # self._ls.remove(r)

    # ignore for now...
    def _on_active_frame_changed(self):
        pass

    # active changed
    def _on_active_pty_changed(self, x):
        """Combo-box 'changed' handler: swap in the selected pty's terminal."""
        if self._cbox.get_active_iter() == None:
            self._set_active_pty(None)
        else:
            row = self._ls[self._cbox.get_active_iter()]
            self._set_active_pty(row.Pty)

    # active pty
    def _set_active_pty(self,pty):
        """Show *pty*'s terminal in the term box (or clear it for None)."""
        # find item in the ls
        if self._active_pty != None:
            self._term_box.remove(self._term_box.get_children()[0])
        self._active_pty = pty
        # make the new pty active
        if self._active_pty:
            r = self._ls.find(lambda r: r.Pty == pty)
            self._term_box.add(r.Term)
        # NOTE(review): original indentation was lost; show_all() is placed
        # at method level (refresh regardless of selection) -- confirm.
        self.show_all()

    # not sure what this shit does
    def _update_combobox(self):
        pass
        # md_combo = gtk.combo_box_new_text()
        # for i in range(1,_MAX_DEPTH+1):
        # md_combo.append_text("%s" % i)
        # md_combo.append_text("No limit");
        # md_combo.set_active(_MAX_DEPTH) # "no limit"
|
988,395 | b9023455b6737907164e56449465dfc1a4d5ed7c |
from functools import wraps
from flask import current_app, request, redirect, flash, url_for
from flask_login import current_user
def admin_required(func):
    """View decorator that admits only authenticated admin users.

    Anyone who is not logged in, or is logged in without the admin
    flag, receives the login manager's standard unauthorized response.
    """
    @wraps(func)
    def decorated_view(*args, **kwargs):
        is_admin = current_user.is_authenticated and current_user.is_admin
        if not is_admin:
            return current_app.login_manager.unauthorized()
        return func(*args, **kwargs)
    return decorated_view
|
988,396 | 1be9d6a9c8a0dc1fbb744f1e07d6734afcd0cc56 | t=int(input())
# For each of the t test cases, read integers n and m and report
# whether n divides m exactly.
for _ in range(t):
    n, m = map(int, input().split())
    print("Yes" if m % n == 0 else "No")
|
988,397 | d6ad08f853d524e0a028b2dea3291bc1374174bc | print "Hello, twat" |
988,398 | cec241f7477c9e2d069f07b1ebdbb3d35802bbef | import turtle
import pandas
# U.S. states guessing game: shows a blank U.S. map; the player types
# state names until all 50 are guessed or they type "Exit", at which
# point the states they missed are written to missing_state.csv.
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)

data = pandas.read_csv("50_states.csv")
all_states = data.state.to_list()

# Pen used to label correctly guessed states on the map.
text = turtle.Turtle()
text.hideturtle()
text.penup()

guessed_states = []
while len(guessed_states) < 50:
    answer = screen.textinput(
        title=f"{len(guessed_states)}/50 States Correct",
        prompt="What's another state name?")
    # textinput() returns None when the dialog is cancelled; treat that
    # like typing "Exit" instead of crashing on None.title().
    if answer is None or answer.title() == "Exit":
        missed_states = [s for s in all_states if s not in guessed_states]
        pandas.DataFrame(missed_states).to_csv("missing_state.csv")
        break  # BUG FIX: the original kept looping after "Exit"
    answer_state = answer.title()
    # Reject repeats so a duplicate guess cannot inflate the counter.
    if answer_state in all_states and answer_state not in guessed_states:
        guessed_states.append(answer_state)
        state_data = data[data.state == answer_state]
        # .item() extracts the scalar; float(Series) is deprecated.
        text.goto(x=state_data.x.item(), y=state_data.y.item())
        text.write(answer_state)
988,399 | 7d40de7e0b1173fd98fa49ab6de9ad28dc4d91c0 | # -*- coding:utf-8 -*-
# @Time : 2020/3/9 19:33
# @Author: wsp
# Function parameters
# 1. Positional parameters
def power(x):
    """Return the square of *x* -- a minimal positional-parameter demo."""
    return x ** 2


print(power(5))
# Recursive functions: in theory every recursive function can be rewritten as a loop,
# but the loop version is often less clear than the recursive one.
# Caveat: guard against stack overflow. Function calls are implemented with a call
# stack -- each call pushes a stack frame and each return pops one. Because the stack
# is finite, too many nested recursive calls overflow it; try fact(1000):
def fact(n):
    """Return n! computed recursively.

    Returns 1 for any n <= 1, so fact(0) == 1.  (The original only
    stopped at n == 1, so fact(0) recursed until it hit Python's
    recursion limit.)
    """
    if n <= 1:
        return 1
    return n * fact(n - 1)
# A classic answer to recursion-depth overflow is tail-call optimization: when the
# function's very last action is the recursive call, an optimizing compiler or
# interpreter can reuse the current stack frame so any depth uses one frame.
# NOTE: CPython deliberately does NOT perform tail-call optimization, so the
# tail-recursive form below still overflows for large n; in Python the real fix
# is an explicit loop.
def fact_one(n):
    """Return n! via the accumulator helper fact_iter."""
    return fact_iter(n, 1)


def fact_iter(num, product):
    """Return product * num! using an explicit loop.

    Originally written tail-recursively; CPython performs no tail-call
    optimization, so that version raised RecursionError for large num.
    The loop returns identical results for every input that previously
    succeeded and additionally handles arbitrarily large num.
    """
    while num > 1:
        product *= num
        num -= 1
    return product


if __name__ == '__main__':
    print(fact_one(100))
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.