seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
35300100688 | # @nzm_ort
# https://github.com/nozomuorita/atcoder-workspace-python
# import module ------------------------------------------------------------------------------
from collections import defaultdict, deque, Counter
import math
from itertools import combinations, permutations, product, accumulate, groupby, chain
from heapq import heapify, heappop, heappush
import bisect
import sys
# sys.setrecursionlimit(100000000)
inf = float('inf')  # convenient alias for positive infinity
mod1 = 10**9+7      # common contest modulus (prime)
mod2 = 998244353    # NTT-friendly prime modulus
def ceil_div(x, y):
    """Return the ceiling of x / y using integer arithmetic only."""
    return -(x // -y)
# main code ------------------------------------------------------------------------------------
N = int(input())
# S_1 = [1]; S_i = S_{i-1} + [i] + S_{i-1}
s1 = [1]
if N == 1:
    # The base sequence is just "1": print it and stop early.
    print(1)
    exit()
for i in range(2, N + 1):
    # Build the next level by sandwiching i between two copies of the
    # previous sequence.
    s = s1 + [i] + s1
    s1 = s
print(*s) | nozomuorita/atcoder-workspace-python | abc/abc247/C/answer.py | answer.py | py | 782 | python | en | code | 0 | github-code | 13 |
31544634834 | #!/usr/bin/env python3
import time, threading, random, logging, os, hashlib, json, queue#, pdb
import glovar
from network import broadMessage
# committee process
class BlockProcessing(threading.Thread):
    """Per-committee worker thread for one PoS node.

    Deterministically elects a block leader, lets the leader assemble and
    broadcast a "firstblock", collects commitments from the other members,
    and appends committed blocks to the shared chains held in ``glovar``.

    ``cominfo`` layout: ``[0]`` committee number, ``[1]`` this node's id,
    ``[2]`` the list of committee member ids.  Entries of ``glovar.ComList``
    share indices 0-2 and additionally carry ``[3]`` a per-node status dict
    (``transactionlist``, ``genblock``, ``blockhash``, ``commit``,
    ``commitlist``, ``newblock``, ...), ``[4]`` a message queue and ``[5]``
    a lock.  Index ``[6]`` is cleared when the committee changes
    (NOTE(review): its exact meaning is not visible in this file -- confirm).
    """
    def __init__(self, cominfo, logdirectory):
        threading.Thread.__init__(self)
        self.cominfo = cominfo            # [incomno, comid, commember]
        self.logdirectory = logdirectory  # directory where the log file is written
    def run(self):
        """Thread entry point.

        Sets up a per-node file logger, picks the block leader (SHA-256 of
        the member list, modulo its size -- deterministic, so every member
        agrees), lets the leader broadcast the first block, then loops
        consuming this node's queue until ``glovar.ComChange`` is set.
        """
        filename = self.logdirectory + 'Blockgenlog.txt'
        self.logger = logging.getLogger(str(self.cominfo[1]))
        self.logger.setLevel(level = logging.INFO)
        handler = logging.FileHandler(filename)
        handler.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(message)s')
        handler.setFormatter(formatter)
        self.logger.addHandler(handler)
        incomno = self.cominfo[0]
        comid = self.cominfo[1]
        commember = self.cominfo[2]
        logcontent = 'Start committee ' + str(incomno)
        self.logger.info(logcontent)
        # Select a leader to generate block
        # (same hash input on every member, so all members elect the same leader)
        randomstring = "".join(str(commember))
        idchoose = int(hashlib.sha256(randomstring.encode('utf-8')).hexdigest(), 16) % len(commember)
        logcontent = 'The ' + str(idchoose+1) + ' member: ' + str(commember[idchoose]) + ' is chosen to generate a block'
        self.logger.info(logcontent)
        # Self is selected
        transactions = []
        if comid == commember[idchoose]:
            # Add transactions into the new block
            for each in glovar.ComList:
                if comid == each[1]:
                    # If the PoS node has received transactions
                    if len(each[3]['transactionlist']):
                        for every in each[3]['transactionlist']:
                            transactions.append(every)
                        each[3]['transactionlist'].clear()
                    # Create a new transaction to add
                    # (dummy placeholder transaction so the block is never empty)
                    else:
                        trans_input_no = 1
                        trans_input_item = ['abc']
                        trans_input = [trans_input_no, trans_input_item]
                        trans_output_no = 1
                        trans_output_output1 = ['efg', 5]
                        trans_output_item = [trans_output_output1]
                        trans_output = [trans_output_no, trans_output_item]
                        timestamp = time.time()
                        temptransaction = [trans_input, trans_output, timestamp]
                        temp = str(temptransaction)
                        hashvalue = hashlib.sha256(temp.encode('utf-8')).hexdigest()
                        newtransaction = [hashvalue, trans_input, trans_output, timestamp]
                        transactions.append(newtransaction)
            # Block layout: [incomno, comid, commember, timestamp, hash, transactions]
            timestamp = time.time()
            temp = str(incomno) + str(comid) + str(commember) + str(timestamp)+ str(transactions)
            hashvalue = hashlib.sha256(temp.encode('utf-8')).hexdigest()
            newblock = [incomno, comid, commember, timestamp, hashvalue, transactions]
            json_block = json.dumps(newblock)
            senddata = {'messageid':hashvalue,'type':'firstblock','No':1,'content':json_block}
            # Wait for other Pos node to start up receive process
            time.sleep(5)
            glovar.messageLock.acquire()
            glovar.MessageList.append(hashvalue)
            glovar.messageLock.release()
            broadMessage(senddata)
            self.logger.info('---------------------------------')
            logcontent = str(incomno) + ' :broadcast a block:' + str(hashvalue)
            self.logger.info(logcontent)
            # Change the corresponding global status
            for each in glovar.ComList:
                if comid == each[1]:
                    glovar.ComlistLock.acquire()
                    each[3]['genblock'] = 1
                    each[3]['blockhash'] = hashvalue
                    each[3]['stage'] = 1
                    each[3]['newblock'].append(newblock)
                    glovar.ComlistLock.release()
            # Check if there is another PoS in the same committee
            for every in glovar.ComList:
                if (every[0] == incomno and every[1] != comid):
                    # Send data to this PoS node process
                    glovar.ComlistLock.acquire()
                    every[4].put(senddata)
                    glovar.ComlistLock.release()
        # Get message from its queue belong to this PoS node
        for each in glovar.ComList:
            if comid == each[1]:
                while True:
                    try:
                        data = each[4].get_nowait()
                        # logcontent = 'Get a data'
                        # self.logger.info(logcontent)
                        #broadMessage(data)
                        self.dataHandle(data)
                    except queue.Empty:
                        # Queue drained: back off briefly (busy-poll loop).
                        time.sleep(0.05)
                    # Check if the committee has changed
                    if glovar.ComChange:
                        # NOTE(review): each[6] is written without holding each[5]
                        # or ComlistLock -- confirm this is intentional.
                        each[6] = 0
                        break
        # End the process
        logcontent = 'The process is ended.'
        self.logger.info(logcontent)
    # Handle message for the generation committee
    def dataHandle(self, data):
        """Dispatch one queued message.

        Handles ``'transaction'`` (buffer it for the next block) and
        ``'firstblock'`` messages: No 1 = block proposal, No 2 = commitment
        vote, No 3 = fully committed block; also accepts a committed
        ``'secondblock'`` (No 3).  Anything else is only logged.
        """
        if data['type'] == 'transaction':
            # logcontent = "Handle a transaction:" + str(data['messageid'])
            # self.logger.info(logcontent)
            for each in glovar.ComList:
                if self.cominfo[1] == each[1]:
                    each[3]['transactionlist'].append(data['content'])
        elif data['type'] == 'firstblock':
            # Verify whether it is commited by the committee member
            if data['No'] == 1:
                # logcontent = 'Handle a firstblock:' + str(data['messageid'])
                # self.logger.info(logcontent)
                blockdata = json.loads(data['content'])
                # Verify if the node is in our committee
                if blockdata[1] in self.cominfo[2]:
                    logcontent = 'Verify a firstblock:' + str(blockdata[4]) + ' from comid:' + str(blockdata[1]) + ' in committee:' +str(self.cominfo[0])
                    self.logger.info(logcontent)
                    # Change the corresponding global status
                    for each in glovar.ComList:
                        if self.cominfo[1] == each[1]:
                            # glovar.ComlistLock.acquire()
                            each[3]['newblock'].append(blockdata)
                            # glovar.ComlistLock.release()
                    # Send a commitment message
                    content = {'blockhash':blockdata[4],'incomno':blockdata[0],'comid':self.cominfo[1],'commit':1}
                    beforesend = {'type':'firstblock','No':2,'content':content}
                    temp = str(beforesend)
                    hashvalue = hashlib.sha256(temp.encode('utf-8')).hexdigest()
                    senddata = {'messageid':hashvalue,'type':'firstblock','No':2,'content':content}
                    glovar.messageLock.acquire()
                    glovar.MessageList.append(hashvalue)
                    glovar.messageLock.release()
                    broadMessage(senddata)
                    logcontent = 'Send a commitment for block:' + str(senddata['content']['blockhash'])
                    self.logger.info(logcontent)
                    # Check if there is another PoS in the same committee
                    for each in glovar.ComList:
                        if each[0] == self.cominfo[0] and each[1] != self.cominfo[1]:
                            each[5].acquire()
                            each[4].put(senddata)
                            each[5].release()
            elif data['No'] ==2:
                # logcontent = 'Handle a commitment:' + str(data['messageid'])
                # self.logger.info(logcontent)
                # Verify whether it is commited by the committee member
                if data['content']['comid'] in self.cominfo[2]:
                    logcontent = 'Verify a commitment for block:' + str(data['content']['blockhash']) + ' from comid:' + str(data['content']['comid'])
                    self.logger.info(logcontent)
                    for each in glovar.ComList:
                        if each[1] == self.cominfo[1]:
                            if each[3]['genblock']:
                                if data['content']['blockhash'] == each[3]['blockhash']:
                                    each[3]['commit'] += 1
                                    each[3]['commitlist'].append(data['content']['comid'])
                                    logcontent = 'Block:' + str(each[3]['blockhash']) + ' receive a commit. Total:' + str(each[3]['commit'])
                                    self.logger.info(logcontent)
                                    # Receive enough commitment
                                    # (majority threshold; assumes Firstcommem is the
                                    # committee size -- TODO confirm in glovar)
                                    if (each[3]['commit'] >= glovar.Firstcommem//2+1):
                                        logcontent = 'Receive enough commitment for blocblock:' + str(each[3]['blockhash'])
                                        self.logger.info(logcontent)
                                        self.broadFirstCommitBlock()
                                        self.addBlock(each[3]['newblock'][0])
##                                        logcontent = 'Run a new round to Generate a block'
##                                        self.logger.info(logcontent)
            elif data['No'] == 3:
                # logcontent = 'Handle a commit firstblock:' + str(data['content']['block'][4])
                # self.logger.info(logcontent)
                # Verify the commit firstblock
                # (accept only if every committer is a member of our committee)
                listisin = True
                for each in data['content']['comlist']:
                    if each not in self.cominfo[2]:
                        listisin = False
                        break
                if listisin:
                    logcontent = 'Verified the commit firstblock:' + str(data['content']['block'][4])
                    self.logger.info(logcontent)
                    # Drop buffered transactions that are already inside the
                    # committed block (block index 5 = its transaction list).
                    for each in glovar.ComList:
                        if each[1] == self.cominfo[1]:
                            if len(each[3]['transactionlist']):
                                each[5].acquire()
                                temptransactions = []
                                for every in each[3]['transactionlist']:
                                    if every not in data['content']['block'][5]:
                                        temptransactions.append(every)
                                each[3]['transactionlist'] = temptransactions.copy()
                                each[5].release()
                    self.addBlock(data['content']['block'])
            else:
                logcontent = "Unkown data with data['type']:firstblock"
                self.logger.info(logcontent)
        # Receive a commit secondblock
        elif data['type'] == 'secondblock' and data['No'] == 3:
            logcontent = 'Handle a commit secondblock:'
            self.logger.info(logcontent)
            # Drop buffered transactions already contained in the second block
            # (here the transaction list sits at block index 7).
            for each in glovar.ComList:
                if each[1] == self.cominfo[1]:
                    if len(each[3]['transactionlist']):
                        each[5].acquire()
                        temptransactions = []
                        for every in each[3]['transactionlist']:
                            if every not in data['content']['block'][7]:
                                temptransactions.append(every)
                        each[3]['transactionlist'] = temptransactions.copy()
                        each[5].release()
            # Append to the main chain unless the same producer's block is
            # already at the tip.
            glovar.blockchainLock.acquire()
            if len(glovar.BLOCKCHAIN):
                if glovar.BLOCKCHAIN[len(glovar.BLOCKCHAIN)-1][1] != data['content']['block'][1]:
                    glovar.BLOCKCHAIN.append(data['content']['block'])
                    logcontent = 'Add a secondblock to the chain'
                    self.logger.info(logcontent)
            else:
                glovar.BLOCKCHAIN.append(data['content']['block'])
                logcontent = 'Add a secondblock to the chain'
                self.logger.info(logcontent)
            glovar.blockchainLock.release()
        else:
            logcontent = 'Handle an unkown data:' + str(data)
            self.logger.info(logcontent)
    # Send the commit firstblock to last committee
    def broadFirstCommitBlock(self):
        """Broadcast this node's committed first block (No 3) together with
        its commitment count/list; also forwards it to local sibling PoS
        processes and to the committee numbered Firstcomno + 1."""
        for each in glovar.ComList:
            if self.cominfo[1] == each[1]:
                if len(each[3]['newblock']):
                    content = {'block':each[3]['newblock'][0],'incomno':each[0],\
                               'comid':each[1],'commitnum':each[3]['commit'],\
                               'comlist':each[3]['commitlist']}
                else:
                    # Nothing to broadcast if no block is pending.
                    return
        beforesend = {'type':'firstblock','No':3,'content':content}
        temp = str(beforesend)
        hashvalue = hashlib.sha256(temp.encode('utf-8')).hexdigest()
        senddata = {'messageid':hashvalue,'type':'firstblock','No':3,'content':content}
        for each in glovar.ComList:
            if each[0] == self.cominfo[0] and each[1] != self.cominfo[1]:
                each[5].acquire()
                each[4].put(senddata)
                each[5].release()
            if each[0] == glovar.Firstcomno + 1:
                each[5].acquire()
                each[4].put(senddata)
                each[5].release()
        glovar.messageLock.acquire()
        glovar.MessageList.append(hashvalue)
        glovar.messageLock.release()
        broadMessage(senddata)
        logcontent = 'Broad a commit firstblock:' + \
                     str(senddata['content']['block'][4])
        self.logger.info(logcontent)
    # Add the block to the chain
    def addBlock(self, block):
        """Append ``block`` to FIRSTBLOCKCHAIN (skipping a duplicate tip),
        reset this node's per-block state, then immediately run the next
        leader-election round seeded with the new block's producer and hash.

        NOTE(review): the second half duplicates the leader/broadcast logic
        in ``run`` almost verbatim -- a candidate for extraction.
        """
        glovar.firstchainLock.acquire()
        if len(glovar.FIRSTBLOCKCHAIN):
            # Only append when the tip's hash (index 4) differs.
            if glovar.FIRSTBLOCKCHAIN[len(glovar.FIRSTBLOCKCHAIN)-1][4] != block[4]:
                glovar.FIRSTBLOCKCHAIN.append(block)
                # logcontent = 'Add a firstblock to the chain'
                # self.logger.info(logcontent)
        else:
            glovar.FIRSTBLOCKCHAIN.append(block)
            # logcontent = 'Add a firstblock to the chain'
            # self.logger.info(logcontent)
        glovar.firstchainLock.release()
        # Change the status of committee member
        for each in glovar.ComList:
            if each[1] == self.cominfo[1]:
                # each[5].acquire()
                each[3]['genblock'] = 0
                each[3]['blockhash'] = '0'
                each[3]['commit'] = 0
                each[3]['commitlist'].clear()
                each[3]['newblock'].clear()
                # each[5].release()
        # self.logger.info('----------------------------------------------')
        # logcontent = 'Choose a new leader to Generate a block'
        # self.logger.info(logcontent)
        incomno = self.cominfo[0]
        comid = self.cominfo[1]
        commember = self.cominfo[2]
        # Select a leader to generate block
        # (seeded with the previous block's producer id and hash, so the
        # leader rotates deterministically each round)
        randomstring = "".join(str(self.cominfo[2])) + str(block[1]) + str(block[4])
        idchoose = int(hashlib.sha256(randomstring.encode('utf-8')).hexdigest(), 16) % len(commember)
        logcontent = 'The ' + str(idchoose+1) + ' member: ' + str(commember[idchoose]) + ' is chosen to generate a block'
        self.logger.info(logcontent)
        # Self is selected
        transactions = []
        if comid == commember[idchoose]:
            # Add transactions into the new block
            for each in glovar.ComList:
                if comid == each[1]:
                    # If the PoS node has received transactions
                    if len(each[3]['transactionlist']):
                        for every in each[3]['transactionlist']:
                            transactions.append(every)
                        each[3]['transactionlist'].clear()
                    # Create a new transaction to add
                    else:
                        trans_input_no = 1
                        trans_input_item = ['abc']
                        trans_input = [trans_input_no, trans_input_item]
                        trans_output_no = 1
                        trans_output_output1 = ['efg', 5]
                        trans_output_item = [trans_output_output1]
                        trans_output = [trans_output_no, trans_output_item]
                        timestamp = time.time()
                        temptransaction = [trans_input, trans_output, timestamp]
                        temp = str(temptransaction)
                        hashvalue = hashlib.sha256(temp.encode('utf-8')).hexdigest()
                        newtransaction = [hashvalue, trans_input, trans_output, timestamp]
                        transactions.append(newtransaction)
            timestamp = time.time()
            temp = str(incomno) + str(comid) + str(commember) + str(timestamp)+ str(transactions)
            hashvalue = hashlib.sha256(temp.encode('utf-8')).hexdigest()
            newblock = [incomno, comid, commember, timestamp, hashvalue, transactions]
            json_block = json.dumps(newblock)
            senddata = {'messageid':hashvalue,'type':'firstblock','No':1,'content':json_block}
            glovar.messageLock.acquire()
            glovar.MessageList.append(hashvalue)
            glovar.messageLock.release()
            broadMessage(senddata)
            self.logger.info('---------------------------------')
            logcontent = str(incomno) + ' :broadcast a block:' + str(hashvalue)
            self.logger.info(logcontent)
            # Change the corresponding global status
            for each in glovar.ComList:
                if comid == each[1]:
                    glovar.ComlistLock.acquire()
                    each[3]['genblock'] = 1
                    each[3]['blockhash'] = hashvalue
                    each[3]['newblock'].append(newblock)
                    glovar.ComlistLock.release()
            # Check if there is another PoS in the same committee
            for every in glovar.ComList:
                if (every[0] == incomno and every[1] != comid):
                    # Send data to this PoS node process
                    # NOTE(review): this acquires each[5] -- `each` is the stale
                    # loop variable from the loop above, not `every` -- confirm
                    # whether every[5] was intended.
                    each[5].acquire()
                    every[4].put(senddata)
                    each[5].release()
| louis0121/pos | blockgen.py | blockgen.py | py | 18,630 | python | en | code | 0 | github-code | 13 |
import barcode
from barcode.writer import ImageWriter

# Text to encode in the barcode.
text = "enter your text here"

# Code 128 can encode the full ASCII character set.
code = barcode.get_barcode_class("code128")
# BUG FIX: python-barcode expects a writer *instance*, not the class object;
# passing `ImageWriter` (the class) fails when the barcode is rendered.
image = code(text, writer=ImageWriter())
# save() appends the file extension chosen by the writer (.png here).
save_img = image.save('my image barcode')
| satkar2001/BarcodeGenerator | test.py | test.py | py | 219 | python | en | code | 1 | github-code | 13 |
71284048659 | from . import constants
import sys
from .charsetprober import CharSetProber
class CharSetGroupProber(CharSetProber):
    """Composite prober that delegates detection to a group of child probers.

    Subclasses populate ``self._mProbers``; this class feeds input to every
    still-active child and reports the name/confidence of the best one.
    """
    def __init__(self):
        CharSetProber.__init__(self)
        self._mActiveNum = 0            # number of child probers still in the running
        self._mProbers = []             # child probers, filled in by subclasses
        self._mBestGuessProber = None   # child with the highest confidence so far
    def reset(self):
        """Reset this prober and re-activate every child prober."""
        CharSetProber.reset(self)
        self._mActiveNum = 0
        for prober in self._mProbers:
            if prober:
                prober.reset()
                prober.active = True
                self._mActiveNum += 1
        self._mBestGuessProber = None
    def get_charset_name(self):
        """Return the charset name of the best child prober, or None."""
        if not self._mBestGuessProber:
            # get_confidence() picks _mBestGuessProber as a side effect.
            self.get_confidence()
            if not self._mBestGuessProber:
                return None
#                self._mBestGuessProber = self._mProbers[0]
        return self._mBestGuessProber.get_charset_name()
    def feed(self, aBuf):
        """Feed a byte buffer to every active child prober.

        Deactivates children that answer eNotMe; returns as soon as any
        child reports eFoundIt, or when every child has given up.
        """
        for prober in self._mProbers:
            if not prober:
                continue
            if not prober.active:
                continue
            st = prober.feed(aBuf)
            if not st:
                continue
            if st == constants.eFoundIt:
                self._mBestGuessProber = prober
                return self.get_state()
            elif st == constants.eNotMe:
                prober.active = False
                self._mActiveNum -= 1
                if self._mActiveNum <= 0:
                    # All children ruled themselves out.
                    self._mState = constants.eNotMe
                    return self.get_state()
        return self.get_state()
    def get_confidence(self):
        """Return the best confidence among active children and remember
        which child produced it in ``_mBestGuessProber``."""
        st = self.get_state()
        if st == constants.eFoundIt:
            return 0.99
        elif st == constants.eNotMe:
            return 0.01
        bestConf = 0.0
        self._mBestGuessProber = None
        for prober in self._mProbers:
            if not prober:
                continue
            if not prober.active:
                if constants._debug:
                    sys.stderr.write(prober.get_charset_name()
                                     + ' not active\n')
                continue
            cf = prober.get_confidence()
            if constants._debug:
                sys.stderr.write('%s confidence = %s\n' %
                                 (prober.get_charset_name(), cf))
            if bestConf < cf:
                bestConf = cf
                self._mBestGuessProber = prober
        if not self._mBestGuessProber:
            return 0.0
        return bestConf
#        else:
#            self._mBestGuessProber = self._mProbers[0]
#            return self._mBestGuessProber.get_confidence()
| arduino/Arduino | arduino-core/src/processing/app/i18n/python/requests/packages/charade/charsetgroupprober.py | charsetgroupprober.py | py | 2,606 | python | en | code | 13,827 | github-code | 13 |
10173133495 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 19 19:22:00 2017
@author: thinktic
"""
# Length of the first line (user-facing strings stay in Spanish on purpose).
fd = open("datos_ficheros","r",encoding="UTF-8")
linea= fd.readline()
print("La longitud de la priemra linea es {}".format(len(linea.rstrip())))
#fd.close()  -- we use seek(0) below to rewind, so no need to close fd yet
## Number of lines in the file
#fd = open("datos_ficheros","r",encoding="UTF-8")
fd.seek(0)
lineas= fd.readlines()
x=0
for linea in lineas:
    x=x+1
fd.close()
print("El numero de lineas {}".format(x))
# Count of digits in the first line of the file
fd = open("datos_ficheros","r",encoding="UTF-8")
linea= fd.readline()
x=0
for c in linea:
    if c in ["1","2","3","4","5","6","7","8","9","0"]:
        x=x+1
fd.close()
print("El numero de lineas {}".format(x))
# Sum of the numbers in the first line
with open("datos_ficheros","r",encoding="UTF-8") as fd:
    linea= fd.readline()
    x=0
    for num in linea.split():
        x= x+ int(num)
    #fd.close() is not needed inside a with block
print("La suma de los numeros es {}".format(x))
# Write the sum to the results file.
with open("resultado_eje1.txt","w",encoding="UTF-8") as fd:
    fd.write(str(x))
#fd.close() not needed: the with block already closed the file
| jmchema/CursoPython | Dia3/Ejercicios/Ejercicio1.py | Ejercicio1.py | py | 1,146 | python | en | code | 0 | github-code | 13 |
71348361618 | """
Created on Thu Aug 20 17:44:07 2020
@author: nrdas
"""
import sounddevice as sd
from pyAudioAnalysis import audioSegmentation as ag
from pyAudioAnalysis import audioBasicIO as aIO
from scipy.io.wavfile import write
duration = 15   # recording length, in seconds
fs = 44100      # sampling rate, in Hz
print(sd.query_devices())
print('recording now!')
# Record `duration` seconds of stereo audio; sd.rec is asynchronous,
# so block until the recording finishes.
sample = sd.rec(int(duration*fs), samplerate=fs, channels=2)
sd.wait()
print('recording done!')
print(type(sample))
# Persist the recorded samples as a WAV file for segmentation below.
write('output.wav', fs, sample)
# This gets event segments correctly but it is very sensitive
[Fs, x] = aIO.read_audio_file("output.wav")
segments = ag.silence_removal(x, Fs, 0.020, 0.020, smooth_window=1.0, weight=0.3)
print(segments)
| Dashora7/VoiceID | processing.py | processing.py | py | 652 | python | en | code | 0 | github-code | 13 |
71528340499 | #!/usr/bin/env python3
# Run with:
# ./week2_dotplots.py ./sorted_SRVelvet_lastz.out
# ./week2_dotplots.py ./sorted_BCVelvet_lastz.out
# ./week2_dotplots.py ./sorted_SRSpades_lastz.out
# ./week2_dotplots.py ./sorted_BCSpades_lastz.out
# ./week2_dotplots.py ./sorted_LRSpades_lastz.out
"""
Usage: dotplot.py ./sorted_SRVelvet_lastz.out
"""
import sys
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(8, 6))

# Offset that would let successive contigs be laid end-to-end on the x-axis.
# NOTE(review): it is never updated, so every contig currently shares the
# same x-origin -- confirm whether cumulative offsets were intended.
lastXEnd = 0

# contigDict:    contig name -> [(start, end), ...] positions within the contig
# referenceDict: contig name -> [(ref_start, ref_end, strand), ...]
contigDict = {}
referenceDict = {}

# Parse the (sorted) LASTZ output; the with-statement closes the file,
# which the original `open(...)` without close leaked.
with open(sys.argv[1]) as lastz_file:
    for line in lastz_file:
        fields = line.rstrip("\r\n").split("\t")
        name = fields[-3]
        if name in contigDict:
            contigDict[name].append((int(fields[-2]), int(fields[-1])))
            referenceDict[name].append((int(fields[1]), int(fields[2]), fields[0]))
        else:
            contigDict[name] = [(int(fields[-2]), int(fields[-1]))]
            referenceDict[name] = [(int(fields[1]), int(fields[2]), fields[0])]

# Draw one segment per alignment: x = contig coordinates, y = reference
# coordinates (reversed for minus-strand hits so the segment slopes downward).
for key in contigDict:
    x = []
    y = []
    for x1, x2 in contigDict[key]:
        x.append(x1 + lastXEnd)
        x.append(x2 + lastXEnd)
    for y1, y2, strand in referenceDict[key]:
        if strand == "+":
            y.extend((y1, y2))
        else:
            y.extend((y2, y1))
    for i in range(0, len(x), 2):
        plt.plot(x[i:i + 2], y[i:i + 2])

fig.suptitle("Contigs Aligned to Reference", fontsize=14)
ax.set_xlabel("Contigs", fontsize=12)
ax.set_ylabel("Position in Reference", fontsize=12)
# Pick the output name matching the assembly being plotted:
#fig.savefig("week2_SRVelvet_dotplot.png")
#fig.savefig("week2_BCVelvet_dotplot.png")
#fig.savefig("week2_SRSpades_dotplot.png")
#fig.savefig("week2_BCSpades_dotplot.png")
fig.savefig("week2_LRSpades_dotplot.png")
plt.close(fig) | JSYamamoto/qbb2018-answers | lab2/dotplots.py | dotplots.py | py | 1,827 | python | en | code | 0 | github-code | 13 |
29218780221 | from typing import List
from pyteal.ir import TealBlock
from pyteal.errors import TealInternalError
def sortBlocks(start: TealBlock, end: TealBlock) -> List[TealBlock]:
    """Topologically sort the graph which starts with the input TealBlock.

    Args:
        start: The starting point of the graph to sort.
        end: The block that must appear last in the returned ordering.

    Returns:
        An ordered list of TealBlocks that is sorted such that every block is
        guaranteed to appear in the list before all of its outgoing blocks,
        with the end block forced into the final position.

    Raises:
        TealInternalError: if the end block is not reachable from start.
    """
    stack = [start]
    ordering = []
    seen = set()  # ids of blocks already placed, for O(1) membership tests
    while stack:
        block = stack.pop()
        if id(block) in seen:
            continue
        stack.extend(block.getOutgoing())
        ordering.append(block)
        seen.add(id(block))
    # Locate the end block by identity and move it to the back.
    end_position = -1
    for index, block in enumerate(ordering):
        if block is end:
            end_position = index
            break
    if end_position == -1:
        raise TealInternalError("End block not present")
    ordering.pop(end_position)
    ordering.append(end)
    return ordering
| algorand/pyteal | pyteal/compiler/sort.py | sort.py | py | 1,039 | python | en | code | 269 | github-code | 13 |
7954870719 | """Created: Friday July 20, 2018
Modified: Thursday August 2, 2018
Jorge Luis Flores
Calculates the similarity between each mRNA and a sequence from a given file containing the calculated vectors of mRNA.
Stores in .csv files a matrix where each row corresponds to an mRNA, and each column corresponds to the Jaccard index of the
79-nucleotide window starting at that nucleotide position (where mRNA is zero-indexed, and each column skips 4 nucleotides).
Similarity is measured by calculating p-values for each motif, assuming a Poisson distribution. These p-values are then used
to determine whether each motif is 'important' or not, using a predefined a cut-off (p-val < 0.05 in this case).
Each 'important' motif is scored as '1', and then the similarity between each window and the signature of aptamer-21 is
calculated as a Jaccard index."""
import sys
sys.path.append('/u/floresj/Pyth_modules/')
import multiprocessing
import subprocess
from datetime import datetime as dt
from Bio.SeqIO import parse
import motifs_vector_3 as mv
import numpy as np
import pandas as pd
from scipy.stats import poisson
ALL_MOTIFS = mv.list_of_all_motifs()  # full catalogue of motifs scored per window
WIN_SIZE = 79   # sliding-window length, in nucleotides
NT_SKIP = 4     # nucleotides skipped between consecutive window starts
#FILE_NUMBER = '02'
if __name__ == '__main__':
    # specify file number to process (the mRNA subset, '01' .. '13')
    #FILE_NUMBER = input('01, 02, 03, ... 13\nWhich file subset to process...')
    FILE_NUMBER = sys.argv[1]
    print( 'Script is processing subset: '+FILE_NUMBER )
def same_length(vec_1, vec_2):
    """Raise TypeError unless both vectors have the same length.

    BUG FIX: the original error message interpolated the undefined names
    `mrna_vector` and `target_sig`, so a length mismatch raised NameError
    instead of the intended TypeError.
    """
    if len(vec_1) != len(vec_2):
        raise TypeError(
            f'The vector and the signature are not of the same length. '
            f'Vector is {len(vec_1)} and signature is {len(vec_2)}'
        )
def get_jaccard_index_pval(mrna_vector, target_sig):
    """Jaccard index for real-valued vectors:
    J(x, y) = sum(min(x_i, y_i)) / sum(max(x_i, y_i)).
    https://en.wikipedia.org/wiki/Jaccard_index

    Both vectors must be the same length (e.g. 940 entries).
    """
    same_length(mrna_vector, target_sig)
    numerator = 0
    denominator = 0
    for left, right in zip(mrna_vector, target_sig):
        numerator += min(left, right)
        denominator += max(left, right)
    return numerator / denominator
def get_simple_similarity(rna_vector, target_sig):
    """Count positions where both binary vectors equal 1 (the M_11 count).

    Raises:
        TypeError: if the vectors differ in length.
        ValueError: if either vector holds a value other than 0 or 1.
    """
    same_length(rna_vector, target_sig)
    # Validate first so malformed input fails loudly instead of skewing the count.
    for v, t in zip(rna_vector, target_sig):
        if v not in (0, 1):
            raise ValueError('The vector contains values other than 0 or 1.')
        if t not in (0, 1):
            raise ValueError('The signature contains values other than 0 or 1.')
    # Dead M_01 / M_10 counters from the original were removed: only the
    # co-occurrence count M_11 is ever returned.
    return sum(1 for v, t in zip(rna_vector, target_sig) if v == t == 1)
def get_jaccard_index(mrna_vector, target_sig):
    """Jaccard index of two binary vectors: J(M, T) = |M n T| / |M u T|.

    Each vector is treated as a set of binary feature flags (e.g. derived
    from p-values).

    Raises:
        TypeError: if the vectors differ in length.
        ValueError: if either vector holds a value other than 0 or 1.
        ZeroDivisionError: if neither vector has any 1s (empty union).
    """
    same_length(mrna_vector, target_sig)
    for m, t in zip(mrna_vector, target_sig):
        if m not in (0, 1):
            raise ValueError('The vector contains values other than 0 or 1.')
        if t not in (0, 1):
            raise ValueError('The signature contains values other than 0 or 1.')
    both = 0       # M_11: both vectors have a 1
    either_only = 0  # M_01 + M_10: exactly one vector has a 1
    for m, t in zip(mrna_vector, target_sig):
        if m == t == 1:
            both += 1
        elif m != t:
            either_only += 1
    return both / (either_only + both)
def get_jaccard_distance(mrna_vector, target_sig):
    """Jaccard distance between two binary vectors: 1 - Jaccard index."""
    return 1 - get_jaccard_index(mrna_vector, target_sig)
def get_binary_features(vector, pval=.05):
    """Convert a vector of p-values into a binary feature vector.

    A feature scores 1 when its p-value is strictly below the cut-off,
    otherwise 0.

    GENERALIZATION: the output is sized from the input instead of the
    hard-coded 940, so truncated signatures (e.g. the first 100 motifs)
    work too; 940-entry inputs behave exactly as before.
    """
    ## a p-value = 0, in the context of Mathieu's script for aptamer-21, means that it is extremely small
    features = np.zeros(len(vector), dtype=int)
    for i, feat_value in enumerate(vector):
        if feat_value < pval:
            features[i] = 1
    return features
def get_high_binary_features(rna_pval, rna_sides, pval_cutoff=.05):
    """Build a binary feature vector from p-values and sides.

    A feature is 1 only when its p-value is below the cut-off AND its side
    is 'high', i.e. the feature is significantly overrepresented in the
    motif; every other feature is 0.
    """
    flags = np.zeros(len(rna_pval), dtype=np.uint8)
    for idx in range(len(rna_pval)):
        if rna_sides[idx] == 'high' and rna_pval[idx] < pval_cutoff:
            flags[idx] = 1
    return flags
def get_pvalues(mrna_df):
    """Compute per-window Poisson p-values for a single mRNA.

    mrna_df: dataframe whose rows are the motif-frequency vector of each
    window (index = nucleotide position); the first column holds the number
    of dot-brackets for that window.

    Returns two parallel lists: the p-value vector and the side vector of
    every window, in row order.
    """
    pvalue_vectors = []
    side_vectors = []
    for _, window in mrna_df.iterrows():
        dotbracket_count = window[0]
        pvals, sides = mv.normalize_poisson(window[1:], dotbracket_count, WIN_SIZE)
        pvalue_vectors.append(pvals)
        side_vectors.append(sides)
    return pvalue_vectors, side_vectors
def calculate_similarity(mrna_df, target_sign, target_sides, pval_cutoff=.05, rna_id=''):
    """Takes as input the dataframe of a single mRNA, calculates its pvalues, and returns a vector of distances/similarity
    to the passed signature, with the indices*NT_SKIP representing the nucleotide position.
    mrna_df = dataframe of calculated signature (columns) for each nucleotide position (indices). First column is nb_dotbs
    target_sign = p-values of the signature that each window is being compared against
    target_sides = sides ('high'/'low') matching target_sign
    pval_cutoff = p-value to use to decide whether a feature is "important" or not
    rna_id = optional identifier; when given it is returned as a third tuple element
    (useful when this runs as an async worker and the callback needs the id)"""
    # get p-values and translate to 1/0 feature-scoring system
    rna_pval, rna_sides = get_pvalues(mrna_df)
    feature_values = []
    for i in range( len(rna_pval) ):
        #feature_values.append( get_high_binary_features( rna_pval[i], rna_sides[i], pval_cutoff ) )
        # use only first 100 motifs
        feature_values.append( get_high_binary_features( rna_pval[i][:100], rna_sides[i][:100], pval_cutoff ) )
    #feature_values = [ get_high_binary_features(win_pval, win_sides, pval_cutoff) for win_pval, win_sides in mrna_pval_and_sides ]
    # translate target signature to 1/0 feature-scoring system as well
    #target_features = get_high_binary_features(target_sign, target_sides, pval_cutoff)
    # use only first 100 motifs
    target_features = get_high_binary_features(target_sign[:100], target_sides[:100], pval_cutoff)
    # one Jaccard index per window, against the target's binary signature
    jacc_values = [ get_jaccard_index(win_vector, target_features) for win_vector in feature_values ]
    if rna_id:
        return (jacc_values, mrna_df.index.values, rna_id)
    else:
        return (jacc_values, mrna_df.index.values)
if __name__ == '__main__':
    class TimeCounter:
        '''Collects async results and reports progress across worker processes.'''
        def __init__(self, target):
            self.target = target  # total number of transcripts to process
            self.count = 0        # transcripts processed so far
            # similarity vector per transcript, keyed by rna_id
            self.similarity_dict = {}
        def print_progress(self, met_result):
            """Callback for Pool.apply_async: store one transcript's result
            and periodically report/log progress."""
            jacc_val, nt_indices, rna_id = met_result
            # add resulting similarity vector to similarity dictionary
            jacc_val_df = pd.DataFrame(jacc_val, index=nt_indices).transpose()
            self.similarity_dict[ rna_id ] = jacc_val_df
            self.count += 1
            # prints current status every 100 transcripts
            if self.count % 100 == 0 or self.count == self.target:
                print(f'{dt.now() - start_time}\t{self.count}')
            # logs into file to keep track
            if self.count % 500 == 0 or self.count == self.target or self.count == 1:
                with open(f'/u/floresj/Scripts_log/log_distance_file{FILE_NUMBER}.txt', 'w') as fp:
                    fp.write(f'Subset\t{FILE_NUMBER}\n')
                    fp.write(f'Processed {self.count} out of {self.target}\n')
    # retrieve the DF of motifs from file
    print('Loading file...')
    df = pd.read_parquet(f'/u/floresj/mRNA_norm/mRNA_vectors/mrna_folded_int_subset{FILE_NUMBER}')
    print('File has loaded.')
    df_indices = sorted( list( {rna_id for rna_id, _ in df.index.values} ) )
    # calculate the p-values (and sides) for aptamer-21
    seq_list = [seq for seq in parse('/u/floresj/Transcriptome_scanning_apta21/aptamer_21.fa', format='fasta')]
    apt_21 = seq_list[0]
    dotbs, shapes = mv.dotbs_and_shapes(apt_21.seq)
    a21tmp_signvec, nb_dotbs, _, _ = mv.shape60_ncm40_ncmexp500_expseq340_nodiv(apt_21.seq, dotbs, shapes)
    apt21_signvec, sides = mv.normalize_poisson(a21tmp_signvec, nb_dotbs, len(apt_21.seq))
    ## keep track of execution time
    print('Version: Thursday July 26, 2018')
    print('Time\t\tProcessed')
    start_time = dt.now()
    # multiprocessing setup
    multiprocessing.set_start_method('spawn')
    compteur = TimeCounter(len(df_indices))
    pool = multiprocessing.Pool(20)
    # process all transcripts
    for ind in df_indices:
        # BUG FIX: calculate_similarity takes (mrna_df, target_sign, target_sides,
        # pval_cutoff, rna_id); the original call omitted `sides`, so .05 was
        # passed as target_sides and `ind` as the p-value cut-off, and rna_id
        # stayed empty (breaking the 3-tuple unpack in the callback).
        pool.apply_async(calculate_similarity, (df.loc[ind], apt21_signvec, sides, .05, ind), callback=compteur.print_progress)
        # drop the processed transcript to keep memory bounded
        df.drop(ind, inplace=True)
    pool.close()
    pool.join()
    # save all similarity vectors in a file
    # transposed because parquet must have string column names
    similarities = pd.concat(compteur.similarity_dict)
    #similarities.to_parquet('/u/floresj/Transcriptome_scanning_apta21/Distances/similarity_measuring_example.pq')
similarities.to_csv(f'/u/floresj/Transcriptome_scanning_apta21/Distances/similarities_subset{FILE_NUMBER}.csv') | jl-flores/udem-2018-bioinfo | distance-calc/Distance_measuring.py | Distance_measuring.py | py | 12,672 | python | en | code | 0 | github-code | 13 |
import os
import pytest
from typing import Dict
from pathlib import Path
import yaml
import logging
from unittest import mock
# Import dvc
from dvc.core.config import ConfigReader, ConfigDefault
from dvc.core.database import SupportedDatabaseFlavour
import logging
@pytest.fixture(params=(logging.DEBUG, logging.WARNING, logging.CRITICAL, logging.ERROR))
def dummy_user_configuration_with_supported_db_flavour(request) -> Dict:
    """
    User-configuration dict whose ``dbflavour`` is a supported flavour.

    Parametrised over several logging levels, so each dependent test runs
    once per level; ``request.param`` supplies the level for the current run.
    """
    DUMMY_USER_CONFIG: Dict = {
        "logging_level": request.param,
        "target_schema": 'dvc',
        "database_revision_sql_files_folder": "sample_revision_sql_files",
        "credentials": {
            "user": "peter_parker",
            "password": "1234",
            "host": "localhost",
            "port": 5432,
            "dbname": "superman_db",
            "dbflavour": SupportedDatabaseFlavour.Postgres.value
        }
    }
    return DUMMY_USER_CONFIG
@pytest.fixture()
def dummy_user_configuration_with_unsupported_db_flavour() -> Dict:
    """
    User-configuration dict whose ``dbflavour`` value is deliberately
    invalid, for exercising the unsupported-flavour error path.
    """
    DUMMY_USER_CONFIG_FILE: Dict = {
        "logging_level": logging.INFO,
        "target_schema": 'dvc',
        "database_revision_sql_files_folder": "sample_revision_sql_files",
        "credentials": {
            "user": "peter_parker",
            "password": "1234",
            "host": "localhost",
            "port": 5432,
            "dbname": "superman_db",
            "dbflavour": "UNSUPPORTED DATABASE FLAVOUR!!!!!"
        }
    }
    return DUMMY_USER_CONFIG_FILE
@pytest.fixture
def dummy_existing_config_file_path(
        tmp_path,
        dummy_user_configuration_with_supported_db_flavour):
    """
    Write the supported-flavour configuration to a YAML file under pytest's
    ``tmp_path``, yield that file's path, and delete the file afterwards.
    """
    dummy_existing_config_file_path = tmp_path.joinpath('dummy_existing_config_file_path.yaml')
    # Set up
    with open(dummy_existing_config_file_path, 'w') as dummy_config_file:
        logging.info(f"creating file {dummy_existing_config_file_path}")
        yaml.dump(dummy_user_configuration_with_supported_db_flavour, dummy_config_file, default_flow_style=False)
    yield dummy_existing_config_file_path
    # Tear down
    logging.info(f"deleting file {dummy_existing_config_file_path}")
    dummy_existing_config_file_path.unlink()
@pytest.fixture
def dummy_absent_config_file_path(
        tmp_path,
        dummy_user_configuration_with_supported_db_flavour,
        monkeypatch
):
    """
    Yield the path of a config file that is guaranteed not to exist;
    the file is removed both before and after the test if present.
    """
    # NOTE(review): the configuration and monkeypatch arguments are unused here —
    # confirm they are only declared for fixture-dependency ordering.
    dummy_absent_config_file_path = tmp_path.joinpath('dummy_absent_config_file_path.yaml')
    # Set up:
    # Step 1: Remove the file if it exists
    if dummy_absent_config_file_path.is_file():
        logging.info(f"File {dummy_absent_config_file_path} is found. Deleting...")
        dummy_absent_config_file_path.unlink()
    yield dummy_absent_config_file_path
    # Tear down:
    # Step 1: Remove the file if it exists
    if dummy_absent_config_file_path.is_file():
        logging.info(f"File {dummy_absent_config_file_path} is found. Deleting...")
        dummy_absent_config_file_path.unlink()
@pytest.fixture
def dummy_absent_config_file_path_with_env_var(
        dummy_absent_config_file_path,
        dummy_user_configuration_with_supported_db_flavour,
        monkeypatch
):
    """
    Yield the path of a non-existing config file, with every configuration
    value additionally exported as an environment variable via monkeypatch.
    """
    # Set environment variables.
    # Environment values must be strings, hence the str() around port and level.
    monkeypatch.setenv(ConfigDefault.KEY__DATABASE_REVISION_SQL_FILES_FOLDER, dummy_user_configuration_with_supported_db_flavour['database_revision_sql_files_folder'])
    monkeypatch.setenv(ConfigDefault.KEY__TARGET_SCHEMA, dummy_user_configuration_with_supported_db_flavour['target_schema'])
    monkeypatch.setenv(ConfigDefault.KEY__USER, dummy_user_configuration_with_supported_db_flavour['credentials']['user'])
    monkeypatch.setenv(ConfigDefault.KEY__PASSWORD, dummy_user_configuration_with_supported_db_flavour['credentials']['password'])
    monkeypatch.setenv(ConfigDefault.KEY__HOST, dummy_user_configuration_with_supported_db_flavour['credentials']['host'])
    monkeypatch.setenv(ConfigDefault.KEY__PORT, str(dummy_user_configuration_with_supported_db_flavour['credentials']['port']))
    monkeypatch.setenv(ConfigDefault.KEY__DBNAME, dummy_user_configuration_with_supported_db_flavour['credentials']['dbname'])
    monkeypatch.setenv(ConfigDefault.KEY__DBFLAVOUR, dummy_user_configuration_with_supported_db_flavour['credentials']['dbflavour'])
    monkeypatch.setenv(ConfigDefault.KEY__LOGGING_LEVEL, str(dummy_user_configuration_with_supported_db_flavour['logging_level']))
    yield dummy_absent_config_file_path
@pytest.fixture()
def dummy_config_file_reader_with_supported_db_flavour(
        dummy_user_configuration_with_supported_db_flavour
):
    """
    Yield a mocked ConfigReader whose ``user_config`` and
    ``requested_db_flavour`` are patched with the supported-flavour data.
    """
    with mock.patch('dvc.core.config.ConfigReader') as mock_cls:
        mock_config_reader = mock_cls.return_value
        mock_config_reader.user_config = dummy_user_configuration_with_supported_db_flavour
        mock_config_reader.requested_db_flavour = dummy_user_configuration_with_supported_db_flavour['credentials'][
            'dbflavour']
        yield mock_config_reader
@pytest.fixture()
def dummy_config_file_reader_with_unsupported_db_flavour(
        dummy_user_configuration_with_unsupported_db_flavour
):
    """
    Yield a mocked ConfigReader whose ``user_config`` and
    ``requested_db_flavour`` are patched with the unsupported-flavour data.
    """
    with mock.patch('dvc.core.config.ConfigReader') as mock_cls:
        mock_config_reader = mock_cls.return_value
        mock_config_reader.user_config = dummy_user_configuration_with_unsupported_db_flavour
        mock_config_reader.requested_db_flavour = dummy_user_configuration_with_unsupported_db_flavour['credentials'][
            'dbflavour']
        yield mock_config_reader
| kenho811/Python_Database_Version_Control | tests/fixtures/config_service.py | config_service.py | py | 6,001 | python | en | code | 2 | github-code | 13 |
import json
from datetime import datetime
from django.db import models
from django.utils.translation import gettext_lazy as _
from IOTdevices.actions import *
from server.settings import PROJECT_ID, subscriber, publisher
class OperationMode(models.TextChoices):
    # Whether a traffic light follows its normal schedule or a manual override.
    NORMAL = 'NL', _("Normal")
    OVERRIDE = 'OR', _("OverRide")
class SignalState(models.TextChoices):
    # The colour a traffic light currently shows.
    RED = 'RD', _("Red")
    GREEN = 'GR', _("Green")
def controlListDefault():
    # Callable default for TrafficSignal.controlList: a fresh nested list per row.
    return [[]]
class TrafficSignal(models.Model):
    """
    A junction's signal controller.

    Each signal owns one Pub/Sub topic (``STSxxxxx``) on which commands for
    its lights are published, plus its own subscription (``STMxxxxx``).
    """
    location = models.CharField(max_length=100)
    lat = models.DecimalField(max_digits=9, decimal_places=6, default=0)
    lng = models.DecimalField(max_digits=9, decimal_places=6, default=0)
    controlList = models.JSONField(default=controlListDefault)
    # Pass the callable itself, not its result: ``default=datetime.now()``
    # would be evaluated once at import time and every new row would share
    # that stale timestamp.
    timer = models.DateTimeField(default=datetime.now, editable=True)

    def __str__(self):
        return f"#{self.id} - {self.location} - ({self.lat},{self.lng}) - {self.timer}"

    def getTopicID(self):
        # Zero-padded primary key, e.g. signal 7 -> "STS00007".
        return "STS" + str(self.id).zfill(5)

    def getSubscriptionID(self):
        return "STM" + str(self.id).zfill(5)

    def createTopic(self):
        """Create this signal's Pub/Sub topic (failures are logged, not raised)."""
        TOPIC_ID = self.getTopicID()
        topic_path = publisher.topic_path(PROJECT_ID, TOPIC_ID)
        try:
            topic = publisher.create_topic(request={"name": topic_path})
            print(f"created topic {TOPIC_ID}")
        except Exception as err:
            print(err)

    def deleteTopic(self):
        """Delete this signal's Pub/Sub topic (failures are logged, not raised)."""
        topic_path = publisher.topic_path(PROJECT_ID, self.getTopicID())
        try:
            publisher.delete_topic(request={"topic": topic_path})
            print(f"deleted topic {self.getTopicID()}")
        except Exception as err:
            print(err)

    def createSubscription(self):
        """Create the signal's own subscription on its topic."""
        SUBSCRIPTION_ID = self.getSubscriptionID()
        TOPIC_ID = self.getTopicID()
        subscription_path = subscriber.subscription_path(PROJECT_ID, SUBSCRIPTION_ID)
        topic_path = publisher.topic_path(PROJECT_ID, TOPIC_ID)
        try:
            subscription = subscriber.create_subscription(
                request={"name": subscription_path, "topic": topic_path}
            )
            print(f"created subscription {SUBSCRIPTION_ID}")
        except Exception as err:
            print(err)

    def deleteSubscription(self):
        """Delete the signal's own subscription (failures are logged, not raised)."""
        subscription_path = subscriber.subscription_path(PROJECT_ID, self.getSubscriptionID())
        try:
            subscriber.delete_subscription(request={"subscription": subscription_path})
            print(f"deleted subscription {self.getSubscriptionID()}")
        except Exception as err:
            print(err)
class TrafficLight(models.Model):
    """
    One light of a TrafficSignal, identified by its direction.

    Commands are published on the owning signal's topic and addressed to the
    light's subscription id (``STLxxxxx``) via the RECIPIENT field.
    """
    signal = models.ForeignKey(TrafficSignal, on_delete=models.CASCADE)
    direction = models.IntegerField()
    # Pass the callable so each new row gets a current timestamp; the original
    # ``datetime.now()`` was evaluated once at import time.
    heartbeat = models.DateTimeField(default=datetime.now)
    operationMode = models.CharField(max_length=2, choices=OperationMode.choices, default=OperationMode.NORMAL)
    signalState = models.CharField(max_length=2, choices=SignalState.choices, default=SignalState.RED)

    def __str__(self):
        return f"#{self.id} - {self.direction} - {self.signal}"

    def getSubscriptionID(self):
        # Zero-padded primary key, e.g. light 7 -> "STL00007".
        return "STL" + str(self.id).zfill(5)

    def createSubscription(self):
        """Subscribe this light to its owning signal's topic."""
        SUBSCRIPTION_ID = self.getSubscriptionID()
        TOPIC_ID = self.signal.getTopicID()
        subscription_path = subscriber.subscription_path(PROJECT_ID, SUBSCRIPTION_ID)
        topic_path = publisher.topic_path(PROJECT_ID, TOPIC_ID)
        try:
            subscription = subscriber.create_subscription(
                request={"name": subscription_path, "topic": topic_path}
            )
            print(f"created subscription {SUBSCRIPTION_ID}")
        except Exception as err:
            print(err)

    def switch_to_normal_ride(self):
        """Publish a NORMAL_RIDE command addressed to this light."""
        topic_path = publisher.topic_path(PROJECT_ID, self.signal.getTopicID())
        bundle = {
            ACTION_TYPE: NORMAL_RIDE,
            RECIPIENT: self.getSubscriptionID(),
            PAYLOAD: {}
        }
        bundle_json = json.dumps(bundle).encode("utf-8")
        publisher.publish(topic_path, bundle_json)

    def over_ride_to(self, over_ride_color):
        """Publish an OVER_RIDE command forcing this light to *over_ride_color*."""
        topic_path = publisher.topic_path(PROJECT_ID, self.signal.getTopicID())
        bundle = {
            ACTION_TYPE: OVER_RIDE,
            RECIPIENT: self.getSubscriptionID(),
            PAYLOAD: {
                OPERATION_COLOR: over_ride_color
            }
        }
        bundle_json = json.dumps(bundle).encode("utf-8")
        publisher.publish(topic_path, bundle_json)

    def deleteSubscription(self):
        """Delete this light's subscription (failures are logged, not raised)."""
        subscription_path = subscriber.subscription_path(PROJECT_ID, self.getSubscriptionID())
        try:
            subscriber.delete_subscription(request={"subscription": subscription_path})
            print(f"deleted subscription {self.getSubscriptionID()}")
        except Exception as err:
            print(err)
class Hospital(models.Model):
    """A named hospital location with latitude/longitude coordinates."""
    location = models.CharField(max_length=100)
    lat = models.DecimalField(max_digits=9, decimal_places=6)
    lng = models.DecimalField(max_digits=9, decimal_places=6)
    def __str__(self):
        return f"#{self.id} - {self.location} - ({self.lat},{self.lng})"
class Route(models.Model):
    """A stored route; ``route_info`` holds arbitrary JSON describing it."""
    route_info = models.JSONField()
    def __str__(self):
        return f"#{self.id} - {self.route_info}"
| naveennvrgup/smart-traffic-light | maps/models.py | models.py | py | 5,371 | python | en | code | 0 | github-code | 13 |
import math
def is_prime_num(num):
for i in range(2, math.floor(math.sqrt(num)) + 1):
if (num % i == 0 and num != i):
return False
return num > 1
def problem3(num):
largest = num
for i in range(2, math.floor(math.sqrt(num)) + 1):
if (is_prime_num(i) and num % i == 0):
largest = i
return largest
if __name__ == "__main__":
print (problem3(600851475143)) | karthigb/recreational | challenge/projectEuler/problem3.py | problem3.py | py | 420 | python | en | code | 0 | github-code | 13 |
17248105286 | """flashio URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.conf import settings
from django.contrib import admin
from django.urls import include, path
from django.views import defaults as default_views
from django.conf.urls.static import static
from django.views import generic
from flashcards.views import (
form,
form_text,
UserDecksListView,
DeckDetailView,
DeckDeleteView,
CsvDownloadView,
XlsxDownloadView,
ApkgDownloadView,
DecksListView
)
from users.views import (
SuccessView,
PricingView,
CreateCheckoutSessionView,
stripe_webhook,
cancel_subscription
)
# URL table: home page, admin, deck CRUD/downloads, Stripe billing, auth.
urlpatterns = [
    path("", generic.TemplateView.as_view(template_name="pages/home.html"), name="home"),
    path('admin/', admin.site.urls),
    path("form/", form, name="form"),
    path("form-text/", form_text, name="form-text"),
    path("decks/", UserDecksListView.as_view(), name="user-decks"),
    path("decks/all/", DecksListView.as_view(), name="all-decks"),
    path('decks/<int:pk>/', DeckDetailView.as_view(), name='deck-detail'),
    path('decks/delete/<int:pk>', DeckDeleteView.as_view(), name='deck-delete'),
    # Deck export endpoints (CSV / XLSX / Anki package).
    path("decks/download-csv/<int:pk>", CsvDownloadView.as_view(), name="csv-download"),
    path("decks/download-xlsx/<int:pk>", XlsxDownloadView.as_view(), name="xlsx-download"),
    path("decks/download-apkg/<int:pk>", ApkgDownloadView.as_view(), name="apkg-download"),
    # Stripe checkout / subscription endpoints.
    path("success/", SuccessView.as_view(), name="success"),
    path("pricing/", PricingView.as_view(), name="pricing"),
    path("checkout/<int:price>/", CreateCheckoutSessionView.as_view(), name="checkout"),
    path("stripe/webhook/", stripe_webhook, name="stripe-webhook"),
    path("stripe/cancel/", cancel_subscription, name="cancel-sub"),
    # Django Admin, use {% url 'admin:index' %}
    # User management
    path("users/", include("users.urls", namespace="users")),
    path("accounts/", include("allauth.urls")),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
if settings.DEBUG:
    # This allows the error pages to be debugged during development, just visit
    # these url in browser to see how these error pages look like.
    urlpatterns += [
        path(
            "400/",
            default_views.bad_request,
            kwargs={"exception": Exception("Bad Request!")},
        ),
        path(
            "403/",
            default_views.permission_denied,
            kwargs={"exception": Exception("Permission Denied")},
        ),
        path(
            "404/",
            default_views.page_not_found,
            kwargs={"exception": Exception("Page not Found")},
        ),
        path("500/", default_views.server_error),
    ]
| patraz/cardsio | flashio/urls.py | urls.py | py | 3,379 | python | en | code | 0 | github-code | 13 |
# The naive implementation is simple to write... and it got Time Limit Exceeded.
#关键应该在那个对1337模运算上
class Solution(object):
    def superPow(self, a, b):
        """
        Compute a**B mod 1337, where B is given as a list of decimal digits.

        :type a: int
        :type b: List[int]
        :rtype: int
        """
        # Fold the digit list into the integer exponent, e.g. [1, 0] -> 10.
        B = 0
        for digit in b:
            B = B * 10 + digit
        # Three-argument pow() performs modular exponentiation, which avoids
        # materialising the astronomically large a**B that `a**B % 1337`
        # would build (the cause of the original TLE).
        return pow(a, B, 1337)
#AC..一行代码..便于学习 我改了下
#其实原理差不多,只是用到了reduce,lambda,学习下
class Solution(object):
    def superPow(self, a, b):
        """
        Compute a**B mod 1337 using Euler's theorem to shrink the exponent.

        :type a: int
        :type b: List[int]
        :rtype: int
        """
        from functools import reduce  # reduce is not a builtin in Python 3
        if a % 1337 == 0:
            return 0
        # phi(1337) = phi(7) * phi(191) = 6 * 190 = 1140, so the exponent can
        # be reduced modulo 1140 (the original used 1040, which is wrong).
        # Adding 1140 keeps the exponent large enough that the reduction is
        # also valid when gcd(a, 1337) > 1 (a divisible by 7 or 191).
        return pow(a, reduce(lambda x, y: (x * 10 + y) % 1140, b) + 1140, 1337)
#reduce()对list的每个元素反复调用函数f,并返回最终结果值。这里是对b中每个元素反复调用lambda
#先计算b中头两个数,然后把结果和第三个数运算,再以此下去
#lambda函数,参数为x,y 返回值为(x * 10 + y) % 1140
#pow中有三个参数时,pow(x,y,z):表示x的y次幂后除以z的余数
###问题来了,为啥要对1140取余然后加1140呢,测试了下不加1140是85ms,加了82ms,就是说提速了3ms...
#目的应该是把最后的求余分散到之前简化运算量,同时减少空间复杂度。
#而至于为什么选1140...我改成1240、1040居然都报错了
#好吧,继续去看了解释:
'''
1337 only has two divisors 7 and 191 exclusive 1 and itself, so judge if a has a divisor of 7 or 191, and note that 7 and 191 are prime numbers, phi of them is itself - 1, then we can use the Euler's theorem, see it on wiki https://en.wikipedia.org/wiki/Euler's_theorem, it's just Fermat's little theorem if the mod n is prime.
see how 1140 is calculated out:
phi(1337) = phi(7) * phi(191) = 6 * 190 = 1140
'''
#好像用到了欧拉定理...不管了,反正去掉1140也可以,先掌握能掌握的吧
#熟悉reduce用法、lambda用法,及pow()三个参数的用法
| fire717/Algorithms | LeetCode/python/_372.SuperPow.py | _372.SuperPow.py | py | 2,458 | python | zh | code | 6 | github-code | 13 |
def filter_with_new_bit(array_to_filter, current_bit_index, most_common=True):
    """Keep the bit strings whose bit at *current_bit_index* matches the
    most-common (or, if *most_common* is False, least-common) value there.

    Ties (equally many ones and zeros) resolve to '1' for most-common and
    '0' for least-common. Returns a new list.
    """
    ones = sum(1 for entry in array_to_filter if entry[current_bit_index] == "1")
    zeros = len(array_to_filter) - ones
    if most_common:
        keep = "1" if ones >= zeros else "0"
    else:
        keep = "0" if ones >= zeros else "1"
    return [entry for entry in array_to_filter if entry[current_bit_index] == keep]
if __name__ == "__main__":
with open("input_day_3","r") as fp:
oxygen_gen_most = [line.strip() for line in fp.readlines()]
scrubber_rate_least = oxygen_gen_most.copy()
oxygen_val=-1
scrubber_val=-1
for i in range(len(oxygen_gen_most[0])):
if oxygen_val==-1:
oxygen_gen_most=filter_with_new_bit(oxygen_gen_most,i,most_common=True)
if len(oxygen_gen_most)==1:
oxygen_val=oxygen_gen_most[0]
if scrubber_val==-1:
scrubber_rate_least=filter_with_new_bit(scrubber_rate_least,i, most_common=False)
if len(scrubber_rate_least)==1:
scrubber_val=scrubber_rate_least[0]
print("The result is:")
print(int(oxygen_val,2)*int(scrubber_val,2)) | shaefeli/AdventOfCode2021 | day03/day3_py_part2.py | day3_py_part2.py | py | 1,270 | python | en | code | 0 | github-code | 13 |
import numpy as np
import pygame as pg
from modules.swarm_path_search import SwarmPathSearch
pg.font.init()  # one-time pygame font-subsystem init (module import side effect)
def render(srf: pg.Surface, points: np.ndarray, rsa: SwarmPathSearch):
    """Draw the swarm graph: strong edges first, then labelled nodes."""
    pid_font = pg.font.SysFont("Ubuntu", 12, True)
    maxw = np.max(rsa.weights)
    # Draw an edge only if its weight exceeds 10% of the maximum weight.
    # Iterating j < i covers each pair once (assumes a symmetric weight matrix
    # — TODO confirm).
    for i in range(points.shape[0]):
        for j in range(i):
            t = rsa.weights[i][j]/maxw
            if t > 0.1:
                pg.draw.line(srf, 'white', points[i], points[j], 2)
    for i, p in enumerate(points):
        # Red ring with a black centre.
        pg.draw.circle(srf, 'red', p, 12)
        pg.draw.circle(srf, 'black', p, 10)
        id_text = pid_font.render(
            f"{rsa.vertex_priority[i]:.0f}", True, 'yellow', 'black'
        )
        # Centre the priority label on the node.
        srf.blit(id_text, p - np.array(id_text.get_rect().size) / 2)
| VY354/my_repository | Python/projects/swarm_intelligence/swarm_path_search/src/modules/swarm_path_search_visualizer.py | swarm_path_search_visualizer.py | py | 769 | python | en | code | 0 | github-code | 13 |
from Letter_frequency_a_and_c import polish_tables_to_import, english_tables_to_import, german_tables_to_import
from math import fabs
def getting_frequencies(table):
    """Normalise the counts in *table* in place so they sum to 1.

    If all counts are zero the table is left untouched. The (mutated)
    table is also returned for convenience.
    """
    total = sum(table.values())
    if total:
        for letter in table:
            table[letter] = table[letter] / total
    return table
def creating_relative_letter_frequencies2(text):
    """Compute relative vowel and consonant frequencies of *text*.

    Accented Polish/German letters are first mapped onto their base ASCII
    letters; any character outside a-z is ignored.

    :param text: input string (any case)
    :return: [vowel_frequencies, consonant_frequencies] — two dicts whose
             values sum to 1 each (or stay 0 if no such letters occur).
    """
    alphabet = 'abcdefghijklmnopqrstuvwxyz'
    vowels = 'aeiouy'
    # Map accented letters onto their base ASCII letters.
    tails = {'ą': 'a', 'ć': 'c', 'ę': 'e', 'ł': 'l', 'ó': 'o', 'ś': 's', 'ź': 'z', 'ż': 'z', 'ä': 'a', 'ö': 'o',
             'ß': 's', 'ü': 'u'}
    # str.translate performs all substitutions in one O(n) pass instead of
    # rebuilding the string once per character (the old loop was O(n**2));
    # the old no-op `text.strip()` call (result discarded) was dropped.
    text = text.lower().translate(str.maketrans(tails))
    relative_vowel_frequency = {letter: 0 for letter in alphabet if letter in vowels}
    relative_consonant_frequency = {letter: 0 for letter in alphabet if letter not in vowels}
    for letter in text:
        if letter in relative_vowel_frequency:
            relative_vowel_frequency[letter] += 1
        elif letter in relative_consonant_frequency:
            relative_consonant_frequency[letter] += 1
    getting_frequencies(relative_vowel_frequency)
    getting_frequencies(relative_consonant_frequency)
    return [relative_vowel_frequency, relative_consonant_frequency]
def specified_comparison(tables):
    """Compare [vowel_freqs, consonant_freqs] in *tables* against the imported
    Polish/English/German reference tables using L1 (sum of absolute
    differences) distance, print the distances, and print the closest
    language for vowels and consonants separately.
    """
    languages_names = ['polish', 'english', 'german']
    languages = [polish_tables_to_import, english_tables_to_import, german_tables_to_import]
    # NOTE(review): two separate indices track the same loop position — they
    # always stay equal; kept as-is to preserve behaviour exactly.
    language_index_v = 0
    language_index_c = 0
    vowels_results = {}
    consonants_results = {}
    vowels_values = []
    consonants_values = []
    for language in languages:
        distance_v = 0
        distance_c = 0
        # L1 distance over the vowel frequencies.
        for key in (tables[0]).keys():
            distance_v += fabs(tables[0][key] - language[0][key])
        vowels_results[languages_names[language_index_v]] = distance_v
        vowels_values.append(distance_v)
        language_index_v += 1
        # L1 distance over the consonant frequencies.
        for key in (tables[1]).keys():
            distance_c += fabs(tables[1][key] - language[1][key])
        consonants_results[languages_names[language_index_c]] = distance_c
        consonants_values.append(distance_c)
        language_index_c += 1
    print(f'Vowels:\n{vowels_results}\nConsonants:\n{consonants_results}')
    the_most_similar_language_vowels_value = min(vowels_values)
    the_most_similar_language_consonants_value = min(consonants_values)
    # Report every language that attains the minimal distance.
    for k, v in vowels_results.items():
        if v == the_most_similar_language_vowels_value:
            print(f'The most similar language when we look only at vowels is {str(k)}.')
    for k, v in consonants_results.items():
        if v == the_most_similar_language_consonants_value:
            print(f'The most similar language when we look only at consonants is {str(k)}.')
| MatPatCarry/Algorithms_univerity_classes | WDA_List_5/Letter_frequency_c_functions.py | Letter_frequency_c_functions.py | py | 3,209 | python | en | code | 0 | github-code | 13 |
import json, os, random, h5py, tqdm, ast
from collections import Counter
from PIL import Image
import numpy as np
from torch.utils.data import Dataset
import torch
def parse_and_prepare_data(dataset, karpathy_json_path, image_folder, captions_per_image, min_word_freq, output_folder, max_len):
    """Read a Karpathy-split caption JSON, build a word map, and write per-split
    HDF5 image files plus JSON caption/caption-length files to *output_folder*.

    :param dataset: one of 'coco', 'flickr8k', 'flickr30k'
    :param karpathy_json_path: path to the Karpathy split JSON file
    :param image_folder: root folder containing the images
    :param captions_per_image: number of captions kept (sampled or padded) per image
    :param min_word_freq: words with frequency <= this are mapped to <unk>
    :param output_folder: destination folder for every generated file
    :param max_len: captions longer than this (in tokens) are discarded for training
    """
    dataset = dataset.lower()
    assert dataset in {'coco', 'flickr8k', 'flickr30k'}
    with open(karpathy_json_path, 'r') as j:
        karpathy_data = json.load(j)
    # GTS maps image id -> all ground-truth sentences (even ones over max_len).
    GTS = {}
    trn_image_ids = []
    vld_image_ids = []
    tst_image_ids = []
    trn_image_paths = []
    vld_image_paths = []
    tst_image_paths = []
    trn_image_captions = []
    vld_image_captions = []
    tst_image_captions = []
    word_freq = Counter()
    print("READING CAPTIONS.....")
    for img in tqdm.tqdm(karpathy_data["images"]):
        image_id = img['imgid']
        captions = []
        for c in img['sentences']:
            word_freq.update(c['tokens'])
            if len(c['tokens']) <= max_len:
                captions.append(c['tokens'])
            if image_id not in GTS.keys():
                GTS[image_id] = []
            GTS[image_id].append(' '.join(c['tokens']))
        # Skip images whose captions were all longer than max_len.
        if len(captions) == 0:
            continue
        # COCO keeps images in per-split subfolders ('filepath'); Flickr does not.
        path = os.path.join(image_folder, img['filepath'], img['filename']) if dataset == 'coco' else os.path.join(image_folder, img['filename'])
        if img['split'] in {'train', 'restval'}:
            trn_image_ids.append(image_id)
            trn_image_paths.append(path)
            trn_image_captions.append(captions)
        elif img['split'] in {'val'}:
            vld_image_ids.append(image_id)
            vld_image_captions.append(captions)
            vld_image_paths.append(path)
        elif img['split'] in {'test'}:
            tst_image_ids.append(image_id)
            tst_image_paths.append(path)
            tst_image_captions.append(captions)
    # NOTE(review): print(..., file=open(...)) never closes the handle — left as-is.
    print(GTS, file=open(os.path.join(output_folder, dataset.upper()+"_"+"GTS.txt"), "w+"))
    assert len(trn_image_captions) == len(trn_image_paths)
    assert len(vld_image_captions) == len(vld_image_paths)
    assert len(tst_image_captions) == len(tst_image_paths)
    # Word ids start at 1; 0 is reserved for <pad>.
    words = [w for w in word_freq.keys() if word_freq[w]>min_word_freq]
    word_map = {k:v+1 for v, k in enumerate(words)}
    word_map['<unk>'] = len(word_map)+1
    word_map['<start>'] = len(word_map)+1
    word_map['<end>'] = len(word_map)+1
    word_map['<pad>'] = 0
    base_filename = dataset+"_"+str(captions_per_image)+"_cap_per_img_"+str(min_word_freq)+"_min_word_freq"
    with open(os.path.join(output_folder, "WORDMAP_"+base_filename+".json"), 'w') as j:
        json.dump(word_map, j)
    # Fixed seed so caption sampling below is reproducible.
    random.seed(947)
    print(trn_image_ids,file=open(os.path.join(output_folder, dataset.upper()+"_"+"TRAIN_IMG_IDS.txt"), "w+"))
    print(vld_image_ids,file=open(os.path.join(output_folder, dataset.upper()+"_"+"VALID_IMG_IDS.txt"), "w+"))
    print(tst_image_ids,file=open(os.path.join(output_folder, dataset.upper()+"_"+"TEST_IMG_IDS.txt"), "w+"))
    for img, cap, split in [(trn_image_paths, trn_image_captions, "TRAIN"), (vld_image_paths, vld_image_captions, "VALID"), (tst_image_paths, tst_image_captions, "TEST")]:
        with h5py.File(os.path.join(output_folder, split+"_IMAGES_"+base_filename+".hdf5"), 'a') as h:
            h.attrs['captions_per_image'] = captions_per_image
            images = h.create_dataset('images', (len(img), 3, 256, 256), dtype='uint8')
            print("\nReading %s images and captions, storing to file ....\n"%split)
            enc_captions = []
            caplens = []
            for i, path in enumerate(tqdm.tqdm(img)):
                # Pad with repeated captions (or sample down) so every image
                # has exactly captions_per_image captions.
                if len(cap[i]) < captions_per_image:
                    captions = cap[i]+[random.choice(cap[i]) for _ in range(captions_per_image - len(cap[i]))]
                else:
                    captions = random.sample(cap[i], k=captions_per_image)
                assert len(captions) == captions_per_image
                image = Image.open(img[i])
                image = image.convert("RGB")
                image = image.resize((256,256))
                # HWC -> CHW, as expected by the (3, 256, 256) dataset layout.
                image = np.transpose(np.array(image), (2,0,1))
                assert image.shape == (3,256,256)
                assert np.max(image)<=255
                images[i] = image
                for j, c in enumerate(captions):
                    # <start> tokens <end> followed by <pad> up to max_len + 2.
                    enc_c = [word_map['<start>']]+[word_map.get(word, word_map['<unk>']) for word in c]+[word_map['<end>']]+[word_map['<pad>']]*(max_len-len(c))
                    c_len = len(c)+2
                    enc_captions.append(enc_c)
                    caplens.append(c_len)
            assert images.shape[0]*captions_per_image == len(enc_captions) == len(caplens)
            with open(os.path.join(output_folder, split+'_CAPTIONS_'+base_filename+'.json'), 'w') as j:
                json.dump(enc_captions, j)
            with open(os.path.join(output_folder, split+'_CAPLENS_'+base_filename+'.json'), 'w') as j:
                json.dump(caplens, j)
class ImageCaptionDataset(Dataset):
    """Caption-indexed PyTorch dataset over the files written by
    parse_and_prepare_data: item i yields caption i and its owning image.
    """
    def __init__(self, data_folder, base_filename, split, transform=None):
        """
        :param data_folder: folder holding the generated HDF5/JSON/txt files
        :param base_filename: the base_filename used when the files were created
        :param split: 'TRAIN', 'VALID' or 'TEST' (case-insensitive)
        :param transform: optional callable applied to the image tensor
        """
        self.split = split.upper()
        assert self.split in {"TRAIN", "TEST", "VALID"}
        self.dataset_name = base_filename.split("_")[0].upper()
        # The HDF5 file stays open for the lifetime of the dataset object.
        self.h = h5py.File(os.path.join(data_folder, self.split+"_IMAGES_"+base_filename+".hdf5"), 'r')
        self.images = self.h['images']
        # The ids file contains a printed Python list; parse it back.
        self.image_ids = os.path.join(data_folder,self.dataset_name+"_"+self.split+"_IMG_IDS.txt")
        with open(self.image_ids, "r") as f:
            self.image_ids = ast.literal_eval(f.read())
        self.cpi = self.h.attrs['captions_per_image']
        with open(os.path.join(data_folder, self.split+"_CAPTIONS_"+base_filename+".json"), 'r') as j:
            self.captions = json.load(j)
        with open(os.path.join(data_folder, self.split+"_CAPLENS_"+base_filename+".json"), 'r') as j:
            self.caplens = json.load(j)
        self.transform = transform
        # Length is the caption count, i.e. images * captions_per_image.
        self.dataset_size = len(self.captions)
    def __len__(self):
        return self.dataset_size
    def __getitem__(self, i):
        # Caption i belongs to image i // captions_per_image.
        img_i = i//self.cpi
        # Scale pixel values from [0, 255] into [0.0, 1.0].
        img = torch.FloatTensor(self.images[img_i]/255.0)
        img_id = self.image_ids[img_i]
        if self.transform is not None:
            img = self.transform(img)
        caption = torch.LongTensor(self.captions[i])
        caplen = torch.LongTensor([self.caplens[i]])
        if self.split == "TRAIN":
            return img_id, img, caption, caplen
        else:
            # For evaluation, also return every caption of the same image.
            all_captions = torch.LongTensor(
                self.captions[((i//self.cpi)*self.cpi):(((i//self.cpi)*self.cpi)+self.cpi)]
            )
            return img_id, img, caption, caplen, all_captions
if __name__ == "__main__":
pass
# data = ImageCaptionDataset("./output", "")
# This is for testing the code in this file and creating the dataset for the first time
# parse_and_prepare_data('coco',
# '/mnt/BRCD-2/Datasets/karpathy_captions/dataset_coco.json',
# '/mnt/BRCD-2/Datasets/coco',
# 5,
# 5,
# './output/',
# 50
# )
# parse_and_prepare_data('flickr8k',
# '/mnt/BRCD-2/Datasets/karpathy_captions/dataset_flickr8k.json',
# '/mnt/BRCD-2/Datasets/flickr8k/flickr8k_images',
# 5,
# 5,
# './output/',
# 50
# )
# parse_and_prepare_data('flickr30k',
# '/mnt/BRCD-2/Datasets/karpathy_captions/dataset_flickr30k.json',
# '/mnt/BRCD-2/Datasets/flickr30k/flickr30k_images',
# 5,
# 5,
# './output/',
# 50
# ) | numan947/DescribeIt-A-ReImplementation-of-Show-Attend-And-Tell | data.py | data.py | py | 8,467 | python | en | code | 0 | github-code | 13 |
import math
import resource
import socket
from logging import getLogger
import errno
import msgpack
import outcome
import anyio
from async_generator import asynccontextmanager
from .exceptions import SerfClosedError, SerfConnectionError, SerfError
from .result import SerfResult
from .util import ValueEvent, CancelledError
logger = getLogger(__name__)
# NOTE(review): this module-level counter appears unused — SerfConnection keeps
# its own class-level _conn_id; confirm before removing.
_conn_id = 0
class SerfTimeout(TimeoutError):
    """Timed out while waiting for data from the Serf agent."""
    pass
class _StreamReply:
    """
    This class represents a multi-message reply.
    Actually, it also represents the query itself, which is not started
    until you enter the stream's context.
    This is an internal class. See :meth:`Serf.stream` for details.
    """
    # pylint: disable=protected-access,too-many-instance-attributes,too-many-arguments
    # Whether __aexit__ sends a "stop" command for this sequence id.
    send_stop = True
    # Header of the initial acknowledgement, filled in by __aenter__.
    head = None
    def __init__(self, conn, command, params, seq, expect_body):
        self._conn = conn
        self._command = command
        self._params = params
        self.seq = seq
        # Buffered in-memory channel: the connection's reader pushes replies
        # on _q_w; consumers take them from _q_r via get()/async iteration.
        self._q_w, self._q_r = anyio.create_memory_object_stream(10000)
        # Negative marks "first reply is the body-less OK"; the reader flips
        # it positive once that acknowledgement has been consumed.
        self.expect_body = -expect_body
    async def set(self, value):
        # Called by the connection's reader for each incoming reply.
        await self._q_w.send(outcome.Value(value))
    async def set_error(self, err):
        # Store an error; it is re-raised in the consumer via unwrap().
        await self._q_w.send(outcome.Error(err))
    async def get(self):
        res = await self._q_r.receive()
        return res.unwrap()
    def __aiter__(self):
        return self
    async def __anext__(self):
        try:
            res = await self._q_r.receive()
        except anyio.EndOfStream:
            raise StopAsyncIteration # pylint:disable=raise-missing-from
        return res.unwrap()
    async def __aenter__(self):
        # Entering the context actually sends the command and waits for the
        # initial reply, whose header is kept for inspection in self.head.
        reply = await self._conn._call(self._command, self._params, _reply=self)
        res = await reply.get()
        if res is not None:
            self.head = res.head
        return self
    async def __aexit__(self, *exc):
        hdl = self._conn._handlers
        if self.send_stop:
            # Shielded so the "stop" still reaches the agent when we are
            # being cancelled.
            async with anyio.open_cancel_scope(shield=True):
                try:
                    await self._conn.call("stop", params={b"Stop": self.seq}, expect_body=False)
                except (anyio.ClosedResourceError, CancelledError):
                    pass
        if hdl is not None:
            # TODO remember this for a while?
            try:
                await self._conn.tg.spawn(self._cleanup, hdl)
            except RuntimeError: # TG may be closed already
                pass
    async def _cleanup(self, hdl, *, result=None):
        # Deregister our handler after a grace period so late-arriving
        # messages for this sequence id are still matched.
        if result is not None:
            await result.set()
        await anyio.sleep(2)
        del hdl[self.seq]
    async def cancel(self):
        # Close the write side; pending consumers see end-of-stream.
        await self._q_w.aclose()
        self._q_w = None
class SerfConnection:
"""
Manages RPC communication to and from a Serf agent.
This is an internal class; see :class:`asyncserf.Serf` for methods
you're supposed to call. ;-)
"""
# pylint: disable=too-many-instance-attributes
# Read from the RPC socket in blocks of this many bytes.
# (Typically 4k)
_socket_recv_size = resource.getpagesize()
_conn_id = 0
def __init__(self, tg, host="localhost", port=7373):
type(self)._conn_id += 1
self._conn_id = type(self)._conn_id
self.tg = tg
self.host = host
self.port = port
self._socket = None
self._seq = 0
self._handlers = {}
self._send_lock = anyio.create_lock()
# handler protocol: incoming messages are passed in using `.set`.
# If .expect_body is True then the reader will add a body to the
# request. If it's -1 then the first reply is the body-less OK (we
# hope) and only subsequent replies will have a body.
def __repr__(self):
return "<%(class)s counter=%(c)s host=%(h)s port=%(p)s>" % {
"class": self.__class__.__name__,
"c": self._seq,
"h": self.host,
"p": self.port,
}
def stream(self, command, params=None, expect_body=True):
"""
Sends the provided command to Serf for evaluation, with
any parameters as the message body. Expect a streamed reply.
Returns a ``_StreamReply`` object which affords an async context
manager plus async iterator, which will return replies.
"""
return _StreamReply(self, command, params, self._counter, expect_body)
    async def _call(self, command, params=None, expect_body=True, *, _reply=None):
        """
        Sends the provided command to Serf for evaluation, with
        any parameters as the message body.
        Returns the reply object. If the connection is being torn down and
        no reply is expected, return ``None``.
        """
        # pylint: disable=protected-access ## owch
        class SingleReply(ValueEvent):
            # pylint: disable=protected-access,no-self-argument
            """
            A helper class, used to process a single reply.

            Defined as a closure so set/set_error can deregister the
            handler from the outer connection's ``self._handlers`` map
            as soon as the reply (or error) arrives.
            """
            def __init__(slf, seq, expect_body):
                super().__init__()
                slf.seq = seq
                slf.expect_body = expect_body
            async def set(slf, val):  # pylint: disable=arguments-differ
                if self._handlers is not None:
                    del self._handlers[slf.seq]
                await super().set(val)
            async def set_error(slf, err):  # pylint: disable=arguments-differ
                if self._handlers is not None:
                    del self._handlers[slf.seq]
                await super().set_error(err)
        if self._socket is None:
            # Connection already torn down: silently drop the command.
            return
        if _reply is None:
            # Fresh single-shot call: allocate a sequence number + handler.
            seq = self._counter
            _reply = SingleReply(seq, expect_body)
        else:
            # Re-used reply object (e.g. a stream): keep its sequence number.
            seq = _reply.seq
        if self._handlers is not None:
            self._handlers[seq] = _reply
        else:
            # Reader is gone; nobody can ever resolve the reply.
            _reply = None
        if params:
            logger.debug("%d:Send %s:%s =%s", self._conn_id, seq, command, repr(params))
        else:
            logger.debug("%d:Send %s:%s", self._conn_id, seq, command)
        # Wire format: msgpack'd header, immediately followed by an
        # optional msgpack'd body.
        msg = msgpack.packb({"Seq": seq, "Command": command})
        if params is not None:
            msg += msgpack.packb(params)
        async with self._send_lock:  # pylint: disable=not-async-context-manager ## owch
            if self._socket is None:
                raise anyio.ClosedResourceError()
            await self._socket.send(msg)
        return _reply
async def call(self, command, params=None, expect_body=True):
"""
Fire a single-reply command, wait for the reply (and return it).
"""
res = await self._call(command, params, expect_body=expect_body)
if res is None:
return res
return await res.get()
    async def _handle_msg(self, msg):
        """Handle an incoming message.
        Return True if the message is incomplete, i.e. the reader should
        wait for a body, attach it to the message, and then call this
        method again.
        """
        if self._handlers is None:
            # Reader already shut down; nothing can consume this message.
            logger.warning("Reader terminated:%s", msg)
            return
        try:
            seq = msg.head[b"Seq"]
        except KeyError:
            raise RuntimeError(  # pylint:disable=raise-missing-from
                "Reader got out of sync: " + str(msg)
            )
        try:
            hdl = self._handlers[seq]
        except KeyError:
            # Reply for a call we no longer (or never) tracked.
            logger.warning("Spurious message %s: %s", seq, msg)
            return
        # A positive expect_body with no body yet means "keep reading":
        # expect_body > 1 always needs a body; expect_body == 1 only when
        # the header is not an error.
        if (
            msg.body is None
            and hdl.expect_body > 0
            and (hdl.expect_body > 1 or not msg.head[b"Error"])
        ):
            return True
        # Do this here because stream replies might arrive immediately
        # i.e. before the queue listener gets around to us
        if hdl.expect_body < 0:
            hdl.expect_body = -hdl.expect_body
        if msg.head[b"Error"]:
            await hdl.set_error(SerfError(msg))
            await anyio.sleep(0.01)
        else:
            await hdl.set(msg)
        return False
    async def _reader(self, *, result: ValueEvent = None):
        """Main loop for reading.

        Feeds received bytes into a msgpack unpacker, pairs header
        messages with their (optional) bodies, and dispatches each
        complete message via ``_handle_msg``. On exit it cancels every
        outstanding handler.
        TODO: add a timeout for receiving message bodies.
        """
        unpacker = msgpack.Unpacker(object_hook=self._decode_addr_key)
        cur_msg = None  # header still waiting for its body, if any
        async with anyio.open_cancel_scope(shield=True) as s:
            if result is not None:
                # Hand our cancel scope back to the spawner.
                await result.set(s)
            try:
                while self._socket is not None:
                    if cur_msg is not None:
                        logger.debug("%d:wait for body", self._conn_id)
                    try:
                        # Only apply the 5s timeout while a body is pending.
                        async with anyio.fail_after(5 if cur_msg else math.inf):
                            buf = await self._socket.receive(self._socket_recv_size)
                    except TimeoutError:
                        # NOTE(review): when the handler absorbs the timeout,
                        # control falls through with ``buf`` unbound, so the
                        # ``len(buf)`` below would raise NameError — confirm
                        # whether this path should ``continue`` instead.
                        seq = cur_msg.head.get(b"Seq", None)
                        hdl = self._handlers.get(seq, None)
                        if hdl is not None:
                            await hdl.set_error(SerfTimeout(cur_msg))
                        else:
                            raise SerfTimeout(cur_msg) from None
                    except anyio.ClosedResourceError:
                        return  # closed by us
                    except OSError as err:
                        if err.errno == errno.EBADF:
                            # Socket fd already closed underneath us.
                            return
                        raise
                    if len(buf) == 0:  # Connection was closed.
                        raise SerfClosedError("Connection closed by peer")
                    unpacker.feed(buf)
                    for msg in unpacker:
                        if cur_msg is not None:
                            # This msgpack object is the pending header's body.
                            logger.debug("%d::Body=%s", self._conn_id, msg)
                            cur_msg.body = msg
                            await self._handle_msg(cur_msg)
                            cur_msg = None
                        else:
                            logger.debug("%d:Recv =%s", self._conn_id, msg)
                            msg = SerfResult(msg)
                            if await self._handle_msg(msg):
                                # Incomplete: remember it until the body arrives.
                                cur_msg = msg
            finally:
                # Tear down: detach the handler map, then cancel every
                # still-waiting caller (shielded so cleanup can't be cancelled).
                hdl, self._handlers = self._handlers, None
                async with anyio.open_cancel_scope(shield=True):
                    for m in hdl.values():
                        await m.cancel()
    async def handshake(self):
        """
        Sets up the connection with the Serf agent and does the
        initial handshake.
        """
        # Protocol version 1; the OK reply carries no body.
        return await self.call("handshake", {"Version": 1}, expect_body=False)
    async def auth(self, auth_key):
        """
        Performs the initial authentication on connect
        """
        # Like handshake: a body-less OK is the expected success reply.
        return await self.call("auth", {"AuthKey": auth_key}, expect_body=False)
    @asynccontextmanager
    async def _connected(self):
        """
        This async context manager handles the actual TCP connection to
        the Serf process.

        Raises SerfConnectionError when the TCP connect fails; always
        closes the socket and cancels the reader task on exit.
        """
        reader = None
        try:
            async with await anyio.connect_tcp(self.host, self.port) as sock:
                self._socket = sock
                # Run the read loop in the background for the lifetime
                # of this context.
                reader = await self.tg.spawn(self._reader)
                yield self
        except socket.error as e:
            raise SerfConnectionError(self.host, self.port) from e
        finally:
            # Swap-out makes shutdown idempotent: other coroutines see
            # self._socket is None immediately.
            # (assumes self._socket is pre-initialized in __init__, which
            # is outside this chunk — confirm)
            sock, self._socket = self._socket, None
            if sock is not None:
                await sock.aclose()
            if reader is not None:
                await reader.cancel()
                reader = None
    @property
    def _counter(self):
        """
        Returns the current value of our message sequence counter and increments it.
        """
        # Note: reading this property has a side effect (post-increment),
        # so every read yields a fresh sequence number.
        current = self._seq
        self._seq += 1
        return current
@staticmethod
def _decode_addr_key(obj_dict):
"""
Callback function to handle the decoding of the 'Addr' field.
Serf msgpack 'Addr' as an IPv6 address, and the data needs to be
unpacked using socket.inet_ntop().
:param obj_dict: A dictionary containing the msgpack map.
:return: A dictionary with the correct 'Addr' format.
"""
key = b"Addr"
ip_addr = obj_dict.get(key, None)
if ip_addr is not None:
if len(ip_addr) == 4: # IPv4
ip_addr = socket.inet_ntop(socket.AF_INET, obj_dict[key])
else:
ip_addr = socket.inet_ntop(socket.AF_INET6, obj_dict[key])
# Check if the address is an IPv4 mapped IPv6 address:
# ie. ::ffff:xxx.xxx.xxx.xxx
if ip_addr.startswith("::ffff:"):
ip_addr = ip_addr[7:]
# The msgpack codec is set to raw,
# thus everything needs to be bytes
obj_dict[key] = ip_addr.encode("utf-8")
return obj_dict
| smurfix/asyncserf | asyncserf/connection.py | connection.py | py | 13,002 | python | en | code | 3 | github-code | 13 |
29821977560 | import subprocess
import os
import socket
import fcntl
import struct
import json
import time
import datetime
import picamera
import picamera.array
import atexit
configFile = '/home/pi/camera.json';  # persisted scanner config (scanner number)
HOST = '192.168.1.99'  # control-server address
# HOST = '192.168.10.2'
PORT = 81
# Protocol opcodes exchanged with the control server (big-endian uint32).
CODE_PING_PONG = 100
CODE_ADD_SCANNER = 1000
CODE_TAKE_THUMB = 1001
CODE_TAKE_PREVIEW = 1002
CODE_TAKE_PHOTO = 1003
CODE_SET_PHOTO_SETTINGS = 1004
CODE_UPLOAD_THUMB = 1005
CODE_UPLOAD_PREVIEW = 1006
CODE_UPLOAD_PHOTO1 = 1007
CODE_UPLOAD_PHOTO2 = 1008
CODE_SET_SCANNER_NUMBER = 1009
CODE_EXECUTE_SHELL = 1010
CODE_UPDATE_BUSY_STATE = 1011
CODE_LOG_DATA = 1020
MAX_RES = (3280, 2464)  # full still resolution of the Pi camera sensor
def _json_object_hook(d):
    """json.loads object_hook: convert each decoded JSON object into a namedtuple."""
    # Bug fix: this module never imported namedtuple, so every call raised
    # NameError. Import locally to keep the script's import block untouched.
    from collections import namedtuple
    return namedtuple('X', d.keys())(*d.values())
# Parse a JSON string into nested namedtuples (attribute-style access).
def json2obj(data): return json.loads(data, object_hook=_json_object_hook)
def get_ip_address(ifname):
    """Return the IPv4 address bound to interface *ifname*, or '' when unavailable.

    Uses the SIOCGIFADDR (0x8915) ioctl on a throwaway UDP socket; the
    address bytes sit at offset 20..23 of the returned ifreq structure.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', bytes(ifname[:15], 'utf-8')))
        return socket.inet_ntoa(packed[20:24])
    except IOError:
        # Interface missing or no address assigned.
        return ''
    finally:
        s.close()  # fix: the socket used to leak on every call
# Block at startup until the machine has an IPv4 address on either
# interface; wired (eth0) wins over wireless (wlan0).
while True:
    current_ip = get_ip_address('eth0')
    if len(current_ip) > 2:
        break
    current_ip = get_ip_address('wlan0')
    if len(current_ip) > 2:
        break
    time.sleep(2)
class SocketHandler:
    """TCP client for the scanner control server.

    Keeps one socket to the server, registers this Raspberry Pi camera as
    a scanner, then loops in receive() dispatching server opcodes (take
    thumbnail/preview/photo, change camera settings, run shell commands).
    """
    # Class-level defaults; the protocol state is rebound on the instance
    # once receive() starts assigning to it.
    frames = 3             # number of burst frames captured by takePhoto
    payload = ''           # accumulated body of the message currently being read
    lastOpCode = 0         # opcode of the in-flight message (0 = expecting a header)
    lastLength = 0         # declared body length of the in-flight message
    timer = 0              # server-supplied trigger timestamp
    config = {'numb': ""}  # persisted configuration (scanner number)
    def __init__(self):
        self.getScannerNumber()
    def initSocket(self):
        """Create a fresh TCP socket (called before every (re)connect)."""
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    def filenames(self):
        """Yield the burst-capture output filenames image00.jpg .. imageNN.jpg."""
        frame = 0
        while frame < self.frames:
            yield './image%02d.jpg' % frame
            frame += 1
    def connect(self, host, port):
        """Connect the socket; return True on success, False if refused."""
        try:
            # host = socket.gethostbyname(host)
            print("Host: ", host)
            self.sock.connect((host, port))
            return True
        except ConnectionRefusedError as e:
            print("ConnectionRefused error({0}): {1}".format(e.errno, e.strerror))
            return False
    def addScanner(self):
        """Register this device (ip + scanner number) with the server."""
        scanner = {'ip': current_ip, 'numb': self.config['numb'], 'files': []}
        self.sendJSON(CODE_ADD_SCANNER, scanner)
    def updateBusyState(self, state):
        """Tell the server whether this scanner is currently busy."""
        print("Update state: " + str(state))
        scanner = {'isBusy': state}
        self.sendJSON(CODE_UPDATE_BUSY_STATE, scanner)
    def setHeader(self, opCode, length):
        """Send the 8-byte message header: big-endian opcode then body length."""
        print("OpCode: ", opCode)
        print("Length: ", length)
        c = struct.pack(">I", opCode)
        l = struct.pack(">I", length)
        self.sock.send(c + l)
    def logData(self, data, is_json=True):
        """Send a log record to the server (JSON-encoded unless is_json=False)."""
        if is_json:
            data = json.dumps(data).encode('utf-8')
        else:
            data = data.encode('utf-8')
        dataLength = len(data)
        self.setHeader(CODE_LOG_DATA, dataLength)
        self.sock.send(data)
    def sendJSON(self, opCode, obj):
        """Serialize *obj* as JSON and send it under *opCode*."""
        j = json.dumps(obj).encode('utf-8')
        self.sendRaw(opCode, j, len(j))
    def sendRaw(self, opCode, msg, length):
        """Send the header plus *length* bytes of *msg*, looping until all bytes go out."""
        self.setHeader(opCode, length)
        totalsent = 0
        while totalsent < length:
            sent = self.sock.send(msg[totalsent:])
            if sent == 0:
                raise RuntimeError("socket connection broken")
            totalsent = totalsent + sent
    def sendFile(self, opCode, filename):
        """Stream a file to the server in 4 KiB chunks under *opCode*."""
        length = os.path.getsize(filename)
        self.setHeader(opCode, length)
        f = open(filename, 'rb')
        print('Sending file...')
        l = f.read(1024 * 4)
        while (l):
            ##print ('Sending...')
            self.sock.send(l)
            l = f.read(1024 * 4)
        f.close()
        print('File sent')
    def receive(self):
        """Read one protocol message in two phases and dispatch it.

        First call reads the 12-byte header (opcode, timer, length) and
        returns; the next call reads the body and dispatches on opcode.
        """
        if self.lastOpCode == 0:
            # Phase 1: header only.
            # NOTE(review): each recv(4) assumes the full 4 bytes arrive in
            # one read — confirm short reads cannot happen on this link.
            self.payload = ''
            self.lastOpCode = self.sock.recv(4)
            self.lastOpCode = struct.unpack(">I", self.lastOpCode)[0]
            self.timer = self.sock.recv(4)
            self.timer = struct.unpack(">I", self.timer)[0]
            self.lastLength = self.sock.recv(4)
            self.lastLength = struct.unpack(">I", self.lastLength)[0]
            print("Receive operation: " + str(self.lastOpCode) + " with length: " + str(self.lastLength))
            return
        # Phase 2: accumulate the body.
        # NOTE(review): each chunk is decoded separately and compared by
        # character count, so a multi-byte UTF-8 character split across
        # recv() boundaries would raise — confirm payloads are ASCII-only.
        while len(self.payload) < self.lastLength:
            chunk = self.sock.recv(self.lastLength - len(self.payload))
            self.payload = self.payload + chunk.decode('utf-8')
        # Dispatch by opcode.
        if self.lastOpCode == CODE_TAKE_THUMB:
            self.takeThumb()
        if self.lastOpCode == CODE_TAKE_PREVIEW:
            self.takePreview()
        if self.lastOpCode == CODE_TAKE_PHOTO:
            self.takePhoto(self.timer)
        if self.lastOpCode == CODE_PING_PONG:
            print("Ping")
        if self.lastOpCode == CODE_ADD_SCANNER:
            print(self.payload)
        if self.lastOpCode == CODE_SET_PHOTO_SETTINGS:
            self.setPhotoSettings(self.payload)
        if self.lastOpCode == CODE_SET_SCANNER_NUMBER:
            self.config['numb'] = self.payload
            self.setScannerNumber()
        if self.lastOpCode == CODE_EXECUTE_SHELL:
            self.executeShell(self.payload)
        # Reset so the next call reads a fresh header.
        self.timer = 0
        self.lastOpCode = 0
        self.lastLength = 0
        self.payload = ''
    def executeShell(self, data):
        """Run a server-supplied shell command, streaming stdout back as log lines.

        NOTE(review): shell=True on a command received over the network is
        remote code execution by design — the server must be fully trusted
        and this port must never be exposed beyond the private scanner LAN.
        """
        cmd = json.loads(data)
        self.updateBusyState(True)
        process = ''
        try:
            process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
            for line in process.stdout:
                self.logData(line.decode('utf-8'), False)
                print(line.decode('utf-8'))
        except Exception as e:
            print("Error execute shell({0})")
        if process != '':
            process.kill()
        self.updateBusyState(False)
    def getScannerNumber(self):
        """Load the persisted scanner number from configFile, if present."""
        try:
            file = open(configFile, "r")
            jdata = json.loads(file.read())
            self.config['numb'] = jdata['numb']
            file.close()
        except:
            print("File doesn't exist")
    def setScannerNumber(self):
        """Persist the current config (scanner number) to configFile."""
        file = open(configFile, "w")
        file.write(json.dumps(self.config))
        file.close()
    def setPhotoSettings(self, data):
        """Store the JSON camera settings sent by the server."""
        self.settings = json.loads(data)
    def applySettings(self):
        """Push the stored settings onto the live PiCamera object."""
        settings = self.settings
        try:
            self.camera.awb_mode = settings.get('awb');
            # Manual white-balance gains only apply with AWB switched off.
            if settings.get('awbgains') and settings.get('awb') == 'off':
                g = settings.get('awbgains').split(',')
                self.camera.awb_gains = (float(g[0]), float(g[1]))
                print(g[0] + " - " + g[1])
            else:
                self.camera.awb_gains = (1.0, 1.0)
            self.camera.exposure_mode = settings.get('exposure')
            self.camera.framerate = int(settings.get('framerate') or 15)
            self.camera.sharpness = int(settings.get('sharpness') or 0)
            self.camera.contrast = int(settings.get('contrast') or 0)
            self.camera.brightness = int(settings.get('brightness') or 50)
            self.camera.saturation = int(settings.get('saturation') or 0)
            self.camera.shutter_speed = int(settings.get('shutter') or 0)
            self.camera.iso = int(settings.get('ISO') or 100)
            self.camera.meter_mode = settings.get('metering')
        except ValueError as e:
            # NOTE(review): ValueError has no .errno/.strerror, so this
            # handler itself raises AttributeError when a setting fails to
            # parse — confirm and switch to printing str(e).
            print("Value error({0}): {1}".format(e.errno, e.strerror))
    def takeThumb(self):
        """Capture a 160x90 thumbnail and upload it."""
        print("Take thumb")
        thumbFileName = './thumb.jpg'
        self.camera = picamera.PiCamera()
        self.applySettings()
        self.camera.resolution = (160, 90)
        self.updateBusyState(True)
        self.camera.capture_sequence([thumbFileName], 'jpeg', use_video_port=True)
        time.sleep(0.2)
        self.sendFile(CODE_UPLOAD_THUMB, thumbFileName)
        self.updateBusyState(False)
        self.camera.close()
    def takePreview(self):
        """Capture a full-resolution preview frame and upload it."""
        print("Take preview")
        previewFileName = './preview.jpg'
        self.camera = picamera.PiCamera()
        self.applySettings()
        self.camera.resolution = MAX_RES
        self.updateBusyState(True)
        self.camera.capture_sequence([previewFileName], 'jpeg', use_video_port=True)
        time.sleep(0.2)
        self.sendFile(CODE_UPLOAD_PREVIEW, previewFileName)
        self.updateBusyState(False)
        self.camera.close()
    def takePhoto(self, timer):
        """Capture a burst of full-resolution frames at a scheduled time and upload two of them."""
        # NOTE(review): the server-supplied *timer* argument is immediately
        # overwritten with now+2s — confirm whether the scheduled trigger
        # time is intentionally ignored.
        timer = time.time() + 2.0
        print("Take photo")
        self.camera = picamera.PiCamera()
        self.applySettings()
        self.camera.resolution = MAX_RES
        self.updateBusyState(True)
        print("Timer %d" % timer)
        if timer != 0:
            # Sleep until the trigger time (all scanners fire together).
            time_shift = float(timer) - time.time()
            time.sleep(max(time_shift, 0.0))
        self.camera.capture_sequence(self.filenames(), 'jpeg', use_video_port=True)
        time.sleep(1)
        self.sendFile(CODE_UPLOAD_PHOTO1, './image00.jpg')
        self.sendFile(CODE_UPLOAD_PHOTO2, './image02.jpg')
        self.updateBusyState(False)
        self.camera.close()
#### SocketHandler instance
s = SocketHandler()
def exit_handler():
    """atexit hook: last-gasp notification on interpreter shutdown."""
    print('Close')
atexit.register(exit_handler)
# Reconnect loop: (re)create the socket, register with the server, then
# block in receive() until the connection drops; wait 3s and retry forever.
# NOTE(review): the except-branch format strings contain no {} placeholders,
# so errno/strerror are silently discarded — confirm and add placeholders.
while True:
    try:
        s.initSocket()
        if s.connect(HOST, PORT):
            s.addScanner()
            while True:
                s.receive()
    except TimeoutError as e:
        print('Timeout Error: '.format(e.errno, e.strerror))
    except ConnectionResetError as e:
        print('Connection Reset Error: '.format(e.errno, e.strerror))
    except IOError as e:
        print('IOError: '.format(e.errno, e.strerror))
    except RuntimeError as e:
        print('Runtime error')
    time.sleep(3)
| amakaroff82/scanner | scanerPI/server.py | server.py | py | 9,792 | python | en | code | 0 | github-code | 13 |
7927061610 | #############
# Camera.py #
#############
# This is the proverbial sausage factory.
# There are lots of things to play with in here
# and I've tried to mark areas of interest
from collections import namedtuple
from math import radians, tan, sqrt
from random import uniform
from time import time
from Utility.Vector import cross, Ray
import Utility.Color as Color
# Global settings
scene_refraction_index = 1.0  # refraction index of the ambient medium (air/vacuum)
total_rays = 0  # running count of rays cast; mutated by cast_ray, reset by capture
# A frustum is a conical shape with the top chopped off
Frustum = namedtuple('Frustum', 'near far')  # near/far clip distances along a ray
class Camera(object):
    """Holds the camera's parameters and calculates the screenspace coordinate frame"""
    def __init__(self, position, direction, up, resolution, FOV, samples, depth, frustum):
        """Build the camera and precompute the per-pixel screenspace basis.

        position:   eye point of the camera
        direction:  view direction (normalized here)
        up:         approximate up vector used to orthogonalize the frame
        resolution: (width, height) in pixels
        FOV:        horizontal field of view in degrees
        samples:    rays per pixel (supersampling count)
        depth:      maximum recursive bounce depth
        frustum:    (near, far) clip distances
        """
        direction = direction.unit()
        self.position = position
        self.width = resolution[0]
        self.height = resolution[1]
        self.samples = samples
        self.depth = depth
        self.frustum = Frustum._make(frustum)
        # Calculate the screen dimensions given the FOV
        screen_width = tan(radians(FOV / 2.0))
        screen_height = (float(self.height) / float(self.width)) * screen_width
        # Calculate the coordinate frame for screenspace
        i_star = cross(direction, up).unit()
        j_star = cross(i_star, direction).unit()
        # Compute the dimensions of a pixel represented in screenspace
        self.i_hat = i_star * (2 * screen_width / self.width)
        self.j_hat = j_star * (2 * screen_height / self.height)
        # The top left of the screenspace is the origin of our image
        self.origin = (direction
                       - (i_star * screen_width)
                       + (j_star * screen_height))
    def __str__(self):
        return "Camera (p: {}, w:{}, h: {}, ih: {}, jh: {})".format(self.position, self.width, self.height, abs(self.i_hat), abs(self.j_hat))
def capture(scene, camera, verbose, draw_heatmap):
    """Render *scene* from *camera*; return (image, heatmap-or-None).

    Traces every pixel, recording per-pixel timing metadata, then converts
    the raw pixel grid into an image (and optionally a timing heatmap).
    """
    global total_rays
    total_rays = 0
    # Create the empty pixel array to convert to an image
    pixels = []
    metadata = []
    begin = time()
    # Build the image one pixel at a time
    for y in range(camera.height):
        pixels.append([])
        metadata.append([])
        for x in range(camera.width):
            pixel, data = get_pixel(scene, camera, x, y)
            pixels[y].append(pixel)
            metadata[y].append(data)
    print("Total tracing time: {:.2f}s".format(time() - begin))
    if verbose:
        per_pixel = float(total_rays)/float(camera.width * camera.height)
        print("Total number of rays traced: {}, Average per pixel: {}".format(total_rays, per_pixel))
        # NOTE(review): the "[]" placeholders below are literal — this looks
        # like an unfinished stub that prints no actual statistics.
        print("Mean time per pixel: [], Median: [], Max: []")
    begin = time()
    # Convert our array from what is essentially a bitmap to an image
    image = Color.image_from_pixels(pixels, (camera.width, camera.height))
    heatmap = None
    if draw_heatmap:
        heatmap = Color.heatmap_from_data(metadata, (camera.width, camera.height))
    if verbose:
        print("Image processing time: {:.2f}s".format(time() - begin))
    return image, heatmap
def get_pixel(scene, camera, x, y):
    """Supersample pixel (x, y): average camera.samples jittered rays.

    Returns (color, seconds spent on this pixel).
    """
    pixel = Color.black
    begin = time()
    # Collect samples of the scene for this current pixel
    for _ in range(camera.samples):
        # Randomly generate offsets for the current subsample (jittering
        # inside the pixel gives cheap anti-aliasing)
        x_n = x + uniform(0, 1)
        y_n = y + uniform(0, 1)
        # Get the subsample position and construct a ray from it
        screen_coordinate = camera.origin + camera.i_hat * x_n - camera.j_hat * y_n
        sample = Ray(camera.position, screen_coordinate)
        pixel = pixel + trace(scene, sample, camera.depth, camera.frustum)
    # Color correction
    pixel = pixel / camera.samples
    #TODO: check how this looks with transform
    #pixel = pixel.apply_transform(sqrt)
    endtime = time()
    return pixel, endtime - begin
def trace(scene, ray, depth, frustum):
    """Recursively trace *ray* into *scene*; return the sampled color.

    *depth* bounds the number of bounces; rays that miss every object
    fall through to the sky gradient.
    """
    # Base case; try changing the color and seeing what you get!
    if depth <= 0:
        return Color.black
    # Check to see if our ray hits an object, or just shoots into space
    intersect = cast_ray(scene, ray, frustum)
    if intersect:
        #return Color.black
        # Get the color of that object and the bounce vector for recursion if there is recursion
        sample, bounce = intersect.material.scatter(ray, intersect, scene_refraction_index)
        #TODO: check how this looks with color
        # Here is the actual color blending; it's very simple
        return sample * trace(scene, bounce, depth - 1, frustum) if bounce else Color.black
    else:
        #return Color.white
        return Color.sky_gradient(ray.direction.z)
def cast_ray(scene, ray, frustum):
    """Return the nearest intersection of *ray* with any shape in *scene*, or None."""
    global total_rays
    total_rays += 1
    closest = None
    for shape in scene:
        # Shrink the far plane to the best hit so far, so farther
        # intersections are rejected early by the shapes themselves.
        limit = Frustum(frustum.near, closest.distance if closest else frustum.far)
        hit = shape.intersect_ray(ray, limit)
        if hit:
            closest = hit
    return closest
| mld2443/PythonRayTracer | Camera.py | Camera.py | py | 5,012 | python | en | code | 0 | github-code | 13 |
12088324723 | # 语句
# 3个物理行,3个逻辑行
# a = 10
# b = 20
# print(a, b)
# a = 10; b = 20 # 不推荐
# print(a, b)
#
# # a = 10
# # b = 20 # 不推荐
# # print(a, b)
#
# # 物理行长 --》换行
# print(1 + 2 + 3 + 4 + 5 + 6 + 7 + 8)
#
# # 显式换行
# # \ 续行符,表示下一行也是上一行未完的语句
# result = 1 + 2 + \
# 3 + 4 + \
# 5 + 6 + \
# 7 + 8
# print(result)
#
# # 隐式换行
# result = (1 + 2 +
# 3 + 4 +
# 5 + 6 +
# 7 + 8)
# print(result)
# def func(num):
# for i in range(num):
# if i > 4:
# return
# print(i, end='')
#
#
# func(6)
name = "张无忌"
names = ["赵敏", "周芷若"]
tuple01 = ("张翠山", name, names)
# Rebinding `name` does not affect the tuple: it already holds the old string.
name = "无忌哥哥"
# The tuple itself is immutable, but the list object it contains can be mutated.
tuple01[2][0] = "敏儿"
print(tuple01)
| 15149295552/Code | Month01/Day02/demo04_statement.py | demo04_statement.py | py | 814 | python | en | code | 1 | github-code | 13 |
33253338869 | import numpy as np
import math
import random
# free parameters in common
J = 1
L = 64
BLOCK = 2
# fun_phi
NUM_SYSTEM = 128 # num of independent systems
STEP = 5 # sampling time divide
nRG = 4 # RG iteration times
# MCRG
NUM_INTERACTION = 8 # 0 for Odd interaction
def Initial(size=None):
    """Return a size x size x size lattice of spins drawn uniformly from {-1, +1}.

    ``size`` defaults to the module-level lattice size ``L``, so existing
    ``Initial()`` callers keep their behaviour; passing an explicit size
    generalizes the function to arbitrary lattices.
    """
    global L
    if size is None:
        size = L
    Ising = np.empty((size, size, size), dtype=int)
    for i in range(size):
        for j in range(size):
            for k in range(size):
                Ising[i, j, k] = random.choice([-1, 1])
    return Ising
def Esingle(Ising, i, j, k):
    """Nearest-neighbour energy of spin (i, j, k): -J * s_ijk * sum(6 neighbours)."""
    global J
    s = 0.0
    x = i + 1
    y = j + 1
    z = k + 1
    if x == Ising.shape[0]:  # periodic boundary: wrap the upper edge back to 0
        x -= Ising.shape[0]
    if y == Ising.shape[1]:
        y -= Ising.shape[1]
    if z == Ising.shape[2]:
        z -= Ising.shape[2]
    s += (Ising[i - 1, j, k] + Ising[i, j - 1, k] + Ising[i, j, k - 1] + Ising[x, j, k] + Ising[i, y, k] + Ising[
        i, j, z])  # index -1 wraps automatically via Python's negative indexing
    return -Ising[i, j, k] * s * J
def E(Ising):
    """Total energy: half the sum of all per-site energies (each bond is counted twice)."""
    energy = 0.0
    for i, j, k in np.ndindex(*Ising.shape):
        energy += Esingle(Ising, i, j, k)
    return 0.5 * energy
def Ssingle(Ising, i, j, k, alpha):  # interaction term of the alpha-th coupling at site (i, j, k)
    """Single-site contribution to the correlation sum S_alpha.

    alpha selects the coupling type:
      0 - the spin itself (odd / magnetic-field term)
      1 - 6 nearest-neighbour pairs (halved to avoid double counting)
      2 - 12 face-diagonal (next-nearest) pairs, halved
      3 - 8 cube-diagonal (third-nearest) pairs, halved
      4 - four-spin nearest-neighbour plaquettes
      5 - four-spin next-nearest plaquettes
      6 - four-spin tetrahedral vertices in each cube
      7 - 3 pairs two lattice steps away
    Terminates the process on any other alpha.
    """
    L1 = Ising.shape[0]
    L2 = Ising.shape[1]
    L3 = Ising.shape[2]
    s = 0.0
    if alpha == 0:
        s = Ising[i, j, k]
    elif alpha == 1:
        x = i + 1
        y = j + 1
        z = k + 1
        if x == L1:  # periodic boundary: wrap the upper edge
            x -= L1
        if y == L2:
            y -= L2
        if z == L3:
            z -= L3
        s += (Ising[i - 1, j, k] + Ising[i, j - 1, k] + Ising[i, j, k - 1] + Ising[x, j, k] + Ising[i, y, k] + Ising[
            i, j, z])  # index -1 wraps automatically via Python's negative indexing
        s *= (Ising[i, j, k] * 0.5)
    elif alpha == 2:
        x = i + 1
        y = j + 1
        z = k + 1
        if x == L1:
            x -= L1
        if y == L2:
            y -= L2
        if z == L3:
            z -= L3
        s += (
            Ising[x, y, k] + Ising[x, j - 1, k] + Ising[i - 1, y, k] + Ising[i - 1, j - 1, k] + Ising[x, j, z] + Ising[
                x, j, k - 1] + Ising[i - 1, j, z] + Ising[i - 1, j, k - 1] + Ising[i, y, z] + Ising[i, j - 1, z] +
            Ising[i, y, k - 1] + Ising[i, j - 1, k - 1])
        s *= (Ising[i, j, k] * 0.5)
    elif alpha == 3:
        x = i + 1
        y = j + 1
        z = k + 1
        if x == L1:
            x -= L1
        if y == L2:
            y -= L2
        if z == L3:
            z -= L3
        s += (
            Ising[x, y, z] + Ising[i - 1, y, z] + Ising[x, j - 1, z] + Ising[x, y, k - 1] + Ising[i - 1, j - 1, z] +
            Ising[i - 1, y, k - 1] + Ising[x, j - 1, k - 1] + Ising[i - 1, j - 1, k - 1])
        s *= (Ising[i, j, k] * 0.5)
    elif alpha == 4:  # four spins nearest neighbor
        # NOTE(review): the last product repeats Ising[i - 1, j, k] twice;
        # by symmetry with the first two terms this looks like a typo for
        # Ising[i, j - 1, k] — confirm against the intended plaquette.
        s += Ising[i, j - 1, k] * Ising[i, j, k - 1] * Ising[i, j - 1, k - 1] + Ising[i - 1, j, k] * Ising[
            i, j, k - 1] * Ising[i - 1, j, k - 1] + Ising[i - 1, j, k] * Ising[i - 1, j, k] * Ising[i - 1, j - 1, k]
        s *= Ising[i, j, k]
    elif alpha == 5:  # four spins next-nearest neighbor
        x = i + 1
        y = j + 1
        z = k + 1
        if x == L1:
            x -= L1
        if y == L2:
            y -= L2
        if z == L3:
            z -= L3
        s += Ising[i, j - 1, k - 1] * Ising[i, j - 2, k] * Ising[i, j - 1, z] + Ising[i - 1, j, k - 1] * Ising[
            i, j, k - 2] * Ising[x, j, k - 1] + Ising[i - 1, j - 1, k] * Ising[i - 2, j, k] * Ising[i - 1, y, k]
        s *= Ising[i, j, k]
    elif alpha == 6:  # four spins tetrahedral vertices in each cube
        s += Ising[i - 1, j, k] * Ising[i, j - 1, k] * Ising[i, j, k - 1] * Ising[i - 1, j - 1, k - 1] + \
             Ising[i - 1, j - 1, k] * Ising[i, j - 1, k - 1] * Ising[i - 1, j, k - 1] * Ising[i, j, k]
    elif alpha == 7:
        s += Ising[i - 2, j, k] + Ising[i, j - 2, k] + Ising[i, j, k - 2]
        s *= Ising[i, j, k]
    else:
        print('Error,alpha==' + str(alpha))
        exit()
    return s
def S_alpha(Ising, alpha):
    """Total correlation function: Ssingle summed over every lattice site."""
    total = 0.0
    for i, j, k in np.ndindex(*Ising.shape):
        total += Ssingle(Ising, i, j, k, alpha)
    return total
def BlockIsing(Ising, b):
    """Majority-rule block-spin RG transform: each b x b x b cube becomes one spin.

    Ties (zero net spin in a cube) are broken by a fair coin flip.
    Raises ValueError when b does not evenly divide every lattice dimension.
    (The original returned the string "error b", which no caller checked and
    which would only crash later inside matrixAB.)
    """
    if Ising.shape[0] % b != 0 or Ising.shape[1] % b != 0 or Ising.shape[2] % b != 0:
        raise ValueError("block size b=%d must divide the lattice shape %s" % (b, Ising.shape))
    IsingB = np.empty((Ising.shape[0] // b, Ising.shape[1] // b, Ising.shape[2] // b))
    for i in range(Ising.shape[0] // b):
        for j in range(Ising.shape[1] // b):
            for k in range(Ising.shape[2] // b):
                # Net spin of the b x b x b cube mapped onto block site (i, j, k).
                s = Ising[b * i:b * i + b, b * j:b * j + b, b * k:b * k + b].sum()
                if s > 0:
                    IsingB[i, j, k] = 1
                elif s < 0:
                    IsingB[i, j, k] = -1
                else:
                    IsingB[i, j, k] = random.choice([-1, 1])
    return IsingB
def Pflip(Ising, i, j, k, T):
    """Metropolis acceptance probability for flipping spin (i, j, k) at temperature T."""
    delta_e = -2 * Esingle(Ising, i, j, k)  # energy change if the spin were flipped
    boltzmann = math.exp(-delta_e / T)
    return min(boltzmann, 1)
def MCpass(Ising, T):  # one pass: L*L*L random single-spin update attempts
    """One Metropolis sweep at temperature T; mutates Ising in place, returns 0."""
    global L
    for n in range(L * L * L):
        i = random.randint(0, L - 1)
        j = random.randint(0, L - 1)
        k = random.randint(0, L - 1)
        P = Pflip(Ising, i, j, k, T)
        if P == 1:
            # Energetically favourable (or neutral) flip: always accept.
            Ising[i, j, k] *= -1
        else:
            # Otherwise accept with probability P.
            r = random.random()
            if r < P:
                Ising[i, j, k] *= -1
    return 0
'''unfinished
def Ebtw(Ising, Ising0, i, j):
if Ising.shape[0] != Ising0.shape[0]:
print('error')
global J
s = 0.0
if i == Ising.shape[0] - 1: # 边界周期性条件
s += Ising0[0, j]
else:
s += Ising0[i + 1, j]
if j == Ising.shape[1] - 1:
s += Ising0[i, 0]
else:
s += Ising0[i, j + 1]
s += (Ising0[i - 1, j] + Ising0[i, j - 1]) # 对于下标-1,python会自动处理
return Ising[i, j] * s * J
def funE(Ising, Ising0):
N = Ising.shape[0] * Ising.shape[1]
sum = 0.0
for i in range(Ising.shape[0]):
for j in range(Ising.shape[1]):
sum += Ebtw(Ising, Ising0, i, j)
return sum / N
'''
def mag(Ising):
    """Total magnetization: the sum of every spin in the lattice.

    Vectorized replacement for the original triple Python loop; returns a
    plain float, exactly like the accumulator version did.
    """
    return float(np.asarray(Ising).sum())
def magavg(IsingG):
    """Mean magnetization over the first NUM_SYSTEM configurations in IsingG."""
    global NUM_SYSTEM
    total = 0.0
    for idx in range(NUM_SYSTEM):
        total += mag(IsingG[idx])
    return total / NUM_SYSTEM
def funPhi(IsingG, IsingG0):
    """Normalized magnetization autocorrelation across the system ensemble.

    Correlates M(t) of the evolved systems IsingG with M(0) of the initial
    systems IsingG0 over NUM_SYSTEM independent runs:
        phi = (<M0*Mt> - <M0><Mt>) / (sigma(M0) * sigma(Mt))
    NOTE(review): divides by the standard deviations, so this raises
    ZeroDivisionError when either magnetization is identical across all
    systems — confirm callers only use it in the fluctuating regime.
    """
    global NUM_SYSTEM
    M0avg = magavg(IsingG0)
    Mtavg = magavg(IsingG)
    M0Mtavg = 0.0
    M0sigma = 0.0
    Mtsigma = 0.0
    for j in range(NUM_SYSTEM):
        M0 = mag(IsingG0[j])
        Mt = mag(IsingG[j])
        M0Mtavg += M0 * Mt
        M0sigma += (M0 - M0avg) ** 2
        Mtsigma += (Mt - Mtavg) ** 2
    M0Mtavg = M0Mtavg / NUM_SYSTEM
    M0sigma = math.sqrt(M0sigma / NUM_SYSTEM)
    Mtsigma = math.sqrt(Mtsigma / NUM_SYSTEM)
    phi = (M0Mtavg - M0avg * Mtavg) / M0sigma / Mtsigma
    return phi
def matrixAB(IsingGroup):
    """Build the two MCRG correlation matrices A and B.

    S[a, j]  = S_alpha(config j, a)              (original lattice)
    SR[a, j] = S_alpha(blocked config j, a)      (after one RG blocking)
    A[a, b] = <S_b SR_a> - <S_b><SR_a>,  B[a, b] = <SR_b SR_a> - <SR_b><SR_a>
    Returns (A, B) as NUM_INTERACTION x NUM_INTERACTION arrays.
    """
    global NUM_INTERACTION, BLOCK
    n = len(IsingGroup)
    matrixA = np.empty((NUM_INTERACTION, NUM_INTERACTION))
    matrixB = np.empty((NUM_INTERACTION, NUM_INTERACTION))
    S = np.zeros((NUM_INTERACTION, n))
    SR = np.zeros((NUM_INTERACTION, n))
    for j in range(n):
        # One majority-rule blocking per configuration.
        Tmp = BlockIsing(IsingGroup[j], BLOCK)
        for a in range(NUM_INTERACTION):
            S[a, j] = S_alpha(IsingGroup[j], a)
            SR[a, j] = S_alpha(Tmp, a)
    for a in range(NUM_INTERACTION):
        for b in range(NUM_INTERACTION):
            # Connected correlators averaged over the n configurations.
            matrixA[a, b] = sum(S[b] * SR[a]) / n - sum(S[b]) * sum(SR[a]) / (n * n)
            matrixB[a, b] = sum(SR[b] * SR[a]) / n - sum(SR[b]) * sum(SR[a]) / (n * n)
    return matrixA, matrixB
def write_nparray(file, array):
    """Write a 2-D array to *file*, one column of *array* per output line.

    Each line holds the tab-terminated entries of one column (i.e. the
    output is the transpose of *array*), matching the original format
    exactly, including the trailing tab before each newline.
    """
    for col in range(array.shape[1]):
        line = "".join(str(array[row, col]) + "\t" for row in range(array.shape[0]))
        file.write(line)
        file.write("\n")
    return 0
| helloworld0909/MCRG_on_Ising_model | source.py | source.py | py | 8,469 | python | en | code | 1 | github-code | 13 |
5776487207 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image, ImageOps, ImageFilter
import os
import shutil
from sklearn.model_selection import train_test_split
#data = pd.read_csv('train.csv', sep=",", header=None)
data = pd.read_csv('test.csv', sep=",", header=None)
#data = pd.read_csv('other.csv', sep=",", header=None)
data.columns = ["img", "x1", "y1", "x2", "y2", "id"]
tmp = data.copy()
print("Data has image files with traffic signs numbers:", len(data['img'].unique()))
print("Data has traffic signs class numbers:", len(data['id'].unique()))
print("Data has traffic signs instance numbers:", data['id'].count())
red_round_labels = ['p1', 'p2', 'p3', 'p4', 'p5', 'p6', 'p7', 'p8', 'p9', 'p10', 'p11', 'p12', 'p13', 'p14', 'p15', 'p16','p17', 'p18',
'p19', 'p20', 'p21', 'p22', 'p23', 'p24', 'p25', 'p26', 'p27', 'p28', 'p29', 'pa10', 'pb', 'pc', 'pd',
'pe', 'ph3.5', 'pl40', 'pm10', 'pn', 'pne', 'pnl', 'pw3']
data['id'] = np.where(tmp['id'].isin(red_round_labels), 1, 0)
labels = data['id'].values
full_data = data.drop('id', 1)
x_train, x_test, y_train, y_test = train_test_split(full_data, labels, test_size=0.3, random_state=42)
train_data = []
test_data = []
#data_dir = '../data/train'
data_dir = '../data/test'
#data_dir = '../data/other'
for i in range(x_train.shape[0]):
temp = str(x_train['img'].iloc[i])+'.jpg'
curr_im = os.path.join(data_dir,temp )
img = Image.open(curr_im)
cropped_rect = (x_train['x1'].iloc[i], x_train['y1'].iloc[i], x_train['x2'].iloc[i], x_train['y2'].iloc[i])
crop_im = img.crop(cropped_rect)
crop_im = crop_im.resize((48, 48), Image.ANTIALIAS)
crop_im = ImageOps.autocontrast(crop_im)
train_data.append(np.array(crop_im))
for i in range(x_test.shape[0]):
temp = str(x_test['img'].iloc[i]) + '.jpg'
curr_im = os.path.join(data_dir, temp )
img = Image.open(curr_im)
cropped_rect = (x_test['x1'].iloc[i], x_test['y1'].iloc[i], x_test['x2'].iloc[i], x_test['y2'].iloc[i])
crop_im = img.crop(cropped_rect)
crop_im = crop_im.resize((48, 48), Image.ANTIALIAS)
crop_im = ImageOps.autocontrast(crop_im)
test_data.append(np.array(crop_im))
train_data = np.array(train_data)
test_data = np.array(test_data)
num_train = train_data.shape[0]
num_test = test_data.shape[0]
num_classes = 2
train_label = np.zeros((num_train, num_classes), dtype=np.int8)
test_label = np.zeros((num_test, num_classes), dtype=np.int8)
for i in range(len(y_train)):
if y_train[i] == 1:
train_label[i][1] = 1
else:
train_label[i][0] = 1
for i in range(len(y_test)):
if y_test[i] == 1:
test_label[i][1] = 1
else:
test_label[i][0] = 1
count = 0
for i in range(len(y_train)):
if y_train[i] == 1:
count += 1
print("Number of red round signs in training data: ", count)
print("Number of negatives in training data: ", num_train - count)
np.save('train_data.npy', train_data)
np.save('test_data.npy', test_data)
np.save('train_labels.npy', train_label)
np.save('test_labels.npy', test_label)
| jiegenghua/Traffic-Sign-Detection | datapreprocessing.py | datapreprocessing.py | py | 3,102 | python | en | code | 1 | github-code | 13 |
7511257632 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 17:53:38 2015
@author: andy
"""
import gensim
import hansard_fetcher as fetcher
WORDVECS = 'temporary/GoogleNews-vectors-negative300.bin'
def load_wordvecs():
return gensim.models.Doc2Vec.load_word2vec_format(WORDVECS, binary=True)
def get_test_sentences():
speeches = fetcher.load_test_speeches()
text = fetcher.get_text_by_speaker(speeches)
results = []
for sid, sentences in text.iteritems():
for sentence in sentences:
words = gensim.utils.to_unicode(sentence).split()
labelled_sentence = gensim.models.doc2vec.LabeledSentence(words, [sid])
results.append(labelled_sentence)
return results | andyljones/hansard_analysis | word2vec_interface.py | word2vec_interface.py | py | 738 | python | en | code | 0 | github-code | 13 |
19702044425 | from homeassistant.components.switch import SwitchEntity, SwitchEntityDescription, SwitchDeviceClass
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import DeviceInfo
from .. import DOMAIN, SIGNAL_UPDATE_DATA, RedmondKettle, MODE_BOIL, STATUS_ON
class RedmondPowerSwitch(SwitchEntity):
def __init__(self, kettle: RedmondKettle):
self._kettle = kettle
self.entity_description = SwitchEntityDescription(
key="power_on",
name=kettle._name + " Turn power"
)
self._attr_is_on = False
self._attr_unique_id = f'{DOMAIN}[{kettle._mac}][switch][{self.entity_description.key}]'
self._attr_device_info = DeviceInfo(connections={("mac", kettle._mac)})
async def async_added_to_hass(self):
self.async_on_remove(async_dispatcher_connect(self._kettle.hass, SIGNAL_UPDATE_DATA, self.update))
def update(self):
self._attr_is_on = False
if self._kettle._status == STATUS_ON and self._kettle._mode == MODE_BOIL:
self._attr_is_on = True
self.schedule_update_ha_state()
@property
def should_poll(self):
return False
@property
def available(self):
return self._kettle._available
async def async_turn_on(self, **kwargs):
await self._kettle.modeOn()
async def async_turn_off(self, **kwargs):
await self._kettle.modeOff()
| Nemiroff/hassio-r4s | custom_components/ready4sky/switches/power_switch.py | power_switch.py | py | 1,447 | python | en | code | null | github-code | 13 |
28462325130 | import random
def get_random_int(min, max):
result = random.randint(min, max)
return result
def game(my_random, min, max, attempts):
if attempts == 0:
print("Вы проиграли! А число было: %d" % my_random)
return
user_in = input("Угадай число от %s до %s. Осталось %s попыток: " % (min, max, attempts))
try:
user_num = int(user_in)
except ValueError:
print("Только целое число вводи!")
game(my_random, min, max, attempts)
else:
if my_random > user_num:
print("Больше ;)")
game(my_random, min, max, attempts - 1)
elif my_random < user_num:
print("Меньше :)")
game(my_random, min, max, attempts - 1)
else:
print("Правильно! Вы угадали за %d попыток." % (7 - attempts + 1))
num = get_random_int(0, 20)
game(num, 0, 20, 7) | Sadburritos/python_homework | June06/04.py | 04.py | py | 1,010 | python | ru | code | 0 | github-code | 13 |
33481709115 | import unittest
import datetime
import io
import os
import shutil
import replicate_polymer.replicate_polymer as replicate_polymer
from replicate_polymer_lib.check_connect_pdb import check_conect_pdb
class TestCheckConnectPdb(unittest.TestCase):
# ===============================================================
@classmethod
def setUpClass(cls):
cls.filelog = "./test/test01_check_connect_pdb.log"
cls.log = replicate_polymer.init_logger("Output", fileoutput=cls.filelog, append=False, inscreen=False)
m = "\n\t***************** START Check_connect_pdb TEST *****************"
print(m) if cls.log is None else cls.log.info(m)
now = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
cls.log.info("\t\tStarting: \t {}\n".format(now))
# ===============================================================
def test_01_pdb_connect(self):
m = "\tTest_01: CONECT section exists in the PDB file."
print(m) if self.log is None else self.log.info(m)
m2 = "\t"+len(m)*"="+"\n"
print(m2) if self.log is None else self.log.info(m2)
datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
# CONECT exists in the pdb file
fnamepdb = "./data/103191_noctane.order_cryst.pdb"
filenamepdb = check_conect_pdb(fnamepdb)
self.assertListEqual(
list(io.open(fnamepdb)),
list(io.open(filenamepdb)))
m = "\tBoth files are the same\n"
m += "\tPASSED\n"
print(m) if self.log is None else self.log.info(m)
# ===============================================================
def test_02_pdb_noconnect(self):
m = "\tTest_02: CONECT section does not exist in the PDB file."
print(m) if self.log is None else self.log.info(m)
m2 = "\t"+len(m)*"="+"\n"
print(m2) if self.log is None else self.log.info(m2)
datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
# CONECT does not exist in the pdb file
fnamepdb = "./data/103191_noctane.order_cryst_noconect.pdb"
filenamepdb = check_conect_pdb(fnamepdb)
# Check if file already exists and move the file
fullpath = os.path.abspath(filenamepdb)
destdir = os.path.join(os.path.dirname(fullpath), "test")
filename = os.path.basename(fullpath)
destfullpath = os.path.join(destdir,filename)
if os.path.isfile(destfullpath):
os.remove(destfullpath)
with open(filenamepdb, 'r') as f:
lines = f.readlines()
ncon = sum('CONECT' in s for s in lines)
shutil.move(fullpath, destdir)
self.assertGreater(ncon, 0)
m = "\tCONECT entries have been created.\n"
m += "\tPASSED\n"
print(m) if self.log is None else self.log.info(m)
# ===============================================================
def test_03_pdb_noconnect_noresname(self):
m = "\tTest_03: CONECT section does not exist in the PDB file.\n"
m += "\t RESNAME in PDB are not defined. This raises an error\n"
m += "\t in parmed package used by check_conect_pdb"
print(m) if self.log is None else self.log.info(m)
m2 = "\t"+len(m)*"="+"\n"
print(m2) if self.log is None else self.log.info(m2)
datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
# CONECT does not exist in the pdb file
fnamepdb = "./data/103191_noctane.order_cryst_noconect_noresname.pdb"
filenamepdb = check_conect_pdb(fnamepdb)
self.assertIsNone(filenamepdb)
m = "\tBad format for PDB. No residues name in PDB file\n"
m += "\tNone object is capture!!!\n"
m += "\tPASSED\n"
print(m) if self.log is None else self.log.info(m)
# ===============================================================
    @classmethod
    def tearDownClass(cls):
        """Write the closing banner; the finishing timestamp is logged next."""
        m = "\n\t***************** END Check_connect_pdb TEST *****************"
        print(m) if cls.log is None else cls.log.info(m)
        now = datetime.datetime.now().strftime("%d-%m-%Y %H:%M:%S")
cls.log.info("\t\tFinishing: \t {}\n".format(now)) | jrdcasa/replicate_polymer_topology | tests/01-test_check_connect_pdb.py | 01-test_check_connect_pdb.py | py | 4,181 | python | en | code | 0 | github-code | 13 |
8865933561 | # -*- encoding: utf-8 -*-
from setuptools import setup, find_packages
from codecs import open
import os
basedir = os.path.abspath(os.path.dirname(__file__))

# The long description is taken from the README shipped next to this setup.py.
with open(os.path.join(basedir, "README.md"), encoding="utf-8") as readmefile:
    long_description = readmefile.read()

setup(
    name="cosmopvcod",
    version="0.0.1",
    description="extract cloud properties from cosmo file for use in pv model inversion",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="",
    author="James Barry",
    author_email="",
    classifiers=[
        "Development Status :: 3 - Alpha",
        "License :: OSI Approved :: MIT License",
    ],
    keywords="",
    packages=find_packages(),
    install_requires=["xarray"], #"pyresample",
    # BUG FIX: the setuptools keyword is `extras_require`; the original
    # `extra_require` was an unknown keyword and silently ignored.
    extras_require={
        "dev": ["check-manifest"],
        "test": [],
    },
    data_files=[],
    entry_points={
        "console_scripts": [
            "cosmo2pvcod=cosmopvcod.cosmo_cloud_props:main",
        ],
    },
    project_urls={
    },
)
| jamesmhbarry/PVRAD | cosmopvcod/setup.py | setup.py | py | 1,039 | python | en | code | 1 | github-code | 13 |
14218506129 | import requests
import streamlit as st
from PIL import Image
from io import BytesIO
import datetime
API_BASE_URL = "https://api.ebird.org/v2"
st.title("Recent Bird Sightings in New Hanover County")
def get_recent_sightings():
    """Fetch recent sightings for New Hanover County (US-NC-129) from the eBird API.

    The 30-day look-back window is expressed via the API's `back` parameter.
    (Removed the unused `today`/`last_month` locals the original computed.)
    """
    url = f"{API_BASE_URL}/data/obs/US-NC-129/recent"
    params = {
        "back": 30,  # look-back window in days
        "key": st.secrets["EBIRD_API_KEY"],
    }
    response = requests.get(url, params=params)
    return response.json()
sightings = get_recent_sightings()

# Pagination: show a fixed-size window of the result list, selected by a
# 1-based page number widget.
items_per_page = 10
page_number = st.number_input(
    label="Page Number", min_value=1, value=1, step=1, format="%i"
)
start_index = (page_number - 1) * items_per_page
end_index = start_index + items_per_page
sightings_to_display = sightings[start_index:end_index]

for sighting in sightings_to_display:
    # Every field falls back to "Unknown" when absent from the API payload.
    bird_name = sighting.get("comName", "Unknown")
    # NOTE(review): "howMany" is eBird's observed-individual count; labelling
    # it a "Rarity Score" below looks questionable — confirm the intent.
    rarity_score = sighting.get("howMany", "Unknown")
    species_code = sighting.get("speciesCode", "Unknown")
    observation_date = sighting.get("obsDt", "Unknown")
    # Display bird info in two columns: image placeholder | details.
    col1, col2 = st.columns(2)
    with col1:
        # st.image(bird_img, width=200) # Uncomment when you have an image API
        pass
    with col2:
        st.header(bird_name)
        st.subheader(f'Rarity Score: {rarity_score}')
        st.write(f'Species Code: {species_code}')
        st.write(f'Observation Date: {observation_date}')
    # Add a small blurb about the bird
    st.write("Bird description or blurb goes here.")
    # Trending status could be calculated based on historical data or other factors
    st.write("Trending: Upward/Downward")
| weshuth/nhcbirds | main.py | main.py | py | 1,724 | python | en | code | 0 | github-code | 13 |
37996828378 | include.block( "SMEWTrilepSkim/SMTRILEP_PhotonSelector.py" )
from D3PDMakerConfig.D3PDMakerFlags import D3PDMakerFlags
from D2PDMaker.D2PDMakerConf import D2PDPhotonSelector
from AthenaCommon.AlgSequence import AlgSequence
# Schedule a loose-ID, Et > 10 GeV photon filter in the pre-D3PD sequence.
# NOTE(review): `egammaPID` and `Units` are presumably brought into scope by
# the include.block(...) at the top of this job-option file — confirm.
preseq = AlgSequence (D3PDMakerFlags.PreD3PDAlgSeqName())
preseq += D2PDPhotonSelector( "SMTRILEP_HighPtPhotonFilter",
                              inputCollection = 'PhotonAODCollection',
                              outputCollection = 'HighPtPhotons',
                              photonVetoAuthorList = [ 128 ],
                              photonID = egammaPID.PhotonIDLoose,
                              etMin = 10.0*Units.GeV )
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/D3PDMaker/PhysicsD3PDMaker/share/SMTRILEP_PhotonSelector.py | SMTRILEP_PhotonSelector.py | py | 722 | python | en | code | 1 | github-code | 13 |
2056803620 | import datetime
from scrapy.http.response.html import HtmlResponse
from climatedb import parse
from climatedb.crawl import create_article_name, find_start_url
from climatedb.models import ArticleItem
from climatedb.spiders.base import BaseSpider
class ChinaDailySpider(BaseSpider):
    """Spider that turns China Daily article pages into ArticleItem records."""

    name = "china_daily"

    def parse(self, response: HtmlResponse) -> ArticleItem:
        """
        @url https://www.chinadaily.com.cn/a/202301/19/WS63c8a4a8a31057c47ebaa8e4.html
        @returns items 1
        @scrapes headline date_published body article_name article_url
        """
        # Grab every body paragraph (skipping the trailing e-mail line),
        # join them into one string and normalise it.
        paragraphs = response.xpath('//div[@id="Content"]/p[not(@class="email")]/text()').getall()
        article_text = parse.clean_body(" ".join(paragraphs))

        headline = response.xpath('//meta[@property="og:title"]/@content').get()
        published = response.xpath('//meta[@name="publishdate"]/@content').get()

        return ArticleItem(
            body=article_text,
            html=response.text,
            headline=headline,
            date_published=datetime.date.fromisoformat(published),
            article_url=response.url,
            article_name=create_article_name(response.url),
            article_start_url=find_start_url(response),
        )
| ADGEfficiency/climate-news-db | climatedb/spiders/china_daily.py | china_daily.py | py | 1,244 | python | en | code | 12 | github-code | 13 |
3013405280 | from rest_framework import status
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from rest_framework.views import APIView
from base.utils import get_today
from base.messages import Messages
from django. db. models import Sum
from ..models import DailyMenu, MainMenu, Feedback
from .serializers import (
DailyMenuCreateSerializer,
FeedbackSerializer,
MainMenuCreateSerializer
)
class MainMenuCreateView(APIView):
    """POST endpoint that registers a new main-menu entry."""
    serializer_class = MainMenuCreateSerializer

    def post(self, request):
        try:
            serializer = self.serializer_class(data=request.data)
            if not serializer.is_valid():
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            serializer.save()
            return Response(Messages.MAIN_MENU, status=status.HTTP_201_CREATED)
        except Exception as e:
            return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class MainMenuView(APIView):
    """GET endpoint listing the main menu of a given restaurant (public)."""
    serializer_class = MainMenuCreateSerializer
    permission_classes = (AllowAny,)

    def get(self, request):
        try:
            restraunt = request.query_params.get('restraunt')
            menu_items = MainMenu.objects.filter(restraunt=restraunt)
            serialized = self.serializer_class(menu_items, many=True)
            return Response(serialized.data, status=status.HTTP_200_OK)
        except Exception as e:
            return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class DailyMenuCreateView(APIView):
    """POST endpoint that registers a new daily-menu entry."""
    serializer_class = DailyMenuCreateSerializer

    def post(self, request):
        try:
            serializer = self.serializer_class(data=request.data)
            if not serializer.is_valid():
                return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
            serializer.save()
            return Response(Messages.DAILY_MANU, status=status.HTTP_201_CREATED)
        except Exception as e:
            return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class DailyMenuView(APIView):
    """GET endpoint listing today's menu of a given restaurant (public)."""
    serializer_class = DailyMenuCreateSerializer
    permission_classes = (AllowAny,)

    def get(self, request):
        try:
            restraunt = request.query_params.get('restraunt')
            todays_items = DailyMenu.objects.filter(restraunt=restraunt,
                                                    days__date=get_today())
            serialized = self.serializer_class(todays_items, many=True)
            return Response(serialized.data, status=status.HTTP_200_OK)
        except Exception as e:
            return Response(str(e), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
class FeedbackView(APIView):
    """Public feedback endpoint: at most one submission per e-mail, restaurant and day."""
    serializer_class = FeedbackSerializer
    permission_classes = (AllowAny,)
    def post(self, request):
        """Create feedback unless this e-mail already submitted today for this restaurant."""
        try:
            # True when this e-mail already left feedback for this restaurant today.
            queryset = Feedback.objects.filter(
                                    restraunt=request.\
                                    data['restraunt'],
                                    created_at__date=get_today(),
                                    email=request.data['email']).exists()
            if not queryset:
                serializer = self.serializer_class(data=request.data)
                if serializer.is_valid():
                    serializer.save()
                    return Response(Messages.FEEDBACK,
                                    status=status.HTTP_201_CREATED)
                return Response(serializer.errors,
                                status=status.HTTP_400_BAD_REQUEST)
            else:
                # Duplicate for the day -> 406 with an explanatory message.
                return Response(Messages.FEEDBACK_NOT_SUBMIT,
                                status=status.HTTP_406_NOT_ACCEPTABLE)
        except Exception as e:
            return Response(str(e), status=status.\
                HTTP_500_INTERNAL_SERVER_ERROR)
class FeedbackListView(APIView):
    """GET endpoint returning today's feedback entries for one restaurant."""
    serializer_class = FeedbackSerializer

    def get(self, request):
        restraunt = request.query_params.get('restraunt')
        todays_feedback = Feedback.objects.filter(created_at__date=get_today(),
                                                  restraunt=restraunt)
        serialized = self.serializer_class(todays_feedback, many=True)
        return Response(serialized.data, status=status.HTTP_200_OK)
class TopMenuListView(APIView):
    """GET endpoint for the top-3 restaurants by summed vote count."""
    serializer_class = FeedbackSerializer
    def get(self, request):
        # Aggregate votes per restraunt and keep the three highest totals.
        qr = Feedback.objects.values('restraunt').annotate(vote_count=
                            Sum('vote')).order_by('-vote_count')[:3]
        feedback_data = [i['restraunt'] for i in qr]
        # NOTE(review): feedback_data is a list of raw restraunt values, not
        # Feedback instances; serializing it with FeedbackSerializer looks
        # wrong — verify the intended response shape.
        serializer = self.serializer_class(feedback_data, many=True)
        return Response(serializer.data, status=status.HTTP_200_OK)
| shreeramy/RestaurantAPITask | restrant/api/api_views.py | api_views.py | py | 4,890 | python | en | code | 0 | github-code | 13 |
39389390276 | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 1 21:16:00 2020
@author: Emerl2
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import tensorflow as tf
train_path = r'C:\Users\Emerl2\shopee-product-detection-student\train\train\train\\'
test_path = r'C:\Users\Emerl2\shopee-product-detection-student\test\test\test\\'

# Filenames longer than 36 chars are treated as broken downloads —
# presumably the normal names are 32-hex ids plus ".jpg"; TODO confirm.
broken_fnames = []
for label in os.listdir(train_path):
    label_path = os.path.join(train_path, label)
    for filename in os.listdir(label_path):
        if len(filename) > 36:
            # BUG FIX: the original concatenated label_path + filename
            # without a path separator, producing invalid reported paths.
            full = os.path.join(label_path, filename)
            print(full)
            broken_fnames.append(full)
print()
for filename in os.listdir(test_path):
    if len(filename) > 36:
        full = os.path.join(test_path, filename)
        print(full)
        broken_fnames.append(full)

f = open('broken-file-names.txt', 'w')
f.write('\n'.join(broken_fnames))
f.close() | Emerler/Product-Detection | find-broken-file.py | find-broken-file.py | py | 889 | python | en | code | 0 | github-code | 13 |
16455537139 | import discord
from discord.ext import commands
from discord.ext.commands import Greedy
from typing import Union
# NOTE(review): replace with a real bot token; never commit live tokens.
TOKEN = "token-here"
# Command prefix "pls"; built-in help disabled; presence advertises usage.
bot = commands.Bot(command_prefix='pls',
                   help_command=None,
                   activity=discord.Game(name="`pls yoink <emotes>` or `delete <emotes>`"),
                   strip_after_prefix=True)
@bot.event
async def on_ready():
    # Fires once the gateway connection is established.
    print('Ready!')
@commands.guild_only()
@bot.command(aliases=['add'])
async def yoink(ctx, emotes: Greedy[Union[discord.Emoji, discord.PartialEmoji]]):
    """Copy the given emotes into the current server, skipping ones already here."""
    if not emotes:
        await ctx.send('You didn\'t specify any emotes >:(')
        return
    in_server = []
    added = []
    for emote in emotes:
        already_here = isinstance(emote, discord.Emoji) and emote.guild == ctx.guild
        if already_here:
            in_server.append(emote)
            continue
        clone = await ctx.guild.create_custom_emoji(
            name=emote.name,
            image=await emote.url.read(),
            reason=f'Added by {ctx.author} ({ctx.author.id})')
        added.append(clone)
    if not added:
        await ctx.send(f'Specified emote{"s" if len(emotes) != 1 else ""} are already in this server >:(')
        return
    if in_server:
        await ctx.send(f'{" ".join(map(str, added))} have been added to this server, while '
                       f'{" ".join(map(str, in_server))} wasn\'t because they are already added!')
        return
    await ctx.send(f'{" ".join(map(str, added))} has been added to this server!')
@commands.is_owner()
@commands.guild_only()
@bot.command(aliases=['remove', 'del'])
async def delete(ctx, emotes: Greedy[discord.Emoji]):
    """Delete the given emotes from this server (bot owner only)."""
    if not emotes:
        await ctx.send('You didn\'t specify any emotes >:(')
        return
    for target in emotes:
        await target.delete()
    await ctx.send(f'{len(emotes)} successfully deleted!')
@bot.event
async def on_command_error(ctx, error):
    """Report unknown commands and guild-only misuse; other errors are ignored."""
    if isinstance(error, commands.CommandNotFound):
        await ctx.send('That\'s not a command >:(')
        return
    if isinstance(error, commands.NoPrivateMessage):
        await ctx.send('You can\'t use that command in DM\'s >:(')
bot.run(TOKEN)
| DoggieLicc/Emote-Yoinker | emoteyoink.py | emoteyoink.py | py | 2,156 | python | en | code | 0 | github-code | 13 |
37785399865 | """Data Structures for Disjoint Sets, Reference - CLRS Page 565"""
# ------------------------- Visual Representation for Linked List implementation on Page 565 CLRS --------------------------
set_dict = {}  # global registry: representative item -> SetObject (shared by all DisjointSet instances)

class DisjointSet:
    """Disjoint-set (union-find) backed by linked lists (CLRS p. 565).

    Each set is a SetObject holding head/tail pointers into a linked list of
    Nodes; every Node's BACKWARD_LINK points back at its owning SetObject, so
    the representative (the head node) is reachable from any member.
    """

    class Node:
        # One element of a set.  FORWARD_LINK -> next node in the list;
        # BACKWARD_LINK -> the owning SetObject.
        def __init__(self, info, FORWARD_LINK=None, BACKWARD_LINK=None):
            self.info = info
            self.FORWARD_LINK = FORWARD_LINK
            self.BACKWARD_LINK = BACKWARD_LINK

    class SetObject:
        # Handle for one disjoint set: a name plus head/tail of its node list.
        def __init__(self, set_object_name=None, head=None, tail=None):
            self.set_object_name = set_object_name
            self.head = head
            self.tail = tail

        def __hash__(self):
            return hash(self.set_object_name)

    def Make_Set(self, item, setname):
        """Create and register a new singleton set whose only member is `item`."""
        setObject = self.SetObject(setname)
        newNode = self.Node(item, BACKWARD_LINK=setObject)
        setObject.head = newNode
        setObject.tail = newNode
        set_dict[item] = setObject
        return setObject

    def Find_Set(self, item):
        """Return the representative Node (list head) of the set containing `item`.

        Fast path: `item` is itself a registry key (a representative).
        Slow path: linear scan of every registered set's list.
        Raises ValueError when `item` belongs to no set.
        """
        if set_dict.get(item) is not None:
            return set_dict[item].head
        for value in set_dict.values():
            node = value.head
            while node is not None:
                if node.info == item:
                    # BUG FIX: the original returned node.BACKWARD_LINK (a
                    # SetObject) here while the fast path returned a Node;
                    # both paths now consistently return the head Node.
                    return node.BACKWARD_LINK.head
                node = node.FORWARD_LINK
        raise ValueError('Cannot find this item in any Disjoint Set')

    def Union(self, x, y):
        """Append set `y` onto set `x` (CLRS p. 565) and return `x`.

        Every moved node's BACKWARD_LINK is repointed at `x`; `y` is emptied
        and its representative is removed from the registry.
        """
        x.tail.FORWARD_LINK = y.head
        x.tail = y.tail
        node = y.head
        while node is not None:
            node.BACKWARD_LINK = x
            node = node.FORWARD_LINK
        set_dict[x.head.info] = x   # (re)register under x's representative
        del set_dict[y.head.info]   # y's representative is no longer a set key
        y.head = None
        y.tail = None
        return x
# Demo: build four singleton sets, merge them all into one, then verify that
# two members from originally different sets resolve to the same set.
d = DisjointSet()
s1 = d.Make_Set(25,'S1')
s2 = d.Make_Set(8,'S2')
s3 = d.Make_Set(225,'S3')
s4 = d.Make_Set(78,'S4')
x = d.Union(s1,s2)  # {25, 8}
y = d.Union(s3,s4)  # {225, 78}
d.Union(x,y)        # {25, 8, 225, 78}
a = d.Find_Set(8)
b = d.Find_Set(78)
print(a is b)       # True: both members are in the same merged set
| anantvir/Graph_DataStructures | Disjoint_Sets_Union_Find.py | Disjoint_Sets_Union_Find.py | py | 2,630 | python | en | code | 0 | github-code | 13 |
35113917718 | import numpy as np
import csv
import cv2
import os
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Flatten, Dense, Lambda, Dropout
from keras.layers.convolutional import Convolution2D, Cropping2D
# data conditioning
# Load every row of the driving log; each row holds the three camera image
# paths plus the recorded steering angle.
samples = []
with open('./learn/driving_log.csv') as csvfile:
    samples.extend(csv.reader(csvfile))

# Hold out 20% of the recorded samples for validation.
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
# generator to reduce memory usage
def generator(samples, batch_size=32):
    """Yield shuffled batches of (images, steering angles) indefinitely.

    Each CSV row contributes six training examples: center/left/right camera
    frames (left/right receive a +/-0.2 steering correction) plus a
    horizontally flipped copy of each with the angle negated, so every yield
    holds batch_size * 6 samples.
    """
    num_samples = len(samples)
    while 1: # Loop forever so the generator never terminates
        shuffle(samples)
        for offset in range(0, num_samples, batch_size):
            batch_samples = samples[offset:offset+batch_size]
            images = []
            measurements = []
            for batch_sample in batch_samples:
                # import pictures (central picture for ideal course and left/right pictures for recentering)
                source_path_center = batch_sample[0]
                source_path_left = batch_sample[1]
                source_path_right = batch_sample[2]
                # Only the basename is kept: images are read from ./learn/IMG/.
                path, filename_center = os.path.split(source_path_center)
                path, filename_left = os.path.split(source_path_left)
                path, filename_right = os.path.split(source_path_right)
                current_path_center = './learn/IMG/' + filename_center
                current_path_left = './learn/IMG/' + filename_left
                current_path_right = './learn/IMG/' + filename_right
                image_center = mpimg.imread(current_path_center)
                images.append(image_center)
                image_left = mpimg.imread(current_path_left)
                images.append(image_left)
                image_right = mpimg.imread(current_path_right)
                images.append(image_right)
                # import steering wheel angle (center for ideal course / correction represents maneuver to steer back to center)
                correction = 0.2 # this is a parameter to tune
                measurement = float(batch_sample[3])
                measurements.append(measurement)
                measurements.append(measurement + correction)
                measurements.append(measurement - correction)
            augmented_images, augmented_measurements = [], []
            # pictures are flipped and steer angles mirrored to enlarge training set and adapt to right curves
            for image, measurement in zip(images, measurements):
                augmented_images.append(image)
                augmented_measurements.append(measurement)
                augmented_images.append(cv2.flip(image,1))
                augmented_measurements.append(measurement*-1.0)
            yield shuffle(np.array(augmented_images), np.array(augmented_measurements))
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
# model definition
# implementation of NVIDIA model
model = Sequential()
model.add(Lambda(lambda x: x / 255 - 0.5, input_shape=(160, 320, 3))) # normalization
model.add(Cropping2D(cropping=((70,25),(0,0)))) #cropping: only use relevant picture data
model.add(Convolution2D(24, 5, 5 ,subsample=(2,2), activation="relu")) #convolution: depth: 24; kernel: 5x5; stride: 2
model.add(Convolution2D(36, 5, 5 ,subsample=(2,2), activation="relu")) #convolution: depth: 36; kernel: 5x5; stride: 2
model.add(Convolution2D(48, 5, 5 ,subsample=(2,2), activation="relu")) #convolution: depth: 48; kernel: 5x5; stride: 2
model.add(Convolution2D(64, 3, 3, activation="relu")) #convolution: depth: 64; kernel: 3x3; stride: 1
model.add(Convolution2D(64, 3, 3, activation="relu")) #convolution: depth: 64; kernel: 3x3; stride: 1
model.add(Dropout(0.5)) #dropout to avoid over-fitting
model.add(Flatten())
model.add(Dense(100)) #fully connected: output: 100
model.add(Dense(50)) #fully connected: output: 50
model.add(Dense(10)) #fully connected: output: 10 (output of NVIDIA model)
model.add(Dense(1)) #fully connected: output: 1 (for only controlling steering wheel angle)
model.compile(optimizer='adam', loss='mse') # use adam optimizer and MSE loss function
# training with history output and generators / sample length increased due to augmentation and correction
# (the *6 factor matches the six examples the generator emits per CSV row)
history_object = model.fit_generator(train_generator, samples_per_epoch= len(train_samples)*6, validation_data=validation_generator, nb_val_samples=len(validation_samples)*6, nb_epoch=3, verbose = 1)
### print the keys contained in the history object
print(history_object.history.keys())
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
#save model
model.save('model.h5')
| DirkH78/CarND-Behavioral-Cloning-P3 | model.py | model.py | py | 5,256 | python | en | code | 0 | github-code | 13 |
70858175377 | """
Author: Daniel Krusch
Purpose: To convert product type data to json
Methods: GET, POST
"""
"""View module for handling requests about product categories"""
from django.contrib.auth.models import User
from django.http import HttpResponseServerError
from rest_framework.viewsets import ViewSet
from rest_framework.response import Response
from rest_framework import serializers
from rest_framework import status
from swipehomeapi.models import Search, AppUser, UserType
from rest_framework.permissions import IsAuthenticatedOrReadOnly
class UserSerializer(serializers.ModelSerializer):
    """JSON serializer for an app_user's related Django auth user (name fields only)."""
    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'username')
class AppUserSerializer(serializers.ModelSerializer):
    """JSON serializer for app_users; embeds the related auth user."""
    user = UserSerializer(many=False)
    class Meta:
        model = AppUser
        fields = ('user',)
class SearchSerializer(serializers.ModelSerializer):
    """JSON serializer for saved searches; embeds the owning app_user."""
    app_user = AppUserSerializer(many=False)
    class Meta:
        model = Search
        fields = ('id', 'city', 'state_code', 'postal_code', 'app_user', 'userType')
class Searches(ViewSet):
    """CRUD endpoints for a user's saved property searches."""

    # permission_classes = (IsAuthenticatedOrReadOnly,)

    def create(self, request):
        """Handle POST operations.

        Returns:
            Response -- JSON serialized search instance (201)
        """
        new_search = Search()
        new_search.city = request.data["city"]
        new_search.state_code = request.data["state_code"]
        new_search.postal_code = request.data["postal_code"]
        new_search.app_user = AppUser.objects.get(user=request.data["app_user"])
        new_search.userType = UserType.objects.get(pk=request.data["userType"])
        new_search.save()
        serializer = SearchSerializer(new_search, context={'request': request})
        return Response(serializer.data, status=status.HTTP_201_CREATED)

    def update(self, request, pk=None):
        """Handle PUT requests for a single saved search; returns 204 on success."""
        search = Search.objects.get(pk=pk)
        search.city = request.data["city"]
        search.state_code = request.data["state_code"]
        search.postal_code = request.data["postal_code"]
        # BUG FIX: look up AppUser by its `user` field, as create() does
        # (`app_user` is not a field of AppUser).
        search.app_user = AppUser.objects.get(user=request.data["app_user"])
        # BUG FIX: assign a UserType instance, as create() does; assigning the
        # raw request value to the FK attribute fails at save time.
        search.userType = UserType.objects.get(pk=request.data["userType"])
        search.save()
        return Response({}, status=status.HTTP_204_NO_CONTENT)

    def retrieve(self, request, pk=None):
        """Handle GET requests for a single saved search."""
        try:
            search = Search.objects.get(pk=pk)
            serializer = SearchSerializer(search, context={'request': request})
            return Response(serializer.data)
        except Exception as ex:
            return HttpResponseServerError(ex)

    def list(self, request):
        """Handle GET requests: all saved searches of the authenticated user."""
        app_user = AppUser.objects.get(user=request.auth.user.id)
        searches = Search.objects.filter(app_user=app_user)
        serializer = SearchSerializer(
            searches, many=True, context={'request': request})
        return Response(serializer.data)

    def destroy(self, request, pk=None):
        """Handle DELETE requests; 204 on success, 404 when missing, 500 otherwise."""
        try:
            search = Search.objects.get(pk=pk)
            search.delete()
            return Response({}, status=status.HTTP_204_NO_CONTENT)
        except Search.DoesNotExist as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_404_NOT_FOUND)
        except Exception as ex:
            return Response({'message': ex.args[0]}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
| thejmdw/swipehome-server | swipehomeapi/views/search.py | search.py | py | 4,882 | python | en | code | 0 | github-code | 13 |
71924192339 | import tensorflow as tf
class Resnet:
    """
    Builds a model up to the final convolutional layer.

    34-layer ResNet architecture (https://arxiv.org/pdf/1512.03385.pdf).
    With number_of_sections = 5 the output is a conv feature map of shape
    (batch_size, out_width, out_height, 512) where
    out_width & out_height == image_size * ((0.5) ** number_of_sections).

    NOTE(review): SeparableConv2D is used where the paper uses plain Conv2D,
    and the residual branch convolves the projected shortcut rather than the
    block input — confirm these deviations are intentional.
    """
    def __init__(self, tf, image_height, image_width, image_channels, number_of_sections=5):
        # `tf` is injected so the caller controls the TensorFlow instance used.
        self.tf = tf
        self.IMAGE_HEIGHT = image_height
        self.IMAGE_WIDTH = image_width
        self.IMAGE_CHANNELS = image_channels
        self.number_of_sections = number_of_sections
        # Batch-norm hyper-parameters shared by every layer.
        self.batch_norm_momentum = 0.997
        self.batch_norm_eps = 1e-5
        self.model = self.create_model()

    def get_model(self):
        """Return the Keras model assembled in __init__."""
        return self.model

    def create_residual_block(self, inputs, filter_size, num_filters, projection_shortcut=False):
        """
        One two-conv residual block.

        With projection_shortcut=True (used when dimensions change) the
        shortcut is a strided 1x1 separable conv + batch norm of `inputs`;
        otherwise the shortcut is `inputs` itself.  The main branch is
        conv -> BN -> relu -> conv -> BN, added to the shortcut, then relu.
        """
        if projection_shortcut is False:
            shortcut = inputs
        else:
            # Reduce spatial dimensions (stride 2) so the shortcut matches.
            shortcut = self.tf.keras.layers.SeparableConv2D(filters=num_filters, kernel_size=[1, 1], strides=[2, 2], padding="valid", data_format="channels_last")(inputs)
            shortcut = self.tf.keras.layers.BatchNormalization(axis=-1, momentum=self.batch_norm_momentum, epsilon=self.batch_norm_eps)(shortcut)
        conv1 = self.tf.keras.layers.SeparableConv2D(filters=num_filters, kernel_size=[filter_size, filter_size], strides=[1, 1], padding="same", data_format="channels_last")(shortcut)
        norm_conv1 = self.tf.keras.layers.BatchNormalization(axis=-1, momentum=self.batch_norm_momentum, epsilon=self.batch_norm_eps)(conv1)
        relu_conv1 = self.tf.keras.layers.Activation("relu")(norm_conv1)
        conv2 = self.tf.keras.layers.SeparableConv2D(filters=num_filters, kernel_size=[filter_size, filter_size], strides=[1, 1], padding="same", data_format="channels_last")(relu_conv1)
        norm_conv2 = self.tf.keras.layers.BatchNormalization(axis=-1, momentum=self.batch_norm_momentum, epsilon=self.batch_norm_eps)(conv2)
        shortcut_connected_conv2 = self.tf.keras.layers.Add()([norm_conv2, shortcut])
        relu_conv2 = self.tf.keras.layers.Activation("relu")(shortcut_connected_conv2)
        return relu_conv2

    def create_model(self):
        """Assemble the network; returns a tf.keras Model, or None for an unsupported section count."""
        orig_inputs = self.tf.keras.Input(shape=(self.IMAGE_HEIGHT, self.IMAGE_WIDTH, self.IMAGE_CHANNELS))
        #===============================================================
        #Conv1
        #===============================================================
        # First reduce dimensions with a stride-2 1x1 conv.
        inputs = self.tf.keras.layers.SeparableConv2D(filters=64, kernel_size=[1, 1], strides=[2, 2], padding="valid", data_format="channels_last")(orig_inputs)
        inputs = self.tf.keras.layers.BatchNormalization(axis=-1, momentum=self.batch_norm_momentum, epsilon=self.batch_norm_eps)(inputs)
        inputs = self.tf.keras.layers.Activation("relu")(inputs)
        # Next perform the first actual (7x7) conv.
        conv1 = self.tf.keras.layers.SeparableConv2D(filters=64, kernel_size=[7, 7], strides=[1, 1], padding="same", data_format="channels_last")(inputs)
        norm_conv1 = self.tf.keras.layers.BatchNormalization(axis=-1, momentum=self.batch_norm_momentum, epsilon=self.batch_norm_eps)(conv1)
        # BUG FIX: dropped the stray `relu_conv2 =` alias from this assignment.
        relu_conv1 = self.tf.keras.layers.Activation("relu")(norm_conv1)
        conv1_pool = self.tf.keras.layers.MaxPool2D(pool_size=[2, 2], strides=[2, 2], padding="valid", data_format="channels_last")(relu_conv1)
        #===============================================================
        #Conv2
        #===============================================================
        conv2_1 = self.create_residual_block(inputs=conv1_pool, filter_size=3, num_filters=64)
        conv2_2 = self.create_residual_block(inputs=conv2_1, filter_size=3, num_filters=64)
        conv2_3 = self.create_residual_block(inputs=conv2_2, filter_size=3, num_filters=64)
        if (self.number_of_sections == 2):
            model = self.tf.keras.Model(inputs=orig_inputs, outputs=conv2_3)
            return model
        #===============================================================
        #Conv3
        #===============================================================
        conv3_1 = self.create_residual_block(inputs=conv2_3, filter_size=3, num_filters=128, projection_shortcut=True)
        conv3_2 = self.create_residual_block(inputs=conv3_1, filter_size=3, num_filters=128)
        conv3_3 = self.create_residual_block(inputs=conv3_2, filter_size=3, num_filters=128)
        conv3_4 = self.create_residual_block(inputs=conv3_3, filter_size=3, num_filters=128)
        if (self.number_of_sections == 3):
            model = self.tf.keras.Model(inputs=orig_inputs, outputs=conv3_4)
            return model
        #===============================================================
        #Conv4
        #===============================================================
        conv4_1 = self.create_residual_block(inputs=conv3_4, filter_size=3, num_filters=256, projection_shortcut=True)
        conv4_2 = self.create_residual_block(inputs=conv4_1, filter_size=3, num_filters=256)
        conv4_3 = self.create_residual_block(inputs=conv4_2, filter_size=3, num_filters=256)
        conv4_4 = self.create_residual_block(inputs=conv4_3, filter_size=3, num_filters=256)
        conv4_5 = self.create_residual_block(inputs=conv4_4, filter_size=3, num_filters=256)
        conv4_6 = self.create_residual_block(inputs=conv4_5, filter_size=3, num_filters=256)
        if (self.number_of_sections == 4):
            model = self.tf.keras.Model(inputs=orig_inputs, outputs=conv4_6)
            return model
        #===============================================================
        #Conv5
        #===============================================================
        conv5_1 = self.create_residual_block(inputs=conv4_6, filter_size=3, num_filters=512, projection_shortcut=True)
        conv5_2 = self.create_residual_block(inputs=conv5_1, filter_size=3, num_filters=512)
        conv5_3 = self.create_residual_block(inputs=conv5_2, filter_size=3, num_filters=512)
        if (self.number_of_sections == 5):
            model = self.tf.keras.Model(inputs=orig_inputs, outputs=conv5_3)
            return model
        else:
            return None
| colinsteidtmann/object-detection | models/resnet_model.py | resnet_model.py | py | 6,982 | python | en | code | 0 | github-code | 13 |
71945808978 | '''
Ordered dictionaries: they remember the insertion order. So when we iterate over them,
they return values in the order they were inserted.
For a normal dictionary, when we test whether two dictionaries are equal,
this equality is based only on their keys and values.
For ordered dictionary, when we test to see whether two dictionaries are equal,
insertion order is considered as an equality test between two OrderedDicts with
same key and values but different insertion order.
'''
from collections import OrderedDict  # BUG FIX: OrderedDict was used without being imported

od1 = OrderedDict()
od1['one'] = 1
od1['two'] = 2

od2 = OrderedDict()
od2['two'] = 2

# Unequal regardless of order here: od1 has two entries, od2 only one.
print(od1 == od2) #Output: False

# OrderedDict is often used in conjunction with sorted() to create a sorted
# dictionary.  Items are ordered by the key 4*v - v**2 (v=1 -> 3, v=2 -> 4).
od3 = OrderedDict(sorted(od1.items(), key = lambda t : (4*t[1]) - t[1]**2))
od3.values() #Output: odict_values([1, 2])  (the original comment showed values from a different example)
| AniketKul/learning-python3 | ordereddictionaries.py | ordereddictionaries.py | py | 849 | python | en | code | 0 | github-code | 13 |
26785344165 | from flask import Flask
from flask import request
from flask import jsonify
import os
import tempfile
from speech_to_text import speech_to_text_translated, speech_to_text
from summary_with_openai import summary_with_davinci
from text_to_italian import translate
from text_to_summary import text_to_summary
from video_to_speech import video_to_speech
app = Flask(__name__)
@app.route("/")
def hello_world():
return "<p>Hello, World!</p>"
@app.route("/speech_to_text_translated", methods=["POST"])
def speech_to_text_translated_api():
file = request.files['file']
if not file:
return "bad request"
with tempfile.TemporaryDirectory() as tmpdirname:
print('created temporary directory', tmpdirname)
filename = file.filename
input_file_path = os.path.join(tmpdirname, filename)
output_file_path = os.path.join(tmpdirname, 'audio.mp3')
file.save(input_file_path)
video_to_speech(input_file_path, output_file_path)
res = speech_to_text_translated(output_file_path)
return jsonify(res)
@app.route("/text_to_summary", methods=["POST"])
def text_to_summary_api():
input_ = request.get_json()['input']
res = text_to_summary(input_)
return jsonify(res)
@app.route("/translate", methods=["POST"])
def translate_api():
input_ = request.get_json()['input']
res = translate(input_)
return jsonify(res)
@app.route("/summarize", methods=["POST"])
def summarize_api():
file = request.files['file']
if not file:
return "bad request"
with tempfile.TemporaryDirectory() as tmpdirname:
print('created temporary directory', tmpdirname)
filename = file.filename
input_file_path = os.path.join(tmpdirname, filename)
output_file_path = os.path.join(tmpdirname, 'audio.mp3')
file.save(input_file_path)
video_to_speech(input_file_path, output_file_path)
[_, transcription] = speech_to_text_translated(output_file_path)
[_, summary] = text_to_summary(transcription)
[_, res] = translate(summary)
return jsonify(res)
@app.route("/summarize_with_openai", methods=["POST"])
def summarize_with_openai_api():
file = request.files['file']
if not file:
return "bad request"
with tempfile.TemporaryDirectory() as tmpdirname:
print('created temporary directory', tmpdirname)
filename = file.filename
input_file_path = os.path.join(tmpdirname, filename)
output_file_path = os.path.join(tmpdirname, 'audio.mp3')
file.save(input_file_path)
video_to_speech(input_file_path, output_file_path)
[_, transcription] = speech_to_text(output_file_path)
print("Audio transcription:", transcription)
[_, summary] = summary_with_davinci(transcription)
return jsonify(summary)
if __name__ == "__main__":
app.run(debug=True)
| AndreaCaglio97/video-summarization | app.py | app.py | py | 2,902 | python | en | code | 0 | github-code | 13 |
40131128370 | class Solution:
def defangIPaddr(self, address: str) -> str:
address = list(address)
for i in range(len(address)):
if address[i] == '.':
address[i] = '[.]'
answer = ''
for c in address:
answer += c
return answer
| dlwlstks96/codingtest | LeetCode/1108_Defanging an IP Address.py | 1108_Defanging an IP Address.py | py | 326 | python | en | code | 2 | github-code | 13 |
26603260750 | # 可视化神经网络的过滤器
# 想要观察卷积神经网络学到的过滤器
# 显示每个过滤器所响应的视觉模式
from keras.applications import VGG16
from keras import backend as K
import matplotlib.pyplot as plt
import numpy as np
# import tensorflow as tf
#
# tf.compat.v1.disable_eager_execution()
model = VGG16(weights='imagenet',
include_top=False)
model.summary()
layer_name = 'block3_conv1'
filter_index = 0
layer_output = model.get_layer(layer_name).output
loss = K.mean(layer_output[:, :, :, filter_index])
# 获取损失相对于输入的梯度
grads = K.gradients(loss, model.input)[0]
# 梯度标准化技巧
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
# 给定numpy输入值,得到numpy输出值
iterate = K.function([model.input], [loss, grads])
# 通过随机梯度下降让损失最大化
input_img_data = np.random.random((1, 150, 150, 3)) * 20 + 128.
step = 1.
for i in range(40):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
# 将张量转换为有效图像的实用函数
def deprocess_image(x):
x -= x.mean()
x /= (x.std() + 1e-5)
x *= 0.1
x += 0.5
x = np.clip(x, 0, 1)
# x *= 255
# x = np.clip(x, 0, 255)
# x/=255.
return x
# 生成过滤器可视化的函数
# 构建一个损失函数,将该层第 n 个过滤器的激活最大化
def generate_pattern(layer_name, filter_index, size=150):
layer_output = model.get_layer(layer_name).output
loss = K.mean(layer_output[:, :, :, filter_index])
grads = K.gradients(loss, model.input)[0]
grads /= (K.sqrt(K.mean(K.square(grads))) + 1e-5)
iterate = K.function([model.input], [loss, grads])
input_img_data = np.random.random((1, size, size, 3)) * 20 + 128.
step = 1.
for i in range(40):
loss_value, grads_value = iterate([input_img_data])
input_img_data += grads_value * step
img = input_img_data[0]
return deprocess_image(img)
# block3_conv1 层第 0 个过滤器响应的是波尔卡点(polka-dot)图案
plt.imshow(generate_pattern('block3_conv1', 0))
plt.show()
# 生成某一层中所有过滤器响应模式组成的网格
# 查看如下5个层的过滤器模式
layer_names = ['block1_conv1', 'block2_conv1', 'block3_conv1', 'block4_conv1', 'block5_conv1']
for layer_name in layer_names:
# 显示通道中的前64个滤波器
size = 64
margin = 5
results = np.zeros((8 * size + 7 * margin, 8 * size + 7 * margin, 3))
for i in range(8):
for j in range(8):
filter_img = generate_pattern(layer_name, i + (j * 8), size=size)
horizontal_start = i * size + i * margin
horizontal_end = horizontal_start + size
vertical_start = j * size + j * margin
vertical_end = vertical_start + size
results[horizontal_start: horizontal_end, vertical_start: vertical_end, :] = filter_img
plt.figure(figsize=(20, 20))
plt.imshow(results)
plt.show()
| linhexiu/Attention | V9.py | V9.py | py | 3,023 | python | en | code | 0 | github-code | 13 |
12261348210 | #!/usr/bin/env python
from gimpfu import *
GRID_COLUMNS = 12
GRID_COLUMN_WIDTH = 80
GRID_WIDTH = GRID_COLUMNS * GRID_COLUMN_WIDTH
def python_grid(image, draw, guide_1, guide_2):
image.undo_group_start();
offset_left = (image.width - GRID_WIDTH) / 2
for i in range (GRID_COLUMNS):
base = offset_left + i * GRID_COLUMN_WIDTH
image.add_vguide(base)
image.add_vguide(base + guide_1)
image.add_vguide(base + guide_2)
image.add_vguide(base + GRID_COLUMN_WIDTH);
image.undo_group_end();
pass
register(
"python_fu_grid",
"Add 960 grid",
"Add guides according to 960px 12 column grid",
"Mihail Menshikov", "Mihail Menshikov", "2011",
"<Image>/Image/Guides/Add 960 grid...",
None,
[
(PF_SLIDER, "guide_1", "Guide 1", 10, (0, GRID_COLUMN_WIDTH, 1)),
(PF_SLIDER, "guide_2", "Guide 2", 70, (0, GRID_COLUMN_WIDTH, 1))
],
[],
python_grid)
main()
| wellspring/dotfiles | config/GIMP/2.10/plug-ins/python_grid.py | python_grid.py | py | 872 | python | en | code | 2 | github-code | 13 |
4054637220 | import cgi
import datetime
import urllib
import webapp2
import jinja2
import os
import random
from Data import *
from google.appengine.ext import db
from google.appengine.api import users
jinja_environment = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)))
class Vote(webapp2.RequestHandler):
def get(self):
self.response.out.write('in get')
def post(self):
user_name = users.get_current_user().nickname()
url = users.create_logout_url('/')
url_linktext = 'Logout'
selected_user = getField(self, 'author_name')
task_name = getField(self, 'task_name')
if task_name == "choose_category":
# display categories of the selected user
categories = db.GqlQuery( "SELECT * "
"FROM Category "
"WHERE ANCESTOR IS :1 ",
category_key(selected_user))
template_values = {
'user_name': user_name,
'selected_user': selected_user,
'url': url,
'url_linktext': url_linktext,
'categories': categories,
'back_url': self.request.url,
'home_url': '/',
}
template = jinja_environment.get_template('choosecategory.html')
self.response.out.write(template.render(template_values))
elif task_name == "vote_category":
category_name = getField(self, 'category_name')
selected_user = getField(self, 'selected_user')
self.displayItemsToVote(user_name, selected_user, category_name, url, url_linktext)
elif task_name == "cast_vote":
selected_user = getField(self, 'selected_user')
category_name = getField(self, 'category_name')
vote = getField(self, 'item_to_vote')
item1 = getField(self, 'item1')
item2 = getField(self, 'item2')
self.displayItemsToVote(user_name, selected_user, category_name, url, url_linktext, vote, item1, item2)
def displayItemsToVote(self, user_name, selected_user, category_name, url, url_linktext, vote=None, item1=None, item2=None):
# select 2 random items in this category. Display error message in case of not enough items, i.e <2
items = db.GqlQuery("SELECT * "
"FROM Item "
"WHERE ANCESTOR IS :1 ",
item_key(selected_user, category_name))
if items.count() >= 2:
index1 = random.randint(0, items.count()-1)
index2 = index1
while index2 == index1:
index2 = random.randint(0, items.count()-1)
itemsToVote = []
itemsToVote.append(items[index1].name)
itemsToVote.append(items[index2].name)
template_values = {
'user_name': user_name,
'selected_user': selected_user,
'category_name': category_name,
'url': url,
'url_linktext': url_linktext,
'itemsToVote': itemsToVote,
'back_url': self.request.url,
'home_url': '/',
}
if vote:
# update votes for item 1
item1_votes = 0
for item in items:
if item.name == item1:
if vote == "1":
item.votesFor = item.votesFor + 1
else:
item.votesAgainst = item.votesAgainst + 1
item1_votes = item.votesFor
item.put()
break
# update votes for item 2
item2_votes = 0
for item in items:
if item.name == item2:
if vote == "2":
item.votesFor = item.votesFor + 1
else:
item.votesAgainst = item.votesAgainst + 1
item2_votes = item.votesFor
item.put()
break
# add voted info to be displayed on the page
template_values['votedFor'] = item1 if vote=="1" else item2
template_values['votedAgainst'] = item2 if vote=="1" else item1
template_values['votedFor_votes'] = item1_votes if vote=="1" else item2_votes
template_values['votedAgainst_votes'] = item2_votes if vote=="1" else item1_votes
template = jinja_environment.get_template('voteitem.html')
self.response.out.write(template.render(template_values))
else:
template_values = {
'user_name': user_name,
'selected_user': selected_user,
'category_name': category_name,
'url': url,
'url_linktext': url_linktext,
'error_msg': "The category does not have enough items to vote",
'home_url': '/',
}
template = jinja_environment.get_template('voteitem.html')
self.response.out.write(template.render(template_values))
| sujalw/votemash | Vote.py | Vote.py | py | 4,349 | python | en | code | 0 | github-code | 13 |
41831549812 | from ClaseAMPL import SolverWithAMPL
import os
import time
class Control:
def __init__(self):
self.urlCarpetaCodigo = os.getcwd() + "\\codigosAMPL\\"
self.urlCarpetaData = os.getcwd() + "\\datosAMPL\\"
def createArchive(self, filename, listVariables, listPrices, listRestrictions):
# Creación de ambos ".txt"
filenameComplete = filename + ".txt"
rutacompletaCodigo = self.urlCarpetaCodigo + filenameComplete
rutacompletaData = self.urlCarpetaData + filenameComplete
# ".txt" donde se va a guardar la data (ARCHIVO VACIO)
file = open(rutacompletaData, "w")
file.close()
# ".txt" donde se va a guardar el código
file = open(rutacompletaCodigo, "w")
# Empezar a escribir el código de AMPL
# Escribir las variables
for var in listVariables:
file.write(f"var {var} integer >= 0;" + os.linesep)
# Escribir la Función a optimizar
# Unir las dos listas de datos principales en un String
position = 0
Funcion = ""
for var in listVariables:
variable = str(var)
precio = str(listPrices[position])
position += 1
Funcion += precio + "*" + variable + " + "
FuncionArreglada = Funcion.rstrip(" + ;")
# Escribe en el ".txt" la Función a optimizar
file.write(f"minimize cost: {FuncionArreglada};" + os.linesep)
# Escribir las restricciones
numeroRestriccion = 1
for restriccion in listRestrictions:
file.write(
f"subject to Restriccion{numeroRestriccion}: {restriccion};"
+ os.linesep
)
numeroRestriccion += 1
# Escribir los displays para guardar la data en otro archivo
# Escribir el comando 'solve' para que AMPL trabaje la data
RutaSolverGurobi = os.getcwd() + "\\AMPL\\gurobi.exe"
print(RutaSolverGurobi)
file.write(f'option solver "{RutaSolverGurobi}";' + os.linesep)
file.write(f"solve;" + os.linesep)
# Guardar los datos mediante displays (Código AMPL)
StringVariables = ""
for var in listVariables:
StringVariables += str(var) + ","
StringVariablesFixed = StringVariables.rstrip(",")
# Escribir el comando y especificarle donde guardar los archivos
file.write(f"display {StringVariablesFixed} > {rutacompletaData};")
# Cerrar el archivo
file.close()
# Correr el archivo en AMPL
# Inicializar AMPL dandole la url del programa
RutaAMPL = os.getcwd() + "\\AMPL\\ampl.exe"
test = SolverWithAMPL(RutaAMPL)
# Ejecutar el código que acabo de crear
test.ejecutarCodigo(rutacompletaCodigo)
return None
def controlDeError(self, filename):
filenameComplete = filename + ".txt"
rutacompletaData = self.urlCarpetaData + filenameComplete
time.sleep(5.5)
archivo = open(f"{rutacompletaData}", "r")
data = archivo.read()
if data != "":
archivo.close()
return 1
else:
archivo.close()
return 0
def borrarArchivo(self, filename):
filenameComplete = filename + ".txt"
rutacompletaCodigo = self.urlCarpetaCodigo + filenameComplete
rutacompletaData = self.urlCarpetaData + filenameComplete
os.remove(rutacompletaCodigo)
os.remove(rutacompletaData)
return None
| CerberusStar/ProyectoJoeStar | Controladora.py | Controladora.py | py | 3,525 | python | es | code | 1 | github-code | 13 |
27997775772 | import pygame
import random
pygame.display.init()
pygame.font.init()
def menu():
print('menu()')
global run
loop = True
while loop:
global text_score
clock.tick(0)
screen.fill((255, 255, 255))
# render font to image
text_score = font.render('Score: ' + str(score), True, (0, 0, 0))
# temp block - text_miss = font.render('Miss: ' + str(miss), True, (0, 0, 0))
text_menu1 = menu_font.render('Press Escape to exit', True, (0, 0, 0))
text_menu2 = menu_font.render('Press Enter to continue', True, (0, 0, 0))
# add rendered images to screen
screen.blit(text_hiscore, (0, 0))
screen.blit(text_score, (0, 30))
# temp block - screen.blit(text_miss, (0, 60))
screen.blit(text_menu1, (0, (screen_height/2-40)))
screen.blit(text_menu2, (0, screen_height/2))
pygame.display.flip()
response = True
while response:
for menu_event in pygame.event.get():
if events.type == pygame.QUIT:
print('\npygame.QUIT')
response = False
run = False
if menu_event.type == pygame.KEYDOWN:
if menu_event.key == pygame.K_ESCAPE:
print('\nescape')
response = False
print('response = False')
run = False
print('run = False')
print('quit()')
quit()
elif menu_event.key == pygame.K_RETURN:
print('\nkey pressed, enter')
response = False
print('event for loop break')
break
print('response while loop break')
break
print('return from menu()')
return
# screen dimensions, font type, player dimensions, line placements
screen_width = 400
screen_height = 540
font = pygame.font.SysFont('franklingothicbook', 21)
menu_font = pygame.font.SysFont('franklingothicbook', 36)
score = 0
hiscore = 0
miss = 0
life = 3
player_diameter = round(screen_width/3)
player_x_pos = round((screen_width/2))
player_y_pos = round(screen_height-(round(player_diameter/2)))
obstacle_diameter = round(screen_width/6)
obstacle_x_pos = round((screen_width/2))
obstacle_x_random = [round((screen_width/2)), round((screen_width/2))-round(screen_width/3),
round((screen_width/2))+round(screen_width/3)]
obstacle_y_pos = round(obstacle_diameter/2)
obstacle_y_speed = 10
line_list = [(player_diameter, 0), (player_diameter, screen_height)]
line_list2 = [(player_diameter*2, 0), (player_diameter*2, screen_height)]
# make screen, title
screen = pygame.display.set_mode([screen_width, screen_height])
pygame.display.set_caption("Ball On Three Lanes")
# game loop
tick = 30
clock = pygame.time.Clock()
run = True
while run:
# render text to image
text_score = font.render('Score: '+str(score), True, (0, 0, 0))
text_hiscore = font.render('High Score: ' + str(hiscore), True, (0, 0, 0))
text_life = font.render('Life: ' + str(life), True, (0, 0, 0))
# fill screen with white
screen.fill((255, 255, 255))
# draw lane lines
pygame.draw.lines(screen, (0, 0, 0), True, line_list, 2)
pygame.draw.lines(screen, (0, 0, 0), True, line_list2, 2)
# add rendered text images to screen
screen.blit(text_hiscore, (0, 0))
screen.blit(text_score, (0, 30))
screen.blit(text_life, (0, 60))
# obstacle
obstacle = pygame.draw.circle(screen, (255, 0, 0), [obstacle_x_pos, obstacle_y_pos], round(obstacle_diameter/2) - 1)
# draw player
player = pygame.draw.circle(screen, (0, 0, 0), [player_x_pos, player_y_pos], round(player_diameter/2)-1)
# event triggers
for events in pygame.event.get():
if events.type == pygame.QUIT:
run = False
if events.type == pygame.KEYDOWN:
# escape key brings up menu
if events.key == pygame.K_ESCAPE:
print('escape key pressed')
menu()
# left right keys to set player x positions for player drawing
if events.key == pygame.K_LEFT:
player_x_pos -= round(screen_width/3)
if events.key == pygame.K_RIGHT:
player_x_pos += round(screen_width/3)
# prevents player from moving out of x bounds
if player_x_pos < 0:
player_x_pos = round((screen_width/2)) - round(screen_width/3)
if player_x_pos > screen_width:
player_x_pos = round((screen_width/2)) + round(screen_width/3)
# obstacle movement check & loop
if 0 < obstacle_y_pos < screen_height:
obstacle_y_pos += obstacle_y_speed
if obstacle_y_pos >= screen_height-30:
obstacle_y_pos = round(obstacle_diameter/2)
obstacle_x_pos = random.choice(obstacle_x_random)
# score counting
if obstacle_x_pos == player_x_pos:
if obstacle_y_pos == player_y_pos:
score += 1
tick += 1
if score >= hiscore+1:
hiscore += 1
# miss counting
if obstacle_y_pos == player_y_pos:
if obstacle_x_pos != player_x_pos:
miss += 1
life -= 1
if life < 1:
life = 3
score = 0
tick = 30
menu()
pygame.display.flip()
clock.tick(tick)
quit()
| zephyrdark/threelaneball | threelaneball.py | threelaneball.py | py | 5,631 | python | en | code | 0 | github-code | 13 |
25901550046 | import os
import shutil
import re
import codecs
import math
import xml.etree.ElementTree
import traceback
from qgis.PyQt.QtCore import QDir, QSize
from qgis.core import (QgsSingleSymbolRenderer,
QgsCategorizedSymbolRenderer,
QgsGraduatedSymbolRenderer,
QgsRuleBasedRenderer,
QgsSimpleMarkerSymbolLayer,
QgsSvgMarkerSymbolLayer,
QgsFontMarkerSymbolLayer,
QgsSimpleLineSymbolLayer,
QgsSimpleFillSymbolLayer,
QgsLinePatternFillSymbolLayer,
QgsSymbolLayerUtils)
from qgis2web.exp2js import compile_to_file
from qgis2web.utils import safeName, getRGBAColor, handleHiddenField, TYPE_MAP
def exportStyles(layers, folder, clustered, feedback):
    """Write one OpenLayers style .js file per vector layer into
    <folder>/styles, plus 16x16 legend icons into styles/legend.

    layers and clustered are parallel sequences (layer, cluster-flag).
    feedback receives HTML error messages via showFeedback().
    Returns the list of safe names for layers whose symbology uses map
    units, so callers can scale those styles with resolution.
    """
    stylesFolder = os.path.join(folder, "styles")
    QDir().mkpath(stylesFolder)
    legendFolder = os.path.join(stylesFolder, "legend")
    QDir().mkpath(legendFolder)
    # Vector-tile layers share one style file per tile URL: url -> JS source
    vtStyles = {}
    mapUnitLayers = []
    for count, (layer, cluster) in enumerate(zip(layers, clustered)):
        # Unique, filesystem/JS-safe identifier for this layer
        sln = safeName(layer.name()) + "_" + str(count)
        if layer.type() != layer.VectorLayer:
            continue
        pattern = ""
        setPattern = ""
        # Non-None when the layer comes from the VectorTilesReader plugin
        vts = layer.customProperty("VectorTilesReader/vector_tile_url")
        labelText = getLabels(layer, folder, sln)
        defs = "var size = 0;\nvar placement = 'point';"
        renderer = layer.renderer()
        layer_alpha = layer.opacity()
        try:
            # Dispatch on renderer type; each helper returns the JS style
            # expression plus pattern-fill setup and a map-units flag.
            if isinstance(renderer, QgsSingleSymbolRenderer):
                (style, pattern, setPattern, value,
                 useMapUnits) = singleSymbol(renderer, stylesFolder,
                                             layer_alpha, sln, legendFolder,
                                             layer, feedback)
            elif isinstance(renderer, QgsCategorizedSymbolRenderer):
                (style, pattern, setPattern, value, defs,
                 useMapUnits) = categorized(defs, sln, layer, renderer,
                                            legendFolder, stylesFolder,
                                            layer_alpha, feedback)
            elif isinstance(renderer, QgsGraduatedSymbolRenderer):
                (style, pattern, setPattern, value,
                 useMapUnits) = graduated(layer, renderer, legendFolder, sln,
                                          stylesFolder, layer_alpha, feedback)
            elif isinstance(renderer, QgsRuleBasedRenderer):
                (style, pattern, setPattern, value,
                 useMapUnits) = ruleBased(renderer, folder, stylesFolder,
                                          layer_alpha, sln, layer, feedback)
            else:
                # Unknown renderer type: fall back to a label-only style
                value = "''"
                style = """
        var style = [ new ol.style.Style({
            text: createTextStyle(feature, resolution, labelText, labelFont,
                                  labelFill, placement, bufferColor, bufferWidth)
        })];"""
                useMapUnits = False
            if useMapUnits:
                if vts is None:
                    mapUnitLayers.append(sln)
                else:
                    mapUnitLayers.append(safeName(vts))
            (labelRes, size, face, color,
             bufferColor, bufferWidth) = getLabelFormat(layer)
            if style != "":
                # Strip "Multi" so e.g. MultiPolygon styles as Polygon
                geom = TYPE_MAP[layer.wkbType()].replace("Multi", "")
                style = getStyle(style, cluster, labelRes, labelText,
                                 sln, size, face, color, bufferColor,
                                 bufferWidth, value, geom)
            else:
                style = "''"
        except Exception:
            # One bad layer must not abort the whole export: emit an
            # empty style and report the traceback through the feedback UI.
            style = "''"
            feedback.showFeedback("""Exception in layer {} with renderer {}:
<span style=\"color: red\">{}</span>""".format(layer.id(),
                                               renderer.dump(),
                                               traceback.format_exc()))
        if vts is None:
            # Ordinary layer: write its own styles/<sln>_style.js
            path = os.path.join(stylesFolder, sln + "_style.js")
            with codecs.open(path, "w", "utf-8") as f:
                f.write('''%(defs)s
%(pattern)s
var style_%(name)s = %(style)s;
%(setPattern)s''' %
                        {"defs": defs, "pattern": pattern, "name": sln,
                         "style": style, "setPattern": setPattern})
        elif style != "" and style != "''":
            # Vector-tile layer: accumulate a per-sub-layer branch keyed
            # by tile URL; all branches end up in one shared style file.
            new_vtStyle = defs
            new_vtStyle += "if (feature.get('layer') == "
            new_vtStyle += """'%s' && feature.getGeometry().getType() == '%s'){
            return %s(feature, resolution);
        }""" % (
                layer.name(), TYPE_MAP[layer.wkbType()].replace("Multi", ""),
                style)
            try:
                # KeyError on the first style for this URL
                old_vtStyles = vtStyles[vts]
                new_vtStyles = """%s
%s""" % (old_vtStyles, new_vtStyle)
            except Exception:
                new_vtStyles = new_vtStyle
            vtStyles[vts] = new_vtStyles
    # Emit one combined style file per vector-tile URL.
    # NOTE(review): the template below only uses %(name)s and %(style)s;
    # the defs/pattern/setPattern keys (left over from the last loop
    # iteration) are unused by it.
    for k, v in vtStyles.items():
        styleName = safeName(k)
        styleString = v
        path = os.path.join(stylesFolder, styleName + "_style.js")
        with codecs.open(path, "w", "utf-8") as f:
            f.write('''
var style_%(name)s = function(feature, resolution) {
    %(style)s;
}''' % {"defs": defs, "pattern": pattern, "name": styleName,
        "style": styleString, "setPattern": setPattern})
    return mapUnitLayers
def getLabels(layer, folder, sln):
    """Return the JS expression used as the label text for *layer*.

    Yields '""' (an empty JS string literal) when the layer has no
    usable labelling.  Expression-based labels are compiled into
    resources/qgis2web_expressions.js and referenced by function name;
    plain field labels become feature.get("<field>") with hidden fields
    prefixed q2wHide_.
    """
    labelling = layer.labeling()
    # Guard clauses: bail out early whenever no label field is configured.
    if labelling is None or not layer.labelsEnabled():
        return '""'
    settings = labelling.settings()
    if not (settings and settings.fieldName):
        return '""'
    field = settings.fieldName
    isExpression = str(
        layer.customProperty("labeling/isExpression")).lower() == "true"
    if isExpression:
        # Compile the QGIS expression to a JS function and call it.
        exprPath = os.path.join(folder, "resources",
                                "qgis2web_expressions.js")
        expression = layer.customProperty("labeling/fieldName")
        fnName = compile_to_file(expression, "label_%s" % sln,
                                 "OpenLayers3", exprPath)
        return ("%s(context)" % fnName).strip()
    # Plain field label; hidden editor widgets get the q2wHide_ prefix.
    fieldIndex = layer.fields().indexFromName(field)
    if layer.editorWidgetSetup(fieldIndex).type() == 'Hidden':
        field = "q2wHide_" + field
    return 'feature.get("%s")' % field.replace('"', '\\"')
def getLabelFormat(layer):
    """Extract label rendering parameters from a QGIS vector layer.

    Returns a tuple (labelRes, size, face, color, bufferColor,
    bufferWidth):
      labelRes    -- JS resolution-range condition appended to the label
                     visibility test ("" when scale visibility is off)
      size        -- font size in px (point size * 1.3; default 10)
      face        -- font-family fragment for a JS font string, e.g.
                     " \\'Arial\\'," (default "," when unavailable)
      color       -- label fill colour ("#000000" when unreadable)
      bufferColor -- label buffer colour ("" when buffer disabled)
      bufferWidth -- label buffer size (0 when buffer disabled)
    """
    size = 10
    bufferColor = ""
    bufferWidth = 0
    labelRes = ""
    # The labelling chain is unreliable (labeling() may be None, parts of
    # the settings may be missing), so fall back to black instead of
    # crashing.  Catch Exception, not bare except, so KeyboardInterrupt
    # and SystemExit still propagate.
    try:
        color = layer.labeling().settings().format().color().name()
    except Exception:
        color = "#000000"
    labelling = layer.labeling()
    face = ","
    if labelling is not None:
        palyr = labelling.settings()
        labelFormat = palyr.format()
        labelFont = labelFormat.font()
        face = labelFont.family()
        # Point size -> px scale factor used throughout qgis2web output
        size = labelFont.pointSize() * 1.3
        if face is not None:
            face = " \\'%s\\'," % face
        if palyr.scaleVisibility:
            # Convert QGIS scale denominators to OL resolutions
            # (39.37 in/m * 90.7 dpi); renamed locals so the builtins
            # min/max are no longer shadowed.
            scaleMin = float(palyr.minimumScale)
            scaleMax = float(palyr.maximumScale)
            if scaleMin != 0:
                scaleMin = 1 / ((1 / scaleMin) * 39.37 * 90.7)
            if scaleMax != 0:
                scaleMax = 1 / ((1 / scaleMax) * 39.37 * 90.7)
            labelRes = " && resolution > %(min)d " % {"min": scaleMax}
            labelRes += "&& resolution < %(max)d" % {"max": scaleMin}
        labelBuffer = labelFormat.buffer()
        if labelBuffer.enabled():
            bufferColor = labelBuffer.color().name()
            bufferWidth = labelBuffer.size()
    return (labelRes, size, face, color, bufferColor, bufferWidth)
def singleSymbol(renderer, stylesFolder, layer_alpha, sln, legendFolder,
                 layer, feedback):
    """Build the OL3 style for a single-symbol renderer and write its
    16x16 legend icon; returns (style, pattern, setPattern, value,
    useMapUnits)."""
    symb = renderer.symbol()
    (styleExpr, pattern, setPattern, useMapUnits) = getSymbolAsStyle(
        symb, stylesFolder, layer_alpha, renderer, sln, layer, feedback)
    styleExpr = "var style = " + styleExpr
    icon = QgsSymbolLayerUtils.symbolPreviewPixmap(symb, QSize(16, 16))
    icon.save(os.path.join(legendFolder, "%s.png" % sln))
    # A single-symbol layer has no classification value.
    return (styleExpr, pattern, setPattern, 'var value = ""', useMapUnits)
def categorized(defs, sln, layer, renderer, legendFolder, stylesFolder,
                layer_alpha, feedback):
    """Build the OL3 style for a categorized renderer.

    Appends a JS ``categories_<sln>`` switch function to *defs* mapping
    each category value to its symbol style, writes one legend icon per
    category, and returns
    (style, pattern, setPattern, value, defs, useMapUnits).
    NOTE(review): pattern/setPattern reflect only the *last* category's
    symbol, and are unbound if the renderer has no categories.
    """
    # cluster = False
    defs += """
function categories_%s(feature, value, size, resolution, labelText,
                       labelFont, labelFill, bufferColor, bufferWidth,
                       placement) {
                switch(value.toString()) {""" % sln
    cats = []
    useAnyMapUnits = False
    for cnt, cat in enumerate(renderer.categories()):
        # One 16x16 legend icon per category: <sln>_<n>.png
        legendIcon = QgsSymbolLayerUtils.symbolPreviewPixmap(cat.symbol(),
                                                             QSize(16, 16))
        legendIcon.save(os.path.join(legendFolder,
                                     sln + "_" + str(cnt) + ".png"))
        # Empty/NULL category becomes the switch's default branch;
        # single quotes are escaped for the JS string literal.
        if (cat.value() is not None and cat.value() != ""):
            categoryStr = "case '%s':" % str(cat.value()).replace("'", "\\'")
        else:
            categoryStr = "default:"
        (style, pattern, setPattern,
         useMapUnits) = (getSymbolAsStyle(cat.symbol(), stylesFolder,
                                          layer_alpha, renderer, sln, layer,
                                          feedback))
        if useMapUnits:
            useAnyMapUnits = True
        categoryStr += '''
                    return %s;
                    break;''' % style
        cats.append(categoryStr)
    defs += "\n".join(cats) + "}};"
    style = """
var style = categories_%s(feature, value, size, resolution, labelText,
                          labelFont, labelFill, bufferColor,
                          bufferWidth, placement)""" % sln
    value = getValue(layer, renderer)
    return (style, pattern, setPattern, value, defs, useAnyMapUnits)
def graduated(layer, renderer, legendFolder, sln, stylesFolder, layer_alpha,
              feedback):
    """Build the OL3 style for a graduated renderer.

    Writes a legend icon per class range and chains the range tests into
    an if / else-if JS snippet.  Returns (style, pattern, setPattern,
    value, useMapUnits); pattern/setPattern reflect the last range's
    symbol (and, as before, are undefined when there are no ranges).
    """
    pieces = []
    useAnyMapUnits = False
    for idx, rng in enumerate(renderer.ranges()):
        # One 16x16 legend icon per class: <sln>_<idx>.png
        icon = QgsSymbolLayerUtils.symbolPreviewPixmap(rng.symbol(),
                                                       QSize(16, 16))
        icon.save(os.path.join(legendFolder, "%s_%d.png" % (sln, idx)))
        (symbolstyle, pattern, setPattern,
         useMapUnits) = getSymbolAsStyle(rng.symbol(), stylesFolder,
                                         layer_alpha, renderer, sln, layer,
                                         feedback)
        # Every test after the first is chained with " else ".
        prefix = " else " if pieces else ""
        pieces.append("""%sif (value >= %f && value <= %f) {
                    style = %s
                    }""" % (prefix, rng.lowerValue(), rng.upperValue(),
                            symbolstyle))
        useAnyMapUnits = useAnyMapUnits or useMapUnits
    return ("".join(pieces), pattern, setPattern,
            getValue(layer, renderer), useAnyMapUnits)
def ruleBased(renderer, folder, stylesFolder, layer_alpha, sln, layer,
              feedback):
    """Build the OL3 style for a rule-based renderer.

    Each rule's filter expression is compiled to a JS function in
    resources/qgis2web_expressions.js and wired into an if / else-if
    chain inside ``rules_<sln>``; an ELSE rule supplies the final else
    branch (default ``[]``).  Returns
    (style, pattern, setPattern, value, useMapUnits).
    NOTE(review): pattern/setPattern come from the last rule's symbol
    only, and are unbound when the renderer has no rules.
    """
    # cluster = False
    template = """
function rules_%s(feature, value) {
    var context = {
        feature: feature,
        variables: {}
    };
    // Start of if blocks and style check logic
    %s
    else {
        return %s;
    }
}
var style = rules_%s(feature, value);
    """
    elsejs = "[]"
    js = ""
    root_rule = renderer.rootRule()
    rules = root_rule.children()
    expFile = os.path.join(folder, "resources", "qgis2web_expressions.js")
    ifelse = "if"
    useAnyMapUnits = False
    for count, rule in enumerate(rules):
        symbol = rule.symbol()
        (styleCode, pattern, setPattern,
         useMapUnits) = getSymbolAsStyle(symbol, stylesFolder, layer_alpha,
                                         renderer, sln, layer, feedback)
        name = "".join((sln, "rule", str(count)))
        exp = rule.filterExpression()
        # ELSE rules have no filter; remember their style for the final
        # else branch and emit no if-test for them.
        if rule.isElse():
            elsejs = styleCode
            continue
        # Compile the QGIS expression to JS; returns the JS function name.
        name = compile_to_file(exp, name, "OpenLayers3", expFile)
        js += """
        %s (%s(context)) {
            return %s;
        }
        """ % (ifelse, name, styleCode)
        js = js.strip()
        ifelse = "else if"
        if useMapUnits:
            useAnyMapUnits = True
    value = ("var value = '';")
    style = template % (sln, js, elsejs, sln)
    return (style, pattern, setPattern, value, useAnyMapUnits)
def getValue(layer, renderer):
    """Return the JS statement that reads the renderer's classification
    attribute (as normalised by handleHiddenField) from the current
    feature."""
    fieldName = handleHiddenField(layer, renderer.classAttribute())
    return 'var value = feature.get("%s");' % fieldName
def getStyle(style, cluster, labelRes, labelText, sln, size,
             face, color, bufferColor, bufferWidth, value, geom):
    """Wrap a symbol style expression in a complete OL3 style callback.

    Returns the JS source of a ``function(feature, resolution)`` that
    computes ``value`` (via the *value* snippet), sets up the label
    variables, and evaluates *style* (a JS snippet assigning ``style``).
    *cluster* selects the cluster-aware variant, which labels with the
    feature count and grows the symbol with cluster size.  *sln* is kept
    for interface compatibility (it previously named a per-layer style
    cache).
    """
    # Line features get OL's 'line' label placement, everything else 'point'.
    placement = "point"
    if geom == "LineString":
        placement = "line"
    this_style = '''function(feature, resolution){
    var context = {
        feature: feature,
        variables: {}
    };
    %(value)s
    var labelText = "";
    ''' % {
        "value": value}
    if cluster:
        # Cluster variant: single-feature clusters are labelled like a
        # normal feature; larger clusters show the count and scale
        # logarithmically.
        this_style += '''var clusteredFeatures = feature.get("features");
    var labelFont = "%(size)spx%(face)s sans-serif";
    var labelFill = "%(labelFill)s";
    var bufferColor = "%(bufferColor)s";
    var bufferWidth = %(bufferWidth)s;
    size = clusteredFeatures.length;
    var textAlign = "center";
    var offsetX = 0;
    var offsetY = 0;
    if (size == 1) {
        textAlign = "left"
        offsetX = 8
        offsetY = 3
        var feature = clusteredFeatures[0];
        if (%(label)s !== null%(labelRes)s) {
            labelText = String(%(label)s);
        }
        key = value + "_" + labelText
    } else {
        labelText = size.toString()
        size = 2*(Math.log(size)/ Math.log(2))
    }
    %(style)s;\n''' % {"style": style, "labelRes": labelRes,
                       "label": labelText, "size": size, "face": face,
                       "labelFill": color, "bufferColor": bufferColor,
                       "bufferWidth": bufferWidth}
    else:
        this_style += '''size = 0;
    var labelFont = "%(size)spx%(face)s sans-serif";
    var labelFill = "%(labelFill)s";
    var bufferColor = "%(bufferColor)s";
    var bufferWidth = %(bufferWidth)s;
    var textAlign = "left";
    var offsetX = 8;
    var offsetY = 3;
    var placement = '%(placement)s';
    if (%(label)s !== null%(labelRes)s) {
        labelText = String(%(label)s);
    }
    %(style)s;\n''' % {"style": style, "placement": placement,
                       "labelRes": labelRes, "label": labelText,
                       "size": size, "face": face, "labelFill": color,
                       "bufferColor": bufferColor,
                       "bufferWidth": bufferWidth}
    # The closing fragment has no format placeholders; the old
    # "%"-formatting with an unused mapping here was dead code.
    this_style += '''
    return style;
}'''
    return this_style
def getSymbolAsStyle(symbol, stylesFolder, layer_transparency, renderer, sln,
layer, feedback):
styles = {}
useMapUnits = False
if layer_transparency == 0:
alpha = symbol.alpha()
else:
alpha = layer_transparency
for i in range(symbol.symbolLayerCount()):
sl = symbol.symbolLayer(i)
props = sl.properties()
pattern = ""
setPattern = ""
if isinstance(sl, QgsSimpleMarkerSymbolLayer):
color = getRGBAColor(props["color"], alpha)
borderColor = getRGBAColor(props["outline_color"], alpha)
borderWidth = props["outline_width"]
sizeUnits = props["size_unit"]
size = None
if sizeUnits != "MapUnit":
size = sl.size() * 2
try:
shape = sl.shape()
except Exception:
shape = sl.name()
try:
if shape == 0 or shape == "square":
style, useMapUnits = getSquare(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 1 or shape == "diamond":
style, useMapUnits = getDiamond(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 2 or shape == "pentagon":
style, useMapUnits = getPentagon(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 3 or shape == "hexagon":
style, useMapUnits = getHexagon(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 4 or shape == 5 or shape == "triangle":
style, useMapUnits = getTriangle(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 6 or shape == "star":
style, useMapUnits = getStar(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 9 or shape == "cross":
style, useMapUnits = getCross(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 11 or shape == "cross2":
style, useMapUnits = getCross2(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
elif shape == 12 or shape == "line":
style, useMapUnits = getLine(color, borderColor,
borderWidth, size, props)
style = "text: %s" % style
else:
style, useMapUnits = getCircle(color, borderColor,
borderWidth, size, props)
style = "image: %s" % style
except Exception:
style, useMapUnits = getCircle(color, borderColor, borderWidth,
size, props)
style = "image: %s" % style
elif isinstance(sl, QgsSvgMarkerSymbolLayer):
path = os.path.join(stylesFolder, os.path.basename(sl.path()))
svg = xml.etree.ElementTree.parse(sl.path()).getroot()
try:
svgWidth = svg.attrib["width"]
svgWidth = re.sub("px", "", svgWidth)
svgWidth = re.sub("mm", "", svgWidth)
except Exception:
svgWidth = "5"
try:
svgHeight = svg.attrib["height"]
svgHeight = re.sub("px", "", svgHeight)
svgHeight = re.sub("mm", "", svgHeight)
except Exception:
svgHeight = "5"
if symbol.dataDefinedAngle().isActive():
if symbol.dataDefinedAngle().useExpression():
rot = "0"
else:
rot = "feature.get("
rot += symbol.dataDefinedAngle().expressionOrField()
rot += ") * 0.0174533"
else:
rot = str(sl.angle() * 0.0174533)
shutil.copy(sl.path(), path)
style = ("image: %s" %
getIcon("styles/" + os.path.basename(sl.path()),
sl.size(), svgWidth, svgHeight, rot))
elif isinstance(sl, QgsFontMarkerSymbolLayer):
char = sl.character()
color = getRGBAColor(props["color"], alpha)
style = """text: new ol.style.Text({
text: '%s',
%s})""" % (char, getFillStyle(color, props))
elif isinstance(sl, QgsSimpleLineSymbolLayer):
color = getRGBAColor(props["line_color"], alpha)
line_width = props["line_width"]
line_style = props["line_style"]
line_units = props["line_width_unit"]
lineCap = sl.penCapStyle()
lineJoin = sl.penJoinStyle()
style, useMapUnits = getStrokeStyle(color, line_style, line_width,
line_units, lineCap, lineJoin)
elif isinstance(sl, QgsSimpleFillSymbolLayer):
fillColor = getRGBAColor(props["color"], alpha)
borderColor = getRGBAColor(props["outline_color"], alpha)
borderStyle = props["outline_style"]
borderWidth = props["outline_width"]
line_units = props["outline_width_unit"]
try:
lineCap = sl.penCapStyle()
lineJoin = sl.penJoinStyle()
except Exception:
lineCap = 0
lineJoin = 0
symbolStyles = []
style = ""
(stroke, useMapUnits) = getStrokeStyle(borderColor, borderStyle,
borderWidth, line_units,
lineCap, lineJoin)
if stroke != "":
symbolStyles.append(stroke)
fill = getFillStyle(fillColor, props)
if fill != "":
symbolStyles.append(fill)
style = ",".join(symbolStyles)
elif isinstance(sl, QgsLinePatternFillSymbolLayer):
weight = sl.subSymbol().width()
spaceWeight = sl.distance()
color = sl.color().name()
angle = 360 - sl.lineAngle()
pattern = """
var fill_%s = new ol.style.Fill();""" % sln
style = """
fill: fill_%s""" % sln
setPattern = """
fill_%s.setColor(stripe(%s, %s, %s, '%s'));""" % (sln, weight, spaceWeight,
angle, color)
else:
color = getRGBAColor(props["color"], alpha)
sizeUnits = props["size_unit"]
props['outline_style'] = "no"
size = None
if sizeUnits != "MapUnit":
size = sl.size() * 2
style, useMapUnits = getCircle(color, None, None, size, props)
style = "image: %s" % style
feedback.showFeedback(
"""Layer {}: replacing symbol layer
<span style=\"color: red\">{}</span> with
circle.""".format(layer.id(), sl.layerType()))
style = ""
if renderer.usingSymbolLevels():
k = sl.renderingPass()
else:
k = i
if style != "":
style += ","
ts = ""
vts = layer.customProperty("VectorTilesReader/vector_tile_url")
if vts is None:
ts = """
text: createTextStyle(feature, resolution, labelText, labelFont,
labelFill, placement, bufferColor,
bufferWidth)"""
styles[k] = '''new ol.style.Style({
%s%s
})''' % (style, ts)
return ("[ %s]" % ",".join(styles[s] for s in sorted(styles.keys())),
pattern, setPattern, useMapUnits)
def getSquare(color, borderColor, borderWidth, size, props):
    """Return (JS for an OpenLayers square marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        # Fix: useMapUnits was left unassigned on this branch, so the
        # return below raised UnboundLocalError whenever the outline was
        # disabled. Mirror getCircle/getCross, which set it to None.
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    return ("""new ol.style.RegularShape({radius: %s + size, points: 4,
                    angle: Math.PI/4, %s %s})""" % (size, stroke,
                                                    getFillStyle(color, props)),
            useMapUnits)
def getDiamond(color, borderColor, borderWidth, size, props):
    """Return (JS for an OpenLayers diamond marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        # Fix: previously unbound on this branch -> UnboundLocalError at
        # return when the outline is disabled (matches getCircle).
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    return ("""new ol.style.RegularShape({radius: %s + size, points: 4,
                %s %s})""" % (size, stroke, getFillStyle(color, props)),
            useMapUnits)
def getPentagon(color, borderColor, borderWidth, size, props):
    """Return (JS for an OpenLayers pentagon marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        # Fix: previously unbound on this branch -> UnboundLocalError at
        # return when the outline is disabled (matches getCircle).
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    return ("""new ol.style.RegularShape({radius: %s + size, points: 5,
                %s %s})""" % (size, stroke, getFillStyle(color, props)),
            useMapUnits)
def getHexagon(color, borderColor, borderWidth, size, props):
    """Return (JS for an OpenLayers hexagon marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        # Fix: previously unbound on this branch -> UnboundLocalError at
        # return when the outline is disabled (matches getCircle).
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    return ("""new ol.style.RegularShape({radius: %s + size, points: 6,
                %s %s})""" % (size, stroke, getFillStyle(color, props)),
            useMapUnits)
def getTriangle(color, borderColor, borderWidth, size, props):
    """Return (JS for an OpenLayers triangle marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        # Fix: previously unbound on this branch -> UnboundLocalError at
        # return when the outline is disabled (matches getCircle).
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    return ("""new ol.style.RegularShape({radius: %s + size, points: 3,
                %s %s})""" % (size, stroke, getFillStyle(color, props)),
            useMapUnits)
def getStar(color, borderColor, borderWidth, size, props):
    """Return (JS for an OpenLayers 5-point star marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        # Fix: previously unbound on this branch -> UnboundLocalError at
        # return when the outline is disabled (matches getCircle).
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    # radius2 (inner radius) at half the outer radius makes the star shape.
    return ("""new ol.style.RegularShape({radius: %s + size, points: 5,
                    radius2: %s, %s %s})""" % (size, size / 2, stroke,
                                               getFillStyle(color, props)),
            useMapUnits)
def getCircle(color, borderColor, borderWidth, size, props):
    """Return (JS for an ol.style.Circle marker, useMapUnits flag).

    No stroke is emitted when the outline style is "no"; otherwise the
    stroke snippet gets a trailing comma so it can precede the fill
    inside the generated JS object literal.
    """
    if props['outline_style'] != "no":
        unit = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             unit, 0, 0)
        stroke += ","
    else:
        stroke, useMapUnits = "", None
    return ("""new ol.style.Circle({radius: %s + size,
                %s %s})""" % (size, stroke, getFillStyle(color, props)),
            useMapUnits)
def getCross(color, borderColor, borderWidth, size, props):
    """Return (JS for a 4-point "+" cross marker, useMapUnits flag)."""
    stroke, useMapUnits = "", None
    if props['outline_style'] != "no":
        unit = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             unit, 0, 0)
        stroke += ","
    # radius2: 0 collapses the inner radius, turning the 4-point star
    # into a plus-shaped cross.
    return ("""new ol.style.RegularShape({radius: %s + size, points: 4,
                    radius2: 0, %s %s})""" % (size, stroke,
                                              getFillStyle(color, props)), useMapUnits)
def getCross2(color, borderColor, borderWidth, size, props):
    """Return (JS for a diagonal "x" cross marker, useMapUnits flag)."""
    if props['outline_style'] == "no":
        stroke = ""
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
        stroke += ","
    # radius2: 0 plus a 45-degree rotation renders the cross as an "x".
    return ("""new ol.style.RegularShape({radius: %s + size,
                                          points: 4,
                                          radius2: 0,
                                          angle: Math.PI / 4,
                                          %s
                                          %s})""" % (size, stroke,
                                                     getFillStyle(color,
                                                                  props)),
            useMapUnits)
def getLine(color, borderColor, borderWidth, size, props):
    """Return (JS for a vertical-line marker, useMapUnits flag).

    Rendered as a rotated U+2502 box-drawing glyph inside an
    ol.style.Text, since OpenLayers has no native line marker shape.
    """
    if props['outline_style'] == "no":
        stroke = ""
        useMapUnits = None
    else:
        line_units = props["outline_width_unit"]
        stroke, useMapUnits = getStrokeStyle(borderColor, "", borderWidth,
                                             line_units, 0, 0)
    # angle comes from the symbol layer (degrees; converted to radians in JS)
    rot = props["angle"]
    return ("""new ol.style.Text({
        rotation: %s * Math.PI/180,
        text: '\u2502',  %s})""" % (rot, stroke), useMapUnits)
def getIcon(path, size, svgWidth, svgHeight, rot):
    """Return JS for an ol.style.Icon built from a copied SVG marker.

    The QGIS size (mm-ish units) is converted to pixels with the same
    3.8 factor used elsewhere in this module; the anchor is the icon
    centre and the scale maps the SVG's declared width onto that size.
    """
    px = math.floor(float(size) * 3.8)   # marker size in pixels
    half = px / 2                        # centre anchor (truncated by %d below)
    scale_factor = str(float(px) / float(svgWidth))
    return '''new ol.style.Icon({
         imgSize: [%(w)s, %(h)s],
         scale: %(scale)s,
         anchor: [%(a)d, %(a)d],
         anchorXUnits: "pixels",
         anchorYUnits: "pixels",
         rotation: %(rot)s,
         src: "%(path)s"
         })''' % {"w": svgWidth, "h": svgHeight,
                  "scale": scale_factor, "rot": rot,
                  "s": px, "a": half,
                  "path": path.replace("\\", "\\\\")}
def getStrokeStyle(color, dashed, width, line_units, linecap, linejoin):
    """Translate QGIS pen settings into an ol.style.Stroke JS snippet.

    Returns ("", False) when the pen style is "no". Otherwise returns
    (strokeString, useMapUnits), where useMapUnits is True when the
    width is expressed in map units (emitted as a JS m2px() call)
    instead of being converted to pixels.
    """
    if dashed == "no":
        return ("", False)
    useMapUnits = line_units == "MapUnit"
    if useMapUnits:
        width = "m2px(%s)" % width
    else:
        width = str(int(float(width) * 3.8))
    # QGIS dash keywords -> OpenLayers lineDash arrays.
    dash = "[%s]" % (dashed.replace("dash", "10,5")
                           .replace("dot", "1,5")
                           .replace("solid", "")
                           .replace(" ", ","))
    if dash in ("[]", "[no]"):
        dash = "null"
    # Qt pen cap/join codes -> canvas keywords (default "round").
    capString = {0: "butt", 16: "square"}.get(linecap, "round")
    joinString = {0: "miter", 64: "bevel"}.get(linejoin, "round")
    strokeString = ("stroke: new ol.style.Stroke({color: %s, lineDash: %s, " %
                    (color, dash))
    strokeString += ("lineCap: '%s', lineJoin: '%s', width: %s})" %
                     (capString, joinString, width))
    return (strokeString, useMapUnits)
def getFillStyle(color, props):
    """Return an ol.style.Fill JS snippet, or "" for a "no"-style fill.

    Any failure to read props["style"] (missing key, odd props object)
    is treated as "fill enabled", matching the original behaviour.
    """
    try:
        no_fill = props["style"] == "no"
    except Exception:
        no_fill = False
    if no_fill:
        return ""
    return "fill: new ol.style.Fill({color: %s})" % color
| tomchadwin/qgis2web | qgis2web/olStyleScripts.py | olStyleScripts.py | py | 32,071 | python | en | code | 494 | github-code | 13 |
715187830 | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 10 14:04:15 2023
@author: fzbri
"""
import pandas as pd
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.utils import resample
#from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
def get_missing_values(df):
    """Split *df* into complete and incomplete row subsets.

    Args:
        df (DataFrame): The input DataFrame.

    Returns:
        tuple: (df_complete, df_nan, predictors) where
            - df_complete has only rows with no missing values,
            - df_nan has the rows containing at least one NaN,
            - predictors is the fixed column list used for the
              regression-based imputation downstream.
    """
    incomplete_mask = df.isna().any(axis=1)
    df_complete = df.dropna()
    df_nan = df[incomplete_mask]
    # Columns the linear imputation model is trained on.
    predictors = ['person_age', 'person_income', 'loan_amnt', 'loan_status']
    return df_complete, df_nan, predictors
def fill_missing_values(df, df_complete, df_nan, predictors, col):
    """
    Fills missing values in a DataFrame column using regression-based imputation.

    Args:
        df (DataFrame): The original DataFrame (modified in place).
        df_complete (DataFrame): Subset of the original DataFrame with complete rows (no missing values).
        df_nan (DataFrame): Subset of the original DataFrame with rows containing missing values.
        predictors (list): List of column names representing the predictors for the regression model.
        col (str): The name of the column to fill missing values for.

    Returns:
        None: This function modifies the input DataFrame in-place by filling missing values in the specified column.
    """
    # train the regression model using cross-validation
    regression_model = LinearRegression()
    #scores = cross_val_score(regression_model, df_complete[predictors], df_complete[col], cv=5)
    # fit the regression model on the complete observations
    regression_model.fit(df_complete[predictors], df_complete[col])
    # predict the missing values using the trained model
    # NOTE(review): writing to df_nan (a slice of df) can raise pandas'
    # SettingWithCopyWarning; also assumes the predictor columns in
    # df_nan contain no NaNs themselves — confirm upstream guarantees.
    df_nan[col+'_predicted'] = regression_model.predict(df_nan[predictors])
    # replace the missing values with the predicted values
    # fillna with a Series aligns on the index, so df_nan must keep the
    # original row index of df for this to target the right rows.
    df[col].fillna(value=df_nan[col+'_predicted'], inplace=True)
def clean_extreme_values(df):
    """Replace known sentinel/outlier values with NaN, then drop those rows.

    Args:
        df (DataFrame): The input DataFrame (mutated in place before the
            cleaned copy is returned).

    Returns:
        DataFrame: *df* without the rows that contained the sentinel
        values (emp_length 123; age 123 or 144) or any other NaN.
    """
    sentinel_values = (
        ('person_emp_length', (123,)),
        ('person_age', (144, 123)),
    )
    for column, bad_values in sentinel_values:
        for bad in bad_values:
            df.loc[df[column] == bad, column] = np.nan
    return df.dropna()
def oversample_data(df):
    """Balance the loan_status classes by oversampling the minority class.

    Rows with loan_status == 1 are resampled with replacement (fixed
    random_state=42 for reproducibility) until they match the size of
    the loan_status == 0 majority.

    Args:
        df (DataFrame): The input DataFrame.

    Returns:
        DataFrame: Majority rows plus the oversampled minority rows.
    """
    majority = df[df.loan_status == 0]
    minority = df[df.loan_status == 1]
    minority_upsampled = resample(minority,
                                  replace=True,
                                  n_samples=len(majority),
                                  random_state=42)
    return pd.concat([majority, minority_upsampled])
def split_scale_data(df):
    """Split *df* into train/test sets and standardize the features.

    Args:
        df (DataFrame): The input DataFrame containing a 'loan_status'
            target column.

    Returns:
        tuple: (X, X_train, X_test, y_train, y_test, scaler) where X is
        the unscaled feature frame, the train/test arrays are scaled
        with a StandardScaler fitted on the training split only, and
        scaler is that fitted StandardScaler.
    """
    features = df.drop('loan_status', axis=1)
    target = df['loan_status']
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42)
    scaler = StandardScaler()
    # Fit on the training data only to avoid leaking test statistics.
    X_train = scaler.fit_transform(X_train)
    X_test = scaler.transform(X_test)
    return features, X_train, X_test, y_train, y_test, scaler
| bricha-fz/ml_credit_risk_xai | data_processing.py | data_processing.py | py | 5,396 | python | en | code | 0 | github-code | 13 |
40649434695 | import model_q3
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import ModelCheckpoint
import numpy as np
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from shutil import copyfile
import os
# Input geometry and evaluation parameters for the pretrained VGG model.
img_width, img_height = 224, 224
valid_data_dir = '/data/datasets/rbonatti/data_processed/3'
batch_size = 1
val_samples=300
if __name__ == "__main__":
    # Load the network with the chosen checkpoint (older run kept for reference).
    # network = model_q3.VGG_16('/data/datasets/rbonatti/ml_weights2/weights.25-2.47.hdf5')
    network = model_q3.VGG_16('/data/datasets/rbonatti/ml_weights3/weights.16-8.53.hdf5')
    adam = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    network.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])
    # prepare data augmentation configuration (rescale only; no augmentation,
    # shuffle=False so predictions stay aligned with the file order)
    datagen = ImageDataGenerator(rescale=1. / 255)
    valid_generator = datagen.flow_from_directory(
        valid_data_dir,
        target_size=(img_height, img_width),
        batch_size=batch_size,
        class_mode=None,
        color_mode='grayscale',
        shuffle=False)
    # NOTE(review): val_samples is the legacy Keras 1 argument name for
    # predict_generator — confirm the installed Keras version accepts it.
    predictions=network.predict_generator(
        generator=valid_generator,
        val_samples=val_samples
        )
    # Reduce the prediction vectors to 100 dims before clustering.
    pca=PCA(n_components=100)
    pred_new=pca.fit_transform(predictions)
    # Elbow-style sweep: record KMeans scores for k = 1..20.
    scores=np.zeros(20)
    for i in range(20):
        kmeans = KMeans(n_clusters=i+1).fit(pred_new)
        scores[i]=kmeans.score(pred_new)
    # Final clustering with the manually chosen k = 5.
    kmeans=KMeans(n_clusters=5).fit(pred_new)
    res=kmeans.predict(pred_new)
    # copy files to respective clusters to see how things are
    for i in range(300):
        n=str(i+1)
        filename_src='/data/datasets/rbonatti/data_processed/3/all/'
        filename_src+=n.zfill(5)+'.jpg'
        cluster=res[i]
        directory='/data/datasets/rbonatti/data_processed/3_clusters/'+str(cluster)
        if not os.path.exists(directory):
            os.makedirs(directory)
        filename_dst='/data/datasets/rbonatti/data_processed/3_clusters/'+str(cluster)+'/'+n.zfill(5)+'.jpg'
        copyfile(filename_src, filename_dst)
    # Persist the raw prediction matrix for later inspection.
    np.savetxt('/data/datasets/rbonatti/ml_prediction_q3.out', predictions, delimiter=',')
10111527115 | import asyncio
import discord
from discord import Embed, ApplicationContext, Interaction, Colour, MISSING
from discord.ui import View, Button
import czbook
from bot import BaseCog, Bot
from utils.discord import get_or_fetch_message_from_reference, context_info
class InfoCog(BaseCog):
    """Cog exposing the /info slash command to look up a czbooks novel."""
    def __init__(self, bot: Bot) -> None:
        super().__init__(bot)
    @discord.slash_command(
        guild_only=True,
        name="info",
        description="取得書本資訊",
    )
    @discord.option(
        "link",
        str,
        description="欲查詢的書本連結",
    )
    async def info(self, ctx: ApplicationContext, link: str):
        """Fetch the novel behind *link* and reply with its overview embed."""
        await ctx.defer()
        # Accept either a full URL or a bare book code.
        code = czbook.utils.get_code(link) or link
        try:
            novel = await self.bot.db.get_or_fetch_novel(code)
            await ctx.respond(
                embed=novel.overview_embed(),
                view=InfoView(self.bot),
            )
        except czbook.NotFoundError:
            # Unknown book: reply with an error embed instead of the view.
            await ctx.respond(
                embed=Embed(title="未知的書本", color=discord.Color.red()),
            )
    @info.error
    async def on_info_error(self, ctx: ApplicationContext, error):
        """Catch-all error handler for /info; replies ephemerally."""
        await ctx.respond(
            embed=Embed(title="發生未知的錯誤", color=discord.Color.red()),
            ephemeral=True,
        )
    @discord.Cog.listener()
    async def on_ready(self):
        # Re-register the persistent view so its buttons survive restarts.
        self.bot.add_view(InfoView(self.bot))
class InfoView(View):
    """Persistent button view attached to /info replies.

    Row 0 switches the embed between overview / chapters / comments;
    row 1 starts (or, via a secondary view, cancels) content download.
    timeout=None + fixed custom_ids make the view survive bot restarts.
    """
    def __init__(self, bot: Bot):
        super().__init__(timeout=None)
        self.bot = bot
        self.overview_button = Button(
            custom_id="overview_button",
            label="書本總覽",
            row=0,
            disabled=True,
        )
        self.overview_button.callback = self.overview_button_callback
        self.add_item(self.overview_button)
        self.chapter_button = Button(
            custom_id="chapter_button",
            label="章節列表",
            row=0,
        )
        self.chapter_button.callback = self.chapter_button_callback
        self.add_item(self.chapter_button)
        self.comment_button = Button(
            custom_id="comment_button",
            label="觀看評論",
            row=0,
        )
        self.comment_button.callback = self.comment_button_callback
        self.add_item(self.comment_button)
        self.get_content_button = Button(
            custom_id="get_content_button",
            label="取得內文",
            row=1,
        )
        self.get_content_button.callback = self.get_content_button_callback
        self.add_item(self.get_content_button)
        # Separate one-button view shown while a download is in progress.
        cancel_get_content_button = Button(
            custom_id="cancel_get_content_button",
            label="取消擷取",
            style=discord.ButtonStyle.red,
        )
        cancel_get_content_button.callback = self.cancel_get_content
        self.cancel_get_content_view = View(cancel_get_content_button, timeout=None)
    async def overview_button_callback(self, interaction: Interaction):
        """Switch the message embed to the novel overview."""
        self.overview_button.disabled = True
        self.chapter_button.disabled = False
        self.comment_button.disabled = False
        # Presumably restores the download button's state from the live
        # message, since persistent views are re-created — confirm.
        self.get_content_button.disabled = interaction.message.components[-1].children[0].disabled
        await interaction.response.defer()
        novel = await self.bot.db.get_or_fetch_novel(
            czbook.utils.get_code(interaction.message.embeds[0].url)
        )
        await interaction.message.edit(embed=novel.overview_embed(), view=self)
    async def chapter_button_callback(self, interaction: Interaction):
        """Switch the message embed to the chapter list."""
        self.overview_button.disabled = False
        self.chapter_button.disabled = True
        self.comment_button.disabled = False
        self.get_content_button.disabled = interaction.message.components[-1].children[0].disabled
        await interaction.response.defer()
        novel = await self.bot.db.get_or_fetch_novel(
            czbook.utils.get_code(interaction.message.embeds[0].url)
        )
        await interaction.message.edit(embed=novel.chapter_embed(), view=self)
    async def comment_button_callback(self, interaction: Interaction):
        """Switch the message embed to the (async-fetched) comments."""
        self.overview_button.disabled = False
        self.chapter_button.disabled = False
        self.comment_button.disabled = True
        self.get_content_button.disabled = interaction.message.components[-1].children[0].disabled
        await interaction.response.defer()
        novel = await self.bot.db.get_or_fetch_novel(
            czbook.utils.get_code(interaction.message.embeds[0].url)
        )
        await interaction.message.edit(embed=await novel.comment_embed(), view=self)
    async def get_content_button_callback(self, interaction: Interaction):
        """Download the novel text, streaming progress into an embed."""
        # NOTE(review): this first assignment is immediately overwritten
        # by the next line — looks like dead code.
        self.get_content_button.disabled = interaction.message.components[-1].children[0].disabled
        self.get_content_button.disabled = True
        await interaction.response.edit_message(view=self)
        novel = await self.bot.db.get_or_fetch_novel(
            czbook.utils.get_code(interaction.message.embeds[0].url)
        )
        # Cached content: send the file straight away, no progress loop.
        if novel.content_cache:
            return await interaction.followup.send(
                content=f"- 書名: {novel.title}\n- 總字數: `{novel.word_count}`字",
                file=discord.File(novel.filelike_content, filename=f"{novel.id}.txt"),
            )
        self.bot.logger.info(f"{context_info(interaction)}: get content of {novel.title}")
        msg = await interaction.message.reply(
            embed=Embed(
                title="擷取內文中...",
                description="正在計算進度...",
            ),
            view=self.cancel_get_content_view,
        )
        stats = novel.get_content()
        # Track this progress message so cancel_get_content can find it.
        self.bot.get_content_msg.add(msg.id)
        while True:
            await asyncio.sleep(1)
            if stats.finished:
                break
            # Our id was discarded -> the download was cancelled elsewhere.
            if msg.id not in self.bot.get_content_msg:
                return
            await msg.edit(
                embed=Embed(
                    title="擷取內文中",
                    description=stats.get_progress(),
                    # Progress colour fades red -> green as percentage grows.
                    color=Colour.from_rgb(
                        min(int(510 * (1 - stats.percentage)), 255),
                        min(int(510 * stats.percentage), 255),
                        0,
                    ),
                ),
                # Hide the cancel button when nearly done (eta < 2s).
                view=None if stats.eta < 2 else MISSING,
            )
        self.bot.db.add_or_update_cache(novel)
        await msg.edit(
            content=f"- 書名: {novel.title}\n- 總字數: `{novel.word_count}`字",
            file=discord.File(novel.filelike_content, filename=f"{novel.id}.txt"),
            embed=None,
            view=None,
        )
    async def cancel_get_content(self, interaction: Interaction):
        """Cancel an in-flight download started from the referenced message."""
        message = await get_or_fetch_message_from_reference(interaction.message)
        novel = await self.bot.db.get_or_fetch_novel(czbook.utils.get_code(message.embeds[0].url))
        # Too late to cancel if the content already finished downloading.
        if novel.content_cache:
            return
        self.bot.get_content_msg.discard(interaction.message.id)
        if not self.bot.get_content_msg:
            # NOTE(review): "cencel" spelling matches the czbook API name
            # as called here — confirm against the library.
            novel.cencel_get_content()
        self.bot.logger.info(f"{context_info(interaction)}: cancel get content of {novel.title}")
        await interaction.response.edit_message(
            embed=Embed(title="已取消"),
            view=None,
            delete_after=3,
        )
        self.get_content_button.disabled = False
        await message.edit(view=self)
def setup(bot: Bot):
    """Pycord extension entry point: register the InfoCog."""
    bot.add_cog(InfoCog(bot))
| watermelon1024/czbooks-helper | cogs/info.py | info.py | py | 7,543 | python | en | code | 1 | github-code | 13 |
31917686723 | import time
import pygame
# Import constants and game-related functions from other modules
from checkers.constants import WIDTH, HEIGHT, RED, WHITE
from checkers.game import Game
from minimax.algorithm import minimax,alpha_beta,get_all_moves
from winner_gui import display_winner
from difficulty_selection_gui import DifficultySelectionGUI
from algorithm_selection_gui import AlgorithmSelectionGUI
# Set the frames per second for Pygame window updates
# (1 FPS keeps the AI-vs-random match slow enough to watch)
FPS = 1
# Create a Pygame window with specified dimensions
WIN = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption('Checkers')
# Main game loop
def main():
    """Run one AI (white) vs random (red) checkers match to completion."""
    run = True
    clock = pygame.time.Clock()
    # Create a new game and store its initial state
    game = Game(WIN)
    new_board = game.get_board()
    # Open GUI for selecting AI algorithm
    gui1 = AlgorithmSelectionGUI()
    gui1.run()
    algorithm = gui1.algorithm
    print("Selected algorithm:", algorithm)
    # Open GUI for selecting difficulty level
    gui2 = DifficultySelectionGUI()
    gui2.run()
    difficulty = gui2.difficulty
    print("Selected difficulty:", difficulty)
    # Determine search depth based on selected difficulty level
    if difficulty=="easy":
        level = 2
    elif difficulty=="medium":
        level = 3
    else:
        level = 4
    start_time=time.time()
    while run:
        clock.tick(FPS)
        # If it's the AI player's turn (white), choose a move
        if game.turn == WHITE:
            moves = get_all_moves(game.get_board(), WHITE, game)
            # Check if there are any valid moves for the AI player
            if not moves:
                # NOTE(review): WHITE has no moves here, yet the banner
                # declares WHITE the winner — confirm this is intended.
                display_winner("no more moves ,WHITE WINS!",start_time,algorithm)
                run = False
            # Choose a move using selected algorithm and search depth
            else:
                if algorithm == "minimax":
                    value, new_board = minimax(game.get_board(), level, WHITE, game)
                else:
                    value, new_board = alpha_beta(game.get_board(), level, float('-inf'), float('inf'), WHITE, game)
                # Update the game state with the chosen move
                game.ai_move(new_board)
        # If it's the human player's turn (red), choose a random move
        else:
            moves = get_all_moves(game.get_board(), RED, game)
            # Check if there are any valid moves for the human player
            if not moves:
                display_winner("no moves for red ,WHITE WINS!",start_time,algorithm)
                run = False
            # Choose a random move
            else:
                # NOTE(review): new_board still holds the AI's last board
                # here — confirm random_move really expects that argument.
                game.random_move(new_board)
        # Check if there is a winner yet
        if game.winner() is not None:
            # Display winner message and end the game loop
            if(game.winner()==WHITE):
                display_winner("WHITE WINS!",start_time,algorithm)
            else:
                display_winner("RED WINS!",start_time,algorithm)
            run = False
        # Update the Pygame window with the current game state
        game.update()
    # Quit the Pygame window after the game ends
    pygame.quit()
# Call the main function to execute the game
main()
| Yahia-Hasan/Ai-Checkers-Game | main.py | main.py | py | 3,220 | python | en | code | 0 | github-code | 13 |
74021098579 | from django.contrib.auth import login
from django.contrib.auth.decorators import login_required
from django.shortcuts import render, redirect
from .decorators import *
from .forms import *
from .models import *
# from tablib import Dataset
# Create your views here.
def index_view(request):
    """Public landing page; POST submissions are delegated to login_view."""
    if request.method != "POST":
        return render(request, 'app1/index.html')
    return login_view(request)
def register_view(request):
    """Render the registration form; create the account and redirect on success.

    On an invalid POST the bound form (with its errors) is re-rendered.
    Cleanup: the original built `context` three times, with the first two
    assignments dead (always overwritten by the final `context = {...}`).
    """
    form = RegistrationForm()
    if request.method == "POST":
        form = RegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect("index")
        # fall through with the bound form so validation errors display
    return render(request, "app1/register.html", {"form": form})
def login_view(request):
    """Render the login page; authenticate and redirect on a valid POST.

    LoginForm.login() performs the credential check and returns the
    user (or a falsy value on failure).
    """
    form = LoginForm(request.POST or None)
    if not (request.POST and form.is_valid()):
        return render(request, 'app1/login.html', {'form': form})
    user = form.login(request)
    if not user:
        return render(request, 'app1/login.html', {'form': form})
    login(request, user)
    return redirect("acceuil")
'''
def logout_view(request):
logout(request)
return redirect("home")
def employee_view(request):
form = Employee_Form()
return render(request, 'AppDemo/home.html', {'form': form})
'''
@staff_required
def admin_view(request):
    """Staff dashboard: lists Parties and hosts two creation forms.

    The POST payload carries either "partie_create" or "chapitre_create"
    to indicate which of the two forms was submitted; on success the
    page re-renders with fresh (unbound) forms and a success banner.
    """
    parties = Partie.objects.all()
    context = {"parties": parties}
    form1 = PartieModelForm()
    form2 = ChapitreModelForm()
    if request.method == "POST":
        if request.POST.get("partie_create"):
            form1 = PartieModelForm(request.POST)
            if form1.is_valid():
                form1.save()
                success = True
                message = "Partie crée avec succès"
                context["success"] = success
                context["message"] = message
                context["form1"] = PartieModelForm()
                context["form2"] = ChapitreModelForm()
                return render(request, 'app1/admin.html', context)
            else:
                # invalid: keep the bound form so errors show
                context["form1"] = form1
        elif request.POST.get("chapitre_create"):
            form2 = ChapitreModelForm(request.POST)
            if form2.is_valid():
                form2.save()
                success = True
                message = "Chapitre crée avec succès"
                context["success"] = success
                context["message"] = message
                context["form1"] = PartieModelForm()
                context["form2"] = ChapitreModelForm()
                return render(request, 'app1/admin.html', context)
            else:
                context["form2"] = form2
    # GET or invalid POST: form1/form2 are unbound or bound-with-errors.
    context["form1"] = form1
    context["form2"] = form2
    return render(request, 'app1/admin.html', context)
@login_required(login_url='login')
def acceuil_view(request):
    """Authenticated home page listing every Partie."""
    parties = Partie.objects.all()
    return render(request, 'app1/acceuil.html', {"parties": parties})
def partie_create_view(request):
    """Create a Partie from PartieModelForm; confirm with a plain response."""
    form = PartieModelForm()
    context = {'form': form}
    if request.method == "POST":
        form = PartieModelForm(request.POST)
        if form.is_valid():
            form.save()
            #return redirect("home")
            return HttpResponse('partie created')
    # NOTE(review): on an invalid POST, context still holds the unbound
    # form, so validation errors never reach the template — confirm intended.
    return render(request, 'app1/admin.html', context)
def partie_details_view(request, idpartie):
    """Detail page for a single Partie (404s are not handled here)."""
    partie = Partie.objects.get(pk=idpartie)
    return render(request, 'app1/partie_details.html', {"partie": partie})
def partie_update_view(request, idpartie):
    """Edit an existing Partie."""
    partie = Partie.objects.get(pk=idpartie)
    form = PartieUpdateForm(instance=partie)
    context = {'form': form, 'partie': partie}
    if request.method == "POST":
        # NOTE(review): GET renders PartieUpdateForm but POST binds
        # PartieModelForm — confirm the two forms share fields.
        form = PartieModelForm(request.POST, instance=partie)
        if form.is_valid():
            form.save()
            return HttpResponse('updated !')
    return render(request, 'app1/partie_update.html', context)
def partie_delete_view(request, idpartie):
    """Confirm-and-delete page for a Partie."""
    item = Partie.objects.get(pk=idpartie)
    if request.method == 'POST':
        item.delete()
        return HttpResponse('deleted !')
    return render(request, 'app1/partie_delete.html', {'item': item})
def search_programme_view(request):
    """Substring search over Partie/Chapitre titles and Article content.

    GET renders the page with empty results; POST runs the three
    __contains queries and sets 'found' when any of them matched.
    """
    context = {'searched': None,
               'parties': None,
               'articles': None,
               'chapitres': None,
               'found': False}
    if request.method == 'POST':
        term = request.POST.get("searched")
        parties = Partie.objects.filter(titre__contains=term)
        articles = Article.objects.filter(contenu__contains=term)
        chapitres = Chapitre.objects.filter(titre__contains=term)
        context.update(searched=term,
                       parties=parties,
                       articles=articles,
                       chapitres=chapitres,
                       found=bool(articles or chapitres or parties))
    return render(request, 'app1/search_programme.html', context)
def search_view(request):
    """Render the static search page."""
    return render(request, 'app1/search.html')
def chapitre_create_view(request):
    """Create a Chapitre from ChapitreModelForm; confirm with a plain response."""
    form = ChapitreModelForm()
    context = {'form': form}
    if request.method == "POST":
        form = ChapitreModelForm(request.POST)
        if form.is_valid():
            form.save()
            #return redirect("home")
            return HttpResponse('chapitre created')
    # NOTE(review): invalid POSTs re-render with the unbound form, so
    # validation errors are not displayed — confirm intended.
    return render(request, 'app1/chapitre_create.html', context)
def chapitre_update_view(request, idchapitre):
    """Edit an existing Chapitre."""
    chapitre = Chapitre.objects.get(pk=idchapitre)
    form = ChapitreUpdateForm(instance=chapitre)
    context = {'form': form, 'chapitre': chapitre}
    if request.method == "POST":
        # NOTE(review): GET renders ChapitreUpdateForm but POST binds
        # ChapitreModelForm — confirm the two forms share fields.
        form = ChapitreModelForm(request.POST, instance=chapitre)
        if form.is_valid():
            form.save()
            return HttpResponse('updated !')
    return render(request, 'app1/chapitre_update.html', context)
def chapitre_delete_view(request, idchapitre):
    """Confirm-and-delete page for a Chapitre."""
    item = Chapitre.objects.get(pk=idchapitre)
    if request.method == 'POST':
        item.delete()
        return HttpResponse('deleted !')
    return render(request, 'app1/chapitre_delete.html', {'item': item})
def article_create_view(request):
    """Create an Article from ArticleModelForm.

    GET renders an empty form; a valid POST saves and confirms; an
    invalid POST re-renders the bound form so field errors are shown.
    """
    form = ArticleModelForm()
    if request.method == "POST":
        form = ArticleModelForm(request.POST)
        if form.is_valid():
            form.save()
            # Fix: the previous redirect("app1/admin.html") targeted a
            # template path, not a URL name, producing a dead relative
            # redirect. Confirm success like the sibling create views;
            # TODO: switch to redirect(<admin url name>) once one exists.
            return HttpResponse('article created')
    return render(request, 'app1/article_create.html', {'form': form})
def article_update_view(request, idarticle):
    """Edit an existing Article."""
    article = Article.objects.get(pk=idarticle)
    form = ArticleUpdateForm(instance=article)
    context = {'form': form, 'article': article}
    if request.method == "POST":
        # NOTE(review): GET renders ArticleUpdateForm but POST binds
        # ArticleModelForm — confirm the two forms share fields.
        form = ArticleModelForm(request.POST, instance=article)
        if form.is_valid():
            form.save()
            return HttpResponse('updated !')
    return render(request, 'app1/article_update.html', context)
def article_delete_view(request, idarticle):
    """Confirm-and-delete page for an Article."""
    item = Article.objects.get(pk=idarticle)
    if request.method == 'POST':
        item.delete()
        return HttpResponse('deleted !')
    return render(request, 'app1/article_delete.html', {'item': item})
# for testing
def test_view(request):
    """Scratch view used for manual testing of form submission."""
    # NOTE(review): form1 is never set to True anywhere, so only the
    # chapitre branch can ever trigger the "success" response.
    form1 = False
    form2 = False
    if request.method == "POST":
        # NOTE(review): the POST key checked is "partie" but the form
        # bound is ChapitreModelForm — confirm this mismatch is intended.
        if request.POST.get("partie"):
            form = ChapitreModelForm(request.POST)
            if form.is_valid():
                form.save()
                form2 = True
    if form1 or form2:
        return HttpResponse("success")
    return render(request, 'app1/test.html')
| douniagh/MyApplication- | projet1/myapplication/app1/views.py | views.py | py | 7,766 | python | en | code | 0 | github-code | 13 |
17934644397 | #https://www.youtube.com/watch?v=clMJ8BwCGa0&ab_channel=Exponent
import math
array = [1,2,3,4,1]
def find_repeated_number(arr: list) -> int:
    """Brute-force duplicate finder.

    For each element, checks whether it also occurs anywhere else in
    the list. Returns the first such value, or -1 when every element
    is unique. O(n^2) time, O(n) extra space per iteration.
    """
    for idx, value in enumerate(arr):
        others = arr[:idx] + arr[idx + 1:]
        if value in others:
            return value
    return -1
def find_array_good(arr: list) -> int:
    """Return the first value that occurs again later in *arr*, else -1.

    O(n^2) pairwise scan, but only looks forward, so each pair is
    examined once.
    """
    for left in range(len(arr)):
        if arr[left] in arr[left + 1:]:
            return arr[left]
    return -1
# print(find_array_good([1,2,3,4,7,10,3,12]))
def find_repeated_number_better(arr: list) -> int:
    """Find a duplicate by sorting a copy and scanning adjacent pairs.

    O(n log n) time, O(n) extra space. Fix: the original called
    arr.sort(), mutating the caller's list even though its own note
    said the array must not be modified; sorted() works on a copy.

    Returns the duplicate, or -1 when *arr* is empty, has fewer than
    two elements, or contains no repeats.
    """
    if not arr or len(arr) < 2:
        return -1
    prev = None
    for value in sorted(arr):  # sorted() copies, leaving arr untouched
        if prev is not None and value == prev:
            return value
        prev = value
    return -1
def find_repeated_bitarray(arr: list) -> int:
    """Detect the duplicate using one big integer as a bit set.

    Bit (value + 1) of *seen* marks values already visited — the +1
    shift lets value 0 participate. O(n) time; the bitmask grows with
    the largest value. Returns -1 for an empty list or no repeats.
    """
    if not arr:
        return -1
    seen = 0
    for value in arr:
        bit = 1 << (value + 1)
        if seen & bit:
            return value
        seen |= bit
    return -1
def find_repeated_mark(arr: list) -> int:
    """Find the duplicate by negating the slot each value points at.

    Assumes at most n+1 numbers in the range 1..n. O(n) time, O(1)
    extra space. NOTE: mutates *arr* — entries visited before the
    duplicate is found are left negated.
    Returns the repeated value, or -1 if nothing repeats.
    """
    for value in arr:
        slot = abs(value) - 1          # value v marks index v-1
        if arr[slot] < 0:              # already marked -> v seen before
            return abs(value)
        arr[slot] = -arr[slot]
    return -1
print(find_repeated_mark([2,1,3,5,1]))
| antoniojsp/retos | coding_interview/repeated_number_interview.py | repeated_number_interview.py | py | 1,728 | python | en | code | 0 | github-code | 13 |
12574214044 | """add category column to items
Revision ID: 366a6fa49471
Revises: a0a65e21f96f
Create Date: 2022-01-12 12:01:15.398481
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '366a6fa49471'
down_revision = 'a0a65e21f96f'
branch_labels = None
depends_on = None
def upgrade():
    """Add items.category_id and drop the old categories_to_items join table."""
    # NOTE(review): no ForeignKeyConstraint is created for category_id,
    # although downgrade() restores FKs on the join table — confirm intended.
    op.add_column('items', sa.Column('category_id', sa.Integer, nullable=False))
    op.drop_table('categories_to_items')
def downgrade():
op.drop_column('items', 'category_id')
op.create_table('categories_to_items',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('item_id', sa.Integer(), nullable=False),
sa.Column('category_id', sa.Integer(), nullable=False),
sa.ForeignKeyConstraint(['category_id'], ['categories.id'], ),
sa.ForeignKeyConstraint(['item_id'], ['items.id'], ),
sa.PrimaryKeyConstraint('id')
)
| justinrusso/loot-locker | migrations/versions/20220112_120115_add_category_column_to_items.py | 20220112_120115_add_category_column_to_items.py | py | 887 | python | en | code | 2 | github-code | 13 |
20293397794 | # -*- coding: utf-8 -*-
import re
from chaoslib.types import Configuration, Secrets
from logzero import logger
from chaosazure.application_gateway.constants import RES_TYPE_SRV_AG
from chaosazure.application_gateway.actions import __network_mgmt_client
from chaosazure.common.resources.graph import fetch_resources
__all__ = ["describe_application_gateways", "count_application_gateways", "describe_routes"]
def describe_application_gateways(filter: str = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Describe Azure application gateways.
Parameters
----------
filter : str
Filter the application gateways. If the filter is omitted all application gateways in
the subscription will be selected for the probe.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
"""
logger.debug(
"Start describe_application_gateways: configuration='{}', filter='{}'".format(
configuration, filter))
application_gateways = fetch_resources(filter, RES_TYPE_SRV_AG, secrets, configuration)
return application_gateways
def count_application_gateways(filter: str = None,
configuration: Configuration = None,
secrets: Secrets = None) -> int:
"""
Return count of Azure application gateways.
Parameters
----------
filter : str
Filter the application gateways. If the filter is omitted all application_gateways in
the subscription will be selected for the probe.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
"""
logger.debug(
"Start count_application_gateways: configuration='{}', filter='{}'".format(
configuration, filter))
application_gateways = fetch_resources(filter, RES_TYPE_SRV_AG, secrets, configuration)
return len(application_gateways)
def describe_routes(filter: str = None,
name_pattern: str = None,
configuration: Configuration = None,
secrets: Secrets = None):
"""
Describe Azure application gateways routes.
Parameters
----------
filter : str
Filter the application_gateways. If the filter is omitted all application gateways in
the subscription will be selected for the probe.
Filtering example:
'where resourceGroup=="myresourcegroup" and name="myresourcename"'
name_pattern : str
Filter the routes. If the filter is omitted all routes in
the server will be selected for the probe.
Pattern example:
'app[0-9]{3}'
"""
logger.debug(
"Start describe_routes: configuration='{}', filter='{}', name_pattern='{}'".format(
configuration, filter, name_pattern))
pattern = None
if name_pattern:
pattern = re.compile(name_pattern)
application_gateways = fetch_resources(filter, RES_TYPE_SRV_AG, secrets, configuration)
client = __network_mgmt_client(secrets, configuration)
routes = []
for agw in application_gateways:
group = agw['resourceGroup']
application_gateway_name = agw['name']
app_gw = client.application_gateways.get(group, application_gateway_name)
for r in app_gw.request_routing_rules:
name = r.name
if pattern is None or pattern.search(name):
routes.append(r.as_dict())
return routes
| chaostoolkit-incubator/chaostoolkit-azure | chaosazure/application_gateway/probes.py | probes.py | py | 3,590 | python | en | code | 22 | github-code | 13 |
42363839131 | #! /usr/bin/env python
#
from astroquery.admit import ADMIT
if True:
import pickle
a = pickle.load(open('alma.pickle','rb'))
a = ADMIT()
a.query(source_name_alma="NGC3504")
a.check()
if False:
r = a.sql("SELECT * from win")
print(len(r),r)
if False:
q1 = 'SELECT * from spw, sources WHERE sources.flux > 9 and sources.spw_id = spw.id and sources.lines_id = 0;'
r1 = a.sql(q1)
print('Should find the 1 CubeSum source, 10 Jy')
print(r1)
q2 = 'SELECT * from spw, sources, lines WHERE sources.flux > 7 AND sources.spw_id = spw.id and sources.lines_id = lines.id and lines.spw_id = spw.id;'
r2 = a.sql(q2)
print('Should find the 1 CO LineCube source, 8 Jy')
print(r2)
| teuben/study7 | check1.py | check1.py | py | 729 | python | en | code | 0 | github-code | 13 |
36520150294 | import torch
import random
from torch import nn, optim
from torch.nn import functional as F
import numpy as np
from copy import deepcopy
from tqdm import trange
import matplotlib.pyplot as plt
class ReplayMemory():
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, transition):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = transition # a transition should be (s, a, r, s', done)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class DQN():
def __init__(self, env, dqn_net, action_size, state_shape, learning_rate=1e-3, eps_start=0.9, eps_end=0.05, eps_decay=200, num_episodes=100, num_test_episodes=10, batch_size=128, update_interval=10, preprocessing_network=None):
# Pytorch specific
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Environment related vars
self.env = env
self.actions = [torch.cuda.FloatTensor([i]) for i in range(action_size)]
self.state_shape = state_shape
self.gamma = 0.999
self.preprocessing_network = preprocessing_network
# Optimization parameters
self.learning_rate = learning_rate
self.eps_start = eps_start
self.eps_end = eps_end
self.eps_decay = eps_decay
self.num_episodes = num_episodes
self.batch_size = batch_size
self.update_interval = update_interval
self.num_test_episodes = num_test_episodes
# Networks
self.dqn_net = dqn_net
self.dqn_net = self.dqn_net.to(self.device)
self.target_net = deepcopy(self.dqn_net)
# self.target_net.load_state_dict(self.dqn_net.state_dict())
self.target_net.eval()
self.target_net = self.target_net.to(self.device)
self.optimizer = optim.RMSprop(self.dqn_net.parameters(), lr=self.learning_rate)
# Experience buffer
self.memory = ReplayMemory(capacity=int(10000))
def get_action(self, state, eps):
if np.random.random() < eps:
action = [np.random.randint(len(self.actions))]
action = torch.cuda.FloatTensor(action)
else:
action = torch.cuda.FloatTensor([self.best_action(state)])
return action
def q_values(self, state, target=False):
if not target:
with torch.no_grad():
return torch.stack([self.dqn_net(torch.cat([state, a]).to(self.device)) for a in self.actions])
else:
with torch.no_grad():
return torch.stack([self.target_net(torch.cat([state, a]).to(self.device)) for a in self.actions])
def best_action(self, state):
action = torch.argmax(self.q_values(state))
return action
def train(self):
self.dqn_net.train()
self.target_net.eval()
global_step = 0
episode_rewards = []
losses = []
for i_e in trange(self.num_episodes):
episode_reward = 0.
state = self.env.reset()
state = torch.cuda.FloatTensor(state)
if self.preprocessing_network is not None:
state = self.preprocessing_network(state)
counter = 0
while True:
# Get and take action
eps = self.eps_end + (self.eps_start - self.eps_end) * np.exp(-1. * global_step / self.eps_decay)
action = self.get_action(state, eps)
next_state, reward, done, _ = self.env.step(int(action.cpu().detach().item()))
if self.preprocessing_network is not None:
next_state = self.preprocessing_network(next_state)
next_state = torch.cuda.FloatTensor(next_state)
reward = torch.cuda.FloatTensor([reward])
done = torch.cuda.FloatTensor([done])
# Bookkeeping
global_step += 1
counter += 1
episode_reward += reward
# Add to memory buffer
self.memory.push([state, action, reward, next_state, done])
state = next_state
# Update target network
if global_step % self.update_interval == 0:
# print("UPDATING")
self.target_net = deepcopy(self.dqn_net)
self.target_net.eval()
# Train DQN
# Q(s, a) = r + gamma * max_a' Q(s', a')
if len(self.memory) >= self.batch_size:
sample = self.memory.sample(self.batch_size)
inputs = []
labels = []
for s, a, r, s_n, d in sample:
inputs.append(torch.cat([s, a]))
label = r
if d == 0.:
label += self.gamma * torch.max(self.q_values(s_n, target=True))
labels.append(label)
inputs = torch.stack(inputs)
labels = torch.stack(labels)
labels = labels.to(self.device)
# print(labels)
# inputs = torch.stack([torch.cat(x[:2]) for x in sample]) #s, a
# inputs = inputs.to(self.device)
# # Single DQN
# labels = torch.stack([r if d == 1. else r + self.gamma * torch.max(self.q_values(s_n, target=False)) for x in sample])
# # Double DQN
# # selected_state_action = torch.cat([x[3], torch.cuda.FloatTensor([self.best_action(x[3])])])
# # labels = torch.stack([x[2] if x[4] == 1. else x[2] + self.gamma * self.target_net(selected_state_action) for x in sample])
predictions = self.dqn_net(inputs).to(self.device)
# loss = F.mse_loss(predictions, labels)
loss = F.smooth_l1_loss(predictions, labels)
losses.append(loss)
# if i_e % 10 == 0:
# print(loss)
self.optimizer.zero_grad()
loss.backward()
for param in self.dqn_net.parameters():
param.grad.data.clamp_(-1, 1)
self.optimizer.step()
if done:
episode_rewards.append(episode_reward)
# if i_e % 10 == 0:
# print("reward", episode_reward)
break
return episode_rewards, losses
def test(self):
self.dqn_net.eval()
global_step = 0
episode_rewards = 0.
losses = []
for _ in trange(self.num_test_episodes):
episode_reward = 0.
state = self.env.reset()
state = torch.cuda.FloatTensor(state)
while True:
# Get and take action
print(self.q_values(state))
action = self.get_action(state, 0.)
self.env.render()
next_state, reward, done, _ = self.env.step(int(action.cpu().detach().item()))
next_state = torch.cuda.FloatTensor(next_state)
reward = torch.cuda.FloatTensor([reward])
done = torch.cuda.FloatTensor([done])
# Bookkeeping
global_step += 1
episode_reward += reward
if done:
episode_rewards += episode_reward
break
return episode_rewards / self.num_test_episodes
| joshnroy/TransferLearningThesis | new/dqn.py | dqn.py | py | 7,879 | python | en | code | 0 | github-code | 13 |
73488209617 | # Evaluate the value of an arithmetic expression in Reverse Polish Notation.
# Valid operators are +, -, *, and /. Each operand may be an integer or another expression.
# Note that division between two integers should truncate toward zero.
# It is guaranteed that the given RPN expression is always valid. That means the expression would always evaluate to a result,
# and there will not be any division by zero operation.
# Example 1:
# Input: tokens = ["2","1","+","3","*"]
# Output: 9
# Explanation: ((2 + 1) * 3) = 9
# Example 2:
# Input: tokens = ["4","13","5","/","+"]
# Output: 6
# Explanation: (4 + (13 / 5)) = 6
# Example 3:
# Input: tokens = ["10","6","9","3","+","-11","*","/","*","17","+","5","+"]
# Output: 22
# Explanation: ((10 * (6 / ((9 + 3) * -11))) + 17) + 5
# = ((10 * (6 / (12 * -11))) + 17) + 5
# = ((10 * (6 / -132)) + 17) + 5
# = ((10 * 0) + 17) + 5
# = (0 + 17) + 5
# = 17 + 5
# = 22
# Constraints:
# 1 <= tokens.length <= 104
# tokens[i] is either an operator: "+", "-", "*", or "/", or an integer in the range [-200, 200].
class Solution:
def evalRPN(self, tokens: List[str]) -> int:
# we will use a stack: if we meet a number, we push it into the stack
# if we meet an operation, we pop the last 2 numbers and apply the operation
# then we push the result
stack = []
operations = ['+', '-', '*', '/']
for token in tokens:
# if the token is an operation
if token in operations:
# we pop two operands from stack and apply the operation
second = int(stack.pop())
first = int(stack.pop())
if token == '+':
result = first + second
elif token == '-':
result = first - second
elif token == '*':
result = first * second
else:
result = int(first / second)
# now we push the result back to the stack
stack.append(result)
else:
stack.append(token)
# we know at this point, there is only one number in the stack
return stack[0]
| aslamovamir/LeetCode | Evaluate_Reverse_Polish_Notation.py | Evaluate_Reverse_Polish_Notation.py | py | 2,244 | python | en | code | 0 | github-code | 13 |
39025807376 | import random
class SkipList:
"""
Class representing skiplist as created by William Pugh
A skip list is built in layers. The bottom layer is an ordinary ordered
linked list. Each higher layer acts as an "express lane" for the lists below,
where an element in layer i appears in layer i+1 with some fixed probability p
(two commonly used values for p are 1/2 or 1/4). On average, each element appears
in 1/(1-p) lists, and the tallest element (usually a special head element at the
front of the skip list) in all the lists. The skip list contains log_1/p(n) lists.
In an ordinary sorted list, insert, delete, and search operations require sequential
traversal of the list. This results in O(n) performance per operation. Skip Lists allow
intermediate nodes in the list to be skipped during a traversal - resulting in an
expected performance of O(log n) per operation.
Attributes:
MAX_LEVEL(int): Constant denoting maximum number of levels in skip list
P(float): Constant denoting fraction of nodes with level i pointers also having level i + 1 pointers
header(SkipNode): Head of skiplist which is a dummy node
level(int): Number of levels in skiplist i.e. SkipNode with highest level
Methods:
insert(key): Inserts SkipNode with key into skip list
search(key): Searches for SkipNode with specified key in skip list
erase(key): Deletes SkipNode with specified key from skip list if it exists
random_level(): Generates random level at which to insert new SkipNode
"""
def __init__(self, max_level = 16, p = 0.5):
"""
Initializes attributes of SkipList with values
"""
# Maximum level for this skip list
self.__MAX_LEVEL = max_level
# P is the fraction of the nodes with level
# i references also having level i+1 references
self.__P = p
# Create header node and initialize to -1
self.__header = self.SkipNode(-1, self.__MAX_LEVEL)
# Current level of skip list
self.__level = 0
def random_level(self):
"""
Before we start inserting the elements in the skip list we need
to decide the nodes level.Each element in the list is represented
by a node, the level of the node is chosen randomly while insertion
in the list. Level does not depend on the number of elements in the node.
The level for node is decided by the following algorithm.
__MAX_LEVEL is the upper bound on number of levels in the skip list. It can
be determined as – L(N) = log_p/2(N). This algorithm assures that random level
will never be greater than __MAX_LEVEL. Here p is the fraction of the nodes
with level i pointers also having level i+1 pointers and N is the number of
nodes in the list.
"""
level = 0
while random.random() < self.__P and level < self.__MAX_LEVEL:
level += 1
return level
def insert(self, key):
"""
We will start from highest level in the list and compare key of
next node of the current node with the key to be inserted. Basic idea is:
1. Key of next node of current node is less than key to be inserted
then we keep on moving forward on the same level
2. Key of next node of current node is greater than the key to be inserted
then we store the pointer to current node i at update[i] and move one level
down and continue our search.
The insert algorithm maintains two local variables(besides the skip list header):
- X (current): a pointer which points to a node whose forward pointers point to nodes
whose key we are currently comparing to the key we want to insert this lets us quickly
compare keys, and follow forward pointers
- update: an array of node pointers which point to nodes whose forward pointers may need
to be updated to point to the newly inserted node, if the new node is inserted in the list
just before the node X points to this, lets us quickly update all the pointers necessary to
splice in the new node
"""
# Here update[i] holds the pointer to node at level i from which we moved down
# to level i-1 and pointer of node left to insertion position at level 0.
update = [None] * (self.__MAX_LEVEL + 1)
# X (current)
current = self.__header
# Start from highest level of skip list move the current reference forward while node next
# to current's key is less than key. Otherwise insert current in update and move one level
# down and continue search
for i in range(self.__MAX_LEVEL, -1, -1):
while current.forward[i] and current.forward[i].key < key:
current = current.forward[i]
update[i] = current
# Reached level 0 and forward reference to right is desired position to insert key.
current = current.forward[0]
# if current is None that means we have reached to end of the level or if current's key is
# not equal to key to insert that means we have to insert node between update[0] and
# current node
if current == None or current.key != key:
# Generate random level at which we will insert SkipNode
random_level = self.random_level()
# If random level is greater than list's current level (node with highest level inserted
# in list so far), initialize update value with reference to header for further use
if random_level > self.__level:
for i in range(self.__level + 1, random_level + 1):
update[i] = self.__header
self.__level = random_level
# Create new SkipNode with randomly generated level
new_node = self.SkipNode(key, random_level)
# Insert node by rearranging pointer references
for i in range(random_level + 1):
new_node.forward[i] = update[i].forward[i]
update[i].forward[i] = new_node
def search(self, key):
"""
Searching an element is very similar to the approach for searching for a spot to insert
an element in the Skip list. The basic idea is as follows;
1. Key of next node is less than search key then we keep on moving forward on the same level.
2. Key of next node is greater than the key to be inserted then we store the pointer to current
node i at update[i] and move one level down and continue our search.
At the lowest level (0), if the element next to the rightmost element (update[0]) has key equal
to the search key, then we have found key otherwise failure.
The expected number of steps in each linked list is at most 1/p, which can be seen by tracing
the search path backwards from the target until reaching an element that appears in the next
higher list or reaching the beginning of the current list. Therefore, the total expected cost
of a search is 1/p(log_1/p(n)) which is O(log n) when p is a constant.
Args:
key: Data to search for in skip list
Return:
Boolean indicating whether key was found
Raises:
"""
# X (current)
current = self.__header
# Start from highest level of skip list move the current reference forward while node next
# to current's key is less than key.
for i in range(self.__MAX_LEVEL, -1, -1):
while current.forward[i] and current.forward[i].key < key:
current = current.forward[i]
# Reached level 0 and advance reference to right, which is possibly our desired node
current = current.forward[0]
# If current node have key equal to search key, we have found our target node
if current and current.key == key:
return True
else:
return False
def erase(self, key):
"""
Deletion of an element k is preceded by locating element in the Skip list using above
mentioned search algorithm. Once the element is located, rearrangement of pointers is done
to remove element from list just like we do in singly linked list. We start from lowest level
and do rearrangement until element next to update[i] is not k.After deletion of element there
could be levels with no elements, so we will remove these levels as well by decrementing the
level of Skip list.
Args:
key: Data to remove from skip list
Return:
Raises:
ValueError: Key not in skiplist
"""
# Here update[i] holds the pointer to node at level i from which we moved down
# to level i-1 and pointer of node left to deletion position at level 0.
update = [None] * (self.__MAX_LEVEL + 1)
# X (current)
current = self.__header
# Start from highest level of skip list move the current reference forward while node next
# to current's key is less than key. Otherwise insert current in update and move one level
# down and continue search
for i in range(self.__MAX_LEVEL, -1, -1):
while current.forward[i] and current.forward[i].key < key:
current = current.forward[i]
update[i] = current
# Reached level 0 and forward reference to right is desired position to delete key.
current = current.forward[0]
# If current node is target node
if current and current.key == key:
# start from lowest level and rearrange references just like we do in singly linked list
# to remove target node
for i in range(self.__level + 1):
# If at level i, next node is not target node, break the loop, no need to move
# to a further level
if update[i].forward[i] != current:
break
update[i].forward[i] = current.forward[i]
# Remove levels having no elements
while(self.__level > 0 and self.__header.forward[self.__level] == None):
self.__level -= 1
elif current and current.key != key:
raise ValueError('Key not in skip list')
def __str__(self):
"""
Return string representation of skip list
"""
stringrep, header = '\n*****Skip List******\n', self.__header
for level in range(self.__level + 1, -1, -1):
stringrep += "Level {}: ".format(level)
node = header.forward[level]
while(node != None):
stringrep += '{} '.format(node.key)
node = node.forward[level]
stringrep += "\n"
return stringrep
class SkipNode:
"""
Inner class representing nodes in skiplist
We speak of a Skip List node having levels, one level per
forward reference. The number of levels in a node is called
the size of the node.
Attributes:
key: Data stored in node
forward: forward array carrying pointers to nodes of a different level. A level i node carries i forward pointers indexed through 0 to i.
"""
def __init__(self, key, level):
"""
Initialize SkipNode attributes with values
Args:
key: Data stored in SkipNode
level: Level of the node is chosen randomly during insertion in the list. Level i being the topmost level and level 0 being the bottom level.
"""
self.key = key
self.forward = [None] * (level + 1)
myskiplist = SkipList()
myskiplist.insert(5)
myskiplist.insert(7)
myskiplist.insert(20)
myskiplist.insert(8)
print(myskiplist)
print(myskiplist.search(12))
myskiplist.erase(7)
print(myskiplist) | wiknwo/data_structures_and_algortithms | Data Structures/Linked List/SkipList.py | SkipList.py | py | 12,104 | python | en | code | 0 | github-code | 13 |
73147135377 | import os
import joblib
import numpy as np
import tensorflow as tf
from feature_processing import extract_feature
from cnn_model import get_model
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import OneHotEncoder
from sklearn.model_selection import train_test_split
# TO DO
# Need as command line arguments
features_path = '/home/ilya/Documents/GitHub/sound_classification/dataset/us8k_features_test.npy'
labels_path = '/home/ilya/Documents/GitHub/sound_classification/dataset/us8k_labels_test.npy'
scalers_path = '/home/ilya/Documents/GitHub/sound_classification/dataset/us8k_scales.pkl'
sample_path = '/home/ilya/Documents/UrbanSound8K/audio/fold1/24074-1-0-1.wav'
model_path = '/home/ilya/Documents/GitHub/sound_classification/model-009-0.892507-0.869084.h5'
# Load a model
model = tf.keras.models.load_model(model_path)
# Let's check test data
features = np.load(features_path)
labels = np.load(labels_path)
scalers = joblib.load(scalers_path)
for idx in range(features.shape[1]):
features[:, idx, :] = scalers[idx].transform(features[:, idx, :])
features = np.transpose(np.array(features, ndmin=4), axes=(1, 2, 3, 0))
encoder = OneHotEncoder(sparse=False)
labels = encoder.fit_transform(labels.reshape(len(labels), 1))
model.evaluate(features, labels)
# Let's predict on sample
sample_feature = extract_feature(sample_path, 'melspec', duration=4)
sample_feature = np.array(sample_feature, ndmin=3)
for idx in range(sample_feature.shape[1]):
sample_feature[:, idx, :] = scalers[idx].transform(
sample_feature[:, idx, :])
sample_feature = np.transpose(
np.array(sample_feature, ndmin=4), axes=(1, 2, 3, 0))
result = model.predict(sample_feature)
# Print predicted class name
classes = ['air_conditioner', 'car_horn', 'children_playing', 'dog_bark',
'drilling', 'engine_idling', 'gun_shot', 'jackhammer', 'siren', 'street_music']
print(classes[np.argmax(result[0])])
| ILuxa15/sound_classification | example_predict.py | example_predict.py | py | 1,938 | python | en | code | 1 | github-code | 13 |
17608780387 | class ListElement:
def __init__(self, value=None, next=None):
self.value = value
self.next = next
def delNode(pos):
global listHead
if pos == 1:
listHead = listHead.next
else:
prev = listHead
for i in range(2, pos):
prev = prev.next
if prev is None:
return
prev.next = prev.next.next
# テストケース
if __name__ == '__main__':
# リストを初期化
listHead = ListElement(1, ListElement(2, ListElement(3, ListElement(4, ListElement(5)))))
# リストの要素を表示
current = listHead
while current is not None:
print(current.value, end=' ')
current = current.next
print()
# 2番目の要素を削除
delNode(2)
# 削除後のリストの要素を表示
current = listHead
while current is not None:
print(current.value, end=' ')
current = current.next
print()
# 1番目の要素を削除
delNode(1)
# 削除後のリストの要素を表示
current = listHead
while current is not None:
print(current.value, end=' ')
current = current.next
print()
# 3番目の要素を削除
delNode(3)
# 削除後のリストの要素を表示
current = listHead
while current is not None:
print(current.value, end=' ')
current = current.next
print()
"""
1 2 3 4 5
1 3 4 5
3 4 5
3 4
""" | ishizukuma/FE_B_sample | Python/Q10.py | Q10.py | py | 1,530 | python | en | code | 3 | github-code | 13 |
7181719100 | #!/usr/bin/python3
#-*- coding:utf8 -*-
'''
页面主体在 <div id="contentmain"> 标签内
标题为 <div id="title">
正文为 <div id="content">
下一页 <a href="54900.htm">下一页</a>
'''
import requests
from bs4 import BeautifulSoup
import re
import os
from multiprocessing import Pool
header = {'Host': 'www.wenku8.net',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:71.0) Gecko/20100101 Firefox/71.0',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,en-US;q=0.7,en;q=0.3', 'Accept-Encoding': 'gzip, deflate, br',
'Connection': 'keep-alive'}
UrlRoot = "https://www.xxxxx.net/novel/1/1538/"# 隐掉了网址
def get_page(url, f, numberOfimages):
r = requests.get(UrlRoot + url, headers=header)
r.encoding = 'gbk'
soup = BeautifulSoup(r.text, 'html.parser')
images = soup.find_all("img", class_='imagecontent')
if len(images) > 0:
for i in images:
print(i['src'])
image = requests.get(i['src'])
if image.content:
with open("%d.jpg" % numberOfimages, 'wb') as imageFile:
imageFile.write(image.content)
numberOfimages = numberOfimages + 1
title = soup.find(id="title").get_text()
f.write(title)
content = soup.find(id="content").get_text()
f.write(content)
next_page = soup.find("a", string="下一页")
try:
next_url = re.findall(r"^<a href=\"(\d+.htm)", str(next_page))[0]
except:
next_url = None
return next_url, numberOfimages
def startSpider(NextUrl, workPath, endUrl):
os.chdir("/home/uno/PycharmProjects/BlackHat/"+workPath)
numberOfimages = 1
with open('text', 'a') as f:
NextUrl, numberOfimages = get_page(NextUrl, f, numberOfimages)
while (1):
if NextUrl and NextUrl != endUrl:
NextUrl, numberOfimages = get_page(NextUrl, f, numberOfimages)
print(NextUrl)
else:
break
if __name__ == '__main__':
print("Process Starting...")
urls = ["51563.htm", "54900.htm", "59028.htm", "63864.htm"]
pool = Pool(3)
for i in range(1, 4):
pool.apply_async(startSpider, (urls[i-1], str(i), urls[i]))# 加入进程池,起始url,工作目录名,结束的url
print("=====Start====")
pool.close()
pool.join()
print("===end===")
| CatAndCoffee/playground | 埃罗芒阿老师小说爬虫/spider_multiprocessing.py | spider_multiprocessing.py | py | 2,449 | python | en | code | 4 | github-code | 13 |
25002939509 | from string import Template
import stories
class aa():
class StoryMemberPostsTopicOnBook(stories.Story):
ID = "MemberPostsTopicOnBook"
TitleTemplate = _('<a href="/Members/$MemberKey">$MemberFullname</a> has posted a new topic under <a href="ParentURL">$ParentTitle</a>')
BodyTemplate = _('''
<strong>The message said: $TopicTitle </strong>
<br />" $TopicBody "
''')
Icon = "comment.png"
#The way by which the data "property bag" is handled is needlessly
#Cumbersome... this should be simplified
def getTitle(self, params):
member = params["member"]
parentURL = params["parentURL"]
parentTitle = params["parentTitle"]
data = {
"MemberFullname": member.fullname(),
"MemberKey": member.key(),
"ParentTitle": parentTitle,
"ParentURL": parentURL
}
template = Template(self.TitleTemplate)
return template.safe_substitute(data)
def getBody(self, params):
member = params["member"]
topic = params["topic"]
parentURL = params["parentURL"]
parentTitle = params["parentTitle"]
data = {
"MemberFullname": member.fullname(),
"MemberAvatar": member.gravatar30(),
"MemberKey": member.key(),
"ParentTitle": parentTitle,
"ParentURL": parentURL,
"TopicTitle": topic.Title,
"TopicBody": topic.Body
}
template = Template(self.BodyTemplate)
return template.safe_substitute(data)
def getTargets(self, params): #Get the list of member who will receive the story posts
targets = []
if params["recipient"]:
targets.append(membership.Member.get(params["recipient"]))
targets.append(params["member"])
return targets
| wrook/wrook | root/feathers/talk_stories.py | talk_stories.py | py | 1,640 | python | en | code | 4 | github-code | 13 |
73053461457 | # dataset settings
dataset_type = 'WordEmbeddingDataset'
train_pipeline = [
dict(type='LoadEmbeddingFromFile'),
dict(type='ToTensor', keys=['emb']),
dict(type='ToTensor', keys=['gt_label']),
dict(type='MyCollect', keys=['emb', 'gt_label'])
]
test_pipeline = [
dict(type='LoadEmbeddingFromFile'),
dict(type='ToTensor', keys=['emb']),
dict(type='MyCollect', keys=['emb'])
]
data = dict(
samples_per_gpu=32,
workers_per_gpu=1,
train=dict(
type=dataset_type,
data_prefix='./data/kdxf_cls/training_set_txt',
ann_file='./data/kdxf_cls/train.txt',
classes='./data/kdxf_cls/classes.txt',
pipeline=train_pipeline),
val=dict(
type=dataset_type,
data_prefix='./data/kdxf_cls/training_set_txt',
ann_file='./data/kdxf_cls/val.txt',
classes='./data/kdxf_cls/classes.txt',
pipeline=test_pipeline),
test=dict(
# replace `data/val` with `data/test` for standard test
type=dataset_type,
data_prefix='./data/kdxf_cls/test_set',
ann_file='./data/kdxf_cls/test.txt',
classes='./data/kdxf_cls/classes.txt',
pipeline=test_pipeline))
evaluation = dict(interval=1, metric='accuracy', metric_options=dict(topk=(1, 5)))
| LiUzHiAn/kdxf | configs/datasets/word_emb_dataset_config.py | word_emb_dataset_config.py | py | 1,270 | python | en | code | 0 | github-code | 13 |
34139190948 | import fasttext
import sys
if len(sys.argv) < 2:
print("Data folder is required as an argument")
sys.exit(1)
folder = sys.argv[1]
if folder[-1] != "/":
folder = folder + "/"
# train classifier
model = fasttext.train_supervised(input=folder + "training.txt", lr=0.1, epoch=25, wordNgrams=2)
model.save_model(folder + "model.bin")
# load and evaluate classifier
model = fasttext.load_model(folder + "model.bin")
print("evaluation:")
print("number of samples, precision and recall: " + str(model.test(folder+"validation.txt")))
print("examples:")
example1 = "France’s Renault, which controls Russian car maker _entity_ , fell 9.3 per cent."
print(str(model.predict(example1)) + " for: " + example1)
example2 = "Germany's Lufthansa halted flights to Ukraine from Monday, joining _entity_ which already suspended flights."
print(str(model.predict(example2)) + " for: " + example2)
| jeromechoo/sanctions-tracker | 4_train_model.py | 4_train_model.py | py | 898 | python | en | code | 4 | github-code | 13 |
16463128313 | import numpy as np
import codecs
#计算欧氏距离
def distance(x1,x2):
return np.sqrt(sum(np.power(x1-x2,2)))
#对一个样本找到与该样本距离最近的聚类中心
def nearest(point, cluster_centers):
    """Distance from *point* to the closest row of *cluster_centers*.

    Returns np.inf when no centers have been initialized yet.
    """
    num_centers = np.shape(cluster_centers)[0]
    return min(
        (distance(point, cluster_centers[idx, ]) for idx in range(num_centers)),
        default=np.inf,
    )
#选择尽可能相距较远的类中心
def get_centroids(dataset, k):
    """k-means++ seeding: pick k initial centers that are spread far apart."""
    m, n = np.shape(dataset)
    cluster_centers = np.zeros((k , n))
    # 1. Choose the first center uniformly at random
    index = np.random.randint(0, m)
    cluster_centers[0,] = dataset[index, ]
    # 2. Initialize the list of shortest distances
    d = [0.0 for _ in range(m)]
    for i in range(1, k):
        sum_all = 0
        for j in range(m):
            # 3. For every sample, find the nearest already-chosen center
            d[j] = nearest(dataset[j, ], cluster_centers[0:i, ])
            # 4. Accumulate all of the shortest distances
            sum_all += d[j]
        # 5. Draw a random threshold in [0, sum_all)
        sum_all *= np.random.rand()
        # 6. Roulette-wheel selection: samples far from every existing center
        #    are more likely to become the next center
        for j, di in enumerate(d):
            sum_all=sum_all - di
            if sum_all > 0:
                continue
            cluster_centers[i,] = dataset[j, ]
            break
    return cluster_centers
#主程序
def Kmeans(dataset,k):
    """Cluster `dataset` into k groups (Lloyd's algorithm, kmeans++ seeding).

    Returns (centers, assignments) where assignments[i] holds the cluster
    index and the squared distance of sample i to that cluster's center.
    """
    row_m=np.shape(dataset)[0]
    cluster_assign=np.zeros((row_m,2))
    center=get_centroids(dataset,k)
    change=True
    while change:
        change=False
        # Assignment step: attach every sample to its closest center
        for i in range(row_m):
            mindist=np.inf
            min_index=-1
            for j in range(k):
                distance1=distance(center[j,:],dataset[i,:])
                if distance1<mindist:
                    mindist=distance1
                    min_index=j
            if cluster_assign[i,0] != min_index:
                change=True
            cluster_assign[i,:]=min_index,mindist**2
        # Update step: move every center to the mean of its members.
        # NOTE(review): an empty cluster makes np.mean of an empty slice
        # return NaN for that center -- confirm this cannot occur in practice.
        for cen in range(k):
            cluster_data=dataset[np.nonzero(cluster_assign[:,0]==cen)]
            center[cen,:]=np.mean(cluster_data,0)
    return center ,cluster_assign
#cluster_center,cluster_assign=Kmeans(datas,3)
def kmeanspp_select(gradient, k):
    """Cluster the rows of `gradient` into k groups and return, for every
    cluster, the index of the sample closest to its center (first-seen
    cluster order is preserved)."""
    _, assignments = Kmeans(gradient, k)
    best_per_cluster = {}
    for sample_idx in range(assignments.shape[0]):
        cluster_id = assignments[sample_idx][0]
        sq_dist = assignments[sample_idx][1]
        current = best_per_cluster.get(cluster_id)
        if current is None or sq_dist < current[0]:
            best_per_cluster[cluster_id] = (sq_dist, sample_idx)
    return [entry[1] for entry in best_per_cluster.values()]
| yunyikristy/CM-ACC | kmeanspp.py | kmeanspp.py | py | 2,810 | python | en | code | 19 | github-code | 13 |
36114818405 | import os
import shutil
from docutils import nodes
from docutils.parsers.rst import Directive
from docutils.parsers.rst import directives, Directive
from sphinx.util.docutils import SphinxDirective, SphinxTranslator
def setup(app):
    """Sphinx extension entry point: register the pdfimage node and directive."""
    app.add_node(pdfimage,
                 html=(visit, depart))
    app.add_directive('pdfimage', PDFImageDirective)
class pdfimage(nodes.General, nodes.Element):
    """Docutils node representing an embedded PDF; rendered by visit()/depart()."""
    pass
def visit(self, node):
    """HTML visit handler for a ``pdfimage`` node.

    Copies the referenced PDF into the builder's image output directory and
    appends an ``<object>`` embedding tag to the HTML body.
    """
    filepath = node['filepath']
    rel_filepath = node['rel_filepath']
    width = '100%'
    if 'width' in node:
        width = node['width']
    height = '100%'
    if 'height' in node:
        height = node['height']
    align = 'center'
    if 'align' in node:
        align = node['align']
    filename = os.path.basename(filepath)
    outpath = os.path.join(self.builder.outdir, self.builder.imagedir, filename)
    shutil.copyfile(filepath, outpath)
    # BUG FIX: the URL must reference the copied file's name; previously a
    # literal placeholder string was interpolated here instead of {filename},
    # producing a dead link like "../../_images/(unknown)".
    src = f'{self.builder.imgpath}/{filename}'
    # NOTE(review): `height` is computed above but never emitted in the markup
    # below -- confirm whether the <object> tag should carry a height attribute.
    content = f"""
    <object
    data="{src}"
    width="{width}"
    align="{align}"
    type="application/pdf">
        <param name="view" value="Fit" />
        <param name="pagemode" value="none" />
        <param name="toolbar" value="1" />
        <param name="scrollbar" value="0" />
    </object>
    """
    self.body.append(content)
def depart(self, node):
    """HTML depart handler -- the <object> markup is self-contained, so no-op."""
    pass
def align_spec(argument):
    """Option validator: restrict the ':align:' option to left / center / right."""
    return directives.choice(argument, ('left', 'center', 'right'))
class PDFImageDirective(SphinxDirective):
    """The ``pdfimage`` directive: embeds a PDF file into HTML output.

    Usage::

        .. pdfimage:: path/to/file.pdf
           :width: 80%
           :height: 400px
           :align: center
    """
    name = 'pdfimage'
    node_class = pdfimage
    has_content = False
    required_arguments = 1  # the PDF path
    optional_arguments = 0
    final_argument_whitespace = False
    option_spec = {
        'height': directives.length_or_unitless,
        'width': directives.length_or_percentage_or_unitless,
        'align': align_spec,
    }
    def run(self):
        """Resolve the source path and build a pdfimage node from the options."""
        node = self.node_class()
        src = self.arguments[0]
        rel_filepath, filepath = self.env.relfn2path(src)
        # rebuild the document when the referenced PDF changes
        self.env.note_dependency(rel_filepath)
        node['filepath'] = filepath
        node['rel_filepath'] = rel_filepath
        if 'height' in self.options:
            node['height'] = self.options['height']
        if 'width' in self.options:
            node['width'] = self.options['width']
        if 'align' in self.options:
            node['align'] = self.options['align']
        return [node]
| carnotresearch/cr-vision | docs/extensions/pdfimage.py | pdfimage.py | py | 2,504 | python | en | code | 2 | github-code | 13 |
9087834350 | #https://www.acmicpc.net/problem/15489
# Baekjoon 15489: Pascal's Triangle (DP)
#import sys
#input = sys.stdin.readline
# r, c: 1-based row/column of the window's top-left entry; w: window height
r, c, w = map(int, input().split())
limit = r+w-1
# dp[i][j] = binomial coefficient C(i, j), built row by row (Pascal's rule)
dp = [[0]*limit for _ in range(limit)]
for i in range(limit):
    for j in range(i+1):
        if j == 0 or j == i :
            dp[i][j] = 1
        else:
            dp[i][j] = dp[i-1][j-1]+dp[i-1][j]
# Sum the w-row triangular window with apex at (r, c); the slice widens by
# one element per row (cnt elements on the cnt-th row of the window).
result = 0
cnt = 1
for i in range(r-1,limit):
    result += sum(dp[i][c-1:c+cnt-1])
    cnt+=1
print(result)
6808406273 | from __future__ import print_function
import sys, os, re, arcpy, traceback
from arcpy import env
from arcpy.sa import *
from safe_print import Safe_Print
######Created By Brian Mulcahy##########
#Step 5 will create rasters based off the given stream's xs and stream vertices
#If user corrected for backwater and was given initial flood polygons then the script will add fields based on
#If the current stream's flood polygon interacts with the stream it is intersecting's flood polygon
#and if the cross-sections are marked as corrected for backwater.
#this information should be used to redelineate the flood boundary
class WSEL_Step_5:
    """
    Step 5 of the WSEL tool chain: turns each stream's cross-sections (XS)
    and stream vertices into a water-surface-elevation (WSEL) raster via a
    constrained-Delaunay TIN.

    When the user corrected for backwater and supplied initial flood
    polygons, backwater_correction() additionally tags each flood polygon
    with whether it overlaps the flood polygon of the stream it flows into;
    that information should be used to re-delineate the flood boundary.

    Intended to be used as a context manager: __enter__ checks out arcpy
    licenses, unpacks `config` and prepares the geoprocessing environment.
    """
    def __init__(self, config, streams):
        # config: dict of workspace paths / option flags; streams: names to process
        self.streams = streams
        self.config = config
    def __enter__(self):
        """Check out licenses, unpack the config and set up the arcpy env."""
        arcpy.CheckOutExtension("3D")
        arcpy.CheckOutExtension("Spatial")
        self.scratch = self.config['scratch']
        self.scratchgdb = self.config['scratchgdb']
        self.xs_original = self.config['xs_original']
        self.output_workspace = self.config['output_workspace']
        self.xs_dataset = self.config['xs_dataset']
        self.streams_original = self.config['streams_original']
        self.flood_original =self.config['flood_original']
        self.xs_intersect_dataset = self.config['xs_intersect_dataset']
        self.streams_intersect_dataset = self.config['streams_intersect_dataset']
        self.routes_dataset = self.config['routes_dataset']
        self.streams_dataset = self.config['streams_dataset']
        self.vertices_dataset = self.config['vertices_dataset']
        self.sr = self.config['sr']
        self.tin_folder=self.config['tin_folder']
        self.multi=self.config['multiproc']
        self.modelbuilder=self.config['modelbuilder']
        self.backwater=self.config['backwater']
        self.flood_boundary=self.config['flood_boundary']
        self.flood_dataset=self.config['flood_dataset']
        self.wsel_field=self.config['wsel_field']
        self.print_config = {'multi': self.multi, 'modelbuilder': self.modelbuilder}
        self.safe_print = Safe_Print(self.print_config)
        env.scratchWorkspace = self.scratchgdb
        #env.parallelProcessingFactor = "4"
        env.overwriteOutput = True
        env.MResolution = 0.0001
        env.MDomain = "0 10000000"
        env.outputMFlag = "Enabled"
        env.outputZFlag = "Enabled"
        return self
    def __exit__(self, type, value, traceback):
        # NOTE(review): `self.result` is only assigned in processStream(); an
        # exception raised before that will turn into an AttributeError here.
        # Returning a truthy value from __exit__ also suppresses exceptions --
        # confirm this is intended. (`type` shadows the builtin.)
        return self.result
    def points_to_tin(self, points, xs_lines, name):
        """Build a TIN from elevation mass points plus XS hardlines and
        convert it to the <name>_<wsel_field> raster; returns the raster."""
        out_raster = self.output_workspace+name+'_'+self.wsel_field
        self.safe_print.print_out("Converting "+name+" elevation points to Tin")
        tin = self.tin_folder+"\\tin_"+name
        heightfield = "POINT_Z"
        xs_height ="WSEL_REG"
        projection = arcpy.SpatialReference(self.sr)
        tin_out = arcpy.CreateTin_3d(tin, projection, [[points, heightfield , "Mass_Points"],[xs_lines,xs_height,"hardline"]], "CONSTRAINED_DELAUNAY")
        self.safe_print.print_out("Converting "+name+" Tin to Raster")
        raster = arcpy.TinRaster_3d(tin_out, out_raster, "FLOAT", "LINEAR", "CELLSIZE 3", 1)
        return raster
    def backwater_correction(self, points, xs_lines, name):
        """Build the WSEL raster for a backwater-corrected stream.

        If flood boundaries were supplied, also derives a flood polygon for
        this stream, marks overlap with the polygon of the stream it flows
        into (field "Overlap") and tags non-main polygon parts
        (field "flood_main") before clipping the raster to the boundary.
        """
        # NOTE(review): sqlexp3, dis_bound and sql_raster are only used by the
        # commented-out alternative workflow below -- confirm before removing.
        sqlexp ="{0}={1}".format("Backwater", "'no'")
        sqlexp3="Shape_Area"
        sql_intersect ="{0}={1}".format("Route_ID", "'"+name+"'")
        sql_raster ="{0}={1}".format("Overlap", "'no'")
        out_raster = self.output_workspace+name+'_'+self.wsel_field
        keep_fields = ["Overlap","flood_area","flood_main"]
        boundary = self.flood_original+"\\"+name+"_flood_boundary"
        # name of the stream this stream flows into (if any)
        intersect_name = [r[0] for r in arcpy.da.SearchCursor (self.scratchgdb+'\\streams_intersect_all_2', ["Intersects"],sql_intersect)]
        avail_intersect = len(intersect_name)
        if avail_intersect>0 and self.flood_boundary == True:
            intersect_bound = self.flood_original+"\\"+intersect_name[0]+"_flood_boundary"
        temp_bound = self.flood_dataset+"\\"+name+"_flood_temp"
        flood_bound = self.flood_dataset+"\\"+name+"_boundary"
        dis_bound =self.flood_dataset+"\\"+name+"_flood_dis"
        erase1 =self.flood_dataset+"\\"+name+"_flood_erase1"
        erase2 =self.flood_dataset+"\\"+name+"_flood_erase2"
        # snap each vertex point to its nearest cross-section
        pts_layer = arcpy.MakeFeatureLayer_management (points, "pts")
        xs_layer = arcpy.MakeFeatureLayer_management (xs_lines, "xs")
        arcpy.Near_analysis(pts_layer, xs_layer)
        arcpy.AddJoin_management(pts_layer,"NEAR_FID",xs_layer,"OBJECTID")
        arcpy.SelectLayerByAttribute_management(xs_layer,"CLEAR_SELECTION",sqlexp)
        arcpy.SelectLayerByAttribute_management(pts_layer,"CLEAR_SELECTION",sqlexp)
        #arcpy.SelectLayerByAttribute_management(xs_layer,"NEW_SELECTION",sqlexp)
        #arcpy.SelectLayerByAttribute_management(pts_layer,"NEW_SELECTION",sqlexp)
        #if int(arcpy.GetCount_management(xs_layer).getOutput(0)) <= 0:
            #arcpy.SelectLayerByAttribute_management(xs_layer,"CLEAR_SELECTION",sqlexp)
            #arcpy.SelectLayerByAttribute_management(pts_layer,"CLEAR_SELECTION",sqlexp)
        tin = self.tin_folder+"\\tin_"+name
        heightfield = name+"_stream_vertices_feature.POINT_Z"
        xs_height ="WSEL_REG"
        projection = arcpy.SpatialReference(self.sr)
        #THIS COMMENTED OUT CODE WOULD CREATE A TIN FROM THE XS AND PTS USE THIS AS THE BEGINNING OF CREATING A FLOOD POLYGON FROM
        #SCRATCH. WILL NEED TO ADD LOGIC FOR SUBTRACTING LIDAR ELEV
        #if self.lidar == True:
            #tin_out = arcpy.CreateTin_3d(tin, projection, [[pts_layer, heightfield , "Mass_Points"],[xs_layer,xs_height,"hardline"]], "CONSTRAINED_DELAUNAY")
            #raster = arcpy.TinRaster_3d(tin_out, out_raster, "INT", "LINEAR", "CELLSIZE 3", 1)
            #arcpy.RasterToPolygon_conversion(raster, temp_bound, "NO_SIMPLIFY")
            #arcpy.Dissolve_management(temp_bound,dis_bound,"#","#","SINGLE_PART")
        if self.flood_boundary == True:
            # split the flood boundary into overlapped / non-overlapped parts
            if avail_intersect != 0:
                arcpy.AddField_management(boundary, "Overlap", "TEXT",4)
                arcpy.CalculateField_management(boundary, "Overlap", "'no'","PYTHON")
                arcpy.Erase_analysis(boundary, intersect_bound, erase1)
                arcpy.Erase_analysis(boundary,erase1,erase2)
                arcpy.CalculateField_management(erase2, "Overlap", "'yes'","PYTHON")
                arcpy.Merge_management([erase1,erase2],temp_bound)
                arcpy.Delete_management(erase1)
                arcpy.Delete_management(erase2)
            else:
                arcpy.CopyFeatures_management(boundary,temp_bound)
                arcpy.AddField_management(temp_bound, "Overlap", "TEXT",4)
                arcpy.CalculateField_management(temp_bound, "Overlap", "'no'","PYTHON")
            # explode multipart polygons and record each part's area
            arcpy.MultipartToSinglepart_management(temp_bound,flood_bound)
            arcpy.AddField_management(flood_bound, "flood_area", "FLOAT",10,3)
            arcpy.CalculateField_management(flood_bound, "flood_area", "float(!SHAPE.AREA!)","PYTHON")
            arcpy.AddField_management(flood_bound, "flood_main", "TEXT",4)
            arcpy.CalculateField_management(flood_bound, "flood_main", "'no'","PYTHON")
            temp_poly =arcpy.CopyFeatures_management(flood_bound,self.flood_dataset+"\\"+name+"_flood_boundary")
            areaList = [r[0] for r in arcpy.da.SearchCursor (flood_bound, ["flood_area"])]
            if len(areaList)>0:
                # flag every part except the largest one
                # NOTE(review): the flag value 'yes' on the *non*-largest parts
                # together with the field name "flood_main" reads inverted --
                # confirm the intended semantics with downstream consumers.
                max_area = max(areaList)
                sqlexp2 ="{0}<>{1}".format("flood_area", max_area)
                arcpy.MakeFeatureLayer_management (temp_poly, "flood_temp")
                arcpy.SelectLayerByAttribute_management("flood_temp","NEW_SELECTION",sqlexp2)
                arcpy.CalculateField_management("flood_temp", "flood_main", "'yes'","PYTHON")
                #if int(arcpy.GetCount_management("flood_temp").getOutput(0)) > 0:
                    #arcpy.DeleteFeatures_management("flood_temp")
            arcpy.Delete_management(temp_bound)
            #arcpy.Delete_management(dis_bound)
            arcpy.Delete_management(flood_bound)
            fields = [f.name for f in arcpy.ListFields(temp_poly) if not f.required and f.name not in keep_fields ]
            arcpy.DeleteField_management(temp_poly, fields)
        tin_out = arcpy.CreateTin_3d(tin, projection, [[pts_layer, heightfield , "masspoints"],[xs_layer,xs_height,"hardline"]], "CONSTRAINED_DELAUNAY")
        raster = arcpy.TinRaster_3d(tin_out, out_raster, "FLOAT", "LINEAR", "CELLSIZE 1.5", 1)
        if self.flood_boundary == True:
            self.safe_print.print_out("Clipping "+name+"'s raster to Flood Boundary")
            arcpy.MakeFeatureLayer_management(temp_poly, "flood_temp")
            #arcpy.SelectLayerByAttribute_management("flood_temp","NEW_SELECTION",sql_raster)#This will clip the boundary being overlapped by the stream it is flowing into
            outExtractByMask = ExtractByMask(raster, "flood_temp")
            outExtractByMask.save(self.output_workspace+name+'_'+self.wsel_field)
        return
    def raster_extract(self, raster, name):
        """Clip `raster` to the stream's original flood boundary and save it."""
        boundary = self.flood_original+"\\"+name+"_flood_boundary"
        self.safe_print.print_out("Clipping "+name+"'s raster to Flood Boundary")
        outExtractByMask = ExtractByMask(raster, boundary)
        outExtractByMask.save(self.output_workspace+name+'_'+self.wsel_field)
        return
    def processStream(self):
        """Run Step 5 for every stream name in self.streams."""
        all_streams = self.streams
        self.result =[]
        for streams in all_streams:
            name = streams
            self.safe_print.print_out("Step 5 processing "+name)
            stream_vertices = self.vertices_dataset+'/'+name+"_stream_vertices_feature"
            xs = self.xs_dataset+'/'+name+"_xs"
            if self.backwater == True:
                self.backwater_correction(stream_vertices ,xs, name)
            else:
                raster = self.points_to_tin(stream_vertices ,xs, name)
                if self.flood_boundary == True:
                    self.raster_extract(raster, name)
            self.safe_print.print_out("Finished Step 5 for "+name)
        return
| bmulcahy/WSEL-Python-Tool | WSEL_Step_5.py | WSEL_Step_5.py | py | 10,327 | python | en | code | 1 | github-code | 13 |
29728328005 | """
Some solvers for simple linear systems.
Last updated: 10.2.2019
"""
import numpy as np
import math
import pandas as pd
def gj_method(mat, augment_c):
    """Solve a linear system given in augmented form via Gauss-Jordan elimination.

    The input is expected as an augmented matrix, e.g.::

        [ a b c D ]
        [ e f g H ]
        [ i j k L ]

    where the capital letters represent the column(s) to the right of the
    augment-dividing-line.

    WARNING: only 2x2 and 3x3 coefficient matrices are reliably supported.

    Args:
        mat (np.ndarray or np.matrix): an augmented matrix; modified in place
        augment_c (int): the column where the augmented portion of the matrix
            starts (i.e. the number of coefficient columns)

    Returns:
        the matrix with the left side of the augmentation reduced to the
        Identity Matrix; the function will fail with a "divide by zero" error
        in cases with no or infinitely many solutions.

    Raises:
        ValueError: if augment_c lies outside the range of the given matrix.
    """
    if augment_c > mat.shape[1]:
        raise ValueError('You have specified a value of augment_c that is outside of the range of the given matrix.')
    # i iterates through the coefficient columns
    i = 0
    # 'protected' is the row holding the pivot (the 1 of the Identity Matrix)
    protected = 0
    while i < augment_c:
        # Every row except the one containing the protected (pivot) element
        # is a candidate for the row-update operation.
        row_list = [x for x in range(mat.shape[0])]
        del row_list[protected]
        if len(row_list) > 2:
            # For matrices larger than 3x3, pick two candidate rows at random.
            # NOTE(review): np.random.choice may pick the same row twice here;
            # this branch is never reached for the supported 2x2/3x3 sizes.
            rl = []
            # BUG FIX: the loop variable used to be `i`, clobbering the column
            # counter of the enclosing while-loop for larger matrices.
            for _ in range(2):
                rl.append(np.random.choice(row_list))
            row_list = rl
        # Perform the row-update operation on each selected row
        for row in row_list:
            # Row already holds a zero in this column - nothing to eliminate
            if mat[row, i] == 0:
                continue
            # Multiplicands combining the pivot row and the current row so
            # that column i cancels. The coefn's get re-signed below; the
            # mat_coefn's stay as static references to the matrix values.
            coef1 = mat[protected, i]
            coef2 = mat[row, i]
            mat_coef1 = coef1
            mat_coef2 = coef2
            # Flip signs so that the linear combination eliminates column i
            if (mat_coef1 * -coef2) + (mat_coef2 * coef1) == 0:
                coef2 = -1 * coef2
            if (mat_coef1 * coef2) + (mat_coef2 * -coef1) == 0:
                coef1 = -1 * coef1
            # Pull out each of the row vectors ...
            protected_vector = mat[protected].copy()
            row_to_update = mat[row].copy()
            # ... scale them ...
            protected_vector = coef2 * protected_vector
            row_to_update = coef1 * row_to_update
            # ... add them together ...
            row_to_update = row_to_update + protected_vector
            # ... and re-assign the row in its updated form
            mat[row] = row_to_update
        # Shift the pivot down one row and move to the next column
        protected += 1
        i += 1
    # The left side is now "zeroed out"; finish the transformation to the
    # Identity Matrix by dividing each row vector by its pivot value.
    i = 0
    protected = 0
    while i < augment_c:
        mat[protected, :] = mat[protected, :] / mat[protected, i]
        protected += 1
        i += 1
    return mat
class IO_Model:
def __init__(self, closed=False, X=None, A=None, D=None, parameter=None):
""""""
self.closed = closed
self.X = X
self.A = A
self.D = D
self.parameter = parameter
self.multipliers = None
self.production_used = None
def solve_for_X(self):
"""
This function serves as a gateway depending on whether the IO_Model is open or closed
"""
if self.A is None:
raise AttributeError('In order to solve for X, A must be known.')
if self.D is None:
raise AttributeError('In order to solve for X, D must be known')
if not self.closed:
self._open_model_solve_X()
else:
self._closed_model_solve_X()
def solve_for_production_requirements(self):
""""""
if self.A is None:
raise AttributeError('In order to solve for production requirements, A is needed.')
if self.X is None:
raise AttributeError('In order to solve for production requirements, X is needed.')
# Since A gives the amount,in units, of each commodity used to produce 1 unit of each commodity
# and X gives the number of units of each commodity produced...
# AX gives the production spend
self.production_used = self.A * self.X
print(self.production_used)
def _open_model_solve_X(self):
""""""
# The ultimate equation to solve is X = (I-A)^-1 * D
# First, creating the Identity Matrix of the same shape as A
I = np.identity(n=self.A.shape[0])
# Subtracting A from the Identity Matrix and inverting
# If it doesn't work, throw an explicit error explaining the problem
try:
inverted = (I - self.A) ** -1
# (I-A)^-1 has "important economic interpretations"
self.multipliers = inverted
except:
raise RuntimeError('Unable to invert (I-A). The resulting matrix is singular.')
# Multiplying the inverted matrix by D to solve for X
self.X = inverted * self.D
# Setting the amount of production used in this process
self.production_used = self.A * self.X
print(self.X)
def _closed_model_solve_X(self):
""""""
if self.parameter is None:
raise AttributeError('In order to solve a closed model, a parameter needs to be declared.')
# (I-A)X = 0
# First, creating the Identity Matrix of the same shape as A
I = np.identity(n=self.A.shape[0])
# (I - A)
intermediate = I - self.A
#
| trevormcinroe/mathematics | linear_systems.py | linear_systems.py | py | 7,104 | python | en | code | 0 | github-code | 13 |
20533996775 | from __future__ import annotations
from gi.repository import GLib, Gio
import turtlico.utils as utils
from turtlico.locale import _
def compile(input_file: Gio.File, output_file_path: str) -> int:
    """Compile a Turtlico project file to source code and write it to disk.

    Parameters
    ----------
    input_file : Gio.File
        The project file to compile; ``None`` is reported as an error.
    output_file_path : str
        Command-line path of the output file (overwritten when it exists).

    Returns
    -------
    int
        1 when no input file was given, otherwise 0.
        NOTE(review): 0 is returned even when saving the output fails --
        confirm whether a non-zero exit status was intended there.
        (Also note that this function shadows the builtin ``compile``.)
    """
    if input_file is None:
        utils.error(_('No input file specified'))
        return 1
    output_file = Gio.File.new_for_commandline_arg(output_file_path)
    # imported lazily -- presumably to keep CLI startup light; confirm
    import turtlico.lib as lib
    project = lib.ProjectBuffer()
    project.load_from_file(input_file)
    compiler = lib.Compiler(project)
    code, debug_info = compiler.compile(project.code.lines)
    try:
        # delete an existing output file first, then create and write it
        if output_file.query_exists():
            if not output_file.delete():
                utils.error(_('Failed to overwrite output file'))
        outs = output_file.create(Gio.FileCreateFlags.NONE, None)
        outs.write_all(code.encode(), None)
        outs.close()
    except GLib.Error as e:
        utils.error(_('Cannot save output file: {}').format(e))
    return 0
| saytamkenorh/turtlico | turtlico/app/cli.py | cli.py | py | 966 | python | en | code | 3 | github-code | 13 |
25306301567 | import CheckProxy, getFreeProxy, setting
import time
class RegularlyCheck(object):
    """Tops up the Redis-backed proxy pool: scrapes free proxies from the web
    and validates them whenever the stored amount drops below the configured
    minimum (setting.PROXY_REQUIRDE_NUMBER)."""
    def __init__(self):
        self.redis_clien = setting.redis_clien() # Redis database client
        self.get_proxies = getFreeProxy.GetProxy()
        self.check_proxies = CheckProxy.CheckProxy()
    def regularlyGetProxy(self):
        # While the pool holds fewer proxies than required, scrape new free
        # proxies and filter out the dead ones.
        while self.redis_clien.llen('http') < setting.PROXY_REQUIRDE_NUMBER:
            self.get_proxies.run()
            self.check_proxies.run()
        # NOTE(review): this sleep sits *after* the while-loop, so the method
        # refills once and then sleeps once; confirm whether an enclosing
        # `while True:` was intended for truly periodic checking.
        time.sleep(setting.PROXY_UPDATE_TIME) # proxy refresh interval, 5 minutes by default
if __name__ == '__main__':
    # Entry point: top up the pool, then sleep for the configured interval.
    check = RegularlyCheck()
    check.regularlyGetProxy()
| luzehe/proxy_pool | proxy_pool/ProxyPool-1.0/proxy_pool/main.py | main.py | py | 690 | python | en | code | 1 | github-code | 13 |
74098122579 | import numpy as np
# Exercise 1: linear combination of three 3x2 matrices
A = np.array([[1,2],[-1,0],[2,1]])
B = np.array([[1,3],[2,1],[-3,-2]])
C = np.array([[2,5],[0,3],[4,2]])
print(2*A - 3*B + 2*C)
# Exercise 2: matrix products in both orders (3x2 . 2x3 and 2x3 . 3x2)
A = np.array([[2, -1], [1, 0], [-3, 4]])
B = np.array([[1, -2, 5], [3, 4, 0]])
print(A.dot(B))
print(B.dot(A))
# Exercise 3: element-wise check of the transpose identities
# (A+B)^T == A^T + B^T and (AB)^T == B^T A^T
A = np.array([[1,2,3],[4,5,6],[7,8,9]])
B = A.T
print((A+B).T == A.T+B.T)
print((A.dot(B)).T ==B.T.dot(A.T))
def nhap_ma_tran():
    """Read a matrix from stdin: first its dimensions, then all entries
    (left to right, top to bottom) and return it as an ndarray."""
    # Read the matrix dimensions from the keyboard
    str = input("Nhập kích thước ma trận: ").strip()  # NOTE: shadows builtin str
    kich_thuoc = [int(x) for x in str.split()]
    # Read every element of the matrix
    hang_str = input("Nhập tất cả phần tử của ma trận từ trái sang phải từ trên xuống dưới: ")
    hang = [float(x) for x in hang_str.split()]
    ma_tran = np.array(hang).reshape(kich_thuoc)
    return ma_tran
def tich_ma_tran(A, B):
    """Return the matrix product of A and B, or None (after printing a
    message) when the inner dimensions do not match."""
    if A.shape[1] != B.shape[0]:
        print("size of A and B mismatch")
        return None
    return A @ B
# Read two matrices interactively and print their product (None on mismatch)
A = nhap_ma_tran()
B = nhap_ma_tran()
C = tich_ma_tran(A, B)
print(C)
import sympy

# BUG FIX: sympy.Symbol() takes the symbol's *name* as a string; the previous
# code passed the bare names x, y, z, which were undefined at this point and
# raised NameError at runtime.
x = sympy.Symbol('x')
y = sympy.Symbol('y')
z = sympy.Symbol('z')
41897328572 | #Uses python3
import sys
def acyclic(adj):
    """Return 1 if the directed graph given as 0-based adjacency lists
    contains a cycle, 0 otherwise.

    BUG FIX: the previous implementation shared one `visited` list across all
    DFS roots and never re-examined the outgoing edges of an already visited
    vertex, so cycles that did not pass through the currently marked root were
    missed (e.g. adj = [[1], [2], [1]] was reported acyclic). This version
    uses an iterative three-color DFS: an edge to a vertex that is still on
    the current DFS path (GRAY) is a back edge and proves a cycle. Iteration
    also avoids the recursion limit on large graphs.
    """
    WHITE, GRAY, BLACK = 0, 1, 2  # unvisited / on current path / finished
    color = [WHITE] * len(adj)
    for start in range(len(adj)):
        if color[start] != WHITE:
            continue
        color[start] = GRAY
        stack = [(start, iter(adj[start]))]
        while stack:
            node, neighbors = stack[-1]
            advanced = False
            for nxt in neighbors:
                if color[nxt] == GRAY:
                    # back edge onto the current DFS path -> cycle
                    return 1
                if color[nxt] == WHITE:
                    color[nxt] = GRAY
                    stack.append((nxt, iter(adj[nxt])))
                    advanced = True
                    break
            if not advanced:
                # all edges of `node` explored; retire it from the path
                color[node] = BLACK
                stack.pop()
    return 0
def explore(adj,x,visited,marked):
    # Recursive DFS from vertex x: returns True when it can reach a vertex
    # whose `marked` flag is set (i.e. a path back to the root picked in
    # acyclic()).
    # NOTE(review): neighbours already present in `visited` are skipped
    # without inspection and `visited` is shared across all roots, so cycles
    # that do not pass through the currently marked root can be missed --
    # verify against a case such as adj = [[1], [2], [1]].
    if x not in visited:
        visited.append(x)
    for i in range(len(adj[x])):
        if marked[adj[x][i]]==1:
            # reached the marked root again -> cycle through the root
            return True
        if adj[x][i] in visited:
            continue
        label=explore(adj,adj[x][i],visited,marked)
        if label:
            return True
    return False
if __name__ == '__main__':
    # Read all of stdin: first two ints are the vertex and edge counts,
    # followed by m pairs of 1-based directed edges (u, v).
    input = sys.stdin.read()  # NOTE: shadows the builtin input()
    data = list(map(int, input.split()))
    # data=[5,7,1,2,2,3,1,3,3,4,1,4,2,5,3,5]
    n, m = data[0:2]
    data = data[2:]
    edges = list(zip(data[0:(2 * m):2], data[1:(2 * m):2]))
    # build 0-based adjacency lists
    adj = [[] for _ in range(n)]
    for (a, b) in edges:
        adj[a - 1].append(b - 1)
    print(acyclic(adj))
| Shaun10020/Algorithms-on-Graphs | Graph decomposition 2/CS curriculum/acyclicity.py | acyclicity.py | py | 1,045 | python | en | code | 0 | github-code | 13 |
14121956759 | from pyfda.libs.compat import (QWidget, pyqtSignal, QComboBox, QIcon, QSize,
QPushButton, QHBoxLayout, QVBoxLayout)
import numpy as np
import scipy.signal as sig
from scipy.signal import signaltools
from scipy.special import sinc
import pyfda.filterbroker as fb # importing filterbroker initializes all its globals
from pyfda.libs.pyfda_lib import fil_save, round_odd, pprint_log
from pyfda.libs.pyfda_qt_lib import qfilter_warning
from pyfda.libs.pyfda_fft_windows_lib import QFFTWinSelector, get_windows_dict
from pyfda.plot_widgets.plot_fft_win import Plot_FFT_win
from .common import Common, remezord
import logging
logger = logging.getLogger(__name__)
# TODO: Hilbert, differentiator, multiband are missing
# TODO: Improve calculation of F_C and F_C2 using the weights
# TODO: Automatic setting of density factor for remez calculation?
# Automatic switching to Kaiser / Hermann?
# TODO: Parameters for windows are not stored in fil_dict?
__version__ = "2.2"
classes = {'Firwin': 'Windowed FIR'} #: Dict containing class name : display name
class Firwin(QWidget):
FRMT = 'ba' # output format(s) of filter design routines 'zpk' / 'ba' / 'sos'
# currently, only 'ba' is supported for firwin routines
sig_tx = pyqtSignal(object) # local signal between FFT widget and FFTWin_Selector
sig_tx_local = pyqtSignal(object)
from pyfda.libs.pyfda_qt_lib import emit
    def __init__(self):
        """Set up filter-type info, the FFT window selection dict, the modeless
        FFT window widget and the static filter-tree dictionaries."""
        QWidget.__init__(self)
        self.ft = 'FIR'
        # window types offered for the firwin design method
        win_names_list = ["Boxcar", "Rectangular", "Barthann", "Bartlett", "Blackman",
                          "Blackmanharris", "Bohman", "Cosine", "Dolph-Chebyshev", "DPSS",
                          "Flattop", "General Gaussian", "Gauss", "Hamming", "Hann",
                          "Kaiser", "Nuttall", "Parzen", "Triangular", "Tukey"]
        self.cur_win_name = "Kaiser"  # set initial window type
        self.alg = "ichige"           # default minimum-order estimation algorithm
        # initialize windows dict with the list above for firwin window settings
        self.win_dict = get_windows_dict(
            win_names_list=win_names_list,
            cur_win_name=self.cur_win_name)
        # get initial / last setting from dictionary, updating self.win_dict
        self._load_dict()
        # instantiate FFT window with windows dict
        self.fft_widget = Plot_FFT_win(
            win_dict=self.win_dict, sym=True, title="pyFDA FIR Window Viewer")
        # hide window initially, this is modeless i.e. a non-blocking popup window
        self.fft_widget.hide()
        # common base dict for the filter tree plus firwin-specific additions
        c = Common()
        self.rt_dict = c.rt_base_iir
        self.rt_dict_add = {
            'COM': {
                'min': {
                    'msg': ('a',
                            "<br /><b>Note:</b> Filter order is only a rough "
                            "approximation and most likely far too low!")},
                'man': {
                    'msg': ('a', "Enter desired filter order <b><i>N</i></b> and "
                            "<b>-6 dB</b> pass band corner "
                            "frequency(ies) <b><i>F<sub>C</sub></i></b> .")},
                },
            'LP': {'man': {}, 'min': {}},
            'HP': {'man': {'msg': ('a', r"<br /><b>Note:</b> Order needs to be odd!")},
                   'min': {}},
            'BS': {'man': {'msg': ('a', r"<br /><b>Note:</b> Order needs to be odd!")},
                   'min': {}},
            'BP': {'man': {}, 'min': {}},
            }
        self.info = """**Windowed FIR filters**

        are designed by truncating the
        infinite impulse response of an ideal filter with a window function.
        The kind of used window has strong influence on ripple etc. of the
        resulting filter.

        **Design routines:**

        ``scipy.signal.firwin()``
        """
# self.info_doc = [] is set in self._update_UI()
# ------------------- end of static info for filter tree ---------------
# ------------------------------------------------------------------------------
def process_sig_rx(self, dict_sig=None):
"""
Process local signals from / for
- FFT window widget
- qfft_win_select
"""
logger.debug("SIG_RX - vis: {0}\n{1}"
.format(self.isVisible(), pprint_log(dict_sig)))
if dict_sig['id'] == id(self):
logger.warning(f"Stopped infinite loop:\n{pprint_log(dict_sig)}")
# --- signals coming from the FFT window widget or the qfft_win_select
if dict_sig['class'] in {'Plot_FFT_win', 'QFFTWinSelector'}:
if 'closeEvent' in dict_sig: # hide FFT window windget and return
self.hide_fft_wdg()
return
else:
if 'view_changed' in dict_sig and 'fft_win' in dict_sig['view_changed']:
# self._update_fft_window() # TODO: needed?
# local connection to FFT window widget and qfft_win_select
self.emit(dict_sig, sig_name='sig_tx_local')
# global connection to upper hierachies
# send notification that filter design has changed
self.emit({'filt_changed': 'firwin'})
# --------------------------------------------------------------------------
    def construct_UI(self):
        """
        Create additional subwidget(s) needed for filter design:
        These subwidgets are instantiated dynamically when needed in
        select_filter.py using the handle to the filter object, fb.filObj .
        """
        # Combobox for selecting the algorithm to estimate minimum filter order
        self.cmb_firwin_alg = QComboBox(self)
        self.cmb_firwin_alg.setObjectName('wdg_cmb_firwin_alg')
        self.cmb_firwin_alg.addItems(['ichige', 'kaiser', 'herrmann'])
        # Minimum size, can be changed in the upper hierarchy levels using layouts:
        self.cmb_firwin_alg.setSizeAdjustPolicy(QComboBox.AdjustToContents)
        self.cmb_firwin_alg.hide()

        # Combobox / subwidget for selecting the FFT window type
        self.qfft_win_select = QFFTWinSelector(self.win_dict)
        # Minimum size, can be changed in the upper hierarchy levels using layouts:
        # self.qfft_win_select.setSizeAdjustPolicy(QComboBox.AdjustToContents)

        # Checkable button showing / hiding the FFT window viewer widget
        self.but_fft_wdg = QPushButton(self)
        self.but_fft_wdg.setIcon(QIcon(":/fft.svg"))
        but_height = self.qfft_win_select.sizeHint().height()
        self.but_fft_wdg.setIconSize(QSize(but_height, but_height))
        self.but_fft_wdg.setFixedSize(QSize(but_height, but_height))
        self.but_fft_wdg.setToolTip('<span>Show / hide FFT widget (select window type '
                                    ' and display its properties).</span>')
        self.but_fft_wdg.setCheckable(True)
        self.but_fft_wdg.setChecked(False)

        # Assemble the layouts: algorithm combo on top, button + window select below
        self.layHWin1 = QHBoxLayout()
        # self.layHWin1.addWidget(self.cmb_firwin_win)
        # self.layHWin1.addWidget(self.but_fft_wdg)
        self.layHWin1.addWidget(self.cmb_firwin_alg)
        self.layHWin2 = QHBoxLayout()
        self.layHWin2.addWidget(self.but_fft_wdg)
        self.layHWin2.addWidget(self.qfft_win_select)
        self.layVWin = QVBoxLayout()
        self.layVWin.addLayout(self.layHWin1)
        self.layVWin.addLayout(self.layHWin2)
        self.layVWin.setContentsMargins(0, 0, 0, 0)

        # Widget containing all subwidgets (cmbBoxes, Labels, lineEdits)
        self.wdg_fil = QWidget(self)
        self.wdg_fil.setObjectName('wdg_fil')
        self.wdg_fil.setLayout(self.layVWin)

        # ----------------------------------------------------------------------
        # GLOBAL SIGNALS & SLOTs
        # ----------------------------------------------------------------------
        # connect FFT widget to qfft_selector and vice versa and to signals upstream:
        self.fft_widget.sig_tx.connect(self.process_sig_rx)
        self.qfft_win_select.sig_tx.connect(self.process_sig_rx)
        # connect process_sig_rx output to both FFT widgets
        self.sig_tx_local.connect(self.fft_widget.sig_rx)
        self.sig_tx_local.connect(self.qfft_win_select.sig_rx)

        # ----------------------------------------------------------------------
        # SIGNALS & SLOTs
        # ----------------------------------------------------------------------
        self.cmb_firwin_alg.currentIndexChanged.connect(self._update_fft_window)
        self.but_fft_wdg.clicked.connect(self.toggle_fft_wdg)
        # ----------------------------------------------------------------------
# ==============================================================================
    def _update_fft_window(self):
        """ Update window type for FirWin - unneeded at the moment """
        # remember the selected minimum-order estimation algorithm ...
        self.alg = str(self.cmb_firwin_alg.currentText())
        # ... and broadcast that the filter design has changed
        self.emit({'filt_changed': 'firwin'})
# --------------------------------------------------------------------------
    def _load_dict(self):
        """
        Reload window selection and parameters from filter dictionary
        and set UI elements accordingly. load_dict() is called upon
        initialization and when the filter is loaded from disk.
        """
        self.N = fb.fil[0]['N']
        # alg_idx = 0
        # only adopt stored firwin settings when present and actually a dict
        if 'wdg_fil' in fb.fil[0] and 'firwin' in fb.fil[0]['wdg_fil']\
                and type(fb.fil[0]['wdg_fil']['firwin']) is dict:
            self.win_dict = fb.fil[0]['wdg_fil']['firwin']
        # notify FFT window widget and qfft_win_select of the (new) window type
        self.emit({'view_changed': 'fft_win_type'}, sig_name='sig_tx_local')
# --------------------------------------------------------------------------
def _store_dict(self):
"""
Store window and parameter settings using `self.win_dict` in filter dictionary.
"""
if 'wdg_fil' not in fb.fil[0]:
fb.fil[0].update({'wdg_fil': {}})
fb.fil[0]['wdg_fil'].update({'firwin': self.win_dict})
# --------------------------------------------------------------------------
    def _get_params(self, fil_dict):
        """
        Translate parameters from the passed dictionary to instance
        parameters, scaling / transforming them if needed.
        """
        self.N = fil_dict['N']
        # pass band / stop band edge frequencies and corner frequencies
        self.F_PB = fil_dict['F_PB']
        self.F_SB = fil_dict['F_SB']
        self.F_PB2 = fil_dict['F_PB2']
        self.F_SB2 = fil_dict['F_SB2']
        self.F_C = fil_dict['F_C']
        self.F_C2 = fil_dict['F_C2']
        # firwin amplitude specs are linear (not in dBs)
        self.A_PB = fil_dict['A_PB']
        self.A_PB2 = fil_dict['A_PB2']
        self.A_SB = fil_dict['A_SB']
        self.A_SB2 = fil_dict['A_SB2']

        # self.alg = 'ichige' # algorithm for determining the minimum order
        # self.alg = self.cmb_firwin_alg.currentText()
    def _test_N(self):
        """
        Warn the user if the calculated order is too high for a reasonable filter
        design.
        """
        # 1000 taps is a sanity limit; qfilter_warning pops up a dialog and
        # returns False when the user aborts the design.
        if self.N > 1000:
            return qfilter_warning(self, self.N, "FirWin")
        else:
            return True
    def _save(self, fil_dict, arg):
        """
        Convert between poles / zeros / gain, filter coefficients (polynomes)
        and second-order sections and store all available formats in the passed
        dictionary 'fil_dict'.
        """
        fil_save(fil_dict, arg, self.FRMT, __name__)

        try: # has the order been calculated by a "min" filter design?
            fil_dict['N'] = self.N # yes, update filterbroker
        except AttributeError:
            pass
        # Persist the current window settings alongside the coefficients.
        self._store_dict()
# ------------------------------------------------------------------------------
    def firwin(self, numtaps, cutoff, window=None, pass_zero=True,
               scale=True, nyq=1.0, fs=None):
        """
        FIR filter design using the window method. This is more or less the
        same as `scipy.signal.firwin` with the exception that an ndarray with
        the window values can be passed as an alternative to the window name.

        The parameter "width" (specifying a Kaiser window) has been omitted,
        it is not needed here. `fs` is accepted for signature compatibility
        but is not referenced anywhere in the body; frequencies are expressed
        relative to `nyq`.

        This function computes the coefficients of a finite impulse response
        filter. The filter will have linear phase; it will be Type I if
        `numtaps` is odd and Type II if `numtaps` is even.

        Type II filters always have zero response at the Nyquist rate, so a
        ValueError exception is raised if firwin is called with `numtaps` even and
        having a passband whose right end is at the Nyquist rate.

        Parameters
        ----------
        numtaps : int
            Length of the filter (number of coefficients, i.e. the filter
            order + 1). `numtaps` must be even if a passband includes the
            Nyquist frequency.
        cutoff : float or 1D array_like
            Cutoff frequency of filter (expressed in the same units as `nyq`)
            OR an array of cutoff frequencies (that is, band edges). In the
            latter case, the frequencies in `cutoff` should be positive and
            monotonically increasing between 0 and `nyq`. The values 0 and
            `nyq` must not be included in `cutoff`.
        window : ndarray or string
            string: use the window with the passed name from scipy.signal.windows
            ndarray: The window values - this is an addition to the original
            firwin routine.
        pass_zero : bool, optional
            If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
            Otherwise the DC gain is 0.
        scale : bool, optional
            Set to True to scale the coefficients so that the frequency
            response is exactly unity at a certain frequency.
            That frequency is either:
            - 0 (DC) if the first passband starts at 0 (i.e. pass_zero
              is True)
            - `nyq` (the Nyquist rate) if the first passband ends at
              `nyq` (i.e the filter is a single band highpass filter);
              center of first passband otherwise
        nyq : float, optional
            Nyquist frequency. Each frequency in `cutoff` must be between 0
            and `nyq`.

        Returns
        -------
        h : (numtaps,) ndarray
            Coefficients of length `numtaps` FIR filter.

        Raises
        ------
        ValueError
            If any value in `cutoff` is less than or equal to 0 or greater
            than or equal to `nyq`, if the values in `cutoff` are not strictly
            monotonically increasing, or if `numtaps` is even but a passband
            includes the Nyquist frequency.

        See also
        --------
        scipy.firwin
        """
        cutoff = np.atleast_1d(cutoff) / float(nyq)
        # Check for invalid input.
        if cutoff.ndim > 1:
            raise ValueError("The cutoff argument must be at most "
                             "one-dimensional.")
        if cutoff.size == 0:
            raise ValueError("At least one cutoff frequency must be given.")
        if cutoff.min() <= 0 or cutoff.max() >= 1:
            raise ValueError("Invalid cutoff frequency {0}: frequencies must be "
                             "greater than 0 and less than nyq.".format(cutoff))
        if np.any(np.diff(cutoff) <= 0):
            raise ValueError("Invalid cutoff frequencies: the frequencies "
                             "must be strictly increasing.")

        # An odd number of band edges combined with pass_zero determines
        # whether the last band extends up to Nyquist; a Type II filter
        # (even numtaps) cannot have nonzero gain there.
        pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
        if pass_nyquist and numtaps % 2 == 0:
            raise ValueError("A filter with an even number of coefficients must "
                             "have zero response at the Nyquist rate.")

        # Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff
        # is even, and each pair in cutoff corresponds to passband.
        cutoff = np.hstack(([0.0] * pass_zero, cutoff, [1.0] * pass_nyquist))

        # `bands` is a 2D array; each row gives the left and right edges of
        # a passband.
        bands = cutoff.reshape(-1, 2)

        # Build up the coefficients: superposition of ideal (sinc) band
        # responses, centered around the group delay alpha.
        alpha = 0.5 * (numtaps - 1)
        m = np.arange(0, numtaps) - alpha
        h = 0
        for left, right in bands:
            h += right * sinc(right * m)
            h -= left * sinc(left * m)

        if type(window) == str:
            # Get and apply the window function.
            # from scipy.signal.signaltools import get_window
            win = signaltools.get_window(window, numtaps, fftbins=False)
        elif type(window) == np.ndarray:
            win = window
        else:
            # NOTE(review): window=None (the default) also lands here and
            # makes the method return None -- callers always pass a window.
            logger.error("The 'window' was neither a string nor a numpy array, "
                         "it could not be evaluated.")
            return None
        # apply the window function.
        h *= win

        # Now handle scaling if desired.
        if scale:
            # Get the first passband.
            left, right = bands[0]
            if left == 0:
                scale_frequency = 0.0
            elif right == 1:
                scale_frequency = 1.0
            else:
                scale_frequency = 0.5 * (left + right)
            c = np.cos(np.pi * m * scale_frequency)
            s = np.sum(h * c)
            h /= s

        return h
    def _firwin_ord(self, F, W, A, alg):
        """
        Estimate the required filter order from band edges *F*, weights *W* and
        amplitude specs *A*, using the Kaiser formula when a Kaiser window is
        selected and `remezord` (with algorithm *alg*) otherwise.
        """
        # http://www.mikroe.com/chapters/view/72/chapter-2-fir-filters/
        delta_f = abs(F[1] - F[0]) * 2  # transition width, referred to f_Ny
        # delta_A = np.sqrt(A[0] * A[1])
        if "Kaiser" in self.win_dict and self.win_dict['cur_win_name'] == "Kaiser":
            N, beta = sig.kaiserord(20 * np.log10(np.abs(fb.fil[0]['A_SB'])), delta_f)
            # logger.warning(f"N={N}, beta={beta}, A_SB={fb.fil[0]['A_SB']}")
            # Push the computed beta back into the window widget so the UI
            # and the actual design stay in sync.
            self.win_dict["Kaiser"]["par"][0]["val"] = beta
            self.qfft_win_select.led_win_par_0.setText(str(beta))
            self.qfft_win_select.ui2dict_params()  # pass changed parameter to other widgets
        else:
            N = remezord(
                F, W, A, fs=1, alg=alg)[0]
        self.emit({'view_changed': 'fft_win_type'}, sig_name='sig_tx_local')
        return N
    def LPmin(self, fil_dict):
        """Design a lowpass of minimum estimated order from the specs in *fil_dict*."""
        self._get_params(fil_dict)
        self.N = self._firwin_ord([self.F_PB, self.F_SB], [1, 0],
                                  [self.A_PB, self.A_SB], alg=self.alg)
        if not self._test_N():
            return -1
        fil_dict['F_C'] = (self.F_SB + self.F_PB)/2  # average calculated F_PB and F_SB
        self._save(fil_dict,
                   self.firwin(self.N, fil_dict['F_C'], nyq=0.5,
                               window=self.qfft_win_select.get_window(self.N, sym=True)))
def LPman(self, fil_dict):
self._get_params(fil_dict)
if not self._test_N():
return -1
logger.warning(self.win_dict["cur_win_name"])
self._save(fil_dict,
self.firwin(self.N, fil_dict['F_C'], nyq=0.5,
window=self.qfft_win_select.get_window(self.N, sym=True)))
    def HPmin(self, fil_dict):
        """Design a highpass of minimum estimated order (order forced odd, Type I)."""
        self._get_params(fil_dict)
        N = self._firwin_ord([self.F_SB, self.F_PB], [0, 1],
                             [self.A_SB, self.A_PB], alg=self.alg)
        self.N = round_odd(N)  # enforce odd order
        if not self._test_N():
            return -1
        fil_dict['F_C'] = (self.F_SB + self.F_PB)/2  # average calculated F_PB and F_SB
        self._save(fil_dict,
                   self.firwin(self.N, fil_dict['F_C'], pass_zero=False, nyq=0.5,
                               window=self.qfft_win_select.get_window(self.N, sym=True)))
    def HPman(self, fil_dict):
        """Design a highpass with manual order (order forced odd, Type I)."""
        self._get_params(fil_dict)
        self.N = round_odd(self.N)  # enforce odd order
        if not self._test_N():
            return -1
        self._save(fil_dict,
                   self.firwin(self.N, fil_dict['F_C'], pass_zero=False, nyq=0.5,
                               window=self.qfft_win_select.get_window(self.N, sym=True)))
# For BP and BS, F_PB and F_SB have two elements each
    def BPmin(self, fil_dict):
        """Design a bandpass of minimum estimated order (F_PB / F_SB have two elements each)."""
        self._get_params(fil_dict)
        self.N = remezord([self.F_SB, self.F_PB, self.F_PB2, self.F_SB2], [0, 1, 0],
                          [self.A_SB, self.A_PB, self.A_SB2], fs=1, alg=self.alg)[0]
        if not self._test_N():
            return -1
        # Place the cutoffs midway between pass- and stopband edges.
        fil_dict['F_C'] = (self.F_SB + self.F_PB)/2  # average calculated F_PB and F_SB
        fil_dict['F_C2'] = (self.F_SB2 + self.F_PB2)/2
        self._save(fil_dict,
                   self.firwin(self.N, [fil_dict['F_C'], fil_dict['F_C2']], nyq=0.5,
                               pass_zero=False,
                               window=self.qfft_win_select.get_window(self.N, sym=True)))
    def BPman(self, fil_dict):
        """Design a bandpass with manual order between F_C and F_C2."""
        self._get_params(fil_dict)
        if not self._test_N():
            return -1
        self._save(fil_dict,
                   self.firwin(self.N, [fil_dict['F_C'], fil_dict['F_C2']], nyq=0.5,
                               pass_zero=False,
                               window=self.qfft_win_select.get_window(self.N, sym=True)))
    def BSmin(self, fil_dict):
        """Design a bandstop of minimum estimated order (order forced odd, Type I)."""
        self._get_params(fil_dict)
        N = remezord([self.F_PB, self.F_SB, self.F_SB2, self.F_PB2], [1, 0, 1],
                     [self.A_PB, self.A_SB, self.A_PB2], fs=1, alg=self.alg)[0]
        self.N = round_odd(N)  # enforce odd order
        if not self._test_N():
            return -1
        # Place the cutoffs midway between pass- and stopband edges.
        fil_dict['F_C'] = (self.F_SB + self.F_PB) / 2  # average calculated F_PB and F_SB
        fil_dict['F_C2'] = (self.F_SB2 + self.F_PB2) / 2
        self._save(fil_dict,
                   self.firwin(self.N, [fil_dict['F_C'], fil_dict['F_C2']],
                               window=self.qfft_win_select.get_window(self.N, sym=True),
                               pass_zero=True, nyq=0.5))
    def BSman(self, fil_dict):
        """Design a bandstop with manual order (order forced odd, Type I)."""
        self._get_params(fil_dict)
        self.N = round_odd(self.N)  # enforce odd order
        if not self._test_N():
            return -1
        self._save(fil_dict,
                   self.firwin(self.N, [fil_dict['F_C'], fil_dict['F_C2']],
                               window=self.qfft_win_select.get_window(self.N, sym=True),
                               pass_zero=True, nyq=0.5))
# ------------------------------------------------------------------------------
    def toggle_fft_wdg(self):
        """
        Show / hide FFT widget depending on the state of the corresponding button
        When widget is shown, trigger an update of the window function.
        """
        if self.but_fft_wdg.isChecked():
            self.fft_widget.show()
            # Refresh the window display, which may be stale while hidden.
            self.emit({'view_changed': 'fft_win_type'}, sig_name='sig_tx_local')
        else:
            self.fft_widget.hide()
# --------------------------------------------------------------------------
    def hide_fft_wdg(self):
        """
        The closeEvent caused by clicking the "x" in the FFT widget is caught
        there and routed here to only hide the window
        """
        # Keep the toggle button state consistent with the hidden widget.
        self.but_fft_wdg.setChecked(False)
        self.fft_widget.hide()
# ------------------------------------------------------------------------------
def main():
    """Build a minimal Qt application hosting the Firwin widget for manual testing."""
    import sys
    from pyfda.libs.compat import QApplication, QFrame

    app = QApplication(sys.argv)

    # instantiate filter widget
    filt = Firwin()
    filt.construct_UI()
    wdg_firwin = getattr(filt, 'wdg_fil')

    layVDynWdg = QVBoxLayout()
    layVDynWdg.addWidget(wdg_firwin, stretch=1)

    filt.LPman(fb.fil[0])  # design a low-pass with parameters from global dict
    print(fb.fil[0][filt.FRMT])  # return results in default format

    frmMain = QFrame()
    frmMain.setFrameStyle(QFrame.StyledPanel | QFrame.Sunken)
    frmMain.setLayout(layVDynWdg)

    mainw = frmMain
    mainw.show()
    app.exec_()
if __name__ == "__main__":
    '''test using "python -m pyfda.filter_widgets.firwin" '''
    # Manual smoke-test entry point.
    main()
| chipmuenk/pyfda | pyfda/filter_widgets/firwin.py | firwin.py | py | 23,890 | python | en | code | 601 | github-code | 13 |
17053241774 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.InsAgreementDTO import InsAgreementDTO
from alipay.aop.api.domain.InsurePlanDTO import InsurePlanDTO
class InsureRecommResultDTO(object):
    """Insurance recommendation result (Alipay OpenAPI domain model).

    Each public attribute maps 1:1 onto a key of the wire-format dict.
    ``agreement_list`` and ``insure_plans`` hold lists of nested domain
    objects; everything else is a plain scalar.  The generated
    getter/setter boilerplate is collapsed into table-driven helpers; the
    public interface (all properties, ``to_alipay_dict`` and
    ``from_alipay_dict``) is unchanged.

    Fix: ``to_alipay_dict`` no longer rewrites the instance's lists in
    place while serialising (the generated code replaced each nested DTO
    with its dict form as a side effect).
    """

    # All wire-format field names, in payload order.
    _FIELDS = (
        'agreement_list', 'inst_id', 'inst_logo', 'inst_name',
        'inst_short_name', 'insure_plans', 'premium_payer_type', 'prod_no',
        'product_code', 'product_desc', 'product_info_url', 'product_name',
        'product_plan_id', 'recommend_flow_id', 'show_uninsured_option',
        'sp_no',
    )

    def __init__(self):
        # Every field starts unset; writes go through the properties below.
        for name in self._FIELDS:
            setattr(self, '_' + name, None)

    # ----- list-valued fields (elements are nested DTOs) -----------------

    @property
    def agreement_list(self):
        return self._agreement_list

    @agreement_list.setter
    def agreement_list(self, value):
        # Accept ready-made DTOs or raw dicts; non-list input is ignored,
        # matching the behaviour of the generated code.
        if isinstance(value, list):
            self._agreement_list = [
                i if isinstance(i, InsAgreementDTO)
                else InsAgreementDTO.from_alipay_dict(i)
                for i in value
            ]

    @property
    def insure_plans(self):
        return self._insure_plans

    @insure_plans.setter
    def insure_plans(self, value):
        if isinstance(value, list):
            self._insure_plans = [
                i if isinstance(i, InsurePlanDTO)
                else InsurePlanDTO.from_alipay_dict(i)
                for i in value
            ]

    # ----- scalar fields: straightforward get/set pairs ------------------

    inst_id = property(lambda self: self._inst_id,
                       lambda self, value: setattr(self, '_inst_id', value))
    inst_logo = property(lambda self: self._inst_logo,
                         lambda self, value: setattr(self, '_inst_logo', value))
    inst_name = property(lambda self: self._inst_name,
                         lambda self, value: setattr(self, '_inst_name', value))
    inst_short_name = property(lambda self: self._inst_short_name,
                               lambda self, value: setattr(self, '_inst_short_name', value))
    premium_payer_type = property(lambda self: self._premium_payer_type,
                                  lambda self, value: setattr(self, '_premium_payer_type', value))
    prod_no = property(lambda self: self._prod_no,
                       lambda self, value: setattr(self, '_prod_no', value))
    product_code = property(lambda self: self._product_code,
                            lambda self, value: setattr(self, '_product_code', value))
    product_desc = property(lambda self: self._product_desc,
                            lambda self, value: setattr(self, '_product_desc', value))
    product_info_url = property(lambda self: self._product_info_url,
                                lambda self, value: setattr(self, '_product_info_url', value))
    product_name = property(lambda self: self._product_name,
                            lambda self, value: setattr(self, '_product_name', value))
    product_plan_id = property(lambda self: self._product_plan_id,
                               lambda self, value: setattr(self, '_product_plan_id', value))
    recommend_flow_id = property(lambda self: self._recommend_flow_id,
                                 lambda self, value: setattr(self, '_recommend_flow_id', value))
    show_uninsured_option = property(lambda self: self._show_uninsured_option,
                                     lambda self, value: setattr(self, '_show_uninsured_option', value))
    sp_no = property(lambda self: self._sp_no,
                     lambda self, value: setattr(self, '_sp_no', value))

    # ----- (de)serialisation ---------------------------------------------

    def to_alipay_dict(self):
        """Serialise to a plain dict, skipping unset / falsy fields.

        Nested DTOs are serialised into *new* lists / dicts; ``self`` is
        not modified.
        """
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                # Matches the generated code: falsy values are omitted.
                continue
            if isinstance(value, list):
                params[name] = [
                    i.to_alipay_dict() if hasattr(i, 'to_alipay_dict') else i
                    for i in value
                ]
            elif hasattr(value, 'to_alipay_dict'):
                params[name] = value.to_alipay_dict()
            else:
                params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a DTO from a wire-format dict; returns None for empty input."""
        if not d:
            return None
        o = InsureRecommResultDTO()
        for name in InsureRecommResultDTO._FIELDS:
            if name in d:
                # Assign through the property so list fields are converted.
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/InsureRecommResultDTO.py | InsureRecommResultDTO.py | py | 10,269 | python | en | code | 241 | github-code | 13 |
14645882675 | from sqlalchemy import Boolean, Column, ForeignKey, Identity, Integer, String, Table
from stripe_openapi.file import File
from . import metadata
# Table definition generated from the Stripe OpenAPI spec for the
# `issuing_dispute_canceled_evidence` object.  Each column mirrors one field
# of the evidence payload; all evidence fields are nullable.
IssuingDisputeCanceledEvidenceJson = Table(
    "issuing_dispute_canceled_evidencejson",
    metadata,
    Column(
        "additional_documentation",
        File,
        ForeignKey("File"),
        comment="(ID of a [file upload](https://stripe.com/docs/guides/file-upload)) Additional documentation supporting the dispute",
        nullable=True,
    ),
    Column(
        "canceled_at", Integer, comment="Date when order was canceled", nullable=True
    ),
    Column(
        "cancellation_policy_provided",
        Boolean,
        comment="Whether the cardholder was provided with a cancellation policy",
        nullable=True,
    ),
    Column(
        "cancellation_reason",
        String,
        comment="Reason for canceling the order",
        nullable=True,
    ),
    Column(
        "expected_at",
        Integer,
        comment="Date when the cardholder expected to receive the product",
        nullable=True,
    ),
    Column(
        "explanation",
        String,
        comment="Explanation of why the cardholder is disputing this transaction",
        nullable=True,
    ),
    Column(
        "product_description",
        String,
        comment="Description of the merchandise or service that was purchased",
        nullable=True,
    ),
    Column(
        "product_type",
        String,
        comment="Whether the product was a merchandise or service",
        nullable=True,
    ),
    Column(
        "return_status",
        String,
        comment="Result of cardholder's attempt to return the product",
        nullable=True,
    ),
    Column(
        "returned_at",
        Integer,
        comment="Date when the product was returned or attempted to be returned",
        nullable=True,
    ),
    Column("id", Integer, primary_key=True, server_default=Identity()),
)

# Fix: __all__ must list the module-level name that actually exists; the
# generated value "issuing_dispute_canceled_evidence.json" is not a valid
# identifier and broke `from ... import *`.
__all__ = ["IssuingDisputeCanceledEvidenceJson"]
| offscale/stripe-sql | stripe_openapi/issuing_dispute_canceled_evidence.py | issuing_dispute_canceled_evidence.py | py | 2,018 | python | en | code | 1 | github-code | 13 |
27308901555 | import functools
import click
from clef.esgf import esgf_query
from clef.helpers import load_vocabularies
def tidy_facet_count(v):
    """Strip the interleaved counts from a Solr facet list.

    The ESGF/Solr facet response interleaves values and their counts as
    ``[value0, count0, value1, count1, ...]``; keep every second element.
    """
    return v[0::2]
@functools.lru_cache()
def get_esgf_facets(project):
    """Query the ESGF search API for all facet values of *project*.

    Results are memoised per project because the remote query is slow; the
    interleaved counts in the response are stripped by tidy_facet_count().
    """
    q = esgf_query(limit=0, project=project, type="Dataset", facets="*")
    q = {k: tidy_facet_count(v) for k, v in q["facet_counts"]["facet_fields"].items()}
    return q
# CLI facet definitions: each entry maps a facet name to its short option
# aliases, the click help text and whether the values come from a controlled
# vocabulary.  A "one" key marks options that accept a single value instead
# of multiple ones (see CordexCommand below).
cli_facets = {
    "domain": {"short": ["-d"], "help": "CORDEX region name", "controlled_vocab": True},
    "experiment": { "short": ["-e"],
        "help": "Experiment",
        "controlled_vocab": True,
    },
    "driving_experiment": { "short": ["-dex"],
        "help": "CMIP5 experiment of driving GCM or 'evaluation' for re-analysis",
        "controlled_vocab": True,
    },
    "driving_model": { "short": ["-dmod"],
        "help": "Model/analysis used to drive the model (eg. ECMWFERAINT)",
        "controlled_vocab": True,
    },
    "rcm_name": {"short": ["-m"], "help": "Identifier of the CORDEX Regional Climate Model", "controlled_vocab": True},
    "rcm_version": {"short": ["-rcmv"],
        "help": "Identifier for reruns with perturbed parameters or smaller RCM release upgrades",
        "controlled_vocab": True,
    },
    "variable": {"short": ["-v"], "help": "Variable name in file", "controlled_vocab": True},
    "time_frequency": {"short": ["-f"], "help": "Output frequency indicator", "controlled_vocab": True},
    "ensemble": {"short": ["-en"],
        "help": "Ensemble member of the driving GCM",
        "controlled_vocab": True,
    },
    "version": {"short": ['-vrs'], "help": "Data publication version", "controlled_vocab": True},
    "cf_standard_name": {"short": ['-cf'], "help": "CF-Conventions name of the variable",
        "controlled_vocab": True},
    "experiment_family": {"short": ['-ef'], 'one': True, "controlled_vocab": True,
        "help": "Experiment family: All, Historical, RCP"},
    "institute": { "short": ['-inst'],
        "help": "identifier for the institution that is responsible for the scientific aspects of the CORDEX simulation",
        "controlled_vocab": True,
    }
}
class CordexCommand(click.Command):
    """click.Command that attaches all CORDEX facet options at construction.

    One ``--<facet>`` option is created per entry of ``cli_facets``; facets
    with a controlled vocabulary get a ``click.Choice`` built from the local
    vocabulary files.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # facets = get_esgf_facets(project="CORDEX,CORDEX-Adjust,CORDEX-ESD,CORDEXReklies")
        facets = load_vocabularies('CORDEX')
        # CCAM-1391M is missing from the official vocabulary.  Guard the
        # append: if load_vocabularies ever returns a shared/cached list,
        # unconditional appends would add a duplicate choice per instantiation.
        if 'CCAM-1391M' not in facets['rcm_name']:
            facets['rcm_name'].append('CCAM-1391M')
        for name, info in cli_facets.items():
            # Facets are multi-valued unless explicitly marked with "one".
            opt = click.Option(
                [f"--{name}"] + info['short'],
                help=info["help"],
                multiple='one' not in info,
                metavar="FACET",
            )
            if info.get("controlled_vocab", False):
                opt.type = click.Choice(facets[name], case_sensitive=False)
            self.params.append(opt)
        opt = click.Option(
            ["--and", "and_attr"],
            multiple=True,
            type=click.Choice(cli_facets.keys()),
            help="Attributes for which we want to add AND filter, i.e. -v tasmin -v tasmax --and variable will return only model/ensemble that have both",
        )
        self.params.append(opt)
| coecms/clef | clef/cordex.py | cordex.py | py | 3,147 | python | en | code | 7 | github-code | 13 |
11996856653 | # -*- coding: utf-8 -*-
# DISTRIBUTION STATEMENT A. Approved for public release. Distribution is unlimited.
# This material is based upon work supported under Air Force Contract No. FA8702-15-D-0001.
# Any opinions,findings, conclusions or recommendations expressed in this material are those
# of the author(s) and do not necessarily reflect the views of the Centers for Disease Control.
# (c) 2020 Massachusetts Institute of Technology.
# The software/firmware is provided to you on an As-Is basis
# Delivered to the U.S. Government with Unlimited Rights, as defined in DFARS Part 252.227-7013
# or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government rights in this work
# are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed above. Use of this work
# other than as specifically authorized by the U.S. Government may violate any copyrights that
# exist in this work.
# Copyright (c) 2020 Massachusetts Institute of Technology
# SPDX short identifier: MIT
# Developed as part of: SimAEN, 2020
# Authors: DI25756, JO26228, ED22162
import WorkflowModel as simaen
import readConfig
from multiConfig import generateConfig
import uuid
#from processTreeReduced import process
from webUIprocess import process
#from DBProcess import process
from random import random
import json
# Read the default configuration
config = readConfig.default()

# Specify the number of iterations for each configuration
trials = 1

# Parameter sweep values.  NOTE(review): list-valued entries appear to be
# expanded by generateConfig() into a cartesian product (subject to the
# `groups` coupling below) -- confirm against multiConfig.py.
config['starting_cases'] = [200, 500, 800]
config['test_delay'] = [0, 2, 4]
config['p_running_app'] = [0, 0.25, 0.5, 0.75]
config['p_app_detects_generator'] = [0.36, 0.67, 0.86]
config['false_discovery_rate'] = [0.122, 0.23, 0.3]
config['p_upload_key_given_positive_test'] = 0.5
config['p_mask_given_norm'] = [0.25, 0.5, 0.75]
config['mean_new_cases'] = [2.1, 2.5, 2.9]
config['mean_total_contacts'] = [2.1, 2.7, 3.2]
config['p_maximal_restriction_given_PH_call'] = [0.5, 0.9]
config['p_maximal_restriction_given_AEN_notification'] = [0.5, 0.9]
config['p_vaccinated'] = [0, 0.2, 0.5]
config['n_contact_tracers'] = [10,100]
config['key_upload_requires_call'] = [0,1]

# Specify variables that change in unison
# Example: groups = (["p_app_detects_generator","false_discovery_rate"],)
groups = (["p_app_detects_generator","false_discovery_rate"],['mean_new_cases','mean_total_contacts'])
# One identifier groups every configuration produced by this run.
# Fix: the original bound the string to the name `uuid`, shadowing the
# uuid module; an unused `values` list was also removed.
run_id = str(uuid.uuid4())

total_written = 0
for n, setup in enumerate(generateConfig(config, groups=groups)):
    for i in range(trials):
        total_written += 1
        setup['config_group_num'] = n
        setup['config_num_in_group'] = i
        setup['uuid'] = run_id
        # NOTE(review): output path is hard-coded to one Windows user profile.
        with open(r'C:/Users\DI25756\PycharmProjects\CDC\grid\configs/'+repr(n%10000)+'.txt','a') as fh:
            for var in setup:
                fh.write(var+':'+repr(setup[var])+',')
            fh.write('\n')
import os
import time

import pandas as pd
import requests
# API endpoints and demo credentials for the local Flask server.
app_url = "http://localhost:5000"
auth_route = "/login"
upload_route = "/update/data"

username = "user1"
password = "password123"

print(f"Logging into API now @ {app_url + auth_route}")

# Authenticate and retrieve JWT token
response = requests.post(
    app_url + auth_route,
    json={"username": username, "password": password}
)

print("Response text: \n", response.text)

# Check if login was successful
if response.status_code != 200:
    print("Authentication failed!")
    exit()

token = response.json()["access_token"]

# Load data from the Excel file
try:
    print("Loading Excel File!")
    # NOTE(review): this line requires a top-level "import os"; without it
    # the script raises NameError here.
    df = pd.read_excel(
        os.path.join(os.path.curdir, "event_schedule.xlsx"),
        engine="openpyxl",
        index_col="Time Block",
        parse_dates=True,
    )
    # Convert the index to the correct datetime format and then to ISO format
    df.index = pd.to_datetime(df.index).tz_localize('America/New_York').tz_convert('UTC')
    df.index = df.index.strftime('%Y-%m-%dT%H:%M:%SZ')  # ISO format
except PermissionError:
    # Excel keeps the file locked while it is open in the editor.
    print("You forgot to close the Excel file! Please try again...")
    exit()

# Convert DataFrame to a dictionary for JSON payload
post_this_dict = df.to_dict(orient="index")
print("Extracting data from the Excel file was successful!")
print(f"Posting data to API now @ {app_url + upload_route}")

# Set up headers with the JWT token
headers = {"Authorization": f"Bearer {token}"}

print(post_this_dict)

# Post the data to the Flask API
r = requests.post(app_url + upload_route, json=post_this_dict, headers=headers)
print("Server response:", r.text)

# Check the status of the data upload
if r.status_code == 200:
    print(f"Data upload successful. Check in web browser: {app_url}/payload/current")
    time.sleep(10)
else:
    print(f"Error uploading data. Status code: {r.status_code}")
    exit()
| bbartling/demand-response-research | old/posting_script/posting_script.py | posting_script.py | py | 1,896 | python | en | code | 1 | github-code | 13 |
20386972028 | import numpy as np
import tensorflow as tf
import os
import matplotlib.pyplot as plt
import cv2 as cv
from tensorflow import keras
from skimage.io import imread , imshow
from skimage.transform import resize
from tqdm import tqdm
import random
# Input geometry: the U-Net below expects fixed 128x128 RGB tiles.
IMG_WIDTH = 128
IMG_HIGHT = 128
IMG_CHANNEL = 3

TRAIN_PATH = 'stage1_train/'
TEST_PATH = 'stage1_test/'

# Each sample lives in its own sub-directory named by its id (DSB 2018 layout).
train_ids = next(os.walk(TRAIN_PATH))[1]
test_ids = next(os.walk(TEST_PATH))[1]
# Pre-allocate image and mask arrays.
# Fix: np.bool was removed in NumPy 1.24 -- use the builtin bool dtype.
X_train = np.zeros((len(train_ids), IMG_WIDTH , IMG_HIGHT , IMG_CHANNEL) , dtype = np.uint8)
Y_train = np.zeros((len(train_ids), IMG_WIDTH, IMG_HIGHT, 1), dtype=bool)

print('resizing')
for n , id_ in tqdm(enumerate(train_ids) , total = len(train_ids)):
    path = TRAIN_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[: , : , :IMG_CHANNEL]
    img = resize(img ,(IMG_HIGHT ,IMG_WIDTH) , mode='constant' , preserve_range = True)
    X_train[n] = img
    # Merge all individual nucleus masks into one binary mask per image.
    mask = np.zeros((IMG_HIGHT, IMG_WIDTH, 1), dtype=bool)
    for i in next(os.walk(path + '/masks/'))[2]:
        mask_ = imread(path + '/masks/' + i)
        mask_ = np.expand_dims(resize(mask_ , (IMG_HIGHT , IMG_WIDTH) , mode = 'constant' ,
                                      preserve_range = True), axis = -1)
        mask = np.maximum(mask , mask_)
    Y_train[n] = mask

X_test = np.zeros((len(test_ids) , IMG_HIGHT , IMG_HIGHT , IMG_CHANNEL) , dtype = np.uint8)
sizes_test = []
print('resizing test')
for n , id_ in tqdm(enumerate(test_ids) , total = len(test_ids)):
    path = TEST_PATH + id_
    img = imread(path + '/images/' + id_ + '.png')[: , : , :IMG_CHANNEL]
    img = resize(img ,(IMG_HIGHT ,IMG_WIDTH) , mode='constant' , preserve_range = True)
    X_test[n] = img

# Show a random training image as a sanity check.
# Fix: random.randint is inclusive at both ends, so len(train_ids) was a
# possible (out-of-range) index.
image_x = random.randint(0, len(train_ids) - 1)
imshow(X_train[image_x])
plt.show()
# ------------------------------------------------------------- U-Net model
# Contracting path: five blocks of (conv 3x3, dropout, conv 3x3) with
# doubling filter counts 16 -> 256, each followed by 2x2 max-pooling.
inputs = tf.keras.layers.Input((IMG_WIDTH , IMG_HIGHT , IMG_CHANNEL))
s = tf.keras.layers.Lambda(lambda x: x/255)(inputs)  # normalise uint8 -> [0, 1]

c1 = tf.keras.layers.Conv2D(16 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(s)
c1 = tf.keras.layers.Dropout(0.1)(c1)
c1 = tf.keras.layers.Conv2D(16 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(c1)
p1 = tf.keras.layers.MaxPooling2D((2,2))(c1)

c2 = tf.keras.layers.Conv2D(32 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.1)(c2)
c2 = tf.keras.layers.Conv2D(32 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(c2)
p2 = tf.keras.layers.MaxPooling2D((2,2))(c2)

c3 = tf.keras.layers.Conv2D(64 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.2)(c3)
c3 = tf.keras.layers.Conv2D(64 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(c3)
p3 = tf.keras.layers.MaxPooling2D((2,2))(c3)

c4 = tf.keras.layers.Conv2D(128 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.2)(c4)
c4 = tf.keras.layers.Conv2D(128 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(c4)
p4 = tf.keras.layers.MaxPooling2D((2,2))(c4)

# Bottleneck.
c5 = tf.keras.layers.Conv2D(256 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.3)(c5)
c5 = tf.keras.layers.Conv2D(256 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding='same')(c5)

# Expanding path: transposed convs upsample, skip connections concatenate
# the matching encoder feature maps.
u6 = tf.keras.layers.Conv2DTranspose(128 , (2,2) , strides = (2,2) ,padding = 'same')(c5)
u6 = tf.keras.layers.concatenate([u6,c4])
c6 = tf.keras.layers.Conv2D(128 , (3,3) , activation = 'relu' , kernel_initializer ='he_normal' , padding = 'same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(128 ,(3,3) , activation = 'relu' , kernel_initializer = 'he_normal' , padding = 'same')(c6)

u7 = tf.keras.layers.Conv2DTranspose(64 , (2,2) , strides = (2,2) ,padding = 'same')(c6)
u7 = tf.keras.layers.concatenate([u7,c3])
c7 = tf.keras.layers.Conv2D(64 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding = 'same')(u7)
c7 = tf.keras.layers.Dropout(0.1)(c7)
c7 = tf.keras.layers.Conv2D(64 ,(3,3) , activation = 'relu' , kernel_initializer = 'he_normal' , padding = 'same')(c7)

# NOTE(review): this transpose uses 64 filters while the matching encoder
# level (c2) has 32 -- possibly unintended, kept as-is to preserve the model.
u8 = tf.keras.layers.Conv2DTranspose(64 , (2,2) , strides = (2,2) ,padding = 'same')(c7)
u8 = tf.keras.layers.concatenate([u8,c2])
c8 = tf.keras.layers.Conv2D(32 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding = 'same')(u8)
c8 = tf.keras.layers.Dropout(0.1)(c8)
c8 = tf.keras.layers.Conv2D(32 ,(3,3) , activation = 'relu' , kernel_initializer = 'he_normal' , padding = 'same')(c8)

u9 = tf.keras.layers.Conv2DTranspose(16 , (2,2) , strides = (2,2) ,padding = 'same')(c8)
u9 = tf.keras.layers.concatenate([u9,c1] , axis =3)
c9 = tf.keras.layers.Conv2D(16 , (3,3) , activation = 'relu' , kernel_initializer='he_normal' , padding = 'same')(u9)
c9 = tf.keras.layers.Dropout(0.1)(c9)
c9 = tf.keras.layers.Conv2D(16 ,(3,3) , activation = 'relu' , kernel_initializer = 'he_normal' , padding = 'same')(c9)

# Per-pixel sigmoid for binary segmentation.
outputs = tf.keras.layers.Conv2D(1 , (1,1) , activation = 'sigmoid')(c9)

model = tf.keras.Model(inputs = [inputs] , outputs = [outputs])
model.compile(optimizer = 'adam' , loss = 'binary_crossentropy' , metrics = ['accuracy'])
model.summary()
# NOTE(review): `checkpointer` is created but never passed to model.fit
# (the callbacks list is commented out), so no checkpoints are written.
checkpointer = tf.keras.callbacks.ModelCheckpoint('model_for_nuclie.hs' , verbose = 1 , save_best_only = True)
#callbacks = [tf.keras.callbacks.EarlyStopping(patience = 2 , monitor = 'val_loss'),
#tf.keras.callbacks.TensorBoard(log_dir = 'logs')]

res = model.fit(X_train , Y_train , validation_split = 0.1 , batch_size = 16 , epochs = 1 )

# Fix: random.randint is inclusive at both ends, so using len(...) as the
# upper bound could produce an out-of-range index.
idx = random.randint(0, len(X_train) - 1)

# Predict on the 90/10 train/validation split and on the test set.
preds_train = model.predict(X_train[ :int(X_train.shape[0]*0.9)] , verbose = 1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):] , verbose = 1)
preds_test = model.predict(X_test , verbose =1)

# Binarise the sigmoid outputs at 0.5.
pred_train_t = (preds_train>0.5).astype(np.uint8)
pred_val_t = (preds_val>0.5).astype(np.uint8)
pred_test_t = (preds_test>0.5).astype(np.uint8)

ix = random.randint(0, len(pred_test_t) - 1)
imshow(np.squeeze(preds_test[ix]))
plt.show()
| Onkarsus13/Cell-Detection | uCNN.py | uCNN.py | py | 6,196 | python | en | code | 0 | github-code | 13 |
21680114405 | import os
import requests
import zipfile
import sqlite3
import pandas as pd
import isbnlib
from dotenv import load_dotenv
from datetime import date
# Pull configuration from .env / environment variables.
load_dotenv()
url = os.getenv("ISBN_URL")  # product dump download URL (used by update_data)
pwd = os.getenv("ISBN_PWD")  # password for the downloaded zip (used by extract)
def update_data():
    """Download the product list (zip) and refresh the database.

    Skips the download when it already ran today (tracked in
    ``last_run.tmp``). On a successful download the archive is extracted,
    the SQLite database is rebuilt and the run date is recorded.
    """
    # Note: file contains only Kierrätyskeskus products (warehouse = 10)
    try:
        file = open("last_run.tmp", "r")
    except FileNotFoundError:
        # First run: create an empty marker file so the read below succeeds.
        open("last_run.tmp", "x").close()
        file = open("last_run.tmp", "r")
    last_download = file.readline()
    file.close()
    if str(date.today()) != last_download:
        print("\nDownloading data...")
        try:
            r = requests.get(url, allow_redirects=False, timeout=(5, 15))
        except Exception as e:
            # Bug fix: without this return, `r` would be unbound below and
            # the write block would fail with a NameError instead of exiting.
            print(e)
            return
        try:
            # Bug fix: the original opened last_run.tmp for writing here
            # (truncating the run marker) and leaked the dump.zip handle;
            # write the archive through its own context manager instead.
            with open("dump.zip", "wb") as f:
                f.write(r.content)
            print("Data downloaded")
            if extract():
                update_db()
                update_date()
        except Exception as e:
            print(e)
def extract():
    """Extract the downloaded ``dump.zip`` archive in place.

    Uses the password taken from the environment; prints a diagnostic and
    returns False when the password is missing or the extraction fails,
    True on success.
    """
    if pwd is None:
        print("Database was not updated:")
        print("Password is missing from env variables.")
        return False
    try:
        with zipfile.ZipFile("dump.zip") as archive:
            archive.extractall(pwd=pwd.encode("cp1252"))
    except Exception as err:
        print("Database was not updated:")
        print(err)
        return False
    return True
def update_date():
    """Record today's date in ``last_run.tmp`` (best effort).

    Any I/O failure is printed rather than raised, matching the module's
    tolerant error-handling style.
    """
    stamp = str(date.today())
    try:
        with open("last_run.tmp", "w") as marker:
            marker.write(stamp)
    except Exception as err:
        print(err)
def update_db():
    """Read tuotedump.csv, normalise ISBNs and rebuild the SQLite table.

    Keeps only active products sold in the online shop, rewrites the
    description column to an ISBN-13 where an ISBN-like token is present,
    drops rows without one, and replaces the ``data`` table in ``book.db``.
    """
    df = pd.read_csv("tuotedump.csv", on_bad_lines="skip", encoding="cp1252")
    df = df[df["Passiivinen"] == False]  # Select active products
    df = df[df["Verkkokauppa"] == True]  # Select online products
    df = df.drop(df.columns[[0, 1, 2, 3, 6, 7]], axis=1)  # Remove unused columns
    df.fillna("empty", inplace=True)  # Remove NaN values
    print("\nReading data...")
    for i in range(len(df)):
        # Hoisted the duplicated get_isbnlike() call; take the first
        # ISBN-like token and normalise it to ISBN-13.
        candidates = isbnlib.get_isbnlike(df.iloc[i, 0], level="normal")
        if len(candidates) > 0:
            df.iloc[i, 0] = isbnlib.to_isbn13(candidates[0])
        else:
            df.iloc[i, 0] = "missing"
    db = df[df["kuvaus"] != "missing"]  # Drop rows, if ISBN is missing
    db.reset_index(drop=True, inplace=True)  # Reset index
    # Bug fix: the original closed `con` in a finally block even when
    # sqlite3.connect() itself failed, which raised NameError out of the
    # handler. Connect first, and only guard the write with try/finally.
    try:
        con = sqlite3.connect("book.db")
    except Exception as e:
        print(e)
        return
    try:
        db.to_sql("data", con, if_exists="replace")
    except Exception as e:
        print(e)
    finally:
        con.close()
    print("Database updated")
| EskoJanatuinen/isbn_search | data_etl.py | data_etl.py | py | 2,832 | python | en | code | 0 | github-code | 13 |
35517240284 | from SLL import *
class length_SLL(SLL) :
    '''
    Singly linked list extended with a length query.

    Inherits all behaviour from SLL and adds getLength().
    '''
    def getLength(self) :
        '''
        Count the nodes in the linked list.

        Returns :
            count (int) : number of nodes in the linked list.
        '''
        count = 0
        self.temp = self.head
        # Walk node by node, counting until the cursor falls off the end.
        while self.temp :
            count += 1
            self.temp = self.temp.next
        return count
if __name__ == "__main__" :
ll = length_SLL()
ll.insert(0)
ll.insert(1)
ll.insert(2)
ll.insert(3)
ll.insert(4)
ll.insert(5)
ll.printAll()
print("\nThe Length of Linked List is : ", ll.getLength()) | paramSonawane/99Problems | Python/P04.py | P04.py | py | 848 | python | en | code | 0 | github-code | 13 |
34672448476 | #度和热度计算
# Degree / "hotness" computation: for each month, build a multigraph of
# intra-European flights and write per-airport degree counts to ./hot/.
import pandas as pd
import networkx as nx
# Months 2019-01..2019-12 and 2020-01..2020-12 encoded as YYYYMM integers.
date=list(range(201901,201913))
date+=list(range(202001,202013))
for day in date:
    data=pd.read_csv("./month/"+str(day)+".csv",low_memory=False)
    # Keep only flights whose origin and destination are both in Europe.
    data=data[(data["org_continent"]=="EU")&(data["dst_continent"]=="EU")]
    G=nx.from_pandas_edgelist(data,"origin","destination",True,nx.MultiDiGraph)
    # Collect per-airport metadata for the output rows.
    airports={}
    for index, item in data.iterrows():
        airports[item["origin"]]={"lat":item["org_lat"],"lon":item["org_lon"],"name":item["org_name"],"city":item["org_city"],"country":item["org_country"],"continent":item["org_continent"]}
        airports[item["destination"]]={"lat":item["dst_lat"],"lon":item["dst_lon"],"name":item["dst_name"],"city":item["dst_city"],"country":item["dst_country"],"continent":item["dst_continent"]}
    # Degree in the multigraph = number of flights touching the airport.
    degree=G.degree()
    output=[]
    for item in degree:
        output.append({"name":item[0],"lat":airports[item[0]]["lat"],"lon":airports[item[0]]["lon"],
                       "city":airports[item[0]]["city"],"country":airports[item[0]]["country"],
                       "continent":airports[item[0]]["continent"],"weight":item[1]})
    pd.DataFrame(output).to_csv("./hot/"+str(day)+".csv")
    print(day)
# Example graph plotting: four 2x2 panels on synthetic Barabasi-Albert graphs.
import networkx as nx
import matplotlib.pyplot as plt
import random
plt.figure(dpi=600,figsize=(15,12))
plt.subplot(2,2,1)
# (a) Plain graph: undirected, unweighted.
G=nx.Graph()
for u, v in nx.barabasi_albert_graph(10,2,seed=1).edges():
    G.add_edge(u,v)
pos=nx.spring_layout(G,iterations=20)
nx.draw_networkx_edges(G,pos)
nx.draw_networkx_nodes(G,pos)
plt.title("(a)", y=-0.15,fontsize = 24)
plt.subplot(2,2,2)
# (b) Directed graph.
G=nx.DiGraph()
for u, v in nx.barabasi_albert_graph(10,2,seed=1).edges():
    G.add_edge(u,v)
pos=nx.spring_layout(G,iterations=20)
# Draw the graph (edges first, then nodes).
nx.draw_networkx_edges(G,pos)
nx.draw_networkx_nodes(G,pos)
plt.title("(b)", y=-0.15,fontsize = 24)
plt.subplot(2,2,3)
# (c) Weighted graph: random edge weights drive the line widths.
G=nx.Graph()
for u, v in nx.barabasi_albert_graph(10,2,seed=1).edges():
    G.add_edge(u,v,weight=random.uniform(0,0.4))
pos=nx.spring_layout(G,iterations=20)
# Edge width is proportional to the edge weight (x10 for visibility).
nx.draw_networkx_edges(G,pos,width=[float(d['weight']*10) for (u,v,d) in G.edges(data=True)])
nx.draw_networkx_nodes(G,pos)
plt.title("(c)", y=-0.15,fontsize = 24)
plt.subplot(2,2,4)
# (d) Directed and weighted graph.
G=nx.DiGraph()
for u, v in nx.barabasi_albert_graph(10,2,seed=1).edges():
    G.add_edge(u,v,weight=random.uniform(0,0.4))
pos=nx.spring_layout(G,iterations=20)
# Edge width is proportional to the edge weight (x10 for visibility).
nx.draw_networkx_edges(G,pos,width=[float(d['weight']*10) for (u,v,d) in G.edges(data=True)])
nx.draw_networkx_nodes(G,pos)
plt.title("(d)", y=-0.15,fontsize = 24)
plt.savefig('./graph.svg', dpi=600, bbox_inches='tight')
plt.show() | hinczhang/Graduate-Thesis | degreeAndHotness.py | degreeAndHotness.py | py | 2,793 | python | en | code | 0 | github-code | 13 |
31405903363 |
from tkinter import filedialog
from PIL import ImageTk, Image
import cv2
import math as m
import numpy as np
import tkinter as tk
import tkinter.ttk as ttk
import os
import sys
root_path = os.path.abspath(os.path.join('..'))
sys.path.append(root_path)
import _init_paths
import numpy as np
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import torch.utils.data as data
from PIL import Image
import cv2
from libs.networks.vgg_refinedet import VGGRefineDet
from libs.networks.resnet_refinedet import ResNetRefineDet
from libs.utils.config import voc320, MEANS
from libs.data_layers.transform import base_transform
from matplotlib import pyplot as plt
import pdb
# Prefer the GPU when available; default tensors are then created on CUDA.
is_gpu = False
if torch.cuda.is_available():
    torch.set_default_tensor_type('torch.cuda.FloatTensor')
    is_gpu = True
# for VOC
class_names = ['__background__', # always index 0
'buffalo', 'elephant', 'gazellegrants',
'gazellethomsons','impala',
'giraffe', 'koribustard', 'lionmale',
'lionfemale', 'wildebeest', 'zebra']
num_classes = len(class_names)
# ---- Tk window and 2x2 frame layout ----------------------------------------
win = tk.Tk()
win.geometry("700x900+400+100")
win.resizable(width=True, height=True)
win.title("A Wildlife Recognition System")
# frm = tk.Frame(win)
# frm.pack()
frm_top = tk.Frame(win)
frm_top_left = tk.Frame(win)
frm_top_right = tk.Frame(win)
frm_bottom_left = tk.Frame(win)
frm_bottom_right = tk.Frame(win)
# frm_right = tk.Frame(frm)
# frm_bottom = tk.Frame(frm)
frm_top.grid(row = 0, column = 0, padx = 5, pady = 5)
frm_top_left.grid(row = 1, column = 0, padx = 5, pady = 5)
frm_top_right.grid(row = 1, column = 1, padx = 5, pady = 5)
frm_bottom_left.grid(row = 2, column = 0, padx = 5, pady = 5)
frm_bottom_right.grid(row = 2, column = 1, padx = 5, pady = 5)
# frm_right.pack(side = "right")
# frm_bottom.pack(side = "bottom")
# Image panels: original input on the left, detection result on the right.
panel_input = tk.Label(frm_top_left, image = None)
panel_predict = tk.Label(frm_top_right, image = None)
#panel_concat = tk.Label(frm_bottom_left, image = None)
#panel_concat.pack(side="bottom")
panel_input.grid(row = 0, column = 0, padx = 5, pady = 5)
panel_predict.grid(row = 0, column = 1, padx = 5, pady = 5)
#panel_concat.grid(row = 0, column = 0, padx = 5, pady = 5)
# ---- Build the RefineDet detector and load the trained weights -------------
cfg = voc320
base_network = 'vgg16'
model_path = '../output/vgg16_refinedet320_voc_40000.pth'
print('Construct {}_refinedet network.'.format(base_network))
refinedet = VGGRefineDet(cfg['num_classes'], cfg)
refinedet.create_architecture()
# for CPU
net = refinedet
# for GPU
if is_gpu:
    net = refinedet.cuda()
    cudnn.benchmark = True
# load weights
net.load_weights(model_path)
net.eval()
check = 0
def resize(img):
    """Scale a PIL image so its longest side is 900 px, keeping aspect ratio."""
    target = 900
    width, height = img.size
    ratio = height / width
    if height > width:
        # Portrait: fix the height, derive the width from the ratio.
        new_size = (round(target * (1 / ratio)), target)
    else:
        # Landscape or square: fix the width, derive the height.
        new_size = (target, round(target * ratio))
    return img.resize(new_size, Image.ANTIALIAS)
def openfile():
    """Let the user pick an image file and display it in the input panel."""
    global file_path
    # destroy_frm_left()
    # destroy_frm_right()
    file_path = filedialog.askopenfilename()
    destroy_frm_bottom_right()
    img = Image.open(file_path)
    image = resize(img)
    img_input = ImageTk.PhotoImage(image)
    # Clear any previously displayed images from both panels.
    panel_input.config(image = '')
    panel_predict.config(image = '')
    #panel_concat.config(image = '')
    # Keep a reference on the widget so the PhotoImage is not garbage-collected.
    panel_input.image = img_input
    panel_input.config(image = img_input)
    #how to put your result in panel
    #panel_concat.image = img_input
    #panel_concat.config(image = img_input)
    #end
    # panel_input.pack()
    # panel_predict.pack()
    # panel_concat.pack()
def destroy_frm_left():
    """Rebuild the left frame and its input panel from scratch.

    NOTE(review): references `frm` (and `frm_left`), which only exist as
    commented-out code at module level above — calling this would raise
    NameError. Looks like dead code from an earlier layout; confirm.
    """
    global frm_left
    global panel_input
    frm_left.destroy()
    frm_left = tk.Frame(frm)
    frm_left.pack(side = "left")
    panel_input = tk.Label(frm_left, image = None)
def destroy_frm_bottom_right():
    """Replace the bottom-right frame with a fresh, empty one."""
    global frm_bottom_right
    frm_bottom_right.destroy()
    frm_bottom_right = tk.Frame(win)
    frm_bottom_right.grid(row = 2, column = 1, padx = 5, pady = 5)
    #panel_predict = tk.Label(frm_right, image = None)
def img_bottom(img):
    """Show a PIL image (resized to fit) in the prediction panel."""
    #img_input = Image.fromarray(img)
    img_concat = resize(img)
    img_concat = ImageTk.PhotoImage(img_concat)
    # Keep a widget-level reference so Tk does not garbage-collect the image.
    panel_predict.image = img_concat
    panel_predict.config(image = img_concat)
def img_right(img):
    """Show a PIL image (resized to fit) in the right-hand prediction panel."""
    #img_predict = Image.fromarray(img)
    img_predict = resize(img)
    img_predict = ImageTk.PhotoImage(img_predict)
    # The first config('') clear is redundant with the assignment that
    # follows; kept as-is.
    panel_predict.config(image = '')
    panel_predict.config(image = img_predict)
    panel_predict.image = img_predict
    panel_predict.pack()
def _from_rgb(rgb):
return "#%02x%02x%02x" % rgb
def detection():
    """Run the RefineDet detector on the currently opened image.

    Preprocesses the image to 320x320, forwards it through `net`, draws
    boxes and labels for detections scoring above 0.5, saves the result
    and shows it in the GUI's prediction panel.
    """
    global save_path
    global label_img
    global check
    '''
    check = 0
    num=1
    testGene = testGenerator(file_path, num)
    results = model.predict_generator(testGene,num,verbose=1)
    save_path = "./output/"+file_path.rsplit("/", 1)[1]
    label = saveResult_1(save_path,results)
    img = Image.open(save_path)
    img_right(img)
    img = cv2.imread(file_path)
    label = np.uint8(label)
    label_img = np.uint8(label)
    label_gray = cv2.cvtColor(label, cv2.COLOR_BGR2GRAY)
    mask = np.zeros((label.shape[0], label.shape[1]), dtype=int)
    mask = mask+255
    mask[label_gray==255]=0
    print(mask.shape)
    for i in range(0,3):
        tmp = img[:,:,i] - mask
        tmp[tmp<0] = 0
        img[:,:,i] = tmp
    mask = 255 - mask
    for i in range(0,3):
        tmp = label[:,:,i] - mask
        tmp[tmp<0] = 0
        label[:,:,i] = tmp
        img[:,:,i] = img[:,:,i] + label[:,:,i]
    '''
    #==================================================
    image=cv2.imread(file_path, cv2.IMREAD_COLOR)
    # preprocess
    # norm_image = base_transform(image, (320, 320), MEANS)
    # Resize to the network input size and subtract the per-channel means.
    norm_image = cv2.resize(image, (320, 320)).astype(np.float32)
    norm_image -= MEANS
    norm_image = norm_image.astype(np.float32)
    # HWC -> CHW for PyTorch.
    norm_image = torch.from_numpy(norm_image).permute(2, 0, 1)
    # forward
    input_var = Variable(norm_image.unsqueeze(0)) # wrap tensor in Variable
    if torch.cuda.is_available():
        input_var = input_var.cuda()
    # NOTE(review): local `detection` shadows this function's own name.
    detection = net(input_var)
    # scale each detection back up to the image,
    # scale = (width, height, width, height)
    scale = torch.Tensor(image.shape[1::-1]).repeat(2)
    threshold = 0.5
    num_top = detection.size(2)
    # One distinct HSV-derived colour per class.
    colors = (plt.cm.hsv(np.linspace(0, 1, num_classes)) * 255).tolist()
    for i in range(1, num_classes):
        for j in range(num_top):
            score = detection[0, i, j, 0]
            if score < threshold:
                continue
            label_name = class_names[i]
            display_txt = '%s: %.2f' % (label_name, score)
            # Box corners scaled back to the original image resolution.
            pts = (detection[0, i, j, 1:] * scale).cpu().numpy().astype(np.int32)
            pts = tuple(pts)
            cv2.rectangle(image, pts[:2], pts[2:], colors[i], 4)
            cv2.putText(image, display_txt,
                        pts[:2],
                        cv2.FONT_HERSHEY_COMPLEX_SMALL,
                        2.5, colors[i])
    # pdb.set_trace()
    # NOTE(review): os.path.split returns (directory, filename), so this
    # writes "<dir>_result<filename>" — os.path.splitext was probably
    # intended. Confirm the expected output path.
    name, ext = os.path.split(file_path)
    cv2.imwrite(name + '_result' + ext, image)
    #=================================
    cv2.imwrite("./output/save.png", image)
    img = Image.open("./output/save.png")
    img_bottom(img)
# Toolbar buttons: choose an input image, then run the detector on it.
file = ttk.Button(frm_top, text = "Open File", command = openfile)
file.grid(row = 0, column = 0, padx = 5, pady = 5)
hist = ttk.Button(frm_top, text = "Detection", command = detection)
hist.grid(row = 0, column = 1, padx = 5, pady = 5)
win.mainloop() | lsc25846/Wildlife-Recognition-System | demo/GUI.py | GUI.py | py | 7,720 | python | en | code | 1 | github-code | 13 |
2723167840 | from django.urls import path
from . import views
# from django.conf import settings
# from django.conf.urls.static import static
# +static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
urlpatterns = [
    # path for user
    # User CRUD: create, list all, retrieve one, full update, partial
    # update ("upt"), delete — each keyed by primary key where needed.
    path('usercreate', views.UserCreateAPI.as_view()),
    path('useralldata', views.UserDataAPI.as_view()),
    path('useronedata/<int:pk>', views.UserOneDataAPI.as_view()),
    path('userupdate/<int:pk>',views.UserUpdateAPI.as_view()),
    path('userupt/<int:pk>', views.UserUpdatePartialAPI.as_view()),
    path('userdelete/<int:pk>', views.UserDeleteAPI.as_view()),
    # path for posts
    # Post CRUD, same endpoint pattern as the user routes above.
    path('postcreate', views.PostCreateAPI.as_view()),
    path('postalldata', views.PostDataAPI.as_view()),
    path('postonedata/<int:pk>', views.PostOneDataAPI.as_view()),
    path('postupdate/<int:pk>',views.PostUpdateAPI.as_view()),
    path('postupt/<int:pk>', views.PostUpdatePartialAPI.as_view()),
    path('postdelete/<int:pk>', views.PostDeleteAPI.as_view()),
    # path for like
    # Like CRUD, same endpoint pattern as above.
    path('likecreate', views.LikeCreateAPI.as_view()),
    path('likealldata', views.LikeDataAPI.as_view()),
    path('likeonedata/<int:pk>', views.LikeOneDataAPI.as_view()),
    path('likeupdate/<int:pk>',views.LikeUpdateAPI.as_view()),
    path('likeupt/<int:pk>', views.LikeUpdatePartialAPI.as_view()),
    path('likedelete/<int:pk>', views.LikeDeleteAPI.as_view()),
]
13141023941 | #!/usr/bin/env python
"""
LogicRLUtils.py
The general utilities for LogicRL.
"""
__version__ = "0.0.1"
__author__ = "David Qiu"
__email__ = "dq@cs.cmu.edu"
__website__ = "http://www.davidqiu.com/"
__copyright__ = "Copyright (C) 2018, David Qiu. All rights reserved."
import numpy as np
import cv2
import pdb, IPython
import torch
# Input resolution expected by the decoder network (see FrameToDecoderState).
decoder_frame_width = 84
decoder_frame_height = 84
# Per-frame resolution and number of stacked frames making up one RL state
# (see FramesToRLState, which asserts exactly rl_state_joint frames).
rl_frame_width = 84
rl_frame_height = 84
rl_state_joint = 4
def FrameToDecoderState(frame):
  """
  Convert a raw frame to a decoder state.

  @param frame The raw frame received from environment.
  @return A float tensor of shape (1, 1, H, W): the grayscale frame
          resized to the decoder's input resolution.
  """
  gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
  shrunk = cv2.resize(gray,
                      (decoder_frame_width, decoder_frame_height),
                      interpolation=cv2.INTER_CUBIC)
  # Prepend batch and channel axes before wrapping as a torch tensor.
  batched = shrunk[np.newaxis, np.newaxis, :, :]
  return torch.FloatTensor(batched)
def FramesToRLState(frames):
  """
  Convert a list of raw frames to a RL agent state.

  @param frames The raw frames, exactly ``rl_state_joint`` of them.
  @return The grayscale, resized frames concatenated along the last axis.
  """
  assert len(frames) == rl_state_joint

  def _prepare(frame):
    # Grayscale, resize, then add leading batch and trailing channel axes.
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    small = cv2.resize(gray, (rl_frame_width, rl_frame_height),
                       interpolation=cv2.INTER_CUBIC)
    return small[np.newaxis, :, :, np.newaxis]

  return np.concatenate([_prepare(frame) for frame in frames], -1)
| LogicRL/MontezumaRevenge | src/utils/LogicRLUtils.py | LogicRLUtils.py | py | 1,473 | python | en | code | 2 | github-code | 13 |
4094002495 | from django import template
from post.models import Post,Comment,Notification
register = template.Library()
@register.inclusion_tag('post/show_notifications.html', takes_context=True)
def show_notifications(context):
    """Render the current user's notifications, unseen first then newest first,
    together with the count of notifications not yet seen."""
    viewer = context['request'].user
    notifications = (
        Notification.objects.filter(to_user=viewer)
        .order_by('user_has_seen', '-date')
    )
    unread_count = Notification.objects.filter(
        to_user=viewer, user_has_seen=False
    ).count()
    return {'notifications': notifications, 'unread_count': unread_count}
16654336100 | from sklearn.linear_model import LinearRegression
import numpy as np
import pandas as pd
import csv
import sys
# Command line: <expression table> <human TF list>, both tab-separated.
sc_expression_data = sys.argv[1]
human_tfs = sys.argv[2]
test_data = pd.read_table(sc_expression_data, index_col=0)
tfs = pd.read_table(human_tfs, index_col=0, names='TF')
reg = LinearRegression()
# Regressor matrix: expression of the genes that are also TFs, transposed so
# rows are samples and columns are TFs (assumes input rows are genes — confirm).
X = test_data.loc[test_data.index.intersection(tfs.index)].T
coefs = pd.DataFrame(index=X.columns)
# Fit one regression per target gene, excluding the target itself from the
# regressors; the coefficients are the inferred regulatory strengths.
for target in test_data.index:
    # Get the expression values of target
    y = test_data.loc[target]
    # Expression of TFs
    X_local = X.loc[:,X.columns != target]
    # Run regression
    reg.fit(X_local, y)
    # Update the coefficients (regulatory relationships)
    coefs[target] = 0
    coefs.loc[X_local.columns,target] = reg.coef_
# Long format: one (TF, target, coefficient) row per pair.
network = coefs.reset_index().melt(id_vars="index")
# NOTE(review): the bare `network` expression below is a no-op outside a REPL.
network
print(network.to_csv(sep="\t", quoting=csv.QUOTE_NONE))
| prullens/GRNi_Benchmarking | LinearRegression.py | LinearRegression.py | py | 874 | python | en | code | 0 | github-code | 13 |
1076266449 | # i have to build a faulty calculator which shows the correct result
# for all the operations except some of the operation like
# 45*3=555, 56+9=77,56/6=4
# Faulty-calculator assignment: answers are correct for every operation
# EXCEPT the three deliberately wrong special cases 45*3, 56+9 and 56/6.
operator = input('enter the operator\n'
                 '* for multiplication\n'
                 '+ for addition\n'
                 '/ for division\n'
                 '- for substraction')
val1 = int(input('enter the first operand'))
val2 = int(input('enter the second operand'))
if operator == '*':
    # Deliberate fault: 45 * 3 must report 555 instead of 135.
    if val1 == 45 and val2 == 3:
        print('555')
    else:
        print('multiplication is:', val1*val2)
elif operator == '+':
    # Deliberate fault: 56 + 9 must report 77 instead of 65.
    if val1 == 56 and val2 == 9:
        print('77')
    else:
        print('sum is:', val1 + val2)
elif operator == '/':
    # Deliberate fault: 56 / 6 must report 4.
    if val1 == 56 and val2 == 6:
        print('4')
    else:
        print('division is:', val1/val2)
elif operator == '-':
    print('substraction is:', val1-val2)
| manishkumarsahgopsaheb/faulty_calculator | main.py | main.py | py | 895 | python | en | code | 0 | github-code | 13 |
326272827 | import pandas as pd
import csv
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
# Train two SVM classifiers on the iris dataset and count their test errors.
iris = datasets.load_iris()
# NOTE(review): `boston` is loaded but never used below.
boston = datasets.load_boston()
iris_features = iris.data
iris_labels = iris.target
print(iris)
print(len(iris.target))
print(len(iris.data))
# 80/20 train/test split.
Features_train, Features_test, Labels_train, Labels_test = train_test_split(iris_features, iris_labels, test_size=0.2, train_size=0.8)
print(Features_train)
print(Features_test)
print(len(Features_train))
print(len(Features_test))
# NOTE(review): X and y below are never used.
X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
y = np.array([1, 1, 2, 2])
#This code is referenced from: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# NOTE(review): SVC() is constructed with default arguments, and the bare
# SVC(...) expression below is a no-op (copied from the docs' repr) — so
# `linear` does not actually use kernel='linear'. Confirm intent.
linear = SVC()
linear.fit(Features_train, Labels_train)
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
    decision_function_shape='ovr', degree=3, gamma='auto', kernel='linear',
    max_iter=-1, probability=False, random_state=None, shrinking=True,
    tol=0.001, verbose=False)
#predict values
# Count misclassified test samples for the first model.
linearArray =[]
linearCount = 0
for i in range(0, len(Features_test)):
    print(Features_test[i])
    print(Labels_test[i])
    print(linear.predict([Features_test[i]]))
    linearArray.append(linear.predict([Features_test[i]]))
    if((linear.predict([Features_test[i]])) != (Labels_test[i])):
        linearCount += 1
#This code is referenced from: http://scikit-learn.org/stable/modules/generated/sklearn.svm.SVC.html
# Second model; the bare SVC(...) expression below is also a no-op.
rbf = SVC()
rbf.fit(Features_train, Labels_train)
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
    decision_function_shape='ovr', degree=3, gamma='auto', kernel='rbf',
    max_iter=-1, probability=False, random_state=None, shrinking=True,
    tol=0.001, verbose=False)
#predict values
# Count misclassified test samples for the second model.
rbfArray =[]
rbfCount = 0
for i in range(0, len(Features_test)):
    print(Features_test[i])
    print(Labels_test[i])
    print(rbf.predict([Features_test[i]]))
    rbfArray.append(rbf.predict([Features_test[i]]))
    if ((rbf.predict([Features_test[i]])) != (Labels_test[i])):
        rbfCount += 1
print('RBF Errors: ',rbfCount)
print('Linear Errors: ',linearCount) | santosh500/Deep-Learning-and-Python-Projects | Python Project 3/Source/Problem2.py | Problem2.py | py | 2,172 | python | en | code | 0 | github-code | 13 |
16466746262 | from django.test import LiveServerTestCase
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import WebDriverException
import time
import unittest
MAX_WAIT = 10
class NewVisitorTest(LiveServerTestCase):
    """Browser-level (Selenium) functional tests for the medication search flow."""
    def setUp(self):
        # Fresh Firefox session for every test.
        self.browser = webdriver.Firefox()
    def tearDown(self):
        self.browser.quit()
    def wait_for_row_in_list_table(self, row_text):
        """Poll the results table until row_text appears, retrying every 0.5 s.

        Re-raises the last AssertionError/WebDriverException once MAX_WAIT
        seconds have elapsed (the page may still be loading in between).
        """
        start_time = time.time()
        while True:
            try:
                table = self.browser.find_element_by_id('id_med_table')
                rows = table.find_elements_by_tag_name('tr')
                self.assertIn(row_text, [row.text for row in rows])
                return
            except (AssertionError, WebDriverException) as e:
                if time.time() - start_time > MAX_WAIT:
                    raise e
                time.sleep(0.5)
    def test_can_start_a_list_for_one_user(self):
        """One user searches two medications and sees both listed."""
        #opens homepage
        self.browser.get(self.live_server_url)
        #Homepage allows user to search for drug type
        self.assertIn('Medication', self.browser.title)
        header_text = self.browser.find_element_by_tag_name('h1').text
        self.assertIn('medication', header_text)
        #Search for a drug concept
        inputbox = self.browser.find_element_by_id('id_new_item')
        self.assertEqual(
            inputbox.get_attribute('placeholder'),
            'Enter medication name'
        )
        #ex Search for alavert
        inputbox.send_keys('Alavert')
        #Hit enter to search for similar meds, the page updates, and now the page lists
        #'1. Alavert' as a similar medication
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Alavert')
        #There is still a text box to search another medication.
        #Enter 'Advil' and search
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Advil')
        inputbox.send_keys(Keys.ENTER)
        #Page updates again, showing complete list
        self.wait_for_row_in_list_table('1: Alavert')
        self.wait_for_row_in_list_table('2: Advil')
    def test_multiple_users_can_start_lists_at_different_urls(self):
        """Two users get separate lists at distinct per-list URLs."""
        #User starts new search
        self.browser.get(self.live_server_url)
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Alavert')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Alavert')
        #This search as a unique url
        edith_list_url = self.browser.current_url
        self.assertRegex(edith_list_url, '/app/.+')
        #New User comes along
        self.browser.quit()
        self.browser = webdriver.Firefox()
        #New user visits the home page. There is not sign of previous user
        self.browser.get(self.live_server_url)
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Alavert', page_text)
        self.assertNotIn('Advil', page_text)
        #User starts a new list by entering a new item.
        inputbox = self.browser.find_element_by_id('id_new_item')
        inputbox.send_keys('Neproxin')
        inputbox.send_keys(Keys.ENTER)
        self.wait_for_row_in_list_table('1: Neproxin')
        #New user gets new unique URL
        francis_list_url = self.browser.current_url
        self.assertRegex(francis_list_url, '/app/.+')
        self.assertNotEqual(francis_list_url, edith_list_url)
        #No trace of Edith's list
        page_text = self.browser.find_element_by_tag_name('body').text
        self.assertNotIn('Alavert', page_text)
        self.assertIn('Neproxin', page_text)
        #Drug concepts will appear on the page
        #Homepage gives explanation about what it does
        #Select a particular drug concept to serve as the reference drug ex Select Alavert 10 MG Oral Tablet .
        #Return a list of all generic and branded drugs that contain the same active ingredients as a reference drug
        #List groups in generic and branded
        #Would be great for app to describe what this group of meds does
        self.fail('Finish the test!')
# if __name__ == '__main__':
# unittest.main(warnings='ignore') | kathryn-rowe/django_test_the_goat | similar_med_app/functional_tests/tests.py | tests.py | py | 3,750 | python | en | code | 0 | github-code | 13 |
574406497 | from flask import render_template,flash,redirect,url_for,current_app
from app.main.forms import EditProfileForm,PostForm
from app import db
from app.main import bp
from flask_login import current_user,login_required
from app.models import User,Post
from flask import request
from datetime import datetime
from werkzeug.urls import url_parse
from flask_babel import _
from guess_language import guess_language
from flask import g
from flask_babel import get_locale
from flask import jsonify
from app.translate import translate
@bp.route('/', methods=['GET', 'POST'])
@bp.route('/index', methods=['GET', 'POST'])
# login_required means both index routes above require an authenticated user.
@login_required
def index():
    """Home feed: accept a new post and show the followed-posts timeline."""
    form = PostForm()
    if form.validate_on_submit():
        # Guess the post's language so it can be machine-translated later;
        # fall back to empty when the guess is unusable.
        language = guess_language(form.post.data)
        if language == 'UNKNOWN' or len(language) > 5:
            language = ''
        post = Post(body=form.post.data, author=current_user, language=language)
        db.session.add(post)
        db.session.commit()
        flash(_('Your post is now live!'))
        # Redirect after POST so a browser refresh does not resubmit.
        return redirect(url_for('main.index'))
    # Page number from the URL query string, defaulting to 1.
    page = request.args.get('page',1,type=int)
    # Fetch the requested page of the user's followed-posts feed.
    posts = current_user.followed_posts().paginate(
        page,current_app.config['POSTS_PER_PAGE'],False)
    next_url = url_for('main.index', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('main.index', page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('index.html',
                           title=_('Home'),
                           form=form,
                           posts=posts.items,
                           next_url=next_url,
                           prev_url=prev_url
                           )
@bp.route('/user/<username>')
@login_required
def user(username):
    """Show *username*'s profile page with their own posts, paginated."""
    # Look the user up by name; unknown names get a 404.
    user = User.query.filter_by(username=username).first_or_404()
    # Page number from the URL query string, defaulting to 1.
    page = request.args.get('page', 1, type=int)
    # Bug fix: paginate the profile owner's own posts. The original
    # paginated current_user.followed_posts() (copy-pasted from index()),
    # so every profile page showed the viewer's home feed instead.
    # NOTE(review): assumes the User.posts relationship exists in
    # app.models (standard for this schema) — confirm.
    posts = user.posts.order_by(Post.timestamp.desc()).paginate(
        page, current_app.config['POSTS_PER_PAGE'], False)
    next_url = url_for('main.user', username=user.username, page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('main.user', username=user.username, page=posts.prev_num) \
        if posts.has_prev else None
    return render_template('user.html',
                           user=user,
                           posts=posts.items,
                           next_url=next_url,
                           prev_url=prev_url)
# Runs before every request: track the user's last visit time and the locale.
@bp.before_request
def before_request():
    """Stamp the authenticated user's last-seen time and store the locale in g."""
    if current_user.is_authenticated:
        current_user.last_seen = datetime.utcnow()
        db.session.commit()
    # Expose the negotiated locale to templates via the request globals.
    g.locale = str(get_locale())
    #if g.locale.startswith('zh'):
    #g.locale = 'zh-CN'
# Page where the user edits their profile (username and "about me").
@bp.route('/edit_profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
    """Render and process the profile-editing form."""
    form = EditProfileForm(current_user.username)
    if form.validate_on_submit():
        # The user submitted changes: persist them.
        current_user.username = form.username.data
        current_user.about_me = form.about_me.data
        db.session.commit()
        flash(_('Your changes have been saved.'))
        return redirect(url_for('main.edit_profile'))
    elif request.method == 'GET':
        # Plain GET (nothing edited yet — the user just opened the page):
        # pre-fill the form with the values stored in the database.
        form.username.data = current_user.username
        form.about_me.data = current_user.about_me
    return render_template('edit_profile.html', title=_('Edit Profile'),
                           form=form)
# Follow endpoint.
@bp.route('/follow/<username>')
@login_required
def follow(username):
    """Make the logged-in user follow *username*, then show that profile."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(_('User %(username)s not found.', username=username))
        return redirect(url_for('main.index'))
    if target == current_user:
        # NOTE(review): redirecting to main.index with a username argument
        # looks like it was meant to be main.user — confirm intended target.
        flash(_('you cannot follow yourself!'))
        return redirect(url_for('main.index', username=username))
    current_user.follow(target)
    db.session.commit()
    flash(_('You are following %(username)s!', username=username))
    return redirect(url_for('main.user', username=username))
@bp.route('/unfollow/<username>')
@login_required
def unfollow(username):
    """Make the logged-in user unfollow *username*, then show that profile."""
    target = User.query.filter_by(username=username).first()
    if target is None:
        flash(_('User %(username)s not found.', username=username))
        return redirect(url_for('main.index'))
    if target == current_user:
        # NOTE(review): same questionable redirect target as in follow().
        flash(_('you cannot unfollow yourself!'))
        return redirect(url_for('main.index', username=username))
    current_user.unfollow(target)
    db.session.commit()
    flash(_('You are not following %(username)s.', username=username))
    return redirect(url_for('main.user', username=username))
# Browse all users' posts, making it easy to find people worth following.
@bp.route('/explore')
@login_required
def explore():
    """Global timeline: every post, newest first, paginated."""
    page = request.args.get('page', 1, type=int)
    posts = Post.query.order_by(Post.timestamp.desc()).paginate(
        page, current_app.config['POSTS_PER_PAGE'],False)
    next_url = url_for('main.explore', page=posts.next_num) \
        if posts.has_next else None
    prev_url = url_for('main.explore', page=posts.prev_num) \
        if posts.has_prev else None
    # Reuses the index template; no form is passed, so no post box is shown.
    return render_template('index.html',
                           title=_('Explore'),
                           posts=posts.items,
                           next_url=next_url,
                           prev_url=prev_url)
# Route serving the client's translation requests.
@bp.route('/translate', methods=['POST'])
@login_required
def translate_text():
    """Translate the posted text between the given languages; return JSON."""
    form = request.form
    translated = translate(form['text'],
                           form['source_language'],
                           form['dest_language'])
    return jsonify({'text': translated})
| sileyouhe/microblog | app/main/routes.py | routes.py | py | 6,169 | python | en | code | 0 | github-code | 13 |
1969322241 | import json
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from pathlib import Path
from alibi_detect.cd import KSDrift
from load_data import get_tables_from_folder, get_tables_from_path
from datatypes import TableInfo, Distribution, TableDistribution
from typing import List, Tuple, Callable
from config import DATA_PATH, DATA2_PATH
def get_time_distribution(
    table_info: TableInfo,
    hist_bins: int = 24 * 6,
    hist_range: Tuple[float, float] = (-1, 23)
) -> TableDistribution:
    """Histogram one table's GPS update times, in hours since the table's date.

    Samples falling outside *hist_range* are folded into the first / last bin.
    Returns a TableDistribution whose space holds the left bin edges.
    """
    frame = pd.read_csv(table_info.path)
    # Offset from the table's day in nanoseconds, converted to hours.
    deltas = pd.to_datetime(frame['gpstime']).values - np.datetime64(table_info.date)
    hours = deltas.astype(float) / 1e9 / 3600
    counts, edges = np.histogram(hours, bins=hist_bins, range=hist_range)
    # Fold out-of-range samples into the edge bins.
    counts[0] += np.sum(hours < edges[0])
    counts[-1] += np.sum(hours > edges[-1])
    return TableDistribution(
        table_info=table_info,
        distribution_pdf=counts,
        distribution_space=edges[:-1]
    )
def grab_time_distributions(table_info_list: List[TableInfo]) -> List[TableDistribution]:
    """Compute the update-time distribution for every table in the list."""
    return list(map(get_time_distribution, table_info_list))
def calculate_reference_distribution(
    table_distribution_list: List[TableDistribution],
    solving_func: Callable = lambda x: np.median(x, axis=-1)
) -> Distribution:
    """Collapse several per-table histograms into one reference distribution.

    All inputs must share the same distribution space; *solving_func*
    reduces the stacked pdfs along the last axis (per-bin median by default).

    Raises:
        ValueError: if the tables' distribution spaces differ.
    """
    reference_space = table_distribution_list[0].distribution_space
    for table_dist in table_distribution_list:
        if not np.allclose(table_dist.distribution_space, reference_space):
            raise ValueError("Tables have different distribution space")

    stacked = np.dstack([table_dist.distribution_pdf
                         for table_dist in table_distribution_list])[0]
    return Distribution(
        distribution_space=reference_space,
        distribution_pdf=solving_func(stacked)
    )
def get_update_references(reference_distribution_path: Path = Path("../data/gps_data_update_frequency.json")):
    """Load the stored workday/weekend reference distributions from disk.

    Returns a (workday, weekend) pair of Distribution objects.
    """
    with open(reference_distribution_path, 'r') as fp:
        stored = json.load(fp)

    def _build(key, label):
        # Each JSON entry is a [space, pdf] pair.
        space, pdf = stored[key][0], stored[key][1]
        return Distribution(
            distribution_space=np.array(space),
            distribution_pdf=np.array(pdf),
            info={"name": key, "label": label}
        )

    return _build('workday', "Workday ref dist"), _build('weekend', "Weekend ref dist")
def detect_ks_drift(
        ref: Distribution,
        dist_to_check_list: List[Distribution],
        p_val: float = 0.05
) -> List[Distribution]:
    """Run a Kolmogorov-Smirnov drift test of each distribution against *ref*.

    The verdict is stored in-place under ``info["KSDrift result"]``; the
    (mutated) input list is returned.
    """
    detector = KSDrift(ref.distribution_pdf, p_val=p_val)
    for candidate in dist_to_check_list:
        verdict = detector.predict(candidate.distribution_pdf)['data']
        if candidate.info:
            candidate.info["KSDrift result"] = verdict
        else:
            candidate.info = {"KSDrift result": verdict}
    return dist_to_check_list
def detect_bounded_drift(
        ref: Distribution,
        dist_to_check_list: List[Distribution],
        bounds: Tuple[float, float] = (0.5, 1.35),
        abs_tol: float = 300
) -> List[Distribution]:
    """Flag histogram bins falling outside a tolerance band around *ref*.

    The band is the relative interval ``ref * bounds`` widened so that it is
    at least ``abs_tol`` away from ``ref`` in every bin. Results are stored
    in-place in each distribution's ``info`` dict:
    ``"bound drift"`` — per-bin boolean, True when the bin is INSIDE the band;
    ``"bound drift value"`` — percentage of bins outside the band.
    """
    reference = ref.distribution_pdf
    # Widen each bound so it sits at least abs_tol away from the reference.
    lower = np.minimum(reference * bounds[0], reference - abs_tol)
    upper = np.maximum(reference * bounds[1], reference + abs_tol)
    for candidate in dist_to_check_list:
        pdf = candidate.distribution_pdf
        within_band = (lower < pdf) & (pdf < upper)
        drift_percent = (1 - np.count_nonzero(within_band) / within_band.size) * 100
        payload = {"bound drift": within_band, "bound drift value": drift_percent}
        if candidate.info:
            candidate.info.update(payload)
        else:
            candidate.info = payload
    return dist_to_check_list
def detect_bounded_drift_on_tables(
        tables_to_check_list: List[TableInfo],
        bounds: Tuple[float, float] = (0.5, 1.35),
        abs_tol: float = 300
) -> List[Distribution]:
    """Bound-check each table against the reference matching its day type.

    Tables are split into weekend/workday groups, each checked against the
    corresponding stored reference distribution; the weekend results come
    first in the returned list.
    """
    all_dists = grab_time_distributions(tables_to_check_list)
    workday_ref, weekend_ref = get_update_references()
    weekend_days = ('Saturday', 'Sunday')
    weekend_dists = [d for d in all_dists if d.table_info.weekday in weekend_days]
    workday_dists = [d for d in all_dists if d.table_info.weekday not in weekend_days]
    weekend_dists = detect_bounded_drift(weekend_ref, weekend_dists, bounds, abs_tol)
    workday_dists = detect_bounded_drift(workday_ref, workday_dists, bounds, abs_tol)
    return weekend_dists + workday_dists
def show_distributions(dists: List[Distribution]):
    """Plot every distribution on one shared, labelled matplotlib figure."""
    plt.figure(figsize=(15, 5))
    for dist in dists:
        # Optional per-distribution styling carried in the info dict.
        style_kwargs = {
            'label': dist.info.get('label', None),
            'linestyle': dist.info.get('linestyle', None),
        }
        plt.plot(dist.distribution_space, dist.distribution_pdf, **style_kwargs)
    plt.legend()
    plt.grid()
    plt.show()
if __name__ == "__main__":
    # Smoke test: load the stored reference distributions and plot them.
    workday_ref_dist, weekend_ref_dist = get_update_references()
    show_distributions([workday_ref_dist, weekend_ref_dist])
    # The blocks below are developer notes for running the KS / bounded
    # drift detectors on recent tables and visualising the results.
    # table_info_list = get_tables_from_folder(DATA_PATH)[-10:]
    #
    # table_distribution_list = grab_time_distributions(table_info_list)
    # table_distribution_list = detect_KS_drift(workday_ref_dist, table_distribution_list)
    # for dist in table_distribution_list:
    #     ksd_res = dist.info['KSDrift result']
    #     dist.info['label'] = f"{dist.table_info.date.strftime('%d_%B')} ({dist.table_info.weekday}) |" \
    #                          f"is_drift - {ksd_res['is_drift']}, dist={ksd_res['distance'][0]:.4f}, p_val={ksd_res['p_val'][0]:.4f}"
    #     dist.info['linestyle'] = '-.'
    # table_distribution_list = detect_bounded_drift_on_tabels(table_info_list)
    # for dist in table_distribution_list:
    #     b_drift = dist.info['bound drift']
    #     dist.info['label'] = f"{dist.table_info.date.strftime('%d_%B')} ({dist.table_info.weekday}) |" \
    #                          f"{dist.info['bound drift value']:.2f}"
    #     dist.info['linestyle'] = '-.'
    # show_distributions([workday_ref_dist, weekend_ref_dist] + table_distribution_list)
| VadyusikhLTD/prjctr-ML-in-Prod | week6/src/univariate_update_frequency.py | univariate_update_frequency.py | py | 6,681 | python | en | code | 0 | github-code | 13 |
29493256875 | # -*- coding: utf-8 -*-
# @Time : 2023/4/5 下午6:40
# @Author : Lingo
# @File : trainer_conica.py
import torch.nn.functional
from torch import nn
from transformers.trainer import *
from transformers.trainer_utils import PredictionOutput
from torch.utils.data import Dataset, DataLoader
from typing import Optional, List, Dict, Union, Any, Tuple
from transformers.deepspeed import is_deepspeed_zero3_enabled
from transformers.trainer_utils import EvalLoopOutput
from torch.nn.utils.rnn import pad_sequence
from utils.ptbtokenizer import PTBTokenizer
from utils.cider import Cider
from utils.bleu import Bleu
from utils.rouge import Rouge
from utils.meteor import Meteor
class ConicaTrainer(Trainer):
    """HuggingFace ``Trainer`` specialised for CONICA image captioning.

    Adds: a caption-specific data collator, caption metrics (BLEU/ROUGE/
    CIDEr/METEOR) for evaluation, and an optional self-critical sequence
    training (SCST) loss that uses CIDEr as the reward.
    """
    def __init__(self,
                 model=None,
                 args=None,
                 train_dataset=None,
                 eval_dataset=None,
                 tokenizer=None,
                 model_init=None,
                 callbacks=None,
                 optimizers=(None, None),
                 preprocess_logits_for_metrics=None,
                 train_cached_cider=None,
                 scst=False,
                 scst_num_sample_sequences=None,
                 scst_baseline_type=None,
                 add_mean_cls=True,
                 init_tau=False):
        """Build the trainer.

        train_cached_cider: precomputed document frequencies for the
            training-time CIDEr reward. scst: enable self-critical sequence
            training. scst_baseline_type: 'greedy', 'avg_rest' or mean
            baseline for the reward. init_tau: re-initialise the model's
            temperature before training (SCST only, see NOTE below).
        """
        super().__init__(model,
                         args,
                         None,
                         train_dataset,
                         eval_dataset, tokenizer,
                         model_init, None,
                         callbacks,
                         optimizers,
                         preprocess_logits_for_metrics)
        self.train_cider = Cider(cider_cached=train_cached_cider)
        self.data_collator = self.caption_collator
        self.scst = scst
        self.count_similarity = model.config.count_similarity
        self.compute_metrics = self.compute_cider_rouge_and_bleu
        self.add_mean_cls = add_mean_cls
        # NOTE(review): init_tau is honoured only when scst=True; with
        # scst=False the constructor argument is silently ignored — confirm
        # this is intended.
        self.init_tau = False
        if self.scst:
            self.scst_num_sample_sequences = scst_num_sample_sequences
            self.scst_baseline_type = scst_baseline_type
            self.init_tau = init_tau
        self.ptb_tokenizer = PTBTokenizer()
    def caption_collator(self, batch):
        """Collate (features, captions, ground-truths) triples into a batch.

        Tokenises all captions (truncated to 62 tokens), pads the visual
        feature sequences, and derives the visual attention mask from
        all-zero feature rows.
        """
        feats, caps, gts = zip(*batch)
        sentences = []
        for cap in caps:
            sentences.extend(cap)
        _gts = []
        for gt in gts:
            _gts.append(gt)
        gts = _gts
        outputs = self.tokenizer.batch_encode_plus(sentences, padding=True, return_tensors='pt', max_length=62,
                                                   truncation=True)
        feats = pad_sequence([torch.from_numpy(feat) for feat in feats], batch_first=True)
        del sentences
        return {
            'vision_feats': feats,
            # A feature row whose mean is 0 is treated as padding.
            'attention_mask': (feats.mean(-1) != 0).to(torch.float32),
            'labels': outputs['input_ids'],
            'decoder_attention_mask': outputs['attention_mask'],
            'gts': gts}
    def compute_cider_rouge_and_bleu(self, inputs: EvalPrediction):
        """Decode predictions and score BLEU-1..4, ROUGE, CIDEr and METEOR.

        Returns a dict of metric name -> score; 'cider' holds the per-image
        scores (averaged later by the evaluation loop).
        """
        predictions, gts = inputs
        predictions = self.tokenizer.batch_decode(predictions, skip_special_tokens=True,
                                                  clean_up_tokenization_spaces=True)
        res = self.ptb_tokenizer.tokenize([[_] for _ in predictions])
        results = {}
        bleu, bleus = Bleu(4).compute_score(gts, res)
        for i, bleu_i in enumerate(bleu):
            results['bleu_' + str(i + 1)] = bleu_i
        rouge, rouges = Rouge().compute_score(gts, res)
        results['rouge'] = rouge
        cider, ciders = Cider(cider_cached=None).compute_score(gts, res)
        results["cider"] = ciders
        meteor, meteors = Meteor().compute_score(gts, res)
        results["meteor"] = meteor
        return results
    def compute_training_cider(self, inputs: EvalPrediction):
        """CIDEr reward for SCST: score each sampled sequence vs its refs.

        Ground truths are re-tokenised/truncated the same way as training
        captions and get an explicit EOS appended so the reward matches the
        decoding format of the sampled sequences.
        """
        predictions, gts = inputs
        sample_num = len(predictions)
        batch_size = len(gts)
        sample_per_image = sample_num // batch_size
        predictions = self.tokenizer.batch_decode(predictions, skip_special_tokens=False,
                                                  clean_up_tokenization_spaces=False)
        _gts = []
        for _ in gts:
            _ = self.tokenizer.batch_encode_plus(_, add_special_tokens=False,max_length=62,truncation=True)
            _ = self.tokenizer.batch_decode(_.input_ids, skip_special_tokens=True,
                                            clean_up_tokenization_spaces=False)
            _gts.append(_)
        res_ = []
        gts_ = []
        for i in range(sample_num):
            gts_.append([_gts[i // sample_per_image][j] + " " + self.tokenizer.eos_token for j in
                         range(len(gts[i // sample_per_image]))])
            res_.append([predictions[i].replace(self.tokenizer.bos_token, "").replace(self.tokenizer.pad_token, "")])
        cider, ciders = self.train_cider.compute_score(gts_, res_)
        return {'cider': ciders}
    def compute_loss(self, model, inputs, return_outputs=False):
        """
        How the loss is computed by Trainer. By default, all models return the loss in the first element.
        Subclass and override for custom behavior.

        Two regimes: cross-entropy (optionally label-smoothed) when
        ``self.scst`` is False, otherwise an SCST policy-gradient loss with a
        CIDEr reward plus a list-wise similarity (ListNet-style) term.
        """
        labels = None
        if self.scst:
            labels = inputs.pop("labels")
            gts = inputs.pop("gts")
        elif self.label_smoother is not None and "labels" in inputs:
            ids = inputs.pop("labels")
            inputs['decoder_input_ids'] = ids
            labels = ids[:, 1:]
            # NOTE(review): masked_fill is NOT in-place and its result is
            # discarded — pad positions are likely meant to be set to -100
            # via masked_fill_; confirm against the label smoother.
            labels.masked_fill(labels == self.model.config.pad_token_id, -100)
        else:
            gts = inputs.pop("gts")
        if not self.scst:
            outputs = model(**inputs)
            if self.args.past_index >= 0:
                self._past = outputs[self.args.past_index]
            if labels is not None and self.label_smoother is not None:
                # Drop the last logit so logits align with the shifted labels.
                if isinstance(outputs, dict):
                    outputs["logits"] = outputs["logits"][:, :-1, :]
                else:
                    outputs[0] = outputs[0][:, :-1, :]
                loss = self.label_smoother(outputs, labels)
            else:
                loss = outputs["loss"] if isinstance(outputs, dict) else outputs[0]
        else:
            batch_size = len(inputs["vision_feats"])
            if self.model.config.do_sample:
                gen_kwargs = {
                    "max_length": None if self.model.config.max_length is None else self.model.config.max_length,
                    "use_cache": self.model.config.use_cache,
                    "synced_gpus": True if is_deepspeed_zero3_enabled() else False,
                    "do_sample": True,
                    "num_return_sequences": self.scst_num_sample_sequences,
                    'is_generate': True
                }
            else:
                gen_kwargs = {
                    "max_length": None if self.model.config.max_length is None else self.model.config.max_length,
                    "num_beams": self.scst_num_sample_sequences,
                    "use_cache": self.model.config.use_cache,
                    "synced_gpus": True if is_deepspeed_zero3_enabled() else False,
                    "do_sample": False,
                    "num_return_sequences": self.scst_num_sample_sequences,
                    'is_generate': True
                }
            outputs = model(**inputs, **gen_kwargs)
            logprobs = outputs["logprobs"]
            num_sample = len(outputs["sequences"]) // batch_size
            # for i in range (len(outputs["sequences"])):
            #     print(outputs["sequences"][i])
            #     print(outputs["logprobs"][i])
            similarity = outputs["similarity"].view(-1, num_sample)
            with torch.no_grad():
                # CIDEr reward per sampled sequence, shaped (batch, samples).
                sample_rewards = torch.as_tensor(
                    self.compute_training_cider(EvalPrediction(predictions=outputs["sequences"], label_ids=gts))[
                        'cider'].reshape(batch_size, num_sample), device=logprobs.device, dtype=torch.float32)
                if self.scst_baseline_type == 'greedy':
                    greedy_kwargs = {
                        "max_length": self.model.config.max_length,
                        "synced_gpus": True if is_deepspeed_zero3_enabled() else False,
                        'use_cache': self.model.config.use_cache,
                        'num_return_sequences': 1,
                        'num_beams': 1,
                        'do_sample': False,
                        'is_generate': True,
                    }
                    model.eval()
                    greedy_outputs = model(**inputs, **greedy_kwargs)
                    model.train()
                    base_rewards = \
                        torch.as_tensor(self.compute_training_cider(
                            EvalPrediction(predictions=greedy_outputs['sequences'], label_ids=gts))[
                                            'cider'].reshape(batch_size, 1), device=logprobs.device,
                                        dtype=torch.float32)
                elif self.scst_baseline_type == "avg_rest":
                    base_rewards = (sample_rewards.sum(1, keepdim=True) - sample_rewards) / (num_sample - 1)
                else:
                    base_rewards = sample_rewards.mean(1, keepdim=True)
            # Advantage-weighted policy gradient (rewards are detached above).
            reward = (sample_rewards - base_rewards).view(-1, 1)
            loss = -(logprobs * reward).sum(-1) / model.config.max_length
            loss = loss.mean()
            # ListMLE
            # sorted_reward, sorted_idx = sample_rewards.sort(-1, descending=True)
            # sorted_similarity = similarity.gather(-1, sorted_idx)
            # similarity_max,_ = similarity.max(-1, keepdim=True)
            # similarity_logits = sorted_similarity - similarity_max
            # logcumsumexp = torch.logcumsumexp(similarity_logits.flip(-1),1).flip(-1)
            # loss += ((logcumsumexp) - similarity_logits).sum(-1).mean()
            # ListNet
            similarity_label = torch.softmax(reward.view(-1, num_sample), dim=-1)
            loss += torch.nn.functional.cross_entropy(similarity, similarity_label)
            # similarity = torch.div(similarity, model.tau)
            # reward = reward.view(-1, num_sample)
            # loss += torch.nn.functional.softplus(torch.logsumexp(-reward * similarity, dim=1)).mean()
            # similarity_label = torch.ones_like(reward, device=reward.device).masked_fill_(reward <= 0, 0)
            # similarity_label /= similarity_label.sum(-1, keepdims=True)
            # loss += torch.nn.functional.cross_entropy(similarity, similarity_label)
            # sorted_similarity = similarity.gather(-1, sorted_idx)
            # for i in range(0, num_sample - 1):
            #     similarity_logits = sorted_similarity[:, i:]
            #     similarity_label = sorted_reward[:, i:]
            #     similarity_label = similarity_label-similarity_label[:,-1:]
            #     print(similarity_label)
            #     loss += torch.nn.functional.cross_entropy(similarity_logits, similarity_label)
            # ranking_loss = torch.max(torch.zeros_like(pos_similarity),
            #                          pos_reward - neg_reward + neg_similarity - pos_similarity)
            # loss += ranking_loss.sum() / batch_size
            # same_mask = torch.abs(sorted_reward[:, :-i] - sorted_reward[:, i:] > 0.01).float()
            # ones = torch.ones_like(pos_similarity, device=pos_similarity.device)
            # margin_loss = torch.nn.functional.margin_ranking_loss(pos_similarity, neg_similarity, ones,
            #                                                       margin=0.01 * i, reduction="none")
            # if same_mask.sum() > 0:
            #     loss += (margin_loss * same_mask).sum() / batch_size
            #
        return (loss, outputs) if return_outputs else loss
    def train(
            self,
            resume_from_checkpoint: Optional[Union[str, bool]] = None,
            trial: Union["optuna.Trial", Dict[str, Any]] = None,
            ignore_keys_for_eval: Optional[List[str]] = None,
            **kwargs,
    ):
        """
        Main training entry point.
        Args:
            resume_from_checkpoint (`str` or `bool`, *optional*):
                If a `str`, local path to a saved checkpoint as saved by a previous instance of [`Trainer`]. If a
                `bool` and equals `True`, load the last checkpoint in *args.output_dir* as saved by a previous instance
                of [`Trainer`]. If present, training will resume from the model/optimizer/scheduler states loaded here.
            trial (`optuna.Trial` or `Dict[str, Any]`, *optional*):
                The trial run or the hyperparameter dictionary for hyperparameter search.
            ignore_keys_for_eval (`List[str]`, *optional*)
                A list of keys in the output of your model (if it is a dictionary) that should be ignored when
                gathering predictions for evaluation during the training.
            kwargs:
                Additional keyword arguments used to hide deprecated arguments

        This is a copy of ``Trainer.train`` with one CONICA-specific hook:
        ``self.model.init_tau()`` is called just before the inner loop when
        ``self.init_tau`` is set.
        """
        if resume_from_checkpoint is False:
            resume_from_checkpoint = None
        # memory metrics - must set up as early as possible
        self._memory_tracker.start()
        args = self.args
        self.is_in_train = True
        # do_train is not a reliable argument, as it might not be set and .train() still called, so
        # the following is a workaround:
        if (args.fp16_full_eval or args.bf16_full_eval) and not args.do_train:
            self._move_model_to_device(self.model, args.device)
        if "model_path" in kwargs:
            resume_from_checkpoint = kwargs.pop("model_path")
            warnings.warn(
                "`model_path` is deprecated and will be removed in a future version. Use `resume_from_checkpoint` "
                "instead.",
                FutureWarning,
            )
        if len(kwargs) > 0:
            raise TypeError(f"train() received got unexpected keyword arguments: {', '.join(list(kwargs.keys()))}.")
        # This might change the seed so needs to run first.
        self._hp_search_setup(trial)
        self._train_batch_size = self.args.train_batch_size
        # Model re-init
        model_reloaded = False
        if self.model_init is not None:
            # Seed must be set before instantiating the model when using model_init.
            enable_full_determinism(self.args.seed) if self.args.full_determinism else set_seed(self.args.seed)
            self.model = self.call_model_init(trial)
            model_reloaded = True
            # Reinitializes optimizer and scheduler
            self.optimizer, self.lr_scheduler = None, None
        # Load potential model checkpoint
        if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
            resume_from_checkpoint = get_last_checkpoint(args.output_dir)
            if resume_from_checkpoint is None:
                raise ValueError(f"No valid checkpoint found in output directory ({args.output_dir})")
        if resume_from_checkpoint is not None and not is_sagemaker_mp_enabled() and args.deepspeed is None:
            self._load_from_checkpoint(resume_from_checkpoint)
        # If model was re-initialized, put it on the right device and update self.model_wrapped
        if model_reloaded:
            if self.place_model_on_device:
                self._move_model_to_device(self.model, args.device)
            self.model_wrapped = self.model
        # CONICA-specific: optionally reset the contrastive temperature.
        if self.init_tau:
            self.model.init_tau()
        inner_training_loop = find_executable_batch_size(
            self._inner_training_loop, self._train_batch_size, args.auto_find_batch_size
        )
        return inner_training_loop(
            args=args,
            resume_from_checkpoint=resume_from_checkpoint,
            trial=trial,
            ignore_keys_for_eval=ignore_keys_for_eval,
        )
    def training_step(self, model, inputs):
        # Similarity counting is only needed during evaluation; disable it
        # for the forward/backward pass.
        model.config.count_similarity = False
        return super().training_step(model, inputs)
    def evaluate(
            self,
            eval_dataset: Optional[Dataset] = None,
            ignore_keys: Optional[List[str]] = None,
            metric_key_prefix: str = "eval",
            max_length: Optional[int] = None,
            num_beams: Optional[int] = 5,
    ) -> Dict[str, float]:
        """Evaluate with generation; stores max_length/num_beams for
        prediction_step before delegating to the base implementation."""
        self._max_length = max_length if max_length is not None else self.model.config.max_length
        self._num_beams = num_beams if num_beams is not None else self.model.config.num_beams
        return super().evaluate(eval_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
    def evaluation_loop(
            self,
            dataloader: DataLoader,
            description: str,
            prediction_loss_only: Optional[bool] = None,
            ignore_keys: Optional[List[str]] = None,
            metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.
        Works both with or without labels.

        Adapted from ``Trainer.evaluation_loop``: labels here are Python
        lists of reference captions (gts), so they are accumulated with
        ``list.extend`` instead of tensor concatenation, and the caption
        metrics are computed at the end.
        """
        args = self.args
        prediction_loss_only = prediction_loss_only if prediction_loss_only is not None else args.prediction_loss_only
        # if eval is called w/o train init deepspeed here
        if args.deepspeed and not self.deepspeed:
            # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval
            # from the checkpoint eventually
            deepspeed_engine, _, _ = deepspeed_init(
                self, num_training_steps=0, resume_from_checkpoint=None, inference=True
            )
            self.model = deepspeed_engine.module
            self.model_wrapped = deepspeed_engine
            self.deepspeed = deepspeed_engine
        model = self._wrap_model(self.model, training=False)
        # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called
        # while ``train`` is running, cast it to the right dtype first and then put on device
        if not self.is_in_train:
            if args.fp16_full_eval:
                model = model.to(dtype=torch.float16, device=args.device)
            elif args.bf16_full_eval:
                model = model.to(dtype=torch.bfloat16, device=args.device)
        batch_size = dataloader.batch_size
        logger.info(f"***** Running {description} *****")
        if has_length(dataloader.dataset):
            logger.info(f"  Num examples = {self.num_examples(dataloader)}")
        else:
            logger.info("  Num examples: Unknown")
        logger.info(f"  Batch size = {batch_size}")
        model.config.count_similarity = self.count_similarity
        model.eval()
        self.callback_handler.eval_dataloader = dataloader
        # Do this before wrapping.
        eval_dataset = dataloader.dataset
        if args.past_index >= 0:
            self._past = None
        # Initialize containers
        # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)
        losses_host = None
        preds_host = None
        labels_host = None
        metrics_host = None
        # losses/preds/labels on CPU (final containers)
        all_losses = None
        all_preds = None
        all_labels = None
        all_metrics = None
        # Will be useful when we have an iterable dataset so don't know its length.
        observed_num_examples = 0
        # Main evaluation loop
        for step, inputs in enumerate(dataloader):
            # Update the observed num examples
            observed_batch_size = find_batch_size(inputs)
            if observed_batch_size is not None:
                observed_num_examples += observed_batch_size
                # For batch samplers, batch_size is not known by the dataloader in advance.
                if batch_size is None:
                    batch_size = observed_batch_size
            # Prediction step
            loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
            # Update containers on host
            if loss is not None:
                losses = self._nested_gather(loss.repeat(batch_size))
                losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0)
            if labels is not None:
                # labels are plain Python lists of reference captions.
                if labels_host is None:
                    labels_host = labels
                else:
                    labels_host.extend(labels)
            if logits is not None:
                logits = self._pad_across_processes(logits)
                logits = self._nested_gather(logits)
                if self.preprocess_logits_for_metrics is not None:
                    logits = self.preprocess_logits_for_metrics(logits, labels)
                preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=0)
            self.control = self.callback_handler.on_prediction_step(args, self.state, self.control)
            # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.
            if args.eval_accumulation_steps is not None and (step + 1) % args.eval_accumulation_steps == 0:
                if losses_host is not None:
                    losses = nested_numpify(losses_host)
                    all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
                if preds_host is not None:
                    logits = nested_numpify(preds_host)
                    all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=0)
                if labels_host is not None:
                    labels = labels_host
                    if all_labels is None:
                        all_labels = labels
                    else:
                        all_labels.extend(labels)
                # Set back to None to begin a new accumulation
                losses_host, preds_host, labels_host, metrics_host = None, None, None, None
        if args.past_index and hasattr(self, "_past"):
            # Clean the state at the end of the evaluation loop
            delattr(self, "_past")
        # Gather all remaining tensors and put them back on the CPU
        if losses_host is not None:
            losses = nested_numpify(losses_host)
            all_losses = losses if all_losses is None else np.concatenate((all_losses, losses), axis=0)
        if preds_host is not None:
            logits = nested_numpify(preds_host)
            all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=0)
        if labels_host is not None:
            labels = labels_host
            if all_labels is None:
                all_labels = labels
            else:
                all_labels.extend(labels)
        if has_length(eval_dataset):
            num_samples = len(eval_dataset)
        # The instance check is weird and does not actually check for the type, but whether the dataset has the right
        # methods. Therefore we need to make sure it also has the attribute.
        elif isinstance(eval_dataset, IterableDatasetShard) and hasattr(eval_dataset, "num_examples"):
            num_samples = eval_dataset.num_examples
        else:
            num_samples = observed_num_examples
        # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of
        # samplers has been rounded to a multiple of batch_size, so we truncate.
        if all_losses is not None:
            all_losses = all_losses[:num_samples]
        if all_preds is not None:
            all_preds = nested_truncate(all_preds, num_samples)
        if all_labels is not None:
            all_labels = all_labels[:num_samples]
        all_metrics = self.compute_metrics(EvalPrediction(predictions=all_preds, label_ids=all_labels))
        # To be JSON-serializable, we need to remove numpy types or zero-d tensors
        metrics = {k: all_metrics[k].mean().item() for k in all_metrics.keys()}
        metrics = denumpify_detensorize(metrics)
        # Prefix all keys with metric_key_prefix + '_'
        for key in list(metrics.keys()):
            if not key.startswith(f"{metric_key_prefix}_"):
                metrics[f"{metric_key_prefix}_{key}"] = metrics.pop(key)
        return EvalLoopOutput(predictions=all_preds, label_ids=all_labels, metrics=metrics, num_samples=num_samples)
    def predict(
            self,
            test_dataset: Dataset,
            ignore_keys: Optional[List[str]] = None,
            metric_key_prefix: str = "test",
            max_length: Optional[int] = None,
            num_beams: Optional[int] = 5,
    ) -> PredictionOutput:
        """Predict with generation; stores max_length/num_beams for
        prediction_step before delegating to the base implementation."""
        self._max_length = max_length if max_length is not None else self.model.config.max_length
        self._num_beams = num_beams if num_beams is not None else self.model.config.num_beams
        return super().predict(test_dataset, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
    def prediction_step(
            self,
            model: nn.Module,
            inputs: Dict[str, Union[torch.Tensor, Any]],
            prediction_loss_only: bool,
            ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        """
        Perform an evaluation step on `model` using `inputs`.
        Subclass and override to inject custom behavior.
        Args:
            model (`nn.Module`):
                The model to evaluate.
            inputs (`Dict[str, Union[torch.Tensor, Any]]`):
                The inputs and targets of the model.
                The dictionary will be unpacked before being fed to the model. Most models expect the targets under the
                argument `labels`. Check your model's documentation for all accepted arguments.
            prediction_loss_only (`bool`):
                Whether or not to return the loss only.
        Return:
            Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and
            labels (each being optional).

        Generates caption token sequences by beam search and returns them as
        the "logits" slot; the reference captions (gts) are returned as
        labels. The loss is always None here.
        """
        # if prediction_loss_only:
        #     return super().prediction_step(
        #         model, inputs, prediction_loss_only=prediction_loss_only, ignore_keys=ignore_keys
        #     )
        inputs = self._prepare_inputs(inputs)
        # XXX: adapt synced_gpus for fairscale as well
        gen_kwargs = {
            "max_length": self._max_length if self._max_length is not None else self.model.config.max_length,
            "num_beams": self._num_beams,
            "use_cache": self.model.config.use_cache,
            "synced_gpus": True if is_deepspeed_zero3_enabled() else False,
            "do_sample": False,
            "num_return_sequences": 1,
            'is_generate': True,
            "output_hidden_states": False,
            "output_attentions": False
        }
        if "attention_mask" in inputs:
            gen_kwargs["attention_mask"] = inputs.get("attention_mask", None)
        outputs = self.model(
            vision_feats=inputs["vision_feats"],
            **gen_kwargs,
        )
        generated_tokens = outputs['sequences']
        # _outputs = self.model(
        #     vision_feats=inputs["vision_feats"],
        #     attention_mask=gen_kwargs["attention_mask"],
        #     decoder_input_ids=generated_tokens,
        #     decoder_attention_mask=generated_tokens != self.model.config.pad_token_id,
        # )
        # v_embeds, t_embeds = _outputs.pooler_output
        # similarity = torch.einsum("ij,ij->i", v_embeds, t_embeds)
        if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
            generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])
        loss = None
        if self.args.prediction_loss_only:
            return (loss, None, None)
        labels = inputs['gts']
        return (loss, generated_tokens, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        """Right-pad generated token tensors to *max_length* with the pad
        (or EOS) token id so sequences from all batches can be concatenated."""
        if self.tokenizer is not None and hasattr(self.tokenizer, "pad_token_id"):
            # If PAD token is not defined at least EOS token has to be defined
            pad_token_id = (
                self.tokenizer.pad_token_id if self.tokenizer.pad_token_id is not None else self.tokenizer.eos_token_id
            )
        else:
            if self.model.config.pad_token_id is not None:
                pad_token_id = self.model.config.pad_token_id
            else:
                raise ValueError("Pad_token_id must be set in the configuration of the model, in order to pad tensors")
        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| DenglinGo/CONICA | utils/trainer_conica.py | trainer_conica.py | py | 29,247 | python | en | code | 0 | github-code | 13 |
72308557139 |
import cv2 as cv
import numpy as np
# Invert an image's pixels in place, then display and save the result.
def access_pixels(image):
    """Invert every pixel of *image* in place (value -> 255 - value).

    The inverted image is shown in a window named "pixels" and written to
    D:\\image\\2.png. Assumes an 8-bit image with a channel axis.
    """
    print(image.shape)
    # Image dimensions: height x width x channels.
    height = image.shape[0]
    width = image.shape[1]
    channels = image.shape[2]
    print("width:%s,height:%s,channels:%s"%(width,height,channels))
    # Vectorized in-place inversion: one NumPy ufunc call instead of the
    # original triple Python loop — identical result, orders of magnitude
    # faster on real images.
    np.subtract(255, image, out=image)
    cv.imshow("pixels", image)
    cv.imwrite('D:\\image\\2.png', image)
def inverse(image):
    """Show the bitwise negation of *image* in a window named "new"."""
    negated = cv.bitwise_not(image)
    cv.imshow("new", negated)
# Create a brand-new image from scratch.
def create_image():
    """Create a 400x400 image, paint its red channel, and demo ndarray ops."""
    # 400x400 three-channel (BGR) image, initially all black.
    img = np.zeros([400, 400, 3], np.uint8)
    cv.imshow("input image", img)
    # Paint the red channel (index 2 in BGR order) with intensity 200.
    img[:, :, 2] = np.ones([400, 400]) * 200
    cv.imshow("new", img)
    cv.imwrite('D:\\image\\3.png', img)
    # Small ndarray demo: fill() truncates 122.33 to 122 for uint8 storage.
    demo = np.ones([3, 3], np.uint8)
    demo.fill(122.33)
    print(demo)
    reshaped = demo.reshape([1, 9])
    print(reshaped)
print("******************************************")
# Read an image from disk (OpenCV stores channels in BGR order).
src = cv.imread('D:\\image\\timg.jpg')#blue,green,red
print(src)
cv.namedWindow("input image",cv.WINDOW_AUTOSIZE)
#cv.imshow("input image",static)
# Time the demo using the CPU tick counter.
t1 = cv.getTickCount()
create_image()
#access_pixels(static)
t2 = cv.getTickCount()
# cv.getTickFrequency() returns ticks per second.
# NOTE: the local name `time` below shadows the stdlib module name.
time = (t2-t1)/cv.getTickFrequency()
print("time:%s"%(time*1000))
# Block until a key press (millisecond timeout; 0 waits forever).
cv.waitKey(0)
# Release all OpenCV windows/resources.
cv.destroyAllWindows()
| huangxinyu1/opencv- | opencv学习/02np数组操作.py | 02np数组操作.py | py | 1,860 | python | en | code | 0 | github-code | 13 |
8080719687 | from __future__ import print_function, division
import datetime
import os
import sys
import torch.nn as nn
import cv2
import numpy as np
from scipy.misc import imread
from utils import CFScore
import torch
from utils import helpers
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss
import pdb
def prepare_data(dataset_dir):
    """Collect sorted input/label file paths for the train/val/test splits.

    Args:
        dataset_dir: Root directory containing the six split sub-folders:
            train, train_labels, val, val_labels, test, test_labels.

    Returns:
        Six sorted lists of file paths, in the order: train inputs,
        train labels, val inputs, val labels, test inputs, test labels.
        Sorting keeps inputs and labels aligned index-by-index (they share
        file names).
    """
    def _listing(subdir):
        # One sorted path list per split folder (replaces six copy-pasted
        # listdir loops plus a tuple-of-.sort() hack).
        folder = dataset_dir + "/" + subdir
        return sorted(folder + "/" + file for file in os.listdir(folder))

    return (_listing("train"), _listing("train_labels"),
            _listing("val"), _listing("val_labels"),
            _listing("test"), _listing("test_labels"))
# Input images & GT boundaries: stored gray-scale, expanded to 3 channels.
def load_image1(path):
    """Read *path* as gray-scale and replicate it into a 3-channel image."""
    gray = cv2.imread(path, 0)
    return cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
# GT -- color images
def load_image2(path):
    """Read *path* unchanged (flag -1 keeps depth/alpha) and convert BGR->RGB."""
    raw = cv2.imread(path, -1)
    return cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
# GT label maps, loaded as a single gray-scale channel.
def load_image3(path):
    """Read *path* as a single-channel gray-scale image."""
    return cv2.imread(path, 0)
def cp_loss(weights, labels, loss, batchsize):
    """Class-weighted loss: scale each pixel's loss by its class weight.

    Args:
        weights: 1-D tensor of per-class weights, length = num classes.
        labels: assumed one-hot, shaped (batch, classes, H, W) — TODO confirm.
        loss: per-pixel loss, assumed shaped (batch, H, W) or (batch, 1, H, W)
            (it is squeezed below) — TODO confirm against caller.
        batchsize: nominal batch size used for averaging (note: the average
            divides by `batchsize` even if the actual batch is smaller).

    Returns:
        Scalar tensor: sum of weighted per-pixel losses averaged over
        `batchsize`. NOTE(review): requires CUDA — every tensor is moved
        with .cuda().
    """
    # pdb.set_trace()
    loss_batch = []
    batch_size = min(batchsize, labels.shape[0])
    for i in range(batch_size):
        label = labels[i, :, :, :]
        loss_new = loss[i, :, :]
        # NOTE(review): torch.tensor(label) copies an existing tensor (and
        # emits a warning on recent torch); used here only as a same-shape
        # buffer that is fully overwritten below.
        weights_batch = torch.tensor(label).cuda()
        loss_new = loss_new.cuda()
        label = label.cuda()
        a = weights.shape
        # Broadcast each class weight over its channel's spatial map.
        for j in range(a[0]):
            weights_batch[j, :, :] = weights[j]
        # Per-pixel weight = weight of the pixel's (one-hot) class.
        weights_new = weights_batch * label
        weights_new = (weights_new.sum(dim=0))
        weighted_loss = torch.squeeze(loss_new) * weights_new
        weighted_loss = torch.sum(weighted_loss)
        loss_batch.append(weighted_loss)
    aloss = 0
    for item in loss_batch:
        aloss += item
    return aloss / batchsize
# Takes an absolute file path and returns the name of the file without the extension
def filepath_to_name(full_name):
    """Return the base file name of *full_name* without its extension."""
    base = os.path.basename(full_name)
    stem, _ext = os.path.splitext(base)
    return stem
# Print (or write) a message prefixed with the current timestamp.
def LOG(X, f=None):
    """Emit "[YYYY-mm-dd HH:MM:SS] X" to stdout, or write it to *f* if given."""
    stamp = datetime.datetime.now().strftime("[%Y-%m-%d %H:%M:%S]")
    line = stamp + " " + X
    if f:
        f.write(line)
    else:
        print(line)
# Resize an image and its label map to the same target size.
def resize_data(image, label, resize_height, resize_width):
    """Return (image, label) both resized to (resize_height, resize_width)."""
    size = (resize_height, resize_width)
    return cv2.resize(image, size), cv2.resize(label, size)
def compute_cpa(pred, label, num_classes, resize_height, resize_width):
    """Per-class pixel accuracy between a prediction and its ground truth.

    Args:
        pred, label: flat (or reshapeable) integer class maps of
            resize_height * resize_width pixels.
        num_classes: number of classes to score.

    Returns:
        List of length num_classes. Classes with <= 20 ground-truth pixels
        are treated as annotation noise and get the sentinel value 8.8
        ("not evaluated", filtered out by callers).
    """
    pred = np.reshape(pred, (resize_height, resize_width))
    label = np.reshape(label, (resize_height, resize_width))
    accuracies = []
    for cls in range(num_classes):
        gt_pixels = float(np.sum(label == cls))
        if gt_pixels <= 20.1:
            accuracies.append(8.8)
        else:
            # Vectorized count of correctly predicted pixels of this class
            # (replaces the original O(pixels) Python loop over np.where).
            correct = float(np.sum((pred == cls) & (label == cls)))
            accuracies.append(correct / gt_pixels)
    return accuracies
def compute_iou(pred, label, num_classes, resize_height, resize_width):
    """Per-class intersection-over-union between a prediction and ground truth.

    Args:
        pred, label: flat (or reshapeable) integer class maps of
            resize_height * resize_width pixels.
        num_classes: number of classes to evaluate.
        resize_height, resize_width: spatial size the inputs are reshaped to.

    Returns:
        List with one entry per class: intersection / union, or the sentinel
        8.8 when the class has <= 20 GT pixels (treated as annotation noise).
    """
    pred = np.reshape(pred, (resize_height, resize_width))
    label = np.reshape(label, (resize_height, resize_width))
    IoU = []
    for c in range(num_classes):
        gt_area = float((label == c).sum())
        pred_area = float((pred == c).sum())
        # Vectorised replacement for the original per-pixel Python loop
        # (identical count of pixels where pred == label == c).
        intersection = float(np.logical_and(pred == c, label == c).sum())
        union = gt_area + pred_area - intersection
        if gt_area <= 20.1:
            IoU.append(8.8)
        else:
            # union >= gt_area > 20.1 here, so no division by zero.
            IoU.append(intersection / union)
    return IoU
def evaluate_segmentation(pred, label, num_classes, resize_height, resize_width):
    """Collect per-class accuracy, IoU, and F-score counts for one pred/GT pair."""
    args = (pred.flatten(), label.flatten(), num_classes, resize_height, resize_width)
    cpa = compute_cpa(*args)
    iou = compute_iou(*args)
    tpos, fpos, fneg = CFScore.compute_F_Score_e(*args)
    return cpa, iou, tpos, fpos, fneg
def compute_class_weights(labels_dir, label_values):
    '''
    Estimate normalised inverse-frequency class weights from label images.

    Arguments:
        labels_dir: directory containing the ``.png`` segmentation labels.
        label_values: one colour per class; pixels are counted per colour.
    Returns:
        class_weights: normalised inverse-frequency weight for every class
        that actually appears in the data (empty classes are dropped).
    '''
    image_files = [os.path.join(labels_dir, file)
                   for file in os.listdir(labels_dir) if file.endswith('.png')]
    class_pixels = np.zeros(len(label_values))
    for n, image_file in enumerate(image_files):
        image = imread(image_file)
        for index, colour in enumerate(label_values):
            # Boolean mask of pixels matching this class colour.
            matches = np.all(np.equal(image, colour), axis=-1).astype(np.float32)
            class_pixels[index] += np.sum(matches)
        print("\rProcessing image: " + str(n) + " / " + str(len(image_files)), end="")
        sys.stdout.flush()
    total_pixels = float(np.sum(class_pixels))
    # Drop classes that never appear so the division below stays finite.
    class_pixels = np.delete(class_pixels, np.argwhere(class_pixels == 0.0))
    class_weights = total_pixels / class_pixels
    return class_weights / np.sum(class_weights)
def mkdir(path):
    """Create *path* (and parents) if it does not exist.

    Returns True when the directory was created, False when it already existed.
    Uses try/except instead of the original exists()-then-makedirs() sequence,
    which was racy (the directory could appear between the two calls).
    """
    try:
        os.makedirs(path)
        return True
    except FileExistsError:
        return False
def save_feature_to_img(feature_images, name,
                        out_dir='/home/wujunxian/data/WTCNN-Net/wavwlet_cnn_master/featuremap/'):
    """Save every channel of a feature map as a grayscale JPEG.

    Args:
        feature_images: 4-D feature tensor [batch, channel, height, width];
            the per-channel ``view`` below assumes batch == 1 -- TODO confirm.
        name: sub-directory name (one folder per visualised layer).
        out_dir: root output directory; the default preserves the original
            hard-coded path for backward compatibility.
    """
    features = feature_images
    # Create the target folder once, not once per channel.
    mkdir(os.path.join(out_dir, name))
    for i in range(features.shape[1]):
        # Each channel is one conv kernel's response; save it as its own image.
        feature = features[:, i, :, :]
        feature = feature.view(feature.shape[1], feature.shape[2])  # batch is 1, squeeze to 2-D
        # FIX: the original did `.cuda().data.cpu()`, a pointless device
        # round-trip that crashes on CPU-only machines; detach+cpu yields
        # identical values.
        feature = feature.detach().cpu().numpy()
        # Min-max normalise to [0, 1]; the epsilon guards a constant map.
        feature = (feature - np.amin(feature)) / (np.amax(feature) - np.amin(feature) + 1e-5)
        # Scale to [0, 255] for cv2.imwrite().
        feature = np.round(feature * 255)
        cv2.imwrite(os.path.join(out_dir, name, str(i) + '.jpg'), feature)
class DiceLoss(nn.Module):
    """Soft Dice loss for single-channel predictions.

    Returns ``1 - mean Dice coefficient`` over the batch; the smoothing term
    of 1 keeps the ratio finite on empty masks.
    """

    def __init__(self):
        super(DiceLoss, self).__init__()

    def forward(self, input, target):
        batch = target.size(0)
        smooth = 1
        # Flatten everything but the batch axis.
        pred_flat = input.view(batch, -1)
        target_flat = target.view(batch, -1)
        overlap = (pred_flat * target_flat).sum(1)
        dice = 2 * (overlap + smooth) / (pred_flat.sum(1) + target_flat.sum(1) + smooth)
        return 1 - dice.sum() / batch
class MulticlassDiceLoss(nn.Module):
    """
    Applies DiceLoss to each class channel in turn and sums the results.

    Requires one-hot encoded targets: input.shape[0:1] and target.shape[0:1]
    must be (N, C) where N is the batch size and C the number of classes.
    An optional per-class weight vector scales each class's contribution.
    """

    def __init__(self):
        super(MulticlassDiceLoss, self).__init__()

    def forward(self, input, target, weights=None):
        num_classes = target.shape[1]
        dice = DiceLoss()
        totalLoss = 0
        for c in range(num_classes):
            class_loss = dice(input[:, c], target[:, c])
            if weights is not None:
                class_loss = class_loss * weights[c]
            totalLoss += class_loss
        return totalLoss
| wjx1198/MTSSN-WT | utils/utils.py | utils.py | py | 9,772 | python | en | code | 0 | github-code | 13 |
import math

# Circle geometry for a fixed radius of 2.
radius = 2
# FIX: the original hard-coded pi as 3.1459 -- a typo for 3.14159... -- which
# skewed circumference and area. Use math.pi; `pie` is kept as a module-level
# name for backward compatibility.
pie = math.pi
diameter = 2 * radius
circumference = 2 * pie * radius
area = pie * (radius ** 2)
print('Diameter is', diameter)
print('Circumference is', circumference)
print('Area is', area)
| susannaholubiyi/Python | diameter.py | diameter.py | py | 220 | python | en | code | 0 | github-code | 13 |
12045836170 |
import matplotlib.pyplot as plt
import numpy as np
# functions to show an image
def imshow(img):
img = img / 2 + 0.5 # unnormalize
plt.imshow(np.transpose(img, (1, 2, 0)))
def plot_dataset_images(train_loader, no_images):
    """
    Plot the first 'n' (no_images) images from one batch of the given loader.

    :param train_loader: dataset loader yielding (images, labels) batches
    :param no_images: number of images to plot
    :return:
    """
    import math

    classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

    # Grab a single batch and convert it to numpy for display.
    dataiter = iter(train_loader)
    images, labels = next(dataiter)
    images = images.numpy()

    # Lay the images out on two rows, each titled with its class name.
    fig = plt.figure(figsize=(25, 4))
    columns = math.ceil(no_images / 2)
    for idx in range(no_images):
        ax = fig.add_subplot(2, columns, idx + 1, xticks=[], yticks=[])
        imshow(images[idx])
        ax.set_title(classes[labels[idx]])
def plot_train_test_accuracy_loss(train_losses, train_acc, test_losses, test_acc):
    """
    Visualize training/test loss and accuracy in a 2x2 grid of subplots:
    left column = training (loss on top, accuracy below),
    right column = test (loss on top, accuracy below).
    :return:
    """
    fig, axs = plt.subplots(2, 2, figsize=(15, 10))
    panels = [
        ((0, 0), train_losses, "Training Loss"),
        ((1, 0), train_acc, "Training Accuracy"),
        ((0, 1), test_losses, "Test Loss"),
        ((1, 1), test_acc, "Test Accuracy"),
    ]
    for (row, col), series, title in panels:
        axs[row, col].plot(series)
        axs[row, col].set_title(title)
| Paurnima-Chavan/cifar-s10 | src/utils.py | utils.py | py | 1,765 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.