index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
17,100 | 15ce460de811a43340446c3540d8982b46cc4b7c | # ์ด๋ฆ : ์คํ์ฑ
# ๋ ์ง : 11 02 2017
# ์ฃผ์  : 2016๋ ์์ผ ๊ณ์ฐ
def getDayName(month, day):
    """Return the weekday abbreviation for a date in 2016.

    2016 is a leap year that starts on a Friday, so the offset from
    Jan 1 (FRI) modulo 7 selects the weekday; February is fixed at 29 days.

    :param month: month number, 1-12
    :param day: day of the month, 1-based
    :return: one of "FRI", "SAT", "SUN", "MON", "TUE", "WED", "THU"
    """
    dotw = ("FRI", "SAT", "SUN", "MON", "TUE", "WED", "THU")
    # Days in each month of 2016. This table replaces the original
    # odd/even branching, which contained `beforeDays ++ 30` -- a no-op
    # unary plus instead of `+=` -- so every month after July was wrong.
    month_days = (31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31)
    beforeDays = sum(month_days[:month - 1]) + (day - 1)
    return dotw[beforeDays % 7]
# Interactive driver: repeatedly read a month/day and print its 2016 weekday.
# NOTE(review): the final Korean string was split mid-literal by extraction
# (a syntax error as dumped); it has been re-joined onto one line -- confirm
# the exact original wording.
while True:
    insertMonth = int(input("๋ช์? : "))
    insertDay = int(input("๋ฉฐ์น ? :"))
    print(insertMonth, "์ ", insertDay, "์ผ์")
    print(getDayName(insertMonth, insertDay), " ์๋๋ค")
|
17,101 | 8146d2a9ed4b586efaa7e1d096af7fd61dbb018e | # day 4*
# text animation- show the words overtime(like pokemon and other JRPGs)
# i missed 3 days so it should be day 7 now but yea
import time
import pygame
pygame.init()
pygame.display.set_caption('dialogue')
# screen settings
size = width, height = 800, 800
bg_color = 0, 0, 0
white = (220, 220, 220)
red = (220, 0, 0)
screen = pygame.display.set_mode(size)
margin = 30
font = pygame.font.SysFont('tahoma', 32)
class Dialogue:
    """Typewriter-style dialogue box.

    Reveals the text one character every ``interval`` frames, JRPG-style.
    A backtick "`" embedded in the text forces a line break.
    """

    def __init__(self, txt):
        self.interval = 2  # draw the next character every 2 frames
        self.n = 1  # frame number
        self.text = txt
        self.len_txt = len(txt)
        self.char_n = 0  # which character to draw (index if str)
        self.line = 0  # which line in the dialogue
        self.outputs = [""]  # text that is outputted

    # draw everything
    def draw(self):
        # Box geometry: fixed margins, height grows with the number of
        # revealed lines so the box expands as text wraps.
        x, y = margin, margin
        w, h = width - 2*margin, 20 + 50*len(self.outputs)
        col = (255, 255, 255)
        pygame.draw.rect(screen, col, (x, y, w, h))
        # draw all the lines in the outputs
        for i, line in enumerate(self.outputs):
            out_text = font.render(line, True, bg_color, col)
            out_text_rect = (x + 30, (y + 20)*(i+1) )
            screen.blit(out_text, out_text_rect)
        # Advance the animation once per draw call (i.e. once per frame).
        self.update()

    def update(self):
        # deal with changing the letter
        self.char_n = self.n//self.interval
        # Stop advancing once the whole text has been revealed.
        self.n += 1 if self.char_n <= self.len_txt else 0
        self.outputs[self.line] = self.text[0:self.char_n]
        # deal with new lines (dirtiest code i've ever seen)
        if self.outputs[0]:
            if self.outputs[self.line][-1] == "`":
                self.outputs[self.line] = self.outputs[self.line][:-1]  # remove the ` symbol
                self.n = self.interval  # this prevents the code from going in this if statement more than once
                # (because it checks self.outputs every time. this line only makes it check once)
                self.text = self.text[self.char_n:]  # remove the previous text which is already in self.outputs
                self.outputs.append("")  # add a new line to the outputs
                self.line += 1
# use ` to indicate new line
dialogue = Dialogue("There's something so refreshing about `Michelangeli's Chopin. It doesn't have `the same warmth I've become accustomed `to hearing, but it is so clear, and the `ideas are so well articulated and the `phrases so well formed. There is `something very pure and not self-indulgent `in it.")
def main():
    """Run the pygame event loop at 60 FPS until the window is closed."""
    clock = pygame.time.Clock()
    run = True
    while run:
        clock.tick(60)  # cap the frame rate so the typewriter speed is stable
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
        screen.fill(bg_color)
        # Draws the box AND advances the animation (draw() calls update()).
        dialogue.draw()
        pygame.display.flip()
    pygame.quit()
    quit()


main()
|
17,102 | cea0a7aa900d769f1f018275aa44eaf05f9ab40e | ## https://leetcode.com/problems/search-insert-position/
def search_insert(nums, target):
    """Return the index of *target* in sorted *nums*, or its insertion point.

    https://leetcode.com/problems/search-insert-position/
    Binary search, O(log n). NOTE(review): the original snippet contained
    bare ``return`` statements at module level (a syntax error); the working
    binary-search variant is wrapped in a function here.

    :param nums: list of numbers sorted in ascending order
    :param target: value to locate
    :return: index of target, or the index where it keeps nums sorted

    >>> search_insert([1, 3, 5, 6], 5)
    2
    >>> search_insert([1, 3, 5, 6], 2)
    1
    """
    lo, hi = 0, len(nums) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if target < nums[mid]:
            hi = mid - 1
        elif target > nums[mid]:
            lo = mid + 1
        else:
            return mid
    # Not found: `lo` is the first index whose value exceeds target, which
    # is exactly the insertion point (0 or len(nums) at the boundaries).
    return lo
|
17,103 | 2f53c3db8ff8ba2beceba23fb7eb616bec38cc7a | '''Defina os atributos e mรฉtodos das seguintes classes:
Veรญculos:'''
class carro:
    """Simple car record whose details are collected interactively."""

    def __init__(self, Marca, Modelo, Ano, Estado, Km_rodados):
        self.marca = Marca
        self.modelo = Modelo
        self.ano = Ano
        self.estado = Estado
        # BUG FIX: was `self.km.rodados = ...`, which raises AttributeError
        # (there is no `self.km` object).
        self.km_rodados = Km_rodados

    def base(self):
        """Prompt for and echo the basic car details (marca/modelo/ano)."""
        # NOTE(review): `a = carro` binds the CLASS, so these prompts mutate
        # class attributes, not an instance. Preserved because the script
        # calls carro.base(1) with a dummy receiver.
        a = carro
        a.marca = input("Digite a marca do carro:")
        a.modelo = input("Digite o modo do carro:")
        a.ano = input("Digite o ano do carro:")
        print('\033[7;97m' + "Marca:", a.marca)
        print("Modelo:", a.modelo)
        print("Ano:", a.ano + '\033[0;0m')

    def extra(self):
        """Prompt for and echo the extra details (estado/km rodados)."""
        a = carro
        a.estado = input("Digite o estado atual do carro:")
        a.km_rodados = input("Digite quantos km possui o carro atualmente:")
        # BUG FIX: the original referenced `a.Marca` (wrong capitalization)
        # and `c.estado` (undefined name), both fatal at runtime.
        print('\033[7;97m' + "O modelo", a.modelo, ", da marca", a.marca, "estรก com estado", a.estado, "e com", a.km_rodados, "km rodados." + '\033[0;0m')
# Drive the interactive flow. `1` is a throw-away receiver: base()/extra()
# ignore `self` and mutate class attributes directly (smell -- see methods).
carro.base(1)
carro.extra(1)
17,104 | 86f41b5c6dab7d5edb1e30bd2cdd3bb0ee547dce | """
============
Mean filters
============
This example compares the following mean filters of the rank filter package:
* **local mean**: all pixels belonging to the structuring element to compute
average gray level
* **percentile mean**: only use values between percentiles p0 and p1
(here 10% and 90%)
* **bilateral mean**: only use pixels of the structuring element having a gray
level situated inside g-s0 and g+s1 (here g-500 and g+500)
Percentile and usual mean give here similar results, these filters smooth the
complete image (background and details). Bilateral mean exhibits a high
filtering rate for continuous area (i.e. background) while higher image
frequencies remain untouched.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.morphology import disk
import skimage.filter.rank as rank
# Coins sample image widened to a 12-bit range (uint16 * 16) so the rank
# filters see more gray levels.
a16 = (data.coins()).astype(np.uint16) * 16
selem = disk(20)  # disk-shaped structuring element, radius 20 px
# NOTE(review): `skimage.filter.rank` is a legacy module path; recent
# scikit-image releases use `skimage.filters.rank` -- confirm pinned version.
f1 = rank.percentile_mean(a16, selem=selem, p0=.1, p1=.9)  # mean of the 10%-90% percentile band
f2 = rank.bilateral_mean(a16, selem=selem, s0=500, s1=500)  # mean of pixels within g-500..g+500
f3 = rank.mean(a16, selem=selem)  # plain local mean
# display results
fig, axes = plt.subplots(nrows=3, figsize=(15, 10))
ax0, ax1, ax2 = axes
# Each row shows the original next to one filtered result.
ax0.imshow(np.hstack((a16, f1)))
ax0.set_title('percentile mean')
ax1.imshow(np.hstack((a16, f2)))
ax1.set_title('bilateral mean')
ax2.imshow(np.hstack((a16, f3)))
ax2.set_title('local mean')
plt.show()
|
17,105 | 58b34d088f718c5d119f75fdfa9320128bb60409 | """
Dots module.
@author: Jason Cohen
@author: Shaun Hamelin-Owens
@author: Sasithra Thanabalan
@author: Andrew Walker
"""
# Imports
from PointItem import PointItem
# Constants
DOT_POINTS = 10
class Dot(PointItem):
    """
    Dot class.
    This class contains the methods used in the creation of Dots.
    It is of the tkinter library and inherits an instance of PointItem.
    """

    def __init__(self, gameCanvas, specs):
        """
        Initialization.
        This method creates a new Dot, initializes it and draws it.
        @param gameCanvas: The canvas on which this dot is drawn.
        @param specs: Specifies the coordinates, radius, color, tag and points associated
        with this dot.
        """
        # Initialization of the Dot: forward specs plus the dot's point
        # value (specs['points']) to the PointItem base class.
        super(Dot, self).__init__(gameCanvas, specs, specs['points'])
        # Draw the Dot
        self.draw()
|
17,106 | d66177d7c440210135281588a2fcfdfd6b5ce9c6 | #!/usr/bin/env python
import os
ZMQ_ADDR = os.getenv('NMZ_ETHER_ZMQ_ADDR')
from hexdump import hexdump
import pynmz
from pynmz.inspector.ether import EtherInspectorBase
from pynmz.signal.event import PacketEvent
LOG = pynmz.LOG.getChild(__name__)
class Zk2080Inspector(EtherInspectorBase):
    """Inspector that defers ZooKeeper FLE (leader-election) TCP packets as
    PacketEvents; all other traffic is passed through untouched."""

    # @Override
    def map_packet_to_event(self, packet):
        """Map a sniffed packet to a deferrable PacketEvent, or None.

        :param packet: parsed packet exposing 'IP' and 'TCP' layers
            (scapy-style indexing assumed -- TODO confirm against the base
            class contract).
        :return: PacketEvent for FLE traffic with a payload, else None.
        """
        src, dst = packet['IP'].src, packet['IP'].dst
        sport, dport = packet['TCP'].sport, packet['TCP'].dport
        payload = packet['TCP'].payload
        ## heuristic: FLE ports tend to be these ones. (PortAssignment.java)
        fle_ports = (11223, 11226, 11229, 11232)
        if (sport in fle_ports or dport in fle_ports) and payload:
            src_entity = 'entity-%s:%d' % (src, sport)
            dst_entity = 'entity-%s:%d' % (dst, dport)
            ## TODO: use zktraffic to parse the payload
            ## Currently zktraffic does not work well, because some packets get corked when the delay is injected.
            d = {'payload': hexdump(str(payload), result='return')}
            deferred_event = PacketEvent.from_message(src_entity, dst_entity, d)
            LOG.info('defer FLE packet: %s', deferred_event)
            return deferred_event
        else:
            return None
if __name__ == '__main__':
    # Standalone mode: attach to the ZMQ endpoint named by the
    # NMZ_ETHER_ZMQ_ADDR environment variable and start inspecting.
    d = Zk2080Inspector(zmq_addr=ZMQ_ADDR)
    d.start()
|
17,107 | 969e2e97b9e538d7496fb259a07de8dab529b317 | from transformers import BertTokenizer, BertModel
# Shared configuration for fine-tuning a BERT-base model.
MAX_LENGTH = 128  # maximum number of wordpiece tokens per example
TOKENIZER = BertTokenizer.from_pretrained("bert-base-uncased")
TRAIN_BATCH_SIZE = 8
VALID_BATCH_SIZE = 4
MODEL = BertModel.from_pretrained("bert-base-uncased")
NUM_EPOCHS = 2
# NOTE(review): "/" is the filesystem root -- confirm the intended
# checkpoint directory before saving the model here.
MODEL_PATH = "/"
|
17,108 | 5b57a6a869ea790be7fe97fee52b0c66b3993342 | #!/usr/bin/env python
#-----------------------------------------------------------------------------
# Copyright (c) 2011-2013, The BIOM Format Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
from __future__ import division
from pyqi.core.command import (Command, CommandIn, CommandOut,
ParameterCollection)
from pyqi.core.exception import CommandError
from biom.table import (SparseOTUTable, DenseOTUTable, SparsePathwayTable,
DensePathwayTable, SparseFunctionTable,
DenseFunctionTable, SparseOrthologTable,
DenseOrthologTable, SparseGeneTable, DenseGeneTable,
SparseMetaboliteTable, DenseMetaboliteTable,
SparseTaxonTable, DenseTaxonTable, table_factory)
from biom.parse import (parse_biom_table, MetadataMap, convert_biom_to_table,
convert_table_to_biom, generatedby)
__author__ = "Greg Caporaso"
__copyright__ = "Copyright 2011-2013, The BIOM Format Development Team"
__credits__ = ["Greg Caporaso", "Daniel McDonald",
"Jose Carlos Clemente Litran", "Jai Ram Rideout"]
__license__ = "BSD"
__url__ = "http://biom-format.org"
__maintainer__ = "Greg Caporaso"
__email__ = "gregcaporaso@gmail.com"
class TableConverter(Command):
MatrixTypes = ['sparse', 'dense']
TableTypes = {
'otu table': [SparseOTUTable, DenseOTUTable],
'pathway table': [SparsePathwayTable, DensePathwayTable],
'function table': [SparseFunctionTable, DenseFunctionTable],
'ortholog table': [SparseOrthologTable, DenseOrthologTable],
'gene table': [SparseGeneTable, DenseGeneTable],
'metabolite table': [SparseMetaboliteTable, DenseMetaboliteTable],
'taxon table': [SparseTaxonTable, DenseTaxonTable]
}
ObservationMetadataTypes = {
'sc_separated': lambda x: [e.strip() for e in x.split(';')],
'naive': lambda x: x
}
ObservationMetadataTypes['taxonomy'] = \
ObservationMetadataTypes['sc_separated']
BriefDescription = "Convert to/from the BIOM table format"
LongDescription = ("Convert between BIOM and 'classic' (tab-delimited) "
"table formats. Detailed usage examples can be found "
"here: http://biom-format.org/documentation/biom_conversion.html")
CommandIns = ParameterCollection([
### This is not an ideal usage of the pyqi framework because we are
# expecting a file-like object here, and a lot of the parameters deal
# with I/O-ish things, like converting between file formats. Even
# though no I/O is forced here, it would be better to have rich objects
# as input and output, instead of lines of data. However, this will
# likely require a refactoring/redesign of our interface for table
# conversions because the primary input here can be either a BIOM table
# or a classic table. One possible solution is to split out different
# types of conversions into their own (smaller and simpler) commands,
# which would allow us to avoid some of this I/O-ish stuff.
CommandIn(Name='table_file', DataType=file,
Description='the input table (file-like object), either in '
'BIOM or classic format', Required=True),
CommandIn(Name='matrix_type', DataType=str,
Description='the type of BIOM file to create (dense or '
'sparse) when a classic table is supplied',
Default='sparse'),
CommandIn(Name='biom_to_classic_table', DataType=bool,
Description='convert BIOM table file to classic table file',
Default=False, DefaultDescription='convert classic table '
'file to BIOM table file'),
CommandIn(Name='sparse_biom_to_dense_biom', DataType=bool,
Description='convert sparse BIOM table file to a dense BIOM '
'table file', Default=False, DefaultDescription='convert '
'classic table file to BIOM table file'),
CommandIn(Name='dense_biom_to_sparse_biom', DataType=bool,
Description='convert dense BIOM table file to a sparse BIOM '
'table file', Default=False, DefaultDescription='convert '
'classic table file to BIOM table file'),
CommandIn(Name='sample_metadata', DataType=MetadataMap,
Description='the sample metadata map (will add sample '
'metadata to the BIOM table, if provided). Only applies '
'when converting from classic table file to BIOM table '
'file'),
CommandIn(Name='observation_metadata', DataType=MetadataMap,
Description='the observation metadata map (will add '
'observation metadata to the BIOM table, if provided). Only '
'applies when converting from classic table file to BIOM '
'table file'),
CommandIn(Name='header_key', DataType=str,
Description='pull this key from observation metadata within '
'a BIOM table file when creating a classic table file',
DefaultDescription='no observation metadata will be '
'included'),
CommandIn(Name='output_metadata_id', DataType=str,
Description='the name to be given to the observation '
'metadata column when creating a classic table from a BIOM-'
'formatted table', DefaultDescription='same name as in the '
'BIOM-formatted table'),
CommandIn(Name='process_obs_metadata', DataType=str,
Description='process metadata associated with observations '
'when converting from a classic table. Must be one of: %s' %
', '.join(ObservationMetadataTypes.keys()), Default='naive'),
CommandIn(Name='table_type', DataType=str,
Description='the BIOM table type to get converted into. '
'Required when converting a classic table file to a BIOM '
'table file. Must be one of: %s' %
', '.join(TableTypes.keys()))
])
CommandOuts = ParameterCollection([
CommandOut(Name='table_str', DataType=str,
Description='The resulting table')
])
def run(self, **kwargs):
table_file = kwargs['table_file']
matrix_type = kwargs['matrix_type']
biom_to_classic_table = kwargs['biom_to_classic_table']
sparse_biom_to_dense_biom = kwargs['sparse_biom_to_dense_biom']
dense_biom_to_sparse_biom = kwargs['dense_biom_to_sparse_biom']
sample_metadata = kwargs['sample_metadata']
observation_metadata = kwargs['observation_metadata']
header_key = kwargs['header_key']
output_metadata_id = kwargs['output_metadata_id']
process_obs_metadata = kwargs['process_obs_metadata']
table_type = kwargs['table_type']
if sum([biom_to_classic_table, sparse_biom_to_dense_biom,
dense_biom_to_sparse_biom]) > 1:
raise CommandError("Converting between classic/BIOM formats and "
"sparse/dense representations are mutually "
"exclusive. You may only specify a single "
"operation at a time.")
# if the user does not specify a name for the output metadata column,
# set it to the same as the header key
output_metadata_id = output_metadata_id or header_key
convert_error_msg = ("Input does not look like a BIOM-formatted file. "
"Did you accidentally specify that a classic "
"table file should be created from a BIOM table "
"file?")
if biom_to_classic_table:
try:
result = convert_biom_to_table(table_file, header_key,
output_metadata_id)
except ValueError:
raise CommandError(convert_error_msg)
elif sparse_biom_to_dense_biom:
try:
table = parse_biom_table(table_file)
except ValueError:
raise CommandError(convert_error_msg)
conv_constructor = self.TableTypes[table._biom_type.lower()][1]
conv_table = table_factory(table._data, table.SampleIds,
table.ObservationIds, table.SampleMetadata,
table.ObservationMetadata, table.TableId,
constructor=conv_constructor)
result = conv_table.getBiomFormatJsonString(generatedby())
elif dense_biom_to_sparse_biom:
try:
table = parse_biom_table(table_file)
except ValueError:
raise CommandError(convert_error_msg)
conv_constructor = self.TableTypes[table._biom_type.lower()][0]
conv_table = table_factory(table._data, table.SampleIds,
table.ObservationIds, table.SampleMetadata,
table.ObservationMetadata, table.TableId,
constructor=conv_constructor)
result = conv_table.getBiomFormatJsonString(generatedby())
else:
if table_type is None:
raise CommandError("Must specify the BIOM table type: %s" %
', '.join(self.TableTypes.keys()))
else:
table_type = table_type.lower()
if table_type not in self.TableTypes:
raise CommandError("Unknown BIOM table type, must be one of: "
"%s" % ', '.join(self.TableTypes.keys()))
if matrix_type not in self.MatrixTypes:
raise CommandError("Unknown BIOM matrix type, must be one of: "
"%s" % ', '.join(self.MatrixTypes))
if process_obs_metadata not in \
self.ObservationMetadataTypes.keys():
raise CommandError("Unknown observation metadata processing "
"method, must be one of: %s" %
', '.join(self.ObservationMetadataTypes.keys()))
idx = 0 if matrix_type == 'sparse' else 1
constructor = self.TableTypes[table_type][idx]
convert_error_msg = ("Input does not look like a classic table. "
"Did you forget to specify that a classic "
"table file should be created from a BIOM "
"table file?")
try:
result = convert_table_to_biom(table_file, sample_metadata,
observation_metadata,
self.ObservationMetadataTypes[process_obs_metadata],
constructor)
except ValueError:
raise CommandError(convert_error_msg)
except IndexError:
raise CommandError(convert_error_msg)
return {'table_str': result}
CommandConstructor = TableConverter
|
17,109 | a57b09286bdc5d679610befb19642c18e2b7a7d2 | # encoding=utf8
import os
rowS, columnS = os.popen('stty size', 'r').read().split()
row = int(rowS)
col = int(columnS)
feral = []
feral.append(" โโโโโโโโโโโโ โโโโโโ โโโ โโโ ")
feral.append(" โโโ โ โโ โ โโโ โ โโโโโโโโโ โโโโ ")
feral.append(" โโโโโ โ โโโโ โโโ โโโ โโโโ โโโ โโโโ ")
feral.append(" โโโโ โ โโโ โ โโโโโโโ โโโโโโโโโ โโโโ ")
feral.append(" โโโโ โโโโโโโโโโโ โโโโ โโ โโโโโโโโโโโโ ")
feral.append(" โ โ โโ โโ โโ โโ โโโโ โโ โโโโโ โโโ โ ")
feral.append(" โ โ โ โ โโ โ โโ โ โโ โโ โ โ โ ")
feral.append(" โ โ โ โโ โ โ โ โ โ ")
feral.append(" โ โ โ โ โ โ โ ")
feral.append(" ")
feral.append(" โโโโโ โโโ โโโโ โโโโโโโโโโโ โโโโโโ ")
feral.append(" โโโ โโโโโโโโโ โโโโโโโ โโโโโ โ โโโ โ ")
feral.append("โโโโโโโโโโโ โโโ โโโ โโโโโโโโ โ โโโโ ")
feral.append("โโโ โโโโโโโโโโโโ โโโ โโโ โโโ โ โ โโโ")
feral.append("โโโโโโโโ โโ โโโโโโโโ โโโโโโโโโโโโโโโโโโโโ")
feral.append(" โโ โ โโ โโโโโ โโ โ โโโ โโ โโ โโโ โ โ")
feral.append(" โ โ โ โโ โโ โ โ โ โ โโ โโ โ โ")
feral.append("โ โ โ โ โ โ โ โ โ โ โ ")
feral.append(" โ โ โ โ โ โ โ ")
feralW = len(feral[0])  # banner width in characters (rows are equal length)
feralH = len(feral)     # banner height in rows
# Padding needed to center the banner in the terminal (row/col come from
# `stty size` above); integer division truncates toward the top-left.
fillerW = int((col - feralW)/2)
fillerH = int((row - feralH)/2)
print('\n'*fillerH)
for line in feral:
    print(' '*fillerW + line)
print('\n'*fillerH)
17,110 | 354ced17696d287524b48f9ef4f43d7ff70a1520 | from pal.writer.access_mechanism.access_mechanism \
import AccessMechanismWriter
from pal.logger import logger
class GasX86_64AttSyntaxAccessMechanismWriter(AccessMechanismWriter):
def declare_access_mechanism_dependencies(self, outfile, register,
access_mechanism):
pass
def call_readable_access_mechanism(self, outfile, register,
                                   access_mechanism, result):
    """Emit gas/AT&T inline assembly that reads *register* into *result*.

    Dispatches on ``access_mechanism.name``; an unsupported mechanism is
    logged as a warning and produces no output (unchanged behavior).
    """
    handlers = {
        "mov_read": self._call_mov_read_access_mechanism,
        "cpuid": self._call_cpuid_access_mechanism,
        "rdmsr": self._call_rdmsr_access_mechanism,
        "vmread": self._call_vmread_access_mechanism,
        "xgetbv": self._call_xgetbv_access_mechanism,
    }
    handler = handlers.get(access_mechanism.name)
    if handler is not None:
        handler(outfile, register, access_mechanism, result)
    else:
        # BUG FIX: corrected "mechnism" typo in the message and replaced
        # the deprecated logger.warn() with logger.warning().
        msg = "Access mechanism {am} is not supported using "
        msg += "Intel x86_64 gas att assembler syntax"
        logger.warning(msg.format(am=access_mechanism.name))
def call_writable_access_mechanism(self, outfile, register,
                                   access_mechanism, value):
    """Emit gas/AT&T inline assembly that writes *value* into *register*.

    Dispatches on ``access_mechanism.name``; an unsupported mechanism is
    logged as a warning and produces no output (unchanged behavior).
    """
    handlers = {
        "mov_write": self._call_mov_write_access_mechanism,
        "wrmsr": self._call_wrmsr_access_mechanism,
        "vmwrite": self._call_vmwrite_access_mechanism,
        "xsetbv": self._call_xsetbv_access_mechanism,
    }
    handler = handlers.get(access_mechanism.name)
    if handler is not None:
        handler(outfile, register, access_mechanism, value)
    else:
        # BUG FIX: corrected "mechnism" typo in the message and replaced
        # the deprecated logger.warn() with logger.warning().
        msg = "Access mechanism {am} is not supported using "
        msg += "Intel x86_64 gas att assembler syntax"
        logger.warning(msg.format(am=access_mechanism.name))
def _call_mov_read_access_mechanism(self, outfile, register,
access_mechanism, result):
self._write_inline_assembly(outfile, [
"mov %%" + access_mechanism.source_mnemonic + ", %[v]"
],
outputs='[v] "=r"(' + str(result) + ')'
)
def _call_cpuid_access_mechanism(self, outfile, register, access_mechanism,
result):
if register.is_indexed:
subleaf_mnemonic = "%[subleaf]"
subleaf_input = '[subleaf] "r"(index)'
else:
subleaf_mnemonic = "$0"
subleaf_input = ""
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.leaf)) + ", %%eax",
"mov " + subleaf_mnemonic + ", %%ecx",
"cpuid",
"mov %%" + access_mechanism.output + ", %[out]"
],
outputs='[out] "=r"(' + result + ')',
inputs=subleaf_input,
clobbers='"eax", "ebx", "ecx", "edx"'
)
def _call_rdmsr_access_mechanism(self, outfile, register, access_mechanism,
result):
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.address)) + ", %%rcx",
"rdmsr",
"shl $32, %%rdx",
"or %%rdx, %%rax",
"mov %%rax, %[v]",
],
outputs='[v] "=r"(' + result + ')',
clobbers='"rax", "rcx", "rdx"'
)
def _call_vmread_access_mechanism(self, outfile, register,
access_mechanism, result):
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.encoding)) + ", %%rdi",
"vmread %%rdi, %q[v]",
],
outputs='[v] "=r"(' + str(result) + ')',
clobbers='"rdi"'
)
def _call_xgetbv_access_mechanism(self, outfile, register,
access_mechanism, result):
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.register)) + ", %%rcx",
"xgetbv",
"shl $32, %%rdx",
"or %%rdx, %%rax",
"mov %%rax, %[v]",
],
outputs='[v] "=r"(' + str(result) + ')',
clobbers='"rax", "rcx", "rdx"'
)
def _call_mov_write_access_mechanism(self, outfile, register,
access_mechanism, value):
self._write_inline_assembly(outfile, [
"mov %[v], %%" + access_mechanism.destination_mnemonic,
],
inputs='[v] "r"(' + value + ')'
)
def _call_wrmsr_access_mechanism(self, outfile, register, access_mechanism,
value):
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.address)) + ", %%rcx",
"mov %[v], %%rax",
"mov %[v], %%rdx",
"shr $32, %%rdx",
"wrmsr",
],
inputs='[v] "r"(' + value + ')',
clobbers='"rax", "rcx", "rdx"'
)
def _call_vmwrite_access_mechanism(self, outfile, register,
access_mechanism, value):
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.encoding)) + ", %%rdi",
"vmwrite %q[v], %%rdi",
],
inputs='[v] "r"(' + value + ')',
clobbers='"rdi"'
)
def _call_xsetbv_access_mechanism(self, outfile, register,
access_mechanism, value):
self._write_inline_assembly(outfile, [
"mov $" + str(hex(access_mechanism.register)) + ", %%rcx",
"mov %[v], %%rax",
"mov %[v], %%rdx",
"shr $32, %%rdx",
"xsetbv",
],
inputs='[v] "r"(' + value + ')',
clobbers='"rax", "rcx", "rdx"'
)
def _write_inline_assembly(self, outfile, statements, outputs="",
                           inputs="", clobbers=""):
    """Write a GCC extended-asm block to *outfile*.

    Each statement becomes a quoted, ';'-terminated line, followed by the
    standard `: outputs : inputs : clobbers` constraint sections, always
    emitted (possibly empty) so the asm block is well-formed.
    """
    outfile.write("__asm__ __volatile__(")
    self.write_newline(outfile)
    for statement in statements:
        self.write_indent(outfile)
        outfile.write("\"" + str(statement) + ";\"")
        self.write_newline(outfile)
    self.write_indent(outfile)
    outfile.write(": " + str(outputs))
    self.write_newline(outfile)
    self.write_indent(outfile)
    outfile.write(": " + str(inputs))
    self.write_newline(outfile)
    self.write_indent(outfile)
    outfile.write(": " + str(clobbers))
    self.write_newline(outfile)
    outfile.write(");")
    self.write_newline(outfile)
|
17,111 | 05f070ee2296e17882060d7350a55d9a2a50ba9f | def kume_dondur(str_kume: str):
if str_kume.startswith("{") and str_kume.endswith("}"):
kume_elemanlari = str_kume[1:len(str_kume) -1]
v = kume_elemanlari.split(", ")
print(v)
kume_dondur("{1, 2, 3, 17}")
|
17,112 | c06aa43491b1a785ce278b420174f8dabdd4205a | import pytest
@pytest.fixture(scope='package', autouse=True)
def st_emptyEnv():
    """Package-scoped, auto-applied setup/teardown fixture."""
    # Setup message (runs once before the package's tests).
    print('\n#### ๅๅงๅ-็ฎๅฝ็บงๅซ ####')
    yield
    # BUG FIX: this string was split mid-literal by extraction (a syntax
    # error as dumped) and has been re-joined; the needless f-prefix (no
    # placeholders) is dropped. Confirm the exact original wording.
    print('\n ### ๆธ้ค-็ฎๅฝ็บงๅซ ####')
|
17,113 | 3efbd5e40e20cb37f47bd5178fbc0f7886fdcc95 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from itertools import product
from pathlib import Path
import numpy as np
import tensorflow as tf
from dotenv import load_dotenv
from annotation.direction import PinDirection
from annotation.piece import Piece
from ..count import WhiteEffectCountLayer
from ..long_effect import WhiteLongEffectLayer
from ..ou import WhiteOuEffectLayer
from ..short_effect import WhiteShortEffectLayer
__author__ = 'Yasuhiro'
__date__ = '2018/3/21'
class TestKyEffect(tf.test.TestCase):
@classmethod
def setUpClass(cls):
    """Load .env configuration shared by every test in this class."""
    dotenv_path = Path(__file__).parents[3] / '.env'
    load_dotenv(str(dotenv_path))
    cls.data_format = os.environ.get('DATA_FORMAT')
    # BUG FIX: bool(os.environ.get('USE_CUDNN')) treated ANY non-empty
    # string -- including "0" and "False" -- as True. Parse the common
    # truthy spellings explicitly instead.
    cls.use_cudnn = os.environ.get('USE_CUDNN', '').strip().lower() in (
        '1', 'true', 'yes', 'on')
def test_ky_effect(self):
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
board = np.empty(shape, dtype=np.int32)
# ใใใงใฏ็ธๆใฎๅฉใใใชใ่จญๅฎ
black_effect_mask = np.zeros(shape, dtype=np.bool)
ph = tf.placeholder(tf.int32, shape=shape)
short_effect = WhiteShortEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph)
long_effect = WhiteLongEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph)
ou_effect = WhiteOuEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph, black_effect_mask)
effect_count = WhiteEffectCountLayer()(
short_effect, long_effect, ou_effect
)
effect_count = tf.squeeze(effect_count)
with self.test_session() as sess:
for i, j in product(range(9), repeat=2):
# (i, j)ใซ้งใ็ฝฎใใ(i, 0:j)ใๅฉใใฎใใไฝ็ฝฎ
if j == 8:
continue
board[:] = Piece.EMPTY
if self.data_format == 'NCHW':
board[0, 0, i, j] = Piece.WHITE_KY
else:
board[0, i, j, 0] = Piece.WHITE_KY
effect = sess.run(effect_count, feed_dict={ph: board})
with self.subTest(i=i, j=j):
for k in range(j + 1, 9):
self.assertEqual(effect[i, k], 1)
self.assertEqual(np.sum(effect), 8 - j)
def test_ky_pin(self):
shape = (1, 1, 9, 9) if self.data_format == 'NCHW' else (1, 9, 9, 1)
board = np.empty(shape, dtype=np.int32)
# ใใใงใฏ็ธๆใฎๅฉใใใชใ่จญๅฎ
black_effect_mask = np.zeros(shape, dtype=np.bool)
ph = tf.placeholder(tf.int32, shape=shape)
short_effect = WhiteShortEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph)
long_effect = WhiteLongEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph)
ou_effect = WhiteOuEffectLayer(
data_format=self.data_format, use_cudnn=self.use_cudnn
)(ph, black_effect_mask)
effect_count = WhiteEffectCountLayer()(
short_effect, long_effect, ou_effect
)
effect_count = tf.squeeze(effect_count)
with self.test_session() as sess:
for i, j in product(range(9), repeat=2):
# (i, j)ใซ้งใ็ฝฎใใ(i, 0:j)ใๅฉใใฎใใไฝ็ฝฎ
if j == 8:
continue
board[:] = Piece.EMPTY
for pin_direction in PinDirection:
if pin_direction == PinDirection.SIZE:
continue
offset = Piece.SIZE - Piece.WHITE_FU + pin_direction * 14
if self.data_format == 'NCHW':
board[0, 0, i, j] = Piece.WHITE_KY + offset
else:
board[0, i, j, 0] = Piece.WHITE_KY + offset
effect = sess.run(effect_count, feed_dict={ph: board})
with self.subTest(i=i, j=j, pin_direction=pin_direction):
if pin_direction == PinDirection.VERTICAL:
for k in range(j + 1, 9):
self.assertEqual(effect[i, k], 1)
self.assertEqual(np.sum(effect), 8 - j)
else:
self.assertTrue(np.all(effect == 0))
|
17,114 | c53ad01f54a813c268fc264280221c239e3b2ffc | from datetime import time
from datetime import date
from datetime import datetime
from datetime import timedelta
#span of time time based maths
def main():
    """Demonstrate timedelta arithmetic and date formatting."""
    # A timedelta can be built from mixed units; printing normalizes it.
    print(timedelta(days=365, hours=5, minutes=1))
    now = datetime.now()
    print("today is", now)
    print("one year later:", now + timedelta(days=365))
    print("In two days and three weeks it will be:", now + timedelta(weeks=3, days=2))
    t = now - timedelta(weeks=1)
    print("One week ago", t.strftime("%A %d %B,%Y"))
    today = date.today()
    # April Fools' Day of the current year.
    afd = date(today.year, month=4, day=1)
    if afd < today:
        # Already passed this year: report how long ago, then roll to next year.
        print("April fools day went away %d ago" % ((today - afd).days))
        afd = afd.replace(year=today.year + 1)
    # NOTE(review): indentation was flattened in the dump; this placement
    # (countdown computed after the rollover) is the reconstruction that
    # makes the countdown always non-negative -- confirm against the origin.
    timetoafd = afd - today
    print("time to afd:", timetoafd.days)
if __name__=="__main__":
main()
|
17,115 | 3c8657f0ca1773a6a42f4ae5c3a5b65b595981ad | from .sudoku_board import SudokuBoard
from .solving_functions import solve
__all__ = ["SudokuBoard", "solve"]
|
17,116 | d83318b9aa836aeb23af80f9c80a776b6d654a85 | UNSCOPED_TOKEN = {
u'access': {u'serviceCatalog': {},
u'token': {u'expires': u'2012-10-03T16:58:01Z',
u'id': u'3e2813b7ba0b4006840c3825860b86ed'},
u'user': {u'id': u'c4da488862bd435c9e6c0275a0d0e49a',
u'name': u'exampleuser',
u'roles': [],
u'roles_links': [],
u'username': u'exampleuser'}
}
}
PROJECT_SCOPED_TOKEN = {
u'access': {
u'serviceCatalog': [{
u'endpoints': [{
u'adminURL': u'http://admin:8776/v1/225da22d3ce34b15877ea70b2a575f58',
u'internalURL':
u'http://internal:8776/v1/225da22d3ce34b15877ea70b2a575f58',
u'publicURL':
u'http://public.com:8776/v1/225da22d3ce34b15877ea70b2a575f58',
u'region': u'RegionOne'
}],
u'endpoints_links': [],
u'name': u'Volume Service',
u'type': u'volume'},
{u'endpoints': [{
u'adminURL': u'http://admin:9292/v1',
u'internalURL': u'http://internal:9292/v1',
u'publicURL': u'http://public.com:9292/v1',
u'region': u'RegionOne'}],
u'endpoints_links': [],
u'name': u'Image Service',
u'type': u'image'},
{u'endpoints': [{
u'adminURL': u'http://admin:8774/v2/225da22d3ce34b15877ea70b2a575f58',
u'internalURL': u'http://internal:8774/v2/225da22d3ce34b15877ea70b2a575f58',
u'publicURL': u'http://public.com:8774/v2/225da22d3ce34b15877ea70b2a575f58',
u'region': u'RegionOne'}],
u'endpoints_links': [],
u'name': u'Compute Service',
u'type': u'compute'},
{u'endpoints': [{
u'adminURL': u'http://admin:8773/services/Admin',
u'internalURL': u'http://internal:8773/services/Cloud',
u'publicURL': u'http://public.com:8773/services/Cloud',
u'region': u'RegionOne'}],
u'endpoints_links': [],
u'name': u'EC2 Service',
u'type': u'ec2'},
{u'endpoints': [{
u'adminURL': u'http://admin:35357/v2.0',
u'internalURL': u'http://internal:5000/v2.0',
u'publicURL': u'http://public.com:5000/v2.0',
u'region': u'RegionOne'}],
u'endpoints_links': [],
u'name': u'Identity Service',
u'type': u'identity'}],
u'token': {u'expires': u'2012-10-03T16:53:36Z',
u'id': u'04c7d5ffaeef485f9dc69c06db285bdb',
u'tenant': {u'description': u'',
u'enabled': True,
u'id': u'225da22d3ce34b15877ea70b2a575f58',
u'name': u'exampleproject'}},
u'user': {u'id': u'c4da488862bd435c9e6c0275a0d0e49a',
u'name': u'exampleuser',
u'roles': [{u'id': u'edc12489faa74ee0aca0b8a0b4d74a74',
u'name': u'Member'}],
u'roles_links': [],
u'username': u'exampleuser'}
}
}
|
17,117 | bc35853e90d66182c6cb5cab745e83a4e39dab93 | """
Using zip builtin function to iterate multiple lists.
If one list is bigger than other, it will iterate only through the smaller one size.
"""
# Two demo lists of different lengths; zip() stops at the shorter one,
# so only three pairs are produced.
list_1 = ['a', 'b', 'c', 'd', 'e', 'f']
list_2 = [1, 2, 3]

for item_list_1, item_list_2 in zip(list_1, list_2):
    # f-string replaces the legacy %-formatting; the output is unchanged.
    print(f"Item list 1: {item_list_1} refers to Item list 2: {item_list_2}")
17,118 | f3ae47622b46bc7ec95414d6e74dc411ae09a797 | import collections
from typing import List
class Solution:
    def countCharacters(self, words: List[str], chars: str) -> int:
        """Sum the lengths of every word that can be spelled with the
        letters of `chars`, each letter used at most as often as it occurs
        there (LeetCode 1160)."""
        available = collections.Counter(chars)
        # Counter subtraction keeps only positive counts, so an empty
        # difference means every letter of the word is covered.
        return sum(
            len(word)
            for word in words
            if not (collections.Counter(word) - available)
        )
|
17,119 | 4f076822d6258667f4cbdb9a9dcb712583ea860d | class Solution:
def convert(self, s: str, numRows: int) -> str:
if len(s) < numRows or numRows == 1:
return s
flag = 1
index = 0
A = []
result = ""
for i in range(numRows):
A.append([])
for i in range(len(s)):
A[index].append(s[i])
if index == 0:
flag = 1
elif index == numRows-1:
flag = -1
if flag == 1:
index += 1
elif flag == -1:
index -= 1
for i in range(numRows):
temp = ''.join(A[i])
result += temp
return result
|
17,120 | 0edc1eac0fda8aa402b62a58c688141d4675f795 | mydict={"Shivani":'Abcd@11',"Saloni":'Efgh@12'}
def upassword(password):
    """Return True when `password` is acceptable: at least one uppercase
    letter, at least one digit, at least one special character from
    !@#$%^&*, and a total length between 6 and 20 characters inclusive."""
    has_upper = any(ch.isupper() for ch in password)
    has_digit = any(ch in '1234567890' for ch in password)
    has_special = any(ch in '!@#$%^&*' for ch in password)
    return has_upper and has_digit and has_special and 6 <= len(password) <= 20
# Simple interactive account manager over the in-memory `mydict`
# (username -> password). Loops until the user answers anything but y/Y.
insert='y'
while(insert=='y' or insert=='Y'):
    username=input("Enter the user name")
    if username in mydict:
        # Existing account: view the stored credentials or change the password.
        VorM=input("Do you want to view or modify?")
        if(VorM=='view'):
            print("Your username is:",username)
            print("Your password is:",mydict[username])
        elif(VorM=='modify'):
            # Up to 3 attempts to supply a password accepted by upassword().
            chance=3
            while(chance>0):
                password1=input("Password:")
                if(upassword(password1)):
                    mydict[username]=password1
                    print(mydict[username])
                    break
                else:
                    chance=chance-1
                    print("invalid")
            if(chance<=0):
                print("Updation failed")
    else:
        # New account: no spaces, first character alphanumeric, 6-12 chars.
        # NOTE(review): username[0] raises IndexError on an empty string and
        # digits are accepted as the first character -- confirm intent.
        if((" " not in username) and (username[0] in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890") and len(username)>=6 and len(username)<=12):
            chance=3
            while(chance>0):
                password1=input("Enter password")
                if(upassword(password1)):
                    mydict[username]=password1
                    break
                else:
                    chance=chance-1
                    print("Invalid")
            if(chance<=0):
                print("Updation failed")
        else:
            print("Please enter a proper username")
    print(mydict)
    insert=input("Want to insert?Y/N")
|
17,121 | be5bccb60a5ce5ad5b9cd7342ca351c32e937aab | import requests
from requests_oauthlib import OAuth1
base_url = "https://api.tumblr.com/v2/blog/"
with open('api-keys.txt') as keyfile:
keys = [key.strip() for key in keyfile]
keyfile.close()
auth = OAuth1(*keys)
def api_url(blog,method):
    """Return the full Tumblr API endpoint URL for `blog` and API `method`."""
    return "{base}{name}.tumblr.com/{endpoint}".format(
        base=base_url, name=blog, endpoint=method)
def get(blog,method,params):
    """Issue an authenticated GET against the Tumblr API and return the
    'response' payload of the decoded JSON body.

    Raises requests.HTTPError on a non-2xx status.
    """
    target = api_url(blog, method)
    # params.update({'api_key': keys[0]})  # api-key auth alternative, kept for reference
    reply = requests.get(target, auth=auth, params=params)
    reply.raise_for_status()
    print('response received at', reply.url)
    return reply.json()['response']
def get_posts(blog,params):
    """Return the list of posts for `blog` (Tumblr /posts endpoint)."""
    payload = get(blog, 'posts', params)
    return payload['posts']
def get_bloginfo(blog,params):
    """Return the blog metadata for `blog` (Tumblr /info endpoint)."""
    payload = get(blog, 'info', params)
    return payload['blog']
|
17,122 | f5a34035f4536c555fa46880a79f24d0909871cb |
def class_to_dict(obj):
    """Return a shallow copy of obj.__dict__ with SQLAlchemy's internal
    '_sa_instance_state' bookkeeping entry removed.

    The original object is left untouched.
    """
    dic = {}
    dic.update(obj.__dict__)
    # Drop SQLAlchemy's instance-state marker so the dict is plain data.
    # (A stray Python-2 `print` of this value was removed here: it was
    # debug output and a syntax error under Python 3.)
    dic.pop('_sa_instance_state', None)
    return dic
|
17,123 | a5fbe0d2aced1b8d86739b2d84594a07da91279b | from facebook_business.adobjects.productcatalog import ProductCatalog as FacebookProductCatalog
from facebook_business.adobjects.productgroup import ProductGroup as FacebookProductGroup
from facebook_business.adobjects.productitem import ProductItem as FacebookProductItem
from facebook_business.adobjects.productset import ProductSet as FacebookProductSet
PRODUCT_SETS_FIELD_BY_PRODUCT = 'product_sets'
PRODUCT_CATALOGS_FIELDS = [FacebookProductCatalog.Field.name,
FacebookProductCatalog.Field.vertical,
FacebookProductCatalog.Field.id,
FacebookProductCatalog.Field.business,
FacebookProductCatalog.Field.da_display_settings,
FacebookProductCatalog.Field.default_image_url,
FacebookProductCatalog.Field.fallback_image_url,
FacebookProductCatalog.Field.feed_count,
FacebookProductCatalog.Field.is_catalog_segment,
FacebookProductCatalog.Field.product_count,
FacebookProductCatalog.Field.store_catalog_settings]
PRODUCT_SETS_FIELDS = [FacebookProductSet.Field.auto_creation_url,
FacebookProductSet.Field.filter,
FacebookProductSet.Field.id,
FacebookProductSet.Field.name,
FacebookProductSet.Field.product_count,
FacebookProductSet.Field.product_catalog]
PRODUCT_GROUPS_FIELDS = [FacebookProductGroup.Field.id,
FacebookProductGroup.Field.product_catalog,
FacebookProductGroup.Field.retailer_id,
FacebookProductGroup.Field.variants]
PRODUCT_FIELDS = [FacebookProductItem.Field.id,
FacebookProductItem.Field.currency,
FacebookProductItem.Field.description,
FacebookProductItem.Field.url,
FacebookProductItem.Field.availability,
FacebookProductItem.Field.name,
FacebookProductItem.Field.price,
FacebookProductItem.Field.product_group,
FacebookProductItem.Field.category,
FacebookProductItem.Field.product_type,
FacebookProductItem.Field.short_description,
FacebookProductItem.Field.custom_data,
FacebookProductItem.Field.custom_label_0,
FacebookProductItem.Field.custom_label_1,
FacebookProductItem.Field.custom_label_2,
FacebookProductItem.Field.custom_label_3,
FacebookProductItem.Field.custom_label_4,
FacebookProductItem.Field.additional_image_urls,
FacebookProductItem.Field.additional_variant_attributes,
FacebookProductItem.Field.age_group,
FacebookProductItem.Field.image_url,
FacebookProductItem.Field.image_cdn_urls,
FacebookProductItem.Field.gender,
FacebookProductItem.Field.brand,
FacebookProductItem.Field.color,
FacebookProductItem.Field.condition,
FacebookProductItem.Field.expiration_date,
FacebookProductItem.Field.inventory,
FacebookProductItem.Field.material,
FacebookProductItem.Field.pattern,
FacebookProductItem.Field.retailer_product_group_id,
FacebookProductItem.Field.review_status,
FacebookProductItem.Field.review_rejection_reasons,
FacebookProductItem.Field.sale_price,
FacebookProductItem.Field.sale_price_end_date,
FacebookProductItem.Field.sale_price_start_date,
FacebookProductItem.Field.shipping_weight_unit,
FacebookProductItem.Field.shipping_weight_value,
FacebookProductItem.Field.size,
FacebookProductItem.Field.offer_price_amount,
FacebookProductItem.Field.offer_price_end_date,
FacebookProductItem.Field.offer_price_start_date,
PRODUCT_SETS_FIELD_BY_PRODUCT]
|
17,124 | 5642e4ce7c63b9a9059697b10d0f83f842b9df0c | from __future__ import division
from __future__ import print_function
import os
import pickle
from engine import *
from model import *
from utils import *
np.random.seed(1234)
tf.set_random_seed(123)
# load data to df
start_time = time.time()
data_df = pd.read_csv('../../datasets/tmall_1mth_2014_item20user50k_1neg_30seq.csv')
meta_df = pd.read_csv('../../datasets/tmall_1mth_2014_item20user50k_1neg_30seq_item_meta.csv')
data_df['itemSeq'] = data_df['itemSeq'].fillna('') # empty seq are NaN
data_df['itemSeq'] = data_df['itemSeq'].apply(lambda x: [int(item) for item in x.split('#') if item != ''])
meta_df['cateId'] = meta_df['cateId'].apply(lambda x: [x])
meta_df = meta_df.sort_values(['itemId'], ascending=True).reset_index(drop=True)
cate_ls = meta_df['cateId'].tolist()
print('Done loading data! time elapsed: {}'.format(time.strftime('%H:%M:%S', time.gmtime(time.time() - start_time))))
num_users = data_df['userId'].max() + 1
num_items = data_df['itemId'].max() + 1
num_cates = max([max(i) for i in cate_ls]) + 1
cates, cate_lens = process_cate(cate_ls)
train_config = {'method': 'ASMGgru_full_by_date',
'dir_name': 'ASMGgru_full_linear_train11-23_test24-30_4emb_4mlp_1epoch_try2', # edit method to compute loss weight, train test period, rnn hidden size for emb and mlp, number of epochs
'niu_dir_name': 'NIU_train11-23_test24-30_1epoch_0.01', # input model sequence directory
'train_start_date': 20141011,
'train_end_date': 20141031,
'test_start_date': 20141024,
'cur_date': None, # current incremental date
'next_dates': None, # respective next incremental date
'next_set_sizes': None, # respective next incremental dataset size
'date_alias': None, # individual date directory alias to save ckpts
'restored_ckpt_mode': 'best auc', # mode to search the ckpt to restore: 'best auc', 'best logloss', 'last'
'restored_ckpt': None, # restored meta generator checkpoint
'seq_length': None, # length of input model sequence
'rnn_type': 'gru', # type of rnn cell: vanilla, gru
'emb_hidden_dim': 4, # rnn hidden size for embedding layers parameters
'mlp_hidden_dim': 4, # rnn hidden size for MLP layers parameters
'loss_weight': 'linear', # method to compute loss weight: uniform, linear, exp
'test_stop_train': False, # whether to stop updating meta generator during test periods
'meta_optimizer': 'adam', # meta generator optimizer: adam, rmsprop, sgd
'meta_lr': None, # meta generator learning rate
'meta_bs': 1024, # meta generator batch size
'meta_num_epochs': 1, # meta generator number of epochs
'shuffle': True, # whether to shuffle the dataset for each epoch
}
EmbMLP_hyperparams = {'num_users': num_users,
'num_items': num_items,
'num_cates': num_cates,
'user_embed_dim': 8,
'item_embed_dim': 8,
'cate_embed_dim': 8,
'layers': [40, 20, 10, 1] # input dim is user_embed_dim + (item_embed_dim + cate_embed_dim) x 2
}
def collect_params():
"""
collect list of parameters for input model sequence
:return: emb_ls_dict, mlp_ls_dict
"""
collect_params_start_time = time.time()
emb_ls = ['user_emb_w', 'item_emb_w', 'cate_emb_w']
mlp_ls = ['fcn1/kernel', 'fcn2/kernel', 'fcn3/kernel', 'fcn3/bias', 'fcn1/bias', 'fcn2/bias']
# collect input model sequence from niu_dir
emb_dict_ls = []
mlp_dict_ls = []
for prev_num in reversed(range(train_config['seq_length'])):
date_alias = 'date' + str(i - prev_num)
alias = os.path.join('../IU/ckpts', train_config['niu_dir_name'], date_alias, 'Epoch*')
restored_ckpt = search_ckpt(alias, mode=train_config['restored_ckpt_mode'])
print('restored model {}: {}'.format(i - prev_num, restored_ckpt))
emb_dict = {name: tf.train.load_checkpoint(restored_ckpt).get_tensor(name)
for name, _ in tf.train.list_variables(restored_ckpt) if name in emb_ls}
mlp_dict = {name: tf.train.load_checkpoint(restored_ckpt).get_tensor(name)
for name, _ in tf.train.list_variables(restored_ckpt) if name in mlp_ls}
emb_dict_ls.append(emb_dict)
mlp_dict_ls.append(mlp_dict)
# concat sequence for different parameters on the last axis
emb_ls_dict_ = {}
for k in emb_dict_ls[0].keys():
for emb_dict in emb_dict_ls:
if k not in emb_ls_dict_.keys():
emb_ls_dict_[k] = np.expand_dims(emb_dict[k], axis=-1)
else:
emb_ls_dict_[k] = np.concatenate((emb_ls_dict_[k], np.expand_dims(emb_dict[k], axis=-1)), axis=-1)
mlp_ls_dict_ = {}
for k in mlp_dict_ls[0].keys():
for mlp_dict in mlp_dict_ls:
if k not in mlp_ls_dict_.keys():
mlp_ls_dict_[k] = np.expand_dims(mlp_dict[k], axis=-1)
else:
mlp_ls_dict_[k] = np.concatenate((mlp_ls_dict_[k], np.expand_dims(mlp_dict[k], axis=-1)), axis=-1)
# check that the shapes are correct
for k in emb_ls_dict_.keys():
print(k, np.shape(emb_ls_dict_[k]))
for k in mlp_ls_dict_.keys():
print(k, np.shape(mlp_ls_dict_[k]))
print('collect params time elapsed: {}'.format(
time.strftime('%H:%M:%S', time.gmtime(time.time() - collect_params_start_time))))
return emb_ls_dict_, mlp_ls_dict_
def test_and_train_meta():
# create an engine instance with asmg_model
engine = Engine(sess, asmg_model)
test_start_time = time.time()
print('Testing Meta Generator Start!')
next_auc, next_logloss = engine.test_last(next_set_ls[-1], train_config)
print('Done! time elapsed: {}, next_auc {:.4f}, next_logloss {:.4f}'.format(
time.strftime('%H:%M:%S', time.gmtime(time.time() - test_start_time)),
next_auc,
next_logloss))
print('')
if i >= train_config['test_start_date']:
test_aucs.append(next_auc)
test_loglosses.append(next_logloss)
if i < train_config['test_start_date'] or not train_config['test_stop_train']:
train_start_time = time.time()
for epoch_id in range(1, train_config['meta_num_epochs'] + 1):
print('Training Meta Generator Epoch {} Start!'.format(epoch_id))
meta_loss_next_avg = engine.meta_train_an_epoch(epoch_id, next_set_ls, train_config)
print('Epoch {} Done! time elapsed: {}, meta_loss_next_avg {:.4f}'.format(
epoch_id,
time.strftime('%H:%M:%S', time.gmtime(time.time() - train_start_time)),
meta_loss_next_avg
))
next_auc, next_logloss = engine.test_last(next_set_ls[-1], train_config)
print('next_auc {:.4f}, next_logloss {:.4f}'.format(
next_auc,
next_logloss))
print('time elapsed {}'.format(time.strftime('%H:%M:%S', time.gmtime(time.time() - train_start_time))))
print('')
# save checkpoint
checkpoint_alias = 'Epoch{}_TestAUC{:.4f}_TestLOGLOSS{:.4f}.ckpt'.format(
epoch_id,
next_auc,
next_logloss)
checkpoint_path = os.path.join(ckpts_dir, checkpoint_alias)
saver.save(sess, checkpoint_path)
else:
# save checkpoint
checkpoint_alias = 'EpochNA_TestAUC{:.4f}_TestLOGLOSS{:.4f}.ckpt'.format(
next_auc,
next_logloss)
checkpoint_path = os.path.join(ckpts_dir, checkpoint_alias)
saver.save(sess, checkpoint_path)
orig_dir_name = train_config['dir_name']
for meta_lr in [1e-2]:
print('')
print('meta_lr', meta_lr)
train_config['meta_lr'] = meta_lr
train_config['dir_name'] = orig_dir_name + '_' + str(meta_lr)
print('dir_name: ', train_config['dir_name'])
test_aucs = []
test_loglosses = []
start_date = train_config['train_start_date']
for i in range(start_date, train_config['train_end_date']):
train_config['seq_length'] = i - train_config['train_start_date'] + 1
# configure cur_date, next_dates
train_config['cur_date'] = i
train_config['next_dates'] = [i - prev_num + 1 for prev_num in reversed(range(train_config['seq_length']))]
print('')
print('current date: {}, next dates: {}'.format(
train_config['cur_date'],
train_config['next_dates']))
print('')
# create next sets
next_set_ls = []
next_set_size_ls = []
for next_date in train_config['next_dates']:
next_set = data_df[data_df['date'] == next_date]
next_set_ls.append(next_set)
next_set_size_ls.append(len(next_set))
train_config['next_set_sizes'] = next_set_size_ls
print('next set sizes', next_set_size_ls)
train_config['date_alias'] = 'date' + str(i)
# checkpoints directory
ckpts_dir = os.path.join('ckpts', train_config['dir_name'], train_config['date_alias'])
if not os.path.exists(ckpts_dir):
os.makedirs(ckpts_dir)
if i == start_date:
train_config['restored_ckpt'] = None
else:
prev_date_alias = 'date' + str(i - 1)
search_alias = os.path.join('ckpts', train_config['dir_name'], prev_date_alias, 'Epoch*')
train_config['restored_ckpt'] = search_ckpt(search_alias, mode=train_config['restored_ckpt_mode'])
print('restored checkpoint: {}'.format(train_config['restored_ckpt']))
# write train_config to text file
with open(os.path.join(ckpts_dir, 'config.txt'), mode='w') as f:
f.write('train_config: ' + str(train_config) + '\n')
f.write('\n')
f.write('EmbMLP_hyperparams: ' + str(EmbMLP_hyperparams) + '\n')
# collect list of parameters for input model sequence
emb_ls_dict, mlp_ls_dict = collect_params()
# create initial hidden state
init_h_dict = {'user_emb_w': np.zeros((EmbMLP_hyperparams['num_users'],
EmbMLP_hyperparams['user_embed_dim'],
train_config['emb_hidden_dim'])),
'item_emb_w': np.zeros((EmbMLP_hyperparams['num_items'],
EmbMLP_hyperparams['item_embed_dim'],
train_config['emb_hidden_dim'])),
'cate_emb_w': np.zeros((EmbMLP_hyperparams['num_cates'],
EmbMLP_hyperparams['cate_embed_dim'],
train_config['emb_hidden_dim'])),
'fcn1_kernel': np.zeros((EmbMLP_hyperparams['layers'][0],
EmbMLP_hyperparams['layers'][1],
train_config['mlp_hidden_dim'])),
'fcn1_bias': np.zeros((EmbMLP_hyperparams['layers'][1],
train_config['mlp_hidden_dim'])),
'fcn2_kernel': np.zeros((EmbMLP_hyperparams['layers'][1],
EmbMLP_hyperparams['layers'][2],
train_config['mlp_hidden_dim'])),
'fcn2_bias': np.zeros((EmbMLP_hyperparams['layers'][2],
train_config['mlp_hidden_dim'])),
'fcn3_kernel': np.zeros((EmbMLP_hyperparams['layers'][2],
EmbMLP_hyperparams['layers'][3],
train_config['mlp_hidden_dim'])),
'fcn3_bias': np.zeros((EmbMLP_hyperparams['layers'][3],
train_config['mlp_hidden_dim']))}
# build asmg model computation graph
tf.reset_default_graph()
asmg_model = ASMGrnn(cates, cate_lens, EmbMLP_hyperparams, emb_ls_dict, mlp_ls_dict, init_h_dict, train_config=train_config)
# create session
with tf.Session() as sess:
# restore meta generator
if i == start_date:
# print([var.name for var in tf.global_variables()]) # check graph variables
sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
else:
restorer = tf.train.Saver()
restorer.restore(sess, train_config['restored_ckpt'])
saver = tf.train.Saver()
# test and then train meta generator
test_and_train_meta()
if i >= train_config['test_start_date']:
average_auc = sum(test_aucs) / len(test_aucs)
average_logloss = sum(test_loglosses) / len(test_loglosses)
print('test aucs', test_aucs)
print('average auc', average_auc)
print('')
print('test loglosses', test_loglosses)
print('average logloss', average_logloss)
# write metrics to text file
with open(os.path.join(ckpts_dir, 'test_metrics.txt'), mode='w') as f:
f.write('test_aucs: ' + str(test_aucs) + '\n')
f.write('average_auc: ' + str(average_auc) + '\n')
f.write('test_loglosses: ' + str(test_loglosses) + '\n')
f.write('average_logloss: ' + str(average_logloss) + '\n')
|
17,125 | 964c63f969fa92b29c72de89185b8b47269d2283 | from django.shortcuts import render, get_object_or_404
from .models import Clientes, ClientesForm
from django.views.generic import ListView
from django.contrib.auth.decorators import login_required
from django.http.response import HttpResponse, HttpResponseRedirect
# Create your views here.
@login_required(login_url='/administracion')
def clientes(request):
    # List all clients (newest first) and handle creation through
    # ClientesForm. The template receives the whole local namespace
    # via locals().
    template = "clientes.html"
    clientes = Clientes.objects.all().order_by("-id")
    value_boton = "Agregar"  # submit-button label shown by the template
    if request.method=='POST':
        cliente_form = ClientesForm(request.POST)
        if cliente_form.is_valid():
            new_cliente = cliente_form.save()
            # Redirect after a successful save (post/redirect/get).
            return HttpResponseRedirect('/administracion/clientes')
        # Invalid POST falls through and re-renders with the bound form.
    else:
        cliente_form = ClientesForm()
    return render(request, template, locals())
@login_required(login_url='/administracion')
def clientes_edit(request, pk_id):
    # Edit the client with primary key pk_id (404 if absent) with a form
    # bound to that instance; template receives locals().
    template = "clientes.html"
    cliente_instance = get_object_or_404(Clientes, pk = pk_id)
    clientes = Clientes.objects.all()
    value_boton = "Editar"  # submit-button label shown by the template
    if request.method=='POST':
        cliente_form = ClientesForm(request.POST, instance=cliente_instance)
        if cliente_form.is_valid():
            new_empleado = cliente_form.save()
            return HttpResponseRedirect('/administracion/clientes')
        # Invalid POST falls through and re-renders with the bound form.
    else:
        cliente_form = ClientesForm(instance=cliente_instance)
    return render(request, template, locals())
@login_required(login_url='/administracion')
def clientes_delete(request, pk_id):
    # Delete the Clientes row with primary key pk_id (404 if absent) and
    # return to the client list.
    # NOTE(review): this deletes on a plain GET with no CSRF protection or
    # confirmation step; consider requiring POST.
    template = "clientes.html"  # unused in this view; kept as-is
    cliente_instance = get_object_or_404(Clientes, pk = pk_id)
    cliente_instance.delete()
    return HttpResponseRedirect('/administracion/clientes')
|
17,126 | fa0404dffbdce5f7e2ccba49e8ca37c8cfebf0db | import numpy as np
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from pprint import pprint
def pre_process_text(name, n=3):
    """Normalise a company name and split it into character n-grams.

    :param name: raw company name
    :param n: determines the n-grams (bi-gram, tri-gram, etc). By default, tri-gram
    :return: list of n-gram strings
    """
    cleaned = re.sub(r'[,-./]|\sBD', r'', name)
    cleaned = re.sub(" +", "", cleaned)
    # Slide a window of width n across the cleaned string.
    return ["".join(gram) for gram in zip(*(cleaned[i:] for i in range(n)))]
def get_top_n_match(row, n_top=5):
    """Pick the best matches out of one sparse similarity row.

    :param row: sparse (CSR) row of similarity scores
    :param n_top: number of results to be determined
    :return: list of (column index, score) tuples sorted by descending
        score, or None when the row has no non-zero entries
    """
    nnz = row.getnnz()
    if nnz == 0:
        return None
    if nnz <= n_top:
        candidates = zip(row.indices, row.data)
    else:
        # argpartition isolates the n_top largest scores without a full sort
        top_positions = np.argpartition(row.data, -n_top)[-n_top:]
        candidates = zip(row.indices[top_positions], row.data[top_positions])
    return sorted(candidates, key=lambda pair: -pair[1])
def match_company_name(input_name, vectorizer, comp_name_vectors, comp_name_df):
    """Find the closest company names to `input_name`.

    :param input_name: input company name whose matches need to be found
    :param vectorizer: TFIDF vectorizer which was initialized earlier
    :param comp_name_vectors: the company names' vectors of the whole data set
    :param comp_name_df: the company names dataframe
    :return: a dataframe with top N matching names with match score
    """
    query_vector = vectorizer.transform([input_name])
    similarity_rows = query_vector.dot(comp_name_vectors.T)
    # One (index, score) list per result row, then flattened.
    per_row_matches = [get_top_n_match(row) for row in similarity_rows]
    flattened = [match for row_matches in per_row_matches for match in row_matches]
    match_indices, match_scores = zip(*flattened)
    total = len(match_indices)
    names = np.empty([total], dtype=object)
    scores = np.zeros(total)
    for pos in range(total):
        names[pos] = comp_name_df['Company Name'][match_indices[pos]]
        scores[pos] = match_scores[pos]
    return pd.DataFrame({"Matching company name": names,
                         "Match Score (%)": scores * 100})
if __name__ == '__main__':
company_names_df = pd.read_csv("./data/company_names.csv")
tfidf = TfidfVectorizer(min_df=5, analyzer=pre_process_text)
company_name_vectors = tfidf.fit_transform(company_names_df['Company Name'])
# Example
result_df = match_company_name("ADVISORY U S EQUITY MARKET", tfidf, company_name_vectors, company_names_df)
print(result_df) |
17,127 | cebbaee54c84f5e52c129ee7ccb5ff0a44ca1d87 | class Newtype(type): # ็ปงๆฟtype็ฑป
def __init__(self, a, b, c):
print("ๅ
็ฑป็ๆ้ ๅฝๆฐๆง่ก")
def __call__(self, *args, **kwargs):
print("็ฑป่ฟ่กๅๅงๅๆถ็()๏ผ้ฆๅ
่ฐ็จ็ๆฏๅ
็ฑป็__call__ๆนๆณ")
obj = object.__new__(self)
# print(type(obj))
self.__init__(obj, *args, **kwargs)
return obj
class Bar(metaclass=Newtype): # ๅฏไปฅ็่งฃไธบ๏ผclass ๅ
ณ้ฎๅญๆง่ก็ๆไฝๆฏ๏ผๅฎไพๅNewtypeๅ
็ฑป๏ผๅฎไพๅๅ็ๅฏน่ฑกๆฏBar่ฟไธช็ฑป๏ผๆง่ก็ๆฏๅ
็ฑปNewtype็__init__ๅฝๆฐ๏ผ
def __init__(self, name):
print("ๅฎไพ็ๅๅงๅๆนๆณ")
self.name = name
f1 = Bar("Lee")
print(f1.__dict__)
|
17,128 | 5421e2905031c45488df0f1b21129cb2173821a0 | from join import Join as join
from joindocuments import joindocuments
import pandas as pd
from oddratio import OddRatio as ratio
from topwords import topwords
from ngrams import ngrams
from truncatedsvd import SVDf
from PCAC import pcaf
import arff
import numpy as np
#joinoddratios
#it gets the data preprocessing
def domain(document, crossvalidationundersampling,ArffL,A=0, undersampler=0,sentiment=0 ):
test=pd.read_csv('documents\csv\drunk\drunkTEXT400'+'.csv' )
test.L=test.L.replace(['y','n'], ['True','False'])
df1=pd.read_csv(document+'.csv' )
df1.L=df1.L.replace(['y','n'], ['True','False'])
joinc=joindocuments(df1,df1)
top = topwords(df1,'Clean tweet',100)
main_domain = join(df1,'Clean tweet')
bigrams=ngrams(df1,'Clean tweet')
print 'bigrams'
print bigrams.bigrams
main_domain.joinall(bigrams.bigrams,2)
main_domain.joinall(top.top,1)
main_domain.df.to_csv('prueba.csv',index=False)
ratiov=ratio(main_domain.df,'L')
ratios=ratiov.getoddratios(top.top)
print 'ratios'
print ratios
ds=list(ratios.keys())
testobject = join(test,'Clean tweet')
oddradiojoin=join(df1,'Clean tweet')
oddradiojoin.joinall(ds,1)
testobject.joinall(ds,1)
oddradiojoin.joinall(bigrams.bigrams,2)
testobject.joinall(bigrams.bigrams,2)
test=testobject.df
cols=['Clean tweet']
if sentiment==1:
cols=['Clean tweet','sentiment_polarity', 'sentiment_subjectivity', 'absPolarity']
try:
for x in cols:
del oddradiojoin.df[x]
del test[x]
except:
pass
#training, test=joinc.gettrainingandtestp(oddradiojoin.df)
print 'matrix of elements to reduce'
print "saul,",oddradiojoin.df.shape
#########################################################
if undersampler==1:
print "saul,",oddradiojoin.df.shape
oddradiojoin.df=joinc.undersampling(oddradiojoin.df)
print oddradiojoin.df.shape
if A==1:
dftraining, dftest=pcaf(oddradiojoin.df,test)
oddradiojoin.df =dftraining.join(oddradiojoin.df["L"])
test=dftest.join(test["L"])
print oddradiojoin.df.shape
training=oddradiojoin.df
training=training.replace(['True','False'], [True,False])
test=test.replace(['True','False'], [True,False])
training=training.astype(np.float64)
test=test.astype(np.float64)
training['L']=training['L'].astype(bool)
test['L']=test['L'].astype(bool)
A=str(A)
sentiment=str(sentiment)
oddradiojoin.df.to_csv('crossvalidation.csv',index=False)
#undersampleddf1.to_csv(str(crossvalidationundersampling) +'\undersampling'+A+'.csv',index=False)
headers_names=list(training.columns.values)
headers_names.remove('L')
headers_names.append('L')
headers_names1=list(test.columns.values)
print headers_names,'heathers test',headers_names1
test = test[headers_names]
training = training[headers_names]
print 'training' +str(training.dtypes)
test.to_csv(str(crossvalidationundersampling) + r'\test1'+A+'.csv',index=False)
training.to_csv(str(crossvalidationundersampling) +r'\training1'+A+'.csv',index=False)
TRAINING=training.as_matrix(columns=None)
TEST=test.as_matrix(columns=None)
print 'training'
print training.dtypes
arff.dump(ArffL +r'\trainingwu'+A+str(undersampler)+sentiment+'.arff',TRAINING, relation="whatever", names=headers_names)
arff.dump(ArffL +r'\testwu'+A+str(undersampler)+sentiment+'.arff',TEST, relation="whatever", names=headers_names)
#domain('documents\csv\divorce\divorce',r'documents\csv\divorce',r'documents\Arff\divorce',1)
#domain('documents\csv\pregnancy\GOOD LABELING 170620151',r'documents\csv\pregnancy',r'documents\Arff\pregnancy',1)#1 indicates the value of A
A=[[0,0,0],[0,1,0],[1,0,0],[1,1,0],[0,0,1],[0,1,1],[1,0,1],[1,1,1]] #,(1,0),(1,1),(1,2),(0.5,0),(0.5,1),(0.5,2)
for s ,i,sentiment in A:
print s, i, sentiment
domain('documents\csv\drunk\drunk labeling 1300',r'documents\csv\drunk',r'documents\Arff\drunk',s,i,sentiment)
#domain('documents\csv\pregnancy\GOOD LABELING 170620151',r'documents\csv\pregnancy',r'documents\Arff\pregnancy',s,i, sentiment)#1 indicates the value of A
|
17,129 | 19f7bf69c346cc5a29aa6e38b5441b129bb35148 | import numpy as np
from .data_analysis import moving_average
def test_moving_average():
    """moving_average over a constant series: warm-up entries are NaN and
    the first full window equals the constant value."""
    smoothed = moving_average(np.ones(10), 4)
    assert np.any(np.isnan(smoothed))
    assert np.allclose(smoothed[3], 1.0)
|
17,130 | 2aa353b9c67c5cb42abb977f46432f09efddf63a | import requests
import json
import http.client as httplib, urllib.parse
import logging
import yaml
import datetime
config = yaml.safe_load(open("config.yaml"))
curr_date = datetime.datetime.now()
date_Array = []
for i in range(7):
curr_date += datetime.timedelta(days=1)
date_Array.append(curr_date.strftime("%Y-%m-%d"))
url = 'https://apis.mdnius.com/booking/api/v1/public/getSlots'
headers = {'authority': 'apis.mdnius.com'}
logging.basicConfig(filename='message_push.log', filemode='a', format='%(asctime)s - %(levelname)s - %(message)s', level=logging.INFO)
for date_obj in date_Array:
data = { 'date': date_obj,'clinicId': '609be7f8f132c1001fb007c4'}
r = requests.post(url, json=data, headers=headers)
response_json = json.loads(r.text)
message_push = ""
for i in response_json["data"]:
i["remainingSlots"] = i["totalSlots"]-i["filledSlots"]
if i["remainingSlots"] > 0:
message_push = message_push+ " TimeSlotStart: "+ i["timeSlotStart"] + " RemainingSlots: " + str(i["remainingSlots"]) + "\n"
if bool(message_push):
conn = httplib.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.parse.urlencode({
"token": config["API_TOKEN"],
"user": config["USER_KEY"],
"message": "Date: "+ date_obj + " \n\n" + (message_push),
}), { "Content-type": "application/x-www-form-urlencoded" })
a=conn.getresponse()
print(a.reason)
else:
logging.info("No Slots Available for " + date_obj)
#print(conn.getresponse())
|
17,131 | 4f4053488ffc363fca093596bebb7ffbb0e7735d | from hashlib import md5
from textract import process
def integers_only(text) -> str:
    """
    Removes all symbols except integers
    ex: +998(91) 333 33 33 -> 998913333333
    """
    digits = [ch for ch in text if ch.isdigit()]
    return ''.join(digits)
def get_boolean(value):
    """Map common falsy markers ('false', 'False', '0', 0, None) to False;
    any other value counts as True."""
    falsy_markers = ('false', None, 0, 'False', '0')
    return value not in falsy_markers
def get_text_from_file(filename):
    """Best-effort text extraction from `filename` via textract.

    Returns '' when extraction fails for any reason (unsupported format,
    missing file, decode error) instead of propagating the error.
    """
    try:
        return process(filename).decode('utf-8')
    except Exception:
        # A bare `except:` here also trapped SystemExit/KeyboardInterrupt;
        # limit the silent fallback to ordinary errors.
        return ''
def md5_text(text):
    """Hex MD5 digest of `text`, encoded as UTF-8."""
    digest = md5(text.encode('utf-8'))
    return digest.hexdigest()
17,132 | cee15b4b021e76c732696e71c14f8f20115f8c44 | from config import Config
import numpy as np
from PaperTest.help import Help
class FrequencyMeasures(object):
    """Computes idf, tfidf (global and per speaker) and suidf scores for
    the words of one meeting.

    meetingHisto: row 0 holds whole-meeting word counts, rows 1..Ns the
        per-speaker counts, one column per entry of meetingWords.
    meetingWords: vocabulary of the current meeting.
    wordsVector: list of word lists, one per meeting in the dataset
        (the document collection used for idf).
    Ns: number of speakers in the meeting.
    """
    def __init__(self, meetingHisto, meetingWords, wordsVector, Ns):
        # config class
        cfg = Config()
        # class variables
        self.meetingHisto = meetingHisto
        self.meetingWords = meetingWords
        self.wordsVector = wordsVector
        self.Ns = Ns
        self.high = cfg.high    # cap used when a surprisal is infinite
        self.small = cfg.small  # fallback idf for words uttered by nobody
        self.tfidf = []
        self.tfidfSpeak = []
        self.idf = []
        self.suidf = []

    def GetAll(self):
        """Populate tfidf, idf, tfidfSpeak and suidf in dependency order
        (Suidf reads self.idf, so idf must be computed first)."""
        self.tfidf = self.TfIdfGlobal(True)
        self.idf = self.TfIdfGlobal(False)
        self.tfidfSpeak = self.TfIdfSpeakersMeeting()
        self.suidf = self.Suidf()

    def TfIdfGlobal(self, tfTrue=True):
        """tf-idf of every meeting word against the whole dataset.

        With tfTrue=False the term-frequency factor is 1, so the result
        is the (1 + idf) vector that Suidf consumes.
        """
        tfidf = np.zeros(len(self.meetingWords))
        lenDataset = len(self.wordsVector)
        for x in range(len(self.meetingWords)):
            if tfTrue:
                tf = self.meetingHisto[0][x] / np.sum(self.meetingHisto[0][:])
            else:
                tf = 1
            # document frequency: number of meetings containing the word
            count = 0
            for words in self.wordsVector:
                if self.meetingWords[x] in words:
                    count += 1
            # BUG FIX: the original used the misspelled name `len_dataset`,
            # whose NameError was swallowed by a bare except, silently
            # forcing idf to 0 for every word. Guard count == 0 explicitly.
            if count:
                idf = np.log(lenDataset / count)
            else:
                idf = 0
            tfidf[x] = tf * (1 + idf)
        return tfidf

    def TfIdfSpeakersMeeting(self):  # all speakers
        """Per-speaker tf-idf matrix of shape (Ns, vocabulary size)."""
        tfidf = np.zeros((self.Ns, len(self.meetingWords)))
        for j in range(self.Ns):
            den = np.sum(self.meetingHisto[j+1][:])
            for x in range(len(self.meetingHisto[0])):
                tf = self.meetingHisto[j+1][x] / den  # frequency from jth speaker
                # number of speakers who utter this word at least once
                count = 0
                for s in range(self.Ns):
                    if self.meetingHisto[s+1][x]:
                        count += 1
                if count:
                    idf = np.log(self.Ns / count)
                else:
                    # word uttered by nobody: keep the original fallback of
                    # a small idf with tf forced to 1 (was hidden behind a
                    # bare except catching the division by zero)
                    idf = self.small
                    tf = 1
                tfidf[j][x] = tf * (1 + idf)
        return tfidf

    # can receive idfVec
    def Suidf(self):
        """suidf for all the words in the meeting (not in the dataset).

        Requires self.idf to be populated (see GetAll).
        """
        surp_w_s = np.zeros((self.Ns, len(self.meetingWords)))  # [num_speakers X num_words]
        surp_w = np.zeros(len(self.meetingWords))
        # BUG FIX: suidf_v used to alias surp_w (`suidf_v = surp_w`), so both
        # names wrote into the same array; give the result its own buffer.
        suidf_v = np.zeros(len(self.meetingWords))
        for c in range(0, len(self.meetingWords)):  # ext loop over words to match
            for j in range(0, self.Ns):
                num = 0
                den = 0
                for k in range(0, self.Ns):
                    if j != k:
                        # NOTE(review): the original comments describe counts
                        # from the *other* speakers (k+1), but the code
                        # accumulates speaker j's own counts Ns-1 times;
                        # confirm whether meetingHisto[k+1] was intended
                        # before changing the numerics.
                        num += self.meetingHisto[j+1][c]
                        den += np.sum(self.meetingHisto[j+1][:])
                surp_w_s[j][c] = -np.log(Help.SafeDiv(num, den))
                if surp_w_s[j][c] == np.inf:
                    # cap infinite surprisal (word never uttered)
                    surp_w_s[j][c] = self.high * self.Ns
        for f in range(0, len(self.meetingWords)):  # f idx of each single word
            word = self.meetingWords[f]
            summ = 0
            for c in range(0, self.Ns):
                summ += surp_w_s[c][f]
            surp_w[f] = Help.SafeDiv(summ, self.Ns)
            # HowMany(f): number of speakers who uttered the word
            suidf_v[f] = surp_w[f] * self.HowMany(f) * np.sqrt(self.idf[self.meetingWords.index(word)]) / self.Ns
        return suidf_v

    def HowMany(self, f):
        """Number of speakers whose histogram row has a non-zero count for
        word index f."""
        count = 0
        for x in range(1, len(self.meetingHisto)):
            if self.meetingHisto[x][f]:
                count += 1
        return count
17,133 | 5743cb2b072a326296b355a13cc67278b55a87ea | # coding: utf-8
"""
Rustici Engine API
Rustici Engine API # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class LaunchLinkRequestSchema(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Python attribute name -> declared swagger type.
    swagger_types = {
        'expiry': 'int',
        'redirect_on_exit_url': 'str',
        'tracking': 'bool',
        'start_sco': 'str',
        'additional_values': 'list[ItemValuePairSchema]'
    }

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'expiry': 'expiry',
        'redirect_on_exit_url': 'redirectOnExitUrl',
        'tracking': 'tracking',
        'start_sco': 'startSco',
        'additional_values': 'additionalValues'
    }

    def __init__(self, expiry=120, redirect_on_exit_url=None, tracking=True, start_sco=None, additional_values=None):  # noqa: E501
        """LaunchLinkRequestSchema - a model defined in Swagger"""  # noqa: E501
        self._expiry = None
        self._redirect_on_exit_url = None
        self._tracking = None
        self._start_sco = None
        self._additional_values = None
        self.discriminator = None
        # Only set attributes that were explicitly provided (or defaulted
        # to a non-None value) so unset fields stay None.
        if expiry is not None:
            self.expiry = expiry
        if redirect_on_exit_url is not None:
            self.redirect_on_exit_url = redirect_on_exit_url
        if tracking is not None:
            self.tracking = tracking
        if start_sco is not None:
            self.start_sco = start_sco
        if additional_values is not None:
            self.additional_values = additional_values

    @property
    def expiry(self):
        """Gets the expiry of this LaunchLinkRequestSchema.  # noqa: E501

        The number of seconds from now that this link will expire in. This parameter should only be specified if the setting 'ApiUseSignedLaunchLinks' is configured with a value of 'true'.  # noqa: E501

        :return: The expiry of this LaunchLinkRequestSchema.  # noqa: E501
        :rtype: int
        """
        return self._expiry

    @expiry.setter
    def expiry(self, expiry):
        """Sets the expiry of this LaunchLinkRequestSchema.

        The number of seconds from now that this link will expire in. This parameter should only be specified if the setting 'ApiUseSignedLaunchLinks' is configured with a value of 'true'.  # noqa: E501

        :param expiry: The expiry of this LaunchLinkRequestSchema.  # noqa: E501
        :type: int
        """
        self._expiry = expiry

    @property
    def redirect_on_exit_url(self):
        """Gets the redirect_on_exit_url of this LaunchLinkRequestSchema.  # noqa: E501

        The URL the application should redirect to when the learner exits a course. If not specified, configured value will be used.  # noqa: E501

        :return: The redirect_on_exit_url of this LaunchLinkRequestSchema.  # noqa: E501
        :rtype: str
        """
        return self._redirect_on_exit_url

    @redirect_on_exit_url.setter
    def redirect_on_exit_url(self, redirect_on_exit_url):
        """Sets the redirect_on_exit_url of this LaunchLinkRequestSchema.

        The URL the application should redirect to when the learner exits a course. If not specified, configured value will be used.  # noqa: E501

        :param redirect_on_exit_url: The redirect_on_exit_url of this LaunchLinkRequestSchema.  # noqa: E501
        :type: str
        """
        self._redirect_on_exit_url = redirect_on_exit_url

    @property
    def tracking(self):
        """Gets the tracking of this LaunchLinkRequestSchema.  # noqa: E501

        Should this launch be tracked? If false, Engine will avoid tracking to the extent possible for the standard being used.  # noqa: E501

        :return: The tracking of this LaunchLinkRequestSchema.  # noqa: E501
        :rtype: bool
        """
        return self._tracking

    @tracking.setter
    def tracking(self, tracking):
        """Sets the tracking of this LaunchLinkRequestSchema.

        Should this launch be tracked? If false, Engine will avoid tracking to the extent possible for the standard being used.  # noqa: E501

        :param tracking: The tracking of this LaunchLinkRequestSchema.  # noqa: E501
        :type: bool
        """
        self._tracking = tracking

    @property
    def start_sco(self):
        """Gets the start_sco of this LaunchLinkRequestSchema.  # noqa: E501

        For SCORM, SCO identifier to override launch, overriding the normal sequencing.  # noqa: E501

        :return: The start_sco of this LaunchLinkRequestSchema.  # noqa: E501
        :rtype: str
        """
        return self._start_sco

    @start_sco.setter
    def start_sco(self, start_sco):
        """Sets the start_sco of this LaunchLinkRequestSchema.

        For SCORM, SCO identifier to override launch, overriding the normal sequencing.  # noqa: E501

        :param start_sco: The start_sco of this LaunchLinkRequestSchema.  # noqa: E501
        :type: str
        """
        self._start_sco = start_sco

    @property
    def additional_values(self):
        """Gets the additional_values of this LaunchLinkRequestSchema.  # noqa: E501


        :return: The additional_values of this LaunchLinkRequestSchema.  # noqa: E501
        :rtype: list[ItemValuePairSchema]
        """
        return self._additional_values

    @additional_values.setter
    def additional_values(self, additional_values):
        """Sets the additional_values of this LaunchLinkRequestSchema.


        :param additional_values: The additional_values of this LaunchLinkRequestSchema.  # noqa: E501
        :type: list[ItemValuePairSchema]
        """
        self._additional_values = additional_values

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}

        # Recursively serialize nested models (anything with a to_dict()),
        # including models held in lists and dict values.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(LaunchLinkRequestSchema, dict):
            for key, value in self.items():
                result[key] = value

        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, LaunchLinkRequestSchema):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
|
17,134 | ead4132aeb1ed4e2707747f2f2d0b2d9ff509ecd | import pygame
import random
# TILES
DIRT = 0
GRASS = 1
WATER = 2
WALL = 3
TREE_0 = 4
TREE_1 = 5
TREE_2 = 6
class Tree:
    """A decorative tree sprite placed at a random on-screen position."""

    def __init__(self):
        # Load the tree texture once and scale it to sprite size.
        raw_sprite = pygame.image.load('./textures/trees/tree.png')
        self.SPRITE = pygame.transform.scale(raw_sprite, (125, 125))
        # Random placement inside the playable area.
        self.X_POS = random.randint(50, 300)
        self.Y_POS = random.randint(50, 450)
class TEMPLE:
    """The temple structure, drawn at a fixed map location."""

    def __init__(self):
        raw_sprite = pygame.image.load('./sprites/temple.png')
        self.SPRITE = pygame.transform.scale(raw_sprite, (250, 250))
        # Fixed coordinates for the temple.
        self.X_POS = 6
        self.Y_POS = 1
num_trees = 15
trees = [Tree() for x in range (num_trees)]
# Tile id -> pygame surface used when drawing the map.
# NOTE(review): TREE_2 (id 6) is defined above but has no entry here —
# presumably './textures/trees/tree_2.png' was meant to be added; confirm
# before any code indexes TEXTURES[TREE_2].
TEXTURES = {
    DIRT: pygame.image.load('./textures/dirt.png'),
    GRASS: pygame.image.load('./textures/grass.png'),
    WATER: pygame.image.load('./textures/water.png'),
    WALL: pygame.image.load('./textures/wall.png'),
    TREE_0: pygame.image.load('./textures/trees/tree.png'),
    TREE_1: pygame.image.load('./textures/trees/tree_1.png'),
}
GRID = [
[GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, WATER, WATER, WATER],
[GRASS, GRASS, GRASS, GRASS, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, GRASS, GRASS, WATER, WATER, WATER, WATER, WATER],
[GRASS, GRASS, GRASS, GRASS, GRASS, GRASS, DIRT, DIRT, DIRT, DIRT, GRASS, GRASS, GRASS, GRASS, GRASS, WATER, WATER, WATER, GRASS, GRASS],
[GRASS, GRASS, GRASS, GRASS, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, GRASS, GRASS, GRASS, WATER, WATER, WATER, WATER, GRASS, GRASS, GRASS],
[GRASS, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, GRASS, GRASS, GRASS, WATER, WATER, GRASS, GRASS, GRASS, GRASS, GRASS],
[DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, WATER, WATER, WATER, WATER, GRASS, GRASS, GRASS],
[DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, WATER, WATER, WATER, WATER, WATER, WATER, WATER, GRASS, GRASS],
[GRASS, GRASS, GRASS, GRASS, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, WATER, WATER, WATER, WATER, WATER, WATER, GRASS, GRASS, GRASS],
[GRASS, GRASS, GRASS, GRASS, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, WATER, WATER, WATER, WATER, WATER, WATER, WATER, GRASS, GRASS],
[GRASS, GRASS, GRASS, GRASS, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, DIRT, WATER, WATER, WATER, WATER, WATER, WATER, WATER, WATER, GRASS]
]
TILESIZE = 50
MAPWIDTH = 20
MAPHEIGHT = 10
pygame.init()
pygame.display.set_caption('Wolf Slayer')
DISPLAYSURFACE = pygame.display.set_mode((MAPWIDTH*TILESIZE, MAPHEIGHT*TILESIZE))
WHITE = (200, 200, 200)
BLACK = (0, 0, 0)
BLUE = (30, 144, 255)
GREEN = (60, 179, 113)
RED = (178, 0, 0)
|
17,135 | d8fc6ba514bf61adc45fe99f0f337681653d729f | """Support for retrieving status info from Google Wifi/OnHub routers."""
from __future__ import annotations
from dataclasses import dataclass
from datetime import timedelta
import logging
import requests
import voluptuous as vol
from homeassistant.components.sensor import (
PLATFORM_SCHEMA,
SensorEntity,
SensorEntityDescription,
)
from homeassistant.const import (
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
UnitOfTime,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
from homeassistant.util import Throttle, dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_VERSION = "current_version"
ATTR_LAST_RESTART = "last_restart"
ATTR_LOCAL_IP = "local_ip"
ATTR_NEW_VERSION = "new_version"
ATTR_STATUS = "status"
ATTR_UPTIME = "uptime"
DEFAULT_HOST = "testwifi.here"
DEFAULT_NAME = "google_wifi"
ENDPOINT = "/api/v1/status"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
@dataclass
class GoogleWifiRequiredKeysMixin:
    """Mixin for required keys."""

    # Top-level section of the /api/v1/status payload this sensor reads
    # from (e.g. "software", "system", "wan").
    primary_key: str
    # Field name inside that section.
    sensor_key: str
@dataclass
class GoogleWifiSensorEntityDescription(
    SensorEntityDescription, GoogleWifiRequiredKeysMixin
):
    """Describes GoogleWifi sensor entity."""
    # Combines the standard SensorEntityDescription fields with the
    # required payload-location keys from the mixin.
SENSOR_TYPES: tuple[GoogleWifiSensorEntityDescription, ...] = (
GoogleWifiSensorEntityDescription(
key=ATTR_CURRENT_VERSION,
primary_key="software",
sensor_key="softwareVersion",
icon="mdi:checkbox-marked-circle-outline",
),
GoogleWifiSensorEntityDescription(
key=ATTR_NEW_VERSION,
primary_key="software",
sensor_key="updateNewVersion",
icon="mdi:update",
),
GoogleWifiSensorEntityDescription(
key=ATTR_UPTIME,
primary_key="system",
sensor_key="uptime",
native_unit_of_measurement=UnitOfTime.DAYS,
icon="mdi:timelapse",
),
GoogleWifiSensorEntityDescription(
key=ATTR_LAST_RESTART,
primary_key="system",
sensor_key="uptime",
icon="mdi:restart",
),
GoogleWifiSensorEntityDescription(
key=ATTR_LOCAL_IP,
primary_key="wan",
sensor_key="localIpAddress",
icon="mdi:access-point-network",
),
GoogleWifiSensorEntityDescription(
key=ATTR_STATUS,
primary_key="wan",
sensor_key="online",
icon="mdi:google",
),
)
SENSOR_KEYS: list[str] = [desc.key for desc in SENSOR_TYPES]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=SENSOR_KEYS): vol.All(
cv.ensure_list, [vol.In(SENSOR_KEYS)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(
    hass: HomeAssistant,
    config: ConfigType,
    add_entities: AddEntitiesCallback,
    discovery_info: DiscoveryInfoType | None = None,
) -> None:
    """Set up the Google Wifi sensor."""
    monitored = config[CONF_MONITORED_CONDITIONS]
    # One shared API object polls the router for all sensors.
    api = GoogleWifiAPI(config[CONF_HOST], monitored)
    sensors = []
    for description in SENSOR_TYPES:
        if description.key in monitored:
            sensors.append(GoogleWifiSensor(api, config[CONF_NAME], description))
    add_entities(sensors, True)
class GoogleWifiSensor(SensorEntity):
    """Representation of a Google Wifi sensor."""

    entity_description: GoogleWifiSensorEntityDescription

    def __init__(
        self,
        api: GoogleWifiAPI,
        name: str,
        description: GoogleWifiSensorEntityDescription,
    ) -> None:
        """Initialize a Google Wifi sensor."""
        self.entity_description = description
        self._api = api
        self._attr_name = f"{name}_{description.key}"

    @property
    def available(self) -> bool:
        """Return availability of Google Wifi API."""
        return self._api.available

    def update(self) -> None:
        """Get the latest data from the Google Wifi API."""
        self._api.update()
        # When the API is unreachable the state is cleared to None.
        self._attr_native_value = (
            self._api.data[self.entity_description.key] if self.available else None
        )
class GoogleWifiAPI:
    """Get the latest data and update the states."""

    def __init__(self, host, conditions):
        """Initialize the data object."""
        uri = "http://"
        resource = f"{uri}{host}{ENDPOINT}"
        # Pre-build the GET request once; it is re-sent on every update.
        self._request = requests.Request("GET", resource).prepare()
        self.raw_data = None
        self.conditions = conditions
        self.data = {
            ATTR_CURRENT_VERSION: None,
            ATTR_NEW_VERSION: None,
            ATTR_UPTIME: None,
            ATTR_LAST_RESTART: None,
            ATTR_LOCAL_IP: None,
            ATTR_STATUS: None,
        }
        self.available = True
        self.update()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Get the latest data from the router."""
        try:
            with requests.Session() as sess:
                response = sess.send(self._request, timeout=10)
            self.raw_data = response.json()
            self.data_format()
            self.available = True
        except (ValueError, requests.exceptions.ConnectionError):
            # ValueError covers a non-JSON body; on any failure mark the
            # API unavailable and drop the stale payload.
            _LOGGER.warning("Unable to fetch data from Google Wifi")
            self.available = False
            self.raw_data = None

    def data_format(self):
        """Format raw data into easily accessible dict."""
        for description in SENSOR_TYPES:
            if description.key not in self.conditions:
                continue
            attr_key = description.key
            try:
                if description.primary_key in self.raw_data:
                    sensor_value = self.raw_data[description.primary_key][
                        description.sensor_key
                    ]
                    # Format sensor for better readability
                    if attr_key == ATTR_NEW_VERSION and sensor_value == "0.0.0.0":
                        sensor_value = "Latest"
                    elif attr_key == ATTR_UPTIME:
                        # Seconds -> days, two decimals.
                        sensor_value = round(sensor_value / (3600 * 24), 2)
                    elif attr_key == ATTR_LAST_RESTART:
                        last_restart = dt_util.now() - timedelta(seconds=sensor_value)
                        sensor_value = last_restart.strftime("%Y-%m-%d %H:%M:%S")
                    elif attr_key == ATTR_STATUS:
                        if sensor_value:
                            sensor_value = "Online"
                        else:
                            sensor_value = "Offline"
                    elif (
                        attr_key == ATTR_LOCAL_IP and not self.raw_data["wan"]["online"]
                    ):
                        sensor_value = None

                    self.data[attr_key] = sensor_value
            except KeyError:
                _LOGGER.error(
                    (
                        "Router does not support %s field. "
                        "Please remove %s from monitored_conditions"
                    ),
                    description.sensor_key,
                    attr_key,
                )
                self.data[attr_key] = None
|
class Node:
    """A singly linked list node: a data payload plus a pointer to the next node."""

    def __init__(self, initdata):
        self.data = initdata
        self.next = None

    def getData(self):
        """Return the stored payload."""
        return self.data

    def getNext(self):
        """Return the following node, or None at the tail."""
        return self.next

    def setData(self, newdata):
        """Replace the stored payload."""
        self.data = newdata

    def setNext(self, newnext):
        """Point this node at *newnext*."""
        self.next = newnext
|
17,137 | 898b881b7108bc0233aa8696b82f390e148e5ed2 | # -*- coding: utf-8 -*-
"""
Written by Daniel M. Aukes
Email: danaukes<at>gmail.com
Please see LICENSE for full license.
"""
import pynamics
from pynamics.tree_node import TreeNode
from pynamics.vector import Vector
from pynamics.rotation import Rotation, RotationalVelocity
from pynamics.name_generator import NameGenerator
import sympy
class Frame(NameGenerator):
    """A reference frame with orthonormal basis vectors x, y, z.

    Frames are linked by Rotation / RotationalVelocity objects.  Two trees
    (R_tree for orientation, w_tree for angular velocity) record the
    connectivity so the relative rotation / angular velocity between any
    two frames can be composed along the connecting path; composed results
    are cached in the precomputed_* dicts on both endpoint frames.
    """

    def __init__(self,name = None):
        super(Frame,self).__init__()
        # Direct and cached Rotation objects, keyed by the other frame.
        self.connections_R = {}
        self.precomputed_R = {}
        # Direct and cached RotationalVelocity objects, keyed by the other frame.
        self.connections_w = {}
        self.precomputed_w = {}
        # Cache for efficient_rep() results, keyed by (other frame, function name).
        self.reps = {}
        self.R_tree = TreeNode(self)
        self.w_tree = TreeNode(self)
        name = name or self.generate_name()
        self.name = name
        # Orthonormal basis vectors of this frame.
        self.x = Vector()
        self.y = Vector()
        self.z = Vector()
        self.x_sym = sympy.Symbol(name+'.x')
        self.y_sym = sympy.Symbol(name+'.y')
        self.z_sym = sympy.Symbol(name+'.z')
        self.syms = sympy.Matrix([self.x_sym,self.y_sym,self.z_sym])
        self.x.add_component(self,[1,0,0])
        self.y.add_component(self,[0,1,0])
        self.z.add_component(self,[0,0,1])
        # Identity rotation / zero angular velocity of the frame w.r.t. itself.
        r = Rotation(self,self,sympy.Matrix.eye(3))
        w = RotationalVelocity(self,self,sympy.Number(0)*self.x)
        self.add_rotation(r)
        self.add_w(w)
        pynamics.addself(self,name)

    def add_rotation(self,rotation):
        """Register a direct rotation between this frame and another."""
        self.connections_R[rotation.other(self)] = rotation

    def add_precomputed_rotation(self,rotation):
        """Cache a composed rotation so the path walk is done only once."""
        self.precomputed_R[rotation.other(self)] = rotation

    def add_w(self,w):
        """Register a direct rotational velocity between this frame and another."""
        self.connections_w[w.other(self)] = w

    def add_precomputed_w(self,w):
        """Cache a composed rotational velocity."""
        self.precomputed_w[w.other(self)] = w

    @property
    def principal_axes(self):
        # Basis vectors in x, y, z order.
        return [self.x,self.y,self.z]

    def __str__(self):
        return self.name

    def __repr__(self):
        return str(self)

    def calc_R(self,other):
        """Return the Rotation relating this frame to *other*.

        Uses a direct or cached connection when available; otherwise
        composes rotations along the R_tree path, caching every partial
        composition on both endpoint frames.
        """
        if other in self.connections_R:
            return self.connections_R[other]
        elif other in self.precomputed_R:
            return self.precomputed_R[other]
        else:
            path = self.R_tree.path_to(other.R_tree)
            path = [item.myclass for item in path]
            from_frames = path[:-1]
            to_frames = path[1:]
            Rs = [from_frame.connections_R[to_frame].to_other(from_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
            # w_s = [from_frame.connections[to_frame].w__from(from_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
            R_final = Rs.pop(0)
            # w_final = w_s.pop(0)
            for R,to_frame in zip(Rs,to_frames[1:]):
                R_final = R*R_final
                # w_final += w_
                rotation = Rotation(self,to_frame,R_final)
                # rotation.set_w(w_final)
                self.add_precomputed_rotation(rotation)
                to_frame.add_precomputed_rotation(rotation)
            # rotation = Rotation(self,to_frame,R_final,w_final)
            return rotation

    def calc_w(self,other):
        """Return the RotationalVelocity relating this frame to *other*.

        Angular velocities along the w_tree path are summed (not
        multiplied like rotations); partial sums are cached.
        """
        if other in self.connections_w:
            return self.connections_w[other]
        elif other in self.precomputed_w:
            return self.precomputed_w[other]
        else:
            path = self.w_tree.path_to(other.w_tree)
            path = [item.myclass for item in path]
            from_frames = path[:-1]
            to_frames = path[1:]
            # Rs = [from_frame.connections[to_frame].to_other(from_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
            w_s = [from_frame.connections_w[to_frame].w__from(from_frame) for from_frame,to_frame in zip(from_frames,to_frames)]
            # R_final = Rs.pop(0)
            w_final = w_s.pop(0)
            for w_,to_frame in zip(w_s,to_frames[1:]):
                # R_final = R*R_final
                w_final += w_
                rotational_velocity = RotationalVelocity(self,to_frame,w_final)
                # rotation.set_w(w_final)
                self.add_precomputed_w(rotational_velocity)
                to_frame.add_precomputed_w(rotational_velocity)
            # rotation = Rotation(self,to_frame,R_final,w_final)
            return rotational_velocity

    def getR(self,other):
        """Return the rotation matrix from *other* into this frame."""
        return self.calc_R(other).to_other(self)

    def getw_(self,other):
        """Return the angular velocity vector of *other* as seen from this frame."""
        return self.calc_w(other).w__from(self)

    def set_w(self,other,w):
        """Directly declare the angular velocity *w* between self and *other*."""
        rotational_velocity = RotationalVelocity(self, other, w)
        self.add_w(rotational_velocity)
        other.add_w(rotational_velocity)
        self.w_tree.add_branch(other.w_tree)

    def rotate_fixed_axis(self,fromframe,axis,q,system = None):
        """Orient this frame by rotating *fromframe* about *axis* by coordinate *q*."""
        system = system or pynamics.get_system()
        rotation = Rotation.build_fixed_axis(fromframe,self,axis,q,system)
        rotational_velocity = RotationalVelocity.build_fixed_axis(fromframe,self,axis,q,system)
        self.add_rotation(rotation)
        self.add_w(rotational_velocity)
        fromframe.add_rotation(rotation)
        fromframe.add_w(rotational_velocity)
        fromframe.R_tree.add_branch(self.R_tree)
        fromframe.w_tree.add_branch(self.w_tree)

    def rotate_fixed_axis_directed(self,fromframe,axis,q,system=None):
        """Deprecated alias of rotate_fixed_axis (kept for callers)."""
        self.rotate_fixed_axis(fromframe,axis,q,system)

    def efficient_rep(self,other,functionname):
        """For each basis-vector pair of self/other, find the frame along the
        connecting path in which *functionname* (e.g. 'dot') between the
        two vectors yields the shortest symbolic expression; result is a
        dict keyed by frozenset of the two symbols, cached in self.reps.
        """
        key = (other,functionname)
        if key in self.reps:
            return self.reps[key]
        else:
            path = self.R_tree.path_to(other.R_tree)
            dot = {}
            for mysym,myvec in zip(self.syms,[self.x,self.y,self.z]):
                for othersym,othervec in zip(other.syms,[other.x,other.y,other.z]):
                    min_dot_len = 0
                    for frame in path:
                        frame = frame.myclass
                        v1 = myvec.express(frame).components[frame]
                        v2 = othervec.express(frame).components[frame]
                        function = getattr(v1,functionname)
                        dot_rep = function(v2)
                        dot_len = len(str(dot_rep))
                        # Keep the frame giving the shortest expression;
                        # on ties prefer the deeper (descendant) frame.
                        if min_dot_len==0 or dot_len<min_dot_len:
                            min_dot_len=dot_len
                            min_dot_frame = frame
                        elif dot_len==min_dot_len:
                            if min_dot_frame in frame.decendents:
                                min_dot_frame = frame
                    dot[frozenset((mysym,othersym))] = min_dot_frame
            self.reps[key] = dot
            return dot
|
17,138 | 426b19ebec25354ffab4e788161ed3f1d1db3a04 | import os
import os
import pygame

# NOTE: the original Korean comments were mojibake-corrupted and several of
# them were split across lines (producing syntax errors); they have been
# rejoined and translated to English.  Runtime strings are unchanged.

pygame.init()  # initialize pygame (required)

# Screen size
screen_width = 640   # width
screen_height = 480  # height
screen = pygame.display.set_mode((screen_width, screen_height))

# Window title
pygame.display.set_caption("Pang")

# FPS clock
clock = pygame.time.Clock()

###########################################
# 1. Game setup (background, images, coordinates, speed, fonts, ...)
current_path = os.path.dirname(__file__)  # directory containing this file
image_path = os.path.join(current_path, "image")  # "image" asset folder

# Background
background = pygame.image.load(os.path.join(image_path, "background.png"))

# Stage
stage = pygame.image.load(os.path.join(image_path, "stage.png"))
stage_size = stage.get_rect().size
stage_height = stage_size[1]  # used to place the character on top of the stage

# Character
character = pygame.image.load(os.path.join(image_path, "character.png"))
character_size = character.get_rect().size  # sprite dimensions
character_width = character_size[0]
character_height = character_size[1]
character_x_pos = (screen_width / 2) - (character_width / 2)  # horizontally centered
character_y_pos = screen_height - character_height - stage_height  # standing on the stage

# Movement offsets
to_x = 0
to_y = 0

# Event loop
running = True  # is the game still running?
while running:
    dt = clock.tick(30)  # cap the frame rate at 30 FPS
    for event in pygame.event.get():
        if event.type == pygame.QUIT:  # window close requested?
            running = False

    screen.blit(background, (0, 0))
    screen.blit(stage, (0, screen_height - stage_height))
    screen.blit(character, (character_x_pos, character_y_pos))
    pygame.display.update()  # redraw the frame

# Quit pygame
pygame.quit()
17,139 | 566b4dfd5d903630669410eebe0ee342a0c726ed | #!/usr/bin/python
"""
"""
# IMPORT
from Bio import SeqIO
from collections import OrderedDict
import sys
from os import listdir
from os.path import isfile
# FUNCTIONS
def check_duplication(file):
    """Write '<file>.nonredundant.fasta' with one entry per unique sequence.

    Records sharing an identical sequence are merged; all their ids are
    listed on the header of the single surviving entry.
    """
    with open(file, "r") as handle:
        records = list(SeqIO.parse(handle, "fasta"))
    # Group record ids by their (identical) sequence, first-seen order.
    pool = OrderedDict()
    for record in records:
        pool.setdefault(record.seq, []).append(record.id)
    with open(file + '.nonredundant.fasta', "w") as out:
        for seq, ids in pool.items():
            # Secondary ids of a merged group are prefixed with '='.
            out.write(">" + "\n=".join(ids))
            out.write("\n")
            out.write(str(seq))
            out.write('\n')
# MAIN
if __name__ == '__main__':
    # Directory containing the FASTA files to process.
    input_dir = sys.argv[1]
    # Bug fix: listdir() returns bare names, so they must be joined with
    # the input directory before the isfile() check and before opening —
    # previously the script only worked when run from inside that directory.
    files = [join(input_dir, name) for name in listdir(input_dir)
             if isfile(join(input_dir, name))]
    for file in files:
        check_duplication(file)
17,140 | a1b26d594efc584e99f80982af50f7e7ad8ddb01 | # test inbound rules
# 2 participants - receiver has multiple router connections
mode multi-switch
participants 2
peers 1 2
#participant ID ASN PORT MAC IP PORT MAC IP
participant 1 100 PORT MAC 172.0.0.1/16
participant 2 200 PORT MAC 172.0.0.11/16 PORT MAC 172.0.0.12/16 PORT MAC 172.0.0.13/16 PORT MAC 172.0.0.14/16 PORT MAC 172.0.0.15/16 PORT MAC 172.0.0.16/16 PORT MAC 172.0.0.17/16 PORT MAC 172.0.0.18/16 PORT MAC 172.0.0.19/16
host AS ROUTER _ IP # host names of form a1_100 a1_110
announce 1 100.0.0.0/24
announce 2 140.0.0.0/24
flow b1 << 80
flow b2 << 81
flow b3 << 82
flow b4 << 83
flow b5 << 84
flow b6 << 85
flow b7 << 86
flow b8 << 87
flow b9 << 88
listener AUTOGEN 77 80 81 82 83 84 85 86 87 88
test init {
listener
}
test regress {
verify a1_100 b1_140 77
verify a1_100 b1_140 80
verify a1_100 b2_140 81
verify a1_100 b3_140 82
verify a1_100 b4_140 83
verify a1_100 b5_140 84
verify a1_100 b6_140 85
verify a1_100 b7_140 86
verify a1_100 b8_140 87
verify a1_100 b9_140 88
}
test info {
local ovs-ofctl dump-flows s1
local ovs-ofctl dump-flows s2
local ovs-ofctl dump-flows s3
local ovs-ofctl dump-flows s4
} |
17,141 | 2fafd80638c983bce2bf44e62582b3b5ad868874 | #!/usr/bin/env python
import os
import sys
import string
from datetime import datetime
source_exts = ('.cpp', '.c', '.cc')
header_exts = ('.hpp', '.h')
sln_exts = ('.sln')
proj_exts = ('.vcproj')
CREATE_PRI_FILES = False
ignore_dirs = ('.svn', 'GeneratedFiles', 'generatedfiles',
'_UpgradeReport_Files', 'Debug', 'Release',
'Logs', 'debug', 'release')
pro_template = string.Template('''# $basename.pro
# Auto-generated by generate-pro.py
# Do not edit this file.
# Put additional options in the corresponding $basename.pri
PROJECT = $project
$template
CONFIG *= debug_and_release
CONFIG *= build_all
CONFIG *= warn_on
CONFIG *= console
PROJECT_PATH = $proj_path
PROJECT_LIBRARY_PATH = $proj_path/Libraries
PAVE_COMMON_PATH = $pave_common
PAVE_COMMON_LIBRARY_PATH = $pave_common/Libraries
$body
# optional pri file to specify additional build options and dependencies
include( $basename.pri )
# output dir for build outputs
CONFIG(debug, debug|release) {
DESTDIR = $$$${PROJECT_PATH}/debug
TARGET = $target_debug
}
CONFIG(release, debug|release) {
DESTDIR = $$$${PROJECT_PATH}/release
TARGET = $target
}
QMAKE_LIBPATH *= $$$${DESTDIR}
include( $$$${PROJECT_LIBRARY_PATH}/Libraries.pri )
include( $$$${PAVE_COMMON_LIBRARY_PATH}/Libraries.pri )
include( $$$${PAVE_COMMON_PATH}/Common.pri )
''')
# Resolve key paths; the script must be launched from the project root.
start_dir = os.path.abspath(sys.argv[1]) # subdirectory of either IGVC or P12
project_root = os.path.abspath('.') # current directory, either IGVC or P12
pave_common_root = os.path.normpath(os.path.join(project_root, '../PAVE_Common'))
project_root_basename = os.path.basename(project_root)
if project_root_basename != 'IGVC' and project_root_basename != 'P12':
    print 'Current directory must be project root.'
    exit()
# build directory structure of the form:
class Dir:
    """One node of the scanned directory tree."""

    def __init__(self, root_name, dirs, source_files, header_files, proj_descendents, sln_descendents):
        self.root_name = root_name                # path of this directory
        self.dirs = dirs                          # child Dir objects
        self.source_files = source_files          # source file names here
        self.header_files = header_files          # header file names here
        self.proj_descendents = proj_descendents  # descendant project dirs
        self.sln_descendents = sln_descendents    # descendant solution dirs
# check if file has one of the extensions in type_exts
def is_type(file, type_exts):
    """Return True when *file*'s extension is one of *type_exts*."""
    _, extension = os.path.splitext(file)
    return extension in type_exts
# returns a list of the elements of files that have extensions in type_exts
def get_files_of_type(files, type_exts):
    """Filter *files* down to those whose extension is in *type_exts*."""
    # Inlined extension test (equivalent to is_type).
    return [name for name in files if os.path.splitext(name)[1] in type_exts]
# return the argument, quoted if it has spaces
def quote_name(name):
    """Return *name*, wrapped in double quotes when it contains spaces."""
    return '"' + name + '"' if ' ' in name else name
# return the include string for .pri solution subdirectories, such as
# include( Vision/Vision.pri )
def subdir_sln_name(sln_dir):
    """Return the qmake include() line for a solution subdirectory's .pri file,
    e.g. 'include( Vision/Vision.pri )'."""
    pri_file = os.path.basename(sln_dir) + '.pri'
    return 'include( %s )' % quote_name(os.path.join(sln_dir, pri_file))
# replaces backslashes with forward, to avoid qmake warnings
def qmake_path(s):
    """Normalize path separators to forward slashes (avoids qmake warnings)."""
    return '/'.join(s.split('\\'))
# creates the text of the .pro file for the directory root (of type Dir)
# returns a tuple of (basename of generated .pro file, text of file)
def generate_pro_text(root):
    """Build the .pro file text for *root* (a Dir).

    Returns (basename of the generated .pro file, file text); basename is
    '' when no .pro file should be generated for this directory.
    """
    source_files = root.source_files
    header_files = root.header_files
    source_list = []
    header_list = []
    subdir_list = []
    subdir_pri_list = []
    # NOTE(review): 'template' and 'lib_config' locals are never used below.
    template = ''
    lib_config = ''
    # Paths relative to this directory, forward-slashed for qmake.
    proj_path = qmake_path(os.path.relpath(project_root, root.root_name))
    pave_common = qmake_path(os.path.relpath(pave_common_root, root.root_name))
    project_name = os.path.basename(os.path.abspath(root.root_name))
    pro_pri_basename = project_name
    d = dict(project = project_name,
             proj_path = proj_path,
             pave_common = pave_common,
             basename = pro_pri_basename,
             target = project_name,
             target_debug = project_name,
             template = '')
    if len(root.proj_descendents) > 0: # this is a solution directory
        d['template'] = 'TEMPLATE = subdirs\n'
        subdir_list = ['SUBDIRS *= %s' % quote_name(proj_dir)
                       for proj_dir in root.proj_descendents]
        subdir_pri_list = [subdir_sln_name(sln_dir)
                           for sln_dir in root.sln_descendents]
    elif len(source_files) > 0: # this is a project directory
        source_list = ['SOURCES *= ' + quote_name(file) for file in source_files]
        header_list = ['HEADERS *= ' + quote_name(file) for file in header_files]
    else:
        # print 'Doing nothing in ' + root.root_name
        return ('', '')
    # Assemble the variable body, one section per non-empty list.
    body_list = []
    if len(source_list) > 0:
        body_list.append("\n".join(source_list) + '\n')
    if len(header_list) > 0:
        body_list.append("\n".join(header_list) + '\n')
    if len(subdir_list) > 0:
        body_list.append("\n".join(subdir_list) + '\n')
    if len(subdir_pri_list) > 0:
        body_list.append("\n".join(subdir_pri_list) + '\n')
    d['body'] = qmake_path("\n".join(body_list))
    return (pro_pri_basename, pro_template.substitute(d))
# prunes out directories with names in ignore_dirs
def pruned_dirs(dirs):
    """Drop directories whose basename is in the module-level ignore_dirs."""
    return [d for d in dirs if os.path.basename(d) not in ignore_dirs]
# returns a clean, readable path for directory child, a subdirectory of parent_dir
def child_item(parent_dir, child):
    """Return a clean, normalized path for *child* inside *parent_dir*."""
    joined = os.path.join(parent_dir, child)
    return os.path.normpath(joined)
def map_files(start_dir):
    """Recursively build and return the Dir tree rooted at *start_dir*."""
    items = os.listdir(start_dir)
    files = [item for item in items if os.path.isfile(os.path.join(start_dir, item))]
    dirs = [item for item in items if os.path.isdir(os.path.join(start_dir, item))]
    dirs = pruned_dirs(dirs)
    sub_fdirs = [map_files(child_item(start_dir, dir)) for dir in dirs]
    source_files = get_files_of_type(files, source_exts)
    header_files = get_files_of_type(files, header_exts)
    # Re-root each child's descendant paths relative to this directory.
    proj_descendents = [child_item(os.path.basename(sub_fdir.root_name), proj_dir)
                        for sub_fdir in sub_fdirs for proj_dir in sub_fdir.proj_descendents]
    sln_descendents = [child_item(os.path.basename(sub_fdir.root_name), sln_dir)
                       for sub_fdir in sub_fdirs for sln_dir in sub_fdir.sln_descendents]
    for sub_fdir in sub_fdirs:
        subdir_basename = os.path.basename(sub_fdir.root_name)
        # if this is a solution subdir (has multiple project descendents), add it to the list
        if len(sub_fdir.proj_descendents) > 0:
            sln_descendents.append(subdir_basename)
        # don't count header files
        elif len(sub_fdir.source_files) > 0:
            proj_descendents.append(subdir_basename)
    return Dir(start_dir, sub_fdirs, source_files, header_files, proj_descendents, sln_descendents)
def generate_pro_files(root_dir, parent_basename = ''):
    """Write a .pro file for *root_dir*, then recurse into its children.

    NOTE(review): os.write() with a str only works on Python 2; this
    script targets Python 2 (print statements elsewhere in the file).
    """
    (basename, pro_text) = generate_pro_text(root_dir)
    if len(basename) == 0: # if there shouldn't be a .pro file here
        return; # don't process children
    pro_name = child_item(root_dir.root_name, basename + '.pro')
    pro_file = os.open(pro_name, os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
    os.write(pro_file, pro_text)
    os.close(pro_file)
    # ---- Note: this is left out because all .pri's should now be checked
    # in, so we want to see warnings if any are missing
    if CREATE_PRI_FILES:
        # make sure the .pri file exists to avoid qmake warnings
        pri_name = child_item(root_dir.root_name, basename + '.pri')
        try:
            pri_file = os.open(pri_name, os.O_RDONLY)
            os.close(pri_file)
        except: # if the file doesn't exist, create it
            pri_file = os.open(pri_name, os.O_WRONLY | os.O_CREAT)
            if len(parent_basename) > 0:
                pri_text = 'include( ../' + parent_basename + '.pri )\n'
                os.write(pri_file, pri_text)
            os.close(pri_file)
    for subfdir in root_dir.dirs:
        generate_pro_files(subfdir, basename)
# for debugging only
def print_dirs(root_dir, prefix):
    """Debug dump of the Dir tree: d=dir, s=source, h=header, p=project."""
    pinc = ' '
    print prefix + 'd ' + root_dir.root_name
    for source in root_dir.source_files:
        print prefix + pinc + 's ' + source
    for header in root_dir.header_files:
        print prefix + pinc + 'h ' + header
    for proj_d in root_dir.proj_descendents:
        print prefix + pinc + 'p ' + proj_d
    for subfdir in root_dir.dirs:
        print_dirs(subfdir, prefix + pinc)
# Entry point: map the directory tree, then emit .pro files throughout it.
start_fdir = map_files(start_dir)
generate_pro_files(start_fdir)
|
17,142 | 7a8af5883308e11ea847aae30732613c9261b4e8 |
import re
def remove_ansi_color_from_string(input_string):
    """Strip ANSI CSI escape sequences (colors etc.) from *input_string*.

    Matches either the 8-bit CSI introducer (0x9B) or ESC-[ followed by
    parameter bytes (0x30-0x3F), intermediate bytes (0x20-0x2F) and a
    final byte (0x40-0x7E), per ECMA-48.

    :param input_string: text possibly containing escape sequences
    :return: the text with all CSI sequences removed
    """
    # Bug fix: the original pattern had a stray '[' inside the parameter
    # character class ('[[0-?]*'), which made it also swallow literal '['
    # characters following an introducer.
    ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]')
    return ansi_escape.sub('', input_string)
def is_valid_charts_yaml(content):
    """
    Check if 'content' contains mandatory keys
    :param content: parsed YAML file as list of dictionary of key values
    :return: True if dict contains mandatory values, else False
    """
    string_keys = ('chart_name', 'helm_repo_name', 'name_space', 'values_file')
    for chart in content:
        # Every mandatory key must be present with a non-None value.
        if any(chart.get(key) is None for key in string_keys + ('private_image',)):
            return False
        # These keys must be exactly str (subclasses rejected on purpose).
        if any(type(chart.get(key)) is not str for key in string_keys):
            return False
        # 'private_image' must be exactly bool.
        if type(chart.get('private_image')) is not bool:
            return False
        # 'extra_executes' must be exactly list.
        if type(chart.get('extra_executes')) is not list:
            return False
    return True
# Display attribute palette: (name, foreground, background[, mono setting]).
# NOTE(review): the attribute names ('standout', 'editbx', ...) look like an
# urwid palette — confirm against the consumer of this constant.
main_palette = [
    ('body', 'black', 'light gray', 'standout'),
    ('reverse', 'light gray', 'black'),
    ('header', 'white', 'dark red', 'bold'),
    ('footer', 'white', 'dark green', 'bold'),
    ('important', 'dark blue', 'light gray', ('standout', 'underline')),
    ('editfc', 'white', 'dark blue', 'bold'),
    ('editbx', 'light gray', 'dark blue'),
    ('editcp', 'black', 'light gray', 'standout'),
    ('bright', 'dark gray', 'light gray', ('bold', 'standout')),
    ('buttn', 'black', 'light blue'),
    ('buttnf', 'white', 'dark blue', 'bold'),
    ('errors', 'black', 'dark red'),
]
|
17,143 | 9d0d4c1de06c77fb2e14a449f7fa805f31b2c991 | print('I love {0} and {1}'.format('bread','butter'))
# Output: I love bread and butter
print('I love {1} and {0}'.format('bread','butter'))
# Output: I love butter and bread
|
class Rule:
    '''
    provides interface to query the raw representation of rules returned by the parser
    '''
    def __init__(self, head, body):
        '''
        head: a symbol to represent the head
        body: list of the body symbols in this format => [[('terminal', False), ('non-terminal', 'True')], [('terminal', False)]]
        '''
        self.head = head
        self.body = body
        self.terms = []
        # Populate the symbol-name cache eagerly.
        self.terms = self.get_terms()

    def set_terms(self, terms):
        """Replace the cached term list wholesale."""
        self.terms = terms

    def get_terms(self):
        """Return the body as plain symbol-name lists, computing them once."""
        if self.terms:
            return self.terms
        for term in self.body:
            # Strip each (name, is_nonterminal) pair down to its name.
            self.terms.append([symbol[0] for symbol in term])
        return self.terms

    def __repr__(self):
        out = self.head + '->['
        # Bug fix: the separator check used to compare term *values*
        # (self.terms[-1] != term), so any term equal to the last one —
        # e.g. a duplicated alternative — lost its ', ' separator.
        # Compare positions instead.
        for index, term in enumerate(self.terms):
            out += ''.join(term)
            if index != len(self.terms) - 1:
                out += ', '
        return out + ']'
|
17,145 | 06abb18f8545e839333cae66bd2745b6c61ce592 | # ะ ะตะฐะปะธะทะพะฒะฐัั ัะพัะผะธัะพะฒะฐะฝะธะต ัะฟะธัะบะฐ, ะธัะฟะพะปัะทัั ััะฝะบัะธั range() ะธ ะฒะพะทะผะพะถะฝะพััะธ ะณะตะฝะตัะฐัะพัะฐ.
# ะ ัะฟะธัะพะบ ะดะพะปะถะฝั ะฒะพะนัะธ ัะตัะฝัะต ัะธัะปะฐ ะพั 100 ะดะพ 1000 (ะฒะบะปััะฐั ะณัะฐะฝะธัั).
# ะะตะพะฑั
ะพะดะธะผะพ ะฟะพะปััะธัั ัะตะทัะปััะฐั ะฒััะธัะปะตะฝะธั ะฟัะพะธะทะฒะตะดะตะฝะธั ะฒัะตั
ัะปะตะผะตะฝัะพะฒ ัะฟะธัะบะฐ.
from functools import reduce
def total_number(current_number, next_number):
    """Reduction step: multiply the running product by the next value."""
    product = current_number * next_number
    return product
numbers = [i for i in range(100, 1001) if i % 2 == 0]
total = reduce(total_number, numbers)
print(f'\nะัะพะธะทะฒะตะดะตะฝะธะต ะฒัะตั
ัะตัะฝัั
ัะธัะตะป ะพั 100 ะดะพ 1000 - {total}.')
|
17,146 | 01186258c1d1232757540962ee67184e4fb1c7b7 | # Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START imports]
import datetime
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import mail
import webapp2
import utils
from settings import appSettings
from models import Owner, Birthday
class Summary(webapp2.RequestHandler):
    # Cron handler that mails owners a summary of their upcoming birthdays.
    # NOTE(review): source indentation was lost upstream; the block
    # structure below was reconstructed -- the summary build and the send
    # are placed AFTER the per-user loop, matching the `birthdays = []`
    # initializer.  Confirm against the original file.
    def get(self):
        # "MMDD" string used to split birthdays into "later this year"
        # vs. "wraps into next year" buckets.
        now = datetime.date.today()
        currentMonthDay = "%02d" % (now.month) + "%02d" % (now.day)
        # Distinct owner identities that have at least one Birthday entity.
        query = Birthday.query(projection=[Birthday.owner.identity], distinct=True)
        allUsers = query.fetch();
        birthdays = []
        for user in allUsers:
            # NOTE(review): ndb filters conventionally put the model
            # property on the left (Birthday.owner.identity == ...);
            # verify this reversed comparison filters as intended.
            q1 = Birthday.query(
                user.owner.identity == Birthday.owner.identity
            ).order(
                Birthday.monthday
            )
            # Birthdays still to come this calendar year...
            q2 = q1.filter(
                Birthday.monthday >= currentMonthDay
            )
            # ...followed by the ones that fall earlier (i.e. next year).
            q3 = q1.filter(
                Birthday.monthday < currentMonthDay
            )
            thisYearBDays = q2.fetch()
            nextYearBDays = q3.fetch()
            # NOTE(review): 'birthdays' is rebound (not extended) every
            # iteration, so after the loop only the LAST user's rows
            # remain and only one mail goes out.  Verify whether the
            # send below was meant to live inside this loop.
            birthdays = thisYearBDays + nextYearBDays
        body = "Up coming birthdays:...."
        for birthday in birthdays:
            # toEmail is overwritten per row; the final value wins.
            toEmail = birthday.owner.email
            body = body + birthday.firstName + birthday.lastName + "<br />"
            body = body + birthday.date.strftime("%B %d") + "<hr />"
        mail.send_mail(sender=appSettings["sender_address"],
                       to=toEmail,
                       subject="Your upcoming birthdays",
                       body=body)
        self.response.write("You have run birthdays cron job")
|
17,147 | 722e133e64d23d1858d97d96d9b5cced83ee796e | """ This is a solution to an exercise from
Think Python, 2nd Edition
by Allen Downey
http://thinkpython2.com
Copyright 2015 Allen Downey
License: http://creativecommons.org/licenses/by/4.0/
Exercise 12-4:
What is the longest English word, that remains a valid English word, as you remove its
letters one at a time?
Now, letters can be removed from either end, or the middle, but you canโt rearrange
any of the letters. Every time you drop a letter, you wind up with another English
word. If you do that, youโre eventually going to wind up with one letter and that too is
going to be an English wordโone thatโs found in the dictionary. I want to know whatโs
the longest word and how many letters does it have?
Iโm going to give you a little modest example: Sprite. Ok? You start off with sprite, you
take a letter off, one from the interior of the word, take the r away, and weโre left with
the word spite, then we take the e off the end, weโre left with spit, we take the s off,
weโre left with pit, it, and I.
"""
from exercise11_1 import to_dictionary
##TODO:
##Create fuction for finding largest word in a dictionary
def is_reducible(word, word_dict):
    """Return a truthy value if *word* can be shrunk to 'a' or 'i' by
    deleting one letter at a time, with every intermediate string a valid
    word in *word_dict*.

    Successes are memoized in the module-level ``reducible_words`` dict
    (word -> length), which must be initialized by the caller (see
    ``most_reducible``).

    Keyword Arguments:
    word: the word to be tested
    word_dict: a dictionary containing valid words"""
    # Cheap pruning: deletion never adds letters, so a reducible word must
    # already contain one of the one-letter words 'a' or 'i'.
    if "a" not in word and "i" not in word:
        return False
    # Memo hit (only successful reductions are memoized).
    if word in reducible_words:
        return True
    # Base case: the two single-letter English words.
    if word == 'a' or word == 'i':
        return True
    # Try deleting each position in turn and recurse on any deletion that
    # is still a valid word.
    word_length = len(word)
    for i in range(word_length):
        current_reduction = word[0:i] + word[i + 1:word_length]
        if current_reduction in word_dict and is_reducible(current_reduction, word_dict):
            reducible_words[word] = word_length
            return True
    # Fix: the original fell off the end and returned None; make the
    # negative result explicit (still falsy for existing callers).
    return False
def most_reducible(wordlist):
    """Takes in a wordlist file as input and prints the longest
    word that can be reduced into other words one letter at a time

    Keyword Arguments:
    wordlist: a string that represents the list of words we will
    look from
    """
    # We create a memo for reducible words since is_reducible is
    # recursive. The keys are the words and the values are the
    # number of characters
    global reducible_words
    reducible_words = dict()
    # Seed the memo with the two single-letter English words.
    reducible_words['a'], reducible_words['i'] = 1, 1
    # NOTE(review): to_dictionary comes from exercise11_1; only key
    # membership of its result is used here -- confirm it maps each word
    # in the file to an entry.
    word_dict = to_dictionary(wordlist)
    # Populate the memo by testing every word once.
    for line in word_dict:
        is_reducible(line, word_dict)
    # Varible that will search the memo for the longest word
    current_greatest = ''
    for word in reducible_words:
        if reducible_words[word] > len(current_greatest):
            current_greatest = word
    print(current_greatest)
if __name__ == "__main__":
most_reducible("words.txt")
|
17,148 | 5afdbb840eac93bfb7ac5a92f5ee1854a228d7ab | # -*- coding: utf-8 -*-
# @Time : 2021/2/3 17:32
# @Author : Jclian91
# @File : iris_model_predict_using_onnx_runtime_server.py
# @Place : Yangpu, Shanghai
import numpy as np
import assets.onnx_ml_pb2 as onnx_ml_pb2
import assets.predict_pb2 as predict_pb2
import requests
from sklearn.datasets import load_iris
# Create request message to be sent to the ORT server
# Build a 1x4 float32 tensor (TensorProto data_type 1 == FLOAT).
input_tensor = onnx_ml_pb2.TensorProto()
input_tensor.dims.extend([1, 4])
input_tensor.data_type = 1
# input_tensor.raw_data = np.array([[6.4, 2.8, 5.6, 2.1]], dtype=np.float32).tobytes()
input_tensor.raw_data = np.array([[5.7, 3.8, 1.7, 0.3]], dtype=np.float32).tobytes()
print(input_tensor)

request_message = predict_pb2.PredictRequest()
# Use Netron to find out the input name.
request_message.inputs["input"].data_type = input_tensor.data_type
request_message.inputs["input"].dims.extend(input_tensor.dims)
request_message.inputs["input"].raw_data = input_tensor.raw_data

content_type_headers = ['application/x-protobuf', 'application/octet-stream', 'application/vnd.google.protobuf']
# NOTE(review): indentation was lost upstream; the request/parse sequence
# is reconstructed as running once per accepted content type, i.e. the
# same inference is issued three times -- presumably a smoke test of the
# server's content negotiation.  Confirm against the original file.
for h in content_type_headers:
    request_headers = {
        'Content-Type': h,
        'Accept': 'application/x-protobuf'
    }
    # Inference run using ORT server
    # Change the number 9001 to the appropriate port number if you had changed it during ORT Server docker instantiation
    PORT_NUMBER = 9001  # Change appropriately if needed based on any changes when invoking the server in the pre-requisites
    inference_url = "http://192.168.4.193:" + str(PORT_NUMBER) + "/v1/models/default/versions/1:predict"
    response = requests.post(inference_url, headers=request_headers, data=request_message.SerializeToString())
    # Parse response message
    print(response.content)
    response_message = predict_pb2.PredictResponse()
    response_message.ParseFromString(response.content)
    # Use Netron to find out the outputs names.
    # NOTE(review): 'bboxes' duplicates 'output_data' below and is never
    # used -- likely left over from a detection-model example.
    bboxes = np.frombuffer(response_message.outputs['output'].raw_data, dtype=np.float32)
    print('Ouput Dim: ', response_message.outputs['output'].dims)
    output_data = np.frombuffer(response_message.outputs['output'].raw_data, dtype=np.float32)
    print('Output data: ', output_data)
    print("Predict Label: ", load_iris()['target_names'][np.argmax(output_data, axis=0)])
|
17,149 | 112f1aff2c3d9bfe5b17ea45fa3dd04d4f74f67a | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 7 13:46:24 2017
@author: lindseykitchell
"""
from parse_ev_files import parse_ev
from sklearn.cross_validation import KFold
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
import matplotlib.pyplot as plt
import numpy as np
# get path to files
path = '/Users/lindseykitchell/Box Sync/fiberVolumes/vol_norm_ev_files/'
#path = '/Users/lindseykitchell/Box Sync/fiberVolumes/normalized_ev_values/'
# parse the .ev files
shapeDNA_df, Eigenvalues, labels = parse_ev(path, 50)
Eigenvalues = Eigenvalues.tolist()
# testing RandomForestClassifier
# first use GridSearchCV to get best parameters
kf = KFold(len(labels), 10, shuffle=True)
parameters = {'n_estimators':[50,75,100,500], 'criterion': ('gini', 'entropy'), 'max_features': ('auto', 'log2', None), 'min_samples_leaf':[1,5,10,50,100,200,500]}
svr = RandomForestClassifier()
clf = GridSearchCV(svr, parameters, cv=kf, n_jobs=-1)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = RandomForestClassifier(n_estimators=60, min_samples_split=2, criterion='entropy', max_features='log2')
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print labels_test
print np.mean(acc)
# testing AdaBoost
# first use GridSearchCV to get best parameters
kf = KFold(len(labels), 10, shuffle=True)
parameters = {'n_estimators':[25,50,75,100,500]}
svr = AdaBoostClassifier()
clf = GridSearchCV(svr, parameters, cv=kf,)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = AdaBoostClassifier(n_estimators=50 )
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print np.mean(acc)
# testing SVM
kf = KFold(len(labels), 10, shuffle=True)
parameters = {'C':[1,2,3,4,5,10], 'probability':(True,False), 'decision_function_shape':('ovo','ovr',None)}
svr = SVC(kernel='rbf')
clf = GridSearchCV(svr, parameters, cv=kf,)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = SVC(kernel='rbf', C=3, probability=True, decision_function_shape='ovo')
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print np.mean(acc)
# testing KNN
kf = KFold(len(labels), 10, shuffle=True)
parameters = {'n_neighbors':[2,3,4,5], 'weights':('uniform', 'distance'), 'algorithm':('ball_tree','kd_tree','brute'),'leaf_size':[5,6,7,8],'p':[1,2]}
svr = KNeighborsClassifier()
clf = GridSearchCV(svr, parameters, cv=kf, n_jobs=-1)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = KNeighborsClassifier(n_neighbors=5, weights='uniform', leaf_size=5, algorithm='ball_tree', p=1)
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print np.mean(acc)
#testing CART
kf = KFold(len(labels), 10, shuffle=True)
parameters = {'criterion':('gini', 'entropy'), 'splitter': ('best', 'random'), 'max_features':('auto', 'sqrt', 'log2', None), 'min_samples_leaf':[1,5,7,10,20]}
svr = DecisionTreeClassifier()
clf = GridSearchCV(svr, parameters, cv=kf)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = DecisionTreeClassifier(max_features = 'log2', splitter='best', criterion = 'gini', min_samples_leaf = 10)
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print np.mean(acc)
# testing LDA
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
kf = KFold(len(labels), 10, shuffle=True)
parameters = {'shrinkage':('auto',None), 'n_components':[2,3,4,5,6,7,10,20,45], }
svr = LinearDiscriminantAnalysis(solver='lsqr')
clf = GridSearchCV(svr, parameters, cv=kf)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = LinearDiscriminantAnalysis(solver='lsqr', shrinkage='auto', n_components=2)
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print np.mean(acc)
# testing logistic regression
from sklearn.linear_model import LogisticRegression
kf = KFold(len(labels), 10, shuffle=True)
parameters = { 'C':[.2,.3,.5,.6,.7], 'fit_intercept':(True, False),'solver': ('newton-cg', 'lbfgs', 'sag'), 'multi_class':('ovr','multinomial'), 'warm_start':(True,False)}
#'penalty':('l1','l2'),'dual':(True, False),'multi_class':('ovr','multinomial')
svr = LogisticRegression()
clf = GridSearchCV(svr, parameters, cv=kf)
clf.fit(Eigenvalues, labels)
print clf.best_params_
print clf.best_score_
#print clf.grid_scores_
# Then test again with the best parameters
clf = LogisticRegression(C=.5, solver='newton-cg', fit_intercept=True, multi_class='ovr', warm_start=True)
kf = KFold(len(labels), 10, shuffle = True)
acc = []
for train_indices, test_indices in kf:
data_train = [Eigenvalues[ii] for ii in train_indices]
data_test = [Eigenvalues[ii] for ii in test_indices]
labels_train = [labels[ii] for ii in train_indices]
labels_test = [labels[ii] for ii in test_indices]
clf.fit(data_train, labels_train)
pred = clf.predict(data_test)
acc.append(clf.score(data_test, labels_test))
print np.mean(acc)
from sklearn.manifold import TSNE
# 2-D t-SNE embedding of the eigenvalue vectors, scatter-plotted with
# class coloring (red = label 1, blue = everything else).
model = TSNE(n_components=2, random_state=0)
tsne_transf = model.fit_transform(Eigenvalues)
for i in range(len(labels)):
    if labels[i] == 1:
        plt.plot(tsne_transf[i][0],tsne_transf[i][1],'ro')
    else:
        plt.plot(tsne_transf[i][0],tsne_transf[i][1],'bo')
plt.show()
# cross validation one way
data_train, data_test, labels_train, labels_test = train_test_split(
Eigenvalues, labels, test_size=0.2, random_state=0)
from sklearn.grid_search import GridSearchCV
kf = KFold(len(labels), 10, shuffle = True)
parameters = {'n_estimators':[10,15,20,30], 'criterion': ('gini', 'entropy'), 'max_features': ('auto', 'log2', None)}
svr = RandomForestClassifier()
clf = GridSearchCV(svr, parameters, cv=kf)
clf.fit(Eigenvalues, labels)
clf = DecisionTreeClassifier()
clf.fit(data_train, labels_train)
clf.predict(data_test)
clf.score(data_test, labels_test)
clf = RandomForestClassifier()
clf.fit(data_train, labels_train)
clf.predict(data_test)
clf.score(data_test, labels_test)
clf = AdaBoostClassifier()
clf.fit(data_train, labels_train)
clf.predict(data_test)
clf.score(data_test, labels_test)
# using PCA
#
pca = PCA()
pca = pca.fit(data_train)
train_transf = pca.transform(data_train)
for e in range(5):
for i in range(len(train_transf)):
plt.plot(train_transf[i][e],train_transf[i][e+1],'ro')
plt.show()
test_transf = pca.transform(data_test)
clf = DecisionTreeClassifier()
clf.fit(train_transf, labels_train)
clf.predict(test_transf)
clf.score(test_transf, labels_test)
clf = RandomForestClassifier()
clf.fit(train_transf, labels_train)
clf.predict(test_transf)
clf.score(test_transf, labels_test)
clf = AdaBoostClassifier()
clf.fit(train_transf, labels_train)
clf.predict(test_transf)
clf.score(test_transf, labels_test)
|
17,150 | bc06cafa5b1112a40b93df35c51362146d57b5d3 | #
# Copyright (C) 2017 Murata Manufacturing Co.,Ltd.
#
##
# @brief AP main function.
# @author E2N3
# @date 2018.11.09
# -*- coding: utf-8 -*-
import json
import sys
import threading
from Debug import Debug_GetObj
from CLS_Define import COM_DEF
from tx_snd import snd_rsp_cmd
##
# @brief Identify the command ID and call the method of the AP_FUNC class.
# @param cls_ap_func AP function class (class object)
# @param l_com_hdr_info command header parameter
# @param d_tlv_param tlv parameter \n
# ["SecurityType"] Security type
# @retval d_ap_rply response data \n
# ["Result"] value of the result \n
# - Success : COM_DEF.i_RET_SUCCESS \n
# - Failure : Value other than COM_DEF.i_RET_SUCCESS
def call_apfunc(cls_ap_func, l_com_hdr_info, d_tlv_param):
    """Identify the command ID and invoke the matching AP_FUNC method.

    Replaces the original 16-branch if/elif chain with a command-ID ->
    handler dispatch table; every log message and result code is kept
    byte-identical.  NOTE(review): assumes the COM_DEF.i_CMD_* constants
    are distinct values, as command identifiers should be.
    """
    d_ap_rply = {}
    # Get debug info
    Dbg = Debug_GetObj(COM_DEF.i_MODULE_AP)
    Dbg.log(COM_DEF.TRACE, "[S] call_apfunc")
    # get command id
    i_cmd_id = l_com_hdr_info[0][2]
    Dbg.log(COM_DEF.DEBUG,
            "[0x%04x] COMMAND : 0x%04x"
            % (l_com_hdr_info[0][1], i_cmd_id))
    if COM_DEF.i_CMD_SetSecurity == i_cmd_id:
        # SetSecurity fans out a second time on the SecurityType TLV.
        d_sec_dispatch = {
            COM_DEF.i_SecurityType_Open: cls_ap_func.open,
            COM_DEF.i_SecurityType_Wep: cls_ap_func.wep,
            COM_DEF.i_SecurityType_Wpa: cls_ap_func.wpa,
        }
        if "SecurityType" in d_tlv_param:
            f_sec = d_sec_dispatch.get(d_tlv_param["SecurityType"])
            if f_sec is not None:
                d_ap_rply = f_sec(l_com_hdr_info, d_tlv_param)
            else:
                Dbg.log(COM_DEF.ERROR,
                        "Security Type Err !! : " +
                        str(d_tlv_param["SecurityType"]))
                d_ap_rply["Result"] = COM_DEF.i_RET_TLV_ABNORMAL
        else:
            Dbg.log(COM_DEF.ERROR,
                    "Security Type parameter is nothing !! ")
            d_ap_rply["Result"] = COM_DEF.i_RET_TLV_ABNORMAL
    else:
        # Plain commands: one handler per command ID.
        d_dispatch = {
            COM_DEF.i_CMD_Attach: cls_ap_func.attach,
            COM_DEF.i_CMD_SetCurrentTime: cls_ap_func.date,
            COM_DEF.i_CMD_SetSsid: cls_ap_func.ssid,
            COM_DEF.i_CMD_SetChannel: cls_ap_func.channel,
            COM_DEF.i_CMD_SetCountryCode: cls_ap_func.country,
            COM_DEF.i_CMD_SetStealthMode: cls_ap_func.stealth,
            COM_DEF.i_CMD_SetRadioOutput: cls_ap_func.controlbss,
            COM_DEF.i_CMD_GetStaList: cls_ap_func.stalist,
            COM_DEF.i_CMD_SetConnectionLimit: cls_ap_func.limit,
            COM_DEF.i_CMD_Set11n: cls_ap_func.control11n,
            COM_DEF.i_CMD_Detach: cls_ap_func.detach,
            COM_DEF.i_CMD_TestReady: cls_ap_func.test_ready,
            COM_DEF.i_CMD_StartDhcpd: cls_ap_func.dhcpd,
            COM_DEF.i_CMD_SetIpInfo: cls_ap_func.setipinfo,
            COM_DEF.i_CMD_GetIpInfo: cls_ap_func.getipinfo,
        }
        f_cmd = d_dispatch.get(i_cmd_id)
        if f_cmd is not None:
            d_ap_rply = f_cmd(l_com_hdr_info, d_tlv_param)
        else:
            Dbg.log(COM_DEF.ERROR,
                    "[0x%04x] command 0x%04x not supported"
                    % (l_com_hdr_info[0][1], i_cmd_id))
            d_ap_rply["Result"] = COM_DEF.i_RET_COMHDR_ABNORMAL
    Dbg.log(COM_DEF.TRACE, "[E] call_apfunc")
    return d_ap_rply
##
# @brief It receives the queue notification from the common reception thread
# and calls the AP_FUNC class method. \n
# Receive the result and send the response.
# @param que_main queue used by this module and main_ctrl_thread
# (queue class object)
# @param cls_soc socket used for sending response command to MC
# (clas object)
# @param s_device_type device type
# @param s_host MC IP Address
# @param s_model_name AP model name. (AP folder name)
# @retval None
def ap_ctrl_thread(que_main, cls_soc, s_device_type, s_host, s_model_name):
    """Service loop: receive decoded commands from the common receive
    thread via *que_main*, run them through call_apfunc and send the
    response back over *cls_soc*.  Never returns.
    """
    d_ap_rply = {}
    # Get debug info
    Dbg = Debug_GetObj(COM_DEF.i_MODULE_AP)
    Dbg.log(COM_DEF.TRACE, "[S] ap_ctrl_thread")
    # Read the environment file to learn which concrete AP driver to load.
    # ('with' guarantees the handle is closed even if json.load raises.)
    s_env_file = "./device/AP/env.json"
    with open(s_env_file, 'r') as fp:
        d_env_data = json.load(fp)
    s_ap_name = d_env_data["DeviceName"]
    Dbg.log(COM_DEF.INFO, "Device Name : " + s_ap_name)
    # Late import: the device folder is only known at runtime.
    sys.path.append("./device/AP/" + s_ap_name + "/")
    from Control import AP_FUNC
    # Get API function
    cls_ap_func = AP_FUNC(cls_soc, s_host, s_model_name)
    while True:
        Dbg.log(COM_DEF.INFO, "wait queue...")
        # Block until the next decoded command arrives.
        l_decode_data = que_main.get()
        Dbg.log(COM_DEF.TRACE, "queue get data")
        # get comhdr param
        l_com_hdr_info = l_decode_data[0][0]
        # get tlv param
        d_tlv_param = l_decode_data[0][1]
        d_ap_rply = call_apfunc(cls_ap_func, l_com_hdr_info, d_tlv_param)
        # send response command
        snd_rsp_cmd(l_com_hdr_info, d_ap_rply, cls_soc,
                    COM_DEF.i_MODULE_AP, s_device_type)
    # Unreachable: the service loop above never exits (kept from original).
    Dbg.log(COM_DEF.TRACE, "[E] ap_ctrl_thread")
##
# @brief Start module main process.
# @param que_main queue used by this module and main_ctrl_thread
# (queue class object)
# @param cls_soc socket used for sending response command to MC
# (clas object)
# @param s_device_type device type
# @param d_DevConfigInfo device configuration info
# @retval None
def module_start(que_main, cls_soc, s_device_type, d_DevConfigInfo):
    """Spawn the AP control thread as a daemon.

    :param que_main: queue shared with main_ctrl_thread
    :param cls_soc: socket used for sending response commands to MC
    :param s_device_type: device type
    :param d_DevConfigInfo: device configuration info ('ExHost',
                            'ModelName' keys are read here)
    """
    s_host = d_DevConfigInfo["ExHost"]
    s_model_name = d_DevConfigInfo["ModelName"]
    ap_th = threading.Thread(target=ap_ctrl_thread,
                             args=(que_main, cls_soc, s_device_type,
                                   s_host, s_model_name, ),
                             name="AP_main")
    # daemon=True so this worker never blocks interpreter shutdown;
    # attribute assignment replaces the deprecated setDaemon().
    ap_th.daemon = True
    ap_th.start()
|
17,151 | 2ba03c3cfd870e00e51bf4009ed6f10038127c6f | from cymbology.identifiers.sedol import Sedol
from cymbology.identifiers.cusip import Cusip, cusip_from_isin
from cymbology.identifiers.isin import Isin
__all__ = ('Sedol', 'Cusip', 'cusip_from_isin', 'Isin')
|
17,152 | 8ee949a5f003b98d07eb5d37bff7dd66ba43670f | import warnings
from pyod.models.iforest import IForest
warnings.filterwarnings('ignore', category=FutureWarning)
import argparse
import datetime
import gc
import os
import sys
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import keras
from pyod.models.auto_encoder import AutoEncoder
from imblearn.under_sampling import RandomUnderSampler
from imblearn.over_sampling import RandomOverSampler, SMOTE
from matplotlib.patches import Ellipse
from pyod.models.lof import LOF
from pyod.models.ocsvm import OCSVM
from scipy.stats import multivariate_normal
from sklearn import preprocessing
from sklearn.metrics import precision_score, recall_score, roc_auc_score, classification_report, fbeta_score, roc_curve
from sklearn.model_selection import train_test_split, StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.mixture import GaussianMixture
from xgboost import XGBClassifier
# Class labels for the binary fraud target.
NOT_FRAUD = 0
FRAUD = 1
# Identifiers for the learning regimes compared in this script.
SUPERVISED = 2
SEMI_SUPERVISED = 3
UNSUPERVISED = 4
def log(logfile, s):
    """ Log a string into a file and print it. """
    # Append one line per call, then echo to stdout.
    with open(logfile, 'a', encoding='utf8') as handle:
        handle.write(str(s))
        handle.write("\n")
    print(s)
def get_args_parser():
    """Build the CLI argument parser for the fraud-detection experiments.

    :return: an ``argparse.ArgumentParser``.  Numeric options now carry
             explicit ``type=`` converters so command-line overrides
             arrive as int/float (matching their defaults) instead of
             raw strings.
    """
    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument(
        "-d",
        "--dataset",
        default="creditcard",
        help="Name of the dataset to use: creditcard, ieee."
    )
    parser.add_argument(
        "-m",
        "--method",
        default="ocSVM",
        help="Name of the outlier detection method: ocSVM, LOF, twoClass."
    )
    parser.add_argument(
        "-s",
        "--seed",
        type=int,  # fix: was untyped, so CLI values came through as str
        default=1910299034,
        help="Random seed."
    )
    parser.add_argument(
        "-od",
        "--outdir",
        default='results/'
    )
    parser.add_argument(
        "-fd",
        "--fraud_ratio",
        type=float,  # fix: was untyped, so CLI values came through as str
        default=0.1,
        help="Desired ratio of fraud datapoints."
    )
    parser.add_argument(
        "-p",
        "--sampling",
        default="under",
        help="Sampling: under, over, smote"
    )
    return parser
####
# Function found at https://www.kaggle.com/jesucristo/fraud-complete-eda
####
def reduce_memory_usage(props):
    """Downcast DataFrame columns in place to shrink memory usage.

    Numeric columns are narrowed to the smallest (u)int32/64 or float32
    dtype that preserves their values; object columns get missing values
    replaced by the string 'NaN'.

    :param props: pandas DataFrame to compress (mutated in place)
    :return: (props, NAlist) where NAlist names the columns whose missing
             values were filled with column-min - 1 as a sentinel
    """
    start_mem_usg = props.memory_usage().sum() / 1024 ** 2
    print("Memory usage of properties dataframe is :", start_mem_usg, " MB")
    NAlist = []  # Keeps track of columns that have missing values filled in.
    for col in props.columns:
        if props[col].dtype != object:  # Exclude strings
            # Print current column type
            print("******************************")
            print("Column: ", col)
            print("dtype before: ", props[col].dtype)
            # make variables for Int, max and min
            IsInt = False
            mx = props[col].max()
            mn = props[col].min()
            # Integer does not support NA, therefore, NA needs to be filled
            # (sentinel: one below the column minimum; recorded in NAlist)
            if not np.isfinite(props[col]).all():
                NAlist.append(col)
                props[col].fillna(mn - 1, inplace=True)
            # test if column can be converted to an integer
            # (round-trip through int64 and check the residual is ~zero)
            asint = props[col].fillna(0).astype(np.int64)
            result = (props[col] - asint)
            result = result.sum()
            if result > -0.01 and result < 0.01:
                IsInt = True
            # Make Integer/unsigned Integer datatypes
            if IsInt:
                if mn >= 0:
                    # if mx < 255:
                    #     props[col] = props[col].astype(np.uint8)
                    # elif mx < 65535:
                    #     props[col] = props[col].astype(np.uint16)
                    if mx < 4294967295:
                        props[col] = props[col].astype(np.uint32)
                    else:
                        props[col] = props[col].astype(np.uint64)
                else:
                    # if mn > np.iinfo(np.int8).min and mx < np.iinfo(np.int8).max:
                    #     props[col] = props[col].astype(np.int8)
                    # elif mn > np.iinfo(np.int16).min and mx < np.iinfo(np.int16).max:
                    #     props[col] = props[col].astype(np.int16)
                    if mn > np.iinfo(np.int32).min and mx < np.iinfo(np.int32).max:
                        props[col] = props[col].astype(np.int32)
                    elif mn > np.iinfo(np.int64).min and mx < np.iinfo(np.int64).max:
                        props[col] = props[col].astype(np.int64)
            # Make float datatypes 32 bit
            else:
                props[col] = props[col].astype(np.float32)
            # Print new column type
            print("dtype after: ", props[col].dtype)
            print("******************************")
        else:
            # Object column: make missing values an explicit 'NaN' string.
            props[col] = props[col].fillna('NaN')
    # Print final result
    print("___MEMORY USAGE AFTER COMPLETION:___")
    mem_usg = props.memory_usage().sum() / 1024 ** 2
    print("Memory usage is: ", mem_usg, " MB")
    print("This is ", 100 * mem_usg / start_mem_usg, "% of the initial size")
    return props, NAlist
####
# Function found at https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html
####
def draw_ellipse(position, covariance, ax=None, **kwargs):
    """Draw an ellipse with a given position and covariance"""
    axes = ax or plt.gca()
    # A full 2x2 covariance matrix yields tilted principal axes (via its
    # SVD); any other shape is treated as axis-aligned variances.
    if covariance.shape == (2, 2):
        rotation, singular_values, _ = np.linalg.svd(covariance)
        angle = np.degrees(np.arctan2(rotation[1, 0], rotation[0, 0]))
        width, height = 2 * np.sqrt(singular_values)
    else:
        angle = 0
        width, height = 2 * np.sqrt(covariance)
    # Draw nested 1-, 2- and 3-sigma shells.
    for nsig in (1, 2, 3):
        axes.add_patch(Ellipse(position, nsig * width, nsig * height,
                               angle, **kwargs))
####
# Function adapted from https://jakevdp.github.io/PythonDataScienceHandbook/05.12-gaussian-mixtures.html
####
def plot_gmm(gmm, X, label=True, ax=None, outdir=None):
    """Fit *gmm* on X, scatter the V17/V14 columns and overlay the mixture
    components as confidence ellipses; saves the figure under *outdir*
    and returns True.

    NOTE(review): outdir defaults to None but is string-concatenated with
    the file name below -- calling without outdir raises TypeError.
    Confirm whether the default should be '' or the argument required.
    """
    ax = ax or plt.gca()
    ax.set_title("GMM Plot: V17, V14")
    # Cluster assignment per sample (used only to color the points).
    labels = gmm.fit(X).predict(X)
    if label:
        ax.scatter(X['V17'], X['V14'], c=labels, s=40, cmap='viridis', zorder=2)
    else:
        ax.scatter(X['V17'], X['V14'], s=40, zorder=2)
    ax.axis('equal')
    # Shade each component by its mixture weight.
    w_factor = 0.75 / gmm.weights_.max()
    for pos, covar, w in zip(gmm.means_, gmm.covariances_, gmm.weights_):
        # Project the component mean/covariance onto dimensions 17 and 14
        # (the V17/V14 axes of the feature space).
        draw_ellipse(pos[np.ix_([17, 14])], covar[np.ix_([17, 14], [17, 14])], alpha=w * w_factor)
    plt.savefig(outdir + 'gmm_v17_v14.png', bbox_inches='tight')
    plt.clf()
    return True
def group_emails(df_train, df_test):
    """Add provider-group and suffix columns for both e-mail domain fields.

    Mutates df_train and df_test in place: for each of 'P_emaildomain'
    and 'R_emaildomain' adds '<col>_bin' (provider group from the lookup
    table below) and '<col>_suffix' (last dot-separated token, with
    US-style suffixes collapsed to 'us').
    """
    emails = {'gmail': 'google', 'att.net': 'att', 'twc.com': 'spectrum',
              'scranton.edu': 'other', 'optonline.net': 'other', 'hotmail.co.uk': 'microsoft',
              'comcast.net': 'other', 'yahoo.com.mx': 'yahoo', 'yahoo.fr': 'yahoo',
              'yahoo.es': 'yahoo', 'charter.net': 'spectrum', 'live.com': 'microsoft',
              'aim.com': 'aol', 'hotmail.de': 'microsoft', 'centurylink.net': 'centurylink',
              'gmail.com': 'google', 'me.com': 'apple', 'earthlink.net': 'other', 'gmx.de': 'other',
              'web.de': 'other', 'cfl.rr.com': 'other', 'hotmail.com': 'microsoft',
              'protonmail.com': 'other', 'hotmail.fr': 'microsoft', 'windstream.net': 'other',
              'outlook.es': 'microsoft', 'yahoo.co.jp': 'yahoo', 'yahoo.de': 'yahoo',
              'servicios-ta.com': 'other', 'netzero.net': 'other', 'suddenlink.net': 'other',
              'roadrunner.com': 'other', 'sc.rr.com': 'other', 'live.fr': 'microsoft',
              'verizon.net': 'yahoo', 'msn.com': 'microsoft', 'q.com': 'centurylink',
              'prodigy.net.mx': 'att', 'frontier.com': 'yahoo', 'anonymous.com': 'other',
              'rocketmail.com': 'yahoo', 'sbcglobal.net': 'att', 'frontiernet.net': 'yahoo',
              'ymail.com': 'yahoo', 'outlook.com': 'microsoft', 'mail.com': 'other',
              'bellsouth.net': 'other', 'embarqmail.com': 'centurylink', 'cableone.net': 'other',
              'hotmail.es': 'microsoft', 'mac.com': 'apple', 'yahoo.co.uk': 'yahoo', 'netzero.com': 'other',
              'yahoo.com': 'yahoo', 'live.com.mx': 'microsoft', 'ptd.net': 'other', 'cox.net': 'other',
              'aol.com': 'aol', 'juno.com': 'other', 'icloud.com': 'apple'}
    us_emails = ['gmail', 'net', 'edu']
    # https://www.kaggle.com/c/ieee-fraud-detection/discussion/100499#latest-579654
    for c in ['P_emaildomain', 'R_emaildomain']:
        bin_col = c + '_bin'
        suffix_col = c + '_suffix'
        # Provider group (unknown domains map to NaN).
        df_train[bin_col] = df_train[c].map(emails)
        df_test[bin_col] = df_test[c].map(emails)
        # Raw TLD: last dot-separated token of the domain string.
        df_train[suffix_col] = df_train[c].map(lambda x: str(x).split('.')[-1])
        df_test[suffix_col] = df_test[c].map(lambda x: str(x).split('.')[-1])
        # Collapse US-style suffixes to the single token 'us'.
        df_train[suffix_col] = df_train[suffix_col].map(lambda x: x if str(x) not in us_emails else 'us')
        df_test[suffix_col] = df_test[suffix_col].map(lambda x: x if str(x) not in us_emails else 'us')
###
# Feature Engineering : https://www.kaggle.com/artgor/eda-and-models#Feature-engineering
###
def feature_engineering(df_train, df_test, logfile):
    """Add ratio features and e-mail-domain splits, then drop uninformative columns.

    For both frames this adds, for each (value, grouping) pair below, the
    ratios value / group-mean and value / group-std; splits the two e-mail
    domain columns into up to three dot-separated parts; and finally drops
    every column that is near-constant, >90% null, or >90% a single value
    in either frame.  Returns the transformed (df_train, df_test), each
    sorted by 'TransactionDT'.
    """

    def _add_ratio_features(df, value_col, group_cols):
        # value / per-group mean and value / per-group std, one new column
        # per grouping key; column order matches the original implementation
        # (all means first, then all stds).
        for stat in ('mean', 'std'):
            for group_col in group_cols:
                new_col = '{}_to_{}_{}'.format(value_col, stat, group_col)
                df[new_col] = df[value_col] / df.groupby([group_col])[value_col].transform(stat)

    def _uninformative_cols(df):
        # Columns carrying (almost) no signal: constant, mostly missing, or
        # dominated by a single value.
        one_value = [col for col in df.columns if df[col].nunique() <= 1]
        many_null = [col for col in df.columns
                     if df[col].isnull().sum() / df.shape[0] > 0.9]
        big_top_value = [col for col in df.columns
                         if df[col].value_counts(dropna=False, normalize=True).values[0] > 0.9]
        return one_value + many_null + big_top_value

    for df in (df_train, df_test):
        _add_ratio_features(df, 'TransactionAmt', ['card1', 'card4'])
        _add_ratio_features(df, 'id_02', ['card1', 'card4'])
        _add_ratio_features(df, 'D15', ['card1', 'card4'])
        _add_ratio_features(df, 'D15', ['addr1', 'addr2'])
        # Split the e-mail domains into up to three dot-separated levels.
        df[['P_emaildomain_1', 'P_emaildomain_2', 'P_emaildomain_3']] = \
            df['P_emaildomain'].str.split('.', expand=True)
        df[['R_emaildomain_1', 'R_emaildomain_2', 'R_emaildomain_3']] = \
            df['R_emaildomain'].str.split('.', expand=True)

    cols_to_drop = list(set(_uninformative_cols(df_train) + _uninformative_cols(df_test)))
    # FIX: the original called cols_to_drop.remove('isFraud') unconditionally,
    # which raises ValueError whenever the label column does not happen to
    # qualify for dropping.
    if 'isFraud' in cols_to_drop:
        cols_to_drop.remove('isFraud')
    log(logfile, "Number of columns to drop: " + str(len(cols_to_drop)))
    log(logfile, "Columns to drop: " + str(cols_to_drop))
    df_train = df_train.sort_values('TransactionDT').drop(cols_to_drop, axis=1, errors='ignore')
    df_test = df_test.sort_values('TransactionDT').drop(cols_to_drop, axis=1, errors='ignore')
    return df_train, df_test
def experiments(config_file):
    """Run the fraud-detection benchmark described by *config_file*.

    Loads one of two datasets ('creditcard' or 'ieee'), builds supervised /
    semi-supervised / unsupervised detectors, cross-validates them with the
    configured resampler, then evaluates on a held-out test split, writing
    logs, confusion matrices, ROC curves and (for ieee) a Kaggle submission
    into a timestamped output directory.
    """
    warnings.filterwarnings('ignore')
    # Save cwd path
    cwd = str(Path.cwd())
    # Parse arguments (argparse '@file' syntax: one argument per line)
    args = get_args_parser().parse_args(['@' + config_file])
    # Set seed
    np.random.seed(int(args.seed))
    # Construct output directory
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    outdir = args.outdir + str(args.dataset) + "/" + timestamp + '/'
    # Create results directory
    outdir_path = Path(outdir)
    if not outdir_path.is_dir():
        os.makedirs(outdir)
    # Logging
    logfile = outdir + 'log.txt'
    log(logfile, "Directory " + outdir + " created.")
    # Set dataset
    if str(args.dataset) == 'creditcard':
        dataset_name = 'Credit Card'
        # Read data
        df = pd.read_csv('{}/data/creditcard/creditcard.csv'.format(cwd))
        # Normalize 'Amount' column
        df['Amount'] = StandardScaler().fit_transform(df['Amount'].values.reshape(-1, 1))
        # Normalize 'Time' column
        df['Time'] = StandardScaler().fit_transform(df['Time'].values.reshape(-1, 1))
        # Set features and labels
        X = df.drop(['Class'], axis=1)
        y = df['Class']
        # Split dataframe by class
        df_NF = df[df.Class == NOT_FRAUD]
        df_F = df[df.Class == FRAUD]
        # Split by data and labels
        X_NF, X_F = df_NF.drop(['Class'], axis=1), df_F.drop(['Class'], axis=1)
        y_NF, y_F = df_NF['Class'], df_F['Class']
        # Fraction of fraud rows, passed as the detectors' contamination prior
        contamination = len(y_F) / (len(y_F) + len(y_NF))
        ### SUPERVISED, UNSUPERVISED ###
        # Split 80-20
        X_train_F_NF, X_test_F_NF, y_train_F_NF, y_test_F_NF = train_test_split(X, y,
                                                                                test_size=0.5,  # 0.5 for OCSVM
                                                                                random_state=int(args.seed))
        ### SEMI-SUPERVISED ###
        # Split 80-20 NF
        X_train_NF, X_cv_test_NF, y_train_NF, y_cv_test_NF = train_test_split(X_NF, y_NF,
                                                                              test_size=0.2,
                                                                              random_state=int(args.seed))
        # Split extra NF for cross validation and testing
        X_cv_NF, X_test_NF, y_cv_NF, y_test_NF = train_test_split(X_cv_test_NF, y_cv_test_NF,
                                                                  test_size=0.5,
                                                                  random_state=int(args.seed))
        # Split 50-50 F data
        X_cv_F, X_test_F, y_cv_F, y_test_F = train_test_split(X_F, y_F, test_size=0.5, random_state=int(args.seed))
        # Build threshold cross validation and testing sets
        X_cv = np.vstack([X_cv_NF, X_cv_F])
        y_cv = np.hstack([y_cv_NF, y_cv_F])
        X_test = np.vstack([X_test_NF, X_test_F])
        y_test = np.hstack([y_test_NF, y_test_F])
        X_test_df = pd.concat([X_test_NF, X_test_F])
    elif str(args.dataset) == 'ieee':
        # Read data
        dataset_name = 'ieee'
        df_train_transaction = pd.read_csv('{}/data/ieee/train_transaction.csv'.format(cwd), index_col='TransactionID')
        # nrows=n_rows)
        df_train_identity = pd.read_csv('{}/data/ieee/train_identity.csv'.format(cwd), index_col='TransactionID')
        # nrows=n_rows)
        df_test_transaction = pd.read_csv('{}/data/ieee/test_transaction.csv'.format(cwd), index_col='TransactionID')
        # nrows=n_rows)
        df_test_identity = pd.read_csv('{}/data/ieee/test_identity.csv'.format(cwd), index_col='TransactionID')
        # nrows=n_rows)
        df_submission = pd.read_csv('{}/data/ieee/sample_submission.csv'.format(cwd), index_col='TransactionID')
        # nrows=n_rows)
        # Match both columns (test identity columns are renamed to the train names)
        fix = {o: n for o, n in zip(df_test_identity.columns, df_train_identity.columns)}
        df_test_identity.rename(columns=fix, inplace=True)
        # Merge and set train, test
        df_train = df_train_transaction.merge(df_train_identity, how='left', left_index=True, right_index=True,
                                              on='TransactionID')
        df_test = df_test_transaction.merge(df_test_identity, how='left', left_index=True, right_index=True,
                                            on='TransactionID')
        log(logfile, f'There are {df_train.isnull().any().sum()} columns in train dataset with missing values.')
        log(logfile, 'Train shape: ' + str(df_train.shape) + ', Test shape: ' + str(df_test.shape))
        # Group emails: https://www.kaggle.com/kabure/extensive-eda-and-modeling-xgb-hyperopt
        group_emails(df_train, df_test)
        # Normalize D columns: https://www.kaggle.com/cdeotte/xgb-fraud-with-magic-0-9600
        for i in range(1, 16):
            if i in [1, 2, 3, 5, 9]: continue
            df_train['D' + str(i)] = df_train['D' + str(i)] - df_train.TransactionDT / np.float32(24 * 60 * 60)
            df_test['D' + str(i)] = df_test['D' + str(i)] - df_test.TransactionDT / np.float32(24 * 60 * 60)
        # Feature engineering
        df_train, df_test = feature_engineering(df_train, df_test, logfile)
        # Cleaning infinite values to NaN by https://www.kaggle.com/dimartinot
        df_train = df_train.replace([np.inf, -np.inf], np.nan)
        df_test = df_test.replace([np.inf, -np.inf], np.nan)
        # Reduce memory usage
        df_train, _ = reduce_memory_usage(df_train)
        df_test, _ = reduce_memory_usage(df_test)
        # Encoding categorical features (label-encoded over train+test jointly)
        for f in df_train.drop('isFraud', axis=1).columns:
            if str(df_train[f].dtype) == 'object' or str(df_test[f].dtype) == 'object':
                lbl = preprocessing.LabelEncoder()
                lbl.fit(list(df_train[f].values) + list(df_test[f].values))
                df_train[f] = lbl.transform(list(df_train[f].values))
                df_test[f] = lbl.transform(list(df_test[f].values))
        # Free objects
        del df_train_transaction, df_train_identity, df_test_transaction, df_test_identity
        print('Train shape', df_train.shape, 'test shape', df_test.shape)
        ### SUPERVISED ###
        # Set features and labels
        X_test = df_test.drop(['isFraud', 'TransactionDT', 'TransactionID'], axis=1, errors='ignore')
        X_test_df = X_test  # For compatibility with other if clauses at Testing stage
        # Split dataframe by class
        df_train_NF = df_train[df_train.isFraud == NOT_FRAUD]
        df_train_F = df_train[df_train.isFraud == FRAUD]
        # Free objects
        del df_train, df_test
        # Collect garbage
        gc.collect()
        # Split by data and labels
        X_train_NF = df_train_NF.drop(['isFraud', 'TransactionDT', 'TransactionID'], axis=1, errors='ignore')
        X_train_F = df_train_F.drop(['isFraud', 'TransactionDT', 'TransactionID'], axis=1, errors='ignore')
        y_train_NF, y_train_F = df_train_NF['isFraud'], df_train_F['isFraud']
        contamination = len(y_train_F) / (len(y_train_F) + len(y_train_NF))
        ### SEMI-SUPERVISED ###
        # Split 80-20 NF for cross validation and testing
        X_train_NF, X_cv_NF, y_train_NF, y_cv_NF = train_test_split(X_train_NF, y_train_NF,
                                                                    test_size=0.5,
                                                                    random_state=int(args.seed))
        # Split 50-50 F data
        X_train_F, X_cv_F, y_train_F, y_cv_F = train_test_split(X_train_F, y_train_F, test_size=0.5,
                                                                random_state=int(args.seed))
        # Build cross validation and testing sets
        # X_train_F_NF = np.vstack([X_train_NF, X_train_F])
        X_train_F_NF = pd.concat([X_train_NF, X_train_F])  # for XGBoost works with df
        # y_train_F_NF = np.hstack([y_train_NF, y_train_F])
        y_train_F_NF = pd.concat([y_train_NF, y_train_F])
        X_cv = np.vstack([X_cv_NF, X_cv_F])
        y_cv = np.hstack([y_cv_NF, y_cv_F])
    else:
        # NOTE(review): raising a plain string is a TypeError at runtime;
        # should be e.g. `raise ValueError("Dataset not found")`.
        raise ("Dataset not found")
    # Set methods: (display name, estimator, supervision level)
    methods = (
        ('IForest', IForest(contamination=contamination, n_jobs=-1, random_state=int(args.seed), verbose=1),
         UNSUPERVISED),
        ('LOF', LOF(n_neighbors=3, contamination=contamination, n_jobs=-1), UNSUPERVISED),
        ('OCSVM', OCSVM(kernel='linear', contamination=contamination, tol=.1, verbose=True, cache_size=2000),
         UNSUPERVISED),
        ('GaussianMixture',
         GaussianMixture(n_components=2, covariance_type='full', random_state=int(args.seed)),
         SEMI_SUPERVISED),
        ('XGBClassifier', XGBClassifier(max_depth=9, n_jobs=-1, verbosity=3, random_state=int(args.seed)), SUPERVISED),
    )
    methods = list(methods)
    # AutoEncoder epochs differ per dataset (70 for creditcard, 35 otherwise)
    if str(args.dataset) == 'creditcard':
        methods.insert(0, ('AutoEncoder',
                           AutoEncoder(hidden_neurons=[64, 30, 30, 64], verbose=2, epochs=70, batch_size=320,
                                       random_state=int(args.seed),
                                       contamination=contamination), UNSUPERVISED), )
    else:
        methods.insert(0, ('AutoEncoder',
                           AutoEncoder(hidden_neurons=[64, 30, 30, 64], verbose=2, epochs=35, batch_size=320,
                                       random_state=int(args.seed),
                                       contamination=contamination), UNSUPERVISED), )
    methods = tuple(methods)
    # Get fraud-ratio (target class balance for the resampler)
    fraud_ratio = float(args.fraud_ratio)
    # Set k-folds
    skf = StratifiedKFold(n_splits=3, shuffle=False, random_state=None)
    # Plotting
    plt.style.use('dark_background')
    roc_auc_fig = plt.figure()
    roc_auc_ax = roc_auc_fig.subplots()
    roc_auc_ax.set_title('ROC-AUC Curve')
    cv_fig = plt.figure()
    cv_fig_ax = cv_fig.subplots()
    labels = ['Not Fraud', 'Fraud']
    # Set sampler
    if str(args.sampling) == 'under':
        sampler = RandomUnderSampler(sampling_strategy=fraud_ratio, random_state=int(args.seed))
    elif str(args.sampling) == 'smote':
        sampler = SMOTE(sampling_strategy=fraud_ratio, n_jobs=-1, random_state=int(args.seed))
    elif str(args.sampling) == 'over':
        sampler = RandomOverSampler(random_state=int(args.seed))
    else:
        # NOTE(review): raising a plain string is a TypeError at runtime.
        raise ("Sampling method not found.")
    # Running
    for method_name, method, level in methods:
        log(logfile, dataset_name + ", " + method_name)
        # Train
        if level == SEMI_SUPERVISED:
            # Only normal labels are known
            # No 'y' needed, because it is known that X is NF
            method.fit(X_train_NF)
        elif level == UNSUPERVISED or level == SUPERVISED:
            # Initialize CV metrics
            precision_cv_scores = []
            recall_cv_scores = []
            f2_cv_scores = []
            roc_cv_scores = []
            # Cross validate while sampling
            for split_idx, (train_index, valid_index) in enumerate(skf.split(X_train_F_NF, y_train_F_NF)):
                X_fold_train, X_fold_valid = X_train_F_NF.iloc[train_index], X_train_F_NF.iloc[valid_index]
                y_fold_train, y_fold_valid = y_train_F_NF.iloc[train_index], y_train_F_NF.iloc[valid_index]
                # Sample (resampling happens on the training fold only)
                X_fold_train_resampled, y_fold_train_resampled = sampler.fit_resample(X_fold_train, y_fold_train)
                # plot_gmm(GaussianMixture(n_components=2, covariance_type='full', random_state=int(args.seed)),
                #          X_fold_train_resampled.sample(frac=0.5),
                #          outdir=outdir)
                if method_name == 'LOF' or method_name == 'OCSVM':
                    X_fold_train_resampled, y_fold_train_resampled = X_fold_train_resampled.sample(
                        frac=0.1), y_fold_train_resampled.sample(frac=0.1)  # for few CPUs
                # Fit
                if level == UNSUPERVISED:
                    method.fit(X_fold_train_resampled)
                elif level == SUPERVISED:
                    method.fit(X_fold_train_resampled, y_fold_train_resampled)
                # Validate
                y_fold_pred = method.predict(X_fold_valid)
                if method_name == 'XGBClassifier':
                    y_fold_scores = method.predict_proba(X_fold_valid)[:, 1]
                else:
                    y_fold_scores = method.decision_function(np.array(X_fold_valid))
                # Save fold results
                precision_cv_scores.append(precision_score(y_true=y_fold_valid, y_pred=y_fold_pred))
                recall_cv_scores.append(recall_score(y_true=y_fold_valid, y_pred=y_fold_pred))
                f2_cv_scores.append(fbeta_score(y_true=y_fold_valid, y_pred=y_fold_pred, beta=2))
                roc_cv_scores.append(roc_auc_score(y_true=y_fold_valid, y_score=y_fold_scores))
                log(logfile, "Fold Precision: {}".format(str(np.round(precision_cv_scores[-1], 3))))
                log(logfile, "Fold Recall: {}".format(str(np.round(recall_cv_scores[-1], 3))))
                log(logfile, "Fold F2: {}".format(str(np.round(f2_cv_scores[-1], 3))))
                log(logfile, "Fold ROC-AUC: {}".format(str(np.round(roc_cv_scores[-1], 3))))
            # Average Training CV results
            log(logfile, '')
            log(logfile, "Avg Training CV Precision: {}"
                .format(np.round(np.mean(precision_cv_scores), 3)) + " +/- " + str(
                np.round(np.std(precision_cv_scores), 3)))
            log(logfile, "Avg Training CV Recall: {}"
                .format(np.round(np.mean(recall_cv_scores), 3)) + " +/- " + str(
                np.round(np.std(recall_cv_scores), 3)))
            log(logfile, "Avg Training CV F2: {}"
                .format(np.round(np.mean(f2_cv_scores), 3)) + " +/- " + str(np.round(np.std(f2_cv_scores), 3)))
            log(logfile, "Avg Training CV ROC-AUC: {}"
                .format(np.round(np.mean(roc_cv_scores), 3)) + " +/- " + str(
                np.round(np.std(roc_cv_scores), 3)))
        else:
            # NOTE(review): raising a plain string is a TypeError at runtime.
            raise ("Supervision level not found.")
        # TEST
        # Re-initialize metrics
        precision_cv_scores = []
        recall_cv_scores = []
        f2_cv_scores = []
        roc_cv_scores = []
        best_f2_threshold = []
        if method_name == 'GaussianMixture':
            # Threshold search: Cross validate
            for split_idx, (_, valid_index) in enumerate(skf.split(X_cv, y_cv)):
                X_cv_valid = X_cv[valid_index]
                y_cv_valid = y_cv[valid_index]
                # Estimate mu, sigma, generate multivariate normal variable
                mu = np.mean(X_cv_valid, axis=0)
                sigma = np.cov(X_cv_valid.T)
                mnv = multivariate_normal(mean=mu, cov=sigma, allow_singular=True)
                # Understand possible threshold values
                if split_idx == 0:
                    not_fraud_logpdf = np.median(mnv.logpdf(X_cv[valid_index][y_cv[valid_index] == NOT_FRAUD]))
                    fraud_logpdf = np.median(mnv.logpdf(X_cv[valid_index][y_cv[valid_index] == FRAUD]))
                    log(logfile, "Not Fraud logpdf median: {}".format(not_fraud_logpdf))
                    log(logfile, "Fraud logpdf median: {}".format(fraud_logpdf))
                # Compute the weighted log probabilities for each sample
                y_valid_score_samples = method.score_samples(X_cv_valid)
                # Search best threshold (grid of negative log-likelihood cut-offs)
                thresholds = -np.arange(0, 1000, 2)
                scores = []
                for threshold in thresholds:
                    y_hat = (y_valid_score_samples < threshold).astype(int)
                    scores.append([recall_score(y_true=y_cv_valid, y_pred=y_hat),
                                   precision_score(y_true=y_cv_valid, y_pred=y_hat),
                                   fbeta_score(y_true=y_cv_valid, y_pred=y_hat, beta=2),
                                   roc_auc_score(y_true=y_cv_valid, y_score=y_hat)])
                scores = np.array(scores)
                best_threshold_index = scores[:, 2].argmax()
                best_threshold = thresholds[best_threshold_index]
                best_f2_threshold.append(best_threshold)
                # Plot gmm threshold
                cv_fig_ax.set(xticks=[best_threshold])
                cv_fig_ax.plot(thresholds, scores[:, 0], label='Recall')
                cv_fig_ax.plot(thresholds, scores[:, 1], label='Precision')
                cv_fig_ax.plot(thresholds, scores[:, 2], label='F2')
                cv_fig_ax.set_ylabel('Score')
                cv_fig_ax.set_xlabel('Threshold')
                cv_fig_ax.legend(loc='best')
                cv_fig_ax.figure.savefig(outdir + 'gmm_threshold_cv_sample_{}.png'.format(split_idx),
                                         bbox_inches='tight')
                # Start a fresh figure for the next fold's plot
                cv_fig = plt.figure()
                cv_fig_ax = cv_fig.subplots()
                # Save Test Valid results
                recall_cv_scores.append(scores[best_threshold_index, 0])
                precision_cv_scores.append(scores[best_threshold_index, 1])
                f2_cv_scores.append(scores[best_threshold_index, 2])
                roc_cv_scores.append(scores[best_threshold_index, 3])
            # Best threshold according to F2
            best_threshold = best_f2_threshold[np.array(f2_cv_scores).argmax()]
            log(logfile, 'Best Threshold: %d' % best_threshold)
            # Average CV results
            log(logfile, '')
            log(logfile, "Avg Threshold CV Precision: {}"
                .format(np.round(np.mean(precision_cv_scores), 3)) + " +/- " + str(
                np.round(np.std(precision_cv_scores), 3)))
            log(logfile, "Avg Threshold CV Recall: {}"
                .format(np.round(np.mean(recall_cv_scores), 3)) + " +/- " + str(np.round(np.std(recall_cv_scores), 3)))
            log(logfile, "Avg Threshold CV F2: {}"
                .format(np.round(np.mean(f2_cv_scores), 3)) + " +/- " + str(np.round(np.std(f2_cv_scores), 3)))
            log(logfile, "Avg Threshold CV ROC-AUC: {}"
                .format(np.round(np.mean(roc_cv_scores), 3)) + " +/- " + str(np.round(np.std(roc_cv_scores), 3)))
            # Test
            # Predict probabilities
            y_test_pred = (method.score_samples(X_test) < best_threshold).astype(int)
            y_test_scores = y_test_pred  # The same for Gaussian ROC-AUC
            y_test_fraud_probabilities = method.predict_proba(X_test)[:, 1]
        else:
            # Test
            # Predict probabilities
            if method_name == 'XGBClassifier':
                y_test_pred = method.predict(X_test_df)
                # y_test_scores = y_test_pred
                y_test_fraud_probabilities = method.predict_proba(X_test_df)[:, 1]
                y_test_scores = y_test_fraud_probabilities
            else:
                # y_test_pred = method.predict(X_test_df)
                # y_test_fraud_probabilities = method.predict_proba(X_test_df)[:, 1]
                # y_test_scores = method.decision_function(X_test_df)
                batch_size = 20000  # chunk row size
                y_test_pred_list_df = []
                y_test_fraud_probabilities_list_df = []
                y_test_scores_list_df = []
                for batch_number, X_test_batch in X_test_df.groupby(np.arange(len(X_test_df)) // batch_size):
                    y_test_pred_list_df.append(method.predict(X_test_batch))
                    y_test_fraud_probabilities_list_df.append(method.predict_proba(X_test_batch)[:, 1])
                    y_test_scores_list_df.append(method.decision_function(X_test_batch))
                # NOTE(review): only the probabilities are recombined here;
                # y_test_pred_list_df / y_test_scores_list_df are never hstacked,
                # so the heatmap/ROC code below would read y_test_pred /
                # y_test_scores left over from a previous method — confirm.
                y_test_fraud_probabilities = np.hstack(y_test_fraud_probabilities_list_df)
        if dataset_name != 'ieee':
            # Plot heatmap
            fig, (ax1) = plt.subplots(ncols=1, figsize=(5, 5))
            cm = pd.crosstab(y_test, y_test_pred, rownames=['Actual'],
                             colnames=['Predicted'])
            sns.heatmap(cm, xticklabels=labels, yticklabels=labels, annot=True, ax=ax1, linewidths=.2, fmt='g')
            plt.title('Confusion Matrix for {}'.format(method_name), fontsize=14)
            plt.savefig(outdir + '{}_confusion_matrix.png'.format(method_name))
            plt.clf()
            plt.close(fig)
        # Save results
        if dataset_name == 'ieee':
            # ieee has no public test labels: write a Kaggle submission instead
            df_submission['isFraud'] = y_test_fraud_probabilities
            df_submission.to_csv(outdir + '{}_{}_ieee_submission.csv'.format(method_name, str(args.sampling)))
        else:
            log(logfile, classification_report(y_test, y_test_pred, target_names=labels))
            log(logfile, 'Test Precision: %.3f' % precision_score(y_true=y_test, y_pred=y_test_pred))
            log(logfile, 'Test Recall: %.3f' % recall_score(y_true=y_test, y_pred=y_test_pred))
            log(logfile, 'Test F2: %.3f' % fbeta_score(y_true=y_test, y_pred=y_test_pred, beta=2))
            log(logfile, 'Test ROC AUC: %.3f' % roc_auc_score(y_test, y_test_scores))
            log(logfile, '')
            # Plot ROC curve
            fpr, tpr, _ = roc_curve(y_test, y_test_scores)
            roc_auc_ax.plot(fpr, tpr, marker='x', label=method_name)
        log(logfile, '---' * 45)
    if dataset_name != 'ieee':
        # Generate a no skill prediction (majority class)
        ns_probs = [0 for _ in range(len(y_test))]
        # Plot the roc curve for the model
        ns_fpr, ns_tpr, _ = roc_curve(y_test, ns_probs)
        roc_auc_ax.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
    roc_auc_ax.set_xlabel('False Positive Rate')
    roc_auc_ax.set_ylabel('True Positive Rate')
    roc_auc_ax.legend()
    roc_auc_ax.figure.savefig(outdir + 'roc_curve.png')
# Entry point: argv[1] is the configuration file, forwarded to argparse
# via its '@file' fromfile syntax inside experiments().
if __name__ == "__main__":
    experiments(config_file=sys.argv[1])
|
17,153 | 901c97d3c2127d718051b39d193e5a7430592372 | from random import random as rr
class Filter:
    """Filters a buffer of posts by the reader's topic interests and preference."""

    def __init__(self):
        pass

    def filter(self, buffer, interests, preference):
        """Return the subset of posts the reader 'sees'.

        Each post is kept with probability interests[post.topic] plus a
        preference bonus: +0.1 when post.preference matches the reader's
        preference, -0.2 when it conflicts (post.preference == 0 means the
        post is preference-neutral).  If the bonus would push the total
        probability below zero it is dropped.
        """
        ret = []
        for post in buffer:
            # FIX: the original read `var bonus = 0`, which is a SyntaxError
            # in Python; the bonus must be reset for every post.
            bonus = 0
            if post.preference != 0:
                if post.preference == preference:
                    bonus = 0.1
                else:
                    bonus = -0.2
            if interests[post.topic] + bonus < 0:
                bonus = 0
            if rr() < interests[post.topic] + bonus:
                ret.append(post)
        return ret
|
17,154 | dd05edda7cf5c5111761060a8b33a78a747cb001 | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pandas as pd # ๅฏผๅ
ฅpandasๅบ
import seaborn as sns # ๅฏผๅ
ฅseabornๅบ
import matplotlib.pyplot as plt
# Download the AUTO-MPG dataset
dataset_path = keras.utils.get_file("auto-mpg.data", "http://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data")
# Load the dataset with pandas
column_names = ['MPG','Cylinders','Displacement','Horsepower','Weight',
                'Acceleration', 'Model Year', 'Origin']
raw_dataset = pd.read_csv(dataset_path, names=column_names,
                      na_values = "?", comment='\t',
                      sep=" ", skipinitialspace=True)

dataset = raw_dataset.copy()
dataset.tail()

# Data cleaning
dataset.isna().sum()  # the dataset contains some unknown values
dataset = dataset.dropna()  # drop the rows with unknown values
origin = dataset.pop('Origin')  # 'Origin' is categorical, not numeric: re-encode it as one-hot columns
dataset['USA'] = (origin == 1)*1.0
dataset['Europe'] = (origin == 2)*1.0
dataset['Japan'] = (origin == 3)*1.0
dataset.tail()

# Split into a training set and a test set
train_dataset = dataset.sample(frac=0.8, random_state=0)
test_dataset = dataset.drop(train_dataset.index)

# Inspect the data with seaborn
sns.pairplot(train_dataset[["MPG", "Cylinders", "Displacement", "Weight"]], diag_kind="kde")  # quick look at the joint distribution of a few pairs of training columns

# Overall data statistics
train_stats = train_dataset.describe()
train_stats.pop("MPG")
train_stats = train_stats.transpose()
train_stats

# Separate the target label from the features
train_labels = train_dataset.pop('MPG')
test_labels = test_dataset.pop('MPG')
# Data normalisation
def norm(x):
    """Z-score the features using the training-set mean and std."""
    center = train_stats['mean']
    spread = train_stats['std']
    return (x - center) / spread
normed_train_data = norm(train_dataset)
normed_test_data = norm(test_dataset)

# Build the model: two hidden ReLU layers and a single regression output
model = keras.Sequential([
    layers.Dense(64, activation='relu', input_shape=[len(train_dataset.keys())]),
    layers.Dense(64, activation='relu'),
    layers.Dense(1)
])

optimizer = tf.keras.optimizers.RMSprop(0.001)

model.compile(loss='mse',
              optimizer=optimizer,
              metrics=['mae', 'mse'])

# # Take a batch of "10" examples from the training data and call the model on them
# example_batch = normed_train_data[:10]
# example_result = model.predict(example_batch)
# example_result

# # Show training progress by printing one dot per completed epoch
# class PrintDot(keras.callbacks.Callback):
#     def on_epoch_end(self, epoch, logs):
#         if epoch % 100 == 0: print('')
#         print('.', end='')

# # Train the model for up to 1000 epochs, recording train/validation accuracy in the history object
EPOCHS = 1000

# history = model.fit(
#     normed_train_data, train_labels,
#     epochs=EPOCHS, validation_split=0.2, verbose=2)

# The patience parameter is the number of epochs to wait for an improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=10)
history = model.fit(normed_train_data, train_labels, epochs=EPOCHS,
                    validation_split=0.2, verbose=1, callbacks=[early_stop])

# # Visualise training progress from the statistics stored in the history object
# hist = pd.DataFrame(history.history)
# hist['epoch'] = history.epoch
# hist.tail()
# Visualise the training error curves
def plot_history(history):
    """Plot train/validation MAE and MSE against the epoch number."""
    hist = pd.DataFrame(history.history)
    hist['epoch'] = history.epoch

    panels = (
        ('mae', 'Mean Abs Error [MPG]', 5),
        ('mse', 'Mean Square Error [$MPG^2$]', 20),
    )
    for metric, axis_label, y_top in panels:
        plt.figure()
        plt.xlabel('Epoch')
        plt.ylabel(axis_label)
        plt.plot(hist['epoch'], hist[metric],
                 label='Train Error')
        plt.plot(hist['epoch'], hist['val_' + metric],
                 label='Val Error')
        plt.ylim([0, y_top])
        plt.legend()
    # plt.show()
plot_history(history)

# Average accuracy on the held-out test set
loss, mae, mse = model.evaluate(normed_test_data, test_labels, verbose=2)
print("Testing set Mean Abs Error: {:5.2f} MPG".format(mae))

test_predictions = model.predict(normed_test_data).flatten()

# Prediction vs. truth scatter plot
plt.figure()
plt.scatter(test_labels, test_predictions)
plt.xlabel('True Values [MPG]')
plt.ylabel('Predictions [MPG]')
plt.axis('equal')
plt.axis('square')
plt.xlim([0,plt.xlim()[1]])
plt.ylim([0,plt.ylim()[1]])
_ = plt.plot([-100, 100], [-100, 100])

# Histogram of the prediction errors
plt.figure()
error = test_predictions - test_labels
plt.hist(error, bins = 25)
plt.xlabel("Prediction Error [MPG]")
_ = plt.ylabel("Count")
plt.show()
17,155 | 9274415bfb9f8e95dc38e6a170e6047331b014bf | import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
# Extract features from data
def add_features(data, eps=10**-10, n_cluster=50, embeddings=None, ewma = True, aggregate = True):
    """Enrich the equity-returns frame with statistical / market features.

    Adds per-row return statistics, per-date and per-equity market
    aggregates, optionally aggregates the intraday return columns in
    groups of 7 (aggregate=True) and smooths them with an EWMA
    (ewma=True), and — when *embeddings* is given — clusters the
    embeddings into sectors and appends per-sector aggregates.
    Mutates *data* in place in several steps and returns the final frame.
    """
    #print("mem data:", id(data))
    # Get useful columns (data from different hours)
    return_cols = [col for col in data.columns if col.endswith(':00')]
    # Some stats features
    data['return_nan'] = data.isna().sum(axis=1)
    data['avg_return_date_eqt'] = data[return_cols].mean(axis=1)
    data['var_return_date_eqt'] = data[return_cols].var(axis=1)
    data['skew_return_date_eqt'] = data[return_cols].skew(axis=1)
    data['kurt_return_date_eqt'] = data[return_cols].kurt(axis=1)
    data['max_drawdown_date_eqt'] = data[return_cols].max(
        axis=1) - data[return_cols].min(axis=1)
    #data['avg_log_vol_date_eqt'] = np.log(
    #    np.abs(data[return_cols]).mean(axis=1))
    #data['var_log_vol_date_eqt'] = np.log(
    #    np.abs(data[return_cols]).var(axis=1))
    # Broadcast per-date and per-equity market aggregates back onto the rows
    data = group_by_date_countd(data, return_cols)
    data = group_by_product_countd(data, return_cols)
    data['09:30:00'].fillna(0, inplace=True)
    # Fill remaining gaps across the intraday columns
    data[return_cols] = data[return_cols].interpolate(axis=1)
    if aggregate :
        # Sum every run of 7 consecutive intraday columns into one column
        returns = data[return_cols]
        df_train = pd.DataFrame(
            np.add.reduceat(
                returns.values, np.arange(len(returns.columns))[::7], axis=1))
        # NOTE(review): dividing the column labels by 7 only works if the
        # labels are numeric; return_cols end in ':00' (strings), which would
        # raise a TypeError here — confirm the intended column type.
        df_train.columns = returns.columns[::7]/7
        data = data.drop(return_cols, axis=1)
        new_returns_cols = return_cols[::7]
        data[new_returns_cols] = df_train
        return_cols = [col for col in data.columns if col.endswith(':00')]
    if ewma :
        # Exponentially-weighted smoothing across the intraday columns
        data[return_cols] = data[return_cols].ewm(alpha=0.2, axis=1).mean()
    data['difference_to_market'] = data[return_cols[-1]] - data[
        'avg_market_return_date']
    data['return_trend'] = np.sum(data[return_cols],axis = 1)
    #data['log_vol_difference_to_market'] = np.log(
    #    np.abs(data[return_cols[-1]] + eps)) - data['avg_market_log_vol_date']
    #data['log_vol_trend'] = np.log(np.abs(data[return_cols[-1]] + eps)) - np.log(
    #    np.abs(data[return_cols[0]] + eps))
    if embeddings:
        sectors = get_sector(embeddings, n_clusters=n_cluster)
        add_sector(data, sectors)
        # NOTE(review): group_by_sector reads 'avg_log_vol_date_eqt' /
        # 'var_log_vol_date_eqt', which are commented out above — this
        # branch would raise KeyError as written; confirm.
        data = group_by_sector(data, return_cols, sectors)
    #print(data.keys())
    #print("mem data:", id(data))
    return data
def group_by_date_countd(all_data, return_cols):
    """Append per-date market aggregates to every row.

    For each trading date this counts the distinct equities and averages
    the per-equity return mean/variance, then broadcasts those values back
    onto the rows via index alignment.  Mutates and returns *all_data*
    (with 'date' moved to the first column by the reset_index round-trip).
    """
    key = "date"
    per_date = {
        "countd_product": all_data.groupby([key])["eqt_code"].nunique(),
        "avg_market_return_date": all_data.groupby([key])['avg_return_date_eqt'].mean(),
        "var_market_return_date": all_data.groupby([key])['var_return_date_eqt'].mean(),
    }
    all_data.set_index([key], inplace=True)
    for col_name, per_date_series in per_date.items():
        # assignment aligns on the date index, broadcasting per-date values
        all_data[col_name] = per_date_series.astype('float64')
    all_data.reset_index(inplace=True)
    return all_data
def group_by_product_countd(all_data, return_cols):
    """Append per-equity market aggregates to every row.

    For each equity code this counts the distinct dates and averages the
    per-row return mean/variance, then broadcasts those values back onto
    the rows via index alignment.  Mutates and returns *all_data*
    (with 'eqt_code' moved to the first column by the reset_index round-trip).
    """
    key = "eqt_code"
    per_eqt = {
        "countd_date": all_data.groupby([key])["date"].nunique(),
        "avg_market_return_eqt": all_data.groupby([key])['avg_return_date_eqt'].mean(),
        "var_market_return_eqt": all_data.groupby([key])['var_return_date_eqt'].mean(),
    }
    all_data.set_index([key], inplace=True)
    for col_name, per_eqt_series in per_eqt.items():
        # assignment aligns on the eqt_code index, broadcasting per-equity values
        all_data[col_name] = per_eqt_series.astype('float64')
    all_data.reset_index(inplace=True)
    return all_data
def plot_corr(df, size=10):
    """Draw a correlation heatmap of *df* in a size-by-size figure."""
    import seaborn as sns
    import matplotlib.pyplot as plt

    figure, axes = plt.subplots(figsize=(size, size))
    sns.heatmap(df.corr(), annot=False)
def get_data_matrix(embeddings):
    """Stack the embedding vectors into an (n_samples, n_features) array,
    one row per key in dict order."""
    vectors = list(embeddings.values())
    X = np.zeros((len(vectors), len(vectors[0])))
    for row, vec in enumerate(vectors):
        X[row, :] = vec
    return X
def get_sector(embeddings, n_clusters=8):
    """Cluster the equity embeddings with K-means.

    Returns a dict mapping each equity code to its cluster ("sector") id.
    """
    X = get_data_matrix(embeddings)
    cluster_ids = KMeans(n_clusters=n_clusters, n_init=30).fit_predict(X)
    return {eqt: cluster_ids[idx] for idx, eqt in enumerate(embeddings.keys())}
def add_sector(data, sectors):
    """Attach a 'sector' column by looking up each equity code in *sectors*."""
    sector_column = data['eqt_code'].map(sectors)
    data['sector'] = sector_column
def group_by_sector(all_data, return_cols, sector):
    """Attach per-sector aggregate columns to *all_data*.

    Adds, broadcast to every row of the same sector: distinct-equity count,
    and the means of the per-date return/log-volume averages and variances.
    Column names keep the historical '_date' suffix for caller compatibility.

    Mutates *all_data* in place and returns it. *return_cols* and *sector*
    are currently unused (kept for interface compatibility).
    """
    groupby_col = "sector"
    grouped = all_data.groupby([groupby_col])
    unique_products = grouped["eqt_code"].nunique()
    avg_market_return = grouped['avg_return_date_eqt'].mean()
    var_market_return = grouped['var_return_date_eqt'].mean()
    avg_log_vol_market_return = grouped['avg_log_vol_date_eqt'].mean()
    var_log_vol_market_return = grouped['var_log_vol_date_eqt'].mean()
    all_data.set_index([groupby_col], inplace=True)
    all_data["countd_product"] = unique_products.astype('float64')
    all_data["avg_market_return_date"] = avg_market_return.astype('float64')
    all_data["var_market_return_date"] = var_market_return.astype('float64')
    all_data["avg_market_log_vol_date"] = avg_log_vol_market_return.astype(
        'float64')
    # BUG FIX: the variance series previously overwrote
    # "avg_market_log_vol_date"; it now lands in its own column.
    all_data["var_market_log_vol_date"] = var_log_vol_market_return.astype(
        'float64')
    all_data.reset_index(inplace=True)
    return all_data
|
17,156 | 1a2686d3aba64f0a642eef6cee71257ac9aa2fc1 | import glob
import os
import re
import math
def word_list_maker(address):
    """Count word occurrences across every *.txt file under *address*.

    Tokens are produced by splitting each file on spaces and newlines.
    Counting now uses a single dict lookup per token instead of the original
    O(vocabulary) list-membership scan, which made training quadratic in the
    number of distinct words. Results are identical.

    :param address: directory (relative to cwd, or absolute) to scan
    :return: (number_of_files, unique_words_in_first_seen_order, {word: count})
    """
    file_list = glob.glob(os.path.join(os.getcwd(), address, "*.txt"))
    word_counter = {}
    for file_path in file_list:
        # errors='ignore' mirrors the original tolerance of odd encodings
        with open(file_path, errors = 'ignore') as f_input:
            for word in re.split('[ \n]', f_input.read()):
                word_counter[word] = word_counter.get(word, 0) + 1
    # dicts preserve insertion order, so this matches the old unique_words list
    unique_words = list(word_counter)
    return len(file_list), unique_words, word_counter
def generator_for_testing(address):
    """Read every *.txt file under *address* and return one token list per file.

    Tokens come from splitting each file's text on spaces and newlines,
    matching the tokenisation used for training.
    """
    paths = glob.glob(os.path.join(os.getcwd(), address, "*.txt"))
    tokenised_files = []
    for path in paths:
        with open(path, errors = 'ignore') as handle:
            tokenised_files.append(re.split('[ \n]', handle.read()))
    return tokenised_files
def NaiveBayes(classes, number_of_class_i, dictionary_class_i, test_file):
    """Score *test_file* (a list of word tokens) against each class.

    Multinomial Naive Bayes with add-one (Laplace) smoothing; probabilities
    are accumulated in log2 space to avoid underflow. The per-word vocabulary
    check now uses a dict lookup instead of the original O(vocabulary)
    list-membership scan — results are identical, just much faster.

    :param classes: class labels, parallel to the two list arguments
    :param number_of_class_i: training-document count per class
    :param dictionary_class_i: per-class {word: count} dictionaries
    :param test_file: tokenised document to classify
    :return: {class_label: log2 posterior (up to a shared constant)}
    """
    total = sum(number_of_class_i)
    # Per-class totals used by the Laplace denominator.
    total_words_in_class = [sum(counts.values()) for counts in dictionary_class_i]
    vocabulary_size_in_class = [len(counts) for counts in dictionary_class_i]
    # Initialise each posterior with the class's log prior.
    posterior = {cls: math.log2(count / total)
                 for cls, count in zip(classes, number_of_class_i)}
    for word in test_file:
        for idx, counts in enumerate(dictionary_class_i):
            denominator = total_words_in_class[idx] + vocabulary_size_in_class[idx]
            # counts.get(word, 0) covers both the seen and unseen-word cases
            laplaced_probability = (counts.get(word, 0) + 1) / denominator
            posterior[classes[idx]] += math.log2(laplaced_probability)
    return posterior
def tester(classes, numbers_of_class, dicts_of_class, address, test_class):
    """Classify every test file under *address* and report accuracy.

    Prints the accuracy for *test_class* and returns
    (number_of_test_files, accuracy_fraction).
    """
    test_files = generator_for_testing(address)
    correct = 0
    for words in test_files:
        posteriors = NaiveBayes(classes, numbers_of_class, dicts_of_class, words)
        peak = max(posteriors.values())
        # keep the LAST class reaching the maximum, matching the original
        # tie-breaking behaviour
        for label, score in posteriors.items():
            if score == peak:
                winner = label
        if winner == test_class:
            correct += 1
    total = len(test_files)
    print("Accuracy for {} = {}%".format(test_class, round((correct/total)*100, 2)))
    return total, (correct/total)
# --- Training phase: build per-class word counts from the training corpora. ---
# NOTE(review): 'train\ham' uses Windows-style backslashes; '\h' and '\s' are
# not escape sequences so the strings survive as written, but these relative
# paths will not resolve on POSIX systems — confirm the target platform.
number_of_ham, ham_uniq_words, ham_word_counter = word_list_maker("train\ham")
number_of_spam, spam_uniq_words, spam_word_counter = word_list_maker("train\spam")
# Parallel lists, ordered to match classes = ["ham", "spam"].
numbers_of_class = []
dicts_of_class = []
numbers_of_class.append(number_of_ham)
numbers_of_class.append(number_of_spam)
dicts_of_class.append(ham_word_counter)
dicts_of_class.append(spam_word_counter)
classes = ["ham", "spam"]
print("Naive Bayes Classifier for ham and spam")
print("-----------------------------")
# --- Evaluation on the held-out test sets, one class at a time. ---
no_of_test_files1, ham_acc = tester(classes, numbers_of_class, dicts_of_class, "test\ham", "ham")
no_of_test_files0, spam_acc = tester(classes, numbers_of_class, dicts_of_class, "test\spam", "spam")
# Overall accuracy weighted by the size of each test set.
weighted_average = ((no_of_test_files1*ham_acc) + (no_of_test_files0*spam_acc))/(no_of_test_files0 + no_of_test_files1)
print('Weighted average is {}%'.format(round(weighted_average*100, 2)))
# --- Second pass: strip stopwords from the learned counts and re-evaluate. ---
with open('stopwords.txt', 'r') as file:
    data = file.read()
stopwords = re.split('[\n]', data)
for stopword in stopwords:
    if stopword in dicts_of_class[0]:
        del dicts_of_class[0][stopword]
    if stopword in dicts_of_class[1]:
        del dicts_of_class[1][stopword]
print("With stopwords removed, we now get:")
no_of_test_files1, ham_acc = tester(classes, numbers_of_class, dicts_of_class, "test\ham", "ham")
no_of_test_files0, spam_acc = tester(classes, numbers_of_class, dicts_of_class, "test\spam", "spam")
weighted_average = ((no_of_test_files1*ham_acc) + (no_of_test_files0*spam_acc))/(no_of_test_files0 + no_of_test_files1)
print('Weighted average is {}%'.format(round(weighted_average*100, 2)))
|
17,157 | 8bdec5d57b9d465623dc71c4fa81041683961f08 | from uuid import uuid4
from ui_testing.pages.base_selenium import BaseSelenium
import time, pyperclip, os
from random import randint
import datetime
import pymysql
class BasePages:
def __init__(self):
self.base_selenium = BaseSelenium()
self.pagination_elements_array = ['10', '20', '25', '50', '100']
def generate_random_text(self):
return str(uuid4()).replace("-", "")[:10]
def generate_random_number(self, lower=1, upper=100000):
return randint(lower, upper)
    def search(self, value):
        """Type *value* into the global search box and trigger the search.

        :param value: text to search for
        :return: all row elements of the refreshed results table (the whole
            table, not just the first row)
        """
        self.base_selenium.set_text(element='general:search', value=value)
        self.base_selenium.click(element='general:search')
        # fixed pause lets the table re-render before rows are read
        time.sleep(self.base_selenium.TIME_MEDIUM)
        return self.result_table()
def result_table(self, element='general:table'):
table = self.base_selenium.get_table_rows(element=element)
return table
def clear_text(self, element):
self.base_selenium.clear_element_text(element=element)
def sleep_tiny(self):
self.base_selenium.LOGGER.debug('wait up to 0.5 sec')
self.base_selenium.wait_until_page_load_resources(expected_counter=5)
def sleep_small(self):
self.base_selenium.LOGGER.debug('wait up to 1 sec')
self.base_selenium.wait_until_page_load_resources(expected_counter=10)
def sleep_medium(self):
self.base_selenium.LOGGER.debug('wait up to 2 sec')
self.base_selenium.wait_until_page_load_resources(expected_counter=20)
def sleep_large(self):
self.base_selenium.LOGGER.debug('wait up to 4 sec')
self.base_selenium.wait_until_page_load_resources(expected_counter=40)
def save(self, sleep=True, save_btn='general:save', logger_msg='save the changes'):
self.info(logger_msg)
if sleep:
self.sleep_tiny()
self.base_selenium.click(element=save_btn)
if sleep:
self.sleep_tiny()
def save_and_wait(self, save_btn='general:save'):
self.save(save_btn=save_btn)
self.info('Refresh to make sure that data are saved correctly')
self.base_selenium.refresh()
self.wait_until_page_is_loaded()
def cancel(self, force=True):
if self.base_selenium.check_element_is_exist(element='general:cancel'):
self.base_selenium.click(element='general:cancel')
else:
self.base_selenium.click(element='my_profile:cancel_button')
self.confirm_popup(force)
    def confirm_popup(self, force=True, check_only=False):
        """Handle the confirmation popup if one is currently displayed.

        :param force: True clicks the confirm button, False clicks cancel
        :param check_only: this parameter (when true) is to check the existence of popup only without clicking ok/cancel
        :return: True when check_only is set and the popup exists; otherwise None
        """
        self.info('confirming the popup')
        if self.base_selenium.check_element_is_exist(element='general:confirmation_pop_up'):
            if check_only:
                return True
            if force:
                self.base_selenium.click(element='general:confirm_pop')
            else:
                self.base_selenium.click(element='general:confirm_cancel')
            self.sleep_small()
def get_confirmation_pop_up_text(self):
if self.base_selenium.wait_element(element='general:confirmation_pop_up'):
return self.base_selenium.get_text(element='general:confirmation_pop_up')
def open_filter_menu(self):
self.info('open Filter')
filter = self.base_selenium.find_element_in_element(source_element='general:menu_filter_view',
destination_element='general:filter')
filter.click()
def close_filter_menu(self):
filter = self.base_selenium.find_element_in_element(source_element='general:menu_filter_view',
destination_element='general:filter')
filter.click()
def filter_by(self, filter_element, filter_text, field_type='drop_down'):
if field_type == 'drop_down':
self.base_selenium.select_item_from_drop_down(element=filter_element,
item_text=filter_text, avoid_duplicate=True)
else:
self.base_selenium.set_text(element=filter_element, value=filter_text)
def filter_apply(self):
self.base_selenium.click(element='general:filter_btn')
self.wait_until_page_is_loaded()
def apply_filter_scenario(self, filter_element, filter_text, field_type='drop_down'):
self.open_filter_menu()
self.sleep_tiny()
self.base_selenium.wait_element(element=filter_element)
self.filter_by(filter_element=filter_element, filter_text=filter_text, field_type=field_type)
self.filter_apply()
self.sleep_tiny()
def filter_reset(self):
self.info(' Reset Filter')
self.base_selenium.click(element='general:filter_reset_btn')
time.sleep(self.base_selenium.TIME_SMALL)
def select_random_multiple_table_rows(self, element='general:table'):
_selected_rows_text = []
selected_rows_data = []
selected_rows = []
rows = self.base_selenium.get_table_rows(element=element)
no_of_rows = randint(min(2, len(rows) - 1), min(5, len(rows) - 1))
count = 0
self.info(' No. of selected rows {} '.format(no_of_rows))
while count < no_of_rows:
self.base_selenium.scroll()
row = rows[randint(0, len(rows) - 2)]
row_text = row.text
if not row_text:
continue
if row_text in _selected_rows_text:
continue
count = count + 1
self.click_check_box(source=row)
self.sleep_tiny()
_selected_rows_text.append(row_text)
selected_rows.append(row)
selected_rows_data.append(self.base_selenium.get_row_cells_dict_related_to_header(row=row))
return selected_rows_data, selected_rows
def select_random_ordered_multiple_table_rows(self, element='general:table'):
_selected_rows_text = []
selected_rows_data = []
selected_rows = []
rows = self.base_selenium.get_table_rows(element=element)
no_of_rows = randint(min(2, len(rows)-1), min(5, len(rows)-1))
count = 0
self.info(' No. of selected rows {} '.format(no_of_rows))
while count < no_of_rows:
self.base_selenium.scroll()
row = rows[count]
row_text = row.text
if not row_text:
continue
if row_text in _selected_rows_text:
continue
count = count + 1
self.click_check_box(source=row)
_selected_rows_text.append(row_text)
selected_rows.append(row)
selected_rows_data.append(self.base_selenium.get_row_cells_dict_related_to_header(row=row))
return selected_rows_data, selected_rows
def select_random_table_row(self, element='general:table'):
self.info("select random row")
rows = self.base_selenium.get_table_rows(element=element)
for _ in range(5):
row_index = randint(0, len(rows) - 2)
row = rows[row_index]
row_text = row.text
if not row_text:
continue
self.click_check_box(source=row)
return self.base_selenium.get_row_cells_dict_related_to_header(row)
def click_check_box(self, source):
check_box = self.base_selenium.find_element_in_element(
destination_element='general:checkbox', source=source)
check_box.click()
def open_edit_page(self, row, xpath=''):
if xpath == '':
xpath = '//span[@class="mr-auto"]/a'
row.find_element_by_xpath(xpath).click()
self.wait_until_page_is_loaded()
def open_edit_page_by_css_selector(self, row, css_selector=''):
if css_selector == '':
css_selector = '[title="Edit details"]'
row.find_element_by_css_selector(css_selector).click()
self.wait_until_page_is_loaded()
def get_archived_items(self):
self.sleep_tiny()
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:archived')
self.sleep_tiny()
def get_active_items(self):
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:active')
self.sleep_small()
def restore_selected_items(self):
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:restore')
self.confirm_popup()
self.sleep_tiny()
def delete_selected_item(self, confirm_pop_up=True):
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:delete')
if confirm_pop_up:
self.confirm_popup()
def archive_selected_items(self):
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:archive')
self.confirm_popup()
self.sleep_tiny()
def download_xslx_sheet(self):
self.info("download XSLX sheet")
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.sheet = self.base_selenium.download_excel_file(element='general:xslx')
def select_all_records(self):
header_row = self.base_selenium.get_table_head_elements(element='general:table')
self.click_check_box(source=header_row[0])
def get_table_rows_data(self):
return [row.text for row in self.base_selenium.get_table_rows(element='general:table')]
def open_random_table_row_page(self, table_element):
row = self.get_random_table_row(table_element)
self.open_edit_page(row=row)
def get_random_table_row(self, table_element):
rows = self.base_selenium.get_table_rows(element=table_element)
if len(rows) > 1:
row_id = randint(0, len(rows) - 2)
row = rows[row_id]
return row
return ''
def get_table_info(self):
return self.base_selenium.get_text(element='general:table_info')
    def get_table_records(self):
        """Return the total record count parsed from the table info line.

        Parsing is positional: the 6th whitespace token of the info text is
        assumed to be the count (e.g. 'Showing 1 to 10 of 42 entries' -> 42)
        — TODO confirm the format against the UI.
        """
        self.info(' Get table records.')
        return int(self.get_table_info().split(' ')[5])
def get_random_date(self):
return '{:02d}.{:02d}.{}'.format(randint(1, 30), randint(1, 12), 2019)
def filter(self, field_name, element, filter_text, type):
self.info(' Filter by {} : {}'.format(field_name, filter_text))
self.filter_by(filter_element=element, filter_text=filter_text, field_type=type)
self.filter_apply()
def _copy(self, value):
pyperclip.copy(value)
def _paste(self, element):
self.info(' past. {}'.format(pyperclip.paste()))
self.base_selenium.paste(element=element)
def copy_paste(self, element, value):
self._copy(value=value)
self._paste(element=element)
def open_row_options(self, row):
self.info('open record options menu')
row_options = self.base_selenium.find_element_in_element(
destination_element='general:table_menu_options', source=row)
row_options.click()
self.sleep_tiny()
def open_child_table(self, source):
childtable_arrow = self.base_selenium.find_element_in_element(
destination_element='general:child_table_arrow', source=source)
childtable_arrow.click()
self.sleep_medium()
def close_child_table(self, source):
childtable_arrow = self.base_selenium.find_element_in_element(
destination_element='general:child_table_arrow', source=source)
childtable_arrow.click()
def get_child_table_data(self, index=0, open_child=True):
rows = self.result_table()
if open_child:
self.open_child_table(source=rows[index])
return self.get_table_data()
def get_table_data(self, table_element='general:table_child'):
rows_with_childtable = self.result_table(element=table_element)
headers = self.base_selenium.get_table_head_elements(element=table_element)
child_table_data = []
for subrecord in range(0, len(rows_with_childtable)):
rows_with_headers = self.base_selenium.get_row_cells_dict_related_to_header(
row=rows_with_childtable[subrecord], table_element='general:table_child')
if rows_with_headers != {}:
child_table_data.append(rows_with_headers)
return child_table_data
@property
def info(self):
return self.base_selenium.LOGGER.info
@property
def debug(self):
return self.base_selenium.LOGGER.debug
def generate_random_email(self):
name = str(uuid4()).replace("-", "")[:10]
server = "@" + str(uuid4()).replace("-", "")[:6] + "." + 'com'
return name + server
def generate_random_website(self):
return "www." + str(uuid4()).replace("-", "")[:10] + "." + str(uuid4()).replace("-", "")[:3]
def generate_random_string(self):
return str(uuid4()).replace("-", "")[:10]
    def generate_random_number(self, lower=1, upper=100000):
        # NOTE(review): duplicate definition — this re-declaration shadows the
        # identical method defined earlier in the class; one copy should be
        # removed once callers are audited.
        return randint(lower, upper)
def open_configuration(self):
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:configurations')
self.base_selenium.wait_until_element_located(element='general:fields_panel')
def open_archived_configuration(self):
self.open_configuration()
self.sleep_tiny()
self.base_selenium.click(element='general:configurations_archived')
def open_analysis_configuration(self):
self.base_selenium.refresh()
self.sleep_tiny()
self.open_configuration()
self.sleep_tiny()
self.base_selenium.click(element='general:configurations_analysis')
def open_configure_table(self):
self.info('open configure table')
configure_table_menu = self.base_selenium.find_element(element='general:configure_table')
configure_table_menu.click()
self.sleep_tiny()
def close_configure_table(self):
self.info('open configure table')
configure_table_menu = self.base_selenium.find_element(element='general:configure_table')
configure_table_menu.click()
def navigate_to_child_table_configuration(self):
self.open_configure_table()
configure_child_table_menu = self.base_selenium.find_element(element='general:configure_child_table')
configure_child_table_menu.click()
self.sleep_tiny()
active_columns = self.base_selenium.find_elements_in_element(
source_element='general:configure_child_table_items', destination_element='general:li')
coulmns_text = [column.text for column in active_columns]
self.close_configure_table()
return coulmns_text
def hide_columns(self, random=True, count=3, index_arr=[], always_hidden_columns=[]):
self.open_configure_table()
total_columns = self.base_selenium.find_elements_in_element(source_element='general:configure_table_items',
destination_element='general:li')
# total_columns = self.base_selenium.find_element_by_xpath(xpath='//ul[@class="m-nav sortable sortable-table1 ui-sortable"]').find_elements_by_tag_name('li')
random_indices_arr = index_arr
hidden_columns_names = []
if random:
random_indices_arr = self.generate_random_indices(max_index=len(total_columns) - 2, count=count)
for index in random_indices_arr:
if total_columns[index].get_attribute('id') and total_columns[index].get_attribute('id') != 'id' and \
total_columns[index].get_attribute('id') not in always_hidden_columns:
column_name = self.change_column_view(column=total_columns[index], value=False,
always_hidden_columns=always_hidden_columns)
if column_name != '':
hidden_columns_names.append(column_name)
self.press_apply_in_configure_table()
self.info(hidden_columns_names)
return hidden_columns_names
def change_column_view(self, column, value, always_hidden_columns=[]):
if column.get_attribute('id') and column.get_attribute('id') != 'id' and column.get_attribute(
'id') not in always_hidden_columns:
try:
new_checkbox_value = "//li[@id='" + column.get_attribute('id') + "']//input[@type='checkbox']"
new_label_xpath = "//li[@id='" + column.get_attribute('id') + "']//label[@class='sortable-label']"
new_checkbox_xpath = "//li[@id='" + column.get_attribute('id') + "']//span[@class='checkbox']"
column_name = self.base_selenium.find_element_by_xpath(new_label_xpath).text
checkbox = self.base_selenium.find_element_by_xpath(new_checkbox_xpath)
checkbox_value = self.base_selenium.find_element_by_xpath(new_checkbox_value)
if checkbox_value.is_selected() != value:
checkbox.click()
return column_name
except Exception as e:
self.info(
"element with the id '{}' doesn't exit in the configure table".format(column.get_attribute('id')))
self.base_selenium.LOGGER.exception(' * %s Exception ' % (str(e)))
return ''
def set_specific_configure_table_column_to_specific_value(self, fields=[''], value=True, child=False,
element='general:configure_table_items'):
"""
:param fields: list of items to select or deslect in table
:param value: True to select, False to deselect
:param child: true if want child table
:param element: configure_child_table_items if child table selected
:return:
"""
self.open_configure_table()
if child:
self.base_selenium.click(element='general:configure_child_table')
total_columns = self.base_selenium.find_elements_in_element(
source_element=element, destination_element='general:li')
for column in total_columns:
if column.text in fields:
self.change_column_view(column=column, value=value)
self.press_apply_in_configure_table()
self.sleep_tiny()
self.base_selenium.refresh()
self.sleep_tiny()
if child:
self.open_child_table(self.result_table()[0])
headers = self.base_selenium.get_table_head_elements(element='general:table_child')
child_table_headings = [i.text for i in headers]
return child_table_headings
else:
return self.base_selenium.get_table_head_elements_with_tr(element='general:table')[0].text.split('\n')
def generate_random_indices(self, max_index=3, count=3):
counter = 0
indices_arr = []
while counter < count:
random_index = self.generate_random_number(lower=0, upper=max_index - 1)
if random_index not in indices_arr:
indices_arr.append(random_index)
counter = counter + 1
return indices_arr
def press_apply_in_configure_table(self):
apply_button = self.base_selenium.find_element(element="general:apply_configure_table")
if apply_button:
apply_button.click()
def set_all_configure_table_columns_to_specific_value(self, value=True, always_hidden_columns=['']):
self.open_configure_table()
total_columns = self.base_selenium.find_elements_in_element(
source_element='general:configure_table_items',
destination_element='general:li')
for column in total_columns:
self.change_column_view(column=column, value=value, always_hidden_columns=always_hidden_columns)
self.press_apply_in_configure_table()
def deselect_all_configurations(self):
self.open_configure_table()
self.info('deselect all configuration')
active_columns = self.base_selenium.find_elements_in_element(
source_element='general:configure_table_items', destination_element='general:li')
for column in active_columns:
if column.text:
self.change_column_view(column=column, value=False)
archived_coloums = self.base_selenium.find_elements_in_element(
source_element='general:configure_table_archive_items', destination_element='general:li')
for column in archived_coloums:
if column.text:
self.change_column_view(column=column, value=False)
parent_class = self.base_selenium.driver.find_element_by_xpath('//*[contains(text(), "Apply")]//parent::a')
class_string = parent_class.get_attribute('class')
if 'disabled' in class_string:
self.info("can't apply")
return False
else:
self.info("can apply")
return True
def click_overview(self):
# click on Overview, this will display an alert to the user
self.base_selenium.scroll()
self.info('click on Overview')
self.base_selenium.click_by_script(element='general:overview')
self.sleep_tiny()
def confirm_overview_pop_up(self):
self.base_selenium.click(element='general:confirm_overview')
self.sleep_tiny()
def cancel_overview_pop_up(self):
self.base_selenium.click(element='general:cancel_overview')
self.sleep_tiny()
def duplicate_selected_item(self):
self.base_selenium.scroll()
self.base_selenium.click(element='general:right_menu')
self.base_selenium.click(element='general:duplicate')
self.sleep_small()
'''
archives this item and tries to delete it
returns True if it's deleted and False otherwise
'''
def delete_selected_item_from_active_table_and_from_archived_table(self, item_name):
# archive this item
row = self.search(item_name)
self.info('Selecting the row')
self.click_check_box(source=row[0])
self.sleep_small()
self.info('Archiving the selected row')
self.archive_selected_items()
self.info('Navigating to the Archived table')
self.get_archived_items()
# try to delete it
archived_row = self.search(item_name)
self.sleep_small()
self.info('Selecting the row')
self.click_check_box(source=archived_row[0])
self.sleep_small()
self.info('Attempting to delete item: {}'.format(item_name))
self.delete_selected_item()
if self.base_selenium.check_element_is_exist(element='general:cant_delete_message'):
self.base_selenium.click(element='general:confirm_pop')
return False
return True
def is_next_page_button_enabled(self, element='general:next_page'):
_class = self.base_selenium.get_attribute('general:next_page', 'class')
if 'disabled' in _class:
return False
else:
return True
def upload_file(self, file_name, drop_zone_element, remove_current_file=False, save=True):
"""
Upload single file to a page that only have 1 drop zone
:param file_name: name of the file to be uploaded
:param drop_zone_element: the dropZone element
:return:
"""
self.info(" uploading file")
# remove the current file and save
if remove_current_file:
self.info(" remove current file")
is_the_file_exist = self.base_selenium.check_element_is_exist(element='general:file_upload_success_flag')
if is_the_file_exist:
self.sleep_tiny()
self.base_selenium.click('general:remove_file')
self.base_selenium.click('general:close_uploader_popup')
if save:
self.save()
else:
self.info(" there is no current file")
# get the absolute path of the file
file_path = os.path.abspath('ui_testing/assets/{}'.format(file_name))
# check if the file exist
if os.path.exists(file_path) == False:
raise Exception(
"The file you are trying to upload doesn't exist localy")
else:
self.info(
"The {} file is ready for upload".format(file_name))
# silence the click event of file input to prevent the opening of (Open Files) Window
self.base_selenium.driver.execute_script(
"""
HTMLInputElement.prototype.click = function() {
if(this.type !== 'file')
{
HTMLElement.prototype.click.call(this);
}
}
""")
# click on the dropZone component
self.base_selenium.click(element=drop_zone_element)
# the input tag will be appended to the HTML by dropZone after the click
# find the <input type="file"> tag
file_field = self.base_selenium.find_element(element='general:file_input_field')
# send the path of the file to the input tag
file_field.send_keys(file_path)
self.info("Uploading {}".format(file_name))
# wait until the file uploads
self.base_selenium.wait_until_element_located(element='general:file_upload_success_flag')
self.info(
"{} file is uploaded successfully".format(file_name))
def open_pagination_menu(self):
self.base_selenium.wait_element(element='general:pagination_button')
self.base_selenium.click(element='general:pagination_button')
def set_page_limit(self, limit='20'):
self.info('set the pagination limit to {}'.format(limit))
self.open_pagination_menu()
limit_index = self.pagination_elements_array.index(limit)
self.base_selenium.wait_element(element='general:pagination_menu')
pagination_elements = self.base_selenium.find_elements_in_element(source_element='general:pagination_menu',
destination_element='general:li')
if limit_index >= 0:
pagination_elements[limit_index].click()
time.sleep(self.base_selenium.TIME_MEDIUM)
def get_current_pagination_limit(self):
return self.base_selenium.find_element(element='general:pagination_button').text.split('\n')[0]
def wait_until_page_is_loaded(self):
self.debug('wait until page is loaded')
self.base_selenium.wait_until_element_is_not_displayed('general:loading')
self.sleep_tiny()
    def get_table_info_data(self):
        """Parse the pagination info line into its numeric components.

        :return: dict with 'start', 'end' and 'count' (string tokens from the
            info text), 'page_limit' (rows actually shown, end - start + 1)
            and 'pagination_limit' (the configured page-size setting)
        """
        self.info('get table information')
        table_info = self.base_selenium.find_element('general:table_info')
        table_info_data = table_info.text
        # positional parsing — assumes text like 'Showing 1 to 10 of 42 entries';
        # TODO confirm the exact wording against the UI
        table_info_elements = table_info_data.split(' ')
        start = table_info_elements[1]
        end = table_info_elements[3]
        count = table_info_elements[5]
        page_limit = str((int(end) - int(start)) + 1)
        current_pagination_limit = self.get_current_pagination_limit()
        return {'start': start,
                'end': end,
                'count': count,
                'page_limit': page_limit,
                'pagination_limit': current_pagination_limit
                }
def convert_to_dot_date_format(self, date):
date_in_days = date[0:10]
date_parameters = date_in_days.split('-')
date_parameters.reverse()
return '.'.join(date_parameters)
def get_the_latest_row_data(self):
latest_row = (self.result_table()[0])
return self.base_selenium.get_row_cells_dict_related_to_header(latest_row)
    def open_connection_with_database(self):
        """Open a MySQL connection to the automation DB; return (cursor, db).

        SECURITY NOTE: host and root credentials are hard-coded in source —
        they should be moved to configuration/secret storage.
        """
        db = pymysql.connect(host='52.28.249.166', user='root', passwd='modeso@test', database='automation')
        cursor = db.cursor()
        return cursor, db
    def close_connection_with_database(self, db):
        """Close a connection returned by open_connection_with_database."""
        db.close()
def get_current_year(self):
current_year = datetime.datetime.now()
return str(current_year.year)
|
17,158 | 5230316544b89273ebe8f840a180454e05a74b8a | import calendar
import logging
import random
from collections import defaultdict, deque, namedtuple
from enum import Enum
from math import ceil
from typing import cast, Iterable, Union, Literal
import discord
from redbot.cogs.bank import is_owner_if_bank_global
from redbot.cogs.mod.converters import RawUserIds
from redbot.core import Config, bank, commands, errors, checks
from redbot.core.i18n import Translator, cog_i18n
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import box, humanize_number
from redbot.core.utils.menus import close_menu, menu, DEFAULT_CONTROLS
from .economyembed import Embed
from redbot.core.bot import Red
T_ = Translator("Economy", __file__)
logger = logging.getLogger("red.economy")
NUM_ENC = "\N{COMBINING ENCLOSING KEYCAP}"
VARIATION_SELECTOR = "\N{VARIATION SELECTOR-16}"
MOCK_MEMBER = namedtuple("Member", "id guild")
class SMReel(Enum):
    """Slot-machine reel symbols; each member's value is the emoji shown to the player."""
    cherries = "\N{CHERRIES}"
    cookie = "\N{COOKIE}"
    two = "\N{DIGIT TWO}" + NUM_ENC  # keycap-style '2'
    flc = "\N{FOUR LEAF CLOVER}"
    cyclone = "\N{CYCLONE}"
    sunflower = "\N{SUNFLOWER}"
    six = "\N{DIGIT SIX}" + NUM_ENC  # keycap-style '6'
    mushroom = "\N{MUSHROOM}"
    heart = "\N{HEAVY BLACK HEART}" + VARIATION_SELECTOR
    snowflake = "\N{SNOWFLAKE}" + VARIATION_SELECTOR
_ = lambda s: s
PAYOUTS = {
(SMReel.two, SMReel.two, SMReel.six): {
"payout": lambda x: x * 50,
"phrase": _("JACKPOT! 226! Your bid has been multiplied * 50!"),
},
(SMReel.flc, SMReel.flc, SMReel.flc): {
"payout": lambda x: x * 25,
"phrase": _("4LC! Your bid has been multiplied * 25!"),
},
(SMReel.cherries, SMReel.cherries, SMReel.cherries): {
"payout": lambda x: x * 20,
"phrase": _("Three cherries! Your bid has been multiplied * 20!"),
},
(SMReel.two, SMReel.six): {
"payout": lambda x: x * 4,
"phrase": _("2 6! Your bid has been multiplied * 4!"),
},
(SMReel.cherries, SMReel.cherries): {
"payout": lambda x: x * 3,
"phrase": _("Two cherries! Your bid has been multiplied * 3!"),
},
"3 symbols": {
"payout": lambda x: x * 10,
"phrase": _("Three symbols! Your bid has been multiplied * 10!"),
},
"2 symbols": {
"payout": lambda x: x * 2,
"phrase": _("Two consecutive symbols! Your bid has been multiplied * 2!"),
},
}
SLOT_PAYOUTS_MSG = _(
"Slot machine payouts:\n"
"{two.value} {two.value} {six.value} Bet * 50\n"
"{flc.value} {flc.value} {flc.value} Bet * 25\n"
"{cherries.value} {cherries.value} {cherries.value} Bet * 20\n"
"{two.value} {six.value} Bet * 4\n"
"{cherries.value} {cherries.value} Bet * 3\n\n"
"Three symbols: Bet * 10\n"
"Two symbols: Bet * 2"
).format(**SMReel.__dict__)
_ = T_
def guild_only_check():
    """Command check: pass when the bank is global, or when invoked in a guild.

    Simplified from an if/elif chain whose elif re-awaited
    ``bank.is_global()`` a second time per invocation; the single boolean
    expression is equivalent and awaits it only once.
    """
    async def pred(ctx: commands.Context):
        return await bank.is_global() or ctx.guild is not None

    return commands.check(pred)
class SetParser:
    """Parse a balance-adjustment argument.

    '+N' -> operation 'deposit' with sum N, '-N' -> 'withdraw' with sum N,
    bare digits -> 'set'. Raises RuntimeError for signed zero ('+0'/'-0')
    or numeric text that fits none of those shapes; ``int()`` itself raises
    ValueError for non-numeric input.
    """

    def __init__(self, argument):
        value = int(argument)
        has_sign = bool(argument) and argument[0] in ("+", "-")
        if has_sign:
            if value == 0:
                raise RuntimeError
            self.operation = "withdraw" if value < 0 else "deposit"
            self.sum = abs(value)
        elif argument.isdigit():
            self.operation = "set"
            self.sum = value
        else:
            raise RuntimeError
@cog_i18n(_)
class Money(commands.Cog):
    """Get rich and have fun with imaginary currency!"""

    # Per-guild defaults used when the bank is server-local.
    default_guild_settings = {
        "PAYDAY_TIME": 300,  # seconds between paydays
        "PAYDAY_CREDITS": 120,  # credits granted per payday
        "SLOT_MIN": 5,  # minimum slot machine bid
        "SLOT_MAX": 100,  # maximum slot machine bid
        "SLOT_TIME": 5,  # seconds between slot machine spins
        "REGISTER_CREDITS": 0,  # starting balance for new accounts
    }
    # The global bank reuses the same defaults.
    default_global_settings = default_guild_settings
    # Cooldown bookkeeping (epoch timestamps) per member (local bank).
    default_member_settings = {"next_payday": 0, "last_slot": 0}
    # Optional per-role payday override (0 = no override).
    default_role_settings = {"PAYDAY_CREDITS": 0}
    # Same cooldown bookkeeping per user (global bank).
    default_user_settings = default_member_settings
    def __init__(self, bot: Red):
        """Set up Config storage and register the default settings."""
        super().__init__()
        self.bot = bot
        # 1256844281 is this cog's fixed Config identifier.
        self.config = Config.get_conf(self, 1256844281)
        self.config.register_guild(**self.default_guild_settings)
        self.config.register_global(**self.default_global_settings)
        self.config.register_member(**self.default_member_settings)
        self.config.register_user(**self.default_user_settings)
        self.config.register_role(**self.default_role_settings)
        # Per-channel slot bookkeeping; not referenced elsewhere in this
        # chunk — TODO confirm before removing.
        self.slot_register = defaultdict(dict)
    async def red_delete_data_for_user(
        self,
        *,
        requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
        user_id: int,
    ):
        """Red data-deletion API hook.

        Only honours requests for accounts deleted from Discord itself; other
        requester types keep their economy data.
        """
        if requester != "discord_deleted_user":
            return

        # Global (user-scope) data.
        await self.config.user_from_id(user_id).clear()
        # Per-guild (member-scope) data, scanned in batches of 100 guilds.
        all_members = await self.config.all_members()
        async for guild_id, guild_data in AsyncIter(all_members.items(), steps=100):
            if user_id in guild_data:
                await self.config.member_from_ids(guild_id, user_id).clear()
    @guild_only_check()
    @commands.group(name="bank")
    async def _bank(self, ctx: commands.Context):
        """Manage the bank."""
        # Group container only; the subcommands do the work.
        pass
    @_bank.command()
    async def balance(self, ctx: commands.Context, user: discord.Member = None):
        """Show the user's account balance.

        Defaults to yours."""
        if user is None:
            user = ctx.author

        bal = await bank.get_balance(user)
        currency = await bank.get_currency_name(ctx.guild)
        max_bal = await bank.get_max_balance(ctx.guild)
        # Clamp a balance that exceeds the (possibly lowered) cap and persist
        # the clamped value so the stored account matches what is reported.
        if bal > max_bal:
            bal = max_bal
            await bank.set_balance(user, bal)
        await ctx.send(
            _("{user}'s balance is {num} {currency}").format(
                user=user.display_name, num=humanize_number(bal), currency=currency
            )
        )
    @_bank.command()
    async def transfer(self, ctx: commands.Context, to: discord.Member, amount: int):
        """Transfer currency to other users."""
        from_ = ctx.author
        currency = await bank.get_currency_name(ctx.guild)

        try:
            await bank.transfer_credits(from_, to, amount)
        except (ValueError, errors.BalanceTooHigh) as e:
            # ValueError covers bad amounts / insufficient funds;
            # BalanceTooHigh fires when the recipient would exceed the cap.
            # Both exception messages are user-presentable.
            return await ctx.send(str(e))
        await ctx.send(
            _("{user} transferred {num} {currency} to {other_user}").format(
                user=from_.display_name,
                num=humanize_number(amount),
                currency=currency,
                other_user=to.display_name,
            )
        )
    @is_owner_if_bank_global()
    @checks.admin_or_permissions(manage_guild=True)
    @_bank.command(name="set")
    async def _set(self, ctx: commands.Context, to: discord.Member, creds: SetParser):
        """Set the balance of user's bank account.

        Passing positive and negative values will add/remove currency instead.

        Examples:
        - `[p]bank set @Twentysix 26` - Sets balance to 26
        - `[p]bank set @Twentysix +2` - Increases balance by 2
        - `[p]bank set @Twentysix -6` - Decreases balance by 6
        """
        author = ctx.author
        currency = await bank.get_currency_name(ctx.guild)

        try:
            # SetParser already decided the operation from the sign prefix.
            if creds.operation == "deposit":
                await bank.deposit_credits(to, creds.sum)
                msg = _("{author} added {num} {currency} to {user}'s account.").format(
                    author=author.display_name,
                    num=humanize_number(creds.sum),
                    currency=currency,
                    user=to.display_name,
                )
            elif creds.operation == "withdraw":
                await bank.withdraw_credits(to, creds.sum)
                msg = _("{author} removed {num} {currency} from {user}'s account.").format(
                    author=author.display_name,
                    num=humanize_number(creds.sum),
                    currency=currency,
                    user=to.display_name,
                )
            else:
                await bank.set_balance(to, creds.sum)
                msg = _("{author} set {user}'s account balance to {num} {currency}.").format(
                    author=author.display_name,
                    num=humanize_number(creds.sum),
                    currency=currency,
                    user=to.display_name,
                )
        except (ValueError, errors.BalanceTooHigh) as e:
            await ctx.send(str(e))
        else:
            await ctx.send(msg)
    @is_owner_if_bank_global()
    @checks.guildowner_or_permissions(administrator=True)
    @_bank.command()
    async def reset(self, ctx, confirmation: bool = False):
        """Delete all bank accounts."""
        if confirmation is False:
            # First invocation: require an explicit "yes" re-run before wiping.
            await ctx.send(
                _(
                    "This will delete all bank accounts for {scope}.\nIf you're sure, type "
                    "`{prefix}bank reset yes`"
                ).format(
                    scope=self.bot.user.name if await bank.is_global() else _("this server"),
                    prefix=ctx.clean_prefix,
                )
            )
        else:
            await bank.wipe_bank(guild=ctx.guild)
            await ctx.send(
                _("All bank accounts for {scope} have been deleted.").format(
                    scope=self.bot.user.name if await bank.is_global() else _("this server")
                )
            )
    @is_owner_if_bank_global()
    @checks.admin_or_permissions(manage_guild=True)
    @_bank.group(name="prune")
    async def _prune(self, ctx):
        """Prune bank accounts."""
        # Group container only; see the "server", "global" and "user"
        # subcommands below.
        pass
@_prune.command(name="server", aliases=["guild", "local"])
@commands.guild_only()
@checks.guildowner()
async def _local(self, ctx, confirmation: bool = False):
"""Prune bank accounts for users no longer in the server."""
global_bank = await bank.is_global()
if global_bank is True:
return await ctx.send(_("This command cannot be used with a global bank."))
if confirmation is False:
await ctx.send(
_(
"This will delete all bank accounts for users no longer in this server."
"\nIf you're sure, type "
"`{prefix}bank prune local yes`"
).format(prefix=ctx.clean_prefix)
)
else:
await bank.bank_prune(self.bot, guild=ctx.guild)
await ctx.send(
_("Bank accounts for users no longer in this server have been deleted.")
)
@_prune.command(name="global")
@checks.is_owner()
async def _global(self, ctx, confirmation: bool = False):
"""Prune bank accounts for users who no longer share a server with the bot."""
global_bank = await bank.is_global()
if global_bank is False:
return await ctx.send(_("This command cannot be used with a local bank."))
if confirmation is False:
await ctx.send(
_(
"This will delete all bank accounts for users "
"who no longer share a server with the bot."
"\nIf you're sure, type `{prefix}bank prune global yes`"
).format(prefix=ctx.clean_prefix)
)
else:
await bank.bank_prune(self.bot)
await ctx.send(
_(
"Bank accounts for users who "
"no longer share a server with the bot have been pruned."
)
)
@_prune.command(usage="<user> [confirmation=False]")
async def user(
self, ctx, member_or_id: Union[discord.Member, RawUserIds], confirmation: bool = False
):
"""Delete the bank account of a specified user."""
global_bank = await bank.is_global()
if global_bank is False and ctx.guild is None:
return await ctx.send(_("This command cannot be used in DMs with a local bank."))
try:
name = member_or_id.display_name
uid = member_or_id.id
except AttributeError:
name = member_or_id
uid = member_or_id
if confirmation is False:
await ctx.send(
_(
"This will delete {name}'s bank account."
"\nIf you're sure, type "
"`{prefix}bank prune user {id} yes`"
).format(prefix=ctx.clean_prefix, id=uid, name=name)
)
else:
await bank.bank_prune(self.bot, guild=ctx.guild, user_id=uid)
await ctx.send(_("The bank account for {name} has been pruned.").format(name=name))
@guild_only_check()
@commands.command()
async def payday(self, ctx: commands.Context):
"""Get some free currency."""
author = ctx.author
guild = ctx.guild
cur_time = calendar.timegm(ctx.message.created_at.utctimetuple())
credits_name = await bank.get_currency_name(ctx.guild)
if await bank.is_global(): # Role payouts will not be used
# Gets the latest time the user used the command successfully and adds the global payday time
next_payday = (
await self.config.user(author).next_payday() + await self.config.PAYDAY_TIME()
)
if cur_time >= next_payday:
try:
await bank.deposit_credits(author, await self.config.PAYDAY_CREDITS())
except errors.BalanceTooHigh as exc:
await bank.set_balance(author, exc.max_balance)
await ctx.send(
_(
"You've reached the maximum amount of {currency}!"
"Please spend some more \N{GRIMACING FACE}\n\n"
"You currently have {new_balance} {currency}."
).format(
currency=credits_name, new_balance=humanize_number(exc.max_balance)
)
)
return
# Sets the current time as the latest payday
await self.config.user(author).next_payday.set(cur_time)
pos = await bank.get_leaderboard_position(author)
embed = Embed.create(
self, ctx, title="<:dollarbag:778687019944771616> Payday!",
description=(
"Here you go, {author.name}. "
"Don't spend it too quickly!\n\n"
"<:plus:777167188816560168> {amount} dollars have been added to your bank.\n"
"You now have {new_balance} dollars. <:dollarbag:778687019944771616>\n\n"
"You are currently #{pos} on the **global** leaderboard!\n"
"Use `dem leaderboard` to review your position further.\n"
"Use `dem slot <bid>` to take a gamble at the slot machine!"
).format(
author=author,
currency=credits_name,
amount=humanize_number(await self.config.PAYDAY_CREDITS()),
new_balance=humanize_number(await bank.get_balance(author)),
pos=humanize_number(pos)
)
)
await ctx.send(embed=embed)
else:
dtime = self.display_time(next_payday - cur_time)
await ctx.send(
_(
"{author.mention} Too soon. For your next payday you have to wait {time}."
).format(author=author, time=dtime)
)
else:
# Gets the users latest successfully payday and adds the guilds payday time
next_payday = (
await self.config.member(author).next_payday()
+ await self.config.guild(guild).PAYDAY_TIME()
)
if cur_time >= next_payday:
credit_amount = await self.config.guild(guild).PAYDAY_CREDITS()
for role in author.roles:
role_credits = await self.config.role(
role
).PAYDAY_CREDITS() # Nice variable name
if role_credits > credit_amount:
credit_amount = role_credits
try:
await bank.deposit_credits(author, credit_amount)
except errors.BalanceTooHigh as exc:
await bank.set_balance(author, exc.max_balance)
await ctx.send(
_(
"You've reached the maximum amount of {currency}! "
"Please spend some more \N{GRIMACING FACE}\n\n"
"You currently have {new_balance} {currency}."
).format(
currency=credits_name, new_balance=humanize_number(exc.max_balance)
)
)
return
# Sets the latest payday time to the current time
next_payday = cur_time
await self.config.member(author).next_payday.set(next_payday)
pos = await bank.get_leaderboard_position(author)
embed = Embed.create(
self, ctx, title="<:dollarbag:778687019944771616> Payday!",
description=(
"{author.mention} Take a loada' cash. "
"Don't spend it took quick!\n\n"
"<:plus:777167188816560168> {amount} dollars have been added to your bank.\n"
"You now have {new_balance} dollars. <:dollarbag:778687019944771616>\n\n"
"You are currently #{pos} on the server leaderboard!"
).format(
author=author,
currency=credits_name,
amount=humanize_number(await self.config.PAYDAY_CREDITS()),
new_balance=humanize_number(await bank.get_balance(author)),
pos=humanize_number(pos)
)
)
await ctx.send(embed=embed)
else:
dtime = self.display_time(next_payday - cur_time)
await ctx.send(
_(
"{author.mention} Too soon. For your next payday you have to wait {time}."
).format(author=author, time=dtime)
)
    @commands.command()
    @guild_only_check()
    async def leaderboard(self, ctx: commands.Context, top: int = 10, show_global: bool = False):
        """Print the leaderboard.

        Defaults to top 10.
        """
        guild = ctx.guild
        author = ctx.author
        embed_requested = await ctx.embed_requested()
        footer_message = _("Page {page_num}/{page_len}.")
        max_bal = await bank.get_max_balance(ctx.guild)
        if top < 1:
            top = 10
        base_embed = discord.Embed(title=_("Economy Leaderboard"))
        if await bank.is_global() and show_global:
            # show_global is only applicable if bank is global
            bank_sorted = await bank.get_leaderboard(positions=top, guild=None)
            base_embed.set_author(name=ctx.bot.user.name, icon_url=ctx.bot.user.avatar_url)
        else:
            bank_sorted = await bank.get_leaderboard(positions=top, guild=guild)
            if guild:
                base_embed.set_author(name=guild.name, icon_url=guild.icon_url)
        try:
            # Column width from the largest balance, clamped to the bank cap.
            bal_len = len(humanize_number(bank_sorted[0][1]["balance"]))
            bal_len_max = len(humanize_number(max_bal))
            if bal_len > bal_len_max:
                bal_len = bal_len_max
            # first user is the largest we'll see
        except IndexError:
            return await ctx.send(_("There are no accounts in the bank."))
        pound_len = len(str(len(bank_sorted)))
        header = "{pound:{pound_len}}{score:{bal_len}}{name:2}\n".format(
            pound="#",
            name=_("Name"),
            score=_("Score"),
            bal_len=bal_len + 6,
            pound_len=pound_len + 3,
        )
        highscores = []
        pos = 1
        temp_msg = header
        for acc in bank_sorted:
            try:
                name = guild.get_member(acc[0]).display_name
            except AttributeError:
                # Not a current member (or no guild): fall back to the stored
                # name; bot owners additionally see the raw user id.
                user_id = ""
                if await ctx.bot.is_owner(ctx.author):
                    user_id = f"({str(acc[0])})"
                name = f"{acc[1]['name']} {user_id}"
            balance = acc[1]["balance"]
            if balance > max_bal:
                # Clamp over-cap balances and persist the clamped value.
                # NOTE(review): MOCK_MEMBER is not among this chunk's visible
                # imports — presumably a lightweight (id, guild) stand-in for
                # a Member; confirm against the module's import block.
                balance = max_bal
                await bank.set_balance(MOCK_MEMBER(acc[0], guild), balance)
            balance = humanize_number(balance)
            if acc[0] != author.id:
                temp_msg += (
                    f"{f'{humanize_number(pos)}.': <{pound_len+2}} "
                    f"{balance: <{bal_len + 5}} {name}\n"
                )
            else:
                # Highlight the invoking user's own row.
                temp_msg += (
                    f"{f'{humanize_number(pos)}.': <{pound_len+2}} "
                    f"{balance: <{bal_len + 5}} "
                    f"<<{author.display_name}>>\n"
                )
            if pos % 10 == 0:
                # Flush a full page of ten rows.
                if embed_requested:
                    embed = base_embed.copy()
                    embed.description = box(temp_msg, lang="md")
                    embed.set_footer(
                        text=footer_message.format(
                            page_num=len(highscores) + 1,
                            page_len=ceil(len(bank_sorted) / 10),
                        )
                    )
                    highscores.append(embed)
                else:
                    highscores.append(box(temp_msg, lang="md"))
                temp_msg = header
            pos += 1

        # Flush the final partial page, if any.
        if temp_msg != header:
            if embed_requested:
                embed = base_embed.copy()
                embed.description = box(temp_msg, lang="md")
                embed.set_footer(
                    text=footer_message.format(
                        page_num=len(highscores) + 1,
                        page_len=ceil(len(bank_sorted) / 10),
                    )
                )
                highscores.append(embed)
            else:
                highscores.append(box(temp_msg, lang="md"))

        if highscores:
            await menu(
                ctx,
                highscores,
                DEFAULT_CONTROLS if len(highscores) > 1 else {"\N{CROSS MARK}": close_menu},
            )
        else:
            await ctx.send(_("No balances found."))
    @commands.command()
    @guild_only_check()
    async def payouts(self, ctx: commands.Context):
        """Show the payouts for the slot machine."""
        try:
            # DM the table to keep channel noise down.
            await ctx.author.send(SLOT_PAYOUTS_MSG)
        except discord.Forbidden:
            # The user has DMs from this bot disabled.
            await ctx.send(_("I can't send direct messages to you."))
    @commands.command()
    @guild_only_check()
    async def slot(self, ctx: commands.Context, bid: int):
        """Use the slot machine."""
        author = ctx.author
        guild = ctx.guild
        channel = ctx.channel
        # Read bid limits and cooldown from the scope matching the bank mode.
        if await bank.is_global():
            valid_bid = await self.config.SLOT_MIN() <= bid <= await self.config.SLOT_MAX()
            slot_time = await self.config.SLOT_TIME()
            last_slot = await self.config.user(author).last_slot()
        else:
            valid_bid = (
                await self.config.guild(guild).SLOT_MIN()
                <= bid
                <= await self.config.guild(guild).SLOT_MAX()
            )
            slot_time = await self.config.guild(guild).SLOT_TIME()
            last_slot = await self.config.member(author).last_slot()
        now = calendar.timegm(ctx.message.created_at.utctimetuple())

        if (now - last_slot) < slot_time:
            await ctx.send(_("You're on cooldown, try again in a bit."))
            return
        if not valid_bid:
            await ctx.send(_("That's an invalid bid amount, sorry :/"))
            return
        if not await bank.can_spend(author, bid):
            await ctx.send(_("You ain't got enough money, friend."))
            return
        # Record the spin time (cooldown) before running the machine.
        if await bank.is_global():
            await self.config.user(author).last_slot.set(now)
        else:
            await self.config.member(author).last_slot.set(now)
        await self.slot_machine(author, channel, bid)
@staticmethod
async def slot_machine(author, channel, bid):
default_reel = deque(cast(Iterable, SMReel))
reels = []
for i in range(3):
default_reel.rotate(random.randint(-999, 999)) # weeeeee
new_reel = deque(default_reel, maxlen=3) # we need only 3 symbols
reels.append(new_reel) # for each reel
rows = (
(reels[0][0], reels[1][0], reels[2][0]),
(reels[0][1], reels[1][1], reels[2][1]),
(reels[0][2], reels[1][2], reels[2][2]),
)
slot = "~~\n~~" # Mobile friendly
for i, row in enumerate(rows): # Let's build the slot to show
sign = " "
if i == 1:
sign = ">"
slot += "{}{} {} {}\n".format(
sign, *[c.value for c in row] # pylint: disable=no-member
)
payout = PAYOUTS.get(rows[1])
if not payout:
# Checks for two-consecutive-symbols special rewards
payout = PAYOUTS.get((rows[1][0], rows[1][1]), PAYOUTS.get((rows[1][1], rows[1][2])))
if not payout:
# Still nothing. Let's check for 3 generic same symbols
# or 2 consecutive symbols
has_three = rows[1][0] == rows[1][1] == rows[1][2]
has_two = (rows[1][0] == rows[1][1]) or (rows[1][1] == rows[1][2])
if has_three:
payout = PAYOUTS["3 symbols"]
elif has_two:
payout = PAYOUTS["2 symbols"]
pay = 0
if payout:
then = await bank.get_balance(author)
pay = payout["payout"](bid)
now = then - bid + pay
try:
await bank.set_balance(author, now)
except errors.BalanceTooHigh as exc:
await bank.set_balance(author, exc.max_balance)
await channel.send(
_(
"You've reached the maximum amount of {currency}! "
"Please spend some more \N{GRIMACING FACE}\n{old_balance} -> {new_balance}!"
).format(
currency=await bank.get_currency_name(getattr(channel, "guild", None)),
old_balance=humanize_number(then),
new_balance=humanize_number(exc.max_balance),
)
)
return
phrase = T_(payout["phrase"])
else:
then = await bank.get_balance(author)
await bank.withdraw_credits(author, bid)
now = then - bid
phrase = _("Nothing!")
await channel.send(
(
"{slot}\n{author.mention} {phrase}\n\n"
+ _("Your bid: {bid}")
+ _("\n{old_balance} - {bid} (Your bid) + {pay} (Winnings) โ {new_balance}!")
).format(
slot=slot,
author=author,
phrase=phrase,
bid=humanize_number(bid),
old_balance=humanize_number(then),
new_balance=humanize_number(now),
pay=humanize_number(pay),
)
)
    @guild_only_check()
    @is_owner_if_bank_global()
    @checks.admin_or_permissions(manage_guild=True)
    @commands.group()
    async def economyset(self, ctx: commands.Context):
        """Manage Economy settings."""
        # Group container only; subcommands below do the work.
@economyset.command(name="showsettings")
async def economyset_showsettings(self, ctx: commands.Context):
"""
Shows the current economy settings
"""
guild = ctx.guild
if await bank.is_global():
conf = self.config
else:
conf = self.config.guild(guild)
await ctx.send(
box(
_(
"----Economy Settings---\n"
"Minimum slot bid: {slot_min}\n"
"Maximum slot bid: {slot_max}\n"
"Slot cooldown: {slot_time}\n"
"Payday amount: {payday_amount}\n"
"Payday cooldown: {payday_time}\n"
"Amount given at account registration: {register_amount}\n"
"Maximum allowed balance: {maximum_bal}"
).format(
slot_min=humanize_number(await conf.SLOT_MIN()),
slot_max=humanize_number(await conf.SLOT_MAX()),
slot_time=humanize_number(await conf.SLOT_TIME()),
payday_time=humanize_number(await conf.PAYDAY_TIME()),
payday_amount=humanize_number(await conf.PAYDAY_CREDITS()),
register_amount=humanize_number(await bank.get_default_balance(guild)),
maximum_bal=humanize_number(await bank.get_max_balance(guild)),
)
)
)
    @economyset.command()
    async def slotmin(self, ctx: commands.Context, bid: int):
        """Set the minimum slot machine bid."""
        if bid < 1:
            await ctx.send(_("Invalid min bid amount."))
            return
        guild = ctx.guild
        # NOTE(review): there is no check that the new minimum stays at or
        # below SLOT_MAX — a min above the max makes every bid invalid.
        # Confirm whether that should be rejected here like slotmax does.
        if await bank.is_global():
            await self.config.SLOT_MIN.set(bid)
        else:
            await self.config.guild(guild).SLOT_MIN.set(bid)
        credits_name = await bank.get_currency_name(guild)
        await ctx.send(
            _("Minimum bid is now {bid} {currency}.").format(
                bid=humanize_number(bid), currency=credits_name
            )
        )
@economyset.command()
async def slotmax(self, ctx: commands.Context, bid: int):
"""Set the maximum slot machine bid."""
slot_min = await self.config.SLOT_MIN()
if bid < 1 or bid < slot_min:
await ctx.send(
_("Invalid maximum bid amount. Must be greater than the minimum amount.")
)
return
guild = ctx.guild
credits_name = await bank.get_currency_name(guild)
if await bank.is_global():
await self.config.SLOT_MAX.set(bid)
else:
await self.config.guild(guild).SLOT_MAX.set(bid)
await ctx.send(
_("Maximum bid is now {bid} {currency}.").format(
bid=humanize_number(bid), currency=credits_name
)
)
@economyset.command()
async def slottime(self, ctx: commands.Context, seconds: int):
"""Set the cooldown for the slot machine."""
guild = ctx.guild
if await bank.is_global():
await self.config.SLOT_TIME.set(seconds)
else:
await self.config.guild(guild).SLOT_TIME.set(seconds)
await ctx.send(_("Cooldown is now {num} seconds.").format(num=seconds))
    @economyset.command()
    async def paydaytime(self, ctx: commands.Context, seconds: int):
        """Set the cooldown for payday."""
        guild = ctx.guild
        # Write to global or guild scope depending on the bank mode.
        if await bank.is_global():
            await self.config.PAYDAY_TIME.set(seconds)
        else:
            await self.config.guild(guild).PAYDAY_TIME.set(seconds)
        await ctx.send(
            _("Value modified. At least {num} seconds must pass between each payday.").format(
                num=seconds
            )
        )
    @economyset.command()
    async def paydayamount(self, ctx: commands.Context, creds: int):
        """Set the amount earned each payday."""
        guild = ctx.guild
        max_balance = await bank.get_max_balance(ctx.guild)
        # A payday larger than the balance cap could never be fully deposited.
        if creds <= 0 or creds > max_balance:
            return await ctx.send(
                _("Amount must be greater than zero and less than {maxbal}.").format(
                    maxbal=humanize_number(max_balance)
                )
            )
        credits_name = await bank.get_currency_name(guild)
        if await bank.is_global():
            await self.config.PAYDAY_CREDITS.set(creds)
        else:
            await self.config.guild(guild).PAYDAY_CREDITS.set(creds)
        await ctx.send(
            _("Every payday will now give {num} {currency}.").format(
                num=humanize_number(creds), currency=credits_name
            )
        )
    @economyset.command()
    async def rolepaydayamount(self, ctx: commands.Context, role: discord.Role, creds: int):
        """Set the amount earned each payday for a role."""
        guild = ctx.guild
        max_balance = await bank.get_max_balance(ctx.guild)
        if creds <= 0 or creds > max_balance:
            return await ctx.send(
                _("Amount must be greater than zero and less than {maxbal}.").format(
                    maxbal=humanize_number(max_balance)
                )
            )
        credits_name = await bank.get_currency_name(guild)
        # Role overrides only apply to a per-server bank (see payday()).
        if await bank.is_global():
            await ctx.send(_("The bank must be per-server for per-role paydays to work."))
        else:
            await self.config.role(role).PAYDAY_CREDITS.set(creds)
            await ctx.send(
                _(
                    "Every payday will now give {num} {currency} "
                    "to people with the role {role_name}."
                ).format(num=humanize_number(creds), currency=credits_name, role_name=role.name)
            )
    @economyset.command()
    async def registeramount(self, ctx: commands.Context, creds: int):
        """Set the initial balance for new bank accounts."""
        guild = ctx.guild
        max_balance = await bank.get_max_balance(ctx.guild)
        credits_name = await bank.get_currency_name(guild)
        try:
            # bank validates the range itself (0 <= creds <= max balance).
            await bank.set_default_balance(creds, guild)
        except ValueError:
            return await ctx.send(
                _("Amount must be greater than or equal to zero and less than {maxbal}.").format(
                    maxbal=humanize_number(max_balance)
                )
            )
        await ctx.send(
            _("Registering an account will now give {num} {currency}.").format(
                num=humanize_number(creds), currency=credits_name
            )
        )
# What would I ever do without stackoverflow?
@staticmethod
def display_time(seconds, granularity=2):
intervals = ( # Source: http://stackoverflow.com/a/24542445
(_("weeks"), 604800), # 60 * 60 * 24 * 7
(_("days"), 86400), # 60 * 60 * 24
(_("hours"), 3600), # 60 * 60
(_("minutes"), 60),
(_("seconds"), 1),
)
result = []
for name, count in intervals:
value = seconds // count
if value:
seconds -= value * count
if value == 1:
name = name.rstrip("s")
result.append("{} {}".format(value, name))
return ", ".join(result[:granularity])
|
17,159 | 61e75d889096b003b403a17cc0a88f5738a3112d | # import necessary modules and libraries
from skytrip.revalidate_itinerary.sabre_revalidate_prefs import SabreRevalidatePrefs
from skytrip.gds_handler import SabreHandler
from skytrip.revalidate_itinerary.sabre_revalidate_structure_adapter import SabreRevalidateStructureAdapter
from skytrip.revalidate_itinerary.revalidate_config import RevalidateConfig
from skytrip.utils.helper import generate_json, get_root_exception_message, finalize_response
import inspect
class RevalidateHandler:
    """Orchestrates a Sabre "revalidate itinerary" call and adapts the raw
    Sabre response into the Skytrip standard structure.

    NOTE(review): the Sabre handler/prefs objects below are *class* attributes
    and therefore shared by all RevalidateHandler instances — confirm they are
    stateless before relying on concurrent use.
    """

    # Default for whether UTILS data is required (overridden per request).
    require_utils = False
    # Shared GDS handler used to talk to Sabre.
    __sabre_handler = SabreHandler()
    # Shared request-preference builder for the revalidate endpoint.
    __sabre_revalidate_prefs = SabreRevalidatePrefs()

    def __init__(self, EventBodyData=None):
        # Raw event payload describing the itinerary to revalidate.
        self.event_body_data = EventBodyData

    # handler function
    def sabre_revalidate_handler(self, generateJSON=False):
        """
        revalidate handler for Revalidate Itinerary Module
        params => generateJSON (boolean) — when truthy, also dump the
        structured response to a JSON file for debugging.
        Returns the finalized Skytrip-structured response (or a structured
        error payload when anything raises).
        """
        # define main response placeholder
        sabre_response = None
        result = None
        try:
            # ------------------- *** get main response from Sabre *** -------------------
            sabre_response = self.__sabre_handler.get_sabre_response(
                EventBodyData=self.event_body_data, request_pref_func=self.__sabre_revalidate_prefs.get_revalidate_preference, endpoint_identifier='v6.shop.flights.revalidate', generateJSON=generateJSON
            )

            # ------------------- *** adopt Sabre response with Skytrip standard structure *** -------------------
            # ticket revalidate response structure
            __revalidate_config = RevalidateConfig()
            revalidate_response_structure = __revalidate_config.get_skytrip_revalidate_itinerary_response_standard_structure()

            # get if requires utils
            self.require_utils = self.event_body_data.get("RequireUTILS", False)

            # Adopt structure
            structure_adapter = SabreRevalidateStructureAdapter(
                response=sabre_response, revalidate_response_structure=revalidate_response_structure, require_utils=self.require_utils
            )
            # get structured revalidate response
            result = structure_adapter.build_structure()

        # ------------------- *** handle exceptions *** -------------------
        except Exception as E:
            # assign structured exception details to the result
            result = get_root_exception_message(
                Ex=E, gdsResponse=sabre_response, appResponse=result, file=__file__,
                parent=inspect.stack()[0][3], line=inspect.stack()[0][2],
                msg="Failed to revalidate itinerary!"
            )

        # ------------------- *** validate structure and finalize response *** -------------------
        finalized_response = finalize_response(response=result)

        # ------------------- *** generate JSON file of Skytrip structured response *** -------------------
        # Idiom fix: truthiness test instead of ``== True``.
        if generateJSON:
            generate_json(
                gds="sabre", isReq=False, filename="revalidate_structured_response_sabre.json", data=finalized_response
            )

        # return finalized response
        return finalized_response
17,160 | 3733778ec74d3e8d732a144150c0439baac9dd78 | from argparse import ArgumentParser, Namespace
from cli.cli import (add_database_option,
add_logging_options,
set_logging_options,
add_team_option,
add_league_option,
add_venue_option,
add_half_option,
add_block_option,
get_unique_league,
get_unique_team)
from lib.helpful import set_matplotlib_defaults
from lib.messages import warning_message
from matplotlib import pyplot as plt
from model.fixtures import Fixture, Half, Scoreline, Venue, create_fixture_from_row, win, loss, canonicalise_scoreline
from model.leagues import league_register
from model.seasons import Season
from model.tables import LeagueTable
from model.teams import Team
from sql.sql import extract_picked_team, load_league, load_teams, get_finished_matches
from typing import List, Tuple
import pandas as pd
def parse_command_line():
    """Build and parse this tool's command-line options."""
    parser = ArgumentParser(description='Show recent form')
    add_database_option(parser)
    add_logging_options(parser)
    # League and team are mandatory (the True flag) for this tool.
    add_team_option(parser, True)
    add_league_option(parser, True)
    add_venue_option(parser)
    add_half_option(parser)
    add_block_option(parser)
    return parser.parse_args()
def decide_cell_color(result: Scoreline,
                      left_color: Tuple[float, float, float],
                      right_color: Tuple[float, float, float],
                      neutral_color: Tuple[float, float, float],
                      unknown_color: Tuple[float, float, float]):
    """Pick a table-cell colour for a canonicalised scoreline.

    Returns left_color for a win, right_color for a loss, neutral_color for a
    draw and unknown_color when there is no result at all.

    Fix: the parameter was annotated ``Result``, a name never imported in this
    module; annotations are evaluated at definition time, so importing the
    module raised NameError.  ``Scoreline`` (what canonicalise_scoreline
    yields) is used instead.
    """
    if result:
        if win(result):
            return left_color
        elif loss(result):
            return right_color
        else:
            return neutral_color
    else:
        return unknown_color
def create_results_table(ax,
                         team: Team,
                         fixtures: List[Fixture],
                         team_color: str,
                         other_color: str,
                         neutral_color: str,
                         unknown_color: str):
    """Render the team's results (newest first) as a matplotlib table on *ax*.

    The 1st-half / 2nd-half / full-time cells are coloured by outcome:
    team_color for a win, other_color for a loss, neutral_color for a draw,
    unknown_color when a half's result is unavailable.

    Fix over the previous revision: the first-half call to
    canonicalise_scoreline was missing its ``fixture`` argument (the
    second-half and full-time calls pass ``(fixture, team, scoreline)``).
    """
    colors = []
    table = []
    fixtures.sort(key=lambda row: row.date, reverse=True)
    for fixture in fixtures:
        first_half = canonicalise_scoreline(fixture, team, fixture.first_half())
        second_half = canonicalise_scoreline(fixture, team, fixture.second_half())
        full_time = canonicalise_scoreline(fixture, team, fixture.full_time())

        colors.append([neutral_color,
                       neutral_color,
                       neutral_color,
                       decide_cell_color(first_half, team_color, other_color, neutral_color, unknown_color),
                       decide_cell_color(second_half, team_color, other_color, neutral_color, unknown_color),
                       decide_cell_color(full_time, team_color, other_color, neutral_color, unknown_color)])

        # Work out the opponent and whether we played at home or away.
        if fixture.home_team == team:
            opponent_team = fixture.away_team
            venue = Venue.home
        else:
            opponent_team = fixture.home_team
            venue = Venue.away

        row = ['{}'.format(fixture.date.strftime('%Y-%m-%d')),
               opponent_team.name,
               venue.name[0].upper(),
               str(first_half) if first_half else '',
               str(second_half) if second_half else '',
               str(full_time)]
        table.append(row)

    df = pd.DataFrame(table)
    df.columns = ['Date', 'Opponent', 'Venue', '1st', '2nd', 'FT']

    # The returned Table artist is not needed; matplotlib keeps it on the axes.
    ax.table(cellText=df.values,
             colLabels=df.columns,
             colLoc='left',
             colColours=[neutral_color] * len(df.columns),
             colWidths=[0.2, 0.4, 0.1, 0.1, 0.1, 0.1],
             cellColours=colors,
             cellLoc='left',
             fontsize=16,
             loc='upper center')
    ax.set_title('Form', fontstyle='italic')
    ax.axis('off')
def create_league_table(ax,
                        this_season: Season,
                        team: Team,
                        team_color: str,
                        other_color: str,
                        venue: Venue,
                        half: Half):
    """Render the season's league table on *ax*, restricted to *venue* and
    (optionally) *half*; the picked team's row is highlighted in team_color.

    Cleanup over the previous revision: the dead ``team_length`` accumulator
    and the unused enumerate index were removed (behaviour unchanged).
    """
    league_table = LeagueTable(this_season, half)
    display_table = []
    colors = []
    for league_row in league_table:
        display_row = [league_row.TEAM.name]
        # Pick the per-venue columns (home-only, away-only, or combined).
        if venue == Venue.home:
            display_row.extend([league_row.HW + league_row.HD + league_row.HL,
                                league_row.HW, league_row.HD, league_row.HL, league_row.HF, league_row.HA])
            wins = league_row.HW
            draws = league_row.HD
        elif venue == Venue.away:
            display_row.extend([league_row.AW + league_row.AD + league_row.AL,
                                league_row.AW, league_row.AD, league_row.AL, league_row.AF, league_row.AA])
            wins = league_row.AW
            draws = league_row.AD
        else:
            display_row.extend([league_row.W + league_row.D + league_row.L,
                                league_row.W, league_row.D, league_row.L, league_row.F, league_row.A])
            wins = league_row.W
            draws = league_row.D

        # Standard 3-1-0 points scheme.
        pts = wins * 3 + draws
        display_row.append(pts)
        display_table.append(display_row)

    # Sort by points (last column), best first.
    display_table.sort(key=lambda row: row[-1], reverse=True)

    for display_row in display_table:
        if display_row[0] == team.name:
            colors.append([team_color] * len(display_row))
        else:
            colors.append([other_color] * len(display_row))

    df = pd.DataFrame(display_table)
    df.columns = ['Team', 'Played', 'W', 'D', 'L', 'F', 'A', 'PTS']

    ax.table(cellText=df.values,
             colLabels=df.columns,
             colLoc='left',
             colColours=[other_color] * len(df.columns),
             colWidths=[0.3, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1],
             cellColours=colors,
             cellLoc='left',
             loc='upper center')

    title = 'League table'
    if half:
        title = '{} ({} half)'.format(title, half.name)
    ax.set_title(title, fontstyle='italic')
    ax.axis('off')
def main(args: Namespace):
    """Load league/team data and show a side-by-side figure of the picked
    team's recent form and the current league table."""
    set_matplotlib_defaults()
    load_teams(args.database)
    league = league_register[get_unique_league(args)]
    load_league(args.database, league)

    (row,) = extract_picked_team(args.database, get_unique_team(args), league)
    team = Team.inventory[row[0]]

    seasons = Season.seasons(league)
    # The most recent season is the one displayed.
    this_season = seasons.pop()

    fixtures = get_finished_matches(args.database, this_season, team, args.venue)
    if fixtures:
        nrows = 1
        ncols = 2
        fig, axs = plt.subplots(nrows=nrows, ncols=ncols, figsize=(15, 10), squeeze=False, constrained_layout=True)

        team_color = '#70A3CC'
        other_team_color = '#FA8072'
        neutral_color = '#FFF0FF'
        unknown_color = '#FFF0FF'
        # Left panel: recent results; right panel: league table.
        create_results_table(axs[0, 0], team, fixtures, team_color, other_team_color, neutral_color, unknown_color)
        create_league_table(axs[0, 1], this_season, team, team_color, neutral_color, args.venue, args.half)

        title = '{} {}: {}'.format(league.country, league.name, team.name)
        if args.venue == Venue.any:
            title = '{} ({} or {})'.format(title, Venue.home.name, Venue.away.name)
        else:
            title = '{} ({} only)'.format(title, args.venue.name)

        fig.suptitle(title, fontweight='bold')
        plt.show(block=args.block)
    else:
        warning_message("No data to display for {} in {} {}".format(team.name, league.country, league.name))
if __name__ == '__main__':
    # Script entry point: parse CLI args, configure logging, then render.
    args = parse_command_line()
    set_logging_options(args)
    main(args)
|
17,161 | 2846efdd4646dfd6cbc2eb53036ec6f548b4d234 | '''unzip:
unzips a file
usage: unzip file [destination]
'''
from .. tools.toolbox import bash,pprint
import zipfile,os
def main(self, line):
    """unzip a zip archive"""
    # filename with optional destination
    args = bash(line)
    if args is None:
        return
    elif not (1 <= len(args) <= 2):
        print "unzip: Usage: unzip file [destination]"
    else:
        filename = os.path.abspath(args[0])
        if not os.path.isfile(filename):
            print "unzip: %s: No such file" % args[0]
        else:
            # PK magic marker check
            f = open(filename)
            try:
                pk_check = f.read(2)
            except Exception:
                pk_check = ''
            finally:
                f.close()
            if pk_check != 'PK':
                print "unzip: %s: does not appear to be a zip file" % args[0]
            else:
                # Default destination: archive name without .zip, or
                # "<name>_unzipped", next to the archive itself.
                if (os.path.basename(filename).lower().endswith('.zip')):
                    altpath = os.path.splitext(os.path.basename(filename))[0]
                else:
                    altpath = os.path.basename(filename) + '_unzipped'
                altpath = os.path.join(os.path.dirname(filename), altpath)
                # args[1:2] yields [] when no destination was given.
                location = (args[1:2] or [altpath])[0]
                if (os.path.exists(location)) and not (os.path.isdir(location)):
                    print "unzip: %s: destination is not a directory" % location
                    return
                elif not os.path.exists(location):
                    os.makedirs(location)
                zipfp = open(filename, 'rb')
                try:
                    zipf = zipfile.ZipFile(zipfp)
                    # check for a leading directory common to all files and remove it
                    dirnames = [os.path.join(os.path.dirname(x), '') for x in zipf.namelist()]
                    common_dir = os.path.commonprefix(dirnames or ['/'])
                    # Check to make sure there aren't 2 or more sub directories with the same prefix
                    if not common_dir.endswith('/'):
                        common_dir = os.path.join(os.path.dirname(common_dir), '')
                    for name in zipf.namelist():
                        data = zipf.read(name)
                        fn = name
                        # Strip the shared leading directory from each entry.
                        if common_dir:
                            if fn.startswith(common_dir):
                                fn = fn.split(common_dir, 1)[-1]
                            elif fn.startswith('/' + common_dir):
                                fn = fn.split('/' + common_dir, 1)[-1]
                        fn = fn.lstrip('/')
                        fn = os.path.join(location, fn)
                        dirf = os.path.dirname(fn)
                        if not os.path.exists(dirf):
                            os.makedirs(dirf)
                        if fn.endswith('/'):
                            # A directory
                            if not os.path.exists(fn):
                                os.makedirs(fn)
                        else:
                            fp = open(fn, 'wb')
                            try:
                                fp.write(data)
                            finally:
                                fp.close()
                except Exception:
                    # NOTE(review): zipfp is closed here and again in the
                    # finally clause below; the second close is a no-op.
                    zipfp.close()
                    print "unzip: %s: zip file is corrupt" % args[0]
                    return
                finally:
                    zipfp.close()
|
17,162 | 65891686956a7309ecc97151b8cd6407dbb3fd03 | def is_prime(num):
    # Trial division up to sqrt(num).
    # NOTE(review): returns True for num < 2 (0, 1, negatives); the caller
    # below only passes values >= 3 so this is benign here, but add a
    # "num < 2" guard before reusing this function elsewhere.
    for i in range(2, int(num**0.5)+1):
        if num % i == 0:
            return False
    return True
# Find the 10001st prime (Project Euler #7): 'index' counts primes seen so
# far, starting from the known prime 2.
index = 1
number = 2
while (index != 10001):
    number += 1
    if is_prime(number):
        index += 1
print(number)
|
17,163 | 63c3d7f6fdd28a82c5c8326b35d19b7e31da2034 | #-
# Copyright (c) 2016 Alfredo Mazzinghi
# All rights reserved.
#
# This software was developed by SRI International and the University of
# Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
# ("CTSRD"), as part of the DARPA CRASH research programme.
#
# @BERI_LICENSE_HEADER_START@
#
# Licensed to BERI Open Systems C.I.C. (BERI) under one or more contributor
# license agreements. See the NOTICE file distributed with this work for
# additional information regarding copyright ownership. BERI licenses this
# file to you under the BERI Hardware-Software License, Version 1.0 (the
# "License"); you may not use this file except in compliance with the
# License. You may obtain a copy of the License at:
#
# http://www.beri-open-systems.org/legal/license-1-0.txt
#
# Unless required by applicable law or agreed to in writing, Work distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
# @BERI_LICENSE_HEADER_END@
#
import logging

import numpy as np
from matplotlib import pyplot as plt

from graph_tool.all import (
    GraphView,
    arf_layout,
    bfs_iterator,
    graph_draw,
    sfdp_layout,
)

from cheriplot.plot.provenance.provenance_plot import PointerProvenancePlot
logger = logging.getLogger(__name__)
class PointerTreePlot(PointerProvenancePlot):
    """
    Plot the pointer tree.

    Node sizes are rescaled linearly from each capability's length into
    the [min_size, max_size] point range before drawing.
    """

    def plot(self):
        # sfdp_layout must be imported from graph_tool.all (it was
        # previously referenced without an import, raising NameError).
        layout = sfdp_layout(self.dataset)
        # also arf_layout? not sure how easy is to draw a tree with multiple roots
        # if we want to see features there
        node_sizes = np.empty(self.dataset.num_vertices())
        for idx, v in enumerate(self.dataset.vertices()):
            data = self.dataset.vp.data[v]
            node_sizes[idx] = data.length
        # normalize in the range min_size, max_size
        min_size = 5
        max_size = 50
        node_min = np.min(node_sizes) or 1
        node_max = np.max(node_sizes)
        if node_max == node_min:
            # All capabilities share one length: the linear rescale below
            # would divide by zero, so fall back to a uniform mid-range size.
            node_sizes = np.full_like(node_sizes, (min_size + max_size) / 2)
        else:
            # Solve a*x + b so that node_min -> min_size, node_max -> max_size.
            b = (node_min * max_size - min_size * node_max) / (node_min - node_max)
            a = (min_size - b) / node_min
            node_sizes = a * node_sizes + b
        # NOTE(review): layout/node_sizes are currently unused because the
        # graph-drawing call was disabled; kept for when it is re-enabled.
        plt.axis("off")
        plt.savefig(self._get_plot_file())
class ProvenanceTreePlot(PointerProvenancePlot):
    """
    Plot a part of the provenance tree with the given node(s).
    The idea here is to have a capability that we know and we
    ask where in the provenace tree it is, so we want to see
    the parents up to the root and all children.
    """

    def __init__(self, target_cap, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.target_cap = target_cap
        """The cycles number of the capability to display"""
        self.view = None

    def init_axes(self):
        """Build the figure and axes for the plot."""
        # NOTE(review): self.fig/self.ax used in plot() are presumably bound
        # from this return value by the base class -- confirm.
        plt.switch_backend("cairo")
        fig = plt.figure(figsize=(15,10))
        ax = fig.add_axes([0.05, 0.15, 0.9, 0.80,])
        return (fig, ax)

    def build_dataset(self):
        # mask the graph nodes that are neither predecessors nor
        # successors of the target node
        super().build_dataset()
        # find the target vertex with the given target capability t_alloc
        target = None
        for v in self.dataset.vertices():
            data = self.dataset.vp.data[v]
            if data.cap.t_alloc == self.target_cap:
                target = v
                break
        if target is None:
            logger.error("No node with %d creation time found", self.target_cap)
            raise RuntimeError("Node not found")
        # get related (successors and predecessors) nodes
        related = [target]
        # Walk ancestors up to the root: follow the single in-edge chain.
        pred = target
        while pred is not None:
            pred_iter = pred.in_neighbours()
            try:
                # there should always be only one predecessor or none here
                pred = next(pred_iter)
                related.append(pred)
            except StopIteration:
                pred = None
        # Collect all descendants via BFS from the target.
        for edge in bfs_iterator(self.dataset, target):
            related.append(edge.target())
        logger.debug("Related nodes %s", related)
        # NOTE(review): 'v in related' is a linear scan per vertex (O(V*R));
        # a set of vertex indices would be faster on large graphs.
        self.view = GraphView(self.dataset, vfilt=lambda v: v in related)

    def plot(self):
        # Draw only the filtered view (target + ancestors + descendants).
        layout = arf_layout(self.view)
        # also arf_layout? not sure how easy is to draw a tree with multiple roots
        # if we want to see features there
        self.ax.set_axis_off()
        graph_draw(self.view, pos=layout, mplfig=self.ax)
        logger.debug("Plot done")
        plt.savefig(self._get_plot_file())
|
17,164 | 95a1cd4c8fdd8a1a72de74284de7f6032a4bbcd7 | from random import randint
def create_matrix(length):
    """Return a *length* x *length* matrix of random ints in [0, 10]."""
    # Comprehension replaces the nested append loops; randint is called in
    # the same row-major order, so seeded output is unchanged.
    return [[randint(0, 10) for _ in range(length)] for _ in range(length)]
matrix = create_matrix(2)
def sum_of_elements_by_divider(matrix, divider):
    """Return the sum of all matrix entries evenly divisible by *divider*."""
    return sum(
        value
        for row in matrix
        for value in row
        if value % divider == 0
    )
# Show the matrix and the sum of its elements divisible by 3.
print(matrix)
print(sum_of_elements_by_divider(matrix,3)) |
17,165 | 100abce572e901455441ce59942621389098f124 | segundos_str = input ("Por favor, entre com o nรบmero de segundos que deseja converter: ")
# Convert a raw second count to days / hours / minutes / seconds.
total_segs = int(segundos_str)
dias = total_segs // 86400
# Hours within the last partial day (86400 s/day, 3600 s/hour).
horas = total_segs % 86400//3600
segs_restantes = total_segs % 3600
minutos = segs_restantes // 60
segs_restantes_final = segs_restantes % 60
print(dias,"dias,",horas,"horas,",minutos,"minutos e",segs_restantes_final,"segundos.")
|
17,166 | 19c1f9e7f7e88bc2606420adbff366c3e9774a4b | from rest_framework.permissions import BasePermission, SAFE_METHODS
from pages.models import ProductCategory
class OwnerWritePerm(BasePermission):
    """Object-level guard: reads are open to everyone, writes require
    the requesting user to own the object."""

    message = 'Manipulating objects is restricted to owner only'

    def has_object_permission(self, request, view, obj):
        # Safe (read-only) methods always pass; mutating methods must come
        # from the object's owner.
        return request.method in SAFE_METHODS or obj.belongs_to == request.user
class OwnsCategoryPerm(BasePermission):
    """Request-level guard: a mutating request that names a category may
    only target a category owned by the requesting user."""

    message = "The specified category belongs to someone else"

    def has_permission(self, request, view):
        if request.method in SAFE_METHODS:
            return True
        category = request.data.get('category', None)
        if category:
            try:
                owner = ProductCategory.objects.get(pk=category).belongs_to
            except ProductCategory.DoesNotExist:
                # An unknown category id previously escaped as an unhandled
                # DoesNotExist (HTTP 500); treat it as a permission failure.
                return False
            if owner != request.user:
                return False
        return True
|
17,167 | 77d4470df68922e2c49ffac6bad89aa12e0dc04a | # Logger.py 14/05/2016 D.J.Whale
#
# A simple logger - logs to file.
import os, time
try:
# Python 3
from .energenie import OpenThings
except ImportError:
# Python 2
from energenie import OpenThings
# Output CSV path; created with the heading row on first use, appended to
# on subsequent runs.
LOG_FILENAME = "energenie.csv"
# Column order written by logMessage(); 'flags' is a bitstring recording
# which of the optional fields were present in each message.
HEADINGS = 'timestamp,mfrid,prodid,sensorid,flags,switch,voltage,freq,reactive,real,apparent,current,temperature'
# Lazily opened file handle shared by all logMessage() calls.
log_file = None
def trace(msg):
    """Diagnostic helper: write msg (coerced to str) to stdout."""
    text = str(msg)
    print(text)
def logMessage(msg):
    """Append one OpenThings message to the CSV log.

    Opens LOG_FILENAME lazily (writing the heading row only when the file
    is new), then writes one row with a flags bitstring marking which
    optional parameters were present.
    """
    global log_file
    if log_file == None:
        if not os.path.isfile(LOG_FILENAME):
            log_file = open(LOG_FILENAME, 'w')
            log_file.write(HEADINGS + '\n')
        else:
            log_file = open(LOG_FILENAME, 'a') # append
    # get the header
    header = msg['header']
    timestamp = time.time()
    mfrid = header['mfrid']
    productid = header['productid']
    sensorid = header['sensorid']
    # set defaults for any data that doesn't appear in this message
    # but build flags so we know which ones this contains
    flags = [0 for i in range(8)]
    switch = None
    voltage = None
    freq = None
    reactive = None
    real = None
    apparent = None
    current = None
    temperature = None
    # capture any data that we want
    ##trace(msg)
    for rec in msg['recs']:
        paramid = rec['paramid']
        try:
            value = rec['value']
        except:
            value = None
        # Both SWITCH_STATE and DOOR_SENSOR map onto the 'switch' column.
        if paramid == OpenThings.PARAM_SWITCH_STATE:
            switch = value
            flags[0] = 1
        elif paramid == OpenThings.PARAM_DOOR_SENSOR:
            switch = value
            flags[0] = 1
        elif paramid == OpenThings.PARAM_VOLTAGE:
            flags[1] = 1
            voltage = value
        elif paramid == OpenThings.PARAM_FREQUENCY:
            flags[2] = 1
            freq = value
        elif paramid == OpenThings.PARAM_REACTIVE_POWER:
            flags[3] = 1
            reactive = value
        elif paramid == OpenThings.PARAM_REAL_POWER:
            flags[4] = 1
            real = value
        elif paramid == OpenThings.PARAM_APPARENT_POWER:
            flags[5] = 1
            apparent = value
        elif paramid == OpenThings.PARAM_CURRENT:
            flags[6] = 1
            current = value
        elif paramid == OpenThings.PARAM_TEMPERATURE:
            flags[7] = 1
            temperature = value
    # generate a line of CSV
    flags = "".join([str(a) for a in flags])
    csv = "%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s" % (timestamp, mfrid, productid, sensorid, flags, switch, voltage, freq, reactive, real, apparent, current, temperature)
    log_file.write(csv + '\n')
    # Flush per message so the log survives abrupt termination.
    log_file.flush()
    ##trace(csv) # testing
# END
|
17,168 | ab563be577533be118943791b38102dcd1f2050f | import generic_interface
import pipeline_utils
# Severity hierarchy: fatal-to-manager > fatal-to-job > non-fatal.
class QueueManagerFatalError(pipeline_utils.PipelineError):
    """This error should be used when the queue manager has
    a fatal error. The queue manager will be stopped.
    The job/action currently being processed will be left
    in whatever state it is in at the time the error occurred.
    """
    pass
# Fatal to the current job only; the manager keeps running.
class QueueManagerJobFatalError(pipeline_utils.PipelineError):
    """This error should be used when the queue manager has an
    error that should cause the job being processed to be
    marked as 'failed', but the queue manager can continue
    running.
    """
    pass
# Informational: neither the manager nor the current job is aborted.
class QueueManagerNonFatalError(pipeline_utils.PipelineError):
    """This error should be used when the queue manager demonstrates
    some behaviour that is considered non-fatal to both the
    queue manager and the job/action that was being processed.
    """
    pass
|
17,169 | 8fd9504c13c2cb95867f05ceae224f7eef638d60 | d = int(input('ะะฒะตะดะธัะต ะดะธะฐะผะตัั ะพะบััะถะฝะพััะธ: '))
# Ask whether to compute area or perimeter from the diameter d.
ะฒัะฑะพั = input("ะะปะพัะฐะดั ะธะปะธ ะฟะตัะธะผะตัั?:")
# Area from diameter: pi * d^2 / 4.
if ะฒัะฑะพั == "ะะปะพัะฐะดั":
    print("ะะปะพัะฐะดั ะพะบััะถะฝะพััะธ = ",float(d**2/4*3.14))
# Circumference from diameter: pi * d.
# NOTE(review): any other input silently prints nothing.
if ะฒัะฑะพั == "ะะตัะธะผะตัั":
    print("ะะตัะธะผะตัั ะพะบััะถะฝะพััะธ = ",float(d*3.14))
|
17,170 | 7510a31cd6784a5c7da188100feb21d8e96b8a65 | from decouple import config
import gdown
import zipfile
#1 Descargar wav
#1 Descargar wav
# Download the raw audio archive from the configured URL, then extract it.
url = config("RAW_DATA")
name_raw = config("FOLDER_NAME")
output = config("RAW_FOLDER")+name_raw
gdown.download(url, output, quiet=False)
# Descomprimir
with zipfile.ZipFile(output, 'r') as zip_ref:
    zip_ref.extractall(config("RAW_FOLDER"))
#2 Descargar csv
# Same flow for the CSV metadata archive.
url = config("CSV_DATA")
name_raw = config("CSV_FILE")
output = config("CSV_FOLDER")+name_raw
gdown.download(url, output, quiet=False)
# Descomprimir
with zipfile.ZipFile(output, 'r') as zip_ref:
    zip_ref.extractall(config("CSV_FOLDER"))
17,171 | 427ce703f09179b58293160189f43ca59586acc4 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import *
from django.shortcuts import get_object_or_404
class NeighbourhoodForm(forms.ModelForm):
    """Single-field form for creating/editing a Neighbourhood."""
    class Meta:
        model = Neighbourhood
        fields = ('neighbourhood',)
class ProfileForm(forms.ModelForm):
    """Profile editor; user and hood are set in the view, not by the form."""
    class Meta:
        model = Profile
        exclude = ['user', 'hood'] |
17,172 | 1cf7b57507a7e0e8a94a41a4ea973ec05f8afd0c | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 14 13:46:15 2017
@author: QXXV1697
"""
import math
import usefulFunctions as uf
#from sklearn.naive_bayes import MultinomialNB
#import numpy as np
#spamWords = tokenizeFromList(spam)
#hamWords = tokenizeFromList(ham)
def calculateProbability(wordCount, totalSpam, totalHam, k = 0.5):
    """Laplace-smoothed per-word spam/ham probabilities.

    *wordCount* maps word -> [spam occurrences, ham occurrences]; returns
    word -> [P(word|spam), P(word|ham)] using smoothing constant *k*.
    """
    # Dict comprehension replaces the manual key loop with explicit
    # unpacking of the two counts.
    return {
        word: [(spam + k) / (totalSpam + 2 * k), (ham + k) / (totalHam + 2 * k)]
        for word, (spam, ham) in wordCount.items()
    }
def spamProba(message, wordProba):
    """Compute the 'log-likelihood ratio' that the message is a spam
    (spam if result > 0).

    *wordProba* maps each vocabulary word to [P(word|spam), P(word|ham)].
    """
    # A set makes the per-word membership test O(1); the original scanned
    # the tokenized message list once per vocabulary word.
    messageWords = set(uf.tokenizeMessage(message))
    logProbSpam = logProbHam = 0.0
    for word, (pSpam, pHam) in wordProba.items():
        if word in messageWords:
            logProbSpam += math.log(pSpam)
            logProbHam += math.log(pHam)
        else:
            # Word absent from the message: use complement probabilities.
            logProbSpam += math.log(1 - pSpam)
            logProbHam += math.log(1 - pHam)
    return logProbSpam - logProbHam
class NaiveBayesClassifier:
    """Naive Bayes spam classifier with Laplace smoothing constant k."""

    def __init__(self,k = 0.5):
        self.k = k
        # word -> [P(word|spam), P(word|ham)], filled in by train().
        self.wordProba = {}

    def train(self, trainingSet):
        # trainingSet: sequence of (message, isSpam) pairs.
        numSpam = len([isSpam for message,isSpam in trainingSet if isSpam])
        numHam = len(trainingSet) - numSpam
        wordCount = uf.countWords(trainingSet)
        self.wordProba = calculateProbability(wordCount, numSpam, numHam, self.k)

    def classify(self, message):
        # Returns the log-likelihood ratio; positive => spam.
        return spamProba(message, self.wordProba)
# Load the raw corpora (one message per line) and label them.
with open("emails/spam.txt","r", encoding = "utf-8") as file:
    spam = file.readlines()
with open("emails/ham.txt", "r", encoding = "utf-8") as file:
    ham = file.readlines()
totalSet = [[message,True] for message in spam] + [[message,False] for message in ham]
# 75% train / 25% test split, then score every held-out message.
trainingSet, testSet = uf.splitData(totalSet, 0.75)
classifier = NaiveBayesClassifier()
classifier.train(trainingSet)
classified = [(message, isSpam, classifier.classify(message)) for message, isSpam in testSet]
|
17,173 | 453112694c0e94e27e0ced9fc8b335de74b35ecc | #Daniel Brestoiu
#Symbolic Expression Calculator
import re
import sys
from sympy import simplify
# Maps the spelled-out operation names of the input language to the
# arithmetic operator string handed to sympy.
FUNCTION_DICT = {"add": "+", "multiply": "*", "subtract": "-", "divide": "/"}
#REGEX_PATTERN = "(\([0-9A-Za-z]+\ [0-9]+\ [0-9]+\))"
# Matches an innermost "(name num num ...)" group containing no nested
# parentheses; evaluation proceeds inside-out.
REGEX_PATTERN = "(\([0-9A-Za-z]+\ [0-9\ ]+\ +[0-9]+\))"
def parse_input():
    """Return the first command-line argument: the expression to evaluate."""
    return sys.argv[1]
def is_int(string:str) -> bool:
    """Return True when *string* converts cleanly to an int."""
    try:
        int(string)
    except (TypeError, ValueError):
        # Narrowed from a bare ``except``: only conversion failures mean
        # "not an int"; anything else should propagate.
        return False
    return True
def regex_splitter(input_string: str) -> str:
    """ Regex Splitter attempts to split the input string into useful components based on
    the regex that has been defined.
    Returns input as grouped per defined regex.

    Finds every innermost "(op n n ...)" group, evaluates each, and
    substitutes the results back into the string (one reduction pass).
    """
    #grouping_list: list = re.compile(REGEX_PATTERN).split(input_string)
    compiled_regex = re.compile(REGEX_PATTERN)
    mo = compiled_regex.findall(input_string)
    #print("Current found matches are:" + str(mo))
    result = evaluator_indefinite(mo)
    #print("Dictionary of evaluations" + str(result))
    new_string = input_string
    # Replace each matched group text with its evaluated value.
    for match in mo:
        new_string = new_string.replace(str(match), str(result[match]))
        #print("Current string modified with new value: " + new_string)
    return new_string
def evaluator_indefinite(match_group: list) -> dict:
    """Evaluate each "(op n n ...)" group text and map it to its result.

    The operator is applied left-to-right across all operands, e.g.
    "(add 1 2 3)" folds as ((1 + 2) + 3).
    """
    results = {}
    for expr in match_group:
        # Strip the surrounding parentheses, then split into tokens:
        # the operation name followed by its operands.
        tokens = expr[1:-1].split()
        op = FUNCTION_DICT[tokens[0]]
        acc = tokens[1]
        for operand in tokens[2:]:
            acc = evaluator(op, acc, operand)
        results[expr] = acc
    return results
def evaluator(operator: str, value1: str, value2: str) -> str:
    """Combine two numeric strings with *operator* and return the result.

    The three strings are concatenated into one expression (e.g. "3"+"+"+"4"
    -> "3+4") and the arithmetic is delegated to sympy's simplify.
    """
    expression = "".join((value1, operator, value2))
    return str(simplify(expression))
def input_parser(input_string: str) -> str:
    """
    input_parser takes in a string input of the form (add ... ...) or
    (multiply ... ...) and returns the result of the calculation as a
    plain integer string.
    """
    if is_int(input_string):
        # Already a bare integer: nothing to evaluate.
        return input_string
    try:
        reduced: str = regex_splitter(input_string.strip())
        # Keep collapsing innermost "(op ...)" groups until only a
        # number remains.
        while not (is_int(reduced)):
            reduced = regex_splitter(reduced)
        return reduced
    except Exception as exc:
        # Preserve the underlying failure as __cause__ instead of
        # silently discarding it (previously `raise Exception(...)` alone).
        raise Exception("Invalid Input") from exc
def tests() -> None:
    """ Run test cases to verify functionality"""
    # Covers: passthrough ints, nesting, multiple operands per group,
    # and all four operations.
    assert input_parser("123") == '123'
    assert input_parser("(add 12 12)") == '24'
    assert input_parser("(add 0 (add 3 4))") == '7'
    assert input_parser("(add 3 (add (add 3 3) 3))") == '12'
    assert input_parser("(multiply 3 (multiply (multiply 3 3) 3))") == '81'
    assert input_parser("(multiply 2 (multiply 3 4))") == '24'
    assert input_parser("(multiply 0 (multiply 3 4))") == '0'
    assert input_parser("(add 4 1)") == '5'
    assert input_parser("(multiply 4 1)") == '4'
    assert input_parser("(add 4 (add 1 8))") == '13'
    assert input_parser("(add (add 1 8) 4)") == '13'
    assert input_parser("(multiply (multiply 1 2) 12)") == '24'
    assert input_parser("(multiply 4 (multiply 8 12))") == '384'
    assert input_parser("(add (multiply 4 5) (multiply 10 10))") == '120'
    assert input_parser("(add (multiply (add 4 (add 3 (add 3 (add 3 (add 1 (multiply 4 5)))))) 5) (multiply 10 10))") == '270'
    assert input_parser("(add (multiply 4 5) (multiply 10 10) (add 1 2 3 4 5 6 7 (add 4 4) 9) (multiply 4 5))") == '185'
    assert input_parser('(subtract 2 1)') == '1'
    assert input_parser("(divide 55 5)") == '11'
def main(input: str) -> None:
    """ Main function"""
    # NOTE(review): parameter name 'input' shadows the builtin.
    print(input_parser(input))

# Entry point: evaluate the expression given on the command line.
if __name__ == "__main__":
    #tests()
    main(parse_input()) |
17,174 | 510bd7809c08015b2fa7fcb6e66c7ca6e35fbdf0 | import re
from .. import Provider as AutoProvider
class Provider(AutoProvider):
    """Implement license formats for ``az_AZ`` locale."""

    # "##-??-###": two-digit region prefix, two letters, three digits.
    license_formats = ("##-??-###",)

    # Latin letters permitted on Azerbaijani plates.
    ascii_uppercase_azerbaijan = "ABCDEFGHXIJKQLMNOPRSTUVYZ"

    # Valid region prefixes.  NOTE(review): "90" appears out of sequence
    # after "10" and "13" is absent -- presumably matching the official
    # region-code list; verify before "fixing" the ordering.
    license_plate_initial_numbers = (
        "01",
        "02",
        "03",
        "04",
        "05",
        "06",
        "07",
        "08",
        "09",
        "10",
        "90",
        "11",
        "12",
        "14",
        "15",
        "16",
        "17",
        "18",
        "19",
        "20",
        "21",
        "22",
        "23",
        "24",
        "25",
        "26",
        "27",
        "28",
        "29",
        "30",
        "31",
        "32",
        "33",
        "34",
        "35",
        "36",
        "37",
        "38",
        "39",
        "40",
        "41",
        "42",
        "43",
        "44",
        "45",
        "46",
        "47",
        "48",
        "49",
        "50",
        "51",
        "52",
        "53",
        "54",
        "55",
        "56",
        "57",
        "58",
        "59",
        "60",
        "61",
        "62",
        "63",
        "64",
        "65",
        "66",
        "67",
        "68",
        "69",
        "70",
        "71",
        "72",
        "77",
        "85",
    )

    def license_plate(self) -> str:
        """Generate a license plate."""
        # Replace each '?' in the chosen format with a random plate letter.
        temp = re.sub(
            r"\?",
            lambda x: self.random_element(self.ascii_uppercase_azerbaijan),
            self.random_element(self.license_formats),
        )
        # Substitute the leading "##" with a valid region prefix; numerify()
        # then fills the remaining '#' placeholders with random digits.
        temp = temp.replace("##", self.random_element(self.license_plate_initial_numbers), 1)
        # temp = temp.format(self.random_element(range(1, 999)))
        return self.numerify(temp)
|
17,175 | 43e05f030a2bce147cf166afd2e7604c384fabdb | from .signup import AccountViewSet
from .login import LogInView
|
17,176 | af6dfd4532b97a8c2173ffa0ec6eaf4edd777248 | """empty message
Revision ID: 4c58b3c1a250
Revises: daaa786774cc
Create Date: 2019-02-14 16:41:05.729050
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4c58b3c1a250'
down_revision = 'daaa786774cc'
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Replace the 10-char 'phonenumber' column with a wider 32-char
    # 'phonenumberstr'.  NOTE(review): values are not copied over, so any
    # existing phone numbers are lost on upgrade.
    op.add_column('user', sa.Column('phonenumberstr', sa.String(length=32), nullable=True))
    op.drop_column('user', 'phonenumber')
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Recreate the original narrow column; data written to 'phonenumberstr'
    # is likewise dropped, not migrated back.
    op.add_column('user', sa.Column('phonenumber', sa.VARCHAR(length=10), autoincrement=False, nullable=True))
    op.drop_column('user', 'phonenumberstr')
    # ### end Alembic commands ###
|
17,177 | d29574a659dc67b60bdfe5e6a1bd00ffdcbe3ce3 | #%% Import
import numpy as np
import matplotlib.pyplot as plt
import os
import importlib
import my_constants as mc
import my_utilities as mu
import chain_functions as cf
mc = importlib.reload(mc)
mu = importlib.reload(mu)
cf = importlib.reload(cf)
import time
os.chdir(mc.sim_folder + 'PMMA_sim')
#%%
m = np.load('harris_x_before.npy')
mw = np.load('harris_y_before_SZ.npy')
sample = np.zeros(1000000)
for i in range(len(sample)):
sample[i] = cf.get_chain_len(m, mw)
#%%
xx = np.load('harris_x_before.npy')
yy = np.load('harris_y_before_SZ.npy')
plt.semilogx(xx, yy / np.max(yy), label='Schulz-Zimm')
#lens = np.load('Harris_lens_arr.npy')
lens = sample
mass = lens*100
bins = np.logspace(2, 7.1, 51)
#bins = np.linspace(1e+2, 12e+6, 101)
hist, edges = np.histogram(mass, bins)
plt.semilogx(edges[:-1], hist/np.max(hist), label='sample')
plt.title('Harris initial molecular weight distribution')
plt.xlabel('molecular weight')
plt.ylabel('density')
plt.legend()
plt.grid()
plt.show()
#%%
source_dir = '/Volumes/ELEMENTS/Chains_Harris'
f_names = os.listdir(source_dir)
#%%
diams = np.zeros(len(f_names))
i = 0
for chain in f_names:
if 'chain' not in chain:
continue
mu.upd_progress_bar(i, len(f_names))
now_chain = np.load(source_dir + '/' + chain)
c_max = np.max(now_chain, axis=0)
c_min = np.min(now_chain, axis=0)
diams[i] = np.max(c_max - c_min)
i += 1
|
17,178 | 37613313306c67798de670323d4b3de3a4a57cb3 | from django.db import models
from django.utils.translation import ugettext_lazy as _
class Proform(models.Model):
    """Pro-forma invoice issued by a company to a client."""

    # Payment-condition choice keys.
    CASH = 'cash'
    CREDIT_CARD = 'credit_card'
    NEGOTIABLE = 'negotiable'
    TRANSFER = 'transfer'
    PAYMENT_CONDITIONS = (
        (CASH, _('Cash')),
        (CREDIT_CARD, _('Credit card')),
        (NEGOTIABLE, _('Negotiable')),
        (TRANSFER, _('Transfer')),
    )
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0;
    # on_delete becomes mandatory when upgrading.
    company = models.ForeignKey('works.Company',
                                related_name='proforms_company')
    client = models.ForeignKey('works.Client',
                               related_name='proforms_client')
    created = models.DateTimeField(auto_now_add=True)
    expire_on = models.DateTimeField()
    payment_conditions = models.CharField(max_length=100,
                                          choices=PAYMENT_CONDITIONS)
    total_to_pay = models.FloatField(default=0)

    def __unicode__(self):
        # Python 2 string representation: the primary key.
        return "%s" % self.id
class Report(models.Model):
    """Technical service report: intake, diagnosis, work done and handover."""

    datetime_in = models.DateTimeField()
    machine_data = models.TextField()
    problem = models.TextField()
    diagnosis = models.TextField()
    treatment = models.TextField()
    # Optional free-text sections.
    observations = models.TextField(blank=True, null=True)
    suggestions = models.TextField(blank=True, null=True)
    datetime_out = models.DateTimeField()
    technical = models.CharField(max_length=100)

    def __unicode__(self):
        # Python 2 string representation: the primary key.
        return "%s" % self.id
|
17,179 | 14a91f2f8c448708d6fc52383844d7931f6fa9da | # Generated by Django 2.2.9 on 2020-01-28 11:02
import brueckio.pages.blocks
from django.db import migrations
import wagtail.blocks
import wagtail.fields
import wagtail.images.blocks
class Migration(migrations.Migration):
    # Auto-generated: re-declares the identical StreamField block list on
    # the 'content' field of three page models (content, project overview,
    # project).  All three StreamField definitions are the same.

    dependencies = [
        ('pages', '0006_projectpage_description'),
    ]

    operations = [
        migrations.AlterField(
            model_name='contentpage',
            name='content',
            field=wagtail.fields.StreamField([('heading', wagtail.blocks.StructBlock([('heading', wagtail.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.blocks.CharBlock(label='Text', max_length=50))])), ('rich_text', brueckio.pages.blocks.RichTextBlock()), ('lead_text', brueckio.pages.blocks.LeadTextBlock()), ('contact_teaser', brueckio.pages.blocks.ContactTeaserBlock()), ('image', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.blocks.CharBlock())])), ('cv', wagtail.blocks.StructBlock([('when', wagtail.blocks.CharBlock()), ('what', wagtail.blocks.RichTextBlock())]))], blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='projectoverviewpage',
            name='content',
            field=wagtail.fields.StreamField([('heading', wagtail.blocks.StructBlock([('heading', wagtail.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.blocks.CharBlock(label='Text', max_length=50))])), ('rich_text', brueckio.pages.blocks.RichTextBlock()), ('lead_text', brueckio.pages.blocks.LeadTextBlock()), ('contact_teaser', brueckio.pages.blocks.ContactTeaserBlock()), ('image', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.blocks.CharBlock())])), ('cv', wagtail.blocks.StructBlock([('when', wagtail.blocks.CharBlock()), ('what', wagtail.blocks.RichTextBlock())]))], blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='projectpage',
            name='content',
            field=wagtail.fields.StreamField([('heading', wagtail.blocks.StructBlock([('heading', wagtail.blocks.ChoiceBlock(choices=[('h1', 'H1'), ('h2', 'H2'), ('h3', 'H3'), ('h4', 'H4'), ('h5', 'H5'), ('h6', 'H6')], label='Header Size')), ('text', wagtail.blocks.CharBlock(label='Text', max_length=50))])), ('rich_text', brueckio.pages.blocks.RichTextBlock()), ('lead_text', brueckio.pages.blocks.LeadTextBlock()), ('contact_teaser', brueckio.pages.blocks.ContactTeaserBlock()), ('image', wagtail.blocks.StructBlock([('image', wagtail.images.blocks.ImageChooserBlock()), ('description', wagtail.blocks.CharBlock())])), ('cv', wagtail.blocks.StructBlock([('when', wagtail.blocks.CharBlock()), ('what', wagtail.blocks.RichTextBlock())]))], blank=True, null=True),
        ),
    ]
|
17,180 | 73272c1f7365e79c8a1bcf9d2e25d1fdc10f5418 | from commons import get_tensor,get_model
import torch
import json
# Module-level setup: label mappings and the trained model, loaded once.
# NOTE(review): class_to_idx is loaded but not used in this module --
# presumably consumed elsewhere; verify before removing.
with open('class_to_idx_json.json') as f:
    class_to_idx = json.load(f)
with open('bird_to_name_json.json') as f:
    bird_to_name = json.load(f)
model = get_model()
def get_bird_name(image_bytes):
    """Classify raw image bytes; return (species name, 1-based class id)."""
    tensor = get_tensor(image_bytes)
    outputs = model.forward(tensor)
    # exp() of the outputs, then argmax over the class dimension.
    ps = torch.exp(outputs)
    _, prediction = ps.max(1)
    category = prediction.item()
    # NOTE(review): debug print left in; remove for production.
    print(prediction,"----------")
    # bird_to_name is keyed by 1-based string ids, hence the +1 offset --
    # presumably matching the training label indexing; confirm.
    bird_name = bird_to_name[str(category+1)]
    return bird_name, category+1
|
17,181 | 64f37947b124a13fe3724bc5df4c66fe01741e9e | #!/usr/bin/python
# -*- coding: utf-8 -*-
import MySQLdb as mdb
import RPi.GPIO as RPIO
import time
import telebot, config
# BOARD-numbered GPIO pin wired to the door reed switch.
door_sensor = 12
# Unbuffered append-mode log so entries survive crashes (Python 2 open).
door_log = open(config.log_path + 'door.log', 'a',0)
db_conect = mdb.connect(config.db_host, config.db_user, config.db_password , config.db_database);
bot = telebot.TeleBot(config.telegram_token)
@bot.message_handler(commands=['knock'])
def echo_msg(message):
    """Handle /knock: reply with a joke and log the interaction."""
    bot.send_message(message.chat.id, "Who's there?")
    door_log.write (time.strftime("%d.%m.%Y %H:%M:%S")+"\tBot: knok\r\n")
@bot.message_handler(commands=['start'])
def handle_start_help(message):
    """Handle /start: subscribe the sending chat to door notifications."""
    # NOTE(review): this function name is reused by the /stop handler
    # below; the decorator registers each before the name is rebound,
    # so both handlers still work.
    config.telegram_chatid = message.chat.id
    door_log.write (time.strftime("%d.%m.%Y %H:%M:%S")+"\tBot: start chating, chat ID_" + str(config.telegram_chatid) + "\r\n")
    bot.send_message(config.telegram_chatid, "Hi, i`m door-bot! Door sensor is activated.")
@bot.message_handler(commands=['stop'])
def handle_start_help(message):
    """Handle /stop: unsubscribe the current chat from door notifications."""
    door_log.write (time.strftime("%d.%m.%Y %H:%M:%S")+"\tBot: stop chating, chat ID_" + str(config.telegram_chatid) + "\r\n")
    bot.send_message(config.telegram_chatid, "Door sensor is deactivated.")
    # BUG FIX: this previously cleared config.CHATID, an attribute nothing
    # reads; write_door_state() checks config.telegram_chatid, so /stop
    # never actually deactivated notifications.  Clear it last so the
    # farewell message above can still be delivered.
    config.telegram_chatid = ""
def write_door_state(e):
    """GPIO edge callback: log, notify via Telegram, and persist the new
    door state ('open'/'close') to the database."""
    # Input low on the reed-switch pin is treated as "door opened".
    if RPIO.input(door_sensor)==0:
        door_log.write (time.strftime("%d.%m.%Y %H:%M:%S")+"\tOpen\r\n")
        # Always notify the root chat; also notify a subscribed chat if set.
        bot.send_message(config.telegram_root_chatid, "door OPENED!")
        if config.telegram_chatid:
            bot.send_message(config.telegram_chatid, "door OPENED!")
        with db_conect:
            cur = db_conect.cursor()
            cur.execute("INSERT INTO door(status) VALUES('open')")
            db_conect.commit()
            cur.close()
    else:
        door_log.write (time.strftime("%d.%m.%Y %H:%M:%S")+"\tClose\r\n")
        bot.send_message(config.telegram_root_chatid, "door CLOSED!")
        if config.telegram_chatid:
            bot.send_message(config.telegram_chatid, "door CLOSED!")
        with db_conect:
            cur = db_conect.cursor()
            cur.execute("INSERT INTO door(status) VALUES('close')")
            db_conect.commit()
            cur.close()
# Configure the GPIO pin and fire write_door_state() on both edges.
RPIO.setmode(RPIO.BOARD)
RPIO.setup(door_sensor, RPIO.IN)
RPIO.add_event_detect(door_sensor, RPIO.BOTH)
RPIO.add_event_callback(door_sensor, write_door_state)
try:
    print "Waiting for falling edge on port 12"
    #RPIO.wait_for_edge(door_sensor, RPIO.BOTH)
    if __name__ == '__main__':
        # NOTE(review): polling(none_stop=True) blocks indefinitely, so the
        # while-loop and final print below are effectively unreachable when
        # run as a script.
        bot.polling(none_stop=True)
    while True:
        time.sleep(1)
    print "Falling edge detected. Here endeth the second lesson."
except KeyboardInterrupt:
    RPIO.cleanup() # clean up GPIO on CTRL+C exit
RPIO.cleanup() # clean up GPIO on normal exit
db_conect.close()
17,182 | 0157a34de087767eb1af11b5e5e7e08c5b1759ff | import pytest
from reconcile.utils.external_resource_spec import ExternalResourceSpec
from reconcile.utils.terrascript.cloudflare_resources import (
UnsupportedCloudflareResourceError,
create_cloudflare_terrascript_resource,
)
def create_external_resource_spec(provision_provider):
    """Build a minimal ExternalResourceSpec for the given provider name."""
    namespace = {"name": "dev", "automationToken": {}}
    resource = {
        "provider": provision_provider,
        "identifier": "test",
    }
    return ExternalResourceSpec(provision_provider, namespace, resource, {})
def test_create_cloudflare_terrascript_resource_unsupported():
    # An unknown provision provider must raise, not silently fall through.
    spec = create_external_resource_spec("doesntexist")
    with pytest.raises(UnsupportedCloudflareResourceError):
        create_cloudflare_terrascript_resource(spec)
|
17,183 | c7081d15f2935e4316593de858dd632d7e191f6e | #!/usr/bin/env python
import cgi
import rtorrent
import torrentHandler
import random
import string
import os
import sys
import time
import login
import config
class Detail:
    """Builds the HTML detail page for a single torrent.

    All data is fetched from rtorrent over its XML-RPC socket; the finished
    page is left in ``self.HTML`` by the constructor.
    """

    def __init__(self, torrent_id, conf=config.Config()):
        # NOTE(review): the default ``config.Config()`` is evaluated once at
        # import time and shared by every call that omits *conf* -- confirm a
        # shared default Config instance is intended.
        self.Config = conf
        self.RT = rtorrent.rtorrent(self.Config.get("rtorrent_socket"))
        self.Handler = torrentHandler.Handler()
        # Page template; each %(key)s is supplied by _getInfo().
        # NOTE(review): the "Peers" table header declares 10 columns
        # (including Client and Completed) but peers() emits only 8 cells per
        # row, so those columns will misalign -- verify against peers().
        self.HTML = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>rTorrent - %(tname)s</title>
<link rel="stylesheet" type="text/css" href="/css/detail.css">
<link rel="stylesheet" type="text/css" href="/css/smoothness/jquery-ui-1.8.13.custom.css">
<link rel="stylesheet" type="text/css" href="/css/jquery.treeview.css">
<script src="/javascript/jquery-1.6.1.min.js" type="text/javascript"></script>
<script src="/javascript/jquery-ui-1.8.13.custom.min.js" type="text/javascript"></script>
<script src="/javascript/jquery.cookie.js" type="text/javascript"></script>
<!-- <script src="/javascript/jquery.jstree.js" type="text/javascript"></script> -->
<script src="/javascript/jquery.treeview.js" type="text/javascript"></script>
<script src="/javascript/detail.js" type="text/javascript"></script>
</head>
<body>
<div id="accordion">
<h3><a href="#">Torrent info</a></h3>
<div id="info_within">
<p>Name: %(tname)s</p>
<p>ID: %(tid)s</p>
<p>Created: %(tcreated)s</p>
<p>Path: %(tpath)s</p>
<p>Priority: %(tpriority)s</p>
<p class="%(tstate)s">State: %(tstate)s</p>
<p>Completion: %(tdone)s%%</p>
<p>Size: %(tsize)s</p>
<p>Ratio: %(tratio)s</p>
<p>Downloaded: %(tdownloaded)s</p>
<p>Uploaded: %(tuploaded)s</p>
<p>Upload Rate: %(tuprate)s</p>
<p>Download Rate: %(tdownrate)s</p>
<p>Leechers: %(tleechs_connected)s (%(tleechs_total)s)</p>
<p>Seeders: %(tseeds_connected)s (%(tseeds_total)s)</p>
</div>
<h3><a href="#">Peers</a></h3>
<div id="peers_within">
<div id="peers_table">
<table>
<tr>
<td class="heading">IP Address</td>
<td class="heading">Port</td>
<td class="heading">Client</td>
<td class="heading">Completed</td>
<td class="heading">Download Rate</td>
<td class="heading">Download Total</td>
<td class="heading">Upload Rate</td>
<td class="heading">Upload Total</td>
<td class="heading">Peer Rate</td>
<td class="heading">Peer Total</td>
</tr>
%(peer_table_rows)s
</table>
</div>
</div>
<h3><a href="#">File list</a></h3>
<div id="files_within">
<div>
%(filelist)s
</div>
</div>
<h3><a href="#">Tracker list</a></h3>
<div id="trackers_within">
<div id="trackers_table">
<table>
<tr>
<td class="heading">URL</td>
<td class="heading">Type</td>
<td class="heading">Announce Interval</td>
<td class="heading">Seeders</td>
<td class="heading">Leechers</td>
<td class="heading">Enabled</td>
</tr>
%(tracker_table_rows)s
</table>
</div>
</div>
</div>
</body>
</html>
""" % self._getInfo(torrent_id)

    def _getInfo(self, torrent_id):
        """Collect every substitution value used by the page template.

        Returns a dict keyed exactly like the template's %(...)s names.
        """
        #for use by other lines
        _size = self.RT.getSizeBytes(torrent_id)
        _trackers = self.RT.getTrackers(torrent_id)
        #end 'preload'
        #general info
        tname = self.RT.getNameByID(torrent_id)
        # NOTE(review): "%02d" etc. are not standard strftime directives (the
        # standard ones are already zero-padded); this relies on a
        # platform-specific extension -- confirm it renders as intended.
        tcreated = time.strftime("%02d/%02m/%Y %02H:%02M:%02S", time.localtime(self.RT.getCreationDate(torrent_id)))
        tpath = self.RT.getPath(torrent_id)
        tpriority = self.RT.getPriorityStr(torrent_id)
        tstate = self.RT.getStateStr(torrent_id)
        tsize = self.Handler.humanSize(_size)
        # Ratio is divided by 1000, i.e. rtorrent appears to report it in
        # per-mille -- TODO confirm against the rtorrent XML-RPC docs.
        tratio = "%.02f" % (float(self.RT.getRatio(torrent_id))/1000)
        tuploaded = self.Handler.humanSize(self.RT.getUploadBytes(torrent_id))
        tdownloaded = self.Handler.humanSize(self.RT.getDownloadBytes(torrent_id))
        # NOTE(review): divides by _size -- a zero-size torrent would raise
        # ZeroDivisionError here.
        tdone = "%.02f" % (100*(float(self.RT.conn.d.get_completed_bytes(torrent_id)) / _size))
        tuprate = "%s/s" % self.Handler.humanSize(self.RT.getUploadSpeed(torrent_id))
        tdownrate = "%s/s" % self.Handler.humanSize(self.RT.getDownloadSpeed(torrent_id))
        tseeds_connected = self.RT.conn.d.get_peers_complete(torrent_id)
        tseeds_total = sum([tracker.seeds for tracker in _trackers])
        tleechs_connected = self.RT.conn.d.get_peers_accounted(torrent_id)
        tleechs_total = sum([tracker.leechs for tracker in _trackers])
        #end general info
        #html inserts
        files = self.Handler.fileTreeHTML(self.RT.getFiles(torrent_id), self.RT.getRootDir())
        peer_table_rows = self.peers(torrent_id)
        tracker_table_rows = self.trackers(_trackers)
        #end html inserts
        return {
            "tid" : torrent_id,
            "tname" : tname,
            "tcreated" : tcreated,
            "tpath" : tpath,
            "tpriority" : tpriority,
            "tstate" : tstate,
            "tsize" : tsize,
            "tratio" : tratio,
            "tuploaded" : tuploaded,
            "tdownloaded" : tdownloaded,
            "tdone" : tdone,
            "tuprate" : tuprate,
            "tdownrate" : tdownrate,
            "tseeds_connected" : tseeds_connected,
            "tseeds_total" : tseeds_total,
            "tleechs_connected" : tleechs_connected,
            "tleechs_total" : tleechs_total,
            "peer_table_rows" : peer_table_rows,
            "filelist" : files,
            "tracker_table_rows": tracker_table_rows,
        }

    def peers(self, torrent_id):
        """Render one <tr> of statistics per connected peer."""
        PEER_ROW_TEMPLATE = """
<tr class="peer_tablerow">
<td>%(address)s</td>
<td>%(port)s</td>
<td>%(down_rate)s</td>
<td>%(down_total)s</td>
<td>%(up_rate)s</td>
<td>%(up_total)s</td>
<td>%(peer_rate)s</td>
<td>%(peer_total)s</td>
</tr>
"""
        PEER_HTML = ""
        for peer in self.RT.getPeers(torrent_id):
            peer_info = {
                "address" : peer.address,
                "port" : peer.port,
                "down_rate" : "%s/s" % self.Handler.humanSize(peer.down_rate),
                "down_total" : self.Handler.humanSize(peer.down_total),
                "up_rate" : "%s/s" % self.Handler.humanSize(peer.up_rate),
                "up_total" : self.Handler.humanSize(peer.up_total),
                "peer_rate" : "%s/s" % self.Handler.humanSize(peer.peer_rate),
                "peer_total" : self.Handler.humanSize(peer.peer_total)
            }
            PEER_HTML += PEER_ROW_TEMPLATE % peer_info
        return PEER_HTML

    def trackers(self, trackers):
        """Render one <tr> per tracker.

        Substitutes straight from each tracker object's __dict__, so the
        attribute names must match the %(...)s keys exactly.
        """
        TRACKER_ROW_TEMPLATE = """
<tr class="tracker_tablerow">
<td>%(url)s</td>
<td>%(type)s</td>
<td>%(interval)s</td>
<td>%(seeds)s</td>
<td>%(leechs)s</td>
<td>%(enabled)s</td>
</tr>
"""
        TRACKER_HTML = ""
        for tracker in trackers:
            TRACKER_HTML += TRACKER_ROW_TEMPLATE % tracker.__dict__
        return TRACKER_HTML
17,184 | 9e647ea246e9d04af4d4ef4e9a81295e9b189291 | from . import _WekanObject
from .colors import Colors
class List(_WekanObject):
    """A Wekan board list, populated from the REST API on construction."""

    def __init__(self, api, board_id, list_id: str):
        super().__init__(api, list_id)
        data = self._api.get(f"/api/boards/{board_id}/lists/{list_id}")
        # Fields copied verbatim from the API payload.
        for field in ("title", "starred", "archived", "swimlaneId",
                      "createdAt", "sort", "updatedAt", "modifiedAt",
                      "wipLimit", "type"):
            setattr(self, field, data.get(field))
        self.boardId = board_id
        # A missing/empty color stays None; otherwise map to the enum.
        raw_color = data.get("color")
        self.color = Colors[raw_color] if raw_color else None
|
17,185 | 099f38502d7d9fa5b77e70ed4f7474f73cb97b10 | import socket
udp_socket = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
address = ("192.168.137.1", 8080)
send_data = input("่ฏท่พๅ
ฅ่ฆๅ้็ๅ
ๅฎน๏ผ")
udp_socket.sendto(send_data.encode("utf-8"), address)
recv_data = udp_socket.recvfrom(1024)
print(recv_data[0].decode("gbk"))
print(recv_data[1])
udp_socket.close()
|
17,186 | 347b2d76891aa2f8649980b2742cffccd3424ead | import urllib
import mechanize
from bs4 import BeautifulSoup
import re
def getGoogleLinks(link, depth):
    """Scrape one page of Google results for *link* and return target URLs.

    *depth* is the zero-based result offset, passed as the ``start``
    query parameter (as a string).
    """
    browser = mechanize.Browser()
    browser.set_handle_robots(False)
    browser.addheaders = [('User-agent', 'chrome')]

    term = link.replace(" ", "+")
    #query = "http://www.google.com.br/search?num=100&q="+term+"&start="+depth
    query = "http://www.google.com.br/search?q=" + term + "&start=" + depth
    page = browser.open(query).read()

    # Narrow to the results <div id="search">, then walk its <li> entries.
    search_div = BeautifulSoup(page).findAll('div', attrs={'id': 'search'})
    list_items = BeautifulSoup(str(search_div[0])).findAll('li')

    # The last "q=...&" parameter of each redirect link holds the real URL.
    pattern = re.compile("q(?!.*q).*?&")

    results = []
    for item in list_items:
        anchors = BeautifulSoup(str(item)).findAll('a')
        matches = re.findall(pattern, str(anchors[0]))
        if matches:
            results.append(str(matches[0].replace("q=", "").replace("&", "")))
    return results
print getGoogleLinks("python","0") |
17,187 | efaabd751de8bf927baa5f54eeffe6ba10667f53 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# @time: 2020/5/30 13:14
#
|
17,188 | 5de3b5d7588b5bec43a5a175c0fd0be49a254a7a | from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
from numpy.distutils.misc_util import get_numpy_include_dirs

# Every extension follows the same "<name>.pyx -> <name>" pattern and needs
# the NumPy headers, so build the list from the module names.
_numpy_includes = get_numpy_include_dirs()
_pyx_modules = ("pxutil", "procrustesopt", "normalisedImageOpt",
                "simpleGbrt", "supraFeatures", "lazyhog")
ext_modules = [Extension(name, [name + ".pyx"], include_dirs=_numpy_includes)
               for name in _pyx_modules]

setup(
    name='supra',
    cmdclass={'build_ext': build_ext},
    ext_modules=ext_modules,
)

# Build in place with: python setup.py build_ext --inplace
|
17,189 | 086c26f957175976d19e9f733ea93a13acf1d352 | # https://adventofcode.com/2020/day/2
lines = []
def load_data(fileName):
    """Read *fileName* and store its whitespace-stripped lines in the
    module-level ``lines`` list."""
    global lines
    with open(fileName, "r") as input_data:
        # Strip each line in a single pass instead of reading with
        # readlines() and then mutating the list index-by-index.
        lines = [line.strip() for line in input_data]
def problemOne():
    """Part 1 placeholder.

    NOTE(review): the original count-based part-1 implementation
    (parse "low-high letter: password", count the letter, check the
    count is within [low, high]) was commented out here; identical
    logic currently lives in problemTwo()/is_valid(). Restore it here
    when untangling the two parts -- confirm which part each function
    is meant to solve.
    """
    pass
def problemTwo():
    """Count the passwords in ``lines`` that satisfy their policy.

    Each line looks like ``"1-3 a: abcde"``: an inclusive count range,
    a letter, and the password.

    NOTE(review): this validates by *occurrence count* (via is_valid),
    which is Advent of Code 2020 day 2 **part one's** rule; part two
    uses the two numbers as 1-based positions. Confirm which part this
    is meant to solve.
    """
    global lines
    print(lines)
    validPassCounter = 0
    for line in lines:
        # extraxt values
        print(line)
        parts = line.split(": ")
        p = parts[1]
        rules = parts[0]
        ruleParts = rules.split(" ")
        l = ruleParts[1]
        limits = ruleParts[0].split("-")
        low = int(limits[0])
        high = int(limits[1])
        print(p,low,high,l) #debug
        #check if valid
        if is_valid(p, low, high, l):
            validPassCounter += 1
    print(validPassCounter)
    return validPassCounter
def is_valid(password, min, max, letter):
    """Return True if *letter* occurs between *min* and *max* times
    (inclusive) in *password*.

    NOTE: the parameter names shadow the builtins ``min``/``max``;
    they are kept unchanged for backward compatibility with callers.
    """
    # str.count does the per-character tally in C instead of a manual loop,
    # and a chained comparison expresses the inclusive range directly.
    return min <= password.count(letter) <= max
# Script entry: expects "Day2-input.txt" next to this file (raises
# FileNotFoundError otherwise), then runs both parts.
load_data("Day2-input.txt")
problemOne()
problemTwo()
17,190 | ea61663cebd86a7de3946221b90eb8685f6e2843 | #!/usr/bin/env python3
def remove_item(file_name):
    """Interactively delete matching lines from *file_name* (or clear it)."""
    # The local deliberately shadows the function name; it only holds the
    # user's answer for this call.
    remove_item = input("Is there anything that you would like to delete? Type `clear` to clear your list. > ")
    if remove_item == "clear":
        # NOTE(review): clear_file is not defined in this file -- presumably
        # defined elsewhere; typing "clear" raises NameError otherwise.
        clear_file(file_name)
    else:
        with open(file_name,"r") as f:
            read = f.read().split("\n")
        with open(file_name,"w") as f:
            # NOTE(review): this is a *substring* match, so any line merely
            # containing the text is dropped; also split("\n") yields a
            # trailing "" element which gets "\n" appended, so the file
            # grows by one blank line per run -- confirm intended.
            for line in read:
                if remove_item not in line:
                    f.write(line + "\n")

remove_item("x.txt")
|
17,191 | f6d0d4090000ddfd4beaad99ffa8bdef310ec3d6 | """
File: myinfo.py
Project 1.2
Prints my name, address, and phone number.
"""
# The three lines of contact information, printed in order.
for _info_line in ("Ken Lambert", "Virginia", "345-9110"):
    print(_info_line)
|
17,192 | a3297e07a5086ee04a22646f2c5dad625bc5c8ae | """empty message
Revision ID: 8defef1bfe68
Revises: 1e2339618ea2
Create Date: 2019-08-21 14:30:16.084842
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# This migration's ID and its parent in the Alembic revision chain.
revision = '8defef1bfe68'
down_revision = '1e2339618ea2'
branch_labels = None
depends_on = None
def upgrade():
    """Add the nullable ``file.userId`` column."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('file', sa.Column('userId', sa.Integer(), nullable=True))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``file.userId`` column added by upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('file', 'userId')
    # ### end Alembic commands ###
|
17,193 | 8ddbb2c1ac689fb17e3ffbc05dc98e10f50a5791 |
class Medio:
    """A medium identified by a label.

    Attributes:
        etiqueta: the label (tag) that identifies this medium.
    """

    def __init__(self, etiqueta):
        self.etiqueta = etiqueta

    def __repr__(self):
        # Aid debugging: show the identifying label.
        return f"Medio(etiqueta={self.etiqueta!r})"
17,194 | f98aeeadd9f6f41a30eb54f7bc50123d721344c7 | # Generated by Django 2.2 on 2021-06-15 11:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated: retargets Collection.coins as a CASCADE foreign key
    # to Coin_in_Collection with related_name "collections".

    dependencies = [
        ('webapp', '0024_collection'),
    ]

    operations = [
        migrations.AlterField(
            model_name='collection',
            name='coins',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='collections', to='webapp.Coin_in_Collection', verbose_name='ะผะพะฝะตัั'),
        ),
    ]
|
17,195 | 88041b060b92ba508c077a54ca3029e020edf34a | """
Admin Commands cog for Talos.
Holds all commands relevant to administrator function, guild specific stuff.
Author: CraftSpider
"""
import discord
import discord.ext.commands as commands
import asyncio
import logging
import typing
import spidertools.common as utils
import spidertools.discord as dutils
import discord_talos.talossql as sql
from collections import defaultdict
#
# Admin Command Variables
#
# Configure Logging
log = logging.getLogger("talos.admin")

# Security keys, for security-locked commands. Keyed by str(guild id);
# the default "" means "no pending confirmation".
secure_keys = defaultdict(lambda: "")

# Default priority levels, per permission level (higher wins).
PRIORITY_LEVELS = {
    "guild": 10,
    "channel": 20,
    "role": 30,
    "user": 40
}
#
# Admin Cog Class
#
class AdminCommands(dutils.TalosCog):
    """These commands can only be used by Admins or Devs, and will work at any time.
    If no admins list is set, anyone with administrator role permission can use admin commands"""

    # Sort order of permission levels, least to most specific.
    LEVELS = {"guild": 0, "channel": 1, "role": 2, "user": 3}
    # Every command in this cog runs through the admin-only check.
    cog_check = dutils.admin_local
@commands.command(description="Changes Talos' nickname")
@commands.guild_only()
async def nick(self, ctx, *, nickname):
"""Sets Talos' nickname in the current guild."""
if len(nickname) > 32:
await ctx.send("Nickname must be 32 characters or fewer")
return
await ctx.me.edit(nick=nickname)
await ctx.send(f"Nickname changed to {nickname}")
    @commands.command(description="Makes Talos repeat you")
    async def repeat(self, ctx, *, text):
        """Causes Talos to repeat whatever you just said, exactly."""
        # Keyword-only *text* captures the rest of the message verbatim.
        await ctx.send(text)
    @commands.command(usage="[number=10]", description="Remove messages from a channel")
    @commands.guild_only()
    async def purge(self, ctx, number: typing.Union[discord.Member, int, str] = 10, key=None):
        """Purges messages from a channel. By default, this will be 10 (including the invoking command)."""\
            """ Use 'all' to purge whole channel. Confirmation keys should be tacked on the end, so """\
            """`^purge 100 [key]`"""
        if isinstance(number, discord.Member):
            await ctx.send("Purging a member's messages not yet supported")
        elif isinstance(number, int):
            # Large purges (>= 100) require a one-time per-guild
            # confirmation key to guard against accidents.
            if number >= 100 and (key is None or key != secure_keys[str(ctx.guild.id)]):
                rand_key = utils.key_generator()
                secure_keys[str(ctx.guild.id)] = rand_key
                await ctx.send(f"Are you sure? If so, re-invoke with {rand_key} on the end.")
            else:
                await ctx.channel.purge(limit=number)
        elif number == "all":
            # Purging the whole channel always requires confirmation.
            if key is None or key != secure_keys[str(ctx.guild.id)]:
                rand_key = utils.key_generator()
                secure_keys[str(ctx.guild.id)] = rand_key
                await ctx.send(f"Are you sure? If so, re-invoke with {rand_key} on the end.")
            elif key == secure_keys[str(ctx.guild.id)]:
                await ctx.channel.purge(limit=None)
                # Invalidate the key so it can't be replayed.
                secure_keys[str(ctx.guild.id)] = ""
    @commands.command(description="Kick a user from chat")
    @commands.guild_only()
    async def kick(self, ctx, user: discord.Member, reason="Kicked from guild by Talos"):
        """Kicks a given user from the current guild. Only accepts a user who is currently in the guild"""
        await user.kick(reason=reason)
        # Record the action in the guild's moderation log before confirming.
        await self.bot.mod_log(ctx, "kick", user, reason)
        await ctx.send(f"User {user} kicked")
    @commands.command(description="Ban a user from chat")
    @commands.guild_only()
    async def ban(self, ctx, user: discord.Member, reason="Banned from guild by Talos"):
        """Bans a given user from the current guild. Currently only accepts a user who is currently in the guild"""
        await user.ban(reason=reason)
        # Record the action in the guild's moderation log before confirming.
        await self.bot.mod_log(ctx, "ban", user, reason)
        await ctx.send(f"User {user} banned")
    @commands.command(aliases=["mute"], description="Silence a user")
    @commands.guild_only()
    async def silence(self, ctx, user: discord.Member, length=None, reason="Silenced by Talos"):
        """Silences a user, optionally takes in a length of time and a reason for silencing. A role called 'Muted' """\
            """or 'Silenced' with the necessary permissions in place must exist for this to work."""
        muted = list(filter(lambda x: x.name.lower() == "muted" or x.name.lower() == "silenced", ctx.guild.roles))
        if not muted:
            await ctx.send("No Muted or Silenced role")
            return
        role = muted[0]
        await user.add_roles(role, reason=reason)
        await self.bot.mod_log(ctx, "silence", user, reason)
        await ctx.send(f"User {user} silenced")
        if length is not None:
            # NOTE(review): with discord.py, *length* arrives as str; the int
            # branch is only reachable from direct calls. Any other type
            # would leave *period* unbound (NameError below) -- confirm.
            if isinstance(length, str):
                period = dutils.EventPeriod(length)
            elif isinstance(length, int):
                period = dutils.EventPeriod("")
                period.minutes = length
            async def unmuter():
                # Sleep for the requested period, then lift the role.
                await asyncio.sleep(int(period))
                await user.remove_roles(role, reason="Silence timer up")
            self.bot.loop.create_task(unmuter())
    @commands.command(description="Display current Talos perms")
    @commands.guild_only()
    async def talos_perms(self, ctx):
        """Has Talos display their current effective guild permissions. This is channel independent, """\
            """channel-specific perms aren't taken into account."""
        out = "```Guild Permissions:\n"
        # guild_permissions iterates (name, enabled) pairs; keep only the
        # enabled ones and list their names.
        out += ', '.join(map(lambda x: x[0], filter(lambda y: y[1] is True, ctx.me.guild_permissions)))
        out += "```"
        await ctx.send(out)
    @commands.group(description="Admin related commands")
    async def admins(self, ctx):
        """By default, anyone on a guild with administrator privileges is an Admin. Adding someone to the list will """\
            """ override this behavior.
        The Guild Owner is also always Admin, and this behavior can't be overridden for security reasons."""
        # Group entry point: only runs the usage hint when no subcommand
        # was invoked.
        if ctx.invoked_subcommand is None:
            await ctx.send("Valid options are 'add', 'list', and 'remove'.")
    @admins.command(name="add", description="Adds a new admin")
    @commands.guild_only()
    async def _ad_add(self, ctx, member: discord.Member):
        """Adds a user to the guild admin list."""
        new_admin = sql.TalosAdmin((ctx.guild.id, member.id))
        # Only persist if this (guild, user) pair isn't already stored.
        if new_admin not in self.database.get_admins(ctx.guild.id):
            self.database.save_item(new_admin)
            await ctx.send(f"Added admin {member.name}!")
        else:
            await ctx.send("That user is already an admin!")
    @admins.command(name="remove", description="Removes an admin")
    @commands.guild_only()
    async def _ad_remove(self, ctx, member):
        """Removes an admin user from the guild list"""
        # Accept a plain name, a full "name#discrim" string, or a raw ID.
        member_object = discord.utils.find(
            lambda x: x.name == member or str(x) == member or (member.isnumeric() and x.id == int(member)),
            ctx.guild.members
        )
        if member_object is not None:
            member = member_object.id
        elif member.isnumeric():
            # User no longer on the guild; fall back to the numeric ID so a
            # stale database entry can still be removed.
            member = int(member)
        admin = list(filter(lambda x: x.user_id == member, self.database.get_admins(ctx.guild.id)))
        if admin:
            self.database.remove_item(admin[0])
            if member_object:
                await ctx.send(f"Removed admin from {member_object.name}")
            else:
                await ctx.send("Removed admin from invalid user")
        else:
            await ctx.send("That person isn't an admin!")
    @admins.command(name="list", description="Display admins")
    @commands.guild_only()
    async def _ad_list(self, ctx):
        """Displays all admins for the current guild"""
        admin_list = self.database.get_admins(ctx.guild.id)
        if len(admin_list) > 0:
            out = "```"
            for admin in admin_list:
                # Fall back to the raw ID when the user isn't in the cache.
                admin_name = self.bot.get_user(admin.user_id)
                admin_name = str(admin_name) if admin_name is not None else admin.user_id
                out += f"{admin_name}\n"
            out += "```"
            await ctx.send(out)
        else:
            await ctx.send("This guild currently has no administrators.")
@admins.command(name="all", hidden=True, description="Display all admins")
@dutils.dev_check()
async def _ad_all(self, ctx):
"""Displays all admins in every guild Talos is in"""
all_admins = self.database.get_all_admins()
consumed = []
out = "```"
for admin in all_admins:
if admin.guild_id not in consumed:
out += f"Guild: {self.bot.get_guild(admin.guild_id)}\n"
consumed.append(admin.guild_id)
admin = self.bot.get_user(admin.user_id)
admin = str(admin) if admin is not None else admin.user_id
out += f" {admin}\n"
if out != "```":
out += "```"
await ctx.send(out)
else:
await ctx.send("No admins currently")
    @commands.group(description="Permissions related commands")
    async def perms(self, ctx):
        """Talos permissions are divided into 4 levels, with each level having a default priority. The levels, in """\
            """order from lowest to highest default priority, are:
        -Guild
        -Channel
        -Role
        -User
        If one doesn't like the default priority, it can be changed by adding a number to the end of the command."""\
            """ Priority defaults are 10 for guild, then going up by ten for each level, ending with User being 40.
        One can 'allow' or 'forbid' specifics at each level, or simply 'allow' or 'forbid' the whole guild."""
        # Group entry point: only prints the usage hint when no subcommand
        # was invoked.
        if ctx.invoked_subcommand is None:
            await ctx.send("Valid options are 'create', 'list', and 'remove'.")
    @perms.command(name="add", aliases=["create"], description="Create or alter permission rules")
    @commands.guild_only()
    async def _p_add(self, ctx, command, level, allow, name=None, priority: int = None):
        """Provide a command, one of the four levels (see `^help perms`), whether to allow or forbid the command, """\
            """a name (If level is guild, this is ignored), and a priority if you don't want default."""
        level = level.lower()
        allow = allow.lower()
        # Normalize the allow/forbid argument to a boolean.
        if allow == "allow" or allow == "true":
            allow = True
        elif allow == "forbid" or allow == "false":
            allow = False
        else:
            await ctx.send("I can only 'allow' or 'forbid', sorry!")
            return
        found = self.bot.find_command(command) is not None
        if found and level in self.LEVELS:
            if name is None and level != "guild":
                await ctx.send("You need to include both a name and either 'allow' or 'forbid'")
                return
            # Resolve *name* to the matching guild object for this level;
            # guild-level rules have no target.
            old_name = name
            if level == "user":
                name = discord.utils.find(lambda u: u.name == name, ctx.guild.members)
            elif level == "role":
                name = discord.utils.find(lambda r: r.name == name, ctx.guild.roles)
            elif level == "channel":
                name = discord.utils.find(lambda c: c.name == name, ctx.guild.channels)
            elif level == "guild":
                name = ""
            if name is None:
                await ctx.send(f"Sorry, I couldn't find the user {old_name}!")
                return
            name = str(name) if name != "" else "SELF"
            # NOTE(review): ``priority or ...`` treats an explicit 0 as
            # "unset" and substitutes the level default -- confirm intended.
            priority = priority or PRIORITY_LEVELS[level]
            perm_rule = sql.PermissionRule((ctx.guild.id, command, level, name, priority, allow))
            self.database.save_item(perm_rule)
            await ctx.send(f"Permissions for command **{command}** at level **{level}** updated.")
        elif not found:
            await ctx.send("I don't recognize that command, so I can't set permissions for it!")
        else:
            await ctx.send("Unrecognized permission level.")
    @perms.command(name="remove", aliases=["delete"], description="Remove permission rules")
    @commands.guild_only()
    async def _p_remove(self, ctx, command=None, level=None, name=None):
        """Remove a permissions rule or set of rules. Be careful, as simply `^perms remove` will clear all guild """\
            """permissions."""
        if isinstance(level, str):
            level = level.lower()
        if level in self.LEVELS or level is None:
            if isinstance(name, str):
                # Resolve the target name to the guild object's string form,
                # matching how _p_add stored it.
                if level == "user":
                    name = str(discord.utils.find(lambda u: u.name == name, ctx.guild.members))
                elif level == "role":
                    name = str(discord.utils.find(lambda r: r.name == name, ctx.guild.roles))
                elif level == "channel":
                    name = str(discord.utils.find(lambda c: c.name == name, ctx.guild.channels))
            # general=True lets None fields act as wildcards, so omitting
            # arguments widens the deletion.
            perm_rule = sql.PermissionRule((ctx.guild.id, command, level, name, None, None))
            self.database.remove_item(perm_rule, general=True)
            if command is None:
                await ctx.send("Permissions for guild cleared")
            elif level is None:
                await ctx.send(f"Permissions for command **{command}** at all levels cleared.")
            elif name is None:
                await ctx.send(f"Permissions for command **{command}** at level **{level}** cleared.")
            else:
                await ctx.send(f"Permissions for command **{command}** at level **{level}** for **{name}** cleared.")
        else:
            await ctx.send("Unrecognized permission level.")
    @perms.command(name="list", description="Display permission rules for the current guild")
    @commands.guild_only()
    async def _p_list(self, ctx):
        """Displays a list of all permissions rules for the current guild"""
        result = self.database.get_perm_rules(ctx.guild.id)
        if len(result) == 0:
            await ctx.send("No permissions set for this guild.")
            return
        # Regroup the flat rule list as {command: {level: [target, prio, allow]}}.
        guild_perms = {}
        for perm in result:
            if guild_perms.get(perm.command, None) is None:
                guild_perms[perm.command] = {}
            if guild_perms.get(perm.command).get(perm.perm_type, None) is None:
                guild_perms[perm.command][perm.perm_type] = []
            guild_perms[perm.command][perm.perm_type].append([perm.target, perm.priority, perm.allow])
        out = "```"
        for command in guild_perms:
            out += f"Command: {command}\n"
            # Print levels least-specific first (guild -> user).
            for level in sorted(guild_perms[command], key=lambda a: self.LEVELS[a]):
                out += f"    Level: {level}\n"
                if level == "guild":
                    out += f"        {guild_perms[command][level]}\n"
                else:
                    for detail in guild_perms[command][level]:
                        out += f"        {detail[1]}-{detail[0]}: {bool(detail[2])}\n"
        out += "```"
        await ctx.send(out)
    @perms.command(name="all", hidden=True, description="Display permission rules for all guilds")
    @dutils.dev_check()
    async def _p_all(self, ctx):
        """Displays all permissions rules, in all guilds Talos is in."""
        result = self.database.get_all_perm_rules()
        if len(result) == 0:
            await ctx.send("All permissions default")
            return
        # Regroup as {guild: {command: {level: [target, prio, allow]}}}.
        guild_perms = {}
        for permission in result:
            if guild_perms.get(permission.id, None) is None:
                guild_perms[permission.id] = {}
            if guild_perms.get(permission.id).get(permission.command, None) is None:
                guild_perms[permission.id][permission.command] = {}
            if guild_perms.get(permission.id).get(permission.command).get(permission.perm_type, None) is None:
                guild_perms[permission.id][permission.command][permission.perm_type] = []
            guild_perms[permission.id][permission.command][permission.perm_type].append([permission.target,
                                                                                         permission.priority,
                                                                                         permission.allow])
        out = "```"
        for guild in guild_perms:
            guild_name = self.bot.get_guild(guild)
            out += f"Guild: {guild_name}\n"
            for command in guild_perms[guild]:
                out += f"    Command: {command}\n"
                for level in sorted(guild_perms[guild][command], key=lambda a: self.LEVELS[a]):
                    out += f"        Level: {level}\n"
                    if level == "guild":
                        out += f"            {guild_perms[guild][command][level]}\n"
                    else:
                        for detail in guild_perms[guild][command][level]:
                            out += f"            {detail[1]}-{detail[0]}: {bool(detail[2])}\n"
        out += "```"
        await ctx.send(out)
    @commands.group(description="Options related commands")
    async def options(self, ctx):
        """Command to change Talos guild options. All of these only effect the current guild. Check """\
            """`^help options list` for a list of available options, and what they do."""
        # Group entry point: only prints the usage hint when no subcommand
        # was invoked.
        if ctx.invoked_subcommand is None:
            await ctx.send("Valid options are 'set', 'list', and 'default'.")
    @options.command(name="set", description="Set guild options")
    @commands.guild_only()
    async def _opt_set(self, ctx, option, value):
        """Set an option. Most options are true or false. See `^help options list` for available options"""
        try:
            guild_options = self.database.get_guild_options(ctx.guild.id)
            # The current value's type decides how *value* is coerced;
            # getattr raises AttributeError for unknown option names.
            cur_val = getattr(guild_options, option)
            if isinstance(cur_val, (int, bool)):
                if value.upper() == "ALLOW" or value.upper() == "TRUE":
                    value = True
                elif value.upper() == "FORBID" or value.upper() == "FALSE":
                    value = False
                else:
                    await ctx.send("Sorry, that option only accepts true or false values.")
                    return
            if isinstance(cur_val, str):
                # Expand escape sequences the user typed literally.
                value = utils.replace_escapes(value)
            setattr(guild_options, option, value)
            self.database.save_item(guild_options)
            await ctx.send(f"Option {option} set to `{value}`")
        except AttributeError:
            await ctx.send("I don't recognize that option.")
    @options.command(name="list", description="Display guild options")
    @commands.guild_only()
    async def _opt_list(self, ctx):
        """Displays list of what options are currently set to in this guild. Available options are:
        rich_embeds: whether Talos will use any embedded messages in this guild.
        fail_message: whether Talos will post a message for unknown commands
        pm_help: whether Talos will PM help or post it in the channel
        commands: whether the Commands cog is active in this guild
        user_commands: whether the UserCommands cog is active in this guild
        joke_commands: whether the JokeCommands cog is active in this guild
        writing_prompts: whether to post daily writing prompts
        prompts_channel: the name of the channel to post daily prompts to, if above option is true
        prefix: command prefix for Talos to use in this guild. @ mention will always work
        timezone: what timezone for Talos to use for displayed times, supports any timezone abbreviation"""
        out = "```"
        options = self.database.get_guild_options(ctx.guild.id)
        # Slot 0 is skipped; the remaining slots are the option names.
        for item in options.__slots__[1:]:
            out += f"{item}: {getattr(options, item)}\n"
        out += "```"
        # "``````" means the loop added nothing -- no options to show.
        if out == "``````":
            await ctx.send("No options available.")
            return
        await ctx.send(out)
    @options.command(name="default", description="Set guild option to default")
    @commands.guild_only()
    async def _opt_default(self, ctx, option):
        """Sets an option to its default value, as in a guild Talos had just joined."""
        try:
            guild_options = self.database.get_guild_options(ctx.guild.id)
            # None is the stored sentinel for "use the default value".
            setattr(guild_options, option, None)
            self.database.save_item(guild_options)
            await ctx.send(f"Option {option} set to default")
        except AttributeError:
            await ctx.send("I don't recognize that option.")
    @options.command(name="all", hidden=True, description="Display all guild options")
    @dutils.dev_check()
    async def _opt_all(self, ctx):
        """Displays all guild options in every guild Talos is in. Condensed to save your screen."""
        all_options = self.database.get_all_guild_options()
        out = "```"
        for options in all_options:
            out += f"Guild: {self.bot.get_guild(options.id)}\n"
            for item in options.__slots__[1:]:
                option = getattr(options, item)
                # Skip options still at their default (stored as None).
                if option is None:
                    continue
                out += f"    {item}: {option}\n"
        out += "```"
        # "``````" means the loop added nothing -- nothing to show.
        if out == "``````":
            await ctx.send("No options available.")
            return
        await ctx.send(out)
    @commands.group(description="Custom commands, Yay!")
    async def command(self, ctx):
        """Command for managing guild-only commands. Create, edit, delete, or list commands. To see documentation """\
            """on how to write more complex commands, check out the talos website CommandLang page. **Currently in """\
            """development, please report bugs on the github or official server**"""
        # Group entry point: only prints the usage hint when no subcommand
        # was invoked.
        if ctx.invoked_subcommand is None:
            await ctx.send("Valid options are 'add', 'edit', 'remove', and 'list'")
    @command.command(name="add", aliases=["create"], description="Add new command")
    async def _c_add(self, ctx, name, *, text):
        """Creates a new guild only command, first word will be the name, and everything after will define the """\
            """command"""
        # Never let a custom command shadow a built-in Talos command.
        if name in self.bot.all_commands:
            await ctx.send("Talos already has that command, no overwriting allowed.")
            return
        elif self.database.get_guild_command(ctx.guild.id, name):
            await ctx.send("That command already exists. Maybe you meant to `edit` it instead?")
            return
        self.database.save_item(sql.GuildCommand((ctx.guild.id, name, text)))
        await ctx.send(f"Command {name} created")
    @command.command(name="edit", description="Edit existing command")
    async def _c_edit(self, ctx, name, *, text):
        """Edits an existing command. Same format as adding a command."""
        if not self.database.get_guild_command(ctx.guild.id, name):
            await ctx.send("That command doesn't exist. Maybe you meant to `add` it instead?")
            return
        # save_item overwrites the stored (guild, name) row with new text.
        self.database.save_item(sql.GuildCommand((ctx.guild.id, name, text)))
        await ctx.send(f"Command {name} successfully edited")
    @command.command(name="remove", description="Remove existing command")
    async def _c_remove(self, ctx, name):
        """Removes a command from the guild."""
        if self.database.get_guild_command(ctx.guild.id, name) is None:
            await ctx.send("That command doesn't exist, sorry.")
            return
        # Second arg True: treat None fields as wildcards for the delete.
        self.database.remove_item(sql.GuildCommand((ctx.guild.id, name, None)), True)
        await ctx.send(f"Command {name} successfully removed")
    @command.command(name="list", description="List existing commands")
    async def _c_list(self, ctx):
        """Lists commands in this guild"""
        command_list = self.database.get_guild_commands(ctx.guild.id)
        if len(command_list) == 0:
            await ctx.send("This server has no custom commands")
            return
        out = "```\nServer Commands:\n"
        for command in command_list:
            out += f"{command.name}: {command.text}\n"
        out += "```"
        await ctx.send(out)
@commands.group(description="Custom events, on a timer")
async def event(self, ctx):
    """Top-level group for timed custom events.

    Without a subcommand, replies with the list of valid options.
    """
    if ctx.invoked_subcommand is not None:
        return
    await ctx.send("Valid options are 'add', 'edit', 'remove', and 'list'")
@event.command(name="add", description="Add a custom event")
async def _e_add(self, ctx, name, period, *, text):
    """Create a repeating event.

    `period` combines d/h/m specifiers (e.g. 1d7m); the minimum interval is
    ten minutes. The event posts `text` in the channel it was created in.
    """
    if self.database.get_guild_event(ctx.guild.id, name):
        await ctx.send("That event already exists. Maybe you meant to `edit` it instead?")
        return
    # Last-run timestamp starts at 0; the event fires in the invoking channel.
    new_event = sql.GuildEvent((ctx.guild.id, name, period, 0, ctx.channel.id, text))
    self.database.save_item(new_event)
    await ctx.send(f"Event {name} created")
@event.command(name="edit", description="Edit an existing event")
async def _e_edit(self, ctx, name, *, text):
    """Change the text an existing event posts when it runs.

    The event is looked up by name; its period and channel are left untouched.
    """
    event = self.database.get_guild_event(ctx.guild.id, name)
    if not event:
        await ctx.send("That event doesn't exist. Maybe you meant to `add` it instead?")
        return
    # The event was fetched by this exact name, so reassigning event.name (as the
    # old code did) was a no-op; only the text needs updating before saving.
    event.text = text
    self.database.save_item(event)
    await ctx.send(f"Event {name} successfully edited")
@event.command(name="remove", description="Remove an event")
async def _e_remove(self, ctx, name):
    """Delete an existing event so it no longer fires."""
    if self.database.get_guild_event(ctx.guild.id, name) is None:
        await ctx.send("That event doesn't exist, sorry.")
        return
    # Only the (guild, name) key is needed to remove the row; the rest is None.
    stub = sql.GuildEvent((ctx.guild.id, name, None, None, None, None))
    self.database.remove_item(stub, True)
    await ctx.send(f"Event {name} successfully removed")
@event.command(name="list", description="List all events")
async def _e_list(self, ctx):
    """Show all events currently defined for this guild."""
    guild_events = self.database.get_guild_events(ctx.guild.id)
    if len(guild_events) == 0:
        await ctx.send("This server has no custom events")
        return
    # One "name - period: text" line per event, wrapped in a code block.
    body = "".join(f"{ev.name} - {ev.period}: {ev.text}\n" for ev in guild_events)
    await ctx.send("```\nServer Events:\n" + body + "```")
def setup(bot):
    """Extension entry point: register the AdminCommands cog on the bot.

    :param bot: Bot this extension is being setup for
    """
    cog = AdminCommands(bot)
    bot.add_cog(cog)
|
17,196 | d31b3fbb60c35f4cf0a4655f7cfdf5c84cd869e5 | import artrat.lispparse as lisp
import random
class ExpansionKey:
    """Hashable lookup key for a production: (parent POS, own POS,
    left-sibling POS, left-sibling text)."""

    def __init__(self, parent, pos, brother_symbol, brother_text):
        assert isinstance(parent, str)
        assert isinstance(pos, str)
        assert brother_symbol is None or isinstance(brother_symbol, str), type(brother_symbol)
        assert brother_text is None or isinstance(brother_text, str), type(brother_text)
        self.parent = parent
        self.pos = pos
        self.brother_symbol = brother_symbol
        self.brother_text = brother_text

    def _fields(self):
        # Canonical tuple used for hashing, equality, and display.
        return (self.parent, self.pos, self.brother_symbol, self.brother_text)

    def __hash__(self):
        return hash(self._fields())

    def __eq__(self, ok):
        return self._fields() == ok._fields()

    def __str__(self):
        return "ExpansionKey(%s,%s,%s,%s)" % self._fields()
class ExpandableLeaf:
    """Terminal node in an expansion: a POS tag together with its surface word."""

    def __init__(self, pos, word):
        self.pos = pos
        self.word = word

    def IsLeaf(self):
        return True

    def Lisp(self):
        # Render as a two-element lisp node: (POS word).
        return lisp.L(self.pos, self.word)

    def __str__(self):
        return "Leaf({0},{1})".format(self.pos, self.word)
class ExpandableNode:
    """Non-terminal node: a bare POS symbol still awaiting expansion."""

    def __init__(self, pos):
        self.pos = pos

    def IsLeaf(self):
        return False

    def __str__(self):
        return self.pos
class ExpansionUnit:
    """One production rule: expands key.pos into `children`, conditioned on the
    parent POS and the left sibling's POS/text (all captured in the key).

    NOTE(review): this module is Python 2 (print statements, xrange below).
    """
    def __init__(self, parent, pos, brother_symbol, brother_text, children):
        self.key = ExpansionKey(parent, pos, brother_symbol, brother_text)
        self.children = children
    def __str__(self):
        return "%s ->\n (%s)" % (self.key, ", ".join(map(str,self.children)))
    def Expand(self, ctx):
        # Expand children left to right; each non-leaf child is looked up in ctx
        # keyed on the just-generated sibling's POS and text, so generation is
        # conditioned on what came immediately before. The prints are debug trace.
        result = []
        print map(str,self.children)
        for i in xrange(len(self.children)):
            print i, len(self.children)
            if self.children[i].IsLeaf():
                # Leaves are emitted verbatim.
                result.append(self.children[i].Lisp())
                print result[-1]
            else:
                pos = self.children[i].pos
                # The first child has no left sibling; later ones condition on
                # the previously generated subtree's POS and rendered text.
                last_pos = None if i == 0 else result[-1].POS()
                last_text = None if i == 0 else result[-1].ToText()
                if i > 0:
                    print result[-1]
                key = ExpansionKey(self.key.pos, pos, last_pos, last_text)
                lku = ctx.Lookup(key)
                # Recurse with a one-level-deeper context.
                result.append(lku.Expand(ctx.Down()))
        return lisp.L(self.key.pos, *result)
def FromPenn(lisp):
    """Walk a Penn-treebank-style lisp tree and return one ExpansionUnit per
    (parent, child-subtree) production, recording each subtree's left sibling
    so units can later be re-assembled in context. Leaves yield no units.

    NOTE(review): the parameter shadows the module-level `lisp` import; safe
    here only because lisp.L is not referenced inside this function.
    """
    result = []
    if lisp.IsLeaf():
        return []
    for i in xrange(len(lisp)):
        # Leaf children carry no internal structure, so they produce no unit.
        if lisp.At(i).IsLeaf():
            continue
        # Snapshot the grandchildren: leaves keep their text, non-leaves only POS.
        children = []
        for j in xrange(len(lisp.At(i))):
            if lisp.At(i).At(j).IsLeaf():
                children.append(ExpandableLeaf(lisp.At(i).At(j).POS(), lisp.At(i).At(j).ToText()))
            else:
                children.append(ExpandableNode(lisp.At(i).At(j).POS()))
        # First subtree has no left sibling; later ones record the previous one.
        if i == 0:
            result.append(ExpansionUnit(lisp.POS(), lisp.At(i).POS(), None, None, children))
        else:
            result.append(ExpansionUnit(lisp.POS(), lisp.At(i).POS(), lisp.At(i-1).POS(), lisp.At(i-1).ToText(), children))
        # Recurse to collect productions of the subtree itself.
        result.extend(FromPenn(lisp.At(i)))
    return result
class CannotExpandException(Exception):
    """Raised when no ExpansionUnit matches a lookup key.

    The original class did not derive from Exception, which only worked via
    Python 2's old-style exception semantics; subclassing Exception keeps the
    same interface while making raise/except behave normally everywhere.
    """

    def __init__(self, err):
        # Keep the original attribute so existing handlers reading .err still work.
        self.err = err

    def __str__(self):
        return "CannotExpandException(%s)" % self.err
def ToyContextFromLisps(lisps):
    """Build a ToyContext holding every expansion unit mined from `lisps`."""
    ctx = ToyContext()
    for tree in lisps:
        for unit in FromPenn(tree):
            ctx.Insert(unit)
    return ctx
class ToyContext:
    """In-memory grammar: maps ExpansionKey -> list of ExpansionUnits, plus a
    depth counter. Down() shares the same dict (not a copy) one level deeper."""
    def __init__(self):
        self.dct = {}
        self.height = 0
    def Insert(self, eu):
        # Multiple units may share a key; Lookup later picks among them at random.
        if eu.key not in self.dct:
            self.dct[eu.key] = []
        self.dct[eu.key].append(eu)
    def Lookup(self, key):
        # Randomly choose one unit registered under `key`; the prints are debug trace.
        print "Lookup(%s)" % key
        if key in self.dct:
            result = random.choice(self.dct[key])
            print result
            return result
        raise CannotExpandException("Lookup(%s)" % key)
    def Down(self):
        # Child context: same rule table (shared reference), one level deeper.
        result = ToyContext()
        result.dct = self.dct
        result.height = self.height + 1
        return result
    def Generate(self):
        # Start from a uniformly random key and expand it fully.
        # NOTE(review): relies on Python 2 dict.keys() returning a list;
        # random.choice over a Python 3 view object would raise.
        return self.Lookup(random.choice(self.dct.keys())).Expand(self)
|
17,197 | 75352affaeb344bf926837322b500e7cdf4cd97d | import numpy as np
import pandas as pd
from tensorloader import EncoderUtil as u
#Author: Asa Thibodeau
class BaseToInt:
    """Maps IUPAC nucleotide codes to the integer bases they stand for.

    Integer encoding: 0=A, 1=C, 2=G, 3=T. Ambiguity codes map to every base
    they cover (e.g. "N" covers all four).
    """

    def __init__(self):
        # Full IUPAC ambiguity table, built once per instance.
        self.basetoint = {
            "A": (0,),
            "C": (1,),
            "M": (0, 1),
            "G": (2,),
            "R": (0, 2),
            "S": (1, 2),
            "V": (0, 1, 2),
            "T": (3,),
            "W": (0, 3),
            "Y": (1, 3),
            "H": (0, 1, 3),
            "K": (2, 3),
            "D": (0, 2, 3),
            "B": (1, 2, 3),
            "N": (0, 1, 2, 3),
        }

    def getPositions(self, base):
        """Return the tuple of integer bases encoded by a character.

        Parameters
        ----------
        base : str
            A single (uppercase) IUPAC nucleotide character.

        Returns
        -------
        rv : tuple
            All bases (0=A, 1=C, 2=G, 3=T) that the character denotes.
        """
        return self.basetoint[base]
def updateReferenceAndNormalize(m, ref, thresh):
    """Normalize base counts column-by-column in place, backfilling from the
    reference sequence.

    Each column of m[:4] is divided by its read total; columns whose total is
    below `thresh` are instead replaced by a uniform distribution over the
    reference base(s) at that position (IUPAC ambiguity codes spread the mass).

    Parameters
    ----------
    m : (5, #len) numpy array
        Raw per-base counts; rows 0-3 (A/C/G/T) are rewritten in place.
    ref : str
        Reference sequence, one character per column of m.
    thresh : int
        Minimum number of reads required to trust the observed counts.
    """
    reference = list(ref)
    floor = max(thresh, 0)
    column_totals = np.sum(m[:4, ], axis=0)
    converter = BaseToInt()
    for col, total in enumerate(column_totals):
        if total < floor:
            # Too few reads: fall back to a uniform distribution over the
            # reference base(s) at this position.
            rows = np.array(converter.getPositions(reference[col].capitalize()))
            m[:4, col] = 0
            m[rows, col] = 1.0 / len(rows)
        else:
            m[:4, col] = m[:4, col] / total
            # DEBUG CODE: normalized values must never exceed 1.
            if (m[:4, col] > 1).any():
                print(total)
                print(m[:4, col])
                print(column_totals)
            # END DEBUG CODE
def getSequenceFeatures(seqfile, summitpeaks, peaks, spidx, l, rthresh):
    """Reads dataset information corresponding to
    dataset file location and dataset labels.

    The sequence file is a sequence of 6-line records:
    metadata (tab-separated, peak id in column 4), the reference sequence,
    then four frequency-vector lines for A, C, G, and T respectively.

    Parameters
    ----------
    seqfile : str
        Path of the file containing sequence features.
    summitpeaks : (#numpeaks, 3) numpy array
        The extended peak locations
        chr start end
    peaks : (#numpeaks, 3) numpy array
        The original peak locations.
        chr start end
    spidx : (#numpeaks) numpy array
        A sorted index of peak ids for properly matching
        data between files.
    l : int
        The total length of each region.
    rthresh : int
        The number of reads at a position required to
        override reference genome base.

    Returns
    -------
    rv : (#peaks, 4, #len) numpy array
        A (#peaks, 4, #len) tensor of sequence features.
        0:Peak position
        1:A
        2:C
        3:G
        4:T
    idx : (#peaks) numpy array
        A vector containing the ids of each region.
    """
    numpeaks = len(peaks)
    rv = np.zeros((numpeaks, 5, l),dtype=float)
    idx = np.zeros((numpeaks), dtype=object)
    curidx = 0
    with open(seqfile, 'r') as f:
        while True:
            try:
                # Consume one 6-line record; next() raises StopIteration at EOF.
                metadata = next(f)
                peakid = int(metadata.split("\t")[3].strip())
                reference = next(f).strip()
                fv1 = u.getFrequencyVector(next(f))
                fv2 = u.getFrequencyVector(next(f))
                fv3 = u.getFrequencyVector(next(f))
                fv4 = u.getFrequencyVector(next(f))
                # Only keep records whose peak id is in the requested index.
                if peakid in spidx:
                    idx[curidx] = peakid
                    rv[curidx, 0, :] = fv1 #A
                    rv[curidx, 1, :] = fv2 #C
                    rv[curidx, 2, :] = fv3 #G
                    rv[curidx, 3, :] = fv4 #T
                    # Normalize counts in place, backfilling low-coverage
                    # positions from the reference sequence.
                    updateReferenceAndNormalize(rv[curidx], reference, rthresh)
                    curidx += 1
            except StopIteration as se:
                # End of file: all records consumed.
                break
    # Reorder rows by peak id so they line up exactly with spidx.
    sortedidx = np.argsort(idx)
    idx = idx[sortedidx]
    assert np.all(spidx == idx)
    rv = rv[sortedidx]
    # Row 4 encodes the original peak position within the extended region.
    rv[:, 4, :] = u.getPeakPositions(summitpeaks, peaks, l)
    return rv
|
17,198 | 4a4885458ef0997b8369f78871f1d0cdf0186f43 |
class QwcoreError(Exception):
    """Qwcore base exception; all package errors derive from this"""


class PluginNameNotFoundError(QwcoreError):
    """Raised when a specific plugin is not found"""


class PluginNameMismatchError(QwcoreError):
    """Raised when a plugin name does not match the 'name' attribute of the object"""


class DuplicatePluginError(QwcoreError):
    """Raised when a specific name has multiple plugins"""


class NoPluginsFoundError(QwcoreError):
    """Raised when no template plugins are found"""


class PluginNoNameAttributeError(QwcoreError):
    """Raised when a plugin has no 'name' attribute"""


class ConfigFileNotFoundError(QwcoreError):
    """Raised when the config file for an app is not found"""


class ConfigFileParserError(QwcoreError):
    """Raised when the config file can't be parsed"""


class ConfigFileSectionNotFoundError(QwcoreError):
    """Raised when a section is not present in the config file"""


class ConfigFileKeyNotFoundError(QwcoreError):
    """Raised when a key is not present in a section of the config file"""
|
17,199 | 45f8537280cb95a22adbd28e0b44eb7b040c271d | from data.state_machine import StateMachine
from data.states import intro_screen, game
def main():
    """Build the state machine with its two screens and run it until quit."""
    states = {
        'INTRO': intro_screen.IntroScreen(),
        'GAME': game.Game(),
    }
    machine = StateMachine(states, 'INTRO')
    machine.main_loop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.