index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
998,000 | c6371a29a595b002e069d21647646721d4e1da47 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# bmconv.py - Read Delicious.com bookmark file and convert it into a list of dictionaries.
import re
bookmark_file = 'delicious.html'
def main(path='delicious.html'):
    """Parse a Delicious.com bookmark export and return a list of bookmark dicts.

    Each dict has the keys 'url', 'add_date', 'private', 'tags' (list of
    strings) and 'title'; when a <DD> description follows the bookmark, a
    'description' key is added as well.

    Args:
        path: Bookmark HTML file to read. Defaults to the historical
            filename so existing callers keep working.

    Returns:
        list[dict]: one dict per <DT> bookmark line, in file order.
    """
    with open(path, 'r') as f:
        lines = f.readlines()

    entries = []
    for idx, line in enumerate(lines):
        if not re.match(r'^<DT>', line):
            continue
        entry = {
            # NOTE: as in the original code, a <DT> line missing one of
            # these attributes raises AttributeError (match is None).
            'url': re.match(r'^.*HREF=\"([^\"]+)\"', line).group(1),
            'add_date': re.match(r'^.*ADD_DATE=\"([^\"]+)\"', line).group(1),
            'private': re.match(r'^.*PRIVATE=\"([^\"]*)\"', line).group(1),
            'tags': re.match(r'^.*TAGS=\"([^\"]*)\"', line).group(1).split(','),
            'title': re.match(r'^.*<A [^>]+>(.*)</A>', line).group(1),
        }
        # A <DD> line directly after the bookmark starts a (possibly
        # multi-line) description running until the next <DT> or EOF.
        # BUG FIX: guard idx + 1 — the original indexed past the end of the
        # list (uncaught IndexError) when the file ended with a <DT> line,
        # and relied on a bare `except:` to stop the inner gather loop.
        if idx + 1 < len(lines) and re.match(r'^<DD>', lines[idx + 1]):
            description = []
            offset = 1
            while idx + offset < len(lines):
                next_line = lines[idx + offset]
                if re.match(r'^<DT>', next_line):
                    break
                # Strip the optional leading <DD> tag from each line.
                description.append(re.match(r'^(<DD>)?(.*)$', next_line).group(2))
                offset += 1
            entry['description'] = '\n'.join(description)
        entries.append(entry)
    return entries
# Allow running the converter directly as a script; prints the parsed list.
if __name__ == '__main__':
    print(main())
|
998,001 | 9c6f8bf98f7721cf849b2770124347337af95eee | # -*- coding: utf-8 -*-
# Generated by Django 1.11.12 on 2018-04-17 21:04
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11; do not edit by hand — migrations are
    # replayed in dependency order and their content must stay stable.
    dependencies = [
        ('currencies', '0003_auto_20180417_2056'),
    ]

    operations = [
        # NOTE(review): verbose_name uses bytes literals (b'...'), a
        # Python-2-era artifact of Django 1.11 — confirm before a Py3-only
        # cleanup, as changing them would alter the migration.
        migrations.AlterField(
            model_name='fill',
            name='fill_text',
            field=models.CharField(max_length=200, verbose_name=b'Market Order, Stop Loss, Take Profit'),
        ),
        migrations.AlterField(
            model_name='order',
            name='order_text',
            field=models.CharField(max_length=20, verbose_name=b'Order Type'),
        ),
    ]
|
998,002 | e6a3d20fe9445ae88f78af2a2f5be869b8bbd4ec | from __future__ import annotations
from miscutils import ReprMixin
from pathmagic import File
from subtypes import Dict
from .element import Comment, LexicalElement
from .section import DefinesSection
from .parser import DefinesParser
from .. import config
class DefinesDocument(ReprMixin):
    """In-memory model of a Stellaris defines file, split into named sections
    that can be rendered, diffed, merged and (un)commented."""

    # Every section a defines file is expected to contain, in render order.
    _SECTION_NAMES = (
        "NCamera", "NGraphics", "NInterface", "NGameplay", "NSpecies",
        "NShip", "NCombat", "NPop", "NArmy", "NEconomy", "NAI",
    )

    def __init__(self, parse_result: dict[str, list[LexicalElement]]) -> None:
        self.parse_result = parse_result
        # One DefinesSection per known name; sections absent from the parse
        # result are built with lexical_elements=None, as before.
        self.sections: Dict[str, DefinesSection] = Dict({
            section_name: DefinesSection(
                name=section_name,
                lexical_elements=parse_result.get(section_name),
            )
            for section_name in self._SECTION_NAMES
        })

    def __str__(self) -> str:
        rendered_sections = [str(section) for section in self.sections.values()]
        return "\n\n".join(rendered_sections)

    def sync_against_reference(self) -> DefinesDocument:
        """Build a fully-commented reference document and splice this
        document's extra defines into it, each directly after the reference
        define with the same key. Returns the merged reference document."""
        reference = type(self).from_defines_file(
            config.STELLARIS_DIR.d.common.d.defines.f._00_defines)
        reference.comment()
        for section_name, keys in reference.difference(self).items():
            target_section = reference.sections[section_name]
            for key in keys:
                own_define = self.sections[section_name].defines[key]
                own_define.comment = None
                anchor = target_section.lexical_elements.index(
                    target_section.defines[key])
                target_section.lexical_elements.insert(anchor + 1, own_define)
        return reference

    def difference(self, other: DefinesDocument) -> dict[str, set[str]]:
        """Per-section set difference of define keys against *other*."""
        result = {}
        for section_name, section in self.sections.items():
            result[section_name] = section.difference(other.sections[section_name])
        return result

    def union(self, other: DefinesDocument) -> dict[str, set[str]]:
        """Per-section set union of define keys with *other*."""
        result = {}
        for section_name, section in self.sections.items():
            result[section_name] = section.union(other.sections[section_name])
        return result

    def comment(self) -> None:
        """Comment out every define in every section."""
        for section in self.sections.values():
            section.comment()

    def uncomment(self) -> None:
        """Uncomment every define in every section."""
        for section in self.sections.values():
            section.uncomment()

    @classmethod
    def from_defines_file(cls, defines_file: File) -> DefinesDocument:
        """Parse *defines_file* and build a document from the result."""
        return cls(parse_result=DefinesParser(defines_file.content).parse())
|
998,003 | f282555a204d3df4212166457a040b5da57c97cd | import pandas as pd
import requests
from bs4 import BeautifulSoup
import collections
import re
import os
from datetime import datetime
def getvineyards(parent_url):
    """Scrape virginiawine.org: the winery list page, then each winery's own
    page, returning a list of per-winery dicts (name, contact, region,
    description, wine lists, fees, hours, shipping states, amenities,
    trails, lat/long)."""
    page = requests.get(parent_url).text
    soup = BeautifulSoup(page, "lxml")
    # Get list of all the wineries
    linkrefs = soup.findAll('li' , attrs={'class':'winery-list__item'})
    refs = []
    # For each winery found in the parent page, scape its individual page
    for link in linkrefs:
        winery = {}
        hlinks = link.findAll('a')
        # Get basics header data from the main page
        for ahl in hlinks:
            if 'class=winery-list__text' in ahl['href']:
                winery['website'] = ahl.text
            else:
                winery['name'] = ahl.text
                winery['vapage'] = 'https://www.virginiawine.org' + ahl['href']
        print(winery['name'])  # progress indicator while scraping
        if link.find('p'):
            winery['phone'] = link.find('p').text
        winery['address'] = link.find('address').text
        # Get further header data from winery's individual page
        wpage = requests.get(winery['vapage']).text
        wsoup = BeautifulSoup(wpage, "lxml")
        winery['region'] = wsoup.find('span' , attrs={'class':'card__heading'}).text.replace('Region: ', '').strip()
        headersect = wsoup.find('div', attrs={'class':'col-xs-12 col-md-8'})
        try: #sometimes there is no description
            descard = headersect.find('div', attrs={'class':'markdown'})
            winery['descr'] = descard.find('p').text.strip()
        except:
            pass
        # Get info about wines currently available
        if headersect.findAll('div', attrs={'class':'record-detail__wine-cat-list-item'}):
            winelist = headersect.findAll('div', attrs={'class':'record-detail__wine-cat-list-item'})
            for winetype in winelist:
                wtype = winetype.find('span',class_='card__heading').text
                subtypes = winetype.findAll('a')
                tpwines = [x.text.strip() for x in subtypes]
                winery[f"wines_{wtype}"] = tpwines
        # Iterate through finding winery data using the "cards" construct
        cards = wsoup.find('div', attrs={'class':'col-xs-12 col-md-4'}).findAll('div', attrs={'class':'card'})
        for card in cards:
            clist = card.findAll('div', attrs={'class':'card__list'})
            # Different searching scenarios for different content "Cards"
            if re.search(r'(Tasting Fee|Tour Fee)', card.text):
                fees = card.findAll('div', attrs={'class':'card__content'})
                fees = [x.text.replace('Fee\n', 'Fee: ').strip() for x in fees]
                winery['fees'] = ', '.join(fees)
            elif re.search(r'Seasonal Hours', card.text):
                # Parse "Day: open-close" pairs out of the seasonal-hours card.
                timelist = []
                for cl in clist:
                    divs = cl.findAll('div')
                    divs = [re.sub(r'\s{2,}',' ', x.text.replace('\n', '')).strip() for x in divs]
                    #if ' PM' in divs.text or 'Wed: ' in divs.text:
                    for it in divs:
                        # First capture is the day label, second the hours;
                        # "Closed" entries are normalized explicitly.
                        catchvals = re.findall(r'\s?([\w]+)[\s]*([\w: 0-9-]+)',it)
                        catchvals = [(x[0], 'Closed') if 'Closed' in ''.join(x) else x for x in catchvals]
                        timelist.append(catchvals[0])
                    winery['hours'] = [a[0] + ': ' + a[1] for a in timelist]
                    #else:
                    del divs
                del clist
            elif re.search(r'Hours of Operation', card.text):
                winery['hours'] = card.find('p', attrs={'class':'card__text'}).text.strip()
            elif re.search(r"(Ships to)", card.text):
                sl = list(card.find('span', attrs={'class':'card__shipping-state-list'}).children)
                winery['shipsto'] = [x.string.strip() for x in sl if len(x.string.strip()) > 1]
            elif re.search(r"(Features)", card.text):
                cli = re.findall(r"[\s]*([\w\/ ]+)[\s]*", card.text)
                cli.remove('Features') #this is the section header, not a real value
                winery['amenities'] = cli
            elif re.search(r"(Trails)", card.text):
                winery['trails'] = [(am.text.strip(), am['href']) for cl in clist for am in cl.findAll('a')]
            elif re.search(r"data-lat", str(card)):
                latlong = card.find('div', attrs={'class':'card__map'})
                winery['reglatlong'] = (float(latlong['data-lat']), float(latlong['data-lng']))
        refs.append(winery)
    return refs
def munge(filedir):
    """Work-in-progress: load the scraped CSV and experiment with geocoding.

    NOTE(review): the geocode request uses a hard-coded example address and
    no API key — this reads as scaffolding, not production code; confirm
    before relying on it (it is also commented out at the call site).
    """
    df = pd.read_csv(filedir)
    a = df.iloc[[0]]
    print(a)
    response = requests.get('https://maps.googleapis.com/maps/api/geocode/json?address=1600+Amphitheatre+Parkway,+Mountain+View,+CA')
    resp_json_payload = response.json()
    print(resp_json_payload['results'][0]['geometry']['location'])
if __name__ == '__main__':
    # Scrape the full winery list and dump it next to this script as CSV,
    # timing the whole run.
    start_time = datetime.now()
    vawinelist = r"https://www.virginiawine.org/wineries/all"
    # BUG FIX: the original appended a literal backslash ("\\") to build the
    # output path, which only works on Windows; os.path.join is portable.
    currdir = os.path.dirname(os.path.abspath(__file__))
    winelist = getvineyards(vawinelist)
    pd.DataFrame(winelist).to_csv(os.path.join(currdir, "VA_Wineries.csv"))
    #munge_data = munge(os.path.join(currdir, "VA_Wineries.csv"))
    print("--- %s seconds ---" % (datetime.now() - start_time))
998,004 | 9dd5f548d98076ff74dbe70975dcfdf0cd48af7a | from .config import cfg
from .preprocess import TrainPreprocessor, Preprocessor
|
998,005 | 9960f654529057d91e33d1bf18c7a0398417a38b | from django_filters import FilterSet, CharFilter, ModelChoiceFilter, DateFromToRangeFilter
from .models import Product, Category, Comment
from django.contrib.auth.models import User
class ProductFilter(FilterSet):
    """Declarative filters for Product lists: case-insensitive name search,
    minimum quantity, maximum price, and category-name substring match."""
    class Meta:
        model = Product
        fields = {
            'name': ['icontains'],      # name contains (case-insensitive)
            'quantity': ['gt'],         # quantity strictly greater than
            'price': ['lt'],            # price strictly less than
            'category__name': ['contains'],  # category name contains (case-sensitive)
        }
class F(FilterSet):
    """Filter users by exact username through a custom filter method."""

    username = CharFilter(method='my_filter')

    class Meta:
        model = User
        fields = ['username']

    def my_filter(self, queryset, name, value):
        # django-filter hands us the field name and submitted value;
        # this is equivalent to queryset.filter(username=value).
        return queryset.filter(**{name: value})
class C(FilterSet):
    """Filter products by category, chosen from all existing categories."""
    category = ModelChoiceFilter(queryset=Category.objects.all())
    class Meta:
        model = Product
        fields = ['category']
class X(FilterSet):
    """Filter comments by a from/to date range on their date field."""
    date = DateFromToRangeFilter()
    class Meta:
        model = Comment
        fields = ['date']
|
998,006 | c92f20d08e71de0c970e65805fc0346e227861f8 | #!/usr/bin/env python
"""
Build FRB bits and pieces
"""
from __future__ import (print_function, absolute_import, division, unicode_literals)
def parser(options=None):
    """Build and run the argument parser for the FRB build script.

    Args:
        options: Optional list of argument strings; when None the real
            command line (sys.argv) is parsed instead.

    Returns:
        argparse.Namespace with ``item``, ``flag`` and ``galaxy_options``.
    """
    import argparse

    # Local name differs from the function name to avoid shadowing.
    argparser = argparse.ArgumentParser(
        description='Build parts of the CASBAH database; Output_dir = $CASBAH_GALAXIES [v1.1]')
    argparser.add_argument("item", type=str,
                           help="Item to build ['FRBs', 'Hosts', 'specDB', 'FG']. Case insensitive")
    argparser.add_argument("--flag", type=str, default='all',
                           help="Flag passed to the build")
    argparser.add_argument("-g", "--galaxy_options", type=str,
                           help="Options for fg/host building (photom,cigale)")
    return argparser.parse_args() if options is None else argparser.parse_args(options)
def main(pargs):
    """Dispatch the requested build step.

    Args:
        pargs: Parsed arguments from :func:`parser`; ``pargs.item`` selects
            the build (case-insensitive), ``pargs.flag`` and
            ``pargs.galaxy_options`` are forwarded to it.

    Raises:
        IOError: If ``pargs.item`` names an unknown build target (kept as
            IOError for backward compatibility with existing callers).
    """
    # Imports are deferred so the CLI can show --help without pulling in
    # the heavy build modules. (The original also imported `warnings`,
    # which was never used — removed.)
    from frb.builds import build_specdb
    from frb.builds import build_frbs
    from frb.builds import build_hosts
    from frb.builds import build_fg

    item = pargs.item.lower()
    if item == 'frbs':
        build_frbs.main(inflg=pargs.flag)
    elif item == 'hosts':
        build_hosts.main(inflg=pargs.flag, options=pargs.galaxy_options)
    elif item == 'specdb':
        build_specdb.main(inflg=pargs.flag)
    elif item == 'fg':
        build_fg.main(inflg=pargs.flag, options=pargs.galaxy_options)
    else:
        raise IOError("Bad build item {:s}".format(item))
|
998,007 | 322fe24e26bd2efdf0de75f152468b280ac822ff | # Code borrowed from:
# https://www.edureka.co/blog/web-scraping-with-python/
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from bs4 import BeautifulSoup
from PIL import Image
import numpy
import time
import requests
import pandas as pd
# Chrome setup: ignore TLS errors and load a local adblock extension so pages
# render faster while scraping.
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument('--test-type')
options.add_argument('load-extension=~/Downloads/adblock')
# NOTE(review): hard-coded local chromedriver path — only works on the
# original author's machine.
driver = webdriver.Chrome("/Users/lorenzomendes/Downloads/chromedriver", options=options)
img_urls = []  # cover-image URLs collected for the current 40-album batch
genres = []    # genres collected in lockstep with img_urls
# Open AllMusic advanced-search
driver.get("https://www.allmusic.com/advanced-search")
# Click on Main Albums
# (first wait for the cookie-policy banner to render before interacting)
WebDriverWait(driver, 20).until(
    EC.presence_of_element_located((By.CLASS_NAME, 'cookie-policy-box'))
)
csvCount = 1    # numeric suffix for the output CSV batch files
albumCount = 0  # NOTE(review): never used after initialization
# Sometimes when trying to click on Main Albums, the site's cookie policy box gets in the way, so we'll
# loop until it's clickable.
# NOTE(review): the bare except retries forever if the element never becomes
# clickable — there is no attempt cap here, unlike the later loops.
while True:
    try:
        element = WebDriverWait(driver, 20).until(
            EC.element_to_be_clickable((By.XPATH, "//input[@name='recording-type' and @value='Main Albums']"))
        )
        element.click()
        break
    except:
        pass
page = 0
# Successfully saved albums up to page 85, so we'll continue on page 88 to avoid any potential duplicates.
# Fast-forward: scroll to the bottom and click "next" repeatedly without
# scraping, to resume roughly where the previous run stopped.
while page < 87:
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    element = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.CLASS_NAME, "next"))
    )
    element.click()
    page += 1
csvCount = page+1  # keep CSV numbering aligned with the resumed page
# Main loop of the scraper. For each page on main albums, save each cover url, click on each album, and store the genre.
# When all albums on a given page have been stored, scroll down and click on the next page.
# After every 40 albums have been stored, open each url to get the image, convert to a numpy array, and store it as a
# 40 x 9075 matrix csv file along with the genres.
while True:
    page += 1
    WebDriverWait(driver, 20).until(
        EC.presence_of_element_located((By.CLASS_NAME, 'cover'))
    )
    page_source = driver.page_source
    soup = BeautifulSoup(page_source, 'html.parser')
    results = soup.find_all("tr")  # Each album is stored under the 'tr' tag
    for album in results:
        # The cover URL and album URL are pulled out with raw string
        # slicing on the row's HTML rather than BeautifulSoup navigation.
        stringToParse = str(album)
        albumIndToStart = stringToParse.find("data-original")
        if albumIndToStart == -1:
            continue  # row is not an album entry (no lazy-loaded cover)
        albumIndToEnd = stringToParse.find(' ', albumIndToStart)
        coverUrl = stringToParse[albumIndToStart+15:albumIndToEnd-1]
        albumIndToStart = coverUrl.find('&')
        if albumIndToStart != -1:
            # REMOVE AMP; (undo HTML entity escaping of '&' in the URL)
            coverUrl = coverUrl[:albumIndToStart+1] + coverUrl[albumIndToStart+5:]
        albumTitleIndStart = stringToParse.find('e">\n<a href="')
        if albumTitleIndStart == -1:
            print("THIS SHOULDN'T HAPPEN") # every album should contain this url
            continue
        albumTitleIndEnd = stringToParse.find('">', albumTitleIndStart+3)
        titleUrl = stringToParse[albumTitleIndStart+13:albumTitleIndEnd]
        # Open a new tab and access each album's url to find the genre
        driver.execute_script("window.open('');")
        driver.switch_to.window(driver.window_handles[1])
        driver.get(titleUrl)
        count = 0
        genreDoesntExist = False
        # Try to find the genre given in the album's page. After 5 tries, set the genreDoesntExist flag
        while True:
            try:
                count += 1
                WebDriverWait(driver, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, 'genre'))
                )
                break
            except:
                # After trying to find the genre 5 times, we'll assume it doesn't exist, set flag
                if count == 5:
                    genreDoesntExist = True
                    break
        # If the genre doesn't exist, skip the current album
        # NOTE(review): this skips without closing the extra tab or switching
        # back to window_handles[0] — each genre-less album leaks a tab and
        # the next iteration reopens on top of it; confirm intended.
        if genreDoesntExist:
            continue
        # Parse the album's page for the genre
        genreSoup = BeautifulSoup(driver.page_source, 'html.parser')
        genreText = str(genreSoup.find("div", "genre"))
        genreTextIndEnd = genreText.find('</a')
        genreTextIndStart = genreText.rfind('>', 0, genreTextIndEnd)
        genre = genreText[genreTextIndStart+1:genreTextIndEnd]
        genreTestIndStart = genre.find('amp')
        if genreTestIndStart != -1:
            # Undo '&amp;' escaping inside the genre name.
            genre = genre[:genreTestIndStart]+genre[genreTestIndStart+4:]
        # After retrieving both the image url and genre, store each in their respective lists
        genres.append(genre)
        img_urls.append(coverUrl)
        # Close the current album's page and switch back into the results page
        driver.close()
        driver.switch_to.window(driver.window_handles[0])
        # Store images and urls after 40 albums
        if len(genres) == 40:
            # Each 55x55 RGB cover flattens to 55*55*3 = 9075 integers.
            np_mat = numpy.empty((0, 9075))
            offset = 0  # how many genres were deleted so far (keeps indices aligned)
            for i in range(40):
                img_url = img_urls[i]
                print(img_url)
                # Encountered this problem where some images couldn't be flatted to 9075 integer values
                # If this happens, don't store the image, and delete its genre
                try:
                    image = Image.open(requests.get(img_url, stream=True).raw)
                    image = image.resize((55, 55))
                    np_img = numpy.array(image)
                    np_img = np_img.flatten()
                    np_mat = numpy.append(np_mat, np_img[numpy.newaxis, :], axis=0)
                except:
                    del genres[i-offset]
                    offset += 1
            np_mat = numpy.round(np_mat).astype(int)
            pd.DataFrame(numpy.array(genres)).to_csv("~/Genre-Predictor/genres"+str(csvCount)+".csv")
            numpy.savetxt("covers"+str(csvCount)+".csv", np_mat, delimiter=",")
            csvCount += 1
            genres.clear()
            img_urls.clear()
    # Try to go to the next results page. After 5 tries, we'll assume we've reached the last page
    count = 0
    nextNotFound = False
    while (True):
        try:
            count += 1
            driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
            element = WebDriverWait(driver, 10).until(
                EC.presence_of_element_located((By.CLASS_NAME, "next"))
            )
            element.click()
            break
        except:
            if count == 5:
                nextNotFound = True
                break
    if nextNotFound:
        break
# Close the driver and store any remaining images and genres
driver.close()
np_mat = numpy.empty((0, 9075))
for img_url in img_urls:
    # NOTE(review): unlike the in-loop batching above, failures here are not
    # caught, and a failed download would desynchronize covers from genres.
    image = Image.open(requests.get(img_url, stream=True).raw)
    image = image.resize((55, 55))
    np_img = numpy.array(image)
    np_img = np_img.flatten()
    np_mat = numpy.append(np_mat, np_img[numpy.newaxis, :], axis=0)
np_mat = numpy.round(np_mat).astype(int)
pd.DataFrame(numpy.array(genres)).to_csv("~/Genre-Predictor/genres"+str(csvCount)+".csv")
numpy.savetxt("covers"+str(csvCount)+".csv", np_mat, delimiter=",")
|
998,008 | e711559599f1af5246144b0bca5bd637f44d1669 | # Taken from https://gist.github.com/st4lk/6287746
import logging
import logging.handlers
# Root-logger setup: timestamped "LEVEL:logger: message (file:lineno)"
# records at DEBUG level, written to the stream handler's default (stderr).
f = logging.Formatter(fmt='%(asctime)s %(levelname)s:%(name)s: %(message)s '
                          '(%(filename)s:%(lineno)d)',
                      datefmt="%Y-%m-%d %H:%M:%S")
handler = logging.StreamHandler()
# Configuring the root logger affects every logger in the process.
root_logger = logging.getLogger()
root_logger.setLevel(logging.DEBUG)
handler.setFormatter(f)
handler.setLevel(logging.DEBUG)
root_logger.addHandler(handler)
|
998,009 | a1a85d898410a934ad8a792fd15200bcd0def1e5 | import torch
from torch.nn.modules.utils import _pair
import torch.nn.functional as F
from lnets.models.layers.conv.base_conv2d import BaseConv2D
from lnets.utils.math.projections import get_weight_signs, get_linf_projection_threshold
class LInfProjectedConv2D(BaseConv2D):
    """2D convolution whose weight matrix is projected, at every forward
    pass, so that its singular values are bounded by 1 (via a soft-threshold
    on the flattened weights)."""

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True, config=None):
        kernel_size = _pair(kernel_size)
        stride = _pair(stride)
        padding = _pair(padding)
        dilation = _pair(dilation)
        # Deliberately bypasses BaseConv2D.__init__ and calls its parent's
        # initializer directly (kept from the original); presumably
        # BaseConv2D.__init__ performs setup this subclass must avoid —
        # TODO confirm.
        super(BaseConv2D, self).__init__(
            in_channels, out_channels, kernel_size, stride, padding, dilation,
            False, _pair(0), groups, bias)
        self._set_config(config)
        self.original_shape = self.weight.shape
        self.out_channels = out_channels
        # BUG FIX: the original tested `stride == 1 or stride == [1, 1]`,
        # which is always False once _pair() has turned stride into a tuple,
        # so the warning never fired — and its message says the problem is
        # stride > 1 anyway. Warn exactly when stride is not (1, 1).
        if stride != (1, 1):
            print("BEWARE: Norm is not being preserved due to stride > 1. ")

    def forward(self, x):
        # Reshape and put in a matrix form: (out_channels, in_ch*kh*kw).
        flattened_weights = self.conv_form_to_matrix_form(self.weight, (self.out_channels, -1))

        # Orthonormalize. The scaling makes sure the singular values of the
        # matrix are constrained by 1: soft-threshold the magnitudes by the
        # per-row L-inf projection threshold, keeping the original signs.
        thresholds = get_linf_projection_threshold(flattened_weights, self.config.cuda)
        signs = get_weight_signs(flattened_weights)
        flattened_projected_weights = signs * torch.clamp(
            torch.abs(flattened_weights) - thresholds.unsqueeze(-1),
            min=torch.tensor(0).float())

        # Reshape back to the conv kernel layout and convolve.
        projected_weights = self.matrix_form_to_conv_form(flattened_projected_weights, self.original_shape)
        return F.conv2d(x, projected_weights, self.bias, self.stride, self.padding, self.dilation, self.groups)
|
998,010 | a0564ceb2fec6415056158c6af1b3ceff0ab2732 | """
TESTS is a dict with all you tests.
Keys for this will be categories' names.
Each test is dict with
"input" -- input data for user function
"answer" -- your right answer
"explanation" -- not necessary key, it's using for additional info in animation.
"""
TESTS = {
"0. Random X":
{
"player_mark": "X",
"bot": "random"
},
"1. Random O":
{
"player_mark": "O",
"bot": "random"
},
"2. Random X":
{
"player_mark": "X",
"bot": "random"
},
"3. Random O":
{
"player_mark": "O",
"bot": "random"
},
"4. Greedy X":
{
"player_mark": "X",
"bot": "greedy"
},
"5. Greedy O":
{
"player_mark": "O",
"bot": "greedy"
},
"6. Against X":
{
"player_mark": "X",
"bot": "against"
},
"7. Against O":
{
"player_mark": "O",
"bot": "against"
},
}
|
998,011 | eb17ecf2990034f3674f2b0e2c9ea376ecda21c5 | #! /usr/bin/env python3
from rift import *
def fact(n):
    """Reference factorial: n! for n >= 0, and 0 for negative n (matching
    the contract the C library under test is expected to follow)."""
    if n < 0:
        return 0
    result = 1
    for factor in range(2, n + 1):
        result *= factor
    return result
@Test
def test_fact_correct():
    """Compare the shared library's fact() against the Python reference on
    representative non-negative inputs; returns True on full agreement."""
    test_values = [0, 1, 7, 13, 20]
    for i in test_values:
        # rift.call invokes lib.fact with a C long long return type and
        # captures the call's stdout/stderr.
        ret, stdout, stderr = rift.call(lib.fact, rift.c_longlong, i)
        if ret != fact(i):
            print("fact({}) = {} != {}".format(i, ret, fact(i)))
            return False
    return True
@Test
def test_fact_fail():
    """Check the shared library's fact() on negative inputs, where the
    reference returns 0; returns True on full agreement."""
    test_values = [0, -1, -7, -13, -20]
    for i in test_values:
        ret, stdout, stderr = rift.call(lib.fact, rift.c_longlong, i)
        if ret != fact(i):
            print("fact({}) = {} != {}".format(i, ret, fact(i)))
            return False
    return True
# Load the shared library under test and run every @Test-decorated case
# (True presumably enables verbose output — confirm against rift's docs).
rift.init("fact.so")
rift.run_tests(True)
|
998,012 | cd25075f712fef95d9ffdf5ae966b6369f63f0cd | #2
print('Hello' + __name__)
'''If you are running calc , this name will be main but if you are import calc
in other module then it will print thename of mudule'''
|
998,013 | 4107431fe7b859ae2c158ee286bde5b846092bde | """This module contains the general information for GlVnicTemplate ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class GlVnicTemplateConsts():
    # Auto-generated constants holder for GlVnicTemplate; this managed
    # object defines no enumerated property values, hence the empty body.
    pass
class GlVnicTemplate(ManagedObject):
    """This is GlVnicTemplate class."""

    # NOTE: this file is auto-generated from the UCS Central object model;
    # the metadata tables below must match the server schema exactly.
    consts = GlVnicTemplateConsts()
    naming_props = set([u'id'])

    # MoMeta: class name, XML tag, RN pattern ("vt-[id]"), first supported
    # version, category, field mask, access privileges, parent/child classes.
    mo_meta = MoMeta("GlVnicTemplate", "glVnicTemplate", "vt-[id]", VersionMeta.Version201b, "InputOutput", 0x1f, [], ["read-only"], [u'glVnicTemplateEp'], [u'glOperationEp'], [None])

    # Per-property metadata: wire name, type, version, access, mask, length
    # bounds, validation regex, allowed values.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version201b, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, 0x2, 0, 256, None, [], []),
        "id": MoPropertyMeta("id", "id", "uint", VersionMeta.Version201b, MoPropertyMeta.NAMING, 0x4, None, None, None, [], []),
        "oper_template_name": MoPropertyMeta("oper_template_name", "operTemplateName", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version201b, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "template_name": MoPropertyMeta("template_name", "templateName", "string", VersionMeta.Version201b, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
    }

    # Map from XML attribute names to Python attribute names.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "id": "id",
        "operTemplateName": "oper_template_name",
        "rn": "rn",
        "status": "status",
        "templateName": "template_name",
    }

    def __init__(self, parent_mo_or_dn, id, **kwargs):
        # `id` is the naming property; the rest default to None until
        # populated from the server or via kwargs.
        self._dirty_mask = 0
        self.id = id
        self.child_action = None
        self.oper_template_name = None
        self.status = None
        self.template_name = None

        ManagedObject.__init__(self, "GlVnicTemplate", parent_mo_or_dn, **kwargs)
|
998,014 | c38652771c1da0d1036cac1edefa29bde859b6d9 |
from PIL import Image
import sys
import glob
files = glob.glob("img/1.jpg")
count = 0
for i in files:
    im = Image.open(i)
    pixdata = im.load()
    count += 1
    strCount = str(count)
    # BUG FIX: xrange() is Python-2-only; range() iterates identically here
    # and also runs on Python 3.
    # Pass 1 — binarize: any channel brighter than 185 -> black, else white.
    for y in range(im.size[1]):
        for x in range(im.size[0]):
            if 185 < pixdata[x, y][0] or 185 < pixdata[x, y][1] or 185 < pixdata[x, y][2]:
                pixdata[x, y] = (0, 0, 0)
            else:
                pixdata[x, y] = (255, 255, 255)
    # Pass 2 — edge marking over interior pixels: black pixels with a white
    # right/bottom neighbour become blue, those with a white left/top
    # neighbour stay black, and everything else becomes white. Note the
    # pass reads neighbours it has already rewritten (kept as-is).
    for y in range(1, im.size[1]-1):
        for x in range(1, im.size[0]-1):
            if pixdata[x, y] == (0,0,0) and (pixdata[x + 1, y] == (255, 255, 255) or pixdata[x + 1, y + 1] == (255, 255, 255) or pixdata[x, y + 1] == (255, 255, 255)):
                pixdata[x, y] = (0, 0, 255)
            elif pixdata[x, y] == (0,0,0) and (pixdata[x - 1, y] == (255, 255, 255) or pixdata[x - 1, y - 1] == (255, 255, 255) or pixdata[x, y - 1] == (255, 255, 255)):
                pixdata[x, y] = (0, 0, 0)
            else:
                pixdata[x, y] = (255, 255, 255)
    # NOTE(review): convert() returns a new image and its result is
    # discarded here — presumably a no-op; confirm before removing.
    im.convert('RGB')
    im.save('out/translation2.3/' + strCount + '.4.jpg', "JPEG")
998,015 | f1cd4ce2d2a86e93fcb53b3bcd21c40e08917f18 | """
Usage:
scripts/oneoff/fix-agreement-extensions.py <stage> <api_token> <download_directory> [--dry-run]
"""
import sys
sys.path.insert(0, '.')
import os
import re
import magic
from docopt import docopt
from dmapiclient import DataAPIClient
from dmutils.s3 import S3
from dmscripts.env import get_api_endpoint_from_stage
def get_all_pdfs(download_directory):
    """Yield the path of every .pdf under *download_directory*, skipping
    files whose names start with the '2015-11-' date prefix."""
    for root, _subdirs, filenames in os.walk(download_directory):
        for name in filenames:
            if name.endswith('.pdf') and not name.startswith('2015-11-'):
                yield os.path.join(root, name)
def get_filetype(path):
    """Return the MIME type of the file at *path* as reported by libmagic
    (bytes on this python-magic version, e.g. b"application/pdf")."""
    return magic.from_file(path, mime=True)
def is_empty(path):
    """True if the file at *path* contains zero bytes."""
    return os.stat(path).st_size == 0
def get_supplier_id_from_path(path):
    """Extract the numeric supplier ID that appears as its own path segment.

    Returns the ID as a string (to preserve any leading zeros).

    Raises:
        ValueError: if *path* contains no all-digit segment.
    """
    found = re.search(r'/(\d+)/', path)
    if found is None:
        raise ValueError("Could not find supplier ID in path {}".format(path))
    return found.group(1)
def handle_path(client, bucket, dry_run, path):
    """Route one downloaded agreement file: report empty files, and fix the
    stored extension of non-empty files that are not actually PDFs."""
    if is_empty(path):
        show_contact_details(client, dry_run, path)
        return
    filetype = get_filetype(path)
    if filetype != b"application/pdf":
        update_file_extension(client, bucket, dry_run, path, filetype)
def show_contact_details(client, dry_run, path):
    """Report a supplier whose uploaded agreement file is empty.

    In dry-run mode only the supplier ID and filename are printed; otherwise
    the supplier's name, declaration field SQ1-2b and contact email are
    fetched from the API and printed so they can be contacted.
    """
    supplier_id = get_supplier_id_from_path(path)
    if dry_run:
        print("Empty file for {} - {}".format(supplier_id, os.path.basename(path)))
    else:
        supplier = client.get_supplier(supplier_id)['suppliers']
        declaration = client.get_supplier_declaration(supplier_id, 'g-cloud-7')
        print(
            "Empty file for {}, {}, {}, {}".format(
                supplier_id,
                supplier['name'],
                declaration['declaration'].get('SQ1-2b', "none"),
                supplier['contactInformation'][0]['email']))
def get_correct_file_extension(filetype):
    """Map a libmagic MIME type (bytes) to the file extension it should have.

    Raises:
        ValueError: for MIME types outside the known set.
    """
    known_extensions = {
        b"application/zip": "zip",
        b"image/png": "png",
        b"image/jpeg": "jpg",
        b'application/vnd.openxmlformats-officedocument.wordprocessingml.document': 'docx',
    }
    try:
        return known_extensions[filetype]
    except KeyError:
        raise ValueError("Unknown file type: {}".format(filetype))
def get_path_in_s3(path):
    """Turn a local download path into its key in the agreements bucket:
    'g-cloud-7/' plus everything after the '/g-cloud-7/' path segment."""
    remainder = path.split('/g-cloud-7/')[1]
    return '/'.join(('g-cloud-7', remainder))
def update_file_extension(client, bucket, dry_run, path, filetype):
    """Copy a mis-labelled agreement file to a key with the correct
    extension and re-mark the framework agreement as returned.

    The original S3 key is kept; the file is copied (not moved) to the same
    key with the extension matching its real MIME type.
    """
    supplier_id = get_supplier_id_from_path(path)
    extension = get_correct_file_extension(filetype)
    path_in_s3 = get_path_in_s3(path)
    prefix, suffix = os.path.splitext(path_in_s3)
    new_path = "{}.{}".format(prefix, extension)
    if dry_run:
        print(
            "Not copying {} to {} for supplier {}".format(
                path_in_s3, new_path, supplier_id))
    else:
        print(
            "Copying {} to {} for supplier {} filetype {}".format(
                path_in_s3, new_path, supplier_id, filetype))
        # Server-side S3 copy with the corrected Content-Type; the ACL of
        # the source object is preserved.
        bucket.bucket.copy_key(
            new_path,
            src_bucket_name=bucket.bucket_name,
            src_key_name=path_in_s3,
            metadata={'Content-Type': filetype.decode('utf-8')},
            preserve_acl=True,
        )
        # Record in the API that the (fixed) agreement has been returned;
        # the last argument is the audit-trail message.
        client.register_framework_agreement_returned(
            supplier_id,
            'g-cloud-7',
            'script: fix incorrect extension'
        )
def get_bucket_name(stage):
    """Agreements S3 bucket name for *stage* (the stage appears twice)."""
    return 'digitalmarketplace-agreements-{stage}-{stage}'.format(stage=stage)
if __name__ == '__main__':
    # Parse the usage string at the top of the file (docopt).
    arguments = docopt(__doc__)

    stage = arguments['<stage>']
    api_token = arguments['<api_token>']
    download_directory = arguments['<download_directory>']
    dry_run = arguments['--dry-run']

    api_url = get_api_endpoint_from_stage(stage)

    # In dry-run mode no API client or bucket is needed: handle_path only
    # prints what it would do.
    if dry_run:
        client = None
        bucket = None
    else:
        client = DataAPIClient(api_url, api_token)
        bucket = S3(get_bucket_name(stage))

    for path in get_all_pdfs(download_directory):
        handle_path(client, bucket, dry_run, path)
998,016 | 5fac533690a2cded1f2a602d5ff9ee3a523841dd | # Generated by Django 3.2.4 on 2021-06-06 05:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.2): creates the Animal
    # model backed by the "animal" table, with a FK to taxonomy.Species.
    # Do not edit by hand — migration content must stay stable.
    initial = True

    dependencies = [
        ("taxonomy", "__first__"),
    ]

    operations = [
        migrations.CreateModel(
            name="Animal",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                (
                    "species",
                    # Deleting a species cascades to its animals.
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to="taxonomy.species",
                    ),
                ),
            ],
            options={
                "db_table": "animal",
            },
        ),
    ]
|
998,017 | 7dc59bf4be8552144274d27722c9e7b29e0333c0 | import cv2 as cv
import numpy as np
# Contour detection:
'''
Finds object contours on top of edge extraction,
so the edge-detection thresholds directly affect the final contour result.
'''
# Operates on the detected edges
def contours_demo(image):
    """Find contours on the Canny edge map of *image*, draw them in red on
    the image, and display the result in the "detect contours" window."""
    # Disabled alternative preprocessing: blur + grayscale + OTSU binarization.
    '''
    dst = cv.GaussianBlur(image,(3,3),0)
    gray = cv.cvtColor(dst,cv.COLOR_BGR2GRAY)
    ret,binary = cv.threshold(gray,0,255,cv.THRESH_BINARY | cv.THRESH_OTSU) #二值化
    cv.imshow("binary image",binary)
    '''
    binary = edge_demo(image)
    # Find contours on the edge image; returns the contour list and the
    # hierarchy information. RETR_EXTERNAL keeps only outermost contours,
    # CHAIN_APPROX_SIMPLE compresses straight segments to their endpoints.
    contours, hierarchy = cv.findContours(binary, cv.RETR_EXTERNAL,cv.CHAIN_APPROX_SIMPLE)
    for i,contour in enumerate(contours):
        # Draw contour i in red (BGR (0,0,255)), 2 px thick.
        cv.drawContours(image,contours,i,(0,0,255),2)
        print(i)  # debug: contour index
    cv.imshow("detect contours",image)
def edge_demo(image):
    """Return the Canny edge map of *image* (also shown in "Canny Edge")."""
    blurred = cv.GaussianBlur(image,(3,3),0)
    gray = cv.cvtColor(blurred,cv.COLOR_BGR2GRAY)
    # Gradient along x
    xgrad = cv.Sobel(gray,cv.CV_16SC1,1,0)
    # Gradient along y
    ygrad = cv.Sobel(gray,cv.CV_16SC1,0,1)
    # Edges from the two gradient images.
    # NOTE(review): thresholds 360/400 are unusually high — presumably tuned
    # for the 16-bit Sobel input; confirm they are intentional.
    edge_output = cv.Canny(xgrad,ygrad,360,400)
    cv.imshow("Canny Edge",edge_output)
    return edge_output
print("---------------Hello Python--------------")
src = cv.imread("D:/opencvwen/tuyanse2.PNG")
cv.namedWindow("input image",cv.WINDOW_AUTOSIZE)
cv.imshow("input image",src)
contours_demo(src)
cv.waitKey(0)
cv.destroyAllWindows() |
998,018 | b270018e1a4caed3d772c239610e5a05c6988df9 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2020-10-06 04:21
from __future__ import unicode_literals
import django.contrib.gis.db.models.fields
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration (Django 1.11): redefines Region.polygons as a
    # GEOS MultiPolygonField in WGS84 (SRID 4326). Do not edit by hand.
    dependencies = [
        ('seedsource', '0005_transferlimit_elevation'),
    ]

    operations = [
        migrations.AlterField(
            model_name='region',
            name='polygons',
            field=django.contrib.gis.db.models.fields.MultiPolygonField(srid=4326),
        ),
    ]
|
998,019 | 4d75f78e5fed9ff711c6e8f0c554096cf0486ec1 | # A - Discount Fare
# https://atcoder.jp/contests/abc113/tasks/abc113_a
# Total fare: full train fare plus the bus fare discounted to half,
# rounded down (integer division).
train_fare, bus_fare = (int(token) for token in input().split())
print(train_fare + bus_fare // 2)
|
998,020 | 763c141ed67ef685a64700b7af6dfec38d14caae | from pyramid.httpexceptions import HTTPForbidden
from pyramid.threadlocal import get_current_request
from kindergarden.lib import helpers
from kindergarden.lib.i18n import ugettext as _
# from kindergarden.models import SYSTEM_SCHEMA, PUBLIC_SCHEMA, DBSession
def add_renderer_globals(event, request=None):
    """Pyramid BeforeRender subscriber: expose the request's translator as
    ``_`` and the helpers module as ``h`` to every template.

    NOTE(review): *event* is treated as a mapping here while add_localizer
    uses getattr — presumably BeforeRender is dict-like and NewRequest is an
    object; confirm against the registered event types.
    """
    if request is None:
        request = event.get('request', get_current_request())
    event['_'] = request.translate
    event['h'] = helpers
def add_localizer(event, request=None):
    """NewRequest subscriber: attach a ``translate`` callable to the request
    that routes through the app's ugettext with the request bound."""
    if request is None:
        request = getattr(event, 'request', get_current_request())
    request.translate = lambda string: _(string, request=request)
def add_csrf_validation(event, request=None):
    """NewRequest subscriber: reject POST requests whose ``_csrf`` form
    token is absent or does not match the session's CSRF token.

    Raises:
        HTTPForbidden: on a missing or invalid token.
    """
    # NOTE(review): unlike the other subscribers, this never falls back to
    # get_current_request(); if the event lacks .request and request is
    # None, the .method access below raises AttributeError — confirm intended.
    request = getattr(event, 'request', request)
    if request.method == 'POST':
        token = request.POST.get('_csrf')
        if token is None or token != request.session.get_csrf_token():
            raise HTTPForbidden('CSRF token is missing or invalid')
# def pg_set_search_path(event, request=None):
# DBSession.execute('SET search_path TO %s,%s' % (SYSTEM_SCHEMA, PUBLIC_SCHEMA))
|
998,021 | 5815ba7aed36396e1099ebbf631518431539f1a3 | from django.shortcuts import render
from django.http import HttpResponse
from .models import M1301,M1332,M1333,M1352,M1376,M1377
from .forms import HelloForm
def index(request):
    """Render hello/index.html; on POST, compute per-stock volume ratios
    for the submitted month (vs 2018/11) and the max 2018/12 high/close
    across six stock models."""
    # data = M1301.objects.all()
    dekidaka_list = []  # NOTE(review): never used — ratios go into `company`
    takane_list = []    # 2018/12 highs, one per model
    owarine_list = []   # 2018/12 closes, one per model
    company = {}        # model name (minus the leading 'M') -> volume ratio
    params = {
        'title':'Hello',
        'form':HelloForm(),
        'message':'all friends',
        'com_name':[],
        'dekidaka':[],
        'takane':[],
        'owarine':[]
    }
    if request.method == 'POST' :
        date = request.POST['date']
        params['form'] = HelloForm(request.POST)
        for i in [M1301,M1332,M1333,M1352,M1376,M1377]:
            # Trading volume ("dekidaka") for the requested month relative
            # to the 2018/11 baseline.
            rate=i.objects.filter(date = date).values_list('dekidaka', flat=True).get()/i.objects.filter(date = '2018/11').values_list('dekidaka', flat=True).get()
            company[i.__name__[1:]] = rate
            takane_list.append(i.objects.filter(date = '2018/12').values_list('top', flat=True).get())
            owarine_list.append(i.objects.filter(date = '2018/12').values_list('close', flat=True).get())
        # NOTE(review): max(company) is the lexicographically largest model
        # name, not the company with the highest ratio — if the intent is
        # "largest ratio", this should be max(company, key=company.get).
        params['com_name'] = max(company)
        params['dekidaka'] = company[max(company)]
        params['takane'] = max(takane_list)
        params['owarine'] = max(owarine_list)
    return render(request, 'hello/index.html', params)
998,022 | c011430fe1cb974a29bb90f860310b23bc6e2821 |
import pymongo
# connecting mongoDB Atlas and accessing database
# SECURITY NOTE(review): credentials are hard-coded in the connection URI;
# they should be moved to environment variables before this code is shared.
client = pymongo.MongoClient("mongodb+srv://himanshu:Database%40123@cluster0-ktyqe.mongodb.net/test?retryWrites=true&w=majority")
mydb = client.University
# to add student in Students collection on mongoDB cloud
# to add student in Students collection on mongoDB cloud
def add_student(name, age, roll_no, branch):
    """Insert a student document unless one with the same roll number exists.

    Returns a human-readable status message in either case.
    """
    # BUG FIX: the duplicate check queried "Roll No." (with a trailing dot)
    # while documents are stored under "Roll No", so it could never match
    # and the same student could be inserted repeatedly.
    unique_student = mydb.students.find_one({"Roll No": roll_no})
    if unique_student:
        return "Student already exists"
    mydb.students.insert_one(
        {
            "Name": name,
            "Age": age,
            "Roll No": roll_no,
            "Branch": branch
        })
    return "Student added successfully"
# Storing data: seed the roster (add_student skips roll numbers already present).
add_student('Himanshu',21,'A045','CS')
add_student('Ashish',20,'A019','CS')
add_student('Kritika',19,'A053','CS')
add_student('Divya',19,'A036','CS')
add_student('Hitesh',20,'A046','CS')
add_student('Lakshya',20,'A056','CS')
add_student('Aditya',21,'A08','CS')
add_student('Akshay',20,'A06','CS')
add_student('Abhishek',21,'A01','CS')
add_student('Kushal',21,'A052','CS')
# to fetch all students in Students collection on mongoDB cloud
def fetch_all_student():
    """Print every document currently in the students collection."""
    student_collection = mydb.students.find()
    for document in student_collection:
        print(document)
fetch_all_student()
998,023 | 2d60f0b8592e7376aa7379adca3d1631a7eefb58 | import torch
from torch.utils.data import Dataset
class SideInformationDataset(Dataset):
    """Torch Dataset pairing samples with side information and two thresholds."""

    def __init__(self, data, side_information, min_th, max_th):
        self.data = data                          # raw samples
        self.side_information = side_information  # auxiliary features per sample
        self.min_th = min_th                      # lower threshold (currently unused here)
        self.max_th = max_th                      # upper threshold (currently unused here)

    def __len__(self):
        # NOTE(review): self.inp_data is never assigned (only self.data is set
        # in __init__), so this raises AttributeError — confirm intended attribute.
        return len(self.inp_data)

    def __getitem__(self, index):
        # target = self.out_data[ind]
        # data_val = self.data[index] [:-1]
        # NOTE(review): self.inp_data / self.out_data are never set anywhere;
        # as written this raises AttributeError — likely meant to use self.data
        # and self.side_information. Confirm before use.
        return self.inp_data[index], self.out_data[index]
|
998,024 | ad3881729f05f9471a5a8f2ca167e528a9f32929 | #This example is about how to read arguments on python
from sys import argv
# Unpack the script name and exactly three positional arguments
# (raises ValueError if a different number is supplied).
script, first, second, third = argv
print("This script is called:", script);
print("First :",first);
print("Second :", second);
print("Third :", third);
|
998,025 | b44c2edf4536ffe05bd430d0732544a1203aee85 | import os.path
import sys
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256
from Crypto.Cipher import PKCS1_OAEP
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES
from base64 import b64encode, b64decode
import json
class FileController:
    """Encrypts/decrypts files with an AES-GCM key that is stored on disk
    wrapped with RSA-OAEP; the RSA private key is passphrase-protected."""
    key_path = "./keys/"
    sym_key_path = 'file_sym.pem'        # wrapped AES key (path gets a per-password prefix)
    private_key_path = 'file_priv.pem'   # passphrase-protected RSA private key
    def __init__(self,initiator, password):
        """Load (or interactively generate) the key material for `password`.

        `initiator` is used as the authenticated header of every envelope.
        """
        self.initiator = initiator
        self.set_key_location(password)
        self.sym_key = b''
        if os.path.isfile(self.sym_key_path) and os.path.isfile(self.private_key_path):
            # Both key files exist: unlock the RSA key, then unwrap the AES key.
            self.load_private_key(password)
            self.load_sym_key(self.private_key)
        else:
            print("Missing decoding key file. Would you like to generate new one? (Y/n)")
            if str(input()) == "n": sys.exit()
            print("Generating file encrypting keys")
            if os.path.isfile(self.private_key_path):
                # Reuse the existing RSA key; only the AES key is regenerated.
                self.load_private_key(password)
                self.generate_sym_key(self.private_key)
            else:
                self.generate_private_key(password)
                self.generate_sym_key(self.private_key)
    def decrypt_file(self, file_name, json_input):
        """Decrypt a JSON envelope produced by encrypt_file and write the
        plaintext to `file_name`; raises ValueError if the GCM tag check fails."""
        b64 = json.loads(json_input)
        json_k = ['nonce', 'header', 'ciphertext', 'tag']
        jv = {k: b64decode(b64[k]) for k in json_k}
        cipher = AES.new(self.sym_key, AES.MODE_GCM, nonce=jv['nonce'])
        cipher.update(jv['header'])
        plain_text = cipher.decrypt_and_verify(jv['ciphertext'], jv['tag'])
        with open(file_name, "wb") as f:
            f.write(plain_text)
            f.close()
    def encrypt_file(self,file_name):
        """Encrypt `file_name` with AES-GCM (initiator name as the
        authenticated header) and return a base64 JSON envelope as bytes."""
        with open(file_name, "rb") as f:
            header = self.initiator.encode()
            cipher = AES.new(self.sym_key, AES.MODE_GCM)
            cipher.update(header)
            ciphertext, tag = cipher.encrypt_and_digest(f.read())
            f.close()
        json_k = ['nonce', 'header', 'ciphertext', 'tag']
        json_v = [b64encode(x).decode('utf-8')
                  for x in (cipher.nonce, header, ciphertext, tag)]
        return json.dumps(dict(zip(json_k, json_v))).encode()
    def set_key_location(self,password):
        # Generate unique symmetric keys for every password
        password_hash = SHA256.new(password.encode())
        prefix = str(password_hash.hexdigest())[:64]+"_"
        if not os.path.isdir(self.key_path): os.mkdir(self.key_path)
        self.sym_key_path = self.key_path+prefix + self.sym_key_path
        # NOTE(review): the RSA key path is NOT password-prefixed, so all
        # passwords share one private key file — confirm that is intended.
        self.private_key_path = self.key_path + self.private_key_path
    def load_private_key(self,password):
        """Read and unlock the RSA private key using `password` as passphrase."""
        with open(self.private_key_path,"rb") as f:
            self.private_key = RSA.import_key(f.read(),password)
            f.close()
    def load_sym_key(self,private_key):
        """Unwrap the AES key from disk with RSA-OAEP into self.sym_key."""
        with open(self.sym_key_path, "rb") as f:
            cipher_rsa = PKCS1_OAEP.new(private_key)
            self.sym_key = cipher_rsa.decrypt(f.read())
            f.close()
    def generate_private_key(self,password):
        """Create a 2048-bit RSA key and store it passphrase-protected (PKCS#8)."""
        self.private_key = RSA.generate(2048)
        with open(self.private_key_path, "wb") as f:
            f.write(self.private_key.export_key(format='PEM', pkcs=8, passphrase=password))
            f.close()
    def generate_sym_key(self, private_key):
        """Create a fresh 128-bit AES key and store it RSA-OAEP-wrapped.

        NOTE(review): self.sym_key is not updated here; callers appear to rely
        on a later load_sym_key() to read it back — confirm.
        """
        with open(self.sym_key_path, "wb") as f:
            key = get_random_bytes(16)
            cipher_rsa = PKCS1_OAEP.new(private_key)
            f.write(cipher_rsa.encrypt(key))
            f.close()
|
998,026 | 29bcf7902d6b7fba8e492a5542734832276794f1 | a = [1,2,3,4,5]
b = [[3,1,2,1],[2,2],[3,3],[4,4],[5,5]]
def log_list(c, d):
    # Print each element of c together with the type of the entry in global b
    # at the position where that element occurs in global a.
    # NOTE(review): parameter d is unused.
    for element in c:
        print(element)
        index = a.index(element)
        print(type(b[index]))
def find_index_in_list(d):
    """For each sub-list of d, print the sub-list and the index of the first
    occurrence of the value 1.

    Raises ValueError (from list.index) when a sub-list contains no 1.
    """
    for sub in d:
        # BUG FIX: the Python 2 `print i` statements were syntax errors under
        # Python 3; use the print() function (works on both versions).
        print(sub)
        index_matched = sub.index(1)
        print(index_matched)

if __name__ == "__main__":
    #log_list(iter(a), iter(b))
    find_index_in_list(b)
998,027 | 400030ce527eb27706e79f9a4f566a19e9b5754e | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-02-10 13:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates Course, Task and Topic, all
    # sharing the same base fields (show flag, last_modified stamp, titles,
    # short description). Do not edit by hand beyond comments.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Course',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('show', models.BooleanField(default=True, verbose_name='Show on site')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Datetime last modified')),
                ('title', models.CharField(max_length=255, verbose_name='Заголовок')),
                ('long_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Длинный заголовок')),
                ('short_desc', models.TextField(blank=True, null=True, verbose_name='Краткое описание')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Task',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('show', models.BooleanField(default=True, verbose_name='Show on site')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Datetime last modified')),
                ('title', models.CharField(max_length=255, verbose_name='Заголовок')),
                ('long_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Длинный заголовок')),
                ('short_desc', models.TextField(blank=True, null=True, verbose_name='Краткое описание')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('show', models.BooleanField(default=True, verbose_name='Show on site')),
                ('last_modified', models.DateTimeField(auto_now=True, verbose_name='Datetime last modified')),
                ('title', models.CharField(max_length=255, verbose_name='Заголовок')),
                ('long_title', models.CharField(blank=True, max_length=255, null=True, verbose_name='Длинный заголовок')),
                ('short_desc', models.TextField(blank=True, null=True, verbose_name='Краткое описание')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
998,028 | 98f793a10dc0b25f03f758d2ce0b438476790aae | import urllib, urllib2, json
from collections import defaultdict
from cards import Card
class DeckBrewError(Exception):
    """Raised for invalid Deckbrew requests or API error responses."""

    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        # Show the stored message in repr form, matching the original contract.
        return "%r" % (self.msg,)
class Request(object):
    """Paged query against the Deckbrew /mtg/cards endpoint.

    NOTE(review): Python 2 only (urllib/urllib2, dict.items() concatenation,
    `print` statement).
    """
    # Allowed filter keywords and the type each value must have.
    filter_types = {'status': str, 'set': str, 'name': str, 'format': str, 'color': str, 'multiverse': str, 'rarity': str, 'subtype': str, 'oracle': str, 'supertype': str, 'type': str, "multicolor":bool}
    def __init__(self, params={}):
        # NOTE(review): mutable default argument; safe only because
        # base_params is never mutated — confirm it stays read-only.
        self.base_url = "https://api.deckbrew.com/mtg/cards"
        self.base_params = params
        self.reset()
    def reset(self):
        """Clear accumulated filters, cached pages and the page cursor."""
        self.filters = defaultdict(set)
        self.all = []
        self.pages = {}
        self.next_page = 1
    def __download_page(self, opt_params):
        """Fetch one result page, cache it in self.pages and append to self.all."""
        params = dict(self.base_params.items() + opt_params.items())
        if "page" not in params:
            params["page"] = 1
        # Repeated filter values become repeated query parameters.
        url = self.base_url + "?" + urllib.urlencode(params.items() + [(k, i) for k, v in self.filters.items() for i in v])
        print url
        result = json.loads(urllib2.urlopen(url).read())
        if "errors" in result:
            raise DeckBrewError("Error: " + " - ".join(result["errors"]))
        cards = [Card(i) for i in result]
        self.pages[params["page"]] = cards
        self.all += cards
    def page(self, num=1):
        """Return page `num` (downloading it on first access)."""
        if num not in self.pages:
            self.__download_page({"page":num})
        if num == self.next_page:
            self.next_page += 1
        return self.pages[num]
    def next(self):
        """Return the next not-yet-fetched page."""
        return self.page(self.next_page)
    def filter(self, **kwargs):
        """Add filter values (validated against filter_types); returns self."""
        for k, v in kwargs.items():
            if k not in Request.filter_types:
                raise DeckBrewError("Invalid keyword argument: "+k)
            if type(v) != Request.filter_types[k]:
                raise DeckBrewError("Invalid type for keyword %s - expected %s, received %s"%(k, Request.filter_types[k], type(v)))
            self.filters[k].add(v)
        return self
998,029 | f6986b269707e593405ea6b2cb680cb944fff1c5 | # Generated by Django 3.2 on 2021-04-12 21:17
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: relaxes CalendarEvent fields (description/notes become
    # optional, event_type capped at 30 chars). Do not edit beyond comments.
    dependencies = [
        ('events', '0005_auto_20210412_2105'),
    ]
    operations = [
        migrations.AlterField(
            model_name='calendarevent',
            name='description',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
        migrations.AlterField(
            model_name='calendarevent',
            name='event_type',
            field=models.CharField(max_length=30),
        ),
        migrations.AlterField(
            model_name='calendarevent',
            name='notes',
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
    ]
|
998,030 | 71565563e4f458e5afea86f6f5b515e430d5f7e3 | '''
Created on Jul 7, 2017
@author: Karim Hammouda
'''
#solution1 based on building alphabatical histogram (All letters) => O(n)
def anagram_solution1(s1, s2):
    """Anagram check via 26-bucket letter histograms (assumes lowercase a-z)."""
    counts_a = [0] * 26
    counts_b = [0] * 26
    base = ord('a')
    for ch in s1:
        counts_a[ord(ch) - base] += 1
    for ch in s2:
        counts_b[ord(ch) - base] += 1
    # The words are anagrams exactly when every bucket count matches.
    return counts_a == counts_b
#solution2 based on sorting and comparing the 2 words => O(n) expensive sort()
def anagram_solution2(s1, s2):
    """Anagram check by sorting both strings and comparing (O(n log n)).

    BUG FIX: the original compared only the first len(s1) characters, so it
    returned True when s2 was s1 plus extra letters and raised IndexError
    when s2 was shorter; strings of different lengths are now rejected.
    """
    if len(s1) != len(s2):
        return False
    a_list1 = sorted(s1)
    a_list2 = sorted(s2)
    pos = 0
    matches = True
    # Walk both sorted sequences in lockstep until a mismatch appears.
    while pos < len(s1) and matches:
        if a_list1[pos] == a_list2[pos]:
            pos = pos + 1
        else:
            matches = False
    return matches
#my solution combining solution1 and solution2 using dictionary histogram (not all latters) => O(n) replacing sort with dict search O(1)
def anagram_solutionkiki(word1, word2):
    """Anagram check via character-frequency dictionaries (O(n)).

    Unlike the 26-bucket variant, this works for arbitrary characters,
    including uppercase and non-ASCII.
    """
    if len(word1) != len(word2):
        return False
    histogram1 = dict()
    histogram2 = dict()
    for character in word1:
        histogram1[character] = histogram1.get(character, 0) + 1
    for character in word2:
        histogram2[character] = histogram2.get(character, 0) + 1
    # FIX: removed leftover debug print() calls of the two histograms — a
    # predicate function should not write to stdout.
    # Equal lengths plus matching counts for every key of histogram1 implies
    # the histograms are identical, so a one-sided comparison suffices.
    for key in histogram1.keys():
        if histogram1[key] != histogram2.get(key, 0):
            return False
    return True
def main():
    # Demo: the two words are anagrams of each other, so this prints True.
    print(anagram_solutionkiki('Kaaaareeem','eaaaaKreem'))
    return

if __name__ == '__main__':
    main()
|
998,031 | 9bd8ee79a7aebd7b1f1dcd208fd8c8bb2e38327c | # -*- coding=utf-8 -*-
'''
@creatdate: 2016-12-19
@author: YuhuiXu
@description
'''
from Selenium2Library import Selenium2Library
class testlib(Selenium2Library):
    """Custom Robot Framework keyword extensions for Selenium2Library:
    non-raising element checks, table row/column/cell helpers and string
    encoding utilities. Added By Yuhuixu. (Python 2 era Selenium API.)"""
    def if_page_contain_elemet(self, locator, loglevel='INFO'):
        '''
        Verifies that current page contains `locator` element.
        Non-raising variant of "Page Should Contain Element": logs the page
        source and returns False instead of failing the test.
        (Method name typo "elemet" kept for backward compatibility.)
        :param locator:
        :param loglevel:
        :return False/True
        '''
        if not self._is_element_present(locator):
            self.log_source(loglevel)
            # raise AssertionError("Page should have contained element '%s' "
            #                      "but did not" % locator)
            return False
        self._info("Current page contains element '%s'." % locator)
        return True
    #-----------------table element--------------------------------------------------
    def get_table_rows(self, table_locator, loglevel='INFO'):
        """Return the rows of the table.
        Header and footer rows are included in the count.
        """
        table = self._table_element_finder.find(self._current_browser(), table_locator)
        if table is not None:
            rows = table.find_elements_by_xpath("./thead/tr")
            rows.extend(table.find_elements_by_xpath("./tbody/tr"))
            rows.extend(table.find_elements_by_xpath("./tfoot/tr"))
            return len(rows)
        self.log_source(loglevel)
        raise AssertionError("Table %s could not be found." % table_locator)
    def get_table_cols_at_row(self, table_locator, row, loglevel='INFO'):
        """Return the columns of the table in one row.
        Row number start from 1. It will count the columns at one row,
        the argument 'row' is the row number which you should give it.
        """
        row = int(row)
        row_index = row - 1
        table = self._table_element_finder.find(self._current_browser(), table_locator)
        if table is not None:
            # tbody/tfoot rows are fetched lazily, only when the requested
            # index is beyond the rows collected so far.
            rows = table.find_elements_by_xpath("./thead/tr")
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tbody/tr"))
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tfoot/tr"))
            if row_index < len(rows):
                columns = rows[row_index].find_elements_by_tag_name('th')
                columns.extend(rows[row_index].find_elements_by_tag_name('td'))
                return len(columns)
        raise AssertionError("Table %s in row #%s could not be found." % (table_locator, str(row)))
    def click_element_at_table_cell(self, table_locator, row, column, loglevel='INFO'):
        """Click a table cell.
        Row and column number start from 1. Header and footer rows are
        included in the count. This means that also cell content from
        header or footer rows can be obtained with this keyword. To
        understand how tables are identified, please take a look at
        the `introduction`.
        """
        row = int(row)
        row_index = row - 1
        column = int(column)
        column_index = column - 1
        table = self._table_element_finder.find(self._current_browser(), table_locator)
        if table is not None:
            rows = table.find_elements_by_xpath("./thead/tr")
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tbody/tr"))
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tfoot/tr"))
            if row_index < len(rows):
                columns = rows[row_index].find_elements_by_tag_name('th')
                if column_index >= len(columns): columns.extend(rows[row_index].find_elements_by_tag_name('td'))
                if column_index < len(columns):
                    return columns[column_index].click()
        self.log_source(loglevel)
        raise AssertionError("Cell in table %s in row #%s and column #%s could not be found."
                             % (table_locator, str(row), str(column)))
    def click_link_at_table_cell(self, table_locator, row, column, loglevel='INFO'):
        """Click link within a table cell.
        Row and column number start from 1. Header and footer rows are
        included in the count. This means that also cell content from
        header or footer rows can be obtained with this keyword. To
        understand how tables are identified, please take a look at
        the `introduction`.
        """
        row = int(row)
        row_index = row - 1
        column = int(column)
        column_index = column - 1
        table = self._table_element_finder.find(self._current_browser(), table_locator)
        if table is not None:
            rows = table.find_elements_by_xpath("./thead/tr")
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tbody/tr"))
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tfoot/tr"))
            if row_index < len(rows):
                columns = rows[row_index].find_elements_by_tag_name('th')
                if column_index >= len(columns): columns.extend(rows[row_index].find_elements_by_tag_name('td'))
                if column_index < len(columns):
                    # Clicks the first <a> inside the cell.
                    return columns[column_index].find_element_by_tag_name('a').click()
        self.log_source(loglevel)
        raise AssertionError("Cell in table %s in row #%s and column #%s could not be found."
                             % (table_locator, str(row), str(column)))
    def click_subelement_at_table_cell(self, table_locator, row, column, sub_element_xpath, loglevel='INFO'):
        """Click a sub element indentified classpath in a table cell.
        Row and column number start from 1. Header and footer rows are
        included in the count. This means that also cell content from
        header or footer rows can be obtained with this keyword. To
        understand how tables are identified, please take a look at
        the `introduction`.
        """
        row = int(row)
        row_index = row - 1
        column = int(column)
        column_index = column - 1
        table = self._table_element_finder.find(self._current_browser(), table_locator)
        if table is not None:
            rows = table.find_elements_by_xpath("./thead/tr")
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tbody/tr"))
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tfoot/tr"))
            if row_index < len(rows):
                columns = rows[row_index].find_elements_by_tag_name('th')
                if column_index >= len(columns): columns.extend(rows[row_index].find_elements_by_tag_name('td'))
                if column_index < len(columns):
                    # Clicks the element matched by the given xpath, relative to the cell.
                    return columns[column_index].find_element_by_xpath(sub_element_xpath).click()
        self.log_source(loglevel)
        raise AssertionError("Cell in table %s in row #%s and column #%s could not be found."
                             % (table_locator, str(row), str(column)))
    def get_index_in_table_column(self, table_locator, col, expected, loglevel='INFO'):
        """get content's index in a specific column contains `content`.
        Row and column number start from 1. Header and footer rows are
        included in the count. However, the header and footer content
        will not be matched against 'expected'; when a header exists the
        returned index is shifted by one to account for it.
        See `Page Should Contain Element` for explanation about `loglevel` argument.
        """
        has_head=0
        element = self._table_element_finder.find_by_header(self._current_browser(), table_locator, None)
        if element is not None:
            has_head = 1
        index = self._table_element_finder.find_in_col(self._current_browser(), table_locator, col, expected)
        if index <= 0:
            self.log_source(loglevel)
            raise AssertionError("Column #%s in table identified by '%s' "
                                 "should have contained text '%s'."
                                 % (col, table_locator, expected))
        return index+has_head
    def get_index_in_table_row(self, table_locator, row, expected, loglevel='INFO'):
        """Get content's index in a specific table row contains `content`.
        Row and column number start from 1. Header and footer rows are
        included in the count. This means that also cell content from
        header or footer rows can be obtained with this keyword. To
        understand how tables are identified, please take a look at
        the `introduction`.
        See `Page Should Contain Element` for explanation about `loglevel` argument.
        """
        row = int(row)
        row_index = row - 1
        table = self._table_element_finder.find(self._current_browser(), table_locator)
        if table is not None:
            rows = table.find_elements_by_xpath("./thead/tr")
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tbody/tr"))
            if row_index >= len(rows): rows.extend(table.find_elements_by_xpath("./tfoot/tr"))
            if row_index < len(rows):
                columns = rows[row_index].find_elements_by_tag_name('th')
                columns.extend(rows[row_index].find_elements_by_tag_name('td'))
                column_index = 0
                # Return the 1-based index of the first cell whose text contains `expected`.
                for element in columns:
                    column_index = column_index + 1
                    element_text = element.text
                    if element_text and expected in element_text:
                        return column_index
        self.log_source(loglevel)
        raise AssertionError("%s could not be found in row #%s of table %s."
                             % (expected, str(row), table_locator))
    """Added By Yuhuixu"""
    def decode(self,customstr,mode='utf-8'):
        '''
        turn to unicode
        :param customstr:
        :param mode:
        '''
        return customstr.decode(mode)
    """Added By Yuhuixu"""
    def encode(self,customstr,mode='utf-8'):
        '''
        turn to gb2312
        :param customstr:
        :param mode:
        '''
        return customstr.encode(mode)
    """Added By Yuhuixu"""
    def select_from_list_by_text(self, locator, *texts):
        """Selects `*texts` from list identified by `locator`
        Select list keywords work on both lists and combo boxes. Key attributes for
        select lists are `id` and `name`. See `introduction` for details about
        locating elements.
        """
        if not texts:
            raise ValueError("No texts given.")
        items_str = "text(s) '%s'" % ", ".join(texts)
        self._info("Selecting %s from list '%s'." % (items_str, locator))
        select = self._get_select_list(locator)
        for text in texts:
            select.select_by_visible_text(text)
998,032 | 1438793e10aadd63a8c58e1579f6d62a2b05be1b | '''Tendo como dados de entrada a altura de uma pessoa, construa um algoritmo que calcule seu peso ideal, usando a seguinte fórmula: (72.7*altura) - 58 '''
altura = float(input('Digite a sua altura: '))
# Ideal weight per the given formula: (72.7 * height) - 58.
p_ideal = (72.7 * altura) - 58
# NOTE(review): the f-string has no placeholder; print() joins its two
# arguments with a space, which still produces the intended message.
print(f'O seu peso ideal é', p_ideal)
|
998,033 | 510e9c58e6350e285456c13c2390cbe48cde600f | from common import *
# 2-sides checkerboard
def draw(**kwargs):
imgsz=np.array([2*1024]*2)
def radial_warp(i,j):
cx,cy=imgsz/2
a,r=np.arctan2(i-cy,j-cx),np.sqrt((i-cy)**2+(j-cx)**2)
r=r*(1+0.1*np.sin(0.008*r))
a=a*6/4
return cx+np.cos(a)*r,cy+np.sin(a)*r
im=checkerboard(imgsz, imgsz//16)^imtile(boxN(imgsz//8,4),imgsz)
im2=checkerboard(imgsz, imgsz//16)^imtile(boxN(imgsz//16,4),imgsz)
im[512:1536,512:1536]=im2[512:1536,512:1536]
im=imwarp(im,radial_warp,cycle)
return im
if __name__ == '__main__':
im=draw()
imshow(im)
imsave(im,'p09.png')
|
998,034 | ba453d124d3965737edcc457f9d33169e6174463 | import math
x = int(input('podaj liczbe do sprawdzenia: '))
i = 2
y = True  # y means "x is prime" when printed at the end
# 1 is not prime, and any perfect square is divisible by its root.
if (x == 1 or math.sqrt(x)%1==0):
    y = False
else:
    # Trial division by every candidate below x (O(n)).
    # NOTE(review): inputs <= 0 skip the loop and are reported as prime.
    while(x > i):
        if (x % i == 0):
            y = False
            break
        i += 1
print (y)
|
998,035 | 5f7c4fe0f4113606ea38ac162f56ea707f134614 | import csv
from random import randint
# Load every row of the crossword clue file: row = [clue, answer, extra].
clues = []
with open('crossworddata.csv','r') as csvfile:
    reader = csv.reader(csvfile, delimiter=',')
    for row in reader:
        clues.append(row)
length = len(clues)
# Interactive quiz loop: show the answer with one letter hidden, then reveal
# the clue/answer/extra one <Enter> press at a time.
while True:
    # BUG FIX: random.randint is inclusive at BOTH ends, so randint(0, length)
    # could produce an index one past the last clue and raise IndexError.
    randNumber = randint(0, length - 1)
    randWord = clues[randNumber][1]
    # Strip separators so only the answer's letters remain.
    randAnswer = randWord.replace(" ", "").replace("-", "").replace("'", "").replace("!", " ")
    lenRandAnswer = len(randAnswer)
    # Hide one randomly chosen letter.
    randLetter = randint(0,lenRandAnswer-1)
    answerList = list(randAnswer)
    answerList[randLetter]='?'
    output = "".join(answerList)
    print (output)
    input()
    print (clues[randNumber][0])
    input()
    print (clues[randNumber][1])
    input()
    print (clues[randNumber][2])
    input()
998,036 | c2742273d7b9725ce14e202bbe78c7bd8f01df4a |
# Author Ilya Bilyi
import math
RESPONCE="\t Що виконати?\n*******************************\n 1 додавання \n 2 Віднімання \n 3 Ділення \n 4 множення \n 5 до степеня\n 6 сінус \n 7 косинус \n 8 аргсінус \n 9 аргкосинус \n*******************************\n "
print ('$$$ Вітаю це я калькулятор! $$$')
# loop while precheck input data
flag = True
while(flag == True):
print(RESPONCE)
variant = input('Ваш варіант: ')
# TODO: handle input is not float
if(variant.isdigit() == True):
variant = int(variant)
else:
print("\nВведено літеру: {}! Ведіть цифру \n".format(variant))
continue
if variant in range(1,11):
flag = False
else:
print("\nНекоректно введене значення: {} \n".format(variant))
continue
flag2 = True
while(flag2 == True):
first_number = input('Введіть число 1: ')
if(first_number.isdigit() == True):
first_number = int(first_number)
flag2 = False
else:
print("\nВведено літеру: {}! Ведіть цифру \n".format(first_number))
continue
if variant in range(1,6):
flag3 = True
while(flag3 == True):
second_number = input('Введіть число 2: ')
if(second_number.isdigit() == True) :
second_number = int(second_number)
flag3 = False
else:
print("\nВведено літеру: {}! Ведіть цифру \n".format(second_number))
continue
if variant == 1:
r = first_number + second_number
t = 'додавання: {} + {}'.format(first_number, second_number)
if variant == 2:
r = first_number - second_number
t = ('віднімання {} - {} '.format(first_number, second_number ) )
if variant == 3:
r = float(first_number / second_number)
t = 'ділення: {} / {}'.format(first_number, second_number)
if variant == 4:
r = first_number * second_number
t = 'множення: {} * {}'.format(first_number, second_number)
if variant == 5:
r = (first_number ** second_number)
t = 'піднесення до степення: {} ^ {}'.format(first_number, second_number)
if variant == 6:
r = math.sin(math.radians(first_number))
t = 'сінус числа: {}'.format(first_number)
if variant == 7:
r = math.cos(math.radians(first_number))
t = 'косинус числа: {}'.format(first_number)
if variant == 8:
r = math.asin(math.radians(first_number))
t = 'арксінус числа: {}'.format(first_number)
print ('Результат ',t,' = ',r)
|
998,037 | f88d44ec7f9c5479ed4050786747f3cebd9b339b | import unittest
from src.Display.IOTest import IOTest
from src.Display.ConsoleInputs import getVolume
from unittest.mock import MagicMock
class GetVolumeTest(unittest.TestCase):
    """Tests for ConsoleInputs.getVolume: replies of 0-10 (scripted through
    IOTest or a MagicMock) are expected to scale to a 0.0-1.0 volume, and
    out-of-range/invalid input is re-prompted."""
    def test_getValidVolume(self):
        logger = IOTest()
        logger.setInputList(["5"])
        self.assertEqual(0.5, getVolume(logger))
    def test_getValidVolumeWithSpaces(self):
        # Surrounding whitespace must be tolerated.
        logger = IOTest()
        logger.setInputList([" 5 "])
        self.assertEqual(0.5, getVolume(logger))
    def test_getMinVolume(self):
        logger = IOTest()
        logger.setInputList(["0"])
        self.assertEqual(0, getVolume(logger))
    def test_getMaxVolume(self):
        logger = IOTest()
        logger.setInputList(["10"])
        self.assertEqual(1, getVolume(logger))
    def test_getOutOfBoundsVolume(self):
        # First reply (-1) is rejected; the second is accepted.
        logger = IOTest()
        logger.setInputList(["-1", "5"])
        self.assertEqual(0.5, getVolume(logger))
    def test_getInvalidVolume(self):
        # Empty reply is rejected; the second is accepted.
        logger = IOTest()
        logger.setInputList(["", "5"])
        self.assertEqual(0.5, getVolume(logger))
    def test_getMultipleVolumesMock(self):
        logger = IOTest()
        logger.takeInput = MagicMock(side_effect=["5", "10", "0"])
        self.assertEqual(0.5, getVolume(logger))
        self.assertEqual(1, getVolume(logger))
        self.assertEqual(0, getVolume(logger))
    def test_getMultipleVolumesMockThirdOutput(self):
        logger = IOTest()
        logger.takeInput = MagicMock(side_effect=["5", "10", "0"])
        getVolume(logger)
        getVolume(logger)
        self.assertEqual(0, getVolume(logger))

if __name__ == '__main__':
    unittest.main()
|
998,038 | 4c2ec65ef3b2db292c2be961b22f29e767d10be7 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import django_filters
from .models import Article
class ArticleFilter(django_filters.rest_framework.FilterSet):
    """Filter set for Article list endpoints."""
    # Accepts a comma-separated list of category ids (?category=1,2).
    category = django_filters.rest_framework.BaseInFilter(field_name='category_id')
    # Case-insensitive substring match on the related category's name.
    category_name = django_filters.rest_framework.CharFilter(method='category_filter',label='标题')
    # Case-insensitive substring match on the article title.
    title = django_filters.rest_framework.CharFilter(field_name='title', lookup_expr='icontains')
    def category_filter(self, queryset,name,value):
        # Method filter backing `category_name`.
        return queryset.filter(category__name__icontains=value)
    class Meta:
        model = Article
        fields = ['category','title','category_name' ]
class Details:
    """Holds a first/last name pair, exposed through the combined Name property."""

    def __init__(self, F, L):
        self.first = F
        self.last = L

    @property
    def Name(self):
        # Combined "first last" form.
        return ' '.join((self.first, self.last))

    @Name.setter
    def Name(self, N):
        # Preserves the original (odd) slicing contract: the second character
        # becomes the first name, everything from index 3 the last name.
        self.first = N[1:2]
        self.last = N[3:]
# Demo of the Name property: getter joins first + last.
R = Details('Ryan', 'Alderson')
Fullname = R.Name
print(Fullname)
# The setter slices the assigned string: first = N[1:2], last = N[3:].
S = Details('The', 'Shafeeq')
S.Name = 'Shafeeq'
NewName = S.Name
print(NewName)
|
998,040 | 6d0bd5fcf0866efaea3aeeae54f2e23315edfc7c | import time
start = time.time()
def printElapsedTime(prefix=None):
    """Print the seconds elapsed since module import, optionally prefixed."""
    elapsed = str(time.time() - start)
    print(prefix + ' ' + elapsed if prefix else elapsed)
|
998,041 | d2327e8c85dfca4b15e751253bafd4b8cbf6d69d | import json
import os
# Filter out blacklisted funds and strip redundant fields from the rest.
with open('data/error_funds.json', 'r') as fp:
    error_fund = json.load(fp)
DATAPath = '/home/kuoluo/data/fund_data'
good_funds = []
for _file in os.listdir(DATAPath):
    with open(DATAPath + '/' + _file, 'r') as fp:
        json_data = json.load(fp)
    if json_data[0]['fund_id'] not in error_fund:
        # Drop fields not needed in the cleaned dataset.
        json_data[0].pop('_id')
        json_data[0].pop('benchmark')
        json_data[0].pop('symbol')
        json_data[0].pop('accrued_daily')
        json_data[0].pop('exchange')
        json_data[0].pop('round_lot')
        for i, asset in enumerate(json_data[0]['asset_allocation_records']):
            # Sanity check: 'nav' should duplicate 'net_asset' before removal.
            if json_data[0]['asset_allocation_records'][i]['nav'] != json_data[0]['asset_allocation_records'][i]['net_asset']:
                print('error')
            json_data[0]['asset_allocation_records'][i].pop('nav')
        for i, holding_records in enumerate(json_data[0]['holding_records']):
            for j, holding in enumerate(holding_records['holdings_list']):
                # Sanity check: each holding's datetime must match its record's.
                if json_data[0]['holding_records'][i]['holdings_list'][j]['datetime']!= json_data[0]['holding_records'][i]['datetime']:
                    print('error')
                json_data[0]['holding_records'][i]['holdings_list'][j].pop('datetime')
                json_data[0]['holding_records'][i]['holdings_list'][j].pop('type')
                json_data[0]['holding_records'][i]['holdings_list'][j].pop('symbol')
        good_funds.append(json_data[0])
with open('data/good_funds.json', 'w') as wp:
    json.dump(good_funds, wp)
998,042 | 6ab2481617c4b77c4b7056c87b2e80097c94ad02 | #!/usr/bin/python
import cgi, cgitb
import datetime
import os
import sys
#from datetime import *
# CGI handler (Python 2: note the `print page` statement at the end): fills a
# citation HTML template with the submitted user name and project name.
cgitb.enable()
page='Content-type: text/html\n\n'
form=cgi.FieldStorage()
user = form["username"].value
project=form["projectname"].value
# Template file name is style + type, e.g. "apabook.html".
citationtype=form['style'].value
citationtype+=form['type'].value
f1=open(citationtype+'.html','r')
f2=f1.read()
f1.close()
page+=f2
# Substitute the template placeholders.
page=page.replace("*****",user)
page=page.replace("&&&&&",project)
print page
998,043 | a55bc963a5253d76e7ef2b6d9dc13e0f2c023717 | import csv
import json
import datetime
source_base = "../data/ml-1m/"
# Convert MovieLens users.dat ("::"-separated) into a Django fixture.
print("Converting users...")
users = []
occupation_list = []
with open(source_base + "users.dat") as infile:
    # The files use "::" as the delimiter; csv only accepts one char, so
    # rewrite it to ";" on the fly.
    reader = csv.reader((line.replace("::", ";") for line in infile),
                        delimiter=";")
    for row in reader:
        row_last = row
        users.append({"model": "Rater.Rater",
                      "pk": row[0],
                      "fields": {
                          "gender": row[1],
                          "age": row[2],
                          "occupation": row[3],
                          "zip_code": row[4]
                      }})
        if row[3] not in occupation_list:
            occupation_list.append(row[3])
print(" example: "+str(users[0]))
print(" ")
print(occupation_list)
with open("fixtures/users.json", "w") as outfile:
    outfile.write(json.dumps(users))
# Convert movies.dat; genres are "|"-separated names for now (replaced by
# pks further below).
print("Converting movies...")
movies = []
genre_list = []
with open(source_base + "movies.dat", encoding="windows-1252") as infile:
    reader = csv.reader((line.replace("::", ";") for line in infile),
                        delimiter=";")
    for row in reader:
        row_last = row
        m_genres = row[2].split("|")
        movies.append({"model": "Rater.Movie",
                       "pk": row[0],
                       "fields": {
                           "title": row[1],
                           "genre": m_genres
                       }})
        for g in m_genres:
            if g not in genre_list:
                genre_list.append(g)
print(row_last)
print(row_last[2].replace("|",","))
print(" example: "+str(movies[0]))
print(" ")
print("Converting genres...")
genres = []
print(genre_list)
# Build the genre fixture entries.
for i in range(len(genre_list)):
    genres.append({"model": "Rater.Genre",
                   # BUG FIX: get_genre_pk() resolves a genre name to i + 1,
                   # so fixture pks must be 1-based as well; the original
                   # 0-based pks left every movie referencing the wrong genre.
                   "pk": i + 1,
                   "fields": {
                       "text": genre_list[i]
                   }})
print(" ")
def get_genre_pk(text, genre_list):
    """Map a genre name to its 1-based primary key within genre_list."""
    for position, genre in enumerate(genre_list):
        if genre == text:
            return position + 1
    # Unknown name: log it and fall through (implicitly returns None).
    print(" genre lookup failed " + text)
with open("fixtures/genres.json", "w") as outfile:
    outfile.write(json.dumps(genres))
# Replace each movie's genre names with the corresponding fixture pks.
for movie in movies:
    g_set = movie["fields"]["genre"]
    for i in range(len(g_set)):
        movie["fields"]["genre"][i] = get_genre_pk(movie["fields"]["genre"][i], genre_list)
with open("fixtures/movies.json", "w") as outfile:
    outfile.write(json.dumps(movies))
# Convert ratings.dat; ratings get sequential 1-based pks.
print("Converting ratings...")
ratings = []
with open(source_base + "ratings.dat") as infile:
    reader = csv.reader((line.replace("::", ";") for line in infile),
                        delimiter=";")
    for idx, row in enumerate(reader):
        row_last = row
        ratings.append({"model": "Rater.Rating",
                        "pk": idx + 1,
                        "fields": {
                            "rater": row[0],
                            "movie": row[1],
                            "rating": row[2],
                            "posted": row[3]
                        }})
print(row_last)
print(" example: "+str(ratings[0]))
print(" ")
with open("fixtures/ratings.json", "w") as outfile:
    outfile.write(json.dumps(ratings))
|
998,044 | 0cbba22cba7827052dbe959fba7d5814ff14ea3b | formatter = "{} {} {} {}"
print(formatter.format(1, 2, 3, 4))
print(formatter.format("one", "two", "three", "four"))
print(formatter.format(True, False, False, True))
print(formatter.format(formatter, formatter, formatter, formatter))
print(formatter.format(
"hello, it's",
"me",
"can you",
"hear me?",
"can this line be printed?"# this line will be ommited.
))
|
998,045 | 4529a940f1630ba74a57f9e49c0b555b409f7814 | # Generated by Django 3.0.7 on 2020-08-16 20:03
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('movies', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BillType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
],
),
migrations.CreateModel(
name='PaymentMethod',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=25)),
],
),
migrations.CreateModel(
name='ResState',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Reservation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('creation', models.DateTimeField(auto_now_add=True)),
('description', models.TextField()),
('scoring', models.PositiveIntegerField(default=0)),
('show', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='movies.Show', verbose_name='movie that was booked')),
('spots', models.ManyToManyField(to='movies.Spot', verbose_name='spots that were booked')),
('state', models.ForeignKey(default=1, on_delete=django.db.models.deletion.PROTECT, to='reservations.ResState', verbose_name='state of the reservation')),
],
),
migrations.CreateModel(
name='Bill',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.FloatField()),
('saved_score', models.FloatField()),
('bill_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='reservations.BillType', verbose_name='billing type')),
('payment_method', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='reservations.PaymentMethod', verbose_name='payment method of the bill')),
('reservation', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, related_name='reservation', to='reservations.Reservation', verbose_name='reservation of the bill')),
],
),
]
|
998,046 | 717d617544e0372b162cf3ac788d55bc5deaef14 | #!/usr/bin/env python3
import argparse
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import formatdate
from email import encoders
import os
import smtplib
import requests
def send_mail(to, fro, subject, text, files=None, server='localhost'):
    """Send a plain-text email with optional file attachments.

    Parameters:
        to      list[str]: recipient addresses.
        fro     str: sender address.
        subject str: message subject line.
        text    str: plain-text message body.
        files   list[str] | None: paths of files to attach (default none).
        server  str: SMTP host to relay through.

    Raises:
        TypeError: if ``to`` or ``files`` is not a list.
        OSError, smtplib.SMTPException: on file or SMTP failures.
    """
    if files is None:
        files = []
    # Raise a real error instead of assert (asserts are stripped under -O),
    # and use isinstance so list subclasses are accepted.
    if not isinstance(to, list):
        raise TypeError("'to' must be a list of recipient addresses")
    if not isinstance(files, list):
        raise TypeError("'files' must be a list of file paths")
    msg = MIMEMultipart()
    msg['From'] = fro
    msg['To'] = ', '.join(to)
    msg['Date'] = formatdate(localtime=True)
    msg['Subject'] = subject
    msg.attach(MIMEText(text))
    for path in files:
        part = MIMEBase('application', "octet-stream")
        # Context manager closes the file even if set_payload fails;
        # the original leaked the handle from open(...).read().
        with open(path, "rb") as fh:
            part.set_payload(fh.read())
        encoders.encode_base64(part)
        part.add_header('Content-Disposition', 'attachment; filename="%s"'
                        % os.path.basename(path))
        msg.attach(part)
    # Guarantee the connection is shut down even if sendmail raises;
    # quit() also sends the SMTP QUIT command before closing.
    smtp = smtplib.SMTP(server)
    try:
        smtp.sendmail(fro, to, msg.as_string())
    finally:
        smtp.quit()
def main():
    """Fetch queued messages from an HTTP endpoint and relay each via SMTP.

    Command line:
        host                 host serving the message queue as JSON.
        --user / --password  optional HTTP basic-auth credentials.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('host', type=str)
    parser.add_argument('--user', type=str)
    parser.add_argument('--password', type=str)
    args = parser.parse_args()
    # argparse always defines the attribute (None when the flag is absent),
    # so the original `'user' in args` was always true and sent a bogus
    # (None, None) basic-auth header.  Authenticate only when given.
    auth = (args.user, args.password) if args.user is not None else None
    messages = requests.get('http://{}/'.format(args.host), auth=auth).json()
    for message in messages:
        send_mail(
            message['rcpt_tos'],
            message['mail_from'],
            message['subject'],
            message['body']
        )
if __name__ == '__main__':
main()
|
998,047 | 833b7abb71a27e4d523d1fb3db3187d1f5fa7207 |
from marshmallow import Schema, fields
class EventSchema(Schema):
    """Marshmallow schema for serializing an event record."""

    name = fields.Str()
    title = fields.Str()
    # Free-form scheduling details: string keys mapped to string values.
    # NOTE(review): the actual key set (e.g. "start"/"end") is not visible
    # here — confirm against the producers of this payload.
    when = fields.Dict(
        keys=fields.Str(),
        values=fields.Str()
    )
|
998,048 | 3727f358477024deca172df4fb7afbebcd3a1327 | a = int(input('请输入一个数字'))
if a>=1 and a<=10:
print('胜利')
else:
print('错误')
|
998,049 | b23afde1c94cc7a70b0414109a2a471dc7ff443f | import os
import requests
from redis import Redis
# Default configurations can be set with environment variables
REDIS_ADDRESS = os.environ.get('REDIS_ADDRESS', 'redis-service')
PROXY_ADDRESS_0 = os.environ.get('PROXY_ADDRESS_0',
'redis-proxy-cache-service-0')
PROXY_PORT_0 = os.environ.get('PROXY_PORT_0', 5000)
PROXY_ADDRESS_1 = os.environ.get('PROXY_ADDRESS_1',
'redis-proxy-cache-service-1')
PROXY_PORT_1 = os.environ.get('PROXY_PORT_1', 5001)
class MyTestEngine:
    """Test helper driving a Redis backing store and a caching proxy.

    Connects directly to the Redis instance (to seed/clear data) and to a
    proxy instance over HTTP (to exercise and reset its cache).
    """

    def __init__(self, proxy_address=PROXY_ADDRESS_0, proxy_port=PROXY_PORT_0):
        self.redis_conn = Redis(REDIS_ADDRESS, 6379, None)
        # str() is required: the port may arrive as an int (the module-level
        # default 5000) or as a string from the environment.  The original
        # bare concatenation raised TypeError for the int default.
        self.proxy_url = 'http://' + proxy_address + ':' + str(proxy_port)

    def clear_redis(self):
        """Clear all key-value pairs from the Redis backing instance."""
        self.redis_conn.flushall()

    def clear_cache(self):
        """Clear the cache of the proxy instance."""
        requests.get(url=self.proxy_url + '/clear_cache')

    def clear_all(self):
        """Clear both the Redis data and the proxy cache."""
        self.clear_redis()
        self.clear_cache()

    def populate_redis(self, d):
        """Populate the Redis backing instance from the given dictionary.

        Parameters:
            d dict: key-value pairs to store.
        """
        for k, v in d.items():
            self.redis_conn.set(k, v)

    def make_request(self, key):
        """Issue an HTTP GET to the proxy for ``key`` and return the response.

        Parameters:
            key str: the key whose value is requested.
        """
        params = {'requestedKey': key}
        return requests.get(url=self.proxy_url, params=params)
|
998,050 | 41e9b1e8f88e0edbb231a23a2d6260b3dbfbe24f | #!/usr/bin/python3
from models.amenity import Amenity
from models.base_model import BaseModel
from models.city import City
from models.engine.file_storage import FileStorage
from models.place import Place
from models.review import Review
from models.state import State
from models.user import User
all_classes = {"Amenity": Amenity, "BaseModel": BaseModel, "City": City,
"Place": Place, "Review": Review, "State": State, "User": User}
storage = FileStorage()
storage.reload()
|
998,051 | 48450c0609b7eec8be936625f34ff3cb9dcae486 | addon_id="script.icechannel.TheVideo.settings"
addon_name="iStream - TheVideo - Settings"
import xbmcaddon
addon = xbmcaddon.Addon(id=addon_id)
addon.openSettings()
|
998,052 | 190b5e110becd20f5115ccf3061b98d7d9f8d332 | # c = get_config()
# c.TerminalIPythonApp.display_banner = True
# c.InteractiveShell.autoindent = True
# c.InteractiveShell.deep_reload = True
# c.PromptManager.in_template = '>> '
# c.PromptManager.in2_template = ' .\D.: '
# c.PromptManager.out_template = ' '
# c.PromptManager.justify = True
# c.InteractiveShellApp.exec_lines = [
# 'import numpy',
# 'import sympy as sy',
# 'from sympy import diff, expand, simplify'
# 'from sympy.abc import x, y, z, w'
# ]
import numpy
import sympy as sy
from sympy import diff, expand, simplify, pprint
from sympy import sin, cos, tan, exp, log, atan, sqrt
from sympy.abc import x, y, z, w
sy.init_printing()
print("\n [INFO] SymPy => Symbolic Engine Loaded Ok \n")
|
998,053 | 1b78cf6ca8e7426fc74d6b8ac0e3a3758110f022 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class Solution(object):
    def convertToTitle(self, n):
        """Convert a positive column number to its Excel column title.

        1 -> 'A', 26 -> 'Z', 27 -> 'AA', 28 -> 'AB', ...

        :type n: int
        :rtype: str
        """
        result = ''
        while n:
            # Shift to 0-based so multiples of 26 map to 'Z' instead of
            # rolling over to the next letter.
            n -= 1
            result = chr(65 + n % 26) + result  # 65 == ord('A')
            # Floor division: the original `n /= 26` yields a float on
            # Python 3, which breaks chr() on the next iteration.
            # `//=` behaves identically for ints on Python 2 and 3.
            n //= 26
        return result
|
998,054 | 773a2fe9cae1a48689066d6fdf195a5b0ce97bd9 | pattern_zero=[0.0, 0.0475, 0.09, 0.1, 0.1275, 0.1475, 0.16, 0.1875, 0.19, 0.2, 0.21, 0.2275, 0.24, 0.2475, 0.25, 0.26, 0.2875, 0.29, 0.3, 0.31, 0.3275, 0.34, 0.3475, 0.35, 0.36, 0.3875, 0.39, 0.4, 0.41, 0.4275, 0.44, 0.4475, 0.45, 0.46, 0.4875, 0.49, 0.5, 0.51, 0.5275, 0.54, 0.5475, 0.55, 0.56, 0.5875, 0.59, 0.6, 0.61, 0.6275, 0.64, 0.6475, 0.65, 0.66, 0.6875, 0.69, 0.7, 0.71, 0.7275, 0.74, 0.7475, 0.75, 0.76, 0.7875, 0.79, 0.8, 0.81, 0.8275, 0.84, 0.8475, 0.85, 0.86, 0.8875, 0.89, 0.9, 0.91, 0.9275, 0.94, 0.9475, 0.95, 0.96, 0.9875, 0.99]
pattern_odd=[0.0, 0.01, 0.0275, 0.04, 0.0475, 0.05, 0.06, 0.0875, 0.09, 0.1, 0.11, 0.1275, 0.14, 0.1475, 0.15, 0.16, 0.1875, 0.19, 0.2, 0.21, 0.2275, 0.24, 0.2475, 0.25, 0.26, 0.2875, 0.29, 0.3, 0.31, 0.3275, 0.34, 0.3475, 0.35, 0.36, 0.3875, 0.39, 0.4, 0.41, 0.4275, 0.44, 0.4475, 0.45, 0.46, 0.4875, 0.49, 0.5, 0.51, 0.5275, 0.54, 0.5475, 0.55, 0.56, 0.5875, 0.59, 0.6, 0.61, 0.6275, 0.64, 0.6475, 0.65, 0.66, 0.6875, 0.69, 0.7, 0.71, 0.7275, 0.74, 0.7475, 0.75, 0.76, 0.7875, 0.79, 0.8, 0.81, 0.8275, 0.84, 0.8475, 0.85, 0.86, 0.8875, 0.89, 0.9, 0.91, 0.9275, 0.94, 0.9475, 0.95, 0.96, 0.9875, 0.99]
pattern_even=[0.0, 0.01, 0.0275, 0.04, 0.0475, 0.05, 0.06, 0.0875, 0.09, 0.1, 0.11, 0.1275, 0.14, 0.1475, 0.15, 0.16, 0.1875, 0.19, 0.2, 0.21, 0.2275, 0.24, 0.2475, 0.25, 0.26, 0.2875, 0.29, 0.3, 0.31, 0.3275, 0.34, 0.3475, 0.35, 0.36, 0.3875, 0.39, 0.4, 0.41, 0.4275, 0.44, 0.4475, 0.45, 0.46, 0.4875, 0.49, 0.5, 0.51, 0.5275, 0.54, 0.5475, 0.55, 0.56, 0.5875, 0.59, 0.6, 0.61, 0.6275, 0.64, 0.6475, 0.65, 0.66, 0.6875, 0.69, 0.7, 0.71, 0.7275, 0.74, 0.7475, 0.75, 0.76, 0.7875, 0.79, 0.8, 0.81, 0.8275, 0.84, 0.8475, 0.85, 0.86, 0.8875, 0.89, 0.9, 0.91, 0.9275, 0.94, 0.9475, 0.95, 0.96, 0.9875, 0.99]
averages_even={0.0: [0.0], 0.1875: [0.75, 0.25], 0.25: [0.5], 0.1: [0.0], 0.89: [0.9, 0.1], 0.0475: [0.95, 0.45, 0.05, 0.55], 0.6475: [0.95, 0.45, 0.55, 0.05], 0.6875: [0.25, 0.75], 0.15: [0.5], 0.06: [0.8, 0.2], 0.26: [0.2, 0.8], 0.69: [0.9, 0.1], 0.44: [0.6, 0.4], 0.85: [0.5], 0.79: [0.9, 0.1], 0.7875: [0.75, 0.25], 0.4: [0.0], 0.8: [0.0], 0.55: [0.5], 0.5875: [0.25, 0.75], 0.3275: [0.15, 0.65, 0.85, 0.35], 0.71: [0.7, 0.3], 0.96: [0.2, 0.8], 0.1475: [0.95, 0.45, 0.55, 0.05], 0.76: [0.2, 0.8], 0.56: [0.8, 0.2], 0.1275: [0.35, 0.15, 0.65, 0.85], 0.5475: [0.95, 0.45, 0.55, 0.05], 0.09: [0.9, 0.1], 0.3475: [0.95, 0.45, 0.55, 0.05], 0.84: [0.6, 0.4], 0.5: [0.0], 0.29: [0.9, 0.1], 0.16: [0.2, 0.8], 0.5275: [0.85, 0.35, 0.65, 0.15], 0.9475: [0.95, 0.45, 0.05, 0.55], 0.14: [0.6, 0.4], 0.6: [0.0], 0.4875: [0.25, 0.75], 0.2: [0.0], 0.81: [0.7, 0.3], 0.61: [0.7, 0.3], 0.99: [0.9, 0.1], 0.51: [0.7, 0.3], 0.95: [0.5], 0.4475: [0.95, 0.45, 0.55, 0.05], 0.31: [0.7, 0.3], 0.9275: [0.15, 0.65, 0.85, 0.35], 0.0275: [0.15, 0.65, 0.85, 0.35], 0.4275: [0.85, 0.35, 0.65, 0.15], 0.11: [0.7, 0.3], 0.35: [0.5], 0.64: [0.6, 0.4], 0.6275: [0.85, 0.35, 0.65, 0.15], 0.0875: [0.25, 0.75], 0.45: [0.5], 0.3875: [0.25, 0.75], 0.7: [0.0], 0.9875: [0.75, 0.25], 0.9: [0.0], 0.91: [0.7, 0.3], 0.01: [0.7, 0.3], 0.2275: [0.15, 0.65, 0.85, 0.35], 0.41: [0.7, 0.3], 0.46: [0.8, 0.2], 0.8275: [0.15, 0.65, 0.85, 0.35], 0.39: [0.9, 0.1], 0.34: [0.6, 0.4], 0.75: [0.5], 0.21: [0.7, 0.3], 0.2475: [0.95, 0.45, 0.55, 0.05], 0.3: [0.0], 0.94: [0.6, 0.4], 0.04: [0.6, 0.4], 0.2875: [0.75, 0.25], 0.8875: [0.75, 0.25], 0.66: [0.8, 0.2], 0.24: [0.6, 0.4], 0.7475: [0.95, 0.45, 0.55, 0.05], 0.59: [0.9, 0.1], 0.36: [0.8, 0.2], 0.54: [0.6, 0.4], 0.86: [0.2, 0.8], 0.19: [0.9, 0.1], 0.49: [0.9, 0.1], 0.65: [0.5], 0.8475: [0.95, 0.45, 0.05, 0.55], 0.7275: [0.15, 0.65, 0.85, 0.35], 0.05: [0.5], 0.74: [0.6, 0.4]}
averages_odd={0.0: [0.0], 0.1875: [0.75, 0.25], 0.25: [0.5], 0.1: [0.0], 0.89: [0.9, 0.1], 0.0475: [0.95, 0.45, 0.05, 0.55], 0.6475: [0.95, 0.45, 0.55, 0.05], 0.6875: [0.25, 0.75], 0.15: [0.5], 0.06: [0.8, 0.2], 0.26: [0.2, 0.8], 0.69: [0.9, 0.1], 0.44: [0.6, 0.4], 0.85: [0.5], 0.79: [0.9, 0.1], 0.7875: [0.75, 0.25], 0.4: [0.0], 0.8: [0.0], 0.55: [0.5], 0.5875: [0.25, 0.75], 0.3275: [0.15, 0.65, 0.85, 0.35], 0.71: [0.7, 0.3], 0.96: [0.2, 0.8], 0.1475: [0.95, 0.45, 0.55, 0.05], 0.76: [0.2, 0.8], 0.56: [0.8, 0.2], 0.1275: [0.35, 0.15, 0.65, 0.85], 0.5475: [0.95, 0.45, 0.55, 0.05], 0.09: [0.9, 0.1], 0.3475: [0.95, 0.45, 0.55, 0.05], 0.84: [0.6, 0.4], 0.5: [0.0], 0.29: [0.9, 0.1], 0.16: [0.2, 0.8], 0.5275: [0.85, 0.35, 0.65, 0.15], 0.9475: [0.95, 0.45, 0.05, 0.55], 0.14: [0.6, 0.4], 0.6: [0.0], 0.4875: [0.25, 0.75], 0.2: [0.0], 0.81: [0.7, 0.3], 0.61: [0.7, 0.3], 0.99: [0.9, 0.1], 0.51: [0.7, 0.3], 0.95: [0.5], 0.4475: [0.95, 0.45, 0.55, 0.05], 0.31: [0.7, 0.3], 0.9275: [0.15, 0.65, 0.85, 0.35], 0.0275: [0.15, 0.65, 0.85, 0.35], 0.4275: [0.85, 0.35, 0.65, 0.15], 0.11: [0.7, 0.3], 0.35: [0.5], 0.64: [0.6, 0.4], 0.6275: [0.85, 0.35, 0.65, 0.15], 0.0875: [0.25, 0.75], 0.45: [0.5], 0.3875: [0.25, 0.75], 0.7: [0.0], 0.9875: [0.75, 0.25], 0.9: [0.0], 0.91: [0.7, 0.3], 0.01: [0.7, 0.3], 0.2275: [0.15, 0.65, 0.85, 0.35], 0.41: [0.7, 0.3], 0.46: [0.8, 0.2], 0.8275: [0.15, 0.65, 0.85, 0.35], 0.39: [0.9, 0.1], 0.34: [0.6, 0.4], 0.75: [0.5], 0.21: [0.7, 0.3], 0.2475: [0.95, 0.45, 0.55, 0.05], 0.3: [0.0], 0.94: [0.6, 0.4], 0.04: [0.6, 0.4], 0.2875: [0.75, 0.25], 0.8875: [0.75, 0.25], 0.66: [0.8, 0.2], 0.24: [0.6, 0.4], 0.7475: [0.95, 0.45, 0.55, 0.05], 0.59: [0.9, 0.1], 0.36: [0.8, 0.2], 0.54: [0.6, 0.4], 0.86: [0.2, 0.8], 0.19: [0.9, 0.1], 0.49: [0.9, 0.1], 0.65: [0.5], 0.8475: [0.95, 0.45, 0.05, 0.55], 0.7275: [0.15, 0.65, 0.85, 0.35], 0.05: [0.5], 0.74: [0.6, 0.4]} |
998,055 | 435f443ba8a0f25e0a64402da5df95f104dcadd7 | from django.contrib import admin
from .models import Company
from apps.client.models import Client
class ChoiceInline(admin.StackedInline):
    """Inline editor listing a company's clients on the company admin page."""
    model = Client
    # No extra blank client forms by default; at most 10 inline clients.
    extra = 0
    max_num = 10
class CompanyAdmin(admin.ModelAdmin):
    """Admin configuration for Company: field layout, filters, search, inlines."""
    fieldsets = (
        ('Customer Company', {
            'fields': (
                'name',
                'address',
                'city',
                'uuid',
            ),
        }),
    )
    list_display = ('name', 'city',)
    list_filter = ('city',)
    inlines = [
        ChoiceInline,
    ]
    # uuid is displayed but not editable through the admin.
    readonly_fields = ('uuid',)
    search_fields = ('name', 'city')
    ordering = ('name', 'city')
admin.site.register(Company, CompanyAdmin)
|
998,056 | c5ef5aaf45451a48980f2ae7f92bd0df35f136cd | # Generated by Django 3.1 on 2020-08-29 20:57
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: redeclare Director.id as an explicit AutoField."""
    dependencies = [
        ('catalog', '0007_auto_20200829_2356'),
    ]
    operations = [
        migrations.AlterField(
            model_name='director',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
|
998,057 | f84c77b3375cd21cc505d1e8be20765a72605ad1 | from flask import Flask, request, send_from_directory, send_file
from flask_cors import CORS
import importlib
import json
app = Flask(__name__)
CORS(app)
# api path setup:
@app.route('/api/<resource>')
def call_api(resource):
    """Dispatch /api/<resource> to api.<resource>.get(request); return JSON.

    NOTE(review): the URL segment is interpolated straight into
    importlib.import_module, so a crafted segment can trigger the import
    (and module-level code) of arbitrary importable modules under ``api``.
    Consider validating ``resource`` against an explicit whitelist.
    """
    res = importlib.import_module("api.{}".format(resource)).get(request)
    return json.dumps(res)
# path for main svelte page:
@app.route('/')
def base():
    """Serve the built Svelte single-page-app entry point."""
    # return send_from_directory('svelte-client/public', 'index.html')
    return send_file('svelte-client/public/index.html')
# path for all frontend static files
@app.route('/<path:path>')
def home(path):
    """Serve static frontend assets from the Svelte build directory."""
    return send_from_directory('svelte-client/public', path)
if __name__ == "__main__":
    app.run()
|
998,058 | df5b19cb1353af4b1ea729fd1c2486c0d6b773de | n1=int(input("Digite a primeira nota: "))
n2=int(input("Digite a segunda nota: "))
# Average of two integer grades: always ends in .0 or .5, so the gaps
# between the bands below (e.g. 8.9 < m < 9) are unreachable in practice.
# Averages above 10 (grades outside 0-10) print nothing.
m=(n1+n2)/2
if m>=9 and m<=10:
    print("Ótimo")
elif m>=7 and m<=8.9:
    print("Bom")
elif m>=6 and m<=6.9:
    print("Suficiente")
elif m>=0 and m<=5.9:
    print("Insuficiente")
|
998,059 | 511a28bee002e82ad0e3f70c4d6bdabb63c77256 | # Samuel #
cadena=[]
i='1'
while i!='':
i=input()
cadena.append(i)
print(cadena)
|
998,060 | a5b21b7e57c83376f71aa272bec4e2fb81dedf5e |
import glob
import os
from scripts.build_support import *
Import ('*')
files = glob.glob (env['rootdir'] + 'src/tools/logview/*.c')
envcopy = env.Copy ()
envcopy.ParseConfig ('pkg-config libglade-2.0 libxml-2.0 --cflags --libs')
file_list = strip_path_from_files (files)
add_include_path (envcopy['CPPPATH'], '#src/tools/logview/include')
add_include_path (envcopy['CPPPATH'], '#src/tools')
logview = envcopy.Program ('logview', file_list)
Return ('logview')
|
998,061 | d6634e2bee5266cbacfdca2dc27a954c7d2e72df | # Numbers in decimal expansions
#
#Let p = p1 p2 p3 ... be an infinite sequence of random digits, selected from {0,1,2,3,4,5,6,7,8,9} with equal probability.
#It can be seen that p corresponds to the real number 0.p1 p2 p3 ....
#It can also be seen that choosing a random real number from the interval [0,1) is equivalent to choosing an infinite sequence of random digits selected from {0,1,2,3,4,5,6,7,8,9} with equal probability.
#For any positive integer n with d decimal digits, let k be the smallest index such that p<small>k</small>, p<small>k+1</small>, ...p<small>k+d-1</small> are the decimal digits of n, in the same order.
#Also, let g(n) be the expected value of k; it can be proven that g(n) is always finite and, interestingly, always an integer number.
#For example, if n = 535, then
#for p = 31415926535897...., we get k = 9
#for p = 355287143650049560000490848764084685354..., we get k = 36
#etc and we find that g(535) = 1008.
#Given that <img style="vertical-align:middle;" src="project/images/p316_decexp1.gif" alt="p316_decexp1.gif" />, find <img style="vertical-align:middle;" src="project/images/p316_decexp2.gif" alt="p316_decexp2.gif" />
#Note: <img style="vertical-align:middle;" src="project/images/p316_decexp3.gif" alt="p316_decexp3.gif" /> represents the floor function.
#
import time
startTime = time.time()
print('Elapsed time: ' + str(time.time()-startTime)) |
998,062 | 8930ee57d4a9eca07998323585b6da94cb9fd0cd | # -*- coding:utf -*-
import random
import copy
from mahjong_project.server.common.logger import LogConfig
log_type = "console"
logger = LogConfig(log_type).getLogger()
class Table(object):
u"""该类主要抽象牌桌的行为,该类在整个流程只实例化一次。
该类的方法对应玩家的动作,因此玩家在牌桌上的每个动作都需要一个处理函数。
"""
# 记录所有桌的信息
all_table_info = list()
def check_table(self, player, table):
if player not in table:
logger.error(u"房间出现未知异常,请重新连接!")
raise Exception(u"房间出现未知异常,请重新连接!")
if len(table) != 5:
logger.error(u"房间出现未知异常,请重新连接!")
raise Exception(u"房间出现未知异常,请重新连接!")
def create_table(self, player):
u"""若玩家点击创建房间,则创建一桌游戏,此时需要等待满四人;
若都是加入房间的形式的话,那么就需要判断满四人才创建一桌。
"""
new_table = list()
new_table.append(player)
self.all_table_info.append(new_table)
return new_table
def join_table(self, player):
u"""加入房间."""
for table in self.all_table_info:
if len(table) < 4:
table.append(player)
return table
# 游戏队列中都满员,需要创建房间
return self.create_table(player)
def quit_table(self, player, table):
u"""退出房间."""
if not table or player not in table:
logger.error(u"玩家(%s)未加入到房间,不允许退出房间!" % player["nick_name"])
return
if len(table) == 5:
logger.error(u"游戏中,不允许退出房间")
table.remove(player)
def start(self, table):
u"""开始游戏."""
# 摇筛子
index = random.randrange(4)
logger.info(u"随机庄家为:%s" % table[index]["nick_name"])
self.distribute_board(table)
# 将正在出牌的人记录在牌桌的最后一个元素中
table[-1]["index"] = index
board = table[-1]["board_list"].pop(0)
# 给筛子摇中的人(庄家)多发一张
table[index]["board_list"].append(board)
def distribute_board(self, table):
u"""发牌."""
# 生成牌堆
board_factory = BoardFactory()
board_list = board_factory.gen_board_list()
# 发牌给每个玩家
for i in range(4):
table[i]["board_list"] = list()
for j in range(len(board_list)):
if len(table[i]["board_list"]) >= 13:
logger.info(u"玩家(%s)的手牌为:%s" %
(table[i]["nick_name"], table[i]["board_list"]))
break
board = board_list.pop(j)
table[i]["board_list"].append(board)
# 将剩余的牌加入到第五个元素
table_board = dict()
table_board["board_list"] = board_list
table.append(table_board)
def get_one_board(self, player, table, handle_request_obj):
u"""玩家摸牌."""
self.check_table(player, table)
# 抽牌
board = table[-1].pop(0)
# 在将摸的牌加入牌堆之前去判断
self.check_board(board, table, handle_request_obj, check_player=player)
def discard_one_board(self, player, table, disused_board, handle_request_obj):
u"""玩家出牌."""
self.check_table(player, table)
# 废弃牌堆
player["board_list"].remove(disused_board)
# 记录废弃牌,显示在桌面
player["disused_board_list"].append(disused_board)
self.check_board(disused_board, table, handle_request_obj, exclude_player=player)
def check_board(self, disused_board, table, handle_request_obj,
check_player=None, exclude_player=None):
u"""检查玩家是否能碰、杠、胡牌."""
check_board = CheckMahjongBoard()
# 判断出需要检查的玩家
if check_player:
msg = check_board.check(disused_board, check_player)
handle_request_obj.get_board_to_check(msg, check_player, table, disused_board)
elif exclude_player:
for player in table:
if player == exclude_player:
continue
msg = check_board.check(disused_board, player)
handle_request_obj.discard_board_to_check(msg, player, disused_board)
def judge_next_player(self):
u"""判定下一个出牌玩家."""
pass
def is_all_prepared(self, table):
u"""所有玩家是否都点击准备."""
# 记录是否存在玩家没准备
flag = False
if len(table) < 4:
logger.info(u"同桌玩家数量不够,请等待其他玩家连接!")
return
for player in table:
if player["status"] == 0:
flag = True
# 四个玩家都准备好了
if not flag:
logger.info(u"同桌玩家均已经准备好,开始发牌!")
self.start(table)
return table
logger.info(u"还有玩家未准备,请的等待该玩家准备!")
class BoardFactory(object):
    u"""Factory that builds and shuffles the tile pile for one table."""
    def __init__(self):
        pass
    def gen_board_list(self):
        u"""Generate the shuffled tile list for a table.

        Tiles are encoded as two-digit strings, four copies of each:
        11-19, 21-29, 31-39 for the three numbered suits and 41-43 for
        the three dragon tiles.
        """
        board_list = list()
        for i in range(4):
            for j in range(11, 20):
                # characters suit (wan)
                board_list.append(str(j))
            for j in range(21, 30):
                # bamboo suit (tiao)
                board_list.append(str(j))
            for j in range(31, 40):
                # dots suit (tong)
                board_list.append(str(j))
            for j in range(41, 44):
                # red / green / white dragon
                board_list.append(str(j))
        # Shuffle the pile into dealing order.
        random.shuffle(board_list)
        logger.info(u"生成的乱序牌堆为:%s" % str(board_list))
        return board_list
class CheckMahjongBoard(object):
def __init__(self):
pass
def _is_seven_pairs(self, board, board_list):
u"""七对胡牌."""
board_list.append(board)
board_list.sort()
if len(board_list) != 14:
return False
# 去重之后直接判断相同的牌的数量
for temp_board in set(board_list):
if board_list.count(temp_board) not in [2, 4]:
return False
return True
def _is_common_win(self, board, board_list):
u"""常规胡牌."""
board_list.append(board)
board_list.sort()
# 判断牌数是否正确
if len(board_list) % 3 != 2:
logger.info(u"胡牌失败,牌数不正确!")
return False
# 记录相同的牌
same_list = list()
for temp_board in set(board_list):
if board_list.count(temp_board) >= 2:
same_list.append(temp_board)
if not same_list:
return False
origin_board_list = copy.deepcopy(board_list)
divide_board_list = list()
for temp_board in same_list:
# 先分组多余两张牌的牌
board_list.remove(temp_board)
board_list.remove(temp_board)
divide_board_list.append((temp_board, temp_board))
for i in range(len(board_list) // 3):
if board_list.count(board_list[0]) == 3:
divide_board_list.append((board_list[0],) * 3)
board_list = board_list[3:]
elif board_list[0]+1 in board_list and board_list[0]+2 in board_list:
# 此处不能用index来判断,因为可能存在 1,222,3 的情况
divide_board_list.append((board_list[0], board_list[0]+1, board_list[0]+2))
board_list.remove(board_list[0])
board_list.remove(board_list[0]+1)
board_list.remove(board_list[0]+2)
else:
# 当前的一张牌可能是分离列表中的第三张,需要重置
board_list = copy.deepcopy(origin_board_list)
divide_board_list = list()
break
else:
return True
else:
return False
def is_win(self, count_same_board, board, player):
u"""是否可以胡牌."""
board_list = player["board_list"]
if count_same_board == 0:
# 只能是3,3,3,3,2胡牌
flag = self._is_common_win(board, board_list)
elif count_same_board == 1:
# 能3,3,3,3,2胡牌,也能七对胡牌
# 优先去判断是否能七对
flag = False
flag1 = self._is_seven_pairs(board, board_list)
flag2 = self._is_common_win(board, board_list)
if flag1 or flag2:
flag = True
elif count_same_board == 2:
# 只能3,3,3,3,2胡牌
flag = self._is_common_win(board, board_list)
elif count_same_board == 3:
# 只能七对胡牌
flag = self._is_seven_pairs(board, board_list)
else:
logger.error(u"玩家(%s)的手牌不正确!" % player["nick_name"])
flag = False
return flag
def check(self, board, player):
logger.info(u"检查玩家(%s)手牌是否能碰、杠、胡" % player["nick_name"])
# 先去查看该玩家的手牌中有几张该牌
count_same_board = player["board_list"].count(board)
# 先去判断是否能胡牌
flag = self.is_win(count_same_board, board, player)
# 再根据有几张相同的手牌去判断是否可以碰或杠
if count_same_board <= 1:
msg = "pass"
if flag:
logger.info(u"玩家(%s)可以胡牌!" % player["nick_name"])
msg = "win"
elif count_same_board == 2:
logger.info(u"玩家(%s)可以碰牌!" % player["nick_name"])
msg = "double_board"
if flag:
logger.info(u"玩家(%s)可以胡牌!" % player["nick_name"])
msg = "win_and_double_board"
elif count_same_board == 3:
logger.info(u"玩家(%s)可以碰牌!" % player["nick_name"])
msg = "triple_board"
if flag:
logger.info(u"玩家(%s)可以胡牌!" % player["nick_name"])
msg = "win_and_triple_board"
else:
logger.error(u"玩家(%s)的手牌不正确!" % player["nick_name"])
msg = "error"
return msg
|
998,063 | 900fae9016ae98689692e00b0877e6ea4476d976 | # -*- coding: utf-8 -*-
"""Models for API requests & responses."""
import dataclasses
import typing as t
import marshmallow_jsonapi.fields as mm_fields
from ..base import BaseModel, BaseSchemaJson
from ..custom_fields import SchemaBool, field_from_mm
class SignupRequestSchema(BaseSchemaJson):
"""Schema for performing an initial signup request."""
company_name = mm_fields.Str(required=False, description="Company name (Optional)")
new_password = mm_fields.Str(required=True, description="Admin user password")
confirm_new_password = mm_fields.Str(
required=True, description="Admin user password confirmation"
)
contact_email = mm_fields.Email(required=True, description="Admin email address")
user_name = mm_fields.Str(required=True, description="Admin user name")
api_keys = SchemaBool(
required=False,
load_default=True,
dump_default=True,
descripton="Whether to return the API key and secret",
)
class Meta:
"""JSONAPI config."""
type_ = "signup_schema"
@staticmethod
def get_model_cls() -> t.Any:
"""Get the model for this schema."""
return SignupRequest
SCHEMA: BaseSchemaJson = SignupRequestSchema()
@dataclasses.dataclass
class SignupRequest(BaseModel):
"""Model for performing an initial signup request."""
company_name: str = field_from_mm(SCHEMA, "company_name")
contact_email: str = field_from_mm(SCHEMA, "contact_email")
new_password: str = field_from_mm(SCHEMA, "new_password")
confirm_new_password: str = field_from_mm(SCHEMA, "confirm_new_password")
user_name: str = field_from_mm(SCHEMA, "user_name")
api_keys: bool = field_from_mm(SCHEMA, "api_keys")
SCHEMA: t.ClassVar[BaseSchemaJson] = SCHEMA
@staticmethod
def get_schema_cls() -> t.Any:
"""Get the schema for this model."""
return SignupRequestSchema
|
998,064 | 77e2f77ee0a0194ce178324503e8ddce89ffbf3b | ee = True
|
998,065 | 96aaeb7ea95ea569251725e1ef56cf765a3a6f95 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.common.exceptions import NoSuchElementException
import unittest, time, random
import sys
sys.path.append(r"C:\\Users\\19144\\PycharmProjects\\学习\\test_demo\\public")
import login, data, workxls
from selenium.webdriver.common.action_chains import ActionChains
# 用户删除
class User_Delete(unittest.TestCase):
def setUp(self):
self.driver = login.Login_CAS.login()
self.driver.implicitly_wait(30)
self.verificationErrors = []
self.accept_next_alert = True
# 用户删除用例
def test_User_Delete(self):
driver = self.driver
user_delete_confirm_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div[4]/div/div/div[3]/span/button[1]/span" # 确定按钮的xpath
user_delete_cancel_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div[4]/div/div/div[3]/span/button[2]/span" # 取消按钮的xpath
# "//*[@id="app"]/div/div[2]/section/section/div/div/div[3]/div/div/div[3]/span/button[1]/span"
# 点击菜单栏用户管理
driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div[3]/div[1]/div/ul/div[4]").click()
time.sleep(1) # 预留时间查看
# 选一个删除用户按钮,点击,弹出确认框
driver.find_element_by_xpath(self.get_user_delete_button_xpath(driver)).click()
time.sleep(1)
# 点击取消,回到界面
driver.find_element_by_xpath(user_delete_cancel_button_xpath).click()
time.sleep(1)
# 点击确定,直接删除
driver.find_element_by_xpath(self.get_user_delete_button_xpath(driver)).click() # 重新选一个删除用户按钮,点击,弹出确认框
time.sleep(1)
driver.find_element_by_xpath(user_delete_confirm_button_xpath).click()
# time.sleep(1)
time.sleep(6)
driver.close()
@staticmethod
def get_user_delete_button_xpath(driver, i=0): # 获取删除用户按钮的xpath,页面上随机选取一个来编辑,并将光标移动至删除按钮上
driver.implicitly_wait(1)
if (i == 0):
i = random.randint(1, 10)
ret = ""
# driver.find_element_by_xpath("//*[@id='app']/div/div[1]/div[3]/div[1]/div/ul/div[5]").click()
# driver.find_element_by_xpath("//*[@id='app']/div/div[2]/section/section/div/div/div[1]/div[3]/div/ul/li[7]").click()
while (i):
# print(i)
ret = "//*[@id='app']/div/div[2]/section/section/div/div/div[2]/div[2]/div[3]/table/tbody/tr[" + str(
i) + "]/td[9]/div/a[2]/span"
try:
driver.find_element_by_xpath(ret)
driver.implicitly_wait(30)
break
except:
i = i - 1
if i > 5: # 如果选到了6-10的用户,下拉滚动条至底部
js = "var q=document.documentElement.scrollTop=10000"
driver.execute_script(js)
time.sleep(1)
else:
js = "var q=document.documentElement.scrollTop=0"
driver.execute_script(js)
time.sleep(1)
above = driver.find_element_by_xpath(ret) # 移动光标至删除按钮
ActionChains(driver).move_to_element(above).perform()
time.sleep(3) # 预留时间查看效果
return ret
@staticmethod
def UserDelete(driver):
user_delete_confirm_button_xpath = "//*[@id='app']/div/div[2]/section/section/div/div/div[4]/div/div/div[3]/span/button[1]/span" # 确定按钮的xpath
# 选第一个用户删除按钮
user_delete_button_xpath = User_Delete.get_user_delete_button_xpath(driver, 1) # 传参数1,表示选第一个删除
# "//*[@id='app']/div/div[2]/section/section/div/div/div[2]/div[2]/div[3]/table/tbody/tr[2]/td[9]/div/a[2]/span"
# 记录一下删除用户的属性
name = driver.find_element_by_xpath(user_delete_button_xpath.replace("9]/div/a[2]/span", "2]/div")).text
org_name_str = driver.find_element_by_xpath(user_delete_button_xpath.replace("9]/div/a[2]/span", "3]/div")).text
role_name_str = driver.find_element_by_xpath(user_delete_button_xpath.replace("9]/div/a[2]/span", "4]/div")).text
phonenum = driver.find_element_by_xpath(user_delete_button_xpath.replace("9]/div/a[2]/span", "5]/div")).text
mail = driver.find_element_by_xpath(user_delete_button_xpath.replace("9]/div/a[2]/span", "6]/div")).text
# 点击,弹出确认框
driver.find_element_by_xpath(user_delete_button_xpath).click()
time.sleep(1)
# 点击确定,直接删除
driver.find_element_by_xpath(user_delete_confirm_button_xpath).click()
timestr = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
time.sleep(1)
workxls.writedata('userdata.xls', name, '女', org_name_str, role_name_str, phonenum, mail, timestr)
return driver
def tearDown(self):
self.driver.quit()
self.assertEqual([], self.verificationErrors)
if __name__ == "__main__":
unittest.main()
|
998,066 | 3b2fb476e1460c09b213d6cfcee391c0eca8a4a7 | class Solution:
# @return a string
def longestCommonPrefix(self, strs):
if len(strs) == 0:
return ''
elif len(strs) == 1:
return strs[0]
most_right = len(strs[0])
for i in range(1, len(strs)):
for j in range(most_right):
if j == len(strs[i]) or strs[i][j] != strs[i - 1][j]:
most_right = j
break
return strs[0][0:most_right]
s = Solution()
print s.longestCommonPrefix(['12345', '1234', '123456'])
|
998,067 | 64a95c0c3f063b13263023280cde3bb8f33b9b49 | from cunsumer.consumer_confluent_kafka import KafkaConsumer
ERROR_CODE_ZERO = 0
class ConsumerHandler:
def __init__(self, bootstrap_servers, group_id, client_id, timeout, auto_commit, config):
self._consumer = KafkaConsumer(bootstrap_servers, group_id, client_id, timeout, auto_commit, config)
def message_listener(self, topic, timeout):
"""
listen to new messages and yield them
:return: error_code, error_message, message_json
"""
"""
demo_message = [
{'user_id': 'Lazy Man', 'timestamp': '2019-10-06T22:59:59.989Z', 'risk_level': 3}
]
for message in demo_message:
yield ERROR_CODE_ZERO, "", message
"""
while True:
for error_code, error_message, message in self._consumer.subscribe(topic, timeout):
yield error_code, error_message, message
if error_code == 1:
break
|
998,068 | 38b2e9fc3bee69c7bb0b47a837859bf2b46b2617 | from django.db import models
# Create your models here.
class Supplier(models.Model):
supplier_name = models.CharField(max_length=50,null=True, blank=True)
quantity = models.IntegerField(null=True, blank=True)
class Meta:
verbose_name = '供应商'
verbose_name_plural = '供应商'
class Overdue(models.Model):
category = models.CharField(max_length=10,null=True, blank=True)
quantity = models.IntegerField(null=True, blank=True)
class Meta:
verbose_name = '超期分类'
verbose_name_plural = '超期分类'
class DeliveredTotal(models.Model):
    """Monthly delivered-shortage totals per year (admin label: 发货缺件).

    NOTE(review): field names are capitalized (Year, Jan, ...) contrary to
    Python convention; renaming would change the DB columns, so left as-is.
    """
    Year = models.CharField(max_length=5)
    Jan = models.IntegerField(null=True, blank=True)
    Feb = models.IntegerField(null=True, blank=True)
    Mar = models.IntegerField(null=True, blank=True)
    Apr = models.IntegerField(null=True, blank=True)
    May = models.IntegerField(null=True, blank=True)
    Jun = models.IntegerField(null=True, blank=True)
    Jul = models.IntegerField(null=True, blank=True)
    Aug = models.IntegerField(null=True, blank=True)
    Sep = models.IntegerField(null=True, blank=True)
    Oct = models.IntegerField(null=True, blank=True)
    Nov = models.IntegerField(null=True, blank=True)
    Dec = models.IntegerField(null=True, blank=True)
    class Meta:
        verbose_name = '发货缺件'
        verbose_name_plural = '发货缺件'
class ReasonAnalysis(models.Model):
    """Quantity per root-cause category (admin label: 原因分析 = reason analysis)."""
    reason_category = models.CharField(max_length=15,null=True, blank=True)
    quantity = models.IntegerField(null=True, blank=True)
    class Meta:
        verbose_name = '原因分析'
        verbose_name_plural = '原因分析'
|
998,069 | 9ccf561ddd79a2e2e13f55f207b73e6323f9c510 | import math
# Round pi once; printing it does not need a second round().
pi = round(math.pi, 3)
print("Approximation of pi:", pi)
# float() parses numeric input safely; eval() would execute arbitrary code.
rad = float(input("Enter the radius:\n"))
# Use math.pi for the area instead of a hand-typed approximation.
print("Area:", round(math.pi * rad ** 2, 3))
998,070 | f812a8c33b879647644c89d6470b16e12706ff92 | import pytest
import asyncio
from unittest.mock import MagicMock
from aiocache.backends.memory import SimpleMemoryBackend
@pytest.fixture
def memory(event_loop, mocker):
    """Provide a SimpleMemoryBackend with class-level state reset per test.

    NOTE(review): ``mocker.spy`` wraps the class attribute ``_cache`` so the
    tests below can assert on ``_cache.get`` / ``__setitem__`` / ``pop``
    calls — presumably it behaves like a MagicMock here; verify against the
    pytest-mock version in use.
    """
    # Reset the class-level dicts shared across all backend instances.
    SimpleMemoryBackend._handlers = {}
    SimpleMemoryBackend._cache = {}
    mocker.spy(SimpleMemoryBackend, "_cache")
    return SimpleMemoryBackend()
class TestSimpleMemoryBackend:
    """White-box tests for SimpleMemoryBackend.

    The ``memory`` fixture replaces the class-level ``_cache`` with a spied
    object, so assertions target dict-protocol calls (``get``,
    ``__setitem__``, ``pop``, ``__contains__``) rather than stored values.
    """

    @pytest.mark.asyncio
    async def test_get(self, memory):
        await memory._get(pytest.KEY)
        SimpleMemoryBackend._cache.get.assert_called_with(pytest.KEY)

    @pytest.mark.asyncio
    async def test_set(self, memory):
        await memory._set(pytest.KEY, "value")
        SimpleMemoryBackend._cache.__setitem__.assert_called_with(pytest.KEY, "value")

    @pytest.mark.asyncio
    async def test_set_no_ttl_no_handle(self, memory):
        # ttl=0 or omitted must not schedule an expiry callback.
        await memory._set(pytest.KEY, "value", ttl=0)
        assert pytest.KEY not in memory._handlers
        await memory._set(pytest.KEY, "value")
        assert pytest.KEY not in memory._handlers

    @pytest.mark.asyncio
    async def test_set_ttl_handle(self, memory):
        await memory._set(pytest.KEY, "value", ttl=100)
        assert pytest.KEY in memory._handlers
        assert isinstance(memory._handlers[pytest.KEY], asyncio.Handle)

    @pytest.mark.asyncio
    async def test_multi_get(self, memory):
        await memory._multi_get([pytest.KEY, pytest.KEY_1])
        SimpleMemoryBackend._cache.get.assert_any_call(pytest.KEY)
        SimpleMemoryBackend._cache.get.assert_any_call(pytest.KEY_1)

    @pytest.mark.asyncio
    async def test_multi_set(self, memory):
        await memory._multi_set([(pytest.KEY, "value"), (pytest.KEY_1, "random")])
        SimpleMemoryBackend._cache.__setitem__.assert_any_call(pytest.KEY, "value")
        SimpleMemoryBackend._cache.__setitem__.assert_any_call(pytest.KEY_1, "random")

    @pytest.mark.asyncio
    async def test_add(self, memory):
        await memory._add(pytest.KEY, "value")
        SimpleMemoryBackend._cache.__setitem__.assert_called_with(pytest.KEY, "value")

    @pytest.mark.asyncio
    async def test_add_existing(self, memory):
        # _add must refuse to overwrite an existing key.
        SimpleMemoryBackend._cache.__contains__.return_value = True
        with pytest.raises(ValueError):
            await memory._add(pytest.KEY, "value")

    @pytest.mark.asyncio
    async def test_exists(self, memory):
        await memory._exists(pytest.KEY)
        SimpleMemoryBackend._cache.__contains__.assert_called_with(pytest.KEY)

    @pytest.mark.asyncio
    async def test_increment(self, memory):
        # Key absent: increment stores the delta as the initial value.
        await memory._increment(pytest.KEY, 2)
        SimpleMemoryBackend._cache.__contains__.assert_called_with(pytest.KEY)
        SimpleMemoryBackend._cache.__setitem__.assert_called_with(pytest.KEY, 2)

    @pytest.mark.asyncio
    async def test_increment_missing(self, memory):
        # NOTE(review): despite the name, this exercises the key-present
        # path (current value 2 incremented to 4) — consider renaming.
        SimpleMemoryBackend._cache.__contains__.return_value = True
        SimpleMemoryBackend._cache.__getitem__.return_value = 2
        await memory._increment(pytest.KEY, 2)
        SimpleMemoryBackend._cache.__getitem__.assert_called_with(pytest.KEY)
        SimpleMemoryBackend._cache.__setitem__.assert_called_with(pytest.KEY, 4)

    @pytest.mark.asyncio
    async def test_increment_typerror(self, memory):
        # Incrementing a non-numeric stored value must raise TypeError.
        SimpleMemoryBackend._cache.__contains__.return_value = True
        SimpleMemoryBackend._cache.__getitem__.return_value = "asd"
        with pytest.raises(TypeError):
            await memory._increment(pytest.KEY, 2)

    @pytest.mark.asyncio
    async def test_expire_no_handle_no_ttl(self, memory):
        # ttl=0 cancels expiry: no handle should remain for the key.
        SimpleMemoryBackend._cache.__contains__.return_value = True
        await memory._expire(pytest.KEY, 0)
        assert memory._handlers.get(pytest.KEY) is None

    @pytest.mark.asyncio
    async def test_expire_no_handle_ttl(self, memory):
        SimpleMemoryBackend._cache.__contains__.return_value = True
        await memory._expire(pytest.KEY, 1)
        assert isinstance(memory._handlers.get(pytest.KEY), asyncio.Handle)

    @pytest.mark.asyncio
    async def test_expire_handle_ttl(self, memory):
        # An existing handle must be cancelled before a new one is scheduled.
        fake = MagicMock()
        SimpleMemoryBackend._handlers[pytest.KEY] = fake
        SimpleMemoryBackend._cache.__contains__.return_value = True
        await memory._expire(pytest.KEY, 1)
        assert fake.cancel.call_count == 1
        assert isinstance(memory._handlers.get(pytest.KEY), asyncio.Handle)

    @pytest.mark.asyncio
    async def test_expire_missing(self, memory):
        SimpleMemoryBackend._cache.__contains__.return_value = False
        assert await memory._expire(pytest.KEY, 1) is False

    @pytest.mark.asyncio
    async def test_delete(self, memory):
        # Deleting must cancel any pending expiry handle and pop the key.
        fake = MagicMock()
        SimpleMemoryBackend._handlers[pytest.KEY] = fake
        await memory._delete(pytest.KEY)
        assert fake.cancel.call_count == 1
        assert pytest.KEY not in SimpleMemoryBackend._handlers
        SimpleMemoryBackend._cache.pop.assert_called_with(pytest.KEY, None)

    @pytest.mark.asyncio
    async def test_delete_missing(self, memory):
        SimpleMemoryBackend._cache.pop.return_value = None
        await memory._delete(pytest.KEY)
        SimpleMemoryBackend._cache.pop.assert_called_with(pytest.KEY, None)

    @pytest.mark.asyncio
    async def test_clear_namespace(self, memory):
        # Only keys with the "nm" prefix are evicted.
        SimpleMemoryBackend._cache.__iter__.return_value = iter(['nma', 'nmb', 'no'])
        await memory._clear("nm")
        assert SimpleMemoryBackend._cache.pop.call_count == 2
        SimpleMemoryBackend._cache.pop.assert_any_call('nma', None)
        SimpleMemoryBackend._cache.pop.assert_any_call('nmb', None)

    @pytest.mark.asyncio
    async def test_clear_no_namespace(self, memory):
        # _clear() without a namespace replaces both class-level containers
        # wholesale; restore real dicts afterwards for subsequent tests.
        SimpleMemoryBackend._handlers = "asdad"
        SimpleMemoryBackend._cache = "asdad"
        await memory._clear()
        SimpleMemoryBackend._handlers = {}
        SimpleMemoryBackend._cache = {}

    @pytest.mark.asyncio
    async def test_raw(self, memory):
        await memory._raw("get", pytest.KEY)
        SimpleMemoryBackend._cache.get.assert_called_with(pytest.KEY)
        await memory._set(pytest.KEY, "value")
        SimpleMemoryBackend._cache.__setitem__.assert_called_with(pytest.KEY, "value")
|
998,071 | 814cfc5a34effc2a440c60d5aa43db239f6266bb | """
Module to aggregate transactions, routes and counters
1. RouteAggregator - Conflates long routes to short ones
2. TxnAggregator - Classifies transaction based on a predicate and aggregates elasped cycles
Author: Manikandan Dhamodharan, Morgan Stanley
"""
from xpedite.txn.classifier import DefaultClassifier
def txnSubCollectionFactory(txnSubCollection, txn):
  """
  Builds a new transaction subcollection with meta data matching given source collection
  :param txnSubCollection: Source collection to clone meta data for the new subcollection
  :param txn: transaction to be added to the new subcollection
  """
  clone = txnSubCollection.cloneMetaData()
  clone.append(txn)
  return clone
def addTxn(routeMap, txnSubCollection, route, txn):
  """
  Adds transaction to a transaction subcollection with matching route
  :param routeMap: Map of routes taken by all the transaction in the profile
  :param route: Route taken by the given transaction
  :param txnSubCollection: Transaction subcollection to clone meta data from
  :param txn: Transaction to be added
  """
  bucket = routeMap.get(route)
  if bucket is None:
    # First transaction on this route: start a fresh subcollection that
    # carries over the source collection's meta data.
    bucket = txnSubCollection.cloneMetaData()
    routeMap[route] = bucket
  bucket.append(txn)
class RouteAggregator(object):
  """Aggregates transactions based their respective routes"""

  @staticmethod
  def aggregateTxnsByRoutes(txnSubCollection):
    """
    Aggregates transactions in a given source subcollection, to multiple subcollections
    based on their respective routes
    :param txnSubCollection: Transaction subcollection to be aggregated
    """
    routeMap = {}
    for txn in txnSubCollection:
      bucket = routeMap.get(txn.route)
      if bucket is None:
        # New route: seed a subcollection carrying the source meta data.
        bucket = txnSubCollection.cloneMetaData()
        routeMap[txn.route] = bucket
      bucket.append(txn)
    return routeMap
class RouteConflatingAggregator(object):
  """Aggregates transactions to a set of conflatable source routes"""

  def __init__(self, srcTree):
    # Tree collection providing candidate destination routes per ancestry.
    self.srcTree = srcTree

  def aggregateTxnsByRoutes(self, txnSubCollection, ancestry):
    """
    Aggregates transactions in a given subcollection to child collections
    in an ancestry node with conflatable routes
    :param txnSubCollection: Transaction subcollection to be aggregated
    :param ancestry: A node in the tree collection
    """
    from xpedite.types.route import conflateRoutes
    childRouteMap = self.srcTree.getChildren(ancestry)
    candidateRoutes = childRouteMap.keys() if childRouteMap else []
    routeMap = {}
    for txn in txnSubCollection:
      for candidate in candidateRoutes:
        if not conflateRoutes(txn.route, candidate):
          continue
        bucket = routeMap.get(candidate)
        if bucket is None:
          bucket = txnSubCollection.cloneMetaData()
          routeMap[candidate] = bucket
        bucket.append(txn)
    return routeMap
class TxnAggregator(object):
  """Aggregates transactions by categories.

  Fix: the classifier default used to be ``classifier=DefaultClassifier()``,
  which instantiated the classifier at class-definition time and shared one
  instance across every call.  The default is now ``None`` and resolved
  lazily per call — backward compatible for all existing callers.
  """

  begin = 0
  end = 1

  @staticmethod
  def _resolveClassifier(classifier):
    """Return the given classifier, or a fresh DefaultClassifier when None."""
    return DefaultClassifier() if classifier is None else classifier

  @staticmethod
  def _addOrUpdateContainer(container, subCollectionFactory, classifier, key, value):
    """
    Adds transaction to a transaction subcollection with matching category
    :param container: Container with all categories of aggregated values
    :param subCollectionFactory: Callable used to build an instance of subcollection
    :param classifier: Predicate to classify transactions into different categories
    :param key: Key used for classification
    :param value: Value to be aggregated
    """
    category = classifier.classify(key)
    if category in container:
      container[category].append(value)
    else:
      container.update({category : subCollectionFactory(value)})

  @staticmethod
  def groupElapsedTscByScope(txnSubCollection, beginProbe, endProbe, classifier=None):
    """
    Aggregates elapsed tsc values by category
    :param txnSubCollection: Transaction subcollection to be aggregated
    :param beginProbe: Begin probe used for elapsed tsc computation
    :param endProbe: End probe used for elapsed tsc computation
    :param classifier: Predicate to classify transactions (defaults to DefaultClassifier)
    """
    classifier = TxnAggregator._resolveClassifier(classifier)
    elapsedTscGroup = {}
    for txn in txnSubCollection:
      # Only transactions that traversed both probes contribute a delta.
      if txn.hasProbes([beginProbe, endProbe]):
        beginCounter = txn.getCounterForProbe(beginProbe)
        endCounter = txn.getCounterForProbe(endProbe)
        TxnAggregator._addOrUpdateContainer(
          elapsedTscGroup, lambda v: [v], classifier, txn,
          endCounter.tsc - beginCounter.tsc
        )
    return elapsedTscGroup

  @staticmethod
  def groupElapsedTime(txnSubCollection, cpuInfo, classifier=None):
    """
    Aggregates elapsed time by category

    This method computes elapsed wall time for each transaction in the subcollection
    and aggregates computed duration by its source transaction's category
    :param txnSubCollection: Transaction subcollection to be aggregated
    :param cpuInfo: Cpu info to convert cycles to duration (micro seconds)
    :param classifier: Predicate to classify transactions (defaults to DefaultClassifier)
    """
    classifier = TxnAggregator._resolveClassifier(classifier)
    elapsedTscGroup = {}
    for txn in txnSubCollection:
      if txn:
        time = cpuInfo.convertCyclesToTime(txn.getElapsedTsc())
        TxnAggregator._addOrUpdateContainer(elapsedTscGroup, lambda v: [v], classifier, txn, time)
    return elapsedTscGroup

  @staticmethod
  def groupTxns(txnSubCollection, classifier=None, mustHaveProbes=None):
    """
    Aggregates transactions by their respective categories
    :param txnSubCollection: Transaction subcollection to be aggregated
    :param classifier: Predicate to classify transactions (defaults to DefaultClassifier)
    :param mustHaveProbes: Probes used to exclude transaction from aggregation
    """
    classifier = TxnAggregator._resolveClassifier(classifier)
    groupMap = {}
    # classify the counter breakups into categories
    for txn in txnSubCollection:
      if not mustHaveProbes or txn.hasProbes(mustHaveProbes):
        TxnAggregator._addOrUpdateContainer(
          groupMap, lambda t: txnSubCollectionFactory(txnSubCollection, t),
          classifier, txn, txn
        )
    return groupMap
|
998,072 | 652e0cf06f2bc34459839dc3825a340ec3bcaa6d |
class Emoji:
    """Value object describing a (possibly animated) emoji.

    :param id: unique identifier of the emoji (kept as ``id`` for backward
        compatibility even though it shadows the builtin)
    :param name: display name of the emoji
    :param is_animated: True when the emoji is an animated image
    """

    def __init__(self, id, name, is_animated=False):
        self.id = id
        self.name = name
        self.is_animated = is_animated

    def __repr__(self):
        # Aids debugging/logging; previously only the default object repr existed.
        return "Emoji(id={!r}, name={!r}, is_animated={!r})".format(
            self.id, self.name, self.is_animated)
998,073 | b35e58c7f5af54cc089dd862820fa8d3f3fa5565 | __author__ = '123'
# coding=utf-8
from common.base import Base
from common.myTest import MyTest
from common.logger import logger
from common.connectMysql import ConnectMysql
from common.connectRedis import ConnectRedis
from common.jsonparser import JMESPathExtractor
import time, unittest
class TestCase(MyTest):
    def test_01(self):
        """
        Limit sell/buy: while (balance is sufficient) {A places sell orders, B buys};
        fully filled, fee == 2/1000
        Trading pair: BTC/USDT
        A's sell quantities increase arithmetically
            UserA_SellOrderPrice = 98
            UserA_SellOrderNum = 1,2,3,4,5
        B's buy order
            UserB_BuyOrderPrice = 100
            UserB_BuyOrderNum = 15
        """
        # Clear the BTC/USDT buy/sell order data from redis and mysql.
        ConnectRedis().clear_redis(name=["1buy", "1sell"])
        ConnectMysql().update_order_status(transtion_id=1, order_type=1, order_status=2)
        ConnectMysql().update_order_status(transtion_id=1, order_type=2, order_status=2)
        self.test_buyer = Base(user="buyer")
        self.test_seller = Base(user="seller")
        # Bug fix: the format string had no placeholder, so the docstring
        # argument was silently dropped from the log line.
        # NOTE(review): logging TestCase.setUp.__doc__ looks like it was
        # meant to be TestCase.test_01.__doc__ — confirm intent.
        logger.info("测试用例说明:{0}".format(TestCase.setUp.__doc__))
        # Query both sides' main/target coin balances before placing orders.
        self.before_deal_seller_main_balance_value = self.test_seller.User_balance_details(currency_id=1)
        self.before_deal_seller_deputy_balance_value = self.test_seller.User_balance_details(currency_id=2)
        self.before_deal_buyer_main_balance_value = self.test_buyer.User_balance_details(currency_id=1)
        self.before_deal_buyer_deputy_balance_value = self.test_buyer.User_balance_details(currency_id=2)
        logger.info("买入之前买家主币余额:{0}-----买入之前买家目标币余额:{1}".format(self.before_deal_buyer_main_balance_value, self.before_deal_buyer_deputy_balance_value))
        logger.info("买入之前卖家的主币余额:{0}------买入之前卖家的目标币余额{1}".format(self.before_deal_seller_main_balance_value, self.before_deal_seller_deputy_balance_value))
        self.buy_price, self.sell_price = 100, 98
        self.buy_num = 15
        self.sell_num = [1, 2, 3, 4, 5]
        self.sell_id_list = []
        # Place the five sell orders first so the order book has liquidity.
        for qty in self.sell_num:
            self.sell_resp = self.test_seller.SellOrder(transtion_id=1, price=self.sell_price, num=qty, order_type=0)
            self.sell_order_id = JMESPathExtractor().extract(query="OBJECT.sellerOrderId", body=self.sell_resp.text)
            self.sell_id_list.append(self.sell_order_id)
            time.sleep(1)
            logger.info("下卖单返回信息:{0}".format(self.sell_resp.json()))
        # Place the single buy order that should match all five sells.
        self.buy_resp = self.test_buyer.OrderReservations(transtion_id=1, price=self.buy_price, num=self.buy_num, order_type=0)
        buy_order_id = JMESPathExtractor().extract(query="OBJECT.buyerOrderId", body=self.buy_resp.text)
        logger.info("下买单返回信息:{0}".format(self.buy_resp.json()))
        # Log the resulting status of every order.
        for sell_id in self.sell_id_list:
            order_status = ConnectMysql().get_Order_Status(order_id=sell_id, order_type=2)
            logger.info("卖单Id:{0}----订单状态:{1}".format(sell_id, order_status))
        logger.info("买单ID:{0}----订单状态:{1}".format(buy_order_id, ConnectMysql().get_Order_Status(order_id=buy_order_id, order_type=1)))
        # Query balances again after the trade and log them.
        self.after_deal_seller_main_balance_value = self.test_seller.User_balance_details(currency_id=1)
        self.after_deal_seller_deputy_balance_value = self.test_seller.User_balance_details(currency_id=2)
        self.after_deal_buyer_main_balance_value = self.test_buyer.User_balance_details(currency_id=1)
        self.after_deal_buyer_deputy_balance_value = self.test_buyer.User_balance_details(currency_id=2)
        logger.info("买入之后买家主币余额:{0}-------买入之后买家目标币余额:{1}".format(self.after_deal_buyer_main_balance_value, self.after_deal_buyer_deputy_balance_value))
        logger.info("买入之后卖家主币余额:{0}------买入之后卖家目标币余额:{1}".format(self.after_deal_seller_main_balance_value, self.after_deal_seller_deputy_balance_value))
        logger.info("成交金额:{0}".format(self.buy_num*self.sell_price))
if __name__ == '__main__':
    # Allow running this test module directly via the unittest CLI runner.
    unittest.main()
998,074 | 374b0a89bd96cc4afa7b8926bd7220795ca19a99 | # -*- coding: utf-8 -*-
import os
import sqlite3
import urllib.request
from bs4 import BeautifulSoup
def get_feeds(url):
    """Download an RSS feed and return a list of entry dicts.

    Each dict carries the keys 'title', 'link' (from the rdf:about
    attribute) and 'description'.
    """
    xml = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(xml, 'html.parser')
    # Build the result directly from the <item> elements.
    return [
        {
            'title': item.title.text,
            'link': item['rdf:about'],
            'description': item.description.text,
        }
        for item in soup.find_all('item')
    ]
def save_db_feeds(feeds):
    """Persist feed entries into the local sqlite database 'feeds.db'.

    :param feeds: list of dicts with 'title', 'link' and 'description' keys.

    Fix: values are now bound as SQL parameters.  The old string-formatted
    INSERT broke on quotes in titles/descriptions and was injectable.
    """
    conn = sqlite3.connect('feeds.db')
    try:
        c = conn.cursor()
        c.execute("CREATE TABLE IF NOT EXISTS feeds_table (title text, link text, description text)")
        # Parameterized executemany: one statement, safe quoting.
        c.executemany(
            "INSERT INTO feeds_table VALUES (?, ?, ?)",
            [(item['title'], item['link'], item['description']) for item in feeds],
        )
        conn.commit()
    finally:
        # Close the connection even if the insert fails.
        conn.close()
def get_db_feeds():
    """Load previously saved feed entries from 'feeds.db'.

    Returns a list of dicts with 'title', 'link' and 'description' keys.
    """
    conn = sqlite3.connect('feeds.db')
    rows = conn.cursor().execute("SELECT title, link, description FROM feeds_table")
    # Materialize before closing the connection.
    result = [
        {'title': title, 'link': link, 'description': description}
        for title, link, description in rows
    ]
    conn.close()
    return result
if __name__ == "__main__":
    # Prepare the output file next to this script.
    this_script_path = os.path.dirname(os.path.abspath(__file__))
    output_html = os.path.join(this_script_path, 'output.html')
    # Hatena Bookmark popular entries
    # url = 'http://feeds.feedburner.com/hatena/b/hotentry'
    ### Does this work with other RSS feeds too?
    # Hatena Bookmark IT-category popular entries
    url = 'http://b.hatena.ne.jp/hotentry/it.rss'
    # techcrunch
    # url = 'http://jp.techcrunch.com/feed/'
    feeds = get_feeds(url)
    ### Persist the entries fetched this run?
    save_db_feeds(feeds)
    ### Also display previously saved entries?
    # saved_feeds = get_db_feeds()
    # feeds.extend(saved_feeds)
    # Write the results as simple HTML.
    with open(output_html, 'w') as f:
        for item in feeds:
            f.write('<h2><a href="{}">{}</a></h2><p>{}</p>'.format(item['link'], item['title'], item['description']))
    print('ブラウザで下記のpathにアクセス!')
    print('file://' + output_html)
|
998,075 | 21f317a020d17e2417ae118928f74ffecd4a716c | '''
Doxygen Comments Generator
--------------------------
Author : Anish Kumar
Date : 23/06/2018
'''
import time
import re, sys
from Tkinter import *
import Tkinter, Tkconstants, tkFileDialog, tkMessageBox
import os
import subprocess
from collections import Counter
Str = '''*********************************************************
* Program : Doxygen Comments Generator
* Version : 1.2
* Developer : Anish Kumar
* Date : 25-June-2018
*********************************************************\
'''
# --- Module-level parser state (mutated by generator() via `global`) ---
COU_TEST_Count = 0
COU_TEST_Flag, COU_CALL_Flag, Assert_Print_Flag, Event_Print_Flag = 0, 0, 0, 0
ASSERT_Missing = 0
Missed_Asserts = []
Missed_Asserts_Final = []
Asserts = []
Events = []
Order_Check_Flag = 0 # 0 means the previous Doxygen block was closed after a COU_ASSERT
# NOTE(review): COU_TEST_Count is re-initialised below (duplicate of the line above).
COU_SET_Count, COU_CALL_Count, COU_ASSERT_Count, COU_TEST_Count = 0, 0, 0, 0
Num_Lines = 0
Name = " "
Annotation_missing = 0
Annotation_missed_in = []
alph = 98  # ord('b'): next precondition label for the current test case
alph1 = 98  # ord('b'): next results label
filepath = "NULL"
error_1 = 0
COU_LOG = {}
COU_LOG_List = []
Missed_Asserts_Dict = {}
Precondn_Str = ""
def event_updater():
    """Flush the collected @events entries to the output file.

    Reads/clears the module globals ``Events`` and ``Event_Print_Flag`` and
    writes through the open ``dest`` file handle; a no-op when the flag is
    not set.
    """
    global Event_Print_Flag, Precondn_Str
    global Events
    if not Event_Print_Flag:
        return
    Event_Print_Flag = 0
    dest.write(" *\n * @events\n *")
    Events = set(Events)  # de-duplicate (iteration order is unspecified)
    for entry in Events:
        dest.write(' ' + entry + '\n *')
    Events = []
def results_updater():
    """Flush the collected @results assertions to the output file.

    Reads/clears the module globals ``Asserts`` and ``Assert_Print_Flag``,
    resets the results label counter ``alph1`` to ord('b'), and writes
    through the open ``dest`` file handle; a no-op when the flag is unset.
    """
    global Assert_Print_Flag, Precondn_Str
    global Asserts, alph1
    if not Assert_Print_Flag:
        return
    Assert_Print_Flag = 0
    dest.write("\n * @results\n * {}:\n * ".format(chr(97)))
    alph1 = 98
    for entry in Asserts:
        dest.write(entry + ' ')
    Asserts = []
def generator():
    """Parse the chosen C test file and emit Doxygen comment skeletons.

    Reads ``filepath`` line by line, recognising the COU_* test macros
    (COU_TEST / COU_SET / COU_CALL / COU_ASSERT_* / COU_LOG) and writing one
    Doxygen block per test case to ``Doxygen_Gen.txt``.  All counters and
    flags live in module globals; a summary header is prepended to the
    output file at the end.  The bare triple-quoted strings below are inert
    section banners, not docstrings.  NOTE(review): indentation was
    reconstructed from the macro-driven control flow — verify against the
    original layout.
    """
    global COU_TEST_Count, ASSERT_Missing, Missed_Asserts, Order_Check_Flag, TEST_CASE_Name, COU_ASSERT_Count
    global dest, COU_CALL_Flag, COU_TEST_Flag, COU_SET_Count, COU_CALL_Count, Event_Print_Flag, Assert_Print_Flag
    global Num_Lines, Name, COU_LOG, COU_LOG_List, Missed_Asserts_Dict, Missed_Asserts_Final
    global Name, E1, Annotation_missing, alph, alph1, found, filepath, error_1, Precondn_Str
    First_Time = 0
    Name = E1.get()
    dest = open("Doxygen_Gen.txt", "w")
    try:
        # Loop Starts here
        with open(filepath, 'r+') as fp:
            line = fp.readline()
            while line:
                ch = (line.strip())
                '''
                ==============================================================================
                COU_TEST IDENTIFICATION
                ==============================================================================
                '''
                # "while ... break" acts as a one-shot block: the body runs at
                # most once; the else branch fires for non-COU_TEST lines.
                while ch.find("COU_TEST") == 0:
                    COU_TEST_Count += 1
                    COU_TEST_Flag = 1
                    alph = 98
                    if COU_CALL_Flag:
                        # A COU_CALL with no following assert: record the miss.
                        COU_CALL_Flag = 0
                        ASSERT_Missing += 1
                        Missed_Asserts.append(TEST_CASE_Name)  # Assert missing finding logic
                    event_updater()
                    results_updater()
                    if First_Time:
                        dest.write("\n * @type\n * Elementary Comparison Test (ECT)\n *\n * @regression\n * No\n *\n * @integration\n * No\n *\n * @validates\n *\n *")
                    First_Time = 1
                    dat = (ch.strip())
                    dat1 = (re.search('\((.+?),', dat))
                    if dat1:
                        TEST_CASE_Name = dat1.group(1)
                    else:
                        Annotation_missing += 1
                        Annotation_missed_in.append(Num_Lines+1)
                    if Order_Check_Flag == 0: # Order_Check_Flag = zero Means Executing after COU_ASSERT
                        dest.write("\n**/\n\n\n ")
                        dest.write(TEST_CASE_Name)
                        dest.write("\n============================================================\n")
                        Order_Check_Flag = 1
                    dest.write("/**\n")
                    dest.write(" * @brief\n * Test case {}\n *\n *".format(TEST_CASE_Name))
                    dest.write(" @description\n *\n *")
                    dest.write(" @author\n * ")
                    dest.write(Name)
                    dest.write("\n *\n")
                    dest.write(" * @preconditions\n * {}:\n".format(chr(97)))
                    break
                else:
                    pass
                '''
                ==============================================================================
                COU_SET IDENTIFICATION
                ==============================================================================
                '''
                if ch.find("COU_SET") == 0:
                    if COU_CALL_Flag:
                        COU_CALL_Flag = 0
                        ASSERT_Missing += 1
                        # print(COU_LOG_List)
                        Missed_Asserts.append(TEST_CASE_Name)  # Assert missing finding logic
                    COU_SET_Count += 1
                    if Order_Check_Flag == 0 and COU_TEST_Flag:
                        # New precondition group after an assert: advance labels.
                        dest.write(" * \n * {}:\n".format(chr(alph)))
                        alph += 1
                        Order_Check_Flag = 1
                        Asserts.append('\n * {}:\n *'.format(chr(alph1)))
                        alph1 += 1
                    dat = (ch.strip())
                    dat1 = re.search('\((.+?),', dat)
                    dat2 = re.search(',(.+?),',dat)
                    if COU_TEST_Flag:  # Omitting checking SET before starting Test cases
                        if dat1:
                            found = dat1.group(1)
                            found += " set to " + dat2.group(1)
                        else:
                            Annotation_missing += 1
                            Annotation_missed_in.append(Num_Lines+1)
                        dest.write(' * ')
                        dest.write(found)
                        dest.write('\n')
                    '''
                    ==============================================================================
                    COU_CALL IDENTIFICATION
                    ==============================================================================
                    '''
                elif ch.find("COU_CALL") == 0:
                    COU_CALL_Count += 1
                    Order_Check_Flag = 1
                    COU_CALL_Flag = 1
                    Event_Print_Flag = 1
                    dat = (ch.strip())
                    dat1 = re.search('\((.+?)"', dat)
                    if dat1:
                        found = "Calling Function "
                        found += dat1.group(1)
                    else:
                        Annotation_missing += 1
                        Annotation_missed_in.append(Num_Lines+1)
                    Events.append(found)
                    '''
                    ==============================================================================
                    COU_ASSERT_EQUAL IDENTIFICATION
                    ==============================================================================
                    '''
                elif ch.find("COU_ASSERT_EQUAL") == 0:
                    COU_ASSERT_Count += 1
                    Order_Check_Flag = 0
                    COU_CALL_Flag = 0
                    dat = (ch.strip())
                    dat1 = re.search('\((.+?),', dat)
                    dat2 = re.search(',(.+?)"', dat)
                    # current_assert_list = set(Asserts)
                    if dat1:
                        found = "Check whether the value of "
                        found += dat1.group(1)
                        found += " is equal to " + dat2.group(1)
                    else:
                        Annotation_missing += 1
                        Annotation_missed_in.append(Num_Lines+1)
                    # if found not in current_assert_list:
                    Asserts.append(found)
                    Asserts.append('\n *')
                    Assert_Print_Flag = 1
                    '''
                    ==============================================================================
                    COU_ASSERT_NOT_EQUAL IDENTIFICATION
                    ==============================================================================
                    '''
                elif ch.find("COU_ASSERT_NOT_EQUAL") == 0:
                    COU_ASSERT_Count += 1
                    Order_Check_Flag = 0
                    COU_CALL_Flag = 0
                    dat = (ch.strip())
                    dat1 = re.search('\((.+?),', dat)
                    dat2 = re.search(',(.+?)"', dat)
                    if dat1:
                        found = "Check whether the value of "
                        found += dat1.group(1)
                        found += " is not equal to " + dat2.group(1)
                    else:
                        Annotation_missing += 1
                        Annotation_missed_in.append(Num_Lines + 1)
                    Asserts.append(found)
                    Asserts.append('\n *')
                    Assert_Print_Flag = 1
                    '''
                    ==============================================================================
                    COU_LOG IDENTIFICATION
                    ==============================================================================
                    '''
                elif ch.find("COU_LOG") == 0:
                    dat = (ch.strip())
                    dat1 = re.search('"(.+?)"', dat)
                    if dat1:
                        found = dat1.group(1)
                    else:
                        Annotation_missing += 1
                        Annotation_missed_in.append(Num_Lines + 1)
                    COU_LOG_List.append("{} {}".format(TEST_CASE_Name, found))
                else:
                    pass
                line = fp.readline()
                Num_Lines += 1
            error_1 = 0
    except IOError:
        error_1 = 1
    # Flush anything still pending for the last test case.
    event_updater()
    results_updater()
    Missed_Asserts_Dict = dict((Counter(Missed_Asserts)))
    # print(Missed_Asserts_Dict.keys())
    # print(Missed_Asserts_Dict.values())
    for i in Missed_Asserts_Dict:
        s = "{} ----> {}".format(i,Missed_Asserts_Dict[i])
        Missed_Asserts_Final.append(s)
    if Annotation_missing > 0:
        error("Annotation Missing\nPlease Update Annotations properly\n{}".format(Annotation_missed_in))
    Str1 = '''
======================================================================================
* SUMMARY *
======================================================================================
* Date and Time : {}
* Input File : {}
* Output File : {}
* Number Of Test Cases : {}
* Number Of Lines : {}
* Number Of Asserts : {}
* Number of Annotations Missed: {}
* Where You Missed Annotation : <Line Numbers:>\n {}
* Number Of Asserts Missed : {}
* Where You Missed Asserts :\n
======================================================================================
 Test Case Num of Asserts Missed
=======================================================================================
{}
***************************************************************************************\n
==================================End Of Summary========================================='''.format(time.asctime(), fp.name, dest.name, COU_TEST_Count, Num_Lines,
                COU_ASSERT_Count,Annotation_missing, Annotation_missed_in, ASSERT_Missing,
                "\n".join(Missed_Asserts_Final))
    if Annotation_missing == 0:
        dest.write("\n * @type\n * Elementary Comparison Test (ECT)\n *\n * @regression\n * No\n *\n * @integration\n * No\n *\n * @validates\n *\n **/")
    dest.write('''
==============================================================================
EOF
==============================================================================
''')
    # Prepend the summary: close, re-read, insert, and rewrite the file.
    dest.close()
    dest = open("Doxygen_Gen.txt", "r")
    contents = dest.readlines()
    dest.close()
    contents.insert(0, Str1)
    dest = open("Doxygen_Gen.txt", "w")
    contents = "".join(contents)
    dest.write(contents)
    fp.close()
    dest.close()
def gui_main():
    """Build and run the small Tk window (file chooser, info, author entry).

    Stores the widgets and the chosen path in module globals so that
    choose_file()/generator() can reach them.
    """
    global root, FileChoose, Name, E1, filepath
    root = Tk()
    root.title('Doxygen Generator')
    root.resizable(0, 0)
    w = 500 # width for the Tk root
    h = 100 # height for the Tk root
    # get screen width and height
    ws = root.winfo_screenwidth() # width of the screen
    hs = root.winfo_screenheight() # height of the screen
    # calculate x and y coordinates for the Tk root window
    x = (ws / 2) - (w / 2)
    y = (hs / 2) - (h / 2)
    # set the dimensions of the screen
    # and where it is placed
    root.geometry('%dx%d+%d+%d' % (w, h, x, y))
    frame = Frame(root)
    frame.pack()
    '''
    ==============================================================================
    CHOOSE FILE BUTTON
    ==============================================================================
    '''
    FileChoose = Button(frame,
                        text="Choose File",
                        command=choose_file)
    FileChoose.pack(side=LEFT, padx = 20)
    '''
    ==============================================================================
    INFO BUTTON
    ==============================================================================
    '''
    button_info = Button(frame,
                         text="Info",
                         fg="blue",
                         command=info)
    button_info.pack(side=RIGHT)
    L1 = Label(root, text="Author Name")
    L1.pack(side=LEFT, fill = X, padx = 20, pady = 0)
    E1 = Entry(root, bd=5)
    E1.pack(side=LEFT)
    root.mainloop()
def info():
    """Show the program banner (module-level Str) in an info message box."""
    tkMessageBox.showinfo("Doxygen Generator", Str)
def error(str):
    """Show an error message box (note: parameter shadows builtin str)."""
    tkMessageBox.showerror("Doxygen Generator", str)
def choose_file():
    """Ask the user for a .c file, run the generator, then close the GUI."""
    global filepath , root, dest, Precondn_Str
    root.filename = tkFileDialog.askopenfilename(initialdir="/", title="Select file",
                                                 filetypes=(("text files", "*.c"), ("all files", "*.*")))
    filepath = root.filename
    generator()
    root.destroy()
def update():
    """Placeholder; intentionally does nothing (reserved for future use)."""
    pass
if __name__ == '__main__':
    gui_main()
    # Open the generated file in Notepad unless no file was chosen or an
    # IOError occurred.  NOTE(review): Windows-only; with shell=False the
    # whole string is handed to CreateProcess — confirm on the target OS.
    if filepath != "NULL" and error_1 == 0:
        osCommandString = "notepad.exe Doxygen_Gen.txt"
        subprocess.call(osCommandString, shell=False)
    else:
        error_1 = 0
|
998,076 | df31b69ad14c34f54012ffaf2ff371e0528ef0fa | ## setについて
## Demonstration of set(): remove duplicates from a sequence.
## Fix: the original used {…}, which is already a set literal, so the
## "convert a list with duplicates" demo never exercised set(). Use a list.
my_list = [11, 12, 13, 14, 15, 12]
my_set = set(my_list)
print(my_set)
998,077 | 13b90a2c95a672dd7d12a131b415f1ab8fd84f36 | """ec2 instances scheduler."""
from typing import Dict, List
import boto3
from botocore.exceptions import ClientError
from .exceptions import ec2_exception
from .filter_resources_by_tags import FilterByTags
class InstanceScheduler:
    """Abstract ec2 scheduler in a class."""

    def __init__(self, region_name=None) -> None:
        """Initialize ec2 scheduler.

        :param str region_name:
            Aws region to use; falls back to the default boto3 session
            region when omitted.
        """
        if region_name:
            self.ec2 = boto3.client("ec2", region_name=region_name)
            self.asg = boto3.client("autoscaling", region_name=region_name)
        else:
            self.ec2 = boto3.client("ec2")
            self.asg = boto3.client("autoscaling")
        # FilterByTags accepts region_name=None, mirroring the clients above.
        self.tag_api = FilterByTags(region_name=region_name)

    def stop(self, aws_tags: List[Dict]) -> None:
        """Aws ec2 instance stop function.

        Stop ec2 instances with defined tags.
        NOTE(review): the original docstring also claimed Cloudwatch alarms
        are disabled, but no alarm handling is visible here — confirm.

        :param list[map] aws_tags:
            Aws tags to use for filter resources.
            For example:
            [
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ]
                }
            ]
        """
        for instance_arn in self.tag_api.get_resources("ec2:instance", aws_tags):
            # The instance id is the final path component of the ARN.
            instance_id = instance_arn.split("/")[-1]
            try:
                # Skip instances owned by an autoscaling group — the ASG
                # would immediately replace a manually stopped instance.
                if not self.asg.describe_auto_scaling_instances(
                    InstanceIds=[instance_id]
                )["AutoScalingInstances"]:
                    self.ec2.stop_instances(InstanceIds=[instance_id])
                    print(f"Stop instances {instance_id}")
            except ClientError as exc:
                ec2_exception("instance", instance_id, exc)

    def start(self, aws_tags: List[Dict]) -> None:
        """Aws ec2 instance start function.

        Start ec2 instances with defined tags.

        :param list[map] aws_tags:
            Aws tags to use for filter resources.
            For example:
            [
                {
                    'Key': 'string',
                    'Values': [
                        'string',
                    ]
                }
            ]
        """
        for instance_arn in self.tag_api.get_resources("ec2:instance", aws_tags):
            instance_id = instance_arn.split("/")[-1]
            try:
                # Same ASG guard as stop(): never fight the autoscaler.
                if not self.asg.describe_auto_scaling_instances(
                    InstanceIds=[instance_id]
                )["AutoScalingInstances"]:
                    self.ec2.start_instances(InstanceIds=[instance_id])
                    print(f"Start instances {instance_id}")
            except ClientError as exc:
                ec2_exception("instance", instance_id, exc)
998,078 | ab54564901cac6b49e153919be3d926bef5af324 | # Obtaining the current date and time
import datetime
# Capture a single snapshot so date, year and time are mutually consistent
# (repeated datetime.now() calls could straddle a midnight/year boundary).
current_date_time = datetime.datetime.now()
print("Current datetime: ", current_date_time)
current_date = current_date_time.date()
print("Current date: ", current_date)
current_year = current_date.year
print("Current year: ", current_year)
current_time = current_date_time.time()
print("Current time: ", current_time)
|
998,079 | fd2a4fe3c3896fcdb249590d078b9821eff45784 | #!/usr/bin/env python
# encoding: utf-8
"""
Metadata object module
"""
__author__ = 'Christoph Piechula'
import os
import archive.util.times as utl
import archive.crawler.extractor as extractor
import archive.util.mimextractor as mime
class MetaData(dict):
    """
    Metadata Builder Class, generates a meta object
    from a given file
    """

    def __init__(self, **kwargs):
        # Seed the dict with any keyword arguments supplied.
        super().__init__(kwargs)

    # Class methods:
    # NOTE(review): the helpers below take no `self`/`cls` and are invoked
    # as plain functions via the class (MetaData.get_url_from_path(...));
    # they behave like implicit static methods under Python 3.

    def get_domain_from_path(post_path):
        """
        Splits path to retrieve the domain (first path component)
        """
        return post_path.split('/', 1)[0]

    def get_url_from_path(post_path):
        """
        Cuts path to retrieve the content url: drops a trailing 'data'
        suffix and any trailing slash left behind
        """
        url_start = post_path[:-len('data')]
        if url_start.endswith('/'):
            url_start = url_start[:-1]
        return url_start

    def get_content_path_from_tmp(tmp_path, domain):
        """
        /tmp/archive/tmp/www.golem.de/www.golem.de/...
        =>
        /tmp/archive/content/www.golem.de/...
        """
        return tmp_path.replace(os.path.join('tmp', domain), 'content')

    def build_metadata_from_file(tmp_crawler_folder, abs_data_path, commitTime):
        """
        Retrieves metadata and returns object
        Note: This needs to be threadsafe code.
        :abs_data_path: path of content file
        :commitTime: commit time string to tag with
        :tmp_crawler_folder: wget temp folder
        """
        m = MetaData()
        # Path of the file relative to the crawler's temp folder.
        post_path = abs_data_path[len(tmp_crawler_folder):]
        if post_path[0] == '/':
            post_path = post_path[1:]
        m['mimeType'] = mime.get_mime(abs_data_path)
        m['url'] = MetaData.get_url_from_path(post_path)
        m['domain'] = MetaData.get_domain_from_path(post_path)
        m['path'] = MetaData.get_content_path_from_tmp(abs_data_path, m['domain'])
        m['tmpPath'] = abs_data_path
        m['createTime'] = utl.get_ctime(abs_data_path)
        m['commitTime'] = commitTime
        m['title'] = extractor.get_title(abs_data_path, m['mimeType'])
        # Sanity check: the derived url should appear in the final path.
        if not m['url'] in m['path']:
            print('WARNING, url not in path!')
        return m
|
998,080 | 2a3a6197db1ac2ac235dd5700e6c8dfde9fc63f8 | from itertools import product
import en_core_web_md
from collections import defaultdict
from tqdm import tqdm
from ent_extractor import EntitiesExtraction
from common import RELATION, TEXT, PERSON, ORG, is_the_same
class RelationSentence:
    """Container bundling one sentence with its parse and entities.

    Attributes: idx (sentence id), text (cleaned text), analyzed
    (parsed document), entities (extracted entities keyed by type).
    """

    def __init__(self, idx, text, analyzed, entities):
        for attr, value in zip(("idx", "text", "analyzed", "entities"),
                               (idx, text, analyzed, entities)):
            setattr(self, attr, value)
class RelationSentenceBuilder:
    """Builds RelationSentence objects: clean text, parse, extract entities."""

    def __init__(self):
        self.nlp = en_core_web_md.load()
        self.ent_extractor = EntitiesExtraction()

    def build_relation_sent(self, idx, sent):
        """Return a RelationSentence for the raw sentence *sent*."""
        cleaned = self.clean_sent(sent)
        doc = self.nlp(cleaned)
        found = self.ent_extractor.extract(doc)
        return RelationSentence(idx, cleaned, doc, found)

    @staticmethod
    def clean_sent(sent):
        # Restore bracket tokens from their PTB escapes, then trim the
        # wrapping parentheses/whitespace/newline.
        for escaped, literal in (("-LRB-", "("), ("-RRB-", ")"), ("-LCB-", "")):
            sent = sent.replace(escaped, literal)
        return sent.strip("()\n ")
class ProcessCorpusData:
    """Loads an unannotated corpus of "<id>\\t<sentence>" lines."""

    def __init__(self, path):
        self.i2sentence = self.process_data(path)

    def process_data(self, path):
        """Parse *path*, building one RelationSentence per sentence id."""
        relation_builder = RelationSentenceBuilder()
        with open(path) as f:
            lines = f.readlines()
        sentences = {}
        for line in tqdm(lines):
            idx, sentence = line.split("\t")
            # BUG FIX: the original tested membership in an always-empty
            # defaultdict (never populated here), so repeated ids were
            # re-parsed and overwritten; dedupe against `sentences`.
            if idx not in sentences:
                sentences[idx] = relation_builder.build_relation_sent(idx, sentence)
        return sentences

    def get_op_relations(self):
        """Return (id, person, org, text) for every person/org pair."""
        relations = []
        for sentence in self.i2sentence.values():
            for person, org in product(sentence.entities[PERSON], sentence.entities[ORG]):
                relations.append((sentence.idx, person, org, sentence.text))
        return relations
class ProcessAnnotatedData:
    # Loads a gold-annotated corpus where each line is
    # "<id>\t<arg0>\t<relation>\t<arg1>\t<sentence>".
    def __init__(self, path):
        self.i2sentence, self.i2relations = self.process_data(path)

    def process_data(self, path):
        """Return ({id: RelationSentence}, {id: [(arg0, relation, arg1), ...]})."""
        relation_builder = RelationSentenceBuilder()
        i2relations = defaultdict(list)
        with open(path) as f:
            lines = f.readlines()
        sentences = {}
        for line in tqdm(lines):
            idx, arg0, relation, arg1, sentence = line.split("\t")
            # Parse each sentence only once; several gold relations may
            # share the same sentence id.
            if idx not in i2relations:
                sentences[idx] = relation_builder.build_relation_sent(idx, sentence)
            i2relations[idx].append((arg0, relation, arg1))
        return sentences, i2relations

    def get_relations_tag(self):
        """Build candidate (id, person, org, text) tuples plus '1'/'0'
        labels marking whether each pair matches a gold relation."""
        relations = []
        labels = []
        for sentence in self.i2sentence.values():
            for gold_person, gold_rel, gold_org in self.i2relations[sentence.idx]:
                for person, org in list(product(sentence.entities[PERSON], sentence.entities[ORG])):
                    relations.append((sentence.idx, person, org, sentence.text))
                    if self.is_relation_pos(gold_rel, person, org, gold_person, gold_org):
                        labels.append('1')
                    else:
                        labels.append('0')
        return relations, labels

    def is_relation_pos(self, rel, person, org, arg0, arg1):
        # True when the candidate pair textually matches the gold pair and
        # the gold relation is the target RELATION type.
        if rel == RELATION and is_the_same(person[TEXT], arg0) and is_the_same(org[TEXT], arg1):
            return True
        return False
if __name__ == '__main__':
    # Smoke test: build one RelationSentence from a raw PTB-escaped sentence.
    RelationSentenceBuilder().build_relation_sent("b", "( The Environmental Protection Agency is working on a brochure advising diabetics of the best way to dispose of needles , said Robin Woods , a spokeswoman for the agency in Washington , D.C. It will suggest that diabetics place ` ` sharps ' ' _ the needle end of a syringe _ in a hard plastic or metal container like a coffee can before throwing it in the trash , Woods said. )")
998,081 | 6800e14558f9f0fabc561e556b5c06ab2159bbce | from save_beer import save_beer
from lookup_beer import lookup_beer |
998,082 | 18e003d7b3f182430fa9a177be4a82f9e6aa1247 | """
Flask-FIDO-U2F
-------------
Flask plugin to simplify usage and management of U2F devices.
"""
from setuptools import setup
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
# https://pythonhosted.org/an_example_pypi_project/setuptools.html
def read(filename):
    """Return the full text contents of *filename*."""
    with open(filename) as handle:
        return handle.read()
# Package metadata for the Flask-FIDO-U2F plugin.
setup(
    name = 'Flask-FIDO-U2F',
    version = '0.4.4',
    url = 'https://github.com/herrjemand/flask-fido-u2f',
    license = 'MIT',
    author = 'Ackermann Yuriy',
    author_email = 'ackermann.yuriy@gmail.com',
    description = 'A Flask plugin that adds FIDO U2F support.',
    # Long description is taken verbatim from the README.
    long_description = read('README.md'),
    keywords = 'flask fido u2f 2fa',
    py_modules = ['flask_fido_u2f'],
    zip_safe = True,
    test_suite = 'test',
    tests_require = [],
    include_package_data = True,
    platforms = 'any',
    install_requires = [
        'Flask',
        'python-u2flib-server'
    ],
    classifiers = [
        'Environment :: Web Environment',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        # Needs python 2.x testing
        # 'Programming Language :: Python :: 2',
        # 'Programming Language :: Python :: 2.6',
        # 'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet',
        'Topic :: Security :: Cryptography',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ]
)
998,083 | 21ef83dc8a63c36f464ad95ee6dcb69bacf05c99 | from django.db import models
class Company(models.Model):
    """An advertiser company."""
    name = models.CharField(max_length=200)        # display name
    category = models.CharField(max_length=100)    # business category

    def __str__(self):
        return self.name

    class Meta:
        verbose_name_plural = "Companies"
class Ad(models.Model):
    """A single advertisement belonging to a Company."""
    # NOTE(review): field name is capitalized, unlike Django convention
    # ('company'); renaming would require a migration, so left as-is.
    Company = models.ForeignKey(Company,on_delete=models.CASCADE)
    ad_path = models.CharField(max_length=200)     # path to the ad asset

    def __str__(self):
        return self.ad_path

    class Meta:
        verbose_name_plural = "Ads"
class Click(models.Model):
    """Log row recording that a user clicked an ad."""
    # NOTE(review): these look like foreign keys stored as raw integers —
    # confirm whether ForeignKey fields were intended.
    user_id = models.IntegerField()
    company_id = models.IntegerField()
    ad_id = models.IntegerField()
    # Timestamp set automatically on row creation.
    date_click = models.DateTimeField(verbose_name="date_click",auto_now_add=True)

    def __str__(self):
        return "{}|{}|{}".format(self.user_id,self.company_id,self.ad_id)

    class Meta:
        verbose_name_plural = "Clicks"
|
998,084 | ec865eb3d583fbf2c67725fbe7b978d890a4ea25 | from . import views
from django.urls import path
from django.conf.urls import url, include
from django.views.decorators.csrf import csrf_exempt
from django.views import static
# Route table: blog/profile pages first, then the analytics endpoints.
urlpatterns = [
    path('', views.PostList.as_view(), name='home'),
    path('post/<slug:slug>/', views.PostDetail.as_view(), name='post_detail'),
    path('add_post/', views.AddPostView.as_view(), name='add_post'),
    path('article/edit/<slug:slug>', views.UpdatePostView.as_view(), name='update_post'),
    path('article/delete/<slug:slug>', views.DeletePostView.as_view(), name='delete_post'),
    path('like/<slug:slug>', views.LikeView, name='like_post'),
    path('<int:pk>/profile/', views.ProfileView.as_view(), name='profile'),
    path('<int:id>/edit_account/', views.edit_account, name='edit_account'),
    # Analytics API; the coordinate store is CSRF-exempt (external POSTs).
    url(r'analytics/$', csrf_exempt(views.store_coordinates), name='store_coordinates'),
    url(r'analytics/heatmap/(?P<id>(\w*-*\w*))/$', views.generate_heatmap, name='generate_heatmap'),
    url(r'analytics/elements/(?P<id>\d{1,6})/$', views.retrieve_html_elements, name='retrieve_html_elements'),
    url(r'analytics/page/(?P<id>\d{1,6})/$', views.retrieve_heatmap_information, name='retrieve_heatmap_information'),
    # NOTE(review): serving with document_root='.' exposes the project
    # directory — confirm this is restricted to trusted content.
    url(r'analytics/html/(?P<path>.*)$', static.serve, {'document_root': '.'})
]
998,085 | 770def2dc19cf2664e0e5639ad53e0293fddee54 | a, b, c, d = input().split()
a = int(a + b)
c = int(c + d)
print(a + c) |
998,086 | 54384c8ed8215ee18956c722420982004a632337 | import subprocess
import os
import pickle
import re
import shutil
import tempfile
from game_controller import GameController
from textwrap import wrap
from PIL import Image, ImageDraw, ImageFont
from math import log10
from fnmatch import fnmatch
def get_image_format(data):
    '''Gets image format from its data (bytearray).'''
    # Magic-number sniffing (source: getimageinfo.py from code.google.com);
    # returns None for unrecognised data.
    signatures = (
        (b'GIF', '.gif'),
        (b'\211PNG\r\n\032\n', '.png'),
        (b'\377\330', '.jpeg'),
    )
    for magic, extension in signatures:
        if data.startswith(magic):
            return extension
    return None
class NoJuryStatesException(Exception):
    """Raised when a GameController carries no jury states to visualize."""
class VideoVisualizer:
    '''Composes video file from game data.

    Workflow: unpickle GameController files, paint each jury state to an
    image, duplicate images into 48fps frames via hard links in a scratch
    directory, then invoke ffmpeg to assemble the video.
    '''

    def __init__(self, _framerate, _painter_obj, _file_mask, _working_dir='.',
                 _silent=False, _table_drawer=None):
        '''
        Constructor. Parameters:
        * _framerate    - framerate of video (max=24)
        * _painter_obj  - painter class
        * _file_mask    - file mask of GameController files (regular
                          expression)
        * _working_dir  - directory with GameController files
        * _silent       - if True, compiler will write nothing to screen
        * _table_drawer - optional callable rendering the tournament table
        '''
        self.painter = _painter_obj
        self.table_drawer = _table_drawer
        self.file_mask = _file_mask
        self.working_dir = _working_dir
        self.framerate = _framerate
        # Number of physical 48fps frames each logical frame is held for.
        self.inframe = int(48.0 / _framerate)
        self.file_list = None
        # Image filemask given to ffmpeg
        self.imagefile_name = None
        # Image size
        self.size = None
        # [0] = caller's directory, [1] = scratch directory for frames.
        self._paths = [os.path.abspath('.'), tempfile.mkdtemp()]
        self._frame_count = 0
        self.log = not _silent
        self._tempfiles = []
        # Image extension ('.png', ...) and PIL mode, detected lazily from
        # the first painted frame.
        self.ext = None
        self.mode = None

    def _create_tempfile(self, suffix=''):
        # Track every temp file so __del__ can clean it up.
        self._tempfiles.append(tempfile.mkstemp(suffix))
        return self._tempfiles[-1]

    def _change_path(self, num):
        # Switch between the caller's directory (0) and the scratch dir (1),
        # remembering the caller's directory when leaving it.
        if num == 1 and os.path.abspath('.') != self._paths[1]:
            self._paths[0] = os.path.abspath('.')
        os.chdir(self._paths[num])

    def _create_frame(self, fname, number):
        # Emit `number` logical frames of image `fname` by copying the first
        # physical frame and hard-linking the repeats, then drop the source.
        self._change_path(1)
        begname = '{:09d}'.format(self._frame_count) + self.ext
        shutil.copyfile(fname[1], begname)
        for loop in range(number * self.inframe - 1):
            os.link(begname, '{:09d}'.format(self._frame_count +
                                             loop + 1) + self.ext)
        self._frame_count += self.inframe * number
        self._change_path(0)
        os.close(fname[0])
        os.remove(fname[1])

    def _get_game_controller(self, filename):
        '''Unpickles a game controller.'''
        # NOTE(review): pickle.load is unsafe on untrusted files — confirm
        # these GameController files come only from trusted sources.
        with open(filename, 'rb') as file:
            return pickle.load(file)

    def _generate_game_images(self, controller):
        '''Generates frames for video.'''
        if len(controller.jury_states) == 0:
            if self.ext is None:
                raise NoJuryStatesException('First GameController contains no '
                                            'jury states - cannot visualize '
                                            'an empty game.')
            else:
                print('One of GameControllers consists no jury states - the '
                      'game will be empty.')
        # We need filenames with leading zeroes for ffmpeg
        # NOTE(review): zero_count is computed but never used.
        zero_count = int(log10(len(controller.jury_states)) + 1)
        file_list = []
        painter = self.painter(controller._players)
        for ind, jstate in enumerate(controller.jury_states):
            if self.log:
                # chr(13) (carriage return) rewrites the progress line in place.
                print(chr(13) + ' Generating game images... {}/{}'.format(
                    ind + 1, len(controller.jury_states)),
                    end=''
                )
            image = painter.paint(jstate)
            # Detect the image format once from the first painted frame.
            self.ext = self.ext or get_image_format(image)
            file_list.append(self._create_tempfile(self.ext))
            with open(file_list[-1][1], 'wb') as f:
                f.write(image)
            if self.size is None or self.mode is None:
                im = Image.open(file_list[-1][1])
                self.size = im.size
                self.mode = im.mode
        return file_list

    def _draw_tournament_status(self, round_id):
        # Render the tournament table (if a drawer was supplied), scale it
        # into the frame — rotating if that fits better — and hold it for
        # two seconds of video.
        if self.table_drawer is not None:
            tfile = self._create_tempfile(self.ext)
            with open(tfile[1], 'wb') as f:
                imagew = self.table_drawer(
                    os.path.join(self.working_dir, 'tournament.data'),
                    round_id,
                    self.mode,
                    self.ext
                )
                f.write(imagew)
            im = Image.open(tfile[1])
            ratio = min(self.size[0] / im.size[0], self.size[1] / im.size[1])
            ratio_turned = min(self.size[1] / im.size[0],
                               self.size[0] / im.size[1])
            if ratio_turned > ratio:
                im = im.transpose(Image.ROTATE_270)
                im = im.resize(
                    (
                        int(im.size[0] * ratio_turned),
                        int(im.size[1] * ratio_turned)
                    ),
                    Image.ANTIALIAS
                )
            else:
                im = im.resize((int(im.size[0] * ratio),
                                int(im.size[1] * ratio)), Image.ANTIALIAS)
            # Paste the scaled table centered on a white background.
            res = Image.new(self.mode, self.size, 'white')
            res.paste(im, ((self.size[0] - im.size[0]) // 2,
                           (self.size[1] - im.size[1]) // 2,
                           (self.size[0] + im.size[0]) // 2,
                           (self.size[1] + im.size[1]) // 2))
            resfile = self._create_tempfile(self.ext)
            res.save(resfile[1], self.ext[1:])
            del im
            os.close(tfile[0])
            os.remove(tfile[1])
            self._create_frame(resfile, 2 * self.framerate)

    def generate_tournament_status(self, contr):
        '''Generates a frame with a tournament status.'''
        temptitle = self._create_tempfile(self.ext)
        # Text displayed on the frame:
        info = (wrap('Tournament: ' + str(contr.signature.tournament_id),
                     width=40) +
                wrap('Round: ' + str(contr.signature.round_id) + (
                    '' if contr.signature.round_name is None else (
                        '(' + contr.signature.round_name + ')')), width=40) +
                wrap('Series: ' + str(contr.signature.series_id),
                     width=40) +
                wrap('Game: ' + str(contr.signature.game_id), width=40) +
                [''] +
                wrap('Players: ' + ', '.join(
                    map(
                        lambda x: x.bot_name + ' by ' + x.author_name,
                        contr._players
                    )
                ), width=40)
                )
        im = Image.new(self.mode, self.size, 'blue')
        draw = ImageDraw.Draw(im)
        cfsize = 100
        done_once = False
        # Here we should find the best fitting font size: shrink until the
        # wrapped text block fits within the frame with a 10px margin.
        self._change_path(0)
        while True:
            font = ImageFont.truetype(os.path.join('fonts',
                                      'Lucida Console.ttf'), cfsize,
                                      encoding='unic')
            textlen = max(map(lambda x: font.getsize(x)[0], info))
            textheight = (font.getsize('T')[1] + 1) * len(info)
            if (textlen < self.size[0] - 10 and textheight < self.size[1] - 10
                    and done_once):
                break
            done_once = True
            cfsize = min(cfsize - 1, int(cfsize * min((self.size[0] - 10)
                         / textlen, (self.size[1] - 10) / textheight)))
        # Distance between corners of texts, starting position:
        dy = font.getsize('T')[1] + 1
        y = (self.size[1] - dy * len(info)) / 2
        # Finally, we draw it (each line horizontally centered):
        for line in info:
            width = font.getsize(line)[0]
            draw.text(((self.size[0] - width) / 2, y), line, font=font,
                      fill=(255, 255, 255))
            y += dy
        im.save(temptitle[1])
        # Hold the title card for two seconds of video:
        self._create_frame(temptitle, 2 * self.framerate)

    def compile(self, output_name):
        '''
        Compiles all games given by the specified filemask into one video file.
        The file will be saved into the log folder.
        '''
        controllers = []
        for filename in os.listdir(self.working_dir):
            if fnmatch(filename, self.file_mask):
                controllers.append(self._get_game_controller(os.path.join(
                    self.working_dir, filename)))
        # The games should be given in the right order:
        controllers = list(sorted(controllers))
        # NOTE(review): vfile_list is never used.
        vfile_list = []
        prev_rnd = controllers[0].signature.round_id
        for ind, controller in enumerate(controllers):
            if self.log:
                print('Processing game {}:{}:{}:{} ({} of {}):'.format(
                    controller.signature.tournament_id,
                    controller.signature.round_id,
                    controller.signature.series_id,
                    controller.signature.game_id, ind + 1, len(controllers)))
            t = self._generate_game_images(controller)
            # Show the standings table whenever a new round begins.
            if controller.signature.round_id > prev_rnd:
                prev_rnd = controller.signature.round_id
                self._draw_tournament_status(prev_rnd)
            self.generate_tournament_status(controller)
            if self.log:
                print('\n Creating frames...')
            for fname in t:
                self._create_frame(fname, 1)
        # Final standings after the last round.
        self._draw_tournament_status(prev_rnd + 1)
        self._change_path(1)
        print('Compiling the video file...')
        try:
            with open(os.devnull, 'w') as fnull:
                # Feed repeated 'y' answers so ffmpeg overwrites silently.
                subprocess.Popen(
                    'ffmpeg -i %09d{} -r 48 -s {}x{} {}'.format(
                        self.ext,
                        self.size[0],
                        self.size[1],
                        output_name
                    ).split(),
                    stderr=fnull,
                    stdin=subprocess.PIPE).communicate('y\n'.encode() * 10)
            self._change_path(0)
            shutil.copyfile(os.path.join(self._paths[1], output_name),
                            os.path.join(self.working_dir, output_name))
        except FileNotFoundError:
            raise FileNotFoundError('You need to install ffmpeg to create'
                                    ' videos.')
        print('Compiling finished.')

    def __del__(self):
        # Best-effort cleanup of temp frames and the scratch directory.
        if self.log:
            print('Cleaning up...')
        for fname in self._tempfiles:
            if os.path.exists(fname[1]):
                os.close(fname[0])
                os.remove(fname[1])
        shutil.rmtree(self._paths[1])
|
998,087 | 9ae7e6d766060d900331780f18d9220631bd2cfe | import sys
import collections
import sklearn.naive_bayes
import sklearn.linear_model
import nltk
import random
random.seed(0)
from gensim.models.doc2vec import LabeledSentence, Doc2Vec
# NOTE: this file is Python 2 (print statements, Doc2Vec-era gensim API).
nltk.download("stopwords")
# Download the stop words from nltk
# User input path to the train-pos.txt, train-neg.txt, test-pos.txt, and test-neg.txt datasets
if len(sys.argv) != 3:
    print "python sentiment.py <path_to_data> <0|1>"
    print "0 = NLP, 1 = Doc2Vec"
    exit(1)
path_to_data = sys.argv[1]
method = int(sys.argv[2])
def main():
    # Load the corpora, build features with the chosen method
    # (0 = binary bag-of-words, 1 = Doc2Vec embeddings), then train and
    # evaluate a Naive Bayes and a Logistic Regression classifier.
    train_pos, train_neg, test_pos, test_neg = load_data(path_to_data)
    if method == 0:
        train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec = feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg)
        nb_model, lr_model = build_models_NLP(train_pos_vec, train_neg_vec)
    if method == 1:
        train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec = feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg)
        nb_model, lr_model = build_models_DOC(train_pos_vec, train_neg_vec)
    print "Naive Bayes"
    print "-----------"
    evaluate_model(nb_model, test_pos_vec, test_neg_vec, True)
    print ""
    print "Logistic Regression"
    print "-------------------"
    evaluate_model(lr_model, test_pos_vec, test_neg_vec, True)
def load_data(path_to_dir):
    """
    Loads the train and test set into four different lists.

    Each returned list holds one entry per line of the corresponding file:
    the lowercased tokens of length >= 3 from that line.
    """
    def _read_words(filename):
        # One token list per line; drop tokens shorter than 3 characters.
        rows = []
        with open(filename, "r") as f:
            for line in f:
                rows.append([w.lower() for w in line.strip().split() if len(w) >= 3])
        return rows

    # The four files share identical parsing; the original duplicated the
    # loop four times (and wrapped one in a pointless enumerate).
    train_pos = _read_words(path_to_dir + "train-pos.txt")
    train_neg = _read_words(path_to_dir + "train-neg.txt")
    test_pos = _read_words(path_to_dir + "test-pos.txt")
    test_neg = _read_words(path_to_dir + "test-neg.txt")
    return train_pos, train_neg, test_pos, test_neg
def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):
    """
    Returns the binary feature vectors for all text in the train and test
    datasets.

    A word becomes a feature when it:
      (1) is not an English stop word,
      (2) appears in at least 1% of the positive or 1% of the negative
          training texts, and
      (3) appears in at least twice as many positive texts as negative
          ones, or vice-versa.
    """
    # English stopwords from nltk
    stopwords = set(nltk.corpus.stopwords.words('english'))

    def _doc_frequencies(texts):
        # Count how many texts contain each non-stopword (dedup per text).
        words = []
        for line in texts:
            for word in set(line):
                if word not in stopwords:
                    words.append(word)
        return collections.Counter(words)

    pos_word_dict = _doc_frequencies(train_pos)
    neg_word_dict = _doc_frequencies(train_neg)

    # Apply the frequency and polarity filters in one pass over the words
    # common to both classes (the original ran two separate filter loops).
    feature_vec = []
    for word in set(pos_word_dict.keys()).intersection(set(neg_word_dict.keys())):
        frequent = (pos_word_dict[word] >= 0.01 * len(train_pos)
                    or neg_word_dict[word] >= 0.01 * len(train_neg))
        polarized = (pos_word_dict[word] >= 2 * neg_word_dict[word]
                     or neg_word_dict[word] >= 2 * pos_word_dict[word])
        if frequent and polarized:
            feature_vec.append(word)

    def _binarize(texts):
        # Build a 0/1 vector per text: 1 iff the feature word occurs.
        # Set membership replaces the original O(len(line)) list scans.
        vectors = []
        for line in texts:
            present = set(line)
            vectors.append([1 if word in present else 0 for word in feature_vec])
        return vectors

    # Return the four feature vectors
    return (_binarize(train_pos), _binarize(train_neg),
            _binarize(test_pos), _binarize(test_neg))
def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):
    """
    Returns the Doc2Vec feature vectors for all text in the train and test
    datasets.
    """
    # Doc2Vec requires LabeledSentence objects as input.
    # Turn the datasets from lists of words to lists of LabeledSentence objects,
    # each tagged with a unique '<SPLIT>_<i>' label.
    labeled_train_pos = []
    labeled_train_neg = []
    labeled_test_pos = []
    labeled_test_neg = []
    i = 0
    for line in train_pos:
        labeled_train_pos.append(LabeledSentence(line, ['TRAIN_POS_%i' % i]))
        i += 1
    i = 0
    for line in train_neg:
        labeled_train_neg.append(LabeledSentence(line, ['TRAIN_NEG_%i' % i]))
        i += 1
    i = 0
    for line in test_pos:
        labeled_test_pos.append(LabeledSentence(line, ['TEST_POS_%i' % i]))
        i += 1
    i = 0
    for line in test_neg:
        labeled_test_neg.append(LabeledSentence(line, ['TEST_NEG_%i' % i]))
        i += 1
    # Initialize model
    model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)
    sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg
    model.build_vocab(sentences)
    # Train the model over 5 shuffled epochs.
    # This may take a bit to run
    for i in range(5):
        print "Training iteration %d" % (i)
        random.shuffle(sentences)
        model.train(sentences)
    # Use the docvecs lookup to extract the learned document vectors for
    # the training and test data, in original order.
    train_pos_vec = []
    train_neg_vec = []
    test_pos_vec = []
    test_neg_vec = []
    for j in range(len(train_pos)):
        train_pos_vec.append(model.docvecs['TRAIN_POS_%i' % j])
    for j in range(len(train_neg)):
        train_neg_vec.append(model.docvecs['TRAIN_NEG_%i' % j])
    for j in range(len(test_pos)):
        test_pos_vec.append(model.docvecs['TEST_POS_%i' % j])
    for j in range(len(test_neg)):
        test_neg_vec.append(model.docvecs['TEST_NEG_%i' % j])
    # Return the four feature vectors
    return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec
def build_models_NLP(train_pos_vec, train_neg_vec):
    """
    Returns a BernoulliNB and LogisticRegression model that are fit to the
    training data (binary bag-of-words vectors).
    """
    Y = ["pos"]*len(train_pos_vec) + ["neg"]*len(train_neg_vec)
    # Use sklearn's BernoulliNB and LogisticRegression functions to fit two models to the training data.
    # For BernoulliNB, use alpha=1.0 and binarize=None
    # For LogisticRegression, pass no parameters
    # NOTE(review): this local import shadows the module-level sklearn imports.
    from sklearn import linear_model,naive_bayes
    lr = linear_model.LogisticRegression()
    lr_model = lr.fit(train_pos_vec + train_neg_vec, Y)
    nb = naive_bayes.BernoulliNB(alpha=1.0, binarize=None)
    nb_model = nb.fit(train_pos_vec + train_neg_vec, Y)
    return nb_model, lr_model
def build_models_DOC(train_pos_vec, train_neg_vec):
    """
    Returns a GaussianNB and LogisticRegression model that are fit to the
    training data (dense Doc2Vec vectors).
    """
    Y = ["pos"]*len(train_pos_vec) + ["neg"]*len(train_neg_vec)
    # Use sklearn's GaussianNB and LogisticRegression functions to fit two models to the training data.
    # For LogisticRegression, pass no parameters
    # NOTE(review): this local import shadows the module-level sklearn imports.
    from sklearn import linear_model,naive_bayes
    lr = linear_model.LogisticRegression()
    lr_model = lr.fit(train_pos_vec + train_neg_vec, Y)
    gnb = naive_bayes.GaussianNB()
    gnb_model = gnb.fit(train_pos_vec + train_neg_vec, Y)
    return gnb_model, lr_model
def evaluate_model(model, test_pos_vec, test_neg_vec, print_confusion=False):
    """
    Prints the confusion matrix and accuracy of the model.
    """
    # Counting the predicted labels on the positive/negative test halves
    # gives the confusion-matrix cells directly.
    from collections import Counter
    positive_pred = Counter(model.predict(test_pos_vec))
    negative_pred = Counter(model.predict(test_neg_vec))
    tp = positive_pred['pos']
    fn = positive_pred['neg']
    tn = negative_pred['neg']
    fp = negative_pred['pos']
    accuracy = float(tp + tn) / float(tp + tn + fp + fn)
    if print_confusion:
        print "predicted:\tpos\tneg"
        print "actual:"
        print "pos\t\t%d\t%d" % (tp, fn)
        print "neg\t\t%d\t%d" % (fp, tn)
    print "accuracy: %f" % accuracy
if __name__ == "__main__":
    # Entry point: expects <path_to_data> <0|1> on the command line.
    main()
998,088 | e5a950490c17edacdcdb327ed7d220de3af6b5d5 | import numpy as np
import matplotlib.pylab as plt
import sys
sys.path.append('../zdrojaky')
from tsplot import tsplot
from nig import NiG
from scipy.io import loadmat
# Load the pre-recorded measurements and select one channel/window.
# NOTE(review): hard-coded path — confirm /tmp/Gi.mat is provisioned.
datafile = loadmat('/tmp/Gi.mat')
rolling_data = datafile['CDsel']
start = 300
end = 850
data = rolling_data[start:end,10]
ndat = data.size
tsplot(data)
# Second-order autoregressive model with a normal-inverse-gamma prior.
ARorder = 2
xi0 = np.eye(ARorder+2) * 0.1
xi0[0,0] = 0.01
nu0 = 10
ar = NiG(xi0, nu0)
pred = []
for t in range(ndat):
    # Not enough history for a full regressor yet.
    # NOTE(review): `t == ndat` can never hold inside range(ndat) — dead condition.
    if t < ARorder or t == ndat:
        pred.append(0)
        continue
    # One-step-ahead prediction from the current posterior mean beta_hat,
    # with regressor [1, x_t, ..., x_{t-p+1}].
    data_for_pred = np.flip(data[t-ARorder+1:t+1], 0)
    pred.append(np.dot(ar.beta_hat, np.insert(data_for_pred, 0, 1)))
    # Bayesian update with regressor [1, x_{t-1}, ..., x_{t-p}].
    regressor = np.insert(np.flip(data[t-ARorder:t],0), 0, 1)
    xt = data[t]
    ar.update(xt, regressor)
    ar.log()
#%%
# Collect logged posterior moments for plotting.
Ebeta_log = np.array(ar.Ebeta_log)
varbeta_log = np.array(ar.var_beta_log)
Esigma2_log = np.array(ar.Esigma2_log)
varsigma2_log = np.array(ar.var_sigma2_log)
pred = np.array(pred)
#%%
# Regression coefficients with +/- 3 std bands.
plt.figure(figsize=(14, 2*ARorder))
for i in range(ARorder+1):
    plt.subplot(ARorder+1, 1, i+1)
    plt.plot(Ebeta_log[:,i])
    plt.plot(Ebeta_log[:,i] + 3*np.sqrt(varbeta_log[:,i]), 'gray')
    plt.plot(Ebeta_log[:,i] - 3*np.sqrt(varbeta_log[:,i]), 'gray')
plt.show()
#%%
# Noise variance estimate with +/- 3 std bands.
plt.figure(figsize=(14,3))
plt.plot(Esigma2_log)
plt.plot(Esigma2_log - 3*np.sqrt(varsigma2_log))
plt.plot(Esigma2_log + 3*np.sqrt(varsigma2_log))
#%%
# Data vs. one-step predictions.
plt.figure(figsize=(14, 4))
plt.plot(data)
plt.plot(pred, '+')
plt.show()
#%%
# Prediction residuals: RMSE and histogram.
residues = pred[ARorder+1:] - data[ARorder+1:]
print("RMSE: ", np.sqrt(np.mean(residues**2)))
plt.hist(residues, bins=15)
998,089 | 192d7d4e1ac871b10995323768df28c131dfb936 | import argparse
import logging
import os
from . import __version__, DEFAULT_PAGINATE_BY
from .main import do_everything
def main():
    """Parse command-line options, configure logging and run the analysis."""
    # Setup command line option parser
    parser = argparse.ArgumentParser(
        description='Parametric modeling of buckling and free vibration in '\
                    'prismatic shell structures, performed by solving the '\
                    'eigenvalue problem in HCFSM.'
    )
    parser.add_argument(
        'data_file',
        help="Data file describing the parametric model, please see "\
             "'examples/data-files/barbero-viscoelastic.yaml' for an example"
    )
    parser.add_argument(
        '-r',
        '--results-file',
        metavar='FILENAME',
        help="Store results to the selected FILENAME, uses '<data_file>.hdf5' by default"
    )
    parser.add_argument(
        '-d',
        '--purge-integral-db-cache',
        action='store_true',
        help='Purge the integral db cache, forcing it to redownload'
    )
    parser.add_argument(
        '-p',
        '--paginate-by',
        metavar='NUM',
        type=int,
        default=DEFAULT_PAGINATE_BY,
        help="Show progress every NUM iterations, %d by default" % DEFAULT_PAGINATE_BY
    )
    # -q/-v both write into `verbosity`; whichever is given last wins.
    parser.add_argument(
        '-q',
        '--quiet',
        action='store_const',
        const=logging.WARN,
        dest='verbosity',
        help='Be quiet, show only warnings and errors'
    )
    parser.add_argument(
        '-v',
        '--verbose',
        action='store_const',
        const=logging.DEBUG,
        dest='verbosity',
        help='Be very verbose, show debug information'
    )
    parser.add_argument(
        '--version',
        action='version',
        version="%(prog)s " + __version__
    )
    args = parser.parse_args()
    # Configure logging; INFO is the default when neither -q nor -v is given.
    log_level = args.verbosity or logging.INFO
    logging.basicConfig(level=log_level, format="%(asctime)s [%(levelname)s] %(message)s")
    # Default the results file to the data file's name with an .hdf5 suffix.
    if not args.results_file:
        args.results_file = os.path.splitext(args.data_file)[0] + '.hdf5'
    do_everything(
        data_file=args.data_file,
        results_file=args.results_file,
        purge_integral_db_cache=args.purge_integral_db_cache,
        paginate_by=args.paginate_by,
    )
if __name__ == '__main__':
    # CLI entry point.
    main()
|
998,090 | da6696aec3fbec14780a784de21b57fde95d5f13 | """
Refer to https://github.com/BramVanroy/bert-for-inference/blob/master/introduction-to-bert.ipynb for a quick intro
Code credits to: Bram Vanroy
"""
from transformers import BertModel, BertTokenizer
import torch
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# Convert the string "granola bars" to tokenized vocabulary IDs
granola_ids = tokenizer.encode('granola bars', return_tensors='pt')
# Print the IDs
print('granola_ids', granola_ids)
print('type of granola_ids', type(granola_ids))
# Convert the IDs to the actual vocabulary item
# Notice how the subword unit (suffix) starts with "##" to indicate
# that it is part of the previous string
print('granola_tokens', tokenizer.convert_ids_to_tokens(granola_ids[0]))
# output_hidden_states=True makes the model return all layer activations.
model = BertModel.from_pretrained('bert-base-uncased', output_hidden_states=True)
# Set the device to GPU (cuda) if available, otherwise stick with CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = model.to(device)
granola_ids = granola_ids.to(device)
model.eval()
print(granola_ids.size())
# unsqueeze IDs to get batch size of 1 as added dimension
# (not needed: return_tensors='pt' already yields a batched tensor)
# granola_ids = granola_ids.unsqueeze(0)
# print(granola_ids.size())
print(type(granola_ids))
# Inference only — no gradients needed.
with torch.no_grad():
    out = model(input_ids=granola_ids)
# the output is a tuple
print(type(out))
# the tuple contains three elements as explained above)
print(len(out))
# we only want the hidden_states
hidden_states = out[2]
print(len(hidden_states))
print(hidden_states[-1].shape)
# Sentence embedding #1: mean over tokens of the last layer.
sentence_embedding = torch.mean(hidden_states[-1], dim=1).squeeze()
print(sentence_embedding)
print(sentence_embedding.size())
# get last four layers
last_four_layers = [hidden_states[i] for i in (-1, -2, -3, -4)]
# cast layers to a tuple and concatenate over the last dimension
cat_hidden_states = torch.cat(tuple(last_four_layers), dim=-1)
print(cat_hidden_states.size())
# Sentence embedding #2: take the mean of the concatenated vector over the
# token dimension.
cat_sentence_embedding = torch.mean(cat_hidden_states, dim=1).squeeze()
# print(cat_sentence_embedding)
print(cat_sentence_embedding.size())
998,091 | 912b67e4021c1ec05471437069ec0fea4a8a0a9c | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-12-26 09:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: updates ViewLog's verbose names and renames
    # its database table to 'cats_view_log'. Do not edit by hand.
    dependencies = [
        ('cats', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='viewlog',
            options={'verbose_name': 'view log', 'verbose_name_plural': 'view log'},
        ),
        migrations.AlterModelTable(
            name='viewlog',
            table='cats_view_log',
        ),
    ]
|
998,092 | 234f58be7cfcc6260500903dccdf4c244908bb5b | #!/usr/bin/env python
from scipy.sparse import coo_matrix
class Matrix:
"""A sparse matrix class (indexed from zero). Replace with NumPy arrays."""
def __init__(self, matrix=None, size=None):
"""Size is a tuple (m,n) representing m rows and n columns."""
if matrix is None:
self.data = {}
if size is None:
self.column_count = 0
self.row_count = 0
self.size = (self.row_count, self.column_count)
else:
self.row_count, self.column_count = size[:2]
self.size = size
else:
"""Initialize to be a clone of the given matrix."""
self.column_count = matrix.column_count
self.data = matrix.data
self.row_count = matrix.row_count
self.size = matrix.size
def get(self, subscript):
"""Return the matrix element indexed by the given (valid) subscript."""
row, column = subscript[:2]
if self.__is_valid(subscript):
# Get the value if it's present, else a zero.
result = self.data.get(subscript, 0)
else:
raise IndexError
return result
def set(self, subscript, value):
"""Set the matrix element indexed by the given (valid) subscript."""
if value != 0 and self.__is_valid(subscript):
self.data[subscript] = value
else:
raise IndexError
def __is_valid(self, subscript):
"""Return whether the given subscript is within the matrix's bounds."""
return ((0,0) <= subscript and subscript < self.size)
def __is_valid_row(self, row_number):
"""Return whether the given row is within the matrix's bounds."""
return self.__is_valid((row_number, 0))
def __str__(self):
"""Return a NumPy-like matrix representation."""
result = ""
for row in range(self.size):
current = []
for column in range(self.size):
subscript = (row, column)
current.append(self.get(subscript))
if result == "":
result = "[{}".format(current)
else:
result = "{0}\n {1}".format(result, current)
return "{}]".format(result)
def extend_columns(self, matrix):
raise NotImplementedError
def extend_rows(self, matrix):
"""Extend the current matrix with the given matrix."""
row_count, column_count = matrix.size[:2]
if column_count != self.column_count:
raise ValueError
self.row_count += row_count
self.size = (self.row_count, self.column_count)
base_row_count = self.row_count
for key, value in matrix.data.items():
row, column = key[:2]
self.set((base_row_count + row, column), value)
return self
def replace_row(self, row_number, vector):
    """Replace the specified row with the given (row or column) vector.

    Raises ValueError for an out-of-bounds row or a non-vector argument.
    Returns self.

    Bug fixes versus the original:
    * the stale-entry scan iterated ``self.data.items()`` and unpacked the
      (key, value) pairs as (row, col), so nothing ever matched;
    * ``self.data.pop(row_number, col)`` treated ``col`` as pop's default
      argument instead of building the (row, col) key;
    * the row-vector branch relied on ``vector.transpose()`` returning a
      value, which it did not.
    """
    if not self.__is_valid_row(row_number):
        raise ValueError
    row_count, column_count = vector.size[:2]
    if row_count != 1 and column_count != 1:
        raise ValueError
    # Eliminate current row entries; keys are (row, column) tuples.
    stale_keys = [key for key in self.data if key[0] == row_number]
    for key in stale_keys:
        del self.data[key]
    # Copy the vector's entries into the row, accepting either orientation:
    # a column vector indexes by its row, a row vector by its column.
    for key, value in vector.data.items():
        row, column = key[:2]
        index = row if column_count == 1 else column
        self.set((row_number, index), value)
    return self
def submatrix(self, row_set, column_set):
    """Return a submatrix with the given rows and columns.

    Extraction itself is not implemented yet.  Bug fix: the constructor
    takes (matrix=None, size=None), so the two positional ints were bound
    to ``matrix`` and crashed with AttributeError before the
    NotImplementedError was ever reached; pass the dimensions as ``size``.
    """
    submatrix = Matrix(size=(len(row_set), len(column_set)))
    raise NotImplementedError
def to_vec(self):
    """Return an m*n length column vector stacking the matrix's columns.

    Bug fix: column-major stacking must place element (row, col) at index
    ``col * row_count + row``.  The original multiplied by column_count,
    which is only correct for square matrices and maps distinct elements
    to the same index otherwise (e.g. (2, 0) and (0, 1) in a 3x2 matrix).
    """
    row_count = self.row_count
    vector = Matrix(size=(row_count * self.column_count, 1))
    for key, value in self.data.items():
        row, column = key[:2]
        # Column j occupies vector rows j*m .. j*m + (m - 1).
        subscript = (column * row_count + row, 0)
        vector.set(subscript, value)
    return vector
def to_ijv(self):
    """Return the matrix in ijv (triplet) format.

    Returns three parallel lists: row indices, column indices, and the
    corresponding nonzero values.  (The original kept an unused counter
    ``k``; it has been removed.)
    """
    row_indices = []
    column_indices = []
    nonzero_elements = []
    for key, value in self.data.items():
        if value == 0:
            # Defensive: stored zeros carry no information in ijv form.
            continue
        row, col = key[:2]
        row_indices.append(row)
        column_indices.append(col)
        nonzero_elements.append(value)
    return row_indices, column_indices, nonzero_elements
def to_coo_matrix(self):
    """Return the matrix in SciPy COOrdinate (sparse triplet) format.

    Bug fix: ``self.size`` is already the (m, n) shape tuple, so the
    original ``shape=(self.size, self.size)`` passed a nested pair of
    tuples, which scipy rejects.
    """
    row_indices, column_indices, nonzero_elements = self.to_ijv()
    return coo_matrix((nonzero_elements, (row_indices, column_indices)),
                      shape=self.size)
def transpose(self):
    """Transpose the matrix in place and return self.

    Bug fixes versus the original:
    * it read the undefined global ``matrix`` instead of ``self``;
    * it stored the result in ``self.matrix`` instead of ``self.data``;
    * it never swapped row_count/column_count;
    * it returned None, although replace_row used its return value.
    """
    m, n = self.size[:2]
    transposed_data = {}
    for key, value in self.data.items():
        i, j = key[:2]
        transposed_data[(j, i)] = value
    self.data = transposed_data
    self.row_count = n
    self.column_count = m
    self.size = (n, m)
    return self
|
998,093 | 948b64a3f9226b0f27cffc0476e68e8116e7f438 |
"""
prompt user to input number as of todays toppings
set factn as 1
if n is < 0 print "It is not possible to have minus toppings."
else for i in range 1 to n plus 1
factn equals factn multiplied by i
print "Factorial of", number, "is", factn" to show how program works
prompt user to input number as of stanard toppings
set factk as 1
if k is < 0 print "It is not possible to have minus toppings."
else for i in range 1 to k plus 1
factk equals factk multiplied by i
print "Factorial of", number, "is", factk" to show how program works
set factnk as 1
if number n and k < 0
else for i in range 1 to nk plus 1
factnk equals factnk multiplied by i
set below as factk multiplied by factnk
set combo as factn divided by below
print The possible number of toppings today is: ", combo
"""
n = int(input("How many toppings on the pizza today? \n"))
factn = 1
if n < 0:
print("It is not possible to have minus toppings.")
else:
for i in range(1, n + 1):
factn *=i
print("Factorial of", n, "is", factn)
k = int(input("How many standard toppings on the pizza? \n"))
factk = 1
if k < 0:
print("It is not possible to have minus toppings.")
else:
for i in range(1, k + 1):
factk *=i
print("Factorial of", k, "is", factk)
factnk = 1
if n < 0 and k < 0:
print()
else:
nk = n-k
for i in range(1, nk + 1):
factnk *=i
below = factk * factnk
combo = factn / below
print("The possible number of toppings today is: ", combo)
|
998,094 | 952b76bdbda889a5f48bd954b3cb1806cefc89c1 | # Created by Qixun Qu
# quqixun@gmail.com
# 2017/04/03
#
# This script provides a test on generating
# SIFT descriptors on images which consist
# of digits. This script and relative
# functions are tested under Python 3.5.2.
import numpy as np
import generate_SIFT as gs
import classify_SIFT as cs
# -------------------------------------------------
# Stage 1: Test some functions using a simple case
# -------------------------------------------------
# Generate a simple deterministic image to test (values 11..100 in a 10x9 grid)
simple_test_img = np.arange(start=11, stop=101).reshape([9, 10]).T
# simple_test_img = np.random.normal(size=[9, 10]).T
# Get a patch from the test image, centred at (5, 5)
test_patch = gs.get_patch(simple_test_img, 5, 5, 3)
# Filter the test patch with a gaussian filter
# (result is unused below; kept as a smoke test of gaussian_filter)
sigma = 3.0
test_patch_filt = gs.gaussian_filter(test_patch, sigma)
# Compute the gradient of the test patch in x and y direction
patch_grad_x, patch_grad_y = gs.gaussian_gradients(test_patch, sigma)
# Plot gradients on test patch
# gs.plot_gradients(test_patch, patch_grad_x, patch_grad_y)
# Compute the gradient histogram of the test patch
histogram = gs.gradient_histogram(patch_grad_x, patch_grad_y)
# Plot bouquet of gradients histogram
# gs.plot_bouqute(histogram)
# -------------------------------------------------------------
# Stage 2: Test some functions using one image which has digit
# -------------------------------------------------------------
# The image set in digits.mat has 100 training images and
# 50 validation images; all images are grayscale, without precomputed
# SIFT descriptors, and each is 39 by 39 pixels
train_set, validate_set = gs.read_data('Data/digits.mat')
# Set the index of an image in the training set; this image is used
# in the next few steps. The valid range of idx is 1 to 100
idx = 38
# Extract the image from the training set
train_img = train_set[0, idx - 1][0]
# Set the position of the train image's centre
# and the scale of the interested patch that needs to be processed;
# in this case the entire training image is the interested patch
scale = 39
position = np.array([[20, 20]])
obj_pos = np.array([[position, scale]])
# Plot nine grids on training image
# gs.plot_grides(train_img, obj_pos)
# Calculate the descriptor of this training image
# (unused below; demonstrates gradient_descriptor on a single image)
desc = gs.gradient_descriptor(train_img, obj_pos)
# ----------------------------------------------
# Stage 3: Classification all validation images
# ----------------------------------------------
# Compute descriptors for all training images
train_set_desc, train_labels = cs.prepare_digits(train_set, obj_pos)
# print(train_labels)
# Validate one digit image, get the label for this image
# and display the classification result
label = cs.classify_digit(validate_set[0, idx - 1][0],
                          train_set_desc, train_labels, obj_pos)
print("Validate one digit - the NO.{} validation image".format(idx - 1))
# NOTE(review): other lookups in this script use idx - 1; confirm whether
# train_labels[0, idx] here is an off-by-one.
print("The label of this image is {}, it should be {}.\n".format(
    label, train_labels[0, idx]))
# Validate all images and show classification results
cs.classify_all_digit(validate_set, train_set_desc, train_labels, obj_pos)
998,095 | 7aa8b6334639369c87f6d24690121404a99cbf02 | import paho.mqtt.client as mqtt
# Broker endpoints: the local (Jetson) broker that produces detections, and
# the cloud broker the messages are forwarded to.
MQTT_HOST = "172.20.0.2"
MQTT_PORT = 1883
MQTT_TOPIC = "hw3"

CLOUD_MQTT_HOST = "169.61.16.130"
CLOUD_MQTT_PORT = 1883
CLOUD_MQTT_TOPIC = "facedetection"


def on_connect(client, userdata, flags, rc):
    """CONNACK callback for the local broker."""
    print("Connected to jetson")


def on_connect_cloud(client, userdata, flags, rc):
    """CONNACK callback for the cloud broker."""
    print("Connected to cloud")


def on_message(client, userdata, msg):
    """Forward every message from the local topic to the cloud topic."""
    print("on message received")
    cloudmqttclient.publish(CLOUD_MQTT_TOPIC, payload=msg.payload, qos=2, retain=False)


# Cloud client.  Fixes versus the original: assign on_connect *before*
# connect() so the CONNACK callback is not missed, and start a background
# network loop -- without loop_start() the cloud connection is never
# serviced, so the QoS-2 publish handshake can never complete.
cloudmqttclient = mqtt.Client()
cloudmqttclient.on_connect = on_connect_cloud
cloudmqttclient.connect(CLOUD_MQTT_HOST, CLOUD_MQTT_PORT, 60)
cloudmqttclient.loop_start()

# Local client: subscribe and block forever, bridging messages as they arrive.
mqttclient = mqtt.Client()
mqttclient.on_connect = on_connect
mqttclient.on_message = on_message
mqttclient.connect(MQTT_HOST, MQTT_PORT, 60)
mqttclient.subscribe(MQTT_TOPIC, qos=2)
mqttclient.loop_forever()  # Start networking daemon
998,096 | d016d4ec47ed5793c50d425593ecbd10a9645ca4 | #!/usr/bin/env python
"""cardapios_spider.py: Very simple spider to crawl through the
online menus of the University of São Paulo :D ."""
__author__ = "Juliano Garcia de Oliveira"
__copyright__ = "Copyright NULL, Planet Earth"
import scrapy
from scrapy_splash import SplashRequest
# ANSI terminal color escape codes; only P (purple) and W (reset) are used
# by the spider below to highlight the dumped response body.
W = '\033[0m'  # white (normal / reset)
R = '\033[31m' # red
G = '\033[32m' # green
O = '\033[33m' # orange
B = '\033[34m' # blue
P = '\033[35m' # purple
class CardapioSpider(scrapy.Spider):
    """Crawl the USP online menu pages (rendered through Splash) and dump
    each restaurant's HTML to a local file."""

    # Spider Identifier
    name = "cardapiosSAS"
    # allowed_domains must contain bare domain names; the original held a
    # full URL (scheme + trailing slash), which makes Scrapy's offsite
    # middleware filter every request.
    allowed_domains = ["uspdigital.usp.br"]
    start_urls = [f"https://uspdigital.usp.br/rucard/Jsp/cardapioSAS.jsp?codrtn={i}"
                  for i in range(7, 10)]

    def start_requests(self):
        """Route every start URL through Splash so the JS-built menu renders."""
        for url in self.start_urls:
            yield SplashRequest(url=url, callback=self.parse,
                                args={
                                    'wait': 5,})

    def parse(self, response):
        """Dump the rendered page body to c_<restaurant>.html."""
        # codrtn code -> restaurant name (codes 7-9, matching start_urls)
        dictRest = {"7": "Prefeitura", "8": "Física", "9": "Química"}
        print(P)
        print(response.body)
        print(W)
        # The last character of the URL is the single-digit codrtn value.
        page = dictRest[response.url.split("/")[-1][-1]]
        filename = "c_%s.html" % page
        with open(filename, mode="wb") as f:
            f.write(response.body)
        self.log("Saved dump file %s" % filename)
998,097 | da10fd716218978df1a89477060adb8c076f1d71 | from bluetooth import *
import subprocess
target_name = "NJ's Cellphone"
target_address = "00:1A:75:C2:61:CE"
#target_address = "00:1A:89:3A:6B:FF"
flag = 0
flag1 = 0
while flag1 == 0 :
nearby_devices = discover_devices(duration=4)
if(len(nearby_devices) != 0) :
for address in nearby_devices :
if address == target_address :
flag1 = 1
print "Device Found"
print nearby_devices
in_range = 1
while in_range == 1:
nearby_devices = discover_devices(duration=4)
if(len(nearby_devices) != 0) :
for address in nearby_devices :
if address == target_address :
in_range = 1
break
else :
in_range = 0
else :
in_range = 0
subprocess.call("gnome-screensaver; gnome-screensaver-command -l",shell=True)
in_range = 0
while in_range == 0:
nearby_devices = discover_devices(duration=4)
if(len(nearby_devices) != 0) :
for address in nearby_devices :
if address == target_address :
in_range = 1
break
else :
in_range = 0
else :
in_range = 0
if in_range == 1 :
print "Device back in range, unlocking..."
subprocess.call("gnome-screensaver; gnome-screensaver-command -d",shell=True)
|
998,098 | 4771fde616560cdc67ecc0ee840b7206be11f838 | from first_class_functions_asgnmnt import *
nums_list = [1, 2, 3, 4, 5]
# a higher order function which takes a function and an array of numbers as args
# here, we wil pass in a square func to which every num from the array is passed to be squared
def func_array_as_args(func, num_array):
    """Apply *func* to each number in *num_array* and return the results.

    A higher-order function: any unary callable (e.g. a square or cube
    helper) can be passed in as *func*.
    """
    return [func(num) for num in num_array]
# Demonstrate the higher-order function with the imported square helper
print(func_array_as_args(square_var, nums_list))
# ...and again with the imported cube helper
print(func_array_as_args(cube_var, nums_list))
|
998,099 | 9ea06a7e48831279057e06b5ec983264c7728877 | from day22.grid import Grid
from day22.node import Node
from day22.solver import Solver
# Read the puzzle input, skipping the two header lines.  rstrip('\n') is
# used instead of line[:-1], which would eat the final character of a file
# that lacks a trailing newline.  (readlines never yields an empty string,
# so the length filter is purely defensive.)
with open('input.txt') as f:
    lines = [line.rstrip('\n') for line in f.readlines() if len(line) > 0][2:]
nodes = [Node.from_string(line) for line in lines]

# Part 1: count viable pairs (kept for reference).
# viable_pairs = 0
# for node1 in nodes:
#     for node2 in nodes:
#         if Node.viable_pair(node1, node2):
#             viable_pairs += 1
#
# print(viable_pairs)

# Part 2: fewest steps to move the goal data to the origin node.
grid = Grid(nodes)
solver = Solver(grid)
print(solver.fewest_amount_of_steps())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.