index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
19,900 | 2352f31ca39c1eed78e0079da37096aaf44a7176 | from django.db import models
from django.template import Context, Template
from django.template.defaultfilters import slugify
from tinymce import models as tinymce_models
from photologue.models import Photo
class Question(models.Model):
    """A poll question and when it was published."""
    question_text = models.CharField(max_length=200)  # text shown to voters
    pub_date = models.DateTimeField('date published')
class Choice(models.Model):
    """One answer option belonging to a Question, with its running vote tally."""
    question = models.ForeignKey(Question)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)  # incremented each time the choice is voted for
class Resume(models.Model):
    """An uploaded resume: the PDF itself plus an accompanying image."""
    pdf = models.FileField(upload_to='pdfs')
    # NOTE(review): field is named "photos" (plural) but ImageField stores a
    # single image — confirm whether multiple images were intended.
    photos = models.ImageField(upload_to='photos')
class Blog_Post(models.Model):
    """A blog entry whose HTML body may itself be rendered as a Django template."""
    title = models.CharField(max_length=100, unique=True)
    # Derived from title in save(); kept unique so it can be used in URLs.
    slug = models.SlugField(max_length=100, unique=True)
    body = tinymce_models.HTMLField(blank=True)
    photos = models.ManyToManyField(Photo, blank=True)
    posted = models.DateTimeField(auto_now_add=True)

    def posted_date(self):
        """Return the post date formatted like 'January 01, 2020'."""
        return self.posted.strftime('%B %d, %Y')

    @property
    def render(self):
        """Render the body as a Django template with {'post': self} in context.

        NOTE(review): the body is author-supplied template code, so template
        tags in it are executed — confirm authors are trusted.
        """
        t = Template(self.body)
        c = Context({'post': self})
        return t.render(c)

    def save(self, *args, **kwargs):
        # The slug is regenerated from the title on every save.
        self.slug = slugify(self.title)
        super(Blog_Post, self).save(*args, **kwargs)

    def __str__(self):
        return self.title
|
19,901 | 7e6c18449b03edef2b46154a9569597d7c86c076 | """
File that has the api core functionality
"""
from app import app
from flask import Flask, jsonify, request
import random, json
from app.models import MaintenanceRequest, User
# In-memory stores; everything is lost when the server restarts.
all_requests = []
Users = []
@app.route("/<v1>/user/register", methods=["POST"])
def register(v1):
    """Register a new user from the posted JSON payload."""
    payload = request.get_json()
    fields = ("username", "password", "email")
    values = {field: payload.get(field) for field in fields}
    # Reject the request as soon as any required field is missing.
    for field in fields:
        if not values[field]:
            return jsonify({"message": "Missing {} parameter".format(field)}), 400
    Users.append(User(values["username"], values["password"], values["email"]))
    return jsonify({'message':'successfully registered'}), 201
@app.route("/<v1>/user/login", methods=["POST"])
def login(v1):
    """Log a user in; currently only checks that the credentials were supplied."""
    payload = request.get_json()
    for field in ("email", "password"):
        if not payload.get(field):
            return jsonify({"message": "Missing {} parameter".format(field)}), 400
    return jsonify({"message": "successfully logged in"}), 200
@app.route("/<v1>/users/requests", methods=["POST"])
def create_request(v1):
    """Create a maintenance request from the posted JSON body."""
    payload = request.get_json()
    title = payload.get("request_title")
    description = payload.get("request_description")
    requester = "Gideon"  # requester is currently hard-coded
    request_id = len(all_requests) + 1  # sequential id
    # Validate each attribute before storing anything.
    if not title:
        return jsonify({"message": "Request has no title"}), 400
    if not description:
        return jsonify({"message": "Request has no description"}), 400
    if not requester:
        return jsonify({"message": "Request must be issued by a user"}), 400
    if not request_id:
        return jsonify({"message": "Request has no id"}), 400
    created = MaintenanceRequest(title, description, requester, request_id)
    all_requests.append(created)
    return jsonify({
        "message": "sucessfully created request",
        "request_title": created.title,
        "request_description": created.description,
        "requester_name": created.requester_name,
        "request_id": created.request_id
    })
@app.route("/<v1>/users/requests", methods=["GET"])
def fetch_requests(v1):
    """Return every stored maintenance request."""
    if not all_requests:
        return jsonify({
            "message":"You have not made any requests yet"
        })
    return jsonify({
        "message": "Successfully fetched requests",
        "requests": [stored.__dict__ for stored in all_requests]
    })
@app.route("/<v1>/users/requests/<requestid>", methods=["GET"])
def fetch_a_request(v1, requestid):
    """Return the single request whose id matches ``requestid``.

    Fixes two crashes in the original: when no stored request matched,
    ``returned_request[0]`` raised IndexError (an HTTP 500) instead of
    returning the not-found message, and a non-numeric ``requestid``
    raised ValueError.
    """
    if not all_requests:
        return jsonify({
            "message":"You have not made any requests yet"
        })
    try:
        wanted_id = int(requestid)
    except ValueError:
        return jsonify({"message":"Request doesnt exist"})
    for a_request in all_requests:
        if a_request.request_id == wanted_id:
            return jsonify({
                "message": "Successfully fetched the request",
                "request": a_request.__dict__
            })
    return jsonify({
        "message":"Request doesnt exist"
    })
@app.route("/<v1>/users/requests/<requestid>", methods=["PUT"])
def edit_a_request(v1, requestid):
    """Edit the title/description of the request with id ``requestid``.

    Fixes: the original fell off the end and returned None (an HTTP 500)
    when no stored request matched; it now reports that the request does
    not exist.  It also validates the posted fields before overwriting,
    mirroring create_request.
    """
    if not all_requests:
        return jsonify({
            "message":"You have not made any requests yet"
        })
    data = request.get_json()
    req_title = data.get("request_title")
    req_desc = data.get("request_description")
    if not req_title:
        return jsonify({"message": "Request has no title"}), 400
    if not req_desc:
        return jsonify({"message": "Request has no description"}), 400
    for a_request in all_requests:
        if a_request.request_id == int(requestid):
            a_request.title = req_title
            a_request.description = req_desc
            return jsonify({
                "message":"Successfully edited the request",
                "request": a_request.__dict__
            })
    return jsonify({"message":"Request doesnt exist"})
|
19,902 | fb7451cb7c38b64b0f1191e7b05ac65692949baf | import json
import scrapy
class ImmoscoutSpider(scrapy.Spider):
    """Crawl immoscout24.ch "buy" listings for every Swiss canton."""
    name = 'immoscout'
    allowed_domains = ['www.immoscout24.ch']
    root = 'https://www.immoscout24.ch'
    # Readable labels for the site's property category ids.
    PROPERTY_TYPES = {1: 'appartment', 2: 'house'}
    start_urls = [
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-appenzell-re?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-appenzell-ri?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-argovie?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-bale-campagne?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-bale-ville?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-berne?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-fribourg?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-geneve?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-glaris?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-grisons?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-jura?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-lucerne?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-neuchatel?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-nidwald?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-obwald?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-st-gall?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-schaffhouse?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-schwyz?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-soleure?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-tessin?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-turgovie?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-uri?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-valais?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-vaud?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-zoug?pn=1',
        'https://www.immoscout24.ch/fr/immobilier/acheter/canton-zurich?pn=1',
    ]

    def parse(self, response):
        """Yield one item per listing on the page, then follow the next page."""
        # The page embeds its state as a JS object inside <script id="state">.
        data_js = response.xpath('//script[@id="state"]/text()').re(r'{.*')[0]
        # The JS literal `undefined` is not valid JSON; replace it first.
        data_js = data_js.replace(':undefined', ':""')
        data = json.loads(data_js)
        for listing in data['pages']['searchResult']['resultData']['listData']:
            yield {
                'rooms': listing.get('numberOfRooms', None),
                'area': listing.get('surfaceLiving', None),
                'price': listing.get('sellingPrice', None),
                'city': listing.get('cityName', None),
                'canton': listing.get('state', None),
                # Fix: unknown category ids used to raise KeyError and abort
                # the page; they now map to None.
                'type': self.PROPERTY_TYPES.get(listing.get('propertyCategoryId')),
                'url': self.root + listing.get('propertyDetailUrl')
            }
        # Build the URL of the next results page (pn=<k+1>) and follow it.
        page_number_param_ix = response.url.find('pn=')
        page_number = int(response.url[page_number_param_ix + 3:])
        next_page_url = response.url[:page_number_param_ix] + 'pn=' + str(page_number + 1)
        yield response.follow(next_page_url, callback=self.parse)
|
19,903 | 2300c6a833fb7990389b7e636978b0d0d2f5c915 | import servo
import httplib
import json
import time
import wiringpi
device_name = "hebe"  # identifier of this device on the control server
# Gadget names handled by this device and the servo action applied to each,
# kept in matching order so keynames[i] pairs with actions[i].
keynames = ["Lights","Aircon", "Temp", "FanIntensity", "Fan"]
actions = [servo.set_lights, servo.set_aircon,servo.set_temp, servo.set_wind, servo.set_fan]
servopins = [0,1,2]
onoffconstants = ["Off","On"]
def get_server_data(server_path):
    """GET ``server_path`` from the hard-coded control server and return the
    decoded JSON payload."""
    server = "192.241.140.108"
    port = "5000"
    headers = {'Content-type': 'application/json'}
    connection = httplib.HTTPConnection(server, port=port)
    connection.request('GET', server_path, headers=headers)
    response = connection.getresponse().read()
    d = json.loads(response.decode())
    return d
def update_event(title):
    """Mark the named event as handled on the server."""
    get_server_data("/setevent/"+device_name+"?title="+title)

def get_time():
    """Return the server's current time value."""
    return get_server_data("/currtime")

def get_device_data():
    """Return the full server-side state dictionary for this device."""
    return get_server_data("/get/"+device_name)

def delete_gadget(gadget):
    """Remove a gadget entry from this device's server state."""
    get_server_data("/delgadget/"+device_name+"?gadget="+gadget)
def step():
    """Run one polling cycle: acknowledge past events, then apply pending
    gadget commands via the matching servo actions."""
    server_time = get_time()
    print "New Round!"
    data = get_device_data()
    print "got data!"
    # Acknowledge every event whose scheduled time has already passed.
    for event in data['events']:
        if data['events'][event]['time'] < server_time:
            update_event(event)
            print event + " updated!"
    print "passed events"
    # This is a very dirty and resource-draining way to get around an issue,
    # but otherwise the above loops shorts the entire thing out if there aren't
    # any events in the queue. There's definitely a better way (like actually
    # checking my inputs), but this is quickest. By a long shot.
    get_server_data("/event/"+device_name+"?title=Stop&time=10000000")
    curr_data = get_device_data()['current']
    # Apply each supported gadget command, then delete the gadget entry so
    # it is not re-applied on the next cycle.
    for i in range(len(keynames)):
        if keynames[i] in curr_data:
            print keynames[i]
            actions[i](curr_data[keynames[i]]['value'])
            delete_gadget(keynames[i])
def poll(interval):
while True:
try:
step()
except:
print "Exception - investigate..."
pass
wiringpi.delay(interval)
# Seed a far-future placeholder event (see the workaround note in step()),
# reset the servo to its base temperature, then poll with a delay of 10
# (presumably milliseconds, via wiringpi.delay — TODO confirm).
get_server_data("/event/"+device_name+"?title=Stop&time=10000000")
servo.bottom_out_temp()
poll(10)
|
19,904 | 0ff1bf96c2fdcf4f35264807884825211fcb8a64 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import utils
import analyseur as a
class Trame:
    """One serial frame plus the helpers used to decode and verify it.

    All fields are hex strings exactly as read from the wire.
    NOTE(review): the protocol interpretation in the comments below is
    inferred from how the fields are used in this class — confirm against
    the transceiver/protocol documentation.
    """
    def __init__(self, taille, org, data, id, status, checksum):
        self.taille = taille        # frame-length byte, hex string
        self.org = org              # ORG / frame-type byte, hex string
        self.data = data            # 8 hex chars = 4 data bytes
        self.id = id                # 8 hex chars = 4 sender-id bytes
        self.status = status        # status byte, hex string
        self.checksum = checksum    # checksum byte, hex string
        # Reception time, used only for display.
        self.timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

    def isLearn(self):
        """Return True when this is a learn (teach-in) frame.

        Only ORG 06/07 frames qualify; the frame is a learn frame when
        bit 3 of the fourth data byte is cleared.
        """
        if self.org in ["06","07"]:
            intdata = int(self.data[6:8],16)
            masque = int(0b00001000)
            if intdata & masque != 0:
                return False
            else:
                return True
        else:
            return False

    def analyseLearn(self):
        """Decode the learn-frame payload.

        Returns a list of hex strings [org, field1, field2, field3].  For
        ORG 07 the first three data bytes are split into bit fields of 6,
        7 and 11 bits; for ORG 06 fixed values are returned.
        """
        metadata = []
        databin = []
        sep = ""
        if self.org == "07":
            metadata.append(self.org)
            # Expand the three data bytes into a list of 24 bit characters
            # ([2:] strips the '0b' prefix, zfill pads to a full byte).
            databin.extend(utils.hexstrtobin(self.data[0:2])[2:].zfill(8))
            databin.extend(utils.hexstrtobin(self.data[2:4])[2:].zfill(8))
            databin.extend(utils.hexstrtobin(self.data[4:6])[2:].zfill(8))
            # Re-pack each bit field as a zero-padded upper-case hex string.
            metadata.append(str(hex(int(sep.join(databin[0:6]),2)))[2:].upper().zfill(2))
            metadata.append(str(hex(int(sep.join(databin[6:13]),2)))[2:].upper().zfill(2))
            metadata.append(str(hex(int(sep.join(databin[13:]),2)))[2:].upper().zfill(2))
        elif self.org == "06":
            # ORG 06 learn frames carry no profile data; use fixed values.
            metadata.append("06")
            metadata.append("00")
            metadata.append("01")
        return metadata

    def display(self):
        """Print a human-readable dump of the frame (Python 2 prints;
        the user-facing strings are kept verbatim)."""
        print "\nYop je suis une trame !"
        print "taille : " + self.taille \
            + "\nORG : " + self.org \
            +" \ndata : " + self.data \
            + "\nid : " + self.id \
            + "\nstatus : " + self.status \
            + "\nchecksum : " + self.checksum \
            + "\nTimeStamp : " + str(self.timestamp)

    def checkIntegrity(self):
        """Return True when the stored checksum matches the computed one.

        The checksum is the low byte of the sum of every frame byte
        (length, ORG, 4 data bytes, 4 id bytes, status).
        """
        controle = 0
        controle += int(self.taille,16)
        controle += int(self.org,16)
        controle += int(self.data[0:2],16)
        controle += int(self.data[2:4],16)
        controle += int(self.data[4:6],16)
        controle += int(self.data[6:8],16)
        controle += int(self.id[0:2],16)
        controle += int(self.id[2:4],16)
        controle += int(self.id[4:6],16)
        controle += int(self.id[6:8],16)
        controle += int(self.status,16)
        # Keep only the last two hex digits (the low byte) for comparison.
        strControle = hex(controle)[2:].upper()
        print "controle"
        print hex(controle)[2:]
        print self.checksum
        if strControle[len(strControle)-2:] == self.checksum:
            return True
        else:
            return False

    def isOK(self):
        """Alias for checkIntegrity()."""
        if self.checkIntegrity():
            return True
        else:
            return False
19,905 | c763ba1b00b89193b586a382685d74c8764699f7 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 21 14:00:49 2019
@author: rounak
"""
from django import forms
class Submit(forms.Form):
    """Single-field form used to submit a URL for processing."""
    website = forms.CharField(label='Submit URL')  # the URL typed by the user
19,906 | c7f0062ba1a7d8a46debf6f692d63c52737ff0b1 | #!/usr/bin/env dls-python
from pkg_resources import require
# Pin the exact dependency versions this plugin was validated against.
require("fit_lib == 1.3")
require("scipy == 0.10.1")
require("cothread==2.15")
from adPythonPlugin import AdPythonPlugin
import cv2
import numpy
import scipy.ndimage
class SlowBenchmark(AdPythonPlugin):
    """areaDetector Python plugin: median-filter a frame, threshold it and
    draw the detected contours back onto the image."""
    tempCounter = 0

    def __init__(self):
        # The default logging level is INFO.
        # Comment this line to set debug logging off
        # self.log.setLevel(logging.DEBUG)
        # No configurable parameters are exposed by this plugin.
        params = dict()
        AdPythonPlugin.__init__(self, params)

    def processArray(self, arr, attr={}):
        """Process one detector frame; returns the annotated uint8 image.

        NOTE(review): the mutable default ``attr={}`` is shared between
        calls; harmless here since it is only written to, but confirm the
        plugin framework always passes ``attr`` explicitly.
        """
        arr = numpy.float32(arr)
        # Run a median filter over the image to remove the spikes due to dead pixels.
        arr = numpy.float32(scipy.ndimage.median_filter(arr, size=3))
        attr["messedWith"] = True
        ret, thresh = cv2.threshold(arr, 127, 255, 0)
        thresh = numpy.uint8(thresh)
        # NOTE(review): OpenCV 3.x returns three values from findContours;
        # this 2-value unpacking matches 2.x/4.x — confirm the target version.
        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
        cv2.drawContours(arr, contours, -1, (255,255,255))
        # return the resultant array.
        return numpy.uint8(arr)

if __name__=="__main__":
    SlowBenchmark().runOffline()
|
19,907 | 19abd54acd8ea0159afd3daa96d0cfd307c05211 | #!/usr/bin/python3
import requests as rq
from hbtn_api.urls import URL_AUTH
def auth(apikey, email, psswd, *args):
    """Authenticate against the API and return the decoded JSON response."""
    payload = {
        'api_key': apikey,
        'email': email,
        'password': psswd,
        'scope': 'checker',
    }
    response = rq.get(URL_AUTH, data=payload)
    return response.json()
|
# Resistor color bands in ascending digit order (black=0 ... white=9).
ordered_colors = [
    "black", "brown", "red", "orange", "yellow",
    "green", "blue", "violet", "grey", "white",
]


def code(color):
    """Return the digit encoded by *color*."""
    return ordered_colors.index(color)


def value(colors):
    """Return the two-digit value encoded by the first two color bands."""
    tens, ones = colors[0], colors[1]
    return 10 * code(tens) + code(ones)
19,909 | 35b1228ceffc37c8c4d2ac619204d75f79ebef97 | """
Read raw financial data and convert them to formatted data tables
"""
__all__ = ["SECTableReader"]
from sec.file_operations import SECFileOps
from sec.table import Table
from html.parser import HTMLParser
class RawTableCleaner(HTMLParser):
    """
    HTML parser that strips a table down to its td/tr/th tags (keeping only
    the rowspan/colspan attributes) plus the non-blank text between them.
    """

    KEPT_TAGS = ("td", "tr", "th")
    KEPT_ATTRS = ("rowspan", "colspan")

    def __init__(self):
        HTMLParser.__init__(self)
        self.table = ""  # accumulated cleaned-table text

    def clear_table(self):
        """Reset the accumulated table text."""
        self.table = ""

    def _get_attribute_dic(self, attrs):
        """Convert the parser's (name, value) attribute pairs to a dict.

        returns
        --------
        :dict
            attributes
        """
        return {name: value for name, value in attrs}

    def handle_starttag(self, tag, attrs):
        """Emit only td/tr/th start tags, keeping rowspan/colspan."""
        if tag not in self.KEPT_TAGS:
            return
        attributes = self._get_attribute_dic(attrs)
        kept = ""
        for name in attributes:
            if name in self.KEPT_ATTRS:
                kept += ' {}:{} '.format(name, attributes[name])
        self.table += "<{}{}>".format(tag, kept)

    def handle_endtag(self, tag):
        """Emit only td/tr/th end tags."""
        if tag in self.KEPT_TAGS:
            self.table += "</{}>".format(tag)

    def handle_data(self, data):
        """Append text content, skipping whitespace-only runs."""
        if data.strip():
            self.table += data
class SECTableReader:
    """
    Extracts readable tables from saved financial statements.
    """
    def __init__(self, data_folder):
        # File-access layer over the locally saved raw SEC filings.
        self.sec_file_ops = SECFileOps(data_folder)
        folders = self.sec_file_ops.get_all_folders()
        # print("folders: \n", folders)

    def _extract_raw_table(self, expr):
        """ Extracts the string between "<table" and "/table>" i.e. the table from the raw html data.
        parameters
        --------
        expr: str
            raw input html data
        returns
        --------
        :str
            table part of the raw html data
        """
        str_start = "<table"
        str_end = "/table>"
        ind_start = expr.find(str_start)
        assert ind_start >= 0
        ind_end = expr.find(str_end)
        assert ind_end >= 0
        return expr[ind_start: ind_end + len(str_end)]

    def _remove_unnecessary_tags(self, raw_data):
        """ Extracts the raw table, removes parasitic tags and keeps useful table data.
        parameter
        --------
        raw_data: str
            raw table data
        returns
        --------
        :str
            useful table data with unwanted tags removed
        """
        parser = RawTableCleaner()
        parser.clear_table()
        table = self._extract_raw_table(raw_data)
        parser.feed(table)
        return parser.table

    def get_quarterly_report_dates(self, symbol):
        """It finds the dates of the saved quarterly reports
        parameters
        --------
        symbol: str
            the stock symbol
        returns
        --------
        : list
            reported dates
        """
        return self.sec_file_ops.get_dates_of_saved_raw_quarterly_financial_statements(symbol)

    def get_quarterly_report_tables(self, symbol, date):
        """ It extracts the quarterly report tables for the given symbol at the given date
        parameters
        --------
        symbol: str
            the stock symbol
        date: datetime.date
            the report date
        returns
        --------
        : dict
            {statement_title: table}
        """
        raw_data = self.sec_file_ops.get_raw_quarterly_financial_statement(symbol, date)
        # First pass: strip each statement's HTML down to the bare table text.
        cleared_tables = {}
        for title in raw_data:
            cleared_tables[title] = self._remove_unnecessary_tags(raw_data[title])
        # print(cleared_tables)
        # Second pass: parse the cleaned text into Table objects.
        tables = {}
        for title in cleared_tables:
            table = Table()
            table.read_tablecontent(cleared_tables[title])
            tables[title] = table
        for title in tables:
            tables[title].setup_linked_rows()
            # tables[title].print(linked=True)
        return tables
|
19,910 | 92558f7dfe9d63c0e12e4a46edc9359350258c0e | """Base forms and form utilities."""
import os
import html
import secrets
import filetype
class Form(object):
    """Base Form: holds request parameters, runs validation rules and gives
    typed access to inputs and uploaded files."""

    def __init__(self):
        """Setup attributes."""
        self.request = None   # set by the caller before validate()
        self.params = {}      # sanitized request parameters
        self.valid = False
        self.errors = {}      # {input_name: [error message, ...]}

    def sanitize(self, _input):
        """
        Sanitize input with default sanitization.

        Can be extended, though make sure to call parent
        if not extending complete functionality.

        returns dictionary containing sanitized input.
        """
        sanitized = {}
        for key, inp in _input.items():
            # Keys are HTML-escaped; non-string keys are left untouched.
            try:
                key = html.escape(key).strip()
            except AttributeError:
                pass
            # Values are intentionally NOT escaped here (see git history);
            # escaping them broke file uploads and non-string inputs.
            sanitized[key] = inp
        return sanitized

    def rules(self):
        """Return {input_name: [validator, ...]}; override in subclasses."""
        return {}

    def validate(self):
        """Sanitize the request input, run every rule and return validity."""
        self.params = self.sanitize(self.request.allInput())
        rules = self.rules()
        for name, validator_list in rules.items():
            for validator in validator_list:
                msg = validator(name, self)
                if msg is not None:
                    self.errors.setdefault(name, []).append(msg)
        # Valid exactly when no rule produced an error.
        return not len(self.errors)

    def extract(self):
        """Extract the values from the form."""
        pass

    def has(self, name):
        """Check if the form has a non-blank input."""
        try:
            if self.input(name) is None:
                return False
        except KeyError:
            return False
        return True

    def input(self, name):
        """
        Helper method to get the input.

        Returns None if the input is blank or missing.
        Only works after validation has been called (or during validation).
        """
        try:
            _input = self.params[name]
            try:
                _input = _input.strip()
            except AttributeError:
                pass  # non-string inputs are returned as-is
            if _input == '':
                return None
            return _input
        except KeyError:
            return None

    def has_file(self, name):
        """
        Check if the file was uploaded.

        Files behave differently to normal inputs and the has() function
        will return true no matter if the file was uploaded or not.
        """
        return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')

    def store_file(self, name, location, filename=None, extension=None):
        """
        Store an uploaded file with input "name" to a location.

        If extension is None it will try to find the correct extension.
        If filename is None a random hex name is generated.
        Returns the full path of the stored file.
        """
        f = self.input(name)
        if filename is None:
            filename = secrets.token_hex(16)
        if extension is None:
            _type = filetype.guess(f.value)
            if _type is not None:
                extension = _type.extension
        # Fix: the generated/supplied filename was previously ignored and a
        # literal placeholder name was used, so every upload overwrote the
        # previous one.
        path = os.path.join(location, f'{filename}.{extension}')
        if not os.path.exists(location) or not os.path.isdir(location):
            os.mkdir(location)
        with open(path, 'wb') as _file:
            _file.write(f.value)
        return path
|
19,911 | d761333150b38a1cdc9e26ce92992b4f5dd710ed | import utils.tokens as tokens
from utils.message import *
from utils.auth import Auth
import utils.plugin as plugins
import asyncio
import websockets
import json, random, os, sys
# Port may be given as the first CLI argument; default to 8090 otherwise.
try:
    port = sys.argv[1]
except:
    port = 8090
os.system('clear')
# The channel list is persisted in channels.json and re-read while running.
with open('channels.json', 'r') as f:
    channels = json.load(f)
default_channel = 'general'
# Connected clients; entries are indexed as [id, username, websocket]
# elsewhere in this file — TODO confirm the exact tuple layout.
CLIENTS = []
async def handleConnection(websocket, path):
    """Per-connection websocket handler: delegates work to the plugin hooks
    and cleans the CLIENTS registry when the peer disconnects."""
    global channels
    # Plugins may fully handle (or veto) the new connection.
    if await plugins.handleConnect(websocket, CLIENTS, (channels, default_channel, plugins)) == 'Exit':
        return
    async for message in websocket:
        try:
            await updateChannelList()
            await delDuplicateWebsockets()
            message = Message(message)
            if await plugins.handleMessage(message, websocket, CLIENTS, (channels, default_channel, plugins)) == 'Exit':
                return
        except asyncio.exceptions.CancelledError:
            # Connection dropped mid-message: tell everyone else and remove
            # this client from the registry.
            # NOTE(review): deleting CLIENTS entries by index while iterating
            # the same list can skip elements — verify before relying on it.
            for j, i in enumerate(CLIENTS):
                if i[2] == websocket:
                    for iii, ii in enumerate(CLIENTS):
                        if ii != i:
                            try:
                                await ii[2].send(formatMessage('leave', username = i[1]))
                            except:
                                del CLIENTS[iii]
                    del CLIENTS[j]
    # Normal disconnect path: same broadcast-and-cleanup as above.
    for j, i in enumerate(CLIENTS):
        if i[2] == websocket:
            print(f"- {i[1]} has left")
            for iii, ii in enumerate(CLIENTS):
                if ii != i:
                    try:
                        await ii[2].send(formatMessage('leave', username = i[1]))
                    except:
                        del CLIENTS[iii]
            del CLIENTS[j]
    if await plugins.handleDisconnect(websocket, CLIENTS, (channels, default_channel, plugins)) == 'Exit':
        return
async def updateChannelList():
    """Reload channels.json and broadcast channel create/delete events.

    Fixes two crashes in the original: the second loop iterated an
    undefined name ``channel`` (NameError), and both loops passed keyword
    arguments directly to ``websocket.send`` — the payload must be built
    with ``formatMessage`` as done elsewhere in this file.
    """
    global channels
    previous = channels
    with open('channels.json', 'r') as f:
        channels = json.load(f)
    if len(previous) != len(channels):
        # Channels removed from the file.
        for name in previous:
            if name not in channels:
                for client in CLIENTS:
                    await client[2].send(formatMessage('channel_delete', channel=name))
        # Channels added to the file.
        for name in channels:
            if name not in previous:
                for client in CLIENTS:
                    await client[2].send(formatMessage('channel_create', channel=name))
async def delDuplicateWebsockets():
    """Close and drop CLIENTS entries that share an id with another entry.

    NOTE(review): this mutates CLIENTS (``del CLIENTS[j]``) while iterating
    it in both loops, which can skip elements; it also closes ``i``'s
    websocket but deletes ``ii``'s registry entry — confirm that asymmetry
    is intended before restructuring.
    """
    for i in CLIENTS:
        for j, ii in enumerate(CLIENTS):
            if i[0] == ii[0] and i != ii:
                try:
                    await i[2].close()
                except:
                    pass
                try:
                    del CLIENTS[j]
                except:
                    pass
async def main():
    """Start the websocket server on every interface and serve forever."""
    global channels
    print('Server Ready')
    # Ping timeouts are disabled so slow or idle clients are never dropped
    # by the websockets library itself.
    async with websockets.serve(handleConnection, "0.0.0.0", port, ping_timeout=None, ping_interval=None):
        try:
            await asyncio.Future()  # run forever
        except:
            pass

asyncio.run(main())
19,912 | 7d2d85509cbd79d25f747d70fb5457d73ff39868 | # Copyright (C) 2022 CVAT.ai Corporation
#
# SPDX-License-Identifier: MIT
import io
from logging import Logger
from pathlib import Path
from typing import Tuple
import pytest
from cvat_sdk import Client, models
from cvat_sdk.api_client import exceptions
class TestUserUsecases:
    """End-to-end SDK tests for retrieving, listing, updating and removing users."""

    @pytest.fixture(autouse=True)
    def setup(
        self,
        tmp_path: Path,
        fxt_login: Tuple[Client, str],
        fxt_logger: Tuple[Logger, io.StringIO],
        fxt_stdout: io.StringIO,
    ):
        """Bind the shared fixtures to the instance and route all client and
        API-client logging into the captured logger stream."""
        self.tmp_path = tmp_path
        logger, self.logger_stream = fxt_logger
        self.stdout = fxt_stdout
        self.client, self.user = fxt_login
        self.client.logger = logger

        api_client = self.client.api_client
        for k in api_client.configuration.logger:
            api_client.configuration.logger[k] = logger

        yield

    def test_can_retrieve_user(self):
        me = self.client.users.retrieve_current_user()

        user = self.client.users.retrieve(me.id)

        assert user.id == me.id
        assert user.username == self.user
        # The SDK must not print anything to stdout.
        assert self.stdout.getvalue() == ""

    def test_can_list_users(self):
        users = self.client.users.list()

        assert self.user in set(u.username for u in users)
        assert self.stdout.getvalue() == ""

    def test_can_update_user(self):
        user = self.client.users.retrieve_current_user()

        user.update(models.PatchedUserRequest(first_name="foo", last_name="bar"))

        # The local object must be refreshed in place by update().
        retrieved_user = self.client.users.retrieve(user.id)
        assert retrieved_user.first_name == "foo"
        assert retrieved_user.last_name == "bar"
        assert user.first_name == retrieved_user.first_name
        assert user.last_name == retrieved_user.last_name
        assert self.stdout.getvalue() == ""

    def test_can_remove_user(self):
        users = self.client.users.list()
        # Pick any user other than the one running the tests.
        removed_user = next(u for u in users if u.username != self.user)

        removed_user.remove()

        with pytest.raises(exceptions.NotFoundException):
            removed_user.fetch()
        assert self.stdout.getvalue() == ""
|
19,913 | fc4aea744f16ecbf52edf8347d35b200fcf714ae | #ćwiczenie 39
# Exercise 39: translate a Polish number word (1-5) into its digit.
WordToNums = {
    "jeden": 1,
    "dwa": 2,
    "trzy": 3,
    "cztery": 4,
    "pięć": 5,
}

wybrany = input("Podaj cyfre słownie z zakresu (1-5)").lower()

if wybrany in WordToNums:
    print(WordToNums[wybrany])
else:
    print("Podałeś zły numer")
|
19,914 | 156994e34f4d993d8f54b69423f54f29edff3f8b | import streamlit as st
import pandas as pd
import numpy as np
from utils import conf, frontend, geo
from ml.search import query
from PIL import Image
frontend.set_max_width(1200)  # widen the default streamlit content column

# Wallpaper at the top of the page
wallpaper = Image.open('static_data/zanzibar.jpg')
wallpaper = wallpaper.resize((1280, 500))
st.image(wallpaper, use_column_width=True)

# App title
st.title('SmartTravel')
st.write('')

# Request block
request = st.text_input(
    label='Enter your request',
    value=conf.Config.example_query
)
st.write('')
st.subheader('User request')
st.write(request)

# Filter on number of search results
num_results = st.sidebar.slider("Number of search results", min_value=3, max_value=20, value=10)

# Decision box of recommendation model
model_name = st.sidebar.selectbox(
    'Chose the search algorithm',
    ('Tf-Idf', 'LDA', 'Doc2Vec')
)

# Request Dataframe
result = query(request, num_results, model_name)
result['URL'] = result['URL'].apply(frontend.make_clickable_link)

# Result Table placeholder (created now, filled last, so the map renders above it)
st.subheader('Search result')
result_table = st.empty()

# Map displaying search results
display_map = st.sidebar.radio('Display Map', ('Yes', 'No'), index=1)
if display_map == 'Yes':
    st.write('')
    st.subheader('Map')
    geo_df = geo.get_coords(result['Name'])
    st.map(geo_df)

# Fill the result table placeholder; escape=False plus unsafe_allow_html keeps
# the clickable URL anchors rendered as real HTML links.
result = result.to_html(escape=False)
result_table.write(result, unsafe_allow_html=True)
19,915 | 873e180c37e1ecca35d9cd38818a8d803846b49f | import foauth.providers
class Runkeeper(foauth.providers.OAuth2):
    """foauth provider definition for the Runkeeper Health Graph API."""
    # General info about the provider
    provider_url = 'http://runkeeper.com/'
    docs_url = 'http://developer.runkeeper.com/healthgraph/overview'
    category = 'Fitness'

    # URLs to interact with the API
    authorize_url = 'https://runkeeper.com/apps/authorize'
    access_token_url = 'https://runkeeper.com/apps/token'
    api_domain = 'api.runkeeper.com'

    available_permissions = [
        (None, 'access your health and fitness data'),
    ]

    def get_user_id(self, key):
        """Return the authenticated user's Runkeeper userID as a string.

        NOTE(review): this reads ``self.api_domains`` (plural) while the
        class defines ``api_domain`` — confirm the OAuth2 base class derives
        ``api_domains`` from ``api_domain``, otherwise this raises
        AttributeError.
        """
        r = self.api(key, self.api_domains[0], u'/user')
        return unicode(r.json()[u'userID'])
|
19,916 | 6f5d45798d70ec39735f4088f55ae2985269ddfd | import json
import os
from collections import Counter
from nltk import word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
# define variables
articles_path = []   # Directories objects, one per month folder
articleTitles = []
articleData = []
# Per-article character/word counters, reset after each article is written.
numberWordCount = 0
exclamWordCount = 0
capWordCount = 0
commaWordCount = 0
questionWordCount = 0
quotationWordCount = 0
tokenContent = []
# a subset of all sources for the articles in the NELA2017 dataset
# sources = ["AP", "BBC", "PBS", "Salon", "Slate", "The New York Times", "BuzzFeed", "Drudge Report", "Faking News",
#            "RedState", "The Gateway Pundit", "The Huffington Post"]

# second subset sources used to determine if the results so far are dependent on the current sources being used
# (fix: was `sources = sources = [...]`, a redundant double assignment)
sources = ["CNN", "MotherJones", "NPR", "The Hill", "Vox", "Addicting Info", "New York Daily News", "Prntly",
           "The D.C. Clothesline", "The Duran", "Yahoo News", "Business Insider", "CNBC", "Daily Buzz Live",
           "The Atlantic", "The Fiscal Times", "The Guardian", "Xinhua", "Activist Post", "Bipartisan Report",
           "Breitbart", "Fox News", "Intellihub", "The Spoof", "Washington Examiner"]

# listdir() returns a list containing the names of the entries in the directory path given
# ['1_April', '2_May', '3_June', '4_July', '5_August', '6_September', '7_October'] is returned from NELA2017
month_directories = os.listdir("C:/NELA2017/NELA2017.tar/NELA2017")

# need an object to hold the month and date info together
class Directories:
    """Pairs a month directory name with the list of its date subdirectories."""
    def __init__(self, month, date):
        self.month = month  # e.g. '1_April'
        self.date = date    # e.g. ['2017-04-01', '2017-04-02', ...]
# NOTE(review): the nesting below was reconstructed from a whitespace-mangled
# copy — confirm the placement of the per-article write/reset block against
# the original file.
for m in month_directories:  # go through all items in month_directories and get contents
    # date_directories in the form ['2017-10-01', '2017-10-02', '2017-10-03', ...]
    date_directories = os.listdir("C:/NELA2017/NELA2017.tar/NELA2017/" + m)
    # create a list of objects that hold both the month and date for article paths
    directoryPath = Directories(m, date_directories)
    articles_path.append(directoryPath)

# the path to the files with the HTML is C:/NELA2017/NELA2017.tar/NELA2017/"month"/"date"/"source"/"article_title.txt"
for s in sources:
    # NOTE(review): this skip-check looks in OutputCapTitle while results are
    # written to OutputCapCountTitle below, so the check never matches the
    # output file — confirm which directory is intended.
    if not os.path.isfile(
            "C:/Users/caire/Desktop/OutputData/ClassifyArticlesContentandTitle/OutputCapTitle/" + s + ".txt"):
        for p in articles_path:
            for d in p.date:
                fileFound = True
                try:
                    # get a list of articleTitles for that source on that date in format
                    # ['AP--2017-04-17--Absences fitness atmosphere _ new ways to track schools.txt',.....]
                    articleTitles = os.listdir("C:/NELA2017/NELA2017.tar/NELA2017/" + p.month + "/" + d + "/" + s)
                except FileNotFoundError:
                    fileFound = False
                if fileFound:  # if the source had articles on that date open all articles using articleTitles list
                    for articleTitle in articleTitles:
                        # empty lists for each iteration of the loop
                        tokenContent.clear()
                        articleData.clear()
                        if articleTitle != "PaxHeader":
                            # open the file and specify mode (read, write, etc.)
                            # using the keyword "with" automatically closes the file afterwards
                            with open("C:/NELA2017/NELA2017.tar/NELA2017/" + p.month + "/" + d + "/" + s + "/" +
                                      articleTitle, 'rb') as file:
                                try:
                                    articleData = json.load(file)
                                    # tokenize the article's title only
                                    tokenContent = word_tokenize(articleData['title'])
                                    for word in tokenContent:
                                        # check if any words are all in caps
                                        if word.isupper():
                                            capWordCount = capWordCount + 1
                                        # count punctuation characters inside the word
                                        for char in word:
                                            if char == "!":
                                                exclamWordCount = exclamWordCount + 1
                                            if char == "?":
                                                questionWordCount = questionWordCount + 1
                                            if char == ",":
                                                commaWordCount = commaWordCount + 1
                                            if any([char == "'", char == '"', char == '”', char == '“', char == "''",
                                                    char == "``", char == "’", char == "‘"]):
                                                quotationWordCount = quotationWordCount + 1
                                        # check if the word contains a number
                                        if any(char.isdigit() for char in word):
                                            numberWordCount = numberWordCount + 1
                                except ValueError:
                                    print("JsonDecodeError for file " + articleTitle)
                            # append this article's counts to the source file,
                            # then reset the counters for the next article
                            with open(
                                    "C:/Users/caire/Desktop/OutputData/ClassifyArticlesContentandTitle/OutputCapCountTitle/" + s + ".txt",
                                    'a', encoding='utf-8') as newFile:
                                newFile.write(
                                    str(exclamWordCount) + ", " + str(capWordCount) + ", " + str(numberWordCount) + ", " +
                                    str(questionWordCount) + ", " + str(commaWordCount) + ", " + str(quotationWordCount) + "\n")
                            numberWordCount = 0
                            capWordCount = 0
                            exclamWordCount = 0
                            questionWordCount = 0
                            quotationWordCount = 0
                            commaWordCount = 0
        print(s + "'s number of capitalized and numeric words counted for each article and added to file")
|
19,917 | f1438c4bee2ec59dcf1a5cd3ad34e016915768b7 | import sys
import random
n = int(sys.stdin.readline())
def generate_routes(num_cities=None):
    """Yield one tuple per unordered pair of cities.

    Each yielded tuple is (c1, c2, are_connected, construction_cost):
    - c1 < c2 are city indices,
    - are_connected is True with probability 0.6,
    - construction_cost is a uniform random int in [1, 20].

    Args:
        num_cities: number of cities to generate pairs for. Defaults to the
            module-level ``n`` read from stdin, preserving the original
            behavior for existing callers.
    """
    if num_cities is None:
        num_cities = n  # backward-compatible fallback to the global
    for c1 in range(num_cities):
        for c2 in range(c1 + 1, num_cities):
            are_connected = random.random() < 0.6
            construction_cost = random.randint(1, 20)
            yield (c1, c2, are_connected, construction_cost)
# Emit the generated map: first the number of cities, then one line per
# city pair formatted as "c1 c2 connected construction_cost", where
# connected is printed as "1" or "0".
print(n)
for (c1, c2, are_connected, construction_cost) in generate_routes():
    print(str(c1) + " " +
          str(c2) + " " +
          ("1 " if are_connected else "0 ") +
          str(construction_cost))
|
19,918 | ac3daffa6722b9dfcdcc379835bc8805c3554eab |
# Read an m x n integer matrix (m rows of n space-separated ints) followed
# by an n-vector (one component per line), then print the matrix-vector
# product, one component per line.
(m, n) = map(int, input().split())
matrix = []
for _ in range(m):
    matrix.append(list(map(int, input().split())))
vector = []
for _ in range(n):
    vector.append(int(input()))
for row in matrix:
    # Dot product of the current row with the vector. Uses built-in sum()
    # over zipped pairs instead of a manual accumulator that shadowed the
    # `sum` builtin in the original.
    print(sum(a * b for a, b in zip(row, vector)))
19,919 | cbe8913fd889fcbc3a389bc4edff648af979d93e | # http flask
from flask import Flask
app = Flask(__name__)
@app.route("/")
def hello():
    """Root endpoint: confirm that the TLS-wrapped service answered."""
    return "You called the service with https"
# Directory holding the TLS certificate and its private key.
cert_dir = 'cert/'

if __name__ == '__main__':
    # Serve the app over HTTPS using the (certificate, key) file pair.
    app.run(ssl_context=(cert_dir+'cert.pem', cert_dir+'private_key.pem'))
19,920 | a4924b5664bce1a1446561de27254aeb5ac59c6f | import numpy as np
import h5py
import os
import glob
import pickle
class Loader:
    """Collect every dataset from the HDF5 files in the working directory
    into a single dict and pickle it to "data-dict" for later inspection."""

    def __init__(self):
        # Working directory and the .hdf5 files found directly inside it.
        self.PATH = os.getcwd()
        self.DATA = glob.glob(self.PATH + "/*.hdf5")
        self.force_data = []
        self.x_data = []
        self.x_d_data = []
        self.episode_rewards = []
        # Flat mapping of dataset name -> numpy array, filled by _start().
        self.data_dict = {}

    def load_data(self):
        """Read all HDF5 files and dump their datasets to the pickle file."""
        print(self.PATH)
        self._start()

    def _start(self):
        # Flatten every dataset of every file into data_dict, keyed by the
        # dataset's own name. NOTE(review): later files overwrite earlier
        # entries on name clashes — presumably intended; confirm.
        for data_file in self.DATA:
            with h5py.File(data_file, 'r') as f:
                for key in f:
                    for val in f[key]:
                        self.data_dict[str(val)] = np.array(f[key][str(val)])
        # Use a context manager so the output file handle is closed
        # (the original passed an unclosed open() straight to pickle.dump).
        with open("data-dict", "wb") as out:
            pickle.dump(self.data_dict, out, protocol=2)

    def view_pickle(self):
        """Load the pickled dict back and print a size summary per key."""
        print(self.PATH)
        with open("{0}/data-dict".format(self.PATH), "rb") as fh:
            pickle_view = pickle.load(fh)
        for k in pickle_view.keys():
            print(np.size(pickle_view[k]), k)
# Script entry: ingest all local HDF5 files into the pickle, then print a
# per-key size summary of what was stored.
m = Loader()
m.load_data()
m.view_pickle()
|
19,921 | 3e983da789d22ec75f7c8eee9cf5e0368f361623 | {% extends 'user_menu.html' %}
{% block detail_block %}
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Tag-it! Example</title>
<!-- These few CSS files are just to make this example page look nice. You can ignore them. -->
<link rel="stylesheet" type="text/css" href="http://yui.yahooapis.com/2.9.0/build/reset-fonts/reset-fonts.css">
<link rel="stylesheet" type="text/css" href="http://yui.yahooapis.com/2.9.0/build/base/base-min.css">
<link href="http://fonts.googleapis.com/css?family=Brawler" rel="stylesheet" type="text/css">
<link href="_static/master.css" rel="stylesheet" type="text/css">
<link href="_static/subpage.css" rel="stylesheet" type="text/css">
<link href="_static/examples.css" rel="stylesheet" type="text/css">
<!-- /ignore -->
<!-- INSTRUCTIONS -->
<!-- 2 CSS files are required: -->
<!-- * Tag-it's base CSS (jquery.tagit.css). -->
<!-- * Any theme CSS (either a jQuery UI theme such as "flick", or one that's bundled with Tag-it, e.g. tagit.ui-zendesk.css as in this example.) -->
<!-- The base CSS and tagit.ui-zendesk.css theme are scoped to the Tag-it widget, so they shouldn't affect anything else in your site, unlike with jQuery UI themes. -->
<link href="css/jquery.tagit.css" rel="stylesheet" type="text/css">
<link href="css/tagit.ui-zendesk.css" rel="stylesheet" type="text/css">
<!-- If you want the jQuery UI "flick" theme, you can use this instead, but it's not scoped to just Tag-it like tagit.ui-zendesk is: -->
<!-- <link rel="stylesheet" type="text/css" href="http://ajax.googleapis.com/ajax/libs/jqueryui/1/themes/flick/jquery-ui.css"> -->
<!-- jQuery and jQuery UI are required dependencies. -->
<!-- Although we use jQuery 1.4 here, it's tested with the latest too (1.8.3 as of writing this.) -->
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.10.2/jquery.min.js" type="text/javascript" charset="utf-8"></script>
<script src="https://ajax.googleapis.com/ajax/libs/jqueryui/1.9.2/jquery-ui.min.js" type="text/javascript" charset="utf-8"></script>
<!-- The real deal -->
<script src="js/tag-it.js" type="text/javascript" charset="utf-8"></script>
<style>
ul.tagit {
padding: 1px 5px;
overflow: auto;
margin-left: inherit; /* usually we don't want the regular ul margins. */
margin-right: inherit;
}
ul.tagit li {
display: block;
float: left;
margin: 2px 5px 2px 0;
}
ul.tagit li.tagit-choice {
position: relative;
line-height: inherit;
}
input.tagit-hidden-field {
display: none;
}
ul.tagit li.tagit-choice-read-only {
padding: .2em .5em .2em .5em;
}
ul.tagit li.tagit-choice-editable {
padding: .2em 18px .2em .5em;
}
ul.tagit li.tagit-new {
padding: .25em 4px .25em 0;
}
ul.tagit li.tagit-choice a.tagit-label {
cursor: pointer;
text-decoration: none;
}
ul.tagit li.tagit-choice .tagit-close {
cursor: pointer;
position: absolute;
right: .1em;
top: 50%;
margin-top: -8px;
line-height: 17px;
}
/* used for some custom themes that don't need image icons */
ul.tagit li.tagit-choice .tagit-close .text-icon {
display: none;
}
ul.tagit li.tagit-choice input {
display: block;
float: left;
margin: 2px 5px 2px 0;
}
ul.tagit input[type="text"] {
-moz-box-sizing: border-box;
-webkit-box-sizing: border-box;
box-sizing: border-box;
-moz-box-shadow: none;
-webkit-box-shadow: none;
box-shadow: none;
border: none;
margin: 0;
padding: 0;
width: inherit;
background-color: inherit;
outline: none;
}
/* Optional scoped theme for tag-it which mimics the zendesk widget. */
ul.tagit {
border-style: solid;
border-width: 1px;
border-color: #C6C6C6;
background: inherit;
}
ul.tagit li.tagit-choice {
-moz-border-radius: 6px;
border-radius: 6px;
-webkit-border-radius: 6px;
border: 1px solid #CAD8F3;
background: none;
background-color: #DEE7F8;
font-weight: normal;
}
ul.tagit li.tagit-choice .tagit-label:not(a) {
color: #555;
}
ul.tagit li.tagit-choice a.tagit-close {
text-decoration: none;
}
ul.tagit li.tagit-choice .tagit-close {
right: .4em;
}
ul.tagit li.tagit-choice .ui-icon {
display: none;
}
ul.tagit li.tagit-choice .tagit-close .text-icon {
display: inline;
font-family: arial, sans-serif;
font-size: 16px;
line-height: 16px;
color: #777;
}
ul.tagit li.tagit-choice:hover, ul.tagit li.tagit-choice.remove {
background-color: #bbcef1;
border-color: #6d95e0;
}
ul.tagit li.tagit-choice a.tagLabel:hover,
ul.tagit li.tagit-choice a.tagit-close .text-icon:hover {
color: #222;
}
ul.tagit input[type="text"] {
color: #333333;
background: none;
}
.ui-widget {
font-size: 1.1em;
}
/* Forked from a jQuery UI theme, so that we don't require the jQuery UI CSS as a dependency. */
.tagit-autocomplete.ui-autocomplete { position: absolute; cursor: default; }
* html .tagit-autocomplete.ui-autocomplete { width:1px; } /* without this, the menu expands to 100% in IE6 */
.tagit-autocomplete.ui-menu {
list-style:none;
padding: 2px;
margin: 0;
display:block;
float: left;
}
.tagit-autocomplete.ui-menu .ui-menu {
margin-top: -3px;
}
.tagit-autocomplete.ui-menu .ui-menu-item {
margin:0;
padding: 0;
zoom: 1;
float: left;
clear: left;
width: 100%;
}
.tagit-autocomplete.ui-menu .ui-menu-item a {
text-decoration:none;
display:block;
padding:.2em .4em;
line-height:1.5;
zoom:1;
}
.tagit-autocomplete .ui-menu .ui-menu-item a.ui-state-hover,
.tagit-autocomplete .ui-menu .ui-menu-item a.ui-state-active {
font-weight: normal;
margin: -1px;
}
.tagit-autocomplete.ui-widget-content { border: 1px solid #aaaaaa; background: #ffffff 50% 50% repeat-x; color: #222222; }
.tagit-autocomplete.ui-corner-all, .tagit-autocomplete .ui-corner-all { -moz-border-radius: 4px; -webkit-border-radius: 4px; -khtml-border-radius: 4px; border-radius: 4px; }
.tagit-autocomplete .ui-state-hover, .tagit-autocomplete .ui-state-focus { border: 1px solid #999999; background: #dadada; font-weight: normal; color: #212121; }
.tagit-autocomplete .ui-state-active { border: 1px solid #aaaaaa; }
.tagit-autocomplete .ui-widget-content { border: 1px solid #aaaaaa; }
.tagit .ui-helper-hidden-accessible { position: absolute !important; clip: rect(1px,1px,1px,1px); }
</style>
<script>
$(function(){
var sampleTags = ['c++', 'java', 'php', 'coldfusion', 'javascript', 'asp', 'ruby', 'python', 'c', 'scala', 'groovy', 'haskell', 'perl', 'erlang', 'apl', 'cobol', 'go', 'lua'];
//-------------------------------
// Minimal
//-------------------------------
$('#myTags').tagit();
//-------------------------------
// Single field
//-------------------------------
$('#singleFieldTags').tagit({
availableTags: sampleTags,
// This will make Tag-it submit a single form value, as a comma-delimited field.
singleField: true,
singleFieldNode: $('#mySingleField')
});
// singleFieldTags2 is an INPUT element, rather than a UL as in the other
// examples, so it automatically defaults to singleField.
$('#singleFieldTags2').tagit({
availableTags: sampleTags
});
//-------------------------------
// Preloading data in markup
//-------------------------------
$('#myULTags').tagit({
availableTags: sampleTags, // this param is of course optional. it's for autocomplete.
// configure the name of the input field (will be submitted with form), default: item[tags]
itemName: 'item',
fieldName: 'tags'
});
//-------------------------------
// Tag events
//-------------------------------
var eventTags = $('#eventTags');
var addEvent = function(text) {
$('#events_container').append(text + '<br>');
};
eventTags.tagit({
availableTags: sampleTags,
beforeTagAdded: function(evt, ui) {
if (!ui.duringInitialization) {
addEvent('beforeTagAdded: ' + eventTags.tagit('tagLabel', ui.tag));
}
},
afterTagAdded: function(evt, ui) {
if (!ui.duringInitialization) {
addEvent('afterTagAdded: ' + eventTags.tagit('tagLabel', ui.tag));
}
},
beforeTagRemoved: function(evt, ui) {
addEvent('beforeTagRemoved: ' + eventTags.tagit('tagLabel', ui.tag));
},
afterTagRemoved: function(evt, ui) {
addEvent('afterTagRemoved: ' + eventTags.tagit('tagLabel', ui.tag));
},
onTagClicked: function(evt, ui) {
addEvent('onTagClicked: ' + eventTags.tagit('tagLabel', ui.tag));
},
onTagExists: function(evt, ui) {
addEvent('onTagExists: ' + eventTags.tagit('tagLabel', ui.existingTag));
}
});
//-------------------------------
// Read-only
//-------------------------------
$('#readOnlyTags').tagit({
readOnly: true
});
//-------------------------------
// Tag-it methods
//-------------------------------
$('#methodTags').tagit({
availableTags: sampleTags
});
//-------------------------------
// Allow spaces without quotes.
//-------------------------------
$('#allowSpacesTags').tagit({
availableTags: sampleTags,
allowSpaces: true
});
//-------------------------------
// Remove confirmation
//-------------------------------
$('#removeConfirmationTags').tagit({
availableTags: sampleTags,
removeConfirmation: true
});
});
/*
* jQuery UI Tag-it!
*
* @version v2.0 (06/2011)
*
* Copyright 2011, Levy Carneiro Jr.
* Released under the MIT license.
* http://aehlke.github.com/tag-it/LICENSE
*
* Homepage:
* http://aehlke.github.com/tag-it/
*
* Authors:
* Levy Carneiro Jr.
* Martin Rehfeld
* Tobias Schmidt
* Skylar Challand
* Alex Ehlke
*
* Maintainer:
* Alex Ehlke - Twitter: @aehlke
*
* Dependencies:
* jQuery v1.4+
* jQuery UI v1.8+
*/
(function($) {
$.widget('ui.tagit', {
options: {
allowDuplicates : false,
caseSensitive : true,
fieldName : 'tags',
placeholderText : null, // Sets `placeholder` attr on input field.
readOnly : false, // Disables editing.
removeConfirmation: false, // Require confirmation to remove tags.
tagLimit : null, // Max number of tags allowed (null for unlimited).
// Used for autocomplete, unless you override `autocomplete.source`.
availableTags : [],
// Use to override or add any options to the autocomplete widget.
//
// By default, autocomplete.source will map to availableTags,
// unless overridden.
autocomplete: {},
// Shows autocomplete before the user even types anything.
showAutocompleteOnFocus: false,
// When enabled, quotes are unneccesary for inputting multi-word tags.
allowSpaces: false,
// The below options are for using a single field instead of several
// for our form values.
//
// When enabled, will use a single hidden field for the form,
// rather than one per tag. It will delimit tags in the field
// with singleFieldDelimiter.
//
// The easiest way to use singleField is to just instantiate tag-it
// on an INPUT element, in which case singleField is automatically
// set to true, and singleFieldNode is set to that element. This
// way, you don't need to fiddle with these options.
singleField: false,
// This is just used when preloading data from the field, and for
// populating the field with delimited tags as the user adds them.
singleFieldDelimiter: ',',
// Set this to an input DOM node to use an existing form field.
// Any text in it will be erased on init. But it will be
// populated with the text of tags as they are created,
// delimited by singleFieldDelimiter.
//
// If this is not set, we create an input node for it,
// with the name given in settings.fieldName.
singleFieldNode: null,
// Whether to animate tag removals or not.
animate: true,
// Optionally set a tabindex attribute on the input that gets
// created for tag-it.
tabIndex: null,
// Event callbacks.
beforeTagAdded : null,
afterTagAdded : null,
beforeTagRemoved : null,
afterTagRemoved : null,
onTagClicked : null,
onTagLimitExceeded : null,
// DEPRECATED:
//
// /!\ These event callbacks are deprecated and WILL BE REMOVED at some
// point in the future. They're here for backwards-compatibility.
// Use the above before/after event callbacks instead.
onTagAdded : null,
onTagRemoved: null,
// `autocomplete.source` is the replacement for tagSource.
tagSource: null
// Do not use the above deprecated options.
},
_create: function() {
// for handling static scoping inside callbacks
var that = this;
// There are 2 kinds of DOM nodes this widget can be instantiated on:
// 1. UL, OL, or some element containing either of these.
// 2. INPUT, in which case 'singleField' is overridden to true,
// a UL is created and the INPUT is hidden.
if (this.element.is('input')) {
this.tagList = $('<ul></ul>').insertAfter(this.element);
this.options.singleField = true;
this.options.singleFieldNode = this.element;
this.element.addClass('tagit-hidden-field');
} else {
this.tagList = this.element.find('ul, ol').andSelf().last();
}
this.tagInput = $('<input type="text" />').addClass('ui-widget-content');
if (this.options.readOnly) this.tagInput.attr('disabled', 'disabled');
if (this.options.tabIndex) {
this.tagInput.attr('tabindex', this.options.tabIndex);
}
if (this.options.placeholderText) {
this.tagInput.attr('placeholder', this.options.placeholderText);
}
if (!this.options.autocomplete.source) {
this.options.autocomplete.source = function(search, showChoices) {
var filter = search.term.toLowerCase();
var choices = $.grep(this.options.availableTags, function(element) {
// Only match autocomplete options that begin with the search term.
// (Case insensitive.)
return (element.toLowerCase().indexOf(filter) === 0);
});
if (!this.options.allowDuplicates) {
choices = this._subtractArray(choices, this.assignedTags());
}
showChoices(choices);
};
}
if (this.options.showAutocompleteOnFocus) {
this.tagInput.focus(function(event, ui) {
that._showAutocomplete();
});
if (typeof this.options.autocomplete.minLength === 'undefined') {
this.options.autocomplete.minLength = 0;
}
}
// Bind autocomplete.source callback functions to this context.
if ($.isFunction(this.options.autocomplete.source)) {
this.options.autocomplete.source = $.proxy(this.options.autocomplete.source, this);
}
// DEPRECATED.
if ($.isFunction(this.options.tagSource)) {
this.options.tagSource = $.proxy(this.options.tagSource, this);
}
this.tagList
.addClass('tagit')
.addClass('ui-widget ui-widget-content ui-corner-all')
// Create the input field.
.append($('<li class="tagit-new"></li>').append(this.tagInput))
.click(function(e) {
var target = $(e.target);
if (target.hasClass('tagit-label')) {
var tag = target.closest('.tagit-choice');
if (!tag.hasClass('removed')) {
that._trigger('onTagClicked', e, {tag: tag, tagLabel: that.tagLabel(tag)});
}
} else {
// Sets the focus() to the input field, if the user
// clicks anywhere inside the UL. This is needed
// because the input field needs to be of a small size.
that.tagInput.focus();
}
});
// Single field support.
var addedExistingFromSingleFieldNode = false;
if (this.options.singleField) {
if (this.options.singleFieldNode) {
// Add existing tags from the input field.
var node = $(this.options.singleFieldNode);
var tags = node.val().split(this.options.singleFieldDelimiter);
node.val('');
$.each(tags, function(index, tag) {
that.createTag(tag, null, true);
addedExistingFromSingleFieldNode = true;
});
} else {
// Create our single field input after our list.
this.options.singleFieldNode = $('<input type="hidden" style="display:none;" value="" name="' + this.options.fieldName + '" />');
this.tagList.after(this.options.singleFieldNode);
}
}
// Add existing tags from the list, if any.
if (!addedExistingFromSingleFieldNode) {
this.tagList.children('li').each(function() {
if (!$(this).hasClass('tagit-new')) {
that.createTag($(this).text(), $(this).attr('class'), true);
$(this).remove();
}
});
}
// Events.
this.tagInput
.keydown(function(event) {
// Backspace is not detected within a keypress, so it must use keydown.
if (event.which == $.ui.keyCode.BACKSPACE && that.tagInput.val() === '') {
var tag = that._lastTag();
if (!that.options.removeConfirmation || tag.hasClass('remove')) {
// When backspace is pressed, the last tag is deleted.
that.removeTag(tag);
} else if (that.options.removeConfirmation) {
tag.addClass('remove ui-state-highlight');
}
} else if (that.options.removeConfirmation) {
that._lastTag().removeClass('remove ui-state-highlight');
}
// Comma/Space/Enter are all valid delimiters for new tags,
// except when there is an open quote or if setting allowSpaces = true.
// Tab will also create a tag, unless the tag input is empty,
// in which case it isn't caught.
if (
(event.which === $.ui.keyCode.COMMA && event.shiftKey === false) ||
event.which === $.ui.keyCode.ENTER ||
(
event.which == $.ui.keyCode.TAB &&
that.tagInput.val() !== ''
) ||
(
event.which == $.ui.keyCode.SPACE &&
that.options.allowSpaces !== true &&
(
$.trim(that.tagInput.val()).replace( /^s*/, '' ).charAt(0) != '"' ||
(
$.trim(that.tagInput.val()).charAt(0) == '"' &&
$.trim(that.tagInput.val()).charAt($.trim(that.tagInput.val()).length - 1) == '"' &&
$.trim(that.tagInput.val()).length - 1 !== 0
)
)
)
) {
// Enter submits the form if there's no text in the input.
if (!(event.which === $.ui.keyCode.ENTER && that.tagInput.val() === '')) {
event.preventDefault();
}
// Autocomplete will create its own tag from a selection and close automatically.
if (!(that.options.autocomplete.autoFocus && that.tagInput.data('autocomplete-open'))) {
that.tagInput.autocomplete('close');
that.createTag(that._cleanedInput());
}
}
}).blur(function(e){
// Create a tag when the element loses focus.
// If autocomplete is enabled and suggestion was clicked, don't add it.
if (!that.tagInput.data('autocomplete-open')) {
that.createTag(that._cleanedInput());
}
});
// Autocomplete.
if (this.options.availableTags || this.options.tagSource || this.options.autocomplete.source) {
var autocompleteOptions = {
select: function(event, ui) {
that.createTag(ui.item.value);
// Preventing the tag input to be updated with the chosen value.
return false;
}
};
$.extend(autocompleteOptions, this.options.autocomplete);
// tagSource is deprecated, but takes precedence here since autocomplete.source is set by default,
// while tagSource is left null by default.
autocompleteOptions.source = this.options.tagSource || autocompleteOptions.source;
this.tagInput.autocomplete(autocompleteOptions).bind('autocompleteopen.tagit', function(event, ui) {
that.tagInput.data('autocomplete-open', true);
}).bind('autocompleteclose.tagit', function(event, ui) {
that.tagInput.data('autocomplete-open', false);
});
this.tagInput.autocomplete('widget').addClass('tagit-autocomplete');
}
},
destroy: function() {
$.Widget.prototype.destroy.call(this);
this.element.unbind('.tagit');
this.tagList.unbind('.tagit');
this.tagInput.removeData('autocomplete-open');
this.tagList.removeClass([
'tagit',
'ui-widget',
'ui-widget-content',
'ui-corner-all',
'tagit-hidden-field'
].join(' '));
if (this.element.is('input')) {
this.element.removeClass('tagit-hidden-field');
this.tagList.remove();
} else {
this.element.children('li').each(function() {
if ($(this).hasClass('tagit-new')) {
$(this).remove();
} else {
$(this).removeClass([
'tagit-choice',
'ui-widget-content',
'ui-state-default',
'ui-state-highlight',
'ui-corner-all',
'remove',
'tagit-choice-editable',
'tagit-choice-read-only'
].join(' '));
$(this).text($(this).children('.tagit-label').text());
}
});
if (this.singleFieldNode) {
this.singleFieldNode.remove();
}
}
return this;
},
_cleanedInput: function() {
// Returns the contents of the tag input, cleaned and ready to be passed to createTag
return $.trim(this.tagInput.val().replace(/^"(.*)"$/, '$1'));
},
_lastTag: function() {
return this.tagList.find('.tagit-choice:last:not(.removed)');
},
_tags: function() {
return this.tagList.find('.tagit-choice:not(.removed)');
},
assignedTags: function() {
// Returns an array of tag string values
var that = this;
var tags = [];
if (this.options.singleField) {
tags = $(this.options.singleFieldNode).val().split(this.options.singleFieldDelimiter);
if (tags[0] === '') {
tags = [];
}
} else {
this._tags().each(function() {
tags.push(that.tagLabel(this));
});
}
return tags;
},
_updateSingleTagsField: function(tags) {
// Takes a list of tag string values, updates this.options.singleFieldNode.val to the tags delimited by this.options.singleFieldDelimiter
$(this.options.singleFieldNode).val(tags.join(this.options.singleFieldDelimiter)).trigger('change');
},
_subtractArray: function(a1, a2) {
var result = [];
for (var i = 0; i < a1.length; i++) {
if ($.inArray(a1[i], a2) == -1) {
result.push(a1[i]);
}
}
return result;
},
tagLabel: function(tag) {
// Returns the tag's string label.
if (this.options.singleField) {
return $(tag).find('.tagit-label:first').text();
} else {
return $(tag).find('input:first').val();
}
},
_showAutocomplete: function() {
this.tagInput.autocomplete('search', '');
},
_findTagByLabel: function(name) {
var that = this;
var tag = null;
this._tags().each(function(i) {
if (that._formatStr(name) == that._formatStr(that.tagLabel(this))) {
tag = $(this);
return false;
}
});
return tag;
},
_isNew: function(name) {
return !this._findTagByLabel(name);
},
_formatStr: function(str) {
if (this.options.caseSensitive) {
return str;
}
return $.trim(str.toLowerCase());
},
_effectExists: function(name) {
return Boolean($.effects && ($.effects[name] || ($.effects.effect && $.effects.effect[name])));
},
createTag: function(value, additionalClass, duringInitialization) {
var that = this;
value = $.trim(value);
if(this.options.preprocessTag) {
value = this.options.preprocessTag(value);
}
if (value === '') {
return false;
}
if (!this.options.allowDuplicates && !this._isNew(value)) {
var existingTag = this._findTagByLabel(value);
if (this._trigger('onTagExists', null, {
existingTag: existingTag,
duringInitialization: duringInitialization
}) !== false) {
if (this._effectExists('highlight')) {
existingTag.effect('highlight');
}
}
return false;
}
if (this.options.tagLimit && this._tags().length >= this.options.tagLimit) {
this._trigger('onTagLimitExceeded', null, {duringInitialization: duringInitialization});
return false;
}
var label = $(this.options.onTagClicked ? '<a class="tagit-label"></a>' : '<span class="tagit-label"></span>').text(value);
// Create tag.
var tag = $('<li></li>') \
.addClass('tagit-choice ui-widget-content ui-state-default ui-corner-all') \
.addClass(additionalClass) \
.append(label);
if (this.options.readOnly){
tag.addClass('tagit-choice-read-only');
} else {
tag.addClass('tagit-choice-editable');
// Button for removing the tag.
var removeTagIcon = $('<span></span>')
.addClass('ui-icon ui-icon-close');
var removeTag = $('<a><span class="text-icon">\xd7</span></a>') // \xd7 is an X
.addClass('tagit-close')
.append(removeTagIcon)
.click(function(e) {
// Removes a tag when the little 'x' is clicked. \
that.removeTag(tag);
});
tag.append(removeTag);
}
// Unless options.singleField is set, each tag has a hidden input field inline.
if (!this.options.singleField) {
var escapedValue = label.html();
tag.append('<input type="hidden" value="' + escapedValue + '" name="' + this.options.fieldName + '" class="tagit-hidden-field" />');
}
if (this._trigger('beforeTagAdded', null, {
tag: tag,
tagLabel: this.tagLabel(tag),
duringInitialization: duringInitialization
}) === false) {
return;
}
if (this.options.singleField) {
var tags = this.assignedTags();
tags.push(value);
this._updateSingleTagsField(tags);
}
// DEPRECATED. \
this._trigger('onTagAdded', null, tag);
this.tagInput.val('');
// Insert tag. \
this.tagInput.parent().before(tag);
this._trigger('afterTagAdded', null, {
tag: tag,
tagLabel: this.tagLabel(tag),
duringInitialization: duringInitialization
});
if (this.options.showAutocompleteOnFocus && !duringInitialization) {
setTimeout(function () { that._showAutocomplete(); }, 0);
}
},
removeTag: function(tag, animate) {
    // Remove `tag` from the list, optionally animated, keeping the hidden
    // single field in sync. Fires the removal event callbacks.
    animate = typeof animate === 'undefined' ? this.options.animate : animate;

    tag = $(tag);

    // DEPRECATED event — NOTE(review): fired before the cancellable
    // beforeTagRemoved below, so it fires even when removal is vetoed.
    this._trigger('onTagRemoved', null, tag);

    // beforeTagRemoved may cancel the removal by returning false.
    if (this._trigger('beforeTagRemoved', null, {tag: tag, tagLabel: this.tagLabel(tag)}) === false) {
        return;
    }

    if (this.options.singleField) {
        // Rebuild the delimited field value without this tag's label.
        var tags = this.assignedTags();
        var removedTagLabel = this.tagLabel(tag);
        tags = $.grep(tags, function(el){
            return el != removedTagLabel;
        });
        this._updateSingleTagsField(tags);
    }

    if (animate) {
        tag.addClass('removed'); // Excludes this tag from _tags.
        // Prefer the jQuery UI 'blind' effect when available, else a plain fast hide.
        var hide_args = this._effectExists('blind') ? ['blind', {direction: 'horizontal'}, 'fast'] : ['fast'];

        var thisTag = this;
        // Defer DOM removal and the after-event until the animation completes.
        hide_args.push(function() {
            tag.remove();
            thisTag._trigger('afterTagRemoved', null, {tag: tag, tagLabel: thisTag.tagLabel(tag)});
        });

        tag.fadeOut('fast').hide.apply(tag, hide_args).dequeue();
    } else {
        tag.remove();
        this._trigger('afterTagRemoved', null, {tag: tag, tagLabel: this.tagLabel(tag)});
    }
},
removeTagByLabel: function(tagLabel, animate) {
var toRemove = this._findTagByLabel(tagLabel);
if (!toRemove) {
throw "No such tag exists with the name '" + tagLabel + "'";
}
this.removeTag(toRemove, animate);
},
removeAll: function() {
// Removes all tags.
var that = this;
this._tags().each(function(index, tag) {
that.removeTag(tag, false);
});
}
});
})(jQuery);
(function(b){b.widget("ui.tagit",{options:{allowDuplicates:!1,caseSensitive:!0,fieldName:"tags",placeholderText:null,readOnly:!1,removeConfirmation:!1,tagLimit:null,availableTags:[],autocomplete:{},showAutocompleteOnFocus:!1,allowSpaces:!1,singleField:!1,singleFieldDelimiter:",",singleFieldNode:null,animate:!0,tabIndex:null,beforeTagAdded:null,afterTagAdded:null,beforeTagRemoved:null,afterTagRemoved:null,onTagClicked:null,onTagLimitExceeded:null,onTagAdded:null,onTagRemoved:null,tagSource:null},_create:function(){var a= \
this;this.element.is("input")?(this.tagList=b("<ul></ul>").insertAfter(this.element),this.options.singleField=!0,this.options.singleFieldNode=this.element,this.element.addClass("tagit-hidden-field")):this.tagList=this.element.find("ul, ol").andSelf().last();this.tagInput=b('<input type="text" />').addClass("ui-widget-content");this.options.readOnly&&this.tagInput.attr("disabled","disabled");this.options.tabIndex&&this.tagInput.attr("tabindex",this.options.tabIndex);this.options.placeholderText&&this.tagInput.attr("placeholder",
this.options.placeholderText);this.options.autocomplete.source||(this.options.autocomplete.source=function(a,e){var d=a.term.toLowerCase(),c=b.grep(this.options.availableTags,function(a){return 0===a.toLowerCase().indexOf(d)});this.options.allowDuplicates||(c=this._subtractArray(c,this.assignedTags()));e(c)});this.options.showAutocompleteOnFocus&&(this.tagInput.focus(function(b,d){a._showAutocomplete()}),"undefined"===typeof this.options.autocomplete.minLength&&(this.options.autocomplete.minLength=
0));b.isFunction(this.options.autocomplete.source)&&(this.options.autocomplete.source=b.proxy(this.options.autocomplete.source,this));b.isFunction(this.options.tagSource)&&(this.options.tagSource=b.proxy(this.options.tagSource,this));this.tagList.addClass("tagit").addClass("ui-widget ui-widget-content ui-corner-all").append(b('<li class="tagit-new"></li>').append(this.tagInput)).click(function(d){var c=b(d.target);c.hasClass("tagit-label")?(c=c.closest(".tagit-choice"),c.hasClass("removed")||a._trigger("onTagClicked",
d,{tag:c,tagLabel:a.tagLabel(c)})):a.tagInput.focus()});var c=!1;if(this.options.singleField)if(this.options.singleFieldNode){var d=b(this.options.singleFieldNode),f=d.val().split(this.options.singleFieldDelimiter);d.val("");b.each(f,function(b,d){a.createTag(d,null,!0);c=!0})}else this.options.singleFieldNode=b('<input type="hidden" style="display:none;" value="" name="'+this.options.fieldName+'" />'),this.tagList.after(this.options.singleFieldNode);c||this.tagList.children("li").each(function(){b(this).hasClass("tagit-new")||
(a.createTag(b(this).text(),b(this).attr("class"),!0),b(this).remove())});this.tagInput.keydown(function(c){if(c.which==b.ui.keyCode.BACKSPACE&&""===a.tagInput.val()){var d=a._lastTag();!a.options.removeConfirmation||d.hasClass("remove")?a.removeTag(d):a.options.removeConfirmation&&d.addClass("remove ui-state-highlight")}else a.options.removeConfirmation&&a._lastTag().removeClass("remove ui-state-highlight");if(c.which===b.ui.keyCode.COMMA&&!1===c.shiftKey||c.which===b.ui.keyCode.ENTER||c.which==
b.ui.keyCode.TAB&&""!==a.tagInput.val()||c.which==b.ui.keyCode.SPACE&&!0!==a.options.allowSpaces&&('"'!=b.trim(a.tagInput.val()).replace(/^s*/,"").charAt(0)||'"'==b.trim(a.tagInput.val()).charAt(0)&&'"'==b.trim(a.tagInput.val()).charAt(b.trim(a.tagInput.val()).length-1)&&0!==b.trim(a.tagInput.val()).length-1))c.which===b.ui.keyCode.ENTER&&""===a.tagInput.val()||c.preventDefault(),a.options.autocomplete.autoFocus&&a.tagInput.data("autocomplete-open")||(a.tagInput.autocomplete("close"),a.createTag(a._cleanedInput()))}).blur(function(b){a.tagInput.data("autocomplete-open")||
a.createTag(a._cleanedInput())});if(this.options.availableTags||this.options.tagSource||this.options.autocomplete.source)d={select:function(b,c){a.createTag(c.item.value);return!1}},b.extend(d,this.options.autocomplete),d.source=this.options.tagSource||d.source,this.tagInput.autocomplete(d).bind("autocompleteopen.tagit",function(b,c){a.tagInput.data("autocomplete-open",!0)}).bind("autocompleteclose.tagit",function(b,c){a.tagInput.data("autocomplete-open",!1)}),this.tagInput.autocomplete("widget").addClass("tagit-autocomplete")},
destroy:function(){b.Widget.prototype.destroy.call(this);this.element.unbind(".tagit");this.tagList.unbind(".tagit");this.tagInput.removeData("autocomplete-open");this.tagList.removeClass("tagit ui-widget ui-widget-content ui-corner-all tagit-hidden-field");this.element.is("input")?(this.element.removeClass("tagit-hidden-field"),this.tagList.remove()):(this.element.children("li").each(function(){b(this).hasClass("tagit-new")?b(this).remove():(b(this).removeClass("tagit-choice ui-widget-content ui-state-default ui-state-highlight ui-corner-all remove tagit-choice-editable tagit-choice-read-only"),
b(this).text(b(this).children(".tagit-label").text()))}),this.singleFieldNode&&this.singleFieldNode.remove());return this},_cleanedInput:function(){return b.trim(this.tagInput.val().replace(/^"(.*)"$/,"$1"))},_lastTag:function(){return this.tagList.find(".tagit-choice:last:not(.removed)")},_tags:function(){return this.tagList.find(".tagit-choice:not(.removed)")},assignedTags:function(){var a=this,c=[];this.options.singleField?(c=b(this.options.singleFieldNode).val().split(this.options.singleFieldDelimiter),
""===c[0]&&(c=[])):this._tags().each(function(){c.push(a.tagLabel(this))});return c},_updateSingleTagsField:function(a){b(this.options.singleFieldNode).val(a.join(this.options.singleFieldDelimiter)).trigger("change")},_subtractArray:function(a,c){for(var d=[],f=0;f<a.length;f++)-1==b.inArray(a[f],c)&&d.push(a[f]);return d},tagLabel:function(a){return this.options.singleField?b(a).find(".tagit-label:first").text():b(a).find("input:first").val()},_showAutocomplete:function(){this.tagInput.autocomplete("search",
"")},_findTagByLabel:function(a){var c=this,d=null;this._tags().each(function(f){if(c._formatStr(a)==c._formatStr(c.tagLabel(this)))return d=b(this),!1});return d},_isNew:function(a){return!this._findTagByLabel(a)},_formatStr:function(a){return this.options.caseSensitive?a:b.trim(a.toLowerCase())},_effectExists:function(a){return Boolean(b.effects&&(b.effects[a]||b.effects.effect&&b.effects.effect[a]))},createTag:function(a,c,d){var f=this;a=b.trim(a);this.options.preprocessTag&&(a=this.options.preprocessTag(a));
if(""===a)return!1;if(!this.options.allowDuplicates&&!this._isNew(a))return a=this._findTagByLabel(a),!1!==this._trigger("onTagExists",null,{existingTag:a,duringInitialization:d})&&this._effectExists("highlight")&&a.effect("highlight"),!1;if(this.options.tagLimit&&this._tags().length>=this.options.tagLimit)return this._trigger("onTagLimitExceeded",null,{duringInitialization:d}),!1;var g=b(this.options.onTagClicked?'<a class="tagit-label"></a>':'<span class="tagit-label"></span>').text(a),e=b("<li></li>").addClass("tagit-choice ui-widget-content ui-state-default ui-corner-all").addClass(c).append(g);
this.options.readOnly?e.addClass("tagit-choice-read-only"):(e.addClass("tagit-choice-editable"),c=b("<span></span>").addClass("ui-icon ui-icon-close"),c=b('<a><span class="text-icon">\u00d7</span></a>').addClass("tagit-close").append(c).click(function(a){f.removeTag(e)}),e.append(c));this.options.singleField||(g=g.html(),e.append('<input type="hidden" value="'+g+'" name="'+this.options.fieldName+'" class="tagit-hidden-field" />'));!1!==this._trigger("beforeTagAdded",null,{tag:e,tagLabel:this.tagLabel(e),
duringInitialization:d})&&(this.options.singleField&&(g=this.assignedTags(),g.push(a),this._updateSingleTagsField(g)),this._trigger("onTagAdded",null,e),this.tagInput.val(""),this.tagInput.parent().before(e),this._trigger("afterTagAdded",null,{tag:e,tagLabel:this.tagLabel(e),duringInitialization:d}),this.options.showAutocompleteOnFocus&&!d&&setTimeout(function(){f._showAutocomplete()},0))},removeTag:function(a,c){c="undefined"===typeof c?this.options.animate:c;a=b(a);this._trigger("onTagRemoved",
null,a);if(!1!==this._trigger("beforeTagRemoved",null,{tag:a,tagLabel:this.tagLabel(a)})){if(this.options.singleField){var d=this.assignedTags(),f=this.tagLabel(a),d=b.grep(d,function(a){return a!=f});this._updateSingleTagsField(d)}if(c){a.addClass("removed");var d=this._effectExists("blind")?["blind",{direction:"horizontal"},"fast"]:["fast"],g=this;d.push(function(){a.remove();g._trigger("afterTagRemoved",null,{tag:a,tagLabel:g.tagLabel(a)})});a.fadeOut("fast").hide.apply(a,d).dequeue()}else a.remove(),
this._trigger("afterTagRemoved",null,{tag:a,tagLabel:this.tagLabel(a)})}},removeTagByLabel:function(a,b){var d=this._findTagByLabel(a);if(!d)throw"No such tag exists with the name '"+a+"'";this.removeTag(d,b)},removeAll:function(){var a=this;this._tags().each(function(b,d){a.removeTag(d,!1)})}})})(jQuery);
</script>
</head>
<body>
<a href="http://github.com/aehlke/tag-it"><img style="position: absolute; top: 0; right: 0; border: 0;" src="http://s3.amazonaws.com/github/ribbons/forkme_right_white_ffffff.png" alt="Fork me on GitHub" /></a>
<div id="wrapper">
<div id="header">
<h2>Tag-it! Usage Examples</h2>
<ul id="nav">
<li><a href="http://aehlke.github.com/tag-it">« back to widget home</a></li>
</ul>
</div>
<div id="content">
<p>These demos show various features of Tag-it. View the source to see how each works.</p>
<hr>
<h3>Minimal</h3>
<form>
<p>
Vanilla example — the absolute minimum amount of code required, no configuration. No autocomplete, either. See the other examples for that.
</p>
<ul id="myTags"></ul>
<input type="submit" value="Submit">
</form>
<hr>
<h3>Single Input Field</h3>
<form>
<p>
Example using a single input form field to hold all the tag values, instead of one per tag (see settings.singleField).
This method is particularly useful if you have a form with one input field for comma-delimited tags that you want to trivially "upgrade" to this fancy jQuery UI widget.
This configuration will also degrade nicely as well for browsers without JS — the default behavior is to have one input per tag, which does not degrade as well as one comma-delimited input.
</p>
<p>
Normally this input field will be hidden — we leave it visible here so you can see how it is manipulated by the widget:
<input name="tags" id="mySingleField" value="Apple, Orange" disabled="true"> <!-- only disabled for demonstration purposes -->
</p>
<ul id="singleFieldTags"></ul>
<input type="submit" value="Submit">
</form>
<hr>
<h3><a name="graceful-degredation"></a>Single Input Field (2)</h3>
<form>
<p>
If you instantiate Tag-it on an INPUT element, it will default to being singleField, with that INPUT element as the singleFieldNode. This is the simplest way to have a gracefully-degrading tag widget.
</p>
<input name="tags" id="singleFieldTags2" value="Apple, Orange">
</form>
<hr>
<h3>Spaces Allowed Without Quotes</h3>
<p>You can already do multiword tags with spaces in them by default, but those must be wrapped in quotes. This option lets you use spaces without requiring the user to quote the input.</p>
<p>There are normally 5 ways to insert a tag after inputting some text: space, comma, enter, selecting an autocomplete option, or defocusing the widget. With the "allowSpaces" option set to true, space no longer inserts a tag, it just adds a space to the current tag input.</p>
<form>
<p></p>
<ul id="allowSpacesTags"></ul>
</form>
<hr>
<h3>Preloading Data in Markup</h3>
<form>
<p>
Using a UL in HTML to prefill the widget with some tags.
</p>
<ul id="myULTags">
<!-- Existing list items will be pre-added to the tags. -->
<li>Tag1</li>
<li>Tag2</li>
</ul>
</form>
<hr>
<h3>Read-only</h3>
<form>
<p>Example of read only tags.</p>
<ul id="readOnlyTags">
<li>Tag1</li>
<li>Tag2</li>
</ul>
</form>
<hr>
<h3>Events</h3>
<form>
<p>Example of tag events. Try adding or removing a tag, adding a duplicate tag, or clicking on a tag's label.</p>
<ul id="eventTags">
<li>Click my label</li>
<li>Remove me</li>
</ul>
</form>
<div id="events_container"></div>
<hr>
<h3>Methods</h3>
<form>
<p>Demos the available widget methods. Click the links below the widget to try them.</p>
<ul id="methodTags"></ul>
<p><a href="#" onclick="var inp=prompt('Enter a tag value to test the createTag method.');$('#methodTags').tagit('createTag', inp);return false;">Create tag</a></p>
<p><a href="#" onclick="var inp=prompt('Enter a tag value to test the removeTagByLabel method.');$('#methodTags').tagit('removeTagByLabel', inp);return false;">Remove tag by label</a></p>
<p><a href="#" onclick="$('#methodTags').tagit('removeAll');return false;">Clear tags</a></p>
</form>
<hr>
<h3>Remove Confirmation</h3>
<form>
<p>
When removeConfirmation is enabled the user has to press the backspace key twice to remove the last tag.
</p>
<ul id="removeConfirmationTags">
<li>backspace me</li>
<li>me too</li>
</ul>
</form>
</div>
<div id="footer">
<div class="left">
<p>Built with <a href="http://jquery.com/" target="_blank">jQuery</a> and <a href="http://jqueryui.com/" target="_blank">jQuery UI</a>.</p>
<p>Originally created by <a href="http://levycarneiro.com/">Levy Carneiro Jr</a>. Currently maintained by <a href="http://github.com/aehlke">Alex Ehlke</a>.</p>
</div>
<p class="right weak">Template adopted from <a href="http://orderedlist.com/demos/fancy-zoom-jquery/">orderedlist.com</a></p>
<br class="clear"/>
</div>
</div>
</body>
</html>
{% endblock %}
|
19,922 | 37321b083f131deeea5279328dc2966ec620b69b | import sys
# sys.path.insert(0, '/home/changbinli/script/rnn/')
import pandas as pd
from test_instant import HyperParameters
import tensorflow as tf
if __name__ == "__main__":
    # Cross-validation fold index handed to the experiment runner.
    validation_fold = 0
    # read params
    # Name of the results/parameter folder for this run.
    folder_name = 'final_instant10'
    # Run inside a fresh TF graph so repeated invocations do not keep
    # accumulating ops in the process-wide default graph.
    with tf.Graph().as_default():
        # NOTE(review): hard-coded cluster scratch path — confirm it is
        # mounted on the machine this script runs on.
        MACRO_PATH = '/net/node560.scratch'
        hyperparameters = HyperParameters(VAL_FOLD=validation_fold, FOLD_NAME=folder_name, MACRO_PATH=MACRO_PATH)
        hyperparameters.main()
|
19,923 | c109cb9eb0d08c1e4a203d8b85e7e1f05a12cc59 | #!/usr/bin/env python
# coding: utf-8
# In[9]:
import pandas as pd
# In[10]:
# Load the labelled training set (tab-separated: id, sentiment, review).
df=pd.read_csv('C:\\Users\\rakesh.bachu\\Desktop\\Long Forms\\Signzy challenge\\labeledTrainData.tsv',header=0,delimiter='\t')
# In[11]:
df
# In[12]:
from nltk.corpus import stopwords
# In[13]:
from bs4 import BeautifulSoup
import nltk
import os
# In[14]:
# NOTE(review): the TextBlob / NaiveBayes / names imports below are unused
# in this script — leftovers from notebook experimentation.
from textblob import TextBlob
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import names
from nltk.corpus import stopwords
# In[15]:
# English stopword list consumed by clean_review() below.
sw=stopwords.words("english")
# In[16]:
# Lower-case every training review up front so cleaning is case-insensitive.
reviews=[]
for i in range(0,len(df['review'])):
    reviews.append(df['review'][i].lower())
# In[17]:
reviews
# In[18]:
import re
# In[19]:
def clean_review(text):
    """Normalize a raw review string for bag-of-words modelling.

    Replaces every non-alphabetic character with a space, splits on
    whitespace, drops English stopwords (the module-level ``sw`` list),
    and rejoins the surviving words with single spaces.

    Args:
        text: Raw review text (the training pipeline lower-cases it first).

    Returns:
        A space-separated string of the remaining content words.
    """
    letters_only = re.sub(r"[^a-zA-Z]", " ", text)
    # `sw` is a plain list; membership tests against a set are O(1)
    # instead of a linear scan per word.
    stop_set = set(sw)
    kept = [w for w in letters_only.split() if w not in stop_set]
    return " ".join(kept)
# In[22]:
# Clean every training review with clean_review().
cleanreviews=[]
n=len(reviews)
for i in range(0,n):
    cleanreviews.append(clean_review(reviews[i]))
# In[23]:
cleanreviews
# In[24]:
from sklearn.feature_extraction.text import CountVectorizer
# In[25]:
# Bag-of-words term-count features, capped at the 5000 most frequent terms.
vectorizer = CountVectorizer(analyzer = "word", tokenizer = None, preprocessor = None, stop_words = None, max_features = 5000)
train_data_features = vectorizer.fit_transform(cleanreviews)
train_data_features = train_data_features.toarray()
# In[26]:
# Train a random forest on the term-count matrix.
from sklearn.ensemble import RandomForestClassifier
forest = RandomForestClassifier(n_estimators = 150)
forest = forest.fit( train_data_features, df["sentiment"] )
# In[27]:
# Load the unlabelled test set and clean it the same way.
df1=pd.read_csv('C:\\Users\\rakesh.bachu\\Desktop\\Long Forms\\Signzy challenge\\testData.tsv',header=0,delimiter='\t')
# In[28]:
# NOTE(review): test reviews are not lower-cased first, unlike the
# training reviews — the two sets are cleaned slightly inconsistently.
cleantest=[]
n1=len(df1['review'])
for i in range(0,n1):
    cleantest.append(clean_review(df1['review'][i]))
# In[29]:
test_data_features = vectorizer.transform(cleantest)
test_data_features = test_data_features.toarray()
# In[30]:
# Predict sentiment and write a Kaggle-style submission file.
final = forest.predict(test_data_features)
df2 = pd.DataFrame( data={"id":df1["id"], "sentiment":final} )
# In[32]:
df2.to_csv( "C:\\Users\\rakesh.bachu\\Desktop\\submission.csv", index=False)
# In[ ]:
|
19,924 | c5b20419727ecf065f644f49e9661867865fb476 | from system.core.controller import *
from flask import flash
class Users(Controller):
    """Controller for user accounts, authentication, preferences,
    support tickets and the admin dashboard.

    Loads three models in ``__init__``: ``User`` (accounts, preferences,
    support), ``Service`` (service types / recommendations) and
    ``Dashboard`` (feedback and flagged ratings).
    """

    def __init__(self, action):
        super(Users, self).__init__(action)
        self.load_model('User')
        self.load_model('Service')
        self.load_model('Dashboard')

    def index(self):
        """Public landing page."""
        return self.load_view('index.html')

    def login_reg(self):
        """Login/registration page; already-logged-in users go to their profile."""
        if 'user' in session:
            return redirect('/edit_user')
        return self.load_view('login_reg.html')

    def login(self):
        """Validate credentials and route admins/users to their dashboards."""
        user = self.models['User'].get_user_by_email(request.form.copy())
        if user['status'] == True:
            session['user'] = user['user']
            # Admins and regular users land on different dashboards.
            if session['user']['admin_status']:
                return redirect('/admin')
            else:
                return redirect('/user')
        else:
            for message in user['errors']:
                flash(message, 'login_errors')
            return redirect('/login_reg')

    def register(self):
        """Create a new account and log the user in on success."""
        result = self.models['User'].add_users(request.form.copy())
        if result['status'] == True:
            session['user'] = result['user']
            return redirect('/edit_user')
        else:
            for message in result['errors']:
                flash(message, 'regis_errors')
            return redirect('/login_reg')

    def edit(self):
        """Profile/preferences edit page for the logged-in user."""
        if 'user' not in session:
            return redirect('/login_reg')
        preferences = self.models['User'].get_preferences_by_id(session['user']['id'])
        if preferences['status']:
            return self.load_view('edit_user.html', preferences = preferences['pref'])
        else:
            # Pass an empty dict so the template's preference lookups don't error.
            return self.load_view('edit_user.html', preferences = {})

    def update_user(self):
        """Persist account-field changes (e.g. email) for the current user."""
        if 'user' not in session:
            return redirect('/login_reg')
        result = self.models['User'].update_user_by_id(session['user']['id'], request.form.copy())
        if result['status']:
            # Keep the cached session email in sync with the database.
            session['user']['email'] = result['result']['email']
            flash("Your information has been updated successfully", 'user_errors')
        else:
            for message in result['errors']:
                flash(message, 'user_errors')
        return redirect('/edit_user')

    def update_pref(self):
        """Persist preference changes for the current user."""
        if 'user' not in session:
            return redirect('/login_reg')
        result = self.models['User'].update_prefrences_by_id(session['user']['id'], request.form.copy())
        if result['status'] == True:
            return redirect('/user')
        else:
            # BUG FIX: the failure branch previously flashed a success
            # message ("Preferences have been successfully update").
            flash('Unable to update preferences', 'pref')
            return redirect('/edit_user')

    # routes['/users'] = 'Users#all_users'
    def all_users(self):
        """Admin-only listing of all users and admins."""
        # Guard against anonymous visitors (session['user'] would KeyError).
        if 'user' not in session or session['user']['admin_status'] == 0:
            return redirect('/')
        users = self.models['User'].all_users()
        admins = self.models['User'].admins()
        return self.load_view('users.html', users = users, admins = admins)

    # routes["/destroy/user/<id>"]
    def destroy_user(self, id):
        """Admin-only: delete the user with the given id."""
        if 'user' not in session or session['user']['admin_status'] == 0:
            return redirect('/')
        self.models['User'].destroy_user(id)
        return redirect('/all_users')

    # routes["/create/admin/<id>"] = 'Users#upgrade_status'
    def upgrade_status(self, id):
        """Admin-only: grant admin status to the user with the given id."""
        if 'user' not in session or session['user']['admin_status'] == 0:
            return redirect('/')
        self.models['User'].upgrade_status(id)
        return redirect('/all_users')

    # routes["/destroy/admin/<id>"] = 'Users#revoke_status'
    def revoke_status(self, id):
        """Admin-only: revoke admin status, except from yourself."""
        if 'user' not in session or session['user']['admin_status'] == 0:
            return redirect('/')
        # Route params arrive as strings; compare as int.
        if session['user']['id'] == int(id):
            flash('Cannot revoke your own status')
            return redirect("/all_users")
        revoke = self.models['User'].revoke_status(id)
        if revoke['error']:
            flash(revoke['error'])
        return redirect('/all_users')

    def admin(self):
        """Admin dashboard: feedback, flagged ratings, support and recommendations."""
        if 'user' not in session or session['user']['admin_status'] == 0:
            return redirect('/user')
        feedback = self.models['Dashboard'].get_feedback_by_active_status()
        types = self.models['Service'].types()
        length = len(feedback)
        flags = self.models['Dashboard'].get_flagged_ratings()
        flag_length = len(flags)
        support = self.models['User'].active_support()
        support_length = len(support)
        recommended = self.models['Service'].get_recommendations()
        rec_length = len(recommended)
        return self.load_view('admin_dash.html', feedback = feedback, types = types, length = length, flags = flags, flag_length = flag_length, support = support, support_length = support_length, recommended = recommended, rec_length = rec_length)

    def admin_feedback(self):
        """Admin view of active vs. inactive feedback."""
        # BUG FIX: the old check (`'admin_status' in session['user']`) only
        # tested key *presence*, which is true for every logged-in user;
        # test the value instead, consistent with the other admin routes.
        if 'user' not in session or session['user']['admin_status'] == 0:
            return redirect('/result')
        inactive = self.models['Dashboard'].get_feedback_by_inactive_status()
        active = self.models['Dashboard'].get_feedback_by_active_status()
        length = len(active)
        return self.load_view('admin_feedback.html', inactive = inactive, active = active)

    def logout(self):
        """Clear the session and return to the landing page."""
        session.clear()
        return redirect('/')

    # routes['/support'] = 'Users#support'
    def support(self):
        """Public support-ticket submission form."""
        return self.load_view('support.html')

    # routes['POST']['/create/support'] = 'Users#create_support'
    def create_support(self):
        """Create a support ticket; flashes validation/confirmation messages."""
        create = self.models['User'].support(request.form.copy())
        for message in create['errors']:
            flash(message, create['type'])
        return redirect('/support')

    # routes['/all_support'] = 'Users#all_support'
    def all_support(self):
        """List active and archived support tickets."""
        active = self.models['User'].active_support()
        archived = self.models['User'].archived_support()
        return self.load_view('/archive_support.html', active = active, archived = archived)

    # routes["/archive/support/{{i['id']}}"] = 'Users#deactivate_support'
    def deactivate_support(self, id):
        """Archive a support ticket from the admin dashboard."""
        self.models['User'].deactivate_support(id)
        return redirect('/admin')

    # routes["/activate/support/<id>"] = 'Users#activate_support'
    def activate_support(self, id):
        """Re-activate an archived support ticket."""
        self.models['User'].activate_support(id)
        return redirect('/all_support')

    # routes["/archive/deactivate/support/<id>"] = 'Users#archive_deactivate_support'
    def archive_deactivate_support(self, id):
        """Archive a ticket from the support-archive page."""
        self.models['User'].deactivate_support(id)
        return redirect('/all_support')

    # routes["/destroy/support/<id>"] = 'Users#destroy_support'
    def destroy_support(self, id):
        """Permanently delete a support ticket."""
        self.models['User'].destroy_support(id)
        return redirect('/all_support')
|
19,925 | c4f306eff47f50f684429c53cb5c93608aa95556 | """Done By P.R.Kesavan"""
"""Mini Project : Alaram Clock Using Python With GUI"""
from time import strftime
from tkinter import *
import time
import datetime
from pygame import mixer #Importing Mixer from Pygame : For Loading and PLaying Sound
root = Tk()
root.title('My Alarm Clock')
def setalarm():
    """Read the hour/minute/second entry fields and arm the alarm.

    Builds an "H:M:S" string from the three Tk StringVars; if at least
    one field was filled in (i.e. the string is not just "::"), hands it
    to alarmclock() to start waiting.
    """
    alarmtime = "{}:{}:{}".format(hrs.get(), mins.get(), secs.get())
    print(alarmtime)
    if alarmtime != "::":
        alarmclock(alarmtime)
def alarmclock(alarmtime):
    # Poll the wall clock once per second until it matches `alarmtime`
    # ("H:M:S" as built by setalarm), then show a wake-up label and play
    # the alarm sound via pygame's mixer.
    # NOTE(review): this loop blocks the Tk event loop, so the window
    # freezes while waiting — consider root.after() scheduling instead.
    while True:
        time.sleep(1)
        time_now=datetime.datetime.now().strftime("%H:%M:%S")
        print(time_now)
        if time_now==alarmtime:
            # NOTE(review): .grid() returns None, so `Wakeup` is always
            # None — the label itself is still created and displayed.
            Wakeup=Label(root, font = ('arial', 16, 'bold'),
                         text="Time Up! Wake Up!",bg="Red",fg="white").grid(row=6,columnspan=3)
            print("wake up!")
            mixer.init()
            # Hard-coded absolute path to the alarm sound — verify it exists
            # on the target machine before running.
            mixer.music.load(r'C:\Users\Asus\OneDrive\Documents\Python\Alaram Clock\Jack Sparrow.mpeg')
            mixer.music.play()
            break
# Tk string variables backing the hour/minute/second entry boxes.
hrs=StringVar()
mins=StringVar()
secs=StringVar()
# NOTE(review): widget.grid() returns None, so titlename/min/sec/setbtn
# all end up holding None rather than the widgets themselves.
titlename=Label(root, font = ('arial', 16, 'bold'),
                text="Alarm:(Hr/Min/Sec)").grid(row=1,columnspan=3)
hour=Entry(root,textvariable=hrs,width=4,font =('arial', 20, 'bold'),bg="Yellow" ,fg="Red")
hour.grid(row=2,column=1)
# NOTE(review): `min` shadows the builtin min() for the rest of the module.
min=Entry(root,textvariable=mins,
          width=4,font = ('arial', 20, 'bold'),bg="Yellow" ,fg="Red").grid(row=2,column=2)
sec=Entry(root,textvariable=secs,
          width=4,font = ('arial', 20, 'bold'),bg="Yellow" ,fg="Red").grid(row=2,column=3)
setbtn=Button(root,text="Set Alarm",command=setalarm,bg="Blue",
              fg="white",font = ('arial', 16, 'bold')).grid(row=4,columnspan=3)
# Placeholder label; never populated with countdown text.
timeleft = Label(root,font=('arial', 16, 'bold'))
timeleft.grid()
mainloop()
19,926 | 5bbdd67e7219bdd5acf0f6ece631bdffe596b4a9 | import cv2
# Project helper utilities (contour drawing, text overlay, HSV filtering).
from util import ContourUtil,TextUtil,ImgUtil,HSVFilteUtil
from roi import ROIDetect
# Front-camera stream is currently disabled.
#captureFront = cv2.VideoCapture("http://192.168.1.8:8002/?action=stream")
# MJPEG stream from the left camera.
captureLeft = cv2.VideoCapture("http://192.168.1.8:8001/?action=stream")
roiDetectLeft = ROIDetect()
roiDetectFront = ROIDetect()
contourUtil = ContourUtil()
textUtil = TextUtil()
hsvUtil = HSVFilteUtil()
imgUtil = ImgUtil()
# Large ellipse kernel for dilating the motion mask; small rect kernel
# for eroding the colour-filtered mask in processImg().
es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (21, 21))
kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(2, 2))
# HSV range used to isolate the target object — presumably blue, given
# the variable names; verify against HSVFilteUtil.getFilteRange.
lower_blue,upper_blue = hsvUtil.getFilteRange()
def processImg(frame, diff):
    # Locate the moving object in `frame` from a frame-difference image,
    # isolate it by HSV colour filtering, and annotate the frame with its
    # bounding/min-area rectangles plus the measured rotation angles.
    # `diff` is assumed to be a single-channel difference image from
    # ROIDetect.getROIByDiff — TODO confirm.
    imgGray = cv2.threshold(diff, 30, 255, cv2.THRESH_BINARY)[1]
    imgGray = cv2.dilate(imgGray, es, iterations=2)
    maxCnt,hierarchy = contourUtil.getMaxContour(imgGray)
    # Angle defaults when no motion contour is found.
    leftRotate = 0
    rightRotate = 0
    if not maxCnt is None:
        # Crop to the motion bounding box before colour filtering.
        x, y, w, h = cv2.boundingRect(maxCnt)
        roiImg = frame[y:y+h,x:x+w]
        res = hsvUtil.filteByRange(roiImg,lower_blue,upper_blue)
        resGray = cv2.cvtColor(res,cv2.COLOR_BGR2GRAY)
        ret,thresh = cv2.threshold( resGray , 75 , 255, cv2.THRESH_BINARY )
        thresh = cv2.erode(thresh,kernel)
        maxCnt,hierarchy = contourUtil.getMaxContour(thresh)
        contourUtil.drawRect(frame,maxCnt,x,y)
        # NOTE(review): drawMinRect presumably returns the min-area-rect
        # angle in degrees — verify against util.ContourUtil.
        rotate = contourUtil.drawMinRect(frame,maxCnt,hierarchy,x,y)
        rightRotate = abs(rotate)
        leftRotate = 90 - rightRotate
    text = " rotates: %.1f - %.1f" % (leftRotate ,rightRotate)
    textUtil.putText(frame,text)
    return frame
# Main capture/annotate/display loop (~40 fps); press 'q' to quit.
while True:
    # Front-camera pipeline is currently disabled (its VideoCapture is
    # commented out above); only the left camera is processed.
    #ret, frameFront = captureFront.read()
    ret, frameLeft = captureLeft.read()
    if not ret:
        # Stream ended or dropped a frame: stop instead of passing None
        # downstream into processImg/imshow.
        break
    #diffFront = roiDetectFront.getROIByDiff(frameFront)
    diffLeft = roiDetectLeft.getROIByDiff(frameLeft)
    #frameFront = processImg(frameFront,diffFront)
    frameLeft = processImg(frameLeft, diffLeft)
    #cv2.imshow("brickFront",frameFront)
    cv2.imshow("brickLeft", frameLeft)
    key = cv2.waitKey(1000 // 40)
    if key == ord('q'):
        break
cv2.destroyAllWindows()
# BUG FIX: captureFront is never created (its VideoCapture line is
# commented out above), so releasing it raised NameError at shutdown.
#captureFront.release()
captureLeft.release()
19,927 | 874aef104e3f4dfe3e8cc6c104014d5db56ee4a8 | # !/usr/bin/env python
# -*- coding:utf-8 -*-
# author;鸿
import requests
import json
import time
import random
offset = 1755
shop_id = []
cookie = [
# # '__wpkreporterwid_=b77b8bef-a3e9-4e81-0e9c-d3881756bf5b; ubt_ssid=ilkha8pse5g4122hgc2yqduzjoac8tg8_2020-06-07; perf_ssid=vfgpud8oofkmr1tu3kshxxhl7mlsp8eb_2020-06-07; ut_ubt_ssid=s2v3ocmhyif1hdz7jumojmapmetwqxii_2020-06-07; _utrace=0d037d42d880d2ce1ad34156121443d2_2020-06-07; cna=zA71FgNiM1QCAduISvHVe3Vn; _bl_uid=mskRnb254hzsCd6eb6Lba7jazaIv; _samesite_flag_=true; cookie2=11d02f565b83c7201762d8aa5e4f1d0d; t=fde297ee74be499a3efabdcec2346e8c; _tb_token_=3571b8773ed05; csg=743201f3; t_eleuc4=id4=0%40BA%2FvuHCrrRtQnIoNSBWw%2FAHuUx%2FAlPtUgj0rqg%3D%3D; munb=2206585031598; SID=AwAAAdGrVjee7AAGXADHJjyTl-2kwgQ01Xmz7kswTfKW9jh_GYJGlbOn; USERID=2000034346910; UTUSER=2000034346910; tzyy=d65545cad93b482e7f2d01f93ed0fdd2; x5check_ele=0iRIN6%2FS2bYCE%2By3Uw2WWENIV%2FXNj2%2BVuuUj5CbAXzc%3D; l=eBQDZ7GrQVjo3wQyBO5anurza77t0IdbzsPzaNbMiIncC6bPdu99goRQLsPKNCtRR8XcGZYB4gR2YAetoeP_8PwfkaQE-1IVlgEvBef..; isg=BKOjh-0q-VagoLViWryng4BMMuFNmDfaqki2-dUBaIIRFME2XWkLKy7GDuQatI_S',
# 'cna=Y49GFtWEfF4CAXjtoAYpZ4UH; ut_ubt_ssid=ri43wqitp430ynmw7ycr12a6ryc28y48_2020-06-07; perf_ssid=nhgkt2live1yp6rv3213birjffjpl8xu_2020-06-07; ubt_ssid=a996l76k3x7yh4lhi31geqsv6c9uezk9_2020-06-07; _utrace=045f1125ecf0d66e71878f1ddcffab1c_2020-06-07; t=fd45e273043b13a0b97adea2108dd5d9; munb=2206588865815; UTUSER=2000035559833; UM_distinctid=1728e0243c83be-08846cb067e683-d373666-130980-1728e0243c94ac; __wpkreporterwid_=1b5ffe47-8ebe-4b19-8abf-f54a131397f9; _bl_uid=4qkk6b794Fau0d8gh75LnR6ynjFe; tzyy=f2cd8ab3f5e535aecdd49c1d9aead181; l=eBxTRMXgQVjHZCiOBOfaFurza7yeYIRfguPzaNbMiOCPO4595l5hWZvQ_hLpCnNVn6uJR3ykIQI_B7TLny4eQxv9-ewyFtQqxdLh.; _samesite_flag_=true; cookie2=1e4db1e0ef314829d402bc1ab808d2b1; _tb_token_=eeeeea8de5e1f; csg=e6a2f4b9; t_eleuc4=id4=0%40BA%2FvuHCrrRtQnIcy%2FZ8MJX%2BO%2Bi8DiqztJo5UTg%3D%3D; SID=BQAAAdGraLmZ7AAGoQBgmy8LgB4rCW1sKopyO0BsLy53qqel438GaQpJ; USERID=2000035559833; x5check_ele=6%2Bu09bmvA3%2FbTGcfCfYxq3nPDUNCzwLV54tk8rg2tM8%3D; isg=BKOjlv8h-Vf2x7ViaX_F_SEUMudNmDfaW6DIiNUARoJ5FMI2XWjeKZMFDqS_tI_S',
# '__wpkreporterwid_=9efcffc6-4925-4ce7-b9aa-c9fc8b275211; ubt_ssid=53oy0qppxthkfjn9kyrcpdbph3s8xkfc_2020-06-07; perf_ssid=yzgiyfz5jygi7h4zlcoedijef93qj6to_2020-06-07; ut_ubt_ssid=3hu294gq3gvzepw99djh1mu3ll1sse81_2020-06-07; cna=TEw9F5MHjFACAbcAlS9mwsbH; _bl_uid=q4k7zb7p5w41mI3tUc9q21g58848; _utrace=e621ca025d83c2856119bfe156cc13bb_2020-06-07; _samesite_flag_=true; cookie2=1e04f9f20f61b041325cce7180211fb9; t=5fd46f991d9334896d771c663b4d5d19; _tb_token_=e3bd9eb306373; munb=2206099961085; SID=DAAAAAFGjimC7AAGTAAPAfefQ9npfUKLKhR4H6tPpAoP8ZuZW-D5w6pX; USERID=5478689154; UTUSER=5478689154; csg=087101eb; t_eleuc4=id4=0%40BA%2FvuHCrrRtVrsfR%2Bgp1%2B7YLeQILbyzz1juZWA%3D%3D; x5check_ele=w3B8MdQzDziMj5kBx7DtfKSOWL3Pi5DNhCvaAKxA5j4%3D; tzyy=0044f0f418df7a693cd9b4b6c93d456e; l=eBgpTt_7QVNdvLvpBOfChurza779RIR8muPzaNbMiOCP9kCD5-zPWZvQNlLkCnGVnsGvJ35QyF6TBDYnTPatQxv9-e1Sm1uq3dRC.; isg=BBAQy3d2ek5f6CZLYYK5CrC24V5i2fQjFhP-wwrh3Ws-RbLvsumPs1J3GQulkaz7',
# '__wpkreporterwid_=fb8f959b-a88b-4f04-a667-d78542d36ea2; ubt_ssid=lihd4mr152c09zd4nr4ezn07svc2s4el_2020-06-07; perf_ssid=1naki75ce4jiizbaj4o0s8ra96pymixo_2020-06-07; ut_ubt_ssid=m66qhwshe7iszci0lncadmz1klzsxs89_2020-06-07; isg=BMbGpXrg5AyRl7DpO1rAyxF3FLxIJwrhf4T4xrDu2unEs2fNGLdL8aECj29_AAL5; l=eBaGaXqVQVTh4sD-BO5Bnurza77T1IO48kPzaNbMiInca18RNF1Z_OQDdSdXldtfgt1LhetyLqt1edKek3fRwxDDBti2PHWs3xf..; UTUSER=2000030235796; _bl_uid=mCkXUb8s5kC1Ow840hn5dhOtOLLj; cna=OLw1F7jgXi0CAduEckxT/7Uj; _utrace=ea83ec38d96805d43cf88f13352bcf2a_2020-06-07; t=f7a272da22198520c7e8fa1692cac12f; t_eleuc4=id4=0%40BA%2FvuHCrrRtQkLM1%2BglHNhk1kjPp44xHTOmVig%3D%3D; munb=2206546129689; SID=BAAAAdGrF3yU7AAGxwCd2FvrjJ4a2fm8NKeWpCQ9o233UQl-XDSPYy47; USERID=2000030235796; x5check_ele=emCex6%2By37piI4Y3aanxzD2JeOYKyzGk2aCAvCeP4mo%3D; tzyy=ffa2c38117eeae13b6957d498e48cc11',
# # '__wpkreporterwid_=52a4dd0e-f0e1-45f9-393d-9914d1e2f80d; ubt_ssid=mqmrh8166zjhen0ylww5cu3k9ppdre2l_2020-06-07; perf_ssid=zpq5i8uj861pb5b5mj7t9d1ees1qweyy_2020-06-07; ut_ubt_ssid=blk5zezfz4t1hu1redces0g8j56w11tg_2020-06-07; cna=e9pXF21sJigCAXF2DXOwC4iK; _bl_uid=Utk6wbF644Iremvn5zzt35k66e83; _utrace=28fe3bba1c0ce75484a3e70f9f99d0e3_2020-06-07; _samesite_flag_=true; cookie2=1ff728153cd5d11078d6ad7c7c4874da; t=b86f23f7ee94141c087ef5d6f60ac6b5; _tb_token_=ed355b1388036; csg=09484496; t_eleuc4=id4=0%40BA%2FvuHCrrRtQk0MqN0A8ZvMLx537DUeqNxrjZQ%3D%3D; munb=2206579483915; track_id=1591519662|1007f241d03ec6fad54df7f148fd7ac1b04c216fcbaff38b51|ff7c1e2216e2db4412046bcf836fcbf7; tzyy=9933c52edbe4e66dbe32ecc02f6db1fa; l=eBOWF_cmQVTtCKsKBOfZhurza77TdIRAguPzaNbMiOCP9O1p7-PAWZvK1vT9CnGVh6qyR35QyF69BeYBq3K-nxv96IUUIrkmn; USERID=250178596; UTUSER=250178596; SID=BgAAAAAO6Wwk6gAEAADESBQvhoNvFQ_z4X7LhT-2BKqGEtJaUeTuFH7g; ZDS=1.0|1591535321|4WzGeyaGPgR/8mevH65FmJCmlKeJtZ6izhxGtSCOhLYqRzRfQs0+9OKUyJlgm362; x5check_ele=z%2FkLKjvd5MChJVArFKGNStcUZsDWklcE60x0Gm%2F33ZQ%3D; isg=BLe3SfdkpVKggSHeUZlmrueQRqsBfIveZf65tglk0wTeuNf6EUwbLnXZnh7mUGNWUwbLnXZnh7mUGNWNW',
# # '__wpkreporterwid_=0dcb3725-ef92-41b5-bab8-5a5d8c230460; ubt_ssid=ch0d41pj8fjipmyrydgxri05hlln3bou_2020-06-07; perf_ssid=auetpn0v2jdg9q8zaikdz7s8snosypbc_2020-06-07; ut_ubt_ssid=ii2hmyzl8ma3b4mg47rkd8jjwzglax2i_2020-06-07; cna=l61JF0upQlECAXjrqQDAckij; _bl_uid=thkCFbR15zC2wzi1eisvxXLqjIpt; _utrace=d4ace16e8b42ecdae76c365468556339_2020-06-07; t=f7af8ea49f3ddb5813cf8aaa5ea0b5b7; l=eBSQkUIIQVNdH23KBOfZnurza7799IRVguPzaNbMiOCP965yJuU5WZvKStY2CnGVnstvJ3RILX7QBc8KkPU67xv9-e9bMdFs3dRC.; track_id=1591535414|083fb2dca5b43f61ef2cd8b5cbe2b32bed3d8c574e227e820a|3e4acd45664c1ddf3b3e6483821f9b73; USERID=1000081288890; UTUSER=1000081288890; SID=CQAAAOjZfW666gAEAAD2UeJN9eWPF1ElvyeMwKu1FlxYPMFgcrbR6j-n; ZDS=1.0|1591535414|lYrb2E0xqpk9h20z/SVATblfSqVILqYTQ+rnTZoFWMTGTg+13w+m/Pd8enMhiuhhlF7PU0c24I5fU/HM8FtXUg==; isg=BDU15SqVhyjEIOOUwgttYp8wRLHvsunEtQMvebda1qz7jl6AbgIplQkP3FS4zgF8',
# '__wpkreporterwid_=52a4dd0e-f0e1-45f9-393d-9914d1e2f80d; ubt_ssid=mqmrh8166zjhen0ylww5cu3k9ppdre2l_2020-06-07; perf_ssid=zpq5i8uj861pb5b5mj7t9d1ees1qweyy_2020-06-07; ut_ubt_ssid=blk5zezfz4t1hu1redces0g8j56w11tg_2020-06-07; cna=e9pXF21sJigCAXF2DXOwC4iK; _bl_uid=Utk6wbF644Iremvn5zzt35k66e83; _utrace=28fe3bba1c0ce75484a3e70f9f99d0e3_2020-06-07; _samesite_flag_=true; t=b86f23f7ee94141c087ef5d6f60ac6b5; _tb_token_=ed355b1388036; csg=09484496; t_eleuc4=id4=0%40BA%2FvuHCrrRtQk0MqN0A8ZvMLx537DUeqNxrjZQ%3D%3D; munb=2206579483915; track_id=1591519662|1007f241d03ec6fad54df7f148fd7ac1b04c216fcbaff38b51|ff7c1e2216e2db4412046bcf836fcbf7; tzyy=9933c52edbe4e66dbe32ecc02f6db1fa; l=eBOWF_cmQVTtCnFKBO5Zhurza77t3IOXhsPzaNbMiInca1yA_hL2aNQDdKL98dtjgtfvxeKPOzL1BRn2JxaU-xaVX9zbTF6ZmYvvF; UTUSER=1000081304180; USERID=1000081304180; SID=DAAAAOjZfap06gAEAADblI1acUonvyoD4E4fVl0oUn2fbD0c7KG0nVdU; ZDS=1.0|1591586159|WGdv8IeEintFPiD2CWLtgHsaK6UDGAGRHEzFFBC/xg46mV/w3bE3YYAfFZzUjPyNE/daZ9zyryRxri4iZeBKRQ==; x5check_ele=YbcTUKJkbaNwLZWZBOWNnWt1wD%2FD0trvs82dyrXpHow%3D; isg=BCIimOLyiKCllpR9AZp63SSFc6iEcyaN2NmsVWy7TxVAP8C5VAMTmKO-a13DL54l',
# # '__wpkreporterwid_=e80951cb-950a-4940-19ea-48aef5666ce8; _bl_uid=3tkeFbFv5190kjepdr4Rusqw44mh; ubt_ssid=jz4gpp061kx0y4dstiqswrkcx9pqdjgu_2020-06-07; perf_ssid=a4p56zqra0fbpkyaf3smcx9ywuv3dop0_2020-06-07; ut_ubt_ssid=j68pcza8uezw497fffwyn96uxvvzst50_2020-06-07; cna=nMNjFzr8+mECAW8TJp0pA8s1; _utrace=7b264d3be9d4a1ecf92f0ba0f8fd3661_2020-06-07; l=eBLxBDMmQVTtvNoKBOfwlurza77OvIRRguPzaNbMiOCP_J1B5isfBZvK1086CnGVnsMWJ3RILX7QBvYnNyznQxv9-e9bMdF_HpdC.; track_id=1591530960|45fe7cbd04b8548c99f807ae93c556c4ee6c9fe98092656d62|476f221d898decf946a046695987bcff; USERID=35820790; UTUSER=35820790; SID=CAAAAAACIpT26gAEAAD-ggzpA5Yck4-p1bh0xa87oFiSs0B2CegitIVP; ZDS=1.0|1591530960|FR2zhuIyX/x/coV67QjpLkxj9nke6DdJAN+ILTRcuFH/+Rdvzs+0hwgh8M/VnJoy; x5check_ele=OzRutFPmDXN58OzuKu8eeQ%3D%3D; tzyy=e032a097c3f26fae73ec10d60b620488; isg=BAkJYJy1s2QP9E-QzrIJZe0sGDVjVv2IYS_Dfat-hPAv8isE86YvWNhjMFDEqpXA',
# '__wpkreporterwid_=63de1d56-a67c-428d-a62e-a7f3949c134f; ubt_ssid=388ki362426n24dti8mxxax9kju6lzut_2020-06-07; perf_ssid=92kx6a2humh554xv2v9y33rj4j826smu_2020-06-07; ut_ubt_ssid=p0xa0yxylzcj4hp3czx78fn4qn8ixrmc_2020-06-07; _bl_uid=esk3ObeC460t5brz8qakpvOhgej3; cna=75djF+qC40oCARu7Th6A3UiO; _utrace=0d736d242aa41d58248bdbf73cb2102f_2020-06-07; t=498c09094ad6f23cb79be81574f8039d; t_eleuc4=id4=0%40BA%2FvuHCrrRUkrvmFBsmjR6fET2COSO%2BzEYAnkQ%3D%3D; munb=2208234537927; SID=CQAAAOjZfLpF7AAGnwA3rwe7vDICJ37eVlT_pLSfK6ETXc0SMqPIfGCz; USERID=1000081242693; UTUSER=1000081242693; x5check_ele=WkvW5x4UEdmh4I%2FQbK7Qj%2FIpJqBc%2FrcoqjEpZgpexww%3D; tzyy=4fd7aaaf36818a1a41d5c040e1f28754; l=eBQggVBrQVjUG-D-BOfChurza77TvIRbmuPzaNbMiOCPOx5BoaT5BZvQakY6CnGVnsEyR3ljL4J_BuY_7y4E0xv9-e1Sm1u4pdTh.; isg=BHR0qgnxlnoUUQI3FM6uhh7fRTLmTZg3sPZavg7VAP-CeRTDNl1oxypw_behhdCP',
# '__wpkreporterwid_=6429f269-f07d-4571-086a-beac52d2c25c; ubt_ssid=ungexh95fgnyy7ady20gyb1gunwfnop8_2020-06-08; perf_ssid=ke8gvg3vmxl9vujlscgs1guc0t1yctv5_2020-06-08; ut_ubt_ssid=9eh6gxlwwm41trx2opph4aguxmuvoc5b_2020-06-08; _bl_uid=tskwFb4b588v35s4gyI9xFvun9Rb; cna=VwhcFy3+aGcCAXjmtP6DFaZf; _utrace=979205f1493e050928eea3f394dccb4b_2020-06-08; _samesite_flag_=true; cookie2=118cd77b1f5474e7dad32ef73bdc1c6a; t=91ab915102c124fb4a0f406ef53ba6d6; _tb_token_=7eee6136e675a; csg=c9c07550; t_eleuc4=id4=0%40BA%2FvuHCrrRtQl5AQ0dCtb0IaafE2NwKMdX4kAw%3D%3D; munb=2206532508981; SID=AgAAAOjVdSns7AAGtACbmfvpo8dpGdXmtG_cWcYKcL2RRCCof3FsnWdo; USERID=1000013638124; UTUSER=1000013638124; x5check_ele=OQc2KPqM4kSaYowoYKqDJ4crfPZR9TmXjoclfOb%2B1WM%3D; tzyy=bfdb769c131f7a9baee88638ade49f0f; isg=BD09wnBvP4FAIZvtzKcfKG35TJ832nEstRBEWv-CSBTDNl1oxyrG_AOn4GJwtonk; l=eBOVenFHQVUD9Lm5BOfZhurza7799IRAguPzaNbMiOCPO81k5Z2hWZvQzx8DCnGNh6-BR37cAizQBeYBc_C-nxvOpiV3pyMmn',
# 'ut_ubt_ssid=pmi3o74gwz1x94l0lr4a2919xw7opzx4_2020-06-07; ubt_ssid=7rwaci72apyxlmqt6zfqzpe7x35r9q4f_2020-06-07; cna=5zzeFtMS70ACAXFRkC0qymBk; _utrace=91eb824f72f7ab12047a6d23a3915b9f_2020-06-07; t=ac5fe679e7ba2fa0b567e0dfb802df3c; t_eleuc4=id4=0%40BA%2FvuHCrrRj3bnAc6PLwZlHrIXf9Kmk2DQi0KA%3D%3D; munb=2205013417283; SID=CAAAAABNyb7q7AAGiABosP8MkuRXX8vXl7imgV-nCAve4P7jQu7K-5hw; USERID=1305067242; UTUSER=1305067242; tzyy=97d578b447b48beaedb451613aa1886f; x5check_ele=4XOMpa%2BpYfNlkiK%2Br%2BX5FuAMVvbo5GEfhs1ZxLWwO3o%3D; l=eBak7P0lQVja473hBOfwhurza77OOIRAguPzaNbMiOCPO4fRdOrRWZvQGPTvCnGVh64pJ3lsGOfzBeYBcCYLLNYe6IUUIWkmn; isg=BFdXc-DBxXNpVEH-MbGbY3nB5s2hnCv-cXjXB6mEXiaj2HcasG-VTD32PnhGMAN2',
# ' __wpkreporterwid_=99a15c81-7d5d-4564-10ac-f47c09bdb24e; ubt_ssid=t58gahemosfi71dita0m0uetouticgzx_2020-06-07; perf_ssid=vv34jkbxp0h8yv2imr72fjy8h3t98eiy_2020-06-07; ut_ubt_ssid=cvdh6pyqtel17lboiy8hupzzm4v6k65o_2020-06-07; _bl_uid=0CkyObk95pn0jveehmC5kbd2ytCn; cna=hEZSF/sBiWwCAW8SLonGT2Q3; _utrace=08c6bfc14651d81f3160fd18ffa55b18_2020-06-07; l=eBQ4WpvrQVTtWzBKBO5ahurza77TCIRXGsPzaNbMiInca1rPaH520NQDdr1XudtjgtfjnhKyOvlFRRhBJPUU-AkDBeYBhSpT1xJwO; track_id=1591585829|0a059d2680501158c46a4fa943704d6113f4fe8d8c25ad0711|61bd29e095506cf9789f0d282ba40886; USERID=35820790; UTUSER=35820790; SID=CAAAAAACIpT26gAEAAD-ggzpA5Yck4-p1bh0xa87oFiSs0B2CegitIVP; ZDS=1.0|1591585829|CIT+yrzrGXkLRyQ5+hJ9NUDpTOm7XVWivyldfUinubl3ROmgmZApzXaR1DlqIbDd; isg=BIeH752WlcMJtBFOnDBXHsHwFjtRjFtuswU9g1l085Y9yK2KcFwBv5aJbo6WIDPm',
# # '__wpkreporterwid_=0d82cbd8-db95-4995-9329-08d0bba97ee0; ubt_ssid=clbpkimlzdtvo63wt1v1g8vhj58bezlt_2020-06-07; perf_ssid=78ftwxrjrelwt929cemjz932rs4ezcwp_2020-06-07; ut_ubt_ssid=7i7vwde0fbewfje8mh3nfw5qh8rehpcx_2020-06-07; cna=Mk7aFpUvjW8CAatfUbbpNZoR; _bl_uid=tIkzgbCL4w1yF3ah5kqRup932bU9; _utrace=3de7195594ca9a72bdd7d84e4ca62155_2020-06-07; track_id=1591527545|d3f1253063c2b881fbac5a3cc80654b25b36a151e0765f4e1f|901bcfb517f9d260ad4cf03f3dfe9cec; USERID=1000081239815; UTUSER=1000081239815; tzyy=8ef2905cc2c9185dc32c15e5ca11fe6b; t=1648e26109f1b0080f10cfdf036a35bd; l=eBOAeWbnQVNjALM_BO5Z-urza77O5IOXGsPzaNbMiInca6iAg3zigNQDpVW2zdtjgtfXrHtPOzL1BRHMSmUaSAkDBeYBhSpT1xJwr; SID=AgAAAOjZfK8H6gAEAABM6FaK0D553EKaBtZxdkXj1sBGWCuVH9Xrs7t9; ZDS=1.0|1591535160|4tVxKA+lM7OVQXrnFb7868Wt/ZdvA0x5DYPNMHoJ2204C97yz3FYcFXsvyfZdpYMNbufi8y/eZReX6/KylEfUw==; isg=BI6OUJnGXCVyD-gBMYpO3YBp32RQD1IJZH2wCbjX-hFNGy51IJ-iGTTZV0F3A0oh',
# '__wpkreporterwid_=249c2064-09b5-4290-bcec-0a17eabf779b; ubt_ssid=6d6qpdcn7b9sjznipypw8czstvve3nk0_2020-06-07; perf_ssid=mybj1vipvxutffy1qp26eoe0oarf0q4q_2020-06-07; ut_ubt_ssid=1volc20hbtfc2p9p85hshfpgmhml1ll6_2020-06-07; _bl_uid=30kjLb4k41Ota5iLhqLIn6Xha1Im; cna=6Y+HFU1bkwkCAXFcSe+3/nRa; _utrace=37ae7d1d02b017e173894fea3bb2efca_2020-06-07; track_id=1591519423|9a66bb685816851de95898ed6f3d3fcb52b5e1184c66fcd708|25703e329f8a6b21c9d4dc124f758010; USERID=121791358; UTUSER=121791358; tzyy=7b426df19e9be6ccce2b4eef498153d6; ZDS=1.0|1591532442|HNiWBJCKlJz5YhPNoaJXse+JBVZM1fVcayG+pi/cfgd9wmvw9ZvUZcmeFREdRnWl; l=eBNLpvaRQVjrxPMiBOfZlurza77TAIRfguPzaNbMiOCPO0Cp5YXFWZvQZkY9CnGVn6l6R35QyF69BSTorPa9Qxv9-e9bMdF_9dTh.; _samesite_flag_=true; cookie2=111dc5ffde4a524aab9b0720103582e8; t=aae572110ad9ca4cc9c0f57688d0ada0; _tb_token_=3d46efe35b97b; csg=2a0d59a5; t_eleuc4=id4=0%40BA%2FvuHCrrRkeMfkp7WOagoZAAZSmysPR%2F67zEA%3D%3D; munb=2204450098041; SID=AQAAAAAHQmN-7gAGPADLlfyNwQMeOR2O7ig_TL0YfBqYrRMpXzXis9Uc; x5check_ele=Xo0wsbZZGabt9fuUyE08370eWyIMWrI4CDGkffTiapc%3D; isg=BIOD8oD_2TfarZXCDfclhiUfEkct-Bc6USLtirVg3-JZdKOWPcinimHi6gI6VG8y_wpkreporterwid_=249c2064-09b5-4290-bcec-0a17eabf779b; ubt_ssid=6d6qpdcn7b9sjznipypw8czstvve3nk0_2020-06-07; perf_ssid=mybj1vipvxutffy1qp26eoe0oarf0q4q_2020-06-07; ut_ubt_ssid=1volc20hbtfc2p9p85hshfpgmhml1ll6_2020-06-07; _bl_uid=30kjLb4k41Ota5iLhqLIn6Xha1Im; cna=6Y+HFU1bkwkCAXFcSe+3/nRa; _utrace=37ae7d1d02b017e173894fea3bb2efca_2020-06-07; track_id=1591519423|9a66bb685816851de95898ed6f3d3fcb52b5e1184c66fcd708|25703e329f8a6b21c9d4dc124f758010; USERID=121791358; UTUSER=121791358;',
# #'__wpkreporterwid_=0d82cbd8-db95-4995-9329-08d0bba97ee0; ubt_ssid=clbpkimlzdtvo63wt1v1g8vhj58bezlt_2020-06-07; perf_ssid=78ftwxrjrelwt929cemjz932rs4ezcwp_2020-06-07; ut_ubt_ssid=7i7vwde0fbewfje8mh3nfw5qh8rehpcx_2020-06-07; cna=Mk7aFpUvjW8CAatfUbbpNZoR; _bl_uid=tIkzgbCL4w1yF3ah5kqRup932bU9; _utrace=3de7195594ca9a72bdd7d84e4ca62155_2020-06-07; track_id=1591527545|d3f1253063c2b881fbac5a3cc80654b25b36a151e0765f4e1f|901bcfb517f9d260ad4cf03f3dfe9cec; USERID=1000081239815; UTUSER=1000081239815; tzyy=8ef2905cc2c9185dc32c15e5ca11fe6b; t=1648e26109f1b0080f10cfdf036a35bd; l=eBOAeWbnQVNjALM_BO5Z-urza77O5IOXGsPzaNbMiInca6iAg3zigNQDpVW2zdtjgtfXrHtPOzL1BRHMSmUaSAkDBeYBhSpT1xJwr; SID=AgAAAOjZfK8H6gAEAABM6FaK0D553EKaBtZxdkXj1sBGWCuVH9Xrs7t9; ZDS=1.0|1591535160|4tVxKA+lM7OVQXrnFb7868Wt/ZdvA0x5DYPNMHoJ2204C97yz3FYcFXsvyfZdpYMNbufi8y/eZReX6/KylEfUw==; isg=BI6OUJnGXCVyD-gBMYpO3YBp32RQD1IJZH2wCbjX-hFNGy51IJ-iGTTZV0F3A0oh',
# '__wpkreporterwid_=88af1c50-d8a8-4cb1-b9a1-fc571e73c558; ubt_ssid=343qvjtvd5bd6n467fdr3cmvq4gyhmo7_2020-06-08; perf_ssid=e1003poworu7iduv9fc132tf4jpbvmpb_2020-06-08; ut_ubt_ssid=4vyv4ytylnfur6eixqb6j1pp7qt9i82z_2020-06-08; UTUSER=0; isg=BKKiGxidCCAkExT8sKTahVK18CgE86YNdOFLVew7zpXAv0I51IP2HSg9631DtB6l; l=eBMersSVQVUFgdydBOfChurza779xIObYuPzaNbMiOCP_gCM4MWNWZvQlGTHCn1Vnsh6R3yblDgJBvLa2Pa9Qxv9-e1Sm1uqbdTh.; _bl_uid=g0kkmbhO5ydvwph7mwFCrntreCk3; _utrace=d3ffa58aab14b811c27d9207f07dc736_2020-06-08; cna=W49kFzheOFoCAXW1ixk0Dik0; _samesite_flag_=true; cookie2=1197f681e708427978e2389b0a3f3611; t=06ddc36724da9da9328111fc53c6fe24; _tb_token_=f531ee3588a53',
'__wpkreporterwid_=0d82cbd8-db95-4995-9329-08d0bba97ee0; ubt_ssid=clbpkimlzdtvo63wt1v1g8vhj58bezlt_2020-06-07; perf_ssid=78ftwxrjrelwt929cemjz932rs4ezcwp_2020-06-07; ut_ubt_ssid=7i7vwde0fbewfje8mh3nfw5qh8rehpcx_2020-06-07; cna=Mk7aFpUvjW8CAatfUbbpNZoR; _bl_uid=tIkzgbCL4w1yF3ah5kqRup932bU9; _utrace=3de7195594ca9a72bdd7d84e4ca62155_2020-06-07; track_id=1591527545|d3f1253063c2b881fbac5a3cc80654b25b36a151e0765f4e1f|901bcfb517f9d260ad4cf03f3dfe9cec; USERID=1000081239815; UTUSER=1000081239815; tzyy=8ef2905cc2c9185dc32c15e5ca11fe6b; t=1648e26109f1b0080f10cfdf036a35bd; l=eBOAeWbnQVNjAnEiBO5ahurza77OoIOXfsPzaNbMiIncC6h5sl9T-s-QLs0fjpxRR8XcGa1y46aJ5OeTpFku7PDZndLHRKNvB5M2QK1..; SID=BQAAAOjZfK8H6gAEAADksZMUmFdTxXebSKZ0DPGPhsFsOEFUHINpr35H; ZDS=1.0|1591590064|4tVxKA+lM7OVQXrnFb7860blEdyVkhTqL2/ILrIBpCoZ+NGwcb+MQPEng2Pdl4n0uZy8vR6Not5CvRpOq+laPQ==; x5check_ele=3%2BtWw4b0faWQmcwhUT8MdKE2lK4VEFcF9XjanD%2F2OjQ%3D; isg=BOfnwyB_dSO5lfGucEmX-nE6dhuxbLtOFe6JJrlUA3asqAdqwTxLniXpzq42QJPG',
# '__wpkreporterwid_ "85cdaa51-03f6-4449-3176-007ebf5233c8"_bl_uid "enkjsbgq5g310L9LCzzvq6w89308"_utrace "fcba1c4cc41e0195a0e5d89a76422d9c_2020-06-07"cna "SfZXF8siVlECAXGKT/iUK9L"isg"BMTEtOivRmu4xPInVRmEG5YilkK23ehHhiF2qN5lFA9SCWXTBu_A1wDnSSHRCiCf"l"eBSsZzNuQVTu3IcFKOfChurza779IObYuPzaNbMiOCP3595mX5WZvK8NLpCn1VnsF2R3yblDgJBbYspyUB5Lis8wJbMdF_hdTh."munb"2205036748408"perf_ssid "gukahawss1ifn1g0jz975jspmdb4hpcy_2020-06-07"SID "DAAAAABOlXFC7AAGbAA0r4UFKiwcU8iPpMkmEdaLO41vAdxb5lLG8qy4"t "b954103b2de58bd2949999eb3175e150"t_eleuc4 "id4=0@BAvuHCrrRj3bMwlc00qlU3R6lCX1cYbBFPuvw=="tzyy "1c48c11327fa016b2049a00464044e7d"ubt_ssid "oeqt8ber6eopq43fqjz2tv50fimbyyva_2020-06-07"USERID"1318416706"ut_ubt_ssid "35sedh8xdv585tnviocccqgpekjr4xxv_2020-06-07"UTUSER "1318416706"x5check_ele "Ym75BDvxtuUyVonRhpbJrA0kzdKrA8OLjcx2hx+RWnE="',
]
# Proxy host pool, indexed in lockstep with `cookie` (group_ID selects the
# same slot in both lists).  All entries except one are currently disabled.
host = [
    # '219.137.143.121',
    # '113.116.94.241',
    # '14.152.19.82',
    # '113.64.153.227',
    # # '4',
    # # '5',
    # '120.229.56.77',
    # # '7',
    # '27.187.78.30',
    # '120.230.181.156',
    # '113.83.33.116',
    # '111.19.36.155',
    # # '12',
    # '113.92.73.169',
    # # '14',
    # '47.92.62.13',
    '110.185.157.76',
    # '113.138.76.147',
]
def _prompt_new_cookie(failed_group_ID):
    """Report a failed page and ask the operator for a replacement cookie.

    Returns the new cookie string, or None when the operator enters -1
    to stop the crawl.  (Reads the module-level `offset` for the message.)
    """
    print('第{}页商铺ID信息获取失败,请更换组员ID[{}]的cookie!'.format(offset, failed_group_ID))
    result = str(input('请输入新的cookie:'))
    return result if result != str(-1) else None

# Crawl ele.me restaurant listings page by page, appending each restaurant
# record to Shop.json and collecting its id in `shop_id`.
while True:
    try:
        group_ID = offset % len(cookie)  # which crew member's cookie/proxy slot to use
        headers = {
            'accept': 'application/json, text/plain, */*',
            'accept-encoding': 'gzip, deflate, br',
            'accept-language': 'zh-CN,zh;q=0.9',
            'cookie': cookie[group_ID],
            'sec-fetch-dest': 'empty',
            'sec-fetch-mode': 'cors',
            'sec-fetch-site': 'same-origin',
            'user-agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Mobile Safari/537.36',
            'x-shard': 'loc=116.322056,39.89491'
        }
        url = 'https://h5.ele.me/restapi/shopping/v3/restaurants?latitude=39.89491&longitude=116.322056&offset={}&limit=18'.format(offset)
        # proxies = get_ip()
        proxyMeta = "http://%(host)s:%(port)s" % {
            # BUG FIX: this read `hsot[group_ID]` (a NameError); the proxy
            # pool list is named `host`.
            "host" : host[group_ID],
            "port" : '8888',
        }
        proxies = {
            "http" : proxyMeta,
        }
        time.sleep(random.randint(2,4))  # throttle so traffic looks less bot-like
        text = requests.get(url,headers=headers,proxies=proxies).text
        # release_ip()
        text_json = json.loads(text)
        for i in range(len(text_json['items'])):
            # One JSON object per line, appended to the output file.
            str_s = json.dumps(text_json['items'][i]['restaurant'],ensure_ascii=False)+'\n'
            with open('Shop.json','a',encoding='utf-8')as f:
                f.write(str_s)
            shop_id.append(text_json['items'][i]['restaurant']['id'])
        print('第{}页商铺ID获取成功...'.format(offset))
        offset+=1
    except Exception as e:
        # NOTE(review): when the request itself raises, `text` still holds the
        # previous page's body (or is undefined on the very first iteration,
        # which the shop_id==[] branch happens to shield) — confirm intended.
        if shop_id==[]:
            new_cookie = _prompt_new_cookie(group_ID)
            if new_cookie is None:
                break
            cookie[group_ID] = new_cookie
            continue
        elif '请登录'in text:
            new_cookie = _prompt_new_cookie(group_ID)
            if new_cookie is None:
                break
            cookie[group_ID] = new_cookie
            continue
        elif '"rgv587_flag": "sm"'in text:
            new_cookie = _prompt_new_cookie(group_ID)
            if new_cookie is None:
                break
            cookie[group_ID] = new_cookie
            continue
        else:
            print(text)
            print('该经纬度店铺id获取完成!')
            break
print(shop_id)
19,928 | 6e83b83d2357ab3785b0d3098cb022f81d88556e |
# Minimal settings stub: database connection info plus an import-time marker.
DATABASES = {'host': 'localhost'}

# Side effect on import: announce that this settings module was loaded.
print('in proj/settings.py')
19,929 | 202c0ce60da7084b9edcfce3470b6f4bd6b6b966 | #!/usr/bin/env python2
# From: http://codegist.net/search/kivy-datagrid/2
'''
Under WTFPL - http://www.wtfpl.net
'''
import kivy
kivy.require('1.0.7')
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.uix.gridlayout import GridLayout
from kivy.animation import Animation
from kivy.core import window
# Window geometry: SIZE-pixel cells in a 10x16 (portrait) layout -> 300x480.
SIZE = 30
FORMAT = (10, 16)
window.Window.size = [SIZE * i for i in FORMAT]
# Dark grey window background.
window.Window.clearcolor = [0.2, 0.2, 0.2, 1]
class lightButton(Button):
    """Button variant with a forced white background and a custom border slice."""

    def __init__(self, *args, **kwargs):
        # Always override the background colour with plain white,
        # regardless of what the caller passed.
        kwargs['background_color'] = [1, 1, 1, 1]
        super(lightButton, self).__init__(*args, **kwargs)
        self.border = [16, 16, 30, 16]
class lightLabel(Label):
    """Plain Label subclass; kept for naming symmetry with lightButton."""

    def __init__(self, *args, **kwargs):
        super(lightLabel, self).__init__(*args, **kwargs)
class TestApp(App):
    # Demo app: a label whose colour fades in each time the button is pressed.
    # (Python 2 file — note the bare print statement in compl().)

    def compl(self, *args, **kws):
        # Animation on_complete callback: just log that the fade finished.
        print "complete:animation"

    def animate_label(self, instance):
        # Reset the label to fully transparent, then animate alpha up to 0.7.
        self.main_label.color = [1, 1, 1, 0]
        anim = Animation(color=[1, 1, 1, 0.7])
        anim.bind(on_complete=self.compl)
        anim.start(self.main_label)

    def build(self):
        # Two-row grid: the animated label on top, the trigger button below.
        self.main_grid = GridLayout(rows=2, cols=1, padding=50)
        print("Needs ./Aliquam.ttf")
        self.main_label = lightLabel(text='Text example test', color=[1, 1, 1, 0.0],
                font_name='./Aliquam.ttf', font_size='30px')
        self.main_button = lightButton(text='ok', on_press=self.animate_label)
        self.main_grid.add_widget(self.main_label)
        self.main_grid.add_widget(self.main_button)
        return self.main_grid
if __name__ == '__main__':
    # Start the Kivy event loop.
    TestApp().run()
|
19,930 | 0cda47481d4e0801a4a9b94a3f3dd0b9c8054733 | #!/usr/bin/env python3
from sys import stdin, setrecursionlimit
from decimal import Decimal
def main():
    """Read two decimal literals from stdin and print the integer part of
    their product.

    The operands are kept as strings and handed to Decimal so the exact
    decimal text is preserved (parsing through float would lose precision);
    int() then truncates the product toward zero.
    """
    input = stdin.readline
    # Idiom fix: split() already yields str tokens, so the old
    # `map(str, ...)` wrapper was a no-op and has been removed.
    a, b = input().split()
    a = Decimal(a)
    b = Decimal(b)
    print(int(a * b))
if __name__ == "__main__":
    # Recursion-limit raise is precautionary; main() itself is iterative.
    setrecursionlimit(10000)
    main()
|
19,931 | 170e9808361f453458472a8d124bc4a8052d0a02 | import re, requests
def escape(text):
    """HTML-escape *text* and turn newlines into <br /> (Python 2 only).

    Non-ASCII characters become XML character references via the
    'xmlcharrefreplace' error handler.  NOTE(review): this relies on
    Python 2 str semantics — under Python 3 .encode() returns bytes and
    the str-argument replace() would fail, and cgi.escape was removed.
    """
    import cgi
    return cgi.escape(text).encode('ascii', 'xmlcharrefreplace').replace('\n', '<br />')
# Extracts the contents of the first <title> element in an HTML page.
titleRE = re.compile('<title>([^<]+)</title>', re.IGNORECASE)

def massageLink(link):
    """Return an HTML anchor for *link*, annotated with availability info.

    On success the page's <title> becomes the anchor text; otherwise the
    raw URL is shown, with a red warning span for unreachable links or
    HTTP error statuses.
    """
    fallback = '<a href="%s"><code>%s</code></a>' % (link, link)
    try:
        response = requests.get(link, verify = False)
    except requests.exceptions.ConnectionError:
        return fallback + ' <span style="color: rgb(255,0,0);">(Link did not respond)</span>'
    except requests.exceptions.MissingSchema:
        return '%s (Not a valid link)' % escape(link)
    if response.status_code >= 400:
        return fallback + ' <span style="color: rgb(255,0,0);">(Link returned status %d)</span>' % response.status_code
    match = titleRE.search(response.text)
    if match:
        title = match.group(1).strip().replace('\n', ' ')
        if title:
            return '<a href="%s">%s</a>' % (link, escape(title))
    return fallback
# Default Accept header for BiodiversityCatalogue REST calls
# (rebound later, before the Confluence upload, to also send Content-Type).
requestJSON = {'Accept': 'application/json'}

# Placeholder type; never instantiated or referenced below.
class Service:
    pass
def getAll(requestLink):
    """Fetch every result from a paginated BiodiversityCatalogue listing.

    The endpoint wraps its payload in a single top-level key (e.g.
    ``{"services": {...}}``); each page reports the total page count in
    ``pages`` and its items in ``results``.  Returns the concatenated
    list of all items.  Raises RuntimeError when the response does not
    have exactly one top-level key.
    """
    page = 0
    totalPages = 1
    results = []
    while page < totalPages:
        params = {
            'page': page + 1, # URL page numbers are 1-based
            'per_page': 50
        }
        request = requests.get(requestLink, params=params, headers=requestJSON)
        result = request.json()
        if len(result) != 1:
            raise RuntimeError('expected single top-level result')
        # BUG FIX: indexing .keys() (`keys[0]`) fails on Python 3, where
        # keys() returns a view; next(iter(...)) works on both 2 and 3.
        key = next(iter(result))
        pageResults = result[key]
        totalPages = pageResults['pages']
        results.extend(pageResults['results'])
        page += 1
    return results
# Fetch every category and build the lookup tables used below:
#   categoryName:        resource URL -> display name
#   categoryParent:      resource URL -> parent resource URL (None for top level)
#   categorisedServices: category name -> {service name: html fragment}
categories = getAll('https://www.biodiversitycatalogue.org/categories')
categoryParent = {}
categoryName = {}
categorisedServices = {}
uncategorisedServices = {}
for category in categories:
    link = category['resource']
    response = requests.get(link, headers=requestJSON)
    cat = response.json()['category']
    catName = cat['name']
    categoryName[link] = catName
    # 'broader' links to the parent category when this one is nested.
    parent = cat.get('broader')
    if parent:
        categoryParent[link] = parent['resource']
    else:
        categoryParent[link] = None
    categorisedServices[catName] = {}
# Build one HTML fragment per service (submitter line, categories,
# description, documentation links, provider, contacts, publications),
# then file it under each of its top-level categories.
results = getAll('https://www.biodiversitycatalogue.org/services')
for serviceJson in results:
    print serviceJson
    content = ''
    serviceLink = serviceJson['resource']
    name = serviceJson['name']
    # Resolve the submitter into "Name, Affiliation (email)" as available.
    submitterLink = serviceJson['submitter']
    response = requests.get(submitterLink, headers=requestJSON)
    json = response.json()
    print json
    user = json['user']
    submitter = user['name']
    org = user['affiliation']
    if org:
        submitter += ', ' + org
    email = user['public_email']
    if email:
        submitter += ' (%s)' % email
    # The numeric service id is the last path segment of the resource URL.
    serviceId = serviceLink[serviceLink.rfind('/') + 1:]
    content += '<p><small>Submitted as <a href="%s">BiodiversityCatalogue service #%s</a> by %s</small></p>\n' % (serviceLink, serviceId, submitter)
    # Category annotations carry the category resource URL as their value.
    annotations = getAll(serviceLink + '/annotations')
    categoryIds = []
    for annotation in annotations:
        if annotation['attribute']['identifier'] == 'http://biodiversitycatalogue.org/attribute/category':
            categoryIds.append(annotation['value']['resource'])
    if categoryIds:
        content += '<p>Categories: %s</p>\n' % ', '.join([escape(categoryName[categoryId]) for categoryId in categoryIds])
    description = serviceJson['description']
    if description is None:
        description = '<p><span style="color: rgb(255,0,0);">No description provided</span></p>'
    else:
        description = '<p>' + escape(description.strip()) + '</p>'
    content += '%s\n' % description
    # The /summary endpoint aggregates docs, providers, contacts, publications.
    response = requests.get(serviceLink + '/summary', headers=requestJSON)
    json = response.json()
    print json
    service = json['service']
    summary = service['summary']
    docLinks = summary['documentation_urls']
    if docLinks:
        for docLink in docLinks:
            content += '<p>Documentation: %s</p>\n' % massageLink(docLink)
            # BioVeL wiki links must use the short /x/ form to stay stable.
            if docLink.startswith('http://wiki.biovel.eu') or docLink.startswith('https://wiki.biovel.eu'):
                if docLink.startswith('http://wiki.biovel.eu/x/') or docLink.startswith('https://wiki.biovel.eu/x/'):
                    pass
                else:
                    content += '<p><span style="color: rgb(255,0,0);">Links to BioVeL Wiki should use short links</span></p>\n'
    else:
        content += '<p><span style="color: rgb(255,0,0);">No link to documentation</span></p>\n'
    content += '<p>Service provider: %s</p>\n' % escape(', '.join([provider['service_provider']['name'] for provider in summary['providers']]))
    # content += '<p>Service protocol: %s</p>\n' % escape(', '.join(service['service_technology_types']))
    contacts = summary['contacts']
    if contacts:
        for contact in contacts:
            content += '<p>Contact: %s</p>\n' % escape(contact)
    else:
        content += '<p><span style="color: rgb(255,0,0);">No support contact listed</span></p>\n'
    publications = summary['publications']
    for publication in publications:
        content += '<p>Publication: %s</p>\n' % escape(publication)
    if categoryIds:
        # Walk each category up to its root and file the fragment under every
        # distinct top-level ancestor.
        tlc = set()
        for categoryId in categoryIds:
            parentId = categoryParent[categoryId]
            while parentId is not None:
                categoryId = parentId
                parentId = categoryParent[categoryId]
            tlc.add(categoryName[categoryId])
        for topLevel in tlc:
            categorisedServices[topLevel][name] = content
    else:
        uncategorisedServices[name] = content
# Assemble the final Confluence storage-format page: a two-column layout with
# the per-category service sections on the left and a TOC macro on the right.
final = """
<div class="contentLayout" data-atlassian-layout="{"name":"pagelayout-two-simple-right","columns":["large","aside"]}">
<div class="columnLayout twoColumns">
<div class="cell large">
<div class="innerCell">
"""
for catName, services in sorted(categorisedServices.items()):
    if services:
        final += '<hr/>\n\n<h2>%s</h2>\n<hr/>\n' % escape(catName)
        for name, service in sorted(services.items()):
            final += '\n<h3>%s</h3>\n' % escape(name)
            final += service
            final += '<hr/>\n'
if uncategorisedServices:
    final += '<hr/>\n\n<h2>Uncategorised Services</h2>\n<hr/>\n'
    for name, service in sorted(uncategorisedServices.items()):
        final += '\n<h3>%s</h3>\n' % escape(name)
        final += service
        final += '<hr/>\n'
final += """
</div>
</div>
<div class="cell aside">
<div class="innerCell">
<ac:macro ac:name="toc" />
</div>
</div>
</div>
</div>
"""
content = final
# Push the generated page to Confluence via its JSON-RPC API, but only when
# the content actually changed (avoids creating no-op page versions).
# Ensure the remote API has been enabled
# https://confluence.atlassian.com/display/DOC/Enabling+the+Remote+API
requestJSON = {
    'Accept': 'application/json',
    'Content-Type': 'application/json'
}
# config is expected to define confluenceHost, confluenceUser, confluencePass.
from config import *
confluenceBase = 'https://%s/rpc/json-rpc/confluenceservice-v2' % confluenceHost
# NOTE: this import shadows the earlier `json` locals used in the loop above.
import json
from requests.auth import HTTPBasicAuth
kw = dict(
    auth = HTTPBasicAuth(confluenceUser, confluencePass),
    verify = False, # not yet setup to verify the server certificate
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json'
    }
)
# getPage takes [spaceKey, pageTitle] and returns the current page record.
response = requests.post(confluenceBase + '/getPage',
        data=json.dumps(['BioVeL', 'Automatic Service Summary']), **kw)
print response.url
response.raise_for_status()
print response.text
page = response.json()
if page['content'] != content:
    # Although Confluence documentation states that additional arguments are
    # ignored, updates fail unless we use the bare minimum of required arguments.
    update = {
        'id': page['id'],
        'space': page['space'],
        'title': page['title'],
        'content': content,
        'version': page['version'],
        'parentId': page['parentId']
    }
    # NOTE(review): pageUpdateOptions is built but never sent — storePage
    # only receives `update`; confirm whether updatePage was intended.
    pageUpdateOptions = dict(
        versionComment = 'Triggered update',
        minorEdit = False
    )
    response = requests.post(confluenceBase + '/storePage',
            data=json.dumps([update]), **kw)
    print response.url
    response.raise_for_status()
    print response.text
19,932 | 53177c857843c84ba32d91720049df02be65fc07 | #!/usr/bin/python3
# coding=utf-8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import six
# Small demo DataFrame rendered by render_mpl_table below.
# NOTE(review): 'sleep hours' duplicates the calories values — looks like a
# copy-paste placeholder; confirm intended demo data.
df = pd.DataFrame()
df['date'] = ['2016-04-01', '2016-04-02', '2016-04-03']
df['calories'] = [2200, 2100, 1500]
df['sleep hours'] = [2200, 2100, 1500]
df['gym'] = [True, False, False]
def render_mpl_table(data, col_width=3.0, row_height=0.625, font_size=14,
                     header_color='#40466e', row_colors=('#f1f1f2', 'w'), edge_color='w',
                     bbox=(0, 0, 1, 1), header_columns=0,
                     ax=None, **kwargs):
    """Render a pandas DataFrame as a styled matplotlib table.

    data:           the DataFrame to draw (values + column labels).
    col_width/row_height: cell size in inches, used to size a new figure.
    header_color:   fill colour for the header row / leading columns.
    row_colors:     alternating fill colours for the body rows.
    bbox:           table bounding box in axes coordinates.
    header_columns: number of leading columns styled like the header.
    ax:             existing Axes to draw on; a new figure is created if None.
    kwargs:         forwarded to Axes.table().
    Returns the Axes containing the table.
    """
    # FIX: the list defaults (row_colors, bbox) were mutable default
    # arguments — replaced with tuples; behaviour is unchanged.
    if ax is None:
        # Size the figure to the table dimensions (+1 row for the header).
        size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])
        fig, ax = plt.subplots(figsize=size)
        ax.axis('off')
    mpl_table = ax.table(cellText=data.values, bbox=list(bbox), colLabels=data.columns, **kwargs)
    mpl_table.auto_set_font_size(False)
    mpl_table.set_fontsize(font_size)
    # Python 3 file: iterate the dict directly instead of six.iteritems.
    # Cell keys are (row, col); row 0 is the header row.
    for k, cell in mpl_table._cells.items():
        cell.set_edgecolor(edge_color)
        if k[0] == 0 or k[1] < header_columns:
            cell.set_text_props(weight='bold', color='w')
            cell.set_facecolor(header_color)
        else:
            cell.set_facecolor(row_colors[k[0] % len(row_colors)])
    return ax
def main(out_path='/home/gswyhq/Downloads/table.png'):
    """Render the demo DataFrame as a table and save it as a PNG.

    out_path: destination file; defaults to the original hard-coded
    location, so existing callers are unaffected (generalized from a
    hard-coded constant).
    """
    ax = render_mpl_table(df, header_columns=0, col_width=2.0)
    fig = ax.get_figure()
    # Binary mode: savefig writes raw PNG bytes into the open handle.
    with open(out_path, 'wb') as f:
        fig.savefig(f)

if __name__ == '__main__':
    main()
19,933 | 651978904fa5e7ea734e4cbc265b5df486f1b68f | from os import getpid
from time import time, sleep
from multiprocessing import Pool, Process
def modulus(z):
    """Print z**2 when z is divisible by the global `awal`, else z, tagged
    with the worker's process id; then sleep 1s to give the parallel
    variants measurable work.

    NOTE(review): `awal` is only assigned inside the __main__ guard below,
    so this relies on fork-style multiprocessing (children inherit module
    globals); under spawn it would raise NameError — confirm target OS.
    """
    if z % awal == 0:
        print (z**2," ID proses", getpid())
    else:
        print(z, "ID proses", getpid())
    # Artificial delay so sequential vs. parallel timings differ visibly.
    sleep(1)
if __name__ == '__main__':
    # Read the divisor (awal) and the bound (akhir); the workload is
    # modulus(1..akhir**2).
    print ("Masukkan angka awal anda : ")
    awal = int(input())
    print ("Masukkan angka batas anda : ")
    akhir = int(input())
    z = akhir * akhir

    # PEMROSESAN SEKUENSIAL — sequential baseline timing.
    print("\nPemrosesan Sekuensial")
    sekuensial_awal = time()
    for i in range(1, z+1):
        modulus(i)
    sekuensial_akhir= time()

    # PEMROSESAN PARALEL DENGAN multiprocessing.process
    # NOTE(review): this launches one Process per item (z processes at once);
    # fine for a demo, heavy for large z.
    print("\nPemrosesan Paralel dengan multiprocessing.process")
    kumpulan_proses = []
    process_awal = time()
    for i in range(1, z+1):
        p = Process(target=modulus, args=(i,))
        kumpulan_proses.append(p)
        p.start()
    # BUG FIX: the original loop called p.join() — joining only the
    # last-started process repeatedly — so earlier workers were never
    # waited on and the elapsed time below could be recorded while they
    # were still running.  Join each process in turn.
    for proc in kumpulan_proses:
        proc.join()
    process_akhir = time()

    # PEMROSESAN PARALEL DENGAN multiprocessing.pool
    print("\nPemrosesan Paralel dengan multiprocessing.pool")
    pool_awal = time()
    pool = Pool()
    pool.map(modulus, range(1,z+1))  # map() blocks until all tasks finish
    pool.close()
    pool_akhir = time()

    print("\nSekuensial", sekuensial_akhir-sekuensial_awal, "detik")
    print("multiprocessing.process", process_akhir-process_awal, "detik")
    print("multiprocessing.pool", pool_akhir-pool_awal, "detik")
|
19,934 | 4943768fcf603346b0ef0deae7f29b6600fa51da | import sys
# Redirect stdin to the local sample-input file (contest-style local testing).
sys.stdin = open("수열편집.txt", "r")
class Node:
    """Singly-linked list node: payload `data` plus a `next` reference."""
    def __init__(self, data, link = None):
        self.data = data
        self.next = link
class Linked_list:
    """Singly-linked list with a sentinel head node.

    `size` counts the sentinel too, so the list holds `size - 1` real
    elements.  All positional arguments (`pre`, `index`) are 0-based
    indices into the real elements; out-of-range edits are silent no-ops
    and an out-of-range search returns -1.
    """

    def __init__(self):
        self.head = Node('head')
        self.size = 1

    def insert(self, data):
        """Append *data* at the tail (walks the whole list: O(n))."""
        steps = self.size - 1
        node = self.head
        while steps:
            node = node.next
            steps -= 1
        node.next = Node(data, node.next)
        self.size += 1

    def print_list(self):
        """Print every element on one line, each followed by a space."""
        node = self.head.next
        while node is not None:
            print(node.data, end=' ')
            node = node.next
        print()

    def I_insert(self, pre, data):
        """Insert *data* so that it becomes element number *pre*."""
        node = self.head
        index = 0
        while index < self.size - 1:
            if index == pre:
                node.next = Node(data, node.next)
                self.size += 1
                return
            node = node.next
            index += 1

    def D_delete(self, pre):
        """Remove element number *pre* (no-op when out of range)."""
        node = self.head
        index = 0
        while index < self.size - 1:
            if index == pre:
                node.next = node.next.next
                self.size -= 1
                return
            node = node.next
            index += 1

    def C_change(self, pre, data):
        """Overwrite element number *pre* with *data*."""
        node = self.head
        index = 0
        while index < self.size - 1:
            if index == pre:
                node.next.data = data
                return
            node = node.next
            index += 1

    def search(self, index):
        """Return element number *index*, or -1 when out of range."""
        node = self.head.next
        pos = 0
        while pos < self.size - 1:
            if pos == index:
                return node.data
            node = node.next
            pos += 1
        return -1
# Driver for the "sequence editing" problem: for each test case read N
# initial values, apply M edit commands (I = insert, D = delete,
# C = change), then print the value found at index L.
T = int(input())
for test_case in range(1, T + 1):
    N, M, L = map(int,input().split())
    num = list(map(int,input().split()))
    test = Linked_list()
    for i in num:
        test.insert(i)
    # Each command line is: opcode position [value]
    for i in range(M):
        info = input().split()
        if info[0] == 'I':
            test.I_insert(int(info[1]),int(info[2]))
        if info[0] == 'D':
            test.D_delete(int(info[1]))
        if info[0] == 'C':
            test.C_change(int(info[1]),int(info[2]))
    print(f'#{test_case} {test.search(L)}')
|
19,935 | 3f6416b4a34973bd7493b2896525bc451c83ae9b | #!/usr/bin/python
import sys
# Track the running maxima of the two whitespace-separated integer columns
# read from stdin, then print them as "max_col1 max_col2".
max_len_line = 0
max_len_token = 0
for line in sys.stdin:
    # Each input line is expected to hold exactly two integers.
    token1, token2 = line.strip().split()
    token1 = int(token1)
    token2 = int(token2)
    if token1 > max_len_token:
        max_len_token = token1
    if token2 > max_len_line:
        max_len_line = token2
# NOTE: Python 2 print statement — this script predates Python 3.
print "{0} {1}".format(max_len_token, max_len_line)
|
19,936 | e6f0a037c1b0060833bb0cab7edf40700b8d185f | schema = {
"$schema": "http://json-schema.org/draft-04/schema#",
"type": "object",
"properties": {
"objects": {
"type": "object",
"properties": {
"global_ID": {
"type": "object",
"properties": {
"objectClass": {
"type": "string"
},
"globalID": {
"type": "string"
},
"rectangle": {
"type": "array",
"items": {
"type": "string"
}
},
"classDescription": {
"type": "string"
},
"shortName": {
"type": "string"
}
},
"required": [
"objectClass",
"globalID",
"rectangle",
"classDescription",
"shortName"
]
},
"questions": {
"qid": {
"type": "string"
},
"questionText": {
"type": "string"
},
"questionType": {
"type": "string"
},
"objectsInvolved": {
"type": "array",
"items": {
"type": "string"
}
},
"objectTypesInvolved": {
"type": "array",
"items": {
"type": "string"
}
},
}
}
}
}
} |
19,937 | 3ddbdc0c934eba33578cf2a28c04d7cc61676fb4 | from datetime import datetime
import numpy as np
import cv2
# Haar-cascade frontal-face detector shipped with OpenCV's data files.
faceCascade = cv2.CascadeClassifier('./resources/Cascades/haarcascade_frontalface_alt.xml')
cap = cv2.VideoCapture(0)  # default webcam
cap.set(3,1280) # set Width
cap.set(4,720) # set Height
def fr_light(ms, callback):
    """Watch the webcam and fire `callback(path_img, face_img)` for each face
    that has been continuously visible for at least `ms` milliseconds.

    `callback` receives a suggested file path and the grayscale face crop.
    Runs until ESC is pressed or the camera stops delivering frames.

    BUG FIXES vs. the original:
    - `detect_face`, `start` and `total_time` were read before ever being
      assigned when a face appeared on the very first frame (NameError).
    - `faces == ()` compared an ndarray to a tuple once faces were found;
      use an explicit length check instead.
    - `cap.read()` failures were ignored; a dead camera now ends the loop.
    """
    min_time = ms / 1000  # required continuous-detection time, in seconds
    count = 0
    detect_face = False   # was a face visible on the previous frame?
    start = None          # timestamp of the first frame of the current streak
    while True:
        ret, img = cap.read()
        if not ret:
            break  # camera gone — don't process a garbage frame
        #img = cv2.flip(img, -1) # flip camera
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = faceCascade.detectMultiScale(
            gray,
            scaleFactor=1.3,
            minNeighbors=5,
            minSize=(40, 40)
        )
        if len(faces) == 0:
            detect_face = False
        else:
            # Start the visibility stopwatch on the first frame of a streak.
            if not detect_face:
                detect_face = True
                start = datetime.now()
            end = datetime.now()
            total_time = end - start
            # Draw a rectangle around each face
            for (x, y, w, h) in faces:
                cv2.rectangle(img, (x, y), (x+w, y+h), (255, 0, 0), 2)
            for index in range(len(faces)):
                # Only invoke the callback once the face has been visible
                # long enough.  NOTE(review): (x, y, w, h) still refer to the
                # last face drawn above, as in the original — confirm whether
                # per-index crops were intended.
                if total_time.seconds >= min_time:
                    count += 1
                    path_img = ("dataset/User." + str(index) + '.' + str(end) + ".jpg")
                    face_img = gray[y:y+h, x:x+w]
                    callback(path_img, face_img)
        cv2.imshow('video', img)
        k = cv2.waitKey(30) & 0xff
        if k == 27: # press 'ESC' to quit
            break
    cap.release()
    cv2.destroyAllWindows()
|
19,938 | 21255dbdafee758ae5278b6a9d3ac8e11f400866 | #!/usr/bir/env python
from django.contrib import admin
from assault_app.models import Schools, Comment
class SchoolsAdmin(admin.ModelAdmin):
    """Admin configuration for Schools: enables the search box on `name`."""
    search_fields = ('name',)
admin.site.register(Schools, SchoolsAdmin)
class CommentAdmin(admin.ModelAdmin):
    """Admin configuration for Comment.

    BUG FIX: the original assigned `display_fields`, which is not a
    ModelAdmin option and was silently ignored by Django; the intended
    option for the change-list columns is `list_display`.
    """
    list_display = ["post", "author", "created"]
admin.site.register(Comment, CommentAdmin)
#class SchoolStoryFeedAdmin(admin.ModelAdmin):
# search_fields = ('title',)
#admin.site.register(SchoolStoryFeed, SchoolStoryFeedAdmin)
|
19,939 | c4738562d4e5febe5af0068b429374bf2c6f0767 | from django.conf.urls import url
from . import views
app_name = 'account'
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'), #first page -- index
url(r'^accountsetting/$', views.AccSetting, name='accountsetting'), #account setting -- access from index
url(r'^(?P<pk>[0-9]+)/$', views.DetailView.as_view(), name='detail'), #detail page -- access from index
url(r'^(?P<account_id>[0-9]+)/download/$', views.export_csv, name='download'),
url(r'^(?P<account_id>[0-9]+)/upload/$', views.upload_file, name='upload'),
url(r'^(?P<account_id>[0-9]+)/addlist/$', views.addlist, name='addlist'),
url(r'^(?P<account_id>[0-9]+)/listsuccess/$', views.listdb, name='listdb'),
#url(r'^(?P<account_id>[0-9]+)/deletelistdb/$', views.dellistdb, name='dellistdb'),
url(r'^(?P<pk>[0-9]+)/sellisttodel/$', views.SelDelList.as_view(), name='seldellist'),
url(r'^(?P<account_id>[0-9]+)/sellisttodel/deletelist/$', views.dellist, name='dellist'),
url(r'^(?P<account_id>[0-9]+)/showtotalmoney/$', views.caltotal, name='showtotal'),
] |
19,940 | 0372287e47abc3490eb881149976850296cf3d59 | __version__ = (1, 5, 8, 'final', 0)
__compatible_with__ = []
def get_current_version():
    """Resolve the human-readable version string for this package."""
    # Import locally (as the original did) to avoid import-time cycles.
    from geonode.version import get_version
    return get_version(__version__)
|
19,941 | d8669600cc1b226c91a32eb5b2e4019744200ea7 | #!/usr/bin/python3
""" Utility program to transfer mission data to an AWS bucket as long as we have been
supplied with the proper keys.
"""
import os
import configparser
import argparse
import boto3
from botocore.exceptions import NoCredentialsError
OUTBOUND = 'Processed'
def upload_to_aws(local_file, bucket, s3_file, access_key, secret_key):
    """Push one local file into an S3 bucket; return True on success.

    Failures (missing file, bad credentials) are reported to stdout and
    signalled with a False return value rather than an exception.
    """
    client = boto3.client(
        's3',
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
    )
    try:
        client.upload_file(local_file, bucket, s3_file)
    except FileNotFoundError:
        print(f'File {local_file} not found.')
        return False
    except NoCredentialsError:
        print('Credentials invalid or not available.')
        return False
    else:
        print(f'Upload of {local_file} to {bucket} as {s3_file} successful.')
        return True
def get_target_list(data_dir):
    """Return the names of all entries in `data_dir` (files to ship to AWS)."""
    return os.listdir(data_dir)
def main():
    """Driver: parse CLI args, read AWS credentials, and upload every file
    found under Processed/<mission>/*_TIF to the target account's bucket.
    """
    arg_parser = argparse.ArgumentParser(description='Transfer data files to AWS S3 bucket.')
    arg_parser.add_argument('mission', action='store', help='Mission data to be transferred.')
    arg_parser.add_argument('target', action='store', help='Which AWS account to receive data.')
    args = arg_parser.parse_args()
    mission = args.mission
    target = args.target.upper()
    # Bucket name and credentials live in aws.ini, one section per target.
    config = configparser.ConfigParser()
    config.read('aws.ini')
    bucket = config[target]['bucket']
    access_key_id = config[target]['access_key']
    secret_key_id = config[target]['secret_key']
    source_dir = os.path.join(OUTBOUND, mission)
    dir_list = get_target_list(source_dir)
    for directory in dir_list:
        if directory.endswith('_TIF'):
            file_dir = os.path.join(source_dir, directory)
            files = get_target_list(file_dir)
            for file in files:
                # BUG FIX: the original passed `file_dir + file`, gluing the
                # directory and filename together without a path separator
                # (e.g. 'Processed/m/x_TIFimg.tif'), so every upload failed
                # with FileNotFoundError.  Join the path properly.
                upload_to_aws(os.path.join(file_dir, file), bucket,
                              mission + f'/{directory}/{file}',
                              access_key_id, secret_key_id)
if __name__ == '__main__':
    main()
|
19,942 | d588841fa536c67667642ab5ac4b1396075fa17f | from django.urls import path
from django.conf.urls import include
from . import views
urlpatterns = [
path('', views.index),
path('sug/add', views.SuggestionCreate.as_view()),
] |
19,943 | 501a808a8fe2973d63011104bda14150524584fb | from .generator import UrlGenerator
class StartUrls():
    """Callable identity stage: hands back the spec it receives, unchanged."""

    def __call__(self, spec):
        # No transformation is applied at this pipeline stage.
        return spec
|
19,944 | 89405a8acf7745dd990768c082dc057fc9159a96 | """
author : Ali Emre SAVAS
Link : https://www.hackerrank.com/challenges/30-linked-list-deletion/problem
"""
class Node:
    """One element of a singly linked list."""
    def __init__(self,data):
        self.data = data
        self.next = None  # linked in later by Solution.insert
class Solution:
    def insert(self, head, data):
        """Append `data` at the tail of the list rooted at `head`;
        return the (possibly new) head."""
        p = Node(data)
        if head == None:
            head = p
        elif head.next == None:
            head.next = p
        else:
            start = head
            while (start.next != None):
                start = start.next
            start.next = p
        return head

    def display(self, head):
        """Print the list's values space-separated (no trailing newline)."""
        current = head
        while current:
            print(current.data, end=' ')
            current = current.next

    def removeDuplicates(self, head):
        """Collapse runs of equal adjacent values (the list arrives sorted).

        BUG FIX: the original two-cursor version (a) truncated the whole
        tail with `currentNode.next = None` whenever it saw a duplicate, and
        (b) contained the typo `currentNodenext = counterNode` (missing dot),
        which assigned a dead local instead of relinking the nodes.  The
        standard single-cursor dedupe below replaces it.
        """
        current = head
        while current and current.next:
            if current.data == current.next.data:
                # Same value twice in a row: splice out the duplicate node.
                current.next = current.next.next
            else:
                current = current.next
        return head
# Driver: read T values, build the list, strip adjacent duplicates, print it.
mylist= Solution()
T=int(input())
head=None
for i in range(T):
    data=int(input())
    head=mylist.insert(head,data)
head=mylist.removeDuplicates(head)
mylist.display(head); # trailing semicolon kept from the original
19,945 | 28959665398908782267a41d04fda268be414456 | import random
from matplotlib.pyplot import hist, plot, show
def ChangeInBalance(initial_balance, min_rate=0.0, max_rate=0.06):
    """Return one year's interest on `initial_balance`.

    The annual rate is drawn uniformly from [min_rate, max_rate]; the
    defaults (0%–6%) reproduce the original hard-coded bounds, so existing
    callers are unaffected while other rate ranges are now possible.
    """
    rate = random.uniform(min_rate, max_rate)
    return initial_balance * rate
#Set initial conditions
number_sims = 1000    # number of Monte Carlo trials
final_balances = []   # ending balance of each trial
balance = 1000        # NOTE(review): dead store — re-initialized inside the loop
number_year= 10       # investment horizon in years
for i in range(number_sims):
    #Set initial conditions
    time = 0
    balance = 1000
    while (time < number_year):
        #Increase balance and time
        balance += ChangeInBalance(balance)
        time += 1
    final_balances.append(balance)
#Output the simulation results
hist(final_balances, bins=20)
show()
|
19,946 | def20d1e6f203c50e409f99bec05c3d5707d62ea | # 63. Unique Paths II
# ttungl@gmail.com
# Follow up for "Unique Paths":
# Now consider if some obstacles are added to the grids. How many unique paths would there be?
# An obstacle and empty space is marked as 1 and 0 respectively in the grid.
# For example,
# There is one obstacle in the middle of a 3x3 grid as illustrated below.
# [
# [0,0,0],
# [0,1,0],
# [0,0,0]
# ]
# The total number of unique paths is 2.
# Note: m and n will be at most 100.
class Solution(object):
    def uniquePathsWithObstacles(self, obstacleGrid):
        """
        :type obstacleGrid: List[List[int]]
        :rtype: int

        Count monotone (right/down) lattice paths from the top-left to the
        bottom-right corner; cells marked 1 are impassable obstacles.

        Single-row dynamic programming: current[j] holds the number of paths
        reaching column j of the row being processed.  Time O(m*n), space
        O(n).

        Fixes vs. the original: the first implementation returned early,
        leaving a second, unreachable implementation after its `return`
        (dead code removed); it also mutated the caller's grid in place and
        raised IndexError on an empty grid — this version does neither.
        """
        if not obstacleGrid or not obstacleGrid[0]:
            return 0
        rows, cols = len(obstacleGrid), len(obstacleGrid[0])
        current = [0] * cols
        # Start cell contributes one path unless it is itself an obstacle.
        current[0] = 1 - obstacleGrid[0][0]
        for j in range(1, cols):
            # First row: reachable only from the left, and only if clear.
            current[j] = current[j - 1] * (1 - obstacleGrid[0][j])
        for i in range(1, rows):
            # First column: reachable only from above.
            current[0] *= 1 - obstacleGrid[i][0]
            for j in range(1, cols):
                # Paths from the left (current[j-1]) plus from above
                # (previous value of current[j]); zeroed on an obstacle.
                current[j] = (current[j - 1] + current[j]) * (1 - obstacleGrid[i][j])
        return current[-1]
|
19,947 | d882146f748263fc7fad62ef911a4645a47ba712 | # Created by Truong Phuc Anh (14520040@gm.uit.edu.vn)
# Kmeans cluster on hand-written digits data
import numpy
import matplotlib.pyplot as plt
from sklearn import datasets
from comparison import cluster_and_compare
# 1. Loading data set
digits = datasets.load_digits()
data = digits.data
n_samples, n_features = data.shape
n_clusters = len(numpy.unique(digits.target))
labels_true = digits.target
print ('n_samples : %i' % n_samples)
print ('n_cluster : %i' % n_clusters)
print ('feature space : %i' % n_features)
# 2. Clustering and evaluating
cluster_and_compare(n_clusters, data, labels_true) |
19,948 | 739b314bc9085c94a24b7e1862e21b7f61fdbe37 | # _*_ coding: utf-8 _*_
"""
@author: E.T
@file: api.py
@time: 2020/5/27 2:47 下午
@desc:
"""
from . import impl
from flask import request, Blueprint
from flask.views import MethodView
from metis.utils.helpers import api_stand_response, register_view
class UserProfile(MethodView):
    """Read-only endpoint describing a user's profile.

    NOTE(review): the payload below maps keys to *type objects* (str, int)
    rather than actual values — it reads as a response-schema placeholder
    awaiting a real implementation; confirm before relying on this shape.
    """
    def get(self):
        skill_set = [
            "str_items"
        ]
        language = ["language_items"]
        content = dict(
            id=str,
            name=str,
            email=str,
            metainfo=str,
            balance=str,
            level=int,
            skillset=skill_set,
            language=language
        )
        # api_stand_response is a project helper that wraps `content` in the
        # service's standard response envelope.
        return api_stand_response(content=content)
class UserEngagements(MethodView):
def get(self):
pass
def post(self):
pass
class UserStakes(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementCommit(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementFinish(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementReview(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementDispute(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementDismissDispute(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementArbitrate(MethodView):
def get(self):
pass
def post(self):
pass
class UserRegister(MethodView):
def get(self):
pass
def post(self):
pass
class UserExit(MethodView):
def get(self):
pass
def post(self):
pass
class Engagements(MethodView):
def get(self):
pass
def post(self):
pass
class Stakes(MethodView):
def get(self):
pass
def post(self):
pass
class StakeCommit(MethodView):
def get(self):
pass
def post(self):
pass
class StakeTerminate(MethodView):
def get(self):
pass
def post(self):
pass
class StakeWithdraw(MethodView):
def get(self):
pass
def post(self):
pass
class StakeDispute(MethodView):
def get(self):
pass
def post(self):
pass
class StakeDismissDispute(MethodView):
def get(self):
pass
def post(self):
pass
class StakeArbitrate(MethodView):
def get(self):
pass
def post(self):
pass
class Assets(MethodView):
def get(self):
pass
def post(self):
pass
class UserAssets(MethodView):
def get(self):
pass
def post(self):
pass
class AssetRequest(MethodView):
def get(self):
pass
def post(self):
pass
class Asset(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementClose(MethodView):
def get(self):
pass
def post(self):
pass
class EngagementRequestArbitration(MethodView):
def get(self):
pass
def post(self):
pass
class StakeRequestArbitration(MethodView):
def get(self):
pass
def post(self):
pass
@impl(tryfirst=True)
def load_blueprints(app):
    """Plugin hook: mount the v1 API blueprint onto the Flask app.

    Only the user-profile and user-engagements views are wired up; the
    remaining MethodView stubs above are not yet routed.
    """
    api = Blueprint("api", __name__)
    register_view(
        api,
        routes=['/userprofile'],
        view_func=UserProfile.as_view("user_profile")
    )
    register_view(
        api,
        routes=['/userengagements'],
        view_func=UserEngagements.as_view("user_engagements")
    )
    app.register_blueprint(api, url_prefix="/api/v1")
|
19,949 | df391a25f5478418d5a9f933eb4c61351e5eea70 | /Users/sushanthkurdekar/anaconda3/lib/python3.7/rlcompleter.py |
19,950 | 3e1b0305e50f53f7314c3f29bbeece0911a63270 | from .StartingApp import StartingApp |
19,951 | 217d66a0afb8b225938b2a3d882a91d3f2d02621 | def countOnes(n):
ans = 0
while n > 0:
n &= n - 1
ans += 1
return ans
def answer(a, b):
    """Hamming distance: number of bit positions at which a and b differ."""
    return countOnes(a ^ b)
|
19,952 | d70c509920d34d1824042d98d5c01cc0d5d0c186 | from django.contrib.auth.models import User
from django.db.models import Q, FilteredRelation, F, Sum, Avg, Subquery, Prefetch
from orm_practice_app.models import Company, Product, Order, OrderedProduct
def asdf():
Product.objects.filter(name='product_name3', product_owned_company__name='company_name20').select_related(
'product_owned_company')
Company.objects.prefetch_related('company_set').filter(product__name='product_anme8')
Product.objects.filter(product_owned_company__name='company_name133')
Order.objects.filter(order_owner__is_active=True)
User.objects.filter(order__descriptions__contains='asdf')
OrderedProduct.objects.filter(related_order__order_owner__user_permissions__isnull=True)
OrderedProduct.objects.filter(product_cnt=30).prefetch_related('related_order')
# 1-1 이 쿼리는 select_related()사용하지 않았어도 OrderedProduct는 order의 pk를 들고있기 때문에 해당 order(related_order)에 대해 inner join하면 쉽게 order를 들고올수있다
# filter related_order에 대한 조건문이
OrderedProduct.objects.filter(id=1, related_order__descriptions='sdfsdf') # .select_related('related_order')
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_order" ON ("orm_practice_app_orderedproduct"."related_order_id" = "orm_practice_app_order"."id")
WHERE ("orm_practice_app_orderedproduct"."id" = 1 AND "orm_practice_app_order"."descriptions" = 'sdfsdf')
"""
# 1-2 이 쿼리는 prefetch_related를 사용했음에도 불구하고 QuerySet 평가시 추가적인 쿼리가 불필요하다 판단하여 inner join 전략을 택한다. 이경우는 .prefetch_related('related_order') 이 로직은 무시된다
OrderedProduct.objects.filter(Q(product_cnt=30) & Q(related_order__descriptions='asdf')).prefetch_related('related_order')
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_order" ON ("orm_practice_app_orderedproduct"."related_order_id" = "orm_practice_app_order"."id")
WHERE ("orm_practice_app_orderedproduct"."product_cnt" = 30 AND "orm_practice_app_order"."descriptions" = 'asdf')
"""
# 1-3 이 쿼리는 의도한대로 +1개의 쿼리로 related_order를 조회한다 filter절에서 related_order에 대해 별다른 내용이 없어서 반항없이 개발자의 의도대로 따라준다.
OrderedProduct.objects.filter(product_cnt=30).prefetch_related('related_order')
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
WHERE "orm_practice_app_orderedproduct"."product_cnt" = 30 LIMIT 21;
SELECT *
FROM "orm_practice_app_order"
WHERE "orm_practice_app_order"."id" IN (135, 776, 404, 535, 151, 280, 666, 155, 29, 675, 548, 298, 45, 48, 177, 306, 336, 729, 605, 226, 739);
"""
# 이러면 prefetch_related()를 붙혀준 의도대로 +1 쿼리로 'related_order'를 조회한다 그러나 완벽히 의도한 쿼리가 생성되지 않는다.
OrderedProduct.objects.filter(Q(product_cnt=30) | Q(related_order__descriptions='asdf')).prefetch_related('related_order')
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_order" ON ("orm_practice_app_orderedproduct"."related_order_id" = "orm_practice_app_order"."id")
WHERE ("orm_practice_app_orderedproduct"."product_cnt" = 30 OR "orm_practice_app_order"."descriptions" = 'asdf');
SELECT *
FROM "orm_practice_app_order"
WHERE "orm_practice_app_order"."id" IN (135, 776, 404, 535, 151, 280, 666, 155, 29, 675, 548, 298, 45, 48, 177, 306, 336, 729, 605, 226, 739);
"""
# prefetch_related()로 추가되는 쿼리에 조건을 걸어주고 싶다면 Prefetch()를 사용해야한다
OrderedProduct.objects.filter(Q(product_cnt=30)).prefetch_related(Prefetch('related_order', queryset=Order.objects.filter(descriptions='asdf')))
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
WHERE "orm_practice_app_orderedproduct"."product_cnt" = 30 ;
SELECT *
FROM "orm_practice_app_order"
WHERE ("orm_practice_app_order"."descriptions" = 'asdf'
AND "orm_practice_app_order"."id" IN (515, 644, 135, 391, 267, 526, 529, 660, 21, 663, 280, 422, 47, 707, 336, 593, 98, 228, 486, 374, 379));
"""
# 앞 쿼리들의 결과에서도 봤듯이 OrderProduct->Order 참조에 관련된 쿼리는 정방향 참조이기때문에 충분히 inner join 전략을 택할수 있다
# 그래서 앞에서 prefetch_related()를 붙이지 않거나 prefetch_related를 붙이더라도 +1 query를 만들지 않고 Django QuerySet은 최대한 inner join전략을 택하려고 노력한다.
OrderedProduct.objects.filter(Q(product_cnt=30) & Q(related_order__descriptions='asdf')).select_related(
'related_order')
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_order" ON ("orm_practice_app_orderedproduct"."related_order_id" = "orm_practice_app_order"."id")
WHERE ("orm_practice_app_orderedproduct"."product_cnt" = 30 AND "orm_practice_app_order"."descriptions" = 'asdf')
"""
# 2-1 이러면 대참사가 발생한다. foransdmf 이것을 N+1 Select Problem 이라고 한다.
companys = Company.objects.filter(name__startswith='company_name')
for company in companys:
print(company.product_set[0])
"""
SELECT * FROM "orm_practice_app_company" WHERE "orm_practice_app_company"."name"::text LIKE 'company\_name%';
SELECT * FROM "orm_practice_app_product" WHERE "orm_practice_app_product"."product_owned_company_id" = 301;
SELECT * FROM "orm_practice_app_product" WHERE "orm_practice_app_product"."product_owned_company_id" = 302;
SELECT * FROM "orm_practice_app_product" WHERE "orm_practice_app_product"."product_owned_company_id" = 303;
SELECT * FROM "orm_practice_app_product" WHERE "orm_practice_app_product"."product_owned_company_id" = 304;
SELECT * FROM "orm_practice_app_product" WHERE "orm_practice_app_product"."product_owned_company_id" = 305;
SELECT * FROM "orm_practice_app_product" WHERE "orm_practice_app_product"."product_owned_company_id" = 306;
"""
# 2-2 이러면 딱 2개의 쿼리만 발생한다 prefetch_related를 가장 적절하게 활용한 좋은 예제이다.
# prefetch_related()는 역참조해야 하는 상황에서 아래와 같은 N+1문제를 방지한다.
# 단순 one table join과 같은 상황에서는 django orm이 최대한 inner join를 우선적으로 고민하고 불가능하면 left outer join로
companys = Company.objects.filter(name__startswith='company_name').prefetch_related('product_set')
for company in companys:
print(company.product_set[0])
# 3-1 product_owned_company필드에 null=True 옵션이 있다 이런경우는 outer join
Product.objects.filter(price__gt=24000).select_related('product_owned_company')
"""
SELECT *
FROM "orm_practice_app_product"
LEFT OUTER JOIN "orm_practice_app_company" ON ("orm_practice_app_product"."product_owned_company_id" = "orm_practice_app_company"."id")
WHERE "orm_practice_app_product"."price" > 24000
"""
Product.objects.filter(price__gt=24000, product_owned_company__isnull=False).select_related('product_owned_company')
Product.objects.filter(price__gt=24000, product_owned_company__isnull=False).select_related('product_owned_company')
"""
SELECT *
FROM "orm_practice_app_product"
INNER JOIN "orm_practice_app_company" ON ("orm_practice_app_product"."product_owned_company_id" = "orm_practice_app_company"."id")
WHERE ("orm_practice_app_product"."price" > 24000 AND "orm_practice_app_product"."product_owned_company_id" IS NOT NULL);
"""
# 4-1 Join Table에 제약 주기 FilteredRelation()는 Django2.0부터 가능
Product.objects.select_related('this_is_join_table_name').annotate(this_is_join_table_name=FilteredRelation('product_owned_company',
condition=Q(
product_owned_company__name='company_name34'),),
).filter(this_is_join_table_name__isnull=False)
"""
SELECT "orm_practice_app_product"."id", "orm_practice_app_product"."name", "orm_practice_app_product"."price", "orm_practice_app_product"."product_owned_company_id"
FROM "orm_practice_app_product"
INNER JOIN "orm_practice_app_company" this_is_join_table_name
ON ("orm_practice_app_product"."product_owned_company_id" = this_is_join_table_name."id" AND ( this_is_join_table_name."name" = 'company_name34')
)
WHERE this_is_join_table_name."id" IS NOT NULL ;
"""
# 내가 원한다고 쿼리를 나눌수있는게 아니다.
OrderedProduct.objects.filter(product_cnt=23000,
related_product__product_owned_company__name__contains='comapny_name').prefetch_related(
'related_product')
"""
SELECT *
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_product" ON ("orm_practice_app_orderedproduct"."related_product_id" = "orm_practice_app_product"."id")
INNER JOIN "orm_practice_app_company" ON ("orm_practice_app_product"."product_owned_company_id" = "orm_practice_app_company"."id")
WHERE ("orm_practice_app_orderedproduct"."product_cnt" = 23000 AND "orm_practice_app_company"."name"::text LIKE '%comapny\_name%')
"""
# 결론은 무거울 것으로 예상되는 QuerySet은 직접 쿼리는 찍어서 확인을 해야한다.
OrderedProduct.objects.filter(product_cnt=23000,
related_product__product_owned_company__name__contains='comapny_name').prefetch_related(
'related_product', 'related_product__product_owned_company')
"""
SELECT "orm_practice_app_orderedproduct"."id", "orm_practice_app_orderedproduct"."product_cnt", "orm_practice_app_orderedproduct"."amount_of_credited_mileage", "orm_practice_app_orderedproduct"."related_product_id", "orm_practice_app_orderedproduct"."related_order_id"
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_product" ON ("orm_practice_app_orderedproduct"."related_product_id" = "orm_practice_app_product"."id")
INNER JOIN "orm_practice_app_company" ON ("orm_practice_app_product"."product_owned_company_id" = "orm_practice_app_company"."id")
WHERE ("orm_practice_app_orderedproduct"."product_cnt" = 23000 AND "orm_practice_app_company"."name"::text LIKE '%comapny\_name%')
"""
OrderedProduct.objects.filter(product_cnt=23000).prefetch_related(
Prefetch('related_product__product_owned_company',
queryset=Company.objects.filter(name__contains='comapny_name')))
"""
SELECT "orm_practice_app_orderedproduct"."id", "orm_practice_app_orderedproduct"."product_cnt",
"orm_practice_app_orderedproduct"."amount_of_credited_mileage", "orm_practice_app_orderedproduct"."related_product_id",
"orm_practice_app_orderedproduct"."related_order_id"
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_product" ON ("orm_practice_app_orderedproduct"."related_product_id" = "orm_practice_app_product"."id")
INNER JOIN "orm_practice_app_company" ON ("orm_practice_app_product"."product_owned_company_id" = "orm_practice_app_company"."id")
WHERE ("orm_practice_app_orderedproduct"."product_cnt" = 23000 AND "orm_practice_app_company"."name"::text LIKE '%comapny\_name%')
"""
order_list: Order = Order.objects.filter(id=3).prefetch_related('product_set_included_order')
"""
SELECT "orm_practice_app_order"."id", "orm_practice_app_order"."reg_date", "orm_practice_app_order"."descriptions", "orm_practice_app_order"."order_owner_id"
FROM "orm_practice_app_order" WHERE "orm_practice_app_order"."id" = 3 ;
SELECT ("orm_practice_app_orderedproduct"."related_order_id") AS "_prefetch_related_val_related_order_id",
"orm_practice_app_product"."id", "orm_practice_app_product"."name", "orm_practice_app_product"."price", "orm_practice_app_product"."product_owned_company_id"
FROM "orm_practice_app_product"
INNER JOIN "orm_practice_app_orderedproduct" ON ("orm_practice_app_product"."id" = "orm_practice_app_orderedproduct"."related_product_id")
WHERE "orm_practice_app_orderedproduct"."related_order_id" IN (3);
"""
Order.objects.filter(id=4, product_set_included_order__product_owned_company=3)
"""
SELECT "orm_practice_app_order"."id", "orm_practice_app_order"."reg_date", "orm_practice_app_order"."descriptions", "orm_practice_app_order"."order_owner_id"
FROM "orm_practice_app_order"
INNER JOIN "orm_practice_app_orderedproduct" ON ("orm_practice_app_order"."id" = "orm_practice_app_orderedproduct"."related_order_id")
INNER JOIN "orm_practice_app_product" ON ("orm_practice_app_orderedproduct"."related_product_id" = "orm_practice_app_product"."id")
WHERE ("orm_practice_app_order"."id" = 4 AND "orm_practice_app_product"."product_owned_company_id" = 3)
"""
Order.objects.filter(id=4).prefetch_related('product_set_included_order')
"""
SELECT * FROM "orm_practice_app_order"
WHERE "orm_practice_app_order"."id" = 4 LIMIT 21;
SELECT * FROM "orm_practice_app_product"
INNER JOIN "orm_practice_app_orderedproduct" ON ("orm_practice_app_product"."id" = "orm_practice_app_orderedproduct"."related_product_id")
WHERE "orm_practice_app_orderedproduct"."related_order_id" IN (4);
"""
order_product = OrderedProduct.objects.filter(related_order=4).select_related('related_order', 'related_product')
"""
SELECT "orm_practice_app_orderedproduct"."id", "orm_practice_app_orderedproduct"."product_cnt",
"orm_practice_app_orderedproduct"."amount_of_credited_mileage", "orm_practice_app_orderedproduct"."related_product_id", "orm_practice_app_orderedproduct"."related_order_id",
"orm_practice_app_product"."id", "orm_practice_app_product"."name", "orm_practice_app_product"."price", "orm_practice_app_product"."product_owned_company_id",
"orm_practice_app_order"."id", "orm_practice_app_order"."reg_date", "orm_practice_app_order"."descriptions", "orm_practice_app_order"."order_owner_id"
FROM "orm_practice_app_orderedproduct"
INNER JOIN "orm_practice_app_order" ON ("orm_practice_app_orderedproduct"."related_order_id" = "orm_practice_app_order"."id")
INNER JOIN "orm_practice_app_product" ON ("orm_practice_app_orderedproduct"."related_product_id" = "orm_practice_app_product"."id")
WHERE "orm_practice_app_orderedproduct"."related_order_id" = 4
"""
order_queryset = Order.objects.filter(descriptions__contains='상세내용입니다').prefetch_related(
'product_set_included_order')
for order in order_queryset[:10]:
order.product_set_included_order.all()
"""
SELECT "orm_practice_app_order"."id", "orm_practice_app_order"."reg_date", "orm_practice_app_order"."descriptions", "orm_practice_app_order"."order_owner_id"
FROM "orm_practice_app_order"
INNER JOIN "orm_practice_app_orderedproduct" ON ("orm_practice_app_order"."id" = "orm_practice_app_orderedproduct"."related_order_id")
WHERE ("orm_practice_app_order"."id" = 4 AND "orm_practice_app_orderedproduct"."related_product_id" IS NOT NULL) ;
"""
Product.objects.filter(id=4).select_related('product_owned_company')
"""
SELECT "orm_practice_app_product"."id", "orm_practice_app_product"."name", "orm_practice_app_product"."price",
"orm_practice_app_product"."product_owned_company_id", "orm_practice_app_company"."id", "orm_practice_app_company"."name",
"orm_practice_app_company"."tel_num", "orm_practice_app_company"."address"
FROM "orm_practice_app_product"
LEFT OUTER JOIN "orm_practice_app_company" ON ("orm_practice_app_product"."product_owned_company_id" = "orm_practice_app_company"."id")
WHERE "orm_practice_app_product"."id" = 4;
"""
Product.objects.annotate(custom_field=F('name') + F('price')).filter(id=3)
# 일단 데이터를 다 가져와서 sum(product_set.value_list('price'))
Product.objects.filter(id__lte=10).aggregate(total_price=Avg('price'))
# 서브쿼리로 가져오기
users = User.objects.filter(id__lte=20)
Order.objects.filter(order_owner__in=Subquery(users.values('id')))
"""
SELECT "orm_practice_app_order"."id", "orm_practice_app_order"."reg_date",
"orm_practice_app_order"."descriptions", "orm_practice_app_order"."order_owner_id"
FROM "orm_practice_app_order"
WHERE "orm_practice_app_order"."order_owner_id" IN (SELECT U0."id" FROM "auth_user" U0);
"""
Order.objects.filter(order_owner_id__lte=20)
# right outer 가능한가? 불가
Product.objects.filter(product_owned_company__name='company_name3').select_related('product_owned_company')
# many to many join
Order.objects.prefetch_related('product_set_included_order').filter(id=1)
|
19,953 | 0359b0a555fa540ae14f45d5fc5c4ac8a4e6e3c7 | #!/usr/bin/env python
"""A simple messaging app.
Accepts argument to the URL of: http://.../?m=chat%20message. You can also do a
POST.
A way to cheat and send a message to this app using the Javascript console.
var sendMessage = function(m) {
// Fix the URL.
document.createElement('img').src = 'http://.../?m=' + encodeURIComponent(m);
};
// Sample usage.
sendMessage('I am a message.');
"""
import json
import webapp2
class BaseHandler(webapp2.RequestHandler):
    """Shared request plumbing: serializes `execute()`'s dict as JSON.

    Subclasses override `execute()`; GET and POST are handled identically so
    the JS-console `img.src` trick described in the module docstring works.
    """
    def get(self):
        self.response.headers['Content-Type'] = 'application/json'
        self.response.write(json.dumps(self.execute()))
    def post(self):
        # POST is simply an alias for GET.
        self.get()
    def execute(self):
        # Default body; subclasses must override.
        # BUG FIX: corrected the user-facing typo 'Not implemeted.'.
        return {
            'success': False,
            'error': 'Not implemented.'
        }
class MessageHandler(BaseHandler):
    """Accepts a chat message via the `m` query/POST parameter."""

    def execute(self):
        message = self.request.get('m', '')
        # Guard clause: reject empty submissions up front.
        if not message:
            return {
                'success': False,
                'error': 'No message given.'
            }
        # Put calls to glass with message here.
        return {'success': True}
class LocationHandler(BaseHandler):
    """Accepts a geographic fix via the `lat` and `lng` parameters."""

    def execute(self):
        lat = self.request.get('lat', '')
        lng = self.request.get('lng', '')
        # Both coordinates are required; bail out early if either is missing.
        if not (lat and lng):
            return {
                'success': False,
                'error': 'Missing location data.'
            }
        # Put calls to glass with message here.
        return {'success': True}
class ImageHandler(BaseHandler):
    """Accepts an image reference via the `src` parameter."""

    def execute(self):
        src = self.request.get('src', '')
        # Guard clause: an image source URL is mandatory.
        if not src:
            return {
                'success': False,
                'error': 'No image source given.'
            }
        # Put calls to glass with message here.
        return {'success': True}
app = webapp2.WSGIApplication([
('/', MessageHandler),
('/message', MessageHandler),
('/location', LocationHandler),
('/image', ImageHandler)
])
|
19,954 | b5752c8d3344b65691ee0690fd8c8c9235883308 | """
Polygon.io market data
"""
import asyncio
import logging
import os
import urllib.parse
from typing import Any, Mapping, Optional, Union
import aiohttp
import pandas as pd
from dotenv import load_dotenv
from aqua.market_data import errors
from aqua.market_data.market_data_interface import (
IMarketData,
Quote,
StreamType,
Trade,
_set_time,
)
from aqua.security import Stock
from aqua.security.security import Security
logger = logging.getLogger(__name__)
if not load_dotenv():
logger.warning("Can't load dotenv file")
_POLYGON_URL = os.getenv("POLYGON_URL")
_POLYGON_API_KEY = os.getenv("POLYGON_API_KEY")
if _POLYGON_URL is None:
logger.fatal("Unable to load Polygon url")
raise errors.ConfigError
if _POLYGON_API_KEY is None:
logger.fatal("Can't load polygon api key")
raise errors.CredentialError
class PolygonMarketData(IMarketData):
    """
    Polygon market data gets market data from polygon.io asynchronously.

    Use as an async context manager: the aiohttp session is created on
    __aenter__ and torn down on __aexit__.
    """

    def __init__(self) -> None:
        self.session: Optional[aiohttp.ClientSession] = None

    async def __aenter__(self) -> "PolygonMarketData":
        self.session = aiohttp.ClientSession(
            headers={"Authorization": f"Bearer {_POLYGON_API_KEY}"}
        )
        return self

    async def __aexit__(self, *exec_info) -> None:
        await self.session.close()
        await asyncio.sleep(
            0.25
        )  # https://docs.aiohttp.org/en/stable/client_advanced.html#graceful-shutdown
        self.session = None

    @property
    def name(self) -> str:
        return "Polygon.io"

    async def _get(self, path: str, params: Optional[Mapping[str, str]] = None) -> Any:
        """GET `path` relative to the Polygon base URL and return parsed JSON.

        Raises RateLimitError on HTTP 429 and DataSourceError on any other
        non-200 status.
        """
        url = urllib.parse.urljoin(_POLYGON_URL, path)
        async with self.session.get(url, params=params) as response:
            if response.status != 200:
                logger.warning(
                    "Got error %s, %s", response.status, await response.json()
                )
                if response.status == 429:
                    logger.warning("Rate limited")
                    raise errors.RateLimitError
                raise errors.DataSourceError
            return await response.json()

    async def _get_stock_hist_bars(
        self,
        symbol: str,
        multiplier: int,
        timespan: str,
        from_date: str,
        to_date: str,
    ) -> Union[pd.DataFrame, type(NotImplemented)]:
        """Assumes from_date and to_date do not need to be URL escaped"""
        # https://polygon.io/docs/get_v2_aggs_ticker__stocksTicker__range__multiplier___timespan___from___to__anchor
        symbol = urllib.parse.quote(symbol)
        response = await self._get(
            f"/v2/aggs/ticker/{symbol}/range/{multiplier}/{timespan}/{from_date}/{to_date}",
            params={
                "adjusted": "false",
                "limit": "50000",
            },
        )
        if response["status"] == "DELAYED":
            raise errors.DataPermissionError
        if "results" not in response:
            return pd.DataFrame()
        return pd.DataFrame(response["results"])

    async def get_hist_bars(
        self,
        security: Security,
        bar_size: pd.Timedelta,
        start_date: pd.Timestamp,
        end_date: Optional[pd.Timestamp] = None,
    ) -> Union[pd.DataFrame, type(NotImplemented)]:
        """Fetch historical OHLCV bars for a stock, splitting the date range
        into chunks small enough to stay under Polygon's 50k-row result limit.
        """
        # validate security
        if not isinstance(security, Stock):
            logger.warning(
                "Polygon.io only supports stock historical bars. Got security type: %s",
                type(security),
            )
            return NotImplemented
        # validate start and end dates
        if end_date is None:
            end_date = start_date
        start_date = _set_time(start_date).floor("D")
        end_date = _set_time(end_date).floor("D")
        if end_date < start_date:
            raise ValueError("End date cannot come before start date")
        # validate bar_size and generate periods to query based on bar_size
        if bar_size.value <= 0:
            raise ValueError(f"Got non positive bar_size: {bar_size}")
        if bar_size % pd.Timedelta("1 day") == pd.Timedelta(0):
            range_multiplier = bar_size // pd.Timedelta("1 day")
            timespan = "day"
            period_freq = pd.DateOffset(days=50000)
        elif bar_size % pd.Timedelta("1 min") == pd.Timedelta(0):
            range_multiplier = bar_size // pd.Timedelta("1 min")
            timespan = "minute"
            period_freq = pd.DateOffset(days=75)
        else:
            logger.warning("Bar size %s not supported", bar_size)
            return NotImplemented
        periods = list(pd.date_range(start_date, end_date, freq=period_freq))
        periods.append(end_date + pd.DateOffset(days=1))
        # make requests for each period
        res = []
        for i in range(len(periods) - 1):
            period_start = periods[i]
            period_end = periods[i + 1] - pd.DateOffset(days=1)
            response = await self._get_stock_hist_bars(
                security.symbol,
                range_multiplier,
                timespan,
                period_start.strftime("%Y-%m-%d"),
                period_end.strftime("%Y-%m-%d"),
            )
            if response is NotImplemented:
                return NotImplemented
            if response.empty:
                continue
            res.append(response)
        if len(res) == 0:
            return pd.DataFrame(
                columns=["Open", "High", "Low", "Close", "Volume", "NumTrades", "VWAP"]
            )
        res = pd.concat(res)
        res.rename(
            columns={
                "c": "Close",
                "h": "High",
                "l": "Low",
                "n": "NumTrades",
                "o": "Open",
                "t": "Time",
                "v": "Volume",
                "vw": "VWAP",
            },
            inplace=True,
        )
        # Polygon timestamps are epoch milliseconds; index in NY market time.
        res["Time"] = res["Time"].map(
            lambda x: pd.Timestamp(x, unit="ms", tz="America/New_York")
        )
        res.set_index("Time", inplace=True)
        res.sort_index(inplace=True)
        return res[["Open", "High", "Low", "Close", "Volume", "NumTrades", "VWAP"]]

    async def subscribe(
        self, stream_type: StreamType, security: Security
    ) -> Union[None, type(NotImplemented)]:
        # Streaming is not supported by this adapter.
        return NotImplemented

    async def get(
        self, stream_type: StreamType, security: Security
    ) -> Union[Quote, Trade]:
        raise NotImplementedError

    async def unsubscribe(self, stream_type: StreamType, security: Security) -> None:
        raise NotImplementedError

    async def get_stock_dividends(
        self, stock: Stock
    ) -> Union[pd.DataFrame, type(NotImplemented)]:
        """Fetch the dividend history for `stock` as a DataFrame."""
        path = f"/v2/reference/dividends/{urllib.parse.quote_plus(stock.symbol)}"
        response = await self._get(path)
        columns = ["Amount", "ExDate", "PaymentDate", "RecordDate"]
        # Guard against a missing/empty result set instead of raising KeyError
        # (consistent with the handling in _get_stock_hist_bars).
        res = pd.DataFrame(response.get("results", []))
        if res.empty:
            return pd.DataFrame(columns=columns)
        res.rename(
            columns={
                "amount": "Amount",
                "exDate": "ExDate",
                "paymentDate": "PaymentDate",
                "recordDate": "RecordDate",
            },
            inplace=True,
        )
        return res[columns]

    async def get_stock_splits(
        self, stock: Stock
    ) -> Union[pd.DataFrame, type(NotImplemented)]:
        """Fetch the split history for `stock` as a DataFrame."""
        path = f"/v2/reference/splits/{urllib.parse.quote_plus(stock.symbol)}"
        response = await self._get(path)
        columns = ["Ratio", "ExDate"]
        # Same empty-result guard as get_stock_dividends.
        res = pd.DataFrame(response.get("results", []))
        if res.empty:
            return pd.DataFrame(columns=columns)
        res.rename(columns={"ratio": "Ratio", "exDate": "ExDate"}, inplace=True)
        return res[columns]
|
19,955 | a2da65f3552024ef86380ceab0f21ffdf623b0e7 | from LycxlSpider import *
class Spider(spider):
    """Thin wrapper around the base spider: run the regex scan, show a result."""

    def main(self):
        """Run the configured regex search and print the first capture."""
        self.rex_find()
        first_hit = self.info[0]
        print(first_hit)
if __name__=='__main__':
    caokeyi=Spider('https://www.baidu.com')
    # Regex captures the stylesheet href and the page <title> text.
    caokeyi.add_rex('<link rel=stylesheet type=text/css href=(.*?)><title>(.*?)</title></head> <body link=#0000cc>')
    caokeyi.main()
19,956 | cffc7d4ce22447761058788bca0907aa0ed7ba99 | import csv
import json
import os
import boto3
from Strategy import Strategy
import pickle
import pymongo
from credentials import credentials
import time
def bootstrap_sim(ticker, start, n_samples):
    """Download pickled 5-minute data for `ticker` from S3 and run the strategy.

    Returns whatever Strategy.run() produces (the strategy's calls).
    NOTE: unpickles data fetched from the bucket -- the bucket is trusted.
    """
    s3_object = get_boto_client('s3').get_object(
        Bucket='stockdatalambda', Key=ticker + '_5.pickle')
    price_data = pickle.loads(s3_object['Body'].read())
    simulation = Strategy(price_data, start_offset=start, n_samples=n_samples)
    return simulation.run()
def lambda_handler(event, context):
    """AWS Lambda entry point: run one bootstrap simulation and persist it.

    Expects `event` to carry 'ticker', 'start', 'span', and 'sim_id'.
    The report is stored in the Mongo collection named by 'sim_id'.
    """
    calls = bootstrap_sim(event['ticker'], event['start'], event['span'])
    # JSON round-trip makes a deep, JSON-safe copy of the incoming event.
    report = json.loads(json.dumps(event))
    report['results'] = calls
    mongo = pymongo.MongoClient(
        host=credentials['mongo']['host'],
        port=credentials['mongo']['port'],
        username=credentials['mongo']['username'],
        password=credentials['mongo']['password'],
    )
    mongo.qfbt[event['sim_id']].insert_one(json.loads(json.dumps(report)))
    return {
        'statusCode': 200,
        'body': json.dumps(report)
    }
def get_boto_client(client_type='lambda'):
    """Build a boto3 client using the AWS console-style credentials.csv
    located next to this file (header row + one data row).

    :param client_type: boto3 service name, e.g. 'lambda' or 's3'
    :return: configured boto3 client pinned to eu-west-3
    """
    creds_path = os.path.join(os.path.dirname(__file__), 'credentials.csv')
    with open(creds_path, 'r') as csv_file:
        # DictReader replaces the manual header/row zipping; also avoids
        # shadowing the module-level `credentials` import.
        csv_credentials = next(csv.DictReader(csv_file))
    return boto3.client(
        client_type,
        aws_access_key_id=csv_credentials['Access key ID'],
        aws_secret_access_key=csv_credentials['Secret access key'],
        region_name='eu-west-3'
    )
if __name__ == '__main__':
    # Local smoke test: run one simulation for AAPL outside of AWS Lambda.
    ticker = 'AAPL'
    event_id = str(int(time.time())) + '-' + ticker
    print(event_id)
    result = lambda_handler({'ticker': ticker, 'start': 0, 'span': 100, 'id': event_id, 'sim_id': 'test'}, '')
    print(result)
|
19,957 | 142f487b3d17f53b98f9ac086fadb5de58a39056 | #___________________________________________
# Import Environment from above-level script
#___________________________________________
#___________________________________________
# Import Environment from above-level script
#___________________________________________
Import('env')
#_________________________
# InitializeRandom Library
#_________________________
# Static library built from a single translation unit; no extra link inputs.
target_initializeRandom = 'initializeRandom'
source_initializeRandom = ['initializeRandom.cpp']
libs_initializeRandom = []
libpath_initializeRandom = []
initializeRandom = env.Library(target = target_initializeRandom,
                               source = source_initializeRandom,
                               LIBS = libs_initializeRandom,
                               LIBPATH = libpath_initializeRandom)
|
19,958 | ed818d013ddf6bc5cbf1ff61f0024fa26978d2bd | import functions
import cv2
def main(frontImg, frontBottomY, frontTopY, sideImg, sideBottomY, sideTopY, frontLeftShoulder, frontRightShoulder, sideShoulder):
    """Estimate the shoulder circumference from front and side silhouettes.

    Walks outward from the detected shoulder keypoints until a pure-green
    pixel (0, 255, 0) -- presumably the drawn body contour, TODO confirm --
    is hit, then approximates the circumference as an ellipse from the
    front width and side depth.
    """
    # left shoulder search
    # move from shoulder point to left by shoulder point to top contour
    frontLeftShoulder = [int(frontLeftShoulder[0]), int(frontLeftShoulder[1])]
    mvTemp = 0
    # Scan upward until the contour colour is found, then shift left by
    # that same distance (heuristic mapping of vertical gap to width).
    while not(frontImg[frontLeftShoulder[1]-mvTemp, frontLeftShoulder[0], 0] == 0 and
              frontImg[frontLeftShoulder[1]-mvTemp, frontLeftShoulder[0], 1] == 255 and
              frontImg[frontLeftShoulder[1]-mvTemp, frontLeftShoulder[0], 2] == 0):
        mvTemp += 1
    frontLeftShoulder[0] -= mvTemp
    # right shoulder search
    # same right
    frontRightShoulder = [int(frontRightShoulder[0]), int(frontRightShoulder[1])]
    mvTemp = 0
    while not(frontImg[frontRightShoulder[1]-mvTemp, frontRightShoulder[0], 0] == 0 and
              frontImg[frontRightShoulder[1]-mvTemp, frontRightShoulder[0], 1] == 255 and
              frontImg[frontRightShoulder[1]-mvTemp, frontRightShoulder[0], 2] == 0):
        mvTemp += 1
    frontRightShoulder[0] += mvTemp
    # NOTE(review): the two circles below are identical; the second draw was
    # likely meant for frontLeftShoulder -- confirm before changing.
    cv2.circle(frontImg, (frontRightShoulder[0], frontRightShoulder[1]), 1, (255,255,255), -1)
    cv2.circle(frontImg, (frontRightShoulder[0], frontRightShoulder[1]), 1, (255, 255, 255), -1)
    # side shoulder search
    # Map the front-view shoulder height onto the side view by linear scaling
    # between the two images' top/bottom reference rows.
    # NOTE(review): check the sign of (frontTopY-frontBottomY) in the divisor;
    # a top-down coordinate system may need (frontBottomY-frontTopY).
    sideShoulder = [int(sideShoulder[0]), int((sideTopY-sideBottomY)*(frontRightShoulder[1]-frontTopY)/(frontTopY-frontBottomY)+sideTopY)]
    sideRightShoulder = [sideShoulder[0], sideShoulder[1]]
    while not(sideImg[sideRightShoulder[1], sideRightShoulder[0], 0] == 0 and
              sideImg[sideRightShoulder[1], sideRightShoulder[0], 1] == 255 and sideImg[sideRightShoulder[1], sideRightShoulder[0], 2] == 0):
        sideRightShoulder[0] += 1
    sideLeftShoulder = [sideShoulder[0], sideShoulder[1]]
    while not(sideImg[sideLeftShoulder[1], sideLeftShoulder[0], 0] == 0 and
              sideImg[sideLeftShoulder[1], sideLeftShoulder[0], 1] == 255 and sideImg[sideLeftShoulder[1], sideLeftShoulder[0], 2] == 0):
        sideLeftShoulder[0] -= 1
    # Semi-axes: half the front width and half the side depth.
    return functions.ellipseCircumference((frontRightShoulder[0] - frontLeftShoulder[0]) / 2, (sideRightShoulder[0] - sideLeftShoulder[0]) / 2)
19,959 | abf6c2543e5534b12b61730b6f6930a870d8e6f3 | from .user_api import ns as user_namespace
from .task_api import ns as task_namespace
|
19,960 | fb34524edbdfc2edf221e61f6a7181b723cb6db2 | from __future__ import division
from cleaner import csv_cleaner
import zipfile
import bs4
import pandas as pd
def try_parse_odt(file_name):
    """
    Tries to parse tables in odt files.
    Warning: if parsing fails, it will not return an error, rather it will just not return the content of that table.
    :param file_name: str
    :return: Pandas Dataframe (empty if no table could be parsed)
    """
    # Close the archive deterministically instead of leaking the handle.
    with zipfile.ZipFile(file_name) as zf:
        bs = bs4.BeautifulSoup(zf.read("content.xml"), 'xml')
    tables = bs.findAll("table")
    csv_tables = [table_to_csv(tables_to_lists(table)) for table in tables]
    success = []
    for table in csv_tables:
        try:
            success.append(csv_cleaner.try_to_parse_csv(raw_text=table))
        except Exception:
            # Deliberate best-effort: skip tables the cleaner cannot parse.
            pass
    if not success:
        # pd.concat raises on an empty list; return an empty frame instead.
        return pd.DataFrame()
    return pd.concat(success)
def tables_to_lists(table):
    """Convert a parsed ODT table element into a list of rows of cell text."""
    rows = []
    for row in table.findAll("table-row"):
        rows.append([cell.get_text() for cell in row.findAll("table-cell")])
    return rows
def format_for_csv(s):
    """Quote a CSV field per RFC 4180.

    A field containing a comma, double quote, CR, or LF is wrapped in double
    quotes and any embedded quotes are doubled. The original version only
    handled commas, producing malformed CSV for fields with quotes/newlines.
    """
    if any(ch in s for ch in ',"\r\n'):
        return '"' + s.replace('"', '""') + '"'
    return s
def table_to_csv(table_lst):
    """Serialize rows of cell strings into CRLF-delimited CSV text."""
    lines = []
    for row in table_lst:
        lines.append(",".join(format_for_csv(cell) for cell in row))
    return "\r\n".join(lines)
|
19,961 | 643973c2728fdedc3ea67b4acd3185e8880c47a9 | """Check processed files if they start with ki_....
Copyright (C) 2011 Thomas Nauss
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Please send any comments, suggestions, criticism, or (for our sake) bug
reports to nausst@googlemail.com
"""
__author__ = "Thomas Nauss <nausst@googlemail.com>"
__version__ = "2013-10-16"
__license__ = "GNU GPL, see http://www.gnu.org/licenses/"
import ConfigParser
import fnmatch
import os
import csv
def locate(pattern, patternpath, root=os.curdir):
    '''Locate files matching filename pattern recursively

    This routine is based on the one from Simon Brunning at
    http://code.activestate.com/recipes/499305/ and extended by the patternpath.

    Args:
        pattern: Pattern of the filename
        patternpath: Pattern of the filepath
        root: Root directory for the recursive search
    '''
    for dirpath, _dirnames, filenames in os.walk(os.path.abspath(root)):
        # Only yield from directories whose path matches patternpath.
        if not fnmatch.fnmatch(dirpath, patternpath):
            continue
        for name in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, name)
def configure(config_file):
    """Reads configuration settings and configure object.

    Args:
        config_file: Full path and name of the configuration file.

    Returns:
        Tuple (toplevel_processing_plots_path, project_id) read from the
        [repository] and [project] sections.
    """
    parser = ConfigParser.ConfigParser()
    parser.read(config_file)
    plots_path = parser.get('repository', 'toplevel_processing_plots_path')
    project_id = parser.get('project', 'project_id')
    return plots_path, project_id
def main():
    """Main program function

    Move data from initial logger import to level 0 folder structure.
    """
    # NOTE: Python 2 syntax (print statements) throughout this script.
    print
    print 'Module: be_process_mntstation_level0000'
    print 'Version: ' + __version__
    print 'Author: ' + __author__
    print 'License: ' + __license__
    print
    config_file = "ki_config.cnf"
    toplevel_processing_plots_path, project_id = configure(config_file)
    path = toplevel_processing_plots_path + project_id
    # Pass 1: strip any prefix before "ki_" from every file name.
    station_dataset=locate("*.*", "*", path)
    for dataset in station_dataset:
        print " "
        print "Checking dataset ", dataset
        act_filepath = os.path.dirname(dataset)
        act_filename = os.path.basename(dataset)
        if "ki_" in act_filename:
            # Drop everything before the first "ki_" occurrence.
            act_filename = act_filename[act_filename.index("ki_"):]
            os.rename(dataset, act_filepath + os.sep + act_filename)
    print " "
    print " "
    print " "
    # Pass 2: report any .dat files that still lack the "ki_" prefix.
    station_dataset=locate("*.dat", "*", path)
    for dataset in station_dataset:
        act_filepath = os.path.dirname(dataset)
        act_filename = os.path.basename(dataset)
        if act_filename.startswith("ki_") == False:
            print dataset

if __name__ == '__main__':
    main()
|
19,962 | 4045f6bc9c7ac3ba78f1d89d8ce131df979d86e2 | from django.shortcuts import render
from django.http import HttpResponse
from django.http import JsonResponse
import json
from register.user import modify_name
# Create your views here.
|
19,963 | 7535cb9e7cd4f7f5dbc5e0cc5bc57a6051f155b1 | #
# @copyright@
# Copyright (c) 2006 - 2018 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
import os
import stack.sql
import subprocess
class Plugin(stack.sql.InsertEthersPlugin):
    """
    Calls "stack sync config" whenever a host is added, removed, or updated.
    """

    def added(self, nodename, id):
        self.sync()

    def removed(self, nodename, id):
        self.sync()

    def update(self):
        self.sync()

    def done(self):
        pass

    def sync(self):
        """Run 'stack sync config', discarding its output."""
        # The original passed open('/dev/null') (read mode!) as stdout/stderr
        # and leaked both handles; DEVNULL is the correct, leak-free way.
        p = subprocess.Popen(
            ['/opt/stack/bin/stack', 'sync', 'config'],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL,
        )
        p.wait()
        return
|
19,964 | d9a8bfeb59e711657fbbec4636c6c6632c79d4a5 | # pylist: disable=missing-module-docstring
|
19,965 | eb242782297196d25f1730ed398e2b8edea2d036 | from torch.utils.data import Dataset
from torchvision import transforms
import torch.nn as nn
import torch
import config
import ResNet
import train
import dataset
# Select GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device is:", device)
# Training data: rescale to the configured size, then convert to tensors.
train_dataset = dataset.costum_images_dataset(root_dir=config.train_root_dir,
                                              transform=transforms.Compose(
                                                  [dataset.Rescale(config.resize_param), dataset.ToTensor()]))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=config.batch_size, shuffle=True)
# Test data uses the same transforms but is not shuffled.
test_dataset = dataset.costum_images_dataset(root_dir=config.test_root_dir,
                                             transform=transforms.Compose(
                                                 [dataset.Rescale(config.resize_param), dataset.ToTensor()]))
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=config.batch_size, shuffle=False)
# Two candidate architectures; selected below by `arc_name`.
models = {
    'resnet': ResNet.ResNetClassifier(
        in_size=config.in_size, out_classes=4, channels=[32, 64, 128, 256, 512, 1024],
        pool_every=1, hidden_dims=[100] * 2,
        activation_type='relu',
        pooling_type='avg', pooling_params=dict(kernel_size=2),
        batchnorm=True, dropout=0.2, ),
    'cnn': ResNet.ConvClassifier(
        in_size=config.in_size, out_classes=4, channels=[32, 64] * 2,
        pool_every=2, hidden_dims=[100] * 2,
        activation_type='relu',
        pooling_type='avg', pooling_params=dict(kernel_size=2)
    )}
arc_name = 'resnet'
model_reg_ = models[arc_name]
model_reg_.to(device)
print(model_reg_)
# NOTE(review): SmoothL1Loss + train_model_reg suggests this trains the
# 4-output head as a regression target rather than classification -- confirm.
criterion = nn.SmoothL1Loss()
print('number of parameters: ', sum(param.numel() for param in model_reg_.parameters()))
print(f'Num of trainable parameters : {sum(p.numel() for p in model_reg_.parameters() if p.requires_grad)}')
optimizer = torch.optim.Adam(model_reg_.parameters(), lr=config.lr, weight_decay=config.weight_decay)
train.train_model_reg(model_reg_, criterion, optimizer, train_loader, test_loader, device,arc_name)
19,966 | 61f187d5a6726cca8f94b614a88608dd3a52a55f | from .models import JuryType
from wtforms import ValidationError
def FunctionAllowed(form, field):
    """WTForms validator: reject the member flag for jury types that
    do not allow members.

    :param form: the enclosing form (reads form.is_member.data)
    :param field: field holding the selected JuryType id
    :raises ValidationError: when the selected jury type forbids members
        but the form marks the entry as a member
    """
    # Renamed from `type` (shadowed the builtin); debug prints removed.
    jury_type = JuryType.query.filter(JuryType.id == field.data).first()
    # NOTE(review): assumes field.data always matches an existing JuryType;
    # a stale id would make jury_type None and raise AttributeError here.
    # `not x` replaces the fragile `x is False` identity test, which missed
    # falsy DB values such as 0.
    if not jury_type.is_member_allowed and form.is_member.data == 1:
        raise ValidationError('Тип TechnicalDelegate не допустим для типа Member')
|
19,967 | 1e29ae2e89f5eadf1facf2f60bd0129df7f0ba73 | import os
import pytest
from rotkehlchen.db.dbhandler import DBHandler
@pytest.fixture
def username():
    # Fixed username used by function-scoped DB fixtures.
    return 'testuser'

@pytest.fixture(scope='session')
def session_username():
    # Fixed username used by session-scoped DB fixtures.
    return 'session_test_user'

@pytest.fixture
def data_dir(tmpdir_factory):
    """Fresh temporary data directory per test function."""
    return tmpdir_factory.mktemp('data')

@pytest.fixture(scope='session')
def session_data_dir(tmpdir_factory):
    """Temporary data directory shared across the test session."""
    return tmpdir_factory.mktemp('session_data')
@pytest.fixture
def user_data_dir(data_dir, username):
    """Create and return the user data directory"""
    user_data_dir = os.path.join(data_dir, username)
    # makedirs(exist_ok=True) avoids the check-then-create race of
    # os.path.exists + os.mkdir.
    os.makedirs(user_data_dir, exist_ok=True)
    return user_data_dir

@pytest.fixture(scope='session')
def session_user_data_dir(session_data_dir, session_username):
    """Create and return the session scoped user data directory"""
    user_data_dir = os.path.join(session_data_dir, session_username)
    os.makedirs(user_data_dir, exist_ok=True)
    return user_data_dir
@pytest.fixture
def database(user_data_dir, function_scope_messages_aggregator):
    # Function-scoped DB handler; '123' is the test password.
    return DBHandler(user_data_dir, '123', function_scope_messages_aggregator)

@pytest.fixture(scope='session')
def session_database(session_user_data_dir, messages_aggregator):
    # Session-scoped DB handler sharing the session user directory.
    return DBHandler(session_user_data_dir, '123', messages_aggregator)
19,968 | 87fec60b7f1e488cc9155bd6a9f894cb78324138 |
def multiply_by_11(n):
n = '0' + n
ans = n[-1]
carry = 0
for pos in range(len(n)-2, -1, -1):
box = n[pos:pos+2]
s = int(box[0]) + int(box[1]) + carry
ans = str(s % 10) + ans
carry = s // 10
if carry > 0:
ans = str(carry) + ans
return ans
|
19,969 | f7fb26badcb9df5e3c3ab5cce1bb81edc52227f2 |
# Copyright 2020, Battelle Energy Alliance, LLC
# ALL RIGHTS RESERVED
"""
Defines the Component entity.
"""
from __future__ import unicode_literals, print_function
import os
import sys
from collections import defaultdict
import numpy as np
from base import Base
import time
import xml.etree.ElementTree as ET
from Economics import CashFlowUser
from ValuedParams import ValuedParam
import _utils as hutils
framework_path = hutils.get_raven_loc()
sys.path.append(framework_path)
from utils import InputData, xmlUtils,InputTypes
import MessageHandler
mh = MessageHandler.MessageHandler()
def factory(xml, method='sweep'):
    """
    Tool for constructing compnents without the input_loader
    TODO can this be set up so the input_loader calls it instead of the methods directly?
    @ In, xml, ET.Element, node from which to read component settings
    @ In, method, string, optional, operational mode for case
    @ Out, comp, Component instance, component constructed
    """
    component = Component(messageHandler=mh)
    component.read_input(xml, method)
    return component
class Component(Base, CashFlowUser):
  """
    Represents a unit in the grid analysis. Each component has a single "interaction" that
    describes what it can do (produce, store, demand)
  """
  @classmethod
  def get_input_specs(cls):
    """
      Collects input specifications for this class.
      @ In, None
      @ Out, input_specs, InputData, specs
    """
    input_specs = InputData.parameterInputFactory('Component', ordered=False, baseNode=None,
        descr=r"""defines a component as an element of the grid system. Components are defined by the action they
              perform such as \xmlNode{produces} or \xmlNode{consumes}; see details below.""")
    input_specs.addParam('name', param_type=InputTypes.StringType, required=True,
        descr=r"""identifier for the component. This identifier will be used to generate variables
              and relate signals to this component throughout the HERON analysis.""")
    # production
    ## this unit may be able to make stuff, possibly from other stuff
    input_specs.addSub(Producer.get_input_specs())
    # storage
    ## this unit may be able to store stuff
    input_specs.addSub(Storage.get_input_specs())
    # demands
    ## this unit may have a certain demand that must be met
    input_specs.addSub(Demand.get_input_specs())
    # this unit probably has some economics
    input_specs = CashFlowUser.get_input_specs(input_specs)
    return input_specs

  def __init__(self, **kwargs):
    """
      Constructor
      @ In, kwargs, dict, optional, arguments to pass to other constructors
      @ Out, None
    """
    Base.__init__(self, **kwargs)
    CashFlowUser.__init__(self)
    self.name = None
    self._produces = []
    self._stores = []
    self._demands = []

  def __repr__(self):
    """
      String representation.
      @ In, None
      @ Out, __repr__, string representation
    """
    return '<HERON Component "{}"">'.format(self.name)

  def read_input(self, xml, mode):
    """
      Sets settings from input file
      @ In, xml, xml.etree.ElementTree.Element, input from user
      @ In, mode, string, case mode to operate in (e.g. 'sweep' or 'opt')
      @ Out, None
    """
    # get specs for allowable inputs
    specs = self.get_input_specs()()
    specs.parseNode(xml)
    self.name = specs.parameterValues['name']
    self.raiseADebug('Loading component "{}"'.format(self.name))
    # Initialize before the loop: without this, a missing <economics> node
    # caused a NameError below instead of the intended IOError.
    econ_node = None
    for item in specs.subparts:
      if self.get_interaction() and item.getName() in ['produces', 'stores', 'demands']:
        self.raiseAnError(NotImplementedError, 'Currently each Component can only have one interaction (produces, stores, demands)! Check Component "{}"'.format(self.name))
      # read in producers
      if item.getName() == 'produces':
        prod = Producer(messageHandler=self.messageHandler)
        try:
          prod.read_input(item, mode, self.name)
        except IOError as e:
          self.raiseAWarning('Errors while reading component "{}"!'.format(self.name))
          raise e
        self._produces.append(prod)
      # read in storages
      elif item.getName() == 'stores':
        store = Storage(messageHandler=self.messageHandler)
        store.read_input(item, mode, self.name)
        self._stores.append(store)
      # read in demands
      elif item.getName() == 'demands':
        demand = Demand(messageHandler=self.messageHandler)
        demand.read_input(item, mode, self.name)
        self._demands.append(demand)
      # read in economics
      elif item.getName() == 'economics':
        econ_node = item # need to read AFTER the interactions!
    # after looping over nodes, finish up
    if econ_node is None:
      self.raiseAnError(IOError, '<economics> node missing from component "{}"!'.format(self.name))
    CashFlowUser.read_input(self, econ_node)

  def get_crossrefs(self):
    """
      Collect the required value entities needed for this component to function.
      @ In, None
      @ Out, crossrefs, dict, mapping of dictionaries with information about the entities required.
    """
    inter = self.get_interaction()
    crossrefs = {inter: inter.get_crossrefs()}
    crossrefs.update(self._economics.get_crossrefs())
    return crossrefs

  def set_crossrefs(self, refs):
    """
      Connect cross-reference material from other entities to the ValuedParams in this component.
      @ In, refs, dict, dictionary of entity information
      @ Out, None
    """
    try_match = self.get_interaction()
    for interaction in list(refs.keys()):
      # find associated interaction
      if try_match == interaction:
        try_match.set_crossrefs(refs.pop(interaction))
        break
    # send what's left to the economics
    self._economics.set_crossrefs(refs)
    # if anything left, there's an issue
    assert not refs

  def get_interaction(self):
    """
      Return the interactions this component uses.
      TODO could this just return the only non-empty one, since there can only be one?
      @ In, None
      @ Out, interactions, list, list of Interaction instances
    """
    try:
      return (self._produces + self._stores + self._demands)[0]
    except IndexError: # there are no interactions!
      return None

  def print_me(self, tabs=0, tab='  '):
    """
      Prints info about self
      @ In, tabs, int, optional, number of tabs to insert before prints
      @ In, tab, str, optional, characters to use to denote hierarchy
      @ Out, None
    """
    pre = tab*tabs
    print(pre+'Component:')
    print(pre+'  name:', self.name)
    self.get_interaction().print_me(tabs=tabs+1, tab=tab)

  def get_inputs(self):
    """
      returns list of all resources consumed here
      @ In, None
      @ Out, inputs, set, set of input resources as strings (resources that are taken/consumed/stored)
    """
    inputs = set()
    # simply combine the inputs for the interaction
    inputs.update(self.get_interaction().get_inputs())
    return inputs

  def get_outputs(self):
    """
      returns list of all resources producable here
      @ In, None
      @ Out, outputs, set, set of output resources as strings (resources that are produced/provided)
    """
    outputs = set()
    outputs.update(self.get_interaction().get_outputs())
    return outputs

  def get_resources(self):
    """
      Provides the full set of resources used by this component.
      @ In, None
      @ Out, res, set, set(str) of resource names
    """
    res = set()
    res.update(self.get_inputs())
    res.update(self.get_outputs())
    return res

  def get_capacity(self, meta, raven_vars, dispatch, t, raw=False):
    """
      returns the capacity of the interaction of this component
      @ In, meta, dict, arbitrary metadata from EGRET
      @ In, raven_vars, dict, evaluated RAVEN variables
      @ In, dispatch, DispatchScenario.DispatchRecord, current dispatch situation
      @ In, t, int, current time step
      @ In, raw, bool, optional, if True then return the ValuedParam instance for capacity, instead of the evaluation
      @ Out, capacity, float (or ValuedParam), the capacity of this component's interaction
    """
    return self.get_interaction().get_capacity(meta, raven_vars, dispatch, t, raw=raw)

  def get_capacity_var(self):
    """
      Returns the variable that is used to define this component's capacity.
      @ In, None
      @ Out, var, str, name of capacity resource
    """
    return self.get_interaction().get_capacity_var()

  def is_dispatchable(self):
    """
      Returns the dispatchability indicator of this component.
      TODO Note that despite the name, this is NOT boolean, but a string indicator.
      @ In, None
      @ Out, dispatchable, str, dispatchability (e.g. independent, dependent, fixed)
    """
    return self.get_interaction().is_dispatchable()

  def set_capacity(self, cap):
    """
      Set the float value of the capacity of this component's interaction
      @ In, cap, float, value
      @ Out, None
    """
    return self.get_interaction().set_capacity(cap)

  def produce(self, request, meta, raven_variables, dispatch, t, level=None):
    """
      Enacts the transfer function for this component to act based on a request.
      FIXME was used for "generic" dispatcher, does it still apply?
      @ In, request, dict, mapping of requested resource usage to amount requested (negative is
            consume, positive is produce)
      @ In, meta, dict, metadata information for current status in run
      @ In, raven_variables, dict, variables from RAVEN TODO part of meta!
      @ In, dispatch, DispatchState, expression of the current activity levels in the system
      @ In, t, int, index of "time" at which this production should be performed
      @ In, level, float, for storages indicates the amount currently stored
      @ Out, balance, dict, full dict of resources used and produced for request
      @ Out, meta, dict, updated metadata dictionary
    """
    interaction = self.get_interaction()
    balance, meta = interaction.produce(request, meta, raven_variables, dispatch, t, level)
    return balance, meta

  def produce_max(self, meta, raven_variables, dispatch, t):
    """
      Determines the maximum production possible for this component.
      @ In, meta, dict, metadata information for current status in run
      @ In, raven_variables, dict, variables from RAVEN TODO part of meta!
      @ In, dispatch, DispatchState, expression of the current activity levels in the system
      @ In, t, int, index of "time" at which this production should be performed
      @ Out, balance, dict, full dict of resources used and produced for request
      @ Out, meta, dict, updated metadata dictionary
    """
    interaction = self.get_interaction()
    balance, meta = interaction.produce_max(meta, raven_variables, dispatch, t)
    return balance, meta

  def produce_min(self, meta, raven_variables, dispatch, t):
    """
      Determines the minimum production possible for this component.
      @ In, meta, dict, metadata information for current status in run
      @ In, raven_variables, dict, variables from RAVEN TODO part of meta!
      @ In, dispatch, DispatchState, expression of the current activity levels in the system
      @ In, t, int, index of "time" at which this production should be performed
      @ Out, balance, dict, full dict of resources used and produced for request
      @ Out, meta, dict, updated metadata dictionary
    """
    interaction = self.get_interaction()
    balance, meta = interaction.produce_min(meta, raven_variables, dispatch, t)
    return balance, meta

  def get_capacity_param(self):
    """
      Provides direct access to the ValuedParam for the capacity of this component.
      @ In, None
      @ Out, cap, ValuedParam, capacity valued param
    """
    intr = self.get_interaction()
    return intr.get_capacity(None, None, None, None, raw=True)
class Interaction(Base):
"""
Base class for component interactions (e.g. Producer, Storage, Demand)
"""
tag = 'interacts' # node name in input file
@classmethod
def get_input_specs(cls):
"""
Collects input specifications for this class.
@ In, None
@ Out, input_specs, InputData, specs
"""
if cls.tag == 'produces':
desc = r"""indicates that this component produces one or more resources by consuming other resources."""
resource_desc = r"""the resource produced by this component's activity."""
elif cls.tag == 'stores':
desc = r"""indicates that this component stores one resource, potentially absorbing or providing that resource."""
resource_desc = r"""the resource stored by this component."""
elif cls.tag == "demands":
desc = r"""indicates that this component exclusively consumes a resource."""
resource_desc = r"""the resource consumed by this component."""
specs = InputData.parameterInputFactory(cls.tag, ordered=False, descr=desc)
specs.addParam('resource', param_type=InputTypes.StringListType, required=True,
descr=resource_desc)
dispatch_opts = InputTypes.makeEnumType('dispatch_opts', 'dispatch_opts', ['fixed', 'independent', 'dependent'])
specs.addParam('dispatch', param_type=dispatch_opts,
descr=r"""describes the way this component should be dispatched, or its flexibility.
\texttt{fixed} indicates the component always fully dispatched at its maximum level.
\texttt{independent} indicates the component is fully dispatchable by the dispatch optimization algorithm.
\texttt{dependent} indicates that while this component is not directly controllable by the dispatch
algorithm, it can however be flexibly dispatched in response to other units changing dispatch level.
For example, when attempting to increase profitability, the \texttt{fixed} components are not adjustable,
but the \texttt{independent} components can be adjusted to attempt to improve the economic metric.
In response to the \texttt{independent} component adjustment, the \texttt{dependent} components
may respond to balance the resource usage from the changing behavior of other components.""")
cap = ValuedParam.get_input_specs('capacity')
cap.descr = r"""provides the maximum value at which this component can act, in units of the indicated resource. """
#cap.removeSub('ARMA')
#cap.removeSub('Function')
#cap.removeSub('variable')
cap.addParam('resource', param_type=InputTypes.StringType,
descr=r"""indicates the resource that defines the capacity of this component's operation. For example,
if a component consumes steam and electricity to produce hydrogen, the capacity of the component
can be defined by the maximum steam consumable, maximum electricity consumable, or maximum
hydrogen producable. Any choice should be nominally equivalent, but determines the units
of the value of this node.""")
specs.addSub(cap)
minn = ValuedParam.get_input_specs('minimum')
minn.descr = r"""provides the minimum value at which this component can act, in units of the indicated resource. """
minn.addParam('resource', param_type=InputTypes.StringType,
descr=r"""indicates the resource that defines the minimum activity level for this component,
as with the component's capacity.""")
specs.addSub(minn)
return specs
def __init__(self, **kwargs):
  """
  Constructor
  @ In, kwargs, dict, arbitrary pass-through arguments
  @ Out, None
  """
  Base.__init__(self, **kwargs)
  # limits on activity level
  self._capacity = None                # upper bound on this interaction's activity
  self._capacity_var = None            # resource whose amount defines the capacity limit
  self._minimum = None                 # lower bound on activity, if dispatchable
  self._minimum_var = None             # resource whose amount defines the minimum
  # dispatch behavior
  self._dispatchable = None            # one of 'independent', 'dependent', 'fixed'
  # cross-referenced entities and signals
  self._signals = set()                # dependent signals for this interaction
  self._crossrefs = defaultdict(dict)  # crossref objects needed (e.g. armas), as {attr: {tag, name, obj}}
  self._function_method_map = {}       # maps function callers to the method to call, e.g. {(Producer, 'capacity'): 'method'}
  self._transfer = None                # production rate (if any), in produces per consumes
def read_input(self, specs, mode, comp_name):
  """
  Sets settings from input file.
  @ In, specs, InputData, specs
  @ In, mode, string, case mode to operate in (e.g. 'sweep' or 'opt')
  @ In, comp_name, string, name of component this Interaction belongs to
  @ Out, None
  """
  self.raiseADebug(' ... loading interaction "{}"'.format(self.tag))
  self._dispatchable = specs.parameterValues['dispatch']
  for item in specs.subparts:
    name = '_' + item.getName()
    if name in ['_capacity', '_minimum']:
      # common reading for valued params
      self._set_valued_param(name, comp_name, item, mode)
      # record which resource defines the limit; 'resource' attr may be absent
      # when only one resource is in play (resolved below)
      if name == '_capacity':
        self._capacity_var = item.parameterValues.get('resource', None)
      elif item.getName() == 'minimum':
        self._minimum_var = item.parameterValues.get('resource', None)
  # finalize some values
  resources = set(list(self.get_inputs()) + list(self.get_outputs()))
  ## capacity: if "variable" is None and only one resource in interactions, then that must be it
  if self._capacity_var is None:
    if len(resources) == 1:
      self._capacity_var = list(resources)[0]
    else:
      self.raiseAnError(IOError, 'If multiple resources are active, "capacity" requires a "resource" specified!')
  ## minimum: basically the same as capacity, functionally
  if self._minimum and self._minimum_var is None:
    if len(resources) == 1:
      self._minimum_var = list(resources)[0]
    else:
      self.raiseAnError(IOError, 'If multiple resources are active, "minimum" requires a "resource" specified!')
def _set_valued_param(self, name, comp, spec, mode):
  """
  Registers a ValuedParam on this interaction under the attribute "name".
  @ In, name, str, name of member of this class
  @ In, comp, str, name of associated component
  @ In, spec, InputParam, input specifications
  @ In, mode, string, case mode to operate in (e.g. 'sweep' or 'opt')
  @ Out, None
  """
  param = ValuedParam(name)
  # reading yields the signals this param depends on (e.g. synthetic histories)
  self._signals.update(param.read(comp, spec, mode))
  self._crossrefs[name] = param
  setattr(self, name, param)
def get_capacity(self, meta, raven_vars, dispatch, t, raw=False):
  """
  Returns the capacity of this interaction, evaluated at the current conditions
  unless "raw" is True, in which case the underlying ValuedParam is returned.
  @ In, meta, dict, additional variables to pass through
  @ In, raven_vars, dict, TODO part of meta! consolidate!
  @ In, dispatch, dict, TODO part of meta! consolidate!
  @ In, t, int, TODO part of meta! consolidate!
  @ In, raw, bool, optional, if True then provide ValuedParam instead of evaluation
  @ Out, evaluated, float or ValuedParam, requested value
  @ Out, meta, dict, additional variable passthrough
  """
  if raw:
    return self._capacity
  eval_inputs = {
      'request': {self._capacity_var: None},
      'meta': meta,
      'raven_vars': raven_vars,
      'dispatch': dispatch,
      't': t,
  }
  # evaluate returns (values, meta), which is exactly what we pass back
  return self._capacity.evaluate(eval_inputs, target_var=self._capacity_var)
def get_capacity_var(self):
  """
  Accessor for the resource variable that defines this interaction's capacity limit.
  @ In, None
  @ Out, capacity_var, string, name of capacity-limiting resource
  """
  return self._capacity_var
def set_capacity(self, cap):
  """
  Hard-sets the capacity, replacing any underlying ValuedParam evaluation
  with a fixed float value.
  @ In, cap, float, capacity value
  @ Out, None
  """
  # mutate the existing ValuedParam in place so outside references stay valid
  self._capacity.type = 'value'
  self._capacity._value = float(cap)  # TODO use a proper setter
def get_minimum(self, meta, raven_vars, dispatch, t, raw=False):
  """
  Returns the minimum activity level of this interaction, evaluated at the
  current conditions unless "raw" is True (then the ValuedParam is returned).
  @ In, meta, dict, additional variables to pass through
  @ In, raven_vars, dict, TODO part of meta! consolidate!
  @ In, dispatch, dict, TODO part of meta! consolidate!
  @ In, t, int, TODO part of meta! consolidate!
  @ In, raw, bool, optional, if True then provide ValuedParam instead of evaluation
  @ Out, evaluated, float or ValuedParam, requested value
  @ Out, meta, dict, additional variable passthrough
  """
  if raw:
    return self._minimum
  eval_inputs = {
      'request': {self._minimum_var: None},
      'meta': meta,
      'raven_vars': raven_vars,
      'dispatch': dispatch,
      't': t,
  }
  return self._minimum.evaluate(eval_inputs, target_var=self._minimum_var)
def get_crossrefs(self):
  """
  Accessor for the cross-referenced entities (e.g. synthetic histories, functions)
  this interaction needs, keyed by attribute name.
  @ In, None
  @ Out, crossrefs, dict, resource references
  """
  return self._crossrefs
def set_crossrefs(self, refs):
  """
  Attaches loaded cross-reference objects to their registered ValuedParams.
  @ In, refs, dict, resource cross-reference objects keyed by attribute name
  @ Out, None
  """
  for attr, entity in refs.items():
    # hand the loaded object to the ValuedParam registered under this attribute
    self._crossrefs[attr].set_object(entity)
def get_inputs(self):
  """
  Returns the set of resources consumed by this interaction.
  Base interactions consume nothing; subclasses extend this.
  @ In, None
  @ Out, inputs, set, set of inputs
  """
  return set()
def get_outputs(self):
  """
  Returns the set of resources produced by this interaction.
  Base interactions produce nothing; subclasses extend this.
  @ In, None
  @ Out, outputs, set, set of outputs
  """
  return set()
def get_resources(self):
  """
  Returns all resources used by this interaction, inputs followed by outputs.
  @ In, None
  @ Out, resources, list, list of resources (note: a list, not a set, despite
        the original docstring saying otherwise)
  """
  resources = list(self.get_inputs())
  resources.extend(self.get_outputs())
  return resources
def is_dispatchable(self):
  """
  Accessor for dispatch flexibility: 'fixed', 'dependent', or 'independent'.
  @ In, None
  @ Out, dispatchable, string, one of 'fixed', 'dependent', or 'independent'
  """
  return self._dispatchable
def is_type(self, typ):
  """
  Checks whether this interaction's concrete class name matches the request.
  @ In, typ, string, name to check against
  @ Out, is_type, bool, True when the class name matches
  """
  return type(self).__name__ == typ
def produce(self, *args, **kwargs):
  """
  Abstract hook: compute the results of this interaction producing resources.
  Subclasses must override this.
  @ In, args, list, positional arguments
  @ In, kwargs, dict, keyword arguments
  @ Out, None
  """
  raise NotImplementedError('This interaction has no "produce" method.')
def produce_max(self, *args, **kwargs):
  """
  Abstract hook: compute the results of producing at maximum level.
  Subclasses must override this.
  @ In, args, list, positional arguments
  @ In, kwargs, dict, keyword arguments
  @ Out, None
  """
  raise NotImplementedError('This interaction has no produce_max method yet!')
def produce_min(self, *args, **kwargs):
  """
  Abstract hook: compute the results of producing at minimum level.
  Subclasses must override this.
  @ In, args, list, positional arguments
  @ In, kwargs, dict, keyword arguments
  @ Out, None
  """
  raise NotImplementedError('This interaction has no produce_min method yet!')
def check_expected_present(self, data, expected, premessage):
  """
  Checks that every expected entry is present in "data" and is not None.
  Raises RuntimeError (via raiseAnError) if any entry is missing or None.
  @ In, data, dict, variable set to check against
  @ In, expected, list, list of expected entries
  @ In, premessage, str, message prepended to any warning printed
  @ Out, None
  """
  # expected entries that are absent entirely
  missing = [d for d in expected if d not in data]
  if missing:
    self.raiseAWarning(premessage, '| Expected variables are missing:', missing)
  # expected entries that are present but None
  # BUGFIX: original tested "v in expected" (i.e. whether None is an expected
  # entry), so None-valued expected entries were never detected; the membership
  # test must be on the key "d".
  nones = [d for d, v in data.items() if v is None and d in expected]
  if nones:
    self.raiseAWarning(premessage, '| Expected variables are None:', nones)
  if missing or nones:
    self.raiseAnError(RuntimeError, 'Some variables were missing or None! See warning messages above for details!')
def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):
  """
  Check to see if capacity limits of this component have been violated;
  if so, replace the requested balance with the component's maximum production.
  @ In, res, str, name of capacity-limiting resource
  @ In, amt, float, requested amount of resource used in interaction
  @ In, balance, dict, results of requested interaction
  @ In, meta, dict, additional variable passthrough
  @ In, raven_vars, dict, TODO part of meta! consolidate!
  @ In, dispatch, dict, TODO part of meta! consolidate!
  @ In, t, int, TODO part of meta! consolidate!
  @ Out, balance, dict, new results of requested action, possibly modified if capacity hit
  @ Out, meta, dict, additional variable passthrough
  """
  # evaluated capacity for the limiting resource at this time step
  cap = self.get_capacity(meta, raven_vars, dispatch, t)[0][self._capacity_var]
  try:
    # compare magnitudes so the check covers both signs (consumption vs production)
    if abs(balance[self._capacity_var]) > abs(cap):
      # requested more than possible, so solve the inverse problem: how much CAN we make?
      balance, meta = self.produce_max(meta, raven_vars, dispatch, t)
      print('The full requested amount ({res}: {req}) was not possible, so accessing maximum available instead ({res}: {blc}).'.format(res=res, req=amt, blc=balance[res]))
  except KeyError:
    # NOTE(review): SyntaxError is a surprising exception type for a runtime data
    # problem, but callers may rely on it, so it is left unchanged here.
    raise SyntaxError('Resource "{}" is listed as capacity limiter, but not an output of the component! Got: {}'.format(self._capacity_var, balance))
  return balance, meta
def get_transfer(self):
  """
  Accessor for the transfer function ValuedParam, if one exists.
  @ In, None
  @ Out, transfer, transfer ValuedParam (or None)
  """
  return self._transfer
class Producer(Interaction):
  """
  Explains a particular interaction, where resources are consumed to produce other resources.
  """
  tag = 'produces' # node name in input file

  @classmethod
  def get_input_specs(cls):
    """
    Collects input specifications for this class.
    @ In, None
    @ Out, input_specs, InputData, specs
    """
    specs = super(Producer, cls).get_input_specs()
    specs.addSub(InputData.parameterInputFactory('consumes', contentType=InputTypes.StringListType, descr=r"""The producer can either produce or consume a resource. If the producer is a consumer it must be accompanied with a transfer function to convert one source of energy to another. """))
    specs.addSub(ValuedParam.get_input_specs('transfer'))
    return specs

  def __init__(self, **kwargs):
    """
    Constructor
    @ In, kwargs, dict, passthrough arguments
    @ Out, None
    """
    Interaction.__init__(self, **kwargs)
    self._produces = [] # the resource(s) produced by this interaction
    self._consumes = [] # the resource(s) consumed by this interaction

  def read_input(self, specs, mode, comp_name):
    """
    Sets settings from input file
    @ In, specs, InputData, specs
    @ In, mode, string, case mode to operate in (e.g. 'sweep' or 'opt')
    @ In, comp_name, string, name of component this Interaction belongs to
    @ Out, None
    """
    # specs were already checked in Component
    Interaction.read_input(self, specs, mode, comp_name)
    self._produces = specs.parameterValues['resource']
    for item in specs.subparts:
      if item.getName() == 'consumes':
        self._consumes = item.value
      elif item.getName() == 'transfer':
        self._set_valued_param('_transfer', comp_name, item, mode)
    # input checking
    ## if a transfer function was not given, this component cannot consume a resource
    if self._transfer is None:
      if self._consumes:
        self.raiseAnError(IOError, 'Any component that consumes a resource must have a transfer function describing the production process!')
    ## if the transfer function is a simple float/ARMA ratio, exactly one input and one output are required
    else:
      if self._transfer.type in ['value', 'ARMA']:
        # BUGFIX: original read "len(self.get_outputs() == 1)", which calls len()
        # on a bool and raises TypeError instead of performing the intended check.
        if not (len(self.get_inputs()) == 1 and len(self.get_outputs()) == 1):
          self.raiseAnError(IOError, 'Transfer function ("<transfer>") can be a float/ARMA only if the component '+\
                            'produces exactly one resource and consumes exactly one resource!\n' +\
                            ' Consumes: {}'.format(self.get_inputs()) +\
                            ' Produces: {}'.format(self.get_outputs()))

  def get_inputs(self):
    """
    Returns the set of resources that are inputs to this interaction.
    @ In, None
    @ Out, inputs, set, set of inputs
    """
    inputs = Interaction.get_inputs(self)
    inputs.update(np.atleast_1d(self._consumes))
    return inputs

  def get_outputs(self):
    """
    Returns the set of resources that are outputs to this interaction.
    @ In, None
    @ Out, outputs, set, set of outputs
    """
    outputs = set(np.atleast_1d(self._produces))
    return outputs

  def print_me(self, tabs=0, tab=' '):
    """
    Prints info about self
    @ In, tabs, int, optional, number of tabs to insert before prints
    @ In, tab, str, optional, characters to use to denote hierarchy
    @ Out, None
    """
    pre = tab*tabs
    print(pre+'Producer:')
    print(pre+' produces:', self._produces)
    print(pre+' consumes:', self._consumes)
    print(pre+' transfer:', self._transfer)
    print(pre+' capacity:', self._capacity)

  def produce(self, request, meta, raven_vars, dispatch, t, level=None):
    """
    Determines the results of this interaction producing resources.
    @ In, request, dict, requested action {resource: amount}
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ In, level, float, storage level (unused for this Interaction)
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    res, amt = next(iter(request.items()))
    # apply the transfer function, if any, to convert consumed resources to produced ones
    if self._transfer:
      balance, meta = self.transfer(request, meta, raven_vars, dispatch, t)
      # TODO catch errors! Custom user-based errors?
    else:
      # no transfer function, then we provide the request as-is
      balance = request
    # clip the result to this component's capacity
    balance, meta = self._check_capacity_limit(res, amt, balance, meta, raven_vars, dispatch, t)
    return balance, meta

  def produce_max(self, meta, raven_vars, dispatch, t):
    """
    Determines the results of this interaction producing maximum resources.
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    request, meta = self.get_capacity(meta, raven_vars, dispatch, t)
    balance, meta = self.produce(request, meta, raven_vars, dispatch, t)
    return balance, meta

  def produce_min(self, meta, raven_vars, dispatch, t):
    """
    Determines the results of this interaction producing minimum resources.
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    if self._minimum:
      request, meta = self.get_minimum(meta, raven_vars, dispatch, t)
      request = {self._minimum_var: request[self._minimum_var]}
    else:
      # no minimum given: the minimum activity is producing nothing
      request = {next(iter(self.get_outputs())): 0.0}
    balance, meta = self.produce(request, meta, raven_vars, dispatch, t)
    return balance, meta

  def transfer(self, request, meta, raven_vars, dispatch, t):
    """
    Use the transfer function to make a balance of activities that should occur
    @ In, request, dict, requested action {resource: amount}
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    assert len(request) == 1
    resources_in = list(self.get_inputs())
    resources_out = list(self.get_outputs())
    inputs = {'request': request,
              'meta': meta,
              'raven_vars': raven_vars,
              'dispatch': dispatch,
              't': t}
    balance, meta = self._transfer.evaluate(inputs)
    self.check_expected_present(balance, self.get_resources(), 'TRANSFER FUNCTION {}'.format(self._transfer))
    # NOTE: an unreachable "if False:" block handling scalar transfer rates was
    # removed here; it also indexed a dict ("inputs[0]") and would have raised.
    # The single-input/single-output constraint for scalar transfers is enforced
    # in read_input instead.
    # check that all values got filled -> TODO remove this for opt performance
    missing = set(resources_in + resources_out) - set(balance.keys())
    if missing:
      self.raiseAnError(RuntimeError, 'While evaluating transfer function, not all variables requested were provided!' +\
                        ' Missing: {}'.format(missing) +\
                        ' Transfer function: {}'.format(self._transfer))
    return balance, meta
class Storage(Interaction):
  """
  Explains a particular interaction, where a resource is stored and released later.
  """
  tag = 'stores' # node name in input file

  @classmethod
  def get_input_specs(cls):
    """
    Collects input specifications for this class.
    @ In, None
    @ Out, input_specs, InputData, specs
    """
    specs = super(Storage, cls).get_input_specs()
    specs.addSub(ValuedParam.get_input_specs('rate'))
    specs.addSub(ValuedParam.get_input_specs('initial_stored'))
    return specs

  def __init__(self, **kwargs):
    """
    Constructor
    @ In, kwargs, dict, passthrough args
    @ Out, None
    """
    Interaction.__init__(self, **kwargs)
    self._stores = None         # the resource stored by this interaction
    self._rate = None           # the rate at which this component can store up or discharge
    self._initial_stored = None # how much resource this component starts with stored

  def read_input(self, specs, mode, comp_name):
    """
    Sets settings from input file
    @ In, specs, InputData, specs
    @ In, mode, string, case mode to operate in (e.g. 'sweep' or 'opt')
    @ In, comp_name, string, name of component this Interaction belongs to
    @ Out, None
    """
    # specs were already checked in Component
    Interaction.read_input(self, specs, mode, comp_name)
    self._stores = specs.parameterValues['resource']
    for item in specs.subparts:
      if item.getName() == 'rate':
        self._set_valued_param('_rate', comp_name, item, mode)
      elif item.getName() == 'initial_stored':
        self._set_valued_param('_initial_stored', comp_name, item, mode)
    assert len(self._stores) == 1, 'Multiple storage resources given for component "{}"'.format(comp_name)
    self._stores = self._stores[0]
    # checks and defaults
    if self._initial_stored is None:
      self.raiseAWarning('Initial storage level for "{}" was not provided! Defaulting to 0.'.format(comp_name))
      # make a fake reader node for a 0 value
      vp = ValuedParam('initial_stored')
      vp.type = 'value'
      vp._value = 0.0 # TODO getter/setter, also a better default value setting?
      self._initial_stored = vp
    # the capacity is limited by the stored resource.
    self._capacity_var = self._stores

  def get_inputs(self):
    """
    Returns the set of resources that are inputs to this interaction.
    @ In, None
    @ Out, inputs, set, set of inputs
    """
    inputs = Interaction.get_inputs(self)
    inputs.update(np.atleast_1d(self._stores))
    return inputs

  def get_outputs(self):
    """
    Returns the set of resources that are outputs to this interaction.
    @ In, None
    @ Out, outputs, set, set of outputs
    """
    outputs = Interaction.get_outputs(self)
    outputs.update(np.atleast_1d(self._stores))
    return outputs

  def get_resource(self):
    """
    Returns the resource this unit stores.
    @ In, None
    @ Out, stores, str, resource stored
    """
    return self._stores

  def print_me(self, tabs=0, tab=' '):
    """
    Prints info about self
    @ In, tabs, int, optional, number of tabs to insert before prints
    @ In, tab, str, optional, characters to use to denote hierarchy
    @ Out, None
    """
    pre = tab*tabs
    print(pre+'Storage:')
    print(pre+' stores:', self._stores)
    print(pre+' rate:', self._rate)
    print(pre+' capacity:', self._capacity)

  def produce(self, request, meta, raven_vars, dispatch, t, level=None):
    """
    Determines the results of this interaction producing resources.
    @ In, request, dict, requested action {resource: amount}
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ In, level, float, storage level
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    if level is None:
      raise RuntimeError('Storage level information was not provided to Storage produce call!')
    # time step width; fall back to the first interval when t == 0
    dt = dispatch()['time'].values
    dt = dt[t] - dt[t-1] if t > 0 else dt[t+1] - dt[t]
    res, amt_rate = next(iter(request.items()))
    # UNITS: amt_rate is in resource per time, NOT pure resource!
    amt_amount = amt_rate * dt
    assert res == self.get_resource(), 'Requested var is not the same as stored var!'
    # clip to what the storage can absorb/provide given its level and capacity
    balance, meta = self._check_capacity_limit(res, amt_amount, {}, meta, raven_vars, dispatch, t, level)
    # also check rate limit
    delta_amount = balance[res]
    delta_rate = delta_amount / dt
    balance, meta = self._check_rate_limit(res, delta_rate, {}, meta, raven_vars, dispatch, t)
    return balance, meta

  def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):
    """
    Check to see if capacity limits of this component have been violated.
    Overloads the Interaction method, since units for storage are "res" not "res per second".
    @ In, res, str, name of capacity-limiting resource
    @ In, amt, float, requested amount of resource used in interaction
    @ In, balance, dict, results of requested interaction
    @ In, meta, dict, additional variable passthrough
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ In, level, float, current level of storage
    @ Out, balance, dict, new results of requested action, possibly modified if capacity hit
    @ Out, meta, dict, additional variable passthrough
    """
    # note "amt" has units of AMOUNT not RATE (resource, not resource per second)
    sign = np.sign(amt)
    # are we storing (sign < 0) or providing (sign > 0)?
    if sign < 0:
      # we are being asked to consume some; headroom is capacity minus current level
      cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)
      available_amount = cap[res] - level
    else:
      # we are being asked to produce some; we can provide at most what is stored
      available_amount = level
    # the amount we can move is the minimum of the requested or what's available
    delta = sign * min(available_amount, abs(amt))
    return {res: delta}, meta

  def _check_rate_limit(self, res, amt, balance, meta, raven_vars, dispatch, t):
    """
    Determines the limiting rate of in/out production for storage
    @ In, res, str, name of capacity-limiting resource
    @ In, amt, float, requested amount of resource used in interaction
    @ In, balance, dict, results of requested interaction
    @ In, meta, dict, additional variable passthrough
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, balance, dict, new results of requested action, possibly modified if capacity hit
    @ Out, meta, dict, additional variable passthrough
    """
    # TODO distinct up/down rates
    # check limiting rate for resource flow in/out, if any
    if self._rate:
      request = {res: None}
      inputs = {'request': request,
                'meta': meta,
                'raven_vars': raven_vars,
                'dispatch': dispatch,
                't': t}
      max_rate = self._rate.evaluate(inputs, target_var=res)[0][res]
      # FIX: removed a leftover debug print here that spammed stdout on every
      # dispatch step.
      delta = np.sign(amt) * min(max_rate, abs(amt))
      return {res: delta}, meta
    return {res: amt}, meta

  def get_initial_level(self, meta, raven_vars, dispatch, t):
    """
    Find initial level of the storage
    @ In, meta, dict, additional variable passthrough
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, initial, float, initial level
    """
    res = self.get_resource()
    request = {res: None}
    inputs = {'request': request,
              'meta': meta,
              'raven_vars': raven_vars,
              'dispatch': dispatch,
              't': t}
    return self._initial_stored.evaluate(inputs, target_var=res)[0][res]
class Demand(Interaction):
  """
  Explains a particular interaction, where a resource is demanded.
  """
  tag = 'demands' # node name in input file

  @classmethod
  def get_input_specs(cls):
    """
    Collects input specifications for this class.
    @ In, None
    @ Out, input_specs, InputData, specs
    """
    specs = super(Demand, cls).get_input_specs()
    specs.addSub(ValuedParam.get_input_specs('penalty'))
    return specs

  def __init__(self, **kwargs):
    """
    Constructor
    @ In, kwargs, dict, arguments
    @ Out, None
    """
    Interaction.__init__(self, **kwargs)
    self._demands = None # the resource demanded by this interaction
    self._penalty = None # how to penalize for not meeting demand NOT IMPLEMENTED

  def read_input(self, specs, mode, comp_name):
    """
    Sets settings from input file
    @ In, specs, InputData, specs
    @ In, mode, string, case mode to operate in (e.g. 'sweep' or 'opt')
    @ In, comp_name, string, name of component this Interaction belongs to
    @ Out, None
    """
    # specs were already checked in Component
    # must set demands first, so that "capacity" can access it
    self._demands = specs.parameterValues['resource']
    Interaction.read_input(self, specs, mode, comp_name)
    for item in specs.subparts:
      if item.getName() == 'penalty':
        # BUGFIX: the penalty was previously stored into '_rate' (copy-paste from
        # Storage), which left self._penalty permanently None.
        self._set_valued_param('_penalty', comp_name, item, mode)

  def get_inputs(self):
    """
    Returns the set of resources that are inputs to this interaction.
    @ In, None
    @ Out, inputs, set, set of inputs
    """
    inputs = Interaction.get_inputs(self)
    inputs.update(np.atleast_1d(self._demands))
    return inputs

  def print_me(self, tabs=0, tab=' '):
    """
    Prints info about self
    @ In, tabs, int, optional, number of tabs to insert before prints
    @ In, tab, str, optional, characters to use to denote hierarchy
    @ Out, None
    """
    pre = tab*tabs
    print(pre+'Demand/Load:')
    print(pre+' demands:', self._demands)
    print(pre+' penalty:', self._penalty)
    print(pre+' capacity:', self._capacity)

  def produce(self, request, meta, raven_vars, dispatch, t, level=None):
    """
    Determines the results of this interaction producing resources.
    @ In, request, dict, requested action {resource: amount}
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ In, level, float, storage level (unused for this Interaction)
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    # Q: should this have a transfer function or something? At least capacity limits?
    # A: no; if you want this functionality, add an intervening component with a transfer function.
    res, amt = next(iter(request.items()))
    balance, meta = self._check_capacity_limit(res, amt, request, meta, raven_vars, dispatch, t)
    # demands always consume, so force the resulting amount negative
    res, amt = next(iter(balance.items()))
    amt = -1 * abs(amt)
    balance[res] = amt
    return balance, meta

  def produce_max(self, meta, raven_vars, dispatch, t):
    """
    Determines the results of this interaction producing maximum resources.
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    request, meta = self.get_capacity(meta, raven_vars, dispatch, t)
    return request, meta

  def produce_min(self, meta, raven_vars, dispatch, t):
    """
    Determines the results of this interaction producing minimum resources.
    @ In, meta, dict, additional variables to pass through
    @ In, raven_vars, dict, TODO part of meta! consolidate!
    @ In, dispatch, dict, TODO part of meta! consolidate!
    @ In, t, int, TODO part of meta! consolidate!
    @ Out, balance, dict, results of requested action
    @ Out, meta, dict, additional variable passthrough
    """
    if self._minimum:
      request, meta = self.get_minimum(meta, raven_vars, dispatch, t)
    else:
      request = {next(iter(self.get_inputs())): 0.0} # TODO is this a good choice when no min var avail?
    return request, meta
19,970 | 32728139414b6a841fe2ef4fbfcdcd8cd8d74776 | array=[]
# Read five integers from the user into the list created above.
for _ in range(5):
    array.append(int(input("Enter an element")))
print(array)

# Product of all elements.
product = array[0]
for value in array[1:]:
    product = product * value
print(product)

# Sum of all elements.
total = array[0]
for value in array[1:]:
    total = total + value
print(total)

# Count how many elements are odd vs even.
odd_count = 0
even_count = 0
for value in array:
    if value % 2:
        odd_count += 1
    else:
        even_count += 1
print(odd_count)
print(even_count)

# Count positives, negatives, and zeros.
positive_count = 0
negative_count = 0
zero_count = 0
for value in array:
    if value > 0:
        positive_count += 1
    elif value < 0:
        negative_count += 1
    else:
        zero_count += 1
print(positive_count)
print(negative_count)
print(zero_count)
19,971 | ca17c739d8c3821aee6b0b6e3b952ec5176ffc1e | import sys
# Target flash offsets for each image.
# NOTE(review): these match a typical ESP32 flash layout — confirm for the target chip.
OFFSET_BOOTLOADER = 0x1000
OFFSET_PARTITIONS = 0x8000
OFFSET_APPLICATION = 0x10000

# (name, target flash offset, input file path) — must be in ascending offset order,
# since the output is written front-to-back.
files_in = [
    ('bootloader', OFFSET_BOOTLOADER, sys.argv[1]),
    ('partitions', OFFSET_PARTITIONS, sys.argv[2]),
    ('application', OFFSET_APPLICATION, sys.argv[3]),
]
file_out = sys.argv[4]

# Concatenate the images into one flat binary, padding the gaps with 0xFF
# (the erased-flash value) so that each image lands at its required offset.
cur_offset = OFFSET_BOOTLOADER
with open(file_out, 'wb') as fout:
    for name, offset, file_in in files_in:
        # an image must not start before the end of the previous one
        assert offset >= cur_offset
        fout.write(b'\xff' * (offset - cur_offset))
        cur_offset = offset
        with open(file_in, 'rb') as fin:
            data = fin.read()
            fout.write(data)
            cur_offset += len(data)
        # per-image size report, aligned columns
        print('%-12s% 8d' % (name, len(data)))
    print('%-12s% 8d' % ('total', cur_offset))
19,972 | 61ccc716d01ec398fdb74a4c0e9f0df1d8b5550e | from inventory import db
from datetime import datetime
class Product(db.Model):
    """An inventory product, identified by a unique name, with description and image."""
    product_id = db.Column(db.Integer, primary_key=True)
    product_name = db.Column(db.String(20), unique=True, nullable=False)
    # placeholder text shown until the user supplies a real description
    product_description = db.Column(db.String(200), nullable=False, default='This is a default description for product, please update the product with a brief description')
    # filename of the product image (served from static assets, presumably — verify)
    product_image_file = db.Column(db.String(20), nullable=False, default='default-cars.jpeg')

    def __repr__(self):
        """Debug representation showing the product name and image filename."""
        return f"Product('{self.product_name}', '{self.product_image_file}')"
class Location(db.Model):
    """A storage location, identified by a unique name, with description and image."""
    location_id = db.Column(db.Integer, primary_key=True)
    location_name = db.Column(db.String(20), unique=True, nullable=False)
    # placeholder text shown until the user supplies a real description
    location_description = db.Column(db.String(200), nullable=False, default='This is a default description for location, please update the location with a brief description')
    # filename of the location image
    location_image_file = db.Column(db.String(20), nullable=False, default='thumbnail-default.jpg')

    def __repr__(self):
        """Debug representation showing the location name and image filename."""
        return f"Location('{self.location_name}', '{self.location_image_file}')"
class Movement(db.Model):
    """A recorded transfer of a product quantity between two locations."""
    movement_id = db.Column(db.Integer, primary_key=True)
    # BUGFIX: pass the callable, not datetime.now() — calling it here would
    # evaluate once at import time, stamping every row with the same moment.
    timestamp = db.Column(db.DateTime, nullable=False, default=datetime.now)
    # NOTE(review): these FK columns are String(20) while location_id is an
    # Integer primary key — reconcile the type mismatch in a schema
    # migration (left unchanged here to avoid altering the live schema).
    from_location = db.Column(db.String(20), db.ForeignKey('location.location_id'))
    to_location = db.Column(db.String(20), db.ForeignKey('location.location_id'))
    product_id = db.Column(db.Integer, db.ForeignKey('product.product_id'), nullable=False)
    qty = db.Column(db.Integer, nullable=False)
    def __repr__(self):
        return f"Movement( '{self.timestamp}', '{self.from_location}', '{self.to_location}', '{self.product_id}', '{self.qty}')"
class StaticMovement(db.Model):
    """Snapshot copy of a movement row (same shape as Movement)."""
    movement_id = db.Column(db.Integer, primary_key=True)
    # BUGFIX: pass the callable, not datetime.now() — calling it here would
    # evaluate once at import time, stamping every row with the same moment.
    timestamp = db.Column(db.DateTime, nullable=False, default=datetime.now)
    # NOTE(review): String(20) FK columns referencing an Integer primary key;
    # see the identical remark on Movement.
    from_location = db.Column(db.String(20), db.ForeignKey('location.location_id'))
    to_location = db.Column(db.String(20), db.ForeignKey('location.location_id'))
    product_id = db.Column(db.Integer, db.ForeignKey('product.product_id'), nullable=False)
    qty = db.Column(db.Integer, nullable=False)
    def __repr__(self):
        return f"StaticMovement( '{self.timestamp}', '{self.from_location}', '{self.to_location}', '{self.product_id}', '{self.qty}')"
19,973 | 0302d6d212e091c20de95e07d03be1a39f9b3652 | import _pickle as pickle
from sklearn.preprocessing import LabelEncoder
import numpy
from keras.models import load_model
import os
# Directory containing this module; model artifacts are loaded relative to it.
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
def load_career():
    """Load the career classifier and its label encoder from disk.

    Returns:
        (clf, encoder): the unpickled classifier and a LabelEncoder whose
        classes are restored from classes.npy.

    SECURITY NOTE: unpickling executes arbitrary code — only ship career.pkl
    from a trusted source.
    """
    #clf.predict([[85.859834,56.391853,55.401763,21.368436,55.869562,11.490600,73.845602,57.813019]])
    #list(le.inverse_transform([267]))
    encoder = LabelEncoder()
    encoder.classes_ = numpy.load(ROOT_DIR + '/classes.npy')
    with open(ROOT_DIR + '/career.pkl', 'rb') as f:
        clf = pickle.load(f)
    return clf,encoder
def predict(clf):
    """Not implemented; placeholder for a future prediction helper."""
    pass
def load_mitheory():
    """Load the trained MI child-aptitude Keras model from disk.

    The commented example shows the expected inference call: a vector of six
    scores reshaped to (1, 6, 1) — presumably an LSTM input; verify against
    the training code.
    """
    # x=np.array([86.645904,85.070347,44.236459,72.588626,71.285970,68.394287])
    # inp=x.reshape(1,6,1)
    # model_lstm.predict(inp)
    model = load_model(ROOT_DIR + '/MI_childaptitude_model.h5')
    return model
|
19,974 | 9bed1eb353490fe72ebfca7fc8af806eba35aefe | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 00:25:20 2020
@author: Kamil Chrustowski
"""
from . import (path, Qt, QSize, QFont, QLabel, QVBoxLayout,
QDialog, QPixmap, QPainter, QPalette,
ConfigButton, QSizePolicy, QWidget)
class FarewellDialog(QDialog):
    """Frameless, fixed-size end-of-game dialog.

    Shows *txt* over a background pixmap; it ignores window-close requests
    and can only be dismissed through its "Go back to Main Menu" button.
    """

    def closeEvent(self, event):
        # Deliberately swallow close requests (title-bar close / Alt+F4):
        # the only way out is the button, which accept()s the dialog.
        event.ignore()

    def __init__(self, txt: str, parent: QWidget =None):
        super(FarewellDialog, self).__init__(parent)
        self.setFixedSize(600, 200)
        self.setWindowFlags(Qt.FramelessWindowHint)
        self.setAttribute(Qt.WA_OpaquePaintEvent)
        # BUGFIX: build the path from components instead of embedding a
        # Windows '\\' separator, which produced a broken path on Linux/macOS.
        self.pixmap = QPixmap(path.join('images', 'backgrounds', 'blank4.png'))
        self.setAttribute(Qt.WA_TranslucentBackground)
        lab = QLabel(txt)
        lab.setStyleSheet("QLabel{padding-left: 2px; padding-right: 2px; padding-top: 2px; padding-bottom: 2px;}")
        lab.setFont(QFont('KBREINDEERGAMES', 80))
        vbox = QVBoxLayout()
        vbox.setAlignment(Qt.AlignCenter)
        vbox.addWidget(lab)
        lab.setAlignment(Qt.AlignCenter)
        self.button = ConfigButton(text="Go back to Main Menu")
        vbox.addWidget(self.button, 0, Qt.AlignBottom|Qt.AlignCenter)
        self.setLayout(vbox)
        self.button.clicked.connect(lambda: self.accept())

    def paintEvent(self, event):
        # Paint the background pixmap scaled to the current dialog size.
        painter = QPainter()
        painter.begin(self)
        painter.save()
        painter.drawPixmap(0,0, self.pixmap.scaled(QSize(self.width(), self.height())))
        painter.restore()
        painter.end()  # BUGFIX: close the painting session opened by begin()

    @staticmethod
    def getDialog(parent: QWidget =None, title: str ="Bidding"):
        """Show the dialog modally; return True when dismissed via the button."""
        dialog = FarewellDialog(title, parent)
        dialog.setWindowTitle("It\'s the end of the game")
        result = dialog.exec_()
        return result == QDialog.Accepted
19,975 | cc2454f7031abce8869dca33f78d230e98f5d900 | from . import bathroom
from . import livingroom
|
19,976 | 0d432217e50855c504a7a02fc124f24b3d7fd353 | import json
import datetime
from api import Utils
def create_new_user(data, cursor):
    """Insert a new user row for the caller identified by the access token.

    Returns a JSON string ``{"success": true}``.
    """
    # get data input
    user_data = data['userInfo']
    user_id = Utils.get_id_for_access_token(data['accessToken'])
    # BUGFIX: parameterized query — the previous str.format() version was
    # open to SQL injection through every user-supplied field. (%s is the
    # DB-API placeholder used by MySQL-style drivers; confirm for this one.)
    # NOTE(review): table is spelled USER here but User in delete/get —
    # unify the casing if the backend is case-sensitive.
    sql_querry = "INSERT INTO USER values (%s, %s, %s, %s, %s, %s, %s, %s)"
    print(sql_querry)
    cursor.execute(sql_querry, (user_id, user_data['name'],
                                user_data['birthDay'], user_data['birthMonth'],
                                user_data['birthYear'], user_data['location'],
                                user_data['picture'], user_data['bio']))
    # Return json
    response = {
        'success': True
    }
    return json.dumps(response)
def update_user(data, cursor):
    """Update name/location/picture/bio of the token's user.

    Returns a JSON string ``{"success": true}``.
    """
    # get data input
    user_data = data['userInfo']
    user_id = Utils.get_id_for_access_token(data['accessToken'])
    # BUGFIX: parameterized query — the previous str.format() version was
    # open to SQL injection through every user-supplied field.
    sql_querry = '''UPDATE USER
                 SET name=%s, location=%s, picture=%s, bio=%s
                 WHERE userId = %s'''
    cursor.execute(sql_querry, (user_data['name'], user_data['location'],
                                user_data['picture'], user_data['bio'],
                                user_id))
    # Return json
    response = {
        'success': True
    }
    return json.dumps(response)
def delete_user(data, cursor):
    """Delete the row of the user identified by the access token.

    Returns a JSON string ``{"success": true}``.
    """
    user_id = Utils.get_id_for_access_token(data['accessToken'])
    # BUGFIX: parameterized query — protects against injection via a forged
    # or malicious user id.
    sql_querry = '''
        DELETE FROM User
        WHERE userId = %s
    '''
    cursor.execute(sql_querry, (user_id,))
    # Return json
    response = {
        'success': True
    }
    return json.dumps(response)
def get_my_user(data, cursor):
    """Return the JSON profile of the user identified by the access token."""
    return get_user_json_for_id(
        Utils.get_id_for_access_token(data['accessToken']), cursor)
def get_user_json_for_id(user_id, cursor):
    """Serialize the profile dict of *user_id* as a JSON string."""
    user = get_user_for_id(user_id, cursor)
    return json.dumps(user)
def get_user_for_id(user_id, cursor):
    """Fetch the profile of *user_id* as a dict.

    Returns a dict with name/age/location/picture/bio for the first matching
    row, or None when no row matches.
    """
    # BUGFIX: parameterized query instead of str.format() (SQL injection).
    sql_querry = '''
        SELECT name, birthDay, birthMonth, birthYear, location, picture, bio
        FROM User
        WHERE userId = %s
    '''
    cursor.execute(sql_querry, (user_id,))
    # Return json
    for (name, birthDay, birthMonth, birthYear, location, picture, bio) in cursor:
        now = datetime.datetime.now()
        # BUGFIX: the previous logic was inverted — it subtracted a year when
        # the birthday HAD already passed. Correct rule: subtract one only
        # while this year's birthday is still in the future.
        age = now.year - birthYear
        if (now.month, now.day) < (birthMonth, birthDay):
            age -= 1
        return {
            'name': name,
            'age': age,
            'location': location,
            'picture': picture,
            'bio': bio
        }
    return None
19,977 | e70d6733206d0c4f406f3a34c03e753e0e7ba338 | from __future__ import annotations
import re
from typing import TYPE_CHECKING, List, Type, Dict
import logging
import docker
from snowshu.configs import (DOCKER_NETWORK, DOCKER_REPLICA_MOUNT_FOLDER,
DOCKER_WORKING_DIR, DOCKER_REPLICA_VOLUME, DOCKER_API_TIMEOUT, LOCAL_ARCHITECTURE)
from snowshu.core.utils import get_multiarch_list
if TYPE_CHECKING:
from snowshu.adapters.target_adapters.base_target_adapter import BaseTargetAdapter
logger = logging.getLogger(__name__)
class SnowShuDocker:
    """Wrapper around the Docker SDK used by SnowShu to build, tag and manage
    replica images, containers, networks and volumes."""

    def __init__(self):
        # One docker client for all operations, configured from the
        # environment (DOCKER_HOST etc.) with SnowShu's API timeout.
        self.client = docker.from_env(timeout=DOCKER_API_TIMEOUT)

    def _create_snowshu_volume(self, volume_name: str) -> docker.models.volumes.Volume:
        """ Creating a docker volume if not exists"""
        try:
            volume = self.client.volumes.get(volume_name)
        except docker.errors.NotFound:
            volume = self.client.volumes.create(
                name=volume_name, driver='local',)
        return volume

    def convert_container_to_replica(
            self,
            replica_name: str,
            active_container: docker.models.containers.Container,
            passive_container: docker.models.containers.Container) -> list[docker.models.images.Image]:
        """coerces a live container into a replica image and returns the image.

        replica_name: the name of the new replica
        return: [replica_image_from_active,
                 replica_image_from_passive(skipped if no passive),
                 replica_image_from_local_arch]
        """
        new_replica_name = self.sanitize_replica_name(replica_name)
        replica_list = []
        container_list = [
            active_container, passive_container] if passive_container else [active_container]
        logger.info(
            f'Creating new replica image with name {new_replica_name}...')
        for container in container_list:
            try:
                self.client.images.remove(new_replica_name, force=True)
            except docker.errors.ImageNotFound:
                pass
            # Containers are named '<host>_<arch>'; recover the arch suffix.
            container_arch = container.name.split('_')[-1]
            # commit with arch tag
            replica = container.commit(
                repository=new_replica_name, tag=container_arch)
            replica_list.append(replica)
            logger.info(
                f'Replica image {replica.tags[0]} created. Cleaning up...')
            self.remove_container(container.name)
        # The image matching the local architecture additionally gets 'latest'.
        for replica in replica_list:
            if replica.attrs.get('Architecture') == LOCAL_ARCHITECTURE:
                local_arch_replica = replica
                local_arch_replica.tag(
                    repository=new_replica_name, tag='latest')
        # this is done due to how recomitting existing image is not reflected in 'replica_list' var
        actual_replica_list = self.client.images.list(new_replica_name)
        return actual_replica_list

    def startup(self,  # noqa pylint: disable=too-many-locals, too-many-branches, too-many-statements
                target_adapter: Type['BaseTargetAdapter'],
                source_adapter: str,
                arch_list: list[str],
                # BUGFIX: the previous annotation `tuple(...)` was a call, not
                # a type expression; harmless at runtime (lazy annotations)
                # but invalid for typing tools.
                envars: list) -> tuple[docker.models.containers.Container, docker.models.containers.Container]:
        """Create, start and initialize the replica build container(s).

        Returns (active_container, passive_container); the passive entry is
        None when only one architecture is requested.
        """
        # Unpack target adapter's data
        image_name = target_adapter.DOCKER_IMAGE
        is_incremental = target_adapter.is_incremental
        hostname = target_adapter.credentials.host
        network = self._get_or_create_network(DOCKER_NETWORK)
        logger.info('Creating an external volume...')
        replica_volume = self._create_snowshu_volume(DOCKER_REPLICA_VOLUME)
        logger.info(f'Finding base image {image_name}...')
        container_list = []
        if is_incremental:
            name = self.replica_image_name_to_common_name(image_name)
            # get arch of the supplied image
            base_image_arch = self.get_docker_image_attributes(image_name)[
                'Architecture']
            # set arch list to always set supplied image as active container, regardless of if it is native
            arch_list_i = get_multiarch_list(base_image_arch) if len(
                arch_list) == 2 else [base_image_arch]
            # warn user if non-native architecture base was supplied
            if base_image_arch != LOCAL_ARCHITECTURE:
                logger.warning(
                    'Supplied base image is of a non-native architecture,'
                    ' please try to use native for better performance')
            for arch in arch_list_i:
                try:
                    # Try to retreive supplied image
                    try:
                        image_candidate = self.client.images.get(image_name)
                    except docker.errors.ImageNotFound:
                        logger.exception(
                            f'Supplied incremental base image {image_name} not found locally, aborting build')
                        raise
                    if image_candidate.attrs['Architecture'] == arch:
                        logger.info(
                            'Found base image...')
                        image = image_candidate
                    else:
                        # If supplied image is not of current arch, pull postgres instead
                        logger.info(
                            f'Getting target database image of arch {arch}...')
                        try:
                            image = self.client.images.get(
                                f'{target_adapter.BASE_DB_IMAGE.split(":")[0]}:{arch}')
                        except docker.errors.ImageNotFound:
                            image = self.client.images.pull(
                                target_adapter.BASE_DB_IMAGE, platform=f'linux/{arch}')
                            image.tag(f'{target_adapter.BASE_DB_IMAGE.split(":")[0]}:{arch}')
                except ConnectionError as error:
                    logger.error(
                        'Looks like docker is not started, please start docker daemon\nError: %s', error)
                    raise
                tagged_container_name = f'{name}_{arch}'
                logger.info(
                    f"Creating stopped container {tagged_container_name}...")
                self.remove_container(tagged_container_name)
                container = self.create_and_init_container(
                    image=image,
                    container_name=tagged_container_name,
                    target_adapter=target_adapter,
                    source_adapter=source_adapter,
                    network=network,
                    replica_volume=replica_volume,
                    envars=envars
                )
                # Only one container may run at a time on multiarch builds.
                if len(arch_list) > 1:
                    container.stop()
                container_list.append(container)
        else:
            for arch in arch_list:
                try:
                    # This pulls raw postgres for regular full build
                    try:
                        image = self.client.images.get(
                            f'{target_adapter.DOCKER_IMAGE.split(":")[0]}:{arch}')
                    except docker.errors.ImageNotFound:
                        image = self.client.images.pull(
                            target_adapter.DOCKER_IMAGE, platform=f'linux/{arch}')
                        image.tag(f'{target_adapter.DOCKER_IMAGE.split(":")[0]}:{arch}')
                    # verify the image is tagged properly (image's arch matches its tag)
                    try:
                        assert image.attrs['Architecture'] == arch
                    except AssertionError:
                        logger.warning('Image tags do not match their actual architecture, '
                                       'retag or delete postgres images manually to correct')
                except ConnectionError as error:
                    logger.error(
                        'Looks like docker is not started, please start docker daemon\nError: %s', error)
                    raise
                tagged_container_name = f'{hostname}_{arch}'
                logger.info(
                    f"Creating stopped container {tagged_container_name}...")
                self.remove_container(tagged_container_name)
                container = self.create_and_init_container(
                    image=image,
                    container_name=tagged_container_name,
                    target_adapter=target_adapter,
                    source_adapter=source_adapter,
                    network=network,
                    replica_volume=replica_volume,
                    envars=envars
                )
                if len(arch_list) > 1:
                    container.stop()
                container_list.append(container)
        if len(container_list) == 2:
            active_container, passive_container = container_list[0], container_list[1]
        else:
            active_container = container_list[0]
            passive_container = None
        # Multiarch flow stopped every container above; restart the active one.
        if len(arch_list) > 1:
            active_container.start()
        return active_container, passive_container

    def create_and_init_container(  # noqa pylint: disable=too-many-arguments
            self,
            image: docker.models.images.Image,
            container_name: str,
            target_adapter: Type['BaseTargetAdapter'],
            source_adapter: str,
            network: docker.models.networks.Network,
            replica_volume: docker.models.volumes.Volume,
            envars: dict
    ) -> docker.models.containers.Container:
        """ Method used during self.startup() execution, creates, starts and setups container

            input: some stuff needed to define a container launch
            return: container object instance, in a running state and already set up
        """
        logger.info(
            f"Creating stopped container {container_name}...")
        port = target_adapter.DOCKER_TARGET_PORT
        hostname = target_adapter.credentials.host
        protocol = 'tcp'
        port_dict = {f"{str(port)}/{protocol}": port}
        self.remove_container(container_name)
        container = self.client.containers.create(
            image.tags[0],
            target_adapter.DOCKER_START_COMMAND,
            network=network.name,
            name=container_name,
            hostname=hostname,
            ports=port_dict,
            environment=envars,
            labels=dict(
                snowshu_replica='true',
                target_adapter=target_adapter.CLASSNAME,
                source_adapter=source_adapter),
            detach=True,
            volumes={replica_volume.name: {
                'bind': f'{DOCKER_REPLICA_MOUNT_FOLDER}'
            }},
            working_dir=DOCKER_WORKING_DIR
        )
        logger.info(
            f"Created stopped container {container.name}, connecting it to bridge network...")
        self._connect_to_bridge_network(container)
        logger.info(
            f'Connected. Starting created container {container.name}...')
        try:
            container.start()
        except docker.errors.APIError as error:
            if 'port is already allocated' in error.explanation:
                logger.exception('One of the ports used by snowshu_target is '
                                 'already allocated, stop extra containers and rerun')
            raise
        logger.info(
            f'Container {container.name} started, running initial setup...')
        self._run_container_setup(container, target_adapter)
        logger.info(f'Container {container.name} fully initialized.')
        return container

    def remove_container(self, container: str) -> None:
        """Kill (if running) and remove the named container; no-op if absent."""
        logger.info(f'Removing existing target container {container}...')
        try:
            removable = self.client.containers.get(container)
            try:
                removable.kill()
            except docker.errors.APIError:
                logger.info(f'Container {container} already stopped.')
            removable.remove()
            logger.info(f'Container {container} removed.')
        except docker.errors.NotFound:
            logger.info(f'Container {container} not found, skipping.')

    def _get_or_create_network(
            self, name: str) -> docker.models.networks.Network:
        """Return the named docker network, creating it when missing."""
        logger.info(f'Getting docker network {name}...')
        try:
            network = self.client.networks.get(name)
            logger.info(f'Network {network.name} found.')
        except docker.errors.NotFound:
            logger.info(f'Network {name} not found, creating...')
            network = self.client.networks.create(name, check_duplicate=True)
            logger.info(f'Network {network.name} created.')
        return network

    def _connect_to_bridge_network(
            self, container: docker.models.containers.Container) -> None:
        """Attach the container to docker's default bridge network."""
        logger.info('Adding container to bridge...')
        bridge = self.client.networks.get('bridge')
        bridge.connect(container)
        logger.info(f'Connected container {container.name} to bridge network.')

    def get_adapter_name(self, name: str) -> str:
        """Return the target adapter recorded in the image's labels."""
        try:
            return self.client.images.get(name).labels['target_adapter']
        except KeyError as exc:
            # BUGFIX: the f-prefix was missing, so the literal text '{name}'
            # was logged/raised instead of the actual image name.
            message = f"Replica image {name} is corrupted; no label for `target_adapter`."
            logger.critical(message)
            raise AttributeError(message) from exc

    @staticmethod
    def sanitize_replica_name(name: str) -> str:
        """Much more strict than standard docker tag names.

        ReplicaFactory names are coerced into ASCII lowercase, dash-
        seperated a-z0-9 strings when possible.
        """
        prefix = "snowshu_replica_"
        image = '-'.join(re.sub(r'[\-\_\+\.]', ' ',
                                name.lower().replace(prefix, '')).split())
        if not re.fullmatch(r'^[a-z0-9\-]*$', image):
            raise ValueError(
                f'Replica name {name} cannot be converted to replica name')
        final_image = prefix + image
        return final_image

    @staticmethod
    def replica_image_name_to_common_name(name: str) -> str:
        """reverse the replica sanitizer."""
        sr_delimeter = 'snowshu_replica_'
        return ':'.join((sr_delimeter.join(name.split(sr_delimeter)[1:])).split(':')[:-1])

    @staticmethod
    def _run_container_setup(container: docker.models.containers.Container,
                             target_adapter: Type['BaseTargetAdapter']) -> None:
        """Run the adapter's initialization commands inside the container."""
        logger.info('Running initialization commands in container...')
        for command in target_adapter.image_initialize_bash_commands():
            response = container.exec_run(
                f"/bin/bash -c '{command}'", tty=True)
            # exec_run returns (exit_code, output); non-zero means failure.
            if response[0] > 0:
                raise OSError(response[1])
        logger.info('Setup commands finished.')

    def find_snowshu_images(self) -> List[docker.models.images.Image]:
        """Return all locally present, tagged snowshu replica images."""
        return list(filter((lambda x: len(x.tags) > 0), self.client.images.list(
            filters=dict(label='snowshu_replica=true'))))

    def get_docker_image_attributes(self, image: str) -> Dict:
        """
        Retrieve image-related attributes
        """
        return self.client.images.get(image).attrs
|
19,978 | d4f1e9e9e53650a3e5ce39ceb0fee417f7e1fbbd | # coding: utf-8
import traceback
import io
import discord
from discord.ext import commands
class Extension(commands.Cog):
    """Owner-only commands to load, unload and reload bot extensions."""

    def __init__(self, bot):
        self.bot = bot

    def cog_unload(self):
        # No resources to release when this cog itself is unloaded.
        pass

    @commands.Cog.listener()
    async def on_ready(self):
        pass

    @commands.is_owner()
    @commands.group(aliases=["ext"])
    async def extension(self, ctx):
        """
        Manage the bot's extensions.
        """
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @commands.is_owner()
    @extension.group(name="reload", invoke_without_command=True)
    async def extension_reload(self, ctx: commands.Context, extension: str):
        """
        Reload a single extension.
        """
        if ctx.invoked_subcommand is None and not extension:
            await ctx.send_help(ctx.command)
        try:
            self.bot.reload_extension(extension)
            await ctx.reply(f"Successfully reloaded extension: `{extension}`")
        except commands.ExtensionNotLoaded:
            await ctx.reply(f"The extension `{extension}` was not loaded.")
        except commands.ExtensionNotFound:
            await ctx.reply(f"The extension `{extension}` was not found.")
        except Exception:
            # reload_extension rolls back to the previous state on failure;
            # attach the traceback so the owner can inspect the crash.
            file = discord.File(io.StringIO(traceback.format_exc()), filename="crash_log.txt")
            await ctx.reply(f"extension reload fail: `{extension}`, rollback", file=file)

    @commands.is_owner()
    @extension_reload.group(name="all")
    async def extension_reload_all(self, ctx):
        """
        Reload every currently loaded extension.
        """
        msg = []
        # Iterate over a copy: reload_extension mutates bot.extensions.
        ext = self.bot.extensions.copy()
        for extension in ext:
            try:
                self.bot.reload_extension(extension)
                msg.append(f"Successfully reloading: `{extension}`")
            except commands.ExtensionNotFound:
                msg.append(f"The extension `{extension}` was not found.")
            except Exception:
                msg.append(f"extension load fail: `{extension}`")
                file = discord.File(io.StringIO(traceback.format_exc()), filename=f"{extension}.txt")
                await ctx.reply(file=file)
        msg.append(f"\nloaded extensions: {len(self.bot.extensions)}/{len(ext)}")
        await ctx.reply("\n".join(msg))

    @commands.is_owner()
    @extension.group(name="unload")
    async def extension_unload(self, ctx, extension: str):
        """
        Unload an extension (this management cog itself is protected).
        """
        if extension == "cog.extension":
            await ctx.reply("You can't unload this extension !")
            return
        try:
            self.bot.unload_extension(extension)
            await ctx.reply(f"Successful extension unloading: `{extension}`")
        except commands.ExtensionNotLoaded:
            await ctx.reply(f"The extension `{extension}` was not loaded.")

    @commands.is_owner()
    @extension.group(name="load")
    async def extension_load(self, ctx, extension: str):
        """
        Load an extension.
        """
        try:
            self.bot.load_extension(extension)
            await ctx.reply(f"Successful extension loading: `{extension}`")
        except commands.ExtensionNotFound:
            # BUGFIX: load_extension raises ExtensionNotFound for a missing
            # extension (ExtensionNotLoaded was caught here before and could
            # never fire), and the reply coroutine must be awaited.
            await ctx.reply(f"The extension `{extension}` was not found.")
        except commands.ExtensionAlreadyLoaded:
            # BUGFIX: await the reply coroutine (it was silently discarded).
            await ctx.reply(f"The extension `{extension}` is already loaded.")
        except Exception:
            file = discord.File(io.StringIO(traceback.format_exc()), filename=f"{extension}.txt")
            await ctx.send(f"extension loading fail: `{extension}`", file=file)
def setup(bot):
    # Entry point used by bot.load_extension() to register this cog.
    bot.add_cog(Extension(bot))
|
19,979 | 1b1f59ad6661636261b5cbd16092fe56d77bba50 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import argparse
import os
from urlparse import urlparse
from lib.ClientHTTP import ClientHTTP
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Descarga url.')
    parser.add_argument('--url', help='Url del recurso a descargar.', required=True)
    parser.add_argument('--proxy', default='', help='Setear proxy para realizar la conexion.')
    # argparse applies `type` to string defaults, but an int default is clearer.
    parser.add_argument('--port', type=int, default=80, help='Puerto donde se conecta el cliente.')
    args = parser.parse_args()
    # Derive the local filename from the URL path, falling back to index.html
    # for root URLs. BUGFIX: an empty path ('' for URLs like http://host)
    # previously produced an empty filename and a failing open().
    u = urlparse(args.url)
    if u.path not in ("", "/"):
        path, filename = os.path.split(u.path)
    else:
        filename = "index.html"
    host = u.netloc
    s = ClientHTTP(host, args.port, args.proxy)
    result = s.get(args.url)
    # BUGFIX: write through a context manager so the file is always closed,
    # and stop shadowing the builtin name `file`.
    with open(filename, "wb") as fout:
        fout.write(result)
|
19,980 | 8015d094e01397eea554735ae7b5639b3f44e407 |
from enum import Enum
class Language(Enum):
    """Supported languages; each value is the human-readable display name."""
    ENGLISH = "English"
    FRENCH = "French"
    GERMAN = "German"
    CHINESE = "Chinese"
class Country(Enum):
    """Supported countries; each value is the human-readable display name."""
    US = "United States"
    UK = "United Kingdom"
    GERMANY = "Germany"
    INDIA = "India"
class Interest(Enum):
    """Areas of interest; each value is the human-readable display name."""
    CS = "Computer Science"
    DS = "Data Science"
    ML = "Machine Learning"
    NETWORKENG = "Network Engineering"
    SECURITY = "CyberSecurity"
19,981 | 64732c78fdd3d9397ce4742f5b7d05497f9e1294 | '''
BLACK-AND-WHITE
WinterSalmon
Main
'''
if __name__ == "__main__":
    # if pygame is not installed run game in commandline mode
    # BUGFIX: the imports themselves are what raise ImportError when pygame
    # is missing (assuming gui.gui imports pygame at module level — confirm),
    # so they must happen inside the try block. Previously the module-level
    # `from gui.gui import Gui` crashed before the fallback could ever run.
    try:
        from gui.gui import Gui
        UI = Gui()
    except ImportError:
        from cli.cli import Cli
        UI = Cli()
    UI.init_screen()
    UI.game_screen()
    UI.credit_screen()
|
19,982 | 6df5481dbe3332f95ed887a5520848935e01f152 | """
Modules to help interface with other applications, systems, languages, etc.
"""
|
19,983 | eaf5ad22c8caca080ccce48dfe04cf07c20c9b3a | # coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilitites to plot the ROC and Calibration for survival models.
This module has utility functions to generate ROC and Calibration plots for
survival models at given horizons of time. Note that ideally both the ROC and
Calibration curves require to be adjusted for censoring using IPCW estimates.
Not designed to be called directly, would be called when running a function from
dcm.deep_cox_mixtures
"""
from dcm import baseline_models
from dcm import models
from dcm.calibration import calibration_curve
import matplotlib as mpl
from matplotlib import pyplot as plt
from dcm.skmetrics import brier_score
from dcm.skmetrics import cumulative_dynamic_auc
from dcm.skmetrics import concordance_index_ipcw
import numpy as np
import logging
logging.getLogger("matplotlib").setLevel(logging.CRITICAL)
from sklearn.metrics import auc
def plot_calibration_curve(ax,
                           scores,
                           e,
                           t,
                           a,
                           folds,
                           group,
                           quant,
                           strat='quantile',
                           adj='IPCW',
                           plot=True):
  """Function to plot Calibration Curve at a specified time horizon.

  Accepts a matplotlib figure instance, risk scores from a trained survival
  analysis model, and quantiles of event interest and generates an IPCW
  adjusted calibration curve.

  Args:
    ax:
      a matplotlib subfigure object.
    scores:
      risk scores P(T>t) issued by a trained survival analysis model
      (output of deep_cox_mixtures.models.predict_survival).
    e:
      a numpy array of event indicators.
    t:
      a numpy array of event/censoring times.
    a:
      a numpy vector of protected attributes.
    folds:
      a numpy vector of cv folds.
    group:
      List of the demographics to adjust for.
    quant:
      a list of event time quantiles at which the models are to be evaluated.
    strat:
      Specifies how the bins are computed. One of:
      "quantile": Equal sized bins.
      "uniform": Uniformly stratified.
    adj (str):
      Determines if IPCW adjustment is carried out on a population or subgroup
      level.
      One of "IPCWpop", "IPCWcon" (not implemented).
    plot:
      if False, only the ECE is computed and nothing is drawn.

  Returns:
    The expected calibration error (ECE); also draws on *ax* when plot=True.
  """
  # Stitch the per-fold score vectors back into one vector aligned with e/t/a.
  allscores = np.ones_like(t).astype('float')
  for fold in set(folds):
    allscores[folds == fold] = scores[fold]
  scores = allscores
  # Bar styling: blue = predicted score in bin, red hatched = calibration gap.
  b_fc = (0, 0, 1, .4)
  r_fc = (1, 0, 0, .2)
  b_ec = (0, 0, 1, .8)
  r_ec = (1, 0, 0, .8)
  n_bins = 20
  hatch = '//'
  fs = 16
  prob_true_n, _, outbins, ece = calibration_curve(
      scores,
      e,
      t,
      a,
      group,
      quant,
      typ=adj,
      ret_bins=True,
      strat=strat,
      n_bins=n_bins)
  for d in range(len(prob_true_n)):
    binsize = outbins[d + 1] - outbins[d]
    binloc = (outbins[d + 1] + outbins[d]) / 2
    # Gap between observed frequency and the bin's nominal (diagonal) value.
    gap = (prob_true_n[d] - binloc)
    if gap < 0:
      bottom = prob_true_n[d]
    else:
      bottom = prob_true_n[d] - abs(gap)
    # Only label the last pair of bars so the legend has one entry each.
    if d == len(prob_true_n) - 1:
      lbl1 = 'Score'
      lbl2 = 'Gap'
    else:
      lbl1 = None
      lbl2 = None
    if plot:
      ax.bar(
          binloc,
          prob_true_n[d],
          width=binsize,
          facecolor=b_fc,
          edgecolor=b_ec,
          linewidth=2.5,
          label=lbl1)
      ax.bar(
          binloc,
          abs(gap),
          bottom=bottom,
          width=binsize,
          facecolor=r_fc,
          edgecolor=r_ec,
          linewidth=2.5,
          hatch=hatch,
          label=lbl2)
    d += 1  # NOTE: no-op — the for statement reassigns d each iteration.
  if plot:
    # Diagonal reference line (perfect calibration) plus cosmetics.
    ax.plot([0, 1], [0, 1], c='k', ls='--', lw=2, zorder=100)
    ax.set_xlabel('Predicted Score', fontsize=fs)
    ax.set_ylabel('True Score', fontsize=fs)
    ax.legend(fontsize=fs)
    ax.set_title(str(group), fontsize=fs)
    ax.set_xlim(0, 1)
    ax.set_ylim(0, 1)
    ax.grid(ls=':', lw=2, zorder=-100, color='grey')
    ax.set_axisbelow(True)
    ax.text(
        x=0.030,
        y=.7,
        s='ECE=' + str(round(ece, 3)),
        size=fs,
        bbox=dict(boxstyle='round', fc='white', ec='grey', pad=0.2))
  return ece
def plot_roc_curve(ax,
                   scores,
                   e,
                   t,
                   a,
                   folds,
                   groups,
                   quant,
                   plot=True):
  """Function to plot ROC at a specified time horizon.

  Accepts a matplotlib figure instance, risk scores from a trained survival
  analysis model, and quantiles of event interest and generates an IPCW
  adjusted ROC curve.

  Args:
    ax:
      a matplotlib subfigure object.
    scores:
      per-fold risk scores issued by a trained survival analysis model.
    e:
      a numpy array of event indicators.
    t:
      a numpy array of event/censoring times.
    a:
      a numpy vector of protected attributes.
    folds:
      a numpy vector of cv fold.
    groups:
      List of the demographics to adjust for.
    quant:
      the event time horizon at which the models are to be evaluated.
    plot:
      if False, only the metrics are computed and nothing is drawn.

  Returns:
    (roc_auc, ctds_mean, brss_mean): per-group macro-averaged AUC,
    time-dependent concordance and Brier score dictionaries.
  """
  fs = 16
  fprs, tprs, tprs_std, ctds, brss = {}, {}, {}, {}, {}
  fprs['all'] = {}
  tprs['all'] = {}
  ctds['all'] = {}
  brss['all'] = {}
  for group in groups:
    fprs[group] = {}
    tprs[group] = {}
    ctds[group] = {}
    brss[group] = {}
  for fold in set(folds):
    ate = a[folds == fold]
    str_test = baseline_models.structure_for_eval_(t[folds == fold],
                                                   e[folds == fold])
    # With a single fold, train == test (no held-out complement exists).
    if len(set(folds)) == 1:
      atr = ate
      str_train = str_test
    else:
      atr = a[folds != fold]
      str_train = baseline_models.structure_for_eval_(t[folds != fold],
                                                      e[folds != fold])
    # Drop test samples beyond the largest training time: the IPCW censoring
    # estimate is undefined past that point.
    t_tr_max = np.max([t_[1] for t_ in str_train])
    t_ = np.array([t_[1] for t_ in str_test])
    clean = (t_<=t_tr_max)
    str_test = str_test[t_<=t_tr_max]
    ate = ate[t_<=t_tr_max]
    scores_f = scores[fold][clean]
    for group in groups:
      te_protg = (ate == group)
      tr_protg = (atr == group)
      try:
        roc_m = cumulative_dynamic_auc(str_train[tr_protg], str_test[te_protg],
                                       -scores_f[te_protg], [quant])
        brs_m = brier_score(str_train[tr_protg], str_test[te_protg],
                            scores_f[te_protg], quant)
        ctd_m = concordance_index_ipcw(str_train[tr_protg], str_test[te_protg],
                                       -scores_f[te_protg], quant)[0]
      except Exception:
        # Fall back to population-level censoring weights when the
        # subgroup-level estimate fails (e.g. too few events in the group).
        # BUGFIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        roc_m = cumulative_dynamic_auc(str_train, str_test[te_protg],
                                       -scores_f[te_protg], [quant])
        brs_m = brier_score(str_train, str_test[te_protg],
                            scores_f[te_protg], quant)
        ctd_m = concordance_index_ipcw(str_train, str_test[te_protg],
                                       -scores_f[te_protg], quant)[0]
      fprs[group][fold] = roc_m[0][0][1]
      tprs[group][fold] = roc_m[0][0][0]
      ctds[group][fold] = ctd_m
      brss[group][fold] = brs_m[1][0]
    # Population ("all") metrics for this fold.
    roc_m = cumulative_dynamic_auc(str_train, str_test, -scores_f, [quant])
    ctd_m = concordance_index_ipcw(str_train, str_test, -scores_f, quant)[0]
    brs_m = brier_score(str_train, str_test, scores_f, quant)
    fprs['all'][fold], tprs['all'][fold] = roc_m[0][0][1], roc_m[0][0][0]
    ctds['all'][fold] = ctd_m
    brss['all'][fold] = brs_m[1][0]
  cols = ['b', 'r', 'g']
  roc_auc = {}
  ctds_mean = {}
  brss_mean = {}
  j = 0
  for group in list(groups) + ['all']:
    all_fpr = np.unique(np.concatenate([fprs[group][i] for i in set(folds)]))
    # The ROC curves are interpolated at these points.
    mean_tprs = []
    for i in set(folds):
      mean_tprs.append(np.interp(all_fpr, fprs[group][i], tprs[group][i]))
    # Finally the interpolated curves are averaged over to compute AUC.
    mean_tpr = np.mean(mean_tprs, axis=0)
    std_tpr = 1.96 * np.std(mean_tprs, axis=0) / np.sqrt(10)
    fprs[group]['macro'] = all_fpr
    tprs[group]['macro'] = mean_tpr
    tprs_std[group] = std_tpr
    roc_auc[group] = auc(fprs[group]['macro'], tprs[group]['macro'])
    ctds_mean[group] = np.mean([ctds[group][fold] for fold in folds])
    brss_mean[group] = np.mean([brss[group][fold] for fold in folds])
    lbl = str(group)
    lbl += ' AUC:' + str(round(roc_auc[group], 3))
    lbl += ' Ctd:'+ str(round(ctds_mean[group], 3))
    lbl += ' BS:'+ str(round(brss_mean[group], 3))
    if plot:
      ax.plot(
          all_fpr,
          mean_tpr,
          c=cols[j],
          label=lbl)
      # Shaded 95% confidence band around the macro-averaged curve.
      ax.fill_between(
          all_fpr,
          mean_tpr - std_tpr,
          mean_tpr + std_tpr,
          color=cols[j],
          alpha=0.25)
    j += 1
  if plot:
    ax.set_xlabel('False Positive Rate', fontsize=fs)
    ax.set_ylabel('True Positive Rate', fontsize=fs)
    ax.legend(fontsize=fs)
    ax.set_xscale('log')
  return roc_auc, ctds_mean, brss_mean
def plot_results(outputs, x, e, t, a, folds, groups,
                 quantiles, strat='quantile', adj='KM', plot=True):
  """Function to plot the ROC and Calibration curves from a survival model.

  Accepts a trained survival analysis model, features and horizon of interest
  and generates the IPCW adjusted ROC curve and Calibration curve at
  pre-specified horizons of time.

  Args:
    outputs:
      a python dict with survival probabilities for each fold
    x:
      a numpy array of input features.
    e:
      a numpy array of event indicators.
    t:
      a numpy array of event/censoring times.
    a:
      a numpy vector of protected attributes.
    folds:
      a numpy vector of cv fold.
    groups:
      List of the demographics to adjust for.
    quantiles:
      a list of event time quantiles at which the models are to be evaluated.
    strat:
      Specifies how the bins are computed. One of:
      "quantile": Equal sized bins.
      "uniform": Uniformly stratified.
    adj:
      Adjustment strategy for the Expected Calibration Error. One of:
      "KM": Kaplan-Meier (Default)
      "IPCW": Inverse Propensity of Censoring

  Returns:
    metrics: dict keyed by horizon, each value a tuple of
    (roc_auc, ctd, brier, ece) dictionaries from the plotting helpers.
  """
  if plot:
    mpl.rcParams['hatch.linewidth'] = 2.0
    # One row of subplots per horizon: [ROC | calibration(all) | per-group].
    fig, big_axes = plt.subplots(
        figsize=(8 * (len(groups) + 2), 6 * len(quantiles)),
        nrows=len(quantiles),
        ncols=1)
    plt.subplots_adjust(hspace=0.4)
    i = 0
    for _, big_ax in enumerate(big_axes, start=1):
      big_ax.set_title(
          'Receiver Operator Characteristic and Calibration at t=' +
          str(quantiles[i]) + '\n',
          fontsize=16)
      # Hide the row-level axis decorations; it only carries the title.
      big_ax.tick_params(
          labelcolor=(1., 1., 1., 0.0),
          top='off',
          bottom='off',
          left='off',
          right='off')
      i += 1
  eces = {}
  metrics = {}
  for quant in quantiles:
    eces[quant] = {}
  for i in range(len(quantiles)):
    scores = outputs[quantiles[i]]
    for j in range(len(groups) + 2):
      # Subplot index within the (quantiles x (groups+2)) grid:
      # column 0 = ROC, column 1 = calibration over everyone,
      # columns 2+ = calibration per protected group.
      pt = (i * (len(groups) + 2) + j + 1)
      if plot:
        ax = fig.add_subplot(len(quantiles), len(groups) + 2, pt)
      else:
        ax = None
      if (j==1):
        eces[quantiles[i]]['all'] = plot_calibration_curve(ax,
                                                           scores,
                                                           e,
                                                           t,
                                                           a,
                                                           folds,
                                                           None,
                                                           quantiles[i],
                                                           strat=strat,
                                                           adj=adj,
                                                           plot=plot)
      if (j>1):
        eces[quantiles[i]][groups[j - 2]] = plot_calibration_curve(ax,
                                                                   scores,
                                                                   e,
                                                                   t,
                                                                   a,
                                                                   folds,
                                                                   groups[j - 2],
                                                                   quantiles[i],
                                                                   strat=strat,
                                                                   adj=adj,
                                                                   plot=plot)
      if (j==0):
        metrics[quantiles[i]] = plot_roc_curve(ax,
                                               scores,
                                               e,
                                               t,
                                               a,
                                               folds,
                                               groups,
                                               quantiles[i],
                                               plot=plot)
  # Append the calibration errors to each horizon's metric tuple.
  for quant in quantiles:
    metrics[quant] = metrics[quant] + (eces[quant], )
  if plot:
    plt.show()
  return metrics
|
19,984 | cac52f19991a478725320e25120f7c13cc8f502d | import requests
import os
from twilio.rest import Client
from dotenv import load_dotenv
# Read the .env file holding the Twilio API keys.
# docs at https://github.com/theskumar/python-dotenv
load_dotenv()

# Shared Twilio REST client, authenticated from the environment.
account_sid = os.getenv('TWILIO_ACCOUNT_SID')
auth_token = os.getenv('TWILIO_AUTH_TOKEN')
client = Client(account_sid, auth_token)

# Sites to monitor. NOTE(review): some entries end with '/' and some do not —
# downstream string handling must not assume a uniform suffix.
urls = ['https://www.ushgnyc.com',
        'https://www.bluesmoke.com',
        'https://www.caffemarchio.com/',
        'https://www.dailyprovisionsnyc.com/',
        'https://www.gramercytavern.com/',
        'https://www.manhattarestaurant.com/',
        'https://www.martamanhattan.com/',
        'https://www.martinapizzeria.com/',
        'https://www.themodernnyc.com/',
        'https://www.porchlightbar.com/',
        'https://www.heytacocina.com/',
        'https://www.unionsquarecafe.com/',
        'https://www.untitledatthewhitney.com/',
        'https://www.vinifritti.com/']
def get_status_code(url):
    """Issue an HTTP GET to *url* and return the response status code."""
    return requests.get(url).status_code
def send_site_down_text(urls):
    """Send one SMS via Twilio listing every site that failed the check.

    *urls* is a list of already-shortened site names; they are joined into a
    single message body. Prints the Twilio message SID for traceability.
    """
    alert_body = '{} down - not returning 200 status.'.format(', '.join(urls))
    message = client.messages.create(
        body=alert_body,
        from_='+16467986006',
        to='+19088124615',
    )
    print(message.sid)
# Poll every monitored site once and record its HTTP status code.
status_codes = {url: get_status_code(url) for url in urls}

# BUG FIX: the original used site[12:-5] to strip 'https://www.' and '.com/',
# but several URLs in the list have no trailing '/', so their names were
# truncated (e.g. 'https://www.ushgnyc.com'[12:-5] == 'ushgny'). Every URL in
# this list has the shape 'https://www.<name>.com[/]', so splitting on '.'
# and taking the middle label extracts <name> robustly.
non_responding_sites = [site.split('.')[1]
                        for site, status in status_codes.items()
                        if status != 200]

if non_responding_sites:
    send_site_down_text(non_responding_sites)
19,985 | 64c8de71f7022e0195efdc2f8f454424facda4f9 | from .diceloss import DiceLoss
from .focalloss import FocalLoss
from .ce import MultiCELoss |
19,986 | c38ce484df187fc323e90bccd317afbc44624a1c | import numpy as np
def multiply_by_5(input_number):
    """Return *input_number* scaled by a factor of five."""
    return input_number * 5
def spherical2cartesian(ra, dec, distance):
    """Convert spherical coordinates to Cartesian (x, y, z).

    *ra* (right ascension) and *dec* (declination) are angles in radians;
    *distance* is the radial distance. Accepts scalars or numpy arrays.
    """
    cos_dec = np.cos(dec)
    x = distance * cos_dec * np.cos(ra)
    y = distance * cos_dec * np.sin(ra)
    z = distance * np.sin(dec)
    return x, y, z
19,987 | 2c692e61122c0ce6ec7890b5141d97f518a8c942 | import re
class Common_re():
    """Small helper collecting commonly used regular-expression lookups."""

    def number_re(self, str1):
        """Return the first run of decimal digits in *str1* as a string.

        The input is coerced with str() first, so non-string values are
        accepted. Raises IndexError when no digits are present.
        """
        digit_pattern = re.compile(r"\d+")
        matches = digit_pattern.findall(str(str1))
        return matches[0]
# if __name__ == '__main__':
# str="()(400)"
# x=Common_re().number_re(str)
# print(x[0])
|
19,988 | 7e7ef4c6f5dad96144f2b63d2e83d8f4a69439f0 | import requests
# SECURITY FIX: the original embedded a GitHub username and password directly
# in source control; those credentials must be treated as compromised and
# rotated. Read credentials from the environment instead.
import os

r = requests.get('https://api.github.com/user',
                 auth=(os.environ.get('GITHUB_USER', ''),
                       os.environ.get('GITHUB_TOKEN', '')))
print(r.status_code)
19,989 | 08318fc85437b25db0dc308e15c8bfa8af93dc20 | #!/usr/bin/env python3
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import pandas as pd
from scipy import stats
import sys
from matplotlib import cm
barwidth = 0.5  # width of each bar, in axis units

# make hatches less annoyingly thick
mpl.rcParams['hatch.linewidth'] = 0.5
mpl.rcParams["errorbar.capsize"] = 2

BIGGER_SIZE = 19  # uniform font size used for every plot element
plt.rc('font', size=BIGGER_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=BIGGER_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=BIGGER_SIZE)     # fontsize of the x and y labels
plt.rc('xtick', labelsize=BIGGER_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=BIGGER_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=BIGGER_SIZE)

# CLI: argv[1] = directory containing the CSVs, argv[2] = output image path.
fname = sys.argv[2]
data_dir = sys.argv[1]

files = ['http_baseline_thru.csv', 'http_virtine_thru.csv', 'http_virtine_snapshot_thru.csv']
bar_pos = np.arange(3)

# Each CSV row is (trial, microseconds). The expression (1000/us)*1e6 converts
# to requests/second — presumably 'microseconds' is the time for 1000 requests;
# TODO(review): confirm against the benchmark harness.
y = [(1000/pd.read_csv(data_dir + "/" + x, comment='#', names=['trial', 'microseconds'])['microseconds'].values)*1000000 for x in files]
# Harmonic mean is the appropriate average for rate (requests/sec) data.
y_means = [stats.hmean(x) for x in y]
#y_std = [np.std(x) for x in y]
print(y_means)

fig, ax = plt.subplots(1, figsize=(5,5))
hatches = ['/', 'o', '-']
color = cm.viridis(np.linspace(0.3, 0.9, 3))

# One bar per configuration; hatch*3 densifies the hatch pattern.
plt.bar(bar_pos[0], y_means[0], align='edge', hatch=hatches[0]*3, color=color[0], zorder=3, width=barwidth, label='native', linewidth=0.25, edgecolor='black')
plt.bar(bar_pos[1], y_means[1], align='edge', hatch=hatches[1]*3, color=color[1], zorder=3, width=barwidth, label='virtine', linewidth=0.25, edgecolor='black')
plt.bar(bar_pos[2], y_means[2], align='edge', hatch=hatches[2]*3, color=color[2], zorder=3, width=barwidth, label='virtine SP', linewidth=0.25, edgecolor='black')

# Center the tick labels under the edge-aligned bars.
ax.set_xticks([r + barwidth/2 for r in range(0, 3)])
#ax.legend(loc='upper left', fontsize=BIGGER_SIZE-2, ncol=2)
ax.set_ylabel('Throughput (requests/sec)')
#ax.set_ylim(65000, None)
# ax.set_yticks([0, 2000, 4000, 6000, 8000])
# ax.set_yticklabels(['0', '2K', '4K', '6K', '8K'])
ax.set_xticklabels(['native', 'virtine', 'snapshot'])
ax.grid(alpha=0.5, zorder=0, axis='y', which='major')
plt.tight_layout()
plt.savefig(fname)
|
19,990 | 0281f79ef018e76a617bff2feb72bc0d201c5da8 | # You are a professional robber planning to rob houses along a street.
# Each house has a certain amount of money stashed, the only constraint
# stopping you from robbing each of them is that adjacent houses have
# security system connected and it will automatically contact the police
# if two adjacent houses were broken into on the same night.
# Given a list of non-negative integers representing the amount
# of money of each house, determine the maximum amount of money
# you can rob tonight without alerting the police.
class maxLoot(object):
    """House-robber problem: maximize loot without robbing adjacent houses."""

    def rob(self, nums):
        """Return the maximum sum obtainable from non-adjacent elements.

        Args:
            nums: list of non-negative house values (may be empty).

        Returns:
            The maximum total; 0 for an empty list.

        BUG FIX: the original implementation mutated the caller's list while
        computing the DP. This version uses the standard O(n)-time /
        O(1)-space rolling recurrence and leaves *nums* untouched.
        """
        if not nums:
            return 0
        prev_best, best = 0, 0
        for value in nums:
            # best   = optimum over the prefix ending here
            # prev_best = optimum over the prefix excluding this house
            prev_best, best = best, max(best, prev_best + value)
        return best
# BUG FIX: the original called rob(...) as a bare name, but rob is a method of
# maxLoot, so these lines raised NameError at import time. Call it through an
# instance instead.
looter = maxLoot()
print(looter.rob([1, 2, 3, 4, 5]))
print(looter.rob([7, 1, 1, 8, 1, 9]))
print(looter.rob([10, 4, 3, 4, 1, 6]))
print("The values above should be 9, 24, and 20.")
|
19,991 | d61e869451fd2ac315ebaa7641f283e11b44db62 | #!/usr/bin/env python
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A class to serve pages from zip files and use memcache for performance.
This contains a class and a function to create an anonymous instance of the
class to serve HTTP GET requests. Memcache is used to increase response speed
and lower processing cycles used in serving. Credit to Guido van Rossum and
his implementation of zipserve which served as a reference as I wrote this.
MemcachedZipHandler: Class that serves request
create_handler: method to create instance of MemcachedZipHandler
"""
__author__ = 'jmatt@google.com (Justin Mattson)'
import email.Utils
import logging
import mimetypes
import time
import zipfile
from google.appengine.api import memcache
from google.appengine.ext import webapp
from google.appengine.ext.webapp import util
from time import localtime, strftime
def create_handler(zip_files, max_age=None, public=None):
    """Factory method to create a MemcachedZipHandler instance.

    Args:
        zip_files: A list of file names, or a list of lists of file name, first
            member of file mappings. See MemcachedZipHandler documentation for
            more information about using the list of lists format
        max_age: The maximum client-side cache lifetime
        public: Whether this should be declared public in the client-side cache

    Returns:
        A MemcachedZipHandler wrapped in a pretty, anonymous bow for use with
        App Engine

    Raises:
        ValueError: if the zip_files argument is not a list
    """
    # Verify argument integrity and normalize the plain-list form to the
    # list-of-lists format used internally.
    if zip_files and isinstance(zip_files, list):
        zip_files = [entry if isinstance(entry, list) else [entry]
                     for entry in zip_files]
    else:
        raise ValueError('File name arguments must be a list')

    class HandlerWrapper(MemcachedZipHandler):
        """Simple wrapper binding the zip file list to MemcachedZipHandler."""

        def get(self, name):
            self.zipfilenames = zip_files
            self.TrueGet(name)

    # BUG FIX: the original assigned plain local variables named MAX_AGE and
    # PUBLIC here, which silently discarded the caller's overrides. Assign the
    # class attributes so the handler actually honors max_age/public.
    if max_age is not None:
        HandlerWrapper.MAX_AGE = max_age
    if public is not None:
        HandlerWrapper.PUBLIC = public

    return HandlerWrapper
class MemcachedZipHandler(webapp.RequestHandler):
    """Handles get requests for a given URL.

    Serves a GET request from a series of zip files. As files are served they
    are put into memcache, which is much faster than retrieving them from the
    zip source file again. It also uses considerably fewer CPU cycles.
    """
    zipfile_cache = {}                # class-level cache of opened zip files
    MAX_AGE = 600                     # max client-side cache lifetime, seconds
    PUBLIC = True                     # emit 'public' in Cache-Control
    CACHE_PREFIX = 'cache://'         # memcache key prefix for actual URLs
    NEG_CACHE_PREFIX = 'noncache://'  # memcache key prefix for non-existant URLs
    intlString = 'intl/'              # URL prefix marking localized content
    validLangs = ['en', 'de', 'es', 'fr', 'it', 'ja', 'zh-CN', 'zh-TW']
    def TrueGet(self, reqUri):
        """The top-level entry point to serving requests.

        Called 'True' get because it does the work when called from the
        wrapper class' get method. Some logic is applied to the request to
        serve files from an intl/<lang>/... directory or fall through to the
        default language.

        Args:
            reqUri: URL requested

        Returns:
            None
        """
        langName = 'en'
        resetLangCookie = False
        urlLangName = None
        retry = False  # NOTE(review): never read — dead variable
        isValidIntl = False
        isStripped = False
        # Try to retrieve the user's lang pref from the cookie. If there is no
        # lang pref cookie in the request, add set-cookie to the response with
        # the default value of 'en'.
        try:
            langName = self.request.cookies['android_developer_pref_lang']
        except KeyError:
            resetLangCookie = True
            #logging.info('==========================EXCEPTION: NO LANG COOKIE FOUND, USING [%s]', langName)
        logging.info('==========================REQ INIT name [%s] langName [%s] resetLangCookie [%s]', reqUri, langName, resetLangCookie)
        # Preprocess the req url. If it references a directory or the domain
        # itself, append '/index.html' to the url and 302 redirect. Otherwise,
        # continue processing the request below.
        name = self.PreprocessUrl(reqUri, langName)
        if name:
            # Do some prep for handling intl requests. Parse the url and
            # validate the intl/lang substring, extract the url lang code
            # (urlLangName) and the uri that follows the intl/lang substring
            # (contentUri).
            sections = name.split("/", 2)
            contentUri = 0
            isIntl = len(sections) > 1 and (sections[0] == "intl")
            if isIntl:
                isValidIntl = sections[1] in self.validLangs
                if isValidIntl:
                    urlLangName = sections[1]
                    contentUri = sections[2]
                    logging.info(' Content URI is [%s]...', contentUri)
                    if (urlLangName != langName) or (langName == 'en'):
                        # If the lang code in the request is different from
                        # that in the cookie, or if the target lang is en,
                        # strip the intl/nn substring. It will later be
                        # redirected to the user's preferred language url.
                        name = contentUri
                        isStripped = True
                        isValidIntl = False
                        isIntl = False
            # Send for processing
            if self.isCleanUrl(name, langName, isValidIntl, isStripped):
                # Handle a 'clean' request: try to form a response using the
                # actual request url.
                if not self.CreateResponse(name, langName, isValidIntl, resetLangCookie):
                    # If CreateResponse returns False, there was no such
                    # document in the intl/lang tree. Before going to 404, see
                    # if there is an English-language version of the doc in
                    # the default tree and return it, else go to 404.
                    self.CreateResponse(contentUri, langName, False, resetLangCookie)
            elif isIntl:
                # Handle the case where we need to pass through an invalid
                # intl req for processing (so as to get 404 as appropriate).
                # This is needed because intl urls are passed through clean
                # and retried in English, if necessary.
                self.CreateResponse(name, langName, isValidIntl, resetLangCookie)
            else:
                # Handle the case where we have a non-clean url (usually a
                # non-intl url) that we need to interpret in the context of
                # any lang pref that is set. Prepend an intl/lang string to
                # the request url and send it as a 302 redirect. After the
                # redirect, the subsequent request will be handled as a clean
                # url.
                self.RedirToIntl(name, self.intlString, langName)
def isCleanUrl(self, name, langName, isValidIntl, isStripped):
"""Determine whether to pass an incoming url straight to processing.
Args:
name: The incoming URL
Returns:
boolean: Whether the URL should be sent straight to processing
"""
# logging.info(' >>>> isCleanUrl name [%s] langName [%s] isValidIntl [%s]', name, langName, isValidIntl)
if (langName == 'en' and not isStripped) or isValidIntl or not ('.html' in name) or (not isValidIntl and not langName):
return True
    def PreprocessUrl(self, name, langName):
        """Any preprocessing work on the URL when it comes in.

        Put any work related to interpreting the incoming URL here. For
        example, this is used to redirect requests for a directory to the
        index.html file in that directory. Subclasses should override this
        method to do different preprocessing.

        Args:
            name: The incoming URL
            langName: the user's language preference (only used for logging)

        Returns:
            False if the request was redirected to '/index.html', or
            The processed URL, otherwise
        """
        # Determine if this is a request for a directory: if the final path
        # segment contains no '.', treat it as a directory and append '/'.
        final_path_segment = name
        final_slash_offset = name.rfind('/')
        if final_slash_offset != len(name) - 1:
            final_path_segment = name[final_slash_offset + 1:]
            if final_path_segment.find('.') == -1:
                name = ''.join([name, '/'])
        # If this is a directory or the domain itself, redirect to /index.html
        if not name or (name[len(name) - 1:] == '/'):
            uri = ''.join(['/', name, 'index.html'])
            # logging.info('--->PREPROCESSING REDIRECT [%s] to [%s] with langName [%s]', name, uri, langName)
            self.redirect(uri, False)
            return False
        else:
            return name
    def RedirToIntl(self, name, intlString, langName):
        """Redirect an incoming request to the appropriate intl uri.

        For non-en langName, builds the intl/lang string from a base (en)
        string and redirects (302) the request to look for a version of the
        file in langName. For en langName, simply redirects a stripped uri
        string (intl/nn removed).

        Args:
            name: The incoming, preprocessed URL
            intlString: the 'intl/' URL prefix
            langName: the target language code

        Returns:
            The lang-specific URL
        """
        if not (langName == 'en'):
            # Preserve the original query string across the redirect.
            builtIntlLangUri = ''.join([intlString, langName, '/', name, '?', self.request.query_string])
        else:
            builtIntlLangUri = name
        uri = ''.join(['/', builtIntlLangUri])
        logging.info('-->>REDIRECTING %s to %s', name, uri)
        self.redirect(uri, False)
        return uri
    def CreateResponse(self, name, langName, isValidIntl, resetLangCookie):
        """Process the url and form a response, if appropriate.

        Attempts to retrieve the requested file (name) from cache, negative
        cache, or store (zip) and form the response. For intl requests that
        are not found (in the localized tree), returns False rather than
        forming a response, so that the request can be retried with the base
        url (this is the fallthrough to default language). For requests that
        are found, forms the headers and adds the content to the response
        entity. If the request was for an intl (localized) url, also resets
        the language cookie to the language specified in the url if needed,
        to ensure that the client language and response data remain
        harmonious.

        Args:
            name: The incoming, preprocessed URL
            langName: The language id. Used as necessary to reset the
                language cookie in the response.
            isValidIntl: If present, indicates whether the request is
                for a language-specific url
            resetLangCookie: Whether the response should reset the
                language cookie to 'langName'

        Returns:
            True: A response was successfully created for the request
            False: No response was created.
        """
        # See if we have the page in the memcache
        logging.info('PROCESSING %s langName [%s] isValidIntl [%s] resetLang [%s]',
                     name, langName, isValidIntl, resetLangCookie)
        resp_data = self.GetFromCache(name)
        if resp_data is None:
            logging.info(' Cache miss for %s', name)
            resp_data = self.GetFromNegativeCache(name)
            if resp_data is None:
                resp_data = self.GetFromStore(name)
                # IF we have the file, put it in the memcache
                # ELSE put it in the negative cache
                if resp_data is not None:
                    self.StoreOrUpdateInCache(name, resp_data)
                elif isValidIntl:
                    # Couldn't find the intl doc. Try to fall through to
                    # English by signalling the caller to retry.
                    return False
                else:
                    logging.info(' Adding %s to negative cache, serving 404', name)
                    self.StoreInNegativeCache(name)
                    self.Write404Error()
                    return True
            else:
                # Found it in the negative cache: known-missing, serve 404.
                self.Write404Error()
                return True
        # Found content from cache or store
        logging.info('FOUND CLEAN')
        if resetLangCookie:
            logging.info(' Resetting android_developer_pref_lang cookie to [%s]',
                         langName)
            # Ten-year expiry, matching the long-lived preference semantics.
            expireDate = time.mktime(localtime()) + 60 * 60 * 24 * 365 * 10
            self.response.headers.add_header('Set-Cookie',
                'android_developer_pref_lang=%s; path=/; expires=%s' %
                (langName, strftime("%a, %d %b %Y %H:%M:%S", localtime(expireDate))))
        mustRevalidate = False
        if ('.html' in name):
            # Revalidate html files -- workaround for cache inconsistencies
            # for negotiated responses.
            mustRevalidate = True
            self.response.headers.add_header('Vary', 'Cookie')
        content_type, encoding = mimetypes.guess_type(name)
        if content_type:
            self.response.headers['Content-Type'] = content_type
            self.SetCachingHeaders(mustRevalidate)
            self.response.out.write(resp_data)
        elif (name == 'favicon.ico'):
            self.response.headers['Content-Type'] = 'image/x-icon'
            self.SetCachingHeaders(mustRevalidate)
            self.response.out.write(resp_data)
        elif name.endswith('.psd'):
            self.response.headers['Content-Type'] = 'application/octet-stream'
            self.SetCachingHeaders(mustRevalidate)
            self.response.out.write(resp_data)
        return True
    def GetFromStore(self, file_path):
        """Retrieve file from zip files.

        Get the file from the source, it must not have been in the memcache.
        If possible, we'll use the zip file index to quickly locate where the
        file should be found. (See MapToFileArchive documentation for
        assumptions about file ordering.) If we don't have an index or don't
        find the file where the index says we should, look through all the
        zip files to find it.

        Args:
            file_path: the file that we're looking for

        Returns:
            The contents of the requested file, or None if not found
        """
        resp_data = None
        file_itr = iter(self.zipfilenames)
        # Check the index, if we have one, to see what archive the file is in
        archive_name = self.MapFileToArchive(file_path)
        if not archive_name:
            archive_name = file_itr.next()[0]
        # Walk the remaining archives in order until the file is found or the
        # iterator is exhausted (archive_name becomes False).
        while resp_data is None and archive_name:
            zip_archive = self.LoadZipFile(archive_name)
            if zip_archive:
                # We expect some lookups will fail, and that's okay, 404s
                # will deal with that.
                try:
                    resp_data = zip_archive.read(file_path)
                except (KeyError, RuntimeError), err:
                    # no op — a miss here just means "try the next archive"
                    x = False
            if resp_data is not None:
                logging.info('%s read from %s', file_path, archive_name)
            try:
                archive_name = file_itr.next()[0]
            except (StopIteration), err:
                archive_name = False
        return resp_data
    def LoadZipFile(self, zipfilename):
        """Convenience method to load zip file.

        Just a convenience method to load the zip file from the data store.
        This is useful if we ever want to change data stores and also as a
        means of dependency injection for testing. This method will look at
        our file cache first, and then load and cache the file if there's a
        cache miss.

        Args:
            zipfilename: the name of the zip file to load

        Returns:
            The zip file requested, or None if there is an I/O error
        """
        zip_archive = None
        zip_archive = self.zipfile_cache.get(zipfilename)
        if zip_archive is None:
            try:
                zip_archive = zipfile.ZipFile(zipfilename)
                # Cache the open handle at class level so later requests in
                # this process skip the open.
                self.zipfile_cache[zipfilename] = zip_archive
            except (IOError, RuntimeError), err:
                logging.error('Can\'t open zipfile %s, cause: %s' % (zipfilename,
                    err))
        return zip_archive
    def MapFileToArchive(self, file_path):
        """Given a file name, determine what archive it should be in.

        This method makes two critical assumptions.
        (1) The zip files passed as an argument to the handler, if
            concatenated in that same order, would result in a total ordering
            of all the files. See (2) for ordering type.
        (2) Upper case letters before lower case letters. The traversal of a
            directory tree is depth first. A parent directory's files are
            added before the files of any child directories.

        Args:
            file_path: the file to be mapped to an archive

        Returns:
            The name of the archive where we expect the file to be, or None
            if no index entry covers it.
        """
        # Scan the index entries from last to first; an entry of the form
        # [archive, first_file] covers file_path when first_file <= file_path
        # in the ordering implemented by CompareFilenames.
        num_archives = len(self.zipfilenames)
        while num_archives > 0:
            target = self.zipfilenames[num_archives - 1]
            if len(target) > 1:
                if self.CompareFilenames(target[1], file_path) >= 0:
                    return target[0]
            num_archives -= 1
        return None
    def CompareFilenames(self, file1, file2):
        """Determines whether file1 is lexigraphically 'before' file2.

        WARNING: This method assumes that paths are output in a depth-first,
        with parent directories' files stored before childs'.

        We say that file1 is lexigraphically before file2 if the last
        non-matching path segment of file1 is alphabetically before file2.

        Args:
            file1: the first file path
            file2: the second file path

        Returns:
            A positive number if file1 is before file2
            A negative number if file2 is before file1
            0 if filenames are the same
        """
        f1_segments = file1.split('/')
        f2_segments = file2.split('/')
        # Advance past the common prefix of path segments.
        segment_ptr = 0
        while (segment_ptr < len(f1_segments) and
               segment_ptr < len(f2_segments) and
               f1_segments[segment_ptr] == f2_segments[segment_ptr]):
            segment_ptr += 1
        if len(f1_segments) == len(f2_segments):
            # We fell off the end, the paths must be the same
            if segment_ptr == len(f1_segments):
                return 0
            # We didn't fall off the end, compare the segments where they
            # differ
            if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
                return 1
            elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
                return -1
            else:
                return 0
        # The number of segments differs, we either mismatched comparing
        # directories, or comparing a file to a directory
        else:
            # IF we were looking at the last segment of one of the paths,
            # the one with fewer segments is first because files come before
            # directories
            # ELSE we just need to compare directory names
            if (segment_ptr + 1 == len(f1_segments) or
                segment_ptr + 1 == len(f2_segments)):
                return len(f2_segments) - len(f1_segments)
            else:
                if f1_segments[segment_ptr] < f2_segments[segment_ptr]:
                    return 1
                elif f1_segments[segment_ptr] > f2_segments[segment_ptr]:
                    return -1
                else:
                    return 0
    def SetCachingHeaders(self, revalidate):
        """Set caching headers for the request.

        Args:
            revalidate: when True, append 'must-revalidate' to Cache-Control.
        """
        max_age = self.MAX_AGE
        # Expires header deliberately disabled; Cache-Control max-age governs.
        #self.response.headers['Expires'] = email.Utils.formatdate(
        #    time.time() + max_age, usegmt=True)
        cache_control = []
        if self.PUBLIC:
            cache_control.append('public')
        cache_control.append('max-age=%d' % max_age)
        if revalidate:
            cache_control.append('must-revalidate')
        self.response.headers['Cache-Control'] = ', '.join(cache_control)
    def GetFromCache(self, filename):
        """Get file from memcache, if available.

        Args:
            filename: The URL of the file to return

        Returns:
            The content of the file, or None on a cache miss
        """
        return memcache.get('%s%s' % (self.CACHE_PREFIX, filename))
    def StoreOrUpdateInCache(self, filename, data):
        """Store data in the cache.

        Store a piece of data in the memcache. Memcache has a maximum item
        size of 1*10^6 bytes. If the data is too large, fail, but log the
        failure. Future work will consider compressing the data before
        storing or chunking it.

        Args:
            filename: the name of the file to store
            data: the data of the file

        Returns:
            None
        """
        try:
            # add() is a no-op when the key exists, so fall back to replace().
            if not memcache.add('%s%s' % (self.CACHE_PREFIX, filename), data):
                memcache.replace('%s%s' % (self.CACHE_PREFIX, filename), data)
        except (ValueError), err:
            logging.warning('Data size too large to cache\n%s' % err)
    def Write404Error(self):
        """Ouptut a simple 404 response with a minimal HTML body."""
        self.error(404)
        self.response.out.write(
            ''.join(['<html><head><title>404: Not Found</title></head>',
                     '<body><b><h2>Error 404</h2><br/>',
                     'File not found</b></body></html>']))
    def StoreInNegativeCache(self, filename):
        """If a non-existant URL is accessed, cache this result as well.

        Future work should consider setting a maximum negative cache size to
        prevent it from from negatively impacting the real cache.

        Args:
            filename: URL to add ot negative cache

        Returns:
            None
        """
        # -1 is just a sentinel; only key presence matters.
        memcache.add('%s%s' % (self.NEG_CACHE_PREFIX, filename), -1)
    def GetFromNegativeCache(self, filename):
        """Retrieve from negative cache.

        Args:
            filename: URL to retreive

        Returns:
            The sentinel value (-1) if present in the negative cache, else
            None.
        """
        return memcache.get('%s%s' % (self.NEG_CACHE_PREFIX, filename))
def main():
    """Wire the handler into a WSGI app and hand off to the App Engine
    runtime. The URL pattern captures (zip-name, path-within-zip)."""
    application = webapp.WSGIApplication([('/([^/]+)/(.*)',
                                           MemcachedZipHandler)])
    util.run_wsgi_app(application)


if __name__ == '__main__':
    main()
|
19,992 | c9ef898ea0786533cf34a18151f0596dd4eb4d3b |
def find(n=10001, limit=1000000):
    """Print and return the *n*-th prime via a sieve of Eratosthenes.

    Args:
        n: 1-based index of the prime to find. Defaults to 10001 (the
            classic Project Euler problem 7 target), keeping the original
            zero-argument call working.
        limit: sieve upper bound; must exceed the n-th prime (the 10001st
            prime is 104743, comfortably below the default 1000000).

    Returns:
        The n-th prime, or None if it does not lie within *limit*.
    """
    sieve = [True] * (limit + 1)
    count = 0
    for candidate in range(2, limit + 1):
        # BUG FIX: the original compared `sieve[i] == False`; use truthiness.
        if not sieve[candidate]:
            continue
        count += 1
        if count == n:
            # BUG FIX: the original used the Python 2 print statement and
            # kept sieving after finding the answer; print() is valid on
            # both Python 2 and 3 for a single value, and we stop early.
            print(candidate)
            return candidate
        # Composites below candidate**2 are already marked by smaller primes.
        for multiple in range(candidate * candidate, limit + 1, candidate):
            sieve[multiple] = False
    return None


if __name__ == '__main__':
    find()
19,993 | 30b574bf484a37399715715b2e19da0db42c675c | from scapy.all import *
# Quick interactive-style exploration of a packet capture with scapy.
pcap = rdpcap("arquivo.pcap")  # load all packets from the capture into memory
#pcap[3]
pcap.summary()     # one-line summary of every packet in the capture
pcap[3].summary()  # one-line summary of the 4th packet only
#pcap.show()
pcap[3].show()     # full field-by-field dump of the 4th packet
#pcap.hexdump()
19,994 | 16f019545c42f2ce64bf14159c99da161719a50a | """ This module contains all functions related to the simulation of
a synthetic dataset from the generalized Roy model.
"""
# standard library
import numpy as np
import os
''' Main function '''
def simulate(init_dict, unobserved=False):
    """Simulate a model based on the initialization file.

    Args:
        init_dict: dict with BASICS / TREATED / UNTREATED / COST / DIST
            sections describing sample size, output path, coefficients,
            standard deviations and correlations.
        unobserved: when True, also write the latent outcomes Y1/Y0 to disk.

    Returns:
        (Y1, Y0, D): potential-outcome arrays and the treatment indicator.
    """
    # Antibugging
    assert (isinstance(init_dict, dict))
    assert (unobserved in [True, False])

    # Ensure recomputability
    np.random.seed(123)

    # Distribute information
    num_agents = init_dict['BASICS']['agents']
    source = init_dict['BASICS']['source']
    Y1_coeffs = init_dict['TREATED']['all']
    Y0_coeffs = init_dict['UNTREATED']['all']
    C_coeffs = np.array(init_dict['COST']['all'])
    U1_sd = init_dict['TREATED']['sd']
    U0_sd = init_dict['UNTREATED']['sd']
    V_sd = init_dict['COST']['sd']
    U1V_rho = init_dict['DIST']['rho1']
    U0V_rho = init_dict['DIST']['rho0']

    # Auxiliary objects
    U1V_cov = U1V_rho * U1_sd * V_sd
    U0V_cov = U0V_rho * U0_sd * V_sd
    num_covars_out = Y1_coeffs.shape[0]
    num_covars_cost = C_coeffs.shape[0]

    # Simulate observables. The draw order (X, then Z, then U) is preserved
    # from the original so the fixed seed reproduces identical samples.
    means = np.tile(0.0, num_covars_out)
    covs = np.identity(num_covars_out)
    X = np.random.multivariate_normal(means, covs, num_agents)

    means = np.tile(0.0, num_covars_cost)
    covs = np.identity(num_covars_cost)
    Z = np.random.multivariate_normal(means, covs, num_agents)

    # Add intercepts. The first column of the X and Z matrix always contains
    # the intercept term. This is exploited throughout the code.
    Z[:, 0], X[:, 0] = 1.0, 1.0

    # Construct index of observable characteristics
    Y1_level = np.dot(Y1_coeffs, X.T)
    Y0_level = np.dot(Y0_coeffs, X.T)
    C_level = np.dot(C_coeffs, Z.T)

    # Simulate unobservables: (U1, U0, V) jointly normal, with U1/U0 each
    # correlated with the cost shock V but not with each other.
    means = np.tile(0.0, 3)
    covs = np.diag([U1_sd**2, U0_sd**2, V_sd**2])
    covs[0, 2] = covs[2, 0] = U1V_cov
    covs[1, 2] = covs[2, 1] = U0V_cov
    U = np.random.multivariate_normal(means, covs, num_agents)

    # Simulate endogenous variables, vectorized over agents. BUG FIX: the
    # original per-agent loop used np.float(...), an alias deprecated in
    # NumPy 1.20 and removed in 1.24, which crashes on current NumPy.
    expected_benefits = Y1_level - Y0_level
    cost = C_level + U[:, 2]
    D = (expected_benefits - cost > 0).astype(float)  # decision rule

    # Potential and observed outcomes.
    Y1 = Y1_level + U[:, 0]
    Y0 = Y0_level + U[:, 1]
    Y = D * Y1 + (1.0 - D) * Y0

    # Check integrity of simulated data
    _check_integrity_simulate(Y1, Y0, Y, D)

    # Save to disk
    _write_out(Y, D, X, Z, source, unobserved, Y1, Y0)

    # Return selected features of data
    return Y1, Y0, D
''' Auxiliary functions '''
# Note that the name of all auxiliary functions starts with an underscore.
# This ensures that the function is private to the module. A standard import
# of this module will not make this function available.
def _check_integrity_simulate(Y1, Y0, Y, D):
""" Check quality of simulated sample.
"""
assert (np.all(np.isfinite(Y1)))
assert (np.all(np.isfinite(Y0)))
assert (np.all(np.isfinite(Y)))
assert (np.all(np.isfinite(D)))
assert (Y1.dtype == 'float')
assert (Y0.dtype == 'float')
assert (Y.dtype == 'float')
assert (D.dtype == 'float')
assert (D.all() in [1.0, 0.0])
def _write_out(Y, D, X, Z, source, unobserved=False, Y1=None, Y0=None):
""" Write out simulated data to file.
"""
if not unobserved:
np.savetxt(source, np.column_stack((Y, D, X, Z)), fmt='%8.3f')
else:
assert (isinstance(Y1, np.ndarray))
assert (isinstance(Y0, np.ndarray))
np.savetxt(source, np.column_stack((Y, D, X, Z, Y1, Y0)),
fmt='%8.3f') |
19,995 | 89e1cbca7873c3bd4b80b7b756ca67813afcadba | import sys
read = sys.stdin.read
readline = sys.stdin.readline
readlines = sys.stdin.readlines
import numpy as np
# Problem input: N values, choose subsets of size K; A holds the N values.
N, K = map(int, input().split())
A = np.array(list(map(int, input().split())), dtype = np.int64)
MOD = 10**9+7  # prime modulus used for all combinatorics below
def cumprod(x, MOD = MOD):
    """Cumulative product of *x* modulo *MOD*.

    Uses sqrt-blocking so the Python-level loop runs O(sqrt(len(x))) times:
    column-wise prefix products within each row, then each row is scaled by
    the running product of the previous row's last column.
    """
    n = len(x)
    width = int(n**.5 + 1)
    grid = np.resize(x, width ** 2).reshape(width, width)
    for col in range(1, width):
        grid[:, col] *= grid[:, col - 1]
        grid[:, col] %= MOD
    for row in range(1, width):
        grid[row] *= grid[row - 1, -1]
        grid[row] %= MOD
    return grid.flatten()[:n]
def make_fact(U, MOD = MOD):
    """Return (fact, fact_inv) arrays for 0..U-1 modulo *MOD*.

    fact[k] = k! mod MOD (with fact[0] = 1); fact_inv[k] is its modular
    inverse, derived from a single Fermat inversion of fact[U-1] followed by
    a reversed cumulative product.
    """
    seq = np.arange(U, dtype = np.int64)
    seq[0] = 1
    fact = cumprod(seq, MOD)
    rev = np.arange(U, 0, -1, dtype = np.int64)
    rev[0] = pow(int(fact[-1]), MOD - 2, MOD)
    fact_inv = cumprod(rev, MOD)[::-1]
    return fact, fact_inv
# Sum over all K-subsets of (max - min). After sorting, the element at
# 1-based position ind is the maximum of C(ind-1, K-1) subsets and the
# minimum of C(N-ind, K-1) subsets, so each element's net contribution is
# a * (C(ind-1, K-1) - C(N-ind, K-1)) mod MOD.
fact, inv = make_fact(N+100, MOD)
A.sort()
ans = 0
lim = N - K + 1  # positions beyond lim can never be a subset minimum
for ind, a in enumerate(A, 1):
    if ind < K:
        tmp_max = 0  # fewer than K-1 smaller elements: never a maximum
    else:
        max_comb = (fact[ind-1] * inv[K-1]) % MOD * inv[ind-K] % MOD
        tmp_max = max_comb * a % MOD
    if ind > lim:
        tmp_min = 0  # fewer than K-1 larger elements: never a minimum
    else:
        min_comb = (fact[N - ind] * inv[K-1]) % MOD * inv[N - ind - K + 1] % MOD
        tmp_min = min_comb * a % MOD
    ans += (tmp_max - tmp_min) % MOD
    ans %= MOD
print(ans)
19,996 | 9e24b1030eedb4cc269df836d388422b090087d9 | # coding: utf-8
# /*##########################################################################
#
# Copyright (c) 2016-2017 European Synchrotron Radiation Facility
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# ###########################################################################*/
from __future__ import absolute_import, division, unicode_literals
__authors__ = ['Marius Retegan']
__license__ = 'MIT'
__date__ = '04/10/2017'
import numpy as np
MIN_KERNEL_SUM = 1e-8
def gaussian_kernel1d(sigma=None, truncate=6):
    """Return a normalized 1-D Gaussian kernel.

    The kernel spans roughly +/- *truncate* standard deviations and is
    forced to an odd length so it has a well-defined center sample.
    Raises when the kernel sum is too close to zero to normalize.
    """
    width = int(2 * truncate * sigma)
    if width % 2 == 0:
        width += 1
    positions = np.arange(width)
    center = np.median(positions)
    # The 1 / (sigma * sqrt(2*pi)) prefactor cancels in the normalization.
    kernel = np.exp(-0.5 * ((positions - center)**2 / sigma**2))
    if kernel.sum() < MIN_KERNEL_SUM:
        raise Exception(
            'The kernel can\'t be normalized, because its sum is close to '
            'zero. The sum of the kernel is < {0}'.format(MIN_KERNEL_SUM))
    kernel /= kernel.sum()
    return kernel
def gaussian_kernel2d(sigma=None, truncate=(6, 6)):
    """Return a normalized 2-D Gaussian kernel built as the outer product
    of two 1-D kernels.

    :param sigma: pair (sigma_x, sigma_y); any two-element sequence is
        accepted (the original required a numpy array with a ``.size``
        attribute and crashed otherwise).
    :param truncate: pair of half-widths, in units of the respective sigma.
    :raises ValueError: if sigma or truncate do not hold exactly two values.
    """
    if sigma is None:
        raise ValueError('sigma must be a two-element sequence.')
    # Accept plain lists/tuples, not only numpy arrays.
    sigma = np.asarray(sigma, dtype=float)
    if sigma.size != 2 or len(truncate) != 2:
        raise ValueError('Sigma and the truncation parameter don\'t have the '
                         'required dimension.')
    kernel_x = gaussian_kernel1d(sigma[0], truncate[0])
    kernel_y = gaussian_kernel1d(sigma[1], truncate[1])
    # Rows vary along y, columns along x.
    kernel = np.outer(kernel_y, kernel_x)
    return kernel
def convolve_fft(array, kernel):
    """
    Convolve an array with a kernel using FFT.

    Zero-pads both inputs to a common shape, multiplies their FFTs and
    returns the central, array-sized part of the result — i.e. a 'same'
    convolution with zero-filled boundaries.

    Implementation based on the convolve_fft function from astropy.
    https://github.com/astropy/astropy/blob/master/astropy/convolution/convolve.py

    :param array: n-dimensional input data.
    :param kernel: kernel with the same number of dimensions as array.
    :raises ValueError: if array and kernel differ in dimensionality.
    """
    # np.cfloat was removed in NumPy 2.0; np.complex128 is the portable name.
    array = np.asarray(array, dtype=np.complex128)
    kernel = np.asarray(kernel, dtype=np.complex128)
    if array.ndim != kernel.ndim:
        raise ValueError("Image and kernel must have same number of "
                         "dimensions")
    array_shape = array.shape
    kernel_shape = kernel.shape
    new_shape = np.array(array_shape) + np.array(kernel_shape)
    array_slices = []
    kernel_slices = []
    # For each axis, compute the slices that center the array and the kernel
    # inside the zero-padded buffers.
    for (new_dimsize, array_dimsize, kernel_dimsize) in zip(
            new_shape, array_shape, kernel_shape):
        center = new_dimsize - (new_dimsize + 1) // 2
        array_slices += [slice(center - array_dimsize // 2,
                               center + (array_dimsize + 1) // 2)]
        kernel_slices += [slice(center - kernel_dimsize // 2,
                                center + (kernel_dimsize + 1) // 2)]
    array_slices = tuple(array_slices)
    kernel_slices = tuple(kernel_slices)
    if not np.all(new_shape == array_shape):
        big_array = np.zeros(new_shape, dtype=np.complex128)
        big_array[array_slices] = array
    else:
        big_array = array
    if not np.all(new_shape == kernel_shape):
        big_kernel = np.zeros(new_shape, dtype=np.complex128)
        big_kernel[kernel_slices] = kernel
    else:
        big_kernel = kernel
    array_fft = np.fft.fftn(big_array)
    # ifftshift moves the kernel's center to index 0 so the spectral product
    # corresponds to a centered convolution.
    kernel_fft = np.fft.fftn(np.fft.ifftshift(big_kernel))
    rifft = np.fft.ifftn(array_fft * kernel_fft)
    return rifft[array_slices].real
def broaden(array, fwhm=None, kind='gaussian'):
    """Broaden (smooth) the data by convolving it with a kernel.

    :param array: data to broaden; 1-D or 2-D depending on fwhm.
    :param fwhm: full width at half maximum of the kernel — a scalar for
        1-D data or a pair for 2-D data. None or non-positive values
        leave the data untouched.
    :param kind: only 'gaussian' is implemented.
    :return: the broadened array, or the input unchanged when no
        broadening applies.
    """
    if fwhm is None:
        # Return the data unchanged; the original returned None here,
        # inconsistently with the other no-op exits below.
        return array
    fwhm = np.array(fwhm)
    if (fwhm <= 0).any():
        return array
    if kind == 'gaussian':
        # FWHM -> Gaussian standard deviation.
        sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
        if fwhm.size == 1:
            kernel = gaussian_kernel1d(sigma)
        elif fwhm.size == 2:
            kernel = gaussian_kernel2d(sigma)
    else:
        print('Unavailable type of broadening.')
        return array
    return convolve_fft(array, kernel)
|
19,997 | 68a603460b6b17b625cbed8de0a4e39648a0ed57 | from pywinauto.application import Application
class PyWin:
    """Small convenience facade over pywinauto's Application API."""

    def __init__(self):
        # The Application object drives the target program; _window caches
        # the dialog currently under control (set via set_window).
        self.app = Application()
        self._window = None

    def open_app(self, app):
        """Launch the executable given by *app*."""
        self.app.start(app)

    def set_window(self, title=None):
        """Attach to the window named *title*, or to the topmost window."""
        self._window = self.app[title] if title else self.app.top_window()

    def click(self, control):
        """Click the control named *control* on the current window."""
        self._window[control].click()

    def select_tab_element(self, control, name):
        """Select the tab *name* inside the tab control *control*."""
        self._window[control].select(name)

    def input_path(self, control, path):
        """Type *path* into the text control *control*."""
        self._window[control].type_keys(path)
|
19,998 | 18530833c3b3da257194cb6b42d08e767f2d0249 | import numpy as np
from keras.layers import Input, Dense
from keras.models import Model
def auto_encoder(data: np.ndarray):
    """
    Build, train and return a dense auto-encoder for 784-feature samples.

    The original body referenced undefined names ``x_train``/``x_test``
    (NameError at runtime) and ignored the ``data`` argument entirely;
    it now trains on ``data`` and holds out 10% for validation.

    :param data: training samples, expected shape (n_samples, 784)
        — TODO confirm against callers.
    :return: the fitted Keras Model.
    """
    input_img = Input(shape=(784,))
    # Encoder: 784 -> 128 -> 64 -> 32 bottleneck.
    encoded = Dense(128, activation='relu')(input_img)
    encoded = Dense(64, activation='relu')(encoded)
    encoded = Dense(32, activation='relu')(encoded)
    # Decoder mirrors the encoder back up to 784 sigmoid outputs.
    decoded = Dense(64, activation='relu')(encoded)
    decoded = Dense(128, activation='relu')(decoded)
    decoded = Dense(784, activation='sigmoid')(decoded)
    autoencoder = Model(input_img, decoded)
    autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
    # Reconstruction task: inputs are also the targets.
    autoencoder.fit(data, data,
                    epochs=100,
                    batch_size=256,
                    shuffle=True,
                    validation_split=0.1)
    return autoencoder
|
19,999 | 520cab98379dc7a66e82697273ca16ee937594e3 | def fib(N):
"""
:type N: int
:rtype: int
"""
if N == 0:
return 0
elif N == 1:
return 1
else:
list = [0, 1]
while N > len(list):
list.append(list[len(list) - 2] + list[len(list) - 1])
return list[N - 2] + list[N - 1]
print(fib(6))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.