content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
from datetime import time
from mock import patch
import unittest
from prestans import exception
from prestans.types import Time
NOW = time(12, 34, 11)
UTC_NOW = time(13, 35, 12)
| [
6738,
4818,
8079,
1330,
640,
198,
6738,
15290,
1330,
8529,
198,
11748,
555,
715,
395,
198,
198,
6738,
16153,
504,
1330,
6631,
198,
6738,
16153,
504,
13,
19199,
1330,
3862,
198,
198,
45669,
796,
640,
7,
1065,
11,
4974,
11,
1367,
8,
1... | 3.232143 | 56 |
# Python 2.7.6
# RestfulClient.py
import re
import requests
from requests.auth import HTTPDigestAuth
import json
from base64 import b64decode
# Replace with the correct URL
url1 = "http://172.18.0.2:8000/onem2m/MemsIPE/sensor_data/x/latest/"
url2 = "http://172.18.0.2:8000/onem2m/MemsIPE/sensor_data/y/latest/"
url3 = "http://172.18.0.2:8000/onem2m/MemsIPE/sensor_data/z/latest/"
url={url1, url2, url3}
print len(url)
# It is a good practice not to hardcode the credentials. So ask the user to enter credentials at runtime
#myResponse = requests.get(url, auth=HTTPDigestAuth(raw_input("username: "), raw_input("Password: ")), verify=True)
for i in url:
#print i
myResponse = requests.get(i)
print "code:"+ str(myResponse.status_code)
#print myResponse.text
# For successful API call, response code will be 200 (OK)
if (myResponse.ok):
# Loading the response data into a dict variable
# json.loads takes in only binary or string variables so using content to fetch binary content
# Loads (Load String) takes a Json file and converts into python data structure (dict or list, depending on JSON)
#jData = json.loads(myResponse.content)
jData = myResponse.content
#print "this is ", jData
#print len(jData)
array = json.loads(jData)
#print array["m2m:cin"]["con"]
convert = b64decode(array["m2m:cin"]["con"])
print convert
else:
# If response code is not ok (200), print the resulting http error code with description
myResponse.raise_for_status() | [
2,
11361,
362,
13,
22,
13,
21,
198,
2,
8324,
913,
11792,
13,
9078,
198,
11748,
302,
198,
11748,
7007,
198,
6738,
7007,
13,
18439,
1330,
7154,
51,
5760,
328,
395,
30515,
198,
11748,
33918,
198,
6738,
2779,
2414,
1330,
275,
2414,
1250... | 2.882813 | 512 |
from flask import render_template, request
from app import app, models, utils
from app.forms import *
from flask_restful import reqparse
import json, requests, os
from app.document_retrieval import DocumentRetriever
from app.lexicon import Lexicon
import app.controler as controler
import app.alignment_controller as alignment_controler
from app.stats import one_lang_stat_vals, two_langs_stat_vals, one_edition_stat_vals, two_edition_stat_vals, no_lang_stat_vals
parser = reqparse.RequestParser()
doc_retriever = DocumentRetriever()
lexicon = Lexicon()
@app.route('/', methods=['GET', 'POST'])
@app.route('/index', methods=['GET', 'POST'])
@app.route('/multalign', methods=['GET', 'POST'])
@app.route('/search', methods=['GET'])
@app.route('/lexicon', methods=['GET', 'POST'])
@app.route('/explore', methods=['GET', 'POST'])
@app.route('/stats', methods=['GET', 'POST'])
@app.route('/information', methods=['GET', 'POST'])
@app.after_request
def add_header(r):
"""
Add headers to both force latest IE rendering engine or Chrome Frame,
and also to cache the rendered page for 10 minutes.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r | [
6738,
42903,
1330,
8543,
62,
28243,
11,
2581,
198,
6738,
598,
1330,
598,
11,
4981,
11,
3384,
4487,
198,
6738,
598,
13,
23914,
1330,
1635,
198,
6738,
42903,
62,
2118,
913,
1330,
43089,
29572,
198,
11748,
33918,
11,
7007,
11,
28686,
198... | 2.855603 | 464 |
# Authors: Gaetano Carlucci
# Giuseppe Cofano
import time
from utils.Plot import RealTimePlot
class OpenLoopActuator:
"""
Generates CPU load by tuning the sleep time
"""
| [
2,
46665,
25,
12822,
316,
5733,
1879,
2290,
35764,
198,
2,
220,
220,
220,
220,
220,
220,
220,
220,
8118,
1904,
27768,
327,
1659,
5733,
198,
198,
11748,
640,
198,
198,
6738,
3384,
4487,
13,
43328,
1330,
6416,
7575,
43328,
628,
198,
4... | 2.571429 | 77 |
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import sys
import st2common.config as config
from oslo.config import cfg
from st2common.logging.filters import LogLevelFilter
from st2common.models.db import db_setup
from st2common.models.db import db_teardown
from st2common.transport.utils import register_exchanges
LOG = logging.getLogger('st2common.content.bootstrap')
cfg.CONF.register_cli_opt(cfg.BoolOpt('verbose', short='v', default=False))
cfg.CONF.register_cli_opt(cfg.BoolOpt('experimental', default=False))
register_opts()
# This script registers actions and rules from content-packs.
if __name__ == '__main__':
main(sys.argv[1:])
| [
2,
49962,
284,
262,
23881,
32173,
11,
3457,
19203,
25896,
32173,
11537,
739,
530,
393,
517,
198,
2,
18920,
5964,
11704,
13,
220,
4091,
262,
28536,
2393,
9387,
351,
198,
2,
428,
670,
329,
3224,
1321,
5115,
6634,
9238,
13,
198,
2,
383... | 3.537688 | 398 |
# -*- coding: utf-8 -*-
r"""
COMET command line interface (CLI)
==============
Composed by 4 main commands:
train Used to train a machine translation metric.
score Uses COMET to score a list of MT outputs.
download Used to download corpora or pretrained metric.
"""
import json
import os
import click
import yaml
from comet.corpora import corpus2download, download_corpus
from comet.models import download_model, load_checkpoint, model2download, str2model
from comet.trainer import TrainerConfig, build_trainer
from pytorch_lightning import seed_everything
@click.group()
@comet.command()
@click.option(
"--config",
"-f",
type=click.Path(exists=True),
required=True,
help="Path to the configure YAML file",
)
@click.option(
"--saving_file",
type=str,
required=True,
help="Path to the file where the model will be saved",
)
@comet.command()
@click.option(
"--model",
default="wmt-large-da-estimator-1719",
help="Name of the pretrained model OR path to a model checkpoint.",
show_default=True,
type=str,
)
@click.option(
"--source",
"-s",
required=True,
help="Source segments.",
type=click.File(),
)
@click.option(
"--hypothesis",
"-h",
required=True,
help="MT outputs.",
type=click.File(),
)
@click.option(
"--reference",
"-r",
required=True,
help="Reference segments.",
type=click.File(),
)
@click.option(
"--cuda/--cpu",
default=True,
help="Flag that either runs inference on cuda or in cpu.",
show_default=True,
)
@click.option(
"--batch_size",
default=-1,
help="Batch size used during inference. By default uses the same batch size used during training.",
type=int,
)
@click.option(
"--to_json",
default=False,
help="Creates and exports model predictions to a JSON file.",
type=str,
show_default=True,
)
@click.option(
"--n_refs",
default=1,
help="Number of references used during inference. By default number of references == 1.",
type=int,
)
@click.option(
"--n_dp_runs",
default=30,
help="Number of dropout runs at test time. By default 30.",
type=int,
)
@click.option(
"--seed",
default=12,
help="Seed. By default 12.",
type=int,
)
@click.option(
"--d_enc",
default=0.1,
help="dropout value for the encoder. Set to 0.0 to disable",
type=float,
)
@click.option(
"--d_pool",
default=0.1,
help="dropout value for the layerwise pooling layer. Set to 0.0 to disable",
type=float,
)
@click.option(
"--d_ff1",
default=0.1,
help="dropout value for the 1st feed forward layer. Set to 0.0 to disable",
type=float,
)
@click.option(
"--d_ff2",
default=0.1,
help="dropout value for the 2nd feed forward layer. Set to 0.0 to disable",
type=float,
)
# def score(model, source, hypothesis, reference, cuda, batch_size, to_json):
# source = [s.strip() for s in source.readlines()]
# hypothesis = [s.strip() for s in hypothesis.readlines()]
# reference = [s.strip() for s in reference.readlines()]
# data = {"src": source, "mt": hypothesis, "ref": reference}
# data = [dict(zip(data, t)) for t in zip(*data.values())]
# model = load_checkpoint(model) if os.path.exists(model) else download_model(model)
# data, scores = model.predict(data, cuda, show_progress=True, batch_size=batch_size)
# print('here-out')
# print(to_json)
# if isinstance(to_json, str):
# with open(to_json, "w") as outfile:
# json.dump(data, outfile, ensure_ascii=False, indent=4)
# click.secho(f"Predictions saved in: {to_json}.", fg="yellow")
# for i in range(len(scores)):
# click.secho("Segment {} score: {:.3f}".format(i, scores[i]), fg="yellow")
# click.secho(
# "COMET system score: {:.3f}".format(sum(scores) / len(scores)), fg="yellow"
# )
@comet.command()
@click.option(
"--data",
"-d",
type=click.Choice(corpus2download.keys(), case_sensitive=False),
multiple=True,
help="Public corpora to download.",
)
@click.option(
"--model",
"-m",
type=click.Choice(model2download().keys(), case_sensitive=False),
multiple=True,
help="Pretrained models to download.",
)
@click.option(
"--saving_path",
type=str,
help="Relative path to save the downloaded files.",
required=True,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
81,
37811,
198,
9858,
2767,
3141,
1627,
7071,
357,
5097,
40,
8,
198,
25609,
855,
198,
7293,
1335,
416,
604,
1388,
9729,
25,
198,
220,
220,
220,
4512,
220,
220,
220,
... | 2.510808 | 1,758 |
import numpy as np
a1 = np.array([1, 2, 3, 4, 5])
a2 = a1 + 1
print(a2)
| [
11748,
299,
32152,
355,
45941,
198,
198,
64,
16,
796,
45941,
13,
18747,
26933,
16,
11,
362,
11,
513,
11,
604,
11,
642,
12962,
198,
64,
17,
796,
257,
16,
1343,
352,
198,
198,
4798,
7,
64,
17,
8,
198
] | 1.85 | 40 |
import pandas as pd
import numpy as np
###SCRIPT DESCRIPTION###
# This script provides statistical analysis for LSTM labeled data.
###SCRIPT INPUT###
# The .csv file given to this script should be equivalent to the labeled output generated by the corresponding
# data-generation.py script. This means a "Format" column must be present with a unique identifier for each
# unique format. The column "true_label" should hold the integer value of the true class of the sample, the
# column "label" should hold the integer value of the predicted class of the sample.
###SCRIPT OUTPUT###
# This script provides the following values in a .csv file
# - Classwise precision, accuracy and recall for each format
# - Formatwise accuracy
###SCRIPT BEGIN####
input_file = input("Path to test results file: ")
output_file = input("Path to output file: ")
# Load data
data = pd.read_csv(input_file)
# Initial column names and row list
rows = []
cols = ['Format ID', 'Format Example', 'Sample Count']
#each class has a column for precision, recall and accuracy
for cl in data['true_label'].unique():
cols.append(str(cl) + "_precision")
cols.append(str(cl) + "_recall")
cols.append(str(cl) + "_accuracy")
#add format accuracy at the end
cols.append("Format Accuracy")
#for each unique format ID
for format in data['Format'].unique():
#create a subset containing only entries from this format
subset = data[data['Format'] == format]
#find the number of rows this format has
n = subset.shape[0]
#get one example of the format
example = subset['Date'].iloc[0]
row = [format, example, n]
# for each class that truly exists
for cl in data['true_label'].unique():
#create subset with all samples in the format that have this class
class_subset = subset[subset['true_label'] == cl]
#create subset with all samples in the format that are predicted as this class
predicted_subset = subset[subset['label'] == cl]
#create subset with all samples in the format that are not predicted as this class
negative_subset = subset[subset['label'] != cl]
#get indices of rows where this class was correctly classified
correct = np.where(class_subset['true_label'] == class_subset['label'])
#get amount of real, predicted and correctly predicted values of this class
real = class_subset.shape[0]
predicted = predicted_subset.shape[0]
correctly_predicted = len(correct[0])
true_negatives = negative_subset[negative_subset['true_label'] != cl].shape[0]
#precision = True Positives / Predicted
precision = (correctly_predicted / predicted) if predicted > 0 else "N/A" if real == 0 else 0
#recall = True Positives / Real Samples
recall = (correctly_predicted / real) if real > 0 else "N/A"
#accuracy = True Positives + True Negatives / All Format Samples
accuracy = (correctly_predicted + true_negatives) / n
#Add formatwise precision, recall and accuracy to the row
row += [precision, recall, accuracy]
#Add format accuracy to the row (all matching entries / all entries)
acc = subset[subset['label'] == subset['true_label']]
row.append(acc.shape[0] / n)
rows.append(row)
#output dataframe
df=pd.DataFrame(rows, columns=cols)
df.to_csv(output_file, index=False) | [
11748,
19798,
292,
355,
279,
67,
198,
11748,
299,
32152,
355,
45941,
198,
198,
21017,
6173,
46023,
22196,
40165,
21017,
198,
2,
770,
4226,
3769,
13905,
3781,
329,
406,
2257,
44,
15494,
1366,
13,
198,
21017,
6173,
46023,
3268,
30076,
210... | 3.062613 | 1,102 |
# question can be found on leetcode.com/problems/intersection-of-two-arrays/
from Typing import List
| [
2,
1808,
460,
307,
1043,
319,
443,
316,
8189,
13,
785,
14,
1676,
22143,
14,
3849,
5458,
12,
1659,
12,
11545,
12,
3258,
592,
14,
198,
6738,
17134,
278,
1330,
7343,
628
] | 3.1875 | 32 |
'''
fixImageOrientation.py
All credits go to Kyle Fox who wrote this EXIF orientation patch.
We just modified tiny pieces. https://github.com/kylefox
The code is free for non-commercial use.
Please contact the author for commercial use.
Please cite the DIRT Paper if you use the code for your scientific project.
Bucksch et al., 2014 "Image-based high-throughput field phenotyping of crop roots", Plant Physiology
-------------------------------------------------------------------------------------------
Author: Alexander Bucksch
School of Biology and Interactive computing
Georgia Institute of Technology
Mail: bucksch@gatech.edu
Web: http://www.bucksch.nl
-------------------------------------------------------------------------------------------
Copyright (c) 2014 Alexander Bucksch
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of the DIRT Developers nor the names of its
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
from PIL import Image, ImageFile
__all__ = ['fix_orientation']
# PIL's Error "Suspension not allowed here" work around:
# s. http://mail.python.org/pipermail/image-sig/1999-August/000816.html
ImageFile.MAXBLOCK = 1024*1024
# The EXIF tag that holds orientation data.
EXIF_ORIENTATION_TAG = 274
# Obviously the only ones to process are 3, 6 and 8.
# All are documented here for thoroughness.
ORIENTATIONS = {
1: ("Normal", 0),
2: ("Mirrored left-to-right", 0),
3: ("Rotated 180 degrees", 180),
4: ("Mirrored top-to-bottom", 0),
5: ("Mirrored along top-left diagonal", 0),
6: ("Rotated 90 degrees", -90),
7: ("Mirrored along top-right diagonal", 0),
8: ("Rotated 270 degrees", -270)
}
def fix_orientation(img, save_over=False):
"""
`img` can be an Image instance or a path to an image file.
`save_over` indicates if the original image file should be replaced by the new image.
* Note: `save_over` is only valid if `img` is a file path.
"""
path = None
if not isinstance(img, Image.Image):
path = img
img = Image.open(path)
elif save_over:
raise ValueError("You can't use `save_over` when passing an Image instance. Use a file path instead.")
try:
orientation = img._getexif()[EXIF_ORIENTATION_TAG]
except (TypeError, AttributeError, KeyError):
print "WARNING: Image file has no EXIF data."
orientation=-1
pass
if orientation in [3,6,8]:
degrees = ORIENTATIONS[orientation][1]
img = img.rotate(degrees)
if save_over and path is not None:
try:
img.save(path, quality=100)
except IOError:
# Try again, without optimization (PIL can't optimize an image
# larger than ImageFile.MAXBLOCK, which is 64k by default).
# Setting ImageFile.MAXBLOCK should fix this....but who knows.
img.save(path, quality=100)
return (img, degrees)
else:
return (img, 0) | [
7061,
6,
198,
13049,
5159,
46,
8289,
341,
13,
9078,
198,
198,
3237,
10824,
467,
284,
14316,
5426,
508,
2630,
428,
7788,
5064,
12852,
8529,
13,
198,
1135,
655,
9518,
7009,
5207,
13,
3740,
1378,
12567,
13,
785,
14,
74,
2349,
12792,
19... | 3.091954 | 1,392 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 12 13:15:18 2022
@author: Jaimew
"""
from random import uniform as rd
import numpy as np
import time
import sys
R = 10000000
inside = 0
start_time = time.perf_counter()
for i in range(R):
inside = inside + in_circle()
if i % (R/1000) == 0:
sys.stdout.write("\r"+ str(round(i/R*100, 2)) + "%")
sys.stdout.write("\r")
pi = inside/R*4
print(pi)
try:
with open("pi.txt", "r") as f:
f1 = f.read().splitlines()
pi_file = float(f1[0])
R_file = int(f1[1])
except:
with open("pi.txt", "w+") as f:
pass
pi_file = 0.0
R_file = 0
R_new = R + R_file
pi_new = (pi*R + pi_file*R_file)/R_new
with open ("pi.txt", "w") as f:
f.write(str(pi_new)+"\n"+str(R_new))
print(pi_new)
print(np.pi - pi_new)
#Show time ellapsed
end_time = time.perf_counter()
dt = end_time-start_time
print("Calculation time: "+str(dt))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
41972,
319,
3300,
2365,
1105,
1511,
25,
1314,
25,
1507,
33160,
198,
198,
31,
9800,
25,
449,
1385,
413... | 2.092511 | 454 |
from dataclasses import dataclass, field
from typing import List, Union, Pattern
import spacy
from spacy.language import Language
from spacy.pipeline import EntityRuler
from spacy.tokens import Doc, Token
from convenient_ai.nlp.__common__.io import Json
from convenient_ai.nlp.spacy.types import RulePattern
@dataclass
class ConvenientSpacy:
"""
Convenient class implementation for the spacy library.
"""
nlp: Language = field(default_factory=Language)
pipeline_names: List[str] = field(default_factory=list)
"""
Returns a ConvenientSpacy instance with a blank model
"""
@staticmethod
"""
Returns a ConvenientSpacy instance with a predefined model
"""
@staticmethod
"""
Returns a ConvenientSpacy instance with a language instance
"""
@staticmethod
"""
Pipes the given text through the spacy pipeline
"""
"""
Appends an EntityRuler to the spacy pipeline
"""
"""
Appends a custom component to the spacy pipeline
"""
"""
Creates the spacy pipeline
"""
"""
Stores the spacy model at the given path
Creates a config.json file which contains all relevant information to restore the model
"""
"""
Restores the spacy model
"""
@staticmethod
"""
Fixes the character cases of the first word as well as noun and proper noun words.
Excludes part of speech words which are given by the 'excludes' attribute.
At default 'PUNCT' and 'ADV' pos tags will be removed.
"""
| [
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
11,
2214,
198,
6738,
19720,
1330,
7343,
11,
4479,
11,
23939,
198,
198,
11748,
599,
1590,
198,
6738,
599,
1590,
13,
16129,
1330,
15417,
198,
6738,
599,
1590,
13,
79,
541,
4470,
1330,
2088... | 3.067729 | 502 |
ADDRESS_WEBSOCKET = "ws://127.0.0.1/socket"
ADDRESS_API = "https://127.0.0.1/api"
ADDRESS_API_SEARCH = "https://127.0.0.1/api/search/"
| [
2885,
7707,
7597,
62,
8845,
4462,
11290,
2767,
796,
366,
18504,
1378,
16799,
13,
15,
13,
15,
13,
16,
14,
44971,
1,
198,
2885,
7707,
7597,
62,
17614,
796,
366,
5450,
1378,
16799,
13,
15,
13,
15,
13,
16,
14,
15042,
1,
198,
2885,
7... | 2 | 68 |
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# WARNING: the syntax of the builtin types is not checked, so please
# don't add something syntactically invalid. It will not be fun to
# track down the bug.
Types = (
# C types
'bool',
'char',
'short',
'int',
'long',
'float',
'double',
# stdint types
'int8_t',
'uint8_t',
'int16_t',
'uint16_t',
'int32_t',
'uint32_t',
'int64_t',
'uint64_t',
'intptr_t',
'uintptr_t',
# stddef types
'size_t',
'ssize_t',
# Mozilla types: "less" standard things we know how serialize/deserialize
'nsresult',
'nsString',
'nsCString',
'mozilla::ipc::Shmem',
'mozilla::ipc::FileDescriptor'
)
Includes = (
'mozilla/Attributes.h',
'base/basictypes.h',
'prtime.h',
'nscore.h',
'IPCMessageStart.h',
'ipc/IPCMessageUtils.h',
'nsAutoPtr.h',
'nsStringGlue.h',
'nsTArray.h',
'nsIFile.h',
'mozilla/ipc/ProtocolUtils.h',
)
| [
2,
770,
8090,
6127,
5178,
318,
2426,
284,
262,
2846,
286,
262,
29258,
5094,
198,
2,
13789,
11,
410,
13,
362,
13,
15,
13,
1002,
257,
4866,
286,
262,
4904,
43,
373,
407,
9387,
351,
428,
198,
2,
2393,
11,
921,
460,
7330,
530,
379,
... | 2.210728 | 522 |
#!python3
# Copyright (C) 2020 Victor O. Costa
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
# 3rd party
import numpy as np
# Own
from base_metaheuristic import Base
class PSO(Base):
""" Class for the Particle Swarm Optimization algorithm (PSO), following (Poli et al., 2007) """
def __init__(self):
""" Constructor """
# Define verbosity and NULL problem definition
super().__init__
# Initial algorithm parameters
self.relative_iterations = None # Array containing the iterations at which best solutions are reported
self.num_iter = 0 # Total number of iterations
self.population_size = 0 # Number of particles
self.personal_acceleration = 0.5 # Tendency towards personal bests
self.global_acceleration = 0.5 # Tendency towards global best
self.inertia_weight = 1.0 # Inertia weight constant at one is the same as no inertia weight
# Optimization results
self.swarm_positions = None # Current solutions of the swarm
self.swarm_velocities = None # Current velocities (perturbations) of each particle in the swarm
self.personal_bests = None # Best solution each particle encountere during the search
self.global_best = None # Best solution found in the search
# Flag for modified PSO
self.adaptive_inertia = False # In vanilla PSO, there is no inertia weighting
def set_parameters(self, population_size, personal_acceleration, global_acceleration, function_evaluations_array):
""" Define values for the parameters used by the algorithm """
# Input error checking
if population_size <= 0:
print("Population size must be greater than zero")
exit(-1)
if personal_acceleration < 0 or global_acceleration < 0:
print("Personal and global weights must be equal or greater than zero")
exit(-1)
if len(function_evaluations_array) == 0:
print("Error, objective function evaluation array must not be empty")
exit(-1)
# Number of function evaluations for PSO: population_size * num_iterations
function_evaluations_array = np.array(function_evaluations_array)
self.relative_iterations = function_evaluations_array / population_size
all_divisible = (np.array([x.is_integer() for x in self.relative_iterations])).all()
if not all_divisible:
print("Error, at least one number of function evaluations is not divisible by population size")
exit(-1)
self.num_iter = int(np.max(self.relative_iterations))
self.population_size = population_size
self.personal_acceleration = personal_acceleration
self.global_acceleration = global_acceleration
def define_variables(self, initial_ranges, is_bounded):
""" Defines the number of variables, their initial values ranges and wether or not these ranges constrain the variable during the search """
# Input error checking
if self.num_iter == 0:
print("Error, please set algorithm parameters before variables definition")
exit(-1)
if len(initial_ranges) == 0 or len(is_bounded) == 0:
print("Error, initial_ranges and is_bounded lists must not be empty")
exit(-1)
if len(initial_ranges) != len(is_bounded):
print("Error, the number of variables for initial_ranges and is_bounded must be equal")
exit(-1)
self.num_variables = len(initial_ranges)
self.initial_ranges = initial_ranges
self.is_bounded = is_bounded
self.swarm_positions = np.zeros((self.population_size, self.num_variables + 1))
self.swarm_velocities = np.zeros((self.population_size, self.num_variables))
# Personal and global bests initially have infinite cost
self.personal_bests = np.zeros((self.population_size, self.num_variables + 1))
self.personal_bests[:, -1] = float('inf')
self.global_best = np.zeros(self.num_variables + 1)
self.global_best[-1] = float('inf')
def update_inertia_weight(self, acceptance_count):
""" Inertia weight is not updated in vanilla PSO. It is kept at 1.0, the same of determining no inertia weight """
pass
def optimize(self):
""" Initializes the archive and enter the main loop, until it reaches maximum number of iterations """
# Variables and cost function must be defined prior to optimization
if self.num_variables == None:
print("Error, number of variables and their boundaries must be defined prior to optimization")
exit(-1)
if self.cost_function == None:
print("Error, cost function must be defined prior to optimization")
exit(-1)
# Initialize swarm positions and velocities randomly
for i in range(self.population_size):
for j in range(self.num_variables):
self.swarm_positions[i, j] = np.random.uniform(self.initial_ranges[j][0], self.initial_ranges[j][1])
self.swarm_velocities[i, j] = np.random.uniform(self.initial_ranges[j][0], self.initial_ranges[j][1])
# Keep solutions defined by function_evaluations_array
recorded_solutions = []
# Main optimization loop (population_size * num_iter cost function evaluations)
for iteration in range(self.num_iter):
# When using adaptive inertia weight
acceptance_count = 0
for particle in range(self.population_size):
# Compute cost of new position
self.swarm_positions[particle, -1] = self.cost_function(self.swarm_positions[particle, :-1])
# Update personal best solution
if self.swarm_positions[particle, -1] < self.personal_bests[particle, -1]:
self.personal_bests[particle, :] = np.array(self.swarm_positions[particle, :])
acceptance_count += 1
# Update global best solution
if self.personal_bests[particle, -1] < self.global_best[-1]:
self.global_best = np.array(self.personal_bests[particle, :])
# Update inertia weight based on success rate of the swarm
# Has no effect in vanilla PSO
self.update_inertia_weight(acceptance_count)
# Update velocity vector
self.swarm_velocities[particle, :] = self.inertia_weight * (self.swarm_velocities[particle, :]
+ self.personal_acceleration * np.random.rand() * (self.personal_bests[particle, :-1] - self.swarm_positions[particle, :-1])
+ self.global_acceleration * np.random.rand() * (self.global_best[:-1] - self.swarm_positions[particle, :-1]))
# Update position vector
self.swarm_positions[particle, :-1] = self.swarm_positions[particle, :-1] + self.swarm_velocities[particle, :]
# Restrict search for bounded variables
for var in range(self.num_variables):
if self.is_bounded[var]:
# Use the hard border strategy
if self.swarm_positions[particle, var] < self.initial_ranges[var][0]:
self.swarm_positions[particle, var] = self.initial_ranges[var][0]
elif self.swarm_positions[particle, var] > self.initial_ranges[var][1]:
self.swarm_positions[particle, var] = self.initial_ranges[var][1]
if (self.relative_iterations - 1 == iteration).any():
recorded_solutions.append(np.array(self.global_best))
return np.array(recorded_solutions)
class AIWPSO(PSO):
""" Class for the Adaptative Inertia Weight Particle Swarm Optimization (AIWPSO), following (Nickabadi et al., 2011).
Only the adaptive mechanism of AIWPSO is implemented here.
The paper also uses a mutation mechanism for the worst particle at each iteration, which is left unimplemented. """
def __init__(self):
""" Constructor """
super().__init__()
self.adaptive_inertia = True
self.max_inertia = None
self.min_inertia = None
def update_inertia_weight(self, acceptance_count):
""" Use swarm success rate to update the inertia weight """
success_percentage = acceptance_count / self.population_size
self.inertia_weight = (self.max_inertia - self.min_inertia) * success_percentage + self.min_inertia
| [
2,
0,
29412,
18,
198,
198,
2,
15069,
357,
34,
8,
12131,
220,
12622,
440,
13,
18133,
198,
198,
2,
770,
1430,
318,
1479,
3788,
25,
345,
460,
17678,
4163,
340,
290,
14,
273,
13096,
198,
2,
340,
739,
262,
2846,
286,
262,
22961,
3611... | 2.315256 | 4,241 |
"""
File Name: test_FGTTool.py
Authors: Kyle Seidenthal
Date: 11-05-2020
Description: Tests for the FGT Tool Class
"""
import pytest
from friendly_ground_truth.controller.tools import FGTTool
from mock import MagicMock
class TestFGTTool():
"""
Tests for the FGTTool superclass
"""
def test_init(self):
"""
Test creating a new tool
Test Condition:
The name, icon_string, and ids are all set
"""
tool = FGTTool("test tool", 'abc', 123, MagicMock(), MagicMock())
assert tool.name == "test tool"
assert tool.icon_string == 'abc'
assert tool.id == 123
def test_set_icon_string(self):
"""
Test setting the icon string
Test Condition:
The icon string is replaced with a new string
"""
tool = FGTTool("test tool", 'abc', 123, MagicMock(), MagicMock())
tool.icon_string = 'xyz'
assert tool.icon_string == 'xyz'
def test_set_cursor(self):
"""
Test setting the cursor.
Test Condition:
The cursor is set.
"""
tool = FGTTool("test tool", 'abc', 123, MagicMock(), MagicMock(),
cursor='hand')
assert tool.cursor == 'hand'
| [
37811,
198,
8979,
6530,
25,
1332,
62,
30386,
15751,
970,
13,
9078,
198,
198,
30515,
669,
25,
14316,
1001,
738,
14201,
198,
198,
10430,
25,
1367,
12,
2713,
12,
42334,
198,
198,
11828,
25,
30307,
329,
262,
376,
19555,
16984,
5016,
198,
... | 2.312274 | 554 |
"""
Make sure that when completing MazeWorld, if the agent navigates to the end, the
state is marked as terminal at some point.
"""
import holodeck
| [
37811,
198,
12050,
1654,
326,
618,
14339,
33412,
10603,
11,
611,
262,
5797,
20436,
689,
284,
262,
886,
11,
262,
198,
5219,
318,
7498,
355,
12094,
379,
617,
966,
13,
198,
198,
37811,
198,
11748,
6039,
1098,
694,
628
] | 3.846154 | 39 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
11748,
4818,
8079,
198,
6738,
5366,
13,
9945,
1330,
20613,
198,
6738,
5366,
13,
85,
17,
1330,
10011,
2611,
44,
4254,
198,
6738,
42625,
14208,
13,
9945,
1330,
4981,
198
] | 3.263158 | 38 |
import numpy as np
import tensorflow as tf
| [
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
628
] | 3.384615 | 13 |
"""
Implements wrapper class and methods to work with Brightcove's Social Syndication API.
See: https://apis.support.brightcove.com/social-syndication/getting-started/public-syndication-api-overview.html
"""
from typing import Union
from requests.models import Response
from .Base import Base
from .OAuth import OAuth
class SocialSyndication(Base):
"""
Class to wrap the Brightcove Social Syndication API calls. Inherits from Base.
Attributes:
-----------
base_url (str)
Base URL for API calls.
Methods:
--------
GetAllSyndications(self, account_id: str='') -> Response
Gets a list of all syndications currently configured for the account.
GetSyndication(self, syndication_id: str, account_id: str='') -> Response
Gets the configuration data for a syndication.
CreateSyndication(self, json_body: Union[str, dict], account_id: str='') -> Response
Creates a new syndication.
DeleteSyndication(self, syndication_id: str, account_id: str='') -> Response
Deletes a syndication.
UpdateSyndication(self, syndication_id: str, json_body: Union[str, dict], account_id: str='') -> Response
Updates the configuration data for a syndication.
GetTemplate(self, syndication_id: str, account_id: str='') -> Response
Gets a universal syndication's custom feed template.
UploadTemplate(self, syndication_id: str, json_body: Union[str, dict], account_id: str='') -> Response
Uploads a custom feed template to a universal syndication.
"""
# base URL for all API calls
base_url = 'https://social.api.brightcove.com/v1/accounts/{account_id}/mrss/syndications'
def __init__(self, oauth: OAuth) -> None:
"""
Args:
oauth (OAuth): OAuth instance to use for the API calls.
"""
super().__init__(oauth=oauth)
def GetAllSyndications(self, account_id: str='') -> Response:
"""
Gets a list of all syndications currently configured for the account.
Args:
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = (self.base_url).format(account_id=account_id or self.oauth.account_id)
return self.session.get(url, headers=self.oauth.headers)
def GetSyndication(self, syndication_id: str, account_id: str='') -> Response:
"""
Gets the configuration data for a syndication.
Args:
syndication_id (str): Syndication ID to get config for.
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/{syndication_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url, headers=self.oauth.headers)
def CreateSyndication(self, json_body: Union[str, dict], account_id: str='') -> Response:
"""
Creates a new syndication.
Args:
json_body (Union[str, dict]): JSON body for the call.
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = (self.base_url).format(account_id=account_id or self.oauth.account_id)
return self.session.post(url, headers=self.oauth.headers, data=self._json_to_string(json_body))
def DeleteSyndication(self, syndication_id: str, account_id: str='') -> Response:
"""
Deletes a syndication.
Args:
syndication_id (str): Syndication ID to delete.
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/{syndication_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.delete(url, headers=self.oauth.headers)
def UpdateSyndication(self, syndication_id: str, json_body: Union[str, dict], account_id: str='') -> Response:
"""
Updates the configuration data for a syndication. A Syndication object specifying non-null values for
writable fields to be updated should be passed as the request body. Note that the type property cannot
be changed from the value specified when the syndication was created.
Args:
syndication_id (str): Syndication ID to update.
json_body (Union[str, dict]): JSON body for the call.
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/{syndication_id}'.format(account_id=account_id or self.oauth.account_id)
return self.session.patch(url, headers=self.oauth.headers, data=self._json_to_string(json_body))
def GetTemplate(self, syndication_id: str, account_id: str='') -> Response:
"""
Gets a universal syndication's custom feed template.
Args:
syndication_id (str): Syndication ID to get the tempate for.
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/{syndication_id}/template'.format(account_id=account_id or self.oauth.account_id)
return self.session.get(url, headers=self.oauth.headers)
def UploadTemplate(self, syndication_id: str, json_body: Union[str, dict], account_id: str='') -> Response:
"""
Uploads a custom feed template to a universal syndication.
Args:
syndication_id (str): Syndication ID to upload the template to.
json_body (Union[str, dict]): JSON body for the call.
account_id (str, optional): Video Cloud account ID. Defaults to ''.
Returns:
Response: API response as requests Response object.
"""
url = f'{self.base_url}/{syndication_id}/template'.format(account_id=account_id or self.oauth.account_id)
return self.session.put(url, headers=self.oauth.headers, data=self._json_to_string(json_body))
| [
37811,
198,
3546,
1154,
902,
29908,
1398,
290,
5050,
284,
670,
351,
17558,
66,
659,
338,
5483,
19148,
3299,
7824,
13,
198,
198,
6214,
25,
3740,
1378,
499,
271,
13,
11284,
13,
29199,
66,
659,
13,
785,
14,
14557,
12,
1837,
358,
3299,
... | 3.001576 | 1,903 |
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support import ui
from selenium.common.exceptions import NoSuchElementException
import os
import time
import random
options = Options()
options.add_argument("start-maximized")
options.add_argument("disable-infobars")
options.add_argument("--disable-extensions")
totalLikes = 0
hashtags = ["doodlesofinstagram", "sociallyawkward", "millenials", "comicoftheday", "inmyhead", "comicstrip", "microtales", "thoughts",
"stories", "wordporn",
"wordsmith", "musings"]
comments = ['Nice one',
'I love your profile',
'Your feed is an inspiration',
'Just incredible',
'This is something amazing',
'Love your posts',
'Looks awesome',
'Getting inspired by you',
'Yes!',
'You just get me.',
'so subtle yet...',
'Tips hat!',
'Just wanna say, this is beautiful',
'how do you get these Ideas?',
'In love with your feed',
'Dope.',
'Damn...',
'you nailed it.',
'respect.',
'this is lovely']
# def checkVisit(self):
# time.sleep(5)
# heart = self.driver.find_element_by_css_selector("[aria-label=Like]")
# if(heart.get_attribute("fill") == "#262626"):
# return True
# else:
# return False
if __name__ == "__main__":
xsum = 0
igbot = InstaBot('siddhantscookups', 'AlphaQuartz123@', xsum)
for tag in hashtags:
igbot.searchTag(tag)
for pic in range(0, 10):
igbot.choosePic()
| [
6738,
384,
11925,
1505,
1330,
3992,
26230,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
46659,
13,
25811,
1330,
18634,
201,
198,
6738,
384,
11925,
1505,
13,
12384,
26230,
13,
11284,
13,
9019,
1330,
5313,
32103,
21321,
201,
19... | 2.121901 | 968 |
"""
Workspace settings, mostly identical to user settings
"""
from dataclasses import dataclass
from ..api_child import ApiChild
from ..person_settings.call_intercept import CallInterceptApi
from ..person_settings.call_waiting import CallWaitingApi
from ..person_settings.caller_id import CallerIdApi
from ..person_settings.forwarding import PersonForwardingApi
from ..person_settings.monitoring import MonitoringApi
from ..person_settings.numbers import NumbersApi
from ..person_settings.permissions_in import IncomingPermissionsApi
from ..person_settings.permissions_out import OutgoingPermissionsApi
from ..rest import RestSession
__all__ = ['WorkspaceSettingsApi']
@dataclass(init=False)
class WorkspaceSettingsApi(ApiChild, base='workspaces'):
"""
API for all workspace settings.
Most of the workspace settings are equivalent to corresponding user settings. For these settings the attributes of
this class are instances of the respective user settings APIs. When calling endpoints of these APIs workspace IDs
need to be passed to the ``person_id`` parameter of the called function.
"""
call_intercept: CallInterceptApi
call_waiting: CallWaitingApi
caller_id: CallerIdApi
forwarding: PersonForwardingApi
monitoring: MonitoringApi
numbers: NumbersApi
permissions_in: IncomingPermissionsApi
permissions_out: OutgoingPermissionsApi
| [
37811,
198,
23044,
10223,
6460,
11,
4632,
10411,
284,
2836,
6460,
198,
198,
37811,
198,
6738,
4818,
330,
28958,
1330,
4818,
330,
31172,
198,
198,
6738,
11485,
15042,
62,
9410,
1330,
5949,
72,
16424,
198,
6738,
11485,
6259,
62,
33692,
13... | 3.531646 | 395 |
from subprocess import Popen
def load_jupyter_server_extension(nbapp):
"""serve the bokeh-app directory with bokeh server"""
Popen(["streamlit", "run", "st_runner.py", "apps", "--browser.serverAddress=0.0.0.0", "--server.enableCORS=False", "--browser.gatherUsageStats=False"])
| [
6738,
850,
14681,
1330,
8099,
268,
628,
198,
4299,
3440,
62,
73,
929,
88,
353,
62,
15388,
62,
2302,
3004,
7,
46803,
1324,
2599,
198,
220,
220,
220,
37227,
2655,
303,
262,
1489,
365,
71,
12,
1324,
8619,
351,
1489,
365,
71,
4382,
37... | 2.707547 | 106 |
#!/usr/bin/env python
import os
import logging
from sys import argv, stderr, exit
from os.path import isdir, exists, join, relpath
from os import makedirs, rename
from shutil import rmtree
from time import sleep
from kazoo.client import KazooClient, KazooState
logging.basicConfig()
node_id = '%s/%d' % (os.uname()[1], os.getpid())
print >>stderr, 'Connecting...'
zk = KazooClient(hosts=os.getenv('ZK_HOST', '127.0.0.1:2181'))
@zk.add_listener
service_ns = os.getenv('SERVICE_NAMESPACE', '/service')
service_id = os.getenv('SERVICE_ID')
instance_ns = os.getenv('INSTANCE_NAMESPACE', '/instance')
instance_id = os.getenv('INSTANCE_ID', node_id)
base_zk_path = '%s/%s' % (service_ns, service_id)
zk.start()
loop()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
28686,
198,
11748,
18931,
198,
6738,
25064,
1330,
1822,
85,
11,
336,
1082,
81,
11,
8420,
198,
6738,
28686,
13,
6978,
1330,
318,
15908,
11,
7160,
11,
4654,
11,
823,
6978,
... | 2.50173 | 289 |
import requests
import os
import json
# DOCS: https://api.imgflip.com/
dirpath = os.path.dirname(os.path.abspath(__file__))
config_dir = os.path.join(dirpath, "configs")
meme_conf_name = os.path.join(config_dir, "meme_conf.json")
os.makedirs(config_dir, exist_ok=True)
# Method: GET
template_url = "https://api.imgflip.com/get_memes"
# Method: POST
generate_url = "https://api.imgflip.com/caption_image"
def parse_message(message):
"""
!meme [meme name]; [(optional) text1]; [(optional) text2]
"""
args = []
template, top, bot = '', '', ''
try:
args = message.split('!meme')[1].split(';')
print(args)
cnt = len(args)
if cnt >= 1:
template = args[0].lstrip().split(' ')[0]
if cnt >= 1:
top = args[0].lstrip().split(' ')[1]
if cnt >= 2:
bot = args[1]
return {'template': template, 'top': top, 'bot': bot}
except Exception as e:
return False
get_conf() | [
11748,
7007,
198,
11748,
28686,
198,
11748,
33918,
198,
198,
2,
37760,
50,
25,
3740,
1378,
15042,
13,
9600,
2704,
541,
13,
785,
14,
198,
198,
15908,
6978,
796,
28686,
13,
6978,
13,
15908,
3672,
7,
418,
13,
6978,
13,
397,
2777,
776,
... | 2.146552 | 464 |
๏ปฟ#!/usr/bin/python
#!coding: utf-8
import tornado.httpserver
import tornado.ioloop
import tornado.options
from tornado.options import options
from application import Application
if __name__ == '__main__':
tornado.options.parse_command_line()
print "open server!"
http_server = tornado.httpserver.HTTPServer(Application())
http_server.listen(options.port)
tornado.ioloop.IOLoop.instance().start()
| [
171,
119,
123,
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
0,
66,
7656,
25,
3384,
69,
12,
23,
628,
198,
11748,
33718,
13,
5450,
18497,
198,
11748,
33718,
13,
1669,
11224,
198,
11748,
33718,
13,
25811,
198,
6738,
33718,
13,
25811,
... | 3.058394 | 137 |
import torch
configurations = {
1: dict(
SEED = 1337, # random seed for reproduce results
DATA_ROOT = '/data2/yugehuang/data/', # the parent root where your train/val/test data are stored
RECORD_DIR = '/data2/yugehuang/data/refined_ms1m.txt', # the dataset record dir
MODEL_ROOT = './train_log/model', # the root to buffer your checkpoints
LOG_ROOT = './train_log/log', # the root to log your train/val status
BACKBONE_RESUME_ROOT = "",
HEAD_RESUME_ROOT = "",
BACKBONE_NAME = 'IR_101', # support: ['ResNet_50', 'ResNet_101', 'ResNet_152', 'IR_50', 'IR_101', 'IR_152', 'IR_SE_50', 'IR_SE_101', 'IR_SE_152']
HEAD_NAME = "CurricularFace", # support: ['ArcFace', 'CurricularFace']
LOSS_NAME = 'Softmax', # support: ['Focal', 'Softmax']
INPUT_SIZE = [112, 112], # support: [112, 112] and [224, 224]
RGB_MEAN = [0.5, 0.5, 0.5], # for normalize inputs to [-1, 1]
RGB_STD = [0.5, 0.5, 0.5],
EMBEDDING_SIZE = 512, # feature dimension
BATCH_SIZE = 512,
LR = 0.1, # initial LR
START_EPOCH = 0, #start epoch
NUM_EPOCH = 24, # total epoch number
WEIGHT_DECAY = 5e-4, # do not apply to batch_norm parameters
MOMENTUM = 0.9,
STAGES = [10, 18, 22], # ms1m epoch stages to decay learning rate
WORLD_SIZE = 1,
RANK = 0,
GPU = 0, # specify your GPU ids
DIST_BACKEND = 'nccl',
DIST_URL = 'tcp://localhost:23456',
NUM_WORKERS = 5,
TEST_GPU_ID = [0,1,2,3,4,5,6,7]
),
}
| [
11748,
28034,
198,
198,
11250,
20074,
796,
1391,
198,
220,
220,
220,
352,
25,
8633,
7,
198,
220,
220,
220,
220,
220,
220,
220,
7946,
1961,
796,
1511,
2718,
11,
1303,
4738,
9403,
329,
22919,
2482,
198,
220,
220,
220,
220,
220,
220,
... | 2.125 | 744 |
#
# @lc app=leetcode id=368 lang=python3
#
# [368] Largest Divisible Subset
#
# @lc code=start
# if __name__ == '__main__':
# a = Solution()
# b = a.largestDivisibleSubset([1,2,4,8,16])
# print(b)
# @lc code=end
| [
2,
198,
2,
2488,
44601,
598,
28,
293,
316,
8189,
4686,
28,
27412,
42392,
28,
29412,
18,
198,
2,
198,
2,
685,
27412,
60,
406,
853,
395,
4777,
12843,
3834,
2617,
198,
2,
198,
198,
2,
2488,
44601,
2438,
28,
9688,
198,
198,
2,
611,
... | 2.101852 | 108 |
from typing import Tuple
import numpy as np
from keras import Model
from keras.models import load_model
| [
6738,
19720,
1330,
309,
29291,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
41927,
292,
1330,
9104,
198,
6738,
41927,
292,
13,
27530,
1330,
3440,
62,
19849,
628
] | 3.655172 | 29 |
"""
aspen.resources.rendered_resource
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements rendered resources.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from aspen.resources.negotiated_resource import NegotiatedResource
from aspen.utils import typecheck
from aspen.resources.pagination import parse_specline
class RenderedResource(NegotiatedResource):
"""Model a limiting case of negotiated resources.
A negotiated resource has multiple content pages, one per media type, with
the media type of each explicitly specified in-line. A rendered resource
has one content page, and the media type is inferred from the file
extension. In both cases the rendering machinery is used to transform the
bytes in each page into output for the wire.
"""
min_pages = 1
max_pages = 4
def parse_into_pages(self, raw):
"""Extend to insert page one if needed.
"""
pages = NegotiatedResource.parse_into_pages(self, raw)
self._prepend_empty_pages(pages, 3)
return pages
def _parse_specline(self, specline):
"""Override to simplify.
Rendered resources have a simpler specline than negotiated resources.
They don't have a media type, and the renderer is optional.
"""
#parse into parts.
parts = parse_specline(specline)
#Assign parts, discard media type
renderer = parts[1]
media_type = self.media_type
if not renderer:
renderer = self.website.default_renderers_by_media_type[media_type]
#Hydrate and validate renderer
make_renderer = self._get_renderer_factory(media_type, renderer)
return (make_renderer, media_type)
| [
37811,
198,
292,
3617,
13,
37540,
13,
26238,
62,
31092,
198,
27156,
27156,
93,
198,
198,
3546,
1154,
902,
15111,
4133,
13,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
... | 2.956026 | 614 |
import os
import sys
from types import SimpleNamespace
if sys.version_info >= (3, 10, 0) or sys.version_info < (3, 8, 0):
raise OSError(f'bert2tf requires Python 3.8/3.9, but yours is {sys.version}')
__proto_version__ = '0.0.22'
BERT2TF_GLOBAL = SimpleNamespace()
BERT2TF_GLOBAL.imported = SimpleNamespace()
BERT2TF_GLOBAL.imported.executors = False
BERT2TF_GLOBAL.imported.drivers = False
__bert2tf_env__ = (
'BERT2TF_LOG_LONG',
'BERT2TF_LOG_VERBOSITY',
'BERT2TF_DEFAULT_HOST',
'BERT2TF_PEA_NAME'
)
__default_host__ = os.environ.get('BERT2TF_DEFAULT_HOST', '0.0.0.0')
__stop_msg__ = 'terminated'
__ready_msg__ = 'ready for listening...'
def import_classes(namespace: str, targets=None, show_import_table: bool = False, import_once: bool = False):
"""
Import all or selected executors into the runtime. This is called when bert2tf is first imported for registering the YAML
constructor beforehand.
:param namespace: the namespace to import
:param targets: the list of executor names to import
:param show_import_table: show the import result as a table
:param import_once: import everything only once, to avoid repeated import
"""
import os, sys, re
from .logging import default_logger
if namespace == 'bert2tf.executors':
import_type = 'ExecutorType'
if import_once and BERT2TF_GLOBAL.imported.executors:
return
elif namespace == 'bert2tf.drivers':
import_type = 'DriverType'
if import_once and BERT2TF_GLOBAL.imported.drivers:
return
else:
raise TypeError(f'namespace: {namespace} is unrecognized')
from setuptools import find_packages
import pkgutil
from pkgutil import iter_modules
path = os.path.dirname(pkgutil.get_loader(namespace).path)
modules = set()
for info in iter_modules([path]):
if not info.ispkg:
modules.add('.'.join([namespace, info.name]))
for pkg in find_packages(path):
modules.add('.'.join([namespace, pkg]))
pkgpath = path + '/' + pkg.replace('.', '/')
if sys.version_info.major == 2 or (sys.version_info.major == 3 and sys.version_info.minor < 6):
for _, name, ispkg in iter_modules([pkgpath]):
if not ispkg:
modules.add('.'.join([namespace, pkg, name]))
else:
for info in iter_modules([pkgpath]):
if not info.ispkg:
modules.add('.'.join([namespace, pkg, info.name]))
# filter
ignored_module_pattern = r'\.tests|\.api|\.bump_version'
modules = {m for m in modules if not re.findall(ignored_module_pattern, m)}
from collections import defaultdict
load_stat = defaultdict(list)
bad_imports = []
if isinstance(targets, str):
targets = {targets}
elif isinstance(targets, list):
targets = set(targets)
elif targets is None:
targets = {}
else:
raise TypeError(f'target must be a set, but received {targets!r}')
depend_tree = {}
import importlib
from .helper import colored
for m in modules:
try:
mod = importlib.import_module(m)
for k in dir(mod):
# import the class
if (getattr(mod, k).__class__.__name__ == import_type) and (not targets or k in targets):
try:
_c = getattr(mod, k)
load_stat[m].append(
(k, True, colored('โธ', 'green').join(f'{vvv.__name__}' for vvv in _c.mro()[:-1][::-1])))
d = depend_tree
for vvv in _c.mro()[:-1][::-1]:
if vvv.__name__ not in d:
d[vvv.__name__] = {}
d = d[vvv.__name__]
d['module'] = m
if k in targets:
targets.remove(k)
if not targets:
return # target execs are all found and loaded, return
except Exception as ex:
load_stat[m].append((k, False, ex))
bad_imports.append('.'.join([m, k]))
if k in targets:
raise ex # target class is found but not loaded, raise return
except Exception as ex:
load_stat[m].append(('', False, ex))
bad_imports.append(m)
if targets:
raise ImportError(f'{targets} can not be found in bert2tf')
if show_import_table:
from .helper import print_load_table
print_load_table(load_stat)
else:
if bad_imports:
default_logger.warning(f'due to the missing dependencies or bad implementations, '
f'{bad_imports} can not be imported ')
if namespace == 'bert2tf.executors':
BERT2TF_GLOBAL.imported.executors = True
elif namespace == 'bert2tf.drivers':
BERT2TF_GLOBAL.imported.drivers = True
return depend_tree
import_classes('bert2tf.drivers', show_import_table=False, import_once=True)
import_classes('bert2tf.executors', show_import_table=False, import_once=True)
# add frequently-used imported
from .executors import BaseExecutor as Executor
from .executors.models import *
from .executors.models.bert import Bert, BertPreTraining
from .executors.models.roberta import Roberta, RobertaPreTraining
from .executors.models.electra import ElectraDiscriminator
from .executors.models.gpt import GPT, GPTPreTraining, GPTAutoRegressive
from .executors.models.gpt.chinese import GPTChinese, GPTChinesePretraining, GPTChineseAutoRegressive
from .executors.models.plm_bert import PlmBert
from .executors.models.soft_masked_bert import SoftMaskedBert
from .executors.models.transformer import Transformer
from .executors.models.deberta import DeBerta
from .executors.tokenizers.bert import BertTokenizer
from .executors.tokenizers.gpt import GPTTokenizer
from .executors.models.configs import BertConfig, ElectraConfig, GPTConfig, HuaweiGPTConfig, SoftMaskedBertConfig
from .flow import Flow
from .executors.models.optimizers import *
| [
11748,
28686,
198,
11748,
25064,
198,
6738,
3858,
1330,
17427,
36690,
10223,
198,
198,
361,
25064,
13,
9641,
62,
10951,
18189,
357,
18,
11,
838,
11,
657,
8,
393,
25064,
13,
9641,
62,
10951,
1279,
357,
18,
11,
807,
11,
657,
2599,
198... | 2.235484 | 2,790 |
#!/usr/bin/env python
# coding: utf-8
# In[57]:
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
get_ipython().run_line_magic('matplotlib', 'qt')
# In[58]:
# In[59]:
# In[60]:
# In[61]:
# In[62]:
# In[63]:
# In[64]:
# In[65]:
# In[66]:
# In[67]:
# In[68]:
# Import everything needed to edit/save/watch video clips
from moviepy.editor import VideoFileClip
from IPython.display import HTML
ret,mtx,dist=camera_calibration()
# In[69]:
output = 'output_videos/P2_Output.mp4'
#clip1 = VideoFileClip("project_video.mp4").subclip(38,43)
clip1 = VideoFileClip("project_video.mp4")
video_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
get_ipython().run_line_magic('time', 'video_clip.write_videofile(output, audio=False)')
video_clip.reader.close()
video_clip.audio.reader.close_proc()
# In[ ]:
output = 'output_videos/P2_Output_challenge_video.mp4'
##clip1 = VideoFileClip("project_video.mp4").subclip(0,1)
clip2 = VideoFileClip("challenge_video.mp4")
video_clip = clip2.fl_image(process_image) #NOTE: this function expects color images!!
get_ipython().run_line_magic('time', 'video_clip.write_videofile(output, audio=False)')
video_clip.reader.close()
video_clip.audio.reader.close_proc()
# In[ ]:
output = 'output_videos/P2_Output_harder_challenge_video.mp4'
##clip1 = VideoFileClip("project_video.mp4").subclip(0,1)
clip3 = VideoFileClip("harder_challenge_video.mp4")
video_clip = clip3.fl_image(process_image) #NOTE: this function expects color images!!
get_ipython().run_line_magic('time', 'video_clip.write_videofile(output, audio=False)')
video_clip.reader.close()
video_clip.audio.reader.close_proc()
# In[ ]:
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
2,
554,
58,
3553,
5974,
628,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
269,
85,
17,
198,
11748,
15095,
198,
11748,
2603,
29487,
8019,
... | 2.638681 | 667 |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Definition of error code and relative messages in debugger module."""
from mindinsight.utils.exceptions import MindInsightException
from mindinsight.debugger.common.exceptions.error_code import DebuggerErrors, DebuggerErrorMsg
class DebuggerParamTypeError(MindInsightException):
"""The parameter type error in debugger module."""
class DebuggerParamValueError(MindInsightException):
"""The parameter value error in debugger module."""
class DebuggerCreateWatchPointError(MindInsightException):
"""The error about creating watch point."""
class DebuggerUpdateWatchPointError(MindInsightException):
"""The error about updating watch point."""
class DebuggerDeleteWatchPointError(MindInsightException):
"""The error about deleting watch point."""
class DebuggerRecheckError(MindInsightException):
"""The error about deleting watch point."""
class DebuggerCompareTensorError(MindInsightException):
"""The error about comparing tensors."""
class DebuggerContinueError(MindInsightException):
"""The error about continuing debugging."""
class DebuggerPauseError(MindInsightException):
"""The error about pausing debugging."""
class DebuggerNodeNotInGraphError(MindInsightException):
"""The node is not in the graph."""
class DebuggerGraphNotExistError(MindInsightException):
"""The graph does not exist."""
class DebuggerStepNumError(MindInsightException):
"""The graph does not exist."""
class DebuggerTensorGraphError(MindInsightException):
"""The error about comparing tensors."""
class DebuggerTensorHitError(MindInsightException):
"""The error about comparing tensors."""
class DebuggerSetRecommendWatchpointsError(MindInsightException):
"""The set recommend watchpoints error in debugger module."""
class DebuggerConditionUnavailableError(MindInsightException):
"""The condition unavailable error in debugger module."""
class DebuggerServerRunningError(MindInsightException):
"""The server running error in debugger module."""
class DeviceIdUnregistered(MindInsightException):
"""The error of that the device id is unregister."""
class DebuggerModuleNotFoundError(MindInsightException):
"""The error of that the module is not found."""
class DebuggerSessionNumOverBoundError(MindInsightException):
"""The error of that the session number is out of bound."""
class DebuggerSessionNotFoundError(MindInsightException):
"""The error of that the session is not found."""
class DebuggerOnlineSessionUnavailable(MindInsightException):
"""The error of that the online session is unavailable."""
class DebuggerDownloadOverQueue(MindInsightException):
"""The error of that the download queue is oversize."""
class DebuggerDownloadTensorNotExist(MindInsightException):
"""The error of that the Tensor is not exist."""
class RankDirNotFound(MindInsightException):
"""The error of that the dumped rank directory is not found."""
class DebuggerJsonFileParseError(MindInsightException):
"""The error of that failed to parse the debugger json files."""
class DebuggerHistoryNotFoundError(MindInsightException):
"""The graph history and graph file doesn't match."""
class DebuggerHistoryValueError(MindInsightException):
"""The dumped step id is not belong to relative graph history."""
class DebuggerToolkitNotFoundError(MindInsightException):
"""The error of that the module is not found."""
| [
2,
15069,
12131,
12,
1238,
2481,
43208,
21852,
1766,
1539,
12052,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262... | 3.85514 | 1,070 |
from django.shortcuts import render
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
198
] | 4 | 9 |
###############################################################################
# Copyright 2015 University of Florida. All rights reserved.
# This file is part of the BlueButton.py project.
# Use of this source code is governed by the license found in the LICENSE file.
###############################################################################
"""
Parser for the CCDA procedures section
"""
from ...core import wrappers
from ... import documents
| [
29113,
29113,
7804,
4242,
21017,
198,
2,
15069,
1853,
2059,
286,
4744,
13,
1439,
2489,
10395,
13,
198,
2,
770,
2393,
318,
636,
286,
262,
4518,
21864,
13,
9078,
1628,
13,
198,
2,
5765,
286,
428,
2723,
2438,
318,
21825,
416,
262,
5964... | 5.493976 | 83 |
# encoding: utf-8
import pytest
from wellcomeml.ml.bert_vectorizer import BertVectorizer
EMBEDDING_TYPES = [
"mean_second_to_last",
"mean_last",
"sum_last",
"mean_last_four",
"pooler"
]
@pytest.fixture
@pytest.mark.bert
@pytest.mark.bert
@pytest.mark.bert
@pytest.mark.bert
@pytest.mark.skip("Reason: Build killed or stalls. Issue #200")
| [
2,
21004,
25,
3384,
69,
12,
23,
198,
11748,
12972,
9288,
198,
198,
6738,
880,
785,
368,
75,
13,
4029,
13,
4835,
62,
31364,
7509,
1330,
22108,
38469,
7509,
198,
198,
3620,
33,
1961,
35,
2751,
62,
9936,
47,
1546,
796,
685,
198,
220,... | 2.325 | 160 |
#!/usr/bin/env python
"""
Copyright (c) 2006-2013 sqlmap developers (http://sqlmap.org/)
See the file 'doc/COPYING' for copying permission
"""
import httplib
import re
import urlparse
import time
from lib.core.common import clearConsoleLine
from lib.core.common import dataToStdout
from lib.core.common import findPageForms
from lib.core.common import singleTimeWarnMessage
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.exception import SqlmapConnectionException
from lib.core.settings import CRAWL_EXCLUDE_EXTENSIONS
from lib.core.threads import getCurrentThreadData
from lib.core.threads import runThreads
from lib.request.connect import Connect as Request
from thirdparty.beautifulsoup.beautifulsoup import BeautifulSoup
from thirdparty.oset.pyoset import oset
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
15269,
357,
66,
8,
4793,
12,
6390,
44161,
8899,
6505,
357,
4023,
1378,
25410,
8899,
13,
2398,
34729,
198,
6214,
262,
2393,
705,
15390,
14,
34,
3185,
45761,
6,
329,
2... | 3.421488 | 242 |
#!/usr/bin/env python3
import os
import sys
if len(sys.argv) != 3:
print(' usage: script {template-tmp-file} {body-file}')
exit(1)
temp = sys.argv[1]
body = sys.argv[2]
if not os.path.isfile(temp) or not os.path.isfile(body):
print('Error: file does not exist')
exit(1)
f_temp = ''
f_body = ''
with open(temp, 'r') as f:
f_temp = f.read()
with open(body, 'r') as f:
f_body = f.read()
with open(body, 'w') as f:
f.write(f_temp.replace('<#_BODY_#>', f_body))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
11748,
28686,
198,
11748,
25064,
198,
198,
361,
18896,
7,
17597,
13,
853,
85,
8,
14512,
513,
25,
198,
220,
220,
220,
3601,
10786,
220,
8748,
25,
4226,
1391,
28243,
12,
22065,
12,... | 2.173333 | 225 |
from sklearn import metrics
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from common.services import print_service
from plots.distribution import plot_confusion_matrix
| [
6738,
1341,
35720,
1330,
20731,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
198,
6738,
1341,
35720,
13,
4164,
10466,
1330,
17923,
62,
13116,
198,
6738,
1341,
35720,
13,
19849,
62,
... | 3.829787 | 94 |
"""
Commandline interface.
"""
import os
import sys
from pyuploadtool import (
ReleaseMetadata,
ReleasesHostingProviderFactory,
update_metadata_with_user_specified_data,
BuildSystemFactory,
)
from pyuploadtool.logging import make_logger, setup_logging
setup_logging()
logger = make_logger("cli")
# TODO: use some real CLI library
artifacts = sys.argv[1:]
if not artifacts:
logger.error(f"Usage: {sys.argv[0]} <file> [<another file>...]")
sys.exit(1)
for artifact in artifacts:
if not os.path.exists(artifact):
raise FileNotFoundError(artifact)
logger.info("collecting release metadata")
metadata = get_metadata()
logger.info("updating metadata with user-specified values (if any)")
update_metadata_with_user_specified_data(metadata)
logger.info("build metadata: %s", metadata)
providers = get_release_hosting_providers()
if not providers:
# there's no point in considering "no providers found" a success
logger.error("could not detect any release hosting providers")
sys.exit(1)
logger.info("available release hosting providers: %s", ", ".join((p.name for p in providers)))
for provider in providers:
logger.info("creating release on hosting provider %s", provider.name)
provider.create_release(metadata, artifacts)
logger.info("done!")
| [
37811,
198,
21575,
1370,
7071,
13,
198,
37811,
198,
198,
11748,
28686,
198,
11748,
25064,
628,
198,
6738,
12972,
25850,
25981,
1330,
357,
198,
220,
220,
220,
13868,
9171,
14706,
11,
198,
220,
220,
220,
48691,
17932,
278,
29495,
22810,
1... | 3.08216 | 426 |
"""Miscellaneous utilities for working within an App Engine environment."""
| [
37811,
31281,
25673,
20081,
329,
1762,
1626,
281,
2034,
7117,
2858,
526,
15931,
198
] | 5.428571 | 14 |
# Generated by Django 2.0.13 on 2019-05-30 09:52
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
15,
13,
1485,
319,
13130,
12,
2713,
12,
1270,
7769,
25,
4309,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 2.875 | 32 |
import os, logging, json
from datadog import api, initialize
from datadog_api_client.v2 import ApiClient, ApiException, Configuration
from datadog_api_client.v2.api import logs_metrics_api
options = {
'api_key': os.getenv("DD_API_KEY"),
'app_key': os.getenv("DD_APP_KEY")
}
initialize(**options)
configuration = Configuration()
def dd_resource_remove_keys(dd_resource:dict, dd_resource_type)->dict:
"""
Remove unnacessary keys form Datadog resource definition dict.
Args:
. dd_resource: Datadog resource definitoin dictionary.
. dd_resource_type: Datadog resource type (monitor,dashboard,synthetics)
"""
if dd_resource_type == "monitor":
# Remove keys list.
rem_list = [ 'restricted_roles',
'deleted',
'multi',
'created',
'created_at',
'creator',
'org_id',
'modified',
'overall_state_modified',
'overall_state',
'matching_downtimes'
]
elif dd_resource_type == "dashboard":
rem_list= [
'modified_at',
'created_at',
'author_handle',
'author_name'
]
elif dd_resource_type == "synthetics":
rem_list = [
'created_at',
'modified_at'
]
#remove unneeded keys
[dd_resource.pop(key) for key in rem_list]
return dd_resource
def fetch_dd_resources(dd_resource_type):
"""
Fetch all Datadog resources of specific type.
Args:
. dd_resource_type: Datadog resource type(dashboard,monitor,synthetics)
"""
if dd_resource_type == "monitor":
return [
dd_resource_remove_keys(monitor, dd_resource_type)
for monitor in
api.Monitor.get_all()
]
elif dd_resource_type == "dashboard":
return [
dd_resource_remove_keys(api.Dashboard.get(dashboard["id"]), dd_resource_type)
for dashboard in
api.Dashboard.get_all()["dashboards"]
]
elif dd_resource_type == "synthetics":
return [
dd_resource_remove_keys(synthetics_test, dd_resource_type)
for synthetics_test in
api.Synthetics.get_all_tests()["tests"]
]
elif dd_resource_type == "logmetrics":
return fetch_dd_resources_api_client(dd_resource_type)
else:
return None
def fetch_dd_resource(dd_resource_id, dd_resource_type):
"""
Fetch Datadog resource.
Args:
. dd_resource_id: Datadog resource ID number.
. dd_resource_type: Datadog resource type(monitor,dashboard,synthetics)
"""
if dd_resource_type == "monitor":
return dd_resource_remove_keys(api.Monitor.get(dd_resource_id), dd_resource_type)
elif dd_resource_type == "dashboard":
return dd_resource_remove_keys(api.Dashboard.get(dd_resource_id), dd_resource_type)
elif dd_resource_type == "synthetics":
return None
elif dd_resource_type == "logmetrics":
return fetch_dd_resource_api_client(dd_resource_id,dd_resource_type)
else:
return None
def fetch_dd_resource_def(dd_resource_id, dd_resources, dd_resource_type)->dict:
"""
Fetch Datadog resource definition from list of resources.
Args:
. dd_resource_id: Datadog resource ID number.
. dd_resources: Datadog resources definitoin list.
. dd_resource_type: Datadog resource type(dashboard,monitor,synthetics)
"""
if dd_resource_type in ["synthetics"]:
id_key = "public_id"
else:
id_key = "id"
try:
return [dd_resource for dd_resource in dd_resources if dd_resource[id_key] == dd_resource_id][0]
except IndexError:
logging.exception(f'====> Datadog resource ID {dd_resource_id} not exist <====')
def fetch_dd_resource_ids(input_args, dd_resource_type):
"""
Fetch Datadog resource IDs according to the input arguments(JSON filename or list of IDs ),
and return it as a list in dictionary.
Args:
. input_args: JSON filename ot list of IDs.
. dd_resource_type: Datadog resource type.
"""
if input_args.endswith(".json"):
"""
If input is a json file, we will check if file exist and read it.
"""
if not os.path.isfile(input_args):
raise FileNotFoundError(f'Input file "{input_args}" not found!!!')
with open(input_args, "r") as f:
dd_resource_ids = json.load(f)
logging.debug(f'Datadog resources IDs => \n {dd_resource_ids}')
return dd_resource_ids
else:
"""
If input is a list of ID numbers, we will extract ID numbers into dict
and return it.
"""
dd_resource_ids = {}
ids = input_args.split(',')
if dd_resource_type in ["monitor"]:
if not all(id.isnumeric() for id in ids):
raise ValueError(f'Invalid Ids number input.')
dd_resource_ids[dd_resource_type] = [int(id) for id in input_args.split(',')]
else:
dd_resource_ids[dd_resource_type] = input_args.split(',')
logging.debug(f'Datadog resources IDs => \n {dd_resource_ids}')
return dd_resource_ids
# def object_to_dict(input_object:dict)->dict:
# """
# Convert object to dict.
# """
# dictionary = {}
# print(input_object)
# for key in input_object.keys():
# print(key, type(input_object[key]).__name__)
# if type(input_object[key]).__name__ not in ['list', 'dict', 'str', 'int', 'float', 'bool', 'NoneType', 'set', 'tuple']:
# # print(input_object[key].__dict__)
# dictionary[key] = object_to_dict(input_object[key].__dict__)
# elif isinstance(input_object[key],dict):
# print(input_object[key], "IN DICT FUNC.....")
# # for k in input_object[key].keys():
# # dictionary[key] = object_to_dict(input_object[k])
# # print(vars(input_object[key]))
# # return dict((key, todict(val)) for key, val in obj.items())
# dictionary[key] = dict((k,v) for k, v in input_object[key].items())
# elif type(input_object[key]).__name__ in ['NoneType']:
# continue
# else:
# dictionary[key] = input_object[key]
# # return input_object[key]
# # continue
# return dictionary
| [
11748,
28686,
11,
18931,
11,
33918,
198,
6738,
4818,
324,
519,
1330,
40391,
11,
41216,
198,
6738,
4818,
324,
519,
62,
15042,
62,
16366,
13,
85,
17,
1330,
5949,
72,
11792,
11,
5949,
72,
16922,
11,
28373,
198,
6738,
4818,
324,
519,
62... | 2.199595 | 2,966 |
from string import ascii_lowercase
import hangman.game
import hangman
WIN_LOG = """
Guess a letter:
a
Missed, mistake 1 out of 5.
The word: *****
Guess a letter:
b
Missed, mistake 2 out of 5.
The word: *****
Guess a letter:
e
Hit!
The word: *e***
Guess a letter:
o
Hit!
The word: *e**o
Guess a letter:
l
Hit!
The word: *ello
Guess a letter:
h
Hit!
The word: hello
You won!
"""
LOSE_LOG = """
Guess a letter:
x
Missed, mistake 1 out of 5.
The word: ******
Guess a letter:
y
Missed, mistake 2 out of 5.
The word: ******
Guess a letter:
z
Missed, mistake 3 out of 5.
The word: ******
Guess a letter:
n
Hit!
The word: **n*n*
Guess a letter:
m
Missed, mistake 4 out of 5.
The word: **n*n*
Guess a letter:
o
Missed, mistake 5 out of 5.
The word: **n*n*
You lost!
"""
| [
6738,
4731,
1330,
355,
979,
72,
62,
21037,
7442,
198,
11748,
8181,
805,
13,
6057,
198,
11748,
8181,
805,
628,
198,
198,
37620,
62,
25294,
796,
37227,
198,
8205,
408,
257,
3850,
25,
198,
64,
198,
17140,
276,
11,
7457,
352,
503,
286,
... | 2.350148 | 337 |
#!/usr/bin/env python2
"""
brain.py
Zhiang Chen
4/18/2018
"""
import argparse
import torch
import torch.nn as nn
from torch.optim import Adam
from torch.autograd import Variable
import torch.nn.functional as F
from relationNet import relationNet
from rl.ddpg import DDPG
from rl.naf import Policy
from rl.normalized_actions import NormalizedActions
from rl.ounoise import OUNoise
from rl.replay_memory import ReplayMemory, Transition
"""GLOBAL ARGUMENT"""
MSELoss = nn.MSELoss()
""" PARAMETERS TO TUNE - all from relationNet.py """
# FC2LayersShortcut architecture
# ResNet architecture
# activation function of MLPs output
# input of relationNet
# input of NAF (relation effect, state, goal), dim = 10 + 5 + 2
n_in = 3
n_hidden = 16
n_out = 32
fc_param = (n_in, n_hidden, n_out)
hidden_size = 128
num_inputs = 18
action_space = 2
rl_param = (hidden_size, num_inputs, action_space)
""" Parameters for testing """
parser = argparse.ArgumentParser(description='RelationNet Hyperparameters')
parser.add_argument('--gamma', type=float, default=0.99, metavar='G',
help='discount factor for reward (default: 0.99)')
parser.add_argument('--tau', type=float, default=0.001, metavar='G',
help='discount factor for model (default: 0.001)')
args = parser.parse_args()
if __name__ == '__main__':
torch.manual_seed(0)
agent = RLAgent(args.gamma, args.tau, fc_param, rl_param)
e = torch.randn(4,n_in)
s = torch.randn(1,5)
g = torch.randn(1,2)
print "select action: "
print agent.select_action(e, s, g)
data = batch()
agent.update_parameters(data)
agent.save_model()
agent2 = RLAgent(args.gamma, args.tau, fc_param, rl_param)
agent2.load_model()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
17,
198,
198,
37811,
198,
27825,
13,
9078,
198,
57,
5303,
648,
12555,
198,
19,
14,
1507,
14,
7908,
198,
37811,
198,
11748,
1822,
29572,
198,
11748,
28034,
198,
11748,
28034,
13,
20471,
355,... | 2.606607 | 666 |
values = [[10, 20], [30, 40, 50, 60, 70]]
for i in range(len(values)):
print(len(values[i]), values[i]) | [
27160,
796,
16410,
940,
11,
1160,
4357,
685,
1270,
11,
2319,
11,
2026,
11,
3126,
11,
4317,
11907,
198,
198,
1640,
1312,
287,
2837,
7,
11925,
7,
27160,
8,
2599,
198,
220,
220,
220,
3601,
7,
11925,
7,
27160,
58,
72,
46570,
3815,
58,... | 2.347826 | 46 |
import pytest
import piicatcher
from piicatcher.api import ScanTypeEnum, list_detectors, scan_database
@pytest.mark.skip
| [
11748,
12972,
9288,
198,
198,
11748,
31028,
291,
34734,
198,
6738,
31028,
291,
34734,
13,
15042,
1330,
20937,
6030,
4834,
388,
11,
1351,
62,
15255,
478,
669,
11,
9367,
62,
48806,
628,
628,
198,
31,
9078,
9288,
13,
4102,
13,
48267,
198... | 3 | 42 |
# coding: utf-8
#
# Copyright 2017 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for core.domain.acl_decorators."""
from core.controllers import acl_decorators
from core.controllers import base
from core.domain import question_services
from core.domain import rights_manager
from core.domain import skill_services
from core.domain import suggestion_services
from core.domain import topic_domain
from core.domain import topic_services
from core.domain import user_services
from core.tests import test_utils
import feconf
import webapp2
import webtest
class PlayExplorationDecoratorTests(test_utils.GenericTestBase):
"""Tests for play exploration decorator."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
class PlayCollectionDecoratorTests(test_utils.GenericTestBase):
"""Tests for play collection decorator."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
published_col_id = 'col_id_1'
private_col_id = 'col_id_2'
class EditCollectionDecoratorTests(test_utils.GenericTestBase):
"""Tests for can_edit_collection decorator."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
published_col_id = 'col_id_1'
private_col_id = 'col_id_2'
class CreateExplorationDecoratorTests(test_utils.GenericTestBase):
"""Tests for can_create_exploration decorator."""
username = 'banneduser'
user_email = 'user@example.com'
class CreateCollectionDecoratorTests(test_utils.GenericTestBase):
"""Tests for can_create_collection decorator."""
username = 'collectioneditor'
user_email = 'user@example.com'
class AccessCreatorDashboardTests(test_utils.GenericTestBase):
"""Tests for can_access_creator_dashboard decorator."""
username = 'banneduser'
user_email = 'user@example.com'
class CommentOnFeedbackThreadTests(test_utils.GenericTestBase):
"""Tests for can_comment_on_feedback_thread decorator."""
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
class CreateFeedbackThreadTests(test_utils.GenericTestBase):
"""Tests for can_create_feedback_thread decorator."""
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
class ViewFeedbackThreadTests(test_utils.GenericTestBase):
"""Tests for can_view_feedback_thread decorator."""
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
class ManageEmailDashboardTests(test_utils.GenericTestBase):
"""Tests for can_manage_email_dashboard decorator."""
query_id = 'query_id'
class RateExplorationTests(test_utils.GenericTestBase):
"""Tests for can_rate_exploration decorator."""
username = 'user'
user_email = 'user@example.com'
exp_id = 'exp_id'
class FlagExplorationTests(test_utils.GenericTestBase):
"""Tests for can_flag_exploration decorator."""
username = 'user'
user_email = 'user@example.com'
exp_id = 'exp_id'
class SubscriptionToUsersTests(test_utils.GenericTestBase):
"""Tests for can_subscribe_to_users decorator."""
username = 'user'
user_email = 'user@example.com'
class TranslateExplorationTests(test_utils.GenericTestBase):
"""Tests for can_translate_exploration decorator."""
role = rights_manager.ROLE_TRANSLATOR
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banneduser@example.com'
published_exp_id_1 = 'exp_1'
published_exp_id_2 = 'exp_2'
private_exp_id_1 = 'exp_3'
private_exp_id_2 = 'exp_4'
class EditExplorationTests(test_utils.GenericTestBase):
"""Tests for can_edit_exploration decorator."""
username = 'banneduser'
user_email = 'user@example.com'
published_exp_id = 'exp_0'
private_exp_id = 'exp_1'
class ManageOwnProfileTests(test_utils.GenericTestBase):
"""Tests for decorator can_manage_own_profile."""
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
username = 'user'
user_email = 'user@example.com'
class DeleteExplorationTests(test_utils.GenericTestBase):
"""Tests for can_delete_exploration decorator."""
private_exp_id = 'exp_0'
published_exp_id = 'exp_1'
class SuggestChangesToExplorationTests(test_utils.GenericTestBase):
"""Tests for can_suggest_changes_to_exploration decorator."""
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banned@example.com'
exploration_id = 'exp_id'
class SuggestChangesDecoratorsTests(test_utils.GenericTestBase):
"""Tests for can_suggest_changes decorator."""
username = 'user'
user_email = 'user@example.com'
banned_username = 'banneduser'
banned_user_email = 'banned@example.com'
exploration_id = 'exp_id'
class ResubmitSuggestionDecoratorsTests(test_utils.GenericTestBase):
"""Tests for can_resubmit_suggestion decorator."""
owner_username = 'owner'
owner_email = 'owner@example.com'
author_username = 'author'
author_email = 'author@example.com'
username = 'user'
user_email = 'user@example.com'
TARGET_TYPE = 'exploration'
SUGGESTION_TYPE = 'edit_exploration_state_content'
exploration_id = 'exp_id'
target_version_id = 1
change_dict = {
'cmd': 'edit_state_property',
'property_name': 'content',
'state_name': 'Introduction',
'new_value': ''
}
class PublishExplorationTests(test_utils.GenericTestBase):
"""Tests for can_publish_exploration decorator."""
private_exp_id = 'exp_0'
public_exp_id = 'exp_1'
class ModifyExplorationRolesTests(test_utils.GenericTestBase):
"""Tests for can_modify_exploration_roles decorator."""
private_exp_id = 'exp_0'
class CollectionPublishStatusTests(test_utils.GenericTestBase):
"""Tests can_publish_collection and can_unpublish_collection decorators."""
user_email = 'user@example.com'
username = 'user'
published_exp_id = 'exp_id_1'
private_exp_id = 'exp_id_2'
published_col_id = 'col_id_1'
private_col_id = 'col_id_2'
class AccessLearnerDashboardDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_access_learner_dashboard."""
user = 'user'
user_email = 'user@example.com'
banned_user = 'banneduser'
banned_user_email = 'banned@example.com'
class EditTopicDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_edit_topic."""
manager_username = 'topicmanager'
manager_email = 'topicmanager@example.com'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
topic_id = 'topic_1'
class EditSkillDecoratorTests(test_utils.GenericTestBase):
"""Tests permissions for accessing the skill editor."""
second_admin_username = 'adm2'
second_admin_email = 'adm2@example.com'
manager_username = 'topicmanager'
manager_email = 'topicmanager@example.com'
viewer_username = 'viewer'
viewer_email = 'viewer@example.com'
skill_id = '1'
class EditQuestionDecoratorTests(test_utils.GenericTestBase):
"""Tests the decorator can_edit_question."""
question_id = 'question_id'
| [
2,
19617,
25,
3384,
69,
12,
23,
198,
2,
198,
2,
15069,
2177,
383,
9385,
544,
46665,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743... | 2.75229 | 2,838 |
def gcd(a: int, b: int) -> int:
'''
Greates common divisor of two integers(Euclidean algorithm)
Parameters
----------
a, b : int
The algorithm will use abslute values
Example: gcd(-3, 9) == gcd(3, 9)
'''
a, b = abs(a), abs(b)
while b != 0:
b, a = a % b, b
return a
if __name__ == '__main__':
print('GCD({}, {}) = {}'.format(9, 12, gcd(9, 12)))
print('GCD({}, {}) = {}'.format(-3, 32, gcd(-3, 32)))
print('GCD({}, {}) = {}'.format(14, 21, gcd(14, 21)))
print('GCD({}, {}) = {}'.format(3, 0, gcd(3, 0)))
print('GCD({}, {}) = {}'.format(0, 0, gcd(0, 0)))
| [
4299,
308,
10210,
7,
64,
25,
493,
11,
275,
25,
493,
8,
4613,
493,
25,
198,
220,
220,
220,
705,
7061,
198,
220,
220,
220,
3878,
274,
2219,
2659,
271,
273,
286,
734,
37014,
7,
36,
36616,
485,
272,
11862,
8,
628,
220,
220,
220,
4... | 2.058252 | 309 |
# 23. Merge k Sorted Lists
# ttungl@gmail.com
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
| [
2,
2242,
13,
39407,
479,
311,
9741,
44968,
198,
2,
256,
83,
2150,
75,
31,
14816,
13,
785,
198,
1303,
30396,
329,
1702,
306,
12,
25614,
1351,
13,
198,
2,
1398,
7343,
19667,
7,
15252,
2599,
198,
2,
220,
220,
220,
220,
825,
11593,
... | 2.184783 | 92 |
"""367. Valid Perfect Square"""
| [
37811,
27824,
13,
48951,
16374,
9276,
37811,
198
] | 4 | 8 |
import os
from raven.contrib.tornado import AsyncSentryClient
from tornado.httpserver import HTTPServer
from tornado.options import parse_command_line
from tornado import web
from tornado.ioloop import IOLoop
from urllib.parse import urlparse
IOLoop.configure('tornado.platform.asyncio.AsyncIOLoop')
import momoko
from .application import handlers as send_handlers
from .base.base_handlers import PingHandler
ioloop = IOLoop.instance()
application = web.Application([
(r'/drafts/', send_handlers.ImportDraftsListHandler),
(r'/ping/', PingHandler),
], debug=True)
port = int(os.environ.get('PORT', 8000))
defaultdburl = 'postgres://vagrant:dbpass@localhost:5432/project_prj_db'
dburl = urlparse(os.environ.get("DATABASE_URL", defaultdburl))
if __name__ == '__main__':
parse_command_line()
# application.sentry_client = AsyncSentryClient(
# )
application.db = momoko.Pool(
# connection_factory=psycopg2.extras.LoggingConnection,
dsn='dbname=%(database)s user=%(user)s password=%(password)s host=%(host)s port=%(port)i' % dict(
database=dburl.path[1:],
user=dburl.username,
password=dburl.password,
host=dburl.hostname,
port=int(dburl.port)),
size=1,
max_size=5,
ioloop=ioloop,
)
future = application.db.connect()
ioloop.add_future(future, lambda f: ioloop.stop())
ioloop.start()
future.result()
http_server = HTTPServer(application)
http_server.listen(port, '0.0.0.0')
ioloop.start()
| [
11748,
28686,
198,
198,
6738,
37735,
13,
3642,
822,
13,
45910,
4533,
1330,
1081,
13361,
50,
13000,
11792,
198,
198,
6738,
33718,
13,
5450,
18497,
1330,
38288,
18497,
198,
6738,
33718,
13,
25811,
1330,
21136,
62,
21812,
62,
1370,
198,
67... | 2.44795 | 634 |
from datetime import date
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse
from silverstrike.models import Account, Transaction
from silverstrike.tests import create_transaction
| [
6738,
4818,
8079,
1330,
3128,
198,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
6738,
42625,
14208,
13,
9288,
1330,
6208,
20448,
198,
6738,
42625,
14208,
13,
6371,
82,
1330,
9575,
198,
198,
6738,
8465,... | 4.016667 | 60 |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import os
import math
import re
import numpy as np
from .. import Function
from ..ops import times, sequence, as_block, element_select
from ..ops.tests.ops_test_utils import cntk_device
from ..utils import one_hot
from ..trainer import *
from ..training_session import *
from ..learner import *
from .. import cross_entropy_with_softmax, classification_error, parameter, \
input_variable, times, plus, reduce_sum, Axis, cntk_py
from cntk.io import MinibatchSource, CTFDeserializer, StreamDef, StreamDefs, FULL_DATA_SWEEP, INFINITELY_REPEAT
import pytest
input_dim = 69
ctf_data = '''\
0 |S0 3:1 |# <s> |S1 3:1 |# <s>
0 |S0 4:1 |# A |S1 32:1 |# ~AH
0 |S0 5:1 |# B |S1 36:1 |# ~B
0 |S0 4:1 |# A |S1 31:1 |# ~AE
0 |S0 7:1 |# D |S1 38:1 |# ~D
0 |S0 12:1 |# I |S1 47:1 |# ~IY
0 |S0 1:1 |# </s> |S1 1:1 |# </s>
2 |S0 60:1 |# <s> |S1 3:1 |# <s>
2 |S0 61:1 |# A |S1 32:1 |# ~AH
3 |S0 60:1 |# <s> |S1 3:1 |# <s>
3 |S0 61:1 |# A |S1 32:1 |# ~AH
4 |S0 60:1 |# <s> |S1 3:1 |# <s>
4 |S0 61:1 |# A |S1 32:1 |# ~AH
5 |S0 60:1 |# <s> |S1 3:1 |# <s>
5 |S0 61:1 |# A |S1 32:1 |# ~AH
6 |S0 60:1 |# <s> |S1 3:1 |# <s>
6 |S0 61:1 |# A |S1 32:1 |# ~AH
7 |S0 60:1 |# <s> |S1 3:1 |# <s>
7 |S0 61:1 |# A |S1 32:1 |# ~AH
8 |S0 60:1 |# <s> |S1 3:1 |# <s>
8 |S0 61:1 |# A |S1 32:1 |# ~AH
9 |S0 60:1 |# <s> |S1 3:1 |# <s>
9 |S0 61:1 |# A |S1 32:1 |# ~AH
10 |S0 60:1 |# <s> |S1 3:1 |# <s>
10 |S0 61:1 |# A |S1 32:1 |# ~AH
'''
| [
2,
15069,
357,
66,
8,
5413,
13,
1439,
2489,
10395,
13,
198,
198,
2,
49962,
739,
262,
17168,
5964,
13,
4091,
38559,
24290,
13,
9132,
2393,
287,
262,
1628,
6808,
198,
2,
329,
1336,
5964,
1321,
13,
198,
2,
38093,
25609,
28,
198,
198,... | 1.93302 | 851 |
import sys
from streamlit import cli as stcli
if __name__ == "__main__":
sys.argv = [
"streamlit",
"run",
"hello.py",
"--global.developmentMode=false",
"--server.headless=true"
]
sys.exit(stcli.main())
| [
11748,
25064,
198,
6738,
4269,
18250,
1330,
537,
72,
355,
336,
44506,
198,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,
25064,
13,
853,
85,
796,
685,
198,
220,
220,
220,
220,
220,
220,
220,
366,
... | 2.073171 | 123 |
import torch
import numpy as np
| [
11748,
28034,
198,
11748,
299,
32152,
355,
45941,
628,
628
] | 3.5 | 10 |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
python __future__ ๆจกๅ
- ๆฏไธบไบๅจ่็ๆฌ็pythonไธญๅ
ผ้กพๆฐ็นๆง็ไธ็งๆนๆณ
- ไปpython2.1ๅผๅงไปฅๅ๏ผๅฝไธไธชๆฐ็่ฏญ่จ็นๆง้ฆๆฌกๅบ็ฐๅจๅ่ก็ไธญๆถๅ๏ผๅฆๆ่ฏฅๆฐ็นๆงไธไปฅๅๆง็ๆฌpythonไธๅ
ผๅฎน, ๅ่ฏฅ็นๆงๅฐไผ่ขซ้ป่ฎค็ฆ็จใๅฆๆๆณๅฏ็จ่ฟไธชๆฐ็นๆง, ๅๅฟ
้กปไฝฟ็จ "from __future__ import *" ่ฏญๅฅ่ฟ่กๅฏผๅ
ฅ
"""
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
1772,
25,
1263,
6513,
692,
16115,
628,
198,
198,
37811,
198,
29412,
11593,
37443,
834,
10545,
101,
94,
161,
251,
... | 0.961977 | 263 |
#!/usr/bin/python3
import os;
import nester;
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
11748,
28686,
26,
198,
11748,
299,
7834,
26,
628,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
197,
12417,
3419,
628,
628
] | 2.388889 | 36 |
import torch.nn.functional as F
import torch
from torch import nn, optim
kernel_size = 5
padding = 2
channel_sizes = [1,6,16]
| [
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
28034,
198,
6738,
28034,
1330,
299,
77,
11,
6436,
198,
198,
33885,
62,
7857,
796,
642,
198,
39231,
796,
362,
198,
17620,
62,
82,
4340,
796,
685,
16,
11,
21,
11,
1433,
60,
1... | 2.886364 | 44 |
import jwt
from django.conf import settings
import base64
from datetime import datetime, timedelta
if __name__ == "__main__":
token = get_new_json_web_token({'username': 'RR', 'password': 'RR@123'})
print(token)
| [
11748,
474,
46569,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
11748,
2779,
2414,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
628,
198,
361,
11593,
3672,
834,
6624,
366,
834,
12417,
834,
1298,
198,
220,
220,
220,... | 2.883117 | 77 |
from itertools import chain, groupby
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from fastapi import APIRouter, FastAPI
from fastapi.routing import APIRoute
from fastapi.templating import Jinja2Templates
from openapi_schema_pydantic import OpenAPI
from pkg_resources import resource_filename
from pydantic import BaseModel
from semantic_version import Version
from starlette.requests import Request
from starlette.responses import HTMLResponse
from .changelog import compare_openapi
from .logger import logger
__all__ = ["VersionRouter", "FastAPIVersioned"]
templates = Jinja2Templates(directory=resource_filename(__name__, "resources"))
| [
6738,
340,
861,
10141,
1330,
6333,
11,
1448,
1525,
198,
6738,
19720,
1330,
4377,
11,
4889,
540,
11,
360,
713,
11,
7343,
11,
32233,
11,
309,
29291,
11,
4479,
198,
198,
6738,
3049,
15042,
1330,
3486,
4663,
39605,
11,
12549,
17614,
198,
... | 3.560847 | 189 |
"""
Train a naive Bayes classifier from the IMDb reviews data set
"""
from __future__ import division
from collections import defaultdict
from math import log, exp
from functools import partial
import re
import os
import random
import pickle
import pylab
handle = open("trained", "rb")
sums, positive, negative = pickle.load(handle)
def negate_sequence(text):
"""
Detects negations and transforms negated words into "not_" form.
"""
negation = False
delims = "?.,!:;"
result = []
words = text.split()
for word in words:
stripped = word.strip(delims).lower()
result.append("not_" + stripped if negation else stripped)
if any(neg in word for neg in frozenset(["not", "n't", "no"])):
negation = not negation
if any(c in word for c in delims):
negation = False
return result
def mutual_info(word):
"""
Finds the mutual information of a word with the training set.
"""
cnt_p, cnt_n = sums['pos'], sums['neg']
total = cnt_n + cnt_p
cnt_x = positive[word] + negative[word]
if (cnt_x == 0):
return 0
cnt_x_p, cnt_x_n = positive[word], negative[word]
I = [[0]*2]*2
I[0][0] = (cnt_n - cnt_x_n) * log ((cnt_n - cnt_x_n) * total / cnt_x / cnt_n) / total
I[0][1] = cnt_x_n * log ((cnt_x_n) * total / (cnt_x * cnt_n)) / total if cnt_x_n > 0 else 0
I[1][0] = (cnt_p - cnt_x_p) * log ((cnt_p - cnt_x_p) * total / cnt_x / cnt_p) / total
I[1][1] = cnt_x_p * log ((cnt_x_p) * total / (cnt_x * cnt_p)) / total if cnt_x_p > 0 else 0
return sum(map(sum, I))
def feature_selection_experiment(test_set):
"""
Select top k features. Vary k from 1000 to 50000 and plot data
"""
keys = positive.keys() + negative.keys()
sorted_keys = sorted(keys, cmp=lambda x, y: mutual_info(x) > mutual_info(y)) # Sort descending by mutual info
features = set()
num_features, accuracy = [], []
print sorted_keys[-100:]
for k in xrange(0, 50000, 1000):
features |= set(sorted_keys[k:k+1000])
preprocessor = partial(reduce_features, features)
correct = 0
for text, label in test_set:
correct += classify(text) == label
num_features.append(k+1000)
accuracy.append(correct / len(test_set))
print negate_sequence("Is this a good idea")
print reduce_features(features, "Is this a good idea")
pylab.plot(num_features, accuracy)
pylab.show()
def get_paths():
"""
Returns supervised paths annotated with their actual labels.
"""
posfiles = [("./aclImdb/test/pos/" + f, True) for f in os.listdir("./aclImdb/test/pos/")[:500]]
negfiles = [("./aclImdb/test/neg/" + f, False) for f in os.listdir("./aclImdb/test/neg/")[:500]]
return posfiles + negfiles
if __name__ == '__main__':
print mutual_info('good')
print mutual_info('bad')
print mutual_info('incredible')
print mutual_info('jaskdhkasjdhkjincredible')
feature_selection_experiment(get_paths())
| [
37811,
198,
44077,
257,
24354,
4696,
274,
1398,
7483,
422,
262,
8959,
43832,
8088,
1366,
900,
198,
37811,
198,
6738,
11593,
37443,
834,
1330,
7297,
198,
6738,
17268,
1330,
4277,
11600,
198,
6738,
10688,
1330,
2604,
11,
1033,
198,
6738,
... | 2.420336 | 1,249 |
import argparse
import os
import time
import warnings
from typing import Optional, Tuple, Union
import torch
import torchaudio as ta
from loguru import logger
from numpy import ndarray
from torch import Tensor, nn
from torch.nn import functional as F
from torchaudio.backend.common import AudioMetaData
from df import config
from df.checkpoint import load_model as load_model_cp
from df.logger import init_logger, warn_once
from df.model import ModelParams
from df.modules import get_device
from df.utils import as_complex, as_real, download_file, get_cache_dir, get_norm_alpha, resample
from libdf import DF, erb, erb_norm, unit_norm
def init_df(
    model_base_dir: Optional[str] = None,
    post_filter: bool = False,
    log_level: str = "INFO",
    log_file: Optional[str] = "enhance.log",
    config_allow_defaults: bool = False,
    epoch: Union[str, int, None] = "best",
    default_model: str = "DeepFilterNet2",
) -> Tuple[nn.Module, DF, str]:
    """Initializes and loads config, model and deep filtering state.

    Args:
        model_base_dir (str): Path to the model directory containing checkpoint and config. If None,
            load the pretrained DeepFilterNet2 model.
        post_filter (bool): Enable post filter for some minor, extra noise reduction.
        log_level (str): Control amount of logging. Defaults to `INFO`.
        log_file (str): Optional log file name. None disables it. Defaults to `enhance.log`.
        config_allow_defaults (bool): Whether to allow initializing new config values with defaults.
        epoch (str): Checkpoint epoch to load. Options are `best`, `latest`, `<int>`, and `none`.
            `none` disables checkpoint loading. Defaults to `best`.

    Returns:
        model (nn.Module): Initialized model, moved to GPU if available.
        df_state (DF): Deep filtering state for stft/istft/erb
        suffix (str): Suffix based on the model name. This can be used for saving the enhanced
            audio.
    """
    # Optional debugging helper; silently skipped when `icecream` is absent.
    try:
        from icecream import ic, install
        ic.configureOutput(includeContext=True)
        install()
    except ImportError:
        pass
    # A bare model name ("DeepFilterNet"/"DeepFilterNet2") selects a bundled
    # pretrained model, downloaded into the cache directory on first use.
    use_default_model = False
    if model_base_dir == "DeepFilterNet":
        default_model = "DeepFilterNet"
        use_default_model = True
    elif model_base_dir == "DeepFilterNet2":
        use_default_model = True
    if model_base_dir is None or use_default_model:
        use_default_model = True
        model_base_dir = maybe_download_model(default_model)
    if not os.path.isdir(model_base_dir):
        raise NotADirectoryError("Base directory not found at {}".format(model_base_dir))
    # Log file lives inside the model directory (unless disabled with None).
    log_file = os.path.join(model_base_dir, log_file) if log_file is not None else None
    init_logger(file=log_file, level=log_level, model=model_base_dir)
    if use_default_model:
        logger.info(f"Using {default_model} model at {model_base_dir}")
    # Load the training config stored next to the checkpoints; `allow_reload`
    # lets a repeated init_df() call replace a previously loaded config.
    config.load(
        os.path.join(model_base_dir, "config.ini"),
        config_must_exist=True,
        allow_defaults=config_allow_defaults,
        allow_reload=True,
    )
    if post_filter:
        config.set("mask_pf", True, bool, ModelParams().section)
        logger.info("Running with post-filter")
    p = ModelParams()
    # STFT/ERB deep-filtering state shared between the model and enhancement.
    df_state = DF(
        sr=p.sr,
        fft_size=p.fft_size,
        hop_size=p.hop_size,
        nb_bands=p.nb_erb,
        min_nb_erb_freqs=p.min_nb_freqs,
    )
    checkpoint_dir = os.path.join(model_base_dir, "checkpoints")
    # epoch=None or the string "none" (case-insensitive) disables checkpoint loading.
    load_cp = epoch is not None and not (isinstance(epoch, str) and epoch.lower() == "none")
    if not load_cp:
        checkpoint_dir = None
    try:
        mask_only = config.get("mask_only", cast=bool, section="train")
    except KeyError:
        mask_only = False
    model, epoch = load_model_cp(checkpoint_dir, df_state, epoch=epoch, mask_only=mask_only)
    if (epoch is None or epoch == 0) and load_cp:
        logger.error("Could not find a checkpoint")
        # NOTE(review): terminates the whole process; fine for CLI usage, but a
        # library caller might prefer an exception here -- confirm intent.
        exit(1)
    logger.debug(f"Loaded checkpoint from epoch {epoch}")
    model = model.to(get_device())
    # Set suffix to model name
    suffix = os.path.basename(os.path.abspath(model_base_dir))
    if post_filter:
        suffix += "_pf"
    logger.info("Model loaded")
    return model, df_state, suffix
def load_audio(
    file: str, sr: Optional[int], verbose=True, **kwargs
) -> Tuple[Tensor, AudioMetaData]:
    """Load an audio file via torchaudio, optionally resampling it.

    Args:
        file (str): Path to an audio file.
        sr (int): Optionally resample audio to specified target sampling rate.
        **kwargs: Passed to torchaudio.load(). Depends on the backend. The resample method
            may be set via `method` which is passed to `resample()`.

    Returns:
        audio (Tensor): Audio tensor of shape [C, T], if channels_first=True (default).
        info (AudioMetaData): Meta data of the original audio file. Contains the original sr.
    """
    # Split the kwargs: `format` is also forwarded to ta.info(), `method`
    # belongs to resample() only and must not reach ta.load().
    info_kwargs = {"format": kwargs["format"]} if "format" in kwargs else {}
    resample_kwargs = {"method": kwargs.pop("method")} if "method" in kwargs else {}
    info: AudioMetaData = ta.info(file, **info_kwargs)
    audio, orig_sr = ta.load(file, **kwargs)
    # Only resample when a target rate was given and it actually differs.
    if sr is not None and orig_sr != sr:
        if verbose:
            warn_once(
                f"Audio sampling rate does not match model sampling rate ({orig_sr}, {sr}). "
                "Resampling..."
            )
        audio = resample(audio, orig_sr, sr, **resample_kwargs)
    return audio, info
@torch.no_grad()  # NOTE(review): no tensor ops here; presumably a leftover decorator -- kept for parity.
def maybe_download_model(name: str = "DeepFilterNet") -> str:
    """Download a DeepFilterNet model into the cache directory if not present.

    Args:
        - name (str): Model name. Currently needs to be one of `[DeepFilterNet, DeepFilterNet2]`.

    Returns:
        - base_dir: Return the model base directory as string.
    """
    cache_dir = get_cache_dir()
    # Accept "<name>.zip" as an alias for "<name>".
    if name.endswith(".zip"):
        name = name.removesuffix(".zip")
    model_dir = os.path.join(cache_dir, name)
    # Already extracted? A config file or a checkpoints dir counts as cached.
    have_config = os.path.isfile(os.path.join(model_dir, "config.ini"))
    have_checkpoints = os.path.isdir(os.path.join(model_dir, "checkpoints"))
    if have_config or have_checkpoints:
        return model_dir
    os.makedirs(cache_dir, exist_ok=True)
    url = f"https://github.com/Rikorose/DeepFilterNet/raw/main/models/{name}"
    download_file(url + ".zip", cache_dir, extract=True)
    return model_dir
# Script entry point.
# NOTE(review): `run` is not defined or imported in the visible portion of this
# module -- presumably defined elsewhere in the file; verify it resolves.
if __name__ == "__main__":
    run()
| [
11748,
1822,
29572,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
14601,
198,
6738,
19720,
1330,
32233,
11,
309,
29291,
11,
4479,
198,
198,
11748,
28034,
198,
11748,
28034,
24051,
355,
20486,
198,
6738,
2604,
14717,
1330,
49706,
198,
67... | 2.526505 | 2,509 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
util
--------------------------------
Util methods for functional tests
"""
import operator
import os
def pick_flavor(flavors):
    """Given a flavor list pick the smallest one."""
    # The SHADE_FLAVOR environment variable overrides selection entirely
    # (used e.g. against rax, which requires performance flavors for
    # boot-from-volume). Returns None when the named flavor is absent.
    override = os.environ.get('SHADE_FLAVOR')
    if override:
        for candidate in flavors:
            if candidate.name == override:
                return candidate
        return None
    # Otherwise prefer the smallest 'performance' flavor by RAM, falling
    # back to the smallest flavor overall (None when the list is empty).
    by_ram = sorted(flavors, key=operator.attrgetter('ram'))
    for candidate in by_ram:
        if 'performance' in candidate.name:
            return candidate
    return by_ram[0] if by_ram else None
| [
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
345,
743,
198,
2,
407,
779,
428,
2393,
2845,
287,
11846,
351,
262,
13789,
13,
921,
743,
7330,
198,
2,
257,
4866,
286,
262,
13789,
379,
198,
2,... | 2.973034 | 445 |
from src.datasets.util import read_files, get_vocab, pad_sequences, text_to_rank, splits, clean_doc, splitsNonInt
from sklearn.model_selection import StratifiedShuffleSplit,train_test_split
from tensorflow import keras
import numpy as np
from src.models.embedding import *
from sklearn.datasets import fetch_20newsgroups
| [
6738,
12351,
13,
19608,
292,
1039,
13,
22602,
1330,
1100,
62,
16624,
11,
651,
62,
18893,
397,
11,
14841,
62,
3107,
3007,
11,
2420,
62,
1462,
62,
43027,
11,
30778,
11,
3424,
62,
15390,
11,
30778,
15419,
5317,
198,
6738,
1341,
35720,
... | 2.777778 | 126 |
import numpy as np
import xgboost
from sklearn import decomposition
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
def multiclass_logloss(actual, predicted, eps=1e-15):
    """Multi class version of Logarithmic Loss metric.

    :param actual: Array containing the actual target classes (either a 1-D
        array of class labels or an already one-hot encoded matrix)
    :param predicted: Matrix with class predictions, one probability per class
    :param eps: Clipping bound keeping probabilities away from 0/1 so the log
        stays finite
    """
    # One-hot encode 1-D label vectors so both input forms are handled alike.
    if actual.ndim == 1:
        one_hot = np.zeros((actual.shape[0], predicted.shape[1]))
        one_hot[np.arange(actual.shape[0]), actual] = 1
        actual = one_hot
    bounded = np.clip(predicted, eps, 1 - eps)
    n_rows = actual.shape[0]
    # Mean negative log-likelihood of the true classes.
    return -np.sum(actual * np.log(bounded)) / n_rows
def logistic_regression_using_different_models(xtrain, xvalid, ytrain, yvalid):
    """This method extracts two different types of features
    These are the TFIDF and CountVectoriser and fits three different
    baseline classifiers on it. It then computes the multiclass log loss for each.

    Bug fix: ``decomposition.TruncatedSVD`` was previously included in the
    model list, but it is a dimensionality-reduction transformer without a
    ``predict_proba`` method, so the evaluation loop crashed with
    ``AttributeError``. It has been removed from the candidate models.

    @param xtrain: Training dataframe
    @param xvalid: Validation dataframe
    @param ytrain: Training output
    @param yvalid: Validation output
    """
    tfv = TfidfVectorizer(min_df=3,
                          max_features=None,
                          strip_accents='unicode',
                          analyzer='word',
                          token_pattern=r'\w{1,}',
                          ngram_range=(1, 3),
                          use_idf=1,
                          smooth_idf=1,
                          sublinear_tf=1,
                          stop_words='english')
    ctv = CountVectorizer(analyzer='word',
                          token_pattern=r'\w{1,}',
                          ngram_range=(1, 3),
                          stop_words='english')
    logreg = LogisticRegression(C=1.0)
    multnb = MultinomialNB()
    xgb_clf = xgboost.XGBClassifier(max_depth=7,
                                    n_estimators=200,
                                    colsample_bytree=0.8,
                                    subsample=0.8,
                                    nthread=10,
                                    learning_rate=0.1)
    features = [tfv, ctv]
    models = [logreg, multnb, xgb_clf]
    for feature in features:
        # Fit/transform once per feature type (hoisted out of the model loop;
        # previously the vectorizer was refit for every single model).
        # NOTE(review): fitting on train + validation text leaks validation
        # vocabulary into the features -- acceptable for a quick baseline only.
        feature.fit(list(xtrain) + list(xvalid))
        xtrain_fit = feature.transform(xtrain)
        xvalid_fit = feature.transform(xvalid)
        for model in models:
            model.fit(xtrain_fit, ytrain)
            predictions = model.predict_proba(xvalid_fit)
            print("logloss: %0.3f " % multiclass_logloss(yvalid, predictions))
| [
11748,
299,
32152,
355,
45941,
198,
11748,
2124,
70,
39521,
198,
6738,
1341,
35720,
1330,
26969,
9150,
198,
198,
6738,
1341,
35720,
13,
30053,
62,
2302,
7861,
13,
5239,
1330,
309,
69,
312,
69,
38469,
7509,
11,
2764,
38469,
7509,
198,
... | 2.042433 | 1,414 |
from p3ui import *
import numpy as np
import asyncio
from shared import VerticalScrollArea
| [
6738,
279,
18,
9019,
1330,
1635,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
30351,
952,
198,
198,
6738,
4888,
1330,
38937,
29261,
30547,
628,
198
] | 3.615385 | 26 |
import pandas as pd, numpy as np, tensorflow as tf, os, warnings
pd.options.mode.chained_assignment = None
class CMF:
"""
Collective matrix factorization model for recommenders systems with explicit data and side info.
Fits a collective matrix factorization model to ratings data along with item and/or user side information,
by factorizing all matrices with common (shared) factors, e.g.:
X~=AB' and I~=BC'
By default, the function to minimize is as follows:
L = w_main*norm(X- AB' - Ubias_{u} - Ibias_{i})^2/|X| + w_item*norm(I-BC')^2/|I| + w_user*norm(U-AD')^2/|U|
with added regularization as follows:
L += reg_param*(norm(A)^2+norm(B)^2+norm(C)^2+norm(D)^2)
And user/item biases
Where:
X is the ratings matrix (considering only non-missing entries).
I is the item-attribute matrix (only supports dense inputs).
U is the user-attribute matrix (only supports dense inputs).
A, B, C, D are lower-dimensional matrices (the model parameters).
|X|, |I|, |U| are the number non-missing of entries in each matrix.
The matrix products might not use all the rows/columns of these matrices at each factorization
(this is controlled with k_main, k_item and k_user).
Ubias_{u} and Ibias_{i} are user and item biases, defined for each user and item ID.
** Be aware that, due to the regularization part, this formula as it is implies that larger datasets require
lower regularization. You can use 'standardize_err=False' and set w1/w2/w3 to avoid this.**
Alternatively, it can also fit an additive model with "offsets", similar in spirit to [2] (see references):
L = norm(X - (A+UC)(B+ID)' - Ubias_{u} - Ibias_{i})^2 + reg_param*(norm(A)^2+norm(B)^2+norm(C)^2+norm(D)^2)
This second model requires more iterations to fit (takes more time), and doesn't support missing value imputation,
but it oftentimes provides better results, especially for cold-start recommendations and if the side information
(users and items) is mostly binary columns.
In both cases, the model is fit with full L-BFGS updates (not stochastic gradient descent or stochastic Newton),
i.e. it calculates errors for the whole data and updates all model parameters at once during each iteration.
This usually leads to better local optima and less need for parameter tuning, but at the cost of less
scalability. The L-BFGS implementation used is from SciPy, which is not entirely multi-threaded, so if your CPU
has many cores or you have many GPUs, you can expect an speed up from parallelization on the calculations of the
objective function and the gradient, but not so much on the calculations that happen inside L-BFGS.
By default, the number of iterations is set at 1000, but for smaller datasets and for the offsets model, this might
not reach convergence when using high regularization.
If passing reindex=True, it will internally reindex all user and item IDs. Your data will not require
reindexing if the IDs for users and items in the input data frame passed to .fit meet the following criteria:
1) Are all integers.
2) Start at zero.
3) Don't have any enumeration gaps, i.e. if there is a user '4', user '3' must also be there.
Adding side information about entries for which there are no ratings or vice-versa can still help to improve
factorizations, but the more elements they have in common, the better.
If passing reindex=False, then the matrices with side information (user and item attributes) must have exactly
the same number of rows as the number of users/items present in the ratings data, in which case there cannot
be entries (users or items) missing from the ratings and present in the side information or vice-versa.
For missing entries in the user and item attributes, use numpy.nan. These will not be taken into account in
the optimization procedure. If there is no information for use user or item, leave that row out altogether
instead of filling with NAs. In the offsets model, missing entries will be automatically filled with zeros,
so it's recommended to perform imputation beforehand for it.
Can also produce non-negative matrix factorization (including the user and item attributes), but if using
user and/or item biases, these will not be constrained to be non-negative.
For the regular model, if there are binary columns in the data, it can apply a sigmoid transformation to the
approximations from the factorization, but these will still be taken as a squared loss with respect to the
original 0/1 values, so as to make the loss comparable to that of the other columns. Be sure to pass the names
of the binary columns to '.fit', or the indexes or the columns when using 'reindex=False'.
Note
----
**Be aware that the data passed to '.fit' will be modified inplace (e.g. reindexed). Make a copy of your data beforehand if
you require it.** If you plan to do any hyper-parameter tuning through cross-validation, you should reindex your data
    beforehand and call the CMF constructor with 'reindex=False'.
Note
----
The API contains parameters for both an item-attribute matrix and a user-attribute matrix,
but you can fit the model to data with only one or none of them.
Parameters corresponding to the factorization of a matrix for which no data is passed will be ignored.
Note
----
The model allows to make recommendations for users and items for which there is data about their
attributes but no ratings. The quality of these predictions might however not be good, especially
if you set k_main > 0. It is highly recommended to center your ratings if you plan on making
predictions for user/items that were not in the training set.
Parameters
----------
k : int
Number of common (shared) latent factors to use.
k_main : int
Number of additional (non-shared) latent factors to use for the ratings factorization.
Ignored for the offsets model.
k_user: int
Number of additional (non-shared) latent factors to use for the user attributes factorization.
Ignored for the offsets model.
k_item : int
Number of additional (non-shared) latent factors to use for the item attributes factorization.
Ignored for the offsets model.
w_main : float
Weight to assign to the (mean) root squared error in factorization of the ratings matrix.
Ignored for the offsets model.
w_user : float
Weight to assign to the (mean) root squared error in factorization of the user attributes matrix.
Ignored for the offsets model.
w_item : float
Weight to assign to the (mean) root squared error in factorization of the item attributes matrix.
Ignored for the offsets model.
reg_param : float or tuple of floats
Regularization parameter for each matrix, in this order:
1) User-Factor, 2) Item-Factor, 3) User bias, 4) Item-bias, 5) UserAttribute-Factor, 6) ItemAttribute-Factor.
offsets_model : bool
Whether to fit the alternative model formulation with offsets (see description).
nonnegative : bool
Whether the resulting low-rank matrices (A and B) should have all non-negative entries.
Forced to False when passing 'center_ratings=True'.
maxiter : int
Maximum number of iterations for which to run the optimization procedure. Recommended to use a higher number
for the offsets model.
standardize_err : bool
Whether to divide the sum of squared errors from each factorized matrix by the number of non-missing entries.
Setting this to False requires far larger regularization parameters. Note that most research papers
don't standardize the errors, so if you try to reproduce some paper with specific parameters, you might
want to set this to True.
Forced to False when passing 'reweight=True'.
reweight : bool
Whether to automatically reweight the errors of each matrix factorization so that they get similar influence,
accounting for the number of elements in each and the magnitudes of the entries
        (applies in addition to weights passed as w_main, w_item and w_user).
This is done by calculating the initial sum of squared errors with randomly initialized factor matrices,
but it's not guaranteed to be a good criterion.
It might be better to scale the entries of either the ratings or the attributes matrix so that they are in a similar scale
(e.g. if the ratings are in [1,5], the attributes should ideally be in the same range and not [-10^3,10^3]).
Ignored for the offsets model.
reindex : bool
Whether to reindex data internally (assign own IDs) - see description above.
center_ratings : bool
Whether to substract the mean rating from the ratings before fitting the model. Will be force to True if
passing 'add_user_bias=True' or 'add_item_bias=True'.
user_bias : bool
Whether to use user biases (one per user) as additional model parameters.
item_bias : bool
Whether to use item biases (one per item) as additional model parameters.
center_user_info : bool
Whether to center the user attributes by substracting the mean from each column.
center_item_info : bool
Whether to center the item attributes by substracting the mean from each column.
user_info_nonneg : bool
Whether the user_attribute-factor matrix (C) should have all non-negative entries.
Forced to false when passing 'center_user_info=True'.
item_info_nonneg : bool
Whether the item_attribute-factor matrix (D) should have all non-negative entries.
Forced to false when passing 'center_item_info=True'
keep_data : bool
Whether to keep information about which user was associated with each item
in the training set, so as to exclude those items later when making Top-N
recommendations.
save_folder : str or None
Folder where to save all model parameters as csv files.
produce_dicts : bool
Whether to produce Python dictionaries for users and items, which
are used by the prediction API of this package. You can still predict without
them, but it might take some additional miliseconds (or more depending on the
number of users and items).
random_seed : int or None
Random seed to use when starting the parameters.
verbose : bool
        Whether to display convergence messages from L-BFGS. If running it from
an IPython notebook, these will be printed in the console in which the
notebook is running, but not on the output of the cell within the notebook.
Attributes
----------
A : array (nitems, k_main + k + k_user)
Matrix with the user-factor attributes, containing columns from both factorizations.
        If you wish to extract only the factors used for predictions, slice it like this: A[:,:k_main+k]
B : array (nusers, k_main + k + k_item)
Matrix with the item-factor attributes, containing columns from both factorizations.
        If you wish to extract only the factors used for predictions, slice it like this: B[:,:k_main+k]
C : array (k + k_item, item_dim)
Matrix of factor-item_attribute. Will have the columns that correspond to binary features in
a separate attribute.
D : array (k_user + k, user_dim)
Matrix of factor-user_attribute. Will have the columns that correspond to binary features in
a separate attribute.
C_bin : array (k + k_item, item_dim_bin):
Part of the C matrix that corresponds to binary columns in the item data.
Non-negativity constraints will not apply to this matrix.
D_bin : array (k_user + k, user_dim_bin)
Part of the D matrix that corresponds to binary columns in the user data.
Non-negativity constraints will not apply to this matrix.
add_user_bias : array (nusers, )
User biases determined by the model
add_item_bias : array (nitems, )
Item biases determined by the model
user_mapping_ : array (nusers,)
ID of the user (as passed to .fit) represented by each row of A.
item_mapping_ : array (nitems,)
ID of the item (as passed to .fit) represented by each row of B.
user_dict_ : dict (nusers)
Dictionary with the mapping between user IDs (as passed to .fit) and rows of A.
item_dict_ : dict (nitems)
Dictionary with the mapping between item IDs (as passed to .fit) and rows of B.
is_fitted : bool
Whether the model has been fit to some data.
global_mean : float
Global mean of the ratings.
user_arr_means : array (user_dim, )
Column means of the user side information matrix.
item_arr_means : array (item_dim, )
Column means of the item side information matrix.
References
----------
[1] Relational learning via collective matrix factorization (A. Singh, 2008)
[2] Collaborative topic modeling for recommending scientific articles (C. Wang, D. Blei, 2011)
"""
def fit(self, ratings, user_info=None, item_info=None, cols_bin_user=None, cols_bin_item=None):
"""
Fit the model to ratings data and item/user side info, using L-BFGS
Note
----
**Be aware that the data passed to '.fit' will be modified inplace (e.g. reindexed). Make a copy of your data beforehand if
you require it (e.g. using the "deepcopy" function from the "copy" module).**
Parameters
----------
ratings : pandas data frame or array (nobs, 3)
Ratings data to which to fit the model.
If a pandas data frame, must contain the columns 'UserId','ItemId' and 'Rating'. Optionally, it might also contain a column 'Weight'.
If a numpy array, will take the first 4 columns in that order
If a list of tuples, must be in the format (UserId,ItemId,Rating,[Weight]) (will be coerced to data frame)
user_info : pandas data frame or numpy array (nusers, nfeatures_user)
Side information about the users (i.e. their attributes, as a table).
Must contain a column called 'UserId'.
If called with 'reindex=False', must be a numpy array,
with rows corresponding to user ID numbers and columns to user attributes.
item_info : pandas data frame or numpy array (nitems, nfeatures_item)
Side information about the items (i.e. their attributes, as a table).
Must contain a column named ItemId.
If called with 'reindex=False', must be a numpy array,
with rows corresponding to item ID numbers and columns to item attributes.
cols_bin_user : array or list
Columns of user_info that are binary (only take values 0 or 1).
Will apply a sigmoid function to the factorized approximations of these columns.
Ignored when called with 'offsets_model=True'.
cols_bin_item : array or list
Columns of item_info that are binary (only take values 0 or 1).
Will apply a sigmoid function to the factorized approximations of these columns.
Ignored when called with 'offsets_model=True'.
Returns
-------
self : obj
Copy of this object
"""
# readjusting parameters in case they are redundant
if item_info is None:
self.k_user = 0
if user_info is None:
self.k_item = 0
self._process_data(ratings, item_info, user_info, cols_bin_user, cols_bin_item)
self._set_weights(self.random_seed)
self._fit(self.w1, self.w2, self.w3, self.reg_param,
self.k, self.k_main, self.k_item, self.k_user,
self.random_seed, self.maxiter)
self.is_fitted = True
self._clear_internal_objs()
return self
    def predict(self, user, item):
        """
        Predict ratings for combinations of users and items

        Note
        ----
        You can either pass an individual user and item, or arrays representing
        tuples (UserId, ItemId) with the combinations of users and items for which
        to predict (one row per prediction).

        Note
        ----
        If you pass any user/item which was not in the training set, the prediction
        for it will be NaN.

        Parameters
        ----------
        user : array-like (npred,) or obj
            User(s) for which to predict each item.
        item: array-like (npred,) or obj
            Item(s) for which to predict for each user.

        Returns
        -------
        pred : float or array (npred,)
            Predicted rating(s); NaN where the user or item was not in the
            training set.
        """
        assert self.is_fitted
        # Normalize inputs to numpy arrays (accepts scalars, lists, tuples,
        # and pandas Series).
        if isinstance(user, list) or isinstance(user, tuple):
            user = np.array(user)
        if isinstance(item, list) or isinstance(item, tuple):
            item = np.array(item)
        if user.__class__.__name__=='Series':
            user = user.values
        if item.__class__.__name__=='Series':
            item = item.values
        # Map external user IDs to internal row indices of the factor matrix;
        # IDs not seen in training are mapped to -1.
        if isinstance(user, np.ndarray):
            if len(user.shape) > 1:
                user = user.reshape(-1)
            assert user.shape[0] > 0
            if self.reindex:
                if user.shape[0] > 1:
                    user = pd.Categorical(user, self.user_mapping_).codes
                else:
                    # Single-element array: the dict lookup (when built) is
                    # cheaper than constructing a Categorical.
                    if self.user_dict_ is not None:
                        try:
                            user = self.user_dict_[user]
                        except:
                            user = -1
                    else:
                        user = pd.Categorical(user, self.user_mapping_).codes[0]
        else:
            # Scalar user ID.
            if self.reindex:
                if self.user_dict_ is not None:
                    try:
                        user = self.user_dict_[user]
                    except:
                        user = -1
                else:
                    user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0]
            user = np.array([user])
        # Same mapping for item IDs.
        if isinstance(item, np.ndarray):
            if len(item.shape) > 1:
                item = item.reshape(-1)
            assert item.shape[0] > 0
            if self.reindex:
                if item.shape[0] > 1:
                    item = pd.Categorical(item, self.item_mapping_).codes
                else:
                    if self.item_dict_ is not None:
                        try:
                            item = self.item_dict_[item]
                        except:
                            item = -1
                    else:
                        item = pd.Categorical(item, self.item_mapping_).codes[0]
        else:
            if self.reindex:
                if self.item_dict_ is not None:
                    try:
                        item = self.item_dict_[item]
                    except:
                        item = -1
                else:
                    item = pd.Categorical(np.array([item]), self.item_mapping_).codes[0]
            item = np.array([item])
        assert user.shape[0] == item.shape[0]
        if user.shape[0] == 1:
            # Single prediction: scalar dot product plus optional mean/biases.
            if (user[0] == -1) or (item[0] == -1):
                return np.nan
            else:
                out = self._Ab[user].dot(self._Ba[item].T).reshape(-1)[0]
                if self.center_ratings:
                    out += self.global_mean
                if self.add_user_bias:
                    out += self.user_bias[user]
                if self.add_item_bias:
                    out += self.item_bias[item]
                if isinstance(out, np.ndarray):
                    out = out[0]
                return out
        else:
            # Batch prediction: row-wise dot products, NaN for unknown pairs.
            nan_entries = (user == -1) | (item == -1)
            if nan_entries.sum() == 0:
                out = (self._Ab[user] * self._Ba[item]).sum(axis=1)
                if self.center_ratings:
                    out += self.global_mean
                if self.add_user_bias:
                    out += self.user_bias[user]
                if self.add_item_bias:
                    out += self.item_bias[item]
                return out
            else:
                # Compute only the valid pairs, then overwrite unknowns with NaN.
                # NOTE(review): the bias additions below index with the raw
                # user/item arrays that still contain -1 (picking the last
                # row); harmless since those entries are overwritten with NaN
                # right after -- but verify if biases ever become NaN-aware.
                non_na_user = user[~nan_entries]
                non_na_item = item[~nan_entries]
                out = np.empty(user.shape[0], dtype=self._Ab.dtype)
                out[~nan_entries] = (self._Ab[non_na_user] * self._Ba[non_na_item]).sum(axis=1)
                if self.center_ratings:
                    out += self.global_mean
                if self.add_user_bias:
                    out += self.user_bias[user]
                if self.add_item_bias:
                    out += self.item_bias[item]
                out[nan_entries] = np.nan
                return out
    def topN(self, user, n=10, exclude_seen=True, items_pool=None):
        """
        Recommend Top-N items for a user

        Outputs the Top-N items according to score predicted by the model.
        Can exclude the items for the user that were associated to her in the
        training set, and can also recommend from only a subset of user-provided items.

        Parameters
        ----------
        user : obj
            User for which to recommend.
        n : int
            Number of top items to recommend.
        exclude_seen: bool
            Whether to exclude items that were associated to the user in the training set.
        items_pool: None or array
            Items to consider for recommending to the user.

        Returns
        -------
        rec : array (n,)
            Top-N recommended items.
        """
        if isinstance(n, float):
            n = int(n)
        assert isinstance(n ,int)
        # Map the external user ID to an internal row index; fail loudly for
        # users not present in the training data.
        if self.reindex:
            if self.produce_dicts:
                try:
                    user = self.user_dict_[user]
                except:
                    raise ValueError("Can only predict for users who were in the training set.")
            else:
                user = pd.Categorical(np.array([user]), self.user_mapping_).codes[0]
                if user == -1:
                    raise ValueError("Can only predict for users who were in the training set.")
        if exclude_seen and not self.keep_data:
            raise Exception("Can only exclude seen items when passing 'keep_data=True' to .fit")
        if items_pool is None:
            # Rank over ALL items. Scores are negated so that argpartition /
            # argsort (ascending) yield highest-scored items first.
            allpreds = - (self._Ab[user].dot(self._Ba.T))
            if self.add_item_bias:
                allpreds -= self.item_bias
            if exclude_seen:
                # Fetch this user's seen-item slice from the flat CSR-style
                # arrays (_st_ix_user = start offsets, _n_seen_by_user = counts).
                if user < self._n_seen_by_user.shape[0]:
                    n_seen_by_user = self._n_seen_by_user[user]
                    st_ix_user = self._st_ix_user[user]
                else:
                    n_seen_by_user = 0
                    st_ix_user = 0
                # Take n + (number seen) candidates so that n remain after
                # removing the seen ones, then sort just those candidates.
                n_ext = np.min([n + n_seen_by_user, self._Ba.shape[0]])
                rec = np.argpartition(allpreds, n_ext-1)[:n_ext]
                seen = self.seen[st_ix_user : st_ix_user + n_seen_by_user]
                rec = np.setdiff1d(rec, seen)
                rec = rec[np.argsort(allpreds[rec])[:n]]
                if self.reindex:
                    return self.item_mapping_[rec]
                else:
                    return rec
            else:
                # No exclusion: partial sort of the top n scores only.
                n = np.min([n, self._Ba.shape[0]])
                rec = np.argpartition(allpreds, n-1)[:n]
                rec = rec[np.argsort(allpreds[rec])]
                if self.reindex:
                    return self.item_mapping_[rec]
                else:
                    return rec
        else:
            # Rank over a user-supplied candidate pool only.
            if isinstance(items_pool, list) or isinstance(items_pool, tuple):
                items_pool = np.array(items_pool)
            if items_pool.__class__.__name__=='Series':
                items_pool = items_pool.values
            if isinstance(items_pool, np.ndarray):
                if len(items_pool.shape) > 1:
                    items_pool = items_pool.reshape(-1)
                if self.reindex:
                    # Map pool IDs to internal indices, dropping (with a
                    # warning) any that were not in the training data.
                    items_pool_reind = pd.Categorical(items_pool, self.item_mapping_).codes
                    nan_ix = (items_pool_reind == -1)
                    if nan_ix.sum() > 0:
                        items_pool_reind = items_pool_reind[~nan_ix]
                        msg = "There were " + ("%d" % int(nan_ix.sum())) + " entries from 'item_pool'"
                        msg += "that were not in the training data and will be exluded."
                        warnings.warn(msg)
                    del nan_ix
                    if items_pool_reind.shape[0] == 0:
                        raise ValueError("No items to recommend.")
                    elif items_pool_reind.shape[0] == 1:
                        raise ValueError("Only 1 item to recommend.")
                    else:
                        pass
            else:
                raise ValueError("'items_pool' must be an array.")
            # Negated scores restricted to the candidate pool.
            if self.reindex:
                allpreds = - self._Ab[user].dot(self._Ba[items_pool_reind].T)
                if self.add_item_bias:
                    allpreds -= self.item_bias[items_pool_reind]
            else:
                allpreds = - self._Ab[user].dot(self._Ba[items_pool].T)
                if self.add_item_bias:
                    allpreds -= self.item_bias[items_pool]
            n = np.min([n, items_pool.shape[0]])
            if exclude_seen:
                if user < self._n_seen_by_user.shape[0]:
                    n_seen_by_user = self._n_seen_by_user[user]
                    st_ix_user = self._st_ix_user[user]
                else:
                    n_seen_by_user = 0
                    st_ix_user = 0
                # Over-select candidates, drop seen items, then re-score the
                # survivors and return the best n.
                n_ext = np.min([n + n_seen_by_user, items_pool.shape[0]])
                rec = np.argpartition(allpreds, n_ext-1)[:n_ext]
                seen = self.seen[st_ix_user : st_ix_user + n_seen_by_user]
                if self.reindex:
                    rec = np.setdiff1d(items_pool_reind[rec], seen)
                    allpreds = - self._Ab[user].dot(self._Ba[rec].T)
                    if self.add_item_bias:
                        allpreds -= self.item_bias[rec]
                    return self.item_mapping_[rec[np.argsort(allpreds)[:n]]]
                else:
                    rec = np.setdiff1d(items_pool[rec], seen)
                    allpreds = - self._Ab[user].dot(self._Ba[rec].T)
                    if self.add_item_bias:
                        allpreds -= self.item_bias[rec]
                    return rec[np.argsort(allpreds)[:n]]
            else:
                rec = np.argpartition(allpreds, n-1)[:n]
                return items_pool[rec[np.argsort(allpreds[rec])]]
def add_user(self, new_id, attributes, reg='auto'):
"""
Adds a new user vector according to its attributes, in order to make predictions for her
In the regular collective factorization model without non-negativity constraints and without binary
columns, will calculate the latent factors vector by its closed-form solution, which is fast. In the offsets
model, the latent factors vector is obtained by a simple matrix product, so it will be even faster. However,
if there are non-negativity constraints and/or binary columns, there is no closed form solution,
and it will be calculated via gradient-based optimization, so it will take longer and shouldn't be
expected to work in 'real time'.
Note
----
For better quality cold-start recommendations, center your ratings data, use high regularization,
assign large weights to the factorization of side information, and don't use large values for number
of latent factors that are specific for some factorization.
Note
----
If you pass and ID that is of a different type (str, int, obj, etc.) than the IDs of the data that
was passed to .fit, the internal indexes here might break and some of the prediction functionality
might stop working. Be sure to pass IDs of the same type. The type of the ID will be forcibly
converted to try to avoid this, but you might still run into problems.
Parameters
----------
new_id : obj
ID of the new user. Ignored when called with 'reindex=False', in which case it will assign it
ID = nusers_train + 1.
attributes : array (user_dim, )
Attributes of this user (side information)
reg : float or str 'auto'
Regularization parameter for these new attributes. If set to 'auto', will use the same regularization
parameter that was set for the user-factor matrix.
Returns
-------
Success : bool
Returns true if the operation completes successfully
"""
## TODO: `add_user`, `add_item`, `topN_cold` need refactoring to be more modular
assert self.is_fitted
if self.C is None:
raise ValueError("Can only add users if model was fit to user side information.")
if self.produce_dicts:
if new_id in self.user_dict_:
raise ValueError("User ID is already in the model.")
else:
if new_id in self.user_mapping_:
raise ValueError("User ID is already in the model.")
if attributes.__class__.__name__ == 'DataFrame':
attributes = attributes[self._user_cols_orig]
attributes = attributes.values
if attributes.__class__.__name__ == 'Series':
attributes = attributes.loc[self._user_cols_orig]
attributes = attributes.values
assert isinstance(attributes, np.ndarray)
attributes = attributes.reshape(-1)
if self.offsets_model:
assert attributes.shape[0] == self.C.shape[0]
elif self.cols_bin_user is None:
assert attributes.shape[0] == self.C.shape[1]
else:
assert attributes.shape[0] == self.C.shape[1] + self.C_bin.shape[1]
attributes = attributes.astype(self.A.dtype)
if reg == 'auto':
reg = self.reg_param[0]
if isinstance(reg, int):
reg = float(reg)
assert isinstance(reg, float)
if (self.cols_bin_user is not None) and (not self.offsets_model):
attributes_bin = attributes[np.in1d(self._user_cols_orig, self.cols_bin_user)].copy()
attributes = attributes[np.in1d(self._user_cols_orig, self._cols_nonbin_user)]
if self.center_user_info:
attributes -= self.user_arr_means
if self.offsets_model:
user_vec = attributes.reshape(1,-1).dot(self.C).astype(self.A.dtype)
self.A = np.r_[self.A, user_vec.reshape(1, -1)]
self._Ab = np.r_[self._Ab, user_vec.reshape(1, -1)]
else:
user_vec = np.zeros(self.k_main + self.k + self.k_user, dtype=self.A.dtype)
if (self.cols_bin_user is None) and (not self.nonnegative):
if self.standardize_err:
reg *= self.k + self.k_user
user_vec[self.k_main:] = np.linalg.solve(self.C.dot(self.C.T) + np.diag(np.repeat(reg,self.k + self.k_user)), self.C.dot(attributes))
else:
Arow = tf.Variable(tf.zeros([1, self.k + self.k_user]))
Ctf = tf.placeholder(tf.float32)
Cbin_tf = tf.placeholder(tf.float32)
attr_num = tf.placeholder(tf.float32)
attr_bin = tf.placeholder(tf.float32)
if self.standardize_err:
loss = tf.losses.mean_squared_error(tf.matmul(Arow, Ctf), attr_num)
if self.C_bin is not None:
loss += tf.losses.mean_squared_error(tf.sigmoid(tf.matmul(Arow, Cbin_tf)), attr_bin)
else:
attributes_bin = None
loss += reg*tf.nn.l2_loss(Arow)
else:
loss = tf.nn.l2_loss(tf.matmul(Arow, Ctf) - attr_num)
if self.cols_user_bin is not None:
loss += tf.nn.l2_loss(tf.sigmoid(tf.matmul(Arow, Cbin_tf)) - attr_bin)
loss += reg * tf.nn.l2_loss(Arow)
opts_lbfgs = {'iprint':-1, 'disp':0}
if self.nonnegative:
dct_bound = {Arow:(0, np.inf)}
else:
dct_bound = dict()
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss, method='L-BFGS-B', options=opts_lbfgs, var_to_bounds=dct_bound)
model = tf.global_variables_initializer()
sess = tf.Session()
sess.run(model)
with sess:
tf.logging.set_verbosity(tf.logging.WARN)
optimizer.minimize(sess, feed_dict={Ctf:self.C, Cbin_tf:self.C_bin,
attr_num:attributes, attr_bin:attributes_bin})
user_vec[self.k_main:] = Arow.eval(session=sess).reshape(-1)
self.A = np.r_[self.A, user_vec.reshape(1, -1)]
self._Ab = self.A[:, :self.k_main + self.k]
if self.reindex:
self.user_mapping_ = np.r_[self.user_mapping_, np.array([new_id])]
if self.produce_dicts:
self.user_dict_[new_id] = self.user_mapping_.shape[0] - 1
# if self.keep_data:
# self._st_ix_user = np.r_[self._st_ix_user, -1]
# self._n_seen_by_user = np.r_[self._n_seen_by_user, 0]
if self.add_user_bias:
self.user_bias = np.r_[self.user_bias, np.zeros(1, dtype=self.user_bias.dtype)]
return True
def add_item(self, new_id, attributes, reg='auto'):
"""
Adds a new item vector according to its attributes, in order to make predictions for it
In the regular collective factorization model without non-negativity constraints and without binary
columns, will calculate the latent factors vector by its closed-form solution, which is fast. In the offsets
model, the latent factors vector is obtained by a simple matrix product, so it will be even faster. However,
if there are non-negativity constraints and/or binary columns, there is no closed form solution,
and it will be calculated via gradient-based optimization, so it will take longer and shouldn't be
expected to work in 'real time'.
Note
----
For better quality cold-start recommendations, center your ratings data, use high regularization,
assign large weights to the factorization of side information, and don't use large values for number
of latent factors that are specific for some factorization.
Note
----
If you pass and ID that is of a different type (str, int, obj, etc.) than the IDs of the data that
was passed to .fit, the internal indexes here might break and some of the prediction functionality
might stop working. Be sure to pass IDs of the same type. The type of the ID will be forcibly
converted to try to avoid this, but you might still run into problems.
Parameters
----------
new_id : obj
ID of the new item. Ignored when called with 'reindex=False', in which case it will assign it
ID = nitems_train + 1.
attributes : array (item_dim, )
Attributes of this item (side information)
reg : float or str 'auto'
Regularization parameter for these new attributes. If set to 'auto', will use the same regularization
parameter that was set for the item-factor matrix.
Returns
-------
Success : bool
Returns true if the operation completes successfully
"""
## TODO: `add_user`, `add_item`, `topN_cold` need refactoring to be more modular
assert self.is_fitted
if self.D is None:
raise ValueError("Can only add items if model was fit to item side information.")
if self.produce_dicts:
if new_id in self.item_dict_:
raise ValueError("Item ID is already in the model.")
else:
if new_id in self.item_mapping_:
raise ValueError("Item ID is already in the model.")
if attributes.__class__.__name__ == 'DataFrame':
attributes = attributes.values
if attributes.__class__.__name__ == 'Series':
attributes = attributes.loc[self._item_cols_orig]
attributes = attributes.values
assert isinstance(attributes, np.ndarray)
attributes = attributes.reshape(-1)
if self.offsets_model:
assert attributes.shape[0] == self.D.shape[0]
elif self.cols_bin_item is None:
assert attributes.shape[0] == self.D.shape[1]
else:
assert attributes.shape[0] == self.D.shape[1] + self.D_bin.shape[1]
attributes = attributes.astype(self.B.dtype)
if reg == 'auto':
reg = self.reg_param[1]
if isinstance(reg, int):
reg = float(reg)
assert isinstance(reg, float)
if (self.cols_bin_item is not None) and (not self.offsets_model):
attributes_bin = attributes[np.in1d(self._item_cols_orig, self.cols_bin_item)].copy()
attributes = attributes[np.in1d(self._item_cols_orig, self._cols_nonbin_item)]
if self.center_item_info:
attributes -= self.item_arr_means
if self.offsets_model:
item_vec = attributes.reshape(1,-1).dot(self.D).astype(self.B.dtype)
self.B = np.r_[self.B, item_vec.reshape(1, -1)]
self._Ba = np.r_[self._Ba, item_vec.reshape(1, -1)]
else:
item_vec = np.zeros(self.k_main + self.k + self.k_item, dtype=self.B.dtype)
if self.cols_bin_item is None:
if self.standardize_err:
reg *= self.k + self.k_item
item_vec[self.k_main:] = np.linalg.solve(self.D.dot(self.D.T) + np.diag(np.repeat(reg, self.k + self.k_item)), self.D.dot(attributes))
else:
Brow = tf.Variable(tf.zeros([1, self.k + self.k_item]))
Dtf = tf.placeholder(tf.float32)
Dbin_tf = tf.placeholder(tf.float32)
attr_num = tf.placeholder(tf.float32)
attr_bin = tf.placeholder(tf.float32)
if self.standardize_err:
loss = tf.losses.mean_squared_error(tf.matmul(Brow, Dtf), attr_num)
if self.D_bin is not None:
loss += tf.losses.mean_squared_error(tf.sigmoid(tf.matmul(Brow, Dbin_tf)), attr_bin)
else:
attributes_bin = None
loss += reg*tf.nn.l2_loss(Brow)
else:
loss = tf.nn.l2_loss(tf.matmul(Brow, Dtf) - attr_num)
if self.cols_user_bin is not None:
loss += tf.nn.l2_loss(tf.sigmoid(tf.matmul(Brow, Dbin_tf)) - attr_bin)
loss += reg * tf.nn.l2_loss(Brow)
opts_lbfgs = {'iprint':-1, 'disp':0}
if self.nonnegative:
dct_bound = {Brow:(0, np.inf)}
else:
dct_bound = dict()
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss, method='L-BFGS-B', options=opts_lbfgs, var_to_bounds=dct_bound)
model = tf.global_variables_initializer()
sess = tf.Session()
sess.run(model)
with sess:
optimizer.minimize(sess, feed_dict={Dtf:self.D, Dbin_tf:self.D_bin,
attr_num:attributes, attr_bin:attributes_bin})
item_vec[self.k_main:] = Brow.eval(session=sess).reshape(-1)
self.B = np.r_[self.B, item_vec.reshape(1, -1)]
self._Ba = self.B[:, :self.k_main + self.k]
if self.reindex:
self.item_mapping_ = np.r_[self.item_mapping_, np.array([new_id])]
if self.produce_dicts:
self.item_dict_[new_id] = self.item_mapping_.shape[0] - 1
if self.add_item_bias:
self.item_bias = np.r_[self.item_bias, np.zeros(1, dtype=self.item_bias.dtype)]
return True
def topN_cold(self, attributes, n=10, reg='auto', items_pool=None):
"""
Recommend Top-N items for a user that was not in the training set.
In the regular collective factorization model without non-negativity constraints and without binary
columns, will calculate the latent factors vector by its closed-form solution, which is fast. In the offsets
model, the latent factors vector is obtained by a simple matrix product, so it will be even faster. However,
if there are non-negativity constraints and/or binary columns, there is no closed form solution,
and it will be calculated via gradient-based optimization, so it will take longer and shouldn't be
expected to work in 'real time'.
Note
----
For better quality cold-start recommendations, center your ratings data, use high regularization,
assign large weights to the factorization of side information, and don't use large values for number
of latent factors that are specific for some factorization.
Parameters
----------
attributes : array (user_dim, )
Attributes of the user. Columns must be in the same order as was passed to '.fit', but without the ID column.
n : int
Number of top items to recommend.
reg : float or str 'auto'
Regularization parameter for these new attributes. If set to 'auto', will use the same regularization
parameter that was set for the user-factor matrix.
items_pool: None or array
Items to consider for recommending to the user.
Returns
-------
rec : array (n,)
Top-N recommended items.
"""
## TODO: `add_user`, `add_item`, `topN_cold` need refactoring to be more modular
assert self.is_fitted
if self.C is None:
raise ValueError("Can only add users if model was fit to user side information.")
if attributes.__class__.__name__ == 'DataFrame':
attributes = attributes.values
assert isinstance(attributes, np.ndarray)
attributes = attributes.reshape(-1)
if self.offsets_model:
assert attributes.shape[0] == self.C.shape[0]
elif self.cols_bin_user is None:
assert attributes.shape[0] == self.C.shape[1]
else:
assert attributes.shape[0] == self.C.shape[1] + self.C_bin.shape[1]
attributes = attributes.astype(self.A.dtype)
if reg == 'auto':
reg = self.reg_param[0]
if isinstance(n, float):
n = int(n)
assert isinstance(n ,int)
if (self.cols_bin_user is not None) and (not self.offsets_model):
attributes_bin = attributes[np.in1d(self._user_cols_orig, self.cols_bin_user)].copy()
attributes = attributes[np.in1d(self._user_cols_orig, self._cols_nonbin_user)]
if self.center_user_info:
attributes -= self.user_arr_means
if self.offsets_model:
user_vec = attributes.reshape(1,-1).dot(self.C).astype(self.A.dtype)
else:
if self.cols_bin_user is None:
if self.standardize_err:
reg *= self.k + self.k_user
user_vec = np.linalg.solve(self.C.dot(self.C.T) + np.diag(np.repeat(reg, self.k + self.k_user)), self.C.dot(attributes))
else:
Arow = tf.Variable(tf.zeros([1, self.k + self.k_user]))
Ctf = tf.placeholder(tf.float32)
Cbin_tf = tf.placeholder(tf.float32)
attr_num = tf.placeholder(tf.float32)
attr_bin = tf.placeholder(tf.float32)
if self.standardize_err:
loss = tf.losses.mean_squared_error(tf.matmul(Arow, Ctf), attr_num)
if self.C_bin is not None:
loss += tf.losses.mean_squared_error(tf.sigmoid(tf.matmul(Arow, Cbin_tf)), attr_bin)
else:
attributes_bin = None
loss += reg*tf.nn.l2_loss(Arow)
else:
loss = tf.nn.l2_loss(tf.matmul(Arow, Ctf) - attr_num)
if self.cols_user_bin is not None:
loss += tf.nn.l2_loss(tf.sigmoid(tf.matmul(Arow, Cbin_tf)) - attr_bin)
loss += reg * tf.nn.l2_loss(Arow)
opts_lbfgs = {'iprint':-1, 'disp':0}
if self.nonnegative:
dct_bound = {Arow:(0, np.inf)}
else:
dct_bound = dict()
optimizer = tf.contrib.opt.ScipyOptimizerInterface(loss, method='L-BFGS-B', options=opts_lbfgs, var_to_bounds=dct_bound)
model = tf.global_variables_initializer()
sess = tf.Session()
sess.run(model)
with sess:
optimizer.minimize(sess, feed_dict={Ctf:self.C, Cbin_tf:self.C_bin,
attr_num:attributes, attr_bin:attributes_bin})
user_vec = Arow.eval(session=sess).reshape(-1)
user_vec = -np.r_[np.zeros(self.k_main, dtype=user_vec.dtype), user_vec[:self.k]]
if items_pool is None:
allpreds = user_vec.dot(self._Ba.T)
if self.add_item_bias:
allpreds -= self.item_bias
n = np.min([n, self._Ba.shape[0]])
rec = np.argpartition(allpreds, n-1)[:n]
rec = rec[np.argsort(allpreds[rec])]
if self.reindex:
return self.item_mapping_[rec]
else:
return rec
else:
if isinstance(items_pool, list) or isinstance(items_pool, tuple):
items_pool = np.array(items_pool)
if items_pool.__class__.__name__ =='Series':
items_pool = items_pool.values
if isinstance(items_pool, np.ndarray):
if len(items_pool.shape) > 1:
items_pool = items_pool.reshape(-1)
if self.reindex:
items_pool_reind = pd.Categorical(items_pool, self.item_mapping_).codes
nan_ix = (items_pool_reind == -1)
if nan_ix.sum() > 0:
items_pool_reind = items_pool_reind[~nan_ix]
msg = "There were " + ("%d" % int(nan_ix.sum())) + " entries from 'item_pool'"
msg += "that were not in the training data and will be exluded."
warnings.warn(msg)
del nan_ix
if items_pool_reind.shape[0] == 0:
raise ValueError("No items to recommend.")
elif items_pool_reind.shape[0] == 1:
raise ValueError("Only 1 item to recommend.")
else:
pass
else:
raise ValueError("'items_pool' must be an array.")
if self.reindex:
allpreds = user_vec.dot(self._Ba[items_pool_reind].T)
if self.add_item_bias:
allpreds -= self.item_bias[items_pool_reind]
else:
allpreds = user_vec.dot(self._Ba[items_pool].T)
if self.add_item_bias:
allpreds -= self.item_bias[items_pool]
n = np.min([n, items_pool.shape[0]])
rec = np.argpartition(allpreds, n-1)[:n]
return items_pool[rec[np.argsort(allpreds[rec])]]
| [
11748,
19798,
292,
355,
279,
67,
11,
299,
32152,
355,
45941,
11,
11192,
273,
11125,
355,
48700,
11,
28686,
11,
14601,
198,
30094,
13,
25811,
13,
14171,
13,
354,
1328,
62,
562,
16747,
796,
6045,
198,
198,
4871,
16477,
37,
25,
198,
22... | 2.202505 | 21,955 |
import math
import numpy as np
from _config import Config as config
import torch
import sys
import json
import torch
import time
| [
11748,
10688,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4808,
11250,
1330,
17056,
355,
4566,
198,
11748,
28034,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
28034,
198,
11748,
640,
198,
220,
220,
220,
220,
220,
220,
220,
220,
... | 3.155556 | 45 |
import inspect
from airflow.plugins_manager import AirflowPlugin
from airflow.models import BaseOperator
from airflow.sensors.base import BaseSensorOperator
import logging as log
import stat
| [
11748,
10104,
198,
6738,
45771,
13,
37390,
62,
37153,
1330,
3701,
11125,
37233,
198,
6738,
45771,
13,
27530,
1330,
7308,
18843,
1352,
198,
6738,
45771,
13,
82,
641,
669,
13,
8692,
1330,
7308,
47864,
18843,
1352,
198,
11748,
18931,
355,
... | 4.217391 | 46 |
import unittest
import networkx as nx
import PointwiseVulnerability as pv
if __name__ == '__main__':
    # Run the PointwiseVulnerability test suite when executed as a script.
    unittest.main()
| [
11748,
555,
715,
395,
198,
11748,
3127,
87,
355,
299,
87,
198,
11748,
6252,
3083,
53,
40920,
355,
279,
85,
198,
198,
361,
11593,
3672,
834,
6624,
705,
834,
12417,
834,
10354,
198,
220,
220,
220,
555,
715,
395,
13,
12417,
3419,
198
] | 2.837209 | 43 |
# *- coding: utf-8 -*
import json
from django.contrib.admin import ModelAdmin
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from django.contrib import admin
from django.contrib.auth.models import User, Group
from django.contrib.auth.admin import UserAdmin, GroupAdmin
from django.contrib.auth.forms import UserChangeForm, UsernameField
from django import forms
from mezzanine.accounts.admin import ProfileInline, UserProfileAdmin
from mezzanine.core.admin import TabularDynamicInlineAdmin
from pygments import highlight
from pygments.lexers import JsonLexer
from pygments.formatters import HtmlFormatter
from django.utils.safestring import mark_safe
from models import CourierRate, GroupPrice, Route, PricingRule
#UserProfileAdmin.inlines += [CourierRateAdmin,]
UserAdmin.form = CreditUserChangeForm
UserAdmin.fieldsets[0][1]['fields'] = ('username', 'password', ('credit', 'change_credit', 'can_use_api',
'api_test_mode'))
ProfileInline.exclude = ('credit', 'locked_credit')
UserProfileAdmin.list_display = ('username', 'last_name', 'first_name', 'is_active', 'is_staff', 'is_superuser',
'credit', 'valid_order_number', 'can_use_api', 'api_test_mode', 'group_names')
credit.short_description = _(u"ๆๆไฝ้ข ยฃ")
UserProfileAdmin.credit = credit
# def locked_credit(cls, self):
# return self.profile.locked_credit
# locked_credit.short_description = _(u"้ๅฎไฝ้ข ยฃ")
# UserProfileAdmin.locked_credit = locked_credit
system_number.short_description = _(u"ๅผ้็ณป็ปไธชๆฐ")
UserProfileAdmin.system_number = system_number
valid_order_number.short_description = _(u"ๆๆ่ฎขๅๆปๆฐ")
UserProfileAdmin.valid_order_number = valid_order_number
can_use_api.short_description = _(u"ๅฏไปฅไฝฟ็จAPI")
can_use_api.boolean = True
UserProfileAdmin.can_use_api = can_use_api
api_test_mode.short_description = _(u"APIๆต่ฏๆจกๅผ")
api_test_mode.boolean = True
UserProfileAdmin.api_test_mode = api_test_mode
group_names.short_description = _(u"ไปทๆ ผ็ป")
UserProfileAdmin.group_names = group_names
admin.site.unregister(Group)
admin.site.register(Group, GroupAdmin)
admin.site.register(Route, RouteAdmin)
admin.site.register(PricingRule, PricingRuleAdmin)
| [
2,
1635,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
198,
11748,
33918,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
1330,
9104,
46787,
198,
6738,
42625,
14208,
13,
26791,
13,
6494,
1330,
5794,
62,
6494,
198,
6738,
42625,
14208,
... | 2.684524 | 840 |
#!/usr/bin/python3.7
#
# Copyright 2020 Nathaniel R. Lewis
#
# Redistribution and use in source and binary forms, with or without modification, are permitted
# provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of conditions
# and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice, this list of
# conditions and the following disclaimer in the documentation and/or other materials provided
# with the distribution.
# 3. Neither the name of the copyright holder nor the names of its contributors may be used to
# endorse or promote products derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import serial
import binascii
# Load the 512 KiB flash image to be programmed.
with open('flash.bin', 'rb') as f:
    image = f.read(1024*512)

# Open the serial link to the board; the generous 60 s timeout covers the slow
# erase/program cycles -- TODO confirm against the firmware's worst case.
interface = serial.Serial('/dev/ttyUSB0', 115200, timeout=60)

sync_packet = bytes([0xa5] * 16)
interface.write(sync_packet)

# synchronize with board: a single 0x5a ack is expected back
response = interface.read(1)[0]
if response != 0x5a:
    raise ValueError("did not receive sync byte in response to sync packet (got: {})".format(response))

# erase cmos (command 0x60); the board appears to repeat 0x5a while busy,
# send 0xff when the command is accepted, then 0xbf when the clear finishes --
# inferred from the checks/messages below, confirm against the firmware.
print("sending clear cmos")
clear_cmos_packet = bytes([0x60])
interface.write(clear_cmos_packet)
while True:
    response = interface.read(1)[0]
    if response == 0xff:
        break
    elif response != 0x5a:
        raise ValueError("did not receive command ack in response to write command (got: {})".format(response))
response = interface.read(1)[0]
if response != 0xbf:
    raise ValueError("did not receive cmos clear complete (got: {})".format(response))

# Program the image one 64 KiB sector at a time.
for sector in range(0, 1024*512, 65536):
    # start write (command 0x50); expect a 0xff ack
    write_packet = bytes([0x50])
    interface.write(write_packet)
    response = interface.read(1)[0]
    if response != 0xff:
        raise ValueError("did not receive command ack in response to write command (got: {})".format(response))
    # send packet: the sector is streamed in 1 KiB chunks, each followed by its
    # little-endian CRC32; a chunk is resent until the board replies 0xEF
    print("sending sector {} image (64 KiB)".format(int(sector / 65536)))
    for idx in range(sector, sector + 65536, 1024):
        while True:
            chunk = image[idx:idx+1024]
            crc = binascii.crc32(chunk)
            interface.write(chunk)
            interface.write(crc.to_bytes(4, byteorder='little', signed=False))
            # read the per-chunk status byte: 0xEF means accepted, anything else
            # triggers a resend of the same chunk
            response = interface.read(1)[0]
            if response == 0xEF:
                break
            else:
                print("retrying chunk")
    response = interface.read(1)[0]
    if response != 0xec:
        raise ValueError("did not receive write complete (got: {})".format(response))
    # flash write packet (command 0x10) followed by the sector index
    flash_packet = bytes([0x10])
    interface.write(flash_packet)
    response = interface.read(1)[0]
    if response != 0xff:
        raise ValueError("did not receive command ack in response to write command (got: {})".format(response))
    address_packet = bytes([int(sector / 65536)])
    interface.write(address_packet)
    response = interface.read(1)[0]
    if response != 0xdd:
        raise ValueError("did not receive start ack in response to sa (got: {})".format(response))
    print("sent flash write")
    # 0xdc marks the end of the sector erase phase
    response = interface.read(1)[0]
    if response != 0xdc:
        raise ValueError("did not receive erase complete (got: {})".format(response))
    print("> sector erase complete")
    # During programming the board streams 0xda progress acks and finishes
    # with 0xdb.
    acks = 0
    while True:
        response = interface.read(1)[0]
        if response == 0xda:
            print("acks - ", acks)
            acks = acks + 1
        elif response == 0xdb:
            break
        else:
            raise ValueError("did not receive program complete (got: {})".format(response))
    print("> sector program complete")
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
13,
22,
198,
2,
198,
2,
15069,
12131,
49536,
371,
13,
10174,
198,
2,
198,
2,
2297,
396,
3890,
290,
779,
287,
2723,
290,
13934,
5107,
11,
351,
393,
1231,
17613,
11,
389,
10431,
198,
2,
28... | 2.81211 | 1,602 |
# Copyright (c) 2020 Horizon Robotics and ALF Contributors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import alf
from alf.metrics import (EnvironmentSteps, NumberOfEpisodes,
AverageReturnMetric, AverageDiscountedReturnMetric,
AverageEpisodeLengthMetric, AverageEnvInfoMetric,
AverageEpisodicAggregationMetric)
from alf.utils.tensor_utils import to_tensor
from alf.data_structures import TimeStep, StepType
import unittest
from absl.testing import parameterized
class AverageDrivingMetric(AverageEpisodicAggregationMetric):
    """Metrics for computing the average velocity and acceleration.

    This is purely for the purpose of unit testing the "@step" feature. It
    assumes the time step has velocity, acceleration and "success or not" logged
    in its ``env_info`` field.
    """
if __name__ == "__main__":
    # Run this module's metric tests when executed directly.
    unittest.main()
| [
2,
15069,
357,
66,
8,
12131,
22776,
47061,
290,
8355,
37,
25767,
669,
13,
1439,
6923,
33876,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,... | 3.151188 | 463 |
# -*- coding: utf-8 -*-

# Iterators demo.

# Iterable objects.
# BUG FIX: the Iterable/Iterator ABCs live in `collections.abc`; importing them
# straight from `collections` was deprecated and removed in Python 3.10.
from collections.abc import Iterable

print('[]ๆฏIterableๅ? ', isinstance([], Iterable))
print('{}ๆฏIterableๅ? ', isinstance({}, Iterable))
print('abcๆฏIterableๅ? ', isinstance('abc', Iterable))
print('(x for x in range(10))ๆฏIterableๅ? ', isinstance((x for x in range(10)), Iterable))
print('100ๆฏIterableๅ? ', isinstance(100, Iterable))

# Iterator objects.
from collections.abc import Iterator

print('[]ๆฏIteratorๅ? ', isinstance([], Iterator))
print('{}ๆฏIteratorๅ? ', isinstance({}, Iterator))
print('abcๆฏIteratorๅ? ', isinstance('abc', Iterator))
print('(x for x in range(10)ๆฏIteratorๅ? ', isinstance((x for x in range(10)), Iterator))

# Converting an iterable into an iterator with iter().
print('iter([])ๆฏIteratorๅ? ', isinstance(iter([]), Iterator))
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
2,
5525,
123,
255,
47987,
161,
247,
101,
198,
198,
2,
5525,
123,
255,
47987,
43380,
117,
164,
109,
94,
29993,
540,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 2.419048 | 315 |
import os
import sys

# Checks to see if the required environment variables had been set
try:
    salt_api_endpoint = os.environ['SALT_API_ENDPOINT']
    salt_api_user = os.environ['SALT_API_USER']
    salt_api_password = os.environ['SALT_API_PASSWORD']
except KeyError as i:
    # BUG FIX: `.format` must be applied to the string, not to print()'s return
    # value (None), which raised AttributeError inside this handler.
    # `i` formats as the repr of the missing key, e.g. 'SALT_API_ENDPOINT'.
    print("Error: The {env} environment variable has not been set".format(env=i))
    sys.exit()
| [
11748,
28686,
198,
11748,
25064,
198,
198,
2,
47719,
284,
766,
611,
262,
2672,
2858,
9633,
550,
587,
900,
198,
28311,
25,
198,
220,
220,
220,
8268,
62,
15042,
62,
437,
4122,
796,
28686,
13,
268,
2268,
17816,
50,
31429,
62,
17614,
62... | 2.762963 | 135 |
from __future__ import unicode_literals
import os
from django.apps import AppConfig
from django.db.utils import OperationalError, ProgrammingError
from django.conf import settings
| [
6738,
11593,
37443,
834,
1330,
28000,
1098,
62,
17201,
874,
198,
198,
11748,
28686,
198,
198,
6738,
42625,
14208,
13,
18211,
1330,
2034,
16934,
198,
6738,
42625,
14208,
13,
9945,
13,
26791,
1330,
6564,
864,
12331,
11,
30297,
12331,
198,
... | 3.8125 | 48 |
#!/usr/bin/python3
#System Modules
import random
import time
import os
import csv
import os
import sqlite3
def create_example_db():
    """ Create a dummy database with metering data

    Rebuilds the SQLite database at ``data/meters.db`` from scratch with four
    tables -- METER_DETAILS, METER_LOCATIONS, READINGS and EVENTS -- populated
    with randomly generated sample data for four meters.

    Returns True once the database has been written and committed.
    """
    # Ensure the output directory exists and start from a fresh database file.
    if not os.path.exists('data/'):
        os.makedirs('data/')
    dbPath = 'data/meters.db'
    try:
        os.remove(dbPath)
    except OSError:
        pass
    conn = sqlite3.connect(dbPath)
    curs = conn.cursor()
    # Create the meter list table
    query = 'CREATE TABLE METER_DETAILS (meter_id TEXT, meter_desc TEXT);'
    curs.execute(query)
    query = 'INSERT INTO METER_DETAILS (meter_id, meter_desc) VALUES (?, ?);'
    for x in range(1, 5):
        meterId = x
        meterName = 'Example Meter ' + str(meterId)
        row = [meterId, meterName]
        curs.execute(query, row)
    # Create the meter locations table
    query = 'CREATE TABLE METER_LOCATIONS (meter_id TEXT, lat REAL, lon REAL);'
    curs.execute(query)
    query = 'INSERT INTO METER_LOCATIONS (meter_id, lat, lon) VALUES (?, ?, ?);'
    for x in range(1, 5):
        lat, lon = create_random_coords()
        row = [x, lat, lon]
        curs.execute(query, row)
    # Create the meter readings table
    query = 'CREATE TABLE READINGS (meter_id TEXT, measdate DATETIME, v1 REAL, v2 REAL, v3 REAL, thd1 REAL, thd2 REAL, thd3 REAL, unbal REAL, PRIMARY KEY (meter_id, measdate));'
    curs.execute(query)
    query = 'INSERT INTO READINGS (meter_id, measdate, v1, v2, v3, thd1, thd2, thd3, unbal) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?);'
    startDate = "2012-01-01 00:00:00"
    endDate = "2014-07-01 00:00:00"
    for x in range(1, 5):
        meterId = x
        for y in range(1,10000):
            dateFormat = '%Y-%m-%d %H:%M:%S'
            measdate = strTimeProp(startDate, endDate, dateFormat, random.random())
            v1 = create_random_voltage()
            v2 = create_random_voltage()
            v3 = create_random_voltage()
            thd1 = create_random_thd()
            thd2 = create_random_thd()
            thd3 = create_random_thd()
            # NOTE(review): calc_unbalance() is not defined in this file's
            # visible portion -- presumably imported/defined elsewhere; confirm.
            unbal = calc_unbalance([v1,v2,v3])
            row = [meterId, measdate, v1, v2, v3, thd1, thd2, thd3, unbal]
            # (meter_id, measdate) is the primary key, so a repeated random
            # timestamp violates it; such rows are deliberately dropped.
            try:
                curs.execute(query, row)
            except sqlite3.Error as e:
                pass # Random date wasn't random enough
    # Create the meter events
    query = 'CREATE TABLE EVENTS (meter_id TEXT, event_start DATETIME, event_end DATETIME, event_type REAL, amplitude REAL, duration REAL, phases TEXT, PRIMARY KEY (meter_id, event_start, event_type));'
    curs.execute(query)
    query = 'INSERT INTO EVENTS (meter_id, event_start, event_end, event_type, amplitude, duration, phases) VALUES (?, ?, ?, ?, ?, ?, ?);'
    startDate = "2012-01-01 00:00:00"
    endDate = "2014-07-01 00:00:00"
    for x in range(1, 5):
        meterId = x
        # TODO Make the event values actually line up with readings
        # Voltage-sag events with fixed amplitude/duration and random windows.
        for y in range(1,250):
            dateFormat = '%Y-%m-%d %H:%M:%S'
            event_start = strTimeProp(startDate, endDate, dateFormat, random.random())
            event_end = strTimeProp(event_start, endDate, dateFormat, random.random())
            event_type = 'SAG'
            amplitude = 210.0
            duration = 100.0
            phases = 'ABC'
            row = [meterId, event_start, event_end, event_type, amplitude, duration, phases]
            try:
                curs.execute(query, row)
            except sqlite3.Error as e:
                pass # Random date wasn't random enough
        # Voltage-swell events, same scheme as above.
        for y in range(1,250):
            dateFormat = '%Y-%m-%d %H:%M:%S'
            event_start = strTimeProp(startDate, endDate, dateFormat, random.random())
            event_end = strTimeProp(event_start, endDate, dateFormat, random.random())
            event_type = 'SWL'
            amplitude = 260.0
            duration = 50.0
            phases = 'ABC'
            row = [meterId, event_start, event_end, event_type, amplitude, duration, phases]
            try:
                curs.execute(query, row)
            except sqlite3.Error as e:
                pass # Random date wasn't random enough
    conn.commit()
    conn.close()
    return True
def create_random_coords():
    """Return a random (lat, lon) pair.

    Latitude falls in (-30.0, -27.0] and longitude in (150, 153], each
    rounded to 5 decimal places.
    """
    latitude = round(-27.0 - 3 * random.random(), 5)
    longitude = round(153 - 3 * random.random(), 5)
    return latitude, longitude
def create_random_voltage():
    """Return a simulated phase voltage in the half-open range [230.0, 250.0)."""
    base_voltage = 230.0
    spread = 20
    return base_voltage + spread * random.random()
def create_random_thd():
    """ Creates a random THD value in the half-open range [0.0, 20.0)

    (The previous docstring claimed "a random voltage between 230 and 250",
    which did not match the returned range. THD presumably stands for total
    harmonic distortion, per the thd1..thd3 READINGS columns -- confirm.)
    """
    return ( 20 * random.random() )
def strTimeProp(start, end, format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formated in the
given format (strftime-style), giving an interval [start, end].
prop specifies how a proportion of the interval to be taken after
start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, format))
etime = time.mktime(time.strptime(end, format))
ptime = stime + prop * (etime - stime)
return time.strftime(format, time.localtime(ptime))
| [
2,
48443,
14629,
14,
8800,
14,
29412,
18,
198,
198,
2,
11964,
3401,
5028,
198,
11748,
4738,
198,
11748,
640,
198,
11748,
28686,
198,
11748,
269,
21370,
198,
11748,
28686,
198,
11748,
44161,
578,
18,
198,
198,
4299,
2251,
62,
20688,
62... | 2.261149 | 2,332 |
import logging
from adles.interfaces.libcloud_interface import LibcloudInterface
class CloudInterface(LibcloudInterface):
"""Generic interface for all cloud platforms."""
def __init__(self, infra, spec):
"""
:param dict infra: Dict of infrastructure information
:param dict spec: Dict of a parsed specification
"""
super(CloudInterface, self).__init__(infra=infra, spec=spec)
self._log = logging.getLogger(str(self.__class__))
self._log.debug("Initializing %s", self.__class__)
self.max_instance_price = float(infra["max-instance-price"])
self.max_total_price = float(infra["max-total-price"])
# Cache currently available images and sizes
self.available_images = self.provider.list_images()
self.available_sizes = self.provider.list_sizes()
self._log.debug(self.available_images)
self._log.debug(self.available_sizes)
| [
11748,
18931,
198,
198,
6738,
512,
829,
13,
3849,
32186,
13,
8019,
17721,
62,
39994,
1330,
7980,
17721,
39317,
628,
198,
4871,
10130,
39317,
7,
25835,
17721,
39317,
2599,
198,
220,
220,
220,
37227,
46189,
7071,
329,
477,
6279,
9554,
526... | 2.656338 | 355 |
# -*- coding: future_fstrings -*-
import json
from pathlib import Path
from typing import *
import jsonpickle
import pandas as pd
from incense.artifact import Artifact, content_type_to_artifact_cls
from pyrsistent import freeze, thaw
| [
2,
532,
9,
12,
19617,
25,
2003,
62,
69,
37336,
532,
9,
12,
198,
11748,
33918,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
19720,
1330,
1635,
198,
198,
11748,
33918,
27729,
293,
198,
11748,
19798,
292,
355,
279,
67,
198,
6738,
753... | 3.260274 | 73 |
import numpy as np
import pandas as pd
import yaml
import argparse
import typing
import matplotlib.pyplot as plt
from sklearn.metrics import f1_score,recall_score,accuracy_score,precision_score,confusion_matrix,classification_report
from interpret.glassbox import ExplainableBoostingClassifier
from interpret import show, preserve, show_link, set_show_addr
from interpret.provider import InlineProvider
from interpret import set_visualize_provider
set_visualize_provider(InlineProvider())
def read_params(config_path):
"""
read parameters from the params.yaml file
input: params.yaml location
output: parameters as dictionary
"""
with open(config_path) as yaml_file:
config = yaml.safe_load(yaml_file)
return config
def get_feat_and_target(df,target):
"""
Get features and target variables seperately from given dataframe and target
input: dataframe and target column
output: two dataframes for x and y
"""
x=df.drop(target,axis=1)
y=df[[target]]
return x,y
if __name__=="__main__":
args = argparse.ArgumentParser()
args.add_argument("--config", default="params.yaml")
parsed_args = args.parse_args()
train_x,train_y,test_x,test_y = train_and_evaluate(config_path=parsed_args.config)
ebm = ExplainableBoostingClassifier()
ebm.fit(train_x, train_y)
ebm_global = ebm.explain_global(name = 'EBM')
#preserve(ebm_global, 'number_customer_service_calls', 'number_customer_service_calls.html')
"""
'show' function does not work for .py files. We need to run the code in jupyter notebook.
"""
#set_show_addr(("127.0.0.1", 5000))
show(ebm_global)
plt.show()
| [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
331,
43695,
198,
11748,
1822,
29572,
198,
11748,
19720,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
6738,
1341,
35720,
13,
4164,
104... | 2.730645 | 620 |
#!/usr/bin/python
# Copyright: (c) 2018, Terry Jones <terry.jones@example.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tetration_software_agent
short_description: Queries and deletes software agents by uuid
version_added: '2.9'
description:
- Enables query or removal of software agents by uuid
- Searching by C(uuid) returns all parameters from the API
- Marking as absent deletes the
options:
uuid:
description: UUID of target agent
type: string
required: true
state:
choices: [absent, query]
default: query
description: Remove or query for software agent
required: true
type: string
extends_documentation_fragment: tetration_doc_common
notes:
- Requires the `requests` Python module.
- 'Required API Permission(s): sensor_management'
requirements:
- requests
author:
- Brandon Beck (@techbeck03)
- Joe Jacobs(@joej164)
'''
EXAMPLES = '''
# Remove agent by uuid
tetration_software_agent:
uuid: 4b35fa6001339e5313af5e34bd88012381a9aaaa
state: absent
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
# Query agent by hostname
tetration_software_agent:
uuid: 4b35fa6001339e5313af5e34bd88012381a9aaaa
state: query
provider:
host: "https://tetration-cluster.company.com"
api_key: 1234567890QWERTY
api_secret: 1234567890QWERTY
'''
RETURN = '''
---
object:
contains:
agent_type:
description: Agent type
sample: ENFORCER
type: string
arch:
description: CPU architecture type
sample: x86_64
type: string
auto_upgrade_opt_out:
description: If True, agents are not auto-upgraded during upgrade of Tetration
cluster
sample: 'False'
type: bool
cpu_quota_mode:
description: The amount of CPU quota to give to agent on the end host (pct)
sample: 1
type: int
cpu_quota_us:
description: The amount of CPU quota to give to agent on the end host (us)
sample: 30000
type: int
created_at:
description: Date this inventory was created (Unix Epoch)
sample: 1553626033
type: string
current_sw_version:
description: Current version of software agent
sample: 3.1.1.65-enforcer
type: string
data_plane_disabled:
description: If true, agent stops reporting flows to Tetration
sample: 'False'
type: bool
desired_sw_version:
description: Desired version of software agent
sample: 3.1.1.65-enforcer
type: string
enable_cache_sidechannel:
description: Whether or not sidechannel detection is enabled
sample: 'True'
type: bool
enable_forensic:
description: Whether or not forensics is enabled
sample: 'True'
type: bool
enable_meltdown:
description: Whether or not meltdown detection is enabled
sample: 'True'
type: bool
enable_pid_lookup:
description: Whether or not pid lookup for flow search is enabled
sample: 'True'
type: bool
host_name:
description: Hostname as reported by software agent
returned: when C(state) is present or query
sample: acme-example-host
type: string
interfaces:
description: List of interfaces reported by software agent
sample: JSON Interfaces
type: list
last_config_fetch_at:
description: Date of last configuration fetch (Unix Epoch)
sample: 1563458124
type: string
last_software_update_at:
description: Date of last software update (Unix Epoch)
sample: 1553626033
type: string
platform:
description: OS platform type
sample: CentOS-7.6
type: string
uuid:
description: UUID of the registered software agent
returned: when C(state) is present or query
sample: d322189839fb70b2f4569f3657eea58f096c0686
type: int
description: the changed or modified object(s)
returned: always
type: complex
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.tetration_constants import TETRATION_PROVIDER_SPEC
from ansible.module_utils.tetration_constants import TETRATION_API_SENSORS
from ansible.module_utils.tetration import TetrationApiModule
if __name__ == '__main__':
main()
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
198,
2,
15069,
25,
357,
66,
8,
2864,
11,
14286,
5437,
1279,
353,
563,
13,
73,
1952,
31,
20688,
13,
2398,
29,
198,
2,
22961,
3611,
5094,
13789,
410,
18,
13,
15,
10,
357,
3826,
27975,
457... | 2.635198 | 1,716 |
from os.path import dirname
from pathlib import Path
ASSETS = Path(dirname(__file__))
| [
6738,
28686,
13,
6978,
1330,
26672,
3672,
198,
6738,
3108,
8019,
1330,
10644,
198,
198,
10705,
32716,
796,
10644,
7,
15908,
3672,
7,
834,
7753,
834,
4008,
628
] | 3.142857 | 28 |
import argparse
import copy
import json
import os
import sys
import pprint
import requests
import yaml
import yaml.dumper
from collections import OrderedDict
class UnsortableOrderedDict(OrderedDict):
"""
Because PyYAML sorts things. Guh.
"""
if __name__ == '__main__':
api_host = os.getenv('API_HOST')
if api_host is None:
print('API_HOST must be set to your Spinnaker API')
sys.exit(1)
if api_host[-1:] == '/':
api_host = api_host[:-1]
args = parser().parse_args()
pipeline_config = get_pipeline_config(api_host, args.app, args.pipelineConfigId)
template = convert(pipeline_config)
print(render(template))
| [
11748,
1822,
29572,
198,
11748,
4866,
198,
11748,
33918,
198,
11748,
28686,
198,
11748,
25064,
198,
11748,
279,
4798,
198,
198,
11748,
7007,
198,
11748,
331,
43695,
198,
11748,
331,
43695,
13,
67,
15829,
198,
198,
6738,
17268,
1330,
14230... | 2.7375 | 240 |
import cv2
import numpy as np
import matplotlib.pyplot as plt
| [
11748,
269,
85,
17,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198
] | 2.818182 | 22 |
from flask import Flask
app = Flask(__name__)
app.config['SECRET_KEY'] = '51ab273a7b16ee7bd10722a5e63cd4c7'
from library import routes
| [
6738,
42903,
1330,
46947,
201,
198,
1324,
796,
46947,
7,
834,
3672,
834,
8,
201,
198,
1324,
13,
11250,
17816,
23683,
26087,
62,
20373,
20520,
796,
705,
4349,
397,
27367,
64,
22,
65,
1433,
1453,
22,
17457,
15982,
1828,
64,
20,
68,
50... | 2.431034 | 58 |
import cv2
import pickle
import os
import numpy as np
import tensorflow as tf
#preprocessing function used for tensorflow
#get labels in .pkl file saved in face_train.py
labels = {}
with open("labels.pkl", 'rb') as f:
og_labels = pickle.load(f)
labels = {v:k for k,v in og_labels.items()} #invert the dictionary so that key is value (0 or 1) and value is key (aryan, ansh)
#labels: keys = id_ (0,1); values = aryan or ansh
#define the cascade that finds faces
face_cascade = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_alt2.xml')
#define cascades for finding noses and mouths (for mask recognition)
face_detect = cv2.CascadeClassifier('cascades/data/haarcascade_frontalface_default.xml') #because alt2 won't detect masked faces
nose_detect = cv2.CascadeClassifier('cascades/data/haarcascade_mcs_nose.xml')
mouth_detect = cv2.CascadeClassifier('cascades/data/haarcascade_mcs_mouth.xml')
#initialize recognizer
recognizer = cv2.face.LBPHFaceRecognizer_create()
#load training
recognizer.read("trainer.yml")
#load the saved TensorFlow model
tf_model = tf.keras.models.load_model(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tf_saved_model'))
cap = cv2.VideoCapture(0)
while True:
#capture frame-by-frame
ret, frame = cap.read()
#convert frames to grayscale so classifier works (see documentation)
grayscale = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
#flip horizontally (experimentation)
flipped = cv2.flip(grayscale, 1)
#find faces in frame (detectMultiScale returns list of rectangles where face found)
faces = face_cascade.detectMultiScale(grayscale, scaleFactor=1.5, minNeighbors=4)
#iterate over rectangles returned by faces
for (x, y, w, h) in faces:
#region of interest (isolate the rectangle in the image with my face)
roi_color = frame[y:y+h, x:x+w]
roi_gray = grayscale[y:y+h, x:x+w]
#predict the face's ID (id_) and (loss): the lower the better --> try tensorflow
id_, loss = recognizer.predict(roi_gray)
tf_id = np.argmax(tf_model.predict(np.array([preprocess(roi_color)])))
font = cv2.FONT_HERSHEY_COMPLEX
name = labels[id_]
color = (0, 255, 0) #BGR
stroke = 2
#uncomment for LBPH classification for comparison
cv2.putText(frame, name, (x, y+h+20), font, 1, color, thickness=stroke, lineType=cv2.LINE_AA)
tf_font = cv2.FONT_HERSHEY_COMPLEX
tf_name = labels[tf_id]
tf_color = (0, 145, 255) #BGR
tf_stroke = 2
cv2.putText(frame, tf_name, (x,y), tf_font, 1, tf_color, thickness=tf_stroke, lineType=cv2.LINE_AA)
#save the region of interest as an image
#cv2.imwrite("color_face.png", roi_color)
#draw a rectangle surrounding face
rec_color = (255, 80, 0) #BGR (a nice blue)
rec_stroke = 2 #thickness
cv2.rectangle(frame, (x, y), (x+w, y+h), rec_color, rec_stroke)
#MASK DETECTION SECTION
faces = face_detect.detectMultiScale(grayscale, scaleFactor=1.5, minNeighbors=1)
face_list = []
for (x,y,w,h) in faces:
if w*h > 60000: #test for detected 'faces' that are too small to be actual faces
face_list.append(w*h)
#choose the face with the biggest region of interest area
if len(face_list) > 0:
face_index = np.argmax(face_list)
face = faces[face_index]
x = face[0]
y = face[1]
w = face[2]
h = face[3]
roi = grayscale[y:y+h, x:x+w] #roi has different coordinates than frame
# uncomment to see another rectangle around the face
# cv2.rectangle(frame, (x, y), (x+w, y+h), (255,105,0), 2, cv2.LINE_AA) #a nice blue
detected_mouths = mouth_detect.detectMultiScale(roi, scaleFactor=2, minNeighbors=None)
mouth_list = []
for (x,y,w,h) in detected_mouths:
if w*h > 4200 and y > (2 * (face[3] // 3)): #eliminate "mouths" that are too small and not in the bottom 1/3 of the face
mouth_list.append(w*h)
#choose the "mouth" with the biggest area (eyes tend to be classified as mouths)
# uncomment the code block if you want a rectangle drawn around the mouth in the video feed
# if len(mouth_list) > 0:
# mouth_index = np.argmax(mouth_list)
# mouth = detected_mouths[mouth_index]
# mouthx = face[0] + mouth[0]
# mouthy = face[1] + mouth[1]
# cv2.rectangle(frame, (mouthx, mouthy), (mouthx+mouth[2], mouthy+mouth[3]), (0, 255, 0), 2, cv2.LINE_AA) #green
detected_noses = nose_detect.detectMultiScale(roi, scaleFactor=1.1, minNeighbors=1)
nose_list = []
for (x,y,w,h) in detected_noses:
if w*h > 5300 and ((face[3] // 3)) < y < (2 * (face[3] // 3)): #eliminate "noses" that are too small and not in the middle 1/3 of the face
nose_list.append(w*h)
#choose the nose with the biggest area
# uncomment the code block if you want a rectangle drawn around the nose in the video feed
# if len(nose_list) > 0:
# nose_index = np.argmax(nose_list)
# nose = detected_noses[nose_index]
# nosex = face[0] + nose[0]
# nosey = face[1] + nose[1]
# cv2.rectangle(frame, (nosex, nosey), (nosex+nose[2], nosey+nose[3]), (0, 0, 255), 2, cv2.LINE_AA) #red
#defines the text parameters
mask_font = cv2.FONT_HERSHEY_TRIPLEX
mask_name = ''
mask_color = (0, 0, 255) #BGR
mask_stroke = 2
if len(mouth_list) > 0 and len(nose_list) > 0: #both mouth and nose detected
mask_color = (0, 0, 255) # red
mask_name = "Mask Not Detected"
elif len(mouth_list) == 0 and len(nose_list) == 0: #neither mouth nor nose detected
mask_color = (0, 255, 0) #make the text color green
mask_name = "Mask Detected"
else: # mouth or nose detected, but not both
mask_color = (0, 255, 255) # yellow
mask_name = "Incorrectly Worn Mask Detected"
#get rectangle boundary of text
text_size = cv2.getTextSize(mask_name, mask_font, 1, mask_stroke)[0]
#frame.shape returns a tuple of (height, width)
#determine coords of text based on its size and the size of the frame (bottom middle)
mask_x = (frame.shape[1] - text_size[0]) // 2
mask_y = frame.shape[0]
cv2.putText(frame, mask_name, (mask_x, mask_y), mask_font, 1, mask_color, thickness=mask_stroke, lineType=cv2.LINE_AA)
#display resulting frame
cv2.imshow('Live Webcam Feed', frame)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
#when everything is done, release the capture (end)
cap.release()
cv2.destroyAllWindows()
| [
11748,
269,
85,
17,
198,
11748,
2298,
293,
198,
11748,
28686,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
11192,
273,
11125,
355,
48700,
198,
198,
2,
3866,
36948,
2163,
973,
329,
11192,
273,
11125,
198,
198,
2,
1136,
14722,
287,
7... | 2.26755 | 3,020 |
#!/usr/bin/env python
# ======================================================================
#
# Brad T. Aagaard, U.S. Geological Survey
#
# ======================================================================
"""Test tile caching using GoogleTiles.
"""
import sys
import matplotlib.pyplot as plt
from cartopy.io.img_tiles import GoogleTiles
sys.path.append("../cartopy_extra_tiles")
from cached_tiler import CachedTiler
tiler = CachedTiler(GoogleTiles(), cache_dir="~/data_scratch/images/tiles")
ax = plt.axes(projection=tiler.crs)
ax.set_extent([-123, -121.5, 37, 38.5])
ax.add_image(tiler, 8)
plt.show()
# End of file
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
38093,
1421,
28,
198,
2,
198,
2,
8114,
309,
13,
317,
8126,
446,
11,
471,
13,
50,
13,
34246,
13084,
198,
2,
198,
2,
38093,
1421,
28,
198,
37811,
14402,
17763,
40918,
1262,
3012,
... | 3.103448 | 203 |
""" This module contains the helper functions which can be called directly by
algorithm implementers to obtain the metrices """
# INTEL CONFIDENTIAL
#
# Copyright (C) 2021 Intel Corporation
#
# This software and the related documents are Intel copyrighted materials, and
# your use of them is governed by the express license under which they were provided to
# you ("License"). Unless the License provides otherwise, you may not use, modify, copy,
# publish, distribute, disclose or transmit this software or the related documents
# without Intel's prior written permission.
#
# This software and the related documents are provided as is,
# with no express or implied warranties, other than those that are expressly stated
# in the License.
from ote_sdk.entities.resultset import ResultSetEntity
from ote_sdk.usecases.evaluation.accuracy import Accuracy
from ote_sdk.usecases.evaluation.averaging import MetricAverageMethod
from ote_sdk.usecases.evaluation.dice import DiceAverage
from ote_sdk.usecases.evaluation.f_measure import FMeasure
class MetricsHelper:
"""
Contains metrics computation functions.
TODO: subject for refactoring.
"""
@staticmethod
def compute_f_measure(
resultset: ResultSetEntity,
vary_confidence_threshold: bool = False,
vary_nms_threshold: bool = False,
cross_class_nms: bool = False,
) -> FMeasure:
"""
Compute the F-Measure on a resultset given some parameters.
:param resultset: The resultset used to compute f-measure
:param vary_confidence_threshold: Flag specifying whether f-measure shall be computed for different confidence
threshold values
:param vary_nms_threshold: Flag specifying whether f-measure shall be computed for different NMS
threshold values
:param cross_class_nms: Whether non-max suppression should be applied cross-class
:return: FMeasure object
"""
return FMeasure(
resultset, vary_confidence_threshold, vary_nms_threshold, cross_class_nms
)
@staticmethod
def compute_dice_averaged_over_pixels(
resultset: ResultSetEntity,
average: MetricAverageMethod = MetricAverageMethod.MACRO,
) -> DiceAverage:
"""
Compute the Dice average on a resultset, averaged over the pixels.
:param resultset: The resultset used to compute the Dice average
:param average: The averaging method, either MICRO or MACRO
:return: DiceAverage object
"""
return DiceAverage(resultset=resultset, average=average)
@staticmethod
def compute_accuracy(
resultset: ResultSetEntity,
average: MetricAverageMethod = MetricAverageMethod.MICRO,
) -> Accuracy:
"""
Compute the Accuracy on a resultset, averaged over the different label groups.
:param resultset: The resultset used to compute the accuracy
:param average: The averaging method, either MICRO or MACRO
:return: Accuracy object
"""
return Accuracy(resultset=resultset, average=average)
| [
37811,
770,
8265,
4909,
262,
31904,
5499,
543,
460,
307,
1444,
3264,
416,
198,
282,
42289,
3494,
364,
284,
7330,
262,
1138,
45977,
37227,
198,
198,
2,
17828,
3698,
7102,
37,
25256,
12576,
198,
2,
198,
2,
15069,
357,
34,
8,
33448,
81... | 2.938776 | 1,078 |
from ._pyimports import levenshtein, fast_comp
def ilevenshtein(seq1, seqs, max_dist=-1):
"""Compute the Levenshtein distance between the sequence `seq1` and the series
of sequences `seqs`.
`seq1`: the reference sequence
`seqs`: a series of sequences (can be a generator)
`max_dist`: if provided and > 0, only the sequences which distance from
the reference sequence is lower or equal to this value will be returned.
The return value is a series of pairs (distance, sequence).
The sequence objects in `seqs` are expected to be of the same kind than
the reference sequence in the C implementation; the same holds true for
`ifast_comp`.
"""
for seq2 in seqs:
dist = levenshtein(seq1, seq2, max_dist=max_dist)
if dist != -1:
yield dist, seq2
def ifast_comp(seq1, seqs, transpositions=False):
"""Return an iterator over all the sequences in `seqs` which distance from
`seq1` is lower or equal to 2. The sequences which distance from the
reference sequence is higher than that are dropped.
`seq1`: the reference sequence.
`seqs`: a series of sequences (can be a generator)
`transpositions` has the same sense than in `fast_comp`.
The return value is a series of pairs (distance, sequence).
You might want to call `sorted()` on the iterator to get the results in a
significant order:
>>> g = ifast_comp("foo", ["fo", "bar", "foob", "foo", "foobaz"])
>>> sorted(g)
[(0, 'foo'), (1, 'fo'), (1, 'foob')]
"""
for seq2 in seqs:
dist = fast_comp(seq1, seq2, transpositions)
if dist != -1:
yield dist, seq2
| [
6738,
47540,
9078,
320,
3742,
1330,
443,
574,
1477,
22006,
11,
3049,
62,
5589,
198,
198,
4299,
220,
576,
574,
1477,
22006,
7,
41068,
16,
11,
33756,
82,
11,
3509,
62,
17080,
10779,
16,
2599,
198,
197,
37811,
7293,
1133,
262,
1004,
57... | 2.934211 | 532 |
#!/usr/bin/python
#coding=utf-8
import threading
import time
import queue
from kscore.session import get_session
from threading import Lock
'''
ๅฝๅซๆๅคง้็ๅฌๅจ็ๅฎๆๅกๅจ็ๆถๅ
ๅฏไปฅไฝฟ็จ่ฟไธช่ๆฌ็คบไพ๏ผๆฅ่ง้ฟๅ
จ้็ดๆฅๆฅ่ฏขๅ
จ้็ๅฌๅจๅ่กจ่ฟๆ
ข็้ฎ้ข
ๆต็จ๏ผ
้กน็ฎๅถ่ทๅ้กน็ฎ->ๆฅ่ฏข่ด่ฝฝๅ่กก->ๅนถๅๆ็
ง่ด่ฝฝๅ่กกIDๆฅ่ฏข็ๅฌๅจ
ไฝฟ็จไฟกๅท้Semaphoreๅๅนถๅ้ๆงๅถ
ไฝฟ็จqueueๅๅค็บฟ็จๆ้ๅนถๅๆไฝ
'''
sem = threading.Semaphore(20)
lock = Lock()
results = []
if __name__ == "__main__":
s = get_session()
region = 'cn-shanghai-2'
slbClient = s.create_client("slb", region, use_ssl=True)
'''
่ทๅ้กน็ฎๅถ
'''
# IAM
projects = []
iam = s.create_client("iam", use_ssl=False)
resp = iam.get_account_all_project_list()
for item in resp["ListProjectResult"]["ProjectList"]:
projects.append(item['ProjectId'])
_param = {}
count = 1
for i in projects:
key = "ProjectId." + str(count)
_param.update({key: str(i)})
count = count + 1
print(count)
'''
่ทๅ่ด่ฝฝๅ่กก
'''
allLbs = slbClient.describe_load_balancers(**_param)
count = 0
q = queue.Queue(len(allLbs["LoadBalancerDescriptions"]))
old = time.time()
for lb in allLbs["LoadBalancerDescriptions"]:
q.put(lb["LoadBalancerId"])
for lb in allLbs["LoadBalancerDescriptions"]:
lb_id = lb["LoadBalancerId"]
sem.acquire()
print(lb_id, q)
thread = SlbThread(lb_id, q)
thread.start()
q.join()
print(time.time()-old)
print(len(results))
# for item in results:
# print item['ListenerName']
# print item['ListenerId'] | [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
66,
7656,
28,
40477,
12,
23,
198,
198,
11748,
4704,
278,
198,
11748,
640,
198,
11748,
16834,
198,
6738,
479,
26675,
13,
29891,
1330,
651,
62,
29891,
198,
6738,
4704,
278,
1330,
13656,
19... | 1.710112 | 890 |
import json
result_det = 'output/detections_postNMS.json'
result_rev = 'output/retreival_postNMS.json'
with open(result_det, 'r') as fobj:
data_det = json.load(fobj)
with open(result_rev, 'r') as fobj:
data_rev = json.load(fobj)
data_submission = data_det
data_submission['detection'] = data_submission.pop('results')
data_submission['retrieval'] = data_rev['results']
with open("output/submission.json", "w") as fp:
json.dump(data_submission,fp) | [
11748,
33918,
198,
198,
20274,
62,
15255,
796,
705,
22915,
14,
15255,
478,
507,
62,
7353,
45,
5653,
13,
17752,
6,
198,
20274,
62,
18218,
796,
705,
22915,
14,
1186,
260,
2473,
62,
7353,
45,
5653,
13,
17752,
6,
628,
198,
4480,
1280,
... | 2.563536 | 181 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
Description [ๆฐๆฎๅบarticle่กจ ๅข/ๅ /ๆน/ๆฅ ๆไฝ]
Created by yifei on 2018/2/6.
"""
| [
2,
48443,
14629,
14,
8800,
14,
29412,
198,
2,
532,
9,
12,
19617,
25,
41002,
12,
23,
532,
9,
12,
198,
198,
37811,
198,
12489,
685,
46763,
108,
162,
235,
106,
41753,
241,
20205,
26193,
101,
10263,
95,
252,
14,
26344,
254,
14,
162,
... | 1.704225 | 71 |
#!/usr/bin/env python
import argparse
import datetime
import json
import pathlib
import re
import sys
from csv import DictReader
from typing import Iterator
TAXONOMIC_ORDER = [
"topic",
"variable",
"category",
"sub-category",
]
NESTED_KEYS = {
"topic": "variables",
"variable": "categories",
"category": "sub-categories",
"sub-category": None
}
DESC_PLACEHOLDER = "Lorem ipsum dolor sit amet."
if __name__=="__main__":
parser = argparse.ArgumentParser()
parser.add_argument("fp", type=str, help="String, filepath for csv file to create content JSON from.")
parser.add_argument("--no-metadata", default=False, action="store_true", help="Do not append metadata to content JSON (default True)")
args = parser.parse_args()
fp = pathlib.PurePath(args.fp)
main(fp, args.no_metadata) | [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
11748,
1822,
29572,
198,
11748,
4818,
8079,
198,
11748,
33918,
198,
11748,
3108,
8019,
198,
11748,
302,
198,
11748,
25064,
198,
198,
6738,
269,
21370,
1330,
360,
713,
33634,
198,
67... | 2.731392 | 309 |
"""Code for custom Altair themes.
Available themes:
- streamlit -- matches theme in Streamlit website
- ggplot2 -- hybrid between Streamlit's and ggplot2's themes.
"""
if __name__ == "__main__":
pass
| [
37811,
10669,
329,
2183,
12344,
958,
13460,
13,
198,
198,
10493,
13460,
25,
198,
12,
4269,
18250,
1377,
7466,
7505,
287,
13860,
18250,
3052,
198,
12,
308,
70,
29487,
17,
1377,
14554,
1022,
13860,
18250,
338,
290,
308,
70,
29487,
17,
3... | 3.215385 | 65 |
# -*- coding: utf-8 -*-
'''Base Class of PlotCanvas'''
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from PyQt5.QtWidgets import QSizePolicy
class PlotCanvas(FigureCanvas):
'''FigureCanvas Class'''
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
7061,
6,
14881,
5016,
286,
28114,
6090,
11017,
7061,
6,
198,
6738,
2603,
29487,
8019,
13,
1891,
2412,
13,
1891,
437,
62,
39568,
20,
9460,
1330,
11291,
6090,
11017,
48,
... | 2.787879 | 99 |
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-LOG ่้ฒธๆฅๅฟๅนณๅฐ available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-LOG ่้ฒธๆฅๅฟๅนณๅฐ is licensed under the MIT License.
License for BK-LOG ่้ฒธๆฅๅฟๅนณๅฐ:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from collections import defaultdict
from django.utils.translation import ugettext as _
from apps.api import TransferApi
from apps.log_databus.constants import STORAGE_CLUSTER_TYPE
from apps.log_measure.utils.metric import MetricUtils
from bk_monitor.constants import TimeFilterEnum
from bk_monitor.utils.metric import register_metric, Metric
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
24893,
1087,
318,
10607,
284,
1104,
262,
1280,
2723,
2055,
416,
1642,
347,
42,
12,
25294,
5525,
241,
251,
165,
110,
116,
33768,
98,
33232,
245,
33176,
111,
... | 3.584906 | 477 |
# -*- coding: utf-8 -*-
r'''
Saltclass Configuration
=======================
.. code-block:: yaml
master_tops:
saltclass:
path: /srv/saltclass
Description
===========
This module clones the behaviour of reclass (http://reclass.pantsfullofunix.net/),
without the need of an external app, and add several features to improve flexibility.
Saltclass lets you define your nodes from simple ``yaml`` files (``.yml``) through
hierarchical class inheritance with the possibility to override pillars down the tree.
Features
========
- Define your nodes through hierarchical class inheritance
- Reuse your reclass datas with minimal modifications
- applications => states
- parameters => pillars
- Use Jinja templating in your yaml definitions
- Access to the following Salt objects in Jinja
- ``__opts__``
- ``__salt__``
- ``__grains__``
- ``__pillars__``
- ``minion_id``
- Chose how to merge or override your lists using ^ character (see examples)
- Expand variables ${} with possibility to escape them if needed \${} (see examples)
- Ignores missing node/class and will simply return empty without breaking the pillar module completely - will be logged
An example subset of datas is available here: http://git.mauras.ch/salt/saltclass/src/master/examples
========================== ===========
Terms usable in yaml files Description
========================== ===========
classes A list of classes that will be processed in order
states A list of states that will be returned by master_tops function
pillars A yaml dictionnary that will be returned by the ext_pillar function
environment Node saltenv that will be used by master_tops
========================== ===========
A class consists of:
- zero or more parent classes
- zero or more states
- any number of pillars
A child class can override pillars from a parent class.
A node definition is a class in itself with an added ``environment`` parameter for ``saltenv`` definition.
Class names
===========
Class names mimic salt way of defining states and pillar files.
This means that ``default.users`` class name will correspond to one of these:
- ``<saltclass_path>/classes/default/users.yml``
- ``<saltclass_path>/classes/default/users/init.yml``
Saltclass file hierachy
=======================
A saltclass tree would look like this:
.. code-block:: text
<saltclass_path>
โโโ classes
โ โโโ app
โ โ โโโ borgbackup.yml
โ โ โโโ ssh
โ โ โโโ server.yml
โ โโโ default
โ โ โโโ init.yml
โ โ โโโ motd.yml
โ โ โโโ users.yml
โ โโโ roles
โ โ โโโ app.yml
โ โ โโโ nginx
โ โ โโโ init.yml
โ โ โโโ server.yml
โ โโโ subsidiaries
โ โโโ gnv.yml
โ โโโ qls.yml
โ โโโ zrh.yml
โโโ nodes
โโโ geneva
โ โโโ gnv.node1.yml
โโโ lausanne
โ โโโ qls.node1.yml
โ โโโ qls.node2.yml
โโโ node127.yml
โโโ zurich
โโโ zrh.node1.yml
โโโ zrh.node2.yml
โโโ zrh.node3.yml
Saltclass Examples
==================
``<saltclass_path>/nodes/lausanne/qls.node1.yml``
.. code-block:: jinja
environment: base
classes:
{% for class in ['default'] %}
- {{ class }}
{% endfor %}
- subsidiaries.{{ __grains__['id'].split('.')[0] }}
``<saltclass_path>/classes/default/init.yml``
.. code-block:: yaml
classes:
- default.users
- default.motd
states:
- openssh
pillars:
default:
network:
dns:
srv1: 192.168.0.1
srv2: 192.168.0.2
domain: example.com
ntp:
srv1: 192.168.10.10
srv2: 192.168.10.20
``<saltclass_path>/classes/subsidiaries/gnv.yml``
.. code-block:: yaml
pillars:
default:
network:
sub: Geneva
dns:
srv1: 10.20.0.1
srv2: 10.20.0.2
srv3: 192.168.1.1
domain: gnv.example.com
users:
adm1:
uid: 1210
gid: 1210
gecos: 'Super user admin1'
homedir: /srv/app/adm1
adm3:
uid: 1203
gid: 1203
gecos: 'Super user adm
Variable expansions
===================
Escaped variables are rendered as is: ``${test}``
Missing variables are rendered as is: ``${net:dns:srv2}``
.. code-block:: yaml
pillars:
app:
config:
dns:
srv1: ${default:network:dns:srv1}
srv2: ${net:dns:srv2}
uri: https://application.domain/call?\${test}
prod_parameters:
- p1
- p2
- p3
pkg:
- app-core
- app-backend
List override
=============
Not using ``^`` as the first entry will simply merge the lists
.. code-block:: yaml
pillars:
app:
pkg:
- ^
- app-frontend
.. note:: **Known limitation**
Currently you can't have both a variable and an escaped variable in the same string as the
escaped one will not be correctly rendered - '\${xx}' will stay as is instead of being rendered as '${xx}'
'''
# import python libs
from __future__ import absolute_import, print_function, unicode_literals
import logging
import salt.utils.saltclass as sc
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Load this master_tops module only when a ``saltclass`` entry is
    present under ``master_tops`` in the master configuration.
    '''
    # bool() collapses the truthy/falsy config value to exactly the
    # True/False pair the loader expects.
    return bool(__opts__['master_tops'].get('saltclass'))
def top(**kwargs):
    '''
    Compile tops for one minion.

    Assembles the ``salt_data`` dictionary expected by
    ``salt.utils.saltclass.get_tops`` (opts, grains, pillar, minion id and
    the saltclass tree path) and delegates to it.  Returns an empty dict
    when the minion id cannot be determined from ``kwargs['opts']``.
    '''
    # The saltclass tree location comes from the master_tops config;
    # fall back to the stock path when the operator did not set one.
    conf = __opts__['master_tops']['saltclass']
    if 'path' not in conf:
        tree_path = '/srv/saltclass'
        log.warning('path variable unset, using default: %s', tree_path)
    else:
        tree_path = conf['path']

    if 'id' not in kwargs['opts']:
        log.warning('Minion id not found - Returning empty dict')
        return {}

    minion_id = kwargs['opts']['id']
    # __salt__ and __pillar__ are intentionally empty at top-compilation
    # time; get_tops only needs opts, grains, the id and the tree path.
    salt_data = {
        '__opts__': kwargs['opts'],
        '__salt__': {},
        '__grains__': kwargs['grains'],
        '__pillar__': {},
        'minion_id': minion_id,
        'path': tree_path,
    }
    return sc.get_tops(minion_id, salt_data)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
81,
7061,
6,
198,
43061,
4871,
28373,
198,
4770,
1421,
18604,
198,
198,
492,
2438,
12,
9967,
3712,
331,
43695,
628,
220,
220,
220,
4958,
62,
35011,
25,
198,
220,
220,
... | 2.331629 | 2,934 |
# Copyright (c) 2013, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from kernpart import Kernpart
import numpy as np
four_over_tau = 2./np.pi
class POLY(Kernpart):
"""
Polynomial kernel parameter initialisation. Included for completeness, but generally not recommended, is the polynomial kernel:
.. math::
       k(x, y) = \sigma^2\*(\sigma_w^2 x'y+\sigma_b^2)^d
The kernel parameters are :math:`\sigma^2` (variance), :math:`\sigma^2_w`
(weight_variance), :math:`\sigma^2_b` (bias_variance) and d
(degree). Only gradients of the first three are provided for
kernel optimisation, it is assumed that polynomial degree would
be set by hand.
The kernel is not recommended as it is badly behaved when the
:math:`\sigma^2_w\*x'\*y + \sigma^2_b` has a magnitude greater than one. For completeness
there is an automatic relevance determination version of this
kernel provided (NOTE YET IMPLEMENTED!).
:param input_dim: the number of input dimensions
:type input_dim: int
:param variance: the variance :math:`\sigma^2`
:type variance: float
:param weight_variance: the vector of the variances of the prior over input weights in the neural network :math:`\sigma^2_w`
:type weight_variance: array or list of the appropriate size (or float if there is only one weight variance parameter)
:param bias_variance: the variance of the prior over bias parameters :math:`\sigma^2_b`
:param degree: the degree of the polynomial.
:type degree: int
    :param ARD: Auto Relevance Determination. If equal to "False", the kernel is isotropic (i.e. one weight variance parameter :math:`\sigma^2_w`), otherwise there is one weight variance parameter per dimension.
:type ARD: Boolean
:rtype: Kernpart object
"""
    def K(self, X, X2, target):
        """Accumulate the covariance between X and X2 into *target* in place.

        :param X: first block of inputs (rows are data points).
        :param X2: second block of inputs, or None to mean X2 == X
            (see the X2-is-None branch in dK_dX).
        :param target: array the scaled covariance is added into; this
            method returns nothing.
        """
        # _K_computations caches self._K_dvar (the unscaled kernel matrix);
        # the helper is defined elsewhere in this class, outside this chunk.
        self._K_computations(X, X2)
        target += self.variance*self._K_dvar
    def Kdiag(self, X, target):
        """Accumulate the diagonal of the covariance matrix for X into *target*.

        :param X: inputs.
        :param target: array updated in place with the variance-scaled
            diagonal; this method returns nothing.
        """
        # _K_diag_computations caches self._K_diag_dvar (unscaled diagonal);
        # the helper is defined outside this chunk.
        self._K_diag_computations(X)
        target+= self.variance*self._K_diag_dvar
    def dK_dtheta(self, dL_dK, X, X2, target):
        """Accumulate the gradient of the objective w.r.t. the kernel
        parameters into *target*.

        target[0] receives the variance gradient, target[1] the
        weight-variance gradient, target[2] the bias-variance gradient.
        The polynomial degree gets no gradient; per the class docstring it
        is assumed to be set by hand.

        :param dL_dK: gradient of the objective w.r.t. the covariance matrix.
        """
        self._K_computations(X, X2)
        # Derivative of arg**degree w.r.t. its argument, scaled by variance.
        base = self.variance*self.degree*self._K_poly_arg**(self.degree-1)
        base_cov_grad = base*dL_dK
        # Chain rule: variance enters linearly; weight variance through the
        # inner-product term; bias variance additively inside the argument.
        target[0] += np.sum(self._K_dvar*dL_dK)
        target[1] += (self._K_inner_prod*base_cov_grad).sum()
        target[2] += base_cov_grad.sum()
    def dK_dX(self, dL_dK, X, X2, target):
        """Accumulate the gradient of the objective w.r.t. the inputs X
        into *target*.

        When X2 is None the covariance was computed as K(X, X), so X
        appears in both argument slots — hence the extra factor of 2 in
        that branch.

        :param dL_dK: gradient of the objective w.r.t. the covariance matrix.
        """
        self._K_computations(X, X2)
        # _K_poly_arg is the cached polynomial argument
        # (weight_variance * <x, y> + bias_variance) -- set by the helper.
        arg = self._K_poly_arg
        if X2 is None:
            target += 2*self.weight_variance*self.degree*self.variance*(((X[None,:, :])) *(arg**(self.degree-1))[:, :, None]*dL_dK[:, :, None]).sum(1)
        else:
            target += self.weight_variance*self.degree*self.variance*(((X2[None,:, :])) *(arg**(self.degree-1))[:, :, None]*dL_dK[:, :, None]).sum(1)
    def dKdiag_dX(self, dL_dKdiag, X, target):
        """Accumulate the gradient of the objective w.r.t. X, taken through
        the covariance diagonal, into *target*.

        :param dL_dKdiag: gradient of the objective w.r.t. the Kdiag entries.
        """
        self._K_diag_computations(X)
        arg = self._K_diag_poly_arg
        # Factor 2: X occupies both argument slots of k(x, x) on the diagonal.
        target += 2.*self.weight_variance*self.degree*self.variance*X*dL_dKdiag[:, None]*(arg**(self.degree-1))[:, None]
| [
2,
15069,
357,
66,
8,
2211,
11,
14714,
88,
7035,
357,
3826,
37195,
20673,
13,
14116,
737,
198,
2,
49962,
739,
262,
347,
10305,
513,
12,
565,
682,
5964,
357,
3826,
38559,
24290,
13,
14116,
8,
198,
198,
6738,
479,
1142,
3911,
1330,
... | 2.4375 | 1,424 |
from datetime import datetime, timedelta
from pyramid.view import (
view_config
)
from . import (
schema
)
from toybox.swagger import (
withswagger
)
@view_config(decorator=withswagger(schema.Input, schema.Output), renderer='vjson', request_method='GET', route_name='views')
def hello(context, request):
    """
    request.GET:
    * 'name' - `{"type": "string", "example": "Ada", "default": "Friend"}`
    """
    # The docstring declares a default of "Friend" for the optional 'name'
    # query parameter, but indexing request.GET raised KeyError when it was
    # omitted.  Honour the documented default instead.
    name = request.GET.get("name", "Friend")
    return {'message': 'Welcome, {}!'.format(name)}
@view_config(decorator=withswagger(schema.AddInput, schema.AddOutput), renderer='vjson', request_method='POST', route_name='views1')
def add(context, request):
    """
    request.json_body:
    ```
    {
        "type": "object",
        "properties": {
            "x": {
                "type": "integer"
            },
            "y": {
                "type": "integer"
            }
        },
        "required": [
            "x",
            "y"
        ]
    }
    ```
    """
    # Both operands are marked required by the schema, so direct indexing
    # into the JSON body is safe here.
    augend = request.json["x"]
    addend = request.json["y"]
    return {"result": augend + addend}
@view_config(decorator=withswagger(schema.DateaddInput, schema.DateaddOutput), renderer='vjson', request_method='POST', route_name='views2')
def dateadd(context, request):
    """
    request.json_body:
    ```
    {
        "type": "object",
        "properties": {
            "value": {
                "type": "string",
                "format": "date"
            },
            "addend": {
                "minimum": 1,
                "type": "integer"
            },
            "unit": {
                "type": "string",
                "default": "days",
                "enum": [
                    "days",
                    "minutes"
                ]
            }
        },
        "required": [
            "addend"
        ]
    }
    ```
    """
    # Only 'addend' is required by the schema; 'value' and 'unit' are
    # optional, so use .get() rather than indexing — the original raised
    # KeyError when either optional key was omitted, even though the
    # 'value or utcnow()' fallback below shows absence is expected.
    value = request.json.get("value")
    addend = request.json["addend"]
    unit = request.json.get("unit", "days")
    # NOTE(review): when 'value' is supplied it arrives as an ISO date
    # string unless the swagger decorator deserializes it — confirm; a
    # plain str would not support '+ timedelta' below.
    value = value or datetime.utcnow()
    if unit == 'minutes':
        delta = timedelta(minutes=addend)
    else:
        delta = timedelta(days=addend)
    result = value + delta
    return {'result': result}
| [
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
6738,
27944,
13,
1177,
1330,
357,
198,
220,
220,
220,
1570,
62,
11250,
198,
8,
198,
6738,
764,
1330,
357,
198,
220,
220,
220,
32815,
198,
8,
198,
6738,
13373,
3524,
13,
2032... | 1.950584 | 1,113 |